diff options
725 files changed, 17244 insertions, 16794 deletions
diff --git a/.travis.compiler.sh b/.travis.compiler.sh index 35e79e177ef..6058d95a170 100755 --- a/.travis.compiler.sh +++ b/.travis.compiler.sh @@ -39,8 +39,4 @@ else fi fi -# main.mysqlhotcopy_myisam consitently failed in travis containers -# https://travis-ci.org/grooverdan/mariadb-server/builds/217661580 -echo 'main.mysqlhotcopy_myisam : unstable in containers' >> ${TRAVIS_BUILD_DIR}/mysql-test/unstable-tests -echo 'archive.mysqlhotcopy_archive : unstable in containers' >> ${TRAVIS_BUILD_DIR}/mysql-test/unstable-tests set +v +x diff --git a/.travis.yml b/.travis.yml index f33de076289..d7657509297 100644 --- a/.travis.yml +++ b/.travis.yml @@ -42,13 +42,33 @@ matrix: compiler: gcc script: - ${CC} --version ; ${CXX} --version - # Just for disabling hotcopy tests for now - source .travis.compiler.sh # https://github.com/travis-ci/travis-ci/issues/7062 - /run/shm isn't writable or executable # in trusty containers - export MTR_MEM=/tmp - env DEB_BUILD_OPTIONS="parallel=6" debian/autobake-deb.sh; - ccache --show-stats + # Until OSX becomes a bit more stable: MDEV-12435 + allow_failures: + - os: osx + compiler: clang + env: GCC_VERSION=4.8 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=rpl + - os: osx + compiler: clang + env: GCC_VERSION=5 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=main,archive,optimizer_unfixed_bugs,parts,sys_vars,unit,vcol,innodb,innodb_gis,innodb_zip,innodb_fts + - os: osx + compiler: clang + env: GCC_VERSION=6 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=binlog,binlog_encryption,encryption,rocksdb + - os: osx + compiler: clang + env: GCC_VERSION=6 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=csv,federated,funcs_1,funcs_2,gcol,handler,heap,json,maria,percona,perfschema,plugins,multi_source,roles + # MDEV-13002 plugins.server_audit and plugins.thread_pool_server_audit test fail due to mysqltest error + - os: linux + compiler: gcc + env: GCC_VERSION=6 TYPE=RelWithDebInfo 
MYSQL_TEST_SUITES=csv,federated,funcs_1,funcs_2,gcol,handler,heap,json,maria,percona,perfschema,plugins,multi_source,roles + - os: linux + compiler: clang + env: GCC_VERSION=6 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=csv,federated,funcs_1,funcs_2,gcol,handler,heap,json,maria,percona,perfschema,plugins,multi_source,roles # Matrix include for coverity # - env: @@ -108,6 +128,7 @@ addons: - libaio-dev - libboost-dev - libcurl3-dev + - libdbd-mysql - libjudy-dev - libncurses5-dev - libpam0g-dev @@ -128,6 +149,7 @@ addons: - liblzma-dev - libzmq-dev - libdistro-info-perl + - uuid-dev - devscripts # implicit for any build on Ubuntu # libsystemd-daemon-dev # https://github.com/travis-ci/apt-package-whitelist/issues/3882 diff --git a/client/mysql.cc b/client/mysql.cc index 1033ae75881..b2e83eb8967 100644 --- a/client/mysql.cc +++ b/client/mysql.cc @@ -3059,7 +3059,6 @@ static int com_server_help(String *buffer __attribute__((unused)), { unsigned int num_fields= mysql_num_fields(result); my_ulonglong num_rows= mysql_num_rows(result); - mysql_fetch_fields(result); if (num_fields==3 && num_rows==1) { if (!(cur= mysql_fetch_row(result))) diff --git a/client/mysql_upgrade.c b/client/mysql_upgrade.c index 9ac438ff6ea..4e4970bacab 100644 --- a/client/mysql_upgrade.c +++ b/client/mysql_upgrade.c @@ -1130,7 +1130,7 @@ static int check_version_match(void) int main(int argc, char **argv) { - char self_name[FN_REFLEN]; + char self_name[FN_REFLEN + 1]; MY_INIT(argv[0]); @@ -1138,7 +1138,7 @@ int main(int argc, char **argv) if (GetModuleFileName(NULL, self_name, FN_REFLEN) == 0) #endif { - strncpy(self_name, argv[0], FN_REFLEN); + strmake_buf(self_name, argv[0]); } if (init_dynamic_string(&ds_args, "", 512, 256) || diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc index 1eba3f25ee8..45b332a6cd6 100644 --- a/client/mysqladmin.cc +++ b/client/mysqladmin.cc @@ -47,6 +47,7 @@ static uint opt_count_iterations= 0, my_end_arg; static ulong opt_connect_timeout, opt_shutdown_timeout; static 
char * unix_port=0; static char *opt_plugin_dir= 0, *opt_default_auth= 0; +static bool sql_log_bin_off= false; #ifdef HAVE_SMEM static char *shared_memory_base_name=0; @@ -600,6 +601,31 @@ static my_bool sql_connect(MYSQL *mysql, uint wait) } +static int maybe_disable_binlog(MYSQL *mysql) +{ + if (opt_local && !sql_log_bin_off) + { + if (mysql_query(mysql, "set local sql_log_bin=0")) + { + my_printf_error(0, "SET LOCAL SQL_LOG_BIN=0 failed; error: '%-.200s'", + error_flags, mysql_error(mysql)); + return -1; + } + } + sql_log_bin_off= true; + return 0; +} + + +int flush(MYSQL *mysql, const char *what) +{ + char buf[FN_REFLEN]; + my_snprintf(buf, sizeof(buf), "flush %s%s", + (opt_local && !sql_log_bin_off ? "local " : ""), what); + return mysql_query(mysql, buf); +} + + /** @brief Execute all commands @@ -616,6 +642,7 @@ static my_bool sql_connect(MYSQL *mysql, uint wait) static int execute_commands(MYSQL *mysql,int argc, char **argv) { + int ret = 0; const char *status; /* MySQL documentation relies on the fact that mysqladmin will @@ -628,17 +655,6 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) struct my_rnd_struct rand_st; char buff[FN_REFLEN + 20]; - if (opt_local) - { - sprintf(buff, "set local sql_log_bin=0"); - if (mysql_query(mysql, buff)) - { - my_printf_error(0, "SET LOCAL SQL_LOG_BIN=0 failed; error: '%-.200s'", - error_flags, mysql_error(mysql)); - return -1; - } - } - for (; argc > 0 ; argv++,argc--) { int command; @@ -650,6 +666,8 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) my_printf_error(0, "Too few arguments to create", error_flags); return 1; } + if (maybe_disable_binlog(mysql)) + return -1; sprintf(buff,"create database `%.*s`",FN_REFLEN,argv[1]); if (mysql_query(mysql,buff)) { @@ -667,6 +685,8 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) my_printf_error(0, "Too few arguments to drop", error_flags); return 1; } + if (maybe_disable_binlog(mysql)) + return -1; if 
(drop_db(mysql,argv[1])) return -1; argc--; argv++; @@ -707,7 +727,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) } case ADMIN_FLUSH_PRIVILEGES: case ADMIN_RELOAD: - if (mysql_query(mysql,"flush privileges")) + if (flush(mysql, "privileges")) { my_printf_error(0, "reload failed; error: '%s'", error_flags, mysql_error(mysql)); @@ -911,7 +931,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) } case ADMIN_FLUSH_LOGS: { - if (mysql_query(mysql,"flush logs")) + if (flush(mysql, "logs")) { my_printf_error(0, "flush failed; error: '%s'", error_flags, mysql_error(mysql)); @@ -921,7 +941,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) } case ADMIN_FLUSH_BINARY_LOG: { - if (mysql_query(mysql, "flush binary logs")) + if (flush(mysql, "binary logs")) { my_printf_error(0, "flush failed; error: '%s'", error_flags, mysql_error(mysql)); @@ -931,7 +951,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) } case ADMIN_FLUSH_ENGINE_LOG: { - if (mysql_query(mysql,"flush engine logs")) + if (flush(mysql, "engine logs")) { my_printf_error(0, "flush failed; error: '%s'", error_flags, mysql_error(mysql)); @@ -941,7 +961,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) } case ADMIN_FLUSH_ERROR_LOG: { - if (mysql_query(mysql, "flush error logs")) + if (flush(mysql, "error logs")) { my_printf_error(0, "flush failed; error: '%s'", error_flags, mysql_error(mysql)); @@ -951,7 +971,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) } case ADMIN_FLUSH_GENERAL_LOG: { - if (mysql_query(mysql, "flush general logs")) + if (flush(mysql, "general logs")) { my_printf_error(0, "flush failed; error: '%s'", error_flags, mysql_error(mysql)); @@ -961,7 +981,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) } case ADMIN_FLUSH_RELAY_LOG: { - if (mysql_query(mysql, "flush relay logs")) + if (flush(mysql, "relay logs")) { my_printf_error(0, "flush failed; error: '%s'", 
error_flags, mysql_error(mysql)); @@ -971,7 +991,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) } case ADMIN_FLUSH_SLOW_LOG: { - if (mysql_query(mysql,"flush slow logs")) + if (flush(mysql, "slow logs")) { my_printf_error(0, "flush failed; error: '%s'", error_flags, mysql_error(mysql)); @@ -981,7 +1001,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) } case ADMIN_FLUSH_HOSTS: { - if (mysql_query(mysql,"flush hosts")) + if (flush(mysql, "hosts")) { my_printf_error(0, "flush failed; error: '%s'", error_flags, mysql_error(mysql)); @@ -991,7 +1011,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) } case ADMIN_FLUSH_TABLES: { - if (mysql_query(mysql,"flush tables")) + if (flush(mysql, "tables")) { my_printf_error(0, "flush failed; error: '%s'", error_flags, mysql_error(mysql)); @@ -1001,7 +1021,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) } case ADMIN_FLUSH_STATUS: { - if (mysql_query(mysql,"flush status")) + if (flush(mysql, "status")) { my_printf_error(0, "flush failed; error: '%s'", error_flags, mysql_error(mysql)); @@ -1011,7 +1031,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) } case ADMIN_FLUSH_TABLE_STATISTICS: { - if (mysql_query(mysql,"flush table_statistics")) + if (flush(mysql, "table_statistics")) { my_printf_error(0, "flush failed; error: '%s'", error_flags, mysql_error(mysql)); @@ -1021,7 +1041,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) } case ADMIN_FLUSH_INDEX_STATISTICS: { - if (mysql_query(mysql,"flush index_statistics")) + if (flush(mysql, "index_statistics")) { my_printf_error(0, "flush failed; error: '%s'", error_flags, mysql_error(mysql)); @@ -1031,7 +1051,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) } case ADMIN_FLUSH_USER_STATISTICS: { - if (mysql_query(mysql,"flush user_statistics")) + if (flush(mysql, "user_statistics")) { my_printf_error(0, "flush failed; error: '%s'", error_flags, 
mysql_error(mysql)); @@ -1041,7 +1061,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) } case ADMIN_FLUSH_USER_RESOURCES: { - if (mysql_query(mysql, "flush user_resources")) + if (flush(mysql, "user_resources")) { my_printf_error(0, "flush failed; error: '%s'", error_flags, mysql_error(mysql)); @@ -1051,7 +1071,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) } case ADMIN_FLUSH_CLIENT_STATISTICS: { - if (mysql_query(mysql,"flush client_statistics")) + if (flush(mysql, "client_statistics")) { my_printf_error(0, "flush failed; error: '%s'", error_flags, mysql_error(mysql)); @@ -1061,9 +1081,8 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) } case ADMIN_FLUSH_ALL_STATISTICS: { - if (mysql_query(mysql, - "flush table_statistics,index_statistics," - "user_statistics,client_statistics")) + if (flush(mysql, "table_statistics,index_statistics," + "user_statistics,client_statistics")) { my_printf_error(0, "flush failed; error: '%s'", error_flags, mysql_error(mysql)); @@ -1073,9 +1092,8 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) } case ADMIN_FLUSH_ALL_STATUS: { - if (mysql_query(mysql, - "flush status,table_statistics,index_statistics," - "user_statistics,client_statistics")) + if (flush(mysql, "status,table_statistics,index_statistics," + "user_statistics,client_statistics")) { my_printf_error(0, "flush failed; error: '%s'", error_flags, mysql_error(mysql)); @@ -1093,6 +1111,8 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) start_time=time((time_t*) 0); my_rnd_init(&rand_st,(ulong) start_time,(ulong) start_time/2); + if (maybe_disable_binlog(mysql)) + return -1; if (argc < 1) { my_printf_error(0, "Too few arguments to change password", error_flags); @@ -1106,7 +1126,8 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) if (strcmp(typed_password, verified) != 0) { my_printf_error(0,"Passwords don't match",MYF(ME_BELL)); - return -1; + ret = -1; + goto 
password_done; } } else @@ -1133,7 +1154,8 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) { my_printf_error(0, "Could not determine old_passwords setting from server; error: '%s'", error_flags, mysql_error(mysql)); - return -1; + ret = -1; + goto password_done; } else { @@ -1144,7 +1166,8 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) "Could not get old_passwords setting from " "server; error: '%s'", error_flags, mysql_error(mysql)); - return -1; + ret = -1; + goto password_done; } if (!mysql_num_rows(res)) old= 1; @@ -1169,15 +1192,15 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) { my_printf_error(0, "Can't turn off logging; error: '%s'", error_flags, mysql_error(mysql)); - return -1; + ret = -1; } + else if (mysql_query(mysql,buff)) { if (mysql_errno(mysql)!=1290) { my_printf_error(0,"unable to change password; error: '%s'", error_flags, mysql_error(mysql)); - return -1; } else { @@ -1191,9 +1214,10 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) " --skip-grant-tables).\n" "Use: \"mysqladmin flush-privileges password '*'\"" " instead", error_flags); - return -1; } + ret = -1; } +password_done: /* free up memory from prompted password */ if (typed_password != argv[1]) { @@ -1300,7 +1324,7 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) return 1; } } - return 0; + return ret; } /** diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc index 92534501fb1..34e810f7b6b 100644 --- a/client/mysqlbinlog.cc +++ b/client/mysqlbinlog.cc @@ -2480,7 +2480,7 @@ static Exit_status dump_remote_log_entries(PRINT_EVENT_INFO *print_event_info, int2store(buf + BIN_LOG_HEADER_SIZE, binlog_flags); size_t tlen = strlen(logname); - if (tlen > UINT_MAX) + if (tlen > sizeof(buf) - 10) { error("Log name too long."); DBUG_RETURN(ERROR_STOP); diff --git a/client/mysqldump.c b/client/mysqldump.c index 8235090ccf3..2028606cd5a 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ 
-2174,6 +2174,7 @@ static void print_xml_comment(FILE *xml_file, size_t len, case '-': if (*(comment_string + 1) == '-') /* Only one hyphen allowed. */ break; + /* fall through */ default: fputc(*comment_string, xml_file); break; @@ -2850,6 +2851,8 @@ static uint get_table_structure(char *table, char *db, char *table_type, my_free(scv_buff); + if (path) + my_fclose(sql_file, MYF(MY_WME)); DBUG_RETURN(0); } else @@ -5940,8 +5943,7 @@ static my_bool get_view_structure(char *table, char* db) dynstr_free(&ds_view); } - if (switch_character_set_results(mysql, default_charset)) - DBUG_RETURN(1); + switch_character_set_results(mysql, default_charset); /* If a separate .sql file was opened, close it now */ if (sql_file != md_result_file) diff --git a/client/mysqltest.cc b/client/mysqltest.cc index c92281c59e9..4c14234174e 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -1720,13 +1720,12 @@ void log_msg(const char *fmt, ...) int cat_file(DYNAMIC_STRING* ds, const char* filename) { int fd; - size_t len; + int len; char buff[16384]; if ((fd= my_open(filename, O_RDONLY, MYF(0))) < 0) return 1; - while((len= my_read(fd, (uchar*)&buff, - sizeof(buff)-1, MYF(0))) > 0) + while((len= (int)my_read(fd, (uchar*)&buff, sizeof(buff)-1, MYF(0))) > 0) { char *p= buff, *start= buff,*end=buff+len; while (p < end) diff --git a/cmake/wsrep.cmake b/cmake/wsrep.cmake index 0a1c7dd9697..be56a4c0772 100644 --- a/cmake/wsrep.cmake +++ b/cmake/wsrep.cmake @@ -41,4 +41,3 @@ SET(WSREP_PROC_INFO ${WITH_WSREP}) IF(WITH_WSREP) SET(WSREP_PATCH_VERSION "wsrep_${WSREP_VERSION}") ENDIF() - diff --git a/config.h.cmake b/config.h.cmake index c55e4a8ac5f..01eafcebce2 100644 --- a/config.h.cmake +++ b/config.h.cmake @@ -575,21 +575,4 @@ #define __STDC_FORMAT_MACROS #endif -/* - stat structure (from <sys/stat.h>) is conditionally defined - to have different layout and size depending on the defined macros. 
- The correct macro is defined in my_config.h, which means it MUST be - included first (or at least before <features.h> - so, practically, - before including any system headers). - - Check the include order by looking at __GLIBC__ (defined in <features.h>) - - But we cannot force all third-party clients/connectors to include - my_config.h first. So, their crashes are their responsibility, - we enable this check only for MariaDB sources (SAFE_MUTEX check). -*/ -#if defined(__GLIBC__) && defined(SAFE_MUTEX) -#error <my_config.h> MUST be included first! -#endif - #endif diff --git a/extra/innochecksum.cc b/extra/innochecksum.cc index 238a5a80ac5..fe828026b4b 100644 --- a/extra/innochecksum.cc +++ b/extra/innochecksum.cc @@ -82,7 +82,7 @@ uintmax_t cur_page_num; /* Skip the checksum verification. */ static bool no_check; /* Enabled for strict checksum verification. */ -bool strict_verify = 0; +bool strict_verify; /* Enabled for rewrite checksum. */ static bool do_write; /* Mismatches count allowed (0 by default). 
*/ @@ -280,7 +280,8 @@ void print_index_leaf_stats( fprintf(fil_out, "page_no\tdata_size\tn_recs\n"); while (it_page != index.leaves.end()) { const per_page_stats& stat = it_page->second; - fprintf(fil_out, "%llu\t%lu\t%lu\n", it_page->first, stat.data_size, stat.n_recs); + fprintf(fil_out, "%llu\t" ULINTPF "\t" ULINTPF "\n", + it_page->first, stat.data_size, stat.n_recs); page_no = stat.right_page_no; it_page = index.leaves.find(page_no); } @@ -315,12 +316,15 @@ void defrag_analysis( } if (index.leaf_pages) { - fprintf(fil_out, "count = %lu free = %lu\n", index.count, index.free_pages); + fprintf(fil_out, "count = " ULINTPF " free = " ULINTPF "\n", + index.count, index.free_pages); } - fprintf(fil_out, "%llu\t\t%llu\t\t%lu\t\t%lu\t\t%lu\t\t%.2f\t%lu\n", + fprintf(fil_out, "%llu\t\t%llu\t\t" + ULINTPF "\t\t%lu\t\t" ULINTPF "\t\t%.2f\t" ULINTPF "\n", id, index.leaf_pages, n_leaf_pages, n_merge, n_pages, - 1.0 - (double)n_pages / (double)n_leaf_pages, index.max_data_size); + 1.0 - (double)n_pages / (double)n_leaf_pages, + index.max_data_size); } void print_leaf_stats( diff --git a/extra/mariabackup/CMakeLists.txt b/extra/mariabackup/CMakeLists.txt index 693082b765a..ac15460660c 100644 --- a/extra/mariabackup/CMakeLists.txt +++ b/extra/mariabackup/CMakeLists.txt @@ -27,101 +27,6 @@ IF(NOT WIN32) ENDIF() ENDIF() -IF(WITH_LIBARCHIVE STREQUAL "STATIC") - SET(CMAKE_FIND_LIBRARY_SUFFIXES .a .lib) -ENDIF() - -FIND_PACKAGE(LibArchive) - -IF(NOT DEFINED WITH_LIBARCHIVE) - IF(LibArchive_FOUND) - SET(WITH_LIBARCHIVE_DEFAULT ON) - ELSE() - SET(WITH_LIBARCHIVE_DEFAULT OFF) - ENDIF() - SET(WITH_LIBARCHIVE ${WITH_LIBARCHIVE_DEFAULT} CACHE STRING "Use libarchive for streaming features (ON, OFF or STATIC)" ) -ENDIF() - -IF(NOT WITH_LIBARCHIVE MATCHES "^(ON|OFF|STATIC)$") - MESSAGE(FATAL_ERROR "Invalid value for WITH_LIBARCHIVE: '${WITH_LIBARCHIVE}'. 
Use one of ON, OFF or STATIC") -ENDIF() - -IF(UNIX) - SET(PIC_FLAG -fPIC) -ENDIF() - -IF((NOT WITH_LIBARCHIVE STREQUAL "OFF") AND (NOT LibArchive_FOUND)) - IF(CMAKE_VERSION VERSION_LESS "2.8.12") - MESSAGE("libarchive can't be built, old cmake") - ELSE() - # Build a local version - INCLUDE(ExternalProject) - SET(LIBARCHIVE_DIR ${CMAKE_CURRENT_BINARY_DIR}/libarchive) - SET(libarchive_PREFIX ${CMAKE_CURRENT_BINARY_DIR}/libarchive) - SET(libarchive_CMAKE_ARGS - -DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR> - -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} - -DENABLE_ICONV=OFF - -DENABLE_TAR=ON - -DENABLE_OPENSSL=OFF - -DENABLE_TEST=OFF - "-DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG} ${PIC_FLAG}" - "-DCMAKE_C_FLAGS_RELWITHDEBINFO=${CMAKE_C_FLAGS_RELWITHDEBINFO} ${PIC_FLAG}" - "-DCMAKE_C_FLAGS_RELEASE=${CMAKE_C_FLAGS_RELEASE} ${PIC_FLAG}" - "-DCMAKE_C_FLAGS_MINSIZEREL=${CMAKE_C_FLAGS_MINSIZEREL} ${PIC_FLAG}" - ) - IF(WIN32) - SET(libarchive_CMAKE_ARGS ${libarchive_CMAKE_ARGS} -DWINDOWS_VERSION=WIN7 -DCMAKE_DEBUG_POSTFIX=d) - SET(LIBARCHIVE_RELEASE_LIB ${LIBARCHIVE_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}archive_static${CMAKE_STATIC_LIBRARY_SUFFIX}) - SET(LIBARCHIVE_DEBUG_LIB ${LIBARCHIVE_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}archive_staticd${CMAKE_STATIC_LIBRARY_SUFFIX}) - SET(byproducts ${LIBARCHIVE_RELEASE_LIB} ${LIBARCHIVE_DEBUG_LIB}) - ELSE() - SET(LIBARCHIVE_LIB ${LIBARCHIVE_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}archive${CMAKE_STATIC_LIBRARY_SUFFIX}) - SET(byproducts ${LIBARCHIVE_LIB}) - ENDIF() - - IF(CMAKE_VERSION VERSION_GREATER "3.1") - SET(byproducts BUILD_BYPRODUCTS ${byproducts}) - ENDIF() - - ExternalProject_Add(libarchive - PREFIX ${libarchive_PREFIX} - DOWNLOAD_DIR ${LIBARCHIVE_DIR} - URL http://www.libarchive.org/downloads/libarchive-3.2.2.tar.gz - INSTALL_DIR ${LIBARCHIVE_DIR} - CMAKE_ARGS ${libarchive_CMAKE_ARGS} - ${byproducts} - ) - ADD_LIBRARY(archive_static STATIC IMPORTED) - ADD_DEPENDENCIES(archive_static libarchive) - IF(WIN32) - SET_PROPERTY(TARGET 
archive_static PROPERTY IMPORTED_LOCATION_RELWITHDEBINFO ${LIBARCHIVE_RELEASE_LIB}) - SET_PROPERTY(TARGET archive_static PROPERTY IMPORTED_LOCATION_RELEASE ${LIBARCHIVE_RELEASE_LIB}) - SET_PROPERTY(TARGET archive_static PROPERTY IMPORTED_LOCATION_DEBUG ${LIBARCHIVE_DEBUG_LIB}) - SET_PROPERTY(TARGET archive_static PROPERTY IMPORTED_LOCATION_MINSIZEREL ${LIBARCHIVE_RELEASE_LIB}) - ELSE() - SET_PROPERTY(TARGET archive_static PROPERTY IMPORTED_LOCATION ${LIBARCHIVE_LIB}) - ENDIF() - - SET(LibArchive_FOUND ON ) - SET(LibArchive_INCLUDE_DIRS ${LIBARCHIVE_DIR}/include ) - SET(LibArchive_LIBRARIES archive_static) - IF(WIN32) - SET(LIBARCHIVE_STATIC 1) - ENDIF() - ENDIF() -ENDIF() - - -IF(WITH_LIBARCHIVE AND LibArchive_FOUND) - ADD_DEFINITIONS(-DHAVE_LIBARCHIVE) - IF(LIBARCHIVE_STATIC) - ADD_DEFINITIONS(-DLIBARCHIVE_STATIC) - ENDIF() - INCLUDE_DIRECTORIES(${LibArchive_INCLUDE_DIRS}) - LINK_LIBRARIES(${LibArchive_LIBRARIES}) - SET(DS_ARCHIVE_SOURCE ds_archive.c) -ENDIF() INCLUDE_DIRECTORIES( ${CMAKE_SOURCE_DIR}/include @@ -154,7 +59,6 @@ MYSQL_ADD_EXECUTABLE(mariabackup innobackupex.cc changed_page_bitmap.cc datasink.c - ${DS_ARCHIVE_SOURCE} ds_buffer.c ds_compress.c ds_local.c diff --git a/extra/mariabackup/backup_copy.cc b/extra/mariabackup/backup_copy.cc index 1565e20d732..156e0b20e7c 100644 --- a/extra/mariabackup/backup_copy.cc +++ b/extra/mariabackup/backup_copy.cc @@ -450,7 +450,7 @@ datadir_iter_free(datadir_iter_t *it) /************************************************************************ Holds the state needed to copy single data file. 
*/ struct datafile_cur_t { - os_file_t file; + pfs_os_file_t file; char rel_path[FN_REFLEN]; char abs_path[FN_REFLEN]; MY_STAT statinfo; @@ -1774,7 +1774,7 @@ copy_back() const char *ext_list[] = {"backup-my.cnf", "xtrabackup_logfile", "xtrabackup_binary", "xtrabackup_binlog_info", "xtrabackup_checkpoints", ".qp", ".pmap", ".tmp", - ".xbcrypt", NULL}; + NULL}; const char *filename; char c_tmp; int i_tmp; @@ -1807,7 +1807,7 @@ copy_back() filename = base_name(node.filepath); - /* skip .qp and .xbcrypt files */ + /* skip .qp files */ if (filename_matches(filename, ext_list)) { continue; } @@ -1899,24 +1899,8 @@ decrypt_decompress_file(const char *filepath, uint thread_n) cmd << IF_WIN("type ","cat ") << filepath; - if (ends_with(filepath, ".xbcrypt") && opt_decrypt) { - cmd << " | xbcrypt --decrypt --encrypt-algo=" - << xtrabackup_encrypt_algo_names[opt_decrypt_algo]; - if (xtrabackup_encrypt_key) { - cmd << " --encrypt-key=" << xtrabackup_encrypt_key; - } else { - cmd << " --encrypt-key-file=" - << xtrabackup_encrypt_key_file; - } - dest_filepath[strlen(dest_filepath) - 8] = 0; - message << "decrypting"; - needs_action = true; - } - if (opt_decompress - && (ends_with(filepath, ".qp") - || (ends_with(filepath, ".qp.xbcrypt") - && opt_decrypt))) { + && ends_with(filepath, ".qp")) { cmd << " | qpress -dio "; dest_filepath[strlen(dest_filepath) - 3] = 0; if (needs_action) { @@ -1967,8 +1951,7 @@ decrypt_decompress_thread_func(void *arg) continue; } - if (!ends_with(node.filepath, ".qp") - && !ends_with(node.filepath, ".xbcrypt")) { + if (!ends_with(node.filepath, ".qp")) { continue; } diff --git a/extra/mariabackup/backup_mysql.cc b/extra/mariabackup/backup_mysql.cc index 6299afffc6e..2353c7692cb 100644 --- a/extra/mariabackup/backup_mysql.cc +++ b/extra/mariabackup/backup_mysql.cc @@ -1435,9 +1435,7 @@ write_xtrabackup_info(MYSQL *connection) "partial = %s\n" "incremental = %s\n" "format = %s\n" - "compact = %s\n" - "compressed = %s\n" - "encrypted = %s\n", + 
"compressed = %s\n", uuid, /* uuid */ opt_history ? opt_history : "", /* name */ tool_name, /* tool_name */ @@ -1455,9 +1453,7 @@ write_xtrabackup_info(MYSQL *connection) is_partial? "Y" : "N", xtrabackup_incremental ? "Y" : "N", /* incremental */ xb_stream_name[xtrabackup_stream_fmt], /* format */ - "N", /* compact */ - xtrabackup_compress ? "compressed" : "N", /* compressed */ - xtrabackup_encrypt ? "Y" : "N"); /* encrypted */ + xtrabackup_compress ? "compressed" : "N"); /* compressed */ if (!opt_history) { goto cleanup; @@ -1483,9 +1479,7 @@ write_xtrabackup_info(MYSQL *connection) "partial ENUM('Y', 'N') DEFAULT NULL," "incremental ENUM('Y', 'N') DEFAULT NULL," "format ENUM('file', 'tar', 'xbstream') DEFAULT NULL," - "compact ENUM('Y', 'N') DEFAULT NULL," - "compressed ENUM('Y', 'N') DEFAULT NULL," - "encrypted ENUM('Y', 'N') DEFAULT NULL" + "compressed ENUM('Y', 'N') DEFAULT NULL" ") CHARACTER SET utf8 ENGINE=innodb", false); @@ -1495,8 +1489,8 @@ write_xtrabackup_info(MYSQL *connection) << "uuid, name, tool_name, tool_command, tool_version," << "ibbackup_version, server_version, start_time, end_time," << "lock_time, binlog_pos, innodb_from_lsn, innodb_to_lsn," - << "partial, incremental, format, compact, compressed, " - << "encrypted) values(" + << "partial, incremental, format, compressed) " + << "values(" << escape_and_quote(connection, uuid) << "," << escape_and_quote(connection, opt_history) << "," << escape_and_quote(connection, tool_name) << "," @@ -1513,9 +1507,7 @@ write_xtrabackup_info(MYSQL *connection) << ESCAPE_BOOL(is_partial) << "," << ESCAPE_BOOL(xtrabackup_incremental)<< "," << escape_and_quote(connection,xb_stream_name[xtrabackup_stream_fmt]) <<"," - << ESCAPE_BOOL(false) << "," - << ESCAPE_BOOL(xtrabackup_compress) << "," - << ESCAPE_BOOL(xtrabackup_encrypt) <<")"; + << ESCAPE_BOOL(xtrabackup_compress) << ")"; xb_mysql_query(mysql_connection, oss.str().c_str(), false); @@ -1581,14 +1573,6 @@ char *make_argv(char *buf, size_t len, int argc, 
char **argv) if (strncmp(*argv, "--password", strlen("--password")) == 0) { arg = "--password=..."; } - if (strncmp(*argv, "--encrypt-key", - strlen("--encrypt-key")) == 0) { - arg = "--encrypt-key=..."; - } - if (strncmp(*argv, "--encrypt_key", - strlen("--encrypt_key")) == 0) { - arg = "--encrypt_key=..."; - } left-= ut_snprintf(buf + len - left, left, "%s%c", arg, argc > 1 ? ' ' : 0); ++argv; --argc; diff --git a/extra/mariabackup/changed_page_bitmap.cc b/extra/mariabackup/changed_page_bitmap.cc index 435b7fb6172..86a873ef69c 100644 --- a/extra/mariabackup/changed_page_bitmap.cc +++ b/extra/mariabackup/changed_page_bitmap.cc @@ -35,7 +35,7 @@ Remove these on the first opportunity, i.e. single-binary XtraBackup. */ /** Single bitmap file information */ struct log_online_bitmap_file_t { char name[FN_REFLEN]; /*!< Name with full path */ - os_file_t file; /*!< Handle to opened file */ + pfs_os_file_t file; /*!< Handle to opened file */ ib_uint64_t size; /*!< Size of the file */ ib_uint64_t offset; /*!< Offset of the next read, or count of already-read bytes diff --git a/extra/mariabackup/crc/CMakeLists.txt b/extra/mariabackup/crc/CMakeLists.txt index 577cab6080c..91758cdf520 100644 --- a/extra/mariabackup/crc/CMakeLists.txt +++ b/extra/mariabackup/crc/CMakeLists.txt @@ -30,4 +30,4 @@ ENDIF() IF(HAVE_CLMUL_INSTRUCTION) ADD_DEFINITIONS(-DHAVE_CLMUL_INSTRUCTION) ENDIF() -ADD_LIBRARY(crc crc_glue.c crc-intel-pclmul.c) +ADD_LIBRARY(crc STATIC crc_glue.c crc-intel-pclmul.c) diff --git a/extra/mariabackup/crc/crc_glue.c b/extra/mariabackup/crc/crc_glue.c index ae3fa91c1b0..c301cb01e2e 100644 --- a/extra/mariabackup/crc/crc_glue.c +++ b/extra/mariabackup/crc/crc_glue.c @@ -17,7 +17,7 @@ along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA *******************************************************/ - +#include "my_config.h" #include "crc_glue.h" #include "crc-intel-pclmul.h" #include <stdint.h> 
diff --git a/extra/mariabackup/ds_decrypt.c b/extra/mariabackup/ds_decrypt.c deleted file mode 100644 index e897ca101e5..00000000000 --- a/extra/mariabackup/ds_decrypt.c +++ /dev/null @@ -1,665 +0,0 @@ -/****************************************************** -Copyright (c) 2017 Percona LLC and/or its affiliates. - -Encryption datasink implementation for XtraBackup. - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -*******************************************************/ - - -#include <my_base.h> -#include "common.h" -#include "datasink.h" -#include "xbcrypt.h" -#include "xbcrypt_common.h" -#include "crc_glue.h" - -typedef struct { - pthread_t id; - uint num; - pthread_mutex_t ctrl_mutex; - pthread_cond_t ctrl_cond; - pthread_mutex_t data_mutex; - pthread_cond_t data_cond; - my_bool started; - my_bool data_avail; - my_bool cancelled; - my_bool failed; - const uchar *from; - size_t from_len; - uchar *to; - size_t to_len; - size_t to_size; - const uchar *iv; - size_t iv_len; - unsigned long long offset; - my_bool hash_appended; - gcry_cipher_hd_t cipher_handle; - xb_rcrypt_result_t parse_result; -} crypt_thread_ctxt_t; - -typedef struct { - crypt_thread_ctxt_t *threads; - uint nthreads; - int encrypt_algo; - size_t chunk_size; - char *encrypt_key; - char *encrypt_key_file; -} ds_decrypt_ctxt_t; - -typedef struct { - ds_decrypt_ctxt_t *crypt_ctxt; - size_t bytes_processed; - 
ds_file_t *dest_file; - uchar *buf; - size_t buf_len; - size_t buf_size; -} ds_decrypt_file_t; - -int ds_decrypt_encrypt_threads = 1; - -static ds_ctxt_t *decrypt_init(const char *root); -static ds_file_t *decrypt_open(ds_ctxt_t *ctxt, const char *path, - MY_STAT *mystat); -static int decrypt_write(ds_file_t *file, const void *buf, size_t len); -static int decrypt_close(ds_file_t *file); -static void decrypt_deinit(ds_ctxt_t *ctxt); - -datasink_t datasink_decrypt = { - &decrypt_init, - &decrypt_open, - &decrypt_write, - &decrypt_close, - &decrypt_deinit -}; - -static crypt_thread_ctxt_t *create_worker_threads(uint n); -static void destroy_worker_threads(crypt_thread_ctxt_t *threads, uint n); -static void *decrypt_worker_thread_func(void *arg); - -static -ds_ctxt_t * -decrypt_init(const char *root) -{ - ds_ctxt_t *ctxt; - ds_decrypt_ctxt_t *decrypt_ctxt; - crypt_thread_ctxt_t *threads; - - if (xb_crypt_init(NULL)) { - return NULL; - } - - /* Create and initialize the worker threads */ - threads = create_worker_threads(ds_decrypt_encrypt_threads); - if (threads == NULL) { - msg("decrypt: failed to create worker threads.\n"); - return NULL; - } - - ctxt = (ds_ctxt_t *) my_malloc(sizeof(ds_ctxt_t) + - sizeof(ds_decrypt_ctxt_t), - MYF(MY_FAE)); - - decrypt_ctxt = (ds_decrypt_ctxt_t *) (ctxt + 1); - decrypt_ctxt->threads = threads; - decrypt_ctxt->nthreads = ds_decrypt_encrypt_threads; - - ctxt->ptr = decrypt_ctxt; - ctxt->root = my_strdup(root, MYF(MY_FAE)); - - return ctxt; -} - -static -ds_file_t * -decrypt_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat) -{ - ds_ctxt_t *dest_ctxt; - - ds_decrypt_ctxt_t *crypt_ctxt; - ds_decrypt_file_t *crypt_file; - - char new_name[FN_REFLEN]; - ds_file_t *file; - - xb_ad(ctxt->pipe_ctxt != NULL); - dest_ctxt = ctxt->pipe_ctxt; - - crypt_ctxt = (ds_decrypt_ctxt_t *) ctxt->ptr; - - - file = (ds_file_t *) my_malloc(sizeof(ds_file_t) + - sizeof(ds_decrypt_file_t), - MYF(MY_FAE|MY_ZEROFILL)); - - crypt_file = (ds_decrypt_file_t 
*) (file + 1); - - /* Remove the .xbcrypt extension from the filename */ - strncpy(new_name, path, FN_REFLEN); - new_name[strlen(new_name) - 8] = 0; - crypt_file->dest_file = ds_open(dest_ctxt, new_name, mystat); - if (crypt_file->dest_file == NULL) { - msg("decrypt: ds_open(\"%s\") failed.\n", new_name); - goto err; - } - - crypt_file->crypt_ctxt = crypt_ctxt; - crypt_file->buf = NULL; - crypt_file->buf_size = 0; - crypt_file->buf_len = 0; - - file->ptr = crypt_file; - file->path = crypt_file->dest_file->path; - - return file; - -err: - if (crypt_file->dest_file) { - ds_close(crypt_file->dest_file); - } - my_free(file); - return NULL; -} - -#define CHECK_BUF_SIZE(ptr, size, buf, len) \ - if (ptr + size - buf > (ssize_t) len) { \ - result = XB_CRYPT_READ_INCOMPLETE; \ - goto exit; \ - } - -static -xb_rcrypt_result_t -parse_xbcrypt_chunk(crypt_thread_ctxt_t *thd, const uchar *buf, size_t len, - size_t *bytes_processed) -{ - const uchar *ptr; - uint version; - ulong checksum, checksum_exp; - ulonglong tmp; - xb_rcrypt_result_t result = XB_CRYPT_READ_CHUNK; - - *bytes_processed = 0; - ptr = buf; - - CHECK_BUF_SIZE(ptr, XB_CRYPT_CHUNK_MAGIC_SIZE, buf, len); - if (memcmp(ptr, XB_CRYPT_CHUNK_MAGIC3, - XB_CRYPT_CHUNK_MAGIC_SIZE) == 0) { - version = 3; - } else if (memcmp(ptr, XB_CRYPT_CHUNK_MAGIC2, - XB_CRYPT_CHUNK_MAGIC_SIZE) == 0) { - version = 2; - } else if (memcmp(ptr, XB_CRYPT_CHUNK_MAGIC1, - XB_CRYPT_CHUNK_MAGIC_SIZE) == 0) { - version = 1; - } else { - msg("%s:%s: wrong chunk magic at offset 0x%llx.\n", - my_progname, __FUNCTION__, thd->offset); - result = XB_CRYPT_READ_ERROR; - goto exit; - } - - ptr += XB_CRYPT_CHUNK_MAGIC_SIZE; - thd->offset += XB_CRYPT_CHUNK_MAGIC_SIZE; - - CHECK_BUF_SIZE(ptr, 8, buf, len); - tmp = uint8korr(ptr); /* reserved */ - ptr += 8; - thd->offset += 8; - - CHECK_BUF_SIZE(ptr, 8, buf, len); - tmp = uint8korr(ptr); /* original size */ - ptr += 8; - if (tmp > INT_MAX) { - msg("%s:%s: invalid original size at offset 0x%llx.\n", - 
my_progname, __FUNCTION__, thd->offset); - result = XB_CRYPT_READ_ERROR; - goto exit; - } - thd->offset += 8; - thd->to_len = (size_t)tmp; - - if (thd->to_size < thd->to_len + XB_CRYPT_HASH_LEN) { - thd->to = (uchar *) my_realloc( - thd->to, - thd->to_len + XB_CRYPT_HASH_LEN, - MYF(MY_FAE | MY_ALLOW_ZERO_PTR)); - thd->to_size = thd->to_len; - } - - CHECK_BUF_SIZE(ptr, 8, buf, len); - tmp = uint8korr(ptr); /* encrypted size */ - ptr += 8; - if (tmp > INT_MAX) { - msg("%s:%s: invalid encrypted size at offset 0x%llx.\n", - my_progname, __FUNCTION__, thd->offset); - result = XB_CRYPT_READ_ERROR; - goto exit; - } - thd->offset += 8; - thd->from_len = (size_t)tmp; - - xb_a(thd->from_len <= thd->to_len + XB_CRYPT_HASH_LEN); - - CHECK_BUF_SIZE(ptr, 4, buf, len); - checksum_exp = uint4korr(ptr); /* checksum */ - ptr += 4; - thd->offset += 4; - - /* iv size */ - if (version == 1) { - thd->iv_len = 0; - thd->iv = NULL; - } else { - CHECK_BUF_SIZE(ptr, 8, buf, len); - - tmp = uint8korr(ptr); - if (tmp > INT_MAX) { - msg("%s:%s: invalid iv size at offset 0x%llx.\n", - my_progname, __FUNCTION__, thd->offset); - result = XB_CRYPT_READ_ERROR; - goto exit; - } - ptr += 8; - thd->offset += 8; - thd->iv_len = (size_t)tmp; - } - - if (thd->iv_len > 0) { - CHECK_BUF_SIZE(ptr, thd->iv_len, buf, len); - thd->iv = ptr; - ptr += thd->iv_len; - } - - /* for version euqals 2 we need to read in the iv data but do not init - CTR with it */ - if (version == 2) { - thd->iv_len = 0; - thd->iv = 0; - } - - if (thd->from_len > 0) { - CHECK_BUF_SIZE(ptr, thd->from_len, buf, len); - thd->from = ptr; - ptr += thd->from_len; - } - - xb_ad(thd->from_len <= thd->to_len); - - checksum = crc32_iso3309(0, thd->from, thd->from_len); - if (checksum != checksum_exp) { - msg("%s:%s invalid checksum at offset 0x%llx, " - "expected 0x%lx, actual 0x%lx.\n", my_progname, - __FUNCTION__, thd->offset, checksum_exp, checksum); - result = XB_CRYPT_READ_ERROR; - goto exit; - } - - thd->offset += thd->from_len; - - 
thd->hash_appended = version > 2; - -exit: - - *bytes_processed = (size_t) (ptr - buf); - - return result; -} - -static -int -decrypt_write(ds_file_t *file, const void *buf, size_t len) -{ - ds_decrypt_file_t *crypt_file; - ds_decrypt_ctxt_t *crypt_ctxt; - crypt_thread_ctxt_t *threads; - crypt_thread_ctxt_t *thd; - uint nthreads; - uint i; - size_t bytes_processed; - xb_rcrypt_result_t parse_result = XB_CRYPT_READ_CHUNK; - my_bool err = FALSE; - - crypt_file = (ds_decrypt_file_t *) file->ptr; - crypt_ctxt = crypt_file->crypt_ctxt; - - threads = crypt_ctxt->threads; - nthreads = crypt_ctxt->nthreads; - - if (crypt_file->buf_len > 0) { - thd = threads; - - pthread_mutex_lock(&thd->ctrl_mutex); - - do { - if (parse_result == XB_CRYPT_READ_INCOMPLETE) { - crypt_file->buf_size = crypt_file->buf_size * 2; - crypt_file->buf = (uchar *) my_realloc( - crypt_file->buf, - crypt_file->buf_size, - MYF(MY_FAE|MY_ALLOW_ZERO_PTR)); - } - - memcpy(crypt_file->buf + crypt_file->buf_len, - buf, MY_MIN(crypt_file->buf_size - - crypt_file->buf_len, len)); - - parse_result = parse_xbcrypt_chunk( - thd, crypt_file->buf, - crypt_file->buf_size, &bytes_processed); - - if (parse_result == XB_CRYPT_READ_ERROR) { - pthread_mutex_unlock(&thd->ctrl_mutex); - return 1; - } - - } while (parse_result == XB_CRYPT_READ_INCOMPLETE && - crypt_file->buf_size < len); - - if (parse_result != XB_CRYPT_READ_CHUNK) { - msg("decrypt: incomplete data.\n"); - pthread_mutex_unlock(&thd->ctrl_mutex); - return 1; - } - - pthread_mutex_lock(&thd->data_mutex); - thd->data_avail = TRUE; - pthread_cond_signal(&thd->data_cond); - pthread_mutex_unlock(&thd->data_mutex); - - len -= bytes_processed - crypt_file->buf_len; - buf += bytes_processed - crypt_file->buf_len; - - /* reap */ - - pthread_mutex_lock(&thd->data_mutex); - while (thd->data_avail == TRUE) { - pthread_cond_wait(&thd->data_cond, - &thd->data_mutex); - } - - if (thd->failed) { - msg("decrypt: failed to decrypt chunk.\n"); - err = TRUE; - } - - 
xb_a(thd->to_len > 0); - - if (!err && - ds_write(crypt_file->dest_file, thd->to, thd->to_len)) { - msg("decrypt: write to destination failed.\n"); - err = TRUE; - } - - crypt_file->bytes_processed += thd->from_len; - - pthread_mutex_unlock(&thd->data_mutex); - pthread_mutex_unlock(&thd->ctrl_mutex); - - crypt_file->buf_len = 0; - - if (err) { - return 1; - } - } - - while (parse_result == XB_CRYPT_READ_CHUNK && len > 0) { - uint max_thread; - - for (i = 0; i < nthreads; i++) { - thd = threads + i; - - pthread_mutex_lock(&thd->ctrl_mutex); - - parse_result = parse_xbcrypt_chunk( - thd, buf, len, &bytes_processed); - - if (parse_result == XB_CRYPT_READ_ERROR) { - pthread_mutex_unlock(&thd->ctrl_mutex); - err = TRUE; - break; - } - - thd->parse_result = parse_result; - - if (parse_result != XB_CRYPT_READ_CHUNK) { - pthread_mutex_unlock(&thd->ctrl_mutex); - break; - } - - pthread_mutex_lock(&thd->data_mutex); - thd->data_avail = TRUE; - pthread_cond_signal(&thd->data_cond); - pthread_mutex_unlock(&thd->data_mutex); - - len -= bytes_processed; - buf += bytes_processed; - } - - max_thread = (i < nthreads) ? 
i : nthreads - 1; - - /* Reap and write decrypted data */ - for (i = 0; i <= max_thread; i++) { - thd = threads + i; - - if (thd->parse_result != XB_CRYPT_READ_CHUNK) { - break; - } - - pthread_mutex_lock(&thd->data_mutex); - while (thd->data_avail == TRUE) { - pthread_cond_wait(&thd->data_cond, - &thd->data_mutex); - } - - if (thd->failed) { - msg("decrypt: failed to decrypt chunk.\n"); - err = TRUE; - } - - xb_a(thd->to_len > 0); - - if (!err && ds_write(crypt_file->dest_file, thd->to, - thd->to_len)) { - msg("decrypt: write to destination failed.\n"); - err = TRUE; - } - - crypt_file->bytes_processed += thd->from_len; - - pthread_mutex_unlock(&thd->data_mutex); - pthread_mutex_unlock(&thd->ctrl_mutex); - } - - if (err) { - return 1; - } - } - - if (parse_result == XB_CRYPT_READ_INCOMPLETE && len > 0) { - crypt_file->buf_len = len; - if (crypt_file->buf_size < len) { - crypt_file->buf = (uchar *) my_realloc( - crypt_file->buf, - crypt_file->buf_len, - MYF(MY_FAE | MY_ALLOW_ZERO_PTR)); - crypt_file->buf_size = len; - } - memcpy(crypt_file->buf, buf, len); - } - - return 0; -} - -static -int -decrypt_close(ds_file_t *file) -{ - ds_decrypt_file_t *crypt_file; - ds_file_t *dest_file; - int rc = 0; - - crypt_file = (ds_decrypt_file_t *) file->ptr; - dest_file = crypt_file->dest_file; - - if (ds_close(dest_file)) { - rc = 1; - } - - my_free(crypt_file->buf); - my_free(file); - - return rc; -} - -static -void -decrypt_deinit(ds_ctxt_t *ctxt) -{ - ds_decrypt_ctxt_t *crypt_ctxt; - - xb_ad(ctxt->pipe_ctxt != NULL); - - crypt_ctxt = (ds_decrypt_ctxt_t *) ctxt->ptr; - - destroy_worker_threads(crypt_ctxt->threads, crypt_ctxt->nthreads); - - my_free(ctxt->root); - my_free(ctxt); -} - -static -crypt_thread_ctxt_t * -create_worker_threads(uint n) -{ - crypt_thread_ctxt_t *threads; - uint i; - - threads = (crypt_thread_ctxt_t *) - my_malloc(sizeof(crypt_thread_ctxt_t) * n, - MYF(MY_FAE | MY_ZEROFILL)); - - for (i = 0; i < n; i++) { - crypt_thread_ctxt_t *thd = threads + i; - - 
thd->num = i + 1; - - /* Initialize the control mutex and condition var */ - if (pthread_mutex_init(&thd->ctrl_mutex, NULL) || - pthread_cond_init(&thd->ctrl_cond, NULL)) { - goto err; - } - - /* Initialize and data mutex and condition var */ - if (pthread_mutex_init(&thd->data_mutex, NULL) || - pthread_cond_init(&thd->data_cond, NULL)) { - goto err; - } - - xb_crypt_cipher_open(&thd->cipher_handle); - - pthread_mutex_lock(&thd->ctrl_mutex); - - if (pthread_create(&thd->id, NULL, decrypt_worker_thread_func, - thd)) { - msg("decrypt: pthread_create() failed: " - "errno = %d\n", errno); - goto err; - } - } - - /* Wait for the threads to start */ - for (i = 0; i < n; i++) { - crypt_thread_ctxt_t *thd = threads + i; - - while (thd->started == FALSE) - pthread_cond_wait(&thd->ctrl_cond, &thd->ctrl_mutex); - pthread_mutex_unlock(&thd->ctrl_mutex); - } - - return threads; - -err: - return NULL; -} - -static -void -destroy_worker_threads(crypt_thread_ctxt_t *threads, uint n) -{ - uint i; - - for (i = 0; i < n; i++) { - crypt_thread_ctxt_t *thd = threads + i; - - pthread_mutex_lock(&thd->data_mutex); - threads[i].cancelled = TRUE; - pthread_cond_signal(&thd->data_cond); - pthread_mutex_unlock(&thd->data_mutex); - - pthread_join(thd->id, NULL); - - pthread_cond_destroy(&thd->data_cond); - pthread_mutex_destroy(&thd->data_mutex); - pthread_cond_destroy(&thd->ctrl_cond); - pthread_mutex_destroy(&thd->ctrl_mutex); - - xb_crypt_cipher_close(thd->cipher_handle); - - my_free(thd->to); - } - - my_free(threads); -} - -static -void * -decrypt_worker_thread_func(void *arg) -{ - crypt_thread_ctxt_t *thd = (crypt_thread_ctxt_t *) arg; - - pthread_mutex_lock(&thd->ctrl_mutex); - - pthread_mutex_lock(&thd->data_mutex); - - thd->started = TRUE; - pthread_cond_signal(&thd->ctrl_cond); - - pthread_mutex_unlock(&thd->ctrl_mutex); - - while (1) { - thd->data_avail = FALSE; - pthread_cond_signal(&thd->data_cond); - - while (!thd->data_avail && !thd->cancelled) { - 
pthread_cond_wait(&thd->data_cond, &thd->data_mutex); - } - - if (thd->cancelled) - break; - - if (xb_crypt_decrypt(thd->cipher_handle, thd->from, - thd->from_len, thd->to, &thd->to_len, - thd->iv, thd->iv_len, - thd->hash_appended)) { - thd->failed = TRUE; - continue; - } - - } - - pthread_mutex_unlock(&thd->data_mutex); - - return NULL; -} diff --git a/extra/mariabackup/ds_decrypt.h b/extra/mariabackup/ds_decrypt.h deleted file mode 100644 index 3bb4de55f54..00000000000 --- a/extra/mariabackup/ds_decrypt.h +++ /dev/null @@ -1,30 +0,0 @@ -/****************************************************** -Copyright (c) 2017 Percona LLC and/or its affiliates. - -Encryption interface for XtraBackup. - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -*******************************************************/ - -#ifndef DS_DECRYPT_H -#define DS_DECRYPT_H - -#include "datasink.h" - -extern datasink_t datasink_decrypt; - -extern int ds_decrypt_encrypt_threads; - -#endif diff --git a/extra/mariabackup/ds_encrypt.c b/extra/mariabackup/ds_encrypt.c deleted file mode 100644 index 576ea207eb1..00000000000 --- a/extra/mariabackup/ds_encrypt.c +++ /dev/null @@ -1,446 +0,0 @@ -/****************************************************** -Copyright (c) 2013 Percona LLC and/or its affiliates. - -Encryption datasink implementation for XtraBackup. 
- -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -*******************************************************/ - - -#include <my_base.h> -#include "common.h" -#include "datasink.h" -#include "xbcrypt_common.h" -#ifdef HAVE_GRYPT -#include "xbcrypt.h" - -#define XB_CRYPT_CHUNK_SIZE ((size_t) (ds_encrypt_encrypt_chunk_size)) - -typedef struct { - pthread_t id; - uint num; - pthread_mutex_t ctrl_mutex; - pthread_cond_t ctrl_cond; - pthread_mutex_t data_mutex; - pthread_cond_t data_cond; - my_bool started; - my_bool data_avail; - my_bool cancelled; - const uchar *from; - size_t from_len; - uchar *to; - uchar *iv; - size_t to_len; - gcry_cipher_hd_t cipher_handle; -} crypt_thread_ctxt_t; - -typedef struct { - crypt_thread_ctxt_t *threads; - uint nthreads; -} ds_encrypt_ctxt_t; - -typedef struct { - xb_wcrypt_t *xbcrypt_file; - ds_encrypt_ctxt_t *crypt_ctxt; - size_t bytes_processed; - ds_file_t *dest_file; -} ds_encrypt_file_t; - -/* Encryption options */ -uint ds_encrypt_encrypt_threads; -ulonglong ds_encrypt_encrypt_chunk_size; - -static ds_ctxt_t *encrypt_init(const char *root); -static ds_file_t *encrypt_open(ds_ctxt_t *ctxt, const char *path, - MY_STAT *mystat); -static int encrypt_write(ds_file_t *file, const void *buf, size_t len); -static int encrypt_close(ds_file_t *file); -static void encrypt_deinit(ds_ctxt_t *ctxt); - -datasink_t datasink_encrypt = { - &encrypt_init, - 
&encrypt_open, - &encrypt_write, - &encrypt_close, - &encrypt_deinit -}; - -static crypt_thread_ctxt_t *create_worker_threads(uint n); -static void destroy_worker_threads(crypt_thread_ctxt_t *threads, uint n); -static void *encrypt_worker_thread_func(void *arg); - -static uint encrypt_iv_len = 0; - -static -ssize_t -my_xb_crypt_write_callback(void *userdata, const void *buf, size_t len) -{ - ds_encrypt_file_t *encrypt_file; - - encrypt_file = (ds_encrypt_file_t *) userdata; - - xb_ad(encrypt_file != NULL); - xb_ad(encrypt_file->dest_file != NULL); - - if (!ds_write(encrypt_file->dest_file, buf, len)) { - return len; - } - return -1; -} - -static -ds_ctxt_t * -encrypt_init(const char *root) -{ - ds_ctxt_t *ctxt; - ds_encrypt_ctxt_t *encrypt_ctxt; - crypt_thread_ctxt_t *threads; - - if (xb_crypt_init(&encrypt_iv_len)) { - return NULL; - } - - /* Create and initialize the worker threads */ - threads = create_worker_threads(ds_encrypt_encrypt_threads); - if (threads == NULL) { - msg("encrypt: failed to create worker threads.\n"); - return NULL; - } - - ctxt = (ds_ctxt_t *) my_malloc(sizeof(ds_ctxt_t) + - sizeof(ds_encrypt_ctxt_t), - MYF(MY_FAE)); - - encrypt_ctxt = (ds_encrypt_ctxt_t *) (ctxt + 1); - encrypt_ctxt->threads = threads; - encrypt_ctxt->nthreads = ds_encrypt_encrypt_threads; - - ctxt->ptr = encrypt_ctxt; - ctxt->root = my_strdup(root, MYF(MY_FAE)); - - return ctxt; -} - -static -ds_file_t * -encrypt_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat) -{ - ds_ctxt_t *dest_ctxt; - - ds_encrypt_ctxt_t *crypt_ctxt; - ds_encrypt_file_t *crypt_file; - - char new_name[FN_REFLEN]; - ds_file_t *file; - - xb_ad(ctxt->pipe_ctxt != NULL); - dest_ctxt = ctxt->pipe_ctxt; - - crypt_ctxt = (ds_encrypt_ctxt_t *) ctxt->ptr; - - - file = (ds_file_t *) my_malloc(sizeof(ds_file_t) + - sizeof(ds_encrypt_file_t), - MYF(MY_FAE|MY_ZEROFILL)); - - crypt_file = (ds_encrypt_file_t *) (file + 1); - - /* Append the .xbcrypt extension to the filename */ - fn_format(new_name, path, 
"", ".xbcrypt", MYF(MY_APPEND_EXT)); - crypt_file->dest_file = ds_open(dest_ctxt, new_name, mystat); - if (crypt_file->dest_file == NULL) { - msg("encrypt: ds_open(\"%s\") failed.\n", new_name); - goto err; - } - - crypt_file->crypt_ctxt = crypt_ctxt; - crypt_file->xbcrypt_file = xb_crypt_write_open(crypt_file, - my_xb_crypt_write_callback); - - if (crypt_file->xbcrypt_file == NULL) { - msg("encrypt: xb_crypt_write_open() failed.\n"); - goto err; - } - - - file->ptr = crypt_file; - file->path = crypt_file->dest_file->path; - - return file; - -err: - if (crypt_file->dest_file) { - ds_close(crypt_file->dest_file); - } - my_free(file); - return NULL; -} - -static -int -encrypt_write(ds_file_t *file, const void *buf, size_t len) -{ - ds_encrypt_file_t *crypt_file; - ds_encrypt_ctxt_t *crypt_ctxt; - crypt_thread_ctxt_t *threads; - crypt_thread_ctxt_t *thd; - uint nthreads; - uint i; - const uchar *ptr; - - crypt_file = (ds_encrypt_file_t *) file->ptr; - crypt_ctxt = crypt_file->crypt_ctxt; - - threads = crypt_ctxt->threads; - nthreads = crypt_ctxt->nthreads; - - ptr = (const uchar *) buf; - while (len > 0) { - uint max_thread; - - /* Send data to worker threads for encryption */ - for (i = 0; i < nthreads; i++) { - size_t chunk_len; - - thd = threads + i; - - pthread_mutex_lock(&thd->ctrl_mutex); - - chunk_len = (len > XB_CRYPT_CHUNK_SIZE) ? - XB_CRYPT_CHUNK_SIZE : len; - thd->from = ptr; - thd->from_len = chunk_len; - - pthread_mutex_lock(&thd->data_mutex); - thd->data_avail = TRUE; - pthread_cond_signal(&thd->data_cond); - pthread_mutex_unlock(&thd->data_mutex); - - len -= chunk_len; - if (len == 0) { - break; - } - ptr += chunk_len; - } - - max_thread = (i < nthreads) ? 
i : nthreads - 1; - - /* Reap and stream the encrypted data */ - for (i = 0; i <= max_thread; i++) { - thd = threads + i; - - pthread_mutex_lock(&thd->data_mutex); - while (thd->data_avail == TRUE) { - pthread_cond_wait(&thd->data_cond, - &thd->data_mutex); - } - - xb_a(threads[i].to_len > 0); - - if (xb_crypt_write_chunk(crypt_file->xbcrypt_file, - threads[i].to, - threads[i].from_len + - XB_CRYPT_HASH_LEN, - threads[i].to_len, - threads[i].iv, - encrypt_iv_len)) { - msg("encrypt: write to the destination file " - "failed.\n"); - return 1; - } - - crypt_file->bytes_processed += threads[i].from_len; - - pthread_mutex_unlock(&threads[i].data_mutex); - pthread_mutex_unlock(&threads[i].ctrl_mutex); - } - } - - return 0; -} - -static -int -encrypt_close(ds_file_t *file) -{ - ds_encrypt_file_t *crypt_file; - ds_file_t *dest_file; - int rc = 0; - - crypt_file = (ds_encrypt_file_t *) file->ptr; - dest_file = crypt_file->dest_file; - - rc = xb_crypt_write_close(crypt_file->xbcrypt_file); - - if (ds_close(dest_file)) { - rc = 1; - } - - my_free(file); - - return rc; -} - -static -void -encrypt_deinit(ds_ctxt_t *ctxt) -{ - ds_encrypt_ctxt_t *crypt_ctxt; - - xb_ad(ctxt->pipe_ctxt != NULL); - - crypt_ctxt = (ds_encrypt_ctxt_t *) ctxt->ptr; - - destroy_worker_threads(crypt_ctxt->threads, crypt_ctxt->nthreads); - - my_free(ctxt->root); - my_free(ctxt); -} - -static -crypt_thread_ctxt_t * -create_worker_threads(uint n) -{ - crypt_thread_ctxt_t *threads; - uint i; - - threads = (crypt_thread_ctxt_t *) - my_malloc(sizeof(crypt_thread_ctxt_t) * n, MYF(MY_FAE)); - - for (i = 0; i < n; i++) { - crypt_thread_ctxt_t *thd = threads + i; - - thd->num = i + 1; - thd->started = FALSE; - thd->cancelled = FALSE; - thd->data_avail = FALSE; - - thd->to = (uchar *) my_malloc(XB_CRYPT_CHUNK_SIZE + - XB_CRYPT_HASH_LEN, MYF(MY_FAE)); - - thd->iv = (uchar *) my_malloc(encrypt_iv_len, MYF(MY_FAE)); - - /* Initialize the control mutex and condition var */ - if (pthread_mutex_init(&thd->ctrl_mutex, 
NULL) || - pthread_cond_init(&thd->ctrl_cond, NULL)) { - goto err; - } - - /* Initialize and data mutex and condition var */ - if (pthread_mutex_init(&thd->data_mutex, NULL) || - pthread_cond_init(&thd->data_cond, NULL)) { - goto err; - } - - if (xb_crypt_cipher_open(&thd->cipher_handle)) { - goto err; - } - - pthread_mutex_lock(&thd->ctrl_mutex); - - if (pthread_create(&thd->id, NULL, encrypt_worker_thread_func, - thd)) { - msg("encrypt: pthread_create() failed: " - "errno = %d\n", errno); - goto err; - } - } - - /* Wait for the threads to start */ - for (i = 0; i < n; i++) { - crypt_thread_ctxt_t *thd = threads + i; - - while (thd->started == FALSE) - pthread_cond_wait(&thd->ctrl_cond, &thd->ctrl_mutex); - pthread_mutex_unlock(&thd->ctrl_mutex); - } - - return threads; - -err: - return NULL; -} - -static -void -destroy_worker_threads(crypt_thread_ctxt_t *threads, uint n) -{ - uint i; - - for (i = 0; i < n; i++) { - crypt_thread_ctxt_t *thd = threads + i; - - pthread_mutex_lock(&thd->data_mutex); - threads[i].cancelled = TRUE; - pthread_cond_signal(&thd->data_cond); - pthread_mutex_unlock(&thd->data_mutex); - - pthread_join(thd->id, NULL); - - pthread_cond_destroy(&thd->data_cond); - pthread_mutex_destroy(&thd->data_mutex); - pthread_cond_destroy(&thd->ctrl_cond); - pthread_mutex_destroy(&thd->ctrl_mutex); - - xb_crypt_cipher_close(thd->cipher_handle); - - my_free(thd->to); - my_free(thd->iv); - } - - my_free(threads); -} - -static -void * -encrypt_worker_thread_func(void *arg) -{ - crypt_thread_ctxt_t *thd = (crypt_thread_ctxt_t *) arg; - - pthread_mutex_lock(&thd->ctrl_mutex); - - pthread_mutex_lock(&thd->data_mutex); - - thd->started = TRUE; - pthread_cond_signal(&thd->ctrl_cond); - - pthread_mutex_unlock(&thd->ctrl_mutex); - - while (1) { - thd->data_avail = FALSE; - pthread_cond_signal(&thd->data_cond); - - while (!thd->data_avail && !thd->cancelled) { - pthread_cond_wait(&thd->data_cond, &thd->data_mutex); - } - - if (thd->cancelled) - break; - - thd->to_len 
= thd->from_len; - - if (xb_crypt_encrypt(thd->cipher_handle, thd->from, - thd->from_len, thd->to, &thd->to_len, - thd->iv)) { - thd->to_len = 0; - continue; - } - } - - pthread_mutex_unlock(&thd->data_mutex); - - return NULL; -} -#endif /* HAVE_GCRYPT*/ diff --git a/extra/mariabackup/ds_encrypt.h b/extra/mariabackup/ds_encrypt.h deleted file mode 100644 index c4d8d7f8427..00000000000 --- a/extra/mariabackup/ds_encrypt.h +++ /dev/null @@ -1,33 +0,0 @@ -/****************************************************** -Copyright (c) 2013 Percona LLC and/or its affiliates. - -Encryption interface for XtraBackup. - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -*******************************************************/ - -#ifndef DS_ENCRYPT_H -#define DS_ENCRYPT_H - -#include "datasink.h" -#ifdef HAVE_GCRYPT -extern datasink_t datasink_encrypt; -#endif -/* Encryption options */ -extern uint ds_encrypt_encrypt_threads; -extern ulonglong ds_encrypt_encrypt_chunk_size; - - -#endif diff --git a/extra/mariabackup/encryption_plugin.cc b/extra/mariabackup/encryption_plugin.cc index 9f2782d89a1..8f7741b057a 100644 --- a/extra/mariabackup/encryption_plugin.cc +++ b/extra/mariabackup/encryption_plugin.cc @@ -126,7 +126,6 @@ void encryption_plugin_prepare_init(int argc, char **argv) if (!xb_plugin_load) { - /* This prevents crashes e.g in --stats with wrong my.cnf*/ finalize_encryption_plugin(0); return; } diff --git a/extra/mariabackup/fil_cur.h b/extra/mariabackup/fil_cur.h index 88239efd2bb..f3601823a5a 100644 --- a/extra/mariabackup/fil_cur.h +++ b/extra/mariabackup/fil_cur.h @@ -29,7 +29,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA #include "read_filt.h" struct xb_fil_cur_t { - os_file_t file; /*!< source file handle */ + pfs_os_file_t file; /*!< source file handle */ fil_node_t* node; /*!< source tablespace node */ char rel_path[FN_REFLEN]; /*!< normalized file path */ diff --git a/extra/mariabackup/innobackupex.cc b/extra/mariabackup/innobackupex.cc index 59fb8fb5565..7e97b258489 100644 --- a/extra/mariabackup/innobackupex.cc +++ b/extra/mariabackup/innobackupex.cc @@ -101,8 +101,6 @@ char *opt_ibx_login_path = NULL; ulong opt_ibx_lock_wait_query_type; ulong opt_ibx_kill_long_query_type; -ulong opt_ibx_decrypt_algo = 0; - uint opt_ibx_kill_long_queries_timeout = 0; uint opt_ibx_lock_wait_timeout = 0; uint opt_ibx_lock_wait_threshold = 0; @@ -110,7 +108,6 @@ uint 
opt_ibx_debug_sleep_before_unlock = 0; uint opt_ibx_safe_slave_backup_timeout = 0; const char *opt_ibx_history = NULL; -bool opt_ibx_decrypt = false; char *opt_ibx_include = NULL; char *opt_ibx_databases = NULL; @@ -121,15 +118,9 @@ char *ibx_backup_directory = NULL; /* copy of proxied xtrabackup options */ my_bool ibx_xb_close_files; -my_bool ibx_xtrabackup_compact; const char *ibx_xtrabackup_compress_alg; uint ibx_xtrabackup_compress_threads; ulonglong ibx_xtrabackup_compress_chunk_size; -ulong ibx_xtrabackup_encrypt_algo; -char *ibx_xtrabackup_encrypt_key; -char *ibx_xtrabackup_encrypt_key_file; -uint ibx_xtrabackup_encrypt_threads; -ulonglong ibx_xtrabackup_encrypt_chunk_size; my_bool ibx_xtrabackup_export; char *ibx_xtrabackup_extra_lsndir; char *ibx_xtrabackup_incremental_basedir; @@ -138,8 +129,6 @@ my_bool ibx_xtrabackup_incremental_force_scan; ulint ibx_xtrabackup_log_copy_interval; char *ibx_xtrabackup_incremental; int ibx_xtrabackup_parallel; -my_bool ibx_xtrabackup_rebuild_indexes; -ulint ibx_xtrabackup_rebuild_threads; char *ibx_xtrabackup_stream_str; char *ibx_xtrabackup_tables_file; long ibx_xtrabackup_throttle; @@ -201,7 +190,6 @@ enum innobackupex_options OPT_NO_VERSION_CHECK, OPT_NO_BACKUP_LOCKS, OPT_DATABASES, - OPT_DECRYPT, OPT_DECOMPRESS, /* options wich are passed directly to xtrabackup */ @@ -210,11 +198,6 @@ enum innobackupex_options OPT_COMPRESS, OPT_COMPRESS_THREADS, OPT_COMPRESS_CHUNK_SIZE, - OPT_ENCRYPT, - OPT_ENCRYPT_KEY, - OPT_ENCRYPT_KEY_FILE, - OPT_ENCRYPT_THREADS, - OPT_ENCRYPT_CHUNK_SIZE, OPT_EXPORT, OPT_EXTRA_LSNDIR, OPT_INCREMENTAL_BASEDIR, @@ -430,12 +413,6 @@ static struct my_option ibx_long_options[] = (uchar*) &opt_ibx_incremental_history_uuid, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"decrypt", OPT_DECRYPT, "Decrypts all files with the .xbcrypt " - "extension in a backup previously made with --encrypt option.", - &opt_ibx_decrypt_algo, &opt_ibx_decrypt_algo, - &xtrabackup_encrypt_algo_typelib, GET_ENUM, REQUIRED_ARG, 
- 0, 0, 0, 0, 0, 0}, - {"ftwrl-wait-query-type", OPT_LOCK_WAIT_QUERY_TYPE, "This option specifies which types of queries are allowed to complete " "before innobackupex will issue the global lock. Default is all.", @@ -533,12 +510,6 @@ static struct my_option ibx_long_options[] = (uchar*) &ibx_xb_close_files, (uchar*) &ibx_xb_close_files, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"compact", OPT_COMPACT, "Create a compact backup with all secondary " - "index pages omitted. This option is passed directly to xtrabackup. " - "See xtrabackup documentation for details.", - (uchar*) &ibx_xtrabackup_compact, (uchar*) &ibx_xtrabackup_compact, - 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"compress", OPT_COMPRESS, "This option instructs xtrabackup to " "compress backup copies of InnoDB data files. It is passed directly " "to the xtrabackup child process. Try 'xtrabackup --help' for more " @@ -560,46 +531,6 @@ static struct my_option ibx_long_options[] = (uchar*) &ibx_xtrabackup_compress_chunk_size, 0, GET_ULL, REQUIRED_ARG, (1 << 16), 1024, ULONGLONG_MAX, 0, 0, 0}, - {"encrypt", OPT_ENCRYPT, "This option instructs xtrabackup to encrypt " - "backup copies of InnoDB data files using the algorithm specified in " - "the ENCRYPTION-ALGORITHM. It is passed directly to the xtrabackup " - "child process. Try 'xtrabackup --help' for more details.", - &ibx_xtrabackup_encrypt_algo, &ibx_xtrabackup_encrypt_algo, - &xtrabackup_encrypt_algo_typelib, GET_ENUM, REQUIRED_ARG, - 0, 0, 0, 0, 0, 0}, - - {"encrypt-key", OPT_ENCRYPT_KEY, "This option instructs xtrabackup to " - "use the given ENCRYPTION-KEY when using the --encrypt or --decrypt " - "options. During backup it is passed directly to the xtrabackup child " - "process. 
Try 'xtrabackup --help' for more details.", - (uchar*) &ibx_xtrabackup_encrypt_key, - (uchar*) &ibx_xtrabackup_encrypt_key, 0, - GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - - {"encrypt-key-file", OPT_ENCRYPT_KEY_FILE, "This option instructs " - "xtrabackup to use the encryption key stored in the given " - "ENCRYPTION-KEY-FILE when using the --encrypt or --decrypt options.", - (uchar*) &ibx_xtrabackup_encrypt_key_file, - (uchar*) &ibx_xtrabackup_encrypt_key_file, 0, - GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - - {"encrypt-threads", OPT_ENCRYPT_THREADS, - "This option specifies the number of worker threads that will be used " - "for parallel encryption. It is passed directly to the xtrabackup " - "child process. Try 'xtrabackup --help' for more details.", - (uchar*) &ibx_xtrabackup_encrypt_threads, - (uchar*) &ibx_xtrabackup_encrypt_threads, - 0, GET_UINT, REQUIRED_ARG, 1, 1, UINT_MAX, 0, 0, 0}, - - {"encrypt-chunk-size", OPT_ENCRYPT_CHUNK_SIZE, - "This option specifies the size of the internal working buffer for " - "each encryption thread, measured in bytes. It is passed directly to " - "the xtrabackup child process. Try 'xtrabackup --help' for more " - "details.", - (uchar*) &ibx_xtrabackup_encrypt_chunk_size, - (uchar*) &ibx_xtrabackup_encrypt_chunk_size, - 0, GET_ULL, REQUIRED_ARG, (1 << 16), 1024, ULONGLONG_MAX, 0, 0, 0}, - {"export", OPT_EXPORT, "This option is passed directly to xtrabackup's " "--export option. It enables exporting individual tables for import " "into another server. 
See the xtrabackup documentation for details.", @@ -735,8 +666,6 @@ You can download full text of the license on http://www.gnu.org/licenses/gpl-2.0 SYNOPOSIS\n\ \n\ innobackupex [--compress] [--compress-threads=NUMBER-OF-THREADS] [--compress-chunk-size=CHUNK-SIZE]\n\ - [--encrypt=ENCRYPTION-ALGORITHM] [--encrypt-threads=NUMBER-OF-THREADS] [--encrypt-chunk-size=CHUNK-SIZE]\n\ - [--encrypt-key=LITERAL-ENCRYPTION-KEY] | [--encryption-key-file=MY.KEY]\n\ [--include=REGEXP] [--user=NAME]\n\ [--password=WORD] [--port=PORT] [--socket=SOCKET]\n\ [--no-timestamp] [--ibbackup=IBBACKUP-BINARY]\n\ @@ -748,7 +677,7 @@ innobackupex [--compress] [--compress-threads=NUMBER-OF-THREADS] [--compress-chu [--incremental] [--incremental-basedir]\n\ [--incremental-dir] [--incremental-force-scan] [--incremental-lsn]\n\ [--incremental-history-name=NAME] [--incremental-history-uuid=UUID]\n\ - [--close-files] [--compact] \n\ + [--close-files]\n\ BACKUP-ROOT-DIR\n\ \n\ innobackupex --apply-log [--use-memory=B]\n\ @@ -760,8 +689,7 @@ innobackupex --copy-back [--defaults-file=MY.CNF] [--defaults-group=GROUP-NAME] \n\ innobackupex --move-back [--defaults-file=MY.CNF] [--defaults-group=GROUP-NAME] BACKUP-DIR\n\ \n\ -innobackupex [--decompress] [--decrypt=ENCRYPTION-ALGORITHM]\n\ - [--encrypt-key=LITERAL-ENCRYPTION-KEY] | [--encryption-key-file=MY.KEY]\n\ +innobackupex [--decompress]\n\ [--parallel=NUMBER-OF-FORKS] BACKUP-DIR\n\ \n\ DESCRIPTION\n\ @@ -798,15 +726,12 @@ it moves files to their original locations rather than copies them. As this\n\ option removes backup files, it must be used with caution. It may be useful in\n\ cases when there is not enough free disk space to copy files.\n\ \n\ -The --decompress --decrypt command will decrypt and/or decompress a backup made\n\ -with the --compress and/or --encrypt options. When decrypting, the encryption\n\ -algorithm and key used when the backup was taken MUST be provided via the\n\ -specified options. 
--decrypt and --decompress may be used together at the same\n\ -time to completely normalize a previously compressed and encrypted backup. The\n\ ---parallel option will allow multiple files to be decrypted and/or decompressed\n\ +The --decompress command will decompress a backup made\n\ +with the --compress option. The\n\ +--parallel option will allow multiple files to be decompressed\n\ simultaneously. In order to decompress, the qpress utility MUST be installed\n\ and accessable within the path. This process will remove the original\n\ -compressed/encrypted files and leave the results in the same location.\n\ +compressed files and leave the results in the same location.\n\ \n\ On success the exit code innobackupex is 0. A non-zero exit code \n\ indicates an error.\n"); @@ -839,18 +764,8 @@ ibx_get_one_option(int optid, opt_ibx_history = ""; } break; - case OPT_DECRYPT: - if (argument == NULL) { - ibx_msg("Missing --decrypt argument, must specify a " - "valid encryption algorithm.\n"); - return(1); - } - opt_ibx_decrypt = true; - break; case OPT_STREAM: - if (!strcasecmp(argument, "tar")) - xtrabackup_stream_fmt = XB_STREAM_FMT_TAR; - else if (!strcasecmp(argument, "xbstream")) + if (!strcasecmp(argument, "xbstream")) xtrabackup_stream_fmt = XB_STREAM_FMT_XBSTREAM; else { ibx_msg("Invalid --stream argument: %s\n", argument); @@ -868,15 +783,6 @@ ibx_get_one_option(int optid, } xtrabackup_compress = TRUE; break; - case OPT_ENCRYPT: - if (argument == NULL) - { - msg("Missing --encrypt argument, must specify a " - "valid encryption algorithm.\n"); - return 1; - } - xtrabackup_encrypt = TRUE; - break; case 'p': if (argument) { @@ -930,7 +836,7 @@ ibx_handle_options(int *argc, char ***argv) ibx_mode = IBX_MODE_COPY_BACK; } else if (opt_ibx_move_back) { ibx_mode = IBX_MODE_MOVE_BACK; - } else if (opt_ibx_decrypt || opt_ibx_decompress) { + } else if (opt_ibx_decompress) { ibx_mode = IBX_MODE_DECRYPT_DECOMPRESS; } else { ibx_mode = IBX_MODE_BACKUP; @@ -1008,8 +914,6 @@ 
ibx_init() opt_lock_wait_query_type = opt_ibx_lock_wait_query_type; opt_kill_long_query_type = opt_ibx_kill_long_query_type; - opt_decrypt_algo = opt_ibx_decrypt_algo; - opt_kill_long_queries_timeout = opt_ibx_kill_long_queries_timeout; opt_lock_wait_timeout = opt_ibx_lock_wait_timeout; opt_lock_wait_threshold = opt_ibx_lock_wait_threshold; @@ -1017,18 +921,12 @@ ibx_init() opt_safe_slave_backup_timeout = opt_ibx_safe_slave_backup_timeout; opt_history = opt_ibx_history; - opt_decrypt = opt_ibx_decrypt; /* setup xtrabackup options */ xb_close_files = ibx_xb_close_files; xtrabackup_compress_alg = ibx_xtrabackup_compress_alg; xtrabackup_compress_threads = ibx_xtrabackup_compress_threads; xtrabackup_compress_chunk_size = ibx_xtrabackup_compress_chunk_size; - xtrabackup_encrypt_algo = ibx_xtrabackup_encrypt_algo; - xtrabackup_encrypt_key = ibx_xtrabackup_encrypt_key; - xtrabackup_encrypt_key_file = ibx_xtrabackup_encrypt_key_file; - xtrabackup_encrypt_threads = ibx_xtrabackup_encrypt_threads; - xtrabackup_encrypt_chunk_size = ibx_xtrabackup_encrypt_chunk_size; xtrabackup_export = ibx_xtrabackup_export; xtrabackup_extra_lsndir = ibx_xtrabackup_extra_lsndir; xtrabackup_incremental_basedir = ibx_xtrabackup_incremental_basedir; @@ -1109,7 +1007,7 @@ ibx_init() case IBX_MODE_DECRYPT_DECOMPRESS: xtrabackup_decrypt_decompress = TRUE; xtrabackup_target_dir = ibx_position_arg; - run = "decrypt and decompress"; + run = "decompress"; break; default: ut_error; diff --git a/extra/mariabackup/xb0xb.h b/extra/mariabackup/xb0xb.h index 659ab8ea5d0..a8b17f59579 100644 --- a/extra/mariabackup/xb0xb.h +++ b/extra/mariabackup/xb0xb.h @@ -23,17 +23,16 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA extern void os_io_init_simple(void); -extern os_file_t files[1000]; +extern pfs_os_file_t files[1000]; extern const char *innodb_checksum_algorithm_names[]; extern TYPELIB innodb_checksum_algorithm_typelib; extern dberr_t open_or_create_data_files( - ibool* 
create_new_db, + bool* create_new_db, #ifdef UNIV_LOG_ARCHIVE lsn_t* min_arch_log_no, lsn_t* max_arch_log_no, -#endif - lsn_t* min_flushed_lsn, - lsn_t* max_flushed_lsn, +#endif + lsn_t* flushed_lsn, ulint* sum_of_new_sizes) ; int @@ -45,19 +44,6 @@ dberr_t* err, /*!< out: this is set to DB_ERROR if an error os_file_dir_t dir, /*!< in: directory stream */ os_file_stat_t* info) /*!< in/out: buffer where the info is returned */; -buf_block_t* btr_node_ptr_get_child( - const rec_t* node_ptr,/*!< in: node pointer */ - dict_index_t* index, /*!< in: index */ - const ulint* offsets,/*!< in: array returned by rec_get_offsets() */ - mtr_t* mtr) /*!< in: mtr */; - -buf_block_t* -btr_root_block_get( -/*===============*/ -const dict_index_t* index, /*!< in: index tree */ -ulint mode, /*!< in: either RW_S_LATCH - or RW_X_LATCH */ - mtr_t* mtr) /*!< in: mtr */; fil_space_t* fil_space_get_by_name(const char *); ibool @@ -66,7 +52,6 @@ void innodb_log_checksum_func_update( /*============================*/ ulint algorithm) /*!< in: algorithm */; -dberr_t recv_find_max_checkpoint(log_group_t** max_group, ulint* max_field); dberr_t srv_undo_tablespaces_init( /*======================*/ diff --git a/extra/mariabackup/xbcrypt.c b/extra/mariabackup/xbcrypt.c deleted file mode 100644 index 3da70e171f7..00000000000 --- a/extra/mariabackup/xbcrypt.c +++ /dev/null @@ -1,696 +0,0 @@ -/****************************************************** -Copyright (c) 2013 Percona LLC and/or its affiliates. - -The xbcrypt utility: decrypt files in the XBCRYPT format. - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -*******************************************************/ - -#include <my_base.h> -#include <my_getopt.h> -#include "common.h" -#include "xbcrypt.h" -#include "xbcrypt_common.h" -#include "crc_glue.h" - -#if !defined(GCRYPT_VERSION_NUMBER) || (GCRYPT_VERSION_NUMBER < 0x010600) -GCRY_THREAD_OPTION_PTHREAD_IMPL; -#endif - -#define XBCRYPT_VERSION "1.1" - -typedef enum { - RUN_MODE_NONE, - RUN_MODE_ENCRYPT, - RUN_MODE_DECRYPT -} run_mode_t; - -const char *xbcrypt_encrypt_algo_names[] = -{ "NONE", "AES128", "AES192", "AES256", NullS}; -TYPELIB xbcrypt_encrypt_algo_typelib= -{array_elements(xbcrypt_encrypt_algo_names)-1,"", - xbcrypt_encrypt_algo_names, NULL}; - -static run_mode_t opt_run_mode = RUN_MODE_ENCRYPT; -static char *opt_input_file = NULL; -static char *opt_output_file = NULL; -static ulong opt_encrypt_algo; -static char *opt_encrypt_key_file = NULL; -static void *opt_encrypt_key = NULL; -static ulonglong opt_encrypt_chunk_size = 0; -static my_bool opt_verbose = FALSE; - -static uint encrypt_algos[] = { GCRY_CIPHER_NONE, - GCRY_CIPHER_AES128, - GCRY_CIPHER_AES192, - GCRY_CIPHER_AES256 }; -static int encrypt_algo = 0; -static int encrypt_mode = GCRY_CIPHER_MODE_CTR; -static uint encrypt_key_len = 0; -static size_t encrypt_iv_len = 0; - -static struct my_option my_long_options[] = -{ - {"help", '?', "Display this help and exit.", - 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - - {"decrypt", 'd', "Decrypt data input to output.", - 0, 0, 0, - GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - - {"input", 'i', "Optional input file. If not specified, input" - " will be read from standard input.", - &opt_input_file, &opt_input_file, 0, - GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - - {"output", 'o', "Optional output file. 
If not specified, output" - " will be written to standard output.", - &opt_output_file, &opt_output_file, 0, - GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - - {"encrypt-algo", 'a', "Encryption algorithm.", - &opt_encrypt_algo, &opt_encrypt_algo, &xbcrypt_encrypt_algo_typelib, - GET_ENUM, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - - {"encrypt-key", 'k', "Encryption key.", - &opt_encrypt_key, &opt_encrypt_key, 0, - GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - - {"encrypt-key-file", 'f', "File which contains encryption key.", - &opt_encrypt_key_file, &opt_encrypt_key_file, 0, - GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - - {"encrypt-chunk-size", 's', "Size of working buffer for encryption in" - " bytes. The default value is 64K.", - &opt_encrypt_chunk_size, &opt_encrypt_chunk_size, 0, - GET_ULL, REQUIRED_ARG, (1 << 16), 1024, ULONGLONG_MAX, 0, 0, 0}, - - {"verbose", 'v', "Display verbose status output.", - &opt_verbose, &opt_verbose, - 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} -}; - -static -int -get_options(int *argc, char ***argv); - -static -my_bool -get_one_option(int optid, const struct my_option *opt __attribute__((unused)), - char *argument __attribute__((unused))); - -static -void -print_version(void); - -static -void -usage(void); - -static -int -mode_decrypt(File filein, File fileout); - -static -int -mode_encrypt(File filein, File fileout); - -int -main(int argc, char **argv) -{ -#if !defined(GCRYPT_VERSION_NUMBER) || (GCRYPT_VERSION_NUMBER < 0x010600) - gcry_error_t gcry_error; -#endif - File filein = 0; - File fileout = 0; - - MY_INIT(argv[0]); - - crc_init(); - - if (get_options(&argc, &argv)) { - goto err; - } - - /* Acording to gcrypt docs (and my testing), setting up the threading - callbacks must be done first, so, lets give it a shot */ -#if !defined(GCRYPT_VERSION_NUMBER) || (GCRYPT_VERSION_NUMBER < 0x010600) - gcry_error = gcry_control(GCRYCTL_SET_THREAD_CBS, &gcry_threads_pthread); - 
if (gcry_error) { - msg("%s: unable to set libgcrypt thread cbs - " - "%s : %s\n", my_progname, - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - return 1; - } -#endif - - /* Version check should be the very first call because it - makes sure that important subsystems are intialized. */ - if (!gcry_control(GCRYCTL_ANY_INITIALIZATION_P)) { - const char *gcrypt_version; - gcrypt_version = gcry_check_version(NULL); - /* No other library has already initialized libgcrypt. */ - if (!gcrypt_version) { - msg("%s: failed to initialize libgcrypt\n", - my_progname); - return 1; - } else if (opt_verbose) { - msg("%s: using gcrypt %s\n", my_progname, - gcrypt_version); - } - } - gcry_control(GCRYCTL_DISABLE_SECMEM, 0); - gcry_control(GCRYCTL_INITIALIZATION_FINISHED, 0); - - /* Determine the algorithm */ - encrypt_algo = encrypt_algos[opt_encrypt_algo]; - - /* Set up the iv length */ - encrypt_iv_len = gcry_cipher_get_algo_blklen(encrypt_algo); - - /* Now set up the key */ - if (opt_encrypt_key == NULL && opt_encrypt_key_file == NULL) { - msg("%s: no encryption key or key file specified.\n", - my_progname); - return 1; - } else if (opt_encrypt_key && opt_encrypt_key_file) { - msg("%s: both encryption key and key file specified.\n", - my_progname); - return 1; - } else if (opt_encrypt_key_file) { - if (!xb_crypt_read_key_file(opt_encrypt_key_file, - &opt_encrypt_key, - &encrypt_key_len)) { - msg("%s: unable to read encryption key file \"%s\".\n", - opt_encrypt_key_file, my_progname); - return 1; - } - } else { - encrypt_key_len = strlen(opt_encrypt_key); - } - - if (opt_input_file) { - MY_STAT mystat; - - if (opt_verbose) - msg("%s: input file \"%s\".\n", my_progname, - opt_input_file); - - if (my_stat(opt_input_file, &mystat, MYF(MY_WME)) == NULL) { - goto err; - } - if (!MY_S_ISREG(mystat.st_mode)) { - msg("%s: \"%s\" is not a regular file, exiting.\n", - my_progname, opt_input_file); - goto err; - } - if ((filein = my_open(opt_input_file, O_RDONLY, MYF(MY_WME))) - 
< 0) { - msg("%s: failed to open \"%s\".\n", my_progname, - opt_input_file); - goto err; - } - } else { - if (opt_verbose) - msg("%s: input from standard input.\n", my_progname); - filein = fileno(stdin); - } - - if (opt_output_file) { - if (opt_verbose) - msg("%s: output file \"%s\".\n", my_progname, - opt_output_file); - - if ((fileout = my_create(opt_output_file, 0, - O_WRONLY|O_BINARY|O_EXCL|O_NOFOLLOW, - MYF(MY_WME))) < 0) { - msg("%s: failed to create output file \"%s\".\n", - my_progname, opt_output_file); - goto err; - } - } else { - if (opt_verbose) - msg("%s: output to standard output.\n", my_progname); - fileout = fileno(stdout); - } - - if (opt_run_mode == RUN_MODE_DECRYPT - && mode_decrypt(filein, fileout)) { - goto err; - } else if (opt_run_mode == RUN_MODE_ENCRYPT - && mode_encrypt(filein, fileout)) { - goto err; - } - - if (opt_input_file && filein) { - my_close(filein, MYF(MY_WME)); - } - if (opt_output_file && fileout) { - my_close(fileout, MYF(MY_WME)); - } - - my_cleanup_options(my_long_options); - - my_end(0); - - return EXIT_SUCCESS; -err: - if (opt_input_file && filein) { - my_close(filein, MYF(MY_WME)); - } - if (opt_output_file && fileout) { - my_close(fileout, MYF(MY_WME)); - } - - my_cleanup_options(my_long_options); - - my_end(0); - - exit(EXIT_FAILURE); - -} - - -static -size_t -my_xb_crypt_read_callback(void *userdata, void *buf, size_t len) -{ - File* file = (File *) userdata; - return xb_read_full(*file, buf, len); -} - -static -int -mode_decrypt(File filein, File fileout) -{ - xb_rcrypt_t *xbcrypt_file = NULL; - void *chunkbuf = NULL; - size_t chunksize; - size_t originalsize; - void *ivbuf = NULL; - size_t ivsize; - void *decryptbuf = NULL; - size_t decryptbufsize = 0; - ulonglong ttlchunksread = 0; - ulonglong ttlbytesread = 0; - xb_rcrypt_result_t result; - gcry_cipher_hd_t cipher_handle; - gcry_error_t gcry_error; - my_bool hash_appended; - - if (encrypt_algo != GCRY_CIPHER_NONE) { - gcry_error = gcry_cipher_open(&cipher_handle, 
- encrypt_algo, - encrypt_mode, 0); - if (gcry_error) { - msg("%s:decrypt: unable to open libgcrypt" - " cipher - %s : %s\n", my_progname, - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - return 1; - } - - gcry_error = gcry_cipher_setkey(cipher_handle, - opt_encrypt_key, - encrypt_key_len); - if (gcry_error) { - msg("%s:decrypt: unable to set libgcrypt cipher" - "key - %s : %s\n", my_progname, - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - goto err; - } - } - - /* Initialize the xb_crypt format reader */ - xbcrypt_file = xb_crypt_read_open(&filein, my_xb_crypt_read_callback); - if (xbcrypt_file == NULL) { - msg("%s:decrypt: xb_crypt_read_open() failed.\n", my_progname); - goto err; - } - - /* Walk the encrypted chunks, decrypting them and writing out */ - while ((result = xb_crypt_read_chunk(xbcrypt_file, &chunkbuf, - &originalsize, &chunksize, - &ivbuf, &ivsize, &hash_appended)) - == XB_CRYPT_READ_CHUNK) { - - if (encrypt_algo != GCRY_CIPHER_NONE) { - gcry_error = gcry_cipher_reset(cipher_handle); - if (gcry_error) { - msg("%s:decrypt: unable to reset libgcrypt" - " cipher - %s : %s\n", my_progname, - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - goto err; - } - - if (ivsize) { - gcry_error = gcry_cipher_setctr(cipher_handle, - ivbuf, - ivsize); - } - if (gcry_error) { - msg("%s:decrypt: unable to set cipher iv - " - "%s : %s\n", my_progname, - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - continue; - } - - if (decryptbufsize < originalsize) { - decryptbuf = my_realloc(decryptbuf, - originalsize, - MYF(MY_WME | MY_ALLOW_ZERO_PTR)); - decryptbufsize = originalsize; - } - - /* Try to decrypt it */ - gcry_error = gcry_cipher_decrypt(cipher_handle, - decryptbuf, - originalsize, - chunkbuf, - chunksize); - if (gcry_error) { - msg("%s:decrypt: unable to decrypt chunk - " - "%s : %s\n", my_progname, - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - gcry_cipher_close(cipher_handle); - goto err; - } - - } 
else { - decryptbuf = chunkbuf; - } - - if (hash_appended) { - uchar hash[XB_CRYPT_HASH_LEN]; - - originalsize -= XB_CRYPT_HASH_LEN; - - /* ensure that XB_CRYPT_HASH_LEN is the correct length - of XB_CRYPT_HASH hashing algorithm output */ - xb_a(gcry_md_get_algo_dlen(XB_CRYPT_HASH) == - XB_CRYPT_HASH_LEN); - gcry_md_hash_buffer(XB_CRYPT_HASH, hash, decryptbuf, - originalsize); - if (memcmp(hash, (char *) decryptbuf + originalsize, - XB_CRYPT_HASH_LEN) != 0) { - msg("%s:%s invalid plaintext hash. " - "Wrong encrytion key specified?\n", - my_progname, __FUNCTION__); - result = XB_CRYPT_READ_ERROR; - goto err; - } - } - - /* Write it out */ - if (my_write(fileout, (const uchar *) decryptbuf, originalsize, - MYF(MY_WME | MY_NABP))) { - msg("%s:decrypt: unable to write output chunk.\n", - my_progname); - goto err; - } - ttlchunksread++; - ttlbytesread += chunksize; - if (opt_verbose) - msg("%s:decrypt: %llu chunks read, %llu bytes read\n.", - my_progname, ttlchunksread, ttlbytesread); - } - - xb_crypt_read_close(xbcrypt_file); - - if (encrypt_algo != GCRY_CIPHER_NONE) - gcry_cipher_close(cipher_handle); - - if (decryptbuf && decryptbufsize) - my_free(decryptbuf); - - if (opt_verbose) - msg("\n%s:decrypt: done\n", my_progname); - - return 0; -err: - if (xbcrypt_file) - xb_crypt_read_close(xbcrypt_file); - - if (encrypt_algo != GCRY_CIPHER_NONE) - gcry_cipher_close(cipher_handle); - - if (decryptbuf && decryptbufsize) - my_free(decryptbuf); - - return 1; -} - -static -ssize_t -my_xb_crypt_write_callback(void *userdata, const void *buf, size_t len) -{ - File* file = (File *) userdata; - - ssize_t ret = my_write(*file, buf, len, MYF(MY_WME)); - posix_fadvise(*file, 0, 0, POSIX_FADV_DONTNEED); - return ret; -} - -static -int -mode_encrypt(File filein, File fileout) -{ - size_t bytesread; - size_t chunkbuflen; - uchar *chunkbuf = NULL; - void *ivbuf = NULL; - size_t encryptbuflen = 0; - size_t encryptedlen = 0; - void *encryptbuf = NULL; - ulonglong ttlchunkswritten = 0; - 
ulonglong ttlbyteswritten = 0; - xb_wcrypt_t *xbcrypt_file = NULL; - gcry_cipher_hd_t cipher_handle; - gcry_error_t gcry_error; - - if (encrypt_algo != GCRY_CIPHER_NONE) { - gcry_error = gcry_cipher_open(&cipher_handle, - encrypt_algo, - encrypt_mode, 0); - if (gcry_error) { - msg("%s:encrypt: unable to open libgcrypt cipher - " - "%s : %s\n", my_progname, - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - return 1; - } - - gcry_error = gcry_cipher_setkey(cipher_handle, - opt_encrypt_key, - encrypt_key_len); - if (gcry_error) { - msg("%s:encrypt: unable to set libgcrypt cipher key - " - "%s : %s\n", my_progname, - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - goto err; - } - } - - posix_fadvise(filein, 0, 0, POSIX_FADV_SEQUENTIAL); - - xbcrypt_file = xb_crypt_write_open(&fileout, - my_xb_crypt_write_callback); - if (xbcrypt_file == NULL) { - msg("%s:encrypt: xb_crypt_write_open() failed.\n", - my_progname); - goto err; - } - - ivbuf = my_malloc(encrypt_iv_len, MYF(MY_FAE)); - - /* now read in data in chunk size, encrypt and write out */ - chunkbuflen = opt_encrypt_chunk_size + XB_CRYPT_HASH_LEN; - chunkbuf = (uchar *) my_malloc(chunkbuflen, MYF(MY_FAE)); - while ((bytesread = my_read(filein, chunkbuf, opt_encrypt_chunk_size, - MYF(MY_WME))) > 0) { - - size_t origbuflen = bytesread + XB_CRYPT_HASH_LEN; - - /* ensure that XB_CRYPT_HASH_LEN is the correct length - of XB_CRYPT_HASH hashing algorithm output */ - xb_a(XB_CRYPT_HASH_LEN == gcry_md_get_algo_dlen(XB_CRYPT_HASH)); - gcry_md_hash_buffer(XB_CRYPT_HASH, chunkbuf + bytesread, - chunkbuf, bytesread); - - if (encrypt_algo != GCRY_CIPHER_NONE) { - gcry_error = gcry_cipher_reset(cipher_handle); - - if (gcry_error) { - msg("%s:encrypt: unable to reset cipher - " - "%s : %s\n", my_progname, - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - goto err; - } - - xb_crypt_create_iv(ivbuf, encrypt_iv_len); - gcry_error = gcry_cipher_setctr(cipher_handle, - ivbuf, - encrypt_iv_len); - - if 
(gcry_error) { - msg("%s:encrypt: unable to set cipher iv - " - "%s : %s\n", my_progname, - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - continue; - } - - if (encryptbuflen < origbuflen) { - encryptbuf = my_realloc(encryptbuf, origbuflen, - MYF(MY_WME | MY_ALLOW_ZERO_PTR)); - encryptbuflen = origbuflen; - } - - gcry_error = gcry_cipher_encrypt(cipher_handle, - encryptbuf, - encryptbuflen, - chunkbuf, - origbuflen); - - encryptedlen = origbuflen; - - if (gcry_error) { - msg("%s:encrypt: unable to encrypt chunk - " - "%s : %s\n", my_progname, - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - gcry_cipher_close(cipher_handle); - goto err; - } - } else { - encryptedlen = origbuflen; - encryptbuf = chunkbuf; - } - - if (xb_crypt_write_chunk(xbcrypt_file, encryptbuf, - bytesread + XB_CRYPT_HASH_LEN, - encryptedlen, ivbuf, encrypt_iv_len)) { - msg("%s:encrypt: abcrypt_write_chunk() failed.\n", - my_progname); - goto err; - } - - ttlchunkswritten++; - ttlbyteswritten += encryptedlen; - - if (opt_verbose) - msg("%s:encrypt: %llu chunks written, %llu bytes " - "written\n.", my_progname, ttlchunkswritten, - ttlbyteswritten); - } - - my_free(ivbuf); - my_free(chunkbuf); - - if (encryptbuf && encryptbuflen) - my_free(encryptbuf); - - xb_crypt_write_close(xbcrypt_file); - - if (encrypt_algo != GCRY_CIPHER_NONE) - gcry_cipher_close(cipher_handle); - - if (opt_verbose) - msg("\n%s:encrypt: done\n", my_progname); - - return 0; -err: - if (chunkbuf) - my_free(chunkbuf); - - if (encryptbuf && encryptbuflen) - my_free(encryptbuf); - - if (xbcrypt_file) - xb_crypt_write_close(xbcrypt_file); - - if (encrypt_algo != GCRY_CIPHER_NONE) - gcry_cipher_close(cipher_handle); - - return 1; -} - -static -int -get_options(int *argc, char ***argv) -{ - int ho_error; - - if ((ho_error= handle_options(argc, argv, my_long_options, - get_one_option))) { - exit(EXIT_FAILURE); - } - - return 0; -} - -static -my_bool -get_one_option(int optid, const struct my_option *opt 
__attribute__((unused)), - char *argument __attribute__((unused))) -{ - switch (optid) { - case 'd': - opt_run_mode = RUN_MODE_DECRYPT; - break; - case '?': - usage(); - exit(0); - } - - return FALSE; -} - -static -void -print_version(void) -{ - printf("%s Ver %s for %s (%s)\n", my_progname, XBCRYPT_VERSION, - SYSTEM_TYPE, MACHINE_TYPE); -} - -static -void -usage(void) -{ - print_version(); - puts("Copyright (C) 2011 Percona Inc."); - puts("This software comes with ABSOLUTELY NO WARRANTY. " - "This is free software,\nand you are welcome to modify and " - "redistribute it under the GPL license.\n"); - - puts("Encrypt or decrypt files in the XBCRYPT format.\n"); - - puts("Usage: "); - printf(" %s [OPTIONS...]" - " # read data from specified input, encrypting or decrypting " - " and writing the result to the specified output.\n", - my_progname); - puts("\nOptions:"); - my_print_help(my_long_options); -} diff --git a/extra/mariabackup/xbcrypt.h b/extra/mariabackup/xbcrypt.h deleted file mode 100644 index 0e832266847..00000000000 --- a/extra/mariabackup/xbcrypt.h +++ /dev/null @@ -1,79 +0,0 @@ -/****************************************************** -Copyright (c) 2011 Percona LLC and/or its affiliates. - -Encryption interface for XtraBackup. - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -*******************************************************/ - -#ifndef XBCRYPT_H -#define XBCRYPT_H - -#include <my_base.h> -#include "common.h" - -#define XB_CRYPT_CHUNK_MAGIC1 "XBCRYP01" -#define XB_CRYPT_CHUNK_MAGIC2 "XBCRYP02" -#define XB_CRYPT_CHUNK_MAGIC3 "XBCRYP03" /* must be same size as ^^ */ -#define XB_CRYPT_CHUNK_MAGIC_CURRENT XB_CRYPT_CHUNK_MAGIC3 -#define XB_CRYPT_CHUNK_MAGIC_SIZE (sizeof(XB_CRYPT_CHUNK_MAGIC1)-1) - -#define XB_CRYPT_HASH GCRY_MD_SHA256 -#define XB_CRYPT_HASH_LEN 32 - -/****************************************************************************** -Write interface */ -typedef struct xb_wcrypt_struct xb_wcrypt_t; - -/* Callback on write for i/o, must return # of bytes written or -1 on error */ -typedef ssize_t xb_crypt_write_callback(void *userdata, - const void *buf, size_t len); - -xb_wcrypt_t *xb_crypt_write_open(void *userdata, - xb_crypt_write_callback *onwrite); - -/* Takes buffer, original length, encrypted length iv and iv length, formats - output buffer and calls write callback. 
- Returns 0 on success, 1 on error */ -int xb_crypt_write_chunk(xb_wcrypt_t *crypt, const void *buf, size_t olen, - size_t elen, const void *iv, size_t ivlen); - -/* Returns 0 on success, 1 on error */ -int xb_crypt_write_close(xb_wcrypt_t *crypt); - -/****************************************************************************** -Read interface */ -typedef struct xb_rcrypt_struct xb_rcrypt_t; - -/* Callback on read for i/o, must return # of bytes read or -1 on error */ -typedef size_t xb_crypt_read_callback(void *userdata, void *buf, size_t len); - -xb_rcrypt_t *xb_crypt_read_open(void *userdata, - xb_crypt_read_callback *onread); - -typedef enum { - XB_CRYPT_READ_CHUNK, - XB_CRYPT_READ_INCOMPLETE, - XB_CRYPT_READ_EOF, - XB_CRYPT_READ_ERROR -} xb_rcrypt_result_t; - -xb_rcrypt_result_t xb_crypt_read_chunk(xb_rcrypt_t *crypt, void **buf, - size_t *olen, size_t *elen, void **iv, - size_t *ivlen, my_bool *hash_appended); - -int xb_crypt_read_close(xb_rcrypt_t *crypt); - -#endif diff --git a/extra/mariabackup/xbcrypt_common.c b/extra/mariabackup/xbcrypt_common.c deleted file mode 100644 index 0cdb54dc66d..00000000000 --- a/extra/mariabackup/xbcrypt_common.c +++ /dev/null @@ -1,328 +0,0 @@ -/****************************************************** -Copyright (c) 2013, 2017 Percona LLC and/or its affiliates. - -Encryption configuration file interface for XtraBackup. - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -*******************************************************/ - -#include <my_base.h> -#include "common.h" -#include "xbcrypt.h" -#include "xbcrypt_common.h" - -/* Encryption options */ -char *ds_encrypt_key = NULL; -char *ds_encrypt_key_file = NULL; -ulong ds_encrypt_algo; - -static uint encrypt_key_len; -static uint encrypt_iv_len; - -static const uint encrypt_mode = GCRY_CIPHER_MODE_CTR; - -static uint encrypt_algos[] = { GCRY_CIPHER_NONE, GCRY_CIPHER_AES128, - GCRY_CIPHER_AES192, GCRY_CIPHER_AES256 }; -static uint encrypt_algo; - -#if !defined(GCRYPT_VERSION_NUMBER) || (GCRYPT_VERSION_NUMBER < 0x010600) -GCRY_THREAD_OPTION_PTHREAD_IMPL; -#endif - - -my_bool -xb_crypt_read_key_file(const char *filename, void** key, uint *keylength) -{ - FILE *fp; - - if (!(fp = my_fopen(filename, O_RDONLY, MYF(0)))) { - msg("%s:%s: unable to open config file \"%s\", errno(%d)\n", - my_progname, __FUNCTION__, filename, my_errno); - return FALSE; - } - - fseek(fp, 0 , SEEK_END); - *keylength = ftell(fp); - rewind(fp); - *key = my_malloc(*keylength, MYF(MY_FAE)); - *keylength = fread(*key, 1, *keylength, fp); - my_fclose(fp, MYF(0)); - return TRUE; -} - -void -xb_crypt_create_iv(void* ivbuf, size_t ivlen) -{ - gcry_create_nonce(ivbuf, ivlen); -} - -gcry_error_t -xb_crypt_init(uint *iv_len) -{ - gcry_error_t gcry_error; - - /* Acording to gcrypt docs (and my testing), setting up the threading - callbacks must be done first, so, lets give it a shot */ -#if !defined(GCRYPT_VERSION_NUMBER) || (GCRYPT_VERSION_NUMBER < 0x010600) - gcry_error = gcry_control(GCRYCTL_SET_THREAD_CBS, &gcry_threads_pthread); - if (gcry_error) { - msg("encryption: unable to set libgcrypt thread cbs - " - "%s : %s\n", - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - return gcry_error; - } 
-#endif - - /* Version check should be the very next call because it - makes sure that important subsystems are intialized. */ - if (!gcry_control(GCRYCTL_ANY_INITIALIZATION_P)) { - const char *gcrypt_version; - gcrypt_version = gcry_check_version(NULL); - /* No other library has already initialized libgcrypt. */ - if (!gcrypt_version) { - msg("encryption: failed to initialize libgcrypt\n"); - return 1; - } else { - msg("encryption: using gcrypt %s\n", gcrypt_version); - } - } - - /* Disable the gcry secure memory, not dealing with this for now */ - gcry_error = gcry_control(GCRYCTL_DISABLE_SECMEM, 0); - if (gcry_error) { - msg("encryption: unable to disable libgcrypt secmem - " - "%s : %s\n", - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - return gcry_error; - } - - /* Finalize gcry initialization. */ - gcry_error = gcry_control(GCRYCTL_INITIALIZATION_FINISHED, 0); - if (gcry_error) { - msg("encryption: unable to finish libgcrypt initialization - " - "%s : %s\n", - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - return gcry_error; - } - - /* Determine the algorithm */ - encrypt_algo = encrypt_algos[ds_encrypt_algo]; - - /* Set up the iv length */ - encrypt_iv_len = gcry_cipher_get_algo_blklen(encrypt_algo); - xb_a(encrypt_iv_len > 0); - if (iv_len != NULL) { - *iv_len = encrypt_iv_len; - } - - /* Now set up the key */ - if (ds_encrypt_key == NULL && - ds_encrypt_key_file == NULL) { - msg("encryption: no encryption key or key file specified.\n"); - return gcry_error; - } else if (ds_encrypt_key && ds_encrypt_key_file) { - msg("encryption: both encryption key and key file specified.\n"); - return gcry_error; - } else if (ds_encrypt_key_file) { - if (!xb_crypt_read_key_file(ds_encrypt_key_file, - (void**)&ds_encrypt_key, - &encrypt_key_len)) { - msg("encryption: unable to read encryption key file" - " \"%s\".\n", ds_encrypt_key_file); - return gcry_error; - } - } else if (ds_encrypt_key) { - encrypt_key_len = strlen(ds_encrypt_key); - } else 
{ - msg("encryption: no encryption key or key file specified.\n"); - return gcry_error; - } - - return 0; -} - -gcry_error_t -xb_crypt_cipher_open(gcry_cipher_hd_t *cipher_handle) -{ - if (encrypt_algo != GCRY_CIPHER_NONE) { - gcry_error_t gcry_error; - - gcry_error = gcry_cipher_open(cipher_handle, - encrypt_algo, - encrypt_mode, 0); - if (gcry_error) { - msg("encryption: unable to open libgcrypt" - " cipher - %s : %s\n", - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - gcry_cipher_close(*cipher_handle); - return gcry_error; - } - - gcry_error = gcry_cipher_setkey(*cipher_handle, - ds_encrypt_key, - encrypt_key_len); - if (gcry_error) { - msg("encryption: unable to set libgcrypt" - " cipher key - %s : %s\n", - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - gcry_cipher_close(*cipher_handle); - return gcry_error; - } - return gcry_error; - } - return 0; -} - -void -xb_crypt_cipher_close(gcry_cipher_hd_t cipher_handle) -{ - if (encrypt_algo != GCRY_CIPHER_NONE) - gcry_cipher_close(cipher_handle); -} - -gcry_error_t -xb_crypt_decrypt(gcry_cipher_hd_t cipher_handle, const uchar *from, - size_t from_len, uchar *to, size_t *to_len, - const uchar *iv, size_t iv_len, my_bool hash_appended) -{ - *to_len = from_len; - - if (encrypt_algo != GCRY_CIPHER_NONE) { - - gcry_error_t gcry_error; - - gcry_error = gcry_cipher_reset(cipher_handle); - if (gcry_error) { - msg("%s:encryption: unable to reset libgcrypt" - " cipher - %s : %s\n", my_progname, - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - return gcry_error; - } - - if (iv_len > 0) { - gcry_error = gcry_cipher_setctr(cipher_handle, - iv, iv_len); - } - if (gcry_error) { - msg("%s:encryption: unable to set cipher iv - " - "%s : %s\n", my_progname, - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - return gcry_error; - } - - /* Try to decrypt it */ - gcry_error = gcry_cipher_decrypt(cipher_handle, to, *to_len, - from, from_len); - if (gcry_error) { - msg("%s:encryption: unable 
to decrypt chunk - " - "%s : %s\n", my_progname, - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - gcry_cipher_close(cipher_handle); - return gcry_error; - } - - if (hash_appended) { - uchar hash[XB_CRYPT_HASH_LEN]; - - *to_len -= XB_CRYPT_HASH_LEN; - - /* ensure that XB_CRYPT_HASH_LEN is the correct length - of XB_CRYPT_HASH hashing algorithm output */ - xb_ad(gcry_md_get_algo_dlen(XB_CRYPT_HASH) == - XB_CRYPT_HASH_LEN); - gcry_md_hash_buffer(XB_CRYPT_HASH, hash, to, - *to_len); - if (memcmp(hash, (char *) to + *to_len, - XB_CRYPT_HASH_LEN) != 0) { - msg("%s:%s invalid plaintext hash. " - "Wrong encrytion key specified?\n", - my_progname, __FUNCTION__); - return 1; - } - } - - } else { - memcpy(to, from, *to_len); - } - - return 0; -} - -gcry_error_t -xb_crypt_encrypt(gcry_cipher_hd_t cipher_handle, const uchar *from, - size_t from_len, uchar *to, size_t *to_len, uchar *iv) -{ - gcry_error_t gcry_error; - - /* ensure that XB_CRYPT_HASH_LEN is the correct length - of XB_CRYPT_HASH hashing algorithm output */ - xb_ad(gcry_md_get_algo_dlen(XB_CRYPT_HASH) == - XB_CRYPT_HASH_LEN); - - memcpy(to, from, from_len); - gcry_md_hash_buffer(XB_CRYPT_HASH, to + from_len, - from, from_len); - - *to_len = from_len; - - if (encrypt_algo != GCRY_CIPHER_NONE) { - - gcry_error = gcry_cipher_reset(cipher_handle); - if (gcry_error) { - msg("encrypt: unable to reset cipher - " - "%s : %s\n", - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - return gcry_error; - } - - xb_crypt_create_iv(iv, encrypt_iv_len); - gcry_error = gcry_cipher_setctr(cipher_handle, iv, - encrypt_iv_len); - if (gcry_error) { - msg("encrypt: unable to set cipher ctr - " - "%s : %s\n", - gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - return gcry_error; - } - - gcry_error = gcry_cipher_encrypt(cipher_handle, to, - *to_len + XB_CRYPT_HASH_LEN, - to, - from_len + XB_CRYPT_HASH_LEN); - if (gcry_error) { - msg("encrypt: unable to encrypt buffer - " - "%s : %s\n", 
gcry_strsource(gcry_error), - gcry_strerror(gcry_error)); - return gcry_error; - } - } else { - memcpy(to, from, from_len + XB_CRYPT_HASH_LEN); - } - - *to_len += XB_CRYPT_HASH_LEN; - - return 0; -} -#endif
\ No newline at end of file diff --git a/extra/mariabackup/xbcrypt_common.h b/extra/mariabackup/xbcrypt_common.h deleted file mode 100644 index 85d13c01fc4..00000000000 --- a/extra/mariabackup/xbcrypt_common.h +++ /dev/null @@ -1,64 +0,0 @@ -/****************************************************** -Copyright (c) 2017 Percona LLC and/or its affiliates. - -Encryption datasink implementation for XtraBackup. - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -*******************************************************/ - -#include <my_base.h> -#if HAVE_GCRYPT -#if GCC_VERSION >= 4002 -/* Workaround to avoid "gcry_ac_* is deprecated" warnings in gcrypt.h */ -# pragma GCC diagnostic ignored "-Wdeprecated-declarations" -#endif - -#include <gcrypt.h> - -extern char *ds_encrypt_key; -extern char *ds_encrypt_key_file; -extern int ds_encrypt_threads; -extern ulong ds_encrypt_algo; - -/****************************************************************************** -Utility interface */ -my_bool xb_crypt_read_key_file(const char *filename, - void** key, uint *keylength); - -void xb_crypt_create_iv(void* ivbuf, size_t ivlen); - -/* Initialize gcrypt and setup encryption key and IV lengths */ -gcry_error_t -xb_crypt_init(uint *iv_len); - -/* Setup gcrypt cipher */ -gcry_error_t -xb_crypt_cipher_open(gcry_cipher_hd_t *cipher_handle); - -/* Close gcrypt cipher */ -void 
-xb_crypt_cipher_close(gcry_cipher_hd_t cipher_handle); - -/* Decrypt buffer */ -gcry_error_t -xb_crypt_decrypt(gcry_cipher_hd_t cipher_handle, const uchar *from, - size_t from_len, uchar *to, size_t *to_len, const uchar *iv, - size_t iv_len, my_bool hash_appended); - -/* Encrypt buffer */ -gcry_error_t -xb_crypt_encrypt(gcry_cipher_hd_t cipher_handle, const uchar *from, - size_t from_len, uchar *to, size_t *to_len, uchar *iv); -#endif diff --git a/extra/mariabackup/xbcrypt_read.c b/extra/mariabackup/xbcrypt_read.c deleted file mode 100644 index 41790c7035d..00000000000 --- a/extra/mariabackup/xbcrypt_read.c +++ /dev/null @@ -1,252 +0,0 @@ -/****************************************************** -Copyright (c) 2013 Percona LLC and/or its affiliates. - -The xbcrypt format reader implementation. - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -*******************************************************/ - -#include "xbcrypt.h" -#include "crc_glue.h" - -struct xb_rcrypt_struct { - void *userdata; - xb_crypt_read_callback *read; - void *buffer; - size_t bufsize; - void *ivbuffer; - size_t ivbufsize; - ulonglong offset; -}; - -xb_rcrypt_t * -xb_crypt_read_open(void *userdata, xb_crypt_read_callback *onread) -{ - xb_rcrypt_t *crypt; - - xb_ad(onread); - - crypt = (xb_rcrypt_t *) my_malloc(sizeof(xb_rcrypt_t), MYF(MY_FAE)); - - crypt->userdata = userdata; - crypt->read = onread; - crypt->buffer = NULL; - crypt->bufsize = 0; - crypt->offset = 0; - crypt->ivbuffer = NULL; - crypt->ivbufsize = 0; - return crypt; -} - -xb_rcrypt_result_t -xb_crypt_read_chunk(xb_rcrypt_t *crypt, void **buf, size_t *olen, size_t *elen, - void **iv, size_t *ivlen, my_bool *hash_appended) - -{ - uchar tmpbuf[XB_CRYPT_CHUNK_MAGIC_SIZE + 8 + 8 + 8 + 4]; - uchar *ptr; - ulonglong tmp; - ulong checksum, checksum_exp, version; - size_t bytesread; - xb_rcrypt_result_t result = XB_CRYPT_READ_CHUNK; - - if ((bytesread = crypt->read(crypt->userdata, tmpbuf, sizeof(tmpbuf))) - != sizeof(tmpbuf)) { - if (bytesread == 0) { - result = XB_CRYPT_READ_EOF; - goto err; - } else { - msg("%s:%s: unable to read chunk header data at " - "offset 0x%llx.\n", - my_progname, __FUNCTION__, crypt->offset); - result = XB_CRYPT_READ_ERROR; - goto err; - } - } - - ptr = tmpbuf; - - if (memcmp(ptr, XB_CRYPT_CHUNK_MAGIC3, - XB_CRYPT_CHUNK_MAGIC_SIZE) == 0) { - version = 3; - } else if (memcmp(ptr, XB_CRYPT_CHUNK_MAGIC2, - XB_CRYPT_CHUNK_MAGIC_SIZE) == 0) { - version = 2; - } else if (memcmp(ptr, XB_CRYPT_CHUNK_MAGIC1, - XB_CRYPT_CHUNK_MAGIC_SIZE) == 0) { - version = 1; - } else { - msg("%s:%s: wrong chunk magic at offset 0x%llx.\n", - my_progname, __FUNCTION__, 
crypt->offset); - result = XB_CRYPT_READ_ERROR; - goto err; - } - - ptr += XB_CRYPT_CHUNK_MAGIC_SIZE; - crypt->offset += XB_CRYPT_CHUNK_MAGIC_SIZE; - - tmp = uint8korr(ptr); /* reserved */ - ptr += 8; - crypt->offset += 8; - - tmp = uint8korr(ptr); /* original size */ - ptr += 8; - if (tmp > INT_MAX) { - msg("%s:%s: invalid original size at offset 0x%llx.\n", - my_progname, __FUNCTION__, crypt->offset); - result = XB_CRYPT_READ_ERROR; - goto err; - } - crypt->offset += 8; - *olen = (size_t)tmp; - - tmp = uint8korr(ptr); /* encrypted size */ - ptr += 8; - if (tmp > INT_MAX) { - msg("%s:%s: invalid encrypted size at offset 0x%llx.\n", - my_progname, __FUNCTION__, crypt->offset); - result = XB_CRYPT_READ_ERROR; - goto err; - } - crypt->offset += 8; - *elen = (size_t)tmp; - - checksum_exp = uint4korr(ptr); /* checksum */ - ptr += 4; - crypt->offset += 4; - - /* iv size */ - if (version == 1) { - *ivlen = 0; - *iv = 0; - } else { - if ((bytesread = crypt->read(crypt->userdata, tmpbuf, 8)) - != 8) { - if (bytesread == 0) { - result = XB_CRYPT_READ_EOF; - goto err; - } else { - msg("%s:%s: unable to read chunk iv size at " - "offset 0x%llx.\n", - my_progname, __FUNCTION__, crypt->offset); - result = XB_CRYPT_READ_ERROR; - goto err; - } - } - - tmp = uint8korr(tmpbuf); - if (tmp > INT_MAX) { - msg("%s:%s: invalid iv size at offset 0x%llx.\n", - my_progname, __FUNCTION__, crypt->offset); - result = XB_CRYPT_READ_ERROR; - goto err; - } - crypt->offset += 8; - *ivlen = (size_t)tmp; - } - - if (*ivlen > crypt->ivbufsize) { - crypt->ivbuffer = my_realloc(crypt->ivbuffer, *ivlen, - MYF(MY_WME | MY_ALLOW_ZERO_PTR)); - if (crypt->ivbuffer == NULL) { - msg("%s:%s: failed to increase iv buffer to " - "%llu bytes.\n", my_progname, __FUNCTION__, - (ulonglong)*ivlen); - result = XB_CRYPT_READ_ERROR; - goto err; - } - crypt->ivbufsize = *ivlen; - } - - if (*ivlen > 0) { - if (crypt->read(crypt->userdata, crypt->ivbuffer, *ivlen) - != *ivlen) { - msg("%s:%s: failed to read %lld bytes for 
chunk iv " - "at offset 0x%llx.\n", my_progname, __FUNCTION__, - (ulonglong)*ivlen, crypt->offset); - result = XB_CRYPT_READ_ERROR; - goto err; - } - *iv = crypt->ivbuffer; - } - - /* for version euqals 2 we need to read in the iv data but do not init - CTR with it */ - if (version == 2) { - *ivlen = 0; - *iv = 0; - } - - if (*olen > crypt->bufsize) { - crypt->buffer = my_realloc(crypt->buffer, *olen, - MYF(MY_WME | MY_ALLOW_ZERO_PTR)); - if (crypt->buffer == NULL) { - msg("%s:%s: failed to increase buffer to " - "%llu bytes.\n", my_progname, __FUNCTION__, - (ulonglong)*olen); - result = XB_CRYPT_READ_ERROR; - goto err; - } - crypt->bufsize = *olen; - } - - if (*elen > 0) { - if (crypt->read(crypt->userdata, crypt->buffer, *elen) - != *elen) { - msg("%s:%s: failed to read %lld bytes for chunk payload " - "at offset 0x%llx.\n", my_progname, __FUNCTION__, - (ulonglong)*elen, crypt->offset); - result = XB_CRYPT_READ_ERROR; - goto err; - } - } - - checksum = crc32_iso3309(0, crypt->buffer, *elen); - if (checksum != checksum_exp) { - msg("%s:%s invalid checksum at offset 0x%llx, " - "expected 0x%lx, actual 0x%lx.\n", my_progname, __FUNCTION__, - crypt->offset, checksum_exp, checksum); - result = XB_CRYPT_READ_ERROR; - goto err; - } - - crypt->offset += *elen; - *buf = crypt->buffer; - - *hash_appended = version > 2; - - goto exit; - -err: - *buf = NULL; - *olen = 0; - *elen = 0; - *ivlen = 0; - *iv = 0; -exit: - return result; -} - -int xb_crypt_read_close(xb_rcrypt_t *crypt) -{ - if (crypt->buffer) - my_free(crypt->buffer); - if (crypt->ivbuffer) - my_free(crypt->ivbuffer); - my_free(crypt); - - return 0; -} - diff --git a/extra/mariabackup/xbcrypt_write.c b/extra/mariabackup/xbcrypt_write.c deleted file mode 100644 index 91dbfc4eb29..00000000000 --- a/extra/mariabackup/xbcrypt_write.c +++ /dev/null @@ -1,105 +0,0 @@ -/****************************************************** -Copyright (c) 2013 Percona LLC and/or its affiliates. 
- -The xbcrypt format writer implementation. - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -*******************************************************/ - -#include "xbcrypt.h" -#include "crc_glue.h" - -struct xb_wcrypt_struct { - void *userdata; - xb_crypt_write_callback *write; -}; - -xb_wcrypt_t * -xb_crypt_write_open(void *userdata, xb_crypt_write_callback *onwrite) -{ - xb_wcrypt_t *crypt; - - xb_ad(onwrite); - - crypt = (xb_wcrypt_t *) my_malloc(sizeof(xb_wcrypt_t), MYF(MY_FAE)); - - crypt->userdata = userdata; - crypt->write = onwrite; - - return crypt; -} - -int xb_crypt_write_chunk(xb_wcrypt_t *crypt, const void *buf, size_t olen, - size_t elen, const void *iv, size_t ivlen) -{ - uchar tmpbuf[XB_CRYPT_CHUNK_MAGIC_SIZE + 8 + 8 + 8 + 4 + 8]; - uchar *ptr; - ulong checksum; - - xb_ad(olen <= INT_MAX); - if (olen > INT_MAX) - return 0; - - xb_ad(elen <= INT_MAX); - if (elen > INT_MAX) - return 0; - - xb_ad(ivlen <= INT_MAX); - if (ivlen > INT_MAX) - return 0; - - ptr = tmpbuf; - - memcpy(ptr, XB_CRYPT_CHUNK_MAGIC_CURRENT, XB_CRYPT_CHUNK_MAGIC_SIZE); - ptr += XB_CRYPT_CHUNK_MAGIC_SIZE; - - int8store(ptr, (ulonglong)0); /* reserved */ - ptr += 8; - - int8store(ptr, (ulonglong)olen); /* original size */ - ptr += 8; - - int8store(ptr, (ulonglong)elen); /* encrypted (actual) size */ - ptr += 8; - - checksum = crc32_iso3309(0, buf, elen); - int4store(ptr, checksum); /* checksum 
*/ - ptr += 4; - - int8store(ptr, (ulonglong)ivlen); /* iv size */ - ptr += 8; - - xb_ad(ptr <= tmpbuf + sizeof(tmpbuf)); - - if (crypt->write(crypt->userdata, tmpbuf, ptr-tmpbuf) == -1) - return 1; - - if (crypt->write(crypt->userdata, iv, ivlen) == -1) - return 1; - - if (crypt->write(crypt->userdata, buf, elen) == -1) - return 1; - - return 0; -} - -int xb_crypt_write_close(xb_wcrypt_t *crypt) -{ - my_free(crypt); - - return 0; -} - - diff --git a/extra/mariabackup/xbstream.c b/extra/mariabackup/xbstream.c index 2cc47ec7273..edfe20a9e3c 100644 --- a/extra/mariabackup/xbstream.c +++ b/extra/mariabackup/xbstream.c @@ -25,9 +25,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA #include <my_pthread.h> #include "common.h" #include "xbstream.h" -#include "xbcrypt_common.h" #include "datasink.h" -#include "ds_decrypt.h" #include "crc_glue.h" #define XBSTREAM_VERSION "1.0" @@ -41,33 +39,18 @@ typedef enum { RUN_MODE_EXTRACT } run_mode_t; -const char *xbstream_encrypt_algo_names[] = -{ "NONE", "AES128", "AES192", "AES256", NullS}; -TYPELIB xbstream_encrypt_algo_typelib= -{array_elements(xbstream_encrypt_algo_names)-1,"", - xbstream_encrypt_algo_names, NULL}; - /* Need the following definitions to avoid linking with ds_*.o and their link dependencies */ datasink_t datasink_archive; datasink_t datasink_xbstream; datasink_t datasink_compress; datasink_t datasink_tmpfile; -datasink_t datasink_encrypt; datasink_t datasink_buffer; static run_mode_t opt_mode; static char * opt_directory = NULL; static my_bool opt_verbose = 0; static int opt_parallel = 1; -static ulong opt_encrypt_algo; -static char *opt_encrypt_key_file = NULL; -static void *opt_encrypt_key = NULL; -static int opt_encrypt_threads = 1; - -enum { - OPT_ENCRYPT_THREADS = 256 -}; static struct my_option my_long_options[] = { @@ -86,20 +69,6 @@ static struct my_option my_long_options[] = {"parallel", 'p', "Number of worker threads for reading / writing.", &opt_parallel, 
&opt_parallel, 0, GET_INT, REQUIRED_ARG, 1, 1, INT_MAX, 0, 0, 0}, - {"decrypt", 'd', "Decrypt files ending with .xbcrypt.", - &opt_encrypt_algo, &opt_encrypt_algo, &xbstream_encrypt_algo_typelib, - GET_ENUM, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"encrypt-key", 'k', "Encryption key.", - &opt_encrypt_key, &opt_encrypt_key, 0, - GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"encrypt-key-file", 'f', "File which contains encryption key.", - &opt_encrypt_key_file, &opt_encrypt_key_file, 0, - GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"encrypt-threads", OPT_ENCRYPT_THREADS, - "Number of threads for parallel data encryption. " - "The default value is 1.", - &opt_encrypt_threads, &opt_encrypt_threads, - 0, GET_INT, REQUIRED_ARG, 1, 1, INT_MAX, 0, 0, 0}, {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; @@ -108,7 +77,6 @@ typedef struct { HASH *filehash; xb_rstream_t *stream; ds_ctxt_t *ds_ctxt; - ds_ctxt_t *ds_decrypt_ctxt; pthread_mutex_t *mutex; } extract_ctxt_t; @@ -348,19 +316,6 @@ err: return 1; } -/************************************************************************ -Check if string ends with given suffix. -@return true if string ends with given suffix. 
*/ -static -my_bool -ends_with(const char *str, const char *suffix) -{ - size_t suffix_len = strlen(suffix); - size_t str_len = strlen(str); - return(str_len >= suffix_len - && strcmp(str + str_len - suffix_len, suffix) == 0); -} - static file_entry_t * file_entry_new(extract_ctxt_t *ctxt, const char *path, uint pathlen) @@ -380,11 +335,8 @@ file_entry_new(extract_ctxt_t *ctxt, const char *path, uint pathlen) } entry->pathlen = pathlen; - if (ctxt->ds_decrypt_ctxt && ends_with(path, ".xbcrypt")) { - file = ds_open(ctxt->ds_decrypt_ctxt, path, NULL); - } else { - file = ds_open(ctxt->ds_ctxt, path, NULL); - } + file = ds_open(ctxt->ds_ctxt, path, NULL); + if (file == NULL) { msg("%s: failed to create file.\n", my_progname); goto err; @@ -534,7 +486,6 @@ mode_extract(int n_threads, int argc __attribute__((unused)), xb_rstream_t *stream = NULL; HASH filehash; ds_ctxt_t *ds_ctxt = NULL; - ds_ctxt_t *ds_decrypt_ctxt = NULL; extract_ctxt_t ctxt; int i; pthread_t *tids = NULL; @@ -574,7 +525,6 @@ mode_extract(int n_threads, int argc __attribute__((unused)), ctxt.stream = stream; ctxt.filehash = &filehash; ctxt.ds_ctxt = ds_ctxt; - ctxt.ds_decrypt_ctxt = ds_decrypt_ctxt; ctxt.mutex = &mutex; tids = malloc(sizeof(pthread_t) * n_threads); @@ -604,9 +554,6 @@ exit: if (ds_ctxt != NULL) { ds_destroy(ds_ctxt); } - if (ds_decrypt_ctxt) { - ds_destroy(ds_decrypt_ctxt); - } xb_stream_read_done(stream); return ret; diff --git a/extra/mariabackup/xbstream.h b/extra/mariabackup/xbstream.h index ac1bf05e321..08b017ca5ce 100644 --- a/extra/mariabackup/xbstream.h +++ b/extra/mariabackup/xbstream.h @@ -42,7 +42,6 @@ typedef struct xb_wstream_file_struct xb_wstream_file_t; typedef enum { XB_STREAM_FMT_NONE, - XB_STREAM_FMT_TAR, XB_STREAM_FMT_XBSTREAM } xb_stream_fmt_t; diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index 40cc2083e45..1592172e3f1 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -59,7 +59,6 @@ Place, Suite 
330, Boston, MA 02111-1307 USA #include <btr0sea.h> #include <dict0priv.h> -#include <dict0stats.h> #include <lock0lock.h> #include <log0recv.h> #include <row0mysql.h> @@ -112,7 +111,6 @@ char xtrabackup_real_target_dir[FN_REFLEN] = "./xtrabackup_backupfiles/"; char *xtrabackup_target_dir= xtrabackup_real_target_dir; my_bool xtrabackup_version = FALSE; my_bool xtrabackup_backup = FALSE; -my_bool xtrabackup_stats = FALSE; my_bool xtrabackup_prepare = FALSE; my_bool xtrabackup_copy_back = FALSE; my_bool xtrabackup_move_back = FALSE; @@ -144,8 +142,6 @@ char xtrabackup_real_incremental_basedir[FN_REFLEN]; char xtrabackup_real_extra_lsndir[FN_REFLEN]; char xtrabackup_real_incremental_dir[FN_REFLEN]; -lsn_t xtrabackup_archived_to_lsn = 0; /* for --archived-to-lsn */ - char *xtrabackup_tmpdir; char *xtrabackup_tables = NULL; @@ -197,21 +193,6 @@ ibool xtrabackup_compress = FALSE; uint xtrabackup_compress_threads; ulonglong xtrabackup_compress_chunk_size = 0; -const char *xtrabackup_encrypt_algo_names[] = -{ "NONE", "AES128", "AES192", "AES256", NullS}; -TYPELIB xtrabackup_encrypt_algo_typelib= -{array_elements(xtrabackup_encrypt_algo_names)-1,"", - xtrabackup_encrypt_algo_names, NULL}; - -ibool xtrabackup_encrypt = FALSE; -ulong xtrabackup_encrypt_algo; -char *xtrabackup_encrypt_key = NULL; -char *xtrabackup_encrypt_key_file = NULL; -uint xtrabackup_encrypt_threads; -ulonglong xtrabackup_encrypt_chunk_size = 0; - -ulint xtrabackup_rebuild_threads = 1; - /* sleep interval beetween log copy iterations in log copying thread in milliseconds (default is 1 second) */ ulint xtrabackup_log_copy_interval = 1000; @@ -271,7 +252,6 @@ are determined in innobase_init below: */ char* innobase_ignored_opt = NULL; char* innobase_data_home_dir = NULL; char* innobase_data_file_path = NULL; -char* innobase_log_arch_dir = NULL;/* unused */ /* The following has a misleading name: starting from 4.0.5, this also affects Windows: */ char* innobase_unix_file_flush_method = NULL; @@ -280,7 +260,6 
@@ char* innobase_unix_file_flush_method = NULL; values */ ulong innobase_fast_shutdown = 1; -my_bool innobase_log_archive = FALSE;/* unused */ my_bool innobase_use_doublewrite = TRUE; my_bool innobase_use_checksums = TRUE; my_bool innobase_use_large_pages = FALSE; @@ -303,21 +282,10 @@ ulong innobase_active_counter = 0; static char *xtrabackup_debug_sync = NULL; -my_bool xtrabackup_compact = FALSE; -my_bool xtrabackup_rebuild_indexes = FALSE; - my_bool xtrabackup_incremental_force_scan = FALSE; /* The flushed lsn which is read from data files */ -lsn_t min_flushed_lsn= 0; -lsn_t max_flushed_lsn= 0; - -/* The size of archived log file */ -ib_int64_t xtrabackup_arch_file_size = 0ULL; -/* The minimal LSN of found archived log files */ -lsn_t xtrabackup_arch_first_file_lsn = 0ULL; -/* The maximum LSN of found archived log files */ -lsn_t xtrabackup_arch_last_file_lsn = 0ULL; +lsn_t flushed_lsn= 0; ulong xb_open_files_limit= 0; char *xb_plugin_dir; @@ -383,8 +351,6 @@ TYPELIB query_type_typelib= {array_elements(query_type_names) - 1, "", ulong opt_lock_wait_query_type; ulong opt_kill_long_query_type; -ulong opt_decrypt_algo = 0; - uint opt_kill_long_queries_timeout = 0; uint opt_lock_wait_timeout = 0; uint opt_lock_wait_threshold = 0; @@ -392,7 +358,6 @@ uint opt_debug_sleep_before_unlock = 0; uint opt_safe_slave_backup_timeout = 0; const char *opt_history = NULL; -my_bool opt_decrypt = FALSE; #if defined(HAVE_OPENSSL) my_bool opt_ssl_verify_server_cert = FALSE; @@ -496,7 +461,6 @@ enum options_xtrabackup OPT_XTRA_TARGET_DIR = 1000, /* make sure it is larger than OPT_MAX_CLIENT_OPTION */ OPT_XTRA_BACKUP, - OPT_XTRA_STATS, OPT_XTRA_PREPARE, OPT_XTRA_EXPORT, OPT_XTRA_APPLY_LOG_ONLY, @@ -508,7 +472,6 @@ enum options_xtrabackup OPT_XTRA_INCREMENTAL_BASEDIR, OPT_XTRA_EXTRA_LSNDIR, OPT_XTRA_INCREMENTAL_DIR, - OPT_XTRA_ARCHIVED_TO_LSN, OPT_XTRA_TABLES, OPT_XTRA_TABLES_FILE, OPT_XTRA_DATABASES, @@ -519,11 +482,6 @@ enum options_xtrabackup OPT_XTRA_COMPRESS, 
OPT_XTRA_COMPRESS_THREADS, OPT_XTRA_COMPRESS_CHUNK_SIZE, - OPT_XTRA_ENCRYPT, - OPT_XTRA_ENCRYPT_KEY, - OPT_XTRA_ENCRYPT_KEY_FILE, - OPT_XTRA_ENCRYPT_THREADS, - OPT_XTRA_ENCRYPT_CHUNK_SIZE, OPT_LOG, OPT_INNODB, OPT_INNODB_CHECKSUMS, @@ -536,8 +494,6 @@ enum options_xtrabackup OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT, OPT_INNODB_FLUSH_METHOD, OPT_INNODB_LOCKS_UNSAFE_FOR_BINLOG, - OPT_INNODB_LOG_ARCH_DIR, - OPT_INNODB_LOG_ARCHIVE, OPT_INNODB_LOG_GROUP_HOME_DIR, OPT_INNODB_MAX_DIRTY_PAGES_PCT, OPT_INNODB_MAX_PURGE_LAG, @@ -564,15 +520,11 @@ enum options_xtrabackup OPT_INNODB_LOG_BUFFER_SIZE, OPT_INNODB_LOG_FILE_SIZE, OPT_INNODB_LOG_FILES_IN_GROUP, - OPT_INNODB_MIRRORED_LOG_GROUPS, OPT_INNODB_OPEN_FILES, OPT_INNODB_SYNC_SPIN_LOOPS, OPT_INNODB_THREAD_CONCURRENCY, OPT_INNODB_THREAD_SLEEP_DELAY, OPT_XTRA_DEBUG_SYNC, - OPT_XTRA_COMPACT, - OPT_XTRA_REBUILD_INDEXES, - OPT_XTRA_REBUILD_THREADS, OPT_INNODB_CHECKSUM_ALGORITHM, OPT_INNODB_UNDO_DIRECTORY, OPT_INNODB_UNDO_TABLESPACES, @@ -599,7 +551,6 @@ enum options_xtrabackup OPT_DECOMPRESS, OPT_INCREMENTAL_HISTORY_NAME, OPT_INCREMENTAL_HISTORY_UUID, - OPT_DECRYPT, OPT_REMOVE_ORIGINAL, OPT_LOCK_WAIT_QUERY_TYPE, OPT_KILL_LONG_QUERY_TYPE, @@ -630,9 +581,6 @@ struct my_option xb_client_options[] = {"backup", OPT_XTRA_BACKUP, "take backup to target-dir", (G_PTR*) &xtrabackup_backup, (G_PTR*) &xtrabackup_backup, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"stats", OPT_XTRA_STATS, "calc statistic of datadir (offline mysqld is recommended)", - (G_PTR*) &xtrabackup_stats, (G_PTR*) &xtrabackup_stats, - 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"prepare", OPT_XTRA_PREPARE, "prepare a backup for starting mysql server on the backup.", (G_PTR*) &xtrabackup_prepare, (G_PTR*) &xtrabackup_prepare, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -671,10 +619,6 @@ struct my_option xb_client_options[] = {"incremental-dir", OPT_XTRA_INCREMENTAL_DIR, "(for --prepare): apply .delta files and logfile in the specified directory.", (G_PTR*) 
&xtrabackup_incremental_dir, (G_PTR*) &xtrabackup_incremental_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"to-archived-lsn", OPT_XTRA_ARCHIVED_TO_LSN, - "Don't apply archived logs with bigger log sequence number.", - (G_PTR*) &xtrabackup_archived_to_lsn, (G_PTR*) &xtrabackup_archived_to_lsn, 0, - GET_LL, REQUIRED_ARG, 0, 0, LONGLONG_MAX, 0, 0, 0}, {"tables", OPT_XTRA_TABLES, "filtering by regexp for table names.", (G_PTR*) &xtrabackup_tables, (G_PTR*) &xtrabackup_tables, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, @@ -704,11 +648,7 @@ struct my_option xb_client_options[] = {"stream", OPT_XTRA_STREAM, "Stream all backup files to the standard output " "in the specified format." -#ifdef HAVE_LIBARCHIVE - "Supported formats are 'tar' and 'xbstream'." -#else "Supported format is 'xbstream'." -#endif , (G_PTR*) &xtrabackup_stream_str, (G_PTR*) &xtrabackup_stream_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, @@ -730,46 +670,6 @@ struct my_option xb_client_options[] = (G_PTR*) &xtrabackup_compress_chunk_size, (G_PTR*) &xtrabackup_compress_chunk_size, 0, GET_ULL, REQUIRED_ARG, (1 << 16), 1024, ULONGLONG_MAX, 0, 0, 0}, - {"encrypt", OPT_XTRA_ENCRYPT, "Encrypt individual backup files using the " - "specified encryption algorithm.", - &xtrabackup_encrypt_algo, &xtrabackup_encrypt_algo, - &xtrabackup_encrypt_algo_typelib, GET_ENUM, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - - {"encrypt-key", OPT_XTRA_ENCRYPT_KEY, "Encryption key to use.", - (G_PTR*) &xtrabackup_encrypt_key, (G_PTR*) &xtrabackup_encrypt_key, 0, - GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - - {"encrypt-key-file", OPT_XTRA_ENCRYPT_KEY_FILE, "File which contains encryption key to use.", - (G_PTR*) &xtrabackup_encrypt_key_file, (G_PTR*) &xtrabackup_encrypt_key_file, 0, - GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - - {"encrypt-threads", OPT_XTRA_ENCRYPT_THREADS, - "Number of threads for parallel data encryption. 
The default value is 1.", - (G_PTR*) &xtrabackup_encrypt_threads, (G_PTR*) &xtrabackup_encrypt_threads, - 0, GET_UINT, REQUIRED_ARG, 1, 1, UINT_MAX, 0, 0, 0}, - - {"encrypt-chunk-size", OPT_XTRA_ENCRYPT_CHUNK_SIZE, - "Size of working buffer(S) for encryption threads in bytes. The default value is 64K.", - (G_PTR*) &xtrabackup_encrypt_chunk_size, (G_PTR*) &xtrabackup_encrypt_chunk_size, - 0, GET_ULL, REQUIRED_ARG, (1 << 16), 1024, ULONGLONG_MAX, 0, 0, 0}, - - {"compact", OPT_XTRA_COMPACT, - "Create a compact backup by skipping secondary index pages.", - (G_PTR*) &xtrabackup_compact, (G_PTR*) &xtrabackup_compact, - 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - - {"rebuild_indexes", OPT_XTRA_REBUILD_INDEXES, - "Rebuild secondary indexes in InnoDB tables after applying the log. " - "Only has effect with --prepare.", - (G_PTR*) &xtrabackup_rebuild_indexes, (G_PTR*) &xtrabackup_rebuild_indexes, - 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - - {"rebuild_threads", OPT_XTRA_REBUILD_THREADS, - "Use this number of threads to rebuild indexes in a compact backup. 
" - "Only has effect with --prepare and --rebuild-indexes.", - (G_PTR*) &xtrabackup_rebuild_threads, (G_PTR*) &xtrabackup_rebuild_threads, - 0, GET_UINT, REQUIRED_ARG, 1, 1, UINT_MAX, 0, 0, 0}, - {"incremental-force-scan", OPT_XTRA_INCREMENTAL_FORCE_SCAN, "Perform a full-scan incremental backup even in the presence of changed " "page bitmap data", @@ -940,18 +840,6 @@ struct my_option xb_client_options[] = (uchar*) &opt_incremental_history_uuid, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"decrypt", OPT_DECRYPT, "Decrypts all files with the .xbcrypt " - "extension in a backup previously made with --encrypt option.", - &opt_decrypt_algo, &opt_decrypt_algo, - &xtrabackup_encrypt_algo_typelib, GET_ENUM, REQUIRED_ARG, - 0, 0, 0, 0, 0, 0}, - - {"remove-original", OPT_REMOVE_ORIGINAL, "Remove .qp and .xbcrypt files " - "after decryption and decompression.", - (uchar *) &opt_remove_original, - (uchar *) &opt_remove_original, - 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"ftwrl-wait-query-type", OPT_LOCK_WAIT_QUERY_TYPE, "This option specifies which types of queries are allowed to complete " "before innobackupex will issue the global lock. 
Default is all.", @@ -1131,9 +1019,6 @@ Disable with --skip-innodb-doublewrite.", (G_PTR*) &innobase_use_doublewrite, (G_PTR*) &innobase_force_recovery, (G_PTR*) &innobase_force_recovery, 0, GET_LONG, REQUIRED_ARG, 0, 0, 6, 0, 1, 0}, - {"innodb_log_arch_dir", OPT_INNODB_LOG_ARCH_DIR, - "Where full logs should be archived.", (G_PTR*) &innobase_log_arch_dir, - (G_PTR*) &innobase_log_arch_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"innodb_log_buffer_size", OPT_INNODB_LOG_BUFFER_SIZE, "The size of the buffer which InnoDB uses to write log to the log files on disk.", (G_PTR*) &innobase_log_buffer_size, (G_PTR*) &innobase_log_buffer_size, 0, @@ -1453,9 +1338,7 @@ xb_get_one_option(int optid, xtrabackup_target_dir= xtrabackup_real_target_dir; break; case OPT_XTRA_STREAM: - if (!strcasecmp(argument, "tar")) - xtrabackup_stream_fmt = XB_STREAM_FMT_TAR; - else if (!strcasecmp(argument, "xbstream")) + if (!strcasecmp(argument, "xbstream")) xtrabackup_stream_fmt = XB_STREAM_FMT_XBSTREAM; else { @@ -1474,24 +1357,6 @@ xb_get_one_option(int optid, } xtrabackup_compress = TRUE; break; - case OPT_XTRA_ENCRYPT: - if (argument == NULL) - { - msg("Missing --encrypt argument, must specify a valid encryption " - " algorithm.\n"); - return 1; - } - xtrabackup_encrypt = TRUE; - break; - case OPT_DECRYPT: - if (argument == NULL) { - msg("Missing --decrypt argument, must specify a " - "valid encryption algorithm.\n"); - return(1); - } - opt_decrypt = TRUE; - xtrabackup_decrypt_decompress = true; - break; case OPT_DECOMPRESS: opt_decompress = TRUE; xtrabackup_decrypt_decompress = true; @@ -1645,7 +1510,7 @@ innodb_init_param(void) /* Set InnoDB initialization parameters according to the values read from MySQL .cnf file */ - if (xtrabackup_backup || xtrabackup_stats) { + if (xtrabackup_backup) { msg("xtrabackup: using the following InnoDB configuration:\n"); } else { msg("xtrabackup: using the following InnoDB configuration " @@ -1656,7 +1521,7 @@ innodb_init_param(void) /* The 
default dir for data files is the datadir of MySQL */ - srv_data_home = ((xtrabackup_backup || xtrabackup_stats) && innobase_data_home_dir + srv_data_home = (xtrabackup_backup && innobase_data_home_dir ? innobase_data_home_dir : default_path); msg("xtrabackup: innodb_data_home_dir = %s\n", srv_data_home); @@ -1705,8 +1570,7 @@ mem_free_and_error: /* The default dir for log files is the datadir of MySQL */ - if (!((xtrabackup_backup || xtrabackup_stats) && - srv_log_group_home_dir)) { + if (!(xtrabackup_backup && srv_log_group_home_dir)) { srv_log_group_home_dir = default_path; } if (xtrabackup_prepare && xtrabackup_incremental_dir) { @@ -1736,7 +1600,6 @@ mem_free_and_error: msg("xtrabackup: innodb_log_file_size = %lld\n", (long long int) srv_log_file_size); - srv_log_archive_on = (ulint) innobase_log_archive; srv_log_buffer_size = (ulint) innobase_log_buffer_size; /* We set srv_pool_size here in units of 1 kB. InnoDB internally @@ -1765,11 +1628,8 @@ mem_free_and_error: os_use_large_pages = (ibool) innobase_use_large_pages; os_large_page_size = (ulint) innobase_large_page_size; - - if (!innobase_log_arch_dir) { - static char default_dir[3] = "./"; - srv_arch_dir = default_dir; - } + static char default_dir[3] = "./"; + srv_arch_dir = default_dir; row_rollback_on_timeout = (ibool) innobase_rollback_on_timeout; srv_file_per_table = (my_bool) innobase_file_per_table; @@ -1903,8 +1763,8 @@ error: return(TRUE); } -static my_bool -innodb_end(void) +static void +innodb_end() { srv_fast_shutdown = (ulint) innobase_fast_shutdown; innodb_inited = 0; @@ -1912,9 +1772,7 @@ innodb_end(void) msg("xtrabackup: starting shutdown with innodb_fast_shutdown = %lu\n", srv_fast_shutdown); - if (innobase_shutdown_for_mysql() != DB_SUCCESS) { - goto error; - } + innodb_shutdown(); free(internal_innobase_data_file_path); internal_innobase_data_file_path = NULL; @@ -1925,12 +1783,6 @@ innodb_end(void) // pthread_mutex_destroy(&commit_threads_m); // pthread_mutex_destroy(&commit_cond_m); // 
pthread_cond_destroy(&commit_cond); - - return(FALSE); - -error: - msg("xtrabackup: innodb_end(): Error occured.\n"); - return(TRUE); } /* ================= common ================= */ @@ -1997,13 +1849,11 @@ xtrabackup_print_metadata(char *buf, size_t buf_len) "from_lsn = " UINT64PF "\n" "to_lsn = " UINT64PF "\n" "last_lsn = " UINT64PF "\n" - "compact = %d\n" "recover_binlog_info = %d\n", metadata_type, metadata_from_lsn, metadata_to_lsn, metadata_last_lsn, - MY_TEST(false), MY_TEST(opt_binlog_info == BINLOG_INFO_LOCKLESS)); } @@ -2166,7 +2016,7 @@ xb_write_delta_metadata(const char *filename, const xb_delta_info_t *info) void xtrabackup_io_throttling(void) { - if (xtrabackup_throttle && (io_ticket--) < 0) { + if (xtrabackup_backup && xtrabackup_throttle && (io_ticket--) < 0) { os_event_reset(wait_throttle); os_event_wait(wait_throttle); } @@ -2409,7 +2259,7 @@ check_if_skip_table( Reads the space flags from a given data file and returns the compressed page size, or 0 if the space is not compressed. 
*/ ulint -xb_get_zip_size(os_file_t file) +xb_get_zip_size(pfs_os_file_t file) { byte *buf; byte *page; @@ -2441,25 +2291,13 @@ xb_get_copy_action(const char *dflt) if (xtrabackup_stream) { if (xtrabackup_compress) { - if (xtrabackup_encrypt) { - action = "Compressing, encrypting and streaming"; - } else { - action = "Compressing and streaming"; - } - } else if (xtrabackup_encrypt) { - action = "Encrypting and streaming"; + action = "Compressing and streaming"; } else { action = "Streaming"; } } else { if (xtrabackup_compress) { - if (xtrabackup_encrypt) { - action = "Compressing and encrypting"; - } else { - action = "Compressing"; - } - } else if (xtrabackup_encrypt) { - action = "Encrypting"; + action = "Compressing"; } else { action = dflt; } @@ -2608,96 +2446,6 @@ skip: return(FALSE); } -static -void -xtrabackup_choose_lsn_offset(lsn_t start_lsn) -{ -#if SUPPORT_PERCONA_5_5 - ulint no, alt_no, expected_no; - ulint blocks_in_group; - lsn_t tmp_offset, end_lsn; - int lsn_chosen = 0; - log_group_t *group; - - start_lsn = ut_uint64_align_down(start_lsn, OS_FILE_LOG_BLOCK_SIZE); - end_lsn = start_lsn + RECV_SCAN_SIZE; - - group = UT_LIST_GET_FIRST(log_sys->log_groups); - - if (mysql_server_version < 50500 || mysql_server_version > 50600) { - /* only make sense for Percona Server 5.5 */ - return; - } - - if (server_flavor == FLAVOR_PERCONA_SERVER) { - /* it is Percona Server 5.5 */ - group->alt_offset_chosen = true; - group->lsn_offset = group->lsn_offset_alt; - return; - } - - if (group->lsn_offset_alt == group->lsn_offset || - group->lsn_offset_alt == (lsn_t) -1) { - /* we have only one option */ - return; - } - - no = alt_no = (ulint) -1; - lsn_chosen = 0; - - blocks_in_group = log_block_convert_lsn_to_no( - log_group_get_capacity(group)) - 1; - - /* read log block number from usual offset */ - if (group->lsn_offset < group->file_size * group->n_files && - (log_group_calc_lsn_offset(start_lsn, group) % - UNIV_PAGE_SIZE) % OS_MIN_LOG_BLOCK_SIZE == 0) { - 
log_group_read_log_seg(LOG_RECOVER, log_sys->buf, - group, start_lsn, end_lsn); - no = log_block_get_hdr_no(log_sys->buf); - } - - /* read log block number from Percona Server 5.5 offset */ - tmp_offset = group->lsn_offset; - group->lsn_offset = group->lsn_offset_alt; - - if (group->lsn_offset < group->file_size * group->n_files && - (log_group_calc_lsn_offset(start_lsn, group) % - UNIV_PAGE_SIZE) % OS_MIN_LOG_BLOCK_SIZE == 0) { - log_group_read_log_seg(LOG_RECOVER, log_sys->buf, - group, start_lsn, end_lsn); - alt_no = log_block_get_hdr_no(log_sys->buf); - } - - expected_no = log_block_convert_lsn_to_no(start_lsn); - - ut_a(!(no == expected_no && alt_no == expected_no)); - - group->lsn_offset = tmp_offset; - - if ((no <= expected_no && - ((expected_no - no) % blocks_in_group) == 0) || - ((expected_no | 0x40000000UL) - no) % blocks_in_group == 0) { - /* default offset looks ok */ - ++lsn_chosen; - } - - if ((alt_no <= expected_no && - ((expected_no - alt_no) % blocks_in_group) == 0) || - ((expected_no | 0x40000000UL) - alt_no) % blocks_in_group == 0) { - /* PS 5.5 style offset looks ok */ - ++lsn_chosen; - group->alt_offset_chosen = true; - group->lsn_offset = group->lsn_offset_alt; - } - - /* We are in trouble, because we can not make a - decision to choose one over the other. Die just - like a Buridan's ass */ - ut_a(lsn_chosen == 1); -#endif -} - extern ibool log_block_checksum_is_ok_or_old_format(const byte* block); /*******************************************************//** @@ -2865,8 +2613,6 @@ static my_bool xtrabackup_copy_logfile(lsn_t from_lsn, my_bool is_last) { /* definition from recv_recovery_from_checkpoint_start() */ - log_group_t* group; - lsn_t group_scanned_lsn; lsn_t contiguous_lsn; ut_a(dst_log_file != NULL); @@ -2876,66 +2622,50 @@ xtrabackup_copy_logfile(lsn_t from_lsn, my_bool is_last) /* TODO: We must check the contiguous_lsn still exists in log file.. 
*/ - group = UT_LIST_GET_FIRST(log_sys->log_groups); + bool finished; + lsn_t start_lsn; + lsn_t end_lsn; - while (group) { - bool finished; - lsn_t start_lsn; - lsn_t end_lsn; + /* reference recv_group_scan_log_recs() */ - /* reference recv_group_scan_log_recs() */ - finished = false; + start_lsn = contiguous_lsn; - start_lsn = contiguous_lsn; + do { + end_lsn = start_lsn + RECV_SCAN_SIZE; - while (!finished) { + xtrabackup_io_throttling(); - end_lsn = start_lsn + RECV_SCAN_SIZE; + log_mutex_enter(); - xtrabackup_io_throttling(); - - mutex_enter(&log_sys->mutex); - - log_group_read_log_seg(LOG_RECOVER, log_sys->buf, - group, start_lsn, end_lsn, false); - - if (!xtrabackup_scan_log_recs(group, is_last, - start_lsn, &contiguous_lsn, &group_scanned_lsn, - &finished)) { - goto error; - } + log_group_read_log_seg(LOG_RECOVER, log_sys->buf, + &log_sys->log, start_lsn, end_lsn); - mutex_exit(&log_sys->mutex); + bool success = xtrabackup_scan_log_recs( + &log_sys->log, is_last, + start_lsn, &contiguous_lsn, + &log_sys->log.scanned_lsn, + &finished); - start_lsn = end_lsn; + log_mutex_exit(); + if (!success) { + ds_close(dst_log_file); + msg("xtrabackup: Error: xtrabackup_copy_logfile()" + " failed.\n"); + return(TRUE); } - group->scanned_lsn = group_scanned_lsn; - - msg_ts(">> log scanned up to (" LSN_PF ")\n", - group->scanned_lsn); - - group = UT_LIST_GET_NEXT(log_groups, group); - - /* update global variable*/ - log_copy_scanned_lsn = group_scanned_lsn; - - /* innodb_mirrored_log_groups must be 1, no other groups */ - ut_a(group == NULL); - - debug_sync_point("xtrabackup_copy_logfile_pause"); + start_lsn = end_lsn; + } while (!finished); - } + msg_ts(">> log scanned up to (" LSN_PF ")\n", + log_sys->log.scanned_lsn); + /* update global variable*/ + log_copy_scanned_lsn = log_sys->log.scanned_lsn; + debug_sync_point("xtrabackup_copy_logfile_pause"); return(FALSE); - -error: - mutex_exit(&log_sys->mutex); - ds_close(dst_log_file); - msg("xtrabackup: Error: 
xtrabackup_copy_logfile() failed.\n"); - return(TRUE); } static @@ -3103,14 +2833,6 @@ files first, and then streams them in a serialized way when closed. */ static void xtrabackup_init_datasinks(void) { - if (xtrabackup_parallel > 1 && xtrabackup_stream && - xtrabackup_stream_fmt == XB_STREAM_FMT_TAR) { - msg("xtrabackup: warning: the --parallel option does not have " - "any effect when streaming in the 'tar' format. " - "You can use the 'xbstream' format instead.\n"); - xtrabackup_parallel = 1; - } - /* Start building out the pipelines from the terminus back */ if (xtrabackup_stream) { /* All streaming goes to stdout */ @@ -3128,52 +2850,17 @@ xtrabackup_init_datasinks(void) /* Stream formatting */ if (xtrabackup_stream) { ds_ctxt_t *ds; - if (xtrabackup_stream_fmt == XB_STREAM_FMT_TAR) { - ds = ds_create(xtrabackup_target_dir, DS_TYPE_ARCHIVE); - } else if (xtrabackup_stream_fmt == XB_STREAM_FMT_XBSTREAM) { - ds = ds_create(xtrabackup_target_dir, DS_TYPE_XBSTREAM); - } else { - /* bad juju... 
*/ - ds = NULL; - } + + ut_a(xtrabackup_stream_fmt == XB_STREAM_FMT_XBSTREAM); + ds = ds_create(xtrabackup_target_dir, DS_TYPE_XBSTREAM); xtrabackup_add_datasink(ds); ds_set_pipe(ds, ds_data); ds_data = ds; - if (xtrabackup_stream_fmt != XB_STREAM_FMT_XBSTREAM) { - - /* 'tar' does not allow parallel streams */ - ds_redo = ds_meta = ds_create(xtrabackup_target_dir, - DS_TYPE_TMPFILE); - xtrabackup_add_datasink(ds_meta); - ds_set_pipe(ds_meta, ds); - } else { - ds_redo = ds_meta = ds_data; - } - } - - /* Encryption */ - if (xtrabackup_encrypt) { - ds_ctxt_t *ds; - - - - ds = ds_create(xtrabackup_target_dir, DS_TYPE_ENCRYPT); - xtrabackup_add_datasink(ds); - - ds_set_pipe(ds, ds_data); - if (ds_data != ds_meta) { - ds_data = ds; - ds = ds_create(xtrabackup_target_dir, DS_TYPE_ENCRYPT); - xtrabackup_add_datasink(ds); - ds_set_pipe(ds, ds_meta); - ds_redo = ds_meta = ds; - } else { - ds_redo = ds_data = ds_meta = ds; - } + ds_redo = ds_meta = ds_data; } /* Compression for ds_data and ds_redo */ @@ -3268,13 +2955,12 @@ xb_fil_io_init(void) Populates the tablespace memory cache by scanning for and opening data files. @returns DB_SUCCESS or error code.*/ static -ulint -xb_load_tablespaces(void) -/*=====================*/ +dberr_t +xb_load_tablespaces() { ulint i; - ibool create_new_db; - ulint err; + bool create_new_db; + dberr_t err; ulint sum_of_new_sizes; lsn_t min_arch_logno, max_arch_logno; @@ -3289,7 +2975,7 @@ xb_load_tablespaces(void) err = open_or_create_data_files(&create_new_db, &min_arch_logno, &max_arch_logno, - &min_flushed_lsn, &max_flushed_lsn, + &flushed_lsn, &sum_of_new_sizes); if (err != DB_SUCCESS) { msg("xtrabackup: Could not open or create data files.\n" @@ -3345,9 +3031,9 @@ xb_load_tablespaces(void) Initialize the tablespace memory cache and populate it by scanning for and opening data files. 
@returns DB_SUCCESS or error code.*/ -ulint -xb_data_files_init(void) -/*====================*/ +static +dberr_t +xb_data_files_init() { xb_fil_io_init(); @@ -3356,9 +3042,9 @@ xb_data_files_init(void) /************************************************************************ Destroy the tablespace memory cache. */ +static void -xb_data_files_close(void) -/*====================*/ +xb_data_files_close() { ulint i; @@ -3769,7 +3455,6 @@ open_or_create_log_file( ibool log_file_has_been_opened,/*!< in: TRUE if a log file has been opened before: then it is an error to try to create another log file */ - ulint k, /*!< in: log group number */ ulint i) /*!< in: log file number in group */ { ibool ret; @@ -3779,8 +3464,6 @@ open_or_create_log_file( UT_NOT_USED(create_new_db); UT_NOT_USED(log_file_has_been_opened); - UT_NOT_USED(k); - ut_ad(k == 0); *log_file_created = FALSE; @@ -3828,20 +3511,14 @@ open_or_create_log_file( which is for this log group */ fil_space_create(name, - 2 * k + SRV_LOG_SPACE_FIRST_ID, 0, FIL_LOG, 0, 0); + SRV_LOG_SPACE_FIRST_ID, 0, FIL_TYPE_LOG, 0, 0); + log_init(srv_n_log_files, srv_log_file_size * UNIV_PAGE_SIZE); } ut_a(fil_validate()); ut_a(fil_node_create(name, (ulint)srv_log_file_size, - 2 * k + SRV_LOG_SPACE_FIRST_ID, FALSE)); - if (i == 0) { - log_group_init(k, srv_n_log_files, - srv_log_file_size * UNIV_PAGE_SIZE, - 2 * k + SRV_LOG_SPACE_FIRST_ID, - SRV_LOG_SPACE_FIRST_ID + 1); /* dummy arch - space id */ - } + SRV_LOG_SPACE_FIRST_ID, FALSE)); return(DB_SUCCESS); } @@ -3935,7 +3612,7 @@ xtrabackup_backup_func(void) lsn_t latest_cp; uint i; uint count; - os_ib_mutex_t count_mutex; + pthread_mutex_t count_mutex; data_thread_ctxt_t *data_threads; #ifdef USE_POSIX_FADVISE @@ -3959,6 +3636,7 @@ xtrabackup_backup_func(void) mysql_data_home[0]=FN_CURLIB; // all paths are relative from here mysql_data_home[1]=0; + srv_n_purge_threads = 1; srv_read_only_mode = TRUE; srv_backup_mode = TRUE; @@ -4054,13 +3732,13 @@ xtrabackup_backup_func(void) 
xb_fil_io_init(); - log_init(); + log_sys_init(); lock_sys_create(srv_lock_table_size); for (i = 0; i < srv_n_log_files; i++) { err = open_or_create_log_file(FALSE, &log_file_created, - log_opened, 0, i); + log_opened, i); if (err != DB_SUCCESS) { //return((int) err); @@ -4115,72 +3793,60 @@ xtrabackup_backup_func(void) fil_system_t* f_system = fil_system; /* definition from recv_recovery_from_checkpoint_start() */ - log_group_t* max_cp_group; ulint max_cp_field; - byte* buf; - byte* log_hdr_buf_; - byte* log_hdr_buf; - ulint err; /* start back ground thread to copy newer log */ os_thread_id_t log_copying_thread_id; datafiles_iter_t *it; - log_hdr_buf_ = static_cast<byte *> - (ut_malloc(LOG_FILE_HDR_SIZE + UNIV_PAGE_SIZE_MAX)); - log_hdr_buf = static_cast<byte *> - (ut_align(log_hdr_buf_, UNIV_PAGE_SIZE_MAX)); - /* get current checkpoint_lsn */ /* Look for the latest checkpoint from any of the log groups */ - mutex_enter(&log_sys->mutex); + log_mutex_enter(); - err = recv_find_max_checkpoint(&max_cp_group, &max_cp_field); + dberr_t err = recv_find_max_checkpoint(&max_cp_field); if (err != DB_SUCCESS) { + exit(EXIT_FAILURE); + } - ut_free(log_hdr_buf_); + if (log_sys->log.format == 0) { +old_format: + msg("xtrabackup: Error: cannot process redo log" + " before MariaDB 10.2.2\n"); exit(EXIT_FAILURE); } - log_group_read_checkpoint_info(max_cp_group, max_cp_field); - buf = log_sys->checkpoint_buf; + ut_ad(!((log_sys->log.format ^ LOG_HEADER_FORMAT_CURRENT) + & ~LOG_HEADER_FORMAT_ENCRYPTED)); + + const byte* buf = log_sys->checkpoint_buf; checkpoint_lsn_start = mach_read_from_8(buf + LOG_CHECKPOINT_LSN); checkpoint_no_start = mach_read_from_8(buf + LOG_CHECKPOINT_NO); - mutex_exit(&log_sys->mutex); - reread_log_header: - fil_io(OS_FILE_READ | OS_FILE_LOG, true, max_cp_group->space_id, - 0, - 0, 0, LOG_FILE_HDR_SIZE, - log_hdr_buf, max_cp_group, NULL); - - /* check consistency of log file header to copy */ - mutex_enter(&log_sys->mutex); - - err = 
recv_find_max_checkpoint(&max_cp_group, &max_cp_field); + err = recv_find_max_checkpoint(&max_cp_field); - if (err != DB_SUCCESS) { + if (err != DB_SUCCESS) { + exit(EXIT_FAILURE); + } - ut_free(log_hdr_buf_); - exit(EXIT_FAILURE); - } + if (log_sys->log.format == 0) { + goto old_format; + } - log_group_read_checkpoint_info(max_cp_group, max_cp_field); - buf = log_sys->checkpoint_buf; + ut_ad(!((log_sys->log.format ^ LOG_HEADER_FORMAT_CURRENT) + & ~LOG_HEADER_FORMAT_ENCRYPTED)); if(checkpoint_no_start != mach_read_from_8(buf + LOG_CHECKPOINT_NO)) { checkpoint_lsn_start = mach_read_from_8(buf + LOG_CHECKPOINT_LSN); checkpoint_no_start = mach_read_from_8(buf + LOG_CHECKPOINT_NO); - mutex_exit(&log_sys->mutex); goto reread_log_header; } - mutex_exit(&log_sys->mutex); + log_mutex_exit(); xtrabackup_init_datasinks(); @@ -4227,10 +3893,6 @@ reread_log_header: &io_watching_thread_id); } - mutex_enter(&log_sys->mutex); - xtrabackup_choose_lsn_offset(checkpoint_lsn_start); - mutex_exit(&log_sys->mutex); - /* copy log file by current position */ if(xtrabackup_copy_logfile(checkpoint_lsn_start, FALSE)) exit(EXIT_FAILURE); @@ -4243,7 +3905,7 @@ reread_log_header: err = xb_load_tablespaces(); if (err != DB_SUCCESS) { msg("xtrabackup: error: xb_load_tablespaces() failed with" - "error code %lu\n", err); + "error code %u\n", err); exit(EXIT_FAILURE); } @@ -4320,35 +3982,24 @@ reread_log_header: } /* read the latest checkpoint lsn */ - latest_cp = 0; { - log_group_t* max_cp_group; ulint max_cp_field; - ulint err; - - mutex_enter(&log_sys->mutex); - err = recv_find_max_checkpoint(&max_cp_group, &max_cp_field); + log_mutex_enter(); - if (err != DB_SUCCESS) { + if (recv_find_max_checkpoint(&max_cp_field) == DB_SUCCESS + && log_sys->log.format != 0) { + latest_cp = mach_read_from_8(log_sys->checkpoint_buf + + LOG_CHECKPOINT_LSN); + msg("xtrabackup: The latest check point" + " (for incremental): '" LSN_PF "'\n", latest_cp); + } else { + latest_cp = 0; msg("xtrabackup: Error: 
recv_find_max_checkpoint() failed.\n"); - mutex_exit(&log_sys->mutex); - goto skip_last_cp; } - - log_group_read_checkpoint_info(max_cp_group, max_cp_field); - - xtrabackup_choose_lsn_offset(checkpoint_lsn_start); - - latest_cp = mach_read_from_8(log_sys->checkpoint_buf + - LOG_CHECKPOINT_LSN); - - mutex_exit(&log_sys->mutex); - - msg("xtrabackup: The latest check point (for incremental): " - "'" LSN_PF "'\n", latest_cp); + log_mutex_exit(); } -skip_last_cp: + /* stop log_copying_thread */ log_copying = FALSE; os_event_set(log_copying_stop); @@ -4421,437 +4072,12 @@ skip_last_cp: } } -/* ================= stats ================= */ -static my_bool -xtrabackup_stats_level( - dict_index_t* index, - ulint level) -{ - ulint space; - page_t* page; - - rec_t* node_ptr; - - ulint right_page_no; - - page_cur_t cursor; - - mtr_t mtr; - mem_heap_t* heap = mem_heap_create(256); - - ulint* offsets = NULL; - - ulonglong n_pages, n_pages_extern; - ulonglong sum_data, sum_data_extern; - ulonglong n_recs; - ulint page_size; - buf_block_t* block; - ulint zip_size; - - n_pages = sum_data = n_recs = 0; - n_pages_extern = sum_data_extern = 0; - - - if (level == 0) - fprintf(stdout, " leaf pages: "); - else - fprintf(stdout, " level %lu pages: ", level); - - mtr_start(&mtr); - - mtr_x_lock(&(index->lock), &mtr); - block = btr_root_block_get(index, RW_X_LATCH, &mtr); - page = buf_block_get_frame(block); - - space = page_get_space_id(page); - zip_size = fil_space_get_zip_size(space); - - while (level != btr_page_get_level(page, &mtr)) { - - ut_a(space == buf_block_get_space(block)); - ut_a(space == page_get_space_id(page)); - ut_a(!page_is_leaf(page)); - - page_cur_set_before_first(block, &cursor); - page_cur_move_to_next(&cursor); - - node_ptr = page_cur_get_rec(&cursor); - offsets = rec_get_offsets(node_ptr, index, offsets, - ULINT_UNDEFINED, &heap); - block = btr_node_ptr_get_child(node_ptr, index, offsets, &mtr); - page = buf_block_get_frame(block); - } - -loop: - 
mem_heap_empty(heap); - offsets = NULL; - mtr_x_lock(&(index->lock), &mtr); - - right_page_no = btr_page_get_next(page, &mtr); - - - /*=================================*/ - //fprintf(stdout, "%lu ", (ulint) buf_frame_get_page_no(page)); - - n_pages++; - sum_data += page_get_data_size(page); - n_recs += page_get_n_recs(page); - - - if (level == 0) { - page_cur_t cur; - ulint n_fields; - ulint i; - mem_heap_t* local_heap = NULL; - ulint offsets_[REC_OFFS_NORMAL_SIZE]; - ulint* local_offsets = offsets_; - - *offsets_ = (sizeof offsets_) / sizeof *offsets_; - - page_cur_set_before_first(block, &cur); - page_cur_move_to_next(&cur); - - for (;;) { - if (page_cur_is_after_last(&cur)) { - break; - } - - local_offsets = rec_get_offsets(cur.rec, index, local_offsets, - ULINT_UNDEFINED, &local_heap); - n_fields = rec_offs_n_fields(local_offsets); - - for (i = 0; i < n_fields; i++) { - if (rec_offs_nth_extern(local_offsets, i)) { - page_t* local_page; - ulint space_id; - ulint page_no; - ulint offset; - byte* blob_header; - ulint part_len; - mtr_t local_mtr; - ulint local_len; - byte* data; - buf_block_t* local_block; - - data = rec_get_nth_field(cur.rec, local_offsets, i, &local_len); - - ut_a(local_len >= BTR_EXTERN_FIELD_REF_SIZE); - local_len -= BTR_EXTERN_FIELD_REF_SIZE; - - space_id = mach_read_from_4(data + local_len + BTR_EXTERN_SPACE_ID); - page_no = mach_read_from_4(data + local_len + BTR_EXTERN_PAGE_NO); - offset = mach_read_from_4(data + local_len + BTR_EXTERN_OFFSET); - - if (offset != FIL_PAGE_DATA) - msg("\nWarning: several record may share same external page.\n"); - - for (;;) { - mtr_start(&local_mtr); - - local_block = btr_block_get(space_id, zip_size, page_no, RW_S_LATCH, index, &local_mtr); - local_page = buf_block_get_frame(local_block); - blob_header = local_page + offset; -#define BTR_BLOB_HDR_PART_LEN 0 -#define BTR_BLOB_HDR_NEXT_PAGE_NO 4 - //part_len = btr_blob_get_part_len(blob_header); - part_len = mach_read_from_4(blob_header + 
BTR_BLOB_HDR_PART_LEN); - - //page_no = btr_blob_get_next_page_no(blob_header); - page_no = mach_read_from_4(blob_header + BTR_BLOB_HDR_NEXT_PAGE_NO); - - offset = FIL_PAGE_DATA; - - - - - /*=================================*/ - //fprintf(stdout, "[%lu] ", (ulint) buf_frame_get_page_no(page)); - - n_pages_extern++; - sum_data_extern += part_len; - - - mtr_commit(&local_mtr); - - if (page_no == FIL_NULL) - break; - } - } - } - - page_cur_move_to_next(&cur); - } - } - - - - - mtr_commit(&mtr); - if (right_page_no != FIL_NULL) { - mtr_start(&mtr); - block = btr_block_get(space, zip_size, right_page_no, - RW_X_LATCH, index, &mtr); - page = buf_block_get_frame(block); - goto loop; - } - mem_heap_free(heap); - - if (zip_size) { - page_size = zip_size; - } else { - page_size = UNIV_PAGE_SIZE; - } - - if (level == 0) - fprintf(stdout, "recs=%llu, ", n_recs); - - fprintf(stdout, "pages=%llu, data=%llu bytes, data/pages=%lld%%", - n_pages, sum_data, - ((sum_data * 100)/ page_size)/n_pages); - - - if (level == 0 && n_pages_extern) { - putc('\n', stdout); - /* also scan blob pages*/ - fprintf(stdout, " external pages: "); - - fprintf(stdout, "pages=%llu, data=%llu bytes, data/pages=%lld%%", - n_pages_extern, sum_data_extern, - ((sum_data_extern * 100)/ page_size)/n_pages_extern); - } - - putc('\n', stdout); - - if (level > 0) { - xtrabackup_stats_level(index, level - 1); - } - - return(TRUE); -} - -static void -xtrabackup_stats_func(int argc, char **argv) -{ - ulint n; - - /* cd to datadir */ - - if (my_setwd(mysql_real_data_home,MYF(MY_WME))) - { - msg("xtrabackup: cannot my_setwd %s\n", mysql_real_data_home); - exit(EXIT_FAILURE); - } - msg("xtrabackup: cd to %s\n", mysql_real_data_home); - encryption_plugin_prepare_init(argc, argv); - mysql_data_home= mysql_data_home_buff; - mysql_data_home[0]=FN_CURLIB; // all paths are relative from here - mysql_data_home[1]=0; - - /* set read only */ - srv_read_only_mode = TRUE; - - /* initialize components */ - if(innodb_init_param()) - 
exit(EXIT_FAILURE); - - /* Check if the log files have been created, otherwise innodb_init() - will crash when called with srv_read_only == TRUE */ - for (n = 0; n < srv_n_log_files; n++) { - char logname[FN_REFLEN]; - ibool exists; - os_file_type_t type; - - snprintf(logname, sizeof(logname), "%s%c%s%lu", - srv_log_group_home_dir, SRV_PATH_SEPARATOR, - "ib_logfile", (ulong) n); - srv_normalize_path_for_win(logname); - - if (!os_file_status(logname, &exists, &type) || !exists || - type != OS_FILE_TYPE_FILE) { - msg("xtrabackup: Error: " - "Cannot find log file %s.\n", logname); - msg("xtrabackup: Error: " - "to use the statistics feature, you need a " - "clean copy of the database including " - "correctly sized log files, so you need to " - "execute with --prepare twice to use this " - "functionality on a backup.\n"); - exit(EXIT_FAILURE); - } - } - - msg("xtrabackup: Starting 'read-only' InnoDB instance to gather " - "index statistics.\n" - "xtrabackup: Using %lld bytes for buffer pool (set by " - "--use-memory parameter)\n", xtrabackup_use_memory); - - if(innodb_init()) - exit(EXIT_FAILURE); - - xb_filters_init(); - - fprintf(stdout, "\n\n<INDEX STATISTICS>\n"); - - /* gather stats */ - - { - dict_table_t* sys_tables; - dict_index_t* sys_index; - dict_table_t* table; - btr_pcur_t pcur; - rec_t* rec; - byte* field; - ulint len; - mtr_t mtr; - - /* Enlarge the fatal semaphore wait timeout during the InnoDB table - monitor printout */ - - os_increment_counter_by_amount(server_mutex, - srv_fatal_semaphore_wait_threshold, - 72000); - - mutex_enter(&(dict_sys->mutex)); - - mtr_start(&mtr); - - sys_tables = dict_table_get_low("SYS_TABLES"); - sys_index = UT_LIST_GET_FIRST(sys_tables->indexes); - - btr_pcur_open_at_index_side(TRUE, sys_index, BTR_SEARCH_LEAF, &pcur, - TRUE, 0, &mtr); -loop: - btr_pcur_move_to_next_user_rec(&pcur, &mtr); - - rec = btr_pcur_get_rec(&pcur); - - if (!btr_pcur_is_on_user_rec(&pcur)) - { - /* end of index */ - - btr_pcur_close(&pcur); - 
mtr_commit(&mtr); - - mutex_exit(&(dict_sys->mutex)); - - /* Restore the fatal semaphore wait timeout */ - os_increment_counter_by_amount(server_mutex, - srv_fatal_semaphore_wait_threshold, - -72000); - - goto end; - } - - field = rec_get_nth_field_old(rec, 0, &len); - - if (!rec_get_deleted_flag(rec, 0)) { - - /* We found one */ - - char* table_name = mem_strdupl((char*) field, len); - - btr_pcur_store_position(&pcur, &mtr); - - mtr_commit(&mtr); - - table = dict_table_get_low(table_name); - mem_free(table_name); - - if (table && check_if_skip_table(table->name)) - goto skip; - - - if (table == NULL) { - fputs("InnoDB: Failed to load table ", stderr); - ut_print_namel(stderr, NULL, TRUE, (char*) field, len); - putc('\n', stderr); - } else { - dict_index_t* index; - - /* The table definition was corrupt if there - is no index */ - - if (dict_table_get_first_index(table)) { - dict_stats_update_transient(table); - } - - //dict_table_print_low(table); - - index = UT_LIST_GET_FIRST(table->indexes); - while (index != NULL) { -{ - ib_int64_t n_vals; - - if (index->n_user_defined_cols > 0) { - n_vals = index->stat_n_diff_key_vals[ - index->n_user_defined_cols]; - } else { - n_vals = index->stat_n_diff_key_vals[1]; - } - - fprintf(stdout, - " table: %s, index: %s, space id: %lu, root page: %lu" - ", zip size: %lu" - "\n estimated statistics in dictionary:\n" - " key vals: %lu, leaf pages: %lu, size pages: %lu\n" - " real statistics:\n", - table->name, index->name, - (ulong) index->space, - (ulong) index->page, - (ulong) fil_space_get_zip_size(index->space), - (ulong) n_vals, - (ulong) index->stat_n_leaf_pages, - (ulong) index->stat_index_size); - - { - mtr_t local_mtr; - page_t* root; - ulint page_level; - - mtr_start(&local_mtr); - - mtr_x_lock(&(index->lock), &local_mtr); - root = btr_root_get(index, &local_mtr); - page_level = btr_page_get_level(root, &local_mtr); - - xtrabackup_stats_level(index, page_level); - - mtr_commit(&local_mtr); - } - - putc('\n', stdout); -} - 
index = UT_LIST_GET_NEXT(indexes, index); - } - } - -skip: - mtr_start(&mtr); - - btr_pcur_restore_position(BTR_SEARCH_LEAF, &pcur, &mtr); - } - - goto loop; - } - -end: - putc('\n', stdout); - - fflush(stdout); - - xb_filters_free(); - - /* shutdown InnoDB */ - if(innodb_end()) - exit(EXIT_FAILURE); -} - /* ================= prepare ================= */ static my_bool xtrabackup_init_temp_log(void) { - os_file_t src_file = XB_FILE_UNDEFINED; + pfs_os_file_t src_file; char src_path[FN_REFLEN]; char dst_path[FN_REFLEN]; ibool success; @@ -5178,7 +4404,7 @@ xb_space_create_file( ulint space_id, /*!<in: space id */ ulint flags __attribute__((unused)),/*!<in: tablespace flags */ - os_file_t* file) /*!<out: file handle */ + pfs_os_file_t* file) /*!<out: file handle */ { ibool ret; byte* buf; @@ -5257,7 +4483,7 @@ mismatching ID, renames it to xtrabackup_tmp_#ID.ibd. If there was no matching file, creates a new tablespace. @return file handle of matched or created file */ static -os_file_t +pfs_os_file_t xb_delta_open_matching_space( const char* dbname, /* in: path to destination database dir */ const char* name, /* in: name of delta file (without .delta) */ @@ -5271,7 +4497,7 @@ xb_delta_open_matching_space( char dest_space_name[FN_REFLEN]; ibool ok; fil_space_t* fil_space; - os_file_t file = 0; + pfs_os_file_t file; ulint tablespace_flags; xb_filter_entry_t* table; @@ -5435,8 +4661,8 @@ xtrabackup_apply_delta( including the .delta extension */ void* /*data*/) { - os_file_t src_file = XB_FILE_UNDEFINED; - os_file_t dst_file = XB_FILE_UNDEFINED; + pfs_os_file_t src_file; + pfs_os_file_t dst_file; char src_path[FN_REFLEN]; char dst_path[FN_REFLEN]; char meta_path[FN_REFLEN]; @@ -5810,7 +5036,7 @@ xtrabackup_apply_deltas() static my_bool xtrabackup_close_temp_log(my_bool clear_flag) { - os_file_t src_file = XB_FILE_UNDEFINED; + pfs_os_file_t src_file; char src_path[FN_REFLEN]; char dst_path[FN_REFLEN]; ibool success; @@ -6223,74 +5449,6 @@ xb_export_cfg_write( } 
-/********************************************************************//** -Searches archived log files in archived log directory. The min and max -LSN's of found files as well as archived log file size are stored in -xtrabackup_arch_first_file_lsn, xtrabackup_arch_last_file_lsn and -xtrabackup_arch_file_size respectively. -@return true on success -*/ -static -bool -xtrabackup_arch_search_files( -/*=========================*/ - ib_uint64_t start_lsn) /*!< in: filter out log files - witch does not contain data - with lsn < start_lsn */ -{ - os_file_dir_t dir; - os_file_stat_t fileinfo; - ut_ad(innobase_log_arch_dir); - - dir = os_file_opendir(innobase_log_arch_dir, FALSE); - if (!dir) { - msg("xtrabackup: error: cannot open archived log directory %s\n", - innobase_log_arch_dir); - return false; - } - - while(!os_file_readdir_next_file(innobase_log_arch_dir, - dir, - &fileinfo) ) { - lsn_t log_file_lsn; - char* log_str_end_lsn_ptr; - - if (strncmp(fileinfo.name, - IB_ARCHIVED_LOGS_PREFIX, - sizeof(IB_ARCHIVED_LOGS_PREFIX) - 1)) { - continue; - } - - log_file_lsn = strtoll(fileinfo.name + - sizeof(IB_ARCHIVED_LOGS_PREFIX) - 1, - &log_str_end_lsn_ptr, 10); - - if (*log_str_end_lsn_ptr) { - continue; - } - - if (log_file_lsn + (fileinfo.size - LOG_FILE_HDR_SIZE) < start_lsn) { - continue; - } - - if (!xtrabackup_arch_first_file_lsn || - log_file_lsn < xtrabackup_arch_first_file_lsn) { - xtrabackup_arch_first_file_lsn = log_file_lsn; - } - if (log_file_lsn > xtrabackup_arch_last_file_lsn) { - xtrabackup_arch_last_file_lsn = log_file_lsn; - } - - //TODO: find the more suitable way to extract archived log file - //size - if (fileinfo.size > (ib_int64_t)xtrabackup_arch_file_size) { - xtrabackup_arch_file_size = fileinfo.size; - } - } - - return xtrabackup_arch_first_file_lsn != 0; -} - static void innodb_free_param() @@ -6356,8 +5514,7 @@ xtrabackup_prepare_func(int argc, char ** argv) xtrabackup_target_dir[1]=0; /* - read metadata of target, we don't need metadata reading 
in the case - archived logs applying + read metadata of target */ sprintf(metadata_path, "%s/%s", xtrabackup_target_dir, XTRABACKUP_METADATA_FILENAME); @@ -6368,42 +5525,38 @@ xtrabackup_prepare_func(int argc, char ** argv) exit(EXIT_FAILURE); } - if (!innobase_log_arch_dir) - { - if (!strcmp(metadata_type, "full-backuped")) { - msg("xtrabackup: This target seems to be not prepared " - "yet.\n"); - } else if (!strcmp(metadata_type, "log-applied")) { - msg("xtrabackup: This target seems to be already " - "prepared with --apply-log-only.\n"); - goto skip_check; - } else if (!strcmp(metadata_type, "full-prepared")) { - msg("xtrabackup: This target seems to be already " - "prepared.\n"); - } else { - msg("xtrabackup: This target seems not to have correct " - "metadata...\n"); - exit(EXIT_FAILURE); - } + if (!strcmp(metadata_type, "full-backuped")) { + msg("xtrabackup: This target seems to be not prepared yet.\n"); + } else if (!strcmp(metadata_type, "log-applied")) { + msg("xtrabackup: This target seems to be already " + "prepared with --apply-log-only.\n"); + goto skip_check; + } else if (!strcmp(metadata_type, "full-prepared")) { + msg("xtrabackup: This target seems to be already prepared.\n"); + } else { + msg("xtrabackup: This target seems not to have correct " + "metadata...\n"); + exit(EXIT_FAILURE); + } - if (xtrabackup_incremental) { - msg("xtrabackup: error: applying incremental backup " - "needs target prepared with --apply-log-only.\n"); - exit(EXIT_FAILURE); - } + if (xtrabackup_incremental) { + msg("xtrabackup: error: applying incremental backup " + "needs target prepared with --apply-log-only.\n"); + exit(EXIT_FAILURE); + } skip_check: - if (xtrabackup_incremental - && metadata_to_lsn != incremental_lsn) { - msg("xtrabackup: error: This incremental backup seems " - "not to be proper for the target.\n" - "xtrabackup: Check 'to_lsn' of the target and " - "'from_lsn' of the incremental.\n"); - exit(EXIT_FAILURE); - } + if (xtrabackup_incremental + && 
metadata_to_lsn != incremental_lsn) { + msg("xtrabackup: error: This incremental backup seems " + "not to be proper for the target.\n" + "xtrabackup: Check 'to_lsn' of the target and " + "'from_lsn' of the incremental.\n"); + exit(EXIT_FAILURE); } /* Create logfiles for recovery from 'xtrabackup_logfile', before start InnoDB */ srv_max_n_threads = 1000; + srv_n_purge_threads = 1; ut_mem_init(); /* temporally dummy value to avoid crash */ srv_page_size_shift = 14; @@ -6421,7 +5574,7 @@ skip_check: xb_filters_init(); - if(!innobase_log_arch_dir && xtrabackup_init_temp_log()) + if (xtrabackup_init_temp_log()) goto error_cleanup; if(innodb_init_param()) { @@ -6430,7 +5583,7 @@ skip_check: xb_normalize_init_values(); - if (xtrabackup_incremental || innobase_log_arch_dir) { + if (xtrabackup_incremental) { err = xb_data_files_init(); if (err != DB_SUCCESS) { msg("xtrabackup: error: xb_data_files_init() failed " @@ -6447,7 +5600,7 @@ skip_check: goto error_cleanup; } } - if (xtrabackup_incremental || innobase_log_arch_dir) { + if (xtrabackup_incremental) { xb_data_files_close(); } if (xtrabackup_incremental) { @@ -6483,53 +5636,6 @@ skip_check: srv_n_write_io_threads = 4; } - if (innobase_log_arch_dir) { - srv_arch_dir = innobase_log_arch_dir; - srv_archive_recovery = true; - if (xtrabackup_archived_to_lsn) { - if (xtrabackup_archived_to_lsn < metadata_last_lsn) { - msg("xtrabackup: warning: logs applying lsn " - "limit " UINT64PF " is " - "less than metadata last-lsn " UINT64PF - " and will be set to metadata last-lsn value\n", - xtrabackup_archived_to_lsn, - metadata_last_lsn); - xtrabackup_archived_to_lsn = metadata_last_lsn; - } - if (xtrabackup_archived_to_lsn < min_flushed_lsn) { - msg("xtrabackup: error: logs applying " - "lsn limit " UINT64PF " is less than " - "min_flushed_lsn " UINT64PF - ", there is nothing to do\n", - xtrabackup_archived_to_lsn, - min_flushed_lsn); - goto error_cleanup; - } - } - srv_archive_recovery_limit_lsn= xtrabackup_archived_to_lsn; - /* 
- Unfinished transactions are not rolled back during log applying - as they can be finished at the firther files applyings. - */ - xtrabackup_apply_log_only = srv_apply_log_only = true; - - if (!xtrabackup_arch_search_files(min_flushed_lsn)) { - goto error_cleanup; - } - - /* - Check if last log file last lsn is big enough to overlap - last scanned lsn read from metadata. - */ - if (xtrabackup_arch_last_file_lsn + - xtrabackup_arch_file_size - - LOG_FILE_HDR_SIZE < metadata_last_lsn) { - msg("xtrabackup: error: there are no enough archived logs " - "to apply\n"); - goto error_cleanup; - } - } - msg("xtrabackup: Starting InnoDB instance for recovery.\n" "xtrabackup: Using %lld bytes for buffer pool " "(set by --use-memory parameter)\n", xtrabackup_use_memory); @@ -6591,7 +5697,7 @@ skip_check: if (xtrabackup_export) { msg("xtrabackup: export option is specified.\n"); - os_file_t info_file = XB_FILE_UNDEFINED; + pfs_os_file_t info_file; char info_file_path[FN_REFLEN]; ibool success; char table_name[FN_REFLEN]; @@ -6758,9 +5864,6 @@ next_node: exit(EXIT_FAILURE); } - if (innobase_log_arch_dir) - srv_start_lsn = log_sys->lsn = recv_sys->recovered_lsn; - /* Check whether the log is applied enough or not. 
*/ if ((xtrabackup_incremental && srv_start_lsn < incremental_to_lsn) @@ -6785,8 +5888,7 @@ next_node: xb_write_galera_info(xtrabackup_incremental); #endif - if(innodb_end()) - goto error_cleanup; + innodb_end(); innodb_free_param(); @@ -6872,9 +5974,7 @@ next_node: if(innodb_init()) goto error; - if(innodb_end()) - goto error; - + innodb_end(); innodb_free_param(); } @@ -6936,8 +6036,6 @@ xb_init() if (opt_decompress) { mixed_options[n_mixed_options++] = "--decompress"; - } else if (opt_decrypt) { - mixed_options[n_mixed_options++] = "--decrypt"; } if (xtrabackup_copy_back) { @@ -7400,40 +6498,11 @@ int main(int argc, char **argv) innobase_file_per_table = TRUE; } - if (xtrabackup_incremental && xtrabackup_stream && - xtrabackup_stream_fmt == XB_STREAM_FMT_TAR) { - msg("xtrabackup: error: " - "streaming incremental backups are incompatible with the \n" - "'tar' streaming format. Use --stream=xbstream instead.\n"); - exit(EXIT_FAILURE); - } - - if ((xtrabackup_compress || xtrabackup_encrypt) && xtrabackup_stream && - xtrabackup_stream_fmt == XB_STREAM_FMT_TAR) { - msg("xtrabackup: error: " - "compressed and encrypted backups are incompatible with the \n" - "'tar' streaming format. Use --stream=xbstream instead.\n"); - exit(EXIT_FAILURE); - } - - if (!xtrabackup_prepare && - (innobase_log_arch_dir || xtrabackup_archived_to_lsn)) { - - /* Default my.cnf can contain innobase_log_arch_dir option set - for server, reset it to allow backup. 
*/ - innobase_log_arch_dir= NULL; - xtrabackup_archived_to_lsn= 0; - msg("xtrabackup: warning: " - "as --innodb-log-arch-dir and --to-archived-lsn can be used " - "only with --prepare they will be reset\n"); - } - /* cannot execute both for now */ { int num = 0; if (xtrabackup_backup) num++; - if (xtrabackup_stats) num++; if (xtrabackup_prepare) num++; if (xtrabackup_copy_back) num++; if (xtrabackup_move_back) num++; @@ -7454,10 +6523,6 @@ int main(int argc, char **argv) if (xtrabackup_backup) xtrabackup_backup_func(); - /* --stats */ - if (xtrabackup_stats) - xtrabackup_stats_func(argc_server,server_defaults); - /* --prepare */ if (xtrabackup_prepare) { xtrabackup_prepare_func(argc_server, server_defaults); diff --git a/extra/mariabackup/xtrabackup.h b/extra/mariabackup/xtrabackup.h index 51491ce1f00..eafc848fd43 100644 --- a/extra/mariabackup/xtrabackup.h +++ b/extra/mariabackup/xtrabackup.h @@ -27,7 +27,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA #include "changed_page_bitmap.h" #ifdef __WIN__ -#define XB_FILE_UNDEFINED NULL +#define XB_FILE_UNDEFINED INVALID_HANDLE_VALUE #else #define XB_FILE_UNDEFINED (-1) #endif @@ -81,7 +81,6 @@ extern char *xtrabackup_tables_exclude; extern char *xtrabackup_databases_exclude; extern ibool xtrabackup_compress; -extern ibool xtrabackup_encrypt; extern my_bool xtrabackup_backup; extern my_bool xtrabackup_prepare; @@ -92,15 +91,10 @@ extern my_bool xtrabackup_decrypt_decompress; extern char *innobase_data_file_path; extern char *innobase_doublewrite_file; -extern char *xtrabackup_encrypt_key; -extern char *xtrabackup_encrypt_key_file; extern longlong innobase_log_file_size; extern long innobase_log_files_in_group; extern longlong innobase_page_size; -extern const char *xtrabackup_encrypt_algo_names[]; -extern TYPELIB xtrabackup_encrypt_algo_typelib; - extern int xtrabackup_parallel; extern my_bool xb_close_files; @@ -113,9 +107,6 @@ extern "C"{ #ifdef __cplusplus } #endif -extern ulong 
xtrabackup_encrypt_algo; -extern uint xtrabackup_encrypt_threads; -extern ulonglong xtrabackup_encrypt_chunk_size; extern my_bool xtrabackup_export; extern char *xtrabackup_incremental_basedir; extern char *xtrabackup_extra_lsndir; @@ -158,8 +149,6 @@ extern TYPELIB query_type_typelib; extern ulong opt_lock_wait_query_type; extern ulong opt_kill_long_query_type; -extern ulong opt_decrypt_algo; - extern uint opt_kill_long_queries_timeout; extern uint opt_lock_wait_timeout; extern uint opt_lock_wait_threshold; @@ -167,7 +156,6 @@ extern uint opt_debug_sleep_before_unlock; extern uint opt_safe_slave_backup_timeout; extern const char *opt_history; -extern my_bool opt_decrypt; enum binlog_info_enum { BINLOG_INFO_OFF, BINLOG_INFO_LOCKLESS, BINLOG_INFO_ON, BINLOG_INFO_AUTO}; @@ -182,19 +170,10 @@ datafiles_iter_t *datafiles_iter_new(fil_system_t *f_system); fil_node_t *datafiles_iter_next(datafiles_iter_t *it); void datafiles_iter_free(datafiles_iter_t *it); -/************************************************************************ -Initialize the tablespace memory cache and populate it by scanning for and -opening data files */ -ulint xb_data_files_init(void); - -/************************************************************************ -Destroy the tablespace memory cache. */ -void xb_data_files_close(void); - /*********************************************************************** Reads the space flags from a given data file and returns the compressed page size, or 0 if the space is not compressed. 
*/ -ulint xb_get_zip_size(os_file_t file); +ulint xb_get_zip_size(pfs_os_file_t file); /************************************************************************ Checks if a table specified as a name in the form "database/name" (InnoDB 5.6) diff --git a/extra/replace.c b/extra/replace.c index a9982670384..b8c328f2902 100644 --- a/extra/replace.c +++ b/extra/replace.c @@ -174,6 +174,7 @@ register char **argv[]; break; case 'V': version=1; + /* fall through */ case 'I': case '?': help=1; /* Help text written */ diff --git a/include/my_sys.h b/include/my_sys.h index dfabda42022..ed5a7200ec8 100644 --- a/include/my_sys.h +++ b/include/my_sys.h @@ -949,6 +949,12 @@ extern ulonglong my_getcputime(void); #define hrtime_sec_part(X) ((ulong)((X).val % HRTIME_RESOLUTION)) #define my_time(X) hrtime_to_time(my_hrtime()) +#if STACK_DIRECTION < 0 +#define available_stack_size(CUR,END) (long) ((char*)(CUR) - (char*)(END)) +#else +#define available_stack_size(CUR,END) (long) ((char*)(END) - (char*)(CUR)) +#endif + #ifdef HAVE_SYS_MMAN_H #include <sys/mman.h> diff --git a/include/mysql.h.pp b/include/mysql.h.pp index 3316574a423..d061df71944 100644 --- a/include/mysql.h.pp +++ b/include/mysql.h.pp @@ -12,7 +12,8 @@ enum enum_server_command COM_UNIMPLEMENTED, COM_RESET_CONNECTION, COM_MDB_GAP_BEG, - COM_MDB_GAP_END=250, + COM_MDB_GAP_END=249, + COM_STMT_BULK_EXECUTE=250, COM_SLAVE_WORKER=251, COM_SLAVE_IO=252, COM_SLAVE_SQL=253, diff --git a/include/mysql_com.h b/include/mysql_com.h index d24957d1990..6f0a2097f13 100644 --- a/include/mysql_com.h +++ b/include/mysql_com.h @@ -115,7 +115,8 @@ enum enum_server_command COM_RESET_CONNECTION, /* don't forget to update const char *command_name[] in sql_parse.cc */ COM_MDB_GAP_BEG, - COM_MDB_GAP_END=250, + COM_MDB_GAP_END=249, + COM_STMT_BULK_EXECUTE=250, COM_SLAVE_WORKER=251, COM_SLAVE_IO=252, COM_SLAVE_SQL=253, @@ -136,6 +137,13 @@ enum enum_indicator_type STMT_INDICATOR_IGNORE }; +/* + bulk PS flags +*/ +#define 
STMT_BULK_FLAG_CLIENT_SEND_TYPES 128 +#define STMT_BULK_FLAG_INSERT_ID_REQUEST 64 + + /* sql type stored in .frm files for virtual fields */ #define MYSQL_TYPE_VIRTUAL 245 /* @@ -311,7 +319,8 @@ enum enum_indicator_type CLIENT_SESSION_TRACK |\ CLIENT_DEPRECATE_EOF |\ CLIENT_CONNECT_ATTRS |\ - MARIADB_CLIENT_COM_MULTI) + MARIADB_CLIENT_COM_MULTI |\ + MARIADB_CLIENT_STMT_BULK_OPERATIONS) /* To be added later: diff --git a/libmariadb b/libmariadb -Subproject 467a193b399dfda41d87368a898e9550d754fa1 +Subproject ff4bfdf1caf2e0a47382fdbe6d796d0e63ec106 diff --git a/libmysqld/libmysql.c b/libmysqld/libmysql.c index 09d5abbbb6a..09a477d04ee 100644 --- a/libmysqld/libmysql.c +++ b/libmysqld/libmysql.c @@ -1404,7 +1404,7 @@ void set_stmt_errmsg(MYSQL_STMT *stmt, NET *net) DBUG_ASSERT(stmt != 0); stmt->last_errno= net->last_errno; - if (net->last_error && net->last_error[0]) + if (net->last_error[0]) strmov(stmt->last_error, net->last_error); strmov(stmt->sqlstate, net->sqlstate); @@ -4707,8 +4707,7 @@ my_bool STDCALL mysql_stmt_close(MYSQL_STMT *stmt) { uchar buff[MYSQL_STMT_HEADER]; /* 4 bytes - stmt id */ - if ((rc= reset_stmt_handle(stmt, RESET_ALL_BUFFERS | RESET_CLEAR_ERROR))) - return rc; + reset_stmt_handle(stmt, RESET_ALL_BUFFERS | RESET_CLEAR_ERROR); int4store(buff, stmt->stmt_id); if ((rc= stmt_command(mysql, COM_STMT_CLOSE, buff, 4, stmt))) diff --git a/mysql-test/include/innodb_page_size.combinations b/mysql-test/include/innodb_page_size.combinations new file mode 100644 index 00000000000..b9b1e1b106f --- /dev/null +++ b/mysql-test/include/innodb_page_size.combinations @@ -0,0 +1,16 @@ +[64k] +innodb-page-size=64K +innodb-buffer-pool-size=24M + +[32k] +innodb-page-size=32K +innodb-buffer-pool-size=24M + +[16k] +innodb-page-size=16K + +[8k] +innodb-page-size=8K + +[4k] +innodb-page-size=4K diff --git a/mysql-test/include/innodb_page_size.inc b/mysql-test/include/innodb_page_size.inc new file mode 100644 index 00000000000..754b640da86 --- /dev/null +++ 
b/mysql-test/include/innodb_page_size.inc @@ -0,0 +1,4 @@ +# The goal of including this file is to enable innodb_page_size combinations +# (see include/innodb_page_size.combinations) + +--source include/have_innodb.inc diff --git a/mysql-test/include/innodb_page_size_small.combinations b/mysql-test/include/innodb_page_size_small.combinations new file mode 100644 index 00000000000..a9b7c7ce14d --- /dev/null +++ b/mysql-test/include/innodb_page_size_small.combinations @@ -0,0 +1,8 @@ +[16k] +innodb-page-size=16K + +[8k] +innodb-page-size=8K + +[4k] +innodb-page-size=4K diff --git a/mysql-test/include/innodb_page_size_small.inc b/mysql-test/include/innodb_page_size_small.inc new file mode 100644 index 00000000000..754b640da86 --- /dev/null +++ b/mysql-test/include/innodb_page_size_small.inc @@ -0,0 +1,4 @@ +# The goal of including this file is to enable innodb_page_size combinations +# (see include/innodb_page_size.combinations) + +--source include/have_innodb.inc diff --git a/mysql-test/include/varchar.inc b/mysql-test/include/varchar.inc index 50741130895..7add7113b8d 100644 --- a/mysql-test/include/varchar.inc +++ b/mysql-test/include/varchar.inc @@ -90,6 +90,7 @@ explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' --replace_regex /Duplicate entry '[^']+' for key/Duplicate entry '{ ' for key/ --error ER_DUP_ENTRY alter table t1 add unique(v); +show warnings; alter table t1 add key(v); select concat('*',v,'*',c,'*',t,'*') as qq from t1 where v='a'; --replace_column 6 # 9 # 10 # diff --git a/mysql-test/lib/mtr_report.pm b/mysql-test/lib/mtr_report.pm index 97ace54f0fb..d93d8adf34c 100644 --- a/mysql-test/lib/mtr_report.pm +++ b/mysql-test/lib/mtr_report.pm @@ -208,6 +208,10 @@ sub mtr_report_test ($) { { mtr_report("[ skipped ]"); } + if ( $tinfo->{'warnings'} ) + { + mtr_report($tinfo->{'warnings'}); + } } elsif ($result eq 'MTR_RES_PASSED') { diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 
1d41067bb1e..4fc0c90c07d 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -2794,15 +2794,26 @@ sub mysql_server_start($) { } my $mysqld_basedir= $mysqld->value('basedir'); + my $extra_opts= get_extra_opts($mysqld, $tinfo); + if ( $basedir eq $mysqld_basedir ) { if (! $opt_start_dirty) # If dirty, keep possibly grown system db { - # Copy datadir from installed system db - my $path= ($opt_parallel == 1) ? "$opt_vardir" : "$opt_vardir/.."; - my $install_db= "$path/install.db"; - copytree($install_db, $datadir) if -d $install_db; - mtr_error("Failed to copy system db to '$datadir'") unless -d $datadir; + # Some InnoDB options are incompatible with the default bootstrap. + # If they are used, re-bootstrap + if ( $extra_opts and + "@$extra_opts" =~ /--innodb[-_](?:page[-_]size|checksum[-_]algorithm|undo[-_]tablespaces|log[-_]group[-_]home[-_]dir|data[-_]home[-_]dir)/ ) + { + mysql_install_db($mysqld, undef, $extra_opts); + } + else { + # Copy datadir from installed system db + my $path= ($opt_parallel == 1) ? 
"$opt_vardir" : "$opt_vardir/.."; + my $install_db= "$path/install.db"; + copytree($install_db, $datadir) if -d $install_db; + mtr_error("Failed to copy system db to '$datadir'") unless -d $datadir; + } } } else @@ -2841,7 +2852,6 @@ sub mysql_server_start($) { if (!$opt_embedded_server) { - my $extra_opts= get_extra_opts($mysqld, $tinfo); mysqld_start($mysqld,$extra_opts); # Save this test case information, so next can examine it @@ -3065,7 +3075,7 @@ sub default_mysqld { sub mysql_install_db { - my ($mysqld, $datadir)= @_; + my ($mysqld, $datadir, $extra_opts)= @_; my $install_datadir= $datadir || $mysqld->value('datadir'); my $install_basedir= $mysqld->value('basedir'); @@ -3106,6 +3116,13 @@ sub mysql_install_db { mtr_add_arg($args, $extra_opt); } } + # InnoDB options can come not only from the command line, but also + # from option files or combinations + foreach my $extra_opt ( @$extra_opts ) { + if ($extra_opt =~ /--innodb/) { + mtr_add_arg($args, $extra_opt); + } + } # If DISABLE_GRANT_OPTIONS is defined when the server is compiled (e.g., # configure --disable-grant-options), mysqld will not recognize the @@ -3990,12 +4007,13 @@ sub run_testcase ($$) { { my $res= $test->exit_status(); - if ($res == 0 and $opt_warnings and check_warnings($tinfo) ) + if (($res == 0 or $res == 62) and $opt_warnings and check_warnings($tinfo) ) { - # Test case suceeded, but it has produced unexpected - # warnings, continue in $res == 1 - $res= 1; - resfile_output($tinfo->{'warnings'}) if $opt_resfile; + # If test case suceeded, but it has produced unexpected + # warnings, continue with $res == 1; + # but if the test was skipped, it should remain skipped + $res= 1 if $res == 0; + resfile_output($tinfo->{'warnings'}) if $opt_resfile; } if ( $res == 0 ) diff --git a/mysql-test/r/alter_table_online.result b/mysql-test/r/alter_table_online.result index b3ef9c354f7..54df4e0c96c 100644 --- a/mysql-test/r/alter_table_online.result +++ b/mysql-test/r/alter_table_online.result @@ -184,6 
+184,35 @@ CREATE TABLE t1 (a LONGTEXT COLLATE latin1_general_ci); ALTER TABLE t1 MODIFY a LONGTEXT COLLATE latin1_swedish_ci, ALGORITHM=INPLACE; ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY DROP TABLE t1; -# -# End of MDEV-8948 ALTER ... INPLACE does work for BINARY, BLOB -# +select @@global.delay_key_write; +@@global.delay_key_write +ON +create table t1 (a int, b int, key(b)); +flush tables; +flush status; +show status like 'Feature_delay_key_write'; +Variable_name Value +Feature_delay_key_write 0 +insert t1 values (1,2),(2,3),(3,4); +show status like 'Feature_delay_key_write'; +Variable_name Value +Feature_delay_key_write 0 +alter online table t1 delay_key_write=1; +show status like 'Feature_delay_key_write'; +Variable_name Value +Feature_delay_key_write 1 +flush tables; +insert t1 values (1,2),(2,3),(3,4); +show status like 'Feature_delay_key_write'; +Variable_name Value +Feature_delay_key_write 2 +alter online table t1 delay_key_write=0; +show status like 'Feature_delay_key_write'; +Variable_name Value +Feature_delay_key_write 2 +flush tables; +insert t1 values (1,2),(2,3),(3,4); +show status like 'Feature_delay_key_write'; +Variable_name Value +Feature_delay_key_write 2 +drop table t1; diff --git a/mysql-test/r/analyze_format_json.result b/mysql-test/r/analyze_format_json.result index e5edc1e344b..e077f919aa0 100644 --- a/mysql-test/r/analyze_format_json.result +++ b/mysql-test/r/analyze_format_json.result @@ -614,22 +614,24 @@ ANALYZE }, "block-nl-join": { "table": { - "table_name": "<subquery2>", + "table_name": "t2", "access_type": "ALL", - "possible_keys": ["distinct_key"], "r_loops": 1, "rows": 2, "r_rows": 2, "r_total_time_ms": "REPLACED", "filtered": 100, - "r_filtered": 100 + "r_filtered": 0, + "attached_condition": "<in_optimizer>(t2.b,t2.b in (subquery#2))" }, "buffer_type": "flat", "buffer_size": "256Kb", "join_type": "BNL", - "r_filtered": 100, - "materialized": { - "unique": 1, + "attached_condition": 
"<in_optimizer>(t2.b,t2.b in (subquery#2))", + "r_filtered": null + }, + "subqueries": [ + { "query_block": { "select_id": 2, "r_loops": 1, @@ -646,24 +648,7 @@ ANALYZE } } } - }, - "block-nl-join": { - "table": { - "table_name": "t2", - "access_type": "ALL", - "r_loops": 1, - "rows": 2, - "r_rows": 2, - "r_total_time_ms": "REPLACED", - "filtered": 100, - "r_filtered": 100 - }, - "buffer_type": "incremental", - "buffer_size": "256Kb", - "join_type": "BNL", - "attached_condition": "t2.b = `<subquery2>`.a", - "r_filtered": 0 - } + ] } } drop table t1,t2; diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result index 798faaa1198..dca48c07fe9 100644 --- a/mysql-test/r/create.result +++ b/mysql-test/r/create.result @@ -667,7 +667,7 @@ create table t1 (a int); create table t1 select * from t1; ERROR 42S01: Table 't1' already exists create table t2 union = (t1) select * from t1; -ERROR HY000: 'test.t2' is not BASE TABLE +ERROR HY000: 'test.t2' is not of type 'BASE TABLE' flush tables with read lock; unlock tables; drop table t1; @@ -701,7 +701,7 @@ create database mysqltest; use mysqltest; create view v1 as select 'foo' from dual; create table t1 like v1; -ERROR HY000: 'mysqltest.v1' is not BASE TABLE +ERROR HY000: 'mysqltest.v1' is not of type 'BASE TABLE' drop view v1; drop database mysqltest; create database mysqltest; diff --git a/mysql-test/r/cte_nonrecursive.result b/mysql-test/r/cte_nonrecursive.result index 317d8bd05df..262b49e9a60 100644 --- a/mysql-test/r/cte_nonrecursive.result +++ b/mysql-test/r/cte_nonrecursive.result @@ -507,6 +507,7 @@ select t.a, count(*) from t1,t where t1.a=t.a group by t.a; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where; Using temporary; Using filesort 1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join) +1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 35 func 1 3 MATERIALIZED t2 ALL NULL NULL NULL NULL 4 Using 
where 3 MATERIALIZED t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join) explain @@ -522,6 +523,7 @@ where t1.a=t.a group by t.a; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where; Using temporary; Using filesort 1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join) +1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 35 func 1 3 MATERIALIZED t2 ALL NULL NULL NULL NULL 4 Using where 3 MATERIALIZED t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join) # with clause in the specification of a derived table diff --git a/mysql-test/r/derived_view.result b/mysql-test/r/derived_view.result index e986b486bf2..deba9b1f09a 100644 --- a/mysql-test/r/derived_view.result +++ b/mysql-test/r/derived_view.result @@ -2916,5 +2916,64 @@ Handler_read_rnd_deleted 0 Handler_read_rnd_next 27 deallocate prepare stmt1; drop table t1,t2; +# +# Bug mdev-12670: mergeable derived / view with subqueries +# subject to semi-join optimizations +# (actually this is a 5.3 bug.) 
+# +create table t1 (a int) engine=myisam; +insert into t1 values (5),(3),(2),(7),(2),(5),(1); +create table t2 (b int, index idx(b)) engine=myisam; +insert into t2 values (2),(3),(2),(1),(3),(4); +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +analyze table t1,t2; +Table Op Msg_type Msg_text +test.t1 analyze status OK +test.t2 analyze status OK +explain select a from t1 where a in (select b from t2); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where +1 PRIMARY t2 ref idx idx 5 test.t1.a 140 Using index; FirstMatch(t1) +explain select * from (select a from t1 where a in (select b from t2)) t; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where +1 PRIMARY t2 ref idx idx 5 test.t1.a 140 Using index; FirstMatch(t1) +create view v1 as select a from t1 where a in (select b from t2); +explain select * from v1; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where +1 PRIMARY t2 ref idx idx 5 test.t1.a 140 Using index; FirstMatch(t1) +drop view v1; +drop table t1,t2; +# +# Bug mdev-12812: mergeable derived / view with subqueries +# NOT subject to semi-join optimizations +# +CREATE TABLE t1 (c1 varchar(3)) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('foo'),('foo'); +CREATE TABLE t2 (c2 varchar(3)) ENGINE=MyISAM; +INSERT INTO t2 VALUES ('bar'),('qux'),('foo'); +SELECT STRAIGHT_JOIN * +FROM ( SELECT * FROM t1 WHERE c1 IN ( SELECT c2 FROM t2 ) ) AS sq; +c1 +foo +foo +EXPLAIN EXTENDED SELECT STRAIGHT_JOIN * +FROM ( SELECT * FROM t1 WHERE c1 IN ( SELECT c2 FROM t2 ) 
) AS sq; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where +3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where +Warnings: +Note 1003 /* select#1 */ select straight_join `test`.`t1`.`c1` AS `c1` from `test`.`t1` where <in_optimizer>(`test`.`t1`.`c1`,<exists>(/* select#3 */ select `test`.`t2`.`c2` from `test`.`t2` where <cache>(`test`.`t1`.`c1`) = `test`.`t2`.`c2`)) +DROP TABLE t1, t2; set optimizer_switch=@exit_optimizer_switch; set join_cache_level=@exit_join_cache_level; diff --git a/mysql-test/r/flush-innodb.result b/mysql-test/r/flush-innodb.result index bd09f0d62dc..21e5bda7785 100644 --- a/mysql-test/r/flush-innodb.result +++ b/mysql-test/r/flush-innodb.result @@ -60,7 +60,7 @@ DROP TABLE export; CREATE VIEW v1 AS SELECT 1; CREATE TEMPORARY TABLE t1 (a INT); FLUSH TABLES v1 FOR EXPORT; -ERROR HY000: 'test.v1' is not BASE TABLE +ERROR HY000: 'test.v1' is not of type 'BASE TABLE' FLUSH TABLES t1 FOR EXPORT; ERROR 42S02: Table 'test.t1' doesn't exist FLUSH TABLES non_existent FOR EXPORT; diff --git a/mysql-test/r/flush.result b/mysql-test/r/flush.result index 2c651b71a66..5cd4fde477d 100644 --- a/mysql-test/r/flush.result +++ b/mysql-test/r/flush.result @@ -295,16 +295,16 @@ create view v1 as select 1; create view v2 as select * from t1; create view v3 as select * from v2; flush table v1, v2, v3 with read lock; -ERROR HY000: 'test.v1' is not BASE TABLE +ERROR HY000: 'test.v1' is not of type 'BASE TABLE' flush table v1 with read lock; -ERROR HY000: 'test.v1' is not BASE TABLE +ERROR HY000: 'test.v1' is not of type 'BASE TABLE' flush table v2 with read lock; -ERROR HY000: 'test.v2' is not BASE TABLE +ERROR HY000: 'test.v2' is not of type 'BASE TABLE' flush table v3 with read lock; -ERROR HY000: 'test.v3' is not BASE TABLE +ERROR HY000: 'test.v3' is not of type 'BASE TABLE' create temporary table v1 (a int); flush table v1 with read lock; -ERROR HY000: 'test.v1' 
is not BASE TABLE +ERROR HY000: 'test.v1' is not of type 'BASE TABLE' drop view v1; create table v1 (a int); flush table v1 with read lock; diff --git a/mysql-test/r/func_group.result b/mysql-test/r/func_group.result index df953630a8b..3340328cad6 100644 --- a/mysql-test/r/func_group.result +++ b/mysql-test/r/func_group.result @@ -2448,6 +2448,15 @@ DROP TABLE t1; # End of 10.1 tests # # +# MDEV-13064: assertion `n < m_size' fails in Item::split_sum_func2() +# +create table t1 (i int) engine=MyISAM; +insert into t1 value (1),(2); +select count(*)+sleep(0) from t1; +count(*)+sleep(0) +2 +drop table t1; +# # Start of 10.3 tests # # diff --git a/mysql-test/r/func_hybrid_type.result b/mysql-test/r/func_hybrid_type.result index ee108c165df..fe45338b36f 100644 --- a/mysql-test/r/func_hybrid_type.result +++ b/mysql-test/r/func_hybrid_type.result @@ -3570,6 +3570,153 @@ t1 CREATE TABLE `t1` ( `c2` bigint(11) NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; +CREATE TABLE t1 AS SELECT +9 AS i1, COALESCE(9) AS c1, +99 AS i2, COALESCE(99) AS c2, +999 AS i3, COALESCE(999) AS c3, +9999 AS i4, COALESCE(9999) AS c4, +99999 AS i5, COALESCE(99999) AS c5, +999999 AS i6, COALESCE(999999) AS c6, +9999999 AS i7, COALESCE(9999999) AS c7, +99999999 AS i8, COALESCE(99999999) AS c8, +999999999 AS i9, COALESCE(999999999) AS c9, +2147483647, COALESCE(2147483647), +2147483648, COALESCE(2147483648), +9999999999 AS i10, COALESCE(9999999999) AS c10, +99999999999 AS i11, COALESCE(99999999999) AS c11, +999999999999 AS i12, COALESCE(999999999999) AS c12, +9999999999999 AS i13, COALESCE(9999999999999) AS c13, +99999999999999 AS i14, COALESCE(99999999999999) AS c14, +999999999999999 AS i15, COALESCE(999999999999999) AS c15, +9999999999999999 AS i16, COALESCE(9999999999999999) AS c16, +99999999999999999 AS i17, COALESCE(99999999999999999) AS c17, +999999999999999999 AS i18, COALESCE(999999999999999999) AS c18, +9223372036854775807, COALESCE(9223372036854775807), +9223372036854775808, 
COALESCE(9223372036854775808), +9999999999999999999 AS i19, COALESCE(9999999999999999999) AS c19, +18446744073709551615, COALESCE(18446744073709551615), +18446744073709551616, COALESCE(18446744073709551616), +99999999999999999999 AS i20, COALESCE(99999999999999999999) AS c20, +999999999999999999999 AS i21, COALESCE(999999999999999999999) AS c21, +9999999999999999999999 AS i22, COALESCE(9999999999999999999999) AS c22; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i1` int(1) NOT NULL, + `c1` int(1) NOT NULL, + `i2` int(2) NOT NULL, + `c2` int(2) NOT NULL, + `i3` int(3) NOT NULL, + `c3` int(3) NOT NULL, + `i4` int(4) NOT NULL, + `c4` int(4) NOT NULL, + `i5` int(5) NOT NULL, + `c5` int(5) NOT NULL, + `i6` int(6) NOT NULL, + `c6` int(6) NOT NULL, + `i7` int(7) NOT NULL, + `c7` int(7) NOT NULL, + `i8` int(8) NOT NULL, + `c8` int(8) NOT NULL, + `i9` int(9) NOT NULL, + `c9` int(9) NOT NULL, + `2147483647` bigint(10) NOT NULL, + `COALESCE(2147483647)` bigint(10) NOT NULL, + `2147483648` bigint(10) NOT NULL, + `COALESCE(2147483648)` bigint(10) NOT NULL, + `i10` bigint(10) NOT NULL, + `c10` bigint(10) NOT NULL, + `i11` bigint(11) NOT NULL, + `c11` bigint(11) NOT NULL, + `i12` bigint(12) NOT NULL, + `c12` bigint(12) NOT NULL, + `i13` bigint(13) NOT NULL, + `c13` bigint(13) NOT NULL, + `i14` bigint(14) NOT NULL, + `c14` bigint(14) NOT NULL, + `i15` bigint(15) NOT NULL, + `c15` bigint(15) NOT NULL, + `i16` bigint(16) NOT NULL, + `c16` bigint(16) NOT NULL, + `i17` bigint(17) NOT NULL, + `c17` bigint(17) NOT NULL, + `i18` bigint(18) NOT NULL, + `c18` bigint(18) NOT NULL, + `9223372036854775807` bigint(19) NOT NULL, + `COALESCE(9223372036854775807)` bigint(19) NOT NULL, + `9223372036854775808` bigint(19) unsigned NOT NULL, + `COALESCE(9223372036854775808)` bigint(19) unsigned NOT NULL, + `i19` bigint(19) unsigned NOT NULL, + `c19` bigint(19) unsigned NOT NULL, + `18446744073709551615` bigint(20) unsigned NOT NULL, + `COALESCE(18446744073709551615)` 
bigint(20) unsigned NOT NULL, + `18446744073709551616` decimal(20,0) NOT NULL, + `COALESCE(18446744073709551616)` decimal(20,0) NOT NULL, + `i20` decimal(20,0) NOT NULL, + `c20` decimal(20,0) NOT NULL, + `i21` decimal(21,0) NOT NULL, + `c21` decimal(21,0) NOT NULL, + `i22` decimal(22,0) NOT NULL, + `c22` decimal(22,0) NOT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +SELECT * FROM t1; +i1 9 +c1 9 +i2 99 +c2 99 +i3 999 +c3 999 +i4 9999 +c4 9999 +i5 99999 +c5 99999 +i6 999999 +c6 999999 +i7 9999999 +c7 9999999 +i8 99999999 +c8 99999999 +i9 999999999 +c9 999999999 +2147483647 2147483647 +COALESCE(2147483647) 2147483647 +2147483648 2147483648 +COALESCE(2147483648) 2147483648 +i10 9999999999 +c10 9999999999 +i11 99999999999 +c11 99999999999 +i12 999999999999 +c12 999999999999 +i13 9999999999999 +c13 9999999999999 +i14 99999999999999 +c14 99999999999999 +i15 999999999999999 +c15 999999999999999 +i16 9999999999999999 +c16 9999999999999999 +i17 99999999999999999 +c17 99999999999999999 +i18 999999999999999999 +c18 999999999999999999 +9223372036854775807 9223372036854775807 +COALESCE(9223372036854775807) 9223372036854775807 +9223372036854775808 9223372036854775808 +COALESCE(9223372036854775808) 9223372036854775808 +i19 9999999999999999999 +c19 9999999999999999999 +18446744073709551615 18446744073709551615 +COALESCE(18446744073709551615) 18446744073709551615 +18446744073709551616 18446744073709551616 +COALESCE(18446744073709551616) 18446744073709551616 +i20 99999999999999999999 +c20 99999999999999999999 +i21 999999999999999999999 +c21 999999999999999999999 +i22 9999999999999999999999 +c22 9999999999999999999999 +DROP TABLE t1; # # MDEV-9406 CREATE TABLE..SELECT creates different columns for IFNULL() and equivalent COALESCE,CASE,IF # diff --git a/mysql-test/r/func_regexp_pcre.result b/mysql-test/r/func_regexp_pcre.result index 18aa7ed8379..266ea6c5eef 100644 --- a/mysql-test/r/func_regexp_pcre.result +++ b/mysql-test/r/func_regexp_pcre.result @@ -880,3 +880,17 @@ SET 
@regCheck= '\\xE0\\x01'; SELECT CAST(0xE001 AS BINARY) REGEXP @regCheck; CAST(0xE001 AS BINARY) REGEXP @regCheck 1 +# MDEV-12420: Testing recursion overflow +SELECT 1 FROM dual WHERE ('Alpha,Bravo,Charlie,Delta,Echo,Foxtrot,StrataCentral,Golf,Hotel,India,Juliet,Kilo,Lima,Mike,StrataL3,November,Oscar,StrataL2,Sand,P3,P4SwitchTest,Arsys,Poppa,ExtensionMgr,Arp,Quebec,Romeo,StrataApiV2,PtReyes,Sierra,SandAcl,Arrow,Artools,BridgeTest,Tango,SandT,PAlaska,Namespace,Agent,Qos,PatchPanel,ProjectReport,Ark,Gimp,Agent,SliceAgent,Arnet,Bgp,Ale,Tommy,Central,AsicPktTestLib,Hsc,SandL3,Abuild,Pca9555,Standby,ControllerDut,CalSys,SandLib,Sb820,PointV2,BfnLib,Evpn,BfnSdk,Sflow,ManagementActive,AutoTest,GatedTest,Bgp,Sand,xinetd,BfnAgentLib,bf-utils,Hello,BfnState,Eos,Artest,Qos,Scd,ThermoMgr,Uniform,EosUtils,Eb,FanController,Central,BfnL3,BfnL2,tcp_wrappers,Victor,Environment,Route,Failover,Whiskey,Xray,Gimp,BfnFixed,Strata,SoCal,XApi,Msrp,XpProfile,tcpdump,PatchPanel,ArosTest,FhTest,Arbus,XpAcl,MacConc,XpApi,telnet,QosTest,Alpha2,BfnVlan,Stp,VxlanControllerTest,MplsAgent,Bravo2,Lanz,BfnMbb,Intf,XCtrl,Unicast,SandTunnel,L3Unicast,Ipsec,MplsTest,Rsvp,EthIntf,StageMgr,Sol,MplsUtils,Nat,Ira,P4NamespaceDut,Counters,Charlie2,Aqlc,Mlag,Power,OpenFlow,Lag,RestApi,BfdTest,strongs,Sfa,CEosUtils,Adt746,MaintenanceMode,MlagDut,EosImage,IpEth,MultiProtocol,Launcher,Max3179,Snmp,Acl,IpEthTest,PhyEee,bf-syslibs,tacc,XpL2,p4-ar-switch,p4-bf-switch,LdpTest,BfnPhy,Mirroring,Phy6,Ptp' REGEXP '^((?!\b(Strata|StrataApi|StrataApiV2)\b).)*$'); +1 +Warnings: +Warning 1139 Got error 'pcre_exec: recursion limit of NUM exceeded' from regexp +SELECT REGEXP_INSTR('a_kollision', 'oll'); +REGEXP_INSTR('a_kollision', 'oll') +4 +SELECT REGEXP_INSTR('a_kollision', '(oll)'); +REGEXP_INSTR('a_kollision', '(oll)') +4 +SELECT REGEXP_INSTR('a_kollision', 'o([lm])\\1'); +REGEXP_INSTR('a_kollision', 'o([lm])\\1') +4 diff --git a/mysql-test/r/grant.result b/mysql-test/r/grant.result index 3e3c84d4300..9e16cf370d8 100644 
--- a/mysql-test/r/grant.result +++ b/mysql-test/r/grant.result @@ -1090,7 +1090,7 @@ t_nn CREATE TABLE `t_nn` ( `c1` int(11) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 SHOW CREATE VIEW mysqltest2.t_nn; -ERROR HY000: 'mysqltest2.t_nn' is not VIEW +ERROR HY000: 'mysqltest2.t_nn' is not of type 'VIEW' SHOW CREATE VIEW mysqltest2.v_yy; View Create View character_set_client collation_connection v_yy CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `mysqltest2`.`v_yy` AS select `mysqltest2`.`t_nn`.`c1` AS `c1` from `mysqltest2`.`t_nn` where `mysqltest2`.`t_nn`.`c1` = 55 latin1 latin1_swedish_ci @@ -1110,7 +1110,7 @@ t_nn CREATE TABLE `t_nn` ( `c1` int(11) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 SHOW CREATE VIEW mysqltest2.t_nn; -ERROR HY000: 'mysqltest2.t_nn' is not VIEW +ERROR HY000: 'mysqltest2.t_nn' is not of type 'VIEW' DROP VIEW mysqltest2.v_nn; DROP VIEW mysqltest2.v_yn; DROP VIEW mysqltest2.v_ny; diff --git a/mysql-test/r/innodb_ext_key.result b/mysql-test/r/innodb_ext_key.result index 1305be86e5a..c55e8d138f8 100644 --- a/mysql-test/r/innodb_ext_key.result +++ b/mysql-test/r/innodb_ext_key.result @@ -1133,5 +1133,78 @@ where index_date_updated= 10 and index_id < 800; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 range index_date_updated index_date_updated 13 NULL # Using index condition drop table t0,t1,t2; -set optimizer_switch=@save_ext_key_optimizer_switch; -SET SESSION STORAGE_ENGINE=DEFAULT; +# +# MDEV-11196: Error:Run-Time Check Failure #2 - Stack around the variable 'key_buff' +# was corrupted, server crashes in opt_sum_query +CREATE TABLE t1 ( +pk INT, +f1 VARCHAR(3), +f2 VARCHAR(1024), +PRIMARY KEY (pk), +KEY(f2) +) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC; +INSERT INTO t1 VALUES (1,'foo','abc'),(2,'bar','def'); +SELECT MAX(t2.pk) FROM t1 t2 INNER JOIN t1 t3 ON t2.f1 = t3.f1 WHERE t2.pk <= 4; +MAX(t2.pk) +2 +drop table t1; +CREATE TABLE t1 ( +pk1 INT, +pk2 INT, 
+f1 VARCHAR(3), +f2 VARCHAR(1021), +PRIMARY KEY (pk1,pk2), +KEY(f2) +) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC; +INSERT INTO t1 VALUES (1,2,'2','abc'),(2,3,'3','def'); +explain format= json +select * from t1 force index(f2) where pk1 <= 5 and pk2 <=5 and f2 = 'abc' and f1 <= '3'; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["f2"], + "key": "f2", + "key_length": "3070", + "used_key_parts": ["f2", "pk1"], + "rows": 1, + "filtered": 100, + "index_condition": "t1.pk1 <= 5 and t1.pk2 <= 5 and t1.f2 = 'abc'", + "attached_condition": "t1.f1 <= '3'" + } + } +} +drop table t1; +CREATE TABLE t1 ( +f2 INT, +pk2 INT, +f1 VARCHAR(3), +pk1 VARCHAR(1000), +PRIMARY KEY (pk1,pk2), +KEY k1(pk1,f2) +) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC; +INSERT INTO t1 VALUES (1,2,'2','abc'),(2,3,'3','def'); +explain format= json +select * from t1 force index(k1) where f2 <= 5 and pk2 <=5 and pk1 = 'abc' and f1 <= '3'; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["k1"], + "key": "k1", + "key_length": "3011", + "used_key_parts": ["pk1", "f2", "pk2"], + "rows": 1, + "filtered": 100, + "index_condition": "t1.f2 <= 5 and t1.pk2 <= 5 and t1.pk1 = 'abc'", + "attached_condition": "t1.f1 <= '3'" + } + } +} +drop table t1; diff --git a/mysql-test/r/insert_notembedded.result b/mysql-test/r/insert_notembedded.result index d2733eac061..8dd4aa7d71e 100644 --- a/mysql-test/r/insert_notembedded.result +++ b/mysql-test/r/insert_notembedded.result @@ -4,7 +4,7 @@ drop table if exists t1; create table t1 (n int); create view v1 as select * from t1; insert delayed into v1 values (1); -ERROR HY000: 'test.v1' is not BASE TABLE +ERROR HY000: 'test.v1' is not of type 'BASE TABLE' drop table t1; drop view v1; CREATE DATABASE meow; diff --git a/mysql-test/r/join_outer.result b/mysql-test/r/join_outer.result index d7b4e107a8d..46e542910a1 
100644 --- a/mysql-test/r/join_outer.result +++ b/mysql-test/r/join_outer.result @@ -2337,4 +2337,99 @@ id select_type table type possible_keys key key_len ref rows filtered Extra Warnings: Note 1003 select `test`.`t1`.`i1` AS `i1`,`test`.`t1`.`v1` AS `v1`,`test`.`t2`.`i2` AS `i2`,`test`.`t2`.`v2` AS `v2`,`test`.`t3`.`i3` AS `i3`,`test`.`t3`.`v3` AS `v3` from `test`.`t1` join `test`.`t2` join `test`.`t3` where `test`.`t3`.`v3` = 4 and `test`.`t1`.`i1` = `test`.`t3`.`i3` and `test`.`t2`.`i2` = `test`.`t3`.`i3` drop table t1,t2,t3; +# +# MDEV-11958: LEFT JOIN with stored routine produces incorrect result +# +CREATE TABLE t (x INT); +INSERT INTO t VALUES(1),(NULL); +CREATE FUNCTION f (val INT, ret INT) RETURNS INT DETERMINISTIC RETURN IFNULL(val, ret); +SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0) +FROM t t1 LEFT JOIN t t2 +ON t1.x = t2.x +WHERE IFNULL(t2.x,0)=0; +x x IFNULL(t2.x,0) f(t2.x,0) +NULL NULL 0 0 +explain extended +SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0) +FROM t t1 LEFT JOIN t t2 +ON t1.x = t2.x +WHERE IFNULL(t2.x,0)=0; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 +1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`x` AS `x`,`test`.`t2`.`x` AS `x`,ifnull(`test`.`t2`.`x`,0) AS `IFNULL(t2.x,0)`,`f`(`test`.`t2`.`x`,0) AS `f(t2.x,0)` from `test`.`t` `t1` left join `test`.`t` `t2` on(`test`.`t2`.`x` = `test`.`t1`.`x`) where ifnull(`test`.`t2`.`x`,0) = 0 +SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0) +FROM t t1 LEFT JOIN t t2 +ON t1.x = t2.x +WHERE f(t2.x,0)=0; +x x IFNULL(t2.x,0) f(t2.x,0) +NULL NULL 0 0 +explain extended +SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0) +FROM t t1 LEFT JOIN t t2 +ON t1.x = t2.x +WHERE f(t2.x,0)=0; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 +1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where +Warnings: +Note 1003 select 
`test`.`t1`.`x` AS `x`,`test`.`t2`.`x` AS `x`,ifnull(`test`.`t2`.`x`,0) AS `IFNULL(t2.x,0)`,`f`(`test`.`t2`.`x`,0) AS `f(t2.x,0)` from `test`.`t` `t1` left join `test`.`t` `t2` on(`test`.`t2`.`x` = `test`.`t1`.`x`) where `f`(`test`.`t2`.`x`,0) = 0 +drop function f; +drop table t; +CREATE TABLE t1 ( +col1 DECIMAL(33,5) NULL DEFAULT NULL, +col2 DECIMAL(33,5) NULL DEFAULT NULL +); +CREATE TABLE t2 ( +col1 DECIMAL(33,5) NULL DEFAULT NULL, +col2 DECIMAL(33,5) NULL DEFAULT NULL, +col3 DECIMAL(33,5) NULL DEFAULT NULL +); +INSERT INTO t1 VALUES (2, 1.1), (2, 2.1); +INSERT INTO t2 VALUES (3, 3.1, 4), (1, 1, NULL); +CREATE FUNCTION f1 ( p_num DECIMAL(45,15), p_return DECIMAL(45,15)) +RETURNS decimal(33,5) +LANGUAGE SQL +DETERMINISTIC +CONTAINS SQL +SQL SECURITY INVOKER +BEGIN +IF p_num IS NULL THEN +RETURN p_return; +ELSE +RETURN p_num; +END IF; +END | +SELECT t1.col1, t2.col1, t2.col3 +FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2 +WHERE IFNULL(t2.col3,0) = 0; +col1 col1 col3 +2.00000 NULL NULL +2.00000 NULL NULL +EXPLAIN EXTENDED SELECT t1.col1, t2.col1, t2.col3 +FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2 +WHERE IFNULL(t2.col3,0) = 0; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 +1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`col1` AS `col1`,`test`.`t2`.`col1` AS `col1`,`test`.`t2`.`col3` AS `col3` from `test`.`t1` left join `test`.`t2` on(`test`.`t2`.`col2` = `test`.`t1`.`col1`) where ifnull(`test`.`t2`.`col3`,0) = 0 +SELECT t1.col1, t2.col1, t2.col3 +FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2 +WHERE f1(t2.col3,0) = 0; +col1 col1 col3 +2.00000 NULL NULL +2.00000 NULL NULL +EXPLAIN EXTENDED SELECT t1.col1, t2.col1, t2.col3 +FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2 +WHERE f1(t2.col3,0) = 0; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 +1 SIMPLE t2 ALL 
NULL NULL NULL NULL 2 100.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`col1` AS `col1`,`test`.`t2`.`col1` AS `col1`,`test`.`t2`.`col3` AS `col3` from `test`.`t1` left join `test`.`t2` on(`test`.`t2`.`col2` = `test`.`t1`.`col1`) where `f1`(`test`.`t2`.`col3`,0) = 0 +DROP FUNCTION f1; +DROP TABLE t1,t2; +# end of 5.5 tests SET optimizer_switch=@save_optimizer_switch; diff --git a/mysql-test/r/join_outer_jcl6.result b/mysql-test/r/join_outer_jcl6.result index cab4c78eb3a..65e2dde409e 100644 --- a/mysql-test/r/join_outer_jcl6.result +++ b/mysql-test/r/join_outer_jcl6.result @@ -2348,6 +2348,101 @@ id select_type table type possible_keys key key_len ref rows filtered Extra Warnings: Note 1003 select `test`.`t1`.`i1` AS `i1`,`test`.`t1`.`v1` AS `v1`,`test`.`t2`.`i2` AS `i2`,`test`.`t2`.`v2` AS `v2`,`test`.`t3`.`i3` AS `i3`,`test`.`t3`.`v3` AS `v3` from `test`.`t1` join `test`.`t2` join `test`.`t3` where `test`.`t3`.`v3` = 4 and `test`.`t1`.`i1` = `test`.`t3`.`i3` and `test`.`t2`.`i2` = `test`.`t3`.`i3` drop table t1,t2,t3; +# +# MDEV-11958: LEFT JOIN with stored routine produces incorrect result +# +CREATE TABLE t (x INT); +INSERT INTO t VALUES(1),(NULL); +CREATE FUNCTION f (val INT, ret INT) RETURNS INT DETERMINISTIC RETURN IFNULL(val, ret); +SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0) +FROM t t1 LEFT JOIN t t2 +ON t1.x = t2.x +WHERE IFNULL(t2.x,0)=0; +x x IFNULL(t2.x,0) f(t2.x,0) +NULL NULL 0 0 +explain extended +SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0) +FROM t t1 LEFT JOIN t t2 +ON t1.x = t2.x +WHERE IFNULL(t2.x,0)=0; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 +1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join) +Warnings: +Note 1003 select `test`.`t1`.`x` AS `x`,`test`.`t2`.`x` AS `x`,ifnull(`test`.`t2`.`x`,0) AS `IFNULL(t2.x,0)`,`f`(`test`.`t2`.`x`,0) AS `f(t2.x,0)` from `test`.`t` `t1` left join `test`.`t` `t2` on(`test`.`t2`.`x` 
= `test`.`t1`.`x`) where ifnull(`test`.`t2`.`x`,0) = 0 +SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0) +FROM t t1 LEFT JOIN t t2 +ON t1.x = t2.x +WHERE f(t2.x,0)=0; +x x IFNULL(t2.x,0) f(t2.x,0) +NULL NULL 0 0 +explain extended +SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0) +FROM t t1 LEFT JOIN t t2 +ON t1.x = t2.x +WHERE f(t2.x,0)=0; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 +1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join) +Warnings: +Note 1003 select `test`.`t1`.`x` AS `x`,`test`.`t2`.`x` AS `x`,ifnull(`test`.`t2`.`x`,0) AS `IFNULL(t2.x,0)`,`f`(`test`.`t2`.`x`,0) AS `f(t2.x,0)` from `test`.`t` `t1` left join `test`.`t` `t2` on(`test`.`t2`.`x` = `test`.`t1`.`x`) where `f`(`test`.`t2`.`x`,0) = 0 +drop function f; +drop table t; +CREATE TABLE t1 ( +col1 DECIMAL(33,5) NULL DEFAULT NULL, +col2 DECIMAL(33,5) NULL DEFAULT NULL +); +CREATE TABLE t2 ( +col1 DECIMAL(33,5) NULL DEFAULT NULL, +col2 DECIMAL(33,5) NULL DEFAULT NULL, +col3 DECIMAL(33,5) NULL DEFAULT NULL +); +INSERT INTO t1 VALUES (2, 1.1), (2, 2.1); +INSERT INTO t2 VALUES (3, 3.1, 4), (1, 1, NULL); +CREATE FUNCTION f1 ( p_num DECIMAL(45,15), p_return DECIMAL(45,15)) +RETURNS decimal(33,5) +LANGUAGE SQL +DETERMINISTIC +CONTAINS SQL +SQL SECURITY INVOKER +BEGIN +IF p_num IS NULL THEN +RETURN p_return; +ELSE +RETURN p_num; +END IF; +END | +SELECT t1.col1, t2.col1, t2.col3 +FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2 +WHERE IFNULL(t2.col3,0) = 0; +col1 col1 col3 +2.00000 NULL NULL +2.00000 NULL NULL +EXPLAIN EXTENDED SELECT t1.col1, t2.col1, t2.col3 +FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2 +WHERE IFNULL(t2.col3,0) = 0; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 +1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join) +Warnings: +Note 1003 select `test`.`t1`.`col1` AS 
`col1`,`test`.`t2`.`col1` AS `col1`,`test`.`t2`.`col3` AS `col3` from `test`.`t1` left join `test`.`t2` on(`test`.`t2`.`col2` = `test`.`t1`.`col1`) where ifnull(`test`.`t2`.`col3`,0) = 0 +SELECT t1.col1, t2.col1, t2.col3 +FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2 +WHERE f1(t2.col3,0) = 0; +col1 col1 col3 +2.00000 NULL NULL +2.00000 NULL NULL +EXPLAIN EXTENDED SELECT t1.col1, t2.col1, t2.col3 +FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2 +WHERE f1(t2.col3,0) = 0; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 +1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join) +Warnings: +Note 1003 select `test`.`t1`.`col1` AS `col1`,`test`.`t2`.`col1` AS `col1`,`test`.`t2`.`col3` AS `col3` from `test`.`t1` left join `test`.`t2` on(`test`.`t2`.`col2` = `test`.`t1`.`col1`) where `f1`(`test`.`t2`.`col3`,0) = 0 +DROP FUNCTION f1; +DROP TABLE t1,t2; +# end of 5.5 tests SET optimizer_switch=@save_optimizer_switch; set join_cache_level=default; show variables like 'join_cache_level'; diff --git a/mysql-test/r/limit_rows_examined.result b/mysql-test/r/limit_rows_examined.result index 3bc97859303..c94599235b1 100644 --- a/mysql-test/r/limit_rows_examined.result +++ b/mysql-test/r/limit_rows_examined.result @@ -425,7 +425,7 @@ c1 bb cc Warnings: -Warning 1931 Query execution was interrupted. The query examined at least 18 rows, which exceeds LIMIT ROWS EXAMINED (16). The query result may be incomplete +Warning 1931 Query execution was interrupted. The query examined at least 17 rows, which exceeds LIMIT ROWS EXAMINED (16). 
The query result may be incomplete select * from v1 LIMIT ROWS EXAMINED 11; c1 bb @@ -438,7 +438,8 @@ from (select * from t1 where c1 IN (select * from t2 where c2 > ' ' LIMIT ROWS EXAMINED 0)) as tmp LIMIT ROWS EXAMINED 11; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where +1 PRIMARY t1 ALL NULL NULL NULL NULL 4 +1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 2 func 1 3 MATERIALIZED t2 ALL NULL NULL NULL NULL 4 Using where select * from (select * from t1 diff --git a/mysql-test/r/log_tables-big.result b/mysql-test/r/log_tables-big.result index 1e189a7726f..0a33510fe77 100644 --- a/mysql-test/r/log_tables-big.result +++ b/mysql-test/r/log_tables-big.result @@ -1,8 +1,12 @@ set @@global.log_output = 'TABLE'; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; set session long_query_time=10; select get_lock('bug27638', 1); get_lock('bug27638', 1) 1 +connection con2; set session long_query_time=1; select get_lock('bug27638', 2); get_lock('bug27638', 2) @@ -25,7 +29,11 @@ select if (query_time >= '00:01:40', 'OK', 'WRONG') as qt, sql_text from mysql.s where sql_text = 'select get_lock(\'bug27638\', 101)'; qt sql_text OK select get_lock('bug27638', 101) +connection con1; select release_lock('bug27638'); release_lock('bug27638') 1 +connection default; +disconnect con1; +disconnect con2; set @@global.log_output=default; diff --git a/mysql-test/r/mdl_sync.result b/mysql-test/r/mdl_sync.result index 1e285650c77..3880fc5ef91 100644 --- a/mysql-test/r/mdl_sync.result +++ b/mysql-test/r/mdl_sync.result @@ -3049,3 +3049,21 @@ SET DEBUG_SYNC= 'RESET'; disconnect con1; disconnect con2; disconnect con3; +# +# MDEV-12620 - set lock_wait_timeout = 1;flush tables with read lock; +# lock not released after timeout +# +CREATE TABLE t1(a INT) ENGINE=InnoDB; +SET debug_sync='open_tables_after_open_and_process_table SIGNAL ready WAIT_FOR go'; +SELECT * FROM t1; +connect 
con1,localhost,root,,; +SET debug_sync='now WAIT_FOR ready'; +SET lock_wait_timeout=1; +FLUSH TABLES WITH READ LOCK; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +SET debug_sync='now SIGNAL go'; +connection default; +a +SET debug_sync='RESET'; +DROP TABLE t1; +disconnect con1; diff --git a/mysql-test/r/merge.result b/mysql-test/r/merge.result index da194c7e11c..f468f47c4c9 100644 --- a/mysql-test/r/merge.result +++ b/mysql-test/r/merge.result @@ -629,7 +629,7 @@ INSERT INTO t3 VALUES (3), (33); LOCK TABLES t3 READ; CREATE TEMPORARY TABLE t4 (c1 INT NOT NULL) ENGINE=MERGE UNION=(t1,t2) INSERT_METHOD=LAST SELECT * FROM t3; -ERROR HY000: 'test.t4' is not BASE TABLE +ERROR HY000: 'test.t4' is not of type 'BASE TABLE' SELECT * FROM t4; ERROR HY000: Table 't4' was not locked with LOCK TABLES UNLOCK TABLES; @@ -702,11 +702,11 @@ create table t2 (a int); insert into t1 values (0); insert into t2 values (1); create table t3 engine=merge union=(t1, t2) select * from t1; -ERROR HY000: 'test.t3' is not BASE TABLE +ERROR HY000: 'test.t3' is not of type 'BASE TABLE' create table t3 engine=merge union=(t1, t2) select * from t2; -ERROR HY000: 'test.t3' is not BASE TABLE +ERROR HY000: 'test.t3' is not of type 'BASE TABLE' create table t3 engine=merge union=(t1, t2) select (select max(a) from t2); -ERROR HY000: 'test.t3' is not BASE TABLE +ERROR HY000: 'test.t3' is not of type 'BASE TABLE' drop table t1, t2; create table t1 ( a double(14,4), @@ -1176,7 +1176,7 @@ SHOW CREATE TABLE t3; ERROR 42S02: Table 'test.t3' doesn't exist CREATE TABLE t3 ENGINE=MRG_MYISAM UNION=(t1) INSERT_METHOD=LAST SELECT * FROM t2; -ERROR HY000: 'test.t3' is not BASE TABLE +ERROR HY000: 'test.t3' is not of type 'BASE TABLE' SHOW CREATE TABLE t3; ERROR 42S02: Table 'test.t3' doesn't exist DROP TABLE t1, t2; @@ -3115,11 +3115,11 @@ DROP TABLE m2; # CREATE TABLE m2 (c1 INT, c2 INT) ENGINE=MRG_MyISAM UNION=(t3,t4) INSERT_METHOD=LAST SELECT * FROM m1; -ERROR HY000: 'test.m2' is not BASE 
TABLE +ERROR HY000: 'test.m2' is not of type 'BASE TABLE' # CREATE TEMPORARY TABLE m2 (c1 INT, c2 INT) ENGINE=MRG_MyISAM UNION=(t3,t4) INSERT_METHOD=LAST SELECT * FROM m1; -ERROR HY000: 'test.m2' is not BASE TABLE +ERROR HY000: 'test.m2' is not of type 'BASE TABLE' # CREATE TABLE m2 LIKE m1; SHOW CREATE TABLE m2; @@ -3519,7 +3519,7 @@ Got one of the listed errors # CREATE TEMPORARY TABLE m2 (c1 INT, c2 INT) ENGINE=MRG_MyISAM UNION=(t3,t4) INSERT_METHOD=LAST SELECT * FROM m1; -ERROR HY000: 'test.m2' is not BASE TABLE +ERROR HY000: 'test.m2' is not of type 'BASE TABLE' # CREATE TEMPORARY TABLE m2 LIKE m1; SHOW CREATE TABLE m2; diff --git a/mysql-test/r/mix2_myisam.result b/mysql-test/r/mix2_myisam.result index b282be15a56..34764466d2a 100644 --- a/mysql-test/r/mix2_myisam.result +++ b/mysql-test/r/mix2_myisam.result @@ -1549,6 +1549,9 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ref v v 13 const # Using where; Using index alter table t1 add unique(v); ERROR 23000: Duplicate entry '{ ' for key 'v_2' +show warnings; +Level Code Message +Error 1062 Duplicate entry 'a' for key 'v_2' alter table t1 add key(v); Warnings: Note 1831 Duplicate index `v_2`. This is deprecated and will be disallowed in a future release diff --git a/mysql-test/r/mrr_icp_extra.result b/mysql-test/r/mrr_icp_extra.result index 5706bf21895..95788b393dd 100644 --- a/mysql-test/r/mrr_icp_extra.result +++ b/mysql-test/r/mrr_icp_extra.result @@ -350,6 +350,9 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ref v v 13 const # Using where; Using index alter table t1 add unique(v); ERROR 23000: Duplicate entry '{ ' for key 'v_2' +show warnings; +Level Code Message +Error 1062 Duplicate entry 'a' for key 'v_2' alter table t1 add key(v); Warnings: Note 1831 Duplicate index `v_2`. 
This is deprecated and will be disallowed in a future release diff --git a/mysql-test/r/myisam.result b/mysql-test/r/myisam.result index f7eb5db6468..5b41b35c1ba 100644 --- a/mysql-test/r/myisam.result +++ b/mysql-test/r/myisam.result @@ -1255,6 +1255,9 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ref v v 13 const # Using where; Using index alter table t1 add unique(v); ERROR 23000: Duplicate entry '{ ' for key 'v_2' +show warnings; +Level Code Message +Error 1062 Duplicate entry 'a' for key 'v_2' alter table t1 add key(v); Warnings: Note 1831 Duplicate index `v_2`. This is deprecated and will be disallowed in a future release diff --git a/mysql-test/r/myisam_debug.result b/mysql-test/r/myisam_debug.result index 39cbd69cdb0..6232e3eac0e 100644 --- a/mysql-test/r/myisam_debug.result +++ b/mysql-test/r/myisam_debug.result @@ -29,3 +29,15 @@ Table Op Msg_type Msg_text test.t1 check status OK DROP TABLE t1,t2; disconnect insertConn; +call mtr.add_suppression("Index for table '.*test.t1\\.MYI' is corrupt; try to repair it"); +create table t1 (a int, index(a)); +lock tables t1 write; +insert t1 values (1),(2),(1); +set @old_dbug=@@debug_dbug; +set debug_dbug='+d,mi_lock_database_failure'; +unlock tables; +Warnings: +Error 126 Index for table './test/t1.MYI' is corrupt; try to repair it +Error 1030 Got error 22 "Invalid argument" from storage engine MyISAM +set debug_dbug=@old_dbug; +drop table t1; diff --git a/mysql-test/r/mysqld--help.result b/mysql-test/r/mysqld--help.result index 564269319cb..1fd365e3029 100644 --- a/mysql-test/r/mysqld--help.result +++ b/mysql-test/r/mysqld--help.result @@ -1404,7 +1404,7 @@ performance-schema-max-rwlock-instances -1 performance-schema-max-socket-classes 10 performance-schema-max-socket-instances -1 performance-schema-max-stage-classes 150 -performance-schema-max-statement-classes 190 +performance-schema-max-statement-classes 191 performance-schema-max-table-handles -1 
performance-schema-max-table-instances -1 performance-schema-max-thread-classes 50 diff --git a/mysql-test/r/partition_alter.result b/mysql-test/r/partition_alter.result index cbd90b5ba7c..76b55cefb07 100644 --- a/mysql-test/r/partition_alter.result +++ b/mysql-test/r/partition_alter.result @@ -51,3 +51,50 @@ execute stmt; execute stmt; deallocate prepare stmt; drop table test_data; +create table t1(id int, d date not null, b bool not null default 0, primary key(id,d)) +engine=innodb +partition by range columns (d) ( +partition p1 values less than ('2016-10-18'), +partition p2 values less than ('2020-10-19')); +insert t1 values (0, '2000-01-02', 0); +insert t1 values (1, '2020-01-02', 10); +alter table t1 add check (b in (0, 1)); +ERROR 23000: CONSTRAINT `CONSTRAINT_1` failed for `test`.`#sql-temporary` +alter table t1 add check (b in (0, 10)); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `id` int(11) NOT NULL, + `d` date NOT NULL, + `b` tinyint(1) NOT NULL DEFAULT 0, + PRIMARY KEY (`id`,`d`), + CONSTRAINT `CONSTRAINT_1` CHECK (`b` in (0,10)) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 + PARTITION BY RANGE COLUMNS(d) +(PARTITION p1 VALUES LESS THAN ('2016-10-18') ENGINE = InnoDB, + PARTITION p2 VALUES LESS THAN ('2020-10-19') ENGINE = InnoDB) +insert t1 values (2, '2020-01-03', 20); +ERROR 23000: CONSTRAINT `CONSTRAINT_1` failed for `test`.`t1` +drop table t1; +create table t1(id int, d date not null, b bool not null default 0, primary key(id,d)) +partition by range columns (d) ( +partition p1 values less than ('2016-10-18'), +partition p2 values less than ('2020-10-19')); +insert t1 values (0, '2000-01-02', 0); +insert t1 values (1, '2020-01-02', 10); +alter table t1 add check (b in (0, 1)); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `id` int(11) NOT NULL, + `d` date NOT NULL, + `b` tinyint(1) NOT NULL DEFAULT 0, + PRIMARY KEY (`id`,`d`), + CONSTRAINT `CONSTRAINT_1` CHECK (`b` in (0,1)) +) ENGINE=MyISAM DEFAULT 
CHARSET=latin1 + PARTITION BY RANGE COLUMNS(d) +(PARTITION p1 VALUES LESS THAN ('2016-10-18') ENGINE = MyISAM, + PARTITION p2 VALUES LESS THAN ('2020-10-19') ENGINE = MyISAM) +insert t1 values (2, '2020-01-03', 20); +ERROR 23000: CONSTRAINT `CONSTRAINT_1` failed for `test`.`t1` +drop table t1; diff --git a/mysql-test/r/rename.result b/mysql-test/r/rename.result index 2b8c925b309..ff8566abe02 100644 --- a/mysql-test/r/rename.result +++ b/mysql-test/r/rename.result @@ -74,7 +74,7 @@ End of 4.1 tests create table t1(f1 int); create view v1 as select * from t1; alter table v1 rename to v2; -ERROR HY000: 'test.v1' is not BASE TABLE +ERROR HY000: 'test.v1' is not of type 'BASE TABLE' drop view v1; drop table t1; End of 5.0 tests diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result index 641c756691e..71ca5de2990 100644 --- a/mysql-test/r/sp.result +++ b/mysql-test/r/sp.result @@ -4329,57 +4329,57 @@ Table Op Msg_type Msg_text test.t1 repair status OK test.t2 repair status OK test.t3 repair status OK -test.v1 repair Error 'test.v1' is not BASE TABLE +test.v1 repair Error 'test.v1' is not of type 'BASE TABLE' test.v1 repair status Operation failed Table Op Msg_type Msg_text test.t1 optimize status OK test.t2 optimize status OK test.t3 optimize status OK -test.v1 optimize Error 'test.v1' is not BASE TABLE +test.v1 optimize Error 'test.v1' is not of type 'BASE TABLE' test.v1 optimize status Operation failed Table Op Msg_type Msg_text test.t1 analyze status Table is already up to date test.t2 analyze status Table is already up to date test.t3 analyze status Table is already up to date -test.v1 analyze Error 'test.v1' is not BASE TABLE +test.v1 analyze Error 'test.v1' is not of type 'BASE TABLE' test.v1 analyze status Operation failed call bug13012()| Table Op Msg_type Msg_text test.t1 repair status OK test.t2 repair status OK test.t3 repair status OK -test.v1 repair Error 'test.v1' is not BASE TABLE +test.v1 repair Error 'test.v1' is not of type 'BASE TABLE' test.v1 
repair status Operation failed Table Op Msg_type Msg_text test.t1 optimize status OK test.t2 optimize status OK test.t3 optimize status OK -test.v1 optimize Error 'test.v1' is not BASE TABLE +test.v1 optimize Error 'test.v1' is not of type 'BASE TABLE' test.v1 optimize status Operation failed Table Op Msg_type Msg_text test.t1 analyze status Table is already up to date test.t2 analyze status Table is already up to date test.t3 analyze status Table is already up to date -test.v1 analyze Error 'test.v1' is not BASE TABLE +test.v1 analyze Error 'test.v1' is not of type 'BASE TABLE' test.v1 analyze status Operation failed call bug13012()| Table Op Msg_type Msg_text test.t1 repair status OK test.t2 repair status OK test.t3 repair status OK -test.v1 repair Error 'test.v1' is not BASE TABLE +test.v1 repair Error 'test.v1' is not of type 'BASE TABLE' test.v1 repair status Operation failed Table Op Msg_type Msg_text test.t1 optimize status OK test.t2 optimize status OK test.t3 optimize status OK -test.v1 optimize Error 'test.v1' is not BASE TABLE +test.v1 optimize Error 'test.v1' is not of type 'BASE TABLE' test.v1 optimize status Operation failed Table Op Msg_type Msg_text test.t1 analyze status Table is already up to date test.t2 analyze status Table is already up to date test.t3 analyze status Table is already up to date -test.v1 analyze Error 'test.v1' is not BASE TABLE +test.v1 analyze Error 'test.v1' is not of type 'BASE TABLE' test.v1 analyze status Operation failed drop procedure bug13012| drop view v1| diff --git a/mysql-test/r/subselect_innodb.result b/mysql-test/r/subselect_innodb.result index 01257c33361..ec7f2c0a3d5 100644 --- a/mysql-test/r/subselect_innodb.result +++ b/mysql-test/r/subselect_innodb.result @@ -576,3 +576,42 @@ id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL # 2 DEPENDENT SUBQUERY t2 ref key1 key1 5 test.t1.a # Using where; Using filesort drop table t1,t2; +# +# mdev-12931: semi-join in ON 
expression of STRAIGHT_JOIN +# joining a base table and a mergeable derived table +# +CREATE TABLE t1 (f1 int) ENGINE=InnoDB; +INSERT INTO t1 VALUES (3),(2); +CREATE TABLE t2 (f2 int) ENGINE=InnoDB; +INSERT INTO t2 VALUES (1),(4); +CREATE TABLE t3 (f3 int) ENGINE=InnoDB; +INSERT INTO t3 VALUES (5),(6); +CREATE TABLE t4 (f4 int) ENGINE=InnoDB; +INSERT INTO t4 VALUES (1),(8); +SELECT * +FROM t1 +INNER JOIN +( t2 STRAIGHT_JOIN ( SELECT * FROM t3 ) AS sq +ON ( 1 IN ( SELECT f4 FROM t4 ) ) ) +ON ( f1 >= f2 ); +f1 f2 f3 +3 1 5 +2 1 5 +3 1 6 +2 1 6 +EXPLAIN EXTENDED +SELECT * +FROM t1 +INNER JOIN +( t2 STRAIGHT_JOIN ( SELECT * FROM t3 ) AS sq +ON ( 1 IN ( SELECT f4 FROM t4 ) ) ) +ON ( f1 >= f2 ); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1 100.00 +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using join buffer (flat, BNL join) +1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (incremental, BNL join) +1 PRIMARY t3 ALL NULL NULL NULL NULL 2 100.00 Using join buffer (incremental, BNL join) +3 MATERIALIZED t4 ALL NULL NULL NULL NULL 2 100.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`f1` AS `f1`,`test`.`t2`.`f2` AS `f2`,`test`.`t3`.`f3` AS `f3` from `test`.`t1` join `test`.`t2` semi join (`test`.`t4`) join `test`.`t3` where `test`.`t4`.`f4` = 1 and `test`.`t1`.`f1` >= `test`.`t2`.`f2` +DROP TABLE t1,t2,t3,t4; diff --git a/mysql-test/r/subselect_mat_cost_bugs.result b/mysql-test/r/subselect_mat_cost_bugs.result index b4ddd5e5849..f5d5be8f726 100644 --- a/mysql-test/r/subselect_mat_cost_bugs.result +++ b/mysql-test/r/subselect_mat_cost_bugs.result @@ -379,6 +379,7 @@ drop table t3, t4, t5; # # LP BUG#858038 The result of a query with NOT IN subquery depends on the state of the optimizer switch # +set @optimizer_switch_save= @@optimizer_switch; create table t1 (c1 char(2) not null, c2 char(2)); create table t2 (c3 char(2), c4 char(2)); insert 
into t1 values ('a1', 'b1'); @@ -400,6 +401,7 @@ id select_type table type possible_keys key key_len ref rows Extra select * from t1 where c1 = 'a2' and (c1, c2) not in (select * from t2); c1 c2 drop table t1, t2; +set optimizer_switch= @optimizer_switch_save; # # MDEV-12673: cost-based choice between materialization and in-to-exists # @@ -442,3 +444,44 @@ id select_type table type possible_keys key key_len ref rows Extra 2 DEPENDENT SUBQUERY t3 const PRIMARY PRIMARY 4 const 1 2 DEPENDENT SUBQUERY t2 index NULL i2 11 NULL 2 Using where; Using index DROP TABLE t1,t2,t3; +# +# MDEV-7599: in-to-exists chosen after min/max optimization +# +set @optimizer_switch_save= @@optimizer_switch; +CREATE TABLE t1 (a INT, KEY(a)) ENGINE=MyISAM; +INSERT INTO t1 VALUES (1),(2); +CREATE TABLE t2 (b INT, c INT) ENGINE=MyISAM; +INSERT INTO t2 VALUES (1,6),(2,4), (8,9); +SELECT * FROM t2 WHERE b != ALL (SELECT MIN(a) FROM t1, t2 WHERE t2.c = t2.b); +b c +EXPLAIN EXTENDED SELECT * FROM t2 WHERE b != ALL (SELECT MIN(a) FROM t1, t2 WHERE t2.c = t2.b); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00 Using where +2 MATERIALIZED t1 index NULL a 5 NULL 2 100.00 Using index +2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join) +Warnings: +Note 1003 /* select#1 */ select `test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` where !<expr_cache><`test`.`t2`.`b`>(<in_optimizer>(`test`.`t2`.`b`,`test`.`t2`.`b` in ( <materialize> (/* select#2 */ select min(`test`.`t1`.`a`) from `test`.`t1` join `test`.`t2` where `test`.`t2`.`c` = `test`.`t2`.`b` ), <primary_index_lookup>(`test`.`t2`.`b` in <temporary table> on distinct_key where `test`.`t2`.`b` = `<subquery2>`.`MIN(a)`)))) +set optimizer_switch= 'materialization=off'; +SELECT * FROM t2 WHERE b != ALL (SELECT MIN(a) FROM t1, t2 WHERE t2.c = t2.b); +b c +EXPLAIN EXTENDED SELECT * FROM t2 WHERE b != ALL (SELECT MIN(a) FROM 
t1, t2 WHERE t2.c = t2.b); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00 Using where +2 DEPENDENT SUBQUERY t1 index NULL a 5 NULL 2 100.00 Using index +2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join) +Warnings: +Note 1003 /* select#1 */ select `test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` where !<expr_cache><`test`.`t2`.`b`>(<in_optimizer>(`test`.`t2`.`b`,<exists>(/* select#2 */ select min(`test`.`t1`.`a`) from `test`.`t1` join `test`.`t2` where `test`.`t2`.`c` = `test`.`t2`.`b` having trigcond(<cache>(`test`.`t2`.`b`) = <ref_null_helper>(min(`test`.`t1`.`a`)))))) +set optimizer_switch= @optimizer_switch_save; +DROP TABLE t1,t2; +CREATE TABLE t1 (f1 varchar(10)) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('foo'),('bar'); +CREATE TABLE t2 (f2 varchar(10), key(f2)) ENGINE=MyISAM; +INSERT INTO t2 VALUES ('baz'),('qux'); +CREATE TABLE t3 (f3 varchar(10)) ENGINE=MyISAM; +INSERT INTO t3 VALUES ('abc'),('def'); +SELECT * FROM t1 +WHERE f1 = ALL( SELECT MAX(t2a.f2) +FROM t2 AS t2a INNER JOIN t2 t2b INNER JOIN t3 +ON (f3 = t2b.f2) ); +f1 +DROP TABLE t1,t2,t3; diff --git a/mysql-test/r/subselect_sj.result b/mysql-test/r/subselect_sj.result index 861360ddf9a..a601dac5337 100644 --- a/mysql-test/r/subselect_sj.result +++ b/mysql-test/r/subselect_sj.result @@ -1652,9 +1652,9 @@ CREATE VIEW v1 AS SELECT 1; EXPLAIN SELECT * FROM t1 INNER JOIN t2 ON t2.a != 0 AND t2.a IN (SELECT * FROM v1); id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY <derived3> system NULL NULL NULL NULL 1 1 PRIMARY t1 ALL NULL NULL NULL NULL 2 1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) -2 MATERIALIZED <derived3> system NULL NULL NULL NULL 1 3 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used SELECT * FROM t1 INNER JOIN t2 ON t2.a != 0 AND t2.a IN (SELECT * FROM v1); a a @@ -3072,4 
+3072,97 @@ project_number aaa drop table t1, t2, t3; set optimizer_switch= @tmp_mdev6859; +# +# MDEV-12675: subquery subject to semi-join optimizations +# in ON expression of INNER JOIN +# +set @tmp_mdev12675=@@optimizer_switch; +set optimizer_switch=default; +create table t1 (a int) engine=myisam; +insert into t1 values (5),(3),(2),(7),(2),(5),(1); +create table t2 (b int, index idx(b)) engine=myisam; +insert into t2 values (2),(3),(2),(1),(3),(4); +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +analyze table t1,t2; +Table Op Msg_type Msg_text +test.t1 analyze status OK +test.t2 analyze status OK +explain +select a from t1, t2 where b between 1 and 2 and a in (select b from t2); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where +1 PRIMARY t2 ref idx idx 5 test.t1.a 256 Using index; FirstMatch(t1) +1 PRIMARY t2 range idx idx 5 NULL 2 Using where; Using index; Using join buffer (flat, BNL join) +explain +select a from t1 join t2 on b between 1 and 2 and a in (select b from t2); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where +1 PRIMARY t2 ref idx idx 5 test.t1.a 256 Using index; FirstMatch(t1) +1 PRIMARY t2 range idx idx 5 NULL 2 Using where; Using index; Using join buffer (flat, BNL join) +drop table t1,t2; +set optimizer_switch= @tmp_mdev12675; +# +# MDEV-12817: subquery NOT subject to semi-join optimizations +# in ON expression of INNER JOIN +# +CREATE TABLE t1 (c1 int) ENGINE=MyISAM; +INSERT INTO t1 VALUES (1),(2); +CREATE TABLE t2 (c2 int) ENGINE=MyISAM; +INSERT INTO 
t2 VALUES (3),(4); +CREATE TABLE t3 (c3 int) ENGINE=MyISAM; +INSERT INTO t3 VALUES (5),(6); +CREATE TABLE t4 (c4 int) ENGINE=MyISAM; +INSERT INTO t4 VALUES (7),(8); +SELECT c1 +FROM t1 +LEFT JOIN +( t2 INNER JOIN t3 ON ( 1 IN ( SELECT c4 FROM t4 ) ) ) +ON (c1 = c3); +c1 +1 +2 +EXPLAIN EXTENDED SELECT c1 +FROM t1 +LEFT JOIN +( t2 INNER JOIN t3 ON ( 1 IN ( SELECT c4 FROM t4 ) ) ) +ON (c1 = c3); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 +1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where +1 PRIMARY t3 ALL NULL NULL NULL NULL 2 100.00 Using where +2 SUBQUERY t4 ALL NULL NULL NULL NULL 2 100.00 Using where +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`c1` AS `c1` from `test`.`t1` left join (`test`.`t2` join `test`.`t3`) on(`test`.`t3`.`c3` = `test`.`t1`.`c1` and <cache>(<in_optimizer>(1,<exists>(/* select#2 */ select `test`.`t4`.`c4` from `test`.`t4` where 1 = `test`.`t4`.`c4`)))) where 1 +# mdev-12820 +SELECT * +FROM t1 +LEFT JOIN +( ( SELECT * FROM t2 WHERE c2 IN ( SELECT c3 FROM t3 ) ) AS sq INNER JOIN t4 ) +ON (c1 = c2); +c1 c2 c4 +1 NULL NULL +2 NULL NULL +EXPLAIN EXTENDED SELECT * +FROM t1 +LEFT JOIN +( ( SELECT * FROM t2 WHERE c2 IN ( SELECT c3 FROM t3 ) ) AS sq INNER JOIN t4 ) +ON (c1 = c2); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 +1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where +1 PRIMARY t4 ALL NULL NULL NULL NULL 2 100.00 +3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 2 100.00 Using where +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`c1` AS `c1`,`test`.`t2`.`c2` AS `c2`,`test`.`t4`.`c4` AS `c4` from `test`.`t1` left join (`test`.`t2` join `test`.`t4`) on(`test`.`t2`.`c2` = `test`.`t1`.`c1` and <in_optimizer>(`test`.`t1`.`c1`,<exists>(/* select#3 */ select `test`.`t3`.`c3` from `test`.`t3` where <cache>(`test`.`t2`.`c2`) = `test`.`t3`.`c3`))) where 1 +DROP 
TABLE t1,t2,t3,t4; set optimizer_switch=@subselect_sj_tmp; diff --git a/mysql-test/r/subselect_sj2_mat.result b/mysql-test/r/subselect_sj2_mat.result index 07c9e74b4d7..8466f9fba4e 100644 --- a/mysql-test/r/subselect_sj2_mat.result +++ b/mysql-test/r/subselect_sj2_mat.result @@ -1625,3 +1625,26 @@ i1 DROP TABLE t1,t2,t3; set join_cache_level= @save_join_cache_level; set optimizer_switch=@save_optimizer_switch; +# +# mdev-7791: materialization of a semi-join subquery + +# RAND() in WHERE +# (materialized table is accessed last) +# +set @save_optimizer_switch=@@optimizer_switch; +set optimizer_switch='materialization=on'; +create table t1(i int); +insert into t1 values (1), (2), (3), (7), (9), (10); +create table t2(i int); +insert into t2 values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10); +select * from t1 where (rand() < 0) and i in (select i from t2); +i +explain extended +select * from t1 where (rand() < 0) and i in (select i from t2); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00 +2 MATERIALIZED t2 ALL NULL NULL NULL NULL 10 100.00 +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i` from `test`.`t1` semi join (`test`.`t2`) where rand() < 0 +drop table t1,t2; +set optimizer_switch=@save_optimizer_switch; diff --git a/mysql-test/r/subselect_sj_jcl6.result b/mysql-test/r/subselect_sj_jcl6.result index bcfa1b14be9..77a073ea2d3 100644 --- a/mysql-test/r/subselect_sj_jcl6.result +++ b/mysql-test/r/subselect_sj_jcl6.result @@ -1665,9 +1665,9 @@ CREATE VIEW v1 AS SELECT 1; EXPLAIN SELECT * FROM t1 INNER JOIN t2 ON t2.a != 0 AND t2.a IN (SELECT * FROM v1); id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY <derived3> system NULL NULL NULL NULL 1 1 PRIMARY t1 ALL NULL NULL NULL NULL 2 1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) -2 
MATERIALIZED <derived3> system NULL NULL NULL NULL 1 3 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used SELECT * FROM t1 INNER JOIN t2 ON t2.a != 0 AND t2.a IN (SELECT * FROM v1); a a @@ -3086,6 +3086,99 @@ project_number aaa drop table t1, t2, t3; set optimizer_switch= @tmp_mdev6859; +# +# MDEV-12675: subquery subject to semi-join optimizations +# in ON expression of INNER JOIN +# +set @tmp_mdev12675=@@optimizer_switch; +set optimizer_switch=default; +create table t1 (a int) engine=myisam; +insert into t1 values (5),(3),(2),(7),(2),(5),(1); +create table t2 (b int, index idx(b)) engine=myisam; +insert into t2 values (2),(3),(2),(1),(3),(4); +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +analyze table t1,t2; +Table Op Msg_type Msg_text +test.t1 analyze status OK +test.t2 analyze status OK +explain +select a from t1, t2 where b between 1 and 2 and a in (select b from t2); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where +1 PRIMARY t2 ref idx idx 5 test.t1.a 256 Using index; FirstMatch(t1) +1 PRIMARY t2 range idx idx 5 NULL 2 Using where; Using index; Using join buffer (flat, BNL join) +explain +select a from t1 join t2 on b between 1 and 2 and a in (select b from t2); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where +1 PRIMARY t2 ref idx idx 5 test.t1.a 256 Using index; FirstMatch(t1) +1 PRIMARY t2 range idx idx 5 NULL 2 Using where; Using index; Using join buffer (flat, BNL join) +drop table t1,t2; +set optimizer_switch= @tmp_mdev12675; +# +# MDEV-12817: subquery 
NOT subject to semi-join optimizations +# in ON expression of INNER JOIN +# +CREATE TABLE t1 (c1 int) ENGINE=MyISAM; +INSERT INTO t1 VALUES (1),(2); +CREATE TABLE t2 (c2 int) ENGINE=MyISAM; +INSERT INTO t2 VALUES (3),(4); +CREATE TABLE t3 (c3 int) ENGINE=MyISAM; +INSERT INTO t3 VALUES (5),(6); +CREATE TABLE t4 (c4 int) ENGINE=MyISAM; +INSERT INTO t4 VALUES (7),(8); +SELECT c1 +FROM t1 +LEFT JOIN +( t2 INNER JOIN t3 ON ( 1 IN ( SELECT c4 FROM t4 ) ) ) +ON (c1 = c3); +c1 +1 +2 +EXPLAIN EXTENDED SELECT c1 +FROM t1 +LEFT JOIN +( t2 INNER JOIN t3 ON ( 1 IN ( SELECT c4 FROM t4 ) ) ) +ON (c1 = c3); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 +1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join) +1 PRIMARY t3 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (incremental, BNL join) +2 SUBQUERY t4 ALL NULL NULL NULL NULL 2 100.00 Using where +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`c1` AS `c1` from `test`.`t1` left join (`test`.`t2` join `test`.`t3`) on(`test`.`t3`.`c3` = `test`.`t1`.`c1` and <cache>(<in_optimizer>(1,<exists>(/* select#2 */ select `test`.`t4`.`c4` from `test`.`t4` where 1 = `test`.`t4`.`c4`)))) where 1 +# mdev-12820 +SELECT * +FROM t1 +LEFT JOIN +( ( SELECT * FROM t2 WHERE c2 IN ( SELECT c3 FROM t3 ) ) AS sq INNER JOIN t4 ) +ON (c1 = c2); +c1 c2 c4 +1 NULL NULL +2 NULL NULL +EXPLAIN EXTENDED SELECT * +FROM t1 +LEFT JOIN +( ( SELECT * FROM t2 WHERE c2 IN ( SELECT c3 FROM t3 ) ) AS sq INNER JOIN t4 ) +ON (c1 = c2); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 +1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join) +1 PRIMARY t4 ALL NULL NULL NULL NULL 2 100.00 Using join buffer (incremental, BNL join) +3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 2 100.00 Using where +Warnings: +Note 1003 /* 
select#1 */ select `test`.`t1`.`c1` AS `c1`,`test`.`t2`.`c2` AS `c2`,`test`.`t4`.`c4` AS `c4` from `test`.`t1` left join (`test`.`t2` join `test`.`t4`) on(`test`.`t2`.`c2` = `test`.`t1`.`c1` and <in_optimizer>(`test`.`t1`.`c1`,<exists>(/* select#3 */ select `test`.`t3`.`c3` from `test`.`t3` where <cache>(`test`.`t2`.`c2`) = `test`.`t3`.`c3`))) where 1 +DROP TABLE t1,t2,t3,t4; set optimizer_switch=@subselect_sj_tmp; # # BUG#49129: Wrong result with IN-subquery with join_cache_level=6 and firstmatch=off diff --git a/mysql-test/r/trigger.result b/mysql-test/r/trigger.result index 77590d8508b..b751632307a 100644 --- a/mysql-test/r/trigger.result +++ b/mysql-test/r/trigger.result @@ -313,7 +313,7 @@ drop trigger trg; ERROR HY000: Trigger does not exist create view v1 as select * from t1; create trigger trg before insert on v1 for each row set @a:=1; -ERROR HY000: 'test.v1' is not BASE TABLE +ERROR HY000: 'test.v1' is not of type 'BASE TABLE' drop view v1; drop table t1; drop table t3; @@ -2368,6 +2368,16 @@ tr1 1 2016-01-01 10:10:10.33 tr2 2 2016-01-01 10:10:10.99 drop table t1; set time_zone= @@global.time_zone; +# MDEV-12992: Increasing memory consumption +with each invocation of trigger +# +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +CREATE TABLE t2 (b INT); +CREATE TRIGGER tr +AFTER UPDATE ON t1 FOR EACH ROW SELECT (SELECT b FROM t2) INTO @x; +# Running 20000 queries +DROP TABLE t1,t2; # # Start of 10.3 tests # diff --git a/mysql-test/r/union.result b/mysql-test/r/union.result index e0aa93d5c97..956703237a1 100644 --- a/mysql-test/r/union.result +++ b/mysql-test/r/union.result @@ -2178,6 +2178,35 @@ WHERE t1_2.b NOT IN ( SELECT 4 UNION ALL SELECT 5 ); a b a b 1 1 1 1 DROP TABLE t1; +# Bug mdev-12788: UNION ALL + impossible having for derived +# with IN subquery in WHERE +# +CREATE TABLE t1 (i int) ENGINE=MyISAM; +INSERT INTO t1 VALUES (1); +CREATE TABLE t2 (pk int PRIMARY KEY) ENGINE=MyISAM; +INSERT INTO t2 VALUES (1),(2); +SELECT 1, 2 +UNION ALL +SELECT 
i, COUNT(*) FROM ( +SELECT * FROM t1 WHERE i IN ( SELECT pk FROM t2 ) +) AS sq +GROUP BY i +HAVING i = 10; +1 2 +1 2 +EXPLAIN EXTENDED SELECT 1, 2 +UNION ALL +SELECT i, COUNT(*) FROM ( +SELECT * FROM t1 WHERE i IN ( SELECT pk FROM t2 ) +) AS sq +GROUP BY i +HAVING i = 10; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL No tables used +2 UNION NULL NULL NULL NULL NULL NULL NULL NULL Impossible HAVING noticed after reading const tables +Warnings: +Note 1003 /* select#1 */ select 1 AS `1`,2 AS `2` union all /* select#2 */ select 1 AS `i`,count(0) AS `COUNT(*)` from `test`.`t2` where 1 group by 1 having 0 +DROP TABLE t1,t2; # # Start of 10.3 tests # diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index e200af46b87..37c3fe55a52 100644 --- a/mysql-test/r/view.result +++ b/mysql-test/r/view.result @@ -41,7 +41,7 @@ show create view v1; View Create View character_set_client collation_connection v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`b` + 1 AS `c` from `t1` latin1 latin1_swedish_ci show create view t1; -ERROR HY000: 'test.t1' is not VIEW +ERROR HY000: 'test.t1' is not of type 'VIEW' drop table t1; select a from v1; ERROR 42S22: Unknown column 'a' in 'field list' @@ -199,7 +199,7 @@ c d drop view v100; ERROR 42S02: Unknown VIEW: 'test.v100' drop view t1; -ERROR HY000: 'test.t1' is not VIEW +ERROR HY000: 'test.t1' is not of type 'VIEW' drop table v1; ERROR 42S02: 'test.v1' is a view drop view v1,v2; @@ -675,7 +675,7 @@ drop view v1; create table t1 (col1 int,col2 char(22)); create view v1 as select * from t1; create index i1 on v1 (col1); -ERROR HY000: 'test.v1' is not BASE TABLE +ERROR HY000: 'test.v1' is not of type 'BASE TABLE' drop view v1; drop table t1; CREATE VIEW v1 (f1,f2,f3,f4) AS SELECT connection_id(), pi(), current_user(), version(); @@ -1061,7 +1061,7 @@ drop table t1,t2,t3; create table t1 (s1 int); 
create view v1 as select * from t1; handler v1 open as xx; -ERROR HY000: 'test.v1' is not BASE TABLE +ERROR HY000: 'test.v1' is not of type 'BASE TABLE' drop view v1; drop table t1; create table t1(a int); @@ -2445,28 +2445,28 @@ CREATE TABLE t1(id INT); CREATE VIEW v1 AS SELECT id FROM t1; OPTIMIZE TABLE v1; Table Op Msg_type Msg_text -test.v1 optimize Error 'test.v1' is not BASE TABLE +test.v1 optimize Error 'test.v1' is not of type 'BASE TABLE' test.v1 optimize status Operation failed ANALYZE TABLE v1; Table Op Msg_type Msg_text -test.v1 analyze Error 'test.v1' is not BASE TABLE +test.v1 analyze Error 'test.v1' is not of type 'BASE TABLE' test.v1 analyze status Operation failed REPAIR TABLE v1; Table Op Msg_type Msg_text -test.v1 repair Error 'test.v1' is not BASE TABLE +test.v1 repair Error 'test.v1' is not of type 'BASE TABLE' test.v1 repair status Operation failed DROP TABLE t1; OPTIMIZE TABLE v1; Table Op Msg_type Msg_text -test.v1 optimize Error 'test.v1' is not BASE TABLE +test.v1 optimize Error 'test.v1' is not of type 'BASE TABLE' test.v1 optimize status Operation failed ANALYZE TABLE v1; Table Op Msg_type Msg_text -test.v1 analyze Error 'test.v1' is not BASE TABLE +test.v1 analyze Error 'test.v1' is not of type 'BASE TABLE' test.v1 analyze status Operation failed REPAIR TABLE v1; Table Op Msg_type Msg_text -test.v1 repair Error 'test.v1' is not BASE TABLE +test.v1 repair Error 'test.v1' is not of type 'BASE TABLE' test.v1 repair status Operation failed DROP VIEW v1; create definer = current_user() sql security invoker view v1 as select 1; @@ -2920,7 +2920,7 @@ Tables_in_test t1 CREATE VIEW v1 AS SELECT id FROM t1; DROP VIEW t1,v1; -ERROR HY000: 'test.t1' is not VIEW +ERROR HY000: 'test.t1' is not of type 'VIEW' SHOW TABLES; Tables_in_test t1 @@ -3706,7 +3706,7 @@ CREATE TABLE t1(c1 INT); SELECT * FROM t1; c1 ALTER ALGORITHM=TEMPTABLE SQL SECURITY INVOKER VIEW t1 (c2) AS SELECT (1); -ERROR HY000: 'test.t1' is not VIEW +ERROR HY000: 'test.t1' is not of 
type 'VIEW' DROP TABLE t1; @@ -4011,7 +4011,7 @@ drop procedure p; CREATE TABLE t1 (a INT); CREATE VIEW v1 AS SELECT a FROM t1; ALTER TABLE v1; -ERROR HY000: 'test.v1' is not BASE TABLE +ERROR HY000: 'test.v1' is not of type 'BASE TABLE' DROP VIEW v1; DROP TABLE t1; # @@ -5584,7 +5584,7 @@ create table t1 (a int, b int); create view v1 as select a+b from t1; alter table v1 check partition p1; Table Op Msg_type Msg_text -test.v1 check Error 'test.v1' is not BASE TABLE +test.v1 check Error 'test.v1' is not of type 'BASE TABLE' test.v1 check status Operation failed drop view v1; drop table t1; diff --git a/mysql-test/suite/binlog/r/mysqladmin.result b/mysql-test/suite/binlog/r/mysqladmin.result new file mode 100644 index 00000000000..4be6c96d55b --- /dev/null +++ b/mysql-test/suite/binlog/r/mysqladmin.result @@ -0,0 +1,12 @@ +create user adm@localhost identified by 'foobar'; +grant reload on *.* to adm@localhost; +reset master; +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # flush status +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # flush status +drop user adm@localhost; diff --git a/mysql-test/suite/binlog/t/mysqladmin.test b/mysql-test/suite/binlog/t/mysqladmin.test new file mode 100644 index 00000000000..3c2fbc0a708 --- /dev/null +++ b/mysql-test/suite/binlog/t/mysqladmin.test @@ -0,0 +1,12 @@ +source include/have_binlog_format_statement.inc; +# +# MDEV-12612 mysqladmin --local flush... 
to use FLUSH LOCAL +# +create user adm@localhost identified by 'foobar'; +grant reload on *.* to adm@localhost; +reset master; +exec $MYSQLADMIN -uadm -pfoobar flush-status; +source include/show_binlog_events.inc; +exec $MYSQLADMIN --local -uadm -pfoobar flush-status; +source include/show_binlog_events.inc; +drop user adm@localhost; diff --git a/mysql-test/suite/csv/read_only.result b/mysql-test/suite/csv/read_only.result new file mode 100644 index 00000000000..d6936681f65 --- /dev/null +++ b/mysql-test/suite/csv/read_only.result @@ -0,0 +1,30 @@ +create table t1 (a int not null) engine=csv; +insert t1 values (1),(2); +flush tables; +select * from information_schema.tables where table_schema='test'; +TABLE_CATALOG def +TABLE_SCHEMA test +TABLE_NAME t1 +TABLE_TYPE BASE TABLE +ENGINE NULL +VERSION NULL +ROW_FORMAT NULL +TABLE_ROWS NULL +AVG_ROW_LENGTH NULL +DATA_LENGTH NULL +MAX_DATA_LENGTH NULL +INDEX_LENGTH NULL +DATA_FREE NULL +AUTO_INCREMENT NULL +CREATE_TIME NULL +UPDATE_TIME NULL +CHECK_TIME NULL +TABLE_COLLATION NULL +CHECKSUM NULL +CREATE_OPTIONS NULL +TABLE_COMMENT File './test/t1.CSM' not found (Errcode: 13 "Permission denied") +Warnings: +Level Warning +Code 29 +Message File './test/t1.CSM' not found (Errcode: 13 "Permission denied") +drop table t1; diff --git a/mysql-test/suite/csv/read_only.test b/mysql-test/suite/csv/read_only.test new file mode 100644 index 00000000000..2af209182d0 --- /dev/null +++ b/mysql-test/suite/csv/read_only.test @@ -0,0 +1,19 @@ +# +# MDEV-11883 MariaDB crashes with out-of-memory when query information_schema +# +source include/have_csv.inc; + +let datadir=`select @@datadir`; + +create table t1 (a int not null) engine=csv; +insert t1 values (1),(2); +flush tables; + +chmod 0400 $datadir/test/t1.CSM; +chmod 0400 $datadir/test/t1.CSV; + +--replace_result $datadir ./ +query_vertical select * from information_schema.tables where table_schema='test'; + +drop table t1; + diff --git 
a/mysql-test/suite/encryption/r/innodb-checksum-algorithm,32k.rdiff b/mysql-test/suite/encryption/r/innodb-checksum-algorithm,32k.rdiff new file mode 100644 index 00000000000..cd66df7440b --- /dev/null +++ b/mysql-test/suite/encryption/r/innodb-checksum-algorithm,32k.rdiff @@ -0,0 +1,38 @@ +--- suite/encryption/r/innodb-checksum-algorithm.result ++++ suite/encryption/r/innodb-checksum-algorithm,32k.reject +@@ -13,9 +13,9 @@ + SET GLOBAL innodb_default_encryption_key_id=4; + SET GLOBAL innodb_checksum_algorithm=crc32; + create table tce_crc32(a serial, b blob, index(b(10))) engine=innodb +-ROW_FORMAT=COMPRESSED encrypted=yes; ++ROW_FORMAT=DYNAMIC encrypted=yes; + create table tc_crc32(a serial, b blob, index(b(10))) engine=innodb +-ROW_FORMAT=COMPRESSED encrypted=no; ++ROW_FORMAT=DYNAMIC encrypted=no; + create table te_crc32(a serial, b blob, index(b(10))) engine=innodb + encrypted=yes; + create table t_crc32(a serial, b blob, index(b(10))) engine=innodb +@@ -222,9 +222,9 @@ + t_crc32, tpe_crc32, tp_crc32; + SET GLOBAL innodb_checksum_algorithm=innodb; + create table tce_innodb(a serial, b blob, index(b(10))) engine=innodb +-ROW_FORMAT=COMPRESSED encrypted=yes; ++ROW_FORMAT=DYNAMIC encrypted=yes; + create table tc_innodb(a serial, b blob, index(b(10))) engine=innodb +-ROW_FORMAT=COMPRESSED encrypted=no; ++ROW_FORMAT=DYNAMIC encrypted=no; + create table te_innodb(a serial, b blob, index(b(10))) engine=innodb + encrypted=yes; + create table t_innodb(a serial, b blob, index(b(10))) engine=innodb +@@ -431,9 +431,9 @@ + t_innodb, tpe_innodb, tp_innodb; + SET GLOBAL innodb_checksum_algorithm=none; + create table tce_none(a serial, b blob, index(b(10))) engine=innodb +-ROW_FORMAT=COMPRESSED encrypted=yes; ++ROW_FORMAT=DYNAMIC encrypted=yes; + create table tc_none(a serial, b blob, index(b(10))) engine=innodb +-ROW_FORMAT=COMPRESSED encrypted=no; ++ROW_FORMAT=DYNAMIC encrypted=no; + create table te_none(a serial, b blob, index(b(10))) engine=innodb + encrypted=yes; + create 
table t_none(a serial, b blob, index(b(10))) engine=innodb diff --git a/mysql-test/suite/encryption/r/innodb-checksum-algorithm,64k.rdiff b/mysql-test/suite/encryption/r/innodb-checksum-algorithm,64k.rdiff new file mode 100644 index 00000000000..523074297da --- /dev/null +++ b/mysql-test/suite/encryption/r/innodb-checksum-algorithm,64k.rdiff @@ -0,0 +1,38 @@ +--- suite/encryption/r/innodb-checksum-algorithm.result ++++ suite/encryption/r/innodb-checksum-algorithm,64k.reject +@@ -13,9 +13,9 @@ + SET GLOBAL innodb_default_encryption_key_id=4; + SET GLOBAL innodb_checksum_algorithm=crc32; + create table tce_crc32(a serial, b blob, index(b(10))) engine=innodb +-ROW_FORMAT=COMPRESSED encrypted=yes; ++ROW_FORMAT=DYNAMIC encrypted=yes; + create table tc_crc32(a serial, b blob, index(b(10))) engine=innodb +-ROW_FORMAT=COMPRESSED encrypted=no; ++ROW_FORMAT=DYNAMIC encrypted=no; + create table te_crc32(a serial, b blob, index(b(10))) engine=innodb + encrypted=yes; + create table t_crc32(a serial, b blob, index(b(10))) engine=innodb +@@ -222,9 +222,9 @@ + t_crc32, tpe_crc32, tp_crc32; + SET GLOBAL innodb_checksum_algorithm=innodb; + create table tce_innodb(a serial, b blob, index(b(10))) engine=innodb +-ROW_FORMAT=COMPRESSED encrypted=yes; ++ROW_FORMAT=DYNAMIC encrypted=yes; + create table tc_innodb(a serial, b blob, index(b(10))) engine=innodb +-ROW_FORMAT=COMPRESSED encrypted=no; ++ROW_FORMAT=DYNAMIC encrypted=no; + create table te_innodb(a serial, b blob, index(b(10))) engine=innodb + encrypted=yes; + create table t_innodb(a serial, b blob, index(b(10))) engine=innodb +@@ -431,9 +431,9 @@ + t_innodb, tpe_innodb, tp_innodb; + SET GLOBAL innodb_checksum_algorithm=none; + create table tce_none(a serial, b blob, index(b(10))) engine=innodb +-ROW_FORMAT=COMPRESSED encrypted=yes; ++ROW_FORMAT=DYNAMIC encrypted=yes; + create table tc_none(a serial, b blob, index(b(10))) engine=innodb +-ROW_FORMAT=COMPRESSED encrypted=no; ++ROW_FORMAT=DYNAMIC encrypted=no; + create table te_none(a 
serial, b blob, index(b(10))) engine=innodb + encrypted=yes; + create table t_none(a serial, b blob, index(b(10))) engine=innodb diff --git a/mysql-test/suite/encryption/r/innodb-checksum-algorithm.result b/mysql-test/suite/encryption/r/innodb-checksum-algorithm.result new file mode 100644 index 00000000000..a12b47ef31d --- /dev/null +++ b/mysql-test/suite/encryption/r/innodb-checksum-algorithm.result @@ -0,0 +1,643 @@ +SET @saved_file_per_table = @@global.innodb_file_per_table; +SET @saved_checksum_algorithm = @@global.innodb_checksum_algorithm; +SET @saved_encrypt_tables = @@global.innodb_encrypt_tables; +SET @saved_encryption_threads = @@global.innodb_encryption_threads; +SET @saved_encryption_key_id = @@global.innodb_default_encryption_key_id; +SET GLOBAL innodb_file_per_table = ON; +SET GLOBAL innodb_encrypt_tables = ON; +SET GLOBAL innodb_encryption_threads = 4; +call mtr.add_suppression("InnoDB: innodb_checksum_algorithm is set to \"strict_(crc32|none|innodb)\" but the page \\[page id: space=[0-9]+, page number=[0-9]+\\] contains a valid checksum \"(innodb|none|crc32)\""); +SET GLOBAL innodb_checksum_algorithm = innodb; +SET GLOBAL innodb_default_encryption_key_id=4; +SET GLOBAL innodb_checksum_algorithm=crc32; +create table tce_crc32(a serial, b blob, index(b(10))) engine=innodb +ROW_FORMAT=COMPRESSED encrypted=yes; +create table tc_crc32(a serial, b blob, index(b(10))) engine=innodb +ROW_FORMAT=COMPRESSED encrypted=no; +create table te_crc32(a serial, b blob, index(b(10))) engine=innodb +encrypted=yes; +create table t_crc32(a serial, b blob, index(b(10))) engine=innodb +encrypted=no; +create table tpe_crc32(a serial, b blob, index(b(10))) engine=innodb +page_compressed=yes encrypted=yes; +create table tp_crc32(a serial, b blob, index(b(10))) engine=innodb +page_compressed=yes encrypted=no; +begin; +insert into tce_crc32(b) values (repeat('secret',20)); +insert into tc_crc32(b) values (repeat('secret',20)); +insert into te_crc32(b) values 
(repeat('secret',20)); +insert into t_crc32(b) values (repeat('secret',20)); +insert into tpe_crc32(b) values (repeat('secret',20)); +insert into tp_crc32(b) values (repeat('secret',20)); +commit; +FLUSH TABLES tce_crc32, tc_crc32, te_crc32, +t_crc32, tpe_crc32, tp_crc32 FOR EXPORT; +backup: tce_crc32 +backup: tc_crc32 +backup: te_crc32 +backup: t_crc32 +backup: tpe_crc32 +backup: tp_crc32 +t_crc32.cfg +t_crc32.frm +t_crc32.ibd +tc_crc32.cfg +tc_crc32.frm +tc_crc32.ibd +tce_crc32.cfg +tce_crc32.frm +tce_crc32.ibd +te_crc32.cfg +te_crc32.frm +te_crc32.ibd +tp_crc32.cfg +tp_crc32.frm +tp_crc32.ibd +tpe_crc32.cfg +tpe_crc32.frm +tpe_crc32.ibd +UNLOCK TABLES; +SET GLOBAL innodb_checksum_algorithm=strict_crc32; +ALTER TABLE tce_crc32 DISCARD TABLESPACE; +ALTER TABLE tc_crc32 DISCARD TABLESPACE; +ALTER TABLE te_crc32 DISCARD TABLESPACE; +ALTER TABLE t_crc32 DISCARD TABLESPACE; +ALTER TABLE tpe_crc32 DISCARD TABLESPACE; +ALTER TABLE tp_crc32 DISCARD TABLESPACE; +restore: tce_crc32 .ibd and .cfg files +restore: tc_crc32 .ibd and .cfg files +restore: te_crc32 .ibd and .cfg files +restore: t_crc32 .ibd and .cfg files +restore: tpe_crc32 .ibd and .cfg files +restore: tp_crc32 .ibd and .cfg files +ALTER TABLE tce_crc32 IMPORT TABLESPACE; +update tce_crc32 set b=substr(b,1); +ALTER TABLE tc_crc32 IMPORT TABLESPACE; +update tc_crc32 set b=substr(b,1); +ALTER TABLE te_crc32 IMPORT TABLESPACE; +update te_crc32 set b=substr(b,1); +ALTER TABLE t_crc32 IMPORT TABLESPACE; +update t_crc32 set b=substr(b,1); +ALTER TABLE tpe_crc32 IMPORT TABLESPACE; +update tpe_crc32 set b=substr(b,1); +ALTER TABLE tp_crc32 IMPORT TABLESPACE; +update tp_crc32 set b=substr(b,1); +SET GLOBAL innodb_checksum_algorithm=crc32; +ALTER TABLE tce_crc32 DISCARD TABLESPACE; +ALTER TABLE tc_crc32 DISCARD TABLESPACE; +ALTER TABLE te_crc32 DISCARD TABLESPACE; +ALTER TABLE t_crc32 DISCARD TABLESPACE; +ALTER TABLE tpe_crc32 DISCARD TABLESPACE; +ALTER TABLE tp_crc32 DISCARD TABLESPACE; +restore: tce_crc32 .ibd and .cfg 
files +restore: tc_crc32 .ibd and .cfg files +restore: te_crc32 .ibd and .cfg files +restore: t_crc32 .ibd and .cfg files +restore: tpe_crc32 .ibd and .cfg files +restore: tp_crc32 .ibd and .cfg files +ALTER TABLE tce_crc32 IMPORT TABLESPACE; +update tce_crc32 set b=substr(b,1); +ALTER TABLE tc_crc32 IMPORT TABLESPACE; +update tc_crc32 set b=substr(b,1); +ALTER TABLE te_crc32 IMPORT TABLESPACE; +update te_crc32 set b=substr(b,1); +ALTER TABLE t_crc32 IMPORT TABLESPACE; +update t_crc32 set b=substr(b,1); +ALTER TABLE tpe_crc32 IMPORT TABLESPACE; +update tpe_crc32 set b=substr(b,1); +ALTER TABLE tp_crc32 IMPORT TABLESPACE; +update tp_crc32 set b=substr(b,1); +SET GLOBAL innodb_checksum_algorithm=strict_innodb; +ALTER TABLE tce_crc32 DISCARD TABLESPACE; +ALTER TABLE tc_crc32 DISCARD TABLESPACE; +ALTER TABLE te_crc32 DISCARD TABLESPACE; +ALTER TABLE t_crc32 DISCARD TABLESPACE; +ALTER TABLE tpe_crc32 DISCARD TABLESPACE; +ALTER TABLE tp_crc32 DISCARD TABLESPACE; +restore: tce_crc32 .ibd and .cfg files +restore: tc_crc32 .ibd and .cfg files +restore: te_crc32 .ibd and .cfg files +restore: t_crc32 .ibd and .cfg files +restore: tpe_crc32 .ibd and .cfg files +restore: tp_crc32 .ibd and .cfg files +ALTER TABLE tce_crc32 IMPORT TABLESPACE; +update tce_crc32 set b=substr(b,1); +ALTER TABLE tc_crc32 IMPORT TABLESPACE; +update tc_crc32 set b=substr(b,1); +ALTER TABLE te_crc32 IMPORT TABLESPACE; +update te_crc32 set b=substr(b,1); +ALTER TABLE t_crc32 IMPORT TABLESPACE; +update t_crc32 set b=substr(b,1); +ALTER TABLE tpe_crc32 IMPORT TABLESPACE; +update tpe_crc32 set b=substr(b,1); +ALTER TABLE tp_crc32 IMPORT TABLESPACE; +update tp_crc32 set b=substr(b,1); +SET GLOBAL innodb_checksum_algorithm=innodb; +ALTER TABLE tce_crc32 DISCARD TABLESPACE; +ALTER TABLE tc_crc32 DISCARD TABLESPACE; +ALTER TABLE te_crc32 DISCARD TABLESPACE; +ALTER TABLE t_crc32 DISCARD TABLESPACE; +ALTER TABLE tpe_crc32 DISCARD TABLESPACE; +ALTER TABLE tp_crc32 DISCARD TABLESPACE; +restore: tce_crc32 .ibd and 
.cfg files +restore: tc_crc32 .ibd and .cfg files +restore: te_crc32 .ibd and .cfg files +restore: t_crc32 .ibd and .cfg files +restore: tpe_crc32 .ibd and .cfg files +restore: tp_crc32 .ibd and .cfg files +ALTER TABLE tce_crc32 IMPORT TABLESPACE; +update tce_crc32 set b=substr(b,1); +ALTER TABLE tc_crc32 IMPORT TABLESPACE; +update tc_crc32 set b=substr(b,1); +ALTER TABLE te_crc32 IMPORT TABLESPACE; +update te_crc32 set b=substr(b,1); +ALTER TABLE t_crc32 IMPORT TABLESPACE; +update t_crc32 set b=substr(b,1); +ALTER TABLE tpe_crc32 IMPORT TABLESPACE; +update tpe_crc32 set b=substr(b,1); +ALTER TABLE tp_crc32 IMPORT TABLESPACE; +update tp_crc32 set b=substr(b,1); +SET GLOBAL innodb_checksum_algorithm=strict_none; +ALTER TABLE tce_crc32 DISCARD TABLESPACE; +ALTER TABLE tc_crc32 DISCARD TABLESPACE; +ALTER TABLE te_crc32 DISCARD TABLESPACE; +ALTER TABLE t_crc32 DISCARD TABLESPACE; +ALTER TABLE tpe_crc32 DISCARD TABLESPACE; +ALTER TABLE tp_crc32 DISCARD TABLESPACE; +restore: tce_crc32 .ibd and .cfg files +restore: tc_crc32 .ibd and .cfg files +restore: te_crc32 .ibd and .cfg files +restore: t_crc32 .ibd and .cfg files +restore: tpe_crc32 .ibd and .cfg files +restore: tp_crc32 .ibd and .cfg files +ALTER TABLE tce_crc32 IMPORT TABLESPACE; +update tce_crc32 set b=substr(b,1); +ALTER TABLE tc_crc32 IMPORT TABLESPACE; +update tc_crc32 set b=substr(b,1); +ALTER TABLE te_crc32 IMPORT TABLESPACE; +update te_crc32 set b=substr(b,1); +ALTER TABLE t_crc32 IMPORT TABLESPACE; +update t_crc32 set b=substr(b,1); +ALTER TABLE tpe_crc32 IMPORT TABLESPACE; +update tpe_crc32 set b=substr(b,1); +ALTER TABLE tp_crc32 IMPORT TABLESPACE; +update tp_crc32 set b=substr(b,1); +SET GLOBAL innodb_checksum_algorithm=none; +ALTER TABLE tce_crc32 DISCARD TABLESPACE; +ALTER TABLE tc_crc32 DISCARD TABLESPACE; +ALTER TABLE te_crc32 DISCARD TABLESPACE; +ALTER TABLE t_crc32 DISCARD TABLESPACE; +ALTER TABLE tpe_crc32 DISCARD TABLESPACE; +ALTER TABLE tp_crc32 DISCARD TABLESPACE; +restore: tce_crc32 .ibd and 
.cfg files +restore: tc_crc32 .ibd and .cfg files +restore: te_crc32 .ibd and .cfg files +restore: t_crc32 .ibd and .cfg files +restore: tpe_crc32 .ibd and .cfg files +restore: tp_crc32 .ibd and .cfg files +ALTER TABLE tce_crc32 IMPORT TABLESPACE; +update tce_crc32 set b=substr(b,1); +ALTER TABLE tc_crc32 IMPORT TABLESPACE; +update tc_crc32 set b=substr(b,1); +ALTER TABLE te_crc32 IMPORT TABLESPACE; +update te_crc32 set b=substr(b,1); +ALTER TABLE t_crc32 IMPORT TABLESPACE; +update t_crc32 set b=substr(b,1); +ALTER TABLE tpe_crc32 IMPORT TABLESPACE; +update tpe_crc32 set b=substr(b,1); +ALTER TABLE tp_crc32 IMPORT TABLESPACE; +update tp_crc32 set b=substr(b,1); +CHECK TABLE tce_crc32, tc_crc32, te_crc32, +t_crc32, tpe_crc32, tp_crc32; +Table Op Msg_type Msg_text +test.tce_crc32 check status OK +test.tc_crc32 check status OK +test.te_crc32 check status OK +test.t_crc32 check status OK +test.tpe_crc32 check status OK +test.tp_crc32 check status OK +DROP TABLE tce_crc32, tc_crc32, te_crc32, +t_crc32, tpe_crc32, tp_crc32; +SET GLOBAL innodb_checksum_algorithm=innodb; +create table tce_innodb(a serial, b blob, index(b(10))) engine=innodb +ROW_FORMAT=COMPRESSED encrypted=yes; +create table tc_innodb(a serial, b blob, index(b(10))) engine=innodb +ROW_FORMAT=COMPRESSED encrypted=no; +create table te_innodb(a serial, b blob, index(b(10))) engine=innodb +encrypted=yes; +create table t_innodb(a serial, b blob, index(b(10))) engine=innodb +encrypted=no; +create table tpe_innodb(a serial, b blob, index(b(10))) engine=innodb +page_compressed=yes encrypted=yes; +create table tp_innodb(a serial, b blob, index(b(10))) engine=innodb +page_compressed=yes encrypted=no; +begin; +insert into tce_innodb(b) values (repeat('secret',20)); +insert into tc_innodb(b) values (repeat('secret',20)); +insert into te_innodb(b) values (repeat('secret',20)); +insert into t_innodb(b) values (repeat('secret',20)); +insert into tpe_innodb(b) values (repeat('secret',20)); +insert into tp_innodb(b) values 
(repeat('secret',20)); +commit; +FLUSH TABLES tce_innodb, tc_innodb, te_innodb, +t_innodb, tpe_innodb, tp_innodb FOR EXPORT; +backup: tce_innodb +backup: tc_innodb +backup: te_innodb +backup: t_innodb +backup: tpe_innodb +backup: tp_innodb +t_innodb.cfg +t_innodb.frm +t_innodb.ibd +tc_innodb.cfg +tc_innodb.frm +tc_innodb.ibd +tce_innodb.cfg +tce_innodb.frm +tce_innodb.ibd +te_innodb.cfg +te_innodb.frm +te_innodb.ibd +tp_innodb.cfg +tp_innodb.frm +tp_innodb.ibd +tpe_innodb.cfg +tpe_innodb.frm +tpe_innodb.ibd +UNLOCK TABLES; +SET GLOBAL innodb_checksum_algorithm=strict_crc32; +ALTER TABLE tce_innodb DISCARD TABLESPACE; +ALTER TABLE tc_innodb DISCARD TABLESPACE; +ALTER TABLE te_innodb DISCARD TABLESPACE; +ALTER TABLE t_innodb DISCARD TABLESPACE; +ALTER TABLE tpe_innodb DISCARD TABLESPACE; +ALTER TABLE tp_innodb DISCARD TABLESPACE; +restore: tce_innodb .ibd and .cfg files +restore: tc_innodb .ibd and .cfg files +restore: te_innodb .ibd and .cfg files +restore: t_innodb .ibd and .cfg files +restore: tpe_innodb .ibd and .cfg files +restore: tp_innodb .ibd and .cfg files +ALTER TABLE tce_innodb IMPORT TABLESPACE; +update tce_innodb set b=substr(b,1); +ALTER TABLE tc_innodb IMPORT TABLESPACE; +update tc_innodb set b=substr(b,1); +ALTER TABLE te_innodb IMPORT TABLESPACE; +update te_innodb set b=substr(b,1); +ALTER TABLE t_innodb IMPORT TABLESPACE; +update t_innodb set b=substr(b,1); +ALTER TABLE tpe_innodb IMPORT TABLESPACE; +update tpe_innodb set b=substr(b,1); +ALTER TABLE tp_innodb IMPORT TABLESPACE; +update tp_innodb set b=substr(b,1); +SET GLOBAL innodb_checksum_algorithm=crc32; +ALTER TABLE tce_innodb DISCARD TABLESPACE; +ALTER TABLE tc_innodb DISCARD TABLESPACE; +ALTER TABLE te_innodb DISCARD TABLESPACE; +ALTER TABLE t_innodb DISCARD TABLESPACE; +ALTER TABLE tpe_innodb DISCARD TABLESPACE; +ALTER TABLE tp_innodb DISCARD TABLESPACE; +restore: tce_innodb .ibd and .cfg files +restore: tc_innodb .ibd and .cfg files +restore: te_innodb .ibd and .cfg files +restore: 
t_innodb .ibd and .cfg files +restore: tpe_innodb .ibd and .cfg files +restore: tp_innodb .ibd and .cfg files +ALTER TABLE tce_innodb IMPORT TABLESPACE; +update tce_innodb set b=substr(b,1); +ALTER TABLE tc_innodb IMPORT TABLESPACE; +update tc_innodb set b=substr(b,1); +ALTER TABLE te_innodb IMPORT TABLESPACE; +update te_innodb set b=substr(b,1); +ALTER TABLE t_innodb IMPORT TABLESPACE; +update t_innodb set b=substr(b,1); +ALTER TABLE tpe_innodb IMPORT TABLESPACE; +update tpe_innodb set b=substr(b,1); +ALTER TABLE tp_innodb IMPORT TABLESPACE; +update tp_innodb set b=substr(b,1); +SET GLOBAL innodb_checksum_algorithm=strict_innodb; +ALTER TABLE tce_innodb DISCARD TABLESPACE; +ALTER TABLE tc_innodb DISCARD TABLESPACE; +ALTER TABLE te_innodb DISCARD TABLESPACE; +ALTER TABLE t_innodb DISCARD TABLESPACE; +ALTER TABLE tpe_innodb DISCARD TABLESPACE; +ALTER TABLE tp_innodb DISCARD TABLESPACE; +restore: tce_innodb .ibd and .cfg files +restore: tc_innodb .ibd and .cfg files +restore: te_innodb .ibd and .cfg files +restore: t_innodb .ibd and .cfg files +restore: tpe_innodb .ibd and .cfg files +restore: tp_innodb .ibd and .cfg files +ALTER TABLE tce_innodb IMPORT TABLESPACE; +update tce_innodb set b=substr(b,1); +ALTER TABLE tc_innodb IMPORT TABLESPACE; +update tc_innodb set b=substr(b,1); +ALTER TABLE te_innodb IMPORT TABLESPACE; +update te_innodb set b=substr(b,1); +ALTER TABLE t_innodb IMPORT TABLESPACE; +update t_innodb set b=substr(b,1); +ALTER TABLE tpe_innodb IMPORT TABLESPACE; +update tpe_innodb set b=substr(b,1); +ALTER TABLE tp_innodb IMPORT TABLESPACE; +update tp_innodb set b=substr(b,1); +SET GLOBAL innodb_checksum_algorithm=innodb; +ALTER TABLE tce_innodb DISCARD TABLESPACE; +ALTER TABLE tc_innodb DISCARD TABLESPACE; +ALTER TABLE te_innodb DISCARD TABLESPACE; +ALTER TABLE t_innodb DISCARD TABLESPACE; +ALTER TABLE tpe_innodb DISCARD TABLESPACE; +ALTER TABLE tp_innodb DISCARD TABLESPACE; +restore: tce_innodb .ibd and .cfg files +restore: tc_innodb .ibd and .cfg 
files +restore: te_innodb .ibd and .cfg files +restore: t_innodb .ibd and .cfg files +restore: tpe_innodb .ibd and .cfg files +restore: tp_innodb .ibd and .cfg files +ALTER TABLE tce_innodb IMPORT TABLESPACE; +update tce_innodb set b=substr(b,1); +ALTER TABLE tc_innodb IMPORT TABLESPACE; +update tc_innodb set b=substr(b,1); +ALTER TABLE te_innodb IMPORT TABLESPACE; +update te_innodb set b=substr(b,1); +ALTER TABLE t_innodb IMPORT TABLESPACE; +update t_innodb set b=substr(b,1); +ALTER TABLE tpe_innodb IMPORT TABLESPACE; +update tpe_innodb set b=substr(b,1); +ALTER TABLE tp_innodb IMPORT TABLESPACE; +update tp_innodb set b=substr(b,1); +SET GLOBAL innodb_checksum_algorithm=strict_none; +ALTER TABLE tce_innodb DISCARD TABLESPACE; +ALTER TABLE tc_innodb DISCARD TABLESPACE; +ALTER TABLE te_innodb DISCARD TABLESPACE; +ALTER TABLE t_innodb DISCARD TABLESPACE; +ALTER TABLE tpe_innodb DISCARD TABLESPACE; +ALTER TABLE tp_innodb DISCARD TABLESPACE; +restore: tce_innodb .ibd and .cfg files +restore: tc_innodb .ibd and .cfg files +restore: te_innodb .ibd and .cfg files +restore: t_innodb .ibd and .cfg files +restore: tpe_innodb .ibd and .cfg files +restore: tp_innodb .ibd and .cfg files +ALTER TABLE tce_innodb IMPORT TABLESPACE; +update tce_innodb set b=substr(b,1); +ALTER TABLE tc_innodb IMPORT TABLESPACE; +update tc_innodb set b=substr(b,1); +ALTER TABLE te_innodb IMPORT TABLESPACE; +update te_innodb set b=substr(b,1); +ALTER TABLE t_innodb IMPORT TABLESPACE; +update t_innodb set b=substr(b,1); +ALTER TABLE tpe_innodb IMPORT TABLESPACE; +update tpe_innodb set b=substr(b,1); +ALTER TABLE tp_innodb IMPORT TABLESPACE; +update tp_innodb set b=substr(b,1); +SET GLOBAL innodb_checksum_algorithm=none; +ALTER TABLE tce_innodb DISCARD TABLESPACE; +ALTER TABLE tc_innodb DISCARD TABLESPACE; +ALTER TABLE te_innodb DISCARD TABLESPACE; +ALTER TABLE t_innodb DISCARD TABLESPACE; +ALTER TABLE tpe_innodb DISCARD TABLESPACE; +ALTER TABLE tp_innodb DISCARD TABLESPACE; +restore: tce_innodb .ibd 
and .cfg files +restore: tc_innodb .ibd and .cfg files +restore: te_innodb .ibd and .cfg files +restore: t_innodb .ibd and .cfg files +restore: tpe_innodb .ibd and .cfg files +restore: tp_innodb .ibd and .cfg files +ALTER TABLE tce_innodb IMPORT TABLESPACE; +update tce_innodb set b=substr(b,1); +ALTER TABLE tc_innodb IMPORT TABLESPACE; +update tc_innodb set b=substr(b,1); +ALTER TABLE te_innodb IMPORT TABLESPACE; +update te_innodb set b=substr(b,1); +ALTER TABLE t_innodb IMPORT TABLESPACE; +update t_innodb set b=substr(b,1); +ALTER TABLE tpe_innodb IMPORT TABLESPACE; +update tpe_innodb set b=substr(b,1); +ALTER TABLE tp_innodb IMPORT TABLESPACE; +update tp_innodb set b=substr(b,1); +CHECK TABLE tce_innodb, tc_innodb, te_innodb, +t_innodb, tpe_innodb, tp_innodb; +Table Op Msg_type Msg_text +test.tce_innodb check status OK +test.tc_innodb check status OK +test.te_innodb check status OK +test.t_innodb check status OK +test.tpe_innodb check status OK +test.tp_innodb check status OK +DROP TABLE tce_innodb, tc_innodb, te_innodb, +t_innodb, tpe_innodb, tp_innodb; +SET GLOBAL innodb_checksum_algorithm=none; +create table tce_none(a serial, b blob, index(b(10))) engine=innodb +ROW_FORMAT=COMPRESSED encrypted=yes; +create table tc_none(a serial, b blob, index(b(10))) engine=innodb +ROW_FORMAT=COMPRESSED encrypted=no; +create table te_none(a serial, b blob, index(b(10))) engine=innodb +encrypted=yes; +create table t_none(a serial, b blob, index(b(10))) engine=innodb +encrypted=no; +create table tpe_none(a serial, b blob, index(b(10))) engine=innodb +page_compressed=yes encrypted=yes; +create table tp_none(a serial, b blob, index(b(10))) engine=innodb +page_compressed=yes encrypted=no; +begin; +insert into tce_none(b) values (repeat('secret',20)); +insert into tc_none(b) values (repeat('secret',20)); +insert into te_none(b) values (repeat('secret',20)); +insert into t_none(b) values (repeat('secret',20)); +insert into tpe_none(b) values (repeat('secret',20)); +insert into 
tp_none(b) values (repeat('secret',20)); +commit; +FLUSH TABLES tce_none, tc_none, te_none, +t_none, tpe_none, tp_none FOR EXPORT; +backup: tce_none +backup: tc_none +backup: te_none +backup: t_none +backup: tpe_none +backup: tp_none +t_none.cfg +t_none.frm +t_none.ibd +tc_none.cfg +tc_none.frm +tc_none.ibd +tce_none.cfg +tce_none.frm +tce_none.ibd +te_none.cfg +te_none.frm +te_none.ibd +tp_none.cfg +tp_none.frm +tp_none.ibd +tpe_none.cfg +tpe_none.frm +tpe_none.ibd +UNLOCK TABLES; +SET GLOBAL innodb_checksum_algorithm=strict_crc32; +ALTER TABLE tce_none DISCARD TABLESPACE; +ALTER TABLE tc_none DISCARD TABLESPACE; +ALTER TABLE te_none DISCARD TABLESPACE; +ALTER TABLE t_none DISCARD TABLESPACE; +ALTER TABLE tpe_none DISCARD TABLESPACE; +ALTER TABLE tp_none DISCARD TABLESPACE; +restore: tce_none .ibd and .cfg files +restore: tc_none .ibd and .cfg files +restore: te_none .ibd and .cfg files +restore: t_none .ibd and .cfg files +restore: tpe_none .ibd and .cfg files +restore: tp_none .ibd and .cfg files +ALTER TABLE tce_none IMPORT TABLESPACE; +update tce_none set b=substr(b,1); +ALTER TABLE tc_none IMPORT TABLESPACE; +update tc_none set b=substr(b,1); +ALTER TABLE te_none IMPORT TABLESPACE; +update te_none set b=substr(b,1); +ALTER TABLE t_none IMPORT TABLESPACE; +update t_none set b=substr(b,1); +ALTER TABLE tpe_none IMPORT TABLESPACE; +update tpe_none set b=substr(b,1); +ALTER TABLE tp_none IMPORT TABLESPACE; +update tp_none set b=substr(b,1); +SET GLOBAL innodb_checksum_algorithm=crc32; +ALTER TABLE tce_none DISCARD TABLESPACE; +ALTER TABLE tc_none DISCARD TABLESPACE; +ALTER TABLE te_none DISCARD TABLESPACE; +ALTER TABLE t_none DISCARD TABLESPACE; +ALTER TABLE tpe_none DISCARD TABLESPACE; +ALTER TABLE tp_none DISCARD TABLESPACE; +restore: tce_none .ibd and .cfg files +restore: tc_none .ibd and .cfg files +restore: te_none .ibd and .cfg files +restore: t_none .ibd and .cfg files +restore: tpe_none .ibd and .cfg files +restore: tp_none .ibd and .cfg files +ALTER 
TABLE tce_none IMPORT TABLESPACE; +update tce_none set b=substr(b,1); +ALTER TABLE tc_none IMPORT TABLESPACE; +update tc_none set b=substr(b,1); +ALTER TABLE te_none IMPORT TABLESPACE; +update te_none set b=substr(b,1); +ALTER TABLE t_none IMPORT TABLESPACE; +update t_none set b=substr(b,1); +ALTER TABLE tpe_none IMPORT TABLESPACE; +update tpe_none set b=substr(b,1); +ALTER TABLE tp_none IMPORT TABLESPACE; +update tp_none set b=substr(b,1); +SET GLOBAL innodb_checksum_algorithm=strict_innodb; +ALTER TABLE tce_none DISCARD TABLESPACE; +ALTER TABLE tc_none DISCARD TABLESPACE; +ALTER TABLE te_none DISCARD TABLESPACE; +ALTER TABLE t_none DISCARD TABLESPACE; +ALTER TABLE tpe_none DISCARD TABLESPACE; +ALTER TABLE tp_none DISCARD TABLESPACE; +restore: tce_none .ibd and .cfg files +restore: tc_none .ibd and .cfg files +restore: te_none .ibd and .cfg files +restore: t_none .ibd and .cfg files +restore: tpe_none .ibd and .cfg files +restore: tp_none .ibd and .cfg files +ALTER TABLE tce_none IMPORT TABLESPACE; +update tce_none set b=substr(b,1); +ALTER TABLE tc_none IMPORT TABLESPACE; +update tc_none set b=substr(b,1); +ALTER TABLE te_none IMPORT TABLESPACE; +update te_none set b=substr(b,1); +ALTER TABLE t_none IMPORT TABLESPACE; +update t_none set b=substr(b,1); +ALTER TABLE tpe_none IMPORT TABLESPACE; +update tpe_none set b=substr(b,1); +ALTER TABLE tp_none IMPORT TABLESPACE; +update tp_none set b=substr(b,1); +SET GLOBAL innodb_checksum_algorithm=innodb; +ALTER TABLE tce_none DISCARD TABLESPACE; +ALTER TABLE tc_none DISCARD TABLESPACE; +ALTER TABLE te_none DISCARD TABLESPACE; +ALTER TABLE t_none DISCARD TABLESPACE; +ALTER TABLE tpe_none DISCARD TABLESPACE; +ALTER TABLE tp_none DISCARD TABLESPACE; +restore: tce_none .ibd and .cfg files +restore: tc_none .ibd and .cfg files +restore: te_none .ibd and .cfg files +restore: t_none .ibd and .cfg files +restore: tpe_none .ibd and .cfg files +restore: tp_none .ibd and .cfg files +ALTER TABLE tce_none IMPORT TABLESPACE; +update 
tce_none set b=substr(b,1); +ALTER TABLE tc_none IMPORT TABLESPACE; +update tc_none set b=substr(b,1); +ALTER TABLE te_none IMPORT TABLESPACE; +update te_none set b=substr(b,1); +ALTER TABLE t_none IMPORT TABLESPACE; +update t_none set b=substr(b,1); +ALTER TABLE tpe_none IMPORT TABLESPACE; +update tpe_none set b=substr(b,1); +ALTER TABLE tp_none IMPORT TABLESPACE; +update tp_none set b=substr(b,1); +SET GLOBAL innodb_checksum_algorithm=strict_none; +ALTER TABLE tce_none DISCARD TABLESPACE; +ALTER TABLE tc_none DISCARD TABLESPACE; +ALTER TABLE te_none DISCARD TABLESPACE; +ALTER TABLE t_none DISCARD TABLESPACE; +ALTER TABLE tpe_none DISCARD TABLESPACE; +ALTER TABLE tp_none DISCARD TABLESPACE; +restore: tce_none .ibd and .cfg files +restore: tc_none .ibd and .cfg files +restore: te_none .ibd and .cfg files +restore: t_none .ibd and .cfg files +restore: tpe_none .ibd and .cfg files +restore: tp_none .ibd and .cfg files +ALTER TABLE tce_none IMPORT TABLESPACE; +update tce_none set b=substr(b,1); +ALTER TABLE tc_none IMPORT TABLESPACE; +update tc_none set b=substr(b,1); +ALTER TABLE te_none IMPORT TABLESPACE; +update te_none set b=substr(b,1); +ALTER TABLE t_none IMPORT TABLESPACE; +update t_none set b=substr(b,1); +ALTER TABLE tpe_none IMPORT TABLESPACE; +update tpe_none set b=substr(b,1); +ALTER TABLE tp_none IMPORT TABLESPACE; +update tp_none set b=substr(b,1); +SET GLOBAL innodb_checksum_algorithm=none; +ALTER TABLE tce_none DISCARD TABLESPACE; +ALTER TABLE tc_none DISCARD TABLESPACE; +ALTER TABLE te_none DISCARD TABLESPACE; +ALTER TABLE t_none DISCARD TABLESPACE; +ALTER TABLE tpe_none DISCARD TABLESPACE; +ALTER TABLE tp_none DISCARD TABLESPACE; +restore: tce_none .ibd and .cfg files +restore: tc_none .ibd and .cfg files +restore: te_none .ibd and .cfg files +restore: t_none .ibd and .cfg files +restore: tpe_none .ibd and .cfg files +restore: tp_none .ibd and .cfg files +ALTER TABLE tce_none IMPORT TABLESPACE; +update tce_none set b=substr(b,1); +ALTER TABLE tc_none 
IMPORT TABLESPACE; +update tc_none set b=substr(b,1); +ALTER TABLE te_none IMPORT TABLESPACE; +update te_none set b=substr(b,1); +ALTER TABLE t_none IMPORT TABLESPACE; +update t_none set b=substr(b,1); +ALTER TABLE tpe_none IMPORT TABLESPACE; +update tpe_none set b=substr(b,1); +ALTER TABLE tp_none IMPORT TABLESPACE; +update tp_none set b=substr(b,1); +CHECK TABLE tce_none, tc_none, te_none, +t_none, tpe_none, tp_none; +Table Op Msg_type Msg_text +test.tce_none check status OK +test.tc_none check status OK +test.te_none check status OK +test.t_none check status OK +test.tpe_none check status OK +test.tp_none check status OK +DROP TABLE tce_none, tc_none, te_none, +t_none, tpe_none, tp_none; +SET GLOBAL innodb_file_per_table = @saved_file_per_table; +SET GLOBAL innodb_checksum_algorithm = @saved_checksum_algorithm; +SET GLOBAL innodb_encrypt_tables = @saved_encrypt_tables; +SET GLOBAL innodb_encryption_threads = @saved_encryption_threads; +SET GLOBAL innodb_default_encryption_key_id = @saved_encryption_key_id; diff --git a/mysql-test/suite/encryption/r/innodb-compressed-blob.result b/mysql-test/suite/encryption/r/innodb-compressed-blob.result index 5753188b168..bf43e1b30d6 100644 --- a/mysql-test/suite/encryption/r/innodb-compressed-blob.result +++ b/mysql-test/suite/encryption/r/innodb-compressed-blob.result @@ -1,4 +1,5 @@ call mtr.add_suppression("InnoDB: The page \\[page id: space=[1-9][0-9]*, page number=[1-9][0-9]*\\] in file '.*test.t[123]\\.ibd' cannot be decrypted\\."); +call mtr.add_suppression("InnoDB: Unable to decompress ..test.t[1-3]\\.ibd\\[page id: space=[1-9][0-9]*, page number=[0-9]+\\]"); # Restart mysqld --file-key-management-filename=keys2.txt SET GLOBAL innodb_file_per_table = ON; set GLOBAL innodb_default_encryption_key_id=4; diff --git a/mysql-test/suite/encryption/r/innodb-first-page-read.result b/mysql-test/suite/encryption/r/innodb-first-page-read.result new file mode 100644 index 00000000000..a630aa3cb09 --- /dev/null +++ 
b/mysql-test/suite/encryption/r/innodb-first-page-read.result @@ -0,0 +1,87 @@ +create database innodb_test; +use innodb_test; +create table innodb_normal(c1 bigint not null, b char(200)) engine=innodb; +create table innodb_compact(c1 bigint not null, b char(200)) engine=innodb row_format=compact; +create table innodb_dynamic(c1 bigint not null, b char(200)) engine=innodb row_format=dynamic; +create table innodb_compressed(c1 bigint not null, b char(200)) engine=innodb row_format=compressed; +create table innodb_compressed1(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=1; +create table innodb_compressed2(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=2; +create table innodb_compressed4(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=4; +create table innodb_compressed8(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=8; +create table innodb_compressed16(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=16; +create table innodb_redundant(c1 bigint not null, b char(200)) engine=innodb row_format=redundant; +create table innodb_pagecomp(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes; +create table innodb_pagecomp1(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=1; +create table innodb_pagecomp2(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=2; +create table innodb_pagecomp3(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=3; +create table innodb_pagecomp4(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=4; +create table innodb_pagecomp5(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=5; +create table innodb_pagecomp6(c1 bigint not null, b char(200)) engine=innodb 
page_compressed=yes page_compression_level=6; +create table innodb_pagecomp7(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=7; +create table innodb_pagecomp8(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=8; +create table innodb_pagecomp9(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=9; +create table innodb_datadir1(c1 bigint not null, b char(200)) engine=innodb DATA DIRECTORY='MYSQL_TMP_DIR'; +create table innodb_datadir2(c1 bigint not null, b char(200)) engine=innodb row_format=compressed DATA DIRECTORY='MYSQL_TMP_DIR'; +create table innodb_datadir3(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes DATA DIRECTORY='MYSQL_TMP_DIR'; +begin; +insert into innodb_normal values (1,'secret'); +insert into innodb_compact select * from innodb_normal; +insert into innodb_dynamic select * from innodb_normal; +insert into innodb_compressed select * from innodb_normal; +insert into innodb_compressed1 select * from innodb_normal; +insert into innodb_compressed2 select * from innodb_normal; +insert into innodb_compressed4 select * from innodb_normal; +insert into innodb_compressed8 select * from innodb_normal; +insert into innodb_compressed16 select * from innodb_normal; +insert into innodb_redundant select * from innodb_normal; +insert into innodb_pagecomp select * from innodb_normal; +insert into innodb_pagecomp1 select * from innodb_normal; +insert into innodb_pagecomp2 select * from innodb_normal; +insert into innodb_pagecomp3 select * from innodb_normal; +insert into innodb_pagecomp4 select * from innodb_normal; +insert into innodb_pagecomp5 select * from innodb_normal; +insert into innodb_pagecomp6 select * from innodb_normal; +insert into innodb_pagecomp7 select * from innodb_normal; +insert into innodb_pagecomp8 select * from innodb_normal; +insert into innodb_pagecomp9 select * from innodb_normal; +insert into innodb_datadir1 
select * from innodb_normal; +insert into innodb_datadir2 select * from innodb_normal; +insert into innodb_datadir3 select * from innodb_normal; +commit; +# Restart server and see how many page 0's are read +# result should be less than actual number of tables +# i.e. < 23 + 3 = 26 +show status like 'innodb_pages0_read%'; +Variable_name Value +Innodb_pages0_read 26 +use innodb_test; +show status like 'innodb_pages0_read%'; +Variable_name Value +Innodb_pages0_read 26 +use test; +show status like 'innodb_pages0_read%'; +Variable_name Value +Innodb_pages0_read 26 +set global innodb_encrypt_tables=OFF; +# wait until tables are decrypted +show status like 'innodb_pages0_read%'; +Variable_name Value +Innodb_pages0_read 26 +use innodb_test; +show status like 'innodb_pages0_read%'; +Variable_name Value +Innodb_pages0_read 26 +use test; +# restart and see number read page 0 +show status like 'innodb_pages0_read%'; +Variable_name Value +Innodb_pages0_read 26 +use innodb_test; +show status like 'innodb_pages0_read%'; +Variable_name Value +Innodb_pages0_read 26 +use test; +drop database innodb_test; +show status like 'innodb_pages0_read%'; +Variable_name Value +Innodb_pages0_read 26 diff --git a/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result b/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result index afd3ac412ef..b39b3d13c97 100644 --- a/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result +++ b/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result @@ -64,5 +64,4 @@ FOUND 1 /public/ in t7.ibd FOUND 1 /public/ in t8.ibd # t9 page compressed expecting NOT FOUND NOT FOUND /public/ in t9.ibd -use test; drop database enctests; diff --git a/mysql-test/suite/encryption/t/innodb-checksum-algorithm.test b/mysql-test/suite/encryption/t/innodb-checksum-algorithm.test new file mode 100644 index 00000000000..7eaa1bd64c6 --- /dev/null +++ b/mysql-test/suite/encryption/t/innodb-checksum-algorithm.test @@ -0,0 +1,120 @@ +-- source 
include/innodb_page_size.inc +-- source include/have_file_key_management_plugin.inc + +SET @saved_file_per_table = @@global.innodb_file_per_table; +SET @saved_checksum_algorithm = @@global.innodb_checksum_algorithm; +SET @saved_encrypt_tables = @@global.innodb_encrypt_tables; +SET @saved_encryption_threads = @@global.innodb_encryption_threads; +SET @saved_encryption_key_id = @@global.innodb_default_encryption_key_id; + +SET GLOBAL innodb_file_per_table = ON; +SET GLOBAL innodb_encrypt_tables = ON; +SET GLOBAL innodb_encryption_threads = 4; + +call mtr.add_suppression("InnoDB: innodb_checksum_algorithm is set to \"strict_(crc32|none|innodb)\" but the page \\[page id: space=[0-9]+, page number=[0-9]+\\] contains a valid checksum \"(innodb|none|crc32)\""); + +SET GLOBAL innodb_checksum_algorithm = innodb; +SET GLOBAL innodb_default_encryption_key_id=4; + +let MYSQLD_DATADIR =`SELECT @@datadir`; + +# ROW_FORMAT=COMPRESSED is unavailable with innodb_page_size=32k or 64k +let $row_format_compressed= `select case when @@global.innodb_page_size>16384 +then 'ROW_FORMAT=DYNAMIC' else 'ROW_FORMAT=COMPRESSED' end`; + +let $from = 3; +while ($from) +{ +dec $from; +let checksum = `select case $from + when 0 then 'none' + when 1 then 'innodb' + when 2 then 'crc32' + end`; +eval SET GLOBAL innodb_checksum_algorithm=$checksum; + +eval create table tce_$checksum(a serial, b blob, index(b(10))) engine=innodb +$row_format_compressed encrypted=yes; +eval create table tc_$checksum(a serial, b blob, index(b(10))) engine=innodb +$row_format_compressed encrypted=no; +eval create table te_$checksum(a serial, b blob, index(b(10))) engine=innodb +encrypted=yes; +eval create table t_$checksum(a serial, b blob, index(b(10))) engine=innodb +encrypted=no; +eval create table tpe_$checksum(a serial, b blob, index(b(10))) engine=innodb +page_compressed=yes encrypted=yes; +eval create table tp_$checksum(a serial, b blob, index(b(10))) engine=innodb +page_compressed=yes encrypted=no; + +begin; +eval 
insert into tce_$checksum(b) values (repeat('secret',20)); +eval insert into tc_$checksum(b) values (repeat('secret',20)); +eval insert into te_$checksum(b) values (repeat('secret',20)); +eval insert into t_$checksum(b) values (repeat('secret',20)); +eval insert into tpe_$checksum(b) values (repeat('secret',20)); +eval insert into tp_$checksum(b) values (repeat('secret',20)); +commit; + +eval FLUSH TABLES tce_$checksum, tc_$checksum, te_$checksum, +t_$checksum, tpe_$checksum, tp_$checksum FOR EXPORT; +perl; +do "$ENV{MTR_SUITE_DIR}/include/innodb-util.pl"; +my @tables = ("tce_", "tc_", "te_", "t_", "tpe_", "tp_"); +ib_backup_tablespaces("test", map{ $_ . $ENV{checksum} } @tables); +EOF +--list_files $MYSQLD_DATADIR/test +UNLOCK TABLES; + +let $to = 6; +while ($to) +{ +dec $to; +let $tocksum = `select case $to + when 0 then 'none' + when 1 then 'strict_none' + when 2 then 'innodb' + when 3 then 'strict_innodb' + when 4 then 'crc32' + when 5 then 'strict_crc32' + end`; + +eval SET GLOBAL innodb_checksum_algorithm=$tocksum; + +eval ALTER TABLE tce_$checksum DISCARD TABLESPACE; +eval ALTER TABLE tc_$checksum DISCARD TABLESPACE; +eval ALTER TABLE te_$checksum DISCARD TABLESPACE; +eval ALTER TABLE t_$checksum DISCARD TABLESPACE; +eval ALTER TABLE tpe_$checksum DISCARD TABLESPACE; +eval ALTER TABLE tp_$checksum DISCARD TABLESPACE; + +perl; +do "$ENV{MTR_SUITE_DIR}/include/innodb-util.pl"; +my @tables = ("tce_", "tc_", "te_", "t_", "tpe_", "tp_"); +ib_restore_tablespaces("test", map{ $_ . 
$ENV{checksum} } @tables); +EOF + +eval ALTER TABLE tce_$checksum IMPORT TABLESPACE; +eval update tce_$checksum set b=substr(b,1); +eval ALTER TABLE tc_$checksum IMPORT TABLESPACE; +eval update tc_$checksum set b=substr(b,1); +eval ALTER TABLE te_$checksum IMPORT TABLESPACE; +eval update te_$checksum set b=substr(b,1); +eval ALTER TABLE t_$checksum IMPORT TABLESPACE; +eval update t_$checksum set b=substr(b,1); +eval ALTER TABLE tpe_$checksum IMPORT TABLESPACE; +eval update tpe_$checksum set b=substr(b,1); +eval ALTER TABLE tp_$checksum IMPORT TABLESPACE; +eval update tp_$checksum set b=substr(b,1); +} + +eval CHECK TABLE tce_$checksum, tc_$checksum, te_$checksum, +t_$checksum, tpe_$checksum, tp_$checksum; +eval DROP TABLE tce_$checksum, tc_$checksum, te_$checksum, +t_$checksum, tpe_$checksum, tp_$checksum; +} + +SET GLOBAL innodb_file_per_table = @saved_file_per_table; +SET GLOBAL innodb_checksum_algorithm = @saved_checksum_algorithm; +SET GLOBAL innodb_encrypt_tables = @saved_encrypt_tables; +SET GLOBAL innodb_encryption_threads = @saved_encryption_threads; +SET GLOBAL innodb_default_encryption_key_id = @saved_encryption_key_id; diff --git a/mysql-test/suite/encryption/t/innodb-compressed-blob.combinations b/mysql-test/suite/encryption/t/innodb-compressed-blob.combinations new file mode 100644 index 00000000000..e096b023b52 --- /dev/null +++ b/mysql-test/suite/encryption/t/innodb-compressed-blob.combinations @@ -0,0 +1,12 @@ +[crc32] +loose-innodb-tablespaces-encryption +loose-innodb-encrypt-tables=on +loose-innodb-encryption-threads=4 +max_allowed_packet=64K +loose-innodb-checksum-algorithm=crc32 +[none] +loose-innodb-tablespaces-encryption +loose-innodb-encrypt-tables=on +loose-innodb-encryption-threads=4 +max_allowed_packet=64K +loose-innodb-checksum-algorithm=none diff --git a/mysql-test/suite/encryption/t/innodb-compressed-blob.opt b/mysql-test/suite/encryption/t/innodb-compressed-blob.opt deleted file mode 100644 index 36dcb6c6f26..00000000000 --- 
a/mysql-test/suite/encryption/t/innodb-compressed-blob.opt +++ /dev/null @@ -1,4 +0,0 @@ ---innodb-tablespaces-encryption ---innodb-encrypt-tables=on ---innodb-encryption-threads=2 ---max_allowed_packet=64K diff --git a/mysql-test/suite/encryption/t/innodb-compressed-blob.test b/mysql-test/suite/encryption/t/innodb-compressed-blob.test index 6256aaf5c33..4f28f8e183d 100644 --- a/mysql-test/suite/encryption/t/innodb-compressed-blob.test +++ b/mysql-test/suite/encryption/t/innodb-compressed-blob.test @@ -1,10 +1,11 @@ --- source include/have_innodb.inc +-- source include/innodb_page_size_small.inc -- source include/have_file_key_management_plugin.inc # embedded does not support restart -- source include/not_embedded.inc call mtr.add_suppression("InnoDB: The page \\[page id: space=[1-9][0-9]*, page number=[1-9][0-9]*\\] in file '.*test.t[123]\\.ibd' cannot be decrypted\\."); +call mtr.add_suppression("InnoDB: Unable to decompress ..test.t[1-3]\\.ibd\\[page id: space=[1-9][0-9]*, page number=[0-9]+\\]"); --echo # Restart mysqld --file-key-management-filename=keys2.txt -- let $restart_parameters=--file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys2.txt diff --git a/mysql-test/suite/encryption/t/innodb-first-page-read.opt b/mysql-test/suite/encryption/t/innodb-first-page-read.opt new file mode 100644 index 00000000000..38d69691ed6 --- /dev/null +++ b/mysql-test/suite/encryption/t/innodb-first-page-read.opt @@ -0,0 +1,5 @@ +--innodb-encrypt-tables=ON +--innodb-encrypt-log=ON +--innodb-encryption-rotate-key-age=15 +--innodb-encryption-threads=4 +--innodb-tablespaces-encryption diff --git a/mysql-test/suite/encryption/t/innodb-first-page-read.test b/mysql-test/suite/encryption/t/innodb-first-page-read.test new file mode 100644 index 00000000000..216a864f746 --- /dev/null +++ b/mysql-test/suite/encryption/t/innodb-first-page-read.test @@ -0,0 +1,92 @@ +-- source include/have_innodb.inc +-- source include/have_file_key_management_plugin.inc +-- source 
include/not_embedded.inc + +create database innodb_test; +use innodb_test; +create table innodb_normal(c1 bigint not null, b char(200)) engine=innodb; +create table innodb_compact(c1 bigint not null, b char(200)) engine=innodb row_format=compact; +create table innodb_dynamic(c1 bigint not null, b char(200)) engine=innodb row_format=dynamic; +create table innodb_compressed(c1 bigint not null, b char(200)) engine=innodb row_format=compressed; +create table innodb_compressed1(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=1; +create table innodb_compressed2(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=2; +create table innodb_compressed4(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=4; +create table innodb_compressed8(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=8; +create table innodb_compressed16(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=16; +create table innodb_redundant(c1 bigint not null, b char(200)) engine=innodb row_format=redundant; +create table innodb_pagecomp(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes; +create table innodb_pagecomp1(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=1; +create table innodb_pagecomp2(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=2; +create table innodb_pagecomp3(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=3; +create table innodb_pagecomp4(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=4; +create table innodb_pagecomp5(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=5; +create table innodb_pagecomp6(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=6; +create 
table innodb_pagecomp7(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=7; +create table innodb_pagecomp8(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=8; +create table innodb_pagecomp9(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=9; + +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +eval create table innodb_datadir1(c1 bigint not null, b char(200)) engine=innodb DATA DIRECTORY='$MYSQL_TMP_DIR'; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +eval create table innodb_datadir2(c1 bigint not null, b char(200)) engine=innodb row_format=compressed DATA DIRECTORY='$MYSQL_TMP_DIR'; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +eval create table innodb_datadir3(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes DATA DIRECTORY='$MYSQL_TMP_DIR'; + +begin; +insert into innodb_normal values (1,'secret'); +insert into innodb_compact select * from innodb_normal; +insert into innodb_dynamic select * from innodb_normal; +insert into innodb_compressed select * from innodb_normal; +insert into innodb_compressed1 select * from innodb_normal; +insert into innodb_compressed2 select * from innodb_normal; +insert into innodb_compressed4 select * from innodb_normal; +insert into innodb_compressed8 select * from innodb_normal; +insert into innodb_compressed16 select * from innodb_normal; +insert into innodb_redundant select * from innodb_normal; +insert into innodb_pagecomp select * from innodb_normal; +insert into innodb_pagecomp1 select * from innodb_normal; +insert into innodb_pagecomp2 select * from innodb_normal; +insert into innodb_pagecomp3 select * from innodb_normal; +insert into innodb_pagecomp4 select * from innodb_normal; +insert into innodb_pagecomp5 select * from innodb_normal; +insert into innodb_pagecomp6 select * from innodb_normal; +insert into innodb_pagecomp7 select * from innodb_normal; +insert into innodb_pagecomp8 select 
* from innodb_normal; +insert into innodb_pagecomp9 select * from innodb_normal; +insert into innodb_datadir1 select * from innodb_normal; +insert into innodb_datadir2 select * from innodb_normal; +insert into innodb_datadir3 select * from innodb_normal; +commit; + +--echo # Restart server and see how many page 0's are read +--source include/restart_mysqld.inc + +--echo # result should be less than actual number of tables +--echo # i.e. < 23 + 3 = 26 +show status like 'innodb_pages0_read%'; +use innodb_test; +show status like 'innodb_pages0_read%'; +use test; +show status like 'innodb_pages0_read%'; + +set global innodb_encrypt_tables=OFF; + +--echo # wait until tables are decrypted +--let $wait_condition=SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0 +--source include/wait_condition.inc + +show status like 'innodb_pages0_read%'; +use innodb_test; +show status like 'innodb_pages0_read%'; +use test; + +--echo # restart and see number read page 0 +-- source include/restart_mysqld.inc + +show status like 'innodb_pages0_read%'; +use innodb_test; +show status like 'innodb_pages0_read%'; +use test; + +drop database innodb_test; +show status like 'innodb_pages0_read%'; diff --git a/mysql-test/suite/encryption/t/innodb-key-rotation-disable.test b/mysql-test/suite/encryption/t/innodb-key-rotation-disable.test index c1e3dd4cfb5..3167cee4b4b 100644 --- a/mysql-test/suite/encryption/t/innodb-key-rotation-disable.test +++ b/mysql-test/suite/encryption/t/innodb-key-rotation-disable.test @@ -80,5 +80,4 @@ SET GLOBAL innodb_encrypt_tables=ON; -- source include/start_mysqld.inc -use test; drop database enctests; diff --git a/mysql-test/suite/federated/assisted_discovery.result b/mysql-test/suite/federated/assisted_discovery.result index f79e47da8b4..4818ff7bb02 100644 --- a/mysql-test/suite/federated/assisted_discovery.result +++ b/mysql-test/suite/federated/assisted_discovery.result @@ -38,6 +38,40 @@ id group a\\b a\\ name 1 1 
2 NULL foo 2 1 2 NULL fee DROP TABLE t1; +create table t1 ( +a bigint(20) not null auto_increment, +b bigint(20) not null, +c tinyint(4) not null, +d varchar(4096) not null, +primary key (a), +key (b,c,d(255)) +); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` bigint(20) NOT NULL AUTO_INCREMENT, + `b` bigint(20) NOT NULL, + `c` tinyint(4) NOT NULL, + `d` varchar(4096) NOT NULL, + PRIMARY KEY (`a`), + KEY `b` (`b`,`c`,`d`(255)) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +connection master; +create table t1 engine=federated +connection='mysql://root@127.0.0.1:SLAVE_PORT/test/t1'; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` bigint(20) NOT NULL AUTO_INCREMENT, + `b` bigint(20) NOT NULL, + `c` tinyint(4) NOT NULL, + `d` varchar(4096) NOT NULL, + PRIMARY KEY (`a`), + KEY `b` (`b`,`c`,`d`(255)) +) ENGINE=FEDERATED DEFAULT CHARSET=latin1 CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/test/t1' +drop table t1; +connection slave; +drop table t1; connection master; DROP TABLE IF EXISTS federated.t1; DROP DATABASE IF EXISTS federated; diff --git a/mysql-test/suite/federated/assisted_discovery.test b/mysql-test/suite/federated/assisted_discovery.test index 9f3abe74ecc..fa83a2a8e19 100644 --- a/mysql-test/suite/federated/assisted_discovery.test +++ b/mysql-test/suite/federated/assisted_discovery.test @@ -30,5 +30,29 @@ connection slave; SELECT * FROM t1; DROP TABLE t1; +# +# +# +create table t1 ( + a bigint(20) not null auto_increment, + b bigint(20) not null, + c tinyint(4) not null, + d varchar(4096) not null, + primary key (a), + key (b,c,d(255)) +); +show create table t1; + +connection master; +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval create table t1 engine=federated + connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/test/t1'; +--replace_result $SLAVE_MYPORT SLAVE_PORT +show create table t1; +drop table t1; + +connection slave; +drop table t1; + source include/federated_cleanup.inc; diff --git 
a/mysql-test/suite/funcs_1/r/innodb_trig_0407.result b/mysql-test/suite/funcs_1/r/innodb_trig_0407.result index b6a3a34207d..4753efd1794 100644 --- a/mysql-test/suite/funcs_1/r/innodb_trig_0407.result +++ b/mysql-test/suite/funcs_1/r/innodb_trig_0407.result @@ -245,7 +245,7 @@ Testcase 3.5.5.3: Create view vw3 as select f118 from tb3; Create trigger trg3 before INSERT on vw3 for each row set new.f118='s'; -ERROR HY000: 'test.vw3' is not BASE TABLE +ERROR HY000: 'test.vw3' is not of type 'BASE TABLE' drop view vw3; Testcase 3.5.5.4: diff --git a/mysql-test/suite/funcs_1/r/innodb_views.result b/mysql-test/suite/funcs_1/r/innodb_views.result index dfe46bbf645..d81d09353e1 100644 --- a/mysql-test/suite/funcs_1/r/innodb_views.result +++ b/mysql-test/suite/funcs_1/r/innodb_views.result @@ -3520,12 +3520,12 @@ f1 DROP TRIGGER tr1 ; SET @a:=0 ; CREATE TRIGGER tr1 BEFORE INSERT ON v1 FOR EACH ROW SET @a:=1 ; -ERROR HY000: 'test.v1' is not BASE TABLE +ERROR HY000: 'test.v1' is not of type 'BASE TABLE' RENAME TABLE v1 TO v2; RENAME VIEW v2 TO v1; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'VIEW v2 TO v1' at line 1 ALTER TABLE v2 RENAME AS v1; -ERROR HY000: 'test.v2' is not BASE TABLE +ERROR HY000: 'test.v2' is not of type 'BASE TABLE' ALTER VIEW v1 RENAME AS v2; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'RENAME AS v2' at line 1 DROP TABLE IF EXISTS t1, t2 ; @@ -3535,12 +3535,12 @@ CREATE TABLE t1 ( f1 DATE, f2 BLOB, f3 DOUBLE ); CREATE VIEW v1 AS SELECT f1, f2, f3 FROM t1; ALTER TABLE t1 ADD PRIMARY KEY(f1); ALTER TABLE v1 ADD PRIMARY KEY(f1); -ERROR HY000: 'test.v1' is not BASE TABLE +ERROR HY000: 'test.v1' is not of type 'BASE TABLE' ALTER VIEW v1 ADD PRIMARY KEY(f1); ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your 
MariaDB server version for the right syntax to use near 'ADD PRIMARY KEY(f1)' at line 1 CREATE INDEX t1_idx ON t1(f3); CREATE INDEX v1_idx ON v1(f3); -ERROR HY000: 'test.v1' is not BASE TABLE +ERROR HY000: 'test.v1' is not of type 'BASE TABLE' DROP TABLE t1; DROP VIEW v1; @@ -4085,9 +4085,9 @@ Drop view test.v1 ; Testcase 3.3.1.14 -------------------------------------------------------------------------------- CREATE OR REPLACE VIEW test.tb2 AS SELECT * From tb2 LIMIT 2; -ERROR HY000: 'test.tb2' is not VIEW +ERROR HY000: 'test.tb2' is not of type 'VIEW' CREATE OR REPLACE VIEW tb2 AS SELECT * From tb2 LIMIT 2; -ERROR HY000: 'test.tb2' is not VIEW +ERROR HY000: 'test.tb2' is not of type 'VIEW' Testcase 3.3.1.15 -------------------------------------------------------------------------------- diff --git a/mysql-test/suite/funcs_1/r/memory_trig_0407.result b/mysql-test/suite/funcs_1/r/memory_trig_0407.result index 57b14e49f72..c21c206f118 100644 --- a/mysql-test/suite/funcs_1/r/memory_trig_0407.result +++ b/mysql-test/suite/funcs_1/r/memory_trig_0407.result @@ -245,7 +245,7 @@ Testcase 3.5.5.3: Create view vw3 as select f118 from tb3; Create trigger trg3 before INSERT on vw3 for each row set new.f118='s'; -ERROR HY000: 'test.vw3' is not BASE TABLE +ERROR HY000: 'test.vw3' is not of type 'BASE TABLE' drop view vw3; Testcase 3.5.5.4: diff --git a/mysql-test/suite/funcs_1/r/memory_views.result b/mysql-test/suite/funcs_1/r/memory_views.result index b81ea2f309b..ca24c73d933 100644 --- a/mysql-test/suite/funcs_1/r/memory_views.result +++ b/mysql-test/suite/funcs_1/r/memory_views.result @@ -3521,12 +3521,12 @@ f1 DROP TRIGGER tr1 ; SET @a:=0 ; CREATE TRIGGER tr1 BEFORE INSERT ON v1 FOR EACH ROW SET @a:=1 ; -ERROR HY000: 'test.v1' is not BASE TABLE +ERROR HY000: 'test.v1' is not of type 'BASE TABLE' RENAME TABLE v1 TO v2; RENAME VIEW v2 TO v1; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax 
to use near 'VIEW v2 TO v1' at line 1 ALTER TABLE v2 RENAME AS v1; -ERROR HY000: 'test.v2' is not BASE TABLE +ERROR HY000: 'test.v2' is not of type 'BASE TABLE' ALTER VIEW v1 RENAME AS v2; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'RENAME AS v2' at line 1 DROP TABLE IF EXISTS t1, t2 ; @@ -3536,12 +3536,12 @@ CREATE TABLE t1 ( f1 DATE, f2 BLOB, f3 DOUBLE ); CREATE VIEW v1 AS SELECT f1, f2, f3 FROM t1; ALTER TABLE t1 ADD PRIMARY KEY(f1); ALTER TABLE v1 ADD PRIMARY KEY(f1); -ERROR HY000: 'test.v1' is not BASE TABLE +ERROR HY000: 'test.v1' is not of type 'BASE TABLE' ALTER VIEW v1 ADD PRIMARY KEY(f1); ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'ADD PRIMARY KEY(f1)' at line 1 CREATE INDEX t1_idx ON t1(f3); CREATE INDEX v1_idx ON v1(f3); -ERROR HY000: 'test.v1' is not BASE TABLE +ERROR HY000: 'test.v1' is not of type 'BASE TABLE' DROP TABLE t1; DROP VIEW v1; @@ -4086,9 +4086,9 @@ Drop view test.v1 ; Testcase 3.3.1.14 -------------------------------------------------------------------------------- CREATE OR REPLACE VIEW test.tb2 AS SELECT * From tb2 LIMIT 2; -ERROR HY000: 'test.tb2' is not VIEW +ERROR HY000: 'test.tb2' is not of type 'VIEW' CREATE OR REPLACE VIEW tb2 AS SELECT * From tb2 LIMIT 2; -ERROR HY000: 'test.tb2' is not VIEW +ERROR HY000: 'test.tb2' is not of type 'VIEW' Testcase 3.3.1.15 -------------------------------------------------------------------------------- diff --git a/mysql-test/suite/funcs_1/r/myisam_trig_0407.result b/mysql-test/suite/funcs_1/r/myisam_trig_0407.result index 57b14e49f72..c21c206f118 100644 --- a/mysql-test/suite/funcs_1/r/myisam_trig_0407.result +++ b/mysql-test/suite/funcs_1/r/myisam_trig_0407.result @@ -245,7 +245,7 @@ Testcase 3.5.5.3: Create view vw3 as select f118 from tb3; Create trigger trg3 before INSERT on vw3 for 
each row set new.f118='s'; -ERROR HY000: 'test.vw3' is not BASE TABLE +ERROR HY000: 'test.vw3' is not of type 'BASE TABLE' drop view vw3; Testcase 3.5.5.4: diff --git a/mysql-test/suite/funcs_1/r/myisam_views-big.result b/mysql-test/suite/funcs_1/r/myisam_views-big.result index 9296b9d98a2..feb67539cb9 100644 --- a/mysql-test/suite/funcs_1/r/myisam_views-big.result +++ b/mysql-test/suite/funcs_1/r/myisam_views-big.result @@ -4023,12 +4023,12 @@ f1 DROP TRIGGER tr1 ; SET @a:=0 ; CREATE TRIGGER tr1 BEFORE INSERT ON v1 FOR EACH ROW SET @a:=1 ; -ERROR HY000: 'test.v1' is not BASE TABLE +ERROR HY000: 'test.v1' is not of type 'BASE TABLE' RENAME TABLE v1 TO v2; RENAME VIEW v2 TO v1; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'VIEW v2 TO v1' at line 1 ALTER TABLE v2 RENAME AS v1; -ERROR HY000: 'test.v2' is not BASE TABLE +ERROR HY000: 'test.v2' is not of type 'BASE TABLE' ALTER VIEW v1 RENAME AS v2; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'RENAME AS v2' at line 1 DROP TABLE IF EXISTS t1, t2 ; @@ -4038,12 +4038,12 @@ CREATE TABLE t1 ( f1 DATE, f2 BLOB, f3 DOUBLE ); CREATE VIEW v1 AS SELECT f1, f2, f3 FROM t1; ALTER TABLE t1 ADD PRIMARY KEY(f1); ALTER TABLE v1 ADD PRIMARY KEY(f1); -ERROR HY000: 'test.v1' is not BASE TABLE +ERROR HY000: 'test.v1' is not of type 'BASE TABLE' ALTER VIEW v1 ADD PRIMARY KEY(f1); ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'ADD PRIMARY KEY(f1)' at line 1 CREATE INDEX t1_idx ON t1(f3); CREATE INDEX v1_idx ON v1(f3); -ERROR HY000: 'test.v1' is not BASE TABLE +ERROR HY000: 'test.v1' is not of type 'BASE TABLE' DROP TABLE t1; DROP VIEW v1; @@ -4604,9 +4604,9 @@ Drop view test.v1 ; Testcase 3.3.1.14 
-------------------------------------------------------------------------------- CREATE OR REPLACE VIEW test.tb2 AS SELECT * From tb2 LIMIT 2; -ERROR HY000: 'test.tb2' is not VIEW +ERROR HY000: 'test.tb2' is not of type 'VIEW' CREATE OR REPLACE VIEW tb2 AS SELECT * From tb2 LIMIT 2; -ERROR HY000: 'test.tb2' is not VIEW +ERROR HY000: 'test.tb2' is not of type 'VIEW' Testcase 3.3.1.15 -------------------------------------------------------------------------------- @@ -4784,7 +4784,7 @@ CREATE VIEW v2 AS Select * from test.v1; ERROR 42S02: Table 'test.v1' doesn't exist DROP VIEW IF EXISTS v2; Warnings: -Note 4067 Unknown VIEW: 'test.v2' +Note 4068 Unknown VIEW: 'test.v2' Testcase 3.3.1.25 -------------------------------------------------------------------------------- @@ -8387,7 +8387,7 @@ Call sp1() ; ERROR 42000: PROCEDURE test.sp1 does not exist Drop view if exists test.v1 ; Warnings: -Note 4067 Unknown VIEW: 'test.v1' +Note 4068 Unknown VIEW: 'test.v1' Drop procedure sp1 ; ERROR 42000: PROCEDURE test.sp1 does not exist @@ -22989,7 +22989,7 @@ CREATE VIEW v1 AS SELECT f1 FROM t1; DROP VIEW IF EXISTS v1; DROP VIEW IF EXISTS v1; Warnings: -Note 4067 Unknown VIEW: 'test.v1' +Note 4068 Unknown VIEW: 'test.v1' Testcase 3.3.1.68 -------------------------------------------------------------------------------- diff --git a/mysql-test/suite/galera/galera_2nodes.cnf b/mysql-test/suite/galera/galera_2nodes.cnf index 34bf1fc58fe..f120775d433 100644 --- a/mysql-test/suite/galera/galera_2nodes.cnf +++ b/mysql-test/suite/galera/galera_2nodes.cnf @@ -16,7 +16,7 @@ wsrep-sync-wait=7 #ist_port=@OPT.port #sst_port=@OPT.port wsrep-cluster-address=gcomm:// -wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.1.#galera_port;gcache.size=10M;evs.suspect_timeout=PT10S' +wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.1.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S;gcache.size=10M' 
wsrep_node_incoming_address=127.0.0.1:@mysqld.1.port wsrep_sst_receive_address='127.0.0.1:@mysqld.1.#sst_port' diff --git a/mysql-test/suite/galera/r/MW-309.result b/mysql-test/suite/galera/r/MW-309.result new file mode 100644 index 00000000000..3dd49a041ee --- /dev/null +++ b/mysql-test/suite/galera/r/MW-309.result @@ -0,0 +1,22 @@ +CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +SET GLOBAL wsrep_max_ws_rows = 2; +SET AUTOCOMMIT=OFF; +START TRANSACTION; +SELECT * FROM t1 GROUP BY f1; +f1 +1 +SELECT * FROM t1 GROUP BY f1; +f1 +1 +SELECT * FROM t1 GROUP BY f1; +f1 +1 +SHOW STATUS LIKE '%wsrep%'; +SET GLOBAL wsrep_max_ws_rows = 0; +DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/MW-309.test b/mysql-test/suite/galera/t/MW-309.test new file mode 100644 index 00000000000..351a508ecec --- /dev/null +++ b/mysql-test/suite/galera/t/MW-309.test @@ -0,0 +1,32 @@ +# +# MW-309 Regression: wsrep_max_ws_rows limit also applies to certain SELECT queries +# + +--source include/galera_cluster.inc +--source include/have_innodb.inc + +CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; + +SET GLOBAL wsrep_max_ws_rows = 2; + +SET AUTOCOMMIT=OFF; +START TRANSACTION; +SELECT * FROM t1 GROUP BY f1; +SELECT * FROM t1 GROUP BY f1; + +--error 0 +SELECT * FROM t1 GROUP BY f1; + +--disable_result_log +--error 0 +SHOW STATUS LIKE '%wsrep%'; +--enable_result_log + +SET GLOBAL wsrep_max_ws_rows = 0; +DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.cnf b/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.cnf index 7d684cef67d..b4bf5f02171 100644 --- 
a/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.cnf +++ b/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.cnf @@ -1,9 +1,8 @@ !include ../galera_2nodes.cnf [mysqld] -#wsrep_sst_method=xtrabackup-v2 -#wsrep_sst_auth="root:" -#wsrep_debug=ON +wsrep_sst_method=xtrabackup-v2 +wsrep_sst_auth="root:" [mysqld.1] wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=1;pc.ignore_sb=true' diff --git a/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.test b/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.test index 251450f7099..30ce9bc4ceb 100644 --- a/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.test +++ b/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.test @@ -1,6 +1,6 @@ # # Test that autoincrement works correctly while the cluster membership -# is changing and IST takes place. +# is changing and SST takes place. # --source include/big_test.inc diff --git a/mysql-test/suite/galera/t/galera_var_cluster_address.test b/mysql-test/suite/galera/t/galera_var_cluster_address.test index 0f857eb1aac..03706bbbb12 100644 --- a/mysql-test/suite/galera/t/galera_var_cluster_address.test +++ b/mysql-test/suite/galera/t/galera_var_cluster_address.test @@ -76,4 +76,3 @@ CALL mtr.add_suppression("WSREP: wsrep::connect\\(gcomm://192.0.2.1\\) failed: 7 --source include/galera_end.inc --echo # End of test - diff --git a/mysql-test/suite/galera_3nodes/disabled.def b/mysql-test/suite/galera_3nodes/disabled.def index ca55c41ff72..502e7bfba68 100644 --- a/mysql-test/suite/galera_3nodes/disabled.def +++ b/mysql-test/suite/galera_3nodes/disabled.def @@ -5,3 +5,4 @@ galera_slave_options_do :MDEV-8798 galera_slave_options_ignore : MDEV-8798 galera_pc_bootstrap : TODO: Investigate: Timeout in wait_condition.inc galera_pc_weight : Test times out +galera_safe_to_bootstrap : I Really dont know :( diff --git a/mysql-test/suite/galera_3nodes/galera_3nodes.cnf b/mysql-test/suite/galera_3nodes/galera_3nodes.cnf index 1ed273fdcb5..305bdaaae3a 
100644 --- a/mysql-test/suite/galera_3nodes/galera_3nodes.cnf +++ b/mysql-test/suite/galera_3nodes/galera_3nodes.cnf @@ -14,11 +14,10 @@ wsrep-causal-reads=ON wsrep-sync-wait=7 [mysqld.1] -#galera_port=@OPT.port -#ist_port=@OPT.port -#sst_port=@OPT.port -wsrep-cluster-address=gcomm:// -wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=10M;evs.suspect_timeout=PT10S' +wsrep-cluster-address='gcomm://' +wsrep_provider_options='base_port=@mysqld.1.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S' + +wsrep_sst_receive_address=127.0.0.2:@mysqld.1.#sst_port wsrep_node_incoming_address=127.0.0.1:@mysqld.1.port wsrep_sst_receive_address='127.0.0.1:@mysqld.1.#sst_port' diff --git a/mysql-test/suite/innodb/include/innodb-page-compression.inc b/mysql-test/suite/innodb/include/innodb-page-compression.inc new file mode 100644 index 00000000000..0e7ae8cf029 --- /dev/null +++ b/mysql-test/suite/innodb/include/innodb-page-compression.inc @@ -0,0 +1,126 @@ +create table innodb_normal (c1 int not null auto_increment primary key, b char(200)) engine=innodb; +create table innodb_page_compressed1 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=1; +create table innodb_page_compressed2 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=2; +create table innodb_page_compressed3 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=3; +create table innodb_page_compressed4 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=4; +create table innodb_page_compressed5 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=5; +create table innodb_page_compressed6 (c1 int not null auto_increment primary key, b char(200)) engine=innodb 
page_compressed=1 page_compression_level=6; +create table innodb_page_compressed7 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=7; +create table innodb_page_compressed8 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=8; +create table innodb_page_compressed9 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=9; + +--disable_query_log +begin; +let $i = 2000; +while ($i) +{ + insert into innodb_normal(b) values(REPEAT('Aa',50)); + insert into innodb_normal(b) values(REPEAT('a',100)); + insert into innodb_normal(b) values(REPEAT('b',100)); + insert into innodb_normal(b) values(REPEAT('0',100)); + insert into innodb_normal(b) values(REPEAT('1',100)); + dec $i; +} + +insert into innodb_page_compressed1 select * from innodb_normal; +insert into innodb_page_compressed2 select * from innodb_normal; +insert into innodb_page_compressed3 select * from innodb_normal; +insert into innodb_page_compressed4 select * from innodb_normal; +insert into innodb_page_compressed5 select * from innodb_normal; +insert into innodb_page_compressed6 select * from innodb_normal; +insert into innodb_page_compressed7 select * from innodb_normal; +insert into innodb_page_compressed8 select * from innodb_normal; +insert into innodb_page_compressed9 select * from innodb_normal; +commit; +--enable_query_log + +select count(*) from innodb_page_compressed1; +select count(*) from innodb_page_compressed3; +select count(*) from innodb_page_compressed4; +select count(*) from innodb_page_compressed5; +select count(*) from innodb_page_compressed6; +select count(*) from innodb_page_compressed6; +select count(*) from innodb_page_compressed7; +select count(*) from innodb_page_compressed8; +select count(*) from innodb_page_compressed9; + +# +# Wait until pages are really compressed +# +let $wait_condition= select 
variable_value > 0 from information_schema.global_status where variable_name = 'INNODB_NUM_PAGES_PAGE_COMPRESSED'; +--source include/wait_condition.inc + +--let $MYSQLD_DATADIR=`select @@datadir` + +# shutdown before grep + +--source include/shutdown_mysqld.inc + +--let t1_IBD = $MYSQLD_DATADIR/test/innodb_normal.ibd +--let SEARCH_RANGE = 10000000 +--let SEARCH_PATTERN=AaAaAaAa +--echo # innodb_normal expected FOUND +-- let SEARCH_FILE=$t1_IBD +-- source include/search_pattern_in_file.inc +--let t1_IBD = $MYSQLD_DATADIR/test/innodb_page_compressed1.ibd +--echo # innodb_page_compressed1 page compressed expected NOT FOUND +-- let SEARCH_FILE=$t1_IBD +-- source include/search_pattern_in_file.inc +--let t1_IBD = $MYSQLD_DATADIR/test/innodb_page_compressed2.ibd +--echo # innodb_page_compressed2 page compressed expected NOT FOUND +-- let SEARCH_FILE=$t1_IBD +-- source include/search_pattern_in_file.inc +--let t1_IBD = $MYSQLD_DATADIR/test/innodb_page_compressed3.ibd +--echo # innodb_page_compressed3 page compressed expected NOT FOUND +-- let SEARCH_FILE=$t1_IBD +-- source include/search_pattern_in_file.inc +--let t1_IBD = $MYSQLD_DATADIR/test/innodb_page_compressed4.ibd +--echo # innodb_page_compressed4 page compressed expected NOT FOUND +-- let SEARCH_FILE=$t1_IBD +-- source include/search_pattern_in_file.inc +--let t1_IBD = $MYSQLD_DATADIR/test/innodb_page_compressed5.ibd +--echo # innodb_page_compressed5 page compressed expected NOT FOUND +-- let SEARCH_FILE=$t1_IBD +-- source include/search_pattern_in_file.inc +--let t1_IBD = $MYSQLD_DATADIR/test/innodb_page_compressed6.ibd +--echo # innodb_page_compressed6 page compressed expected NOT FOUND +-- let SEARCH_FILE=$t1_IBD +-- source include/search_pattern_in_file.inc +--let t1_IBD = $MYSQLD_DATADIR/test/innodb_page_compressed7.ibd +--echo # innodb_page_compressed7 page compressed expected NOT FOUND +-- let SEARCH_FILE=$t1_IBD +-- source include/search_pattern_in_file.inc +--let t1_IBD = 
$MYSQLD_DATADIR/test/innodb_page_compressed8.ibd +--echo # innodb_page_compressed8 page compressed expected NOT FOUND +-- let SEARCH_FILE=$t1_IBD +-- source include/search_pattern_in_file.inc +--let t1_IBD = $MYSQLD_DATADIR/test/innodb_page_compressed9.ibd +--echo # innodb_page_compressed9 page compressed expected NOT FOUND +-- let SEARCH_FILE=$t1_IBD +-- source include/search_pattern_in_file.inc + +-- source include/start_mysqld.inc + +select count(*) from innodb_page_compressed1; +select count(*) from innodb_page_compressed3; +select count(*) from innodb_page_compressed4; +select count(*) from innodb_page_compressed5; +select count(*) from innodb_page_compressed6; +select count(*) from innodb_page_compressed6; +select count(*) from innodb_page_compressed7; +select count(*) from innodb_page_compressed8; +select count(*) from innodb_page_compressed9; + +let $wait_condition= select variable_value > 0 from information_schema.global_status where variable_name = 'INNODB_NUM_PAGES_PAGE_DECOMPRESSED'; +--source include/wait_condition.inc + +drop table innodb_normal; +drop table innodb_page_compressed1; +drop table innodb_page_compressed2; +drop table innodb_page_compressed3; +drop table innodb_page_compressed4; +drop table innodb_page_compressed5; +drop table innodb_page_compressed6; +drop table innodb_page_compressed7; +drop table innodb_page_compressed8; +drop table innodb_page_compressed9; diff --git a/mysql-test/suite/innodb/include/wait_all_purged.inc b/mysql-test/suite/innodb/include/wait_all_purged.inc new file mode 100644 index 00000000000..7dbb59a5d32 --- /dev/null +++ b/mysql-test/suite/innodb/include/wait_all_purged.inc @@ -0,0 +1,19 @@ +# Wait for everything to be purged. +# The user should have set innodb_purge_rseg_truncate_frequency=1. 
+ +let $wait_counter= 300; +while ($wait_counter) +{ + --replace_regex /.*History list length ([0-9]+).*/\1/ + let $remaining= `SHOW ENGINE INNODB STATUS`; + if ($remaining == 'InnoDB 0') + { + let $wait_counter= 0; + } + if ($wait_counter) + { + real_sleep 0.1; + dec $wait_counter; + } +} +echo $remaining transactions not purged; diff --git a/mysql-test/suite/innodb/r/doublewrite.result b/mysql-test/suite/innodb/r/doublewrite.result index 6b913f49972..61c81ee9dff 100644 --- a/mysql-test/suite/innodb/r/doublewrite.result +++ b/mysql-test/suite/innodb/r/doublewrite.result @@ -231,6 +231,7 @@ set global innodb_buf_flush_list_now = 1; check table t1; Table Op Msg_type Msg_text test.t1 check status OK +FOUND 1 /\[ERROR\] InnoDB: .*test.t1\.ibd.*/ in mysqld.1.err select f1, f2 from t1; f1 f2 1 ############ @@ -238,6 +239,13 @@ f1 f2 3 //////////// 4 ------------ 5 ............ -# Test End -# --------------------------------------------------------------- drop table t1; +# +# MDEV-12600 crash during install_db with innodb_page_size=32K +# and ibdata1=3M +# +SELECT * FROM INFORMATION_SCHEMA.ENGINES +WHERE engine = 'innodb' +AND support IN ('YES', 'DEFAULT', 'ENABLED'); +ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS +FOUND 1 /\[ERROR\] InnoDB: Cannot create doublewrite buffer/ in mysqld.1.err diff --git a/mysql-test/suite/innodb/r/drop_table_background.result b/mysql-test/suite/innodb/r/drop_table_background.result new file mode 100644 index 00000000000..a6f5672ba7f --- /dev/null +++ b/mysql-test/suite/innodb/r/drop_table_background.result @@ -0,0 +1,9 @@ +CREATE TABLE t(c0 SERIAL, c1 INT, c2 INT, c3 INT, c4 INT, +KEY(c1), KEY(c2), KEY(c2,c1), +KEY(c3), KEY(c3,c1), KEY(c3,c2), KEY(c3,c2,c1), +KEY(c4), KEY(c4,c1), KEY(c4,c2), KEY(c4,c2,c1), +KEY(c4,c3), KEY(c4,c3,c1), KEY(c4,c3,c2), KEY(c4,c3,c2,c1)) ENGINE=InnoDB; +SET DEBUG_DBUG='+d,row_drop_table_add_to_background'; +DROP TABLE t; +CREATE TABLE t (a INT) ENGINE=InnoDB; +DROP TABLE t; diff --git 
a/mysql-test/suite/innodb/r/innodb-page_compression_default.result b/mysql-test/suite/innodb/r/innodb-page_compression_default.result index 9c1d8924230..39a14072571 100644 --- a/mysql-test/suite/innodb/r/innodb-page_compression_default.result +++ b/mysql-test/suite/innodb/r/innodb-page_compression_default.result @@ -1,19 +1,96 @@ -SET GLOBAL innodb_file_per_table = ON; -create table t1 (c1 int not null primary key auto_increment, b char(200)) engine=innodb page_compressed=1; -insert into t1 values(NULL,'compressed_text_aaaaaaaaabbbbbbbbbbbbbccccccccccccc'); -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -# t1 compressed expected NOT FOUND -NOT FOUND /compressed_text/ in t1.ibd -drop table t1; +call mtr.add_suppression("InnoDB: Compression failed for space [0-9]+ name test/innodb_page_compressed[0-9] len [0-9]+ err 2 write_size [0-9]+."); +create table innodb_normal (c1 int not null auto_increment primary key, b char(200)) engine=innodb; +create table innodb_page_compressed1 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=1; +create table innodb_page_compressed2 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=2; +create table innodb_page_compressed3 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=3; +create table innodb_page_compressed4 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 
page_compression_level=4; +create table innodb_page_compressed5 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=5; +create table innodb_page_compressed6 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=6; +create table innodb_page_compressed7 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=7; +create table innodb_page_compressed8 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=8; +create table innodb_page_compressed9 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=9; +select count(*) from innodb_page_compressed1; +count(*) +10000 +select count(*) from innodb_page_compressed3; +count(*) +10000 +select count(*) from innodb_page_compressed4; +count(*) +10000 +select count(*) from innodb_page_compressed5; +count(*) +10000 +select count(*) from innodb_page_compressed6; +count(*) +10000 +select count(*) from innodb_page_compressed6; +count(*) +10000 +select count(*) from innodb_page_compressed7; +count(*) +10000 +select count(*) from innodb_page_compressed8; +count(*) +10000 +select count(*) from innodb_page_compressed9; +count(*) +10000 +# innodb_normal expected FOUND +FOUND 24084 /AaAaAaAa/ in innodb_normal.ibd +# innodb_page_compressed1 page compressed expected NOT FOUND +NOT FOUND /AaAaAaAa/ in innodb_page_compressed1.ibd +# innodb_page_compressed2 page compressed expected NOT FOUND +NOT FOUND /AaAaAaAa/ in innodb_page_compressed2.ibd +# innodb_page_compressed3 page compressed expected NOT FOUND +NOT FOUND /AaAaAaAa/ in innodb_page_compressed3.ibd +# innodb_page_compressed4 page compressed expected NOT FOUND +NOT FOUND /AaAaAaAa/ in innodb_page_compressed4.ibd +# innodb_page_compressed5 page compressed expected NOT FOUND +NOT FOUND /AaAaAaAa/ in 
innodb_page_compressed5.ibd +# innodb_page_compressed6 page compressed expected NOT FOUND +NOT FOUND /AaAaAaAa/ in innodb_page_compressed6.ibd +# innodb_page_compressed7 page compressed expected NOT FOUND +NOT FOUND /AaAaAaAa/ in innodb_page_compressed7.ibd +# innodb_page_compressed8 page compressed expected NOT FOUND +NOT FOUND /AaAaAaAa/ in innodb_page_compressed8.ibd +# innodb_page_compressed9 page compressed expected NOT FOUND +NOT FOUND /AaAaAaAa/ in innodb_page_compressed9.ibd +select count(*) from innodb_page_compressed1; +count(*) +10000 +select count(*) from innodb_page_compressed3; +count(*) +10000 +select count(*) from innodb_page_compressed4; +count(*) +10000 +select count(*) from innodb_page_compressed5; +count(*) +10000 +select count(*) from innodb_page_compressed6; +count(*) +10000 +select count(*) from innodb_page_compressed6; +count(*) +10000 +select count(*) from innodb_page_compressed7; +count(*) +10000 +select count(*) from innodb_page_compressed8; +count(*) +10000 +select count(*) from innodb_page_compressed9; +count(*) +10000 +drop table innodb_normal; +drop table innodb_page_compressed1; +drop table innodb_page_compressed2; +drop table innodb_page_compressed3; +drop table innodb_page_compressed4; +drop table innodb_page_compressed5; +drop table innodb_page_compressed6; +drop table innodb_page_compressed7; +drop table innodb_page_compressed8; +drop table innodb_page_compressed9; +#done diff --git a/mysql-test/suite/innodb/r/innodb-page_compression_snappy.result b/mysql-test/suite/innodb/r/innodb-page_compression_snappy.result index 442885755fd..e99e55ed9a8 100644 --- a/mysql-test/suite/innodb/r/innodb-page_compression_snappy.result +++ b/mysql-test/suite/innodb/r/innodb-page_compression_snappy.result @@ -1,430 +1,90 @@ -call mtr.add_suppression("InnoDB: Compression failed for space.*"); -set global innodb_compression_algorithm = 6; -create table innodb_compressed(c1 int, b char(200)) engine=innodb row_format=compressed key_block_size=8; -show 
warnings; -Level Code Message -create table innodb_normal (c1 int, b char(200)) engine=innodb; -show warnings; -Level Code Message -create table innodb_page_compressed1 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=1; -show warnings; -Level Code Message -show create table innodb_page_compressed1; -Table Create Table -innodb_page_compressed1 CREATE TABLE `innodb_page_compressed1` ( - `c1` int(11) DEFAULT NULL, - `b` char(200) DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=1 -create table innodb_page_compressed2 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=2; -show warnings; -Level Code Message -show create table innodb_page_compressed2; -Table Create Table -innodb_page_compressed2 CREATE TABLE `innodb_page_compressed2` ( - `c1` int(11) DEFAULT NULL, - `b` char(200) DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=2 -create table innodb_page_compressed3 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=3; -show warnings; -Level Code Message -show create table innodb_page_compressed3; -Table Create Table -innodb_page_compressed3 CREATE TABLE `innodb_page_compressed3` ( - `c1` int(11) DEFAULT NULL, - `b` char(200) DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=3 -create table innodb_page_compressed4 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=4; -show warnings; -Level Code Message -show create table innodb_page_compressed4; -Table Create Table -innodb_page_compressed4 CREATE TABLE `innodb_page_compressed4` ( - `c1` int(11) DEFAULT NULL, - `b` char(200) DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=4 -create table innodb_page_compressed5 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=5; -show warnings; -Level Code 
Message -show create table innodb_page_compressed5; -Table Create Table -innodb_page_compressed5 CREATE TABLE `innodb_page_compressed5` ( - `c1` int(11) DEFAULT NULL, - `b` char(200) DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=5 -create table innodb_page_compressed6 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=6; -show warnings; -Level Code Message -show create table innodb_page_compressed6; -Table Create Table -innodb_page_compressed6 CREATE TABLE `innodb_page_compressed6` ( - `c1` int(11) DEFAULT NULL, - `b` char(200) DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=6 -create table innodb_page_compressed7 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=7; -show warnings; -Level Code Message -show create table innodb_page_compressed7; -Table Create Table -innodb_page_compressed7 CREATE TABLE `innodb_page_compressed7` ( - `c1` int(11) DEFAULT NULL, - `b` char(200) DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=7 -create table innodb_page_compressed8 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=8; -show warnings; -Level Code Message -show create table innodb_page_compressed8; -Table Create Table -innodb_page_compressed8 CREATE TABLE `innodb_page_compressed8` ( - `c1` int(11) DEFAULT NULL, - `b` char(200) DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=8 -create table innodb_page_compressed9 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=9; -show warnings; -Level Code Message -show create table innodb_page_compressed9; -Table Create Table -innodb_page_compressed9 CREATE TABLE `innodb_page_compressed9` ( - `c1` int(11) DEFAULT NULL, - `b` char(200) DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=9 
-create procedure innodb_insert_proc (repeat_count int) -begin -declare current_num int; -set current_num = 0; -while current_num < repeat_count do -insert into innodb_normal values(current_num,'aaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbccccccccccccccccccccccc'); -set current_num = current_num + 1; -end while; -end// -commit; -set autocommit=0; -call innodb_insert_proc(5000); -commit; -set autocommit=1; -select count(*) from innodb_normal; -count(*) -5000 -insert into innodb_compressed select * from innodb_normal; -insert into innodb_page_compressed1 select * from innodb_normal; -insert into innodb_page_compressed2 select * from innodb_normal; -insert into innodb_page_compressed3 select * from innodb_normal; -insert into innodb_page_compressed4 select * from innodb_normal; -insert into innodb_page_compressed5 select * from innodb_normal; -insert into innodb_page_compressed6 select * from innodb_normal; -insert into innodb_page_compressed7 select * from innodb_normal; -insert into innodb_page_compressed8 select * from innodb_normal; -insert into innodb_page_compressed9 select * from innodb_normal; -commit; -select count(*) from innodb_compressed; -count(*) -5000 +call mtr.add_suppression("InnoDB: Compression failed for space [0-9]+ name test/innodb_page_compressed[0-9] len [0-9]+ err 2 write_size [0-9]+."); +set global innodb_compression_algorithm = snappy; +create table innodb_normal (c1 int not null auto_increment primary key, b char(200)) engine=innodb; +create table innodb_page_compressed1 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=1; +create table innodb_page_compressed2 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=2; +create table innodb_page_compressed3 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=3; +create table innodb_page_compressed4 (c1 int not null 
auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=4; +create table innodb_page_compressed5 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=5; +create table innodb_page_compressed6 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=6; +create table innodb_page_compressed7 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=7; +create table innodb_page_compressed8 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=8; +create table innodb_page_compressed9 (c1 int not null auto_increment primary key, b char(200)) engine=innodb page_compressed=1 page_compression_level=9; select count(*) from innodb_page_compressed1; count(*) -5000 -select count(*) from innodb_page_compressed1 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed2 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed3 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed4 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed5 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed6 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed7 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed8 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed9 where c1 < 500000; -count(*) -5000 -alter table innodb_normal page_compressed=1 page_compression_level=8; -show warnings; -Level Code Message -show create table innodb_normal; -Table Create Table -innodb_normal CREATE TABLE `innodb_normal` ( - `c1` int(11) DEFAULT NULL, - `b` char(200) DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 
`page_compression_level`=8 -alter table innodb_compressed row_format=default page_compressed=1 page_compression_level=8 key_block_size=0; -show warnings; -Level Code Message -show create table innodb_compressed; -Table Create Table -innodb_compressed CREATE TABLE `innodb_compressed` ( - `c1` int(11) DEFAULT NULL, - `b` char(200) DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1 `page_compressed`=1 `page_compression_level`=8 -update innodb_page_compressed1 set c1 = c1 + 1; -update innodb_page_compressed2 set c1 = c1 + 1; -update innodb_page_compressed3 set c1 = c1 + 1; -update innodb_page_compressed4 set c1 = c1 + 1; -update innodb_page_compressed5 set c1 = c1 + 1; -update innodb_page_compressed6 set c1 = c1 + 1; -update innodb_page_compressed7 set c1 = c1 + 1; -update innodb_page_compressed8 set c1 = c1 + 1; -update innodb_page_compressed9 set c1 = c1 + 1; -select count(*) from innodb_compressed; -count(*) -5000 +10000 +select count(*) from innodb_page_compressed3; +count(*) +10000 +select count(*) from innodb_page_compressed4; +count(*) +10000 +select count(*) from innodb_page_compressed5; +count(*) +10000 +select count(*) from innodb_page_compressed6; +count(*) +10000 +select count(*) from innodb_page_compressed6; +count(*) +10000 +select count(*) from innodb_page_compressed7; +count(*) +10000 +select count(*) from innodb_page_compressed8; +count(*) +10000 +select count(*) from innodb_page_compressed9; +count(*) +10000 +# innodb_normal expected FOUND +FOUND 24084 /AaAaAaAa/ in innodb_normal.ibd +# innodb_page_compressed1 page compressed expected NOT FOUND +NOT FOUND /AaAaAaAa/ in innodb_page_compressed1.ibd +# innodb_page_compressed2 page compressed expected NOT FOUND +NOT FOUND /AaAaAaAa/ in innodb_page_compressed2.ibd +# innodb_page_compressed3 page compressed expected NOT FOUND +NOT FOUND /AaAaAaAa/ in innodb_page_compressed3.ibd +# innodb_page_compressed4 page compressed expected NOT FOUND +NOT FOUND /AaAaAaAa/ in innodb_page_compressed4.ibd +# 
innodb_page_compressed5 page compressed expected NOT FOUND +NOT FOUND /AaAaAaAa/ in innodb_page_compressed5.ibd +# innodb_page_compressed6 page compressed expected NOT FOUND +NOT FOUND /AaAaAaAa/ in innodb_page_compressed6.ibd +# innodb_page_compressed7 page compressed expected NOT FOUND +NOT FOUND /AaAaAaAa/ in innodb_page_compressed7.ibd +# innodb_page_compressed8 page compressed expected NOT FOUND +NOT FOUND /AaAaAaAa/ in innodb_page_compressed8.ibd +# innodb_page_compressed9 page compressed expected NOT FOUND +NOT FOUND /AaAaAaAa/ in innodb_page_compressed9.ibd select count(*) from innodb_page_compressed1; count(*) -5000 -select count(*) from innodb_page_compressed1 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed2 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed3 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed4 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed5 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed6 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed7 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed8 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed9 where c1 < 500000; -count(*) -5000 -update innodb_page_compressed1 set c1 = c1 + 1; -update innodb_page_compressed2 set c1 = c1 + 1; -update innodb_page_compressed3 set c1 = c1 + 1; -update innodb_page_compressed4 set c1 = c1 + 1; -update innodb_page_compressed5 set c1 = c1 + 1; -update innodb_page_compressed6 set c1 = c1 + 1; -update innodb_page_compressed7 set c1 = c1 + 1; -update innodb_page_compressed8 set c1 = c1 + 1; -update innodb_page_compressed9 set c1 = c1 + 1; -select count(*) from innodb_compressed; -count(*) -5000 -select count(*) from innodb_page_compressed1; -count(*) -5000 -select count(*) from innodb_page_compressed1 where c1 < 500000; -count(*) 
-5000 -select count(*) from innodb_page_compressed2 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed3 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed4 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed5 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed6 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed7 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed8 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed9 where c1 < 500000; -count(*) -5000 -set global innodb_compression_algorithm = 1; -update innodb_page_compressed1 set c1 = c1 + 1; -update innodb_page_compressed2 set c1 = c1 + 1; -update innodb_page_compressed3 set c1 = c1 + 1; -update innodb_page_compressed4 set c1 = c1 + 1; -update innodb_page_compressed5 set c1 = c1 + 1; -update innodb_page_compressed6 set c1 = c1 + 1; -update innodb_page_compressed7 set c1 = c1 + 1; -update innodb_page_compressed8 set c1 = c1 + 1; -update innodb_page_compressed9 set c1 = c1 + 1; -commit; -select count(*) from innodb_compressed; -count(*) -5000 -select count(*) from innodb_page_compressed1; -count(*) -5000 -select count(*) from innodb_page_compressed1 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed2 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed3 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed4 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed5 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed6 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed7 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed8 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed9 where c1 < 500000; -count(*) -5000 
-update innodb_page_compressed1 set c1 = c1 + 1; -update innodb_page_compressed2 set c1 = c1 + 1; -update innodb_page_compressed3 set c1 = c1 + 1; -update innodb_page_compressed4 set c1 = c1 + 1; -update innodb_page_compressed5 set c1 = c1 + 1; -update innodb_page_compressed6 set c1 = c1 + 1; -update innodb_page_compressed7 set c1 = c1 + 1; -update innodb_page_compressed8 set c1 = c1 + 1; -update innodb_page_compressed9 set c1 = c1 + 1; -select count(*) from innodb_compressed; -count(*) -5000 -select count(*) from innodb_page_compressed1; -count(*) -5000 -select count(*) from innodb_page_compressed1 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed2 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed3 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed4 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed5 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed6 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed7 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed8 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed9 where c1 < 500000; -count(*) -5000 -set global innodb_compression_algorithm = 0; -update innodb_page_compressed1 set c1 = c1 + 1; -update innodb_page_compressed2 set c1 = c1 + 1; -update innodb_page_compressed3 set c1 = c1 + 1; -update innodb_page_compressed4 set c1 = c1 + 1; -update innodb_page_compressed5 set c1 = c1 + 1; -update innodb_page_compressed6 set c1 = c1 + 1; -update innodb_page_compressed7 set c1 = c1 + 1; -update innodb_page_compressed8 set c1 = c1 + 1; -update innodb_page_compressed9 set c1 = c1 + 1; -commit; -select count(*) from innodb_compressed; -count(*) -5000 -select count(*) from innodb_page_compressed1; -count(*) -5000 -select count(*) from innodb_page_compressed1 where c1 < 500000; -count(*) -5000 -select 
count(*) from innodb_page_compressed2 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed3 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed4 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed5 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed6 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed7 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed8 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed9 where c1 < 500000; -count(*) -5000 -update innodb_page_compressed1 set c1 = c1 + 1; -update innodb_page_compressed2 set c1 = c1 + 1; -update innodb_page_compressed3 set c1 = c1 + 1; -update innodb_page_compressed4 set c1 = c1 + 1; -update innodb_page_compressed5 set c1 = c1 + 1; -update innodb_page_compressed6 set c1 = c1 + 1; -update innodb_page_compressed7 set c1 = c1 + 1; -update innodb_page_compressed8 set c1 = c1 + 1; -update innodb_page_compressed9 set c1 = c1 + 1; -select count(*) from innodb_compressed; -count(*) -5000 -select count(*) from innodb_page_compressed1; -count(*) -5000 -select count(*) from innodb_page_compressed1 where c1 < 500000; -count(*) -5000 -select count(*) from innodb_page_compressed2 where c1 < 500000; +10000 +select count(*) from innodb_page_compressed3; count(*) -5000 -select count(*) from innodb_page_compressed3 where c1 < 500000; +10000 +select count(*) from innodb_page_compressed4; count(*) -5000 -select count(*) from innodb_page_compressed4 where c1 < 500000; +10000 +select count(*) from innodb_page_compressed5; count(*) -5000 -select count(*) from innodb_page_compressed5 where c1 < 500000; +10000 +select count(*) from innodb_page_compressed6; count(*) -5000 -select count(*) from innodb_page_compressed6 where c1 < 500000; +10000 +select count(*) from innodb_page_compressed6; count(*) -5000 -select count(*) from innodb_page_compressed7 
where c1 < 500000; +10000 +select count(*) from innodb_page_compressed7; count(*) -5000 -select count(*) from innodb_page_compressed8 where c1 < 500000; +10000 +select count(*) from innodb_page_compressed8; count(*) -5000 -select count(*) from innodb_page_compressed9 where c1 < 500000; +10000 +select count(*) from innodb_page_compressed9; count(*) -5000 -drop procedure innodb_insert_proc; +10000 drop table innodb_normal; -drop table innodb_compressed; drop table innodb_page_compressed1; drop table innodb_page_compressed2; drop table innodb_page_compressed3; @@ -434,3 +94,4 @@ drop table innodb_page_compressed6; drop table innodb_page_compressed7; drop table innodb_page_compressed8; drop table innodb_page_compressed9; +#done diff --git a/mysql-test/suite/innodb/r/innodb.result b/mysql-test/suite/innodb/r/innodb.result index ad8b3cac4ed..edac0d0ab69 100644 --- a/mysql-test/suite/innodb/r/innodb.result +++ b/mysql-test/suite/innodb/r/innodb.result @@ -1917,6 +1917,9 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ref v v 13 const # Using where; Using index alter table t1 add unique(v); ERROR 23000: Duplicate entry '{ ' for key 'v_2' +show warnings; +Level Code Message +Error 1062 Duplicate entry 'v' for key 'v_2' alter table t1 add key(v); Warnings: Note 1831 Duplicate index `v_2`. 
This is deprecated and will be disallowed in a future release diff --git a/mysql-test/suite/innodb/r/innodb_stats_del_mark.result b/mysql-test/suite/innodb/r/innodb_stats_del_mark.result deleted file mode 100644 index 67538bf8eb3..00000000000 --- a/mysql-test/suite/innodb/r/innodb_stats_del_mark.result +++ /dev/null @@ -1,91 +0,0 @@ -# -# Bug 23333990 PERSISTENT INDEX STATISTICS UPDATE BEFORE -# TRANSACTION IS COMMITTED -# -"Test 1:- Uncommited delete test" -CREATE TABLE t1 (id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, -val INT UNSIGNED NOT NULL, -INDEX (val)) ENGINE=INNODB -STATS_PERSISTENT=1,STATS_AUTO_RECALC=1; -INSERT INTO t1 (val) VALUES (CEIL(RAND()*20)); -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -SELECT COUNT(*) FROM t1; -COUNT(*) -262144 -ANALYZE TABLE t1; -Table Op Msg_type Msg_text -test.t1 analyze status OK -connect con1, localhost, root,,; -START TRANSACTION; -DELETE FROM t1; -SELECT COUNT(*) FROM t1; -connection default; -Test correctly estimates the number of rows as > 20000 -even when in other uncommmited transaction -all rows 
have been deleted. -connection con1; -COUNT(*) -0 -commit; -connection default; -Test 2:- Insert and rollback test -CREATE TABLE t2 (id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, -val INT UNSIGNED NOT NULL, -INDEX (val)) ENGINE=INNODB -STATS_PERSISTENT=1,STATS_AUTO_RECALC=1; -connection con1; -START TRANSACTION; -INSERT INTO t2 (val) VALUES (CEIL(RAND()*20)); -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -SELECT COUNT(*) FROM t2; -connection default; -select count(*) from t2; -count(*) -0 -Test correctly estimates the number of rows as > 20000 -even when in other uncommited transaction -many rows are inserted. -connection con1; -COUNT(*) -262144 -Rollback the insert -rollback; -disconnect con1; -connection default; -Test correctly estimates the number of rows as 1 -after rollback. 
-DROP TABLE t1,t2; diff --git a/mysql-test/suite/innodb/r/innodb_stats_persistent.result b/mysql-test/suite/innodb/r/innodb_stats_persistent.result new file mode 100644 index 00000000000..f4de4b6b82e --- /dev/null +++ b/mysql-test/suite/innodb/r/innodb_stats_persistent.result @@ -0,0 +1,116 @@ +SET @saved_include_delete_marked = @@GLOBAL.innodb_stats_include_delete_marked; +SET GLOBAL innodb_stats_include_delete_marked = ON; +SET @saved_traditional = @@GLOBAL.innodb_stats_traditional; +SET GLOBAL innodb_stats_traditional=false; +SET @saved_modified_counter = @@GLOBAL.innodb_stats_modified_counter; +SET GLOBAL innodb_stats_modified_counter=1; +CREATE TABLE t0 (id SERIAL, val INT UNSIGNED NOT NULL, KEY(val)) +ENGINE=INNODB STATS_PERSISTENT=1,STATS_AUTO_RECALC=1; +CREATE TABLE t1 LIKE t0; +CREATE TABLE t2 LIKE t0; +INSERT INTO t0 (val) VALUES (4); +INSERT INTO t0 (val) SELECT 4 FROM t0; +INSERT INTO t0 (val) SELECT 4 FROM t0; +INSERT INTO t0 (val) SELECT 4 FROM t0; +INSERT INTO t0 (val) SELECT 4 FROM t0; +INSERT INTO t1 SELECT * FROM t0; +SELECT COUNT(*) FROM t1; +COUNT(*) +16 +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +connect con1, localhost, root,,; +START TRANSACTION; +DELETE FROM t1; +SELECT COUNT(*) FROM t1; +connection default; +# With innodb_stats_include_delete_marked=ON, +# DELETE must not affect statistics before COMMIT. +EXPLAIN SELECT * FROM t1 WHERE val=4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref val val 4 const 16 Using index +connection con1; +COUNT(*) +0 +ROLLBACK; +SELECT COUNT(*) FROM t1; +COUNT(*) +16 +EXPLAIN SELECT * FROM t1 WHERE val=4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref val val 4 const 16 Using index +BEGIN; +DELETE FROM t1; +COMMIT; +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +connection default; +BEGIN; +INSERT INTO t2 SELECT * FROM t0; +# The INSERT will show up before COMMIT. 
+EXPLAIN SELECT * FROM t2 WHERE val=4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ref val val 4 const 16 Using index +SELECT COUNT(*) FROM t2; +COUNT(*) +16 +# The ROLLBACK of the INSERT must affect the statistics. +ROLLBACK; +SELECT COUNT(*) FROM t2; +COUNT(*) +0 +connection con1; +EXPLAIN SELECT * FROM t2 WHERE val=4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ref val val 4 const 1 Using index +SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency; +SET GLOBAL innodb_purge_rseg_truncate_frequency = 1; +InnoDB 0 transactions not purged +SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency; +# After COMMIT and purge, the DELETE must show up. +EXPLAIN SELECT * FROM t1 WHERE val=4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref val val 4 const 1 Using index +SET GLOBAL innodb_stats_include_delete_marked = OFF; +BEGIN; +INSERT INTO t1 SELECT * FROM t0; +EXPLAIN SELECT * FROM t1 WHERE val=4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref val val 4 const 16 Using index +ROLLBACK; +EXPLAIN SELECT * FROM t1 WHERE val=4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref val val 4 const 1 Using index +BEGIN; +INSERT INTO t1 SELECT * FROM t0; +COMMIT; +EXPLAIN SELECT * FROM t1 WHERE val=4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref val val 4 const 16 Using index +BEGIN; +DELETE FROM t1; +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +# With innodb_stats_include_delete_marked=OFF, +# DELETE must affect statistics even before COMMIT. +# However, if there was a WHERE condition, +# ha_innobase::records_in_range() would count the delete-marked records. 
+EXPLAIN SELECT * FROM t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL val 4 NULL 1 Using index +ROLLBACK; +EXPLAIN SELECT * FROM t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL val 4 NULL 16 Using index +SELECT COUNT(*) FROM t1; +COUNT(*) +16 +disconnect con1; +connection default; +DROP TABLE t0,t1,t2; +SET GLOBAL innodb_stats_include_delete_marked = @saved_include_delete_marked; +SET GLOBAL innodb_stats_traditional = @saved_traditional; +SET GLOBAL innodb_stats_modified_counter = @saved_modified_counter; diff --git a/mysql-test/suite/innodb/r/row_format_redundant.result b/mysql-test/suite/innodb/r/row_format_redundant.result new file mode 100644 index 00000000000..2bc8769092f --- /dev/null +++ b/mysql-test/suite/innodb/r/row_format_redundant.result @@ -0,0 +1,79 @@ +SET GLOBAL innodb_file_per_table=1; +# +# Bug#21644827 - FTS, ASSERT !SRV_READ_ONLY_MODE || M_IMPL.M_LOG_MODE == +# MTR_LOG_NO_REDO +# +SET GLOBAL innodb_file_per_table=ON; +create table t1 (a int not null, d varchar(15) not null, b +varchar(198) not null, c char(156), +fulltext ftsic(c)) engine=InnoDB +row_format=redundant; +insert into t1 values(123, 'abcdef', 'jghikl', 'mnop'); +insert into t1 values(456, 'abcdef', 'jghikl', 'mnop'); +insert into t1 values(789, 'abcdef', 'jghikl', 'mnop'); +insert into t1 values(134, 'kasdfsdsadf', 'adfjlasdkfjasd', 'adfsadflkasdasdfljasdf'); +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; +SET GLOBAL innodb_file_per_table=OFF; +create table t2 (a int not null, d varchar(15) not null, b +varchar(198) not null, c char(156), fulltext ftsic(c)) engine=InnoDB +row_format=redundant; 
+insert into t2 select * from t1; +create table t3 (a int not null, d varchar(15) not null, b varchar(198), +c varchar(150), index k1(c(99), b(56)), index k2(b(5), c(10))) engine=InnoDB +row_format=redundant; +insert into t3 values(444, 'dddd', 'bbbbb', 'aaaaa'); +insert into t3 values(555, 'eeee', 'ccccc', 'aaaaa'); +SET GLOBAL innodb_fast_shutdown=0; +SELECT COUNT(*) FROM t1; +COUNT(*) +4096 +SELECT COUNT(*) FROM t2; +COUNT(*) +4096 +SELECT COUNT(*) FROM t3; +COUNT(*) +2 +TRUNCATE TABLE t1; +ERROR HY000: Table 't1' is read only +TRUNCATE TABLE t2; +ERROR HY000: Table 't2' is read only +TRUNCATE TABLE t3; +ERROR HY000: Table 't3' is read only +TRUNCATE TABLE t1; +TRUNCATE TABLE t2; +TRUNCATE TABLE t3; +corrupted SYS_TABLES.MIX_LEN for test/t1 +corrupted SYS_TABLES.MIX_LEN for test/t2 +corrupted SYS_TABLES.MIX_LEN for test/t3 +TRUNCATE TABLE t1; +ERROR 42S02: Table 'test.t1' doesn't exist in engine +TRUNCATE TABLE t2; +TRUNCATE TABLE t3; +SELECT COUNT(*) FROM t1; +ERROR 42S02: Table 'test.t1' doesn't exist in engine +SELECT COUNT(*) FROM t2; +COUNT(*) +0 +SELECT COUNT(*) FROM t3; +COUNT(*) +0 +RENAME TABLE t1 TO tee_one; +ERROR HY000: Error on rename of './test/t1' to './test/tee_one' (errno: 155 "The table does not exist in engine") +DROP TABLE t1; +Warnings: +Warning 1932 Table 'test.t1' doesn't exist in engine +DROP TABLE t2,t3; +FOUND 49 /\[ERROR\] InnoDB: Table `test`\.`t1` in InnoDB data dictionary contains invalid flags\. 
SYS_TABLES\.TYPE=1 SYS_TABLES\.MIX_LEN=255\b/ in mysqld.1.err +ib_buffer_pool +ib_logfile0 +ib_logfile1 +ibdata1 diff --git a/mysql-test/suite/innodb/r/table_flags,32k.rdiff b/mysql-test/suite/innodb/r/table_flags,32k.rdiff new file mode 100644 index 00000000000..ea2d7048bb7 --- /dev/null +++ b/mysql-test/suite/innodb/r/table_flags,32k.rdiff @@ -0,0 +1,105 @@ +--- suite/innodb/r/table_flags.result ++++ suite/innodb/r/table_flags,32k.reject +@@ -5,6 +5,8 @@ + SET innodb_strict_mode=OFF; + CREATE TABLE tz(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=COMPRESSED + KEY_BLOCK_SIZE=1; ++Warnings: ++Warning 1478 InnoDB: Cannot create a COMPRESSED table when innodb_page_size > 16k. Assuming ROW_FORMAT=DYNAMIC. + SET innodb_strict_mode=ON; + CREATE TABLE tp(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=DYNAMIC + PAGE_COMPRESSED=1 PAGE_COMPRESSION_LEVEL=9; +@@ -13,7 +15,7 @@ + header=0x01000003016e (NAME=0x696e66696d756d00) + header=0x00002815008d (NAME='SYS_DATAFILES', + DB_TRX_ID=0x000000000302, +- DB_ROLL_PTR=0x81000001320194, ++ DB_ROLL_PTR=0x81000001310194, + ID=0x000000000000000e, + N_COLS=0x00000002, + TYPE=0x00000001, +@@ -23,7 +25,7 @@ + SPACE=0x00000000) + header=0x0000101500d5 (NAME='SYS_FOREIGN', + DB_TRX_ID=0x000000000300, +- DB_ROLL_PTR=0x800000012d0110, ++ DB_ROLL_PTR=0x800000012c0110, + ID=0x000000000000000b, + N_COLS=0x00000004, + TYPE=0x00000001, +@@ -33,7 +35,7 @@ + SPACE=0x00000000) + header=0x000018150122 (NAME='SYS_FOREIGN_COLS', + DB_TRX_ID=0x000000000300, +- DB_ROLL_PTR=0x800000012d0201, ++ DB_ROLL_PTR=0x800000012c0201, + ID=0x000000000000000c, + N_COLS=0x00000004, + TYPE=0x00000001, +@@ -43,7 +45,7 @@ + SPACE=0x00000000) + header=0x0400201501b8 (NAME='SYS_TABLESPACES', + DB_TRX_ID=0x000000000302, +- DB_ROLL_PTR=0x81000001320110, ++ DB_ROLL_PTR=0x81000001310110, + ID=0x000000000000000d, + N_COLS=0x00000003, + TYPE=0x00000001, +@@ -53,7 +55,7 @@ + SPACE=0x00000000) + header=0x000030150244 (NAME='SYS_VIRTUAL', + DB_TRX_ID=0x000000000304, +- 
DB_ROLL_PTR=0x82000001350110, ++ DB_ROLL_PTR=0x82000001340110, + ID=0x000000000000000f, + N_COLS=0x00000003, + TYPE=0x00000001, +@@ -63,7 +65,7 @@ + SPACE=0x00000000) + header=0x000040150288 (NAME='test/tc', + DB_TRX_ID=0x000000000308, +- DB_ROLL_PTR=0x84000001380110, ++ DB_ROLL_PTR=0x84000001370110, + ID=0x0000000000000011, + N_COLS=0x80000001, + TYPE=0x00000001, +@@ -73,7 +75,7 @@ + SPACE=0x00000002) + header=0x000048150310 (NAME='test/td', + DB_TRX_ID=0x00000000030a, +- DB_ROLL_PTR=0x85000001390110, ++ DB_ROLL_PTR=0x85000001380110, + ID=0x0000000000000012, + N_COLS=0x80000001, + TYPE=0x00000021, +@@ -83,7 +85,7 @@ + SPACE=0x00000003) + header=0x000058150200 (NAME='test/tp', + DB_TRX_ID=0x00000000030e, +- DB_ROLL_PTR=0x870000013b0110, ++ DB_ROLL_PTR=0x870000013a0110, + ID=0x0000000000000014, + N_COLS=0x80000001, + TYPE=0x000009a1, +@@ -93,7 +95,7 @@ + SPACE=0x00000005) + header=0x0000381502cc (NAME='test/tr', + DB_TRX_ID=0x000000000306, +- DB_ROLL_PTR=0x83000001370110, ++ DB_ROLL_PTR=0x83000001360110, + ID=0x0000000000000010, + N_COLS=0x00000001, + TYPE=0x00000001, +@@ -103,10 +105,10 @@ + SPACE=0x00000001) + header=0x000050150074 (NAME='test/tz', + DB_TRX_ID=0x00000000030c, +- DB_ROLL_PTR=0x860000013a0110, ++ DB_ROLL_PTR=0x86000001390110, + ID=0x0000000000000013, + N_COLS=0x80000001, +- TYPE=0x00000023, ++ TYPE=0x00000021, + MIX_ID=0x0000000000000000, + MIX_LEN=0x00000050, + CLUSTER_NAME=NULL(0 bytes), diff --git a/mysql-test/suite/innodb/r/table_flags,4k.rdiff b/mysql-test/suite/innodb/r/table_flags,4k.rdiff new file mode 100644 index 00000000000..0e469ee631b --- /dev/null +++ b/mysql-test/suite/innodb/r/table_flags,4k.rdiff @@ -0,0 +1,92 @@ +--- suite/innodb/r/table_flags.result ++++ suite/innodb/r/table_flags,4k.reject +@@ -13,7 +13,7 @@ + header=0x01000003016e (NAME=0x696e66696d756d00) + header=0x00002815008d (NAME='SYS_DATAFILES', + DB_TRX_ID=0x000000000302, +- DB_ROLL_PTR=0x81000001320194, ++ DB_ROLL_PTR=0x81000003260194, + ID=0x000000000000000e, + 
N_COLS=0x00000002, + TYPE=0x00000001, +@@ -23,7 +23,7 @@ + SPACE=0x00000000) + header=0x0000101500d5 (NAME='SYS_FOREIGN', + DB_TRX_ID=0x000000000300, +- DB_ROLL_PTR=0x800000012d0110, ++ DB_ROLL_PTR=0x80000003200110, + ID=0x000000000000000b, + N_COLS=0x00000004, + TYPE=0x00000001, +@@ -33,7 +33,7 @@ + SPACE=0x00000000) + header=0x000018150122 (NAME='SYS_FOREIGN_COLS', + DB_TRX_ID=0x000000000300, +- DB_ROLL_PTR=0x800000012d0201, ++ DB_ROLL_PTR=0x80000003200201, + ID=0x000000000000000c, + N_COLS=0x00000004, + TYPE=0x00000001, +@@ -43,7 +43,7 @@ + SPACE=0x00000000) + header=0x0400201501b8 (NAME='SYS_TABLESPACES', + DB_TRX_ID=0x000000000302, +- DB_ROLL_PTR=0x81000001320110, ++ DB_ROLL_PTR=0x81000003260110, + ID=0x000000000000000d, + N_COLS=0x00000003, + TYPE=0x00000001, +@@ -53,7 +53,7 @@ + SPACE=0x00000000) + header=0x000030150244 (NAME='SYS_VIRTUAL', + DB_TRX_ID=0x000000000304, +- DB_ROLL_PTR=0x82000001350110, ++ DB_ROLL_PTR=0x820000032a0110, + ID=0x000000000000000f, + N_COLS=0x00000003, + TYPE=0x00000001, +@@ -63,7 +63,7 @@ + SPACE=0x00000000) + header=0x000040150288 (NAME='test/tc', + DB_TRX_ID=0x000000000308, +- DB_ROLL_PTR=0x84000001380110, ++ DB_ROLL_PTR=0x840000032d0110, + ID=0x0000000000000011, + N_COLS=0x80000001, + TYPE=0x00000001, +@@ -73,7 +73,7 @@ + SPACE=0x00000002) + header=0x000048150310 (NAME='test/td', + DB_TRX_ID=0x00000000030a, +- DB_ROLL_PTR=0x85000001390110, ++ DB_ROLL_PTR=0x850000032f0110, + ID=0x0000000000000012, + N_COLS=0x80000001, + TYPE=0x00000021, +@@ -83,7 +83,7 @@ + SPACE=0x00000003) + header=0x000058150200 (NAME='test/tp', + DB_TRX_ID=0x00000000030e, +- DB_ROLL_PTR=0x870000013b0110, ++ DB_ROLL_PTR=0x87000003310110, + ID=0x0000000000000014, + N_COLS=0x80000001, + TYPE=0x000009a1, +@@ -93,7 +93,7 @@ + SPACE=0x00000005) + header=0x0000381502cc (NAME='test/tr', + DB_TRX_ID=0x000000000306, +- DB_ROLL_PTR=0x83000001370110, ++ DB_ROLL_PTR=0x830000032c0110, + ID=0x0000000000000010, + N_COLS=0x00000001, + TYPE=0x00000001, +@@ -103,7 +103,7 @@ + 
SPACE=0x00000001) + header=0x000050150074 (NAME='test/tz', + DB_TRX_ID=0x00000000030c, +- DB_ROLL_PTR=0x860000013a0110, ++ DB_ROLL_PTR=0x86000003300110, + ID=0x0000000000000013, + N_COLS=0x80000001, + TYPE=0x00000023, diff --git a/mysql-test/suite/innodb/r/table_flags,64k.rdiff b/mysql-test/suite/innodb/r/table_flags,64k.rdiff new file mode 100644 index 00000000000..ce2d7faa925 --- /dev/null +++ b/mysql-test/suite/innodb/r/table_flags,64k.rdiff @@ -0,0 +1,105 @@ +--- suite/innodb/r/table_flags.result ++++ suite/innodb/r/table_flags,64k.reject +@@ -5,6 +5,8 @@ + SET innodb_strict_mode=OFF; + CREATE TABLE tz(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=COMPRESSED + KEY_BLOCK_SIZE=1; ++Warnings: ++Warning 1478 InnoDB: Cannot create a COMPRESSED table when innodb_page_size > 16k. Assuming ROW_FORMAT=DYNAMIC. + SET innodb_strict_mode=ON; + CREATE TABLE tp(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=DYNAMIC + PAGE_COMPRESSED=1 PAGE_COMPRESSION_LEVEL=9; +@@ -13,7 +15,7 @@ + header=0x01000003016e (NAME=0x696e66696d756d00) + header=0x00002815008d (NAME='SYS_DATAFILES', + DB_TRX_ID=0x000000000302, +- DB_ROLL_PTR=0x81000001320194, ++ DB_ROLL_PTR=0x81000001310194, + ID=0x000000000000000e, + N_COLS=0x00000002, + TYPE=0x00000001, +@@ -23,7 +25,7 @@ + SPACE=0x00000000) + header=0x0000101500d5 (NAME='SYS_FOREIGN', + DB_TRX_ID=0x000000000300, +- DB_ROLL_PTR=0x800000012d0110, ++ DB_ROLL_PTR=0x800000012c0110, + ID=0x000000000000000b, + N_COLS=0x00000004, + TYPE=0x00000001, +@@ -33,7 +35,7 @@ + SPACE=0x00000000) + header=0x000018150122 (NAME='SYS_FOREIGN_COLS', + DB_TRX_ID=0x000000000300, +- DB_ROLL_PTR=0x800000012d0201, ++ DB_ROLL_PTR=0x800000012c0201, + ID=0x000000000000000c, + N_COLS=0x00000004, + TYPE=0x00000001, +@@ -43,7 +45,7 @@ + SPACE=0x00000000) + header=0x0400201501b8 (NAME='SYS_TABLESPACES', + DB_TRX_ID=0x000000000302, +- DB_ROLL_PTR=0x81000001320110, ++ DB_ROLL_PTR=0x81000001310110, + ID=0x000000000000000d, + N_COLS=0x00000003, + TYPE=0x00000001, +@@ -53,7 +55,7 @@ + 
SPACE=0x00000000) + header=0x000030150244 (NAME='SYS_VIRTUAL', + DB_TRX_ID=0x000000000304, +- DB_ROLL_PTR=0x82000001350110, ++ DB_ROLL_PTR=0x82000001340110, + ID=0x000000000000000f, + N_COLS=0x00000003, + TYPE=0x00000001, +@@ -63,7 +65,7 @@ + SPACE=0x00000000) + header=0x000040150288 (NAME='test/tc', + DB_TRX_ID=0x000000000308, +- DB_ROLL_PTR=0x84000001380110, ++ DB_ROLL_PTR=0x84000001370110, + ID=0x0000000000000011, + N_COLS=0x80000001, + TYPE=0x00000001, +@@ -73,7 +75,7 @@ + SPACE=0x00000002) + header=0x000048150310 (NAME='test/td', + DB_TRX_ID=0x00000000030a, +- DB_ROLL_PTR=0x85000001390110, ++ DB_ROLL_PTR=0x85000001380110, + ID=0x0000000000000012, + N_COLS=0x80000001, + TYPE=0x00000021, +@@ -83,7 +85,7 @@ + SPACE=0x00000003) + header=0x000058150200 (NAME='test/tp', + DB_TRX_ID=0x00000000030e, +- DB_ROLL_PTR=0x870000013b0110, ++ DB_ROLL_PTR=0x870000013a0110, + ID=0x0000000000000014, + N_COLS=0x80000001, + TYPE=0x000009a1, +@@ -93,7 +95,7 @@ + SPACE=0x00000005) + header=0x0000381502cc (NAME='test/tr', + DB_TRX_ID=0x000000000306, +- DB_ROLL_PTR=0x83000001370110, ++ DB_ROLL_PTR=0x83000001360110, + ID=0x0000000000000010, + N_COLS=0x00000001, + TYPE=0x00000001, +@@ -103,10 +105,10 @@ + SPACE=0x00000001) + header=0x000050150074 (NAME='test/tz', + DB_TRX_ID=0x00000000030c, +- DB_ROLL_PTR=0x860000013a0110, ++ DB_ROLL_PTR=0x86000001390110, + ID=0x0000000000000013, + N_COLS=0x80000001, +- TYPE=0x00000023, ++ TYPE=0x00000021, + MIX_ID=0x0000000000000000, + MIX_LEN=0x00000050, + CLUSTER_NAME=NULL(0 bytes), diff --git a/mysql-test/suite/innodb/r/table_flags,8k.rdiff b/mysql-test/suite/innodb/r/table_flags,8k.rdiff new file mode 100644 index 00000000000..d5b5731b305 --- /dev/null +++ b/mysql-test/suite/innodb/r/table_flags,8k.rdiff @@ -0,0 +1,92 @@ +--- suite/innodb/r/table_flags.result ++++ suite/innodb/r/table_flags,8k.reject +@@ -13,7 +13,7 @@ + header=0x01000003016e (NAME=0x696e66696d756d00) + header=0x00002815008d (NAME='SYS_DATAFILES', + DB_TRX_ID=0x000000000302, +- 
DB_ROLL_PTR=0x81000001320194, ++ DB_ROLL_PTR=0x81000001d70194, + ID=0x000000000000000e, + N_COLS=0x00000002, + TYPE=0x00000001, +@@ -23,7 +23,7 @@ + SPACE=0x00000000) + header=0x0000101500d5 (NAME='SYS_FOREIGN', + DB_TRX_ID=0x000000000300, +- DB_ROLL_PTR=0x800000012d0110, ++ DB_ROLL_PTR=0x80000001d10110, + ID=0x000000000000000b, + N_COLS=0x00000004, + TYPE=0x00000001, +@@ -33,7 +33,7 @@ + SPACE=0x00000000) + header=0x000018150122 (NAME='SYS_FOREIGN_COLS', + DB_TRX_ID=0x000000000300, +- DB_ROLL_PTR=0x800000012d0201, ++ DB_ROLL_PTR=0x80000001d10201, + ID=0x000000000000000c, + N_COLS=0x00000004, + TYPE=0x00000001, +@@ -43,7 +43,7 @@ + SPACE=0x00000000) + header=0x0400201501b8 (NAME='SYS_TABLESPACES', + DB_TRX_ID=0x000000000302, +- DB_ROLL_PTR=0x81000001320110, ++ DB_ROLL_PTR=0x81000001d70110, + ID=0x000000000000000d, + N_COLS=0x00000003, + TYPE=0x00000001, +@@ -53,7 +53,7 @@ + SPACE=0x00000000) + header=0x000030150244 (NAME='SYS_VIRTUAL', + DB_TRX_ID=0x000000000304, +- DB_ROLL_PTR=0x82000001350110, ++ DB_ROLL_PTR=0x82000001da0110, + ID=0x000000000000000f, + N_COLS=0x00000003, + TYPE=0x00000001, +@@ -63,7 +63,7 @@ + SPACE=0x00000000) + header=0x000040150288 (NAME='test/tc', + DB_TRX_ID=0x000000000308, +- DB_ROLL_PTR=0x84000001380110, ++ DB_ROLL_PTR=0x84000001dd0110, + ID=0x0000000000000011, + N_COLS=0x80000001, + TYPE=0x00000001, +@@ -73,7 +73,7 @@ + SPACE=0x00000002) + header=0x000048150310 (NAME='test/td', + DB_TRX_ID=0x00000000030a, +- DB_ROLL_PTR=0x85000001390110, ++ DB_ROLL_PTR=0x85000001de0110, + ID=0x0000000000000012, + N_COLS=0x80000001, + TYPE=0x00000021, +@@ -83,7 +83,7 @@ + SPACE=0x00000003) + header=0x000058150200 (NAME='test/tp', + DB_TRX_ID=0x00000000030e, +- DB_ROLL_PTR=0x870000013b0110, ++ DB_ROLL_PTR=0x87000001e00110, + ID=0x0000000000000014, + N_COLS=0x80000001, + TYPE=0x000009a1, +@@ -93,7 +93,7 @@ + SPACE=0x00000005) + header=0x0000381502cc (NAME='test/tr', + DB_TRX_ID=0x000000000306, +- DB_ROLL_PTR=0x83000001370110, ++ DB_ROLL_PTR=0x83000001dc0110, 
+ ID=0x0000000000000010, + N_COLS=0x00000001, + TYPE=0x00000001, +@@ -103,7 +103,7 @@ + SPACE=0x00000001) + header=0x000050150074 (NAME='test/tz', + DB_TRX_ID=0x00000000030c, +- DB_ROLL_PTR=0x860000013a0110, ++ DB_ROLL_PTR=0x86000001df0110, + ID=0x0000000000000013, + N_COLS=0x80000001, + TYPE=0x00000023, diff --git a/mysql-test/suite/innodb/r/table_flags.result b/mysql-test/suite/innodb/r/table_flags.result new file mode 100644 index 00000000000..ac7187cd610 --- /dev/null +++ b/mysql-test/suite/innodb/r/table_flags.result @@ -0,0 +1,188 @@ +SET GLOBAL innodb_file_per_table=1; +CREATE TABLE tr(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=REDUNDANT; +CREATE TABLE tc(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=COMPACT; +CREATE TABLE td(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=DYNAMIC; +SET innodb_strict_mode=OFF; +CREATE TABLE tz(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=COMPRESSED +KEY_BLOCK_SIZE=1; +SET innodb_strict_mode=ON; +CREATE TABLE tp(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=DYNAMIC +PAGE_COMPRESSED=1 PAGE_COMPRESSION_LEVEL=9; +SYS_TABLES clustered index root page (8): +N_RECS=10; LEVEL=0; INDEX_ID=0x0000000000000001 +header=0x01000003016e (NAME=0x696e66696d756d00) +header=0x00002815008d (NAME='SYS_DATAFILES', + DB_TRX_ID=0x000000000302, + DB_ROLL_PTR=0x81000001320194, + ID=0x000000000000000e, + N_COLS=0x00000002, + TYPE=0x00000001, + MIX_ID=0x0000000000000000, + MIX_LEN=0x00000040, + CLUSTER_NAME=NULL(0 bytes), + SPACE=0x00000000) +header=0x0000101500d5 (NAME='SYS_FOREIGN', + DB_TRX_ID=0x000000000300, + DB_ROLL_PTR=0x800000012d0110, + ID=0x000000000000000b, + N_COLS=0x00000004, + TYPE=0x00000001, + MIX_ID=0x0000000000000000, + MIX_LEN=0x00000040, + CLUSTER_NAME=NULL(0 bytes), + SPACE=0x00000000) +header=0x000018150122 (NAME='SYS_FOREIGN_COLS', + DB_TRX_ID=0x000000000300, + DB_ROLL_PTR=0x800000012d0201, + ID=0x000000000000000c, + N_COLS=0x00000004, + TYPE=0x00000001, + MIX_ID=0x0000000000000000, + MIX_LEN=0x00000040, + CLUSTER_NAME=NULL(0 bytes), + 
SPACE=0x00000000) +header=0x0400201501b8 (NAME='SYS_TABLESPACES', + DB_TRX_ID=0x000000000302, + DB_ROLL_PTR=0x81000001320110, + ID=0x000000000000000d, + N_COLS=0x00000003, + TYPE=0x00000001, + MIX_ID=0x0000000000000000, + MIX_LEN=0x00000040, + CLUSTER_NAME=NULL(0 bytes), + SPACE=0x00000000) +header=0x000030150244 (NAME='SYS_VIRTUAL', + DB_TRX_ID=0x000000000304, + DB_ROLL_PTR=0x82000001350110, + ID=0x000000000000000f, + N_COLS=0x00000003, + TYPE=0x00000001, + MIX_ID=0x0000000000000000, + MIX_LEN=0x00000040, + CLUSTER_NAME=NULL(0 bytes), + SPACE=0x00000000) +header=0x000040150288 (NAME='test/tc', + DB_TRX_ID=0x000000000308, + DB_ROLL_PTR=0x84000001380110, + ID=0x0000000000000011, + N_COLS=0x80000001, + TYPE=0x00000001, + MIX_ID=0x0000000000000000, + MIX_LEN=0x00000050, + CLUSTER_NAME=NULL(0 bytes), + SPACE=0x00000002) +header=0x000048150310 (NAME='test/td', + DB_TRX_ID=0x00000000030a, + DB_ROLL_PTR=0x85000001390110, + ID=0x0000000000000012, + N_COLS=0x80000001, + TYPE=0x00000021, + MIX_ID=0x0000000000000000, + MIX_LEN=0x00000050, + CLUSTER_NAME=NULL(0 bytes), + SPACE=0x00000003) +header=0x000058150200 (NAME='test/tp', + DB_TRX_ID=0x00000000030e, + DB_ROLL_PTR=0x870000013b0110, + ID=0x0000000000000014, + N_COLS=0x80000001, + TYPE=0x000009a1, + MIX_ID=0x0000000000000000, + MIX_LEN=0x00000050, + CLUSTER_NAME=NULL(0 bytes), + SPACE=0x00000005) +header=0x0000381502cc (NAME='test/tr', + DB_TRX_ID=0x000000000306, + DB_ROLL_PTR=0x83000001370110, + ID=0x0000000000000010, + N_COLS=0x00000001, + TYPE=0x00000001, + MIX_ID=0x0000000000000000, + MIX_LEN=0x00000050, + CLUSTER_NAME=NULL(0 bytes), + SPACE=0x00000001) +header=0x000050150074 (NAME='test/tz', + DB_TRX_ID=0x00000000030c, + DB_ROLL_PTR=0x860000013a0110, + ID=0x0000000000000013, + N_COLS=0x80000001, + TYPE=0x00000023, + MIX_ID=0x0000000000000000, + MIX_LEN=0x00000050, + CLUSTER_NAME=NULL(0 bytes), + SPACE=0x00000004) +header=0x070008030000 (NAME=0x73757072656d756d00) +SHOW CREATE TABLE tr; +ERROR 42S02: Table 'test.tr' 
doesn't exist in engine +SHOW CREATE TABLE tc; +ERROR 42S02: Table 'test.tc' doesn't exist in engine +SHOW CREATE TABLE td; +ERROR 42S02: Table 'test.td' doesn't exist in engine +SHOW CREATE TABLE tz; +Table Create Table +tz CREATE TABLE `tz` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 +BEGIN; +INSERT INTO tz VALUES(42); +ROLLBACK; +SELECT * FROM tz; +a +42 +SHOW CREATE TABLE tp; +ERROR 42S02: Table 'test.tp' doesn't exist in engine +FOUND 4 /InnoDB: Table `test`.`t[cp]` in InnoDB data dictionary contains invalid flags\. SYS_TABLES\.TYPE=(129|289|3873|1232[13]) SYS_TABLES\.N_COLS=2147483649/ in mysqld.1.err +FOUND 2 /InnoDB: Refusing to load '\..test.td\.ibd' \(id=3, flags=0x1?[2ae]1\); dictionary contains id=3, flags=0x10[01][2ae]1\b/ in mysqld.1.err +FOUND 2 /InnoDB: Table `test`\.`tr` in InnoDB data dictionary contains invalid flags\. SYS_TABLES\.TYPE=65 SYS_TABLES\.MIX_LEN=4294967295\b/ in mysqld.1.err +Restoring SYS_TABLES clustered index root page (8) +SHOW CREATE TABLE tr; +Table Create Table +tr CREATE TABLE `tr` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=REDUNDANT +SHOW CREATE TABLE tc; +Table Create Table +tc CREATE TABLE `tc` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT +SHOW CREATE TABLE td; +Table Create Table +td CREATE TABLE `td` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +SHOW CREATE TABLE tz; +Table Create Table +tz CREATE TABLE `tz` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 +SHOW CREATE TABLE tp; +Table Create Table +tp CREATE TABLE `tp` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC `PAGE_COMPRESSED`=1 `PAGE_COMPRESSION_LEVEL`=9 +BEGIN; +INSERT INTO tr 
VALUES(1); +INSERT INTO tc VALUES(1); +INSERT INTO td VALUES(1); +INSERT INTO tp VALUES(1); +ROLLBACK; +SELECT * FROM tr; +a +SELECT * FROM tc; +a +SELECT * FROM td; +a +SELECT * FROM tp; +a +DROP TABLE tr,tc,td,tz,tp; +ib_buffer_pool +ib_logfile0 +ib_logfile1 +ibdata1 +sys_tables.bin diff --git a/mysql-test/suite/innodb/r/temporary_table.result b/mysql-test/suite/innodb/r/temporary_table.result index da5ff361eeb..074dd413947 100644 --- a/mysql-test/suite/innodb/r/temporary_table.result +++ b/mysql-test/suite/innodb/r/temporary_table.result @@ -149,10 +149,6 @@ FOUND 2 /support raw device/ in mysqld.1.err SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND 1 /The innodb_temporary data file 'ibtmp1' must be at least/ in mysqld.1.err -SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' -AND support IN ('YES', 'DEFAULT', 'ENABLED'); -ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS FOUND 1 /InnoDB: syntax error in file path/ in mysqld.1.err SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); diff --git a/mysql-test/suite/innodb/t/101_compatibility.test b/mysql-test/suite/innodb/t/101_compatibility.test index 83ab7914a69..78dee53e52f 100644 --- a/mysql-test/suite/innodb/t/101_compatibility.test +++ b/mysql-test/suite/innodb/t/101_compatibility.test @@ -1,4 +1,4 @@ ---source include/have_innodb.inc +--source include/innodb_page_size.inc --source include/not_embedded.inc -- echo # diff --git a/mysql-test/suite/innodb/t/alter_missing_tablespace.test b/mysql-test/suite/innodb/t/alter_missing_tablespace.test index 643b7a4833d..d877b8f3b5d 100644 --- a/mysql-test/suite/innodb/t/alter_missing_tablespace.test +++ b/mysql-test/suite/innodb/t/alter_missing_tablespace.test @@ -1,5 +1,5 @@ --source include/not_embedded.inc ---source include/have_innodb.inc +--source include/innodb_page_size.inc 
--echo # --echo # Bug#13955083 ALLOW IN-PLACE DDL OPERATIONS ON MISSING diff --git a/mysql-test/suite/innodb/t/doublewrite.test b/mysql-test/suite/innodb/t/doublewrite.test index a153ad66b19..bdeaba2dcec 100644 --- a/mysql-test/suite/innodb/t/doublewrite.test +++ b/mysql-test/suite/innodb/t/doublewrite.test @@ -4,22 +4,27 @@ --echo # PAGE OF SYSTEM TABLESPACE --echo # ---source include/have_innodb.inc +--source include/innodb_page_size.inc --source include/have_debug.inc --source include/not_embedded.inc # Slow shutdown and restart to make sure ibuf merge is finished SET GLOBAL innodb_fast_shutdown = 0; --disable_query_log -call mtr.add_suppression("Header page consists of zero bytes"); -call mtr.add_suppression("Checksum mismatch in datafile"); -call mtr.add_suppression("but the innodb_page_size start-up parameter is"); -call mtr.add_suppression("adjusting FSP_SPACE_FLAGS"); +call mtr.add_suppression("InnoDB: Header page consists of zero bytes"); +call mtr.add_suppression("InnoDB: Checksum mismatch in datafile: .*, Space ID:0, Flags: 0"); +call mtr.add_suppression("InnoDB: Data file .* uses page size .* but the innodb_page_size start-up parameter is"); +call mtr.add_suppression("InnoDB: adjusting FSP_SPACE_FLAGS"); +call mtr.add_suppression("InnoDB: New log files created"); +call mtr.add_suppression("InnoDB: Cannot create doublewrite buffer: the first file in innodb_data_file_path must be at least (3|6|12)M\\."); +call mtr.add_suppression("InnoDB: Database creation was aborted"); +call mtr.add_suppression("Plugin 'InnoDB' (init function returned error|registration as a STORAGE ENGINE failed)"); --enable_query_log --source include/restart_mysqld.inc let INNODB_PAGE_SIZE=`select @@innodb_page_size`; let MYSQLD_DATADIR=`select @@datadir`; +let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err; show variables like 'innodb_doublewrite'; show variables like 'innodb_fil_make_page_dirty_debug'; @@ -392,9 +397,38 @@ EOF --source include/start_mysqld.inc check table t1; 
-select f1, f2 from t1; +--let SEARCH_PATTERN= \[ERROR\] InnoDB: .*test.t1\\.ibd.* +--source include/search_pattern_in_file.inc ---echo # Test End ---echo # --------------------------------------------------------------- +select f1, f2 from t1; drop table t1; + +--echo # +--echo # MDEV-12600 crash during install_db with innodb_page_size=32K +--echo # and ibdata1=3M +--echo # +let bugdir= $MYSQLTEST_VARDIR/tmp/doublewrite; +--mkdir $bugdir + +let $check_no_innodb=SELECT * FROM INFORMATION_SCHEMA.ENGINES +WHERE engine = 'innodb' +AND support IN ('YES', 'DEFAULT', 'ENABLED'); + +--let $ibp=--innodb-log-group-home-dir=$bugdir --innodb-data-home-dir=$bugdir +--let $ibd=$ibp --innodb-undo-tablespaces=0 --innodb-log-files-in-group=2 +--let $ibp=$ibp --innodb-data-file-path=ibdata1:1M;ibdata2:1M:autoextend + +--let $restart_parameters= $ibp +--source include/restart_mysqld.inc +eval $check_no_innodb; +--let SEARCH_PATTERN= \[ERROR\] InnoDB: Cannot create doublewrite buffer +--source include/search_pattern_in_file.inc +--let $restart_parameters= +--source include/restart_mysqld.inc + +--remove_file $bugdir/ibdata1 +--remove_file $bugdir/ibdata2 +--remove_file $bugdir/ib_logfile0 +--remove_file $bugdir/ib_logfile1 +--rmdir $bugdir diff --git a/mysql-test/suite/innodb/t/drop_table_background.test b/mysql-test/suite/innodb/t/drop_table_background.test new file mode 100644 index 00000000000..0f596dec574 --- /dev/null +++ b/mysql-test/suite/innodb/t/drop_table_background.test @@ -0,0 +1,30 @@ +--source include/have_innodb.inc +--source include/have_debug.inc +# Embedded server does not support restarting +--source include/not_embedded.inc + +CREATE TABLE t(c0 SERIAL, c1 INT, c2 INT, c3 INT, c4 INT, +KEY(c1), KEY(c2), KEY(c2,c1), +KEY(c3), KEY(c3,c1), KEY(c3,c2), KEY(c3,c2,c1), +KEY(c4), KEY(c4,c1), KEY(c4,c2), KEY(c4,c2,c1), +KEY(c4,c3), KEY(c4,c3,c1), KEY(c4,c3,c2), KEY(c4,c3,c2,c1)) ENGINE=InnoDB; + +let $n= 10; + +SET DEBUG_DBUG='+d,row_drop_table_add_to_background'; 
+--disable_query_log +let $i= $n; +while ($i) { + eval CREATE TABLE t$i LIKE t; + dec $i; +} +let $i= $n; +while ($i) { + eval DROP TABLE t$i; + dec $i; +} +--enable_query_log +DROP TABLE t; +--source include/restart_mysqld.inc +CREATE TABLE t (a INT) ENGINE=InnoDB; +DROP TABLE t; diff --git a/mysql-test/suite/innodb/t/innodb-alter-debug.test b/mysql-test/suite/innodb/t/innodb-alter-debug.test index 70017ffba35..f4996916e9f 100644 --- a/mysql-test/suite/innodb/t/innodb-alter-debug.test +++ b/mysql-test/suite/innodb/t/innodb-alter-debug.test @@ -1,4 +1,4 @@ ---source include/have_innodb.inc +--source include/innodb_page_size.inc --source include/have_debug.inc --source include/have_debug_sync.inc diff --git a/mysql-test/suite/innodb/t/innodb-alter-nullable.test b/mysql-test/suite/innodb/t/innodb-alter-nullable.test index 3f1e82b3183..bb5cdee000a 100644 --- a/mysql-test/suite/innodb/t/innodb-alter-nullable.test +++ b/mysql-test/suite/innodb/t/innodb-alter-nullable.test @@ -1,4 +1,4 @@ ---source include/have_innodb.inc +--source include/innodb_page_size.inc # Save the initial number of concurrent sessions. 
--source include/count_sessions.inc diff --git a/mysql-test/suite/innodb/t/innodb-alter-table.test b/mysql-test/suite/innodb/t/innodb-alter-table.test index 45342b4a218..97f0075f344 100644 --- a/mysql-test/suite/innodb/t/innodb-alter-table.test +++ b/mysql-test/suite/innodb/t/innodb-alter-table.test @@ -1,4 +1,4 @@ ---source include/have_innodb.inc +--source include/innodb_page_size.inc # # MMDEV-8386: MariaDB creates very big tmp file and hangs on xtradb diff --git a/mysql-test/suite/innodb/t/innodb-alter-tempfile.test b/mysql-test/suite/innodb/t/innodb-alter-tempfile.test index ec1ea35f1cf..d3f34b12ea6 100644 --- a/mysql-test/suite/innodb/t/innodb-alter-tempfile.test +++ b/mysql-test/suite/innodb/t/innodb-alter-tempfile.test @@ -10,8 +10,7 @@ # Avoid CrashReporter popup on Mac --source include/not_crashrep.inc -# InnoDB is required ---source include/have_innodb.inc +--source include/innodb_page_size.inc --echo # --echo # Bug #18734396 INNODB IN-PLACE ALTER FAILURES BLOCK FUTURE ALTERS diff --git a/mysql-test/suite/innodb/t/innodb-page_compression_default.test b/mysql-test/suite/innodb/t/innodb-page_compression_default.test index c5d6b36a97e..1cc6c917548 100644 --- a/mysql-test/suite/innodb/t/innodb-page_compression_default.test +++ b/mysql-test/suite/innodb/t/innodb-page_compression_default.test @@ -1,42 +1,10 @@ --source include/have_innodb.inc +--source include/not_embedded.inc -let $innodb_compression_algorithm_orig=`SELECT @@innodb_compression_algorithm`; -let $innodb_file_per_table_orig = `SELECT @@innodb_file_per_table`; +call mtr.add_suppression("InnoDB: Compression failed for space [0-9]+ name test/innodb_page_compressed[0-9] len [0-9]+ err 2 write_size [0-9]+."); -SET GLOBAL innodb_file_per_table = ON; +# All page compression test use the same +--source include/innodb-page-compression.inc -create table t1 (c1 int not null primary key auto_increment, b char(200)) engine=innodb page_compressed=1; -insert into t1 
values(NULL,'compressed_text_aaaaaaaaabbbbbbbbbbbbbccccccccccccc'); -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; -insert into t1(b) select b from t1; +-- echo #done -let $wait_condition= select variable_value > 0 from information_schema.global_status where variable_name = 'INNODB_NUM_PAGES_PAGE_COMPRESSED'; ---source include/wait_condition.inc - ---let $MYSQLD_DATADIR=`select @@datadir` ---let t1_IBD = $MYSQLD_DATADIR/test/t1.ibd ---let SEARCH_RANGE = 10000000 ---let SEARCH_PATTERN=compressed_text - ---echo # t1 compressed expected NOT FOUND --- let SEARCH_FILE=$t1_IBD --- source include/search_pattern_in_file.inc - -drop table t1; - -# reset system ---disable_query_log -EVAL SET GLOBAL innodb_compression_algorithm = $innodb_compression_algorithm_orig; -EVAL SET GLOBAL innodb_file_per_table = $innodb_file_per_table_orig; ---enable_query_log diff --git a/mysql-test/suite/innodb/t/innodb-page_compression_snappy.test b/mysql-test/suite/innodb/t/innodb-page_compression_snappy.test index b90d15f1fa3..532ec294d28 100644 --- a/mysql-test/suite/innodb/t/innodb-page_compression_snappy.test +++ b/mysql-test/suite/innodb/t/innodb-page_compression_snappy.test @@ -1,244 +1,13 @@ -- source include/have_innodb.inc -- source include/have_innodb_snappy.inc +--source include/not_embedded.inc -call mtr.add_suppression("InnoDB: Compression failed for space.*"); - -let $innodb_compression_algorithm_orig=`select @@innodb_compression_algorithm`; +call mtr.add_suppression("InnoDB: Compression failed for space [0-9]+ name test/innodb_page_compressed[0-9] len [0-9]+ err 2 write_size [0-9]+."); # 
snappy -set global innodb_compression_algorithm = 6; - -create table innodb_compressed(c1 int, b char(200)) engine=innodb row_format=compressed key_block_size=8; -show warnings; -create table innodb_normal (c1 int, b char(200)) engine=innodb; -show warnings; -create table innodb_page_compressed1 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=1; -show warnings; -show create table innodb_page_compressed1; -create table innodb_page_compressed2 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=2; -show warnings; -show create table innodb_page_compressed2; -create table innodb_page_compressed3 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=3; -show warnings; -show create table innodb_page_compressed3; -create table innodb_page_compressed4 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=4; -show warnings; -show create table innodb_page_compressed4; -create table innodb_page_compressed5 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=5; -show warnings; -show create table innodb_page_compressed5; -create table innodb_page_compressed6 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=6; -show warnings; -show create table innodb_page_compressed6; -create table innodb_page_compressed7 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=7; -show warnings; -show create table innodb_page_compressed7; -create table innodb_page_compressed8 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=8; -show warnings; -show create table innodb_page_compressed8; -create table innodb_page_compressed9 (c1 int, b char(200)) engine=innodb page_compressed=1 page_compression_level=9; -show warnings; -show create table innodb_page_compressed9; -delimiter //; -create procedure innodb_insert_proc (repeat_count int) -begin - declare current_num int; - set current_num = 0; - while 
current_num < repeat_count do - insert into innodb_normal values(current_num,'aaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbccccccccccccccccccccccc'); - set current_num = current_num + 1; - end while; -end// -delimiter ;// -commit; - -set autocommit=0; -call innodb_insert_proc(5000); -commit; -set autocommit=1; -select count(*) from innodb_normal; -insert into innodb_compressed select * from innodb_normal; -insert into innodb_page_compressed1 select * from innodb_normal; -insert into innodb_page_compressed2 select * from innodb_normal; -insert into innodb_page_compressed3 select * from innodb_normal; -insert into innodb_page_compressed4 select * from innodb_normal; -insert into innodb_page_compressed5 select * from innodb_normal; -insert into innodb_page_compressed6 select * from innodb_normal; -insert into innodb_page_compressed7 select * from innodb_normal; -insert into innodb_page_compressed8 select * from innodb_normal; -insert into innodb_page_compressed9 select * from innodb_normal; -commit; -select count(*) from innodb_compressed; -select count(*) from innodb_page_compressed1; -select count(*) from innodb_page_compressed1 where c1 < 500000; -select count(*) from innodb_page_compressed2 where c1 < 500000; -select count(*) from innodb_page_compressed3 where c1 < 500000; -select count(*) from innodb_page_compressed4 where c1 < 500000; -select count(*) from innodb_page_compressed5 where c1 < 500000; -select count(*) from innodb_page_compressed6 where c1 < 500000; -select count(*) from innodb_page_compressed7 where c1 < 500000; -select count(*) from innodb_page_compressed8 where c1 < 500000; -select count(*) from innodb_page_compressed9 where c1 < 500000; - -alter table innodb_normal page_compressed=1 page_compression_level=8; -show warnings; -show create table innodb_normal; -alter table innodb_compressed row_format=default page_compressed=1 page_compression_level=8 key_block_size=0; -show warnings; -show create table innodb_compressed; - -update 
innodb_page_compressed1 set c1 = c1 + 1; -update innodb_page_compressed2 set c1 = c1 + 1; -update innodb_page_compressed3 set c1 = c1 + 1; -update innodb_page_compressed4 set c1 = c1 + 1; -update innodb_page_compressed5 set c1 = c1 + 1; -update innodb_page_compressed6 set c1 = c1 + 1; -update innodb_page_compressed7 set c1 = c1 + 1; -update innodb_page_compressed8 set c1 = c1 + 1; -update innodb_page_compressed9 set c1 = c1 + 1; -select count(*) from innodb_compressed; -select count(*) from innodb_page_compressed1; -select count(*) from innodb_page_compressed1 where c1 < 500000; -select count(*) from innodb_page_compressed2 where c1 < 500000; -select count(*) from innodb_page_compressed3 where c1 < 500000; -select count(*) from innodb_page_compressed4 where c1 < 500000; -select count(*) from innodb_page_compressed5 where c1 < 500000; -select count(*) from innodb_page_compressed6 where c1 < 500000; -select count(*) from innodb_page_compressed7 where c1 < 500000; -select count(*) from innodb_page_compressed8 where c1 < 500000; -select count(*) from innodb_page_compressed9 where c1 < 500000; - ---source include/restart_mysqld.inc - -update innodb_page_compressed1 set c1 = c1 + 1; -update innodb_page_compressed2 set c1 = c1 + 1; -update innodb_page_compressed3 set c1 = c1 + 1; -update innodb_page_compressed4 set c1 = c1 + 1; -update innodb_page_compressed5 set c1 = c1 + 1; -update innodb_page_compressed6 set c1 = c1 + 1; -update innodb_page_compressed7 set c1 = c1 + 1; -update innodb_page_compressed8 set c1 = c1 + 1; -update innodb_page_compressed9 set c1 = c1 + 1; -select count(*) from innodb_compressed; -select count(*) from innodb_page_compressed1; -select count(*) from innodb_page_compressed1 where c1 < 500000; -select count(*) from innodb_page_compressed2 where c1 < 500000; -select count(*) from innodb_page_compressed3 where c1 < 500000; -select count(*) from innodb_page_compressed4 where c1 < 500000; -select count(*) from innodb_page_compressed5 where c1 < 
500000; -select count(*) from innodb_page_compressed6 where c1 < 500000; -select count(*) from innodb_page_compressed7 where c1 < 500000; -select count(*) from innodb_page_compressed8 where c1 < 500000; -select count(*) from innodb_page_compressed9 where c1 < 500000; - -# zlib -set global innodb_compression_algorithm = 1; -update innodb_page_compressed1 set c1 = c1 + 1; -update innodb_page_compressed2 set c1 = c1 + 1; -update innodb_page_compressed3 set c1 = c1 + 1; -update innodb_page_compressed4 set c1 = c1 + 1; -update innodb_page_compressed5 set c1 = c1 + 1; -update innodb_page_compressed6 set c1 = c1 + 1; -update innodb_page_compressed7 set c1 = c1 + 1; -update innodb_page_compressed8 set c1 = c1 + 1; -update innodb_page_compressed9 set c1 = c1 + 1; -commit; -select count(*) from innodb_compressed; -select count(*) from innodb_page_compressed1; -select count(*) from innodb_page_compressed1 where c1 < 500000; -select count(*) from innodb_page_compressed2 where c1 < 500000; -select count(*) from innodb_page_compressed3 where c1 < 500000; -select count(*) from innodb_page_compressed4 where c1 < 500000; -select count(*) from innodb_page_compressed5 where c1 < 500000; -select count(*) from innodb_page_compressed6 where c1 < 500000; -select count(*) from innodb_page_compressed7 where c1 < 500000; -select count(*) from innodb_page_compressed8 where c1 < 500000; -select count(*) from innodb_page_compressed9 where c1 < 500000; - ---source include/restart_mysqld.inc - -update innodb_page_compressed1 set c1 = c1 + 1; -update innodb_page_compressed2 set c1 = c1 + 1; -update innodb_page_compressed3 set c1 = c1 + 1; -update innodb_page_compressed4 set c1 = c1 + 1; -update innodb_page_compressed5 set c1 = c1 + 1; -update innodb_page_compressed6 set c1 = c1 + 1; -update innodb_page_compressed7 set c1 = c1 + 1; -update innodb_page_compressed8 set c1 = c1 + 1; -update innodb_page_compressed9 set c1 = c1 + 1; -select count(*) from innodb_compressed; -select count(*) from 
innodb_page_compressed1; -select count(*) from innodb_page_compressed1 where c1 < 500000; -select count(*) from innodb_page_compressed2 where c1 < 500000; -select count(*) from innodb_page_compressed3 where c1 < 500000; -select count(*) from innodb_page_compressed4 where c1 < 500000; -select count(*) from innodb_page_compressed5 where c1 < 500000; -select count(*) from innodb_page_compressed6 where c1 < 500000; -select count(*) from innodb_page_compressed7 where c1 < 500000; -select count(*) from innodb_page_compressed8 where c1 < 500000; -select count(*) from innodb_page_compressed9 where c1 < 500000; - -# none -set global innodb_compression_algorithm = 0; -update innodb_page_compressed1 set c1 = c1 + 1; -update innodb_page_compressed2 set c1 = c1 + 1; -update innodb_page_compressed3 set c1 = c1 + 1; -update innodb_page_compressed4 set c1 = c1 + 1; -update innodb_page_compressed5 set c1 = c1 + 1; -update innodb_page_compressed6 set c1 = c1 + 1; -update innodb_page_compressed7 set c1 = c1 + 1; -update innodb_page_compressed8 set c1 = c1 + 1; -update innodb_page_compressed9 set c1 = c1 + 1; -commit; -select count(*) from innodb_compressed; -select count(*) from innodb_page_compressed1; -select count(*) from innodb_page_compressed1 where c1 < 500000; -select count(*) from innodb_page_compressed2 where c1 < 500000; -select count(*) from innodb_page_compressed3 where c1 < 500000; -select count(*) from innodb_page_compressed4 where c1 < 500000; -select count(*) from innodb_page_compressed5 where c1 < 500000; -select count(*) from innodb_page_compressed6 where c1 < 500000; -select count(*) from innodb_page_compressed7 where c1 < 500000; -select count(*) from innodb_page_compressed8 where c1 < 500000; -select count(*) from innodb_page_compressed9 where c1 < 500000; - ---source include/restart_mysqld.inc - -update innodb_page_compressed1 set c1 = c1 + 1; -update innodb_page_compressed2 set c1 = c1 + 1; -update innodb_page_compressed3 set c1 = c1 + 1; -update 
innodb_page_compressed4 set c1 = c1 + 1; -update innodb_page_compressed5 set c1 = c1 + 1; -update innodb_page_compressed6 set c1 = c1 + 1; -update innodb_page_compressed7 set c1 = c1 + 1; -update innodb_page_compressed8 set c1 = c1 + 1; -update innodb_page_compressed9 set c1 = c1 + 1; -select count(*) from innodb_compressed; -select count(*) from innodb_page_compressed1; -select count(*) from innodb_page_compressed1 where c1 < 500000; -select count(*) from innodb_page_compressed2 where c1 < 500000; -select count(*) from innodb_page_compressed3 where c1 < 500000; -select count(*) from innodb_page_compressed4 where c1 < 500000; -select count(*) from innodb_page_compressed5 where c1 < 500000; -select count(*) from innodb_page_compressed6 where c1 < 500000; -select count(*) from innodb_page_compressed7 where c1 < 500000; -select count(*) from innodb_page_compressed8 where c1 < 500000; -select count(*) from innodb_page_compressed9 where c1 < 500000; +set global innodb_compression_algorithm = snappy; -drop procedure innodb_insert_proc; -drop table innodb_normal; -drop table innodb_compressed; -drop table innodb_page_compressed1; -drop table innodb_page_compressed2; -drop table innodb_page_compressed3; -drop table innodb_page_compressed4; -drop table innodb_page_compressed5; -drop table innodb_page_compressed6; -drop table innodb_page_compressed7; -drop table innodb_page_compressed8; -drop table innodb_page_compressed9; +# All page compression test use the same +--source include/innodb-page-compression.inc -# reset system ---disable_query_log -EVAL SET GLOBAL innodb_compression_algorithm = $innodb_compression_algorithm_orig; ---enable_query_log +-- echo #done diff --git a/mysql-test/suite/innodb/t/innodb_stats_del_mark-master.opt b/mysql-test/suite/innodb/t/innodb_stats_del_mark-master.opt deleted file mode 100644 index 145ee2b4264..00000000000 --- a/mysql-test/suite/innodb/t/innodb_stats_del_mark-master.opt +++ /dev/null @@ -1 +0,0 @@ 
---innodb_stats_include_delete_marked=on diff --git a/mysql-test/suite/innodb/t/innodb_stats_del_mark.test b/mysql-test/suite/innodb/t/innodb_stats_del_mark.test deleted file mode 100644 index 36f7a2ea099..00000000000 --- a/mysql-test/suite/innodb/t/innodb_stats_del_mark.test +++ /dev/null @@ -1,113 +0,0 @@ ---source include/have_innodb.inc ---source include/big_test.inc - ---echo # ---echo # Bug 23333990 PERSISTENT INDEX STATISTICS UPDATE BEFORE ---echo # TRANSACTION IS COMMITTED ---echo # - ---echo "Test 1:- Uncommited delete test" -CREATE TABLE t1 (id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, - val INT UNSIGNED NOT NULL, - INDEX (val)) ENGINE=INNODB - STATS_PERSISTENT=1,STATS_AUTO_RECALC=1; - - -INSERT INTO t1 (val) VALUES (CEIL(RAND()*20)); -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; -INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1; - -SELECT COUNT(*) FROM t1; -ANALYZE TABLE t1; - -connect(con1, localhost, root,,); -START TRANSACTION; -DELETE FROM t1; -send SELECT COUNT(*) FROM t1; - -connection default; -let $row_count= query_get_value(EXPLAIN SELECT * FROM t1 WHERE val=4, rows,1); -if 
($row_count > 20000) -{ ---echo Test correctly estimates the number of rows as > 20000 ---echo even when in other uncommmited transaction ---echo all rows have been deleted. -} - -connection con1; -reap; -commit; - -connection default; - ---echo Test 2:- Insert and rollback test -CREATE TABLE t2 (id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, - val INT UNSIGNED NOT NULL, - INDEX (val)) ENGINE=INNODB - STATS_PERSISTENT=1,STATS_AUTO_RECALC=1; - -connection con1; - -START TRANSACTION; -INSERT INTO t2 (val) VALUES (CEIL(RAND()*20)); -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2; -send SELECT COUNT(*) FROM t2; - -connection default; -select count(*) from t2; -let $row_count= query_get_value(EXPLAIN SELECT * FROM t2 WHERE val=4, rows,1); -if ($row_count > 20000) -{ ---echo Test correctly estimates the number of rows as > 20000 ---echo even when in other uncommited transaction ---echo many rows are inserted. 
-} - -connection con1; -reap; ---echo Rollback the insert -rollback; -disconnect con1; - -connection default; -let $row_count= query_get_value(EXPLAIN SELECT * FROM t2 WHERE val=4, rows,1); -if ($row_count <= 1) -{ ---echo Test correctly estimates the number of rows as $row_count ---echo after rollback. -} - -DROP TABLE t1,t2; diff --git a/mysql-test/suite/innodb/t/innodb_stats_persistent.test b/mysql-test/suite/innodb/t/innodb_stats_persistent.test new file mode 100644 index 00000000000..652b201c4b4 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb_stats_persistent.test @@ -0,0 +1,95 @@ +--source include/have_innodb.inc +--source include/big_test.inc + +SET @saved_include_delete_marked = @@GLOBAL.innodb_stats_include_delete_marked; +SET GLOBAL innodb_stats_include_delete_marked = ON; +SET @saved_traditional = @@GLOBAL.innodb_stats_traditional; +SET GLOBAL innodb_stats_traditional=false; +SET @saved_modified_counter = @@GLOBAL.innodb_stats_modified_counter; +SET GLOBAL innodb_stats_modified_counter=1; + +CREATE TABLE t0 (id SERIAL, val INT UNSIGNED NOT NULL, KEY(val)) +ENGINE=INNODB STATS_PERSISTENT=1,STATS_AUTO_RECALC=1; +CREATE TABLE t1 LIKE t0; +CREATE TABLE t2 LIKE t0; + +INSERT INTO t0 (val) VALUES (4); +INSERT INTO t0 (val) SELECT 4 FROM t0; +INSERT INTO t0 (val) SELECT 4 FROM t0; +INSERT INTO t0 (val) SELECT 4 FROM t0; +INSERT INTO t0 (val) SELECT 4 FROM t0; + +INSERT INTO t1 SELECT * FROM t0; +SELECT COUNT(*) FROM t1; +ANALYZE TABLE t1; + +connect(con1, localhost, root,,); +START TRANSACTION; +DELETE FROM t1; +send SELECT COUNT(*) FROM t1; + +connection default; +--echo # With innodb_stats_include_delete_marked=ON, +--echo # DELETE must not affect statistics before COMMIT. 
+EXPLAIN SELECT * FROM t1 WHERE val=4; + +connection con1; +reap; +ROLLBACK; +SELECT COUNT(*) FROM t1; +EXPLAIN SELECT * FROM t1 WHERE val=4; + +BEGIN; +DELETE FROM t1; +COMMIT; +SELECT COUNT(*) FROM t1; + +connection default; +BEGIN; +INSERT INTO t2 SELECT * FROM t0; + +--echo # The INSERT will show up before COMMIT. +EXPLAIN SELECT * FROM t2 WHERE val=4; +SELECT COUNT(*) FROM t2; +--echo # The ROLLBACK of the INSERT must affect the statistics. +ROLLBACK; +SELECT COUNT(*) FROM t2; + +connection con1; +EXPLAIN SELECT * FROM t2 WHERE val=4; +SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency; +SET GLOBAL innodb_purge_rseg_truncate_frequency = 1; +--source include/wait_all_purged.inc +SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency; +--echo # After COMMIT and purge, the DELETE must show up. +EXPLAIN SELECT * FROM t1 WHERE val=4; + +SET GLOBAL innodb_stats_include_delete_marked = OFF; +BEGIN; +INSERT INTO t1 SELECT * FROM t0; +EXPLAIN SELECT * FROM t1 WHERE val=4; +ROLLBACK; +EXPLAIN SELECT * FROM t1 WHERE val=4; +BEGIN; +INSERT INTO t1 SELECT * FROM t0; +COMMIT; +EXPLAIN SELECT * FROM t1 WHERE val=4; +BEGIN; +DELETE FROM t1; +SELECT COUNT(*) FROM t1; +--echo # With innodb_stats_include_delete_marked=OFF, +--echo # DELETE must affect statistics even before COMMIT. +--echo # However, if there was a WHERE condition, +--echo # ha_innobase::records_in_range() would count the delete-marked records. 
+EXPLAIN SELECT * FROM t1; +ROLLBACK; +EXPLAIN SELECT * FROM t1; +SELECT COUNT(*) FROM t1; +disconnect con1; + +connection default; + +DROP TABLE t0,t1,t2; +SET GLOBAL innodb_stats_include_delete_marked = @saved_include_delete_marked; +SET GLOBAL innodb_stats_traditional = @saved_traditional; +SET GLOBAL innodb_stats_modified_counter = @saved_modified_counter; diff --git a/mysql-test/suite/innodb/t/log_data_file_size.test b/mysql-test/suite/innodb/t/log_data_file_size.test index 56a9d35bd92..8a0e88efc76 100644 --- a/mysql-test/suite/innodb/t/log_data_file_size.test +++ b/mysql-test/suite/innodb/t/log_data_file_size.test @@ -1,4 +1,4 @@ ---source include/have_innodb.inc +--source include/innodb_page_size.inc --source include/not_embedded.inc let INNODB_PAGE_SIZE=`select @@innodb_page_size`; diff --git a/mysql-test/suite/innodb/t/log_file_size.test b/mysql-test/suite/innodb/t/log_file_size.test index ae6442a7994..14cc889e34b 100644 --- a/mysql-test/suite/innodb/t/log_file_size.test +++ b/mysql-test/suite/innodb/t/log_file_size.test @@ -1,5 +1,5 @@ # Test resizing the InnoDB redo log. ---source include/have_innodb.inc +--source include/innodb_page_size_small.inc # Embedded server tests do not support restarting --source include/not_embedded.inc # DBUG_EXECUTE_IF is needed @@ -24,6 +24,12 @@ call mtr.add_suppression("InnoDB: Unable to open .*ib_logfile0. 
to check native FLUSH TABLES; --enable_query_log +--let $restart_parameters= --innodb-thread-concurrency=1 --innodb-log-file-size=1m --innodb-log-files-in-group=2 +--source include/restart_mysqld.inc + +--let $restart_parameters= --innodb-thread-concurrency=100 --innodb-log-file-size=10M --innodb-log-files-in-group=2 +--source include/restart_mysqld.inc + CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB; BEGIN; INSERT INTO t1 VALUES (42); diff --git a/mysql-test/suite/innodb/t/row_format_redundant.test b/mysql-test/suite/innodb/t/row_format_redundant.test new file mode 100644 index 00000000000..af3fe3b52cf --- /dev/null +++ b/mysql-test/suite/innodb/t/row_format_redundant.test @@ -0,0 +1,157 @@ +--source include/have_innodb.inc +# Embedded mode doesn't allow restarting +--source include/not_embedded.inc + +--disable_query_log +call mtr.add_suppression("InnoDB: Table `mysql`\\.`innodb_table_stats` not found"); +call mtr.add_suppression("InnoDB: Table `test`.`t1` in InnoDB data dictionary contains invalid flags. 
SYS_TABLES\\.TYPE=1 SYS_TABLES\\.MIX_LEN=255\\r?$"); +call mtr.add_suppression("InnoDB: Parent table of FTS auxiliary table test/FTS_.* not found"); +call mtr.add_suppression("InnoDB: Cannot open table test/t1 from the internal data dictionary"); +call mtr.add_suppression("InnoDB: Table `test`.`t1` does not exist in the InnoDB internal data dictionary though MariaDB is trying to (rename|drop)"); +FLUSH TABLES; +--enable_query_log + +let INNODB_PAGE_SIZE=`select @@innodb_page_size`; + +let bugdir= $MYSQLTEST_VARDIR/tmp/row_format_redundant; +--mkdir $bugdir +--let SEARCH_FILE = $MYSQLTEST_VARDIR/log/mysqld.1.err + +--let $d=--innodb-data-home-dir=$bugdir --innodb-log-group-home-dir=$bugdir +--let $d=$d --innodb-data-file-path=ibdata1:1M:autoextend +--let $d=$d --innodb-undo-tablespaces=0 --innodb-stats-persistent=0 +--let $restart_parameters= $d +--source include/restart_mysqld.inc + +SET GLOBAL innodb_file_per_table=1; + +--echo # +--echo # Bug#21644827 - FTS, ASSERT !SRV_READ_ONLY_MODE || M_IMPL.M_LOG_MODE == +--echo # MTR_LOG_NO_REDO +--echo # + +SET GLOBAL innodb_file_per_table=ON; +create table t1 (a int not null, d varchar(15) not null, b +varchar(198) not null, c char(156), +fulltext ftsic(c)) engine=InnoDB +row_format=redundant; + +insert into t1 values(123, 'abcdef', 'jghikl', 'mnop'); +insert into t1 values(456, 'abcdef', 'jghikl', 'mnop'); +insert into t1 values(789, 'abcdef', 'jghikl', 'mnop'); +insert into t1 values(134, 'kasdfsdsadf', 'adfjlasdkfjasd', 'adfsadflkasdasdfljasdf'); +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; + +SET GLOBAL innodb_file_per_table=OFF; +create table t2 (a int not null, d varchar(15) not null, b +varchar(198) not null, c char(156), 
fulltext ftsic(c)) engine=InnoDB +row_format=redundant; + +insert into t2 select * from t1; + +create table t3 (a int not null, d varchar(15) not null, b varchar(198), +c varchar(150), index k1(c(99), b(56)), index k2(b(5), c(10))) engine=InnoDB +row_format=redundant; + +insert into t3 values(444, 'dddd', 'bbbbb', 'aaaaa'); +insert into t3 values(555, 'eeee', 'ccccc', 'aaaaa'); + +# read-only restart requires the change buffer to be empty; therefore we +# do a slow shutdown. +SET GLOBAL innodb_fast_shutdown=0; +--let $restart_parameters= $d --innodb-read-only +--source include/restart_mysqld.inc + +SELECT COUNT(*) FROM t1; +SELECT COUNT(*) FROM t2; +SELECT COUNT(*) FROM t3; + +--error ER_OPEN_AS_READONLY +TRUNCATE TABLE t1; +--error ER_OPEN_AS_READONLY +TRUNCATE TABLE t2; +--error ER_OPEN_AS_READONLY +TRUNCATE TABLE t3; + +--let $restart_parameters= $d +--source include/restart_mysqld.inc + +TRUNCATE TABLE t1; +TRUNCATE TABLE t2; +TRUNCATE TABLE t3; + +--source include/shutdown_mysqld.inc +--perl +use strict; +my $ps= $ENV{INNODB_PAGE_SIZE}; +my $file= "$ENV{bugdir}/ibdata1"; +open(FILE, "+<", $file) || die "Unable to open $file\n"; +# Read DICT_HDR_TABLES, the root page number of CLUST_IND (SYS_TABLES.NAME). 
+sysseek(FILE, 7*$ps+38+32, 0) || die "Unable to seek $file"; +die "Unable to read $file" unless sysread(FILE, $_, 4) == 4; +my $sys_tables_root = unpack("N", $_); +my $page; +sysseek(FILE, $sys_tables_root*$ps, 0) || die "Unable to seek $file"; +die "Unable to read $file" unless sysread(FILE, $page, $ps) == $ps; +for (my $offset= 0x65; $offset; + $offset= unpack("n", substr($page,$offset-2,2))) +{ + my $n_fields= unpack("n", substr($page,$offset-4,2)) >> 1 & 0x3ff; + my $start= 0; + my $end= unpack("C", substr($page, $offset-7, 1)); + my $name= substr($page,$offset+$start,$end-$start); + for (my $i= 0; $i < $n_fields; $i++) { + my $end= unpack("C", substr($page, $offset-7-$i, 1)); + # Corrupt SYS_TABLES.MIX_LEN (ignored for ROW_FORMAT=REDUNDANT) + if ($i == 7 && $name =~ '^test/t[123]') + { + print "corrupted SYS_TABLES.MIX_LEN for $name\n"; + substr($page,$offset+$start,$end-$start)= pack("N", 255); + } + $start= $end & 0x7f; + } +} +substr($page,0,4)=pack("N",0xdeadbeef); +substr($page,$ps-8,4)=pack("N",0xdeadbeef); +sysseek(FILE, $sys_tables_root*$ps, 0) || die "Unable to seek $file"; +syswrite(FILE, $page, $ps)==$ps || die "Unable to write $file\n"; +close(FILE) || die "Unable to close $file\n"; +EOF + +--source include/start_mysqld.inc +--error ER_NO_SUCH_TABLE_IN_ENGINE +TRUNCATE TABLE t1; +TRUNCATE TABLE t2; +TRUNCATE TABLE t3; +--error ER_NO_SUCH_TABLE_IN_ENGINE +SELECT COUNT(*) FROM t1; +SELECT COUNT(*) FROM t2; +SELECT COUNT(*) FROM t3; +--error ER_ERROR_ON_RENAME +RENAME TABLE t1 TO tee_one; +DROP TABLE t1; +DROP TABLE t2,t3; + +--let SEARCH_PATTERN= \[ERROR\] InnoDB: Table `test`\.`t1` in InnoDB data dictionary contains invalid flags\. 
SYS_TABLES\.TYPE=1 SYS_TABLES\.MIX_LEN=255\b +--source include/search_pattern_in_file.inc + +--let $restart_parameters= +--source include/restart_mysqld.inc + +--list_files $bugdir +--remove_files_wildcard $bugdir +--rmdir $bugdir + +# Remove the data file, because DROP TABLE skipped it for the "corrupted" table +--let MYSQLD_DATADIR=`select @@datadir` +--remove_file $MYSQLD_DATADIR/test/t1.ibd +--list_files $MYSQLD_DATADIR/test diff --git a/mysql-test/suite/innodb/t/table_flags.test b/mysql-test/suite/innodb/t/table_flags.test new file mode 100644 index 00000000000..7f47b8254e2 --- /dev/null +++ b/mysql-test/suite/innodb/t/table_flags.test @@ -0,0 +1,206 @@ +--source include/innodb_page_size.inc +# Embedded server tests do not support restarting +--source include/not_embedded.inc + +--disable_query_log +call mtr.add_suppression("InnoDB: Table `mysql`\\.`innodb_table_stats` not found"); +call mtr.add_suppression("InnoDB: incorrect flags in SYS_TABLES"); +call mtr.add_suppression("InnoDB: Table `test`.`t[cp]` in InnoDB data dictionary contains invalid flags\\. SYS_TABLES\\.TYPE=(129|289|3873|1232[31]) SYS_TABLES\\.N_COLS=2147483649\\r?$"); +call mtr.add_suppression("InnoDB: Table `test`\\.`tr` in InnoDB data dictionary contains invalid flags\\. 
SYS_TABLES\\.TYPE=65 SYS_TABLES\\.MIX_LEN=4294967295\\r?$"); +call mtr.add_suppression("InnoDB: Refusing to load '\\..test.td\\.ibd' \\(id=3, flags=0x([2e]1)\\); dictionary contains id=3, flags=0x100\\1\\r?$"); +call mtr.add_suppression("InnoDB: Refusing to load '\\..test.td\\.ibd' \\(id=3, flags=0x(1[2ae]1)\\); dictionary contains id=3, flags=0x10\\1\\r?$"); +call mtr.add_suppression("InnoDB: Ignoring tablespace for `test`.`td` because it could not be opened\\."); +# FIXME: Remove the following spam due to invalid flags for test.td +call mtr.add_suppression("InnoDB: Operating system error number .* in a file operation"); +call mtr.add_suppression("InnoDB: The error means the system cannot find the path specified"); +call mtr.add_suppression("InnoDB: If you are installing InnoDB, remember that you must create directories yourself"); +FLUSH TABLES; +--enable_query_log + +let INNODB_PAGE_SIZE=`select @@innodb_page_size`; +let MYSQLD_DATADIR=`select @@datadir`; + +let bugdir= $MYSQLTEST_VARDIR/tmp/table_flags; +--mkdir $bugdir +--let SEARCH_FILE = $MYSQLTEST_VARDIR/log/mysqld.1.err + +--let $d=--innodb-data-home-dir=$bugdir --innodb-log-group-home-dir=$bugdir +--let $d=$d --innodb-data-file-path=ibdata1:1M:autoextend +--let $d=$d --innodb-undo-tablespaces=0 +--let $restart_parameters=$d --innodb-stats-persistent=0 +--source include/restart_mysqld.inc + +SET GLOBAL innodb_file_per_table=1; +CREATE TABLE tr(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=REDUNDANT; +CREATE TABLE tc(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=COMPACT; +CREATE TABLE td(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=DYNAMIC; +SET innodb_strict_mode=OFF; +CREATE TABLE tz(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=COMPRESSED +KEY_BLOCK_SIZE=1; +SET innodb_strict_mode=ON; +# PAGE_COMPRESSED is supported starting with MariaDB 10.1.0 +CREATE TABLE tp(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=DYNAMIC +PAGE_COMPRESSED=1 PAGE_COMPRESSION_LEVEL=9; + +--source include/shutdown_mysqld.inc +--perl +use strict; 
+my $ps= $ENV{INNODB_PAGE_SIZE}; +my $file= "$ENV{bugdir}/ibdata1"; +open(FILE, "+<", $file) || die "Unable to open $file\n"; +# Read DICT_HDR_TABLES, the root page number of CLUST_IND (SYS_TABLES.NAME). +sysseek(FILE, 7*$ps+38+32, 0) || die "Unable to seek $file"; +die "Unable to read $file" unless sysread(FILE, $_, 4) == 4; +my $sys_tables_root = unpack("N", $_); +my $page; +print "SYS_TABLES clustered index root page ($sys_tables_root):\n"; +sysseek(FILE, $sys_tables_root*$ps, 0) || die "Unable to seek $file"; +die "Unable to read $file" unless sysread(FILE, $page, $ps) == $ps; +open(BACKUP, ">$ENV{bugdir}/sys_tables.bin") || die "Unable to open backup\n"; +syswrite(BACKUP, $page, $ps)==$ps || die "Unable to write backup\n"; +close(BACKUP) || die "Unable to close backup\n"; +print "N_RECS=", unpack("n", substr($page,38+16,2)); +print "; LEVEL=", unpack("n", substr($page,38+26,2)); +print "; INDEX_ID=0x", unpack("H*", substr($page,38+28,8)), "\n"; +my @fields=("NAME","DB_TRX_ID","DB_ROLL_PTR", + "ID","N_COLS","TYPE","MIX_ID","MIX_LEN","CLUSTER_NAME","SPACE"); +for (my $offset= 0x65; $offset; + $offset= unpack("n", substr($page,$offset-2,2))) +{ + print "header=0x", unpack("H*",substr($page,$offset-6,6)), " ("; + my $n_fields= unpack("n", substr($page,$offset-4,2)) >> 1 & 0x3ff; + my $start= 0; + my $name; + for (my $i= 0; $i < $n_fields; $i++) { + my $end= unpack("C", substr($page, $offset-7-$i, 1)); + print ",\n " if $i; + print "$fields[$i]="; + if ($end & 0x80) { + print "NULL(", ($end & 0x7f) - $start, " bytes)" + } elsif ($n_fields > 1 && $i == 0) { + $name= substr($page,$offset+$start,$end-$start); + print "'$name'" + } else { + print "0x", unpack("H*", substr($page,$offset+$start,$end-$start)) + } + # Corrupt SYS_TABLES.TYPE + if ($i == 5) + { + my $flags= 0; + if ($name eq 'test/tr') { + $flags= 0x40 # DATA_DIR (largely ignored by 10.1+) + } elsif ($name eq 'test/tc') { + $flags= 0x80 # 10.1 PAGE_COMPRESSED + } elsif ($name eq 'test/td') { + $flags= 0xf00 
# PAGE_COMPRESSION_LEVEL=15 (0..9 is valid) + # As part of the MDEV-12873 fix, because the + # PAGE_COMPRESSED=YES flag was not set, we will assume that + # this table was actually created with 10.2.2..10.2.6 + # using PAGE_COMPRESSED=YES PAGE_COMPRESSION_LEVEL=7. + } elsif ($name eq 'test/tz') { + $flags= 0x3000 # 10.1 ATOMIC_WRITES=3 (0..2 is valid) + } elsif ($name eq 'test/tp') { + $flags= 0x880 # 10.1 PAGE_COMPRESSED, PAGE_COMPRESSION_LEVEL=8 + # (in 10.2.2 through 10.2.6, this is interpreted as + # PAGE_COMPRESSION_LEVEL=4 without PAGE_COMPRESSED + # but with SHARED_SPACE, which should be invalid) + } + + substr($page,$offset+$start,$end-$start)= pack( + "N", $flags ^ + unpack("N", substr($page,$offset+$start,$end-$start))) + if $flags; + } + # Corrupt SYS_TABLES.MIX_LEN (ignored for ROW_FORMAT=REDUNDANT) + if ($i == 7 && $name eq 'test/tr') + { + substr($page,$offset+$start,$end-$start)= chr(255) x 4; + } + $start= $end & 0x7f; + } + print ")\n"; +} +substr($page,0,4)=pack("N",0xdeadbeef); +substr($page,$ps-8,4)=pack("N",0xdeadbeef); +sysseek(FILE, $sys_tables_root*$ps, 0) || die "Unable to seek $file"; +syswrite(FILE, $page, $ps)==$ps || die "Unable to write $file\n"; +close(FILE) || die "Unable to close $file\n"; +EOF +--source include/start_mysqld.inc + +--error ER_NO_SUCH_TABLE_IN_ENGINE +SHOW CREATE TABLE tr; +--error ER_NO_SUCH_TABLE_IN_ENGINE +SHOW CREATE TABLE tc; +--error ER_NO_SUCH_TABLE_IN_ENGINE +SHOW CREATE TABLE td; +# This table was converted to NO_ROLLBACK due to the SYS_TABLES.TYPE change. +SHOW CREATE TABLE tz; +BEGIN; +INSERT INTO tz VALUES(42); +ROLLBACK; +SELECT * FROM tz; +--error ER_NO_SUCH_TABLE_IN_ENGINE +SHOW CREATE TABLE tp; + +--source include/shutdown_mysqld.inc + +let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err; +--let SEARCH_PATTERN= InnoDB: Table `test`.`t[cp]` in InnoDB data dictionary contains invalid flags\. 
SYS_TABLES\.TYPE=(129|289|3873|1232[13]) SYS_TABLES\.N_COLS=2147483649 +--source include/search_pattern_in_file.inc +--let SEARCH_PATTERN= InnoDB: Refusing to load '\..test.td\.ibd' \(id=3, flags=0x1?[2ae]1\); dictionary contains id=3, flags=0x10[01][2ae]1\b +--source include/search_pattern_in_file.inc +--let SEARCH_PATTERN= InnoDB: Table `test`\.`tr` in InnoDB data dictionary contains invalid flags\. SYS_TABLES\.TYPE=65 SYS_TABLES\.MIX_LEN=4294967295\b +--source include/search_pattern_in_file.inc + +# Restore the backup of the corrupted SYS_TABLES clustered index root page +--perl +use strict; +my $ps= $ENV{INNODB_PAGE_SIZE}; +my $file= "$ENV{bugdir}/ibdata1"; +open(FILE, "+<", $file) || die "Unable to open $file\n"; +open(BACKUP, "<$ENV{bugdir}/sys_tables.bin") || die "Unable to open backup\n"; +# Read DICT_HDR_TABLES, the root page number of CLUST_IND (SYS_TABLES.NAME). +sysseek(FILE, 7*$ps+38+32, 0) || die "Unable to seek $file"; +die "Unable to read $file\n" unless sysread(FILE, $_, 4) == 4; +my $sys_tables_root = unpack("N", $_); +print "Restoring SYS_TABLES clustered index root page ($sys_tables_root)\n"; +sysseek(FILE, $sys_tables_root*$ps, 0) || die "Unable to seek $file"; +die "Unable to read backup\n" unless sysread(BACKUP, $_, $ps) == $ps; +die "Unable to restore backup\n" unless syswrite(FILE, $_, $ps) == $ps; +close(BACKUP); +close(FILE) || die "Unable to close $file\n"; +EOF +--source include/start_mysqld.inc + +SHOW CREATE TABLE tr; +SHOW CREATE TABLE tc; +SHOW CREATE TABLE td; +SHOW CREATE TABLE tz; +SHOW CREATE TABLE tp; + +BEGIN; +INSERT INTO tr VALUES(1); +INSERT INTO tc VALUES(1); +INSERT INTO td VALUES(1); +# We cannot access tz, because due to our fiddling of the NO_ROLLBACK flag, +# it now has a record with DB_TRX_ID=0, which is invalid for +# transactional tables until MDEV-12288 is implemented. 
+# INSERT INTO tz VALUES(1); +INSERT INTO tp VALUES(1); +ROLLBACK; + +SELECT * FROM tr; +SELECT * FROM tc; +SELECT * FROM td; +# SELECT * FROM tz; +SELECT * FROM tp; + +DROP TABLE tr,tc,td,tz,tp; + +--let $restart_parameters= +--source include/restart_mysqld.inc + +--list_files $bugdir +--remove_files_wildcard $bugdir +--rmdir $bugdir diff --git a/mysql-test/suite/innodb/t/temporary_table.test b/mysql-test/suite/innodb/t/temporary_table.test index f841acff1c0..1148f2ee1a0 100644 --- a/mysql-test/suite/innodb/t/temporary_table.test +++ b/mysql-test/suite/innodb/t/temporary_table.test @@ -15,7 +15,6 @@ call mtr.add_suppression("The table 't1' is full"); call mtr.add_suppression("Plugin 'InnoDB' init function returned error"); call mtr.add_suppression("Plugin 'InnoDB' registration as a STORAGE ENGINE failed"); call mtr.add_suppression("InnoDB: Tablespace doesn't support raw devices"); -call mtr.add_suppression("InnoDB: The innodb_temporary data file 'ibtmp1' must be at least"); call mtr.add_suppression("InnoDB: Plugin initialization aborted"); call mtr.add_suppression("innodb_temporary and innodb_system file names seem to be the same"); call mtr.add_suppression("Could not create the shared innodb_temporary"); @@ -143,12 +142,6 @@ eval $check_no_innodb; --source include/search_pattern_in_file.inc eval $check_no_innodb; ---let $restart_parameters= --innodb_temp_data_file_path=ibtmp1:2M:autoextend ---source include/restart_mysqld.inc ---let SEARCH_PATTERN = The innodb_temporary data file 'ibtmp1' must be at least ---source include/search_pattern_in_file.inc -eval $check_no_innodb; - --let $restart_parameters= --innodb_temp_data_file_path= --source include/restart_mysqld.inc --let SEARCH_PATTERN = InnoDB: syntax error in file path diff --git a/mysql-test/suite/innodb/t/truncate_purge_debug.test b/mysql-test/suite/innodb/t/truncate_purge_debug.test index 513c59e12c8..e8f5768f557 100644 --- a/mysql-test/suite/innodb/t/truncate_purge_debug.test +++ 
b/mysql-test/suite/innodb/t/truncate_purge_debug.test @@ -33,25 +33,8 @@ COMMIT; disconnect con2; connection default; +--source include/wait_all_purged.inc -# Wait for everything to be purged. - -let $wait_counter= 300; -while ($wait_counter) -{ - --replace_regex /.*History list length ([0-9]+).*/\1/ - let $remaining= `SHOW ENGINE INNODB STATUS`; - if ($remaining == 'InnoDB 0') - { - let $wait_counter= 0; - } - if ($wait_counter) - { - real_sleep 0.1; - dec $wait_counter; - } -} -echo $remaining transactions not purged; SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency; SET DEBUG_SYNC = 'now SIGNAL finish_scan'; diff --git a/mysql-test/suite/innodb_fts/r/innodb_fts_multiple_index.result b/mysql-test/suite/innodb_fts/r/innodb_fts_multiple_index.result index 0b011639e2e..b0f7d7727d6 100644 --- a/mysql-test/suite/innodb_fts/r/innodb_fts_multiple_index.result +++ b/mysql-test/suite/innodb_fts/r/innodb_fts_multiple_index.result @@ -46,9 +46,9 @@ id a b 1 MySQL Tutorial DBMS stands for DataBase ... select *, MATCH(a) AGAINST("Optimizing MySQL" IN BOOLEAN MODE) as x from t1; id a b x -1 MySQL Tutorial DBMS stands for DataBase ... 0.0906190574169159 -2 How To Use MySQL Well After you went through a ... 0.0906190574169159 -3 Optimizing MySQL In this tutorial we will show ... 0.6961383819580078 +1 MySQL Tutorial DBMS stands for DataBase ... 0.000000001885928302414186 +2 How To Use MySQL Well After you went through a ... 0.000000001885928302414186 +3 Optimizing MySQL In this tutorial we will show ... 0.22764469683170319 select *, MATCH(b) AGAINST("collections support" IN BOOLEAN MODE) as x from t1; id a b x 1 MySQL Tutorial DBMS stands for DataBase ... 0 @@ -90,9 +90,9 @@ id a b 1 MySQL Tutorial DBMS stands for DataBase ... select *, MATCH(a) AGAINST("Optimizing MySQL" IN BOOLEAN MODE) as x from t1; id a b x -1 MySQL Tutorial DBMS stands for DataBase ... 0.0906190574169159 -2 How To Use MySQL Well After you went through a ... 
0.0906190574169159 -3 Optimizing MySQL In this tutorial we will show ... 0.6961383819580078 +1 MySQL Tutorial DBMS stands for DataBase ... 0.000000001885928302414186 +2 How To Use MySQL Well After you went through a ... 0.000000001885928302414186 +3 Optimizing MySQL In this tutorial we will show ... 0.22764469683170319 select *, MATCH(b) AGAINST("collections support" IN BOOLEAN MODE) as x from t1; id a b x 1 MySQL Tutorial DBMS stands for DataBase ... 0 diff --git a/mysql-test/suite/innodb_zip/include/have_innodb_zip.inc b/mysql-test/suite/innodb_zip/include/have_innodb_zip.inc deleted file mode 100644 index 6af83d51304..00000000000 --- a/mysql-test/suite/innodb_zip/include/have_innodb_zip.inc +++ /dev/null @@ -1,4 +0,0 @@ -if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE LOWER(variable_name) = 'innodb_page_size' AND variable_value <= 16384`) -{ - --skip Test with InnoDB zip requires page size not greater than 16k. -} diff --git a/mysql-test/suite/innodb_zip/r/wl6344_compress_level.result b/mysql-test/suite/innodb_zip/r/wl6344_compress_level.result index 5cdfe162b6a..b33d9c0c8de 100644 --- a/mysql-test/suite/innodb_zip/r/wl6344_compress_level.result +++ b/mysql-test/suite/innodb_zip/r/wl6344_compress_level.result @@ -1,28 +1,13 @@ -USE test; -DROP TABLE IF EXISTS tab5; -Warnings: -Note 1051 Unknown table 'test.tab5' -DROP TABLE IF EXISTS tab6; -Warnings: -Note 1051 Unknown table 'test.tab6' -#set the other madatory flags before test starts SET GLOBAL Innodb_file_per_table=on; -#set the compression level=0 (No compress) +SET @save_innodb_compression_level = @@global.innodb_compression_level; SET global innodb_compression_level=0; -#check the compression level and the compressed_pages is default -SELECT @@innodb_compression_level; -@@innodb_compression_level -0 -SELECT @@Innodb_file_per_table; -@@Innodb_file_per_table -1 #create table with 1K block size CREATE TABLE tab5 (col_1 CHAR (255) , col_2 VARCHAR (255), col_3 longtext, col_4 longtext,col_5 
longtext, col_6 longtext , col_7 longtext , col_8 longtext ,col_9 longtext , -col_10 longtext ,col_11 int auto_increment primary key) +col_10 longtext ,col_11 int auto_increment primary key) ENGINE = innodb ROW_FORMAT=compressed key_block_size=1; #create indexes CREATE INDEX idx1 ON tab5(col_4(10)); @@ -42,7 +27,8 @@ SET @col_7 = repeat('g', 100); SET @col_8 = repeat('h', 100); SET @col_9 = repeat('i', 100); SET @col_10 = repeat('j', 100); -#insert 10 records +#insert 10 records +BEGIN; INSERT INTO tab5(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); INSERT INTO tab5(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) @@ -63,7 +49,8 @@ INSERT INTO tab5(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); INSERT INTO tab5(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); -#set the compression level=9 (High compress) +COMMIT; +#set the compression level=9 (High compress) SET global innodb_compression_level=9; #create table with 1K block size CREATE TABLE tab6 (col_1 CHAR (255) , @@ -80,18 +67,8 @@ CREATE INDEX idx3 ON tab6(col_6(10)); CREATE INDEX idx4 ON tab6(col_7(10)); CREATE INDEX idx5 ON tab6(col_8(10)); CREATE INDEX idx6 ON tab6(col_11); -#load the with repeat function -SET @col_1 = repeat('a', 100); -SET @col_2 = repeat('b', 100); -SET @col_3 = repeat('c', 100); -SET @col_4 = repeat('d', 100); -SET @col_5 = repeat('e', 100); -SET @col_6 = repeat('f', 100); -SET @col_7 = repeat('g', 100); -SET @col_8 = repeat('h', 100); -SET @col_9 = repeat('i', 100); -SET @col_10 = repeat('j', 100); -#insert 10 records +#insert 10 records +BEGIN; INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) @@ -112,24 +89,21 @@ INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +COMMIT; #diff the sizes of the No compressed table and high compressed table -SET @size=(SELECT -(SELECT (SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024) -FROM INFORMATION_SCHEMA.TABLES +SET @size=(SELECT +(SELECT (SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024) +FROM INFORMATION_SCHEMA.TABLES WHERE table_name='tab5' AND ENGINE='InnoDB' AND table_schema='test') - -(SELECT SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024 -FROM INFORMATION_SCHEMA.TABLES +(SELECT SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024 +FROM INFORMATION_SCHEMA.TABLES WHERE table_name='tab6' AND ENGINE='InnoDB' AND table_schema='test') FROM DUAL); -#check the size of the table, it should not be Negative value +#check the size of the table, it should not be Negative value #The results of this query Test pass = 1 and fail=0 SELECT @size >= 0; @size >= 0 1 -# -# Cleanup -# -DROP TABLE tab5; -DROP TABLE tab6; -#reset back the compression_level to default. 
+DROP TABLE tab5, tab6; +SET GLOBAL innodb_compression_level = @save_innodb_compression_level; diff --git a/mysql-test/suite/innodb_zip/t/bug36169.test b/mysql-test/suite/innodb_zip/t/bug36169.test index 5452c929b92..07566b204bd 100644 --- a/mysql-test/suite/innodb_zip/t/bug36169.test +++ b/mysql-test/suite/innodb_zip/t/bug36169.test @@ -3,8 +3,7 @@ # http://bugs.mysql.com/36169 # --- source include/have_innodb.inc --- source include/have_innodb_zip.inc +-- source include/innodb_page_size_small.inc let $file_per_table=`select @@innodb_file_per_table`; SET GLOBAL innodb_file_per_table=ON; diff --git a/mysql-test/suite/innodb_zip/t/bug52745.test b/mysql-test/suite/innodb_zip/t/bug52745.test index a3de7323efe..be4b5c2bfcb 100644 --- a/mysql-test/suite/innodb_zip/t/bug52745.test +++ b/mysql-test/suite/innodb_zip/t/bug52745.test @@ -1,5 +1,4 @@ --- source include/have_innodb.inc --- source include/have_innodb_zip.inc +-- source include/innodb_page_size_small.inc let $file_per_table=`select @@innodb_file_per_table`; SET GLOBAL innodb_file_per_table=on; diff --git a/mysql-test/suite/innodb_zip/t/bug53591.test b/mysql-test/suite/innodb_zip/t/bug53591.test index 1943c59fe17..67223027bad 100644 --- a/mysql-test/suite/innodb_zip/t/bug53591.test +++ b/mysql-test/suite/innodb_zip/t/bug53591.test @@ -1,5 +1,4 @@ --- source include/have_innodb.inc --- source include/have_innodb_zip.inc +-- source include/innodb_page_size_small.inc let $file_per_table=`select @@innodb_file_per_table`; diff --git a/mysql-test/suite/innodb_zip/t/bug56680.test b/mysql-test/suite/innodb_zip/t/bug56680.test index 694c5ffac59..da37f6a28b4 100644 --- a/mysql-test/suite/innodb_zip/t/bug56680.test +++ b/mysql-test/suite/innodb_zip/t/bug56680.test @@ -3,8 +3,7 @@ # # Bug #56680 InnoDB may return wrong results from a case-insensitive index # --- source include/have_innodb.inc --- source include/have_innodb_zip.inc +-- source include/innodb_page_size_small.inc -- disable_query_log SET @tx_isolation_orig = 
@@tx_isolation; diff --git a/mysql-test/suite/innodb_zip/t/create_options.test b/mysql-test/suite/innodb_zip/t/create_options.test index dbc421184bb..f91cd7d42dd 100644 --- a/mysql-test/suite/innodb_zip/t/create_options.test +++ b/mysql-test/suite/innodb_zip/t/create_options.test @@ -56,8 +56,7 @@ # since they are rejected for InnoDB page sizes of 8k and 16k. # See innodb_16k and innodb_8k for those tests. --- source include/have_innodb.inc --- source include/have_innodb_zip.inc +--source include/innodb_page_size_small.inc SET default_storage_engine=InnoDB; # These values can change during the test diff --git a/mysql-test/suite/innodb_zip/t/innochecksum.test b/mysql-test/suite/innodb_zip/t/innochecksum.test index 0403251bf64..63a4b418677 100644 --- a/mysql-test/suite/innodb_zip/t/innochecksum.test +++ b/mysql-test/suite/innodb_zip/t/innochecksum.test @@ -1,8 +1,7 @@ #************************************************************ # WL6045:Improve Innochecksum #************************************************************ ---source include/have_innodb.inc ---source include/have_innodb_zip.inc +--source include/innodb_page_size_small.inc --source include/no_valgrind_without_big.inc # Embedded server does not support crashing. --source include/not_embedded.inc diff --git a/mysql-test/suite/innodb_zip/t/innochecksum_2.test b/mysql-test/suite/innodb_zip/t/innochecksum_2.test index 7f4ad336805..330bb81ba75 100644 --- a/mysql-test/suite/innodb_zip/t/innochecksum_2.test +++ b/mysql-test/suite/innodb_zip/t/innochecksum_2.test @@ -1,8 +1,7 @@ #************************************************************ # WL6045:Improve Innochecksum #************************************************************ ---source include/have_innodb.inc ---source include/have_innodb_zip.inc +--source include/innodb_page_size_small.inc --source include/have_debug.inc --source include/no_valgrind_without_big.inc # Avoid CrashReporter popup on Mac. 
diff --git a/mysql-test/suite/innodb_zip/t/innochecksum_3.test b/mysql-test/suite/innodb_zip/t/innochecksum_3.test index 7895510b335..54c67ff1a9b 100644 --- a/mysql-test/suite/innodb_zip/t/innochecksum_3.test +++ b/mysql-test/suite/innodb_zip/t/innochecksum_3.test @@ -1,8 +1,7 @@ #************************************************************ # WL6045:Improve Innochecksum #************************************************************ ---source include/have_innodb.inc ---source include/have_innodb_zip.inc +--source include/innodb_page_size_small.inc --source include/no_valgrind_without_big.inc diff --git a/mysql-test/suite/innodb_zip/t/innodb-zip.test b/mysql-test/suite/innodb_zip/t/innodb-zip.test index 7a35a3fe50b..ff73c99f41d 100644 --- a/mysql-test/suite/innodb_zip/t/innodb-zip.test +++ b/mysql-test/suite/innodb_zip/t/innodb-zip.test @@ -1,4 +1,4 @@ --- source include/have_innodb.inc +--source include/innodb_page_size_small.inc CREATE DATABASE mysqltest_innodb_zip; USE mysqltest_innodb_zip; diff --git a/mysql-test/suite/innodb_zip/t/innodb_bug36169.test b/mysql-test/suite/innodb_zip/t/innodb_bug36169.test index 93051d56639..6a9b2099fa8 100644 --- a/mysql-test/suite/innodb_zip/t/innodb_bug36169.test +++ b/mysql-test/suite/innodb_zip/t/innodb_bug36169.test @@ -1,4 +1,4 @@ ---source include/have_innodb.inc +--source include/innodb_page_size_small.inc # # Bug#36169 create innodb compressed table with too large row size crashed # http://bugs.mysql.com/36169 diff --git a/mysql-test/suite/innodb_zip/t/innodb_bug36172.test b/mysql-test/suite/innodb_zip/t/innodb_bug36172.test index 622e0c56b15..6d21d656fd8 100644 --- a/mysql-test/suite/innodb_zip/t/innodb_bug36172.test +++ b/mysql-test/suite/innodb_zip/t/innodb_bug36172.test @@ -1,4 +1,4 @@ ---source include/have_innodb.inc +--source include/innodb_page_size_small.inc # # Test case for bug 36172 # diff --git a/mysql-test/suite/innodb_zip/t/innodb_bug52745.test b/mysql-test/suite/innodb_zip/t/innodb_bug52745.test index 
5882278ab9b..b7efd0692ca 100644 --- a/mysql-test/suite/innodb_zip/t/innodb_bug52745.test +++ b/mysql-test/suite/innodb_zip/t/innodb_bug52745.test @@ -1,4 +1,4 @@ --- source include/have_innodb.inc +--source include/innodb_page_size_small.inc SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR CREATE TABLE bug52745 ( diff --git a/mysql-test/suite/innodb_zip/t/innodb_bug53591.test b/mysql-test/suite/innodb_zip/t/innodb_bug53591.test index 1366e0e43ac..0bce587f106 100644 --- a/mysql-test/suite/innodb_zip/t/innodb_bug53591.test +++ b/mysql-test/suite/innodb_zip/t/innodb_bug53591.test @@ -1,4 +1,4 @@ --- source include/have_innodb.inc +--source include/innodb_page_size_small.inc SET GLOBAL innodb_strict_mode=on; set old_alter_table=0; diff --git a/mysql-test/suite/innodb_zip/t/innodb_bug56680.test b/mysql-test/suite/innodb_zip/t/innodb_bug56680.test index 4e73b1fce0f..0cb1c897f2e 100644 --- a/mysql-test/suite/innodb_zip/t/innodb_bug56680.test +++ b/mysql-test/suite/innodb_zip/t/innodb_bug56680.test @@ -1,4 +1,4 @@ ---source include/have_innodb.inc +--source include/innodb_page_size_small.inc # # Bug #56680 InnoDB may return wrong results from a case-insensitive index # diff --git a/mysql-test/suite/innodb_zip/t/recover.test b/mysql-test/suite/innodb_zip/t/recover.test index 3969cd2f44e..0cfc2b672fb 100644 --- a/mysql-test/suite/innodb_zip/t/recover.test +++ b/mysql-test/suite/innodb_zip/t/recover.test @@ -2,6 +2,13 @@ --source include/have_innodb_max_16k.inc --source include/not_embedded.inc +--disable_query_log +# This test kills the server, which could corrupt some mysql.* tables +# that are not created with ENGINE=InnoDB. +# Flush any non-InnoDB tables to prevent that from happening. 
+FLUSH TABLES; +--enable_query_log + --echo # --echo # MDEV-12720 recovery fails with "Generic error" --echo # for ROW_FORMAT=compressed diff --git a/mysql-test/suite/innodb_zip/t/restart.test b/mysql-test/suite/innodb_zip/t/restart.test index a2dacdb4a05..05ac8274278 100644 --- a/mysql-test/suite/innodb_zip/t/restart.test +++ b/mysql-test/suite/innodb_zip/t/restart.test @@ -1,9 +1,7 @@ # # These test make sure that tables are visible after rebooting # - ---source include/have_innodb.inc ---source include/have_innodb_zip.inc +--source include/innodb_page_size_small.inc --source include/have_partition.inc --source include/not_embedded.inc SET default_storage_engine=InnoDB; diff --git a/mysql-test/suite/innodb_zip/t/wl5522_debug_zip.test b/mysql-test/suite/innodb_zip/t/wl5522_debug_zip.test index 392a3ed2879..0898a32fbf5 100644 --- a/mysql-test/suite/innodb_zip/t/wl5522_debug_zip.test +++ b/mysql-test/suite/innodb_zip/t/wl5522_debug_zip.test @@ -10,8 +10,7 @@ # Avoid CrashReporter popup on Mac --source include/not_crashrep.inc --- source include/have_innodb.inc --- source include/have_innodb_zip.inc +-- source include/innodb_page_size_small.inc call mtr.add_suppression("InnoDB: Tablespace for table .* is set as discarded."); call mtr.add_suppression("InnoDB: Cannot calculate statistics for table .* because the .ibd file is missing. Please refer to .* for how to resolve the issue."); diff --git a/mysql-test/suite/innodb_zip/t/wl5522_zip.test b/mysql-test/suite/innodb_zip/t/wl5522_zip.test index 7863c2b3ffb..d92093262ac 100644 --- a/mysql-test/suite/innodb_zip/t/wl5522_zip.test +++ b/mysql-test/suite/innodb_zip/t/wl5522_zip.test @@ -1,8 +1,7 @@ # Not supported in embedded --source include/not_embedded.inc --- source include/have_innodb.inc --- source include/have_innodb_zip.inc +-- source include/innodb_page_size_small.inc call mtr.add_suppression("InnoDB: Unable to import tablespace .* because it already exists. 
Please DISCARD the tablespace before IMPORT."); diff --git a/mysql-test/suite/innodb_zip/t/wl6344_compress_level.test b/mysql-test/suite/innodb_zip/t/wl6344_compress_level.test index df4e66967f7..690efffed56 100644 --- a/mysql-test/suite/innodb_zip/t/wl6344_compress_level.test +++ b/mysql-test/suite/innodb_zip/t/wl6344_compress_level.test @@ -9,31 +9,20 @@ # greater than the # the size of the table when compression level=9 #******************************************************************* ---source include/have_innodb.inc ---source include/have_innodb_zip.inc +--source include/innodb_page_size_small.inc -USE test; -DROP TABLE IF EXISTS tab5; -DROP TABLE IF EXISTS tab6; - ---echo #set the other madatory flags before test starts SET GLOBAL Innodb_file_per_table=on; -let $innodb_compression_level = `SELECT @@global.innodb_compression_level`; +SET @save_innodb_compression_level = @@global.innodb_compression_level; ---echo #set the compression level=0 (No compress) SET global innodb_compression_level=0; --- echo #check the compression level and the compressed_pages is default -SELECT @@innodb_compression_level; -SELECT @@Innodb_file_per_table; - -- echo #create table with 1K block size CREATE TABLE tab5 (col_1 CHAR (255) , col_2 VARCHAR (255), col_3 longtext, col_4 longtext,col_5 longtext, col_6 longtext , col_7 longtext , col_8 longtext ,col_9 longtext , -col_10 longtext ,col_11 int auto_increment primary key) +col_10 longtext ,col_11 int auto_increment primary key) ENGINE = innodb ROW_FORMAT=compressed key_block_size=1; -- echo #create indexes @@ -56,7 +45,8 @@ SET @col_8 = repeat('h', 100); SET @col_9 = repeat('i', 100); SET @col_10 = repeat('j', 100); ---echo #insert 10 records +--echo #insert 10 records +BEGIN; let $i = 10; while ($i) { @@ -65,8 +55,9 @@ VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); dec $i; } +COMMIT; ---echo #set the compression level=9 (High compress) +--echo #set the compression level=9 (High compress) SET 
global innodb_compression_level=9; -- echo #create table with 1K block size @@ -86,19 +77,8 @@ CREATE INDEX idx4 ON tab6(col_7(10)); CREATE INDEX idx5 ON tab6(col_8(10)); CREATE INDEX idx6 ON tab6(col_11); ---echo #load the with repeat function -SET @col_1 = repeat('a', 100); -SET @col_2 = repeat('b', 100); -SET @col_3 = repeat('c', 100); -SET @col_4 = repeat('d', 100); -SET @col_5 = repeat('e', 100); -SET @col_6 = repeat('f', 100); -SET @col_7 = repeat('g', 100); -SET @col_8 = repeat('h', 100); -SET @col_9 = repeat('i', 100); -SET @col_10 = repeat('j', 100); - ---echo #insert 10 records +--echo #insert 10 records +BEGIN; let $i = 10; while ($i) { @@ -106,30 +86,23 @@ eval INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_ VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); dec $i; } +COMMIT; -- echo #diff the sizes of the No compressed table and high compressed table -SET @size=(SELECT -(SELECT (SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024) -FROM INFORMATION_SCHEMA.TABLES +SET @size=(SELECT +(SELECT (SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024) +FROM INFORMATION_SCHEMA.TABLES WHERE table_name='tab5' AND ENGINE='InnoDB' AND table_schema='test') - -(SELECT SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024 -FROM INFORMATION_SCHEMA.TABLES +(SELECT SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024 +FROM INFORMATION_SCHEMA.TABLES WHERE table_name='tab6' AND ENGINE='InnoDB' AND table_schema='test') FROM DUAL); ---echo #check the size of the table, it should not be Negative value +--echo #check the size of the table, it should not be Negative value --echo #The results of this query Test pass = 1 and fail=0 SELECT @size >= 0; +DROP TABLE tab5, tab6; ---echo # ---echo # Cleanup ---echo # -DROP TABLE tab5; -DROP TABLE tab6; - ---echo #reset back the compression_level to default. 
---disable_query_log -eval SET GLOBAL innodb_compression_level=$innodb_compression_level; ---enable_query_log +SET GLOBAL innodb_compression_level = @save_innodb_compression_level; diff --git a/mysql-test/suite/maria/maria.result b/mysql-test/suite/maria/maria.result index 04b042059fb..88f39827f27 100644 --- a/mysql-test/suite/maria/maria.result +++ b/mysql-test/suite/maria/maria.result @@ -1145,6 +1145,9 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ref v v 13 const # Using where; Using index alter table t1 add unique(v); ERROR 23000: Duplicate entry '{ ' for key 'v_2' +show warnings; +Level Code Message +Error 1062 Duplicate entry 'a' for key 'v_2' alter table t1 add key(v); Warnings: Note 1831 Duplicate index `v_2`. This is deprecated and will be disallowed in a future release diff --git a/mysql-test/suite/mariabackup/full_backup.test b/mysql-test/suite/mariabackup/full_backup.test index a79f54c67e4..f987410cf76 100644 --- a/mysql-test/suite/mariabackup/full_backup.test +++ b/mysql-test/suite/mariabackup/full_backup.test @@ -13,7 +13,6 @@ INSERT INTO t VALUES(2); echo # xtrabackup prepare; --disable_result_log exec $XTRABACKUP --prepare --target-dir=$targetdir; -exec $XTRABACKUP --defaults-file=$targetdir/backup-my.cnf --stats --datadir=$targetdir; -- source include/restart_and_restore.inc --enable_result_log diff --git a/mysql-test/suite/mariabackup/include/restart_and_restore.inc b/mysql-test/suite/mariabackup/include/restart_and_restore.inc index 39616cc6f15..7ee4a660b78 100644 --- a/mysql-test/suite/mariabackup/include/restart_and_restore.inc +++ b/mysql-test/suite/mariabackup/include/restart_and_restore.inc @@ -7,7 +7,7 @@ shutdown_server; echo # remove datadir; rmdir $_datadir; echo # xtrabackup move back; -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=$_datadir --target-dir=$targetdir --parallel=2; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --copy-back 
--datadir=$_datadir --target-dir=$targetdir --parallel=2 --throttle=1; echo # restart server; exec echo "restart" > $_expect_file_name; enable_reconnect; diff --git a/mysql-test/suite/mariabackup/incremental_encrypted.test b/mysql-test/suite/mariabackup/incremental_encrypted.test index c379b3c8165..04f4ba5d18a 100644 --- a/mysql-test/suite/mariabackup/incremental_encrypted.test +++ b/mysql-test/suite/mariabackup/incremental_encrypted.test @@ -27,11 +27,6 @@ echo # Prepare full backup, apply incremental one; exec $XTRABACKUP --prepare --apply-log-only --target-dir=$basedir; exec $XTRABACKUP --prepare --target-dir=$basedir --incremental-dir=$incremental_dir; -# stats also can support encryption, but needs plugin-load and plugin variables, they are stored in backup-my.cnf -# We need to prepare again to create log files though. -exec $XTRABACKUP --prepare --target-dir=$basedir; -exec $XTRABACKUP --defaults-file=$basedir/backup-my.cnf --stats --datadir=$basedir; - echo # Restore and check results; let $targetdir=$basedir; -- source include/restart_and_restore.inc diff --git a/mysql-test/suite/mariabackup/suite.pm b/mysql-test/suite/mariabackup/suite.pm index 8eecd4e8018..26d5c06cdad 100644 --- a/mysql-test/suite/mariabackup/suite.pm +++ b/mysql-test/suite/mariabackup/suite.pm @@ -21,11 +21,6 @@ $ENV{XBSTREAM}= ::mtr_exe_maybe_exists( "$::bindir/extra/mariabackup/$::opt_vs_config/mbstream", "$::path_client_bindir/mbstream"); -my $tar_version = `tar --version 2>&1`; -$ENV{HAVE_TAR} = $! ? 0: 1; -my $mariabackup_help=`$mariabackup_exe --help 2>&1`; -$ENV{HAVE_XTRABACKUP_TAR_SUPPORT} = (index($mariabackup_help,"'tar'") == -1) ? 
0 : 1; - $ENV{INNOBACKUPEX}= "$mariabackup_exe --innobackupex"; sub skip_combinations { diff --git a/mysql-test/suite/mariabackup/tar.result b/mysql-test/suite/mariabackup/tar.result deleted file mode 100644 index bbb546d7add..00000000000 --- a/mysql-test/suite/mariabackup/tar.result +++ /dev/null @@ -1,12 +0,0 @@ -CREATE TABLE t(i INT) ENGINE INNODB; -INSERT INTO t VALUES(1); -# xtrabackup backup -# xtrabackup prepare -# shutdown server -# remove datadir -# xtrabackup move back -# restart server -SELECT * FROM t; -i -1 -DROP TABLE t; diff --git a/mysql-test/suite/mariabackup/tar.test b/mysql-test/suite/mariabackup/tar.test deleted file mode 100644 index 3938d597e05..00000000000 --- a/mysql-test/suite/mariabackup/tar.test +++ /dev/null @@ -1,30 +0,0 @@ -if (`select $HAVE_TAR = 0`) -{ - --skip No tar -} -if (`select $HAVE_XTRABACKUP_TAR_SUPPORT = 0`) -{ - --skip Compiled without libarchive -} - - -CREATE TABLE t(i INT) ENGINE INNODB; -INSERT INTO t VALUES(1); - -echo # xtrabackup backup; -let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; -let $streamfile=$MYSQLTEST_VARDIR/tmp/backup.tar; -mkdir $targetdir; - - -exec $XTRABACKUP "--defaults-file=$MYSQLTEST_VARDIR/my.cnf" --backup --stream=tar > $streamfile 2>$targetdir/backup_stream.log; ---disable_result_log -exec tar -C $targetdir -x < $streamfile; -echo # xtrabackup prepare; -exec $XTRABACKUP --prepare --target-dir=$targetdir; - --- source include/restart_and_restore.inc ---enable_result_log -SELECT * FROM t; -DROP TABLE t; -rmdir $targetdir; diff --git a/mysql-test/suite/mariabackup/xb_file_key_management.test b/mysql-test/suite/mariabackup/xb_file_key_management.test index bc975a7cdc4..be0b15eb387 100644 --- a/mysql-test/suite/mariabackup/xb_file_key_management.test +++ b/mysql-test/suite/mariabackup/xb_file_key_management.test @@ -19,7 +19,6 @@ echo # xtrabackup prepare; --disable_result_log exec $XTRABACKUP --prepare --target-dir=$targetdir; -exec $XTRABACKUP --defaults-file=$targetdir/backup-my.cnf --stats 
--datadir=$targetdir ; -- source include/restart_and_restore.inc --enable_result_log diff --git a/mysql-test/suite/mariabackup/xbstream.test b/mysql-test/suite/mariabackup/xbstream.test index 06e5685276c..f2b4704a87e 100644 --- a/mysql-test/suite/mariabackup/xbstream.test +++ b/mysql-test/suite/mariabackup/xbstream.test @@ -9,7 +9,7 @@ echo # xtrabackup backup to stream; exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --stream=xbstream > $streamfile 2>$targetdir/backup_stream.log; echo # xbstream extract; --disable_result_log -exec $XBSTREAM -x -C $targetdir --parallel=16 < $streamfile; +exec $XBSTREAM -x -C $targetdir < $streamfile; echo # xtrabackup prepare; exec $XTRABACKUP --prepare --target-dir=$targetdir; diff --git a/mysql-test/suite/multi_source/mdev-9544.cnf b/mysql-test/suite/multi_source/mdev-9544.cnf new file mode 100644 index 00000000000..b43aca5370d --- /dev/null +++ b/mysql-test/suite/multi_source/mdev-9544.cnf @@ -0,0 +1,22 @@ +!include my.cnf + +[mysqld.1] +log-bin +log-slave-updates + +[mysqld.2] +log-bin +log-slave-updates + +[mysqld.3] +log-bin +log-slave-updates + +[mysqld.4] +server-id=4 +log-bin=server4-bin +log-slave-updates + +[ENV] +SERVER_MYPORT_4= @mysqld.4.port +SERVER_MYSOCK_4= @mysqld.4.socket diff --git a/mysql-test/suite/multi_source/mdev-9544.result b/mysql-test/suite/multi_source/mdev-9544.result new file mode 100644 index 00000000000..42e6b6a9f56 --- /dev/null +++ b/mysql-test/suite/multi_source/mdev-9544.result @@ -0,0 +1,90 @@ +connect server_1,127.0.0.1,root,,,$SERVER_MYPORT_1; +connect server_2,127.0.0.1,root,,,$SERVER_MYPORT_2; +connect server_3,127.0.0.1,root,,,$SERVER_MYPORT_3; +connect server_4,127.0.0.1,root,,,$SERVER_MYPORT_4; +connection server_1; +create database a; +use a; +create table t1(a int); +insert into t1 values(1); +create table t2(a int); +insert into t2 values(1); +connection server_2; +create database b; +use b; +create table t1(a int); +insert into t1 values(1); +create table t2(a 
int); +insert into t2 values(1); +connection server_3; +create database c; +use c; +create table t1(a int); +insert into t1 values(1); +create table t2(a int); +insert into t2 values(1); +connection server_4; +change master 'm1' to master_port=MYPORT_1 , master_host='127.0.0.1', master_user='root'; +change master 'm2' to master_port=MYPORT_2 , master_host='127.0.0.1', master_user='root'; +change master to master_port=MYPORT_3 , master_host='127.0.0.1', master_user='root'; +start all slaves; +set default_master_connection = 'm1'; +include/wait_for_slave_to_start.inc +set default_master_connection = 'm2'; +include/wait_for_slave_to_start.inc +set default_master_connection = ''; +include/wait_for_slave_to_start.inc +use a; +show tables; +Tables_in_a +t1 +t2 +use b; +show tables; +Tables_in_b +t1 +t2 +use c; +show tables; +Tables_in_c +t1 +t2 +#TEST +SET default_master_connection = "m1"; +SET default_master_connection = "m2"; +SET default_master_connection = ""; +flush logs; +SET default_master_connection = "m1"; +#log Rotated +mysqld-relay-bin.000002 +mysqld-relay-bin.000003 +mysqld-relay-bin.index +SET default_master_connection = "m2"; +#log Rotated +mysqld-relay-bin-m1.000002 +mysqld-relay-bin-m1.000003 +mysqld-relay-bin-m1.index +SET default_master_connection = ""; +#log Rotated +mysqld-relay-bin-m2.000002 +mysqld-relay-bin-m2.000003 +mysqld-relay-bin-m2.index +#CleanUp +connection server_1; +drop database a; +connection server_2; +drop database b; +connection server_3; +drop database c; +connection server_4; +stop all slaves; +Warnings: +Note 1938 SLAVE 'm2' stopped +Note 1938 SLAVE '' stopped +Note 1938 SLAVE 'm1' stopped +SET default_master_connection = "m1"; +include/wait_for_slave_to_stop.inc +SET default_master_connection = "m2"; +include/wait_for_slave_to_stop.inc +SET default_master_connection = ""; +include/wait_for_slave_to_stop.inc diff --git a/mysql-test/suite/multi_source/mdev-9544.test b/mysql-test/suite/multi_source/mdev-9544.test new file mode 
100644 index 00000000000..f532a63a585 --- /dev/null +++ b/mysql-test/suite/multi_source/mdev-9544.test @@ -0,0 +1,116 @@ +--source include/not_embedded.inc +--source include/have_innodb.inc +--source include/have_debug.inc + +--connect (server_1,127.0.0.1,root,,,$SERVER_MYPORT_1) +--connect (server_2,127.0.0.1,root,,,$SERVER_MYPORT_2) +--connect (server_3,127.0.0.1,root,,,$SERVER_MYPORT_3) +--connect (server_4,127.0.0.1,root,,,$SERVER_MYPORT_4) + +--connection server_1 +create database a; +use a; +create table t1(a int); +insert into t1 values(1); +create table t2(a int); +insert into t2 values(1); +--save_master_pos + +--connection server_2 +create database b; +use b; +create table t1(a int); +insert into t1 values(1); +create table t2(a int); +insert into t2 values(1); +--save_master_pos + +--connection server_3 +create database c; +use c; +create table t1(a int); +insert into t1 values(1); +create table t2(a int); +insert into t2 values(1); +--save_master_pos + +--connection server_4 +--disable_warnings +--replace_result $SERVER_MYPORT_1 MYPORT_1 +eval change master 'm1' to master_port=$SERVER_MYPORT_1 , master_host='127.0.0.1', master_user='root'; +--replace_result $SERVER_MYPORT_2 MYPORT_2 +eval change master 'm2' to master_port=$SERVER_MYPORT_2 , master_host='127.0.0.1', master_user='root'; +--replace_result $SERVER_MYPORT_3 MYPORT_3 +eval change master to master_port=$SERVER_MYPORT_3 , master_host='127.0.0.1', master_user='root'; +start all slaves; +set default_master_connection = 'm1'; +--source include/wait_for_slave_to_start.inc +set default_master_connection = 'm2'; +--source include/wait_for_slave_to_start.inc +set default_master_connection = ''; +--source include/wait_for_slave_to_start.inc + +--enable_warnings +--sync_with_master 0,'m1' +--sync_with_master 0,'m2' +--sync_with_master 0,'' +use a; +show tables; +use b; +show tables; +use c; +show tables; +--echo #TEST +SET default_master_connection = "m1"; +--let $old_m1 = query_get_value("show relaylog 
events;", "Log_name",1) +SET default_master_connection = "m2"; +--let $old_m2 = query_get_value("show relaylog events;", "Log_name",1) +SET default_master_connection = ""; +--let $old__ = query_get_value("show relaylog events;", "Log_name",1) +flush logs; +--sleep 2 +--let $MYSQLD_DATADIR = `select @@datadir` +SET default_master_connection = "m1"; +--let $new_m1 = query_get_value("show relaylog events;", "Log_name",1) +--if ($new_m1 != $old_m1) { + --echo #log Rotated + --exec ls $MYSQLD_DATADIR | grep "mysqld-relay-bin\." +} +SET default_master_connection = "m2"; +--let $new_m2 = query_get_value("show relaylog events;", "Log_name",1) +--if ($new_m2 != $old_m2) { + --echo #log Rotated + --exec ls $MYSQLD_DATADIR | grep "mysqld-relay-bin-m1" +} +SET default_master_connection = ""; +--let $new__ = query_get_value("show relaylog events;", "Log_name",1) +--if ($new__ != $old__) { + --echo #log Rotated + --exec ls $MYSQLD_DATADIR | grep "mysqld-relay-bin-m2" +} + +--echo #CleanUp +--connection server_1 +drop database a; +--save_master_pos + +--connection server_2 +drop database b; +--save_master_pos + +--connection server_3 +drop database c; +--save_master_pos + +--connection server_4 +--sync_with_master 0,'m1' +--sync_with_master 0,'m2' +--sync_with_master 0,'' +stop all slaves; +SET default_master_connection = "m1"; +--source include/wait_for_slave_to_stop.inc +SET default_master_connection = "m2"; +--source include/wait_for_slave_to_stop.inc +SET default_master_connection = ""; +--source include/wait_for_slave_to_stop.inc + diff --git a/mysql-test/suite/perfschema/r/start_server_1_digest.result b/mysql-test/suite/perfschema/r/start_server_1_digest.result new file mode 100644 index 00000000000..cf07022d344 --- /dev/null +++ b/mysql-test/suite/perfschema/r/start_server_1_digest.result @@ -0,0 +1,7 @@ +SELECT "Digest table has a size 1 and is full already." as use_case; +use_case +Digest table has a size 1 and is full already. 
+select SCHEMA_NAME, DIGEST, DIGEST_TEXT +from performance_schema.events_statements_summary_by_digest; +SCHEMA_NAME DIGEST DIGEST_TEXT +NULL NULL NULL diff --git a/mysql-test/suite/perfschema/r/view_table_io.result b/mysql-test/suite/perfschema/r/view_table_io.result index 5d8ad26ae77..2f9a70cdeaa 100644 --- a/mysql-test/suite/perfschema/r/view_table_io.result +++ b/mysql-test/suite/perfschema/r/view_table_io.result @@ -51,7 +51,7 @@ bar 2 insert into marker set a = 1; optimize table test.v1; Table Op Msg_type Msg_text -test.v1 optimize Error 'test.v1' is not BASE TABLE +test.v1 optimize Error 'test.v1' is not of type 'BASE TABLE' test.v1 optimize status Operation failed insert into marker set a = 1; select * from test.v1; diff --git a/mysql-test/suite/perfschema/t/start_server_1_digest-master.opt b/mysql-test/suite/perfschema/t/start_server_1_digest-master.opt new file mode 100644 index 00000000000..c3a6012fbac --- /dev/null +++ b/mysql-test/suite/perfschema/t/start_server_1_digest-master.opt @@ -0,0 +1 @@ +--loose-performance-schema-digests-size=1 diff --git a/mysql-test/suite/perfschema/t/start_server_1_digest.test b/mysql-test/suite/perfschema/t/start_server_1_digest.test new file mode 100644 index 00000000000..998d9a5eebe --- /dev/null +++ b/mysql-test/suite/perfschema/t/start_server_1_digest.test @@ -0,0 +1,15 @@ +# ----------------------------------------------------------------------- +# Tests for the performance schema statement Digests. +# ----------------------------------------------------------------------- + +--source include/not_embedded.inc +--source include/have_perfschema.inc +--source include/no_protocol.inc + +SELECT "Digest table has a size 1 and is full already." 
as use_case; + +select SCHEMA_NAME, DIGEST, DIGEST_TEXT + from performance_schema.events_statements_summary_by_digest; + + + diff --git a/mysql-test/suite/plugins/r/show_all_plugins.result b/mysql-test/suite/plugins/r/show_all_plugins.result index 4471011b660..dd6cbfce4c4 100644 --- a/mysql-test/suite/plugins/r/show_all_plugins.result +++ b/mysql-test/suite/plugins/r/show_all_plugins.result @@ -26,7 +26,7 @@ three_attempts NOT INSTALLED AUTHENTICATION dialog_examples.so GPL two_questions NOT INSTALLED AUTHENTICATION dialog_examples.so GPL show status like '%libraries%'; Variable_name Value -Opened_plugin_libraries 8 +Opened_plugin_libraries 7 show plugins soname where library = 'ha_example.so'; Name Status Type Library License EXAMPLE NOT INSTALLED STORAGE ENGINE ha_example.so GPL diff --git a/mysql-test/suite/rpl/r/rpl_mdev-11092.result b/mysql-test/suite/rpl/r/rpl_mdev-11092.result new file mode 100644 index 00000000000..90b809477b2 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_mdev-11092.result @@ -0,0 +1,21 @@ +include/master-slave.inc +[connection master] +call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT"); +call mtr.add_suppression("Slave SQL: The incident LOST_EVENTS occured on the master. 
.*"); +SET GLOBAL max_binlog_cache_size = 4096; +SET GLOBAL binlog_cache_size = 4096; +SET GLOBAL max_binlog_stmt_cache_size = 4096; +SET GLOBAL binlog_stmt_cache_size = 4096; +disconnect master; +connect master,127.0.0.1,root,,test,$MASTER_MYPORT,; +CREATE TABLE t1(a INT PRIMARY KEY, data VARCHAR(30000)) ENGINE=MYISAM; +connection master; +ERROR HY000: Writing one row to the row-based binary log failed +include/wait_for_slave_sql_error_and_skip.inc [errno=1590] +connection master; +SET GLOBAL max_binlog_cache_size= ORIGINAL_VALUE; +SET GLOBAL binlog_cache_size= ORIGINAL_VALUE; +SET GLOBAL max_binlog_stmt_cache_size= ORIGINAL_VALUE; +SET GLOBAL binlog_stmt_cache_size= ORIGINAL_VALUE; +DROP TABLE t1; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/sequence.result b/mysql-test/suite/rpl/r/sequence.result index 9e0c8b7bc5b..6286c5c7106 100644 --- a/mysql-test/suite/rpl/r/sequence.result +++ b/mysql-test/suite/rpl/r/sequence.result @@ -38,7 +38,7 @@ insert into t1 select * from s1; insert into s1 values(-100,-1000,9223372036854775806,1,1,1000,0,0); insert into t1 select * from s1; select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 10 0 0 21 1 9223372036854775806 1 1 10 0 0 21 1 9223372036854775806 1 1 10 0 0 @@ -47,7 +47,7 @@ next_value min_value max_value start increment cache cycle round -100 -1000 9223372036854775806 1 1 1000 0 0 connection slave; select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 10 0 0 21 1 9223372036854775806 1 1 10 0 0 21 1 9223372036854775806 1 1 10 0 0 @@ -56,7 +56,7 @@ next_value min_value max_value start increment cache cycle round -100 -1000 9223372036854775806 1 1 1000 0 0 connection slave2; 
select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 10 0 0 21 1 9223372036854775806 1 1 10 0 0 21 1 9223372036854775806 1 1 10 0 0 @@ -69,14 +69,14 @@ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE SEQUENCE s1 cache=10 master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE TABLE `t1` ( - `next_value` bigint(21) NOT NULL COMMENT 'next not cached value', - `min_value` bigint(21) NOT NULL COMMENT 'min value', - `max_value` bigint(21) NOT NULL COMMENT 'max value', - `start` bigint(21) NOT NULL COMMENT 'start value', + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL COMMENT 'start value when sequences is created or value if RESTART is used', `increment` bigint(21) NOT NULL COMMENT 'increment value', - `cache` bigint(21) NOT NULL COMMENT 'cache size', - `cycle` tinyint(1) unsigned NOT NULL COMMENT 'cycle state', - `round` bigint(21) NOT NULL COMMENT 'How many cycles has been done' + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL COMMENT '0 if no cycles are allowed, 1 if the sequence should begin a new cycle when maximum_value is passed', + `cycle_count` bigint(21) NOT NULL COMMENT 'How many cycles have been done' ) PAGE_CHECKSUM=1 master-bin.000001 # Annotate_rows # # create table t1 select * from s1 master-bin.000001 # Table_map # # table_id: # (test.t1) diff --git a/mysql-test/suite/rpl/t/rpl_mdev-11092.opt b/mysql-test/suite/rpl/t/rpl_mdev-11092.opt new file mode 100644 index 00000000000..7f1d270d29f --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_mdev-11092.opt @@ -0,0 +1 @@ +--binlog_checksum=1 --binlog-annotate-row-events=1 diff --git 
a/mysql-test/suite/rpl/t/rpl_mdev-11092.test b/mysql-test/suite/rpl/t/rpl_mdev-11092.test new file mode 100644 index 00000000000..c8b2b7f2ad1 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_mdev-11092.test @@ -0,0 +1,53 @@ +--source include/have_innodb.inc +--source include/master-slave.inc +--source include/not_embedded.inc +--source include/not_windows.inc +--source include/have_binlog_format_row.inc + +######################################################################################## +call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT"); +call mtr.add_suppression("Slave SQL: The incident LOST_EVENTS occured on the master. .*"); + +let $old_max_binlog_cache_size= query_get_value(SHOW VARIABLES LIKE "max_binlog_cache_size", Value, 1); +let $old_binlog_cache_size= query_get_value(SHOW VARIABLES LIKE "binlog_cache_size", Value, 1); +let $old_max_binlog_stmt_cache_size= query_get_value(SHOW VARIABLES LIKE "max_binlog_stmt_cache_size", Value, 1); +let $old_binlog_stmt_cache_size= query_get_value(SHOW VARIABLES LIKE "binlog_stmt_cache_size", Value, 1); + +SET GLOBAL max_binlog_cache_size = 4096; +SET GLOBAL binlog_cache_size = 4096; +SET GLOBAL max_binlog_stmt_cache_size = 4096; +SET GLOBAL binlog_stmt_cache_size = 4096; +disconnect master; +connect (master,127.0.0.1,root,,test,$MASTER_MYPORT,); + +CREATE TABLE t1(a INT PRIMARY KEY, data VARCHAR(30000)) ENGINE=MYISAM; + +let $data = `select concat('"', repeat('a',2000), '"')`; + +connection master; + +--disable_query_log +--error ER_BINLOG_ROW_LOGGING_FAILED +eval INSERT INTO t1 (a, data) VALUES (2, + CONCAT($data, $data, $data, $data, $data, $data)); +--enable_query_log + +# Incident event +# 1590=ER_SLAVE_INCIDENT +--let $slave_sql_errno= 1590 +--source include/wait_for_slave_sql_error_and_skip.inc + +connection master; + +--replace_result $old_max_binlog_cache_size ORIGINAL_VALUE +--eval SET GLOBAL max_binlog_cache_size= 
$old_max_binlog_cache_size +--replace_result $old_binlog_cache_size ORIGINAL_VALUE +--eval SET GLOBAL binlog_cache_size= $old_binlog_cache_size +--replace_result $old_max_binlog_stmt_cache_size ORIGINAL_VALUE +--eval SET GLOBAL max_binlog_stmt_cache_size= $old_max_binlog_stmt_cache_size +--replace_result $old_binlog_stmt_cache_size ORIGINAL_VALUE +--eval SET GLOBAL binlog_stmt_cache_size= $old_binlog_stmt_cache_size + +DROP TABLE t1; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/sql_sequence/alter.result b/mysql-test/suite/sql_sequence/alter.result index bb57a704c11..76ebdeda0f7 100644 --- a/mysql-test/suite/sql_sequence/alter.result +++ b/mysql-test/suite/sql_sequence/alter.result @@ -6,7 +6,7 @@ Note 1051 Unknown table 'test.t1' # CREATE SEQUENCE t1 nocache engine=myisam; select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 0 0 0 select next value for t1; next value for t1 @@ -16,7 +16,7 @@ show create sequence t1; Table Create Table t1 CREATE SEQUENCE `t1` start with 50 minvalue 1 maxvalue 9223372036854775806 increment by 1 nocache nocycle ENGINE=MyISAM select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 2 1 9223372036854775806 50 1 0 0 0 select next value for t1; next value for t1 @@ -26,7 +26,7 @@ show create sequence t1; Table Create Table t1 CREATE SEQUENCE `t1` start with 50 minvalue -100 maxvalue 9223372036854775806 increment by 1 nocache nocycle ENGINE=MyISAM select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 3 -100 9223372036854775806 50 1 0 0 0 alter sequence t1 minvalue=100 start=100; ERROR 
HY000: Sequence 'test.t1' values are conflicting @@ -35,14 +35,14 @@ show create sequence t1; Table Create Table t1 CREATE SEQUENCE `t1` start with 100 minvalue 100 maxvalue 9223372036854775806 increment by 1 nocache nocycle ENGINE=MyISAM select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 100 100 9223372036854775806 100 1 0 0 0 alter sequence t1 maxvalue=500; show create sequence t1; Table Create Table t1 CREATE SEQUENCE `t1` start with 100 minvalue 100 maxvalue 500 increment by 1 nocache nocycle ENGINE=MyISAM select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 100 100 500 100 1 0 0 0 drop sequence t1; CREATE SEQUENCE t1 engine=myisam; @@ -64,7 +64,7 @@ show create sequence t1; Table Create Table t1 CREATE SEQUENCE `t1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 nocache nocycle ENGINE=MyISAM select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 0 0 0 select next value for t1; next value for t1 @@ -75,8 +75,8 @@ next value for t1 select next value for t1; next value for t1 3 -select next_value, round from t1; -next_value round +select next_not_cached_value, cycle_count from t1; +next_not_cached_value cycle_count 4 0 drop sequence t1; CREATE SEQUENCE t1 maxvalue=100 engine=myisam; @@ -85,7 +85,7 @@ show create sequence t1; Table Create Table t1 CREATE SEQUENCE `t1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=MyISAM select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value 
start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 1000 0 0 alter sequence t1 cycle; show create sequence t1; @@ -97,7 +97,7 @@ show create sequence t1; Table Create Table t1 CREATE SEQUENCE `t1` start with 15 minvalue 10 maxvalue 20 increment by 1 cache 1000 cycle ENGINE=MyISAM select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 15 10 20 15 1 1000 1 0 select NEXT VALUE for t1 from seq_1_to_10; NEXT VALUE for t1 @@ -131,7 +131,7 @@ show create sequence t1; Table Create Table t1 CREATE SEQUENCE `t1` start with 50 minvalue -100 maxvalue 100 increment by -2 cache 1000 nocycle ENGINE=MyISAM select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 -100 100 50 -2 1000 0 0 select NEXT VALUE for t1 from seq_1_to_10; NEXT VALUE for t1 @@ -151,7 +151,7 @@ drop sequence t1; CREATE SEQUENCE t1 cache 10 engine=innodb; select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 10 0 0 select next value for t1; next value for t1 @@ -161,7 +161,7 @@ show create sequence t1; Table Create Table t1 CREATE SEQUENCE `t1` start with 100 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 10 nocycle ENGINE=InnoDB select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 11 1 9223372036854775806 100 1 10 0 0 select next value for t1; next value for t1 @@ -195,7 +195,7 @@ show create sequence t1; Table Create Table t1 CREATE SEQUENCE `t1` start with 1 minvalue 1 maxvalue 
9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB COMMENT='foo' select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 3001 1 9223372036854775806 1 1 1000 0 0 drop sequence t1; CREATE SEQUENCE t1 engine=myisam; @@ -227,7 +227,7 @@ next value for t1 select next value for t1; ERROR HY000: Sequence 'test.t1' has run out select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 0 1 100 50 -2 1000 0 0 alter sequence t1 restart; select next value for t1; diff --git a/mysql-test/suite/sql_sequence/alter.test b/mysql-test/suite/sql_sequence/alter.test index 584bb6f19aa..fd1809ccd2f 100644 --- a/mysql-test/suite/sql_sequence/alter.test +++ b/mysql-test/suite/sql_sequence/alter.test @@ -43,7 +43,7 @@ select * from t1; select next value for t1; select next value for t1; select next value for t1; -select next_value, round from t1; +select next_not_cached_value, cycle_count from t1; drop sequence t1; CREATE SEQUENCE t1 maxvalue=100 engine=myisam; diff --git a/mysql-test/suite/sql_sequence/aria.result b/mysql-test/suite/sql_sequence/aria.result index 8bf45563a6f..b39d85d58ca 100644 --- a/mysql-test/suite/sql_sequence/aria.result +++ b/mysql-test/suite/sql_sequence/aria.result @@ -26,7 +26,7 @@ NEXT VALUE for t1 seq 19 19 20 20 select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 21 1 9223372036854775806 1 1 10 0 0 drop sequence t1; create sequence s1; diff --git a/mysql-test/suite/sql_sequence/binlog.result b/mysql-test/suite/sql_sequence/binlog.result index b4f772a204b..f01b3234e96 100644 --- a/mysql-test/suite/sql_sequence/binlog.result +++ 
b/mysql-test/suite/sql_sequence/binlog.result @@ -1,15 +1,15 @@ create or replace sequence s1 cache 3; -select next value for s1, min_value from s1 where max_value> 1; -next value for s1 min_value +select next value for s1, minimum_value from s1 where maximum_value> 1; +next value for s1 minimum_value 1 1 -select next value for s1, min_value from s1 where max_value> 2; -next value for s1 min_value +select next value for s1, minimum_value from s1 where maximum_value> 2; +next value for s1 minimum_value 2 1 -select next value for s1, min_value from s1 where max_value> 3; -next value for s1 min_value +select next value for s1, minimum_value from s1 where maximum_value> 3; +next value for s1 minimum_value 3 1 -select next value for s1, min_value from s1 where max_value> 4; -next value for s1 min_value +select next value for s1, minimum_value from s1 where maximum_value> 4; +next value for s1 minimum_value 4 1 alter sequence s1 maxvalue 1000; drop sequence s1; @@ -18,12 +18,12 @@ Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; create or replace sequence s1 cache 3 master-bin.000001 # Gtid # # BEGIN GTID #-#-# -master-bin.000001 # Annotate_rows # # select next value for s1, min_value from s1 where max_value> 1 +master-bin.000001 # Annotate_rows # # select next value for s1, minimum_value from s1 where maximum_value> 1 master-bin.000001 # Table_map # # table_id: # (test.s1) master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Query # # COMMIT master-bin.000001 # Gtid # # BEGIN GTID #-#-# -master-bin.000001 # Annotate_rows # # select next value for s1, min_value from s1 where max_value> 4 +master-bin.000001 # Annotate_rows # # select next value for s1, minimum_value from s1 where maximum_value> 4 master-bin.000001 # Table_map # # table_id: # (test.s1) master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Query # # COMMIT 
diff --git a/mysql-test/suite/sql_sequence/binlog.test b/mysql-test/suite/sql_sequence/binlog.test index 56d835f05ca..5f2d52d7864 100644 --- a/mysql-test/suite/sql_sequence/binlog.test +++ b/mysql-test/suite/sql_sequence/binlog.test @@ -11,10 +11,10 @@ reset master; # get rid of previous tests binlog --enable_query_log create or replace sequence s1 cache 3; -select next value for s1, min_value from s1 where max_value> 1; -select next value for s1, min_value from s1 where max_value> 2; -select next value for s1, min_value from s1 where max_value> 3; -select next value for s1, min_value from s1 where max_value> 4; +select next value for s1, minimum_value from s1 where maximum_value> 1; +select next value for s1, minimum_value from s1 where maximum_value> 2; +select next value for s1, minimum_value from s1 where maximum_value> 3; +select next value for s1, minimum_value from s1 where maximum_value> 4; # # Alter sequence diff --git a/mysql-test/suite/sql_sequence/create.result b/mysql-test/suite/sql_sequence/create.result index 4962752c7d9..d7492f9a22b 100644 --- a/mysql-test/suite/sql_sequence/create.result +++ b/mysql-test/suite/sql_sequence/create.result @@ -8,17 +8,17 @@ t1 CREATE SEQUENCE `t1` start with 1 minvalue 1 maxvalue 9223372036854775806 inc show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `next_value` bigint(21) NOT NULL COMMENT 'next not cached value', - `min_value` bigint(21) NOT NULL COMMENT 'min value', - `max_value` bigint(21) NOT NULL COMMENT 'max value', - `start` bigint(21) NOT NULL COMMENT 'start value', + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL COMMENT 'start value when sequences is created or value if RESTART is used', `increment` bigint(21) NOT NULL COMMENT 'increment value', - `cache` bigint(21) NOT NULL COMMENT 'cache size', - `cycle` tinyint(1) unsigned NOT NULL COMMENT 'cycle state', - `round` bigint(21) 
NOT NULL COMMENT 'How many cycles has been done' + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL COMMENT '0 if no cycles are allowed, 1 if the sequence should begin a new cycle when maximum_value is passed', + `cycle_count` bigint(21) NOT NULL COMMENT 'How many cycles have been done' ) ENGINE=MyISAM SEQUENCE=1 select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 1000 0 0 create or replace sequence t1 engine=innodb; show create sequence t1; @@ -27,17 +27,17 @@ t1 CREATE SEQUENCE `t1` start with 1 minvalue 1 maxvalue 9223372036854775806 inc show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `next_value` bigint(21) NOT NULL COMMENT 'next not cached value', - `min_value` bigint(21) NOT NULL COMMENT 'min value', - `max_value` bigint(21) NOT NULL COMMENT 'max value', - `start` bigint(21) NOT NULL COMMENT 'start value', + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL COMMENT 'start value when sequences is created or value if RESTART is used', `increment` bigint(21) NOT NULL COMMENT 'increment value', - `cache` bigint(21) NOT NULL COMMENT 'cache size', - `cycle` tinyint(1) unsigned NOT NULL COMMENT 'cycle state', - `round` bigint(21) NOT NULL COMMENT 'How many cycles has been done' + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL COMMENT '0 if no cycles are allowed, 1 if the sequence should begin a new cycle when maximum_value is passed', + `cycle_count` bigint(21) NOT NULL COMMENT 'How many cycles have been done' ) ENGINE=InnoDB SEQUENCE=1 select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment 
cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 1000 0 0 create or replace sequence t1 engine=maria; show create sequence t1; @@ -46,17 +46,17 @@ t1 CREATE SEQUENCE `t1` start with 1 minvalue 1 maxvalue 9223372036854775806 inc show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `next_value` bigint(21) NOT NULL COMMENT 'next not cached value', - `min_value` bigint(21) NOT NULL COMMENT 'min value', - `max_value` bigint(21) NOT NULL COMMENT 'max value', - `start` bigint(21) NOT NULL COMMENT 'start value', + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL COMMENT 'start value when sequences is created or value if RESTART is used', `increment` bigint(21) NOT NULL COMMENT 'increment value', - `cache` bigint(21) NOT NULL COMMENT 'cache size', - `cycle` tinyint(1) unsigned NOT NULL COMMENT 'cycle state', - `round` bigint(21) NOT NULL COMMENT 'How many cycles has been done' + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL COMMENT '0 if no cycles are allowed, 1 if the sequence should begin a new cycle when maximum_value is passed', + `cycle_count` bigint(21) NOT NULL COMMENT 'How many cycles have been done' ) ENGINE=Aria SEQUENCE=1 select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 1000 0 0 create or replace sequence t1 engine=archive; ERROR HY000: Table storage engine 'ARCHIVE' does not support the create option 'SEQUENCE' @@ -67,42 +67,42 @@ show create sequence t1; Table Create Table t1 CREATE SEQUENCE `t1` start with 10 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=MyISAM select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value 
maximum_value start_value increment cache_size cycle_option cycle_count 10 1 9223372036854775806 10 1 1000 0 0 create or replace sequence t1 minvalue=11; show create sequence t1; Table Create Table t1 CREATE SEQUENCE `t1` start with 11 minvalue 11 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=MyISAM select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 11 11 9223372036854775806 11 1 1000 0 0 create or replace sequence t1 maxvalue=13 increment by -1; show create sequence t1; Table Create Table t1 CREATE SEQUENCE `t1` start with 13 minvalue -9223372036854775807 maxvalue 13 increment by -1 cache 1000 nocycle ENGINE=MyISAM select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 13 -9223372036854775807 13 13 -1 1000 0 0 create or replace sequence t1 increment by -1 cache 100; show create sequence t1; Table Create Table t1 CREATE SEQUENCE `t1` start with -1 minvalue -9223372036854775807 maxvalue -1 increment by -1 cache 100 nocycle ENGINE=MyISAM select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count -1 -9223372036854775807 -1 -1 -1 100 0 0 create or replace sequence t1 cycle; show create sequence t1; Table Create Table t1 CREATE SEQUENCE `t1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 cycle ENGINE=MyISAM select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 1000 1 0 create or replace sequence t1 nocycle; show create sequence t1; Table Create 
Table t1 CREATE SEQUENCE `t1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=MyISAM select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 1000 0 0 show create sequence t1; Table Create Table @@ -112,7 +112,7 @@ show create sequence t1; Table Create Table t1 CREATE SEQUENCE `t1` start with 14 minvalue 14 maxvalue 9223372036854775806 increment by 1 cache 1000 cycle ENGINE=MyISAM select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 14 14 9223372036854775806 14 1 1000 1 0 create or replace sequence t1 cycle increment by -1; show create sequence t1; @@ -124,7 +124,7 @@ create sequence if not exists t1 start with 10; Warnings: Note 1050 Table 't1' already exists select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 1000 0 0 show create sequence t1; Table Create Table @@ -134,14 +134,14 @@ show create sequence t1; Table Create Table t1 CREATE SEQUENCE `t1` start with 10 minvalue 10 maxvalue 11 increment by 1 nocache cycle ENGINE=MyISAM select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 10 10 11 10 1 0 1 0 create or replace sequence t1 start with 10 minvalue=-10 maxvalue=11 cache=10 cycle increment by 10; show create sequence t1; Table Create Table t1 CREATE SEQUENCE `t1` start with 10 minvalue -10 maxvalue 11 increment by 10 cache 10 cycle ENGINE=MyISAM select * from t1; -next_value min_value max_value start increment 
cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 10 -10 11 10 10 10 1 0 create or replace sequence t1 start with 10 NO MAXVALUE NO MINVALUE; create or replace sequence t1 start with 10 maxvalue 10; @@ -155,12 +155,12 @@ show create sequence t1; Table Create Table t1 CREATE SEQUENCE `t1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 0 cache 1000 nocycle ENGINE=MyISAM select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 0 1000 0 0 drop sequence t1; create table t1 (a int); show create sequence t1; -ERROR HY000: 'test.t1' is not SEQUENCE +ERROR 42S02: 'test.t1' is not a SEQUENCE drop sequence t1; ERROR 42S02: 'test.t1' is not a SEQUENCE drop sequence if exists t1; @@ -193,46 +193,46 @@ drop sequence t1; create sequence t1; show fields from t1; Field Type Null Key Default Extra -next_value bigint(21) NO NULL -min_value bigint(21) NO NULL -max_value bigint(21) NO NULL -start bigint(21) NO NULL +next_not_cached_value bigint(21) NO NULL +minimum_value bigint(21) NO NULL +maximum_value bigint(21) NO NULL +start_value bigint(21) NO NULL increment bigint(21) NO NULL -cache bigint(21) NO NULL -cycle tinyint(1) unsigned NO NULL -round bigint(21) NO NULL +cache_size bigint(21) unsigned NO NULL +cycle_option tinyint(1) unsigned NO NULL +cycle_count bigint(21) NO NULL flush tables; show fields from t1; Field Type Null Key Default Extra -next_value bigint(21) NO NULL -min_value bigint(21) NO NULL -max_value bigint(21) NO NULL -start bigint(21) NO NULL +next_not_cached_value bigint(21) NO NULL +minimum_value bigint(21) NO NULL +maximum_value bigint(21) NO NULL +start_value bigint(21) NO NULL increment bigint(21) NO NULL -cache bigint(21) NO NULL -cycle tinyint(1) unsigned NO NULL -round bigint(21) NO NULL +cache_size 
bigint(21) unsigned NO NULL +cycle_option tinyint(1) unsigned NO NULL +cycle_count bigint(21) NO NULL create or replace sequence t1 engine=aria; show fields from t1; Field Type Null Key Default Extra -next_value bigint(21) NO NULL -min_value bigint(21) NO NULL -max_value bigint(21) NO NULL -start bigint(21) NO NULL +next_not_cached_value bigint(21) NO NULL +minimum_value bigint(21) NO NULL +maximum_value bigint(21) NO NULL +start_value bigint(21) NO NULL increment bigint(21) NO NULL -cache bigint(21) NO NULL -cycle tinyint(1) unsigned NO NULL -round bigint(21) NO NULL +cache_size bigint(21) unsigned NO NULL +cycle_option tinyint(1) unsigned NO NULL +cycle_count bigint(21) NO NULL show fields from t1; Field Type Null Key Default Extra -next_value bigint(21) NO NULL -min_value bigint(21) NO NULL -max_value bigint(21) NO NULL -start bigint(21) NO NULL +next_not_cached_value bigint(21) NO NULL +minimum_value bigint(21) NO NULL +maximum_value bigint(21) NO NULL +start_value bigint(21) NO NULL increment bigint(21) NO NULL -cache bigint(21) NO NULL -cycle tinyint(1) unsigned NO NULL -round bigint(21) NO NULL +cache_size bigint(21) unsigned NO NULL +cycle_option tinyint(1) unsigned NO NULL +cycle_count bigint(21) NO NULL flush tables; create or replace sequence t1 comment= "test 1"; show create sequence t1; @@ -241,14 +241,14 @@ t1 CREATE SEQUENCE `t1` start with 1 minvalue 1 maxvalue 9223372036854775806 inc show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `next_value` bigint(21) NOT NULL COMMENT 'next not cached value', - `min_value` bigint(21) NOT NULL COMMENT 'min value', - `max_value` bigint(21) NOT NULL COMMENT 'max value', - `start` bigint(21) NOT NULL COMMENT 'start value', + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL COMMENT 'start value when sequences is created or value if RESTART is used', `increment` bigint(21) NOT NULL COMMENT 
'increment value', - `cache` bigint(21) NOT NULL COMMENT 'cache size', - `cycle` tinyint(1) unsigned NOT NULL COMMENT 'cycle state', - `round` bigint(21) NOT NULL COMMENT 'How many cycles has been done' + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL COMMENT '0 if no cycles are allowed, 1 if the sequence should begin a new cycle when maximum_value is passed', + `cycle_count` bigint(21) NOT NULL COMMENT 'How many cycles have been done' ) ENGINE=MyISAM SEQUENCE=1 COMMENT='test 1' create or replace sequence t1 comment= "test 2" min_rows=1 max_rows=2; show create sequence t1; @@ -257,27 +257,28 @@ t1 CREATE SEQUENCE `t1` start with 1 minvalue 1 maxvalue 9223372036854775806 inc show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `next_value` bigint(21) NOT NULL COMMENT 'next not cached value', - `min_value` bigint(21) NOT NULL COMMENT 'min value', - `max_value` bigint(21) NOT NULL COMMENT 'max value', - `start` bigint(21) NOT NULL COMMENT 'start value', + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL COMMENT 'start value when sequences is created or value if RESTART is used', `increment` bigint(21) NOT NULL COMMENT 'increment value', - `cache` bigint(21) NOT NULL COMMENT 'cache size', - `cycle` tinyint(1) unsigned NOT NULL COMMENT 'cycle state', - `round` bigint(21) NOT NULL COMMENT 'How many cycles has been done' + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL COMMENT '0 if no cycles are allowed, 1 if the sequence should begin a new cycle when maximum_value is passed', + `cycle_count` bigint(21) NOT NULL COMMENT 'How many cycles have been done' ) ENGINE=MyISAM MIN_ROWS=1 MAX_ROWS=2 SEQUENCE=1 COMMENT='test 2' create or replace sequence t1 start=1 increment= 2; create or replace sequence t1 start 1 increment 2; +create or replace sequence t1 cache +1; drop 
sequence t1; CREATE TABLE t1 ( -`next_value` bigint(21) NOT NULL, -`min_value` bigint(21) NOT NULL, -`max_value` bigint(21) NOT NULL, -`start` bigint(21) NOT NULL, +`next_not_cached_value` bigint(21) NOT NULL, +`minimum_value` bigint(21) NOT NULL, +`maximum_value` bigint(21) NOT NULL, +`start_value` bigint(21) NOT NULL, `increment` bigint(21) NOT NULL, -`cache` bigint(21) NOT NULL, -`cycle` tinyint(1) unsigned NOT NULL, -`round` bigint(21) NOT NULL +`cache_size` bigint(21) unsigned NOT NULL, +`cycle_option` tinyint(1) unsigned NOT NULL, +`cycle_count` bigint(21) NOT NULL ) sequence=1; show create sequence t1; Table Create Table @@ -285,93 +286,93 @@ t1 CREATE SEQUENCE `t1` start with 1 minvalue 1 maxvalue 9223372036854775806 inc show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `next_value` bigint(21) NOT NULL, - `min_value` bigint(21) NOT NULL, - `max_value` bigint(21) NOT NULL, - `start` bigint(21) NOT NULL, + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL, `increment` bigint(21) NOT NULL, - `cache` bigint(21) NOT NULL, - `cycle` tinyint(1) unsigned NOT NULL, - `round` bigint(21) NOT NULL + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL, + `cycle_count` bigint(21) NOT NULL ) ENGINE=MyISAM SEQUENCE=1 drop sequence t1; CREATE OR REPLACE TABLE t1 ( -`next_val` bigint(21) NOT NULL, -`min_value` bigint(21) NOT NULL, -`max_value` bigint(21) NOT NULL, -`start` bigint(21) NOT NULL, +`next_not_cached_value` bigint(21) NOT NULL, +`minimum_value` bigint(21) NOT NULL, +`maximum_value` bigint(21) NOT NULL, +`start_value` bigint(21) NOT NULL, `increment` bigint(21) NOT NULL, -`cache` bigint(21) NOT NULL, -`cycle` tinyint(1) unsigned NOT NULL, -`round` bigint(21) NOT NULL +`cache_size` bigint(21) unsigned NOT NULL, +`cycle_option` tinyint(1) unsigned NOT NULL, +`cycle_count_not_exists` bigint(21) NOT NULL ) 
sequence=1; -ERROR HY000: Sequence 'test.t1' table structure is invalid (next_val) +ERROR HY000: Sequence 'test.t1' table structure is invalid (cycle_count_not_exists) CREATE OR REPLACE TABLE t1 ( -`next_value` int(21) NOT NULL, -`min_value` bigint(21) NOT NULL, -`max_value` bigint(21) NOT NULL, -`start` bigint(21) NOT NULL, +`next_not_cached_value` int(21) NOT NULL, +`minimum_value` bigint(21) NOT NULL, +`maximum_value` bigint(21) NOT NULL, +`start_value` bigint(21) NOT NULL, `increment` bigint(21) NOT NULL, -`cache` bigint(21) NOT NULL, -`cycle` tinyint(1) unsigned NOT NULL, -`round` bigint(21) NOT NULL +`cache_size` bigint(21) unsigned NOT NULL, +`cycle_option` tinyint(1) unsigned NOT NULL, +`cycle_count` bigint(21) NOT NULL ) sequence=1; -ERROR HY000: Sequence 'test.t1' table structure is invalid (next_value) +ERROR HY000: Sequence 'test.t1' table structure is invalid (next_not_cached_value) CREATE OR REPLACE TABLE t1 ( -`next_val` bigint(21) NOT NULL, -`min_value` bigint(21) NOT NULL, -`max_value` bigint(21) NOT NULL, -`start` bigint(21) NOT NULL, +`next_not_cached_value` bigint(21) NOT NULL, +`minimum_value` bigint(21) NOT NULL, +`maximum_value` bigint(21) NOT NULL, +`start_value` bigint(21) NOT NULL, `increment` bigint(21) NOT NULL, -`cache` bigint(21) NOT NULL, +`cache_size` bigint(21) unsigned NOT NULL, `cycle` bigint(21) unsigned NOT NULL, /* error */ -`round` bigint(21) NOT NULL +`cycle_count` bigint(21) NOT NULL ) sequence=1; -ERROR HY000: Sequence 'test.t1' table structure is invalid (next_val) +ERROR HY000: Sequence 'test.t1' table structure is invalid (cycle) CREATE OR REPLACE TABLE t1 ( -`next_value` bigint(21), -`min_value` bigint(21) NOT NULL, -`max_value` bigint(21) NOT NULL, -`start` bigint(21) NOT NULL, +`next_not_cached_value` bigint(21), /* error */ +`minimum_value` bigint(21) NOT NULL, +`maximum_value` bigint(21) NOT NULL, +`start_value` bigint(21) NOT NULL, `increment` bigint(21) NOT NULL, -`cache` bigint(21) NOT NULL, -`cycle` tinyint(1) 
unsigned NOT NULL, -`round` bigint(21) NOT NULL +`cache_size` bigint(21) unsigned NOT NULL, +`cycle_option` tinyint(1) unsigned NOT NULL, +`cycle_count` bigint(21) NOT NULL ) sequence=1; -ERROR HY000: Sequence 'test.t1' table structure is invalid (next_value) +ERROR HY000: Sequence 'test.t1' table structure is invalid (next_not_cached_value) CREATE OR REPLACE TABLE t1 ( -`next_value` bigint(21) NOT NULL, -`min_value` bigint(21) NOT NULL, -`max_value` bigint(21) NOT NULL, -`start` bigint(21) NOT NULL, +`next_not_cached_value` bigint(21) NOT NULL, +`minimum_value` bigint(21) NOT NULL, +`maximum_value` bigint(21) NOT NULL, +`start_value` bigint(21) NOT NULL, `increment` bigint(21) NOT NULL, -`cache` bigint(21) NOT NULL, -`cycle` tinyint(1) unsigned NOT NULL, -`round` bigint(21) NOT NULL, +`cache_size` bigint(21) unsigned NOT NULL, +`cycle_option` tinyint(1) unsigned NOT NULL, +`cycle_count` bigint(21) NOT NULL, extra_field bigint(21) ) sequence=1; ERROR HY000: Sequence 'test.t1' table structure is invalid (Wrong number of columns) CREATE OR REPLACE TABLE t1 ( -`min_value` bigint(21) NOT NULL, -`max_value` bigint(21) NOT NULL, -`next_value` bigint(21) NOT NULL, -`start` bigint(21) NOT NULL, +`minimum_value` bigint(21) NOT NULL, +`next_not_cached_value` bigint(21) NOT NULL, +`maximum_value` bigint(21) NOT NULL, +`start_value` bigint(21) NOT NULL, `increment` bigint(21) NOT NULL, -`cache` bigint(21) NOT NULL, -`cycle` tinyint(1) unsigned NOT NULL, -`round` bigint(21) NOT NULL +`cache_size` bigint(21) unsigned NOT NULL, +`cycle_option` tinyint(1) unsigned NOT NULL, +`cycle_count` bigint(21) NOT NULL ) sequence=1; -ERROR HY000: Sequence 'test.t1' table structure is invalid (min_value) +ERROR HY000: Sequence 'test.t1' table structure is invalid (minimum_value) CREATE OR REPLACE TABLE t1 ( -`next_value` bigint(21) NOT NULL, -`min_value` bigint(21) NOT NULL, -`max_value` bigint(21) NOT NULL, -`start` bigint(21) NOT NULL, +`next_not_cached_value` bigint(21) NOT NULL, 
+`minimum_value` bigint(21) NOT NULL, +`maximum_value` bigint(21) NOT NULL, +`start_value` bigint(21) NOT NULL, `increment` bigint(21) NOT NULL, -`cache` bigint(21) NOT NULL, -`cycle` tinyint(1) unsigned NOT NULL, -`round` bigint(21) NOT NULL, -key key1 (next_value) +`cache_size` bigint(21) unsigned NOT NULL, +`cycle_option` tinyint(1) unsigned NOT NULL, +`cycle_count` bigint(21) NOT NULL, +key key1 (next_not_cached_value) ) sequence=1; ERROR HY000: Sequence 'test.t1' table structure is invalid (Sequence tables cannot have any keys) drop sequence if exists t1; diff --git a/mysql-test/suite/sql_sequence/create.test b/mysql-test/suite/sql_sequence/create.test index cf094c2cedd..23c32939efc 100644 --- a/mysql-test/suite/sql_sequence/create.test +++ b/mysql-test/suite/sql_sequence/create.test @@ -92,7 +92,7 @@ drop sequence t1; # create table t1 (a int); ---error ER_WRONG_OBJECT +--error ER_NOT_SEQUENCE show create sequence t1; --error ER_NOT_SEQUENCE2 drop sequence t1; @@ -149,6 +149,7 @@ show create sequence t1; show create table t1; create or replace sequence t1 start=1 increment= 2; create or replace sequence t1 start 1 increment 2; +create or replace sequence t1 cache +1; drop sequence t1; # @@ -156,14 +157,14 @@ drop sequence t1; # CREATE TABLE t1 ( - `next_value` bigint(21) NOT NULL, - `min_value` bigint(21) NOT NULL, - `max_value` bigint(21) NOT NULL, - `start` bigint(21) NOT NULL, + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL, `increment` bigint(21) NOT NULL, - `cache` bigint(21) NOT NULL, - `cycle` tinyint(1) unsigned NOT NULL, - `round` bigint(21) NOT NULL + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL, + `cycle_count` bigint(21) NOT NULL ) sequence=1; show create sequence t1; @@ -174,40 +175,40 @@ drop sequence t1; --error ER_SEQUENCE_INVALID_TABLE_STRUCTURE CREATE OR REPLACE TABLE t1 ( - `next_val` 
bigint(21) NOT NULL, - `min_value` bigint(21) NOT NULL, - `max_value` bigint(21) NOT NULL, - `start` bigint(21) NOT NULL, + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL, `increment` bigint(21) NOT NULL, - `cache` bigint(21) NOT NULL, - `cycle` tinyint(1) unsigned NOT NULL, - `round` bigint(21) NOT NULL + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL, + `cycle_count_not_exists` bigint(21) NOT NULL ) sequence=1; # Wrong type --error ER_SEQUENCE_INVALID_TABLE_STRUCTURE CREATE OR REPLACE TABLE t1 ( - `next_value` int(21) NOT NULL, - `min_value` bigint(21) NOT NULL, - `max_value` bigint(21) NOT NULL, - `start` bigint(21) NOT NULL, + `next_not_cached_value` int(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL, `increment` bigint(21) NOT NULL, - `cache` bigint(21) NOT NULL, - `cycle` tinyint(1) unsigned NOT NULL, - `round` bigint(21) NOT NULL + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL, + `cycle_count` bigint(21) NOT NULL ) sequence=1; --error ER_SEQUENCE_INVALID_TABLE_STRUCTURE CREATE OR REPLACE TABLE t1 ( - `next_val` bigint(21) NOT NULL, - `min_value` bigint(21) NOT NULL, - `max_value` bigint(21) NOT NULL, - `start` bigint(21) NOT NULL, + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL, `increment` bigint(21) NOT NULL, - `cache` bigint(21) NOT NULL, + `cache_size` bigint(21) unsigned NOT NULL, `cycle` bigint(21) unsigned NOT NULL, /* error */ - `round` bigint(21) NOT NULL + `cycle_count` bigint(21) NOT NULL ) sequence=1; @@ -215,28 +216,28 @@ CREATE OR REPLACE TABLE t1 ( --error ER_SEQUENCE_INVALID_TABLE_STRUCTURE CREATE OR REPLACE TABLE t1 ( - `next_value` bigint(21), - `min_value` 
bigint(21) NOT NULL, - `max_value` bigint(21) NOT NULL, - `start` bigint(21) NOT NULL, + `next_not_cached_value` bigint(21), /* error */ + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL, `increment` bigint(21) NOT NULL, - `cache` bigint(21) NOT NULL, - `cycle` tinyint(1) unsigned NOT NULL, - `round` bigint(21) NOT NULL + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL, + `cycle_count` bigint(21) NOT NULL ) sequence=1; # Extra field --error ER_SEQUENCE_INVALID_TABLE_STRUCTURE CREATE OR REPLACE TABLE t1 ( - `next_value` bigint(21) NOT NULL, - `min_value` bigint(21) NOT NULL, - `max_value` bigint(21) NOT NULL, - `start` bigint(21) NOT NULL, + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL, `increment` bigint(21) NOT NULL, - `cache` bigint(21) NOT NULL, - `cycle` tinyint(1) unsigned NOT NULL, - `round` bigint(21) NOT NULL, + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL, + `cycle_count` bigint(21) NOT NULL, extra_field bigint(21) ) sequence=1; @@ -244,29 +245,29 @@ CREATE OR REPLACE TABLE t1 ( --error ER_SEQUENCE_INVALID_TABLE_STRUCTURE CREATE OR REPLACE TABLE t1 ( - `min_value` bigint(21) NOT NULL, - `max_value` bigint(21) NOT NULL, - `next_value` bigint(21) NOT NULL, - `start` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `next_not_cached_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL, `increment` bigint(21) NOT NULL, - `cache` bigint(21) NOT NULL, - `cycle` tinyint(1) unsigned NOT NULL, - `round` bigint(21) NOT NULL + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL, + `cycle_count` bigint(21) NOT NULL ) sequence=1; # key --error ER_SEQUENCE_INVALID_TABLE_STRUCTURE CREATE OR REPLACE TABLE t1 ( - 
`next_value` bigint(21) NOT NULL, - `min_value` bigint(21) NOT NULL, - `max_value` bigint(21) NOT NULL, - `start` bigint(21) NOT NULL, + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL, `increment` bigint(21) NOT NULL, - `cache` bigint(21) NOT NULL, - `cycle` tinyint(1) unsigned NOT NULL, - `round` bigint(21) NOT NULL, - key key1 (next_value) + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL, + `cycle_count` bigint(21) NOT NULL, + key key1 (next_not_cached_value) ) sequence=1; drop sequence if exists t1; diff --git a/mysql-test/suite/sql_sequence/gtid.result b/mysql-test/suite/sql_sequence/gtid.result index ce8e8b7bb80..4c0a4e0e8fc 100644 --- a/mysql-test/suite/sql_sequence/gtid.result +++ b/mysql-test/suite/sql_sequence/gtid.result @@ -24,28 +24,28 @@ create sequence s1; show create table s1; Table Create Table s1 CREATE TABLE `s1` ( - `next_value` bigint(21) NOT NULL COMMENT 'next not cached value', - `min_value` bigint(21) NOT NULL COMMENT 'min value', - `max_value` bigint(21) NOT NULL COMMENT 'max value', - `start` bigint(21) NOT NULL COMMENT 'start value', + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL COMMENT 'start value when sequences is created or value if RESTART is used', `increment` bigint(21) NOT NULL COMMENT 'increment value', - `cache` bigint(21) NOT NULL COMMENT 'cache size', - `cycle` tinyint(1) unsigned NOT NULL COMMENT 'cycle state', - `round` bigint(21) NOT NULL COMMENT 'How many cycles has been done' + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL COMMENT '0 if no cycles are allowed, 1 if the sequence should begin a new cycle when maximum_value is passed', + `cycle_count` bigint(21) NOT NULL COMMENT 'How many cycles have been done' ) 
ENGINE=MyISAM SEQUENCE=1 connection slave; use s_db; show create table s1; Table Create Table s1 CREATE TABLE `s1` ( - `next_value` bigint(21) NOT NULL COMMENT 'next not cached value', - `min_value` bigint(21) NOT NULL COMMENT 'min value', - `max_value` bigint(21) NOT NULL COMMENT 'max value', - `start` bigint(21) NOT NULL COMMENT 'start value', + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL COMMENT 'start value when sequences is created or value if RESTART is used', `increment` bigint(21) NOT NULL COMMENT 'increment value', - `cache` bigint(21) NOT NULL COMMENT 'cache size', - `cycle` tinyint(1) unsigned NOT NULL COMMENT 'cycle state', - `round` bigint(21) NOT NULL COMMENT 'How many cycles has been done' + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL COMMENT '0 if no cycles are allowed, 1 if the sequence should begin a new cycle when maximum_value is passed', + `cycle_count` bigint(21) NOT NULL COMMENT 'How many cycles have been done' ) ENGINE=MyISAM SEQUENCE=1 connection master; use s_db; @@ -161,13 +161,13 @@ priv test connection m_normal_1; create sequence s_db.s1; select * from s_db.s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 1000 0 0 select next value for s_db.s1; next value for s_db.s1 1 select * from s_db.s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1001 1 9223372036854775806 1 1 1000 0 0 create sequence s_db.s2; drop sequence s_db.s2; @@ -250,20 +250,20 @@ connection slave; connection s_normal_3; use s_db; select * from s_t; -next_value min_value max_value start increment cache cycle round 
+next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 20 1 1 5 1 0 connection m_normal_1; select next value for s_t; next value for s_t 1 select * from s_t; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 6 1 20 1 1 5 1 0 connection master; connection slave; connection s_normal_3; select * from s_t; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 6 1 20 1 1 5 1 0 ------------------------------------------ master update nextval; @@ -272,12 +272,12 @@ connection m_normal_1; select next value for s_t; next value for s_t 2 -update s_t set next_value= 11; +update s_t set next_not_cached_value= 11; ERROR HY000: Storage engine SEQUENCE of the table `s_db`.`s_t` doesn't have this option alter sequence s_t restart=11; commit; select * from s_t; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 11 1 20 1 1 5 1 0 connection master; connection slave; @@ -286,29 +286,29 @@ show slave nextval; ------------------------------------------ connection s_normal_3; select * from s_t; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 11 1 20 1 1 5 1 0 connection m_normal_1; select next value for s_t; next value for s_t 11 select * from s_t; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 16 1 20 1 1 5 1 0 connection master; connection slave; connection s_normal_3; select * from s_t; -next_value min_value 
max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 16 1 20 1 1 5 1 0 ------------------------------------------ update into invalid sequence ------------------------------------------ connection m_normal_1; select * from s_t; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 16 1 20 1 1 5 1 0 -update s_t set next_value= 11,start=10, min_value=11; +update s_t set next_not_cached_value= 11,start_value=10, minimum_value=11; ERROR HY000: Storage engine SEQUENCE of the table `s_db`.`s_t` doesn't have this option ALTER SEQUENCE s_t restart with 11 start=10 minvalue=11; ERROR HY000: Sequence 's_db.s_t' values are conflicting @@ -549,14 +549,14 @@ next value for s1 4 commit; select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 5 1 9223372036854775806 1 1 2 0 0 connection master; connection slave; connection slave; use s_db; select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 5 1 9223372036854775806 1 1 2 0 0 ------------------------------------------ close session binlog. 
@@ -577,14 +577,14 @@ next value for s1 8 set session sql_log_bin=on; select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 9 1 9223372036854775806 1 1 2 0 0 connection master; connection slave; connection slave; use s_db; select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 5 1 9223372036854775806 1 1 2 0 0 connection m_normal_1; use s_db; @@ -609,13 +609,13 @@ select next value for s1; next value for s1 1 select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 3 1 9223372036854775806 1 1 2 0 0 connection master; connection slave; use s_db; select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 3 1 9223372036854775806 1 1 2 0 0 connection m_normal_1; use s_db; @@ -788,7 +788,7 @@ select * from t1; a 1 select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1001 1 9223372036854775806 1 1 1000 0 0 select previous value for s1; previous value for s1 @@ -810,7 +810,7 @@ a (next value for s1) 2 3 do setval(s1,10000,0); select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 10000 1 9223372036854775806 1 1 1000 0 0 connection m_normal_2; drop table t1; diff --git a/mysql-test/suite/sql_sequence/gtid.test b/mysql-test/suite/sql_sequence/gtid.test index 
98760119b53..a354062a05c 100644 --- a/mysql-test/suite/sql_sequence/gtid.test +++ b/mysql-test/suite/sql_sequence/gtid.test @@ -263,7 +263,7 @@ select * from s_t; connection m_normal_1; select next value for s_t; --error ER_ILLEGAL_HA -update s_t set next_value= 11; +update s_t set next_not_cached_value= 11; alter sequence s_t restart=11; commit; @@ -295,7 +295,7 @@ select * from s_t; connection m_normal_1; select * from s_t; --error ER_ILLEGAL_HA -update s_t set next_value= 11,start=10, min_value=11; +update s_t set next_not_cached_value= 11,start_value=10, minimum_value=11; --error ER_SEQUENCE_INVALID_DATA ALTER SEQUENCE s_t restart with 11 start=10 minvalue=11; commit; diff --git a/mysql-test/suite/sql_sequence/mysqldump.result b/mysql-test/suite/sql_sequence/mysqldump.result index 30f0719f16e..0067709db54 100644 --- a/mysql-test/suite/sql_sequence/mysqldump.result +++ b/mysql-test/suite/sql_sequence/mysqldump.result @@ -5,14 +5,14 @@ CREATE SEQUENCE x1 engine=innodb; /*!40101 SET @saved_cs_client = @@character_set_client */; /*!40101 SET character_set_client = utf8 */; CREATE TABLE `a1` ( - `next_value` bigint(21) NOT NULL COMMENT 'next not cached value', - `min_value` bigint(21) NOT NULL COMMENT 'min value', - `max_value` bigint(21) NOT NULL COMMENT 'max value', - `start` bigint(21) NOT NULL COMMENT 'start value', + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL COMMENT 'start value when sequences is created or value if RESTART is used', `increment` bigint(21) NOT NULL COMMENT 'increment value', - `cache` bigint(21) NOT NULL COMMENT 'cache size', - `cycle` tinyint(1) unsigned NOT NULL COMMENT 'cycle state', - `round` bigint(21) NOT NULL COMMENT 'How many cycles has been done' + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL COMMENT '0 if no cycles are allowed, 1 if the sequence should begin a new cycle when 
maximum_value is passed', + `cycle_count` bigint(21) NOT NULL COMMENT 'How many cycles have been done' ) ENGINE=Aria SEQUENCE=1; /*!40101 SET character_set_client = @saved_cs_client */; INSERT INTO `a1` VALUES (1,1,9223372036854775806,1,1,1000,0,0); @@ -27,14 +27,14 @@ INSERT INTO `t1` VALUES (1),(2); /*!40101 SET @saved_cs_client = @@character_set_client */; /*!40101 SET character_set_client = utf8 */; CREATE TABLE `x1` ( - `next_value` bigint(21) NOT NULL COMMENT 'next not cached value', - `min_value` bigint(21) NOT NULL COMMENT 'min value', - `max_value` bigint(21) NOT NULL COMMENT 'max value', - `start` bigint(21) NOT NULL COMMENT 'start value', + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL COMMENT 'start value when sequences is created or value if RESTART is used', `increment` bigint(21) NOT NULL COMMENT 'increment value', - `cache` bigint(21) NOT NULL COMMENT 'cache size', - `cycle` tinyint(1) unsigned NOT NULL COMMENT 'cycle state', - `round` bigint(21) NOT NULL COMMENT 'How many cycles has been done' + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL COMMENT '0 if no cycles are allowed, 1 if the sequence should begin a new cycle when maximum_value is passed', + `cycle_count` bigint(21) NOT NULL COMMENT 'How many cycles have been done' ) ENGINE=InnoDB SEQUENCE=1; /*!40101 SET character_set_client = @saved_cs_client */; INSERT INTO `x1` VALUES (1,1,9223372036854775806,1,1,1000,0,0); diff --git a/mysql-test/suite/sql_sequence/next.result b/mysql-test/suite/sql_sequence/next.result index b7d3a8ff26d..a10c131e0e1 100644 --- a/mysql-test/suite/sql_sequence/next.result +++ b/mysql-test/suite/sql_sequence/next.result @@ -2,80 +2,80 @@ CREATE SEQUENCE t1 start with 1 minvalue 1 maxvalue 10 increment by 1 cache 2 cy show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `next_value` bigint(21) NOT NULL 
COMMENT 'next not cached value', - `min_value` bigint(21) NOT NULL COMMENT 'min value', - `max_value` bigint(21) NOT NULL COMMENT 'max value', - `start` bigint(21) NOT NULL COMMENT 'start value', + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL COMMENT 'start value when sequences is created or value if RESTART is used', `increment` bigint(21) NOT NULL COMMENT 'increment value', - `cache` bigint(21) NOT NULL COMMENT 'cache size', - `cycle` tinyint(1) unsigned NOT NULL COMMENT 'cycle state', - `round` bigint(21) NOT NULL COMMENT 'How many cycles has been done' + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL COMMENT '0 if no cycles are allowed, 1 if the sequence should begin a new cycle when maximum_value is passed', + `cycle_count` bigint(21) NOT NULL COMMENT 'How many cycles have been done' ) ENGINE=MyISAM SEQUENCE=1 select next value for t1; next value for t1 1 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 3 0 select next value for t1; next value for t1 2 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 3 0 select next value for t1; next value for t1 3 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 5 0 select next value for t1; next value for t1 4 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 5 0 select next value for t1; next value for t1 5 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 7 0 select next value for t1; next value for t1 6 -select next_value,round from 
t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 7 0 select next value for t1; next value for t1 7 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 9 0 select next value for t1; next value for t1 8 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 9 0 select next value for t1; next value for t1 9 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 11 0 select next value for t1; next value for t1 10 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 11 0 select next value for t1; next value for t1 1 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 3 1 select NEXT VALUE for t1,seq from seq_1_to_20; NEXT VALUE for t1 seq @@ -104,68 +104,68 @@ CREATE SEQUENCE t1 minvalue 1 maxvalue 10 increment by -1 cache 2 cycle engine=a select next value for t1; next value for t1 10 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 8 0 select next value for t1; next value for t1 9 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 8 0 select next value for t1; next value for t1 8 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 6 0 select next value for t1; next value for t1 7 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 6 0 select next value 
for t1; next value for t1 6 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 4 0 select next value for t1; next value for t1 5 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 4 0 select next value for t1; next value for t1 4 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 2 0 select next value for t1; next value for t1 3 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 2 0 select next value for t1; next value for t1 2 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 0 0 select next value for t1; next value for t1 1 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 0 0 select next value for t1; next value for t1 10 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 8 1 select NEXT VALUE for t1,seq from seq_1_to_20; NEXT VALUE for t1 seq @@ -235,7 +235,7 @@ select next value for t1; next value for t1 1 select * from t1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 11 1 100 1 1 10 0 0 flush tables; select next value for t1; @@ -284,58 +284,58 @@ seq previous value for t9 NEXT VALUE for t9 previous value for t9 19 10 1 1 20 1 2 2 select * from t9; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 6 1 10 1 1 5 
1 2 drop sequence t9; CREATE SEQUENCE s1 cache=0; select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 0 0 0 select next value for s1; next value for s1 1 -select next_value from s1; -next_value +select next_not_cached_value from s1; +next_not_cached_value 2 select next value for s1; next value for s1 2 -select next_value from s1; -next_value +select next_not_cached_value from s1; +next_not_cached_value 3 DROP SEQUENCE s1; CREATE SEQUENCE s1 cache=1; -select next_value from s1; -next_value +select next_not_cached_value from s1; +next_not_cached_value 1 select next value for s1; next value for s1 1 -select next_value from s1; -next_value +select next_not_cached_value from s1; +next_not_cached_value 2 select next value for s1; next value for s1 2 -select next_value from s1; -next_value +select next_not_cached_value from s1; +next_not_cached_value 3 DROP SEQUENCE s1; CREATE SEQUENCE s1 cache=2; -select next_value from s1; -next_value +select next_not_cached_value from s1; +next_not_cached_value 1 select next value for s1; next value for s1 1 -select next_value from s1; -next_value +select next_not_cached_value from s1; +next_not_cached_value 3 select next value for s1; next value for s1 2 -select next_value from s1; -next_value +select next_not_cached_value from s1; +next_not_cached_value 3 DROP SEQUENCE s1; CREATE SEQUENCE s1; @@ -353,7 +353,7 @@ next value for s1 4 alter sequence s1 increment -2; select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1001 1 9223372036854775806 1 -2 1000 0 0 select next value for s1; next value for s1 @@ -404,17 +404,17 @@ INCREMENT BY 1 START WITH 3984356 CACHE 20 CYCLE engine=innodb; show create table s1; Table Create Table s1 
CREATE TABLE `s1` ( - `next_value` bigint(21) NOT NULL COMMENT 'next not cached value', - `min_value` bigint(21) NOT NULL COMMENT 'min value', - `max_value` bigint(21) NOT NULL COMMENT 'max value', - `start` bigint(21) NOT NULL COMMENT 'start value', + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL COMMENT 'start value when sequences is created or value if RESTART is used', `increment` bigint(21) NOT NULL COMMENT 'increment value', - `cache` bigint(21) NOT NULL COMMENT 'cache size', - `cycle` tinyint(1) unsigned NOT NULL COMMENT 'cycle state', - `round` bigint(21) NOT NULL COMMENT 'How many cycles has been done' + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL COMMENT '0 if no cycles are allowed, 1 if the sequence should begin a new cycle when maximum_value is passed', + `cycle_count` bigint(21) NOT NULL COMMENT 'How many cycles have been done' ) ENGINE=InnoDB SEQUENCE=1 select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 3984356 1 9999999999 3984356 1 20 1 0 select NEXT VALUE FOR s1; NEXT VALUE FOR s1 @@ -426,24 +426,24 @@ select NEXT VALUE FOR s1; NEXT VALUE FOR s1 3984358 select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 3984376 1 9999999999 3984356 1 20 1 0 FLUSH TABLES; select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 3984376 1 9999999999 3984356 1 20 1 0 select NEXT VALUE FOR s1; NEXT VALUE FOR s1 3984376 select * from s1; -next_value min_value max_value start increment cache cycle round 
+next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 3984396 1 9999999999 3984356 1 20 1 0 drop sequence s1; CREATE SEQUENCE t1 start with 5 minvalue 1 maxvalue 10 increment by 1 cache 5 cycle; explain select next value for t1; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used -explain select next value for t1, min_value from t1; +explain select next value for t1, minimum_value from t1; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 system NULL NULL NULL NULL 1 drop table t1; @@ -472,7 +472,7 @@ a b drop table t1,s1; CREATE OR REPLACE SEQUENCE s1 MINVALUE 1 MAXVALUE 9999999999 INCREMENT BY 1 START WITH 3984356 nocache CYCLE engine='innodb'; select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 3984356 1 9999999999 3984356 1 0 1 0 select next value for s1; next value for s1 @@ -496,8 +496,8 @@ create sequence t1; select next value for t1; next value for t1 1 -select next value for t1, min_value; -ERROR 42S22: Unknown column 'min_value' in 'field list' +select next value for t1, minimum_value; +ERROR 42S22: Unknown column 'minimum_value' in 'field list' drop sequence t1; # # MDEV-12854 Synchronize CREATE..SELECT data type and result set metadata data type for INT functions diff --git a/mysql-test/suite/sql_sequence/next.test b/mysql-test/suite/sql_sequence/next.test index cf67b7a2752..271c4a6558a 100644 --- a/mysql-test/suite/sql_sequence/next.test +++ b/mysql-test/suite/sql_sequence/next.test @@ -8,27 +8,27 @@ CREATE SEQUENCE t1 start with 1 minvalue 1 maxvalue 10 increment by 1 cache 2 cycle; show create table t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select 
next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select NEXT VALUE for t1,seq from seq_1_to_20; @@ -36,27 +36,27 @@ drop sequence t1; CREATE SEQUENCE t1 minvalue 1 maxvalue 10 increment by -1 cache 2 cycle engine=aria; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select 
next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select NEXT VALUE for t1,seq from seq_1_to_20; @@ -85,7 +85,7 @@ select next value for s1; drop sequence s1; # -# Test that flush tables jumps to next next_value +# Test that flush tables jumps to next next_not_cached_value # CREATE SEQUENCE t1 start with 1 minvalue 1 maxvalue 100 increment by 1 cache 10; @@ -116,23 +116,23 @@ drop sequence t9; CREATE SEQUENCE s1 cache=0; select * from s1; select next value for s1; -select next_value from s1; +select next_not_cached_value from s1; select next value for s1; -select next_value from s1; +select next_not_cached_value from s1; DROP SEQUENCE s1; CREATE SEQUENCE s1 cache=1; -select next_value from s1; +select next_not_cached_value from s1; select next value for s1; -select next_value from s1; +select next_not_cached_value from s1; select next value for s1; -select next_value from s1; +select next_not_cached_value from s1; DROP SEQUENCE s1; CREATE SEQUENCE s1 cache=2; -select next_value from s1; +select next_not_cached_value from s1; select next value for s1; -select next_value from s1; +select next_not_cached_value from s1; select next value for s1; -select next_value from s1; +select next_not_cached_value from s1; DROP SEQUENCE s1; # @@ -196,7 +196,7 @@ drop sequence s1; CREATE SEQUENCE t1 start with 5 minvalue 1 maxvalue 10 increment by 1 cache 5 cycle; explain select next value for t1; -explain select next value for t1, min_value from t1; +explain select next value for t1, minimum_value from t1; drop table t1; # @@ -244,7 +244,7 @@ drop 
table t1; create sequence t1; select next value for t1; --error ER_BAD_FIELD_ERROR -select next value for t1, min_value; +select next value for t1, minimum_value; drop sequence t1; --echo # diff --git a/mysql-test/suite/sql_sequence/other.result b/mysql-test/suite/sql_sequence/other.result index ea72e264b34..90e3a0737c6 100644 --- a/mysql-test/suite/sql_sequence/other.result +++ b/mysql-test/suite/sql_sequence/other.result @@ -37,16 +37,16 @@ select next value for s1; next value for s1 1001 select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 2001 1 9223372036854775806 1 1 1000 0 0 drop sequence s1; # -# ÌNSERT +# INSERT # create sequence s1; create sequence s2; -insert into s1 (next_value, min_value) values (100,1000); -ERROR HY000: Field 'max_value' doesn't have a default value +insert into s1 (next_not_cached_value, minimum_value) values (100,1000); +ERROR HY000: Field 'maximum_value' doesn't have a default value insert into s1 values (next value for s1, 1,9223372036854775806,1,1,1000,0,0); ERROR HY000: Table 's1' is specified twice, both as a target for 'INSERT' and as a separate source for data insert into s1 values (next value for s2, 1,9223372036854775806,1,1,1000,0,0); @@ -58,17 +58,17 @@ ERROR HY000: Sequence 'test.s1' values are conflicting insert into s1 values(0,9223372036854775806,1,1,1,1000,0,0); ERROR HY000: Sequence 'test.s1' values are conflicting select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 1000 0 0 insert into s1 values(1000,1,9223372036854775806,1,1,1000,0,0); select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size 
cycle_option cycle_count 1000 1 9223372036854775806 1 1 1000 0 0 select next value for s1; next value for s1 1000 select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 2000 1 9223372036854775806 1 1 1000 0 0 insert into s2 values(0, 1, 10, 1, 2, 1, 1, 0); ERROR HY000: Sequence 'test.s2' values are conflicting @@ -77,9 +77,9 @@ drop sequence s1,s2; # UPDATE and DELETE # create sequence s1; -update s1 set next_value=100; +update s1 set next_not_cached_value=100; ERROR HY000: Storage engine SEQUENCE of the table `test`.`s1` doesn't have this option -delete from s1 where next_value > 0; +delete from s1 where next_not_cached_value > 0; ERROR HY000: Storage engine SEQUENCE of the table `test`.`s1` doesn't have this option drop sequence s1; # @@ -106,7 +106,7 @@ drop view v1; create sequence s1 engine=innodb; LOCK TABLES s1 READ; SELECT * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 1000 0 0 UNLOCK TABLES; LOCK TABLES s1 WRITE; @@ -123,7 +123,7 @@ count(nextval(s1)) 2000 commit; select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 2001 1 9223372036854775806 1 1 1000 0 0 drop sequence s1; create sequence s1 cache=1000 engine=innodb; @@ -138,13 +138,27 @@ select count(nextval(s1)) from seq_1_to_2000; count(nextval(s1)) 2000 select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 4001 1 9223372036854775806 1 1 1000 0 0 commit; disconnect addconroot; connection default; select * from s1; -next_value 
min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 4001 1 9223372036854775806 1 1 1000 0 0 commit; drop sequence s1; +# +# Flush tables with read lock +# +create sequence s1; +select next value for s1; +next value for s1 +1 +flush tables with read lock; +create sequence s2; +ERROR HY000: Can't execute the query because you have a conflicting read lock +select next value for s1; +ERROR HY000: Can't execute the query because you have a conflicting read lock +unlock tables; +drop sequence s1; diff --git a/mysql-test/suite/sql_sequence/other.test b/mysql-test/suite/sql_sequence/other.test index 0caeb342bc9..d816ec7c108 100644 --- a/mysql-test/suite/sql_sequence/other.test +++ b/mysql-test/suite/sql_sequence/other.test @@ -29,13 +29,13 @@ select * from s1; drop sequence s1; --echo # ---echo # ÌNSERT +--echo # INSERT --echo # create sequence s1; create sequence s2; --error ER_NO_DEFAULT_FOR_FIELD -insert into s1 (next_value, min_value) values (100,1000); +insert into s1 (next_not_cached_value, minimum_value) values (100,1000); --error ER_UPDATE_TABLE_USED insert into s1 values (next value for s1, 1,9223372036854775806,1,1,1000,0,0); --error ER_WRONG_INSERT_INTO_SEQUENCE @@ -61,9 +61,9 @@ drop sequence s1,s2; create sequence s1; --error ER_ILLEGAL_HA -update s1 set next_value=100; +update s1 set next_not_cached_value=100; --error ER_ILLEGAL_HA -delete from s1 where next_value > 0; +delete from s1 where next_not_cached_value > 0; drop sequence s1; --echo # @@ -117,3 +117,17 @@ connection default; select * from s1; commit; drop sequence s1; + +--echo # +--echo # Flush tables with read lock +--echo # + +create sequence s1; +select next value for s1; +flush tables with read lock; +--error 1223 +create sequence s2; +--error 1223 +select next value for s1; +unlock tables; +drop sequence s1; diff --git a/mysql-test/suite/sql_sequence/read_only.result 
b/mysql-test/suite/sql_sequence/read_only.result index 3f6a95610dd..38edcc0894b 100644 --- a/mysql-test/suite/sql_sequence/read_only.result +++ b/mysql-test/suite/sql_sequence/read_only.result @@ -23,3 +23,16 @@ select next value for s1; ERROR HY000: Table 's1' is read only select next value for s1; ERROR HY000: Table 's1' is read only +select * from s1; +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count +1 1 9223372036854775806 1 1 2 0 0 +select * from s1; +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count +1 1 9223372036854775806 1 1 2 0 0 +select next value for s1; +next value for s1 +1 +select * from s1; +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count +3 1 9223372036854775806 1 1 2 0 0 +drop sequence s1; diff --git a/mysql-test/suite/sql_sequence/read_only.test b/mysql-test/suite/sql_sequence/read_only.test index 04dab2bb525..73103384882 100644 --- a/mysql-test/suite/sql_sequence/read_only.test +++ b/mysql-test/suite/sql_sequence/read_only.test @@ -7,12 +7,8 @@ create sequence s1 cache 2 engine=innodb; ---exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect ---shutdown_server 10 ---source include/wait_until_disconnected.inc ---enable_reconnect ---exec echo "restart":--innodb_read_only=1 > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect ---source include/wait_until_connected_again.inc +--let $restart_parameters= --innodb-read-only +--source include/restart_mysqld.inc connection default; show global variables like 'innodb_read_only'; @@ -38,3 +34,11 @@ select next value for s1; select next value for s1; --error ER_OPEN_AS_READONLY select next value for s1; + +select * from s1; +--let $restart_parameters= +--source include/restart_mysqld.inc +select * from s1; +select next value for s1; +select * from s1; +drop sequence s1; diff --git a/mysql-test/suite/sql_sequence/replication.result 
b/mysql-test/suite/sql_sequence/replication.result index 12355851f7b..5c7d45614e9 100644 --- a/mysql-test/suite/sql_sequence/replication.result +++ b/mysql-test/suite/sql_sequence/replication.result @@ -23,27 +23,27 @@ create sequence s1; show create table s1; Table Create Table s1 CREATE TABLE `s1` ( - `next_value` bigint(21) NOT NULL COMMENT 'next not cached value', - `min_value` bigint(21) NOT NULL COMMENT 'min value', - `max_value` bigint(21) NOT NULL COMMENT 'max value', - `start` bigint(21) NOT NULL COMMENT 'start value', + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL COMMENT 'start value when sequences is created or value if RESTART is used', `increment` bigint(21) NOT NULL COMMENT 'increment value', - `cache` bigint(21) NOT NULL COMMENT 'cache size', - `cycle` tinyint(1) unsigned NOT NULL COMMENT 'cycle state', - `round` bigint(21) NOT NULL COMMENT 'How many cycles has been done' + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL COMMENT '0 if no cycles are allowed, 1 if the sequence should begin a new cycle when maximum_value is passed', + `cycle_count` bigint(21) NOT NULL COMMENT 'How many cycles have been done' ) ENGINE=MyISAM SEQUENCE=1 connection slave; show create table s1; Table Create Table s1 CREATE TABLE `s1` ( - `next_value` bigint(21) NOT NULL COMMENT 'next not cached value', - `min_value` bigint(21) NOT NULL COMMENT 'min value', - `max_value` bigint(21) NOT NULL COMMENT 'max value', - `start` bigint(21) NOT NULL COMMENT 'start value', + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL COMMENT 'start value when sequences is created or value if RESTART is used', `increment` bigint(21) NOT NULL COMMENT 'increment value', - `cache` bigint(21) NOT NULL COMMENT 'cache size', - `cycle` 
tinyint(1) unsigned NOT NULL COMMENT 'cycle state', - `round` bigint(21) NOT NULL COMMENT 'How many cycles has been done' + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL COMMENT '0 if no cycles are allowed, 1 if the sequence should begin a new cycle when maximum_value is passed', + `cycle_count` bigint(21) NOT NULL COMMENT 'How many cycles have been done' ) ENGINE=MyISAM SEQUENCE=1 connection master; drop sequence s1; @@ -73,38 +73,37 @@ support create sequence connection master; create table t_1(id int); show create sequence t_1; -ERROR HY000: 's_db.t_1' is not SEQUENCE +ERROR 42S02: 's_db.t_1' is not a SEQUENCE drop table t_1; CREATE SEQUENCE `s2` ( -`currval` bigint(21) NOT NULL COMMENT 'current value', -`nextval` bigint(21) NOT NULL COMMENT 'next value', -`minvalue` bigint(21) NOT NULL COMMENT 'min value', -`maxvalue` bigint(21) NOT NULL COMMENT 'max value', -`start` bigint(21) NOT NULL COMMENT 'start value', -`increment` bigint(21) NOT NULL COMMENT 'increment value', -`cache` bigint(21) NOT NULL COMMENT 'cache size', -`cycle` bigint(21) NOT NULL COMMENT 'cycle state', -`round` bigint(21) NOT NULL COMMENT 'already how many round' +`next_not_cached_value` bigint(21) NOT NULL, +`minimum_value` bigint(21) NOT NULL, +`maximum_value` bigint(21) NOT NULL, +`start_value` bigint(21) NOT NULL, +`increment` bigint(21) NOT NULL, +`cache_size` bigint(21) unsigned NOT NULL, +`cycle_option` tinyint(1) unsigned NOT NULL, +`cycle_count` bigint(21) NOT NULL ) ENGINE=InnoDB sequence=1; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '( -`currval` bigint(21) NOT NULL COMMENT 'current value', -`nextval` bigint(21) NO' at line 1 +`next_not_cached_value` bigint(21) NOT NULL, +`minimum_value` bigint(21) NOT NU' at line 1 CREATE TABLE `s2` ( -`next_value` bigint(21) NOT NULL COMMENT 'next value', -`min_value` bigint(21) NOT NULL COMMENT 'min 
value', -`max_value` bigint(21) NOT NULL COMMENT 'max value', -`start` bigint(21) NOT NULL COMMENT 'start value', -`increment` bigint(21) NOT NULL COMMENT 'increment value', -`cache` bigint(21) NOT NULL COMMENT 'cache size', -`cycle` tinyint(1) unsigned NOT NULL COMMENT 'cycle state', -`round` bigint(21) NOT NULL COMMENT 'already how many round' +`next_not_cached_value` bigint(21) NOT NULL, +`minimum_value` bigint(21) NOT NULL, +`maximum_value` bigint(21) NOT NULL, +`start_value` bigint(21) NOT NULL, +`increment` bigint(21) NOT NULL, +`cache_size` bigint(21) unsigned NOT NULL, +`cycle_option` tinyint(1) unsigned NOT NULL, +`cycle_count` bigint(21) NOT NULL ) ENGINE=InnoDB sequence=1; insert into s2 values(1, 1, 10, 1, 2, 1, 1, 0); commit; select * for s2; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 's2' at line 1 select * from s2; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 10 1 2 1 1 0 select NEXT VALUE for s2; NEXT VALUE for s2 @@ -128,25 +127,25 @@ select NEXT VALUE for s2; NEXT VALUE for s2 3 select * from s2; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 5 1 10 1 2 1 1 1 commit; connection master; connection slave; select * from s2; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 5 1 10 1 2 1 1 1 connection master; drop sequence s2; CREATE TABLE `s2` ( -`next_value` bigint(21) NOT NULL COMMENT 'next value', -`min_value` bigint(21) NOT NULL COMMENT 'min value', -`max_value` bigint(21) NOT NULL COMMENT 'max value', -`start` bigint(21) NOT NULL COMMENT 'start value', 
-`increment` bigint(21) NOT NULL COMMENT 'increment value', -`cache` bigint(21) NOT NULL COMMENT 'cache size', -`cycle` tinyint(1) unsigned NOT NULL COMMENT 'cycle state', -`round` bigint(21) NOT NULL COMMENT 'already how many round' +`next_not_cached_value` bigint(21) NOT NULL, +`minimum_value` bigint(21) NOT NULL, +`maximum_value` bigint(21) NOT NULL, +`start_value` bigint(21) NOT NULL, +`increment` bigint(21) NOT NULL, +`cache_size` bigint(21) unsigned NOT NULL, +`cycle_option` tinyint(1) unsigned NOT NULL, +`cycle_count` bigint(21) NOT NULL ) ENGINE=myisam DEFAULT CHARSET=latin1 sequence=1; show create sequence s2; Table Create Table @@ -159,7 +158,7 @@ connection master; create sequence s2; create table t2 (id int); select * from s2; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 1000 0 0 select * from t2; id @@ -171,7 +170,7 @@ NEXT VALUE for s2 select NEXT VALUE for t2; ERROR 42S02: 's_db.t2' is not a SEQUENCE select * from s2, t2; -next_value min_value max_value start increment cache cycle round id +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count id 1001 1 9223372036854775806 1 1 1000 0 0 1 select * for s2; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 's2' at line 1 @@ -191,7 +190,7 @@ show create sequence s2_2; Table Create Table s2_2 CREATE SEQUENCE `s2_2` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=MyISAM select * from s2_2; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 1000 0 0 truncate table s2_2; ERROR HY000: Storage engine SEQUENCE 
of the table `s_db`.`s2_2` doesn't have this option @@ -339,7 +338,7 @@ show global variables like 'read_only'; Variable_name Value read_only OFF select * from s_db.s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 1000 0 0 connection master; connection slave; @@ -360,7 +359,7 @@ connection master; connection slave; connection s_normal_3; select * from s_t; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 20 1 1 5 1 0 connection m_normal_1; select next value for s_t; @@ -369,8 +368,8 @@ next value for s_t connection master; connection slave; connection s_normal_3; -select next_value from s_t; -next_value +select next_not_cached_value from s_t; +next_not_cached_value 6 ------------------------------------------ master ALTER SEQUENCE @@ -381,7 +380,7 @@ next value for s_t 2 alter sequence s_t restart= 11; select * from s_t; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 11 1 20 1 1 5 1 0 connection master; connection slave; @@ -390,7 +389,7 @@ show slave nextval; ------------------------------------------ connection s_normal_3; select * from s_t; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 11 1 20 1 1 5 1 0 connection m_normal_1; select next value for s_t; @@ -400,7 +399,7 @@ connection master; connection slave; connection s_normal_3; select * from s_t; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 16 1 
20 1 1 5 1 0 ------------------------------------------ update into invalid sequence @@ -410,17 +409,17 @@ select next value for s_t; next value for s_t 12 select * from s_t; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 16 1 20 1 1 5 1 0 alter sequence s_t minvalue=11 maxvalue=9; ERROR HY000: Sequence 's_db.s_t' values are conflicting select * from s_t; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 16 1 20 1 1 5 1 0 alter sequence s_t restart= 12 start=10 minvalue=11 maxvalue=20; ERROR HY000: Sequence 's_db.s_t' values are conflicting select * from s_t; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 16 1 20 1 1 5 1 0 ------------------------------------------ delete sequence row @@ -457,7 +456,7 @@ id 2 2222 select * from s_1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 6 1 9223372036854775806 1 1 5 0 0 connection master; connection slave; @@ -505,7 +504,7 @@ id 2 2222 select * from s_1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 11 1 9223372036854775806 1 1 5 0 0 select next value for s_1; next value for s_1 @@ -643,12 +642,12 @@ next value for s1 4 commit; select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 5 1 9223372036854775806 1 1 2 0 0 connection master; connection slave; select * 
from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 5 1 9223372036854775806 1 1 2 0 0 ------------------------------------------ close session binlog. @@ -669,23 +668,23 @@ next value for s1 8 set session sql_log_bin=on; select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 9 1 9223372036854775806 1 1 2 0 0 connection master; connection slave; select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 5 1 9223372036854775806 1 1 2 0 0 connection master; select next value for s1; next value for s1 9 select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 11 1 9223372036854775806 1 1 2 0 0 connection slave; select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 11 1 9223372036854775806 1 1 2 0 0 connection master; drop sequence s1; @@ -711,7 +710,7 @@ next value for s1 connection master; connection slave; select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 3 1 9223372036854775806 1 1 2 0 0 connection m_normal_1; drop sequence s1; @@ -733,12 +732,12 @@ select next value for s1; next value for s1 2 select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size 
cycle_option cycle_count 3 1 9223372036854775806 1 1 2 0 0 connection master; connection slave; select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 3 1 9223372036854775806 1 1 2 0 0 connection m_normal_1; drop sequence s1; @@ -784,7 +783,7 @@ connection m_normal_1; create sequence s1 cache 2; create table t as select * from s1; select * from t; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 1 1 9223372036854775806 1 1 2 0 0 drop table t; create table t as select next value for s1; @@ -918,7 +917,7 @@ test value boundary ########################################### connection m_normal_1; ------------------------------------------ -round increment by round +cycle_count increment by cycle_count ------------------------------------------ create sequence s1 start with 5 minvalue 2 maxvalue 7 cache 1 cycle; select next value for s1; @@ -950,39 +949,39 @@ next value for s1 select next value for s1; ERROR HY000: Sequence 's_db.s1' has run out select * from s1; -next_value min_value max_value start increment cache cycle round +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count 8 2 7 5 1 10 0 0 drop sequence s1; create sequence s1 start with 2 minvalue 1 maxvalue 3 increment by 3 nocache cycle; select next value for s1; next value for s1 2 -select next_value,round from s1; -next_value round +select next_not_cached_value,cycle_count from s1; +next_not_cached_value cycle_count 4 0 select next value for s1; next value for s1 1 -select next_value,round from s1; -next_value round +select next_not_cached_value,cycle_count from s1; +next_not_cached_value cycle_count 4 1 select next value for s1; next value for s1 1 -select next_value,round from s1; -next_value 
round +select next_not_cached_value,cycle_count from s1; +next_not_cached_value cycle_count 4 2 select next value for s1; next value for s1 1 -select next_value,round from s1; -next_value round +select next_not_cached_value,cycle_count from s1; +next_not_cached_value cycle_count 4 3 select next value for s1; next value for s1 1 -select next_value,round from s1; -next_value round +select next_not_cached_value,cycle_count from s1; +next_not_cached_value cycle_count 4 4 drop sequence s1; create sequence s1 start with 2 minvalue 1 maxvalue 3 increment by 3 cache 2 nocycle; @@ -996,67 +995,67 @@ drop sequence s1; beyond ulonglong maxvalue ------------------------------------------ create sequence s1 start with 9223372036854775805 minvalue 9223372036854775804 maxvalue 9223372036854775806 cache 1 cycle; -select next value for s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775805 0 -select next value for s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775806 0 -select next value for s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775804 0 -select next value for s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775805 1 -select next value for s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775806 1 -select next value for s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775804 1 -select next value for s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775805 2 -select next 
value for s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775806 2 -select next value for s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775804 2 -select next value for s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775805 3 drop sequence s1; create sequence s1 start with 9223372036854775805 minvalue 9223372036854775804 maxvalue 9223372036854775806 cache 10 cycle; -select next value for s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775805 0 -select next value for s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775806 0 -select next value for s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775804 0 -select next value for s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775805 1 -select next value for s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775806 1 -select next value for s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775804 1 -select next value for s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775805 2 -select next value for s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775806 2 -select next value for 
s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775804 2 -select next value for s1, round from s1; -next value for s1 round +select next value for s1, cycle_count from s1; +next value for s1 cycle_count 9223372036854775805 3 drop sequence s1; connection master; diff --git a/mysql-test/suite/sql_sequence/replication.test b/mysql-test/suite/sql_sequence/replication.test index c1a28a9b5c4..7bd6f00e2ed 100644 --- a/mysql-test/suite/sql_sequence/replication.test +++ b/mysql-test/suite/sql_sequence/replication.test @@ -79,33 +79,32 @@ drop sequence s2; connection master; create table t_1(id int); ---error ER_WRONG_OBJECT +--error ER_NOT_SEQUENCE show create sequence t_1; drop table t_1; --error ER_PARSE_ERROR CREATE SEQUENCE `s2` ( - `currval` bigint(21) NOT NULL COMMENT 'current value', - `nextval` bigint(21) NOT NULL COMMENT 'next value', - `minvalue` bigint(21) NOT NULL COMMENT 'min value', - `maxvalue` bigint(21) NOT NULL COMMENT 'max value', - `start` bigint(21) NOT NULL COMMENT 'start value', - `increment` bigint(21) NOT NULL COMMENT 'increment value', - `cache` bigint(21) NOT NULL COMMENT 'cache size', - `cycle` bigint(21) NOT NULL COMMENT 'cycle state', - `round` bigint(21) NOT NULL COMMENT 'already how many round' + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL, + `increment` bigint(21) NOT NULL, + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL, + `cycle_count` bigint(21) NOT NULL ) ENGINE=InnoDB sequence=1; CREATE TABLE `s2` ( - `next_value` bigint(21) NOT NULL COMMENT 'next value', - `min_value` bigint(21) NOT NULL COMMENT 'min value', - `max_value` bigint(21) NOT NULL COMMENT 'max value', - `start` bigint(21) NOT NULL COMMENT 'start value', - `increment` bigint(21) NOT NULL COMMENT 'increment value', - `cache` bigint(21) 
NOT NULL COMMENT 'cache size', - `cycle` tinyint(1) unsigned NOT NULL COMMENT 'cycle state', - `round` bigint(21) NOT NULL COMMENT 'already how many round' + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL, + `increment` bigint(21) NOT NULL, + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL, + `cycle_count` bigint(21) NOT NULL ) ENGINE=InnoDB sequence=1; insert into s2 values(1, 1, 10, 1, 2, 1, 1, 0); @@ -131,14 +130,14 @@ connection master; drop sequence s2; CREATE TABLE `s2` ( - `next_value` bigint(21) NOT NULL COMMENT 'next value', - `min_value` bigint(21) NOT NULL COMMENT 'min value', - `max_value` bigint(21) NOT NULL COMMENT 'max value', - `start` bigint(21) NOT NULL COMMENT 'start value', - `increment` bigint(21) NOT NULL COMMENT 'increment value', - `cache` bigint(21) NOT NULL COMMENT 'cache size', - `cycle` tinyint(1) unsigned NOT NULL COMMENT 'cycle state', - `round` bigint(21) NOT NULL COMMENT 'already how many round' + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL, + `increment` bigint(21) NOT NULL, + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL, + `cycle_count` bigint(21) NOT NULL ) ENGINE=myisam DEFAULT CHARSET=latin1 sequence=1; show create sequence s2; @@ -355,7 +354,7 @@ connection master; --sync_slave_with_master connection s_normal_3; -select next_value from s_t; +select next_not_cached_value from s_t; --echo ------------------------------------------ --echo master ALTER SEQUENCE @@ -785,7 +784,7 @@ drop sequence s1; connection m_normal_1; --echo ------------------------------------------ ---echo round increment by round +--echo cycle_count increment by cycle_count --echo ------------------------------------------ create sequence s1 start with 
5 minvalue 2 maxvalue 7 cache 1 cycle; select next value for s1; @@ -806,15 +805,15 @@ drop sequence s1; create sequence s1 start with 2 minvalue 1 maxvalue 3 increment by 3 nocache cycle; select next value for s1; -select next_value,round from s1; +select next_not_cached_value,cycle_count from s1; select next value for s1; -select next_value,round from s1; +select next_not_cached_value,cycle_count from s1; select next value for s1; -select next_value,round from s1; +select next_not_cached_value,cycle_count from s1; select next value for s1; -select next_value,round from s1; +select next_not_cached_value,cycle_count from s1; select next value for s1; -select next_value,round from s1; +select next_not_cached_value,cycle_count from s1; drop sequence s1; create sequence s1 start with 2 minvalue 1 maxvalue 3 increment by 3 cache 2 nocycle; @@ -827,29 +826,29 @@ drop sequence s1; --echo beyond ulonglong maxvalue --echo ------------------------------------------ create sequence s1 start with 9223372036854775805 minvalue 9223372036854775804 maxvalue 9223372036854775806 cache 1 cycle; -select next value for s1, round from s1; -select next value for s1, round from s1; -select next value for s1, round from s1; -select next value for s1, round from s1; -select next value for s1, round from s1; -select next value for s1, round from s1; -select next value for s1, round from s1; -select next value for s1, round from s1; -select next value for s1, round from s1; -select next value for s1, round from s1; +select next value for s1, cycle_count from s1; +select next value for s1, cycle_count from s1; +select next value for s1, cycle_count from s1; +select next value for s1, cycle_count from s1; +select next value for s1, cycle_count from s1; +select next value for s1, cycle_count from s1; +select next value for s1, cycle_count from s1; +select next value for s1, cycle_count from s1; +select next value for s1, cycle_count from s1; +select next value for s1, cycle_count from s1; drop 
sequence s1; create sequence s1 start with 9223372036854775805 minvalue 9223372036854775804 maxvalue 9223372036854775806 cache 10 cycle; -select next value for s1, round from s1; -select next value for s1, round from s1; -select next value for s1, round from s1; -select next value for s1, round from s1; -select next value for s1, round from s1; -select next value for s1, round from s1; -select next value for s1, round from s1; -select next value for s1, round from s1; -select next value for s1, round from s1; -select next value for s1, round from s1; +select next value for s1, cycle_count from s1; +select next value for s1, cycle_count from s1; +select next value for s1, cycle_count from s1; +select next value for s1, cycle_count from s1; +select next value for s1, cycle_count from s1; +select next value for s1, cycle_count from s1; +select next value for s1, cycle_count from s1; +select next value for s1, cycle_count from s1; +select next value for s1, cycle_count from s1; +select next value for s1, cycle_count from s1; drop sequence s1; connection master; diff --git a/mysql-test/suite/sql_sequence/setval.result b/mysql-test/suite/sql_sequence/setval.result index 8e68a717cbf..b7cda27cf96 100644 --- a/mysql-test/suite/sql_sequence/setval.result +++ b/mysql-test/suite/sql_sequence/setval.result @@ -5,26 +5,26 @@ Note 1051 Unknown table 'test.t1' # Test setval function # CREATE SEQUENCE t1 cache 10 engine=myisam; -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 1 0 do setval(t1,10); -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 11 0 select next value for t1; next value for t1 11 do setval(t1,12,1); -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 21 0 select next value for t1; next value for t1 13 do 
setval(t1,15,0); -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 21 0 select next value for t1; next value for t1 @@ -39,15 +39,15 @@ do setval(t1,1000,0); select next value for t1; next value for t1 1000 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 1010 0 do setval(t1,2000,0); select next value for t1; next value for t1 2000 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 2010 0 select setval(t1,1000,0); setval(t1,1000,0) @@ -61,8 +61,8 @@ NULL select next value for t1; next value for t1 2002 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 2010 0 select setval(t1,2002,0); setval(t1,2002,0) @@ -76,40 +76,40 @@ setval(t1,2010,0) select next value for t1; next value for t1 2010 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 2020 0 drop sequence t1; # # Testing with cycle # CREATE SEQUENCE t1 cache=10 maxvalue=100 cycle engine=innodb; -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 1 0 select setval(t1,100,0); setval(t1,100,0) 100 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 100 0 select next value for t1; next value for t1 100 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 101 0 select setval(t1,100,0); setval(t1,100,0) NULL -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value 
cycle_count 101 0 select next value for t1; next value for t1 1 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 11 1 select next value for t1; next value for t1 @@ -117,8 +117,8 @@ next value for t1 select setval(t1,100,0,1); setval(t1,100,0,1) 100 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 100 1 select next value for t1; next value for t1 @@ -126,8 +126,8 @@ next value for t1 select setval(t1,100,1,2); setval(t1,100,1,2) 100 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 101 2 select next value for t1; next value for t1 @@ -135,8 +135,8 @@ next value for t1 select setval(t1,100,0,3); setval(t1,100,0,3) 100 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 100 3 select next value for t1; next value for t1 @@ -146,27 +146,27 @@ drop sequence t1; # Testing extreme values # CREATE SEQUENCE t1 cache=10 maxvalue=100 engine=innodb; -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 1 0 select setval(t1,200); setval(t1,200) 200 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 101 0 select next value for t1; ERROR HY000: Sequence 'test.t1' has run out drop sequence t1; CREATE SEQUENCE t1 cache=10 maxvalue=100 cycle engine=innodb; -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 1 0 select setval(t1,200); setval(t1,200) 200 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value 
cycle_count 101 0 select next value for t1; next value for t1 @@ -176,8 +176,8 @@ CREATE SEQUENCE t1 cache=10 maxvalue=0 increment=-10; select setval(t1,-10); setval(t1,-10) -10 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count -20 0 select next value for t1; next value for t1 @@ -185,8 +185,8 @@ next value for t1 select setval(t1,-15); setval(t1,-15) NULL -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count -120 0 select next value for t1; next value for t1 @@ -214,8 +214,8 @@ CREATE SEQUENCE t1 cache=10 maxvalue=0 increment=-10; select setval(t1,-10,0); setval(t1,-10,0) -10 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count -10 0 select next value for t1; next value for t1 @@ -231,8 +231,8 @@ setval(t1,10,0) setval(t1,15,1) setval(t1,5,1) select next value for t1; next value for t1 16 -select next_value,round from t1; -next_value round +select next_not_cached_value,cycle_count from t1; +next_not_cached_value cycle_count 1016 0 explain extended select setval(t1,100),setval(t1,100,TRUE),setval(t1,100,FALSE,50); id select_type table type possible_keys key key_len ref rows filtered Extra diff --git a/mysql-test/suite/sql_sequence/setval.test b/mysql-test/suite/sql_sequence/setval.test index dbb2620d4e4..8f8059fdcae 100644 --- a/mysql-test/suite/sql_sequence/setval.test +++ b/mysql-test/suite/sql_sequence/setval.test @@ -8,35 +8,35 @@ drop table if exists t1; --echo # CREATE SEQUENCE t1 cache 10 engine=myisam; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; do setval(t1,10); -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; do setval(t1,12,1); -select next_value,round from t1; +select 
next_not_cached_value,cycle_count from t1; select next value for t1; do setval(t1,15,0); -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; select setval(t1,16,0); select next value for t1; do setval(t1,1000,0); select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; do setval(t1,2000,0); select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; # Set smaller value select setval(t1,1000,0); select next value for t1; select setval(t1,1000,TRUE); select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select setval(t1,2002,0); select next value for t1; select setval(t1,2010,0); select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; drop sequence t1; --echo # @@ -44,24 +44,24 @@ drop sequence t1; --echo # CREATE SEQUENCE t1 cache=10 maxvalue=100 cycle engine=innodb; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select setval(t1,100,0); -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select setval(t1,100,0); -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; select setval(t1,100,0,1); -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; select setval(t1,100,1,2); -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; select setval(t1,100,0,3); -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; 
select next value for t1; drop sequence t1; @@ -70,26 +70,26 @@ drop sequence t1; --echo # CREATE SEQUENCE t1 cache=10 maxvalue=100 engine=innodb; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select setval(t1,200); -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; --error ER_SEQUENCE_RUN_OUT select next value for t1; drop sequence t1; CREATE SEQUENCE t1 cache=10 maxvalue=100 cycle engine=innodb; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select setval(t1,200); -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; drop sequence t1; CREATE SEQUENCE t1 cache=10 maxvalue=0 increment=-10; select setval(t1,-10); -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; select setval(t1,-15); -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; select setval(t1,-500,FALSE); select next value for t1; @@ -101,7 +101,7 @@ drop sequence t1; CREATE SEQUENCE t1 cache=10 maxvalue=0 increment=-10; select setval(t1,-10,0); -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; select next value for t1; drop sequence t1; @@ -112,7 +112,7 @@ drop sequence t1; CREATE SEQUENCE t1; select setval(t1,10,0),setval(t1,15,1),setval(t1,5,1); select next value for t1; -select next_value,round from t1; +select next_not_cached_value,cycle_count from t1; explain extended select setval(t1,100),setval(t1,100,TRUE),setval(t1,100,FALSE,50); drop sequence t1; diff --git a/mysql-test/suite/sql_sequence/temporary.result b/mysql-test/suite/sql_sequence/temporary.result new file mode 100644 index 00000000000..4885b5f8a26 --- /dev/null +++ b/mysql-test/suite/sql_sequence/temporary.result @@ -0,0 +1,12 @@ +# +# Create +# +create temporary sequence s1 engine=innodb; +alter table s1 engine myisam; 
+select nextval(s1); +nextval(s1) +1 +select * from s1; +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count +1001 1 9223372036854775806 1 1 1000 0 0 +drop temporary sequence s1; diff --git a/mysql-test/suite/sql_sequence/temporary.test b/mysql-test/suite/sql_sequence/temporary.test new file mode 100644 index 00000000000..158f6b4961e --- /dev/null +++ b/mysql-test/suite/sql_sequence/temporary.test @@ -0,0 +1,16 @@ +--source include/have_sequence.inc +--source include/have_innodb.inc + +# +# Test temporary sequences +# + +--echo # +--echo # Create +--echo # + +create temporary sequence s1 engine=innodb; +alter table s1 engine myisam; +select nextval(s1); +select * from s1; +drop temporary sequence s1; diff --git a/mysql-test/suite/storage_engine/alter_table.result b/mysql-test/suite/storage_engine/alter_table.result index 09696e0e6b6..85db1514421 100644 --- a/mysql-test/suite/storage_engine/alter_table.result +++ b/mysql-test/suite/storage_engine/alter_table.result @@ -13,7 +13,7 @@ ALTER TABLE t1 ALTER COLUMN a SET DEFAULT '0'; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT '0', + `a` int(11) DEFAULT 0, `c` char(8) DEFAULT NULL, `b` int(11) DEFAULT NULL ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 diff --git a/mysql-test/suite/storage_engine/alter_tablespace.result b/mysql-test/suite/storage_engine/alter_tablespace.result index b54d7c99e9e..ff3da9f68bb 100644 --- a/mysql-test/suite/storage_engine/alter_tablespace.result +++ b/mysql-test/suite/storage_engine/alter_tablespace.result @@ -10,7 +10,7 @@ a 2 ALTER TABLE t1 DISCARD TABLESPACE; SELECT a FROM t1; -ERROR HY000: Tablespace has been discarded for table 't1' +ERROR HY000: Tablespace has been discarded for table `t1` ALTER TABLE t1 IMPORT TABLESPACE; Warnings: Warning 1810 IO Read error: (2, No such file or directory) Error opening './test/t1.cfg', will attempt to import without schema verification diff --git 
a/mysql-test/suite/storage_engine/create_table.result b/mysql-test/suite/storage_engine/create_table.result index 87bf37046ba..6936854eb65 100644 --- a/mysql-test/suite/storage_engine/create_table.result +++ b/mysql-test/suite/storage_engine/create_table.result @@ -27,7 +27,7 @@ CREATE TABLE t1 ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> AS SELECT 1 UNION SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `1` bigint(20) NOT NULL DEFAULT '0' + `1` bigint(20) NOT NULL DEFAULT 0 ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 SELECT * FROM t1; 1 diff --git a/mysql-test/suite/storage_engine/disabled.def b/mysql-test/suite/storage_engine/disabled.def index e69de29bb2d..658d0c240db 100644 --- a/mysql-test/suite/storage_engine/disabled.def +++ b/mysql-test/suite/storage_engine/disabled.def @@ -0,0 +1,12 @@ +alter_table_online : MDEV-9705 - Field type conversion warnings have changed to worse +col_opt_null : MDEV-10252 - Row count in 'out of range' warnings is off +col_opt_not_null : MDEV-10252 - Row count in 'out of range' warnings is off +col_opt_unsigned : MDEV-10252 - Row count in 'out of range' warnings is off +col_opt_zerofill : MDEV-10252 - Row count in 'out of range' warnings is off +type_binary : MDEV-10252 - Row count in 'out of range' warnings is off +type_char : MDEV-10252 - Row count in 'out of range' warnings is off +type_fixed : MDEV-10252 - Row count in 'out of range' warnings is off +type_float : MDEV-10252 - Row count in 'out of range' warnings is off +type_int : MDEV-10252 - Row count in 'out of range' warnings is off +type_varbinary : MDEV-10252 - Row count in 'out of range' warnings is off +type_varchar : MDEV-10252 - Row count in 'out of range' warnings is off diff --git a/mysql-test/suite/storage_engine/insert_delayed.test b/mysql-test/suite/storage_engine/insert_delayed.test index 3ded1686714..ece0bf3cf68 100644 --- a/mysql-test/suite/storage_engine/insert_delayed.test +++ b/mysql-test/suite/storage_engine/insert_delayed.test @@ 
-1,4 +1,4 @@ -# +# # INSERT DELAYED # diff --git a/mysql-test/suite/storage_engine/repair_table.inc b/mysql-test/suite/storage_engine/repair_table.inc index aa3b4e6304d..a295b4c19f2 100644 --- a/mysql-test/suite/storage_engine/repair_table.inc +++ b/mysql-test/suite/storage_engine/repair_table.inc @@ -91,6 +91,9 @@ if ($have_default_index) call mtr.add_suppression(" '\..test.t1'"); call mtr.add_suppression("Couldn't repair table: test.t1"); + # In 10.2 with log_warnings=2 the error message is printed to the error log + call mtr.add_suppression("Table 't1' is marked as crashed.*"); + --let $create_definition = a $int_indexed_col, b $char_col, $default_index (a) --source create_table.inc REPAIR TABLE t1; diff --git a/mysql-test/suite/storage_engine/repair_table.result b/mysql-test/suite/storage_engine/repair_table.result index a20b9be3ba1..d518e47756f 100644 --- a/mysql-test/suite/storage_engine/repair_table.result +++ b/mysql-test/suite/storage_engine/repair_table.result @@ -59,6 +59,7 @@ call mtr.add_suppression("Got an error from thread_id=.*"); call mtr.add_suppression("MySQL thread id .*, query id .* localhost.*root Checking table"); call mtr.add_suppression(" '\..test.t1'"); call mtr.add_suppression("Couldn't repair table: test.t1"); +call mtr.add_suppression("Table 't1' is marked as crashed.*"); CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>, <CUSTOM_INDEX> (a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; REPAIR TABLE t1; Table Op Msg_type Msg_text @@ -85,7 +86,7 @@ Table Op Msg_type Msg_text test.t1 check error Size of datafile is: 39 Should be: 65 test.t1 check error Corrupt SELECT a,b FROM t1; -ERROR HY000: Incorrect key file for table 't1'; try to repair it +ERROR HY000: Index for table 't1' is corrupt; try to repair it # Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). 
# If you got a difference in error message, just add it to rdiff file INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o'); diff --git a/mysql-test/suite/storage_engine/tbl_opt_data_index_dir.result b/mysql-test/suite/storage_engine/tbl_opt_data_dir.result index b8c5e75c88c..19b7d539b45 100644 --- a/mysql-test/suite/storage_engine/tbl_opt_data_index_dir.result +++ b/mysql-test/suite/storage_engine/tbl_opt_data_dir.result @@ -1,16 +1,19 @@ DROP TABLE IF EXISTS t1; +# Running CREATE TABLE .. DATA DIRECTORY = <> SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` char(8) DEFAULT NULL -) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>' INDEX DIRECTORY='<INDEX_DIR>' +) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR_1>' +# For ALTER TABLE the option is ignored +# Running ALTER TABLE .. DATA DIRECTORY = <> Warnings: -Warning 1618 <INDEX DIRECTORY> option ignored +Warning 1618 <DATA DIRECTORY> option ignored SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` char(8) DEFAULT NULL -) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>' INDEX DIRECTORY='<INDEX_DIR>' +) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR_1>' DROP TABLE t1; diff --git a/mysql-test/suite/storage_engine/tbl_opt_data_dir.test b/mysql-test/suite/storage_engine/tbl_opt_data_dir.test new file mode 100644 index 00000000000..434ca449125 --- /dev/null +++ b/mysql-test/suite/storage_engine/tbl_opt_data_dir.test @@ -0,0 +1,52 @@ +# +# Check whether DATA DIRECTORY is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only that it is accepted +# + +--source include/have_symlink.inc +--source have_engine.inc + +--let $data_dir1 = $MYSQLTEST_VARDIR/storage_engine_data_dir1/ +--let $data_dir2 = $MYSQLTEST_VARDIR/storage_engine_data_dir2/ +--mkdir $data_dir1 
+--mkdir $data_dir2 + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--let $table_options = DATA DIRECTORY = '$data_dir1' +# We cannot mask the folder name here and further, but we can switch off query logging +--let $disable_query_log = 1 +--echo # Running CREATE TABLE .. DATA DIRECTORY = <> +--source create_table.inc + +--source mask_engine.inc +--replace_result $data_dir1 <DATA_DIR_1> +SHOW CREATE TABLE t1; + +--echo # For ALTER TABLE the option is ignored + +--let $alter_definition = DATA DIRECTORY = '$data_dir2' +--disable_query_log +--echo # Running ALTER TABLE .. DATA DIRECTORY = <> +--source alter_table.inc +if ($mysql_errname) +{ + --let $my_last_stmt = $alter_statement + --let $functionality = ALTER TABLE + --source unexpected_result.inc +} +--enable_query_log +--source mask_engine.inc +--replace_result $data_dir1 <DATA_DIR_1> +SHOW CREATE TABLE t1; + +DROP TABLE t1; + +--source cleanup_engine.inc + +--rmdir $data_dir1 +--rmdir $data_dir2 diff --git a/mysql-test/suite/storage_engine/tbl_opt_data_index_dir.test b/mysql-test/suite/storage_engine/tbl_opt_data_index_dir.test deleted file mode 100644 index 4a83cc46b84..00000000000 --- a/mysql-test/suite/storage_engine/tbl_opt_data_index_dir.test +++ /dev/null @@ -1,52 +0,0 @@ -# -# Check whether DATA DIRECTORY and INDEX DIRECTORY -# are supported in CREATE and ALTER TABLE -# -# Note: the test does not check whether the options -# have any real effect on the table, only -# that they are accepted -# - ---source include/have_symlink.inc ---source have_engine.inc - ---let $data_dir = $MYSQLTEST_VARDIR/storage_engine_data_dir/ ---let $index_dir = $MYSQLTEST_VARDIR/storage_engine_index_dir/ ---mkdir $data_dir ---mkdir $index_dir - ---disable_warnings -DROP TABLE IF EXISTS t1; ---enable_warnings - ---let $table_options = DATA DIRECTORY = '$data_dir' INDEX DIRECTORY = '$index_dir' -# We cannot mask the folders name here, but we can switch off query logging ---let $disable_query_log = 1 ---source 
create_table.inc - ---source mask_engine.inc ---replace_result $data_dir <DATA_DIR> $index_dir <INDEX_DIR> -SHOW CREATE TABLE t1; - ---let $alter_definition = INDEX DIRECTORY = '$data_dir' ---disable_query_log ---source alter_table.inc -if ($mysql_errname) -{ - --let $my_last_stmt = $alter_statement - --let $functionality = ALTER TABLE - --source unexpected_result.inc -} ---enable_query_log ---source mask_engine.inc ---replace_result $data_dir <DATA_DIR> $index_dir <INDEX_DIR> -SHOW CREATE TABLE t1; - -DROP TABLE t1; - ---source cleanup_engine.inc - ---rmdir $data_dir ---rmdir $index_dir - - diff --git a/mysql-test/suite/storage_engine/tbl_opt_index_dir.result b/mysql-test/suite/storage_engine/tbl_opt_index_dir.result new file mode 100644 index 00000000000..c7368e9a84a --- /dev/null +++ b/mysql-test/suite/storage_engine/tbl_opt_index_dir.result @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS t1; +# Running CREATE TABLE .. INDEX DIRECTORY = <> +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INDEX DIRECTORY='<INDEX_DIR_1>' +# For ALTER TABLE the option is ignored +# Running ALTER TABLE .. 
INDEX DIRECTORY = <> +Warnings: +Warning 1618 <INDEX DIRECTORY> option ignored +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INDEX DIRECTORY='<INDEX_DIR_1>' +DROP TABLE t1; diff --git a/mysql-test/suite/storage_engine/tbl_opt_index_dir.test b/mysql-test/suite/storage_engine/tbl_opt_index_dir.test new file mode 100644 index 00000000000..03d2ef2bf82 --- /dev/null +++ b/mysql-test/suite/storage_engine/tbl_opt_index_dir.test @@ -0,0 +1,52 @@ +# +# Check whether INDEX DIRECTORY is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only that it is accepted +# + +--source include/have_symlink.inc +--source have_engine.inc + +--let $index_dir1 = $MYSQLTEST_VARDIR/storage_engine_index_dir1/ +--let $index_dir2 = $MYSQLTEST_VARDIR/storage_engine_index_dir2/ +--mkdir $index_dir1 +--mkdir $index_dir2 + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--let $table_options = INDEX DIRECTORY = '$index_dir1' +# We cannot mask the folder name here and further, but we can switch off query logging +--let $disable_query_log = 1 +--echo # Running CREATE TABLE .. INDEX DIRECTORY = <> +--source create_table.inc + +--source mask_engine.inc +--replace_result $index_dir1 <INDEX_DIR_1> +SHOW CREATE TABLE t1; + +--echo # For ALTER TABLE the option is ignored + +--let $alter_definition = INDEX DIRECTORY = '$index_dir2' +--disable_query_log +--echo # Running ALTER TABLE .. 
INDEX DIRECTORY = <> +--source alter_table.inc +if ($mysql_errname) +{ + --let $my_last_stmt = $alter_statement + --let $functionality = ALTER TABLE + --source unexpected_result.inc +} +--enable_query_log +--source mask_engine.inc +--replace_result $index_dir1 <INDEX_DIR_1> +SHOW CREATE TABLE t1; + +DROP TABLE t1; + +--source cleanup_engine.inc + +--rmdir $index_dir1 +--rmdir $index_dir2 diff --git a/mysql-test/suite/storage_engine/tbl_opt_row_format.result b/mysql-test/suite/storage_engine/tbl_opt_row_format.result index 5f1cf713bb6..57000e295f7 100644 --- a/mysql-test/suite/storage_engine/tbl_opt_row_format.result +++ b/mysql-test/suite/storage_engine/tbl_opt_row_format.result @@ -1,16 +1,30 @@ DROP TABLE IF EXISTS t1; -CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> ROW_FORMAT=FIXED; +CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> ROW_FORMAT=DYNAMIC; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +ALTER TABLE t1 ROW_FORMAT=FIXED; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` char(8) DEFAULT NULL ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED -ALTER TABLE t1 ROW_FORMAT=DYNAMIC; +ALTER TABLE t1 ROW_FORMAT=PAGE; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` char(8) DEFAULT NULL -) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=PAGE +ALTER TABLE t1 ROW_FORMAT=COMPACT; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT DROP TABLE t1; diff --git 
a/mysql-test/suite/storage_engine/tbl_opt_row_format.test b/mysql-test/suite/storage_engine/tbl_opt_row_format.test index d6347551da0..17fe2f36429 100644 --- a/mysql-test/suite/storage_engine/tbl_opt_row_format.test +++ b/mysql-test/suite/storage_engine/tbl_opt_row_format.test @@ -13,12 +13,12 @@ DROP TABLE IF EXISTS t1; --enable_warnings ---let $table_options = ROW_FORMAT=FIXED +--let $table_options = ROW_FORMAT=DYNAMIC --source create_table.inc --source mask_engine.inc SHOW CREATE TABLE t1; ---let $alter_definition = ROW_FORMAT=DYNAMIC +--let $alter_definition = ROW_FORMAT=FIXED --source alter_table.inc if ($mysql_errname) { @@ -29,6 +29,30 @@ if ($mysql_errname) --source mask_engine.inc SHOW CREATE TABLE t1; +--let $alter_definition = ROW_FORMAT=PAGE +--source alter_table.inc +if ($mysql_errname) +{ + --let $my_last_stmt = $alter_statement + --let $functionality = ALTER TABLE + --source unexpected_result.inc +} +--source mask_engine.inc +SHOW CREATE TABLE t1; + + +--let $alter_definition = ROW_FORMAT=COMPACT +--source alter_table.inc +if ($mysql_errname) +{ + --let $my_last_stmt = $alter_statement + --let $functionality = ALTER TABLE + --source unexpected_result.inc +} +--source mask_engine.inc +SHOW CREATE TABLE t1; + + DROP TABLE t1; --source cleanup_engine.inc diff --git a/mysql-test/suite/storage_engine/type_char_indexes.result b/mysql-test/suite/storage_engine/type_char_indexes.result index 5be07bdf53f..73c076863ab 100644 --- a/mysql-test/suite/storage_engine/type_char_indexes.result +++ b/mysql-test/suite/storage_engine/type_char_indexes.result @@ -97,7 +97,7 @@ varchar2b 1 varchar3b 1 SET SESSION optimizer_switch = 'engine_condition_pushdown=on'; Warnings: -Warning 1681 'engine_condition_pushdown=on' is deprecated and will be removed in a future release. 
+Warning 1681 'engine_condition_pushdown=on' is deprecated and will be removed in a future release EXPLAIN SELECT c,c20,v16,v128 FROM t1 WHERE c > 'a'; id select_type table type possible_keys key key_len ref rows Extra # # # range c_v c_v # # # Using index condition diff --git a/mysql-test/suite/storage_engine/type_date_time.result b/mysql-test/suite/storage_engine/type_date_time.result index 473a7e67494..f136dd93f34 100644 --- a/mysql-test/suite/storage_engine/type_date_time.result +++ b/mysql-test/suite/storage_engine/type_date_time.result @@ -13,7 +13,7 @@ SHOW COLUMNS IN t1; Field Type Null Key Default Extra d date # # # dt datetime # # # -ts timestamp # # # on update CURRENT_TIMESTAMP +ts timestamp # # # on update current_timestamp() t time # # # y year(4) # # # y4 year(4) # # # diff --git a/mysql-test/suite/storage_engine/vcol.result b/mysql-test/suite/storage_engine/vcol.result index d51ab038576..e56a1b0543d 100644 --- a/mysql-test/suite/storage_engine/vcol.result +++ b/mysql-test/suite/storage_engine/vcol.result @@ -3,12 +3,12 @@ CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN> GENERATED ALWAYS AS (a+1)) ENGIN SHOW COLUMNS IN t1; Field Type Null Key Default Extra a int(11) # # -b int(11) # # VIRTUAL +b int(11) # # VIRTUAL GENERATED INSERT INTO t1 (a) VALUES (1),(2); INSERT INTO t1 (a,b) VALUES (3,3),(4,4); Warnings: -Warning 1906 The value specified for computed column 'b' in table 't1' ignored -Warning 1906 The value specified for computed column 'b' in table 't1' ignored +Warning 1906 The value specified for generated column 'b' in table 't1' ignored +Warning 1906 The value specified for generated column 'b' in table 't1' ignored SELECT a,b FROM t1; a b 1 2 @@ -20,12 +20,12 @@ CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN> GENERATED ALWAYS AS (a+1) PERSIS SHOW COLUMNS IN t1; Field Type Null Key Default Extra a int(11) # # -b int(11) # # PERSISTENT +b int(11) # # STORED GENERATED INSERT INTO t1 (a) VALUES (1),(2); INSERT INTO t1 (a,b) VALUES (3,3),(4,4); 
Warnings: -Warning 1906 The value specified for computed column 'b' in table 't1' ignored -Warning 1906 The value specified for computed column 'b' in table 't1' ignored +Warning 1906 The value specified for generated column 'b' in table 't1' ignored +Warning 1906 The value specified for generated column 'b' in table 't1' ignored SELECT a,b FROM t1; a b 1 2 @@ -37,12 +37,12 @@ CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN> GENERATED ALWAYS AS (a+1) VIRTUA SHOW COLUMNS IN t1; Field Type Null Key Default Extra a int(11) # # -b int(11) # # VIRTUAL +b int(11) # # VIRTUAL GENERATED INSERT INTO t1 (a) VALUES (1),(2); INSERT INTO t1 (a,b) VALUES (3,3),(4,4); Warnings: -Warning 1906 The value specified for computed column 'b' in table 't1' ignored -Warning 1906 The value specified for computed column 'b' in table 't1' ignored +Warning 1906 The value specified for generated column 'b' in table 't1' ignored +Warning 1906 The value specified for generated column 'b' in table 't1' ignored SELECT a,b FROM t1; a b 1 2 @@ -54,12 +54,12 @@ CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN> AS (a+1) PERSISTENT) ENGINE=<STO SHOW COLUMNS IN t1; Field Type Null Key Default Extra a int(11) # # -b int(11) # # PERSISTENT +b int(11) # # STORED GENERATED INSERT INTO t1 (a) VALUES (1),(2); INSERT INTO t1 (a,b) VALUES (3,3),(4,4); Warnings: -Warning 1906 The value specified for computed column 'b' in table 't1' ignored -Warning 1906 The value specified for computed column 'b' in table 't1' ignored +Warning 1906 The value specified for generated column 'b' in table 't1' ignored +Warning 1906 The value specified for generated column 'b' in table 't1' ignored SELECT a,b FROM t1; a b 1 2 diff --git a/mysql-test/suite/sys_vars/r/delay_key_write_func.result b/mysql-test/suite/sys_vars/r/delay_key_write_func.result index 0fd1d492ef4..5cc4b2eaaad 100644 --- a/mysql-test/suite/sys_vars/r/delay_key_write_func.result +++ b/mysql-test/suite/sys_vars/r/delay_key_write_func.result @@ -1,24 +1,20 @@ 
'#--------------------FN_DYNVARS_023_01-------------------------#' SET @start_value= @@global.delay_key_write; -SET @@global.delay_key_write = ON; -SELECT @@global.delay_key_write; -@@global.delay_key_write -ON -connect user1,localhost,root,,,,; -connection user1; -SELECT @@global.delay_key_write AS res_is_ON; -res_is_ON -ON -SET @@global.delay_key_write = ALL; -disconnect user1; -connect user1,localhost,root,,,,; -connection user1; -SELECT @@global.delay_key_write AS res_is_ALL; -res_is_ALL -ALL '#--------------------FN_DYNVARS_023_02-------------------------#' +CREATE PROCEDURE sp_addRecords (IN var1 INT,IN var2 INT) +BEGIN +WHILE (var1 < var2) DO +INSERT INTO t1 VALUES(var1,REPEAT('MYSQL',10),100000.0/var1); +SET var1=var1+1; +END WHILE; +END// '---check when delay_key_write is OFF---' SET @@global.delay_key_write = OFF; +CREATE TABLE t1( +a INT PRIMARY KEY, +b VARCHAR(512), +c DOUBLE +) DELAY_KEY_WRITE = 1; FLUSH STATUS; CALL sp_addRecords(1,10); SHOW STATUS LIKE 'Key_reads'; @@ -33,8 +29,14 @@ Key_write_requests 9 SELECT COUNT(*) FROM t1; COUNT(*) 9 +DROP TABLE t1; '----check when delay_key_write is ON---' SET @@global.delay_key_write = ON; +CREATE TABLE t1( +a INT PRIMARY KEY, +b VARCHAR(512), +c DOUBLE +) DELAY_KEY_WRITE = 1; FLUSH STATUS; CALL sp_addRecords(1,10); SHOW STATUS LIKE 'Key_reads'; @@ -49,8 +51,14 @@ Key_write_requests 9 SELECT COUNT(*) FROM t1; COUNT(*) 9 +DROP TABLE t1; '----check when delay_key_write is ALL---' SET @@global.delay_key_write = ALL; +CREATE TABLE t1( +a INT PRIMARY KEY, +b VARCHAR(512), +c DOUBLE +) DELAY_KEY_WRITE = 0; FLUSH STATUS; CALL sp_addRecords(1,10); SHOW STATUS LIKE 'Key_reads'; @@ -67,6 +75,4 @@ COUNT(*) 9 DROP PROCEDURE sp_addRecords; DROP TABLE t1; -disconnect user1; -connection default; SET @@global.delay_key_write= @start_value; diff --git a/mysql-test/suite/sys_vars/r/innodb_sched_priority_cleaner_basic.result b/mysql-test/suite/sys_vars/r/innodb_sched_priority_cleaner_basic.result index 1183fb27732..f2bfaf2ed61 
100644 --- a/mysql-test/suite/sys_vars/r/innodb_sched_priority_cleaner_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_sched_priority_cleaner_basic.result @@ -1,4 +1,4 @@ -SET @start_value = @@GLOBAL.innodb_sched_priority_cleaner; +SET GLOBAL innodb_sched_priority_cleaner=39; SELECT @@GLOBAL.innodb_sched_priority_cleaner; @@GLOBAL.innodb_sched_priority_cleaner 19 diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit,xtradb.rdiff-disabled b/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit,xtradb.rdiff-disabled index 858df585a7b..f5ce4d77c6b 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit,xtradb.rdiff-disabled +++ b/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit,xtradb.rdiff-disabled @@ -1218,8 +1218,8 @@ COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_VERSION SESSION_VALUE NULL --GLOBAL_VALUE 5.6.35 -+GLOBAL_VALUE 5.6.35-80.0 +-GLOBAL_VALUE 5.6.36 ++GLOBAL_VALUE 5.6.36-82.0 GLOBAL_VALUE_ORIGIN COMPILE-TIME DEFAULT_VALUE NULL VARIABLE_SCOPE GLOBAL diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff-disabled b/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff-disabled index d801270c6b6..9199653b460 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff-disabled +++ b/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff-disabled @@ -661,8 +661,8 @@ COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_VERSION SESSION_VALUE NULL --GLOBAL_VALUE 5.6.35 -+GLOBAL_VALUE 5.6.35-80.0 +-GLOBAL_VALUE 5.6.36 ++GLOBAL_VALUE 5.6.36-82.0 GLOBAL_VALUE_ORIGIN COMPILE-TIME DEFAULT_VALUE NULL VARIABLE_SCOPE GLOBAL diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb.result b/mysql-test/suite/sys_vars/r/sysvars_innodb.result index 8026c4a15ec..d74f15e90ae 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_innodb.result +++ b/mysql-test/suite/sys_vars/r/sysvars_innodb.result @@ -1428,8 +1428,8 @@ DEFAULT_VALUE 50331648 VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BIGINT VARIABLE_COMMENT Size of each log file in a log group. 
-NUMERIC_MIN_VALUE 4194304 -NUMERIC_MAX_VALUE 9223372036854775807 +NUMERIC_MIN_VALUE 1048576 +NUMERIC_MAX_VALUE 549755813888 NUMERIC_BLOCK_SIZE 1048576 ENUM_VALUE_LIST NULL READ_ONLY YES @@ -2458,7 +2458,7 @@ READ_ONLY YES COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_VERSION SESSION_VALUE NULL -GLOBAL_VALUE 5.7.14 +GLOBAL_VALUE 5.7.18 GLOBAL_VALUE_ORIGIN COMPILE-TIME DEFAULT_VALUE NULL VARIABLE_SCOPE GLOBAL diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result index 7ceea1a295c..b75960bfbb1 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result @@ -2909,9 +2909,9 @@ READ_ONLY YES COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_STATEMENT_CLASSES SESSION_VALUE NULL -GLOBAL_VALUE 190 +GLOBAL_VALUE 191 GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE 190 +DEFAULT_VALUE 191 VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BIGINT UNSIGNED VARIABLE_COMMENT Maximum number of statement instruments. diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result index 957485b4e4b..13072f5afe1 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result @@ -3105,9 +3105,9 @@ READ_ONLY YES COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_STATEMENT_CLASSES SESSION_VALUE NULL -GLOBAL_VALUE 190 +GLOBAL_VALUE 191 GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE 190 +DEFAULT_VALUE 191 VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BIGINT UNSIGNED VARIABLE_COMMENT Maximum number of statement instruments. 
diff --git a/mysql-test/suite/sys_vars/t/delay_key_write_func.test b/mysql-test/suite/sys_vars/t/delay_key_write_func.test index 89f40ba544b..e823e51954c 100644 --- a/mysql-test/suite/sys_vars/t/delay_key_write_func.test +++ b/mysql-test/suite/sys_vars/t/delay_key_write_func.test @@ -20,32 +20,14 @@ ############################################################################### --echo '#--------------------FN_DYNVARS_023_01-------------------------#' -####################################################################### -# Check if setting delay_key_write is changed in every new connection # -####################################################################### - SET @start_value= @@global.delay_key_write; -SET @@global.delay_key_write = ON; -SELECT @@global.delay_key_write; - -connect (user1,localhost,root,,,,); -connection user1; -SELECT @@global.delay_key_write AS res_is_ON; -SET @@global.delay_key_write = ALL; -disconnect user1; - -connect (user1,localhost,root,,,,); -connection user1; -SELECT @@global.delay_key_write AS res_is_ALL; - --echo '#--------------------FN_DYNVARS_023_02-------------------------#' ###################################################### # Begin the functionality Testing of delay_key_write # ###################################################### # create procedure to add rows ---disable_query_log DELIMITER //; CREATE PROCEDURE sp_addRecords (IN var1 INT,IN var2 INT) BEGIN @@ -55,28 +37,19 @@ BEGIN END WHILE; END// DELIMITER ;// ---enable_query_log #============================================================================== --echo '---check when delay_key_write is OFF---' #============================================================================== - SET @@global.delay_key_write = OFF; - ---disable_query_log ---disable_warnings -DROP TABLE IF EXISTS t1; ---enable_warnings # create a table with delay_key_write enabled CREATE TABLE t1( a INT PRIMARY KEY, b VARCHAR(512), c DOUBLE ) DELAY_KEY_WRITE = 1; ---enable_query_log - FLUSH 
STATUS; @@ -86,6 +59,7 @@ SHOW STATUS LIKE 'Key_reads'; SHOW STATUS LIKE 'Key_writes'; SHOW STATUS LIKE 'Key_write_requests'; SELECT COUNT(*) FROM t1; +DROP TABLE t1; #============================================================================== --echo '----check when delay_key_write is ON---' @@ -93,17 +67,12 @@ SELECT COUNT(*) FROM t1; SET @@global.delay_key_write = ON; ---disable_query_log ---disable_warnings -DROP TABLE IF EXISTS t1; ---enable_warnings # create a table with delay_key_write enabled CREATE TABLE t1( a INT PRIMARY KEY, b VARCHAR(512), c DOUBLE ) DELAY_KEY_WRITE = 1; ---enable_query_log FLUSH STATUS; CALL sp_addRecords(1,10); @@ -112,23 +81,19 @@ SHOW STATUS LIKE 'Key_reads'; SHOW STATUS LIKE 'Key_writes'; SHOW STATUS LIKE 'Key_write_requests'; SELECT COUNT(*) FROM t1; +DROP TABLE t1; #============================================================================== --echo '----check when delay_key_write is ALL---' #============================================================================== SET @@global.delay_key_write = ALL; ---disable_query_log ---disable_warnings -DROP TABLE IF EXISTS t1; ---enable_warnings # create a table with delay_key_write disabled CREATE TABLE t1( a INT PRIMARY KEY, b VARCHAR(512), c DOUBLE ) DELAY_KEY_WRITE = 0; ---enable_query_log FLUSH STATUS; CALL sp_addRecords(1,10); @@ -140,12 +105,9 @@ SELECT COUNT(*) FROM t1; DROP PROCEDURE sp_addRecords; DROP TABLE t1; -disconnect user1; -connection default; SET @@global.delay_key_write= @start_value; #################################################### # End of functionality testing for delay_key_write # #################################################### - diff --git a/mysql-test/suite/sys_vars/t/innodb_sched_priority_cleaner_basic.test b/mysql-test/suite/sys_vars/t/innodb_sched_priority_cleaner_basic.test index b2382fd7844..2c2037f167f 100644 --- a/mysql-test/suite/sys_vars/t/innodb_sched_priority_cleaner_basic.test +++ 
b/mysql-test/suite/sys_vars/t/innodb_sched_priority_cleaner_basic.test @@ -4,7 +4,15 @@ # A dynamic, global variable -SET @start_value = @@GLOBAL.innodb_sched_priority_cleaner; +# Test in read-only mode +--let $restart_parameters= --innodb-read-only +--source include/restart_mysqld.inc +--let $restart_parameters= + +# This has no actual effect in innodb_read_only mode +SET GLOBAL innodb_sched_priority_cleaner=39; + +--source include/restart_mysqld.inc # Default value SELECT @@GLOBAL.innodb_sched_priority_cleaner; diff --git a/mysql-test/suite/wsrep/include/check_galera_version.inc b/mysql-test/suite/wsrep/include/check_galera_version.inc index 38b4ada98b9..cb35269249b 100644 --- a/mysql-test/suite/wsrep/include/check_galera_version.inc +++ b/mysql-test/suite/wsrep/include/check_galera_version.inc @@ -12,11 +12,29 @@ --disable_query_log -eval SET @GALERA_VERSION=(SELECT CONCAT('$galera_version', '%')); +# Required Version -if (!`SELECT COUNT(*) FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE - VARIABLE_NAME LIKE 'wsrep_provider_version' AND - VARIABLE_VALUE LIKE @GALERA_VERSION`) +eval SET @GALERA_VERSION='$galera_version'; +SELECT CAST(REGEXP_REPLACE(@GALERA_VERSION,'^(\\d+)\\.(\\d+)\\.(\\d+).*','\\1') AS UNSIGNED) INTO @GALERA_MAJOR_VERSION; +SELECT CAST(REGEXP_REPLACE(@GALERA_VERSION,'^(\\d+)\\.(\\d+)\\.(\\d+).*','\\2') AS UNSIGNED) INTO @GALERA_MID_VERSION; +SELECT CAST(REGEXP_REPLACE(@GALERA_VERSION,'^(\\d+)\\.(\\d+)\\.(\\d+).*','\\3') AS UNSIGNED) INTO @GALERA_MINOR_VERSION; + +# Actual +SELECT VARIABLE_VALUE INTO @ACTUAL_GALERA_VERSION FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME LIKE 'wsrep_provider_version'; + +SELECT CAST(REGEXP_REPLACE(@ACTUAL_GALERA_VERSION,'^(\\d+)\\.(\\d+)\\.(\\d+).*','\\1') AS UNSIGNED) INTO @ACTUAL_GALERA_MAJOR_VERSION; +SELECT CAST(REGEXP_REPLACE(@ACTUAL_GALERA_VERSION,'^(\\d+)\\.(\\d+)\\.(\\d+).*','\\2') AS UNSIGNED) INTO @ACTUAL_GALERA_MID_VERSION; +SELECT 
CAST(REGEXP_REPLACE(@ACTUAL_GALERA_VERSION,'^(\\d+)\\.(\\d+)\\.(\\d+).*','\\3') AS UNSIGNED) INTO @ACTUAL_GALERA_MINOR_VERSION; + +# For testing +#SELECT @GALERA_MAJOR_VERSION, @GALERA_MID_VERSION, @GALERA_MINOR_VERSION; +#SELECT @ACTUAL_GALERA_VERSION; +#SELECT @ACTUAL_GALERA_MAJOR_VERSION, @ACTUAL_GALERA_MID_VERSION, @ACTUAL_GALERA_MINOR_VERSION; + +if (!`SELECT (@ACTUAL_GALERA_MAJOR_VERSION > @GALERA_MAJOR_VERSION) OR + (@ACTUAL_GALERA_MAJOR_VERSION = @GALERA_MAJOR_VERSION AND @ACTUAL_GALERA_MID_VERSION > @GALERA_MID_VERSION) OR + (@ACTUAL_GALERA_MAJOR_VERSION = @GALERA_MAJOR_VERSION AND @ACTUAL_GALERA_MID_VERSION = @GALERA_MID_VERSION AND @ACTUAL_GALERA_MINOR_VERSION >= @GALERA_MINOR_VERSION) + `) { skip Test requires Galera library version $galera_version; } diff --git a/mysql-test/t/alter_table_online.test b/mysql-test/t/alter_table_online.test index 22ebadd64f9..15df36e8009 100644 --- a/mysql-test/t/alter_table_online.test +++ b/mysql-test/t/alter_table_online.test @@ -285,6 +285,24 @@ CREATE TABLE t1 (a LONGTEXT COLLATE latin1_general_ci); ALTER TABLE t1 MODIFY a LONGTEXT COLLATE latin1_swedish_ci, ALGORITHM=INPLACE; DROP TABLE t1; ---echo # ---echo # End of MDEV-8948 ALTER ... 
INPLACE does work for BINARY, BLOB ---echo # +# +# MDEV-11335 Changing delay_key_write option for MyISAM table should not copy rows +# +select @@global.delay_key_write; +create table t1 (a int, b int, key(b)); +flush tables; +flush status; +show status like 'Feature_delay_key_write'; +insert t1 values (1,2),(2,3),(3,4); +show status like 'Feature_delay_key_write'; +alter online table t1 delay_key_write=1; +show status like 'Feature_delay_key_write'; +flush tables; +insert t1 values (1,2),(2,3),(3,4); +show status like 'Feature_delay_key_write'; +alter online table t1 delay_key_write=0; +show status like 'Feature_delay_key_write'; +flush tables; +insert t1 values (1,2),(2,3),(3,4); +show status like 'Feature_delay_key_write'; +drop table t1; diff --git a/mysql-test/t/derived_view.test b/mysql-test/t/derived_view.test index 3a18e9a086e..9b0cf9dca7d 100644 --- a/mysql-test/t/derived_view.test +++ b/mysql-test/t/derived_view.test @@ -1899,6 +1899,56 @@ deallocate prepare stmt1; drop table t1,t2; +--echo # +--echo # Bug mdev-12670: mergeable derived / view with subqueries +--echo # subject to semi-join optimizations +--echo # (actually this is a 5.3 bug.) 
+--echo # + +create table t1 (a int) engine=myisam; +insert into t1 values (5),(3),(2),(7),(2),(5),(1); +create table t2 (b int, index idx(b)) engine=myisam; +insert into t2 values (2),(3),(2),(1),(3),(4); +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +analyze table t1,t2; + +explain select a from t1 where a in (select b from t2); +explain select * from (select a from t1 where a in (select b from t2)) t; +create view v1 as select a from t1 where a in (select b from t2); +explain select * from v1; + +drop view v1; +drop table t1,t2; + +--echo # +--echo # Bug mdev-12812: mergeable derived / view with subqueries +--echo # NOT subject to semi-join optimizations +--echo # + +CREATE TABLE t1 (c1 varchar(3)) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('foo'),('foo'); + +CREATE TABLE t2 (c2 varchar(3)) ENGINE=MyISAM; +INSERT INTO t2 VALUES ('bar'),('qux'),('foo'); + +let $q= +SELECT STRAIGHT_JOIN * + FROM ( SELECT * FROM t1 WHERE c1 IN ( SELECT c2 FROM t2 ) ) AS sq; + +eval $q; +eval EXPLAIN EXTENDED $q; + +DROP TABLE t1, t2; + # The following command must be the last one the file set optimizer_switch=@exit_optimizer_switch; set join_cache_level=@exit_join_cache_level; diff --git a/mysql-test/t/func_group.test b/mysql-test/t/func_group.test index 4928203e603..d847f7cd5f7 100644 --- a/mysql-test/t/func_group.test +++ b/mysql-test/t/func_group.test @@ -1693,6 +1693,17 @@ DROP TABLE t1; --echo # --echo # +--echo # MDEV-13064: assertion `n < m_size' fails in Item::split_sum_func2() +--echo # + +create table t1 (i int) engine=MyISAM; +insert into t1 value (1),(2); + +select count(*)+sleep(0) from t1; + +drop table t1; + +--echo # --echo # Start of 10.3 tests --echo 
# diff --git a/mysql-test/t/func_hybrid_type.test b/mysql-test/t/func_hybrid_type.test index f353eb7a628..954c7de53fa 100644 --- a/mysql-test/t/func_hybrid_type.test +++ b/mysql-test/t/func_hybrid_type.test @@ -553,6 +553,42 @@ SELECT * FROM t1; SHOW CREATE TABLE t1; DROP TABLE t1; +CREATE TABLE t1 AS SELECT + 9 AS i1, COALESCE(9) AS c1, + 99 AS i2, COALESCE(99) AS c2, + 999 AS i3, COALESCE(999) AS c3, + 9999 AS i4, COALESCE(9999) AS c4, + 99999 AS i5, COALESCE(99999) AS c5, + 999999 AS i6, COALESCE(999999) AS c6, + 9999999 AS i7, COALESCE(9999999) AS c7, + 99999999 AS i8, COALESCE(99999999) AS c8, + 999999999 AS i9, COALESCE(999999999) AS c9, + 2147483647, COALESCE(2147483647), + 2147483648, COALESCE(2147483648), + 9999999999 AS i10, COALESCE(9999999999) AS c10, + 99999999999 AS i11, COALESCE(99999999999) AS c11, + 999999999999 AS i12, COALESCE(999999999999) AS c12, + 9999999999999 AS i13, COALESCE(9999999999999) AS c13, + 99999999999999 AS i14, COALESCE(99999999999999) AS c14, + 999999999999999 AS i15, COALESCE(999999999999999) AS c15, + 9999999999999999 AS i16, COALESCE(9999999999999999) AS c16, + 99999999999999999 AS i17, COALESCE(99999999999999999) AS c17, + 999999999999999999 AS i18, COALESCE(999999999999999999) AS c18, + 9223372036854775807, COALESCE(9223372036854775807), + 9223372036854775808, COALESCE(9223372036854775808), + 9999999999999999999 AS i19, COALESCE(9999999999999999999) AS c19, + 18446744073709551615, COALESCE(18446744073709551615), + 18446744073709551616, COALESCE(18446744073709551616), + 99999999999999999999 AS i20, COALESCE(99999999999999999999) AS c20, + 999999999999999999999 AS i21, COALESCE(999999999999999999999) AS c21, + 9999999999999999999999 AS i22, COALESCE(9999999999999999999999) AS c22; +SHOW CREATE TABLE t1; +--vertical_results +SELECT * FROM t1; +--horizontal_results +DROP TABLE t1; + + --echo # --echo # MDEV-9406 CREATE TABLE..SELECT creates different columns for IFNULL() and equivalent COALESCE,CASE,IF --echo # diff --git 
a/mysql-test/t/func_regexp_pcre.test b/mysql-test/t/func_regexp_pcre.test index 26294ce2e24..4b2c18e0674 100644 --- a/mysql-test/t/func_regexp_pcre.test +++ b/mysql-test/t/func_regexp_pcre.test @@ -428,3 +428,14 @@ SELECT 0xE001 REGEXP @regCheck; SET NAMES latin1; SET @regCheck= '\\xE0\\x01'; SELECT CAST(0xE001 AS BINARY) REGEXP @regCheck; + +--echo # MDEV-12420: Testing recursion overflow +--replace_regex /[0-9]+ exceeded/NUM exceeded/ +SELECT 1 FROM dual WHERE ('Alpha,Bravo,Charlie,Delta,Echo,Foxtrot,StrataCentral,Golf,Hotel,India,Juliet,Kilo,Lima,Mike,StrataL3,November,Oscar,StrataL2,Sand,P3,P4SwitchTest,Arsys,Poppa,ExtensionMgr,Arp,Quebec,Romeo,StrataApiV2,PtReyes,Sierra,SandAcl,Arrow,Artools,BridgeTest,Tango,SandT,PAlaska,Namespace,Agent,Qos,PatchPanel,ProjectReport,Ark,Gimp,Agent,SliceAgent,Arnet,Bgp,Ale,Tommy,Central,AsicPktTestLib,Hsc,SandL3,Abuild,Pca9555,Standby,ControllerDut,CalSys,SandLib,Sb820,PointV2,BfnLib,Evpn,BfnSdk,Sflow,ManagementActive,AutoTest,GatedTest,Bgp,Sand,xinetd,BfnAgentLib,bf-utils,Hello,BfnState,Eos,Artest,Qos,Scd,ThermoMgr,Uniform,EosUtils,Eb,FanController,Central,BfnL3,BfnL2,tcp_wrappers,Victor,Environment,Route,Failover,Whiskey,Xray,Gimp,BfnFixed,Strata,SoCal,XApi,Msrp,XpProfile,tcpdump,PatchPanel,ArosTest,FhTest,Arbus,XpAcl,MacConc,XpApi,telnet,QosTest,Alpha2,BfnVlan,Stp,VxlanControllerTest,MplsAgent,Bravo2,Lanz,BfnMbb,Intf,XCtrl,Unicast,SandTunnel,L3Unicast,Ipsec,MplsTest,Rsvp,EthIntf,StageMgr,Sol,MplsUtils,Nat,Ira,P4NamespaceDut,Counters,Charlie2,Aqlc,Mlag,Power,OpenFlow,Lag,RestApi,BfdTest,strongs,Sfa,CEosUtils,Adt746,MaintenanceMode,MlagDut,EosImage,IpEth,MultiProtocol,Launcher,Max3179,Snmp,Acl,IpEthTest,PhyEee,bf-syslibs,tacc,XpL2,p4-ar-switch,p4-bf-switch,LdpTest,BfnPhy,Mirroring,Phy6,Ptp' REGEXP '^((?!\b(Strata|StrataApi|StrataApiV2)\b).)*$'); + +# +# MDEV-12942 REGEXP_INSTR returns 1 when using brackets +# +SELECT REGEXP_INSTR('a_kollision', 'oll'); +SELECT REGEXP_INSTR('a_kollision', '(oll)'); +SELECT 
REGEXP_INSTR('a_kollision', 'o([lm])\\1'); diff --git a/mysql-test/t/innodb_ext_key.test b/mysql-test/t/innodb_ext_key.test index bf94b7dd3d5..a721943e8bc 100644 --- a/mysql-test/t/innodb_ext_key.test +++ b/mysql-test/t/innodb_ext_key.test @@ -778,5 +778,46 @@ where index_date_updated= 10 and index_id < 800; drop table t0,t1,t2; -set optimizer_switch=@save_ext_key_optimizer_switch; -SET SESSION STORAGE_ENGINE=DEFAULT; + +--echo # +--echo # MDEV-11196: Error:Run-Time Check Failure #2 - Stack around the variable 'key_buff' +--echo # was corrupted, server crashes in opt_sum_query + +CREATE TABLE t1 ( + pk INT, + f1 VARCHAR(3), + f2 VARCHAR(1024), + PRIMARY KEY (pk), + KEY(f2) +) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC; + +INSERT INTO t1 VALUES (1,'foo','abc'),(2,'bar','def'); +SELECT MAX(t2.pk) FROM t1 t2 INNER JOIN t1 t3 ON t2.f1 = t3.f1 WHERE t2.pk <= 4; +drop table t1; + +CREATE TABLE t1 ( + pk1 INT, + pk2 INT, + f1 VARCHAR(3), + f2 VARCHAR(1021), + PRIMARY KEY (pk1,pk2), + KEY(f2) +) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC; + +INSERT INTO t1 VALUES (1,2,'2','abc'),(2,3,'3','def'); +explain format= json +select * from t1 force index(f2) where pk1 <= 5 and pk2 <=5 and f2 = 'abc' and f1 <= '3'; +drop table t1; + +CREATE TABLE t1 ( +f2 INT, +pk2 INT, +f1 VARCHAR(3), +pk1 VARCHAR(1000), +PRIMARY KEY (pk1,pk2), +KEY k1(pk1,f2) +) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC; +INSERT INTO t1 VALUES (1,2,'2','abc'),(2,3,'3','def'); +explain format= json +select * from t1 force index(k1) where f2 <= 5 and pk2 <=5 and pk1 = 'abc' and f1 <= '3'; +drop table t1; diff --git a/mysql-test/t/join_outer.test b/mysql-test/t/join_outer.test index a3a1278ef1e..88861511d79 100644 --- a/mysql-test/t/join_outer.test +++ b/mysql-test/t/join_outer.test @@ -1882,4 +1882,86 @@ WHERE v3 = 4; drop table t1,t2,t3; +--echo # +--echo # MDEV-11958: LEFT JOIN with stored routine produces incorrect result +--echo # + +CREATE TABLE t (x INT); +INSERT INTO t VALUES(1),(NULL); +CREATE 
FUNCTION f (val INT, ret INT) RETURNS INT DETERMINISTIC RETURN IFNULL(val, ret); + +SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0) + FROM t t1 LEFT JOIN t t2 + ON t1.x = t2.x + WHERE IFNULL(t2.x,0)=0; +explain extended +SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0) + FROM t t1 LEFT JOIN t t2 + ON t1.x = t2.x + WHERE IFNULL(t2.x,0)=0; +SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0) + FROM t t1 LEFT JOIN t t2 + ON t1.x = t2.x + WHERE f(t2.x,0)=0; +explain extended +SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0) + FROM t t1 LEFT JOIN t t2 + ON t1.x = t2.x + WHERE f(t2.x,0)=0; + +drop function f; +drop table t; +CREATE TABLE t1 ( + col1 DECIMAL(33,5) NULL DEFAULT NULL, + col2 DECIMAL(33,5) NULL DEFAULT NULL +); + +CREATE TABLE t2 ( + col1 DECIMAL(33,5) NULL DEFAULT NULL, + col2 DECIMAL(33,5) NULL DEFAULT NULL, + col3 DECIMAL(33,5) NULL DEFAULT NULL +); + +INSERT INTO t1 VALUES (2, 1.1), (2, 2.1); +INSERT INTO t2 VALUES (3, 3.1, 4), (1, 1, NULL); + +DELIMITER |; + +CREATE FUNCTION f1 ( p_num DECIMAL(45,15), p_return DECIMAL(45,15)) +RETURNS decimal(33,5) +LANGUAGE SQL +DETERMINISTIC +CONTAINS SQL +SQL SECURITY INVOKER +BEGIN + IF p_num IS NULL THEN + RETURN p_return; + ELSE + RETURN p_num; + END IF; +END | + +DELIMITER ;| + +let $q1= +SELECT t1.col1, t2.col1, t2.col3 +FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2 +WHERE IFNULL(t2.col3,0) = 0; + +eval $q1; +eval EXPLAIN EXTENDED $q1; + +let $q2= +SELECT t1.col1, t2.col1, t2.col3 +FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2 +WHERE f1(t2.col3,0) = 0; +eval $q2; +eval EXPLAIN EXTENDED $q2; + +DROP FUNCTION f1; + +DROP TABLE t1,t2; + +--echo # end of 5.5 tests + SET optimizer_switch=@save_optimizer_switch; diff --git a/mysql-test/t/log_tables-big-master.opt b/mysql-test/t/log_tables-big-master.opt index b9bc885d0e4..0cdf12d284b 100644 --- a/mysql-test/t/log_tables-big-master.opt +++ b/mysql-test/t/log_tables-big-master.opt @@ -1 +1 @@ ---log-output=table,file --slow-query-log --general-log --general-log-file="" 
--slow-query-log-file="" +--slow-query-log --general-log --general-log-file="" --slow-query-log-file="" diff --git a/mysql-test/t/mdl_sync.test b/mysql-test/t/mdl_sync.test index 4aa191d3dfc..fbecd6bf547 100644 --- a/mysql-test/t/mdl_sync.test +++ b/mysql-test/t/mdl_sync.test @@ -4075,6 +4075,30 @@ disconnect con2; disconnect con3; +--echo # +--echo # MDEV-12620 - set lock_wait_timeout = 1;flush tables with read lock; +--echo # lock not released after timeout +--echo # +CREATE TABLE t1(a INT) ENGINE=InnoDB; +SET debug_sync='open_tables_after_open_and_process_table SIGNAL ready WAIT_FOR go'; +send SELECT * FROM t1; + +connect (con1,localhost,root,,); +SET debug_sync='now WAIT_FOR ready'; +# lock_wait_timeout should be 0 in 10.3, so that we don't have to wait at all +SET lock_wait_timeout=1; +--error ER_LOCK_WAIT_TIMEOUT +FLUSH TABLES WITH READ LOCK; +SET debug_sync='now SIGNAL go'; + +connection default; +reap; +SET debug_sync='RESET'; +DROP TABLE t1; + +disconnect con1; + + # Check that all connections opened by test cases in this file are really # gone so execution of other tests won't be affected by their presence. 
--source include/wait_until_count_sessions.inc diff --git a/mysql-test/t/myisam_debug.test b/mysql-test/t/myisam_debug.test index b106ecdea5e..465ecd70895 100644 --- a/mysql-test/t/myisam_debug.test +++ b/mysql-test/t/myisam_debug.test @@ -57,3 +57,16 @@ KILL QUERY @thread_id; CHECK TABLE t1; DROP TABLE t1,t2; DISCONNECT insertConn; + +# +# MDEV-12761 Error return from external_lock make the server crash +# +call mtr.add_suppression("Index for table '.*test.t1\\.MYI' is corrupt; try to repair it"); +create table t1 (a int, index(a)); +lock tables t1 write; +insert t1 values (1),(2),(1); +set @old_dbug=@@debug_dbug; +set debug_dbug='+d,mi_lock_database_failure'; +unlock tables; +set debug_dbug=@old_dbug; +drop table t1; diff --git a/mysql-test/t/mysqld--help.test b/mysql-test/t/mysqld--help.test index e6499957cd2..01f26f09543 100644 --- a/mysql-test/t/mysqld--help.test +++ b/mysql-test/t/mysqld--help.test @@ -3,6 +3,7 @@ # --source include/not_embedded.inc --source include/have_perfschema.inc +--source include/have_profiling.inc --source include/platform.inc # diff --git a/mysql-test/t/partition_alter.test b/mysql-test/t/partition_alter.test index 592d8fdaeaa..9194e9a8222 100644 --- a/mysql-test/t/partition_alter.test +++ b/mysql-test/t/partition_alter.test @@ -1,3 +1,4 @@ +--source include/have_innodb.inc --source include/have_partition.inc CREATE TABLE `test_data` ( @@ -64,3 +65,41 @@ deallocate prepare stmt; drop table test_data; +# +# MDEV-12389 ADD CHECK leaves an orphaned .par file +# + +--let $datadir=`SELECT @@datadir` + +# InnoDB +create table t1(id int, d date not null, b bool not null default 0, primary key(id,d)) +engine=innodb +partition by range columns (d) ( +partition p1 values less than ('2016-10-18'), +partition p2 values less than ('2020-10-19')); +insert t1 values (0, '2000-01-02', 0); +insert t1 values (1, '2020-01-02', 10); +--replace_regex /#sql-[0-9a-f_]*/#sql-temporary/ +--error ER_CONSTRAINT_FAILED +alter table t1 add check (b in (0, 1)); 
+alter table t1 add check (b in (0, 10)); +show create table t1; +--error ER_CONSTRAINT_FAILED +insert t1 values (2, '2020-01-03', 20); +drop table t1; +--list_files $datadir/test + +# MyISAM, different execution path +create table t1(id int, d date not null, b bool not null default 0, primary key(id,d)) +partition by range columns (d) ( +partition p1 values less than ('2016-10-18'), +partition p2 values less than ('2020-10-19')); +insert t1 values (0, '2000-01-02', 0); +insert t1 values (1, '2020-01-02', 10); +# FIXME: MDEV-12923 MyISAM allows CHECK constraint violation in ALTER TABLE +alter table t1 add check (b in (0, 1)); +show create table t1; +--error ER_CONSTRAINT_FAILED +insert t1 values (2, '2020-01-03', 20); +drop table t1; +--list_files $datadir/test diff --git a/mysql-test/t/subselect_innodb.test b/mysql-test/t/subselect_innodb.test index 2451bc60fee..544bcd994ed 100644 --- a/mysql-test/t/subselect_innodb.test +++ b/mysql-test/t/subselect_innodb.test @@ -576,3 +576,38 @@ from t1; drop table t1,t2; + +--echo # +--echo # mdev-12931: semi-join in ON expression of STRAIGHT_JOIN +--echo # joining a base table and a mergeable derived table +--echo # + +CREATE TABLE t1 (f1 int) ENGINE=InnoDB; +INSERT INTO t1 VALUES (3),(2); + +CREATE TABLE t2 (f2 int) ENGINE=InnoDB; +INSERT INTO t2 VALUES (1),(4); + +CREATE TABLE t3 (f3 int) ENGINE=InnoDB; +INSERT INTO t3 VALUES (5),(6); + +CREATE TABLE t4 (f4 int) ENGINE=InnoDB; +INSERT INTO t4 VALUES (1),(8); + +SELECT * +FROM t1 + INNER JOIN + ( t2 STRAIGHT_JOIN ( SELECT * FROM t3 ) AS sq + ON ( 1 IN ( SELECT f4 FROM t4 ) ) ) + ON ( f1 >= f2 ); + +EXPLAIN EXTENDED +SELECT * +FROM t1 + INNER JOIN + ( t2 STRAIGHT_JOIN ( SELECT * FROM t3 ) AS sq + ON ( 1 IN ( SELECT f4 FROM t4 ) ) ) + ON ( f1 >= f2 ); + +DROP TABLE t1,t2,t3,t4; + diff --git a/mysql-test/t/subselect_mat_cost_bugs.test b/mysql-test/t/subselect_mat_cost_bugs.test index 316ac707bef..9e3ac603ec6 100644 --- a/mysql-test/t/subselect_mat_cost_bugs.test +++ 
b/mysql-test/t/subselect_mat_cost_bugs.test @@ -406,6 +406,8 @@ drop table t3, t4, t5; --echo # LP BUG#858038 The result of a query with NOT IN subquery depends on the state of the optimizer switch --echo # +set @optimizer_switch_save= @@optimizer_switch; + create table t1 (c1 char(2) not null, c2 char(2)); create table t2 (c3 char(2), c4 char(2)); @@ -425,6 +427,8 @@ select * from t1 where c1 = 'a2' and (c1, c2) not in (select * from t2); drop table t1, t2; +set optimizer_switch= @optimizer_switch_save; + --echo # --echo # MDEV-12673: cost-based choice between materialization and in-to-exists --echo # @@ -463,3 +467,43 @@ SELECT * FROM t1 WHERE i1 NOT IN ( ); DROP TABLE t1,t2,t3; + +--echo # +--echo # MDEV-7599: in-to-exists chosen after min/max optimization +--echo # + +set @optimizer_switch_save= @@optimizer_switch; + +CREATE TABLE t1 (a INT, KEY(a)) ENGINE=MyISAM; +INSERT INTO t1 VALUES (1),(2); + +CREATE TABLE t2 (b INT, c INT) ENGINE=MyISAM; +INSERT INTO t2 VALUES (1,6),(2,4), (8,9); + +let $q= +SELECT * FROM t2 WHERE b != ALL (SELECT MIN(a) FROM t1, t2 WHERE t2.c = t2.b); + +eval $q; +eval EXPLAIN EXTENDED $q; +set optimizer_switch= 'materialization=off'; +eval $q; +eval EXPLAIN EXTENDED $q; +set optimizer_switch= @optimizer_switch_save; + +DROP TABLE t1,t2; + +CREATE TABLE t1 (f1 varchar(10)) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('foo'),('bar'); + +CREATE TABLE t2 (f2 varchar(10), key(f2)) ENGINE=MyISAM; +INSERT INTO t2 VALUES ('baz'),('qux'); + +CREATE TABLE t3 (f3 varchar(10)) ENGINE=MyISAM; +INSERT INTO t3 VALUES ('abc'),('def'); + +SELECT * FROM t1 + WHERE f1 = ALL( SELECT MAX(t2a.f2) + FROM t2 AS t2a INNER JOIN t2 t2b INNER JOIN t3 + ON (f3 = t2b.f2) ); + +DROP TABLE t1,t2,t3; diff --git a/mysql-test/t/subselect_sj.test b/mysql-test/t/subselect_sj.test index b6d6e0a5172..f90f1e2e927 100644 --- a/mysql-test/t/subselect_sj.test +++ b/mysql-test/t/subselect_sj.test @@ -2773,5 +2773,77 @@ WHERE ( SELECT z.country drop table t1, t2, t3; set 
optimizer_switch= @tmp_mdev6859; +--echo # +--echo # MDEV-12675: subquery subject to semi-join optimizations +--echo # in ON expression of INNER JOIN +--echo # + +set @tmp_mdev12675=@@optimizer_switch; +set optimizer_switch=default; +create table t1 (a int) engine=myisam; +insert into t1 values (5),(3),(2),(7),(2),(5),(1); +create table t2 (b int, index idx(b)) engine=myisam; +insert into t2 values (2),(3),(2),(1),(3),(4); +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +insert into t2 select b+10 from t2; +analyze table t1,t2; + +explain +select a from t1, t2 where b between 1 and 2 and a in (select b from t2); +explain +select a from t1 join t2 on b between 1 and 2 and a in (select b from t2); + +drop table t1,t2; +set optimizer_switch= @tmp_mdev12675; + +--echo # +--echo # MDEV-12817: subquery NOT subject to semi-join optimizations +--echo # in ON expression of INNER JOIN +--echo # + +CREATE TABLE t1 (c1 int) ENGINE=MyISAM; +INSERT INTO t1 VALUES (1),(2); + +CREATE TABLE t2 (c2 int) ENGINE=MyISAM; +INSERT INTO t2 VALUES (3),(4); + +CREATE TABLE t3 (c3 int) ENGINE=MyISAM; +INSERT INTO t3 VALUES (5),(6); + +CREATE TABLE t4 (c4 int) ENGINE=MyISAM; +INSERT INTO t4 VALUES (7),(8); + +let $q1= +SELECT c1 +FROM t1 +LEFT JOIN +( t2 INNER JOIN t3 ON ( 1 IN ( SELECT c4 FROM t4 ) ) ) +ON (c1 = c3); + +eval $q1; +eval EXPLAIN EXTENDED $q1; + +let $q2= +SELECT * +FROM t1 +LEFT JOIN +( ( SELECT * FROM t2 WHERE c2 IN ( SELECT c3 FROM t3 ) ) AS sq INNER JOIN t4 ) +ON (c1 = c2); + +--echo # mdev-12820 +eval $q2; +eval EXPLAIN EXTENDED $q2; + +DROP TABLE t1,t2,t3,t4; + # The following command must be the last one the file set optimizer_switch=@subselect_sj_tmp; 
diff --git a/mysql-test/t/subselect_sj2_mat.test b/mysql-test/t/subselect_sj2_mat.test index 61d9b09edff..0f2892ae2dc 100644 --- a/mysql-test/t/subselect_sj2_mat.test +++ b/mysql-test/t/subselect_sj2_mat.test @@ -263,3 +263,23 @@ DROP TABLE t1,t2,t3; set join_cache_level= @save_join_cache_level; set optimizer_switch=@save_optimizer_switch; +--echo # +--echo # mdev-7791: materialization of a semi-join subquery + +--echo # RAND() in WHERE +--echo # (materialized table is accessed last) +--echo # + +set @save_optimizer_switch=@@optimizer_switch; +set optimizer_switch='materialization=on'; + +create table t1(i int); +insert into t1 values (1), (2), (3), (7), (9), (10); +create table t2(i int); +insert into t2 values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10); + +select * from t1 where (rand() < 0) and i in (select i from t2); +explain extended +select * from t1 where (rand() < 0) and i in (select i from t2); + +drop table t1,t2; +set optimizer_switch=@save_optimizer_switch; diff --git a/mysql-test/t/trigger.test b/mysql-test/t/trigger.test index d6eca47c0d8..aeab884670d 100644 --- a/mysql-test/t/trigger.test +++ b/mysql-test/t/trigger.test @@ -2677,6 +2677,30 @@ select trigger_name, action_order, created from information_schema.triggers drop table t1; set time_zone= @@global.time_zone; +--echo # MDEV-12992: Increasing memory consumption +--echo with each invocation of trigger +--echo # + +--let $n= 20000 + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +CREATE TABLE t2 (b INT); +CREATE TRIGGER tr + AFTER UPDATE ON t1 FOR EACH ROW SELECT (SELECT b FROM t2) INTO @x; + +--disable_query_log +--echo # Running $n queries +while ($n) +{ + UPDATE t1 SET a = 2; + --dec $n +} +--enable_query_log + +DROP TABLE t1,t2; + + --echo # --echo # Start of 10.3 tests --echo # diff --git a/mysql-test/t/truncate.test b/mysql-test/t/truncate.test index c7a066cc203..8895677b79f 100644 --- a/mysql-test/t/truncate.test +++ b/mysql-test/t/truncate.test @@ -56,7 +56,7 @@ drop table 
t1; # Test for Bug#5507 "TRUNCATE should work with views" # # when it'll be fixed, the error should become 1347 -# (test.v1' is not BASE TABLE) +# (test.v1' is not of type 'BASE TABLE') # create table t1 (s1 int); diff --git a/mysql-test/t/union.test b/mysql-test/t/union.test index ce8b2bc9c2a..abea52fe584 100644 --- a/mysql-test/t/union.test +++ b/mysql-test/t/union.test @@ -1524,6 +1524,31 @@ SELECT * FROM t1 t1_1 LEFT JOIN t1 t1_2 ON ( t1_2.b = t1_1.a ) DROP TABLE t1; +--echo # Bug mdev-12788: UNION ALL + impossible having for derived +--echo # with IN subquery in WHERE +--echo # + +CREATE TABLE t1 (i int) ENGINE=MyISAM; +INSERT INTO t1 VALUES (1); + +CREATE TABLE t2 (pk int PRIMARY KEY) ENGINE=MyISAM; +INSERT INTO t2 VALUES (1),(2); + +let $q= +SELECT 1, 2 +UNION ALL +SELECT i, COUNT(*) FROM ( + SELECT * FROM t1 WHERE i IN ( SELECT pk FROM t2 ) +) AS sq +GROUP BY i +HAVING i = 10; + +eval $q; +eval EXPLAIN EXTENDED $q; + +DROP TABLE t1,t2; + + --echo # --echo # Start of 10.3 tests --echo # diff --git a/mysys/lf_alloc-pin.c b/mysys/lf_alloc-pin.c index 6c813333d09..e2073df1e4d 100644 --- a/mysys/lf_alloc-pin.c +++ b/mysys/lf_alloc-pin.c @@ -323,12 +323,6 @@ static int match_pins(LF_PINS *el, void *addr) return 0; } -#if STACK_DIRECTION < 0 -#define available_stack_size(CUR,END) (long) ((char*)(CUR) - (char*)(END)) -#else -#define available_stack_size(CUR,END) (long) ((char*)(END) - (char*)(CUR)) -#endif - #define next_node(P, X) (*((uchar * volatile *)(((uchar *)(X)) + (P)->free_ptr_offset))) #define anext_node(X) next_node(&allocator->pinbox, (X)) diff --git a/mysys/lf_hash.c b/mysys/lf_hash.c index 41174a66ced..430f1007f30 100644 --- a/mysys/lf_hash.c +++ b/mysys/lf_hash.c @@ -550,7 +550,10 @@ static int initialize_bucket(LF_HASH *hash, LF_SLIST * volatile *node, return -1; if (*el == NULL && bucket && unlikely(initialize_bucket(hash, el, parent, pins))) + { + my_free(dummy); return -1; + } dummy->hashnr= my_reverse_bits(bucket) | 0; /* dummy node */ 
dummy->key= dummy_key; dummy->keylen= 0; diff --git a/mysys/ma_dyncol.c b/mysys/ma_dyncol.c index 155a4367345..125b3a4632d 100644 --- a/mysys/ma_dyncol.c +++ b/mysys/ma_dyncol.c @@ -4038,6 +4038,8 @@ mariadb_dyncol_val_double(double *dbl, DYNAMIC_COLUMN_VALUE *val) *dbl= strtod(str, &end); if (*end != '\0') rc= ER_DYNCOL_TRUNCATED; + free(str); + break; } case DYN_COL_DECIMAL: if (decimal2double(&val->x.decimal.value, dbl) != E_DEC_OK) diff --git a/mysys/waiting_threads.c b/mysys/waiting_threads.c index 7d8aae032ea..2549bd8a587 100644 --- a/mysys/waiting_threads.c +++ b/mysys/waiting_threads.c @@ -556,7 +556,7 @@ my_bool wt_resource_id_memcmp(const void *a, const void *b) { /* we use the fact that there's no padding in the middle of WT_RESOURCE_ID */ compile_time_assert(offsetof(WT_RESOURCE_ID, type) == sizeof(ulonglong)); - return memcmp(a, b, sizeof_WT_RESOURCE_ID); + return MY_TEST(memcmp(a, b, sizeof_WT_RESOURCE_ID)); } /** diff --git a/plugin/auth_pam/auth_pam.c b/plugin/auth_pam/auth_pam.c index 1f25163b371..ffc3d6f5537 100644 --- a/plugin/auth_pam/auth_pam.c +++ b/plugin/auth_pam/auth_pam.c @@ -162,10 +162,11 @@ static int pam_auth(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *info) if (new_username && strcmp(new_username, info->user_name)) strncpy(info->authenticated_as, new_username, sizeof(info->authenticated_as)); + info->authenticated_as[sizeof(info->authenticated_as)-1]= 0; end: pam_end(pamh, status); - PAM_DEBUG((stderr, "PAM: status = %d user = %s\n", status, new_username)); + PAM_DEBUG((stderr, "PAM: status = %d user = %s\n", status, info->authenticated_as)); return status == PAM_SUCCESS ? 
CR_OK : CR_ERROR; } diff --git a/plugin/aws_key_management/aws_key_management_plugin.cc b/plugin/aws_key_management/aws_key_management_plugin.cc index d7a948369f5..365c5caf198 100644 --- a/plugin/aws_key_management/aws_key_management_plugin.cc +++ b/plugin/aws_key_management/aws_key_management_plugin.cc @@ -621,6 +621,6 @@ maria_declare_plugin(aws_key_management) NULL, settings, "1.0", - MariaDB_PLUGIN_MATURITY_BETA + MariaDB_PLUGIN_MATURITY_STABLE } maria_declare_plugin_end; diff --git a/plugin/cracklib_password_check/cracklib_password_check.c b/plugin/cracklib_password_check/cracklib_password_check.c index 7861d5fd83e..94587a6d659 100644 --- a/plugin/cracklib_password_check/cracklib_password_check.c +++ b/plugin/cracklib_password_check/cracklib_password_check.c @@ -30,6 +30,7 @@ static int crackme(MYSQL_LEX_STRING *username, MYSQL_LEX_STRING *password) const char *res; memcpy(user, username->str, username->length); + user[username->length]= 0; if ((host= strchr(user, '@'))) *host++= 0; diff --git a/plugin/feedback/sender_thread.cc b/plugin/feedback/sender_thread.cc index 66f47e7302a..4742d5f4920 100644 --- a/plugin/feedback/sender_thread.cc +++ b/plugin/feedback/sender_thread.cc @@ -204,7 +204,7 @@ static void send_report(const char *when) /* otherwise, prepare the THD and TABLE_LIST, create and fill the temporary table with data just like - SELECT * FROM INFORMATION_SCHEMA.feedback is doing, + SELECT * FROM INFORMATION_SCHEMA.FEEDBACK is doing, read and concatenate table data into a String. 
*/ if (!(thd= new THD(thd_thread_id))) diff --git a/plugin/server_audit/server_audit.c b/plugin/server_audit/server_audit.c index c0ec9aa6b8d..ee00c9e1385 100644 --- a/plugin/server_audit/server_audit.c +++ b/plugin/server_audit/server_audit.c @@ -1041,6 +1041,7 @@ static int start_logging() error_header(); fprintf(stderr, "logging started to the file %s.\n", alt_fname); strncpy(current_log_buf, alt_fname, sizeof(current_log_buf)); + current_log_buf[sizeof(current_log_buf)-1]= 0; } else if (output_type == OUTPUT_SYSLOG) { @@ -2570,6 +2571,7 @@ static void update_file_path(MYSQL_THD thd, } strncpy(path_buffer, new_name, sizeof(path_buffer)); + path_buffer[sizeof(path_buffer)-1]= 0; file_path= path_buffer; exit_func: internal_stop_logging= 0; @@ -2622,6 +2624,7 @@ static void update_incl_users(MYSQL_THD thd, flogger_mutex_lock(&lock_operations); mark_always_logged(thd); strncpy(incl_user_buffer, new_users, sizeof(incl_user_buffer)); + incl_user_buffer[sizeof(incl_user_buffer)-1]= 0; incl_users= incl_user_buffer; user_coll_fill(&incl_user_coll, incl_users, &excl_user_coll, 1); error_header(); @@ -2640,6 +2643,7 @@ static void update_excl_users(MYSQL_THD thd __attribute__((unused)), flogger_mutex_lock(&lock_operations); mark_always_logged(thd); strncpy(excl_user_buffer, new_users, sizeof(excl_user_buffer)); + excl_user_buffer[sizeof(excl_user_buffer)-1]= 0; excl_users= excl_user_buffer; user_coll_fill(&excl_user_coll, excl_users, &incl_user_coll, 0); error_header(); @@ -2771,6 +2775,7 @@ static void update_syslog_ident(MYSQL_THD thd __attribute__((unused)), { char *new_ident= (*(char **) save) ? 
*(char **) save : empty_str; strncpy(syslog_ident_buffer, new_ident, sizeof(syslog_ident_buffer)); + syslog_ident_buffer[sizeof(syslog_ident_buffer)-1]= 0; syslog_ident= syslog_ident_buffer; error_header(); fprintf(stderr, "SYSYLOG ident was changed to '%s'\n", syslog_ident); diff --git a/scripts/galera_new_cluster.sh b/scripts/galera_new_cluster.sh index b873192cf31..8bf2fa35cec 100755 --- a/scripts/galera_new_cluster.sh +++ b/scripts/galera_new_cluster.sh @@ -5,7 +5,7 @@ # the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. -if [ "${1}" == "-h" -o "${1}" == "--help" ]; then +if [ "${1}" = "-h" ] || [ "${1}" = "--help" ]; then cat <<EOF Usage: ${0} diff --git a/sql-common/client_plugin.c b/sql-common/client_plugin.c index dd87b01d932..f93e50125c5 100644 --- a/sql-common/client_plugin.c +++ b/sql-common/client_plugin.c @@ -375,8 +375,7 @@ mysql_load_plugin_v(MYSQL *mysql, const char *name, int type, if (!(sym= dlsym(dlhandle, plugin_declarations_sym))) { errmsg= "not a plugin"; - (void)dlclose(dlhandle); - goto err; + goto errc; } plugin= (struct st_mysql_client_plugin*)sym; @@ -384,19 +383,19 @@ mysql_load_plugin_v(MYSQL *mysql, const char *name, int type, if (type >=0 && type != plugin->type) { errmsg= "type mismatch"; - goto err; + goto errc; } if (strcmp(name, plugin->name)) { errmsg= "name mismatch"; - goto err; + goto errc; } if (type < 0 && find_plugin(name, plugin->type)) { errmsg= "it is already loaded"; - goto err; + goto errc; } plugin= add_plugin(mysql, plugin, dlhandle, argc, args); @@ -406,6 +405,8 @@ mysql_load_plugin_v(MYSQL *mysql, const char *name, int type, DBUG_PRINT ("leave", ("plugin loaded ok")); DBUG_RETURN (plugin); +errc: + dlclose(dlhandle); err: mysql_mutex_unlock(&LOCK_load_client_plugin); DBUG_PRINT ("leave", ("plugin load error : %s", errmsg)); diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index 87e41817857..88a4e40e373 100644 --- a/sql/CMakeLists.txt +++ 
b/sql/CMakeLists.txt @@ -62,15 +62,22 @@ SET_SOURCE_FILES_PROPERTIES(${GEN_SOURCES} # Gen_lex_token # Make sure sql_yacc.h is generated before compiling gen_lex_token + +IF(NOT CMAKE_GENERATOR MATCHES "Visual Studio") + SET(DEPENDS_gen_lex_token DEPENDS gen_lex_token) + SET(DEPENDS_gen_lex_hash DEPENDS gen_lex_hash) +ENDIF() + + IF(NOT CMAKE_CROSSCOMPILING) - ADD_EXECUTABLE(gen_lex_token gen_lex_token.cc) - ADD_DEPENDENCIES(gen_lex_token GenServerSource) + ADD_EXECUTABLE(gen_lex_token gen_lex_token.cc + ${CMAKE_CURRENT_BINARY_DIR}/sql_yacc.h) ENDIF() ADD_CUSTOM_COMMAND( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/lex_token.h COMMAND gen_lex_token > lex_token.h - DEPENDS gen_lex_token + ${DEPENDS_gen_lex_token} ) ADD_DEFINITIONS(-DMYSQL_SERVER -DHAVE_EVENT_SCHEDULER) @@ -341,7 +348,7 @@ ENDIF() ADD_CUSTOM_COMMAND( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/lex_hash.h COMMAND gen_lex_hash > lex_hash.h - DEPENDS gen_lex_hash + ${DEPENDS_gen_lex_hash} ) MYSQL_ADD_EXECUTABLE(mysql_tzinfo_to_sql tztime.cc COMPONENT Server) diff --git a/sql/discover.cc b/sql/discover.cc index d8ed718fc58..d8bf6ca79c5 100644 --- a/sql/discover.cc +++ b/sql/discover.cc @@ -89,8 +89,7 @@ int readfrm(const char *name, const uchar **frmdata, size_t *len) error= 0; err: - if (file > 0) - (void) mysql_file_close(file, MYF(MY_WME)); + (void) mysql_file_close(file, MYF(MY_WME)); err_end: /* Here when no file */ DBUG_RETURN (error); diff --git a/sql/events.cc b/sql/events.cc index 978a1ebc710..86e85d7f757 100644 --- a/sql/events.cc +++ b/sql/events.cc @@ -1,5 +1,6 @@ /* Copyright (c) 2005, 2013, Oracle and/or its affiliates. + Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -243,6 +244,7 @@ common_1_lev_code: break; case INTERVAL_WEEK: expr/= 7; + /* fall through */ default: close_quote= FALSE; break; diff --git a/sql/filesort.cc b/sql/filesort.cc index 007216b392f..8ca6cd04cda 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -182,12 +182,6 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, outfile= &sort->io_cache; - /* - Release InnoDB's adaptive hash index latch (if holding) before - running a sort. - */ - ha_release_temporary_latches(thd); - my_b_clear(&tempfile); my_b_clear(&buffpek_pointers); buffpek=0; @@ -953,6 +947,7 @@ write_keys(Sort_param *param, SORT_INFO *fs_info, uint count, /* check we won't have more buffpeks than we can possibly keep in memory */ if (my_b_tell(buffpek_pointers) + sizeof(BUFFPEK) > (ulonglong)UINT_MAX) goto err; + bzero(&buffpek, sizeof(buffpek)); buffpek.file_pos= my_b_tell(tempfile); if ((ha_rows) count > param->max_rows) count=(uint) param->max_rows; /* purecov: inspected */ diff --git a/sql/ha_sequence.cc b/sql/ha_sequence.cc index e0e9e2a42e7..7f2248bf3cb 100644 --- a/sql/ha_sequence.cc +++ b/sql/ha_sequence.cc @@ -198,6 +198,18 @@ int ha_sequence::write_row(uchar *buf) /* This calls is from ha_open() as part of create table */ DBUG_RETURN(file->write_row(buf)); } + if (unlikely(sequence->initialized == SEQUENCE::SEQ_IN_ALTER)) + { + int error= 0; + /* This is called from alter table */ + tmp_seq.read_fields(table); + if (tmp_seq.check_and_adjust(0)) + DBUG_RETURN(HA_ERR_SEQUENCE_INVALID_DATA); + sequence->copy(&tmp_seq); + if (!(error= file->write_row(buf))) + sequence->initialized= SEQUENCE::SEQ_READY_TO_USE; + DBUG_RETURN(error); + } if (unlikely(sequence->initialized != SEQUENCE::SEQ_READY_TO_USE)) DBUG_RETURN(HA_ERR_WRONG_COMMAND); @@ -280,7 +292,7 @@ int ha_sequence::extra(enum ha_extra_function operation) if (operation == 
HA_EXTRA_PREPARE_FOR_ALTER_TABLE) { /* In case of ALTER TABLE allow ::write_row() to copy rows */ - sequence->initialized= SEQUENCE::SEQ_IN_PREPARE; + sequence->initialized= SEQUENCE::SEQ_IN_ALTER; } return file->extra(operation); } diff --git a/sql/handler.cc b/sql/handler.cc index 9b66801b109..c74675ed113 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2020,44 +2020,6 @@ commit_checkpoint_notify_ha(handlerton *hton, void *cookie) /** - @details - This function should be called when MySQL sends rows of a SELECT result set - or the EOF mark to the client. It releases a possible adaptive hash index - S-latch held by thd in InnoDB and also releases a possible InnoDB query - FIFO ticket to enter InnoDB. To save CPU time, InnoDB allows a thd to - keep them over several calls of the InnoDB handler interface when a join - is executed. But when we let the control to pass to the client they have - to be released because if the application program uses mysql_use_result(), - it may deadlock on the S-latch if the application on another connection - performs another SQL query. In MySQL-4.1 this is even more important because - there a connection can have several SELECT queries open at the same time. - - @param thd the thread handle of the current connection - - @return - always 0 -*/ - -int ha_release_temporary_latches(THD *thd) -{ - Ha_trx_info *info; - - /* - Note that below we assume that only transactional storage engines - may need release_temporary_latches(). If this will ever become false, - we could iterate on thd->open_tables instead (and remove duplicates - as if (!seen[hton->slot]) { seen[hton->slot]=1; ... }). 
- */ - for (info= thd->transaction.stmt.ha_list; info; info= info->next()) - { - handlerton *hton= info->ht(); - if (hton && hton->release_temporary_latches) - hton->release_temporary_latches(hton, thd); - } - return 0; -} - -/** Check if all storage engines used in transaction agree that after rollback to savepoint it is safe to release MDL locks acquired after savepoint creation. @@ -5685,6 +5647,20 @@ bool handler::check_table_binlog_row_based_internal(bool binlog_row) table->file->partition_ht()->db_type != DB_TYPE_INNODB) || (thd->wsrep_ignore_table == true))) return 0; + + /* enforce wsrep_max_ws_rows */ + if (WSREP(thd) && table->s->tmp_table == NO_TMP_TABLE) + { + thd->wsrep_affected_rows++; + if (wsrep_max_ws_rows && + thd->wsrep_exec_mode != REPL_RECV && + thd->wsrep_affected_rows > wsrep_max_ws_rows) + { + trans_rollback_stmt(thd) || trans_rollback(thd); + my_message(ER_ERROR_DURING_COMMIT, "wsrep_max_ws_rows exceeded", MYF(0)); + return ER_ERROR_DURING_COMMIT; + } + } #endif return (table->s->cached_row_logging_check && @@ -5893,7 +5869,7 @@ int handler::ha_external_lock(THD *thd, int lock_type) DBUG_EXECUTE_IF("external_lock_failure", error= HA_ERR_GENERIC;); - if (error == 0) + if (error == 0 || lock_type == F_UNLCK) { m_lock_type= lock_type; cached_table_flags= table_flags(); diff --git a/sql/handler.h b/sql/handler.h index 98269118bea..52e7f276db0 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -1235,7 +1235,6 @@ struct handlerton enum_binlog_command binlog_command, const char *query, uint query_length, const char *db, const char *table_name); - int (*release_temporary_latches)(handlerton *hton, THD *thd); /* Get log status. 
@@ -4362,9 +4361,6 @@ int ha_change_key_cache_param(KEY_CACHE *key_cache); int ha_repartition_key_cache(KEY_CACHE *key_cache); int ha_change_key_cache(KEY_CACHE *old_key_cache, KEY_CACHE *new_key_cache); -/* report to InnoDB that control passes to the client */ -int ha_release_temporary_latches(THD *thd); - /* transactions: interface to handlerton functions */ int ha_start_consistent_snapshot(THD *thd); int ha_commit_or_rollback_by_xid(XID *xid, bool commit); diff --git a/sql/item.cc b/sql/item.cc index f4236eee013..173444ad580 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -1,6 +1,6 @@ /* Copyright (c) 2000, 2016, Oracle and/or its affiliates. - Copyright (c) 2010, 2016, MariaDB + Copyright (c) 2010, 2017, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -3635,7 +3635,7 @@ Item_param::Item_param(THD *thd, const LEX_CSTRING *name_arg, state(NO_VALUE), /* Don't pretend to be a literal unless value for this item is set. */ item_type(PARAM_ITEM), - indicators(0), indicator(STMT_INDICATOR_NONE), + indicator(STMT_INDICATOR_NONE), set_param_func(default_set_param_func), m_out_param_info(NULL), /* @@ -5763,7 +5763,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference) SELECT_LEX *select= thd->lex->current_select; thd->change_item_tree(reference, - select->parsing_place == IN_GROUP_BY && + select->context_analysis_place == IN_GROUP_BY && alias_name_used ? *rf->ref : rf); /* diff --git a/sql/item.h b/sql/item.h index 76ce4aa935f..bc349e8003a 100644 --- a/sql/item.h +++ b/sql/item.h @@ -3066,10 +3066,8 @@ public: }; /* - Used for bulk protocol. Indicates if we should expect - indicators byte before value of the parameter + Used for bulk protocol only. 
*/ - my_bool indicators; enum enum_indicator_type indicator; /* diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 07b5f90bf69..49135e846cd 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -5264,6 +5264,15 @@ int Regexp_processor_pcre::default_regex_flags() return default_regex_flags_pcre(current_thd); } +void Regexp_processor_pcre::set_recursion_limit(THD *thd) +{ + long stack_used; + DBUG_ASSERT(thd == current_thd); + stack_used= available_stack_size(thd->thread_stack, &stack_used); + m_pcre_extra.match_limit_recursion= + (my_thread_stack_size - stack_used)/my_pcre_frame_size; +} + /** Convert string to lib_charset, if needed. @@ -5355,15 +5364,77 @@ void Regexp_processor_pcre::pcre_exec_warn(int rc) const */ switch (rc) { + case PCRE_ERROR_NULL: + errmsg= "pcre_exec: null arguement passed"; + break; + case PCRE_ERROR_BADOPTION: + errmsg= "pcre_exec: bad option"; + break; + case PCRE_ERROR_BADMAGIC: + errmsg= "pcre_exec: bad magic - not a compiled regex"; + break; + case PCRE_ERROR_UNKNOWN_OPCODE: + errmsg= "pcre_exec: error in compiled regex"; + break; case PCRE_ERROR_NOMEMORY: errmsg= "pcre_exec: Out of memory"; break; + case PCRE_ERROR_NOSUBSTRING: + errmsg= "pcre_exec: no substring"; + break; + case PCRE_ERROR_MATCHLIMIT: + errmsg= "pcre_exec: match limit exceeded"; + break; + case PCRE_ERROR_CALLOUT: + errmsg= "pcre_exec: callout error"; + break; case PCRE_ERROR_BADUTF8: errmsg= "pcre_exec: Invalid utf8 byte sequence in the subject string"; break; + case PCRE_ERROR_BADUTF8_OFFSET: + errmsg= "pcre_exec: Started at invalid location within utf8 byte sequence"; + break; + case PCRE_ERROR_PARTIAL: + errmsg= "pcre_exec: partial match"; + break; + case PCRE_ERROR_INTERNAL: + errmsg= "pcre_exec: internal error"; + break; + case PCRE_ERROR_BADCOUNT: + errmsg= "pcre_exec: ovesize is negative"; + break; + case PCRE_ERROR_RECURSIONLIMIT: + my_snprintf(buf, sizeof(buf), "pcre_exec: recursion limit of %ld exceeded", + 
m_pcre_extra.match_limit_recursion); + errmsg= buf; + break; + case PCRE_ERROR_BADNEWLINE: + errmsg= "pcre_exec: bad newline options"; + break; + case PCRE_ERROR_BADOFFSET: + errmsg= "pcre_exec: start offset negative or greater than string length"; + break; + case PCRE_ERROR_SHORTUTF8: + errmsg= "pcre_exec: ended in middle of utf8 sequence"; + break; + case PCRE_ERROR_JIT_STACKLIMIT: + errmsg= "pcre_exec: insufficient stack memory for JIT compile"; + break; case PCRE_ERROR_RECURSELOOP: errmsg= "pcre_exec: Recursion loop detected"; break; + case PCRE_ERROR_BADMODE: + errmsg= "pcre_exec: compiled pattern passed to wrong bit library function"; + break; + case PCRE_ERROR_BADENDIANNESS: + errmsg= "pcre_exec: compiled pattern passed to wrong endianness processor"; + break; + case PCRE_ERROR_JIT_BADOPTION: + errmsg= "pcre_exec: bad jit option"; + break; + case PCRE_ERROR_BADLENGTH: + errmsg= "pcre_exec: negative length"; + break; default: /* As other error codes should normally not happen, @@ -5399,8 +5470,8 @@ int Regexp_processor_pcre::pcre_exec_with_warn(const pcre *code, bool Regexp_processor_pcre::exec(const char *str, int length, int offset) { - m_pcre_exec_rc= pcre_exec_with_warn(m_pcre, NULL, str, length, offset, 0, - m_SubStrVec, m_subpatterns_needed * 3); + m_pcre_exec_rc= pcre_exec_with_warn(m_pcre, &m_pcre_extra, str, length, offset, 0, + m_SubStrVec, array_elements(m_SubStrVec)); return false; } @@ -5410,10 +5481,10 @@ bool Regexp_processor_pcre::exec(String *str, int offset, { if (!(str= convert_if_needed(str, &subject_converter))) return true; - m_pcre_exec_rc= pcre_exec_with_warn(m_pcre, NULL, + m_pcre_exec_rc= pcre_exec_with_warn(m_pcre, &m_pcre_extra, str->c_ptr_safe(), str->length(), offset, 0, - m_SubStrVec, m_subpatterns_needed * 3); + m_SubStrVec, array_elements(m_SubStrVec)); if (m_pcre_exec_rc > 0) { uint i; @@ -5471,7 +5542,7 @@ Item_func_regex::fix_length_and_dec() if (agg_arg_charsets_for_comparison(cmp_collation, args, 2)) return; - 
re.init(cmp_collation.collation, 0, 0); + re.init(cmp_collation.collation, 0); re.fix_owner(this, args[0], args[1]); } @@ -5495,7 +5566,7 @@ Item_func_regexp_instr::fix_length_and_dec() if (agg_arg_charsets_for_comparison(cmp_collation, args, 2)) return; - re.init(cmp_collation.collation, 0, 1); + re.init(cmp_collation.collation, 0); re.fix_owner(this, args[0], args[1]); max_length= MY_INT32_NUM_DECIMAL_DIGITS; // See also Item_func_locate } diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index e21e074a7a3..f19cae72cb5 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -2554,6 +2554,7 @@ public: class Regexp_processor_pcre { pcre *m_pcre; + pcre_extra m_pcre_extra; bool m_conversion_is_needed; bool m_is_const; int m_library_flags; @@ -2562,7 +2563,6 @@ class Regexp_processor_pcre String m_prev_pattern; int m_pcre_exec_rc; int m_SubStrVec[30]; - uint m_subpatterns_needed; void pcre_exec_warn(int rc) const; int pcre_exec_with_warn(const pcre *code, const pcre_extra *extra, const char *subject, int length, int startoffset, @@ -2576,11 +2576,14 @@ public: m_pcre(NULL), m_conversion_is_needed(true), m_is_const(0), m_library_flags(0), m_data_charset(&my_charset_utf8_general_ci), - m_library_charset(&my_charset_utf8_general_ci), - m_subpatterns_needed(0) - {} + m_library_charset(&my_charset_utf8_general_ci) + { + m_pcre_extra.flags= PCRE_EXTRA_MATCH_LIMIT_RECURSION; + m_pcre_extra.match_limit_recursion= 100L; + } int default_regex_flags(); - void init(CHARSET_INFO *data_charset, int extra_flags, uint nsubpatterns_arg) + void set_recursion_limit(THD *); + void init(CHARSET_INFO *data_charset, int extra_flags) { m_library_flags= default_regex_flags() | extra_flags | (data_charset != &my_charset_bin ? 
@@ -2594,7 +2597,6 @@ public: m_conversion_is_needed= (data_charset != &my_charset_bin) && !my_charset_same(data_charset, m_library_charset); - m_subpatterns_needed= nsubpatterns_arg; } void fix_owner(Item_func *owner, Item *subject_arg, Item *pattern_arg); bool compile(String *pattern, bool send_error); diff --git a/sql/item_func.cc b/sql/item_func.cc index 63a3937a12e..6fe6ddecc10 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2000, 2015, Oracle and/or its affiliates. - Copyright (c) 2009, 2015, MariaDB + Copyright (c) 2009, 2017, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -1730,6 +1730,7 @@ my_decimal *Item_func_mod::decimal_op(my_decimal *decimal_value) return decimal_value; case E_DEC_DIV_ZERO: signal_divide_by_null(); + /* fall through */ default: null_value= 1; return 0; @@ -3649,6 +3650,7 @@ longlong Item_master_gtid_wait::val_int() { DBUG_ASSERT(fixed == 1); longlong result= 0; + String *gtid_pos = args[0]->val_str(&value); if (args[0]->null_value) { @@ -3660,7 +3662,6 @@ longlong Item_master_gtid_wait::val_int() #ifdef HAVE_REPLICATION THD* thd= current_thd; longlong timeout_us; - String *gtid_pos = args[0]->val_str(&value); if (arg_count==2 && !args[1]->null_value) timeout_us= (longlong)(1e6*args[1]->val_real()); diff --git a/sql/item_func.h b/sql/item_func.h index 077f69fe0f5..03eb01bbcc9 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -1622,7 +1622,11 @@ public: longlong val_int(); const char *func_name() const { return "coercibility"; } void fix_length_and_dec() { max_length=10; maybe_null= 0; } - table_map not_null_tables() const { return 0; } + bool eval_not_null_tables(void *) + { + not_null_tables_cache= 0; + return false; + } Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) { return this; } bool const_item() const { return true; } @@ -1953,7 +1957,11 @@ public: } } void 
cleanup(); - table_map not_null_tables() const { return 0; } + bool eval_not_null_tables(void *opt_arg) + { + not_null_tables_cache= 0; + return 0; + } bool is_expensive() { return 1; } virtual void print(String *str, enum_query_type query_type); bool check_vcol_func_processor(void *arg) @@ -2201,7 +2209,7 @@ public: :Item_long_func(thd, a, b) {} longlong val_int(); const char *func_name() const { return "master_gtid_wait"; } - void fix_length_and_dec() { max_length= 2; maybe_null=0;} + void fix_length_and_dec() { max_length=2; } bool check_vcol_func_processor(void *arg) { return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE); @@ -2482,7 +2490,11 @@ public: bool is_expensive_processor(void *arg) { return TRUE; } enum Functype functype() const { return FT_FUNC; } const char *func_name() const { return "match"; } - table_map not_null_tables() const { return 0; } + bool eval_not_null_tables(void *opt_arg) + { + not_null_tables_cache= 0; + return 0; + } bool fix_fields(THD *thd, Item **ref); bool eq(const Item *, bool binary_cmp) const; /* The following should be safe, even if we compare doubles */ @@ -2769,6 +2781,11 @@ public: clone->sp_result_field= NULL; return clone; } + bool eval_not_null_tables(void *opt_arg) + { + not_null_tables_cache= 0; + return 0; + } }; @@ -2863,8 +2880,12 @@ public: my_decimal *val_decimal(my_decimal *); void fix_length_and_dec(); const char *func_name() const { return "last_value"; } - table_map not_null_tables() const { return 0; } const Type_handler *type_handler() const { return last_value->type_handler(); } + bool eval_not_null_tables(void *) + { + not_null_tables_cache= 0; + return 0; + } bool const_item() const { return 0; } void evaluate_sideeffects(); void update_used_tables() diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 4fcff4c0d2e..fb7a60f356e 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -225,7 +225,7 @@ String *Item_func_sha2::val_str_ascii(String *str) break; case 0: // 
SHA-256 is the default digest_length= 256; - /* fall trough */ + /* fall through */ case 256: my_sha256(digest_buf, input_ptr, input_len); break; @@ -271,7 +271,7 @@ void Item_func_sha2::fix_length_and_dec() switch (sha_variant) { case 0: // SHA-256 is the default sha_variant= 256; - /* fall trough */ + /* fall through */ case 512: case 384: case 256: @@ -1349,7 +1349,7 @@ void Item_func_regexp_replace::fix_length_and_dec() if (agg_arg_charsets_for_string_result_with_comparison(collation, args, 3)) return; max_length= MAX_BLOB_WIDTH; - re.init(collation.collation, 0, 10); + re.init(collation.collation, 0); re.fix_owner(this, args[0], args[1]); } @@ -1484,7 +1484,7 @@ void Item_func_regexp_substr::fix_length_and_dec() if (agg_arg_charsets_for_string_result_with_comparison(collation, args, 2)) return; fix_char_length(args[0]->max_char_length()); - re.init(collation.collation, 0, 10); + re.init(collation.collation, 0); re.fix_owner(this, args[0], args[1]); } @@ -5163,7 +5163,7 @@ bool Item_dyncol_get::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date) goto null; case DYN_COL_INT: signed_value= 1; // For error message - /* fall_trough */ + /* fall through */ case DYN_COL_UINT: if (signed_value || val.x.ulong_value <= LONGLONG_MAX) { @@ -5177,7 +5177,7 @@ bool Item_dyncol_get::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date) } /* let double_to_datetime_with_warn() issue the warning message */ val.x.double_value= static_cast<double>(ULONGLONG_MAX); - /* fall_trough */ + /* fall through */ case DYN_COL_DOUBLE: if (double_to_datetime_with_warn(val.x.double_value, ltime, fuzzy_date, 0 /* TODO */)) diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 09fbf3e239c..dd362cbc15e 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -1425,8 +1425,9 @@ Item_in_subselect::Item_in_subselect(THD *thd, Item * left_exp, st_select_lex *select_lex): Item_exists_subselect(thd), left_expr_cache(0), first_execution(TRUE), in_strategy(SUBS_NOT_TRANSFORMED), - 
pushed_cond_guards(NULL), is_jtbm_merged(FALSE), is_jtbm_const_tab(FALSE), - is_flattenable_semijoin(FALSE), is_registered_semijoin(FALSE), + pushed_cond_guards(NULL), do_not_convert_to_sj(FALSE), is_jtbm_merged(FALSE), + is_jtbm_const_tab(FALSE), is_flattenable_semijoin(FALSE), + is_registered_semijoin(FALSE), upper_item(0) { DBUG_ENTER("Item_in_subselect::Item_in_subselect"); @@ -2587,6 +2588,27 @@ bool Item_in_subselect::inject_in_to_exists_cond(JOIN *join_arg) DBUG_ENTER("Item_in_subselect::inject_in_to_exists_cond"); DBUG_ASSERT(thd == join_arg->thd); + if (select_lex->min_max_opt_list.elements) + { + /* + MIN/MAX optimizations have been applied to Item_sum objects + of the subquery this subquery predicate in opt_sum_query(). + Injection of new condition invalidates this optimizations. + Thus those optimizations must be rolled back. + */ + List_iterator_fast<Item_sum> it(select_lex->min_max_opt_list); + Item_sum *item; + while ((item= it++)) + { + item->clear(); + item->reset_forced_const(); + } + if (where_item) + where_item->update_used_tables(); + if (having_item) + having_item->update_used_tables(); + } + if (where_item) { List<Item> *and_args= NULL; @@ -3761,7 +3783,10 @@ int subselect_single_select_engine::exec() } } if (item->engine_changed(this)) + { + thd->lex->current_select= save_select; DBUG_RETURN(1); + } } if (select_lex->uncacheable && select_lex->uncacheable != UNCACHEABLE_EXPLAIN diff --git a/sql/item_subselect.h b/sql/item_subselect.h index cb60b646979..6112c1c22f4 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -505,6 +505,8 @@ public: Item *left_expr_orig; /* Priority of this predicate in the convert-to-semi-join-nest process. */ int sj_convert_priority; + /* May be TRUE only for the candidates to semi-join conversion */ + bool do_not_convert_to_sj; /* Types of left_expr and subquery's select list allow to perform subquery materialization. 
Currently, we set this to FALSE when it as well could @@ -595,8 +597,8 @@ public: Item_in_subselect(THD *thd_arg): Item_exists_subselect(thd_arg), left_expr_cache(0), first_execution(TRUE), in_strategy(SUBS_NOT_TRANSFORMED), - pushed_cond_guards(NULL), func(NULL), is_jtbm_merged(FALSE), - is_jtbm_const_tab(FALSE), upper_item(0) {} + pushed_cond_guards(NULL), func(NULL), do_not_convert_to_sj(FALSE), + is_jtbm_merged(FALSE), is_jtbm_const_tab(FALSE), upper_item(0) {} void cleanup(); subs_type substype() { return IN_SUBS; } void reset() @@ -651,6 +653,8 @@ public: */ int get_identifier(); + void block_conversion_to_sj () { do_not_convert_to_sj= TRUE; } + bool test_strategy(uchar strategy) { return MY_TEST(in_strategy & strategy); } diff --git a/sql/item_sum.h b/sql/item_sum.h index 65306ab6f48..a160d0ee522 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -485,6 +485,7 @@ public: used_tables_cache= 0; const_item_cache= true; } + void reset_forced_const() { const_item_cache= false; } virtual bool const_during_execution() const { return false; } virtual void print(String *str, enum_query_type query_type); void fix_num_length_and_dec(); diff --git a/sql/item_windowfunc.cc b/sql/item_windowfunc.cc index 6ab903a81bb..a216930cad6 100644 --- a/sql/item_windowfunc.cc +++ b/sql/item_windowfunc.cc @@ -71,7 +71,7 @@ Item_window_func::fix_fields(THD *thd, Item **ref) { DBUG_ASSERT(fixed == 0); - enum_parsing_place place= thd->lex->current_select->parsing_place; + enum_parsing_place place= thd->lex->current_select->context_analysis_place; if (!(place == SELECT_LIST || place == IN_ORDER_BY)) { diff --git a/sql/lock.cc b/sql/lock.cc index 12de6ae0616..a34613fb7fe 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -422,6 +422,7 @@ void mysql_unlock_tables(THD *thd, MYSQL_LOCK *sql_lock) void mysql_unlock_tables(THD *thd, MYSQL_LOCK *sql_lock, bool free_lock) { DBUG_ENTER("mysql_unlock_tables"); + bool errors= thd->is_error(); THD_STAGE_INFO(thd, stage_unlocking_tables); if 
(sql_lock->table_count) @@ -430,6 +431,8 @@ void mysql_unlock_tables(THD *thd, MYSQL_LOCK *sql_lock, bool free_lock) thr_multi_unlock(sql_lock->locks, sql_lock->lock_count, 0); if (free_lock) my_free(sql_lock); + if (!errors) + thd->clear_error(); DBUG_VOID_RETURN; } diff --git a/sql/log.cc b/sql/log.cc index 0ffa4a5a82d..166180e08a4 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -5681,7 +5681,11 @@ int THD::binlog_write_table_map(TABLE *table, bool is_transactional, /* Annotate event should be written not more than once */ *with_annotate= 0; if ((error= writer.write(&anno))) + { + if (my_errno == EFBIG) + cache_data->set_incident(); DBUG_RETURN(error); + } } if ((error= writer.write(&the_event))) DBUG_RETURN(error); @@ -9220,8 +9224,10 @@ void TC_LOG_MMAP::close() mysql_cond_destroy(&COND_pool); mysql_cond_destroy(&COND_active); mysql_cond_destroy(&COND_queue_busy); + /* fall through */ case 5: data[0]='A'; // garble the first (signature) byte, in case mysql_file_delete fails + /* fall through */ case 4: for (i=0; i < npages; i++) { @@ -9230,10 +9236,13 @@ void TC_LOG_MMAP::close() mysql_mutex_destroy(&pages[i].lock); mysql_cond_destroy(&pages[i].cond); } + /* fall through */ case 3: my_free(pages); + /* fall through */ case 2: my_munmap((char*)data, (size_t)file_length); + /* fall through */ case 1: mysql_file_close(fd, MYF(0)); } diff --git a/sql/log_event.cc b/sql/log_event.cc index 3c062975041..24bcaf8a60b 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -8404,7 +8404,6 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi) consistent. */ #ifdef WITH_WSREP - /*Set wsrep_affected_rows = 0 */ thd->wsrep_affected_rows= 0; #endif diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc index 9ba29ddb0f6..8f1cfe17656 100644 --- a/sql/log_event_old.cc +++ b/sql/log_event_old.cc @@ -1587,7 +1587,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi) rli->report(ERROR_LEVEL, thd->net.last_errno, NULL, "Error in %s event: row application failed. 
%s", get_type_str(), thd->net.last_error); - thd->is_slave_error = 1; + thd->is_slave_error= 1; break; } diff --git a/sql/mysqld.cc b/sql/mysqld.cc index d7c46071c38..6da1bd75531 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -105,6 +105,7 @@ #include "sp_rcontext.h" #include "sp_cache.h" #include "sql_reload.h" // reload_acl_and_cache +#include "pcre.h" #ifdef HAVE_POLL_H #include <poll.h> @@ -3721,6 +3722,7 @@ static void init_libstrings() #endif } +ulonglong my_pcre_frame_size; static void init_pcre() { @@ -3728,6 +3730,8 @@ static void init_pcre() pcre_free= pcre_stack_free= my_str_free_mysqld; #ifndef EMBEDDED_LIBRARY pcre_stack_guard= check_enough_stack_size_slow; + /* See http://pcre.org/original/doc/html/pcrestack.html */ + my_pcre_frame_size= -pcre_exec(NULL, NULL, NULL, -999, -999, 0, NULL, 0) + 16; #endif } @@ -7297,13 +7301,6 @@ struct my_option my_long_options[]= &max_binlog_dump_events, &max_binlog_dump_events, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif /* HAVE_REPLICATION */ -#ifdef SAFE_MUTEX - {"debug-mutex-deadlock-detector", 0, - "Enable checking of wrong mutex usage.", - &safe_mutex_deadlock_detector, - &safe_mutex_deadlock_detector, - 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, -#endif {"debug-no-sync", 0, "Disables system sync calls. Only for running tests or debugging!", &my_disable_sync, &my_disable_sync, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -8252,7 +8249,7 @@ static int show_default_keycache(THD *thd, SHOW_VAR *var, char *buff, { struct st_data { KEY_CACHE_STATISTICS stats; - SHOW_VAR var[8]; + SHOW_VAR var[9]; } *data; SHOW_VAR *v; @@ -9363,7 +9360,10 @@ mysql_getopt_value(const char *name, uint length, return (uchar**) &key_cache->changed_blocks_hash_size; } } + /* We return in all cases above. 
Let us silence -Wimplicit-fallthrough */ + DBUG_ASSERT(0); #ifdef HAVE_REPLICATION + /* fall through */ case OPT_REPLICATE_DO_DB: case OPT_REPLICATE_DO_TABLE: case OPT_REPLICATE_IGNORE_DB: diff --git a/sql/mysqld.h b/sql/mysqld.h index b7de4ecc324..38e42dd61f1 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -133,7 +133,7 @@ extern ulong slave_retried_transactions; extern ulong slave_run_triggers_for_rbr; extern ulonglong slave_type_conversions_options; extern my_bool read_only, opt_readonly; -extern my_bool lower_case_file_system; +extern MYSQL_PLUGIN_IMPORT my_bool lower_case_file_system; extern my_bool opt_enable_named_pipe, opt_sync_frm, opt_allow_suspicious_udfs; extern my_bool opt_secure_auth; extern const char *current_dbug_option; @@ -527,6 +527,8 @@ extern pthread_t signal_thread; extern struct st_VioSSLFd * ssl_acceptor_fd; #endif /* HAVE_OPENSSL */ +extern ulonglong my_pcre_frame_size; + /* The following variables were under INNODB_COMPABILITY_HOOKS */ diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 284f4348080..9161dee7836 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -6955,7 +6955,10 @@ QUICK_SELECT_I *TRP_ROR_UNION::make_quick(PARAM *param, { if (!(quick= (*scan)->make_quick(param, FALSE, &quick_roru->alloc)) || quick_roru->push_quick_back(quick)) + { + delete quick_roru; DBUG_RETURN(NULL); + } } quick_roru->records= records; quick_roru->read_time= read_cost; @@ -7501,7 +7504,7 @@ SEL_TREE *Item_bool_func::get_full_func_mm_tree(RANGE_OPT_PARAM *param, param->current_table); #ifdef HAVE_SPATIAL Field::geometry_type sav_geom_type; - LINT_INIT(sav_geom_type); + LINT_INIT_STRUCT(sav_geom_type); if (field_item->field->type() == MYSQL_TYPE_GEOMETRY) { @@ -10795,9 +10798,7 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, */ thd->mem_root= old_root; - if (!quick || create_err) - return 0; /* no ranges found */ - if (quick->init()) + if (!quick || create_err || quick->init()) goto err; quick->records= records; diff --git 
a/sql/opt_subselect.cc b/sql/opt_subselect.cc index b18fb8f2ae5..1b245342f76 100644 --- a/sql/opt_subselect.cc +++ b/sql/opt_subselect.cc @@ -980,6 +980,25 @@ bool check_for_outer_joins(List<TABLE_LIST> *join_list) } +void find_and_block_conversion_to_sj(Item *to_find, + List_iterator_fast<Item_in_subselect> &li) +{ + if (to_find->type() != Item::SUBSELECT_ITEM || + ((Item_subselect *) to_find)->substype() != Item_subselect::IN_SUBS) + return; + Item_in_subselect *in_subq; + li.rewind(); + while ((in_subq= li++)) + { + if (in_subq == to_find) + { + in_subq->block_conversion_to_sj(); + return; + } + } +} + + /* Convert semi-join subquery predicates into semi-join join nests @@ -1032,7 +1051,6 @@ bool convert_join_subqueries_to_semijoins(JOIN *join) Query_arena *arena, backup; Item_in_subselect *in_subq; THD *thd= join->thd; - List_iterator<TABLE_LIST> ti(join->select_lex->leaf_tables); DBUG_ENTER("convert_join_subqueries_to_semijoins"); if (join->select_lex->sj_subselects.is_empty()) @@ -1050,6 +1068,89 @@ bool convert_join_subqueries_to_semijoins(JOIN *join) subq_sel->update_used_tables(); } + /* + Check all candidates to semi-join conversion that occur + in ON expressions of outer join. Set the flag blocking + this conversion for them. 
+ */ + TABLE_LIST *tbl; + List_iterator<TABLE_LIST> ti(join->select_lex->leaf_tables); + while ((tbl= ti++)) + { + TABLE_LIST *embedded; + TABLE_LIST *embedding= tbl; + do + { + embedded= embedding; + bool block_conversion_to_sj= false; + if (embedded->on_expr) + { + /* + Conversion of an IN subquery predicate into semi-join + is blocked now if the predicate occurs: + - in the ON expression of an outer join + - in the ON expression of an inner join embedded directly + or indirectly in the inner nest of an outer join + */ + for (TABLE_LIST *tl= embedded; tl; tl= tl->embedding) + { + if (tl->outer_join) + { + block_conversion_to_sj= true; + break; + } + } + } + if (block_conversion_to_sj) + { + Item *cond= embedded->on_expr; + if (!cond) + ; + else if (cond->type() != Item::COND_ITEM) + find_and_block_conversion_to_sj(cond, li); + else if (((Item_cond*) cond)->functype() == + Item_func::COND_AND_FUNC) + { + Item *item; + List_iterator<Item> it(*(((Item_cond*) cond)->argument_list())); + while ((item= it++)) + { + find_and_block_conversion_to_sj(item, li); + } + } + } + embedding= embedded->embedding; + } + while (embedding && + embedding->nested_join->join_list.head() == embedded); + } + + /* + Block conversion to semi-joins for those candidates that + are encountered in the WHERE condition of the multi-table view + with CHECK OPTION if this view is used in UPDATE/DELETE. + (This limitation can be, probably, easily lifted.) + */ + li.rewind(); + while ((in_subq= li++)) + { + if (in_subq->emb_on_expr_nest != NO_JOIN_NEST && + in_subq->emb_on_expr_nest->effective_with_check) + { + in_subq->block_conversion_to_sj(); + } + } + + if (join->select_options & SELECT_STRAIGHT_JOIN) + { + /* Block conversion to semijoins for all candidates */ + li.rewind(); + while ((in_subq= li++)) + { + in_subq->block_conversion_to_sj(); + } + } + li.rewind(); /* First, convert child join's subqueries. 
We proceed bottom-up here */ while ((in_subq= li++)) @@ -1068,8 +1169,10 @@ bool convert_join_subqueries_to_semijoins(JOIN *join) if (convert_join_subqueries_to_semijoins(child_join)) DBUG_RETURN(TRUE); + + in_subq->sj_convert_priority= - MY_TEST(in_subq->emb_on_expr_nest != NO_JOIN_NEST) * MAX_TABLES * 2 + + MY_TEST(in_subq->do_not_convert_to_sj) * MAX_TABLES * 2 + in_subq->is_correlated * MAX_TABLES + child_join->outer_tables; } @@ -1102,7 +1205,7 @@ bool convert_join_subqueries_to_semijoins(JOIN *join) bool remove_item= TRUE; /* Stop processing if we've reached a subquery that's attached to the ON clause */ - if (in_subq->emb_on_expr_nest != NO_JOIN_NEST) + if (in_subq->do_not_convert_to_sj) break; if (in_subq->is_flattenable_semijoin) diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index 9bc21ab3ac3..ab587b8b279 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2000, 2011, Oracle and/or its affiliates. - Copyright (c) 2008-2011 Monty Program Ab + Copyright (c) 2008, 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -254,6 +254,8 @@ int opt_sum_query(THD *thd, int error= 0; DBUG_ENTER("opt_sum_query"); + thd->lex->current_select->min_max_opt_list.empty(); + if (conds) where_tables= conds->used_tables(); @@ -447,7 +449,14 @@ int opt_sum_query(THD *thd, item_sum->aggregator_clear(); } else + { item_sum->reset_and_add(); + /* + Save a reference to the item for possible rollback + of the min/max optimizations for this select + */ + thd->lex->current_select->min_max_opt_list.push_back(item_sum); + } item_sum->make_const(); recalc_const_item= 1; break; @@ -1042,6 +1051,7 @@ static int maxmin_in_range(bool max_fl, Field* field, COND *cond) case Item_func::LT_FUNC: case Item_func::LE_FUNC: less_fl= 1; + /* fall through */ case Item_func::GT_FUNC: case Item_func::GE_FUNC: { diff --git a/sql/rpl_gtid.cc b/sql/rpl_gtid.cc index a2617de2757..c385434e41e 100644 --- a/sql/rpl_gtid.cc +++ b/sql/rpl_gtid.cc @@ -2045,7 +2045,7 @@ gtid_waiting::wait_for_pos(THD *thd, String *gtid_str, longlong timeout_us) { case -1: status_var_increment(thd->status_var.master_gtid_wait_timeouts); - /* Deliberate fall through. 
*/ + /* fall through */ case 0: status_var_add(thd->status_var.master_gtid_wait_time, microsecond_interval_timer() - before); diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc index 60c887965fb..e90557efd0d 100644 --- a/sql/rpl_mi.cc +++ b/sql/rpl_mi.cc @@ -41,7 +41,7 @@ Master_info::Master_info(LEX_CSTRING *connection_name_arg, master_id(0), prev_master_id(0), using_gtid(USE_GTID_NO), events_queued_since_last_gtid(0), gtid_reconnect_event_skip_count(0), gtid_event_seen(false), - in_start_all_slaves(0), in_stop_all_slaves(0), + in_start_all_slaves(0), in_stop_all_slaves(0), in_flush_all_relay_logs(0), users(0), killed(0) { char *tmp; @@ -663,7 +663,7 @@ file '%s')", fname); mi->connect_retry= (uint) connect_retry; mi->ssl= (my_bool) ssl; mi->ssl_verify_server_cert= ssl_verify_server_cert; - mi->heartbeat_period= master_heartbeat_period; + mi->heartbeat_period= MY_MIN(SLAVE_MAX_HEARTBEAT_PERIOD, master_heartbeat_period); } DBUG_PRINT("master_info",("log_file_name: %s position: %ld", mi->master_log_name, @@ -798,8 +798,8 @@ int flush_master_info(Master_info* mi, contents of file). But because of number of lines in the first line of file we don't care about this garbage. 
*/ - char heartbeat_buf[sizeof(mi->heartbeat_period) * 4]; // buffer to suffice always - sprintf(heartbeat_buf, "%.3f", mi->heartbeat_period); + char heartbeat_buf[FLOATING_POINT_BUFFER]; + my_fcvt(mi->heartbeat_period, 3, heartbeat_buf, NULL); my_b_seek(file, 0L); my_b_printf(file, "%u\n%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n%s\n%s\n%s\n%s\n%s\n%d\n%s\n%s\n%s\n%s\n%d\n%s\n%s\n" @@ -1979,4 +1979,53 @@ void prot_store_ids(THD *thd, DYNAMIC_ARRAY *ids) return; } +bool Master_info_index::flush_all_relay_logs() +{ + DBUG_ENTER("flush_all_relay_logs"); + bool result= false; + int error= 0; + mysql_mutex_lock(&LOCK_active_mi); + for (uint i= 0; i< master_info_hash.records; i++) + { + Master_info *mi; + mi= (Master_info *) my_hash_element(&master_info_hash, i); + mi->in_flush_all_relay_logs= 0; + } + for (uint i=0; i < master_info_hash.records;) + { + Master_info *mi; + mi= (Master_info *)my_hash_element(&master_info_hash, i); + DBUG_ASSERT(mi); + + if (mi->in_flush_all_relay_logs) + { + i++; + continue; + } + mi->in_flush_all_relay_logs= 1; + + mysql_mutex_lock(&mi->sleep_lock); + mi->users++; // Mark used + mysql_mutex_unlock(&mi->sleep_lock); + mysql_mutex_unlock(&LOCK_active_mi); + + mysql_mutex_lock(&mi->data_lock); + error= rotate_relay_log(mi); + mysql_mutex_unlock(&mi->data_lock); + mi->release(); + mysql_mutex_lock(&LOCK_active_mi); + + if (error) + { + result= true; + break; + } + /* Restart from first element as master_info_hash may have changed */ + i= 0; + continue; + } + mysql_mutex_unlock(&LOCK_active_mi); + DBUG_RETURN(result); +} + #endif /* HAVE_REPLICATION */ diff --git a/sql/rpl_mi.h b/sql/rpl_mi.h index 535abd25f6d..ccc1be6e5ce 100644 --- a/sql/rpl_mi.h +++ b/sql/rpl_mi.h @@ -302,6 +302,7 @@ class Master_info : public Slave_reporting_capability /* gtid_event_seen is false until we receive first GTID event from master. 
*/ bool gtid_event_seen; bool in_start_all_slaves, in_stop_all_slaves; + bool in_flush_all_relay_logs; uint users; /* Active user for object */ uint killed; @@ -354,6 +355,7 @@ public: bool start_all_slaves(THD *thd); bool stop_all_slaves(THD *thd); void free_connections(); + bool flush_all_relay_logs(); }; diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index 1f282e6aee5..cfc544cc1bd 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -5202,7 +5202,7 @@ ER_FRM_UNKNOWN_TYPE rus "Файл '%-.192s' Ñодержит неизвеÑтный тип '%-.64s' в заголовке" ukr "Файл '%-.192s' має невідомий тип '%-.64s' у заголовку" ER_WRONG_OBJECT - eng "'%-.192s.%-.192s' is not %s" + eng "'%-.192s.%-.192s' is not of type '%s'" ger "'%-.192s.%-.192s' ist nicht %s" rus "'%-.192s.%-.192s' - не %s" ukr "'%-.192s.%-.192s' не Ñ” %s" diff --git a/sql/slave.cc b/sql/slave.cc index 641bdae9e31..a7f0f003e5c 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2000, 2016, Oracle and/or its affiliates. - Copyright (c) 2009, 2016, MariaDB + Copyright (c) 2009, 2017, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -6286,9 +6286,8 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len) mi->last_queued_gtid.seq_no == 1000) goto skip_relay_logging; }); - /* Fall through to default case ... */ #endif - + /* fall through */ default: default_action: DBUG_EXECUTE_IF("kill_slave_io_after_2_events", diff --git a/sql/sp_head.cc b/sql/sp_head.cc index f8ad3c305a7..c87a15ff927 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -1,6 +1,6 @@ /* Copyright (c) 2002, 2016, Oracle and/or its affiliates. 
- Copyright (c) 2011, 2016, MariaDB + Copyright (c) 2011, 2017, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index d63a2f2bc51..433d48a3de4 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -11698,12 +11698,6 @@ void fill_effective_table_privileges(THD *thd, GRANT_INFO *grant, /* global privileges */ grant->privilege= sctx->master_access; - if (!sctx->priv_user[0] && !sctx->priv_role[0]) - { - DBUG_PRINT("info", ("privilege 0x%lx", grant->privilege)); - DBUG_VOID_RETURN; // it is slave - } - if (!thd->db || strcmp(db, thd->db)) { /* db privileges */ diff --git a/sql/sql_audit.cc b/sql/sql_audit.cc index 8a523ebbf4b..8134adca13f 100644 --- a/sql/sql_audit.cc +++ b/sql/sql_audit.cc @@ -240,7 +240,7 @@ void mysql_audit_finalize() /** Initialize an Audit plug-in - + @param[in] plugin @retval FALSE OK @@ -251,12 +251,13 @@ int initialize_audit_plugin(st_plugin_int *plugin) { st_mysql_audit *data= (st_mysql_audit*) plugin->plugin->info; - if (!data->event_notify || !data->class_mask[0]) { + if (!data->event_notify || !data->class_mask[0]) + { sql_print_error("Plugin '%s' has invalid data.", plugin->name.str); return 1; } - + if (plugin->plugin->init && plugin->plugin->init(NULL)) { sql_print_error("Plugin '%s' init function returned error.", @@ -266,7 +267,7 @@ int initialize_audit_plugin(st_plugin_int *plugin) /* Make the interface info more easily accessible */ plugin->data= plugin->plugin->info; - + /* Add the bits the plugin is interested in to the global mask */ mysql_mutex_lock(&LOCK_audit_mask); add_audit_mask(mysql_global_audit_mask, data->class_mask); diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index b2927c89fda..89afef4eaf7 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2000, 2013, Oracle and/or its affiliates. 
- Copyright (c) 2010, 2013, Monty Program Ab + Copyright (c) 2010, 2017, MariaDB Corporation This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -1466,12 +1466,6 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d", (int)flags.autocommit)); /* - Make InnoDB to release the adaptive hash index latch before - acquiring the query cache mutex. - */ - ha_release_temporary_latches(thd); - - /* A table- or a full flush operation can potentially take a long time to finish. We choose not to wait for them and skip caching statements instead. diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 076ae8b9923..740d5648214 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -2818,13 +2818,6 @@ int select_send::send_data(List<Item> &items) if (thd->killed == ABORT_QUERY) DBUG_RETURN(FALSE); - /* - We may be passing the control from mysqld to the client: release the - InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved - by thd - */ - ha_release_temporary_latches(thd); - protocol->prepare_for_resend(); if (protocol->send_result_set_row(&items)) { @@ -2844,13 +2837,6 @@ int select_send::send_data(List<Item> &items) bool select_send::send_eof() { /* - We may be passing the control from mysqld to the client: release the - InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved - by thd - */ - ha_release_temporary_latches(thd); - - /* Don't send EOF if we're in error condition (which implies we've already sent or are sending an error) */ @@ -5601,6 +5587,7 @@ bool xid_cache_insert(THD *thd, XID_STATE *xid_state) break; case 1: my_error(ER_XAER_DUPID, MYF(0)); + /* fall through */ default: xid_state->xid_cache_element= 0; } diff --git a/sql/sql_class.h b/sql/sql_class.h index 817be9d939c..02bfb5f402d 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -4697,7 +4697,7 @@ public: select_result(thd_arg), suppress_my_ok(false) { 
DBUG_ENTER("select_result_interceptor::select_result_interceptor"); - DBUG_PRINT("enter", ("this 0x%lx", (ulong) this)); + DBUG_PRINT("enter", ("this %p", this)); DBUG_VOID_RETURN; } /* Remove gcc warning */ uint field_count(List<Item> &fields) const { return 0; } diff --git a/sql/sql_digest.cc b/sql/sql_digest.cc index 18106a70475..27c33f1c64b 100644 --- a/sql/sql_digest.cc +++ b/sql/sql_digest.cc @@ -1,4 +1,5 @@ /* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved. + Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -454,7 +455,8 @@ sql_digest_state* digest_add_token(sql_digest_state *state, } } while (found_unary); } - /* fall through, for case NULL_SYM below */ + /* for case NULL_SYM below */ + /* fall through */ case LEX_HOSTNAME: case TEXT_STRING: case NCHAR_STRING: diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 8085e3ded1b..16ab4ee650a 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -1,6 +1,6 @@ /* Copyright (c) 2000, 2016, Oracle and/or its affiliates. 
- Copyright (c) 2010, 2016, MariaDB + Copyright (c) 2010, 2017, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -699,9 +699,9 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, bool using_bulk_insert= 0; uint value_count; ulong counter = 1; - ulong iteration= 0; + /* counter of iteration in bulk PS operation*/ + ulonglong iteration= 0; ulonglong id; - ulong bulk_iterations= bulk_parameters_iterations(thd); COPY_INFO info; TABLE *table= 0; List_iterator_fast<List_item> its(values_list); @@ -769,7 +769,6 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, DBUG_RETURN(TRUE); value_count= values->elements; - DBUG_ASSERT(bulk_iterations > 0); if (mysql_prepare_insert(thd, table_list, table, fields, values, update_fields, update_values, duplic, &unused_conds, FALSE)) @@ -941,6 +940,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, } do { + DBUG_PRINT("info", ("iteration %llu", iteration)); if (iteration && bulk_parameters_set(thd)) goto abort; @@ -1061,7 +1061,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, } its.rewind(); iteration++; - } while (iteration < bulk_iterations); + } while (bulk_parameters_iterations(thd)); values_loop_end: free_underlaid_joins(thd, &thd->lex->select_lex); @@ -1208,7 +1208,7 @@ values_loop_end: retval= thd->lex->explain->send_explain(thd); goto abort; } - if ((bulk_iterations * values_list.elements) == 1 && (!(thd->variables.option_bits & OPTION_WARNINGS) || + if ((iteration * values_list.elements) == 1 && (!(thd->variables.option_bits & OPTION_WARNINGS) || !thd->cuted_fields)) { my_ok(thd, info.copied + info.deleted + @@ -3753,9 +3753,6 @@ int select_insert::send_data(List<Item> &values) } } - // Release latches in case bulk insert takes a long time - ha_release_temporary_latches(thd); - error= write_record(thd, table, &info); table->auto_increment_field_not_null= FALSE; diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 
4c763f50eaf..ef7ca58fe93 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2000, 2014, Oracle and/or its affiliates. - Copyright (c) 2009, 2016, MariaDB + Copyright (c) 2009, 2017, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -1477,12 +1477,14 @@ static int lex_one_token(YYSTYPE *yylval, THD *thd) state= MY_LEX_HEX_NUMBER; break; } + /* fall through */ case MY_LEX_IDENT_OR_BIN: if (lip->yyPeek() == '\'') { // Found b'bin-number' state= MY_LEX_BIN_NUMBER; break; } + /* fall through */ case MY_LEX_IDENT: const char *start; #if defined(USE_MB) && defined(USE_MB_IDENT) @@ -1824,6 +1826,7 @@ static int lex_one_token(YYSTYPE *yylval, THD *thd) break; } /* " used for strings */ + /* fall through */ case MY_LEX_STRING: // Incomplete text string { uint sep; @@ -2182,6 +2185,7 @@ void st_select_lex::init_query() leaf_tables_prep.empty(); leaf_tables.empty(); item_list.empty(); + min_max_opt_list.empty(); join= 0; having= prep_having= where= prep_where= 0; cond_pushed_into_where= cond_pushed_into_having= 0; diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 61c13814eeb..08a6936db6e 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -845,6 +845,11 @@ public: */ List<Item_func_match> *ftfunc_list; List<Item_func_match> ftfunc_list_alloc; + /* + The list of items to which MIN/MAX optimizations of opt_sum_query() + have been applied. Used to rollback those optimizations if it's needed. 
+ */ + List<Item_sum> min_max_opt_list; JOIN *join; /* after JOIN::prepare it is pointer to corresponding JOIN */ List<TABLE_LIST> top_join_list; /* join list of the top level */ List<TABLE_LIST> *join_list; /* list for the currently parsed join */ @@ -906,6 +911,7 @@ public: /* reserved for exists 2 in */ uint select_n_reserved; enum_parsing_place parsing_place; /* where we are parsing expression */ + enum_parsing_place context_analysis_place; /* where we are in prepare */ bool with_sum_func; /* sum function indicator */ ulong table_join_options; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 174d57b99af..fbfbe0f77fb 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -392,7 +392,7 @@ const LEX_STRING command_name[257]={ { 0, 0 }, //247 { 0, 0 }, //248 { 0, 0 }, //249 - { 0, 0 }, //250 + { C_STRING_WITH_LEN("Bulk_execute") }, //250 { C_STRING_WITH_LEN("Slave_worker") }, //251 { C_STRING_WITH_LEN("Slave_IO") }, //252 { C_STRING_WITH_LEN("Slave_SQL") }, //253 @@ -1754,6 +1754,11 @@ bool dispatch_command(enum enum_server_command command, THD *thd, } break; } + case COM_STMT_BULK_EXECUTE: + { + mysqld_stmt_bulk_execute(thd, packet, packet_length); + break; + } case COM_STMT_EXECUTE: { mysqld_stmt_execute(thd, packet, packet_length); @@ -3534,8 +3539,8 @@ mysql_execute_command(THD *thd) MYF(0)); goto error; } - /* no break; fall through */ } + /* fall through */ case SQLCOM_SHOW_STATUS_PROC: case SQLCOM_SHOW_STATUS_FUNC: case SQLCOM_SHOW_DATABASES: @@ -3549,7 +3554,7 @@ mysql_execute_command(THD *thd) case SQLCOM_SELECT: if (WSREP_CLIENT(thd) && wsrep_sync_wait(thd)) goto error; - + /* fall through */ case SQLCOM_SHOW_PLUGINS: case SQLCOM_SHOW_VARIABLES: case SQLCOM_SHOW_CHARSETS: @@ -4391,8 +4396,8 @@ end_with_restore_list: /* mysql_update return 2 if we need to switch to multi-update */ if (up_result != 2) break; - /* Fall through */ } + /* Fall through */ case SQLCOM_UPDATE_MULTI: { DBUG_ASSERT(first_table == all_tables && first_table != 0); @@ -4510,6 
+4515,7 @@ end_with_restore_list: } #endif } + /* fall through */ case SQLCOM_INSERT: { DBUG_ASSERT(first_table == all_tables && first_table != 0); @@ -5442,6 +5448,7 @@ end_with_restore_list: initialize this variable because RESET shares the same code as FLUSH */ lex->no_write_to_binlog= 1; + /* fall through */ case SQLCOM_FLUSH: { int write_to_binlog; @@ -7354,12 +7361,6 @@ bool check_fk_parent_table_access(THD *thd, ****************************************************************************/ -#if STACK_DIRECTION < 0 -#define used_stack(A,B) (long) (A - B) -#else -#define used_stack(A,B) (long) (B - A) -#endif - #ifndef DBUG_OFF long max_stack_used; #endif @@ -7376,7 +7377,7 @@ bool check_stack_overrun(THD *thd, long margin, { long stack_used; DBUG_ASSERT(thd == current_thd); - if ((stack_used=used_stack(thd->thread_stack,(char*) &stack_used)) >= + if ((stack_used= available_stack_size(thd->thread_stack, &stack_used)) >= (long) (my_thread_stack_size - margin)) { thd->is_fatal_error= 1; diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index e9fc0f62617..92b9c94e84c 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -1,6 +1,6 @@ /* Copyright (c) 2005, 2013, Oracle and/or its affiliates. - Copyright (c) 2010, 2014, SkySQL Ab. + Copyright (c) 2010, 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -1864,8 +1864,8 @@ static bool plugin_load_list(MEM_ROOT *tmp_root, const char *list) switch ((*(p++)= *(list++))) { case '\0': list= NULL; /* terminate the loop */ - /* fall through */ #ifndef __WIN__ + /* fall through */ case ':': /* can't use this as delimiter as it may be drive letter */ #endif case ';': @@ -1908,6 +1908,7 @@ static bool plugin_load_list(MEM_ROOT *tmp_root, const char *list) str->str= p; continue; } + /* fall through */ default: str->length++; continue; diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index a1c352e8b19..f671e45d51a 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2002, 2015, Oracle and/or its affiliates. - Copyright (c) 2008, 2016, MariaDB + Copyright (c) 2008, 2017, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -164,7 +164,6 @@ public: Server_side_cursor *cursor; uchar *packet; uchar *packet_end; - ulong iterations; uint param_count; uint last_errno; uint flags; @@ -183,7 +182,9 @@ public: */ uint select_number_after_prepare; char last_error[MYSQL_ERRMSG_SIZE]; + my_bool iterations; my_bool start_param; + my_bool read_types; #ifndef EMBEDDED_LIBRARY bool (*set_params)(Prepared_statement *st, uchar *data, uchar *data_end, uchar *read_pos, String *expanded_query); @@ -213,11 +214,10 @@ public: uchar *packet_arg, uchar *packet_end_arg); bool execute_bulk_loop(String *expanded_query, bool open_cursor, - uchar *packet_arg, uchar *packet_end_arg, - ulong iterations); + uchar *packet_arg, uchar *packet_end_arg); bool execute_server_runnable(Server_runnable *server_runnable); my_bool set_bulk_parameters(bool reset); - ulong bulk_iterations(); + bool bulk_iterations() { return iterations; }; /* Destroy this statement */ void deallocate(); bool 
execute_immediate(const char *query, uint query_length); @@ -923,6 +923,7 @@ static bool insert_params(Prepared_statement *stmt, uchar *null_array, for (Item_param **it= begin; it < end; ++it) { Item_param *param= *it; + param->indicator= STMT_INDICATOR_NONE; // only for bulk parameters if (!param->has_long_data_value()) { if (is_param_null(null_array, (uint) (it - begin))) @@ -967,10 +968,7 @@ static bool insert_bulk_params(Prepared_statement *stmt, param->reset(); if (!param->has_long_data_value()) { - if (param->indicators) - param->indicator= (enum_indicator_type) *((*read_pos)++); - else - param->indicator= STMT_INDICATOR_NONE; + param->indicator= (enum_indicator_type) *((*read_pos)++); if ((*read_pos) > data_end) DBUG_RETURN(1); switch (param->indicator) @@ -981,6 +979,8 @@ static bool insert_bulk_params(Prepared_statement *stmt, param->set_param_func(param, read_pos, (uint) (data_end - (*read_pos))); if (param->has_no_value()) DBUG_RETURN(1); + if (param->convert_str_value(stmt->thd)) + DBUG_RETURN(1); /* out of memory */ break; case STMT_INDICATOR_NULL: param->set_null(); @@ -999,6 +999,36 @@ static bool insert_bulk_params(Prepared_statement *stmt, DBUG_RETURN(0); } +static bool set_conversion_functions(Prepared_statement *stmt, + uchar **data, uchar *data_end) +{ + uchar *read_pos= *data; + const uint signed_bit= 1 << 15; + DBUG_ENTER("set_conversion_functions"); + /* + First execute or types altered by the client, setup the + conversion routines for all parameters (one time) + */ + Item_param **it= stmt->param_array; + Item_param **end= it + stmt->param_count; + THD *thd= stmt->thd; + for (; it < end; ++it) + { + ushort typecode; + + if (read_pos >= data_end) + DBUG_RETURN(1); + + typecode= sint2korr(read_pos); + read_pos+= 2; + (**it).unsigned_flag= MY_TEST(typecode & signed_bit); + setup_one_conversion_function(thd, *it, (uchar) (typecode & 0xff)); + } + *data= read_pos; + DBUG_RETURN(0); +} + + static bool setup_conversion_functions(Prepared_statement 
*stmt, uchar **data, uchar *data_end, bool bulk_protocol= 0) @@ -1012,30 +1042,9 @@ static bool setup_conversion_functions(Prepared_statement *stmt, if (*read_pos++) //types supplied / first execute { - /* - First execute or types altered by the client, setup the - conversion routines for all parameters (one time) - */ - Item_param **it= stmt->param_array; - Item_param **end= it + stmt->param_count; - THD *thd= stmt->thd; - for (; it < end; ++it) - { - ushort typecode; - const uint signed_bit= 1 << 15; - const uint indicators_bit= 1 << 14; - - if (read_pos >= data_end) - DBUG_RETURN(1); - - typecode= sint2korr(read_pos); - read_pos+= 2; - (**it).unsigned_flag= MY_TEST(typecode & signed_bit); - if (bulk_protocol) - (**it).indicators= MY_TEST(typecode & indicators_bit); - setup_one_conversion_function(thd, *it, - (uchar) (typecode & 0xff)); - } + *data= read_pos; + bool res= set_conversion_functions(stmt, data, data_end); + DBUG_RETURN(res); } *data= read_pos; DBUG_RETURN(0); @@ -2343,7 +2352,7 @@ static bool check_prepared_statement(Prepared_statement *stmt) /* mysql_test_update returns 2 if we need to switch to multi-update */ if (res != 2) break; - + /* fall through */ case SQLCOM_UPDATE_MULTI: res= mysql_test_multiupdate(stmt, tables, res == 2); break; @@ -3022,6 +3031,14 @@ static void reset_stmt_params(Prepared_statement *stmt) } +static void mysql_stmt_execute_common(THD *thd, + ulong stmt_id, + uchar *packet, + uchar *packet_end, + ulong cursor_flags, + bool iteration, + bool types); + /** COM_STMT_EXECUTE handler: execute a previously prepared statement. 
@@ -3044,20 +3061,91 @@ void mysqld_stmt_execute(THD *thd, char *packet_arg, uint packet_length) uchar *packet= (uchar*)packet_arg; // GCC 4.0.1 workaround ulong stmt_id= uint4korr(packet); ulong flags= (ulong) packet[4]; -#ifndef EMBEDDED_LIBRARY - ulong iterations= uint4korr(packet + 5); -#else - ulong iterations= 0; // no support -#endif + uchar *packet_end= packet + packet_length; + DBUG_ENTER("mysqld_stmt_execute"); + + packet+= 9; /* stmt_id + 5 bytes of flags */ + + mysql_stmt_execute_common(thd, stmt_id, packet, packet_end, flags, FALSE, + FALSE); + DBUG_VOID_RETURN; +} + + +/** + COM_STMT_BULK_EXECUTE handler: execute a previously prepared statement. + + If there are any parameters, then replace parameter markers with the + data supplied from the client, and then execute the statement. + This function uses binary protocol to send a possible result set + to the client. + + @param thd current thread + @param packet_arg parameter types and data, if any + @param packet_length packet length, including the terminator character. + + @return + none: in case of success OK packet or a result set is sent to the + client, otherwise an error message is set in THD. 
+*/ + +void mysqld_stmt_bulk_execute(THD *thd, char *packet_arg, uint packet_length) +{ + uchar *packet= (uchar*)packet_arg; // GCC 4.0.1 workaround + ulong stmt_id= uint4korr(packet); + uint flags= (uint) uint2korr(packet + 4); + uchar *packet_end= packet + packet_length; + DBUG_ENTER("mysqld_stmt_execute_bulk"); + + if (!(thd->client_capabilities & + MARIADB_CLIENT_STMT_BULK_OPERATIONS)) + { + DBUG_PRINT("error", + ("An attempt to execute bulk operation without support")); + my_error(ER_UNSUPPORTED_PS, MYF(0)); + } + /* Check for implemented parameters */ + if (flags & (~STMT_BULK_FLAG_CLIENT_SEND_TYPES)) + { + DBUG_PRINT("error", ("unsupported bulk execute flags %x", flags)); + my_error(ER_UNSUPPORTED_PS, MYF(0)); + } + + /* stmt id and two bytes of flags */ + packet+= 4 + 2; + mysql_stmt_execute_common(thd, stmt_id, packet, packet_end, 0, TRUE, + (flags & STMT_BULK_FLAG_CLIENT_SEND_TYPES)); + DBUG_VOID_RETURN; +} + + +/** + Common part of prepared statement execution + + @param thd THD handle + @param stmt_id id of the prepared statement + @param paket packet with parameters to bind + @param packet_end pointer to the byte after parameters end + @param cursor_flags cursor flags + @param bulk_op id it bulk operation + @param read_types flag say that types muast been read +*/ + +static void mysql_stmt_execute_common(THD *thd, + ulong stmt_id, + uchar *packet, + uchar *packet_end, + ulong cursor_flags, + bool bulk_op, + bool read_types) +{ /* Query text for binary, general or slow log, if any of them is open */ String expanded_query; - uchar *packet_end= packet + packet_length; Prepared_statement *stmt; Protocol *save_protocol= thd->protocol; bool open_cursor; - DBUG_ENTER("mysqld_stmt_execute"); - - packet+= 9; /* stmt_id + 5 bytes of flags */ + DBUG_ENTER("mysqld_stmt_execute_common"); + DBUG_ASSERT((!read_types) || (read_types && bulk_op)); /* First of all clear possible warnings from the previous command */ thd->reset_for_next_command(); @@ -3069,21 +3157,21 @@ 
void mysqld_stmt_execute(THD *thd, char *packet_arg, uint packet_length) llstr(stmt_id, llbuf), "mysqld_stmt_execute"); DBUG_VOID_RETURN; } + stmt->read_types= read_types; #if defined(ENABLED_PROFILING) thd->profiling.set_query_source(stmt->query(), stmt->query_length()); #endif DBUG_PRINT("exec_query", ("%s", stmt->query())); - DBUG_PRINT("info",("stmt: %p iterations: %lu", stmt, iterations)); + DBUG_PRINT("info",("stmt: %p bulk_op %d", stmt, bulk_op)); - open_cursor= MY_TEST(flags & (ulong) CURSOR_TYPE_READ_ONLY); + open_cursor= MY_TEST(cursor_flags & (ulong) CURSOR_TYPE_READ_ONLY); thd->protocol= &thd->protocol_binary; - if (iterations <= 1) + if (!bulk_op) stmt->execute_loop(&expanded_query, open_cursor, packet, packet_end); else - stmt->execute_bulk_loop(&expanded_query, open_cursor, packet, packet_end, - iterations); + stmt->execute_bulk_loop(&expanded_query, open_cursor, packet, packet_end); thd->protocol= save_protocol; sp_cache_enforce_limit(thd->sp_proc_cache, stored_program_cache_size); @@ -3426,7 +3514,7 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) { stmt->state= Query_arena::STMT_ERROR; stmt->last_errno= thd->get_stmt_da()->sql_errno(); - strncpy(stmt->last_error, thd->get_stmt_da()->message(), MYSQL_ERRMSG_SIZE); + strmake_buf(stmt->last_error, thd->get_stmt_da()->message()); } thd->set_stmt_da(save_stmt_da); @@ -3590,11 +3678,12 @@ Prepared_statement::Prepared_statement(THD *thd_arg) cursor(0), packet(0), packet_end(0), - iterations(0), param_count(0), last_errno(0), flags((uint) IS_IN_USE), + iterations(0), start_param(0), + read_types(0), m_sql_mode(thd->variables.sql_mode) { init_sql_alloc(&main_mem_root, thd_arg->variables.query_alloc_block_size, @@ -3631,7 +3720,7 @@ void Prepared_statement::setup_set_params() set_params_from_actual_params= insert_params_from_actual_params_with_log; #ifndef EMBEDDED_LIBRARY set_params= insert_params_with_log; - set_bulk_params= insert_bulk_params; // TODO: add binlog support + 
set_bulk_params= insert_bulk_params; // RBR is on for bulk operation #else //TODO: add bulk support for bulk parameters set_params_data= emb_insert_params_with_log; @@ -4013,7 +4102,7 @@ Prepared_statement::execute_loop(String *expanded_query, Reprepare_observer reprepare_observer; bool error; int reprepare_attempt= 0; - iterations= 0; + iterations= FALSE; /* - In mysql_sql_stmt_execute() we hide all "external" Items @@ -4116,11 +4205,11 @@ my_bool bulk_parameters_set(THD *thd) DBUG_RETURN(FALSE); } -ulong bulk_parameters_iterations(THD *thd) +my_bool bulk_parameters_iterations(THD *thd) { Prepared_statement *stmt= (Prepared_statement *) thd->bulk_param; if (!stmt) - return 1; + return FALSE; return stmt->bulk_iterations(); } @@ -4128,7 +4217,8 @@ ulong bulk_parameters_iterations(THD *thd) my_bool Prepared_statement::set_bulk_parameters(bool reset) { DBUG_ENTER("Prepared_statement::set_bulk_parameters"); - DBUG_PRINT("info", ("iteration: %lu", iterations)); + DBUG_PRINT("info", ("iteration: %d", iterations)); + if (iterations) { #ifndef EMBEDDED_LIBRARY @@ -4142,31 +4232,24 @@ my_bool Prepared_statement::set_bulk_parameters(bool reset) reset_stmt_params(this); DBUG_RETURN(true); } - iterations--; + if (packet >= packet_end) + iterations= FALSE; } start_param= 0; DBUG_RETURN(false); } -ulong Prepared_statement::bulk_iterations() -{ - if (iterations) - return iterations; - return start_param ? 
1 : 0; -} - bool Prepared_statement::execute_bulk_loop(String *expanded_query, bool open_cursor, uchar *packet_arg, - uchar *packet_end_arg, - ulong iterations_arg) + uchar *packet_end_arg) { Reprepare_observer reprepare_observer; bool error= 0; packet= packet_arg; packet_end= packet_end_arg; - iterations= iterations_arg; + iterations= TRUE; start_param= true; #ifndef DBUG_OFF Item *free_list_state= thd->free_list; @@ -4180,16 +4263,26 @@ Prepared_statement::execute_bulk_loop(String *expanded_query, thd->set_bulk_execution(0); return TRUE; } + /* Check for non zero parameter count*/ + if (param_count == 0) + { + DBUG_PRINT("error", ("Statement with no parameters for bulk execution.")); + my_error(ER_UNSUPPORTED_PS, MYF(0)); + thd->set_bulk_execution(0); + return TRUE; + } if (!(sql_command_flags[lex->sql_command] & CF_SP_BULK_SAFE)) { + DBUG_PRINT("error", ("Command is not supported in bulk execution.")); my_error(ER_UNSUPPORTED_PS, MYF(0)); thd->set_bulk_execution(0); return TRUE; } #ifndef EMBEDDED_LIBRARY - if (setup_conversion_functions(this, &packet, packet_end, TRUE)) + if (read_types && + set_conversion_functions(this, &packet, packet_end)) #else // bulk parameters are not supported for embedded, so it will an error #endif @@ -4200,6 +4293,7 @@ Prepared_statement::execute_bulk_loop(String *expanded_query, thd->set_bulk_execution(0); return true; } + read_types= FALSE; #ifdef NOT_YET_FROM_MYSQL_5_6 if (unlikely(thd->security_ctx->password_expired && diff --git a/sql/sql_prepare.h b/sql/sql_prepare.h index 820cb43e6d5..203b37b3b26 100644 --- a/sql/sql_prepare.h +++ b/sql/sql_prepare.h @@ -72,6 +72,7 @@ private: void mysqld_stmt_prepare(THD *thd, const char *packet, uint packet_length); void mysqld_stmt_execute(THD *thd, char *packet, uint packet_length); +void mysqld_stmt_execute_bulk(THD *thd, char *packet, uint packet_length); void mysqld_stmt_bulk_execute(THD *thd, char *packet, uint packet_length); void mysqld_stmt_close(THD *thd, char *packet); void 
mysql_sql_stmt_prepare(THD *thd); @@ -83,7 +84,7 @@ void mysqld_stmt_reset(THD *thd, char *packet); void mysql_stmt_get_longdata(THD *thd, char *pos, ulong packet_length); void reinit_stmt_before_use(THD *thd, LEX *lex); -ulong bulk_parameters_iterations(THD *thd); +my_bool bulk_parameters_iterations(THD *thd); my_bool bulk_parameters_set(THD *thd); /** Execute a fragment of server code in an isolated context, so that diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc index 400c230ea43..c01ad90f5d2 100644 --- a/sql/sql_reload.cc +++ b/sql/sql_reload.cc @@ -181,8 +181,12 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options, slave is not likely to have the same connection names. */ tmp_write_to_binlog= 0; - - if (!(mi= (get_master_info(&connection_name, + if (connection_name.length == 0) + { + if (master_info_index->flush_all_relay_logs()) + *write_to_binlog= -1; + } + else if (!(mi= (get_master_info(&connection_name, Sql_condition::WARN_LEVEL_ERROR)))) { result= 1; @@ -249,7 +253,8 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options, NOTE: my_error() has been already called by reopen_tables() within close_cached_tables(). 
*/ - result= 1; + thd->global_read_lock.unlock_global_read_lock(thd); + return 1; } if (thd->global_read_lock.make_global_read_lock_block_commit(thd)) // Killed diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 98009827cb4..c5fd142f620 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -3912,9 +3912,6 @@ bool mysql_show_binlog_events(THD* thd) Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(TRUE); - Format_description_log_event *description_event= new - Format_description_log_event(3); /* MySQL 4.0 by default */ - DBUG_ASSERT(thd->lex->sql_command == SQLCOM_SHOW_BINLOG_EVENTS || thd->lex->sql_command == SQLCOM_SHOW_RELAYLOG_EVENTS); @@ -3935,6 +3932,9 @@ bool mysql_show_binlog_events(THD* thd) binary_log= &(mi->rli.relay_log); } + Format_description_log_event *description_event= new + Format_description_log_event(3); /* MySQL 4.0 by default */ + if (binary_log->is_open()) { SELECT_LEX_UNIT *unit= &thd->lex->unit; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index c6613facde7..15082cdd0c1 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -648,17 +648,15 @@ setup_without_group(THD *thd, Ref_ptr_array ref_pointer_array, thd->lex->allow_sum_func|= (nesting_map)1 << select->nest_level; - save_place= thd->lex->current_select->parsing_place; - thd->lex->current_select->parsing_place= IN_ORDER_BY; + save_place= thd->lex->current_select->context_analysis_place; + thd->lex->current_select->context_analysis_place= IN_ORDER_BY; res= res || setup_order(thd, ref_pointer_array, tables, fields, all_fields, order); - thd->lex->current_select->parsing_place= save_place; - thd->lex->allow_sum_func&= ~((nesting_map)1 << select->nest_level); - save_place= thd->lex->current_select->parsing_place; - thd->lex->current_select->parsing_place= IN_GROUP_BY; + thd->lex->allow_sum_func&= ~((nesting_map)1 << select->nest_level); + thd->lex->current_select->context_analysis_place= IN_GROUP_BY; res= res || setup_group(thd, ref_pointer_array, tables, fields, 
all_fields, group, hidden_group_fields); - thd->lex->current_select->parsing_place= save_place; + thd->lex->current_select->context_analysis_place= save_place; thd->lex->allow_sum_func|= (nesting_map)1 << select->nest_level; res= res || setup_windows(thd, ref_pointer_array, tables, fields, all_fields, win_specs, win_funcs); @@ -712,6 +710,7 @@ JOIN::prepare(TABLE_LIST *tables_init, if (select_lex->handle_derived(thd->lex, DT_PREPARE)) DBUG_RETURN(1); + thd->lex->current_select->context_analysis_place= NO_MATTER; thd->lex->current_select->is_item_list_lookup= 1; /* If we have already executed SELECT, then it have not sense to prevent @@ -801,12 +800,13 @@ JOIN::prepare(TABLE_LIST *tables_init, ref_ptrs= ref_ptr_array_slice(0); - enum_parsing_place save_place= thd->lex->current_select->parsing_place; - thd->lex->current_select->parsing_place= SELECT_LIST; + enum_parsing_place save_place= + thd->lex->current_select->context_analysis_place; + thd->lex->current_select->context_analysis_place= SELECT_LIST; if (setup_fields(thd, ref_ptrs, fields_list, MARK_COLUMNS_READ, &all_fields, 1)) DBUG_RETURN(-1); - thd->lex->current_select->parsing_place= save_place; + thd->lex->current_select->context_analysis_place= save_place; if (setup_without_group(thd, ref_ptrs, tables_list, select_lex->leaf_tables, fields_list, @@ -1991,7 +1991,8 @@ JOIN::optimize_inner() having= new (thd->mem_root) Item_int(thd, (longlong) 0,1); zero_result_cause= "Impossible HAVING noticed after reading const tables"; error= 0; - DBUG_RETURN(0); + select_lex->mark_const_derived(zero_result_cause); + goto setup_subq_exit; } } @@ -3384,7 +3385,8 @@ void JOIN::exec_inner() condtions may be arbitrarily costly, and because the optimize phase might not have produced a complete executable plan for EXPLAINs. 
*/ - if (exec_const_cond && !(select_options & SELECT_DESCRIBE) && + if (!zero_result_cause && + exec_const_cond && !(select_options & SELECT_DESCRIBE) && !exec_const_cond->val_int()) zero_result_cause= "Impossible WHERE noticed after reading const tables"; @@ -9725,12 +9727,20 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) /* Step #2: Extract WHERE/ON parts */ + uint i; + for (i= join->top_join_tab_count - 1; i >= join->const_tables; i--) + { + if (!join->join_tab[i].bush_children) + break; + } + uint last_top_base_tab_idx= i; + table_map save_used_tables= 0; used_tables=((select->const_tables=join->const_table_map) | OUTER_REF_TABLE_BIT | RAND_TABLE_BIT); JOIN_TAB *tab; table_map current_map; - uint i= join->const_tables; + i= join->const_tables; for (tab= first_depth_first_tab(join); tab; tab= next_depth_first_tab(join, tab), i++) { @@ -9769,7 +9779,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) Following force including random expression in last table condition. It solve problem with select like SELECT * FROM t1 WHERE rand() > 0.5 */ - if (tab == join->join_tab + join->top_join_tab_count - 1) + if (tab == join->join_tab + last_top_base_tab_idx) current_map|= RAND_TABLE_BIT; used_tables|=current_map; @@ -9809,10 +9819,10 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) save_used_tables= 0; } else - { - tmp= make_cond_for_table(thd, cond, used_tables, current_map, i, + { + tmp= make_cond_for_table(thd, cond, used_tables, current_map, i, FALSE, FALSE); - } + } /* Add conditions added by add_not_null_conds(). */ if (tab->select_cond) add_cond_and_fix(thd, &tmp, tab->select_cond); @@ -14517,7 +14527,8 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top, table->table->maybe_null= FALSE; table->outer_join= 0; if (!(straight_join || table->straight)) - table->dep_tables= table->embedding? table->embedding->dep_tables: 0; + table->dep_tables= table->embedding && !table->embedding->sj_subq_pred ? 
+ table->embedding->dep_tables : 0; if (table->on_expr) { /* Add ON expression to the WHERE or upper-level ON condition. */ @@ -22206,14 +22217,16 @@ int setup_order(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables, List<Item> &fields, List<Item> &all_fields, ORDER *order, bool from_window_spec) { - enum_parsing_place parsing_place= thd->lex->current_select->parsing_place; + enum_parsing_place context_analysis_place= + thd->lex->current_select->context_analysis_place; thd->where="order clause"; for (; order; order=order->next) { if (find_order_in_list(thd, ref_pointer_array, tables, order, fields, all_fields, FALSE, from_window_spec)) return 1; - if ((*order->item)->with_window_func && parsing_place != IN_ORDER_BY) + if ((*order->item)->with_window_func && + context_analysis_place != IN_ORDER_BY) { my_error(ER_WINDOW_FUNCTION_IN_WINDOW_SPEC, MYF(0)); return 1; @@ -22255,7 +22268,8 @@ setup_group(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables, List<Item> &fields, List<Item> &all_fields, ORDER *order, bool *hidden_group_fields, bool from_window_spec) { - enum_parsing_place parsing_place= thd->lex->current_select->parsing_place; + enum_parsing_place context_analysis_place= + thd->lex->current_select->context_analysis_place; *hidden_group_fields=0; ORDER *ord; @@ -22271,14 +22285,14 @@ setup_group(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables, all_fields, TRUE, from_window_spec)) return 1; (*ord->item)->marker= UNDEF_POS; /* Mark found */ - if ((*ord->item)->with_sum_func && parsing_place == IN_GROUP_BY) + if ((*ord->item)->with_sum_func && context_analysis_place == IN_GROUP_BY) { my_error(ER_WRONG_GROUP_FIELD, MYF(0), (*ord->item)->full_name()); return 1; } if ((*ord->item)->with_window_func) { - if (parsing_place == IN_GROUP_BY) + if (context_analysis_place == IN_GROUP_BY) my_error(ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION, MYF(0)); else my_error(ER_WINDOW_FUNCTION_IN_WINDOW_SPEC, MYF(0)); diff --git a/sql/sql_sequence.cc 
b/sql/sql_sequence.cc index 35792bfe72e..be360c59e34 100644 --- a/sql/sql_sequence.cc +++ b/sql/sql_sequence.cc @@ -47,18 +47,19 @@ struct Field_definition static Field_definition sequence_structure[]= { - {"next_value", 21, &type_handler_longlong, {STRING_WITH_LEN("next not cached value")}, - FL}, - {"min_value", 21, &type_handler_longlong, {STRING_WITH_LEN("min value")}, FL}, - {"max_value", 21, &type_handler_longlong, {STRING_WITH_LEN("max value")}, FL}, - {"start", 21, &type_handler_longlong, {STRING_WITH_LEN("start value")}, FL}, + {"next_not_cached_value", 21, &type_handler_longlong, + {STRING_WITH_LEN("")}, FL}, + {"minimum_value", 21, &type_handler_longlong, STRING_WITH_LEN(""), FL}, + {"maximum_value", 21, &type_handler_longlong, STRING_WITH_LEN(""), FL}, + {"start_value", 21, &type_handler_longlong, {STRING_WITH_LEN("start value when sequences is created or value if RESTART is used")}, FL}, {"increment", 21, &type_handler_longlong, {C_STRING_WITH_LEN("increment value")}, FL}, - {"cache", 21, &type_handler_longlong, {STRING_WITH_LEN("cache size")}, FL}, - {"cycle", 1, &type_handler_tiny, {STRING_WITH_LEN("cycle state")}, + {"cache_size", 21, &type_handler_longlong, STRING_WITH_LEN(""), + FL | UNSIGNED_FLAG}, + {"cycle_option", 1, &type_handler_tiny, {STRING_WITH_LEN("0 if no cycles are allowed, 1 if the sequence should begin a new cycle when maximum_value is passed")}, FL | UNSIGNED_FLAG }, - {"round", 21, &type_handler_longlong, - {STRING_WITH_LEN("How many cycles has been done")}, FL}, + {"cycle_count", 21, &type_handler_longlong, + {STRING_WITH_LEN("How many cycles have been done")}, FL}, {NULL, 0, &type_handler_longlong, {STRING_WITH_LEN("")}, 0} }; @@ -458,9 +459,12 @@ int SEQUENCE::read_initial_values(TABLE *table_arg) DBUG_RETURN(error); } + /* - Read data from sequence table and update values - Done when table is opened + Do the actiual reading of data from sequence table and + update values in the sequence object. 
+ + Called once from when table is opened */ int SEQUENCE::read_stored_values() diff --git a/sql/sql_sequence.h b/sql/sql_sequence.h index b560d03ca52..1dc777657d5 100644 --- a/sql/sql_sequence.h +++ b/sql/sql_sequence.h @@ -88,7 +88,8 @@ protected: class SEQUENCE :public sequence_definition { public: - enum seq_init { SEQ_UNINTIALIZED, SEQ_IN_PREPARE, SEQ_READY_TO_USE }; + enum seq_init { SEQ_UNINTIALIZED, SEQ_IN_PREPARE, SEQ_IN_ALTER, + SEQ_READY_TO_USE }; SEQUENCE(); ~SEQUENCE(); int read_initial_values(TABLE *table); @@ -101,6 +102,7 @@ public: { sequence_definition::operator= (*seq); adjust_values(reserved_until); + all_values_used= 0; } longlong next_value(TABLE *table, bool second_round, int *error); bool set_value(TABLE *table, longlong next_value, ulonglong round_arg, diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 1fbd631b754..b2ee8a2eef0 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2000, 2015, Oracle and/or its affiliates. - Copyright (c) 2009, 2016, MariaDB + Copyright (c) 2009, 2017, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -1095,6 +1095,7 @@ public: is_handled= FALSE; break; } + /* fall through */ case ER_COLUMNACCESS_DENIED_ERROR: case ER_VIEW_NO_EXPLAIN: /* Error was anonymized, ignore all the same. 
*/ case ER_PROCACCESS_DENIED_ERROR: @@ -1181,8 +1182,8 @@ mysqld_show_create_get_fields(THD *thd, TABLE_LIST *table_list, else if (thd->lex->table_type == TABLE_TYPE_SEQUENCE && table_list->table->s->table_type != TABLE_TYPE_SEQUENCE) { - my_error(ER_WRONG_OBJECT, MYF(0), - table_list->db, table_list->table_name, "SEQUENCE"); + my_error(ER_NOT_SEQUENCE, MYF(0), + table_list->db, table_list->table_name); goto exit; } @@ -4372,7 +4373,8 @@ fill_schema_table_by_open(THD *thd, bool is_show_fields_or_keys, */ if (!is_show_fields_or_keys && result && (thd->get_stmt_da()->sql_errno() == ER_NO_SUCH_TABLE || - thd->get_stmt_da()->sql_errno() == ER_WRONG_OBJECT)) + thd->get_stmt_da()->sql_errno() == ER_WRONG_OBJECT || + thd->get_stmt_da()->sql_errno() == ER_NOT_SEQUENCE)) { /* Hide error for a non-existing table. @@ -4699,7 +4701,8 @@ static int fill_schema_table_from_frm(THD *thd, TABLE *table, if (!share) { if (thd->get_stmt_da()->sql_errno() == ER_NO_SUCH_TABLE || - thd->get_stmt_da()->sql_errno() == ER_WRONG_OBJECT) + thd->get_stmt_da()->sql_errno() == ER_WRONG_OBJECT || + thd->get_stmt_da()->sql_errno() == ER_NOT_SEQUENCE) { res= 0; } diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 462b78aeb62..90be57868cd 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -6896,7 +6896,8 @@ bool alter_table_manage_keys(TABLE *table, int indexes_were_disabled, case Alter_info::LEAVE_AS_IS: if (!indexes_were_disabled) break; - /* fall-through: disabled indexes */ + /* disabled indexes */ + /* fall through */ case Alter_info::DISABLE: error= table->file->ha_disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); } @@ -9002,7 +9003,9 @@ bool mysql_alter_table(THD *thd, const char *new_db, const char *new_name, TODO don't create the frm in the first place */ - deletefrm(alter_ctx.get_tmp_path()); + const char *path= alter_ctx.get_tmp_path(); + table->file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG); + deletefrm(path); my_free(const_cast<uchar*>(frm.str)); goto 
end_inplace; } @@ -9807,7 +9810,9 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, } if (to->file->ha_end_bulk_insert() && error <= 0) { - to->file->print_error(my_errno,MYF(0)); + /* Give error, if not already given */ + if (!thd->is_error()) + to->file->print_error(my_errno,MYF(0)); error= 1; } to->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index a0bbf39b138..6ae5a9b91f0 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -4738,9 +4738,11 @@ size_number: case 'g': case 'G': text_shift_number+=10; + /* fall through */ case 'm': case 'M': text_shift_number+=10; + /* fall through */ case 'k': case 'K': text_shift_number+=10; @@ -11970,17 +11972,22 @@ delete_limit_clause: | LIMIT limit_option ROWS_SYM EXAMINED_SYM { thd->parse_error(); MYSQL_YYABORT; } ; +opt_plus: + /* empty */ + | '+' + ; + int_num: - NUM { int error; $$= (int) my_strtoll10($1.str, (char**) 0, &error); } + opt_plus NUM { int error; $$= (int) my_strtoll10($2.str, (char**) 0, &error); } | '-' NUM { int error; $$= -(int) my_strtoll10($2.str, (char**) 0, &error); } ulong_num: - NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } + opt_plus NUM { int error; $$= (ulong) my_strtoll10($2.str, (char**) 0, &error); } | HEX_NUM { $$= (ulong) strtol($1.str, (char**) 0, 16); } - | LONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } - | ULONGLONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } - | DECIMAL_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } - | FLOAT_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } + | opt_plus LONG_NUM { int error; $$= (ulong) my_strtoll10($2.str, (char**) 0, &error); } + | opt_plus ULONGLONG_NUM { int error; $$= (ulong) my_strtoll10($2.str, (char**) 0, &error); } + | opt_plus DECIMAL_NUM { int error; $$= (ulong) my_strtoll10($2.str, (char**) 0, &error); } + | opt_plus FLOAT_NUM { int error; $$= (ulong) 
my_strtoll10($2.str, (char**) 0, &error); } ; real_ulong_num: @@ -11992,18 +11999,18 @@ real_ulong_num: ; longlong_num: - NUM { int error; $$= (longlong) my_strtoll10($1.str, (char**) 0, &error); } + opt_plus NUM { int error; $$= (longlong) my_strtoll10($2.str, (char**) 0, &error); } | LONG_NUM { int error; $$= (longlong) my_strtoll10($1.str, (char**) 0, &error); } | '-' NUM { int error; $$= -(longlong) my_strtoll10($2.str, (char**) 0, &error); } | '-' LONG_NUM { int error; $$= -(longlong) my_strtoll10($2.str, (char**) 0, &error); } ulonglong_num: - NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } - | ULONGLONG_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } - | LONG_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } - | DECIMAL_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } - | FLOAT_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } + opt_plus NUM { int error; $$= (ulonglong) my_strtoll10($2.str, (char**) 0, &error); } + | opt_plus ULONGLONG_NUM { int error; $$= (ulonglong) my_strtoll10($2.str, (char**) 0, &error); } + | opt_plus LONG_NUM { int error; $$= (ulonglong) my_strtoll10($2.str, (char**) 0, &error); } + | opt_plus DECIMAL_NUM { int error; $$= (ulonglong) my_strtoll10($2.str, (char**) 0, &error); } + | opt_plus FLOAT_NUM { int error; $$= (ulonglong) my_strtoll10($2.str, (char**) 0, &error); } ; real_ulonglong_num: diff --git a/sql/sql_yacc_ora.yy b/sql/sql_yacc_ora.yy index 3787f34aec4..9cd0944077a 100644 --- a/sql/sql_yacc_ora.yy +++ b/sql/sql_yacc_ora.yy @@ -12051,17 +12051,22 @@ delete_limit_clause: | LIMIT limit_option ROWS_SYM EXAMINED_SYM { thd->parse_error(); MYSQL_YYABORT; } ; +opt_plus: + /* empty */ + | '+' + ; + int_num: - NUM { int error; $$= (int) my_strtoll10($1.str, (char**) 0, &error); } + opt_plus NUM { int error; $$= (int) my_strtoll10($2.str, (char**) 0, &error); } | '-' NUM { int error; $$= -(int) 
my_strtoll10($2.str, (char**) 0, &error); } ulong_num: - NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } + opt_plus NUM { int error; $$= (ulong) my_strtoll10($2.str, (char**) 0, &error); } | HEX_NUM { $$= (ulong) strtol($1.str, (char**) 0, 16); } - | LONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } - | ULONGLONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } - | DECIMAL_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } - | FLOAT_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } + | opt_plus LONG_NUM { int error; $$= (ulong) my_strtoll10($2.str, (char**) 0, &error); } + | opt_plus ULONGLONG_NUM { int error; $$= (ulong) my_strtoll10($2.str, (char**) 0, &error); } + | opt_plus DECIMAL_NUM { int error; $$= (ulong) my_strtoll10($2.str, (char**) 0, &error); } + | opt_plus FLOAT_NUM { int error; $$= (ulong) my_strtoll10($2.str, (char**) 0, &error); } ; real_ulong_num: @@ -12073,18 +12078,18 @@ real_ulong_num: ; longlong_num: - NUM { int error; $$= (longlong) my_strtoll10($1.str, (char**) 0, &error); } + opt_plus NUM { int error; $$= (longlong) my_strtoll10($2.str, (char**) 0, &error); } | LONG_NUM { int error; $$= (longlong) my_strtoll10($1.str, (char**) 0, &error); } | '-' NUM { int error; $$= -(longlong) my_strtoll10($2.str, (char**) 0, &error); } | '-' LONG_NUM { int error; $$= -(longlong) my_strtoll10($2.str, (char**) 0, &error); } ulonglong_num: - NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } - | ULONGLONG_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } - | LONG_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } - | DECIMAL_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } - | FLOAT_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } + opt_plus NUM { int error; $$= (ulonglong) my_strtoll10($2.str, (char**) 
0, &error); } + | opt_plus ULONGLONG_NUM { int error; $$= (ulonglong) my_strtoll10($2.str, (char**) 0, &error); } + | opt_plus LONG_NUM { int error; $$= (ulonglong) my_strtoll10($2.str, (char**) 0, &error); } + | opt_plus DECIMAL_NUM { int error; $$= (ulonglong) my_strtoll10($2.str, (char**) 0, &error); } + | opt_plus FLOAT_NUM { int error; $$= (ulonglong) my_strtoll10($2.str, (char**) 0, &error); } ; real_ulonglong_num: diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index c2a5e183187..28665098d4d 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -1740,7 +1740,10 @@ Sys_var_gtid_binlog_state::do_check(THD *thd, set_var *var) return true; } if (res->length() == 0) + { list= NULL; + list_len= 0; + } else if (!(list= gtid_parse_string_to_list(res->ptr(), res->length(), &list_len))) { diff --git a/sql/table.cc b/sql/table.cc index 9c3613a879c..0a23d2f1f41 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -2056,6 +2056,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, keyinfo= share->key_info; uint primary_key= my_strcasecmp(system_charset_info, share->keynames.type_names[0], primary_key_name) ? MAX_KEY : 0; + KEY* key_first_info; if (primary_key >= MAX_KEY && keyinfo->flags & HA_NOSAME) { @@ -2135,19 +2136,38 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, keyinfo->name_length+1); } + if (!key) + key_first_info= keyinfo; + if (ext_key_parts > share->key_parts && key) { KEY_PART_INFO *new_key_part= (keyinfo-1)->key_part + (keyinfo-1)->ext_key_parts; uint add_keyparts_for_this_key= add_first_key_parts; + uint length_bytes= 0, len_null_byte= 0, ext_key_length= 0; + Field *field; /* Do not extend the key that contains a component defined over the beginning of a field. 
*/ for (i= 0; i < keyinfo->user_defined_key_parts; i++) - { + { uint fieldnr= keyinfo->key_part[i].fieldnr; + field= share->field[keyinfo->key_part[i].fieldnr-1]; + + if (field->null_ptr) + len_null_byte= HA_KEY_NULL_LENGTH; + + if (field->type() == MYSQL_TYPE_BLOB || + field->real_type() == MYSQL_TYPE_VARCHAR || + field->type() == MYSQL_TYPE_GEOMETRY) + { + length_bytes= HA_KEY_BLOB_LENGTH; + } + + ext_key_length+= keyinfo->key_part[i].length + len_null_byte + + length_bytes; if (share->field[fieldnr-1]->key_length() != keyinfo->key_part[i].length) { @@ -2156,6 +2176,23 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, } } + if (add_keyparts_for_this_key) + { + for (i= 0; i < add_keyparts_for_this_key; i++) + { + uint pk_part_length= key_first_info->key_part[i].store_length; + if (keyinfo->ext_key_part_map & 1<<i) + { + if (ext_key_length + pk_part_length > MAX_KEY_LENGTH) + { + add_keyparts_for_this_key= i; + break; + } + ext_key_length+= pk_part_length; + } + } + } + if (add_keyparts_for_this_key < (keyinfo->ext_key_parts - keyinfo->user_defined_key_parts)) { diff --git a/sql/wsrep_thd.cc b/sql/wsrep_thd.cc index 4acf8a3bf1e..f8db20ace99 100644 --- a/sql/wsrep_thd.cc +++ b/sql/wsrep_thd.cc @@ -384,7 +384,8 @@ static void wsrep_replication_process(THD *thd) case WSREP_TRX_MISSING: /* these suggests a bug in provider code */ WSREP_WARN("bad return from recv() call: %d", rcode); - /* fall through to node shutdown */ + /* Shut down this node. */ + /* fall through */ case WSREP_FATAL: /* Cluster connectivity is lost. 
* diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt index a602084b5bd..81441892215 100644 --- a/storage/connect/CMakeLists.txt +++ b/storage/connect/CMakeLists.txt @@ -18,10 +18,10 @@ SET(CONNECT_PLUGIN_DYNAMIC "connect") SET(CONNECT_SOURCES ha_connect.cc connect.cc user_connect.cc mycat.cc -fmdlex.c osutil.c plugutil.c rcmsg.c rcmsg.h +fmdlex.c osutil.c rcmsg.c rcmsg.h array.cpp blkfil.cpp colblk.cpp csort.cpp -filamap.cpp filamdbf.cpp filamfix.cpp filamgz.cpp filamtxt.cpp -filter.cpp json.cpp jsonudf.cpp maputil.cpp myconn.cpp myutil.cpp plgdbutl.cpp +filamap.cpp filamdbf.cpp filamfix.cpp filamgz.cpp filamtxt.cpp filter.cpp +json.cpp jsonudf.cpp maputil.cpp myconn.cpp myutil.cpp plgdbutl.cpp plugutil.cpp reldef.cpp tabcol.cpp tabdos.cpp tabext.cpp tabfix.cpp tabfmt.cpp tabjson.cpp table.cpp tabmul.cpp tabmysql.cpp taboccur.cpp tabpivot.cpp tabsys.cpp tabtbl.cpp tabutil.cpp tabvir.cpp tabxcl.cpp valblk.cpp value.cpp xindex.cpp xobject.cpp @@ -38,7 +38,7 @@ user_connect.h valblk.h value.h xindex.h xobject.h xtable.h) # Definitions that are shared for all OSes # add_definitions( -DMARIADB -DFORCE_INIT_OF_VARS -Dconnect_EXPORTS) -add_definitions( -DHUGE_SUPPORT -DGZ_SUPPORT -DPIVOT_SUPPORT ) +add_definitions( -DHUGE_SUPPORT -DGZ_SUPPORT -DPIVOT_SUPPORT -DUSE_TRY ) # @@ -270,8 +270,8 @@ IF(CONNECT_WITH_JDBC) # Find required libraries and include directories SET (JAVA_SOURCES JdbcInterface.java) add_jar(JdbcInterface ${JAVA_SOURCES}) - install_jar(JdbcInterface DESTINATION ${INSTALL_PLUGINDIR} COMPONENT connect-engine) INSTALL(FILES ${CMAKE_CURRENT_SOURCE_DIR}/JavaWrappers.jar + ${CMAKE_CURRENT_BINARY_DIR}/JdbcInterface.jar DESTINATION ${INSTALL_PLUGINDIR} COMPONENT connect-engine) add_definitions(-DJDBC_SUPPORT) ELSE() diff --git a/storage/connect/array.cpp b/storage/connect/array.cpp index 1998ab890e9..6e0da312ca3 100644 --- a/storage/connect/array.cpp +++ b/storage/connect/array.cpp @@ -155,6 +155,7 @@ ARRAY::ARRAY(PGLOBAL g, int type, int 
size, int length, int prec) switch (type) { case TYPE_STRING: Len = length; + /* fall through */ case TYPE_SHORT: case TYPE_INT: case TYPE_DOUBLE: @@ -518,8 +519,8 @@ bool ARRAY::FilTest(PGLOBAL g, PVAL valp, OPVAL opc, int opm) vp = valp; } else if (opc != OP_EXIST) { - sprintf(g->Message, MSG(MISSING_ARG), opc); - longjmp(g->jumper[g->jump_level], TYPE_ARRAY); + sprintf(g->Message, MSG(MISSING_ARG), opc); + throw TYPE_ARRAY; } else // OP_EXIST return Nval > 0; @@ -592,6 +593,7 @@ int ARRAY::Convert(PGLOBAL g, int k, PVAL vp) switch (Type) { case TYPE_DOUBLE: prec = 2; + /* fall through */ case TYPE_SHORT: case TYPE_INT: case TYPE_DATE: @@ -681,15 +683,15 @@ void ARRAY::SetPrecision(PGLOBAL g, int p) { if (Vblp == NULL) { strcpy(g->Message, MSG(PREC_VBLP_NULL)); - longjmp(g->jumper[g->jump_level], TYPE_ARRAY); + throw TYPE_ARRAY; } // endif Vblp bool was = Vblp->IsCi(); if (was && !p) { strcpy(g->Message, MSG(BAD_SET_CASE)); - longjmp(g->jumper[g->jump_level], TYPE_ARRAY); - } // endif Vblp + throw TYPE_ARRAY; + } // endif Vblp if (was || !p) return; @@ -699,7 +701,7 @@ void ARRAY::SetPrecision(PGLOBAL g, int p) if (!was && Type == TYPE_STRING) // Must be resorted to eliminate duplicate strings if (Sort(g)) - longjmp(g->jumper[g->jump_level], TYPE_ARRAY); + throw TYPE_ARRAY; } // end of SetPrecision @@ -977,14 +979,14 @@ PSZ ARRAY::MakeArrayList(PGLOBAL g) size_t z, len = 2; if (Type == TYPE_LIST) - return "(?" "?" "?)"; // To be implemented + return (PSZ)("(?" "?" "?)"); // To be implemented z = MY_MAX(24, GetTypeSize(Type, Len) + 4); tp = (char*)PlugSubAlloc(g, NULL, z); for (i = 0; i < Nval; i++) { Value->SetValue_pvblk(Vblp, i); - Value->Print(g, tp, z); + Value->Prints(g, tp, z); len += strlen(tp); } // enfor i @@ -996,7 +998,7 @@ PSZ ARRAY::MakeArrayList(PGLOBAL g) for (i = 0; i < Nval;) { Value->SetValue_pvblk(Vblp, i); - Value->Print(g, tp, z); + Value->Prints(g, tp, z); strcat(p, tp); strcat(p, (++i == Nval) ? 
")" : ","); } // enfor i @@ -1010,7 +1012,7 @@ PSZ ARRAY::MakeArrayList(PGLOBAL g) /***********************************************************************/ /* Make file output of ARRAY contents. */ /***********************************************************************/ -void ARRAY::Print(PGLOBAL g, FILE *f, uint n) +void ARRAY::Printf(PGLOBAL g, FILE *f, uint n) { char m[64]; int lim = MY_MIN(Nval,10); @@ -1027,7 +1029,7 @@ void ARRAY::Print(PGLOBAL g, FILE *f, uint n) if (Vblp) for (int i = 0; i < lim; i++) { Value->SetValue_pvblk(Vblp, i); - Value->Print(g, f, n+4); + Value->Printf(g, f, n+4); } // endfor i } else @@ -1038,7 +1040,7 @@ void ARRAY::Print(PGLOBAL g, FILE *f, uint n) /***********************************************************************/ /* Make string output of ARRAY contents. */ /***********************************************************************/ -void ARRAY::Print(PGLOBAL, char *ps, uint z) +void ARRAY::Prints(PGLOBAL, char *ps, uint z) { if (z < 16) return; diff --git a/storage/connect/array.h b/storage/connect/array.h index dfc3638de8a..bd38344de06 100644 --- a/storage/connect/array.h +++ b/storage/connect/array.h @@ -56,8 +56,8 @@ class DllExport ARRAY : public XOBJECT, public CSORT { // Array descblock virtual bool Compare(PXOB) {assert(false); return false;} virtual bool SetFormat(PGLOBAL, FORMAT&) {assert(false); return false;} //virtual int CheckSpcCol(PTDB, int) {return 0;} - virtual void Print(PGLOBAL g, FILE *f, uint n); - virtual void Print(PGLOBAL g, char *ps, uint z); + virtual void Printf(PGLOBAL g, FILE *f, uint n); + virtual void Prints(PGLOBAL g, char *ps, uint z); // void Empty(void); void SetPrecision(PGLOBAL g, int p); bool AddValue(PGLOBAL g, PSZ sp); diff --git a/storage/connect/blkfil.cpp b/storage/connect/blkfil.cpp index 50535d20a3d..3a0292481be 100644 --- a/storage/connect/blkfil.cpp +++ b/storage/connect/blkfil.cpp @@ -1,11 +1,11 @@ /************* BlkFil C++ Program Source Code File (.CPP) **************/ /* 
PROGRAM NAME: BLKFIL */ /* ------------- */ -/* Version 2.5 */ +/* Version 2.6 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2004-2015 */ +/* (C) Copyright to the author Olivier BERTRAND 2004-2017 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -56,7 +56,7 @@ BLOCKFILTER::BLOCKFILTER(PTDBDOS tdbp, int op) /***********************************************************************/ /* Make file output of BLOCKFILTER contents. */ /***********************************************************************/ -void BLOCKFILTER::Print(PGLOBAL, FILE *f, uint n) +void BLOCKFILTER::Printf(PGLOBAL, FILE *f, uint n) { char m[64]; @@ -70,7 +70,7 @@ void BLOCKFILTER::Print(PGLOBAL, FILE *f, uint n) /***********************************************************************/ /* Make string output of BLOCKFILTER contents. */ /***********************************************************************/ -void BLOCKFILTER::Print(PGLOBAL, char *ps, uint z) +void BLOCKFILTER::Prints(PGLOBAL, char *ps, uint z) { strncat(ps, "BlockFilter(s)", z); } // end of Print @@ -595,8 +595,8 @@ BLKFILIN::BLKFILIN(PGLOBAL g, PTDBDOS tdbp, int op, int opm, PXOB *xp) if (Colp->GetResultType() != Type) { sprintf(g->Message, "BLKFILIN: %s", MSG(VALTYPE_NOMATCH)); - longjmp(g->jumper[g->jump_level], 99); - } else if (Colp->GetValue()->IsCi()) + throw g->Message; + } else if (Colp->GetValue()->IsCi()) Arap->SetPrecision(g, 1); // Case insensitive Sorted = Colp->IsSorted() > 0; @@ -995,7 +995,7 @@ int BLOCKINDEX::BlockEval(PGLOBAL g) /***********************************************************************/ /* Make file output of BLOCKINDEX contents. 
*/ /***********************************************************************/ -void BLOCKINDEX::Print(PGLOBAL g, FILE *f, UINT n) +void BLOCKINDEX::Printf(PGLOBAL g, FILE *f, UINT n) { char m[64]; @@ -1013,7 +1013,7 @@ void BLOCKINDEX::Print(PGLOBAL g, FILE *f, UINT n) /***********************************************************************/ /* Make string output of BLOCKINDEX contents. */ /***********************************************************************/ -void BLOCKINDEX::Print(PGLOBAL g, char *ps, UINT z) +void BLOCKINDEX::Prints(PGLOBAL g, char *ps, UINT z) { strncat(ps, "BlockIndex(es)", z); } // end of Print diff --git a/storage/connect/blkfil.h b/storage/connect/blkfil.h index 00b00139042..61b02c53c14 100644 --- a/storage/connect/blkfil.h +++ b/storage/connect/blkfil.h @@ -27,8 +27,8 @@ class DllExport BLOCKFILTER : public BLOCK { /* Block Filter */ // Methods virtual void Reset(PGLOBAL) = 0; virtual int BlockEval(PGLOBAL) = 0; - virtual void Print(PGLOBAL g, FILE *f, uint n); - virtual void Print(PGLOBAL g, char *ps, uint z); + virtual void Printf(PGLOBAL g, FILE *f, uint n); + virtual void Prints(PGLOBAL g, char *ps, uint z); protected: BLOCKFILTER(void) {} // Standard constructor not to be used @@ -234,8 +234,8 @@ class DllExport BLOCKINDEX : public BLOCK { /* Indexing Test Block */ // Methods void Reset(void); virtual int BlockEval(PGLOBAL); - virtual void Print(PGLOBAL g, FILE *f, UINT n); - virtual void Print(PGLOBAL g, char *ps, UINT z); + virtual void Printf(PGLOBAL g, FILE *f, UINT n); + virtual void Prints(PGLOBAL g, char *ps, UINT z); protected: BLOCKINDEX(void) {} // Standard constructor not to be used diff --git a/storage/connect/block.h b/storage/connect/block.h index aa4edde5ec9..8ac7be80988 100644 --- a/storage/connect/block.h +++ b/storage/connect/block.h @@ -44,8 +44,8 @@ class DllExport BLOCK { return (PlugSubAlloc(g, p, size)); } // end of new - virtual void Print(PGLOBAL, FILE *, uint) {} // Produce file desc - virtual void 
Print(PGLOBAL, char *, uint) {} // Produce string desc + virtual void Printf(PGLOBAL, FILE *, uint) {} // Produce file desc + virtual void Prints(PGLOBAL, char *, uint) {} // Produce string desc #if !defined(__BORLANDC__) // Avoid warning C4291 by defining a matching dummy delete operator diff --git a/storage/connect/catalog.h b/storage/connect/catalog.h index 70304c410cc..48347d7519e 100644 --- a/storage/connect/catalog.h +++ b/storage/connect/catalog.h @@ -36,7 +36,7 @@ typedef struct _curtab { /* Defines the structure used to get column catalog info. */ /***********************************************************************/ typedef struct _colinfo { - char *Name; + PCSZ Name; int Type; int Offset; int Length; @@ -45,9 +45,9 @@ typedef struct _colinfo { int Scale; int Opt; int Freq; - char *Remark; - char *Datefmt; - char *Fieldfmt; + PCSZ Remark; + PCSZ Datefmt; + PCSZ Fieldfmt; ushort Flags; // Used by MariaDB CONNECT handlers } COLINFO, *PCOLINFO; @@ -68,11 +68,9 @@ class DllExport CATALOG { bool GetDefHuge(void) {return DefHuge;} void SetDefHuge(bool b) {DefHuge = b;} char *GetCbuf(void) {return Cbuf;} -//char *GetDataPath(void) {return (char*)DataPath;} // Methods virtual void Reset(void) {} -//virtual void SetDataPath(PGLOBAL g, const char *path) {} virtual bool CheckName(PGLOBAL, char*) {return true;} virtual bool ClearName(PGLOBAL, PSZ) {return true;} virtual PRELDEF MakeOneTableDesc(PGLOBAL, LPCSTR, LPCSTR) {return NULL;} @@ -102,7 +100,6 @@ class DllExport CATALOG { int Cblen; /* Length of suballoc. 
buffer */ CURTAB Ctb; /* Used to enumerate tables */ bool DefHuge; /* true: tables default to huge */ -//LPCSTR DataPath; /* Is the Path of DB data dir */ }; // end of class CATALOG #endif // __CATALOG__H diff --git a/storage/connect/colblk.cpp b/storage/connect/colblk.cpp index 58841387249..324d59ab40e 100644 --- a/storage/connect/colblk.cpp +++ b/storage/connect/colblk.cpp @@ -195,10 +195,10 @@ int COLBLK::GetLengthEx(void) /* corresponding to this column and convert it to buffer type. */ /***********************************************************************/ void COLBLK::ReadColumn(PGLOBAL g) - { +{ sprintf(g->Message, MSG(UNDEFINED_AM), "ReadColumn"); - longjmp(g->jumper[g->jump_level], TYPE_COLBLK); - } // end of ReadColumn + throw TYPE_COLBLK; +} // end of ReadColumn /***********************************************************************/ /* WriteColumn: what this routine does is to access the last line */ @@ -206,15 +206,15 @@ void COLBLK::ReadColumn(PGLOBAL g) /* corresponding to this column from the column buffer and type. */ /***********************************************************************/ void COLBLK::WriteColumn(PGLOBAL g) - { +{ sprintf(g->Message, MSG(UNDEFINED_AM), "WriteColumn"); - longjmp(g->jumper[g->jump_level], TYPE_COLBLK); - } // end of WriteColumn + throw TYPE_COLBLK; +} // end of WriteColumn /***********************************************************************/ /* Make file output of a column descriptor block. */ /***********************************************************************/ -void COLBLK::Print(PGLOBAL, FILE *f, uint n) +void COLBLK::Printf(PGLOBAL, FILE *f, uint n) { char m[64]; int i; @@ -237,7 +237,7 @@ void COLBLK::Print(PGLOBAL, FILE *f, uint n) /***********************************************************************/ /* Make string output of a column descriptor block. 
*/ /***********************************************************************/ -void COLBLK::Print(PGLOBAL, char *ps, uint) +void COLBLK::Prints(PGLOBAL, char *ps, uint) { sprintf(ps, "R%d.%s", To_Tdb->GetTdb_No(), Name); } // end of Print @@ -260,10 +260,10 @@ SPCBLK::SPCBLK(PCOLUMN cp) /* corresponding to this column from the column buffer and type. */ /***********************************************************************/ void SPCBLK::WriteColumn(PGLOBAL g) - { +{ sprintf(g->Message, MSG(SPCOL_READONLY), Name); - longjmp(g->jumper[g->jump_level], TYPE_COLBLK); - } // end of WriteColumn + throw TYPE_COLBLK; +} // end of WriteColumn /***********************************************************************/ /* RIDBLK constructor for the ROWID special column. */ @@ -377,7 +377,7 @@ PRTBLK::PRTBLK(PCOLUMN cp) : SPCBLK(cp) void PRTBLK::ReadColumn(PGLOBAL g) { if (Pname == NULL) { - char *p; + const char *p; Pname = To_Tdb->GetDef()->GetStringCatInfo(g, "partname", "?"); p = strrchr(Pname, '#'); @@ -407,7 +407,7 @@ SIDBLK::SIDBLK(PCOLUMN cp) : SPCBLK(cp) void SIDBLK::ReadColumn(PGLOBAL) { //if (Sname == NULL) { - Sname = (char*)To_Tdb->GetServer(); + Sname = To_Tdb->GetServer(); Value->SetValue_psz(Sname); // } // endif Sname diff --git a/storage/connect/colblk.h b/storage/connect/colblk.h index c64f9d95129..608aa040787 100644 --- a/storage/connect/colblk.h +++ b/storage/connect/colblk.h @@ -72,8 +72,8 @@ class DllExport COLBLK : public XOBJECT { virtual void SetTo_Val(PVAL) {} virtual void ReadColumn(PGLOBAL g); virtual void WriteColumn(PGLOBAL g); - virtual void Print(PGLOBAL g, FILE *, uint); - virtual void Print(PGLOBAL g, char *, uint); + virtual void Printf(PGLOBAL g, FILE *, uint); + virtual void Prints(PGLOBAL g, char *, uint); virtual bool VarSize(void) {return false;} bool InitValue(PGLOBAL g); @@ -154,7 +154,7 @@ class DllExport FIDBLK : public SPCBLK { virtual void ReadColumn(PGLOBAL g); protected: - PSZ Fn; // The current To_File of the table + PCSZ Fn; // 
The current To_File of the table OPVAL Op; // The file part operator }; // end of class FIDBLK @@ -178,7 +178,7 @@ class DllExport TIDBLK : public SPCBLK { TIDBLK(void) {} // Members - PSZ Tname; // The current table name + PCSZ Tname; // The current table name }; // end of class TIDBLK /***********************************************************************/ @@ -201,7 +201,7 @@ class DllExport PRTBLK : public SPCBLK { PRTBLK(void) {} // Members - PSZ Pname; // The current partition name + PCSZ Pname; // The current partition name }; // end of class PRTBLK /***********************************************************************/ @@ -224,7 +224,7 @@ class DllExport SIDBLK : public SPCBLK { SIDBLK(void) {} // Members - PSZ Sname; // The current server name + PCSZ Sname; // The current server name }; // end of class SIDBLK #endif // __COLBLK__H diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc index 098119e7be1..e15cc724b85 100644 --- a/storage/connect/connect.cc +++ b/storage/connect/connect.cc @@ -1,4 +1,4 @@ -/* Copyright (C) Olivier Bertrand 2004 - 2015 +/* Copyright (C) Olivier Bertrand 2004 - 2017 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -15,10 +15,10 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */ /***********************************************************************/ -/* Author Olivier BERTRAND bertrandop@gmail.com 2004-2015 */ +/* Author Olivier BERTRAND bertrandop@gmail.com 2004-2017 */ /* */ -/* WHAT THIS PROGRAM DOES: */ -/* ----------------------- */ +/* WHAT THIS PROGRAM DOES: */ +/* ----------------------- */ /* This program are the CONNECT general purpose semantic routines. 
*/ /***********************************************************************/ #ifdef USE_PRAGMA_IMPLEMENTATION @@ -117,11 +117,10 @@ bool CntCheckDB(PGLOBAL g, PHC handler, const char *pathname) handler); // Set the database path for this table - handler->SetDataPath(g, pathname); + if (handler->SetDataPath(g, pathname)) + return true; if (dbuserp->Catalog) { -// ((MYCAT *)dbuserp->Catalog)->SetHandler(handler); done later -// ((MYCAT *)dbuserp->Catalog)->SetDataPath(g, pathname); return false; // Nothing else to do } // endif Catalog @@ -138,9 +137,6 @@ bool CntCheckDB(PGLOBAL g, PHC handler, const char *pathname) if (!(dbuserp->Catalog= new MYCAT(handler))) return true; -//((MYCAT *)dbuserp->Catalog)->SetDataPath(g, pathname); -//dbuserp->UseTemp= TMP_AUTO; - /*********************************************************************/ /* All is correct. */ /*********************************************************************/ @@ -172,7 +168,7 @@ bool CntInfo(PGLOBAL g, PTDB tp, PXF info) // info->mean_rec_length= tdbp->GetLrecl(); info->mean_rec_length= 0; - info->data_file_name= (b) ? NULL : tdbp->GetFile(g); + info->data_file_name= (b) ? NULL : (char*)tdbp->GetFile(g); return true; } else { info->data_file_length= 0; @@ -188,49 +184,43 @@ bool CntInfo(PGLOBAL g, PTDB tp, PXF info) /* GetTDB: Get the table description block of a CONNECT table. */ /***********************************************************************/ PTDB CntGetTDB(PGLOBAL g, LPCSTR name, MODE mode, PHC h) - { - int rc; - PTDB tdbp; - PTABLE tabp; - PDBUSER dup= PlgGetUser(g); - volatile PCATLG cat= (dup) ? dup->Catalog : NULL; // Safe over longjmp - - if (trace) - printf("CntGetTDB: name=%s mode=%d cat=%p\n", name, mode, cat); +{ + PTDB tdbp; + PTABLE tabp; + PDBUSER dup = PlgGetUser(g); + volatile PCATLG cat = (dup) ? 
dup->Catalog : NULL; // Safe over longjmp - if (!cat) - return NULL; + if (trace) + printf("CntGetTDB: name=%s mode=%d cat=%p\n", name, mode, cat); - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return NULL; - } // endif jump_level + if (!cat) + return NULL; - if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) { - tdbp= NULL; - goto err; - } // endif rc + try { + // Get table object from the catalog + tabp = new(g) XTAB(name); - // Get table object from the catalog - tabp= new(g) XTAB(name); + if (trace) + printf("CntGetTDB: tabp=%p\n", tabp); - if (trace) - printf("CntGetTDB: tabp=%p\n", tabp); + // Perhaps this should be made thread safe + ((MYCAT*)cat)->SetHandler(h); - // Perhaps this should be made thread safe - ((MYCAT*)cat)->SetHandler(h); + if (!(tdbp = cat->GetTable(g, tabp, mode))) + printf("CntGetTDB: %s\n", g->Message); - if (!(tdbp= cat->GetTable(g, tabp, mode))) - printf("CntGetTDB: %s\n", g->Message); + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + } catch (const char *msg) { + strcpy(g->Message, msg); + } // end catch - err: if (trace) printf("Returning tdbp=%p mode=%d\n", tdbp, mode); - g->jump_level--; return tdbp; - } // end of CntGetTDB +} // end of CntGetTDB /***********************************************************************/ /* OPENTAB: Open a Table. 
*/ @@ -239,7 +229,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, bool del, PHC) { char *p; - int i, n, rc; + int i, n; bool rcop= true; PCOL colp; //PCOLUMN cp; @@ -254,120 +244,116 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, return true; } // endif tdbp - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return true; - } // endif jump_level - - if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) { - goto err; - } // endif rc - - if (!c1) { - if (mode == MODE_INSERT) - // Allocate all column blocks for that table - tdbp->ColDB(g, NULL, 0); - - } else for (p= c1; *p; p+= n) { - // Allocate only used column blocks - if (trace) - printf("Allocating column %s\n", p); - - g->Message[0] = 0; // To check whether ColDB made an error message - colp= tdbp->ColDB(g, p, 0); - - if (!colp && !(mode == MODE_INSERT && tdbp->IsSpecial(p))) { - if (g->Message[0] == 0) - sprintf(g->Message, MSG(COL_ISNOT_TABLE), p, tdbp->GetName()); - - goto err; - } // endif colp - - n= strlen(p) + 1; - } // endfor p - - for (i= 0, colp= tdbp->GetColumns(); colp; i++, colp= colp->GetNext()) { - if (colp->InitValue(g)) - goto err; - - if (mode == MODE_INSERT) - // Allow type conversion - if (colp->SetBuffer(g, colp->GetValue(), true, false)) - goto err; - - colp->AddColUse(U_P); // For PLG tables - } // endfor colp - - /*********************************************************************/ - /* In Update mode, the updated column blocks must be distinct from */ - /* the read column blocks. So make a copy of the TDB and allocate */ - /* its column blocks in mode write (required by XML tables). 
*/ - /*********************************************************************/ - if (mode == MODE_UPDATE) { - PTDBASE utp; - - if (!(utp= (PTDBASE)tdbp->Duplicate(g))) { - sprintf(g->Message, MSG(INV_UPDT_TABLE), tdbp->GetName()); - goto err; - } // endif tp - - if (!c2) - // Allocate all column blocks for that table - utp->ColDB(g, NULL, 0); - else for (p= c2; *p; p+= n) { - // Allocate only used column blocks - colp= utp->ColDB(g, p, 0); - n= strlen(p) + 1; - } // endfor p - - for (i= 0, colp= utp->GetColumns(); colp; i++, colp= colp->GetNext()) { - if (colp->InitValue(g)) - goto err; - - if (colp->SetBuffer(g, colp->GetValue(), true, false)) - goto err; - - } // endfor colp - - // Attach the updated columns list to the main table - tdbp->SetSetCols(utp->GetColumns()); - } else if (tdbp && mode == MODE_INSERT) - tdbp->SetSetCols(tdbp->GetColumns()); - - // Now do open the physical table - if (trace) - printf("Opening table %s in mode %d tdbp=%p\n", - tdbp->GetName(), mode, tdbp); - -//tdbp->SetMode(mode); - - if (del/* && (tdbp->GetFtype() != RECFM_NAF*/) { - // To avoid erasing the table when doing a partial delete - // make a fake Next + try { + if (!c1) { + if (mode == MODE_INSERT) + // Allocate all column blocks for that table + tdbp->ColDB(g, NULL, 0); + + } else for (p = c1; *p; p += n) { + // Allocate only used column blocks + if (trace) + printf("Allocating column %s\n", p); + + g->Message[0] = 0; // To check whether ColDB made an error message + colp = tdbp->ColDB(g, p, 0); + + if (!colp && !(mode == MODE_INSERT && tdbp->IsSpecial(p))) { + if (g->Message[0] == 0) + sprintf(g->Message, MSG(COL_ISNOT_TABLE), p, tdbp->GetName()); + + throw 1; + } // endif colp + + n = strlen(p) + 1; + } // endfor p + + for (i = 0, colp = tdbp->GetColumns(); colp; i++, colp = colp->GetNext()) { + if (colp->InitValue(g)) + throw 2; + + if (mode == MODE_INSERT) + // Allow type conversion + if (colp->SetBuffer(g, colp->GetValue(), true, false)) + throw 3; + + 
colp->AddColUse(U_P); // For PLG tables + } // endfor colp + + /*******************************************************************/ + /* In Update mode, the updated column blocks must be distinct from */ + /* the read column blocks. So make a copy of the TDB and allocate */ + /* its column blocks in mode write (required by XML tables). */ + /*******************************************************************/ + if (mode == MODE_UPDATE) { + PTDBASE utp; + + if (!(utp = (PTDBASE)tdbp->Duplicate(g))) { + sprintf(g->Message, MSG(INV_UPDT_TABLE), tdbp->GetName()); + throw 4; + } // endif tp + + if (!c2) + // Allocate all column blocks for that table + utp->ColDB(g, NULL, 0); + else for (p = c2; *p; p += n) { + // Allocate only used column blocks + colp = utp->ColDB(g, p, 0); + n = strlen(p) + 1; + } // endfor p + + for (i = 0, colp = utp->GetColumns(); colp; i++, colp = colp->GetNext()) { + if (colp->InitValue(g)) + throw 5; + + if (colp->SetBuffer(g, colp->GetValue(), true, false)) + throw 6; + + } // endfor colp + + // Attach the updated columns list to the main table + tdbp->SetSetCols(utp->GetColumns()); + } else if (tdbp && mode == MODE_INSERT) + tdbp->SetSetCols(tdbp->GetColumns()); + + // Now do open the physical table + if (trace) + printf("Opening table %s in mode %d tdbp=%p\n", + tdbp->GetName(), mode, tdbp); + + //tdbp->SetMode(mode); + + if (del/* && (tdbp->GetFtype() != RECFM_NAF*/) { + // To avoid erasing the table when doing a partial delete + // make a fake Next // PDOSDEF ddp= new(g) DOSDEF; // PTDB tp= new(g) TDBDOS(ddp, NULL); - tdbp->SetNext((PTDB)1); - dup->Check &= ~CHK_DELETE; - } // endif del + tdbp->SetNext((PTDB)1); + dup->Check &= ~CHK_DELETE; + } // endif del - if (trace) - printf("About to open the table: tdbp=%p\n", tdbp); + if (trace) + printf("About to open the table: tdbp=%p\n", tdbp); - if (mode != MODE_ANY && mode != MODE_ALTER) { - if (tdbp->OpenDB(g)) { - printf("%s\n", g->Message); - goto err; - } else - tdbp->SetNext(NULL); + if 
(mode != MODE_ANY && mode != MODE_ALTER) { + if (tdbp->OpenDB(g)) { + printf("%s\n", g->Message); + throw 7; + } else + tdbp->SetNext(NULL); + + } // endif mode - } // endif mode + rcop = false; - rcop= false; + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + } catch (const char *msg) { + strcpy(g->Message, msg); + } // end catch - err: - g->jump_level--; return rcop; } // end of CntOpenTable @@ -387,50 +373,40 @@ bool CntRewindTable(PGLOBAL g, PTDB tdbp) /* Evaluate all columns after a record is read. */ /***********************************************************************/ RCODE EvalColumns(PGLOBAL g, PTDB tdbp, bool reset, bool mrr) - { +{ RCODE rc= RC_OK; PCOL colp; - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - if (trace) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - printf("EvalColumns: %s\n", g->Message); - } // endif - - return RC_FX; - } // endif jump_level - - if (setjmp(g->jumper[++g->jump_level]) != 0) { - if (trace) - printf("Error reading columns: %s\n", g->Message); + try { + for (colp = tdbp->GetColumns(); rc == RC_OK && colp; + colp = colp->GetNext()) { + if (reset) + colp->Reset(); - rc= RC_FX; - goto err; - } // endif rc + // Virtual columns are computed by MariaDB + if (!colp->GetColUse(U_VIRTUAL) && (!mrr || colp->GetKcol())) + if (colp->Eval(g)) + rc = RC_FX; - for (colp= tdbp->GetColumns(); rc == RC_OK && colp; - colp= colp->GetNext()) { - if (reset) - colp->Reset(); + } // endfor colp - // Virtual columns are computed by MariaDB - if (!colp->GetColUse(U_VIRTUAL) && (!mrr || colp->GetKcol())) - if (colp->Eval(g)) - rc= RC_FX; + } catch (int n) { + if (trace) + printf("Error %d reading columns: %s\n", n, g->Message); - } // endfor colp + rc = RC_FX; + } catch (const char *msg) { + strcpy(g->Message, msg); + } // end catch - err: - g->jump_level--; return rc; - } // end of EvalColumns +} // end of EvalColumns 
/***********************************************************************/ /* ReadNext: Read next record sequentially. */ /***********************************************************************/ RCODE CntReadNext(PGLOBAL g, PTDB tdbp) - { +{ RCODE rc; if (!tdbp) @@ -445,76 +421,66 @@ RCODE CntReadNext(PGLOBAL g, PTDB tdbp) ((PTDBASE)tdbp)->ResetKindex(g, NULL); } // endif index - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return RC_FX; - } // endif jump_level - - if ((setjmp(g->jumper[++g->jump_level])) != 0) { - rc= RC_FX; - goto err; - } // endif rc + try { + // Do it now to avoid double eval when filtering + for (PCOL colp = tdbp->GetColumns(); colp; colp = colp->GetNext()) + colp->Reset(); - // Do it now to avoid double eval when filtering - for (PCOL colp= tdbp->GetColumns(); colp; colp= colp->GetNext()) - colp->Reset(); + do { + if ((rc = (RCODE)tdbp->ReadDB(g)) == RC_OK) + if (!ApplyFilter(g, tdbp->GetFilter())) + rc = RC_NF; - do { - if ((rc= (RCODE)tdbp->ReadDB(g)) == RC_OK) - if (!ApplyFilter(g, tdbp->GetFilter())) - rc= RC_NF; + } while (rc == RC_NF); - } while (rc == RC_NF); + if (rc == RC_OK) + rc = EvalColumns(g, tdbp, false); - if (rc == RC_OK) - rc= EvalColumns(g, tdbp, false); + } catch (int) { + rc = RC_FX; + } catch (const char *msg) { + strcpy(g->Message, msg); + rc = RC_FX; + } // end catch - err: - g->jump_level--; return rc; - } // end of CntReadNext +} // end of CntReadNext /***********************************************************************/ /* WriteRow: Insert a new row into a table. 
*/ /***********************************************************************/ RCODE CntWriteRow(PGLOBAL g, PTDB tdbp) - { - RCODE rc; - PCOL colp; -//PTDBASE tp= (PTDBASE)tdbp; - - if (!tdbp) - return RC_FX; - - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return RC_FX; - } // endif jump_level - - if (setjmp(g->jumper[++g->jump_level]) != 0) { - printf("%s\n", g->Message); - rc= RC_FX; - goto err; - } // endif rc - - // Store column values in table write buffer(s) - for (colp= tdbp->GetSetCols(); colp; colp= colp->GetNext()) - if (!colp->GetColUse(U_VIRTUAL)) - colp->WriteColumn(g); - - if (tdbp->IsIndexed()) - // Index values must be sorted before updating - rc= (RCODE)((PTDBDOS)tdbp)->GetTxfp()->StoreValues(g, true); - else - // Return result code from write operation - rc= (RCODE)tdbp->WriteDB(g); - - err: - g->jump_level--; - return rc; - } // end of CntWriteRow +{ + RCODE rc; + PCOL colp; + //PTDBASE tp= (PTDBASE)tdbp; + + if (!tdbp) + return RC_FX; + + try { + // Store column values in table write buffer(s) + for (colp = tdbp->GetSetCols(); colp; colp = colp->GetNext()) + if (!colp->GetColUse(U_VIRTUAL)) + colp->WriteColumn(g); + + if (tdbp->IsIndexed()) + // Index values must be sorted before updating + rc = (RCODE)((PTDBDOS)tdbp)->GetTxfp()->StoreValues(g, true); + else + // Return result code from write operation + rc = (RCODE)tdbp->WriteDB(g); + + } catch (int n) { + printf("Exception %d: %s\n", n, g->Message); + rc = RC_FX; + } catch (const char *msg) { + strcpy(g->Message, msg); + rc = RC_FX; + } // end catch + + return rc; +} // end of CntWriteRow /***********************************************************************/ /* UpdateRow: Update a row into a table. */ @@ -562,88 +528,78 @@ RCODE CntDeleteRow(PGLOBAL g, PTDB tdbp, bool all) /* CLOSETAB: Close a table. 
*/ /***********************************************************************/ int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort) - { - int rc= RC_OK; -//TDBASE *tbxp= (PTDBASE)tdbp; - - if (!tdbp) - return rc; // Nothing to do - else if (tdbp->GetUse() != USE_OPEN) { - if (tdbp->GetAmType() == TYPE_AM_XML) - tdbp->CloseDB(g); // Opened by GetMaxSize - - return rc; - } // endif !USE_OPEN - - if (trace) - printf("CntCloseTable: tdbp=%p mode=%d nox=%d abort=%d\n", - tdbp, tdbp->GetMode(), nox, abort); - - if (tdbp->GetMode() == MODE_DELETE && tdbp->GetUse() == USE_OPEN) { - if (tdbp->IsIndexed()) - rc= ((PTDBDOS)tdbp)->GetTxfp()->DeleteSortedRows(g); - - if (!rc) - rc= tdbp->DeleteDB(g, RC_EF); // Specific A.M. delete routine - - } else if (tdbp->GetMode() == MODE_UPDATE && tdbp->IsIndexed()) - rc= ((PTDBDOX)tdbp)->Txfp->UpdateSortedRows(g); - - switch(rc) { - case RC_FX: - abort= true; - break; - case RC_INFO: - PushWarning(g, tdbp); - break; - } // endswitch rc - - // Prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - rc= RC_FX; - goto err; - } // endif - - if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) { - rc= RC_FX; - g->jump_level--; - goto err; - } // endif - - // This will close the table file(s) and also finalize write - // operations such as Insert, Update, or Delete. 
- tdbp->SetAbort(abort); - tdbp->CloseDB(g); - tdbp->SetAbort(false); - g->jump_level--; - - if (trace > 1) - printf("Table %s closed\n", tdbp->GetName()); - -//if (!((PTDBDOX)tdbp)->GetModified()) -// return 0; - - if (nox || tdbp->GetMode() == MODE_READ || tdbp->GetMode() == MODE_ANY) - return 0; - - if (trace > 1) - printf("About to reset opt\n"); - - if (!tdbp->IsRemote()) { - // Make all the eventual indexes - PTDBDOX tbxp = (PTDBDOX)tdbp; - tbxp->ResetKindex(g, NULL); - tbxp->SetKey_Col(NULL); - rc = tbxp->ResetTableOpt(g, true, tbxp->GetDef()->Indexable() == 1); - } // endif remote - - err: - if (trace > 1) - printf("Done rc=%d\n", rc); - - return (rc == RC_OK || rc == RC_INFO) ? 0 : rc; - } // end of CntCloseTable +{ + int rc = RC_OK; + //TDBASE *tbxp= (PTDBASE)tdbp; + + if (!tdbp) + return rc; // Nothing to do + else if (tdbp->GetUse() != USE_OPEN) { + if (tdbp->GetAmType() == TYPE_AM_XML) + tdbp->CloseDB(g); // Opened by GetMaxSize + + return rc; + } // endif !USE_OPEN + + if (trace) + printf("CntCloseTable: tdbp=%p mode=%d nox=%d abort=%d\n", + tdbp, tdbp->GetMode(), nox, abort); + + if (tdbp->GetMode() == MODE_DELETE && tdbp->GetUse() == USE_OPEN) { + if (tdbp->IsIndexed()) + rc = ((PTDBDOS)tdbp)->GetTxfp()->DeleteSortedRows(g); + + if (!rc) + rc = tdbp->DeleteDB(g, RC_EF); // Specific A.M. delete routine + + } else if (tdbp->GetMode() == MODE_UPDATE && tdbp->IsIndexed()) + rc = ((PTDBDOX)tdbp)->Txfp->UpdateSortedRows(g); + + switch (rc) { + case RC_FX: + abort = true; + break; + case RC_INFO: + PushWarning(g, tdbp); + break; + } // endswitch rc + + try { + // This will close the table file(s) and also finalize write + // operations such as Insert, Update, or Delete. 
+ tdbp->SetAbort(abort); + tdbp->CloseDB(g); + tdbp->SetAbort(false); + + if (trace > 1) + printf("Table %s closed\n", tdbp->GetName()); + + if (!nox && tdbp->GetMode() != MODE_READ && tdbp->GetMode() != MODE_ANY) { + if (trace > 1) + printf("About to reset opt\n"); + + if (!tdbp->IsRemote()) { + // Make all the eventual indexes + PTDBDOX tbxp = (PTDBDOX)tdbp; + tbxp->ResetKindex(g, NULL); + tbxp->SetKey_Col(NULL); + rc = tbxp->ResetTableOpt(g, true, tbxp->GetDef()->Indexable() == 1); + } // endif remote + + } // endif nox + + } catch (int) { + rc = RC_FX; + } catch (const char *msg) { + strcpy(g->Message, msg); + rc = RC_FX; + } // end catch + + if (trace > 1) + htrc("Done rc=%d\n", rc); + + return (rc == RC_OK || rc == RC_INFO) ? 0 : rc; +} // end of CntCloseTable /***********************************************************************/ /* Load and initialize the use of an index. */ @@ -752,8 +708,9 @@ RCODE CntIndexRead(PGLOBAL g, PTDB ptdb, OPVAL op, sprintf(g->Message, MSG(TABLE_NO_INDEX), ptdb->GetName()); return RC_FX; } else if (x == 2) { - // Remote index - if (op != OP_SAME && ptdb->ReadKey(g, op, kr)) + // Remote index. 
Only used in read mode + if ((ptdb->GetMode() == MODE_READ || ptdb->GetMode() == MODE_READX) + && op != OP_SAME && ptdb->ReadKey(g, op, kr)) return RC_FX; goto rnd; diff --git a/storage/connect/csort.h b/storage/connect/csort.h index 55ff6268a4b..6e700059881 100644 --- a/storage/connect/csort.h +++ b/storage/connect/csort.h @@ -49,8 +49,8 @@ class DllExport CSORT { public: // Methods int Qsort(PGLOBAL g, int n); /* Sort calling routine */ -//virtual void Print(PGLOBAL g, FILE *f, uint n); -//virtual void Print(PGLOBAL g, char *ps, uint z); +//virtual void Printf(PGLOBAL g, FILE *f, uint n); +//virtual void Prints(PGLOBAL g, char *ps, uint z); #ifdef DEBTRACE int GetNcmp(void) {return num_comp;} #endif diff --git a/storage/connect/domdoc.cpp b/storage/connect/domdoc.cpp index 1622ec16c68..e24e10835c1 100644 --- a/storage/connect/domdoc.cpp +++ b/storage/connect/domdoc.cpp @@ -58,13 +58,15 @@ void CloseXMLFile(PGLOBAL g, PFBLOCK fp, bool all) if (xp && xp->Count > 1 && !all) { xp->Count--; } else if (xp && xp->Count > 0) { - try { + try { if (xp->Docp) xp->Docp->Release(); - } catch(_com_error e) { - sprintf(g->Message, "%s %s", MSG(COM_ERROR), e.Description()); - } catch(...) {} + } catch(_com_error e) { + char *p = _com_util::ConvertBSTRToString(e.Description()); + sprintf(g->Message, "%s %s", MSG(COM_ERROR), p); + delete[] p; + } catch(...) {} CoUninitialize(); xp->Count = 0; @@ -89,7 +91,7 @@ DOMDOC::DOMDOC(char *nsl, char *nsdf, char *enc, PFBLOCK fp) /******************************************************************/ /* Initialize XML parser and check library compatibility. 
*/ /******************************************************************/ -bool DOMDOC::Initialize(PGLOBAL g, char *entry, bool zipped) +bool DOMDOC::Initialize(PGLOBAL g, PCSZ entry, bool zipped) { if (zipped && InitZip(g, entry)) return true; @@ -155,7 +157,7 @@ PFBLOCK DOMDOC::LinkXblock(PGLOBAL g, MODE m, int rc, char *fn) /******************************************************************/ /* Create the XML node. */ /******************************************************************/ -bool DOMDOC::NewDoc(PGLOBAL g, char *ver) +bool DOMDOC::NewDoc(PGLOBAL g, PCSZ ver) { char buf[64]; MSXML2::IXMLDOMProcessingInstructionPtr pip; @@ -490,9 +492,9 @@ PXATTR DOMNODE::GetAttribute(PGLOBAL g, char *name, PXATTR ap) /******************************************************************/ /* Add a new element child node to this node and return it. */ /******************************************************************/ -PXNODE DOMNODE::AddChildNode(PGLOBAL g, char *name, PXNODE np) +PXNODE DOMNODE::AddChildNode(PGLOBAL g, PCSZ name, PXNODE np) { - char *p, *pn; + const char *p, *pn; // char *p, *pn, *epf, *pf = NULL; MSXML2::IXMLDOMNodePtr ep; // _bstr_t uri((wchar_t*)NULL); @@ -585,7 +587,7 @@ PXATTR DOMNODE::AddProperty(PGLOBAL g, char *name, PXATTR ap) /******************************************************************/ /* Add a new text node to this node. 
*/ /******************************************************************/ -void DOMNODE::AddText(PGLOBAL g, char *txtp) +void DOMNODE::AddText(PGLOBAL g, PCSZ txtp) { MSXML2::IXMLDOMTextPtr tp= Docp->createTextNode((_bstr_t)txtp); diff --git a/storage/connect/domdoc.h b/storage/connect/domdoc.h index 7f269002d59..dd8936097e2 100644 --- a/storage/connect/domdoc.h +++ b/storage/connect/domdoc.h @@ -37,9 +37,9 @@ class DOMDOC : public XMLDOCUMENT { virtual void SetNofree(bool b) {} // Only libxml2 // Methods - virtual bool Initialize(PGLOBAL g, char *entry, bool zipped); + virtual bool Initialize(PGLOBAL g, PCSZ entry, bool zipped); virtual bool ParseFile(PGLOBAL g, char *fn); - virtual bool NewDoc(PGLOBAL g, char *ver); + virtual bool NewDoc(PGLOBAL g, PCSZ ver); virtual void AddComment(PGLOBAL g, char *com); virtual PXNODE GetRoot(PGLOBAL g); virtual PXNODE NewRoot(PGLOBAL g, char *name); @@ -78,9 +78,9 @@ class DOMNODE : public XMLNODE { virtual PXLIST SelectNodes(PGLOBAL g, char *xp, PXLIST lp); virtual PXNODE SelectSingleNode(PGLOBAL g, char *xp, PXNODE np); virtual PXATTR GetAttribute(PGLOBAL g, char *name, PXATTR ap); - virtual PXNODE AddChildNode(PGLOBAL g, char *name, PXNODE np); + virtual PXNODE AddChildNode(PGLOBAL g, PCSZ name, PXNODE np); virtual PXATTR AddProperty(PGLOBAL g, char *name, PXATTR ap); - virtual void AddText(PGLOBAL g, char *txtp); + virtual void AddText(PGLOBAL g, PCSZ txtp); virtual void DeleteChild(PGLOBAL g, PXNODE dnp); protected: diff --git a/storage/connect/filamap.cpp b/storage/connect/filamap.cpp index 8fffaca3d06..84dff422db7 100644 --- a/storage/connect/filamap.cpp +++ b/storage/connect/filamap.cpp @@ -301,10 +301,9 @@ int MAPFAM::SkipRecord(PGLOBAL g, bool header) PDBUSER dup = (PDBUSER)g->Activityp->Aptr; // Skip this record - while (*Mempos++ != '\n') ; // What about Unix ??? - - if (Mempos >= Top) - return RC_EF; + while (*Mempos++ != '\n') // What about Unix ??? 
+ if (Mempos == Top) + return RC_EF; // Update progress information dup->ProgCur = GetPos(); @@ -320,7 +319,7 @@ int MAPFAM::SkipRecord(PGLOBAL g, bool header) /***********************************************************************/ int MAPFAM::ReadBuffer(PGLOBAL g) { - int rc, len; + int rc, len, n = 1; // Are we at the end of the memory if (Mempos >= Top) { @@ -362,10 +361,14 @@ int MAPFAM::ReadBuffer(PGLOBAL g) Placed = false; // Immediately calculate next position (Used by DeleteDB) - while (*Mempos++ != '\n') ; // What about Unix ??? + while (*Mempos++ != '\n') // What about Unix ??? + if (Mempos == Top) { + n = 0; + break; + } // endif Mempos // Set caller line buffer - len = (Mempos - Fpos) - 1; + len = (Mempos - Fpos) - n; // Don't rely on ENDING setting if (len > 0 && *(Mempos - 2) == '\r') @@ -619,7 +622,9 @@ int MBKFAM::ReadBuffer(PGLOBAL g) } // endif's // Immediately calculate next position (Used by DeleteDB) - while (*Mempos++ != '\n') ; // What about Unix ??? + while (*Mempos++ != '\n') // What about Unix ??? + if (Mempos == Top) + break; // Set caller line buffer len = (Mempos - Fpos) - Ending; diff --git a/storage/connect/filamdbf.cpp b/storage/connect/filamdbf.cpp index 55feaa02bc4..2ac0071a92d 100644 --- a/storage/connect/filamdbf.cpp +++ b/storage/connect/filamdbf.cpp @@ -128,7 +128,7 @@ typedef struct _descriptor { /* Moves file pointer to byte 32; fills buffer at buf with */ /* first 32 bytes of file. */ /****************************************************************************/ -static int dbfhead(PGLOBAL g, FILE *file, PSZ fn, DBFHEADER *buf) +static int dbfhead(PGLOBAL g, FILE *file, PCSZ fn, DBFHEADER *buf) { char endmark[2]; int dbc = 2, rc = RC_OK; @@ -186,7 +186,7 @@ static int dbfhead(PGLOBAL g, FILE *file, PSZ fn, DBFHEADER *buf) /* DBFColumns: constructs the result blocks containing the description */ /* of all the columns of a DBF file that will be retrieved by #GetData. 
*/ /****************************************************************************/ -PQRYRES DBFColumns(PGLOBAL g, char *dp, const char *fn, bool info) +PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info) { int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_STRING, TYPE_INT, TYPE_INT, TYPE_SHORT}; @@ -393,7 +393,7 @@ DBFBASE::DBFBASE(DBFBASE *txfp) /* and header length. Set Records, check that Reclen is equal to lrecl and */ /* return the header length or 0 in case of error. */ /****************************************************************************/ -int DBFBASE::ScanHeader(PGLOBAL g, PSZ fn, int lrecl, int *rln, char *defpath) +int DBFBASE::ScanHeader(PGLOBAL g, PCSZ fn, int lrecl, int *rln, PCSZ defpath) { int rc; char filename[_MAX_PATH]; @@ -503,7 +503,8 @@ bool DBFFAM::OpenTableFile(PGLOBAL g) break; } // endif - // Selective delete, pass thru + // Selective delete + /* fall through */ case MODE_UPDATE: UseTemp = Tdbp->IsUsingTemp(g); strcpy(opmode, (UseTemp) ? "rb" : "r+b"); @@ -623,6 +624,7 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g) case 'L': // Large (big) integer case 'T': // Tiny integer c = 'N'; // Numeric + /* fall through */ case 'N': // Numeric (integer) case 'F': // Float (double) descp->Decimals = (uchar)cdp->F.Prec; diff --git a/storage/connect/filamdbf.h b/storage/connect/filamdbf.h index 66458a10eaa..640fc349b4c 100644 --- a/storage/connect/filamdbf.h +++ b/storage/connect/filamdbf.h @@ -19,7 +19,7 @@ typedef class DBMFAM *PDBMFAM; /****************************************************************************/ /* Functions used externally. */ /****************************************************************************/ -PQRYRES DBFColumns(PGLOBAL g, char *dp, const char *fn, bool info); +PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info); /****************************************************************************/ /* This is the base class for dBASE file access methods. 
*/ @@ -31,7 +31,7 @@ class DllExport DBFBASE { DBFBASE(PDBF txfp); // Implementation - int ScanHeader(PGLOBAL g, PSZ fname, int lrecl, int *rlen, char *defpath); + int ScanHeader(PGLOBAL g, PCSZ fname, int lrecl, int *rlen, PCSZ defpath); protected: // Default constructor, not to be used diff --git a/storage/connect/filamfix.cpp b/storage/connect/filamfix.cpp index cd25429318a..1d6194b154d 100644 --- a/storage/connect/filamfix.cpp +++ b/storage/connect/filamfix.cpp @@ -761,7 +761,8 @@ bool BGXFAM::BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req) htrc("after write req=%d brc=%d nbw=%d\n", req, brc, nbw); if (!brc || nbw != len) { - char buf[256], *fn = (h == Hfile) ? To_File : "Tempfile"; + char buf[256]; + PCSZ fn = (h == Hfile) ? To_File : "Tempfile"; if (brc) strcpy(buf, MSG(BAD_BYTE_NUM)); @@ -919,7 +920,8 @@ bool BGXFAM::OpenTableFile(PGLOBAL g) break; } // endif - // Selective delete, pass thru + // Selective delete + /* fall through */ case MODE_UPDATE: UseTemp = Tdbp->IsUsingTemp(g); oflag |= (UseTemp) ? 
O_RDONLY : O_RDWR; diff --git a/storage/connect/filamgz.cpp b/storage/connect/filamgz.cpp index dc6f277ee27..df366ef15f9 100644 --- a/storage/connect/filamgz.cpp +++ b/storage/connect/filamgz.cpp @@ -920,8 +920,8 @@ int ZLBFAM::GetFileLength(PGLOBAL g) /***********************************************************************/ bool ZLBFAM::AllocateBuffer(PGLOBAL g) { - char *msg; - int n, zrc; + PCSZ msg; + int n, zrc; #if 0 if (!Optimized && Tdbp->NeedIndexing(g)) { diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp index e53cdcd9ba9..c456ee9e9b7 100644 --- a/storage/connect/filamtxt.cpp +++ b/storage/connect/filamtxt.cpp @@ -1,11 +1,11 @@ /*********** File AM Txt C++ Program Source Code File (.CPP) ***********/ /* PROGRAM NAME: FILAMTXT */ /* ------------- */ -/* Version 1.6 */ +/* Version 1.7 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2015 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -71,8 +71,23 @@ TXTFAM::TXTFAM(PDOSDEF tdp) { Tdbp = NULL; To_Fb = NULL; - To_File = tdp->Fn; - Lrecl = tdp->Lrecl; + + if (tdp) { + To_File = tdp->Fn; + Lrecl = tdp->Lrecl; + Eof = tdp->Eof; + Ending = tdp->Ending; + } else { + To_File = NULL; + Lrecl = 0; + Eof = false; +#if defined(__WIN__) + Ending = 2; +#else + Ending = 1; +#endif + } // endif tdp + Placed = false; IsRead = true; Blocked = false; @@ -103,8 +118,6 @@ TXTFAM::TXTFAM(PDOSDEF tdp) Blksize = 0; Fpos = Spos = Tpos = 0; Padded = false; - Eof = tdp->Eof; - Ending = tdp->Ending; Abort = false; CrLf = (char*)(Ending == 1 ? 
"\n" : "\r\n"); } // end of TXTFAM standard constructor @@ -561,6 +574,7 @@ bool DOSFAM::OpenTableFile(PGLOBAL g) // Selective delete, pass thru Bin = true; + /* fall through */ case MODE_UPDATE: if ((UseTemp = Tdbp->IsUsingTemp(g))) { strcpy(opmode, "r"); @@ -973,7 +987,7 @@ int DOSFAM::DeleteRecords(PGLOBAL g, int irc) } else { /*****************************************************************/ - /* Move of eventual preceding lines is not required here. */ + /* Move of eventual preceding lines is not required here. */ /* Set the target file as being the source file itself. */ /* Set the future Tpos, and give Spos a value to block copying. */ /*****************************************************************/ @@ -1161,13 +1175,13 @@ int DOSFAM::RenameTempFile(PGLOBAL g) if (rename(filename, filetemp)) { // Save file for security sprintf(g->Message, MSG(RENAME_ERROR), filename, filetemp, strerror(errno)); - longjmp(g->jumper[g->jump_level], 51); - } else if (rename(tempname, filename)) { + throw 51; + } else if (rename(tempname, filename)) { sprintf(g->Message, MSG(RENAME_ERROR), tempname, filename, strerror(errno)); rc = rename(filetemp, filename); // Restore saved file - longjmp(g->jumper[g->jump_level], 52); - } else if (remove(filetemp)) { + throw 52; + } else if (remove(filetemp)) { sprintf(g->Message, MSG(REMOVE_ERROR), filetemp, strerror(errno)); rc = RC_INFO; // Acceptable diff --git a/storage/connect/filamtxt.h b/storage/connect/filamtxt.h index ae8f74a9830..1fdae8fcd37 100644 --- a/storage/connect/filamtxt.h +++ b/storage/connect/filamtxt.h @@ -80,7 +80,7 @@ class DllExport TXTFAM : public BLOCK { protected: // Members PTDBDOS Tdbp; // To table class - PSZ To_File; // Points to table file name + PCSZ To_File; // Points to table file name PFBLOCK To_Fb; // Pointer to file block PPARM To_Pos; // Pointer to position list PPARM To_Sos; // Pointer to start position list diff --git a/storage/connect/filamvct.cpp b/storage/connect/filamvct.cpp index 
fdc5433f4a4..537f77d01ac 100755 --- a/storage/connect/filamvct.cpp +++ b/storage/connect/filamvct.cpp @@ -143,9 +143,9 @@ int VCTFAM::GetFileLength(PGLOBAL g) { if (Split) { // Get the total file length - char filename[_MAX_PATH]; - char *savfile = To_File; - int i, len = 0; + char filename[_MAX_PATH]; + PCSZ savfile = To_File; + int i, len = 0; // Initialize the array of file structures if (!Colfn) { @@ -313,8 +313,8 @@ int VCTFAM::Cardinality(PGLOBAL g) // and Last must be set from the file cardinality. // Only happens when called by sub classes. char filename[_MAX_PATH]; - PSZ savfn = To_File; - int len, clen, card = -1; + PCSZ savfn = To_File; + int len, clen, card = -1; PCOLDEF cdp = Tdbp->GetDef()->GetCols(); if (!Colfn) { @@ -368,7 +368,7 @@ int VCTFAM::GetRowID(void) /***********************************************************************/ /* VCT Create an empty file for Vector formatted tables. */ /***********************************************************************/ -bool VCTFAM::MakeEmptyFile(PGLOBAL g, char *fn) +bool VCTFAM::MakeEmptyFile(PGLOBAL g, PCSZ fn) { // Vector formatted file: this will create an empty file of the // required length if it does not exists yet. @@ -440,6 +440,7 @@ bool VCTFAM::OpenTableFile(PGLOBAL g) } // endif // Selective delete, pass thru + /* fall through */ case MODE_UPDATE: UseTemp = Tdbp->IsUsingTemp(g); strcpy(opmode, (UseTemp) ? "rb" : "r+b"); @@ -559,41 +560,42 @@ bool VCTFAM::AllocateBuffer(PGLOBAL g) /* Do initial action when inserting. */ /***********************************************************************/ bool VCTFAM::InitInsert(PGLOBAL g) - { +{ + bool rc = false; + // We come here in MODE_INSERT only if (Last == Nrec) { CurBlk = Block; CurNum = 0; AddBlock = !MaxBlk; } else { - int rc; PVCTCOL cp = (PVCTCOL)Tdbp->GetColumns(); // The starting point must be at the end of file as for append. 
CurBlk = Block - 1; CurNum = Last; - // Prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return true; - } // endif + try { + // Last block must be updated by new values + for (; cp; cp = (PVCTCOL)cp->Next) + cp->ReadBlock(g); - if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) { - g->jump_level--; - return true; - } // endif + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + rc = true; + } catch (const char *msg) { + strcpy(g->Message, msg); + rc = true; + } // end catch - // Last block must be updated by new values - for (; cp; cp = (PVCTCOL)cp->Next) - cp->ReadBlock(g); - - g->jump_level--; } // endif Last - // We are not currently using a temporary file for Insert - T_Stream = Stream; - return false; + if (!rc) + // We are not currently using a temporary file for Insert + T_Stream = Stream; + + return rc; } // end of InitInsert /***********************************************************************/ @@ -878,8 +880,9 @@ int VCTFAM::DeleteRecords(PGLOBAL g, int irc) /***********************************************************************/ bool VCTFAM::OpenTempFile(PGLOBAL g) { - char *opmode, tempname[_MAX_PATH]; - bool rc = false; + PCSZ opmode; + char tempname[_MAX_PATH]; + bool rc = false; /*********************************************************************/ /* Open the temporary file, Spos is at the beginning of file. */ @@ -1107,7 +1110,7 @@ void VCTFAM::CloseTableFile(PGLOBAL g, bool abort) } else if (AddBlock) { // Last block was not written rc = ResetTableSize(g, CurBlk, Nrec); - longjmp(g->jumper[g->jump_level], 44); + throw 44; } // endif } else if (mode == MODE_UPDATE) { @@ -1527,8 +1530,8 @@ bool VCMFAM::AllocateBuffer(PGLOBAL g) /* Do initial action when inserting. 
*/ /***********************************************************************/ bool VCMFAM::InitInsert(PGLOBAL g) - { - int rc; +{ + bool rc = false; volatile PVCTCOL cp = (PVCTCOL)Tdbp->GetColumns(); // We come here in MODE_INSERT only @@ -1542,24 +1545,22 @@ bool VCMFAM::InitInsert(PGLOBAL g) CurNum = Last; } // endif Last - // Prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return true; - } // endif - - if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) { - g->jump_level--; - return true; - } // endif + try { + // Initialize the column block pointer + for (; cp; cp = (PVCTCOL)cp->Next) + cp->ReadBlock(g); - // Initialize the column block pointer - for (; cp; cp = (PVCTCOL)cp->Next) - cp->ReadBlock(g); + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + rc = true; + } catch (const char *msg) { + strcpy(g->Message, msg); + rc = true; + } // end catch - g->jump_level--; - return false; - } // end of InitInsert + return rc; +} // end of InitInsert /***********************************************************************/ /* Data Base write routine for VMP access method. */ @@ -1918,6 +1919,7 @@ bool VECFAM::OpenTableFile(PGLOBAL g) } // endif filter // Selective delete, pass thru + /* fall through */ case MODE_UPDATE: UseTemp = Tdbp->IsUsingTemp(g); strcpy(opmode, (UseTemp) ? "rb": "r+b"); @@ -1998,7 +2000,7 @@ bool VECFAM::OpenTableFile(PGLOBAL g) /***********************************************************************/ /* Open the file corresponding to one column. 
*/ /***********************************************************************/ -bool VECFAM::OpenColumnFile(PGLOBAL g, char *opmode, int i) +bool VECFAM::OpenColumnFile(PGLOBAL g, PCSZ opmode, int i) { char filename[_MAX_PATH]; PDBUSER dup = PlgGetUser(g); @@ -2503,7 +2505,7 @@ void VECFAM::CloseTableFile(PGLOBAL g, bool abort) if (wrc != RC_FX) rc = ResetTableSize(g, Block, Last); else - longjmp(g->jumper[g->jump_level], 44); + throw 44; } else if (mode == MODE_UPDATE) { if (UseTemp && !InitUpdate && !Abort) { @@ -3143,7 +3145,8 @@ bool BGVFAM::BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req) htrc("after write req=%d brc=%d nbw=%d\n", req, brc, nbw); if (!brc || nbw != len) { - char buf[256], *fn = (h == Hfile) ? To_File : "Tempfile"; + char buf[256]; + PCSZ fn = (h == Hfile) ? To_File : "Tempfile"; if (brc) strcpy(buf, MSG(BAD_BYTE_NUM)); @@ -3319,7 +3322,7 @@ bool BGVFAM::SetBlockInfo(PGLOBAL g) /***********************************************************************/ /* VEC Create an empty file for new Vector formatted tables. */ /***********************************************************************/ -bool BGVFAM::MakeEmptyFile(PGLOBAL g, char *fn) +bool BGVFAM::MakeEmptyFile(PGLOBAL g, PCSZ fn) { // Vector formatted file this will create an empty file of the // required length if it does not exists yet. @@ -3329,7 +3332,7 @@ bool BGVFAM::MakeEmptyFile(PGLOBAL g, char *fn) PlugSetPath(filename, fn, Tdbp->GetPath()); #if defined(__WIN__) - char *p; + PCSZ p; DWORD rc; bool brc; LARGE_INTEGER of; @@ -3584,6 +3587,7 @@ bool BGVFAM::OpenTableFile(PGLOBAL g) } // endif // Selective delete, pass thru + /* fall through */ case MODE_UPDATE: UseTemp = Tdbp->IsUsingTemp(g); oflag = (UseTemp) ? 
O_RDONLY : O_RDWR; @@ -4164,8 +4168,8 @@ void BGVFAM::CloseTableFile(PGLOBAL g, bool abort) } else if (AddBlock) { // Last block was not written rc = ResetTableSize(g, CurBlk, Nrec); - longjmp(g->jumper[g->jump_level], 44); - } // endif + throw 44; + } // endif } else if (mode == MODE_UPDATE) { // Write back to file any pending modifications diff --git a/storage/connect/filamvct.h b/storage/connect/filamvct.h index be66232acfb..85982403270 100644 --- a/storage/connect/filamvct.h +++ b/storage/connect/filamvct.h @@ -61,7 +61,7 @@ class DllExport VCTFAM : public FIXFAM { virtual bool WriteBlock(PGLOBAL g, PVCTCOL colp); protected: - virtual bool MakeEmptyFile(PGLOBAL g, char *fn); + virtual bool MakeEmptyFile(PGLOBAL g, PCSZ fn); virtual bool OpenTempFile(PGLOBAL g); virtual bool MoveLines(PGLOBAL g) {return false;} virtual bool MoveIntermediateLines(PGLOBAL g, bool *b = NULL); @@ -160,7 +160,7 @@ class DllExport VECFAM : public VCTFAM { virtual bool MoveLines(PGLOBAL g); virtual bool MoveIntermediateLines(PGLOBAL g, bool *b = NULL); virtual int RenameTempFile(PGLOBAL g); - bool OpenColumnFile(PGLOBAL g, char *opmode, int i); + bool OpenColumnFile(PGLOBAL g, PCSZ opmode, int i); // Members FILE* *Streams; // Points to Dos file structure array @@ -235,7 +235,7 @@ class BGVFAM : public VCTFAM { bool BigSeek(PGLOBAL g, HANDLE h, BIGINT pos, bool b = false); bool BigRead(PGLOBAL g, HANDLE h, void *inbuf, int req); bool BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req); - virtual bool MakeEmptyFile(PGLOBAL g, char *fn); + virtual bool MakeEmptyFile(PGLOBAL g, PCSZ fn); virtual bool OpenTempFile(PGLOBAL g); virtual bool MoveIntermediateLines(PGLOBAL g, bool *b = NULL); virtual bool CleanUnusedSpace(PGLOBAL g); diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp index 3d157da5e87..eb06ee7ad1e 100644 --- a/storage/connect/filamzip.cpp +++ b/storage/connect/filamzip.cpp @@ -1,7 +1,7 @@ /*********** File AM Zip C++ Program Source Code File (.CPP) 
***********/ /* PROGRAM NAME: FILAMZIP */ /* ------------- */ -/* Version 1.1 */ +/* Version 1.3 */ /* */ /* COPYRIGHT: */ /* ---------- */ @@ -45,12 +45,12 @@ #define WRITEBUFFERSIZE (16384) -bool ZipLoadFile(PGLOBAL g, char *zfn, char *fn, char *entry, bool append, bool mul); +bool ZipLoadFile(PGLOBAL g, PCSZ zfn, PCSZ fn, PCSZ entry, bool append, bool mul); /***********************************************************************/ /* Compress a file in zip when creating a table. */ /***********************************************************************/ -static bool ZipFile(PGLOBAL g, ZIPUTIL *zutp, char *fn, char *entry, char *buf) +static bool ZipFile(PGLOBAL g, ZIPUTIL *zutp, PCSZ fn, PCSZ entry, char *buf) { int rc = RC_OK, size_read, size_buf = WRITEBUFFERSIZE; FILE *fin; @@ -88,7 +88,7 @@ static bool ZipFile(PGLOBAL g, ZIPUTIL *zutp, char *fn, char *entry, char *buf) /***********************************************************************/ /* Find and Compress several files in zip when creating a table. */ /***********************************************************************/ -static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, char *pat, char *buf) +static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf) { char filename[_MAX_PATH]; int rc; @@ -203,7 +203,7 @@ static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, char *pat, char *buf) /***********************************************************************/ /* Load and Compress a file in zip when creating a table. */ /***********************************************************************/ -bool ZipLoadFile(PGLOBAL g, char *zfn, char *fn, char *entry, bool append, bool mul) +bool ZipLoadFile(PGLOBAL g, PCSZ zfn, PCSZ fn, PCSZ entry, bool append, bool mul) { char *buf; bool err; @@ -228,7 +228,7 @@ bool ZipLoadFile(PGLOBAL g, char *zfn, char *fn, char *entry, bool append, bool /***********************************************************************/ /* Constructors. 
*/ /***********************************************************************/ -ZIPUTIL::ZIPUTIL(PSZ tgt) +ZIPUTIL::ZIPUTIL(PCSZ tgt) { zipfile = NULL; target = tgt; @@ -269,7 +269,7 @@ void ZIPUTIL::getTime(tm_zip& tmZip) /* append: set true to append the zip file */ /* return: true if open, false otherwise. */ /***********************************************************************/ -bool ZIPUTIL::open(PGLOBAL g, char *filename, bool append) +bool ZIPUTIL::open(PGLOBAL g, PCSZ filename, bool append) { if (!zipfile && !(zipfile = zipOpen64(filename, append ? APPEND_STATUS_ADDINZIP @@ -295,7 +295,7 @@ void ZIPUTIL::close() /***********************************************************************/ /* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */ /***********************************************************************/ -bool ZIPUTIL::OpenTable(PGLOBAL g, MODE mode, char *fn, bool append) +bool ZIPUTIL::OpenTable(PGLOBAL g, MODE mode, PCSZ fn, bool append) { /*********************************************************************/ /* The file will be compressed. */ @@ -338,10 +338,10 @@ bool ZIPUTIL::OpenTable(PGLOBAL g, MODE mode, char *fn, bool append) /***********************************************************************/ /* Add target in zip file. */ /***********************************************************************/ -bool ZIPUTIL::addEntry(PGLOBAL g, char *entry) +bool ZIPUTIL::addEntry(PGLOBAL g, PCSZ entry) { //?? we dont need the stinking time - zip_fileinfo zi = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + zip_fileinfo zi = { {0, 0, 0, 0, 0, 0}, 0, 0, 0 }; getTime(zi.tmz_date); target = entry; @@ -382,10 +382,11 @@ void ZIPUTIL::closeEntry() /***********************************************************************/ /* Constructors. 
*/ /***********************************************************************/ -UNZIPUTL::UNZIPUTL(PSZ tgt, bool mul) +UNZIPUTL::UNZIPUTL(PCSZ tgt, bool mul) { zipfile = NULL; target = tgt; + pwd = NULL; fp = NULL; memory = NULL; size = 0; @@ -401,6 +402,26 @@ UNZIPUTL::UNZIPUTL(PSZ tgt, bool mul) #endif } // end of UNZIPUTL standard constructor +UNZIPUTL::UNZIPUTL(PDOSDEF tdp) +{ + zipfile = NULL; + target = tdp->GetEntry(); + pwd = tdp->Pwd; + fp = NULL; + memory = NULL; + size = 0; + entryopen = false; + multiple = tdp->GetMul(); + memset(fn, 0, sizeof(fn)); + + // Init the case mapping table. +#if defined(__WIN__) + for (int i = 0; i < 256; ++i) mapCaseTable[i] = toupper(i); +#else + for (int i = 0; i < 256; ++i) mapCaseTable[i] = i; +#endif +} // end of UNZIPUTL standard constructor + #if 0 UNZIPUTL::UNZIPUTL(PZIPUTIL zutp) { @@ -418,8 +439,8 @@ UNZIPUTL::UNZIPUTL(PZIPUTIL zutp) /* This code is the copyright property of Alessandro Felice Cantatore. */ /* http://xoomer.virgilio.it/acantato/dev/wildcard/wildmatch.html */ /***********************************************************************/ -bool UNZIPUTL::WildMatch(PSZ pat, PSZ str) { - PSZ s, p; +bool UNZIPUTL::WildMatch(PCSZ pat, PCSZ str) { + PCSZ s, p; bool star = FALSE; loopStart: @@ -453,7 +474,7 @@ starCheck: /* param: filename path and the filename of the zip file to open. */ /* return: true if open, false otherwise. */ /***********************************************************************/ -bool UNZIPUTL::open(PGLOBAL g, char *filename) +bool UNZIPUTL::open(PGLOBAL g, PCSZ filename) { if (!zipfile && !(zipfile = unzOpen64(filename))) sprintf(g->Message, "Zipfile open error on %s", filename); @@ -543,7 +564,7 @@ int UNZIPUTL::nextEntry(PGLOBAL g) /***********************************************************************/ /* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. 
*/ /***********************************************************************/ -bool UNZIPUTL::OpenTable(PGLOBAL g, MODE mode, char *fn) +bool UNZIPUTL::OpenTable(PGLOBAL g, MODE mode, PCSZ fn) { /*********************************************************************/ /* The file will be decompressed into virtual memory. */ @@ -581,7 +602,7 @@ bool UNZIPUTL::OpenTable(PGLOBAL g, MODE mode, char *fn) if (openEntry(g)) return true; - if (size > 0) { + if (size > 0) { /*******************************************************************/ /* Link a Fblock. This make possible to automatically close it */ /* in case of error g->jump. */ @@ -613,6 +634,28 @@ bool UNZIPUTL::OpenTable(PGLOBAL g, MODE mode, char *fn) } // end of OpenTableFile /***********************************************************************/ +/* Insert only if the entry does not exist. */ +/***********************************************************************/ +bool UNZIPUTL::IsInsertOk(PGLOBAL g, PCSZ fn) +{ + bool ok = true, b = open(g, fn); + + if (!b) { + if (!target || *target == 0) { + unz_global_info64 ginfo; + int err = unzGetGlobalInfo64(zipfile, &ginfo); + + ok = !(err == UNZ_OK && ginfo.number_entry > 0); + } else // Check if the target exist + ok = (unzLocateFile(zipfile, target, 0) != UNZ_OK); + + unzClose(zipfile); + } // endif b + + return ok; +} // end of IsInsertOk + +/***********************************************************************/ /* Open target in zip file. 
*/ /***********************************************************************/ bool UNZIPUTL::openEntry(PGLOBAL g) @@ -625,18 +668,24 @@ bool UNZIPUTL::openEntry(PGLOBAL g) if (rc != UNZ_OK) { sprintf(g->Message, "unzGetCurrentFileInfo64 rc=%d", rc); return true; - } else if ((rc = unzOpenCurrentFile(zipfile)) != UNZ_OK) { + } else if ((rc = unzOpenCurrentFilePassword(zipfile, pwd)) != UNZ_OK) { sprintf(g->Message, "unzOpen fn=%s rc=%d", fn, rc); return true; } // endif rc size = finfo.uncompressed_size; - memory = new char[size + 1]; + + try { + memory = new char[size + 1]; + } catch (...) { + strcpy(g->Message, "Out of memory"); + return true; + } // end try/catch if ((rc = unzReadCurrentFile(zipfile, memory, size)) < 0) { sprintf(g->Message, "unzReadCurrentFile rc = %d", rc); unzCloseCurrentFile(zipfile); - free(memory); + delete[] memory; memory = NULL; entryopen = false; } else { @@ -661,7 +710,7 @@ void UNZIPUTL::closeEntry() } // endif entryopen if (memory) { - free(memory); + delete[] memory; memory = NULL; } // endif memory @@ -675,15 +724,17 @@ void UNZIPUTL::closeEntry() UNZFAM::UNZFAM(PDOSDEF tdp) : MAPFAM(tdp) { zutp = NULL; - target = tdp->GetEntry(); - mul = tdp->GetMul(); + tdfp = tdp; + //target = tdp->GetEntry(); + //mul = tdp->GetMul(); } // end of UNZFAM standard constructor UNZFAM::UNZFAM(PUNZFAM txfp) : MAPFAM(txfp) { zutp = txfp->zutp; - target = txfp->target; - mul = txfp->mul; + tdfp = txfp->tdfp; + //target = txfp->target; + //mul = txfp->mul; } // end of UNZFAM copy constructor /***********************************************************************/ @@ -711,7 +762,13 @@ int UNZFAM::Cardinality(PGLOBAL g) int card = -1; int len = GetFileLength(g); - card = (len / (int)Lrecl) * 2; // Estimated ??? + if (len) { + // Estimated ??? + card = (len / (int)Lrecl) * 2; + card = card ? 
card : 10; // Lrecl can be too big + } else + card = 0; + return card; } // end of Cardinality @@ -726,7 +783,7 @@ bool UNZFAM::OpenTableFile(PGLOBAL g) /*********************************************************************/ /* Allocate the ZIP utility class. */ /*********************************************************************/ - zutp = new(g) UNZIPUTL(target, mul); + zutp = new(g) UNZIPUTL(tdfp); // We used the file name relative to recorded datapath PlugSetPath(filename, To_File, Tdbp->GetPath()); @@ -841,17 +898,19 @@ void UNZFAM::CloseTableFile(PGLOBAL g, bool) UZXFAM::UZXFAM(PDOSDEF tdp) : MPXFAM(tdp) { zutp = NULL; - target = tdp->GetEntry(); - mul = tdp->GetMul(); + tdfp = tdp; + //target = tdp->GetEntry(); + //mul = tdp->GetMul(); //Lrecl = tdp->GetLrecl(); } // end of UZXFAM standard constructor UZXFAM::UZXFAM(PUZXFAM txfp) : MPXFAM(txfp) { zutp = txfp->zutp; - target = txfp->target; - mul = txfp->mul; -//Lrecl = txfp->Lrecl; + tdfp = txfp->tdfp; + //target = txfp->target; + //mul = txfp->mul; + //Lrecl = txfp->Lrecl; } // end of UZXFAM copy constructor /***********************************************************************/ @@ -907,7 +966,7 @@ bool UZXFAM::OpenTableFile(PGLOBAL g) /* Allocate the ZIP utility class. 
*/ /*********************************************************************/ if (!zutp) - zutp = new(g)UNZIPUTL(target, mul); + zutp = new(g)UNZIPUTL(tdfp); // We used the file name relative to recorded datapath PlugSetPath(filename, To_File, Tdbp->GetPath()); @@ -969,6 +1028,25 @@ bool ZIPFAM::OpenTableFile(PGLOBAL g) { char filename[_MAX_PATH]; MODE mode = Tdbp->GetMode(); + int len = TXTFAM::GetFileLength(g); + + // We used the file name relative to recorded datapath + PlugSetPath(filename, To_File, Tdbp->GetPath()); + + if (len < 0) + return true; + else if (!append && len > 0) { + strcpy(g->Message, "No insert into existing zip file"); + return true; + } else if (append && len > 0) { + UNZIPUTL *zutp = new(g) UNZIPUTL(target, false); + + if (!zutp->IsInsertOk(g, filename)) { + strcpy(g->Message, "No insert into existing entry"); + return true; + } // endif Ok + + } // endif's /*********************************************************************/ /* Allocate the ZIP utility class. */ @@ -1028,15 +1106,31 @@ ZPXFAM::ZPXFAM(PDOSDEF tdp) : FIXFAM(tdp) target = tdp->GetEntry(); append = tdp->GetAppend(); //Lrecl = tdp->GetLrecl(); -} // end of UZXFAM standard constructor +} // end of ZPXFAM standard constructor /***********************************************************************/ /* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. 
*/ /***********************************************************************/ bool ZPXFAM::OpenTableFile(PGLOBAL g) { - char filename[_MAX_PATH]; - MODE mode = Tdbp->GetMode(); + char filename[_MAX_PATH]; + MODE mode = Tdbp->GetMode(); + int len = TXTFAM::GetFileLength(g); + + if (len < 0) + return true; + else if (!append && len > 0) { + strcpy(g->Message, "No insert into existing zip file"); + return true; + } else if (append && len > 0) { + UNZIPUTL *zutp = new(g) UNZIPUTL(target, false); + + if (!zutp->IsInsertOk(g, filename)) { + strcpy(g->Message, "No insert into existing entry"); + return true; + } // endif Ok + + } // endif's /*********************************************************************/ /* Allocate the ZIP utility class. */ diff --git a/storage/connect/filamzip.h b/storage/connect/filamzip.h index 3160703bd20..be17d954728 100644 --- a/storage/connect/filamzip.h +++ b/storage/connect/filamzip.h @@ -1,5 +1,5 @@ /************** filamzip H Declares Source Code File (.H) **************/ -/* Name: filamzip.h Version 1.1 */ +/* Name: filamzip.h Version 1.2 */ /* */ /* (C) Copyright to the author Olivier BERTRAND 2016-2017 */ /* */ @@ -27,16 +27,13 @@ typedef class ZPXFAM *PZPXFAM; class DllExport ZIPUTIL : public BLOCK { public: // Constructor - ZIPUTIL(PSZ tgt); + ZIPUTIL(PCSZ tgt); //ZIPUTIL(ZIPUTIL *zutp); - // Implementation - //PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)UNZFAM(this); } - // Methods - bool OpenTable(PGLOBAL g, MODE mode, char *fn, bool append); - bool open(PGLOBAL g, char *fn, bool append); - bool addEntry(PGLOBAL g, char *entry); + bool OpenTable(PGLOBAL g, MODE mode, PCSZ fn, bool append); + bool open(PGLOBAL g, PCSZ fn, bool append); + bool addEntry(PGLOBAL g, PCSZ entry); void close(void); void closeEntry(void); int writeEntry(PGLOBAL g, char *buf, int len); @@ -44,15 +41,10 @@ class DllExport ZIPUTIL : public BLOCK { // Members zipFile zipfile; // The ZIP container file - PSZ target; // The target file name -//unz_file_info 
finfo; // The current file info + PCSZ target; // The target file name + PCSZ pwd; // The ZIP file password PFBLOCK fp; -//char *memory; -//uint size; -//int multiple; // Multiple targets bool entryopen; // True when open current entry -//char fn[FILENAME_MAX]; // The current entry file name -//char mapCaseTable[256]; }; // end of ZIPUTIL /***********************************************************************/ @@ -61,25 +53,27 @@ class DllExport ZIPUTIL : public BLOCK { class DllExport UNZIPUTL : public BLOCK { public: // Constructor - UNZIPUTL(PSZ tgt, bool mul); -//UNZIPUTL(UNZIPUTL *zutp); + UNZIPUTL(PCSZ tgt, bool mul); + UNZIPUTL(PDOSDEF tdp); // Implementation //PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)UNZFAM(this); } // Methods - bool OpenTable(PGLOBAL g, MODE mode, char *fn); - bool open(PGLOBAL g, char *fn); + bool OpenTable(PGLOBAL g, MODE mode, PCSZ fn); + bool open(PGLOBAL g, PCSZ fn); bool openEntry(PGLOBAL g); void close(void); void closeEntry(void); - bool WildMatch(PSZ pat, PSZ str); + bool WildMatch(PCSZ pat, PCSZ str); int findEntry(PGLOBAL g, bool next); int nextEntry(PGLOBAL g); + bool IsInsertOk(PGLOBAL g, PCSZ fn); // Members unzFile zipfile; // The ZIP container file - PSZ target; // The target file name + PCSZ target; // The target file name + PCSZ pwd; // The ZIP file password unz_file_info finfo; // The current file info PFBLOCK fp; char *memory; @@ -119,8 +113,7 @@ class DllExport UNZFAM : public MAPFAM { protected: // Members UNZIPUTL *zutp; - PSZ target; - bool mul; + PDOSDEF tdfp; }; // end of UNZFAM /***********************************************************************/ @@ -147,8 +140,7 @@ class DllExport UZXFAM : public MPXFAM { protected: // Members UNZIPUTL *zutp; - PSZ target; - bool mul; + PDOSDEF tdfp; }; // end of UZXFAM /***********************************************************************/ @@ -175,8 +167,9 @@ class DllExport ZIPFAM : public DOSFAM { protected: // Members ZIPUTIL *zutp; - PSZ target; + PCSZ target; 
bool append; +//bool replace; }; // end of ZIPFAM /***********************************************************************/ @@ -200,7 +193,7 @@ class DllExport ZPXFAM : public FIXFAM { protected: // Members ZIPUTIL *zutp; - PSZ target; + PCSZ target; bool append; }; // end of ZPXFAM diff --git a/storage/connect/filter.cpp b/storage/connect/filter.cpp index 262d6b58a70..da44b129ccb 100644 --- a/storage/connect/filter.cpp +++ b/storage/connect/filter.cpp @@ -1,7 +1,7 @@ /***************** Filter C++ Class Filter Code (.CPP) *****************/ -/* Name: FILTER.CPP Version 3.9 */ +/* Name: FILTER.CPP Version 4.0 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 1998-2014 */ +/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */ /* */ /* This file contains the class FILTER function code. */ /***********************************************************************/ @@ -87,8 +87,8 @@ BYTE OpBmp(PGLOBAL g, OPVAL opc) case OP_EXIST: bt = 0x00; break; default: sprintf(g->Message, MSG(BAD_FILTER_OP), opc); - longjmp(g->jumper[g->jump_level], TYPE_ARRAY); - } // endswitch opc + throw TYPE_ARRAY; + } // endswitch opc return bt; } // end of OpBmp @@ -1193,7 +1193,7 @@ bool FILTER::Convert(PGLOBAL g, bool having) Arg(0) = pXVOID; } // endif void - // pass thru + // fall through case OP_IN: // For IN operator do optimize if operand is an array if (GetArgType(1) != TYPE_ARRAY) @@ -1260,6 +1260,7 @@ bool FILTER::Eval(PGLOBAL g) } // endif Opm // For modified operators, pass thru + /* fall through */ case OP_IN: case OP_EXIST: // For IN operations, special processing is done here @@ -1408,7 +1409,7 @@ PFIL FILTER::Copy(PTABS t) /*********************************************************************/ /* Make file output of FILTER contents. 
*/ /*********************************************************************/ -void FILTER::Print(PGLOBAL g, FILE *f, uint n) +void FILTER::Printf(PGLOBAL g, FILE *f, uint n) { char m[64]; @@ -1430,7 +1431,7 @@ void FILTER::Print(PGLOBAL g, FILE *f, uint n) if (lin && fp->GetArgType(i) == TYPE_FILTER) fprintf(f, "%s Filter at %p\n", m, fp->Arg(i)); else - fp->Arg(i)->Print(g, f, n + 2); + fp->Arg(i)->Printf(g, f, n + 2); } // endfor i @@ -1441,7 +1442,7 @@ void FILTER::Print(PGLOBAL g, FILE *f, uint n) /***********************************************************************/ /* Make string output of TABLE contents (z should be checked). */ /***********************************************************************/ -void FILTER::Print(PGLOBAL g, char *ps, uint z) +void FILTER::Prints(PGLOBAL g, char *ps, uint z) { #define FLEN 100 @@ -1469,7 +1470,7 @@ void FILTER::Print(PGLOBAL g, char *ps, uint z) bcp = bxp; p = bcp->Cold; n = FLEN; - fp->Arg(0)->Print(g, p, n); + fp->Arg(0)->Prints(g, p, n); n = FLEN - strlen(p); switch (fp->Opc) { @@ -1515,7 +1516,7 @@ void FILTER::Print(PGLOBAL g, char *ps, uint z) n = FLEN - strlen(p); p += strlen(p); - fp->Arg(1)->Print(g, p, n); + fp->Arg(1)->Prints(g, p, n); } else if (!bcp) { strncat(ps, "???", z); @@ -1711,7 +1712,7 @@ PFIL PrepareFilter(PGLOBAL g, PFIL fp, bool having) break; // Remove eventual ending separator(s) // if (fp->Convert(g, having)) -// longjmp(g->jumper[g->jump_level], TYPE_FILTER); +// throw TYPE_ARRAY; filp = fp; fp = fp->Next; @@ -1744,7 +1745,7 @@ DllExport bool ApplyFilter(PGLOBAL g, PFIL filp) // return TRUE; if (filp->Eval(g)) - longjmp(g->jumper[g->jump_level], TYPE_FILTER); + throw TYPE_FILTER; if (trace > 1) htrc("PlugFilter filp=%p result=%d\n", diff --git a/storage/connect/filter.h b/storage/connect/filter.h index 15730e2cc44..22d1e4ed4be 100644 --- a/storage/connect/filter.h +++ b/storage/connect/filter.h @@ -61,8 +61,8 @@ class DllExport FILTER : public XOBJECT { /* Filter description block */ 
//virtual PXOB CheckSubQuery(PGLOBAL, PSQL); //virtual bool CheckLocal(PTDB); //virtual int CheckSpcCol(PTDB tdbp, int n); - virtual void Print(PGLOBAL g, FILE *f, uint n); - virtual void Print(PGLOBAL g, char *ps, uint z); + virtual void Printf(PGLOBAL g, FILE *f, uint n); + virtual void Prints(PGLOBAL g, char *ps, uint z); // PFIL Linearize(bool nosep); // PFIL Link(PGLOBAL g, PFIL fil2); // PFIL RemoveLastSep(void); diff --git a/storage/connect/global.h b/storage/connect/global.h index 4d01a3ff05b..a2030fdb5d0 100644 --- a/storage/connect/global.h +++ b/storage/connect/global.h @@ -1,6 +1,6 @@ /***********************************************************************/ /* GLOBAL.H: Declaration file used by all CONNECT implementations. */ -/* (C) Copyright Olivier Bertrand 1993-2014 */ +/* (C) Copyright Olivier Bertrand 1993-2017 */ /***********************************************************************/ /***********************************************************************/ @@ -59,7 +59,7 @@ #define NO_IVAL -95684275 /* Used by GetIntegerOption */ #define VMLANG 370 /* Size of olf VM lang blocks */ #define MAX_JUMP 24 /* Maximum jump level number */ -#define MAX_STR 1024 /* Maximum string length */ +#define MAX_STR 4160 /* Maximum message length */ #define STR_SIZE 501 /* Length of char strings. 
*/ #define STD_INPUT 0 /* Standard language input */ #define STD_OUTPUT 1 /* Standard language output */ @@ -229,9 +229,10 @@ typedef struct _parm { typedef struct _global { /* Global structure */ void *Sarea; /* Points to work area */ uint Sarea_Size; /* Work area size */ - PACTIVITY Activityp, ActivityStart; + PACTIVITY Activityp; char Message[MAX_STR]; - int Createas; /* To pass info to created table */ + ulong More; /* Used by jsonudf */ + int Createas; /* To pass info to created table */ void *Xchk; /* indexes in create/alter */ short Alchecked; /* Checked for ALTER */ short Mrr; /* True when doing mrr */ diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 1b6078cc3d9..887d692ba69 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -172,9 +172,9 @@ #define JSONMAX 10 // JSON Default max grp size extern "C" { - char version[]= "Version 1.05.0003 February 27, 2017"; + char version[]= "Version 1.06.0001 April 17, 2017"; #if defined(__WIN__) - char compver[]= "Version 1.05.0003 " __DATE__ " " __TIME__; + char compver[]= "Version 1.06.0001 " __DATE__ " " __TIME__; char slash= '\\'; #else // !__WIN__ char slash= '/'; @@ -213,10 +213,11 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info); PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info); int TranslateJDBCType(int stp, char *tn, int prec, int& len, char& v); void PushWarning(PGLOBAL g, THD *thd, int level); -bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host, - const char *db, char *tab, const char *src, int port); -bool ZipLoadFile(PGLOBAL, char*, char*, char*, bool, bool); +bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, PCSZ host, PCSZ db, + PCSZ tab, PCSZ src, int port); +bool ZipLoadFile(PGLOBAL, PCSZ, PCSZ, PCSZ, bool, bool); bool ExactInfo(void); +void mongo_init(bool); USETEMP UseTemp(void); int GetConvSize(void); TYPCONV GetTypeConv(void); @@ -509,7 +510,7 @@ ha_create_table_option connect_table_option_list[]= 
HA_TOPTION_NUMBER("LRECL", lrecl, 0, 0, INT_MAX32, 1), HA_TOPTION_NUMBER("BLOCK_SIZE", elements, 0, 0, INT_MAX32, 1), //HA_TOPTION_NUMBER("ESTIMATE", estimate, 0, 0, INT_MAX32, 1), - HA_TOPTION_NUMBER("MULTIPLE", multiple, 0, 0, 2, 1), + HA_TOPTION_NUMBER("MULTIPLE", multiple, 0, 0, 3, 1), HA_TOPTION_NUMBER("HEADER", header, 0, 0, 3, 1), HA_TOPTION_NUMBER("QUOTED", quoted, (ulonglong) -1, 0, 3, 1), HA_TOPTION_NUMBER("ENDING", ending, (ulonglong) -1, 0, INT_MAX32, 1), @@ -670,7 +671,7 @@ static int connect_init_func(void *p) sql_print_information("CONNECT: %s", version); #endif // !__WIN__ -#ifdef LIBXML2_SUPPORT +#if defined(LIBXML2_SUPPORT) XmlInitParserLib(); #endif // LIBXML2_SUPPORT @@ -1018,15 +1019,14 @@ ulonglong ha_connect::table_flags() const /****************************************************************************/ /* Return the value of an option specified in an option list. */ /****************************************************************************/ -char *GetListOption(PGLOBAL g, const char *opname, - const char *oplist, const char *def) +PCSZ GetListOption(PGLOBAL g, PCSZ opname, PCSZ oplist, PCSZ def) { if (!oplist) return (char*)def; char key[16], val[256]; char *pk, *pv, *pn; - char *opval= (char*)def; + PCSZ opval= def; int n; for (pk= (char*)oplist; pk; pk= ++pn) { @@ -1062,9 +1062,9 @@ char *GetListOption(PGLOBAL g, const char *opname, /****************************************************************************/ /* Return the value of a string option or NULL if not specified. 
*/ /****************************************************************************/ -char *GetStringTableOption(PGLOBAL g, PTOS options, char *opname, char *sdef) +PCSZ GetStringTableOption(PGLOBAL g, PTOS options, PCSZ opname, PCSZ sdef) { - const char *opval= NULL; + PCSZ opval= NULL; if (!options) return sdef; @@ -1107,10 +1107,10 @@ char *GetStringTableOption(PGLOBAL g, PTOS options, char *opname, char *sdef) /****************************************************************************/ /* Return the value of a Boolean option or bdef if not specified. */ /****************************************************************************/ -bool GetBooleanTableOption(PGLOBAL g, PTOS options, char *opname, bool bdef) +bool GetBooleanTableOption(PGLOBAL g, PTOS options, PCSZ opname, bool bdef) { - bool opval= bdef; - char *pv; + bool opval= bdef; + PCSZ pv; if (!options) return bdef; @@ -1138,7 +1138,7 @@ bool GetBooleanTableOption(PGLOBAL g, PTOS options, char *opname, bool bdef) /****************************************************************************/ /* Return the value of an integer option or NO_IVAL if not specified. 
*/ /****************************************************************************/ -int GetIntegerTableOption(PGLOBAL g, PTOS options, char *opname, int idef) +int GetIntegerTableOption(PGLOBAL g, PTOS options, PCSZ opname, int idef) { ulonglong opval= (ulonglong) NO_IVAL; @@ -1160,10 +1160,10 @@ int GetIntegerTableOption(PGLOBAL g, PTOS options, char *opname, int idef) opval= (options->compressed); if ((ulonglong) opval == (ulonglong)NO_IVAL) { - char *pv; + PCSZ pv; if ((pv= GetListOption(g, opname, options->oplist))) - opval= CharToNumber(pv, strlen(pv), ULONGLONG_MAX, true); + opval= CharToNumber((char*)pv, strlen(pv), ULONGLONG_MAX, true); else return idef; @@ -1188,7 +1188,7 @@ PTOS ha_connect::GetTableOptionStruct(TABLE_SHARE *s) /****************************************************************************/ /* Return the string eventually formatted with partition name. */ /****************************************************************************/ -char *ha_connect::GetRealString(const char *s) +char *ha_connect::GetRealString(PCSZ s) { char *sv; @@ -1205,10 +1205,10 @@ char *ha_connect::GetRealString(const char *s) /****************************************************************************/ /* Return the value of a string option or sdef if not specified. */ /****************************************************************************/ -char *ha_connect::GetStringOption(char *opname, char *sdef) +PCSZ ha_connect::GetStringOption(PCSZ opname, PCSZ sdef) { - char *opval= NULL; - PTOS options= GetTableOptionStruct(); + PCSZ opval= NULL; + PTOS options= GetTableOptionStruct(); if (!stricmp(opname, "Connect")) { LEX_CSTRING cnc= (tshp) ? tshp->connect_string @@ -1267,7 +1267,7 @@ char *ha_connect::GetStringOption(char *opname, char *sdef) /****************************************************************************/ /* Return the value of a Boolean option or bdef if not specified. 
*/ /****************************************************************************/ -bool ha_connect::GetBooleanOption(char *opname, bool bdef) +bool ha_connect::GetBooleanOption(PCSZ opname, bool bdef) { bool opval; PTOS options= GetTableOptionStruct(); @@ -1284,7 +1284,7 @@ bool ha_connect::GetBooleanOption(char *opname, bool bdef) /* Set the value of the opname option (does not work for oplist options) */ /* Currently used only to set the Sepindex value. */ /****************************************************************************/ -bool ha_connect::SetBooleanOption(char *opname, bool b) +bool ha_connect::SetBooleanOption(PCSZ opname, bool b) { PTOS options= GetTableOptionStruct(); @@ -1302,7 +1302,7 @@ bool ha_connect::SetBooleanOption(char *opname, bool b) /****************************************************************************/ /* Return the value of an integer option or NO_IVAL if not specified. */ /****************************************************************************/ -int ha_connect::GetIntegerOption(char *opname) +int ha_connect::GetIntegerOption(PCSZ opname) { int opval; PTOS options= GetTableOptionStruct(); @@ -1322,7 +1322,7 @@ int ha_connect::GetIntegerOption(char *opname) /* Set the value of the opname option (does not work for oplist options) */ /* Currently used only to set the Lrecl value. 
*/ /****************************************************************************/ -bool ha_connect::SetIntegerOption(char *opname, int n) +bool ha_connect::SetIntegerOption(PCSZ opname, int n) { PTOS options= GetTableOptionStruct(); @@ -1427,7 +1427,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) case MYSQL_TYPE_VARCHAR: case MYSQL_TYPE_VAR_STRING: pcf->Flags |= U_VAR; - /* no break */ + /* fall through */ default: pcf->Type= MYSQLtoPLG(fp->type(), &v); break; @@ -1522,7 +1522,7 @@ PXOS ha_connect::GetIndexOptionStruct(KEY *kp) /****************************************************************************/ /* Return a Boolean index option or false if not specified. */ /****************************************************************************/ -bool ha_connect::GetIndexOption(KEY *kp, char *opname) +bool ha_connect::GetIndexOption(KEY *kp, PCSZ opname) { bool opval= false; PXOS options= GetIndexOptionStruct(kp); @@ -1534,7 +1534,7 @@ bool ha_connect::GetIndexOption(KEY *kp, char *opname) opval= options->mapped; } else if (kp->comment.str && kp->comment.length) { - char *pv, *oplist= strz(xp->g, kp->comment); + PCSZ pv, oplist= strz(xp->g, kp->comment); if ((pv= GetListOption(xp->g, opname, oplist))) opval= (!*pv || *pv == 'y' || *pv == 'Y' || atoi(pv) != 0); @@ -1739,9 +1739,9 @@ void ha_connect::AddColName(char *cp, Field *fp) /***********************************************************************/ /* This function sets the current database path. 
*/ /***********************************************************************/ -void ha_connect::SetDataPath(PGLOBAL g, const char *path) +bool ha_connect::SetDataPath(PGLOBAL g, const char *path) { - datapath= SetPath(g, path); + return (!(datapath= SetPath(g, path))); } // end of SetDataPath /****************************************************************************/ @@ -1895,40 +1895,36 @@ int ha_connect::OpenTable(PGLOBAL g, bool del) bool ha_connect::CheckColumnList(PGLOBAL g) { // Check the list of used fields (columns) - int rc; bool brc= false; PCOL colp; Field* *field; Field* fp; MY_BITMAP *map= table->read_set; - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return true; - } // endif jump_level - - if ((rc= setjmp(g->jumper[++g->jump_level])) == 0) { + try { for (field= table->field; fp= *field; field++) if (bitmap_is_set(map, fp->field_index)) { if (!(colp= tdbp->ColDB(g, (PSZ)fp->field_name.str, 0))) { sprintf(g->Message, "Column %s not found in %s", fp->field_name.str, tdbp->GetName()); - brc= true; - goto fin; - } // endif colp + throw 1; + } // endif colp if ((brc= colp->InitValue(g))) - goto fin; + throw 2; colp->AddColUse(U_P); // For PLG tables } // endif - } else - brc= true; + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + brc = true; + } catch (const char *msg) { + strcpy(g->Message, msg); + brc = true; + } // end catch - fin: - g->jump_level--; return brc; } // end of CheckColumnList @@ -1965,7 +1961,8 @@ int ha_connect::CloseTable(PGLOBAL g) /***********************************************************************/ int ha_connect::MakeRecord(char *buf) { - char *p, *fmt, val[32]; + PCSZ fmt; + char *p, val[32]; int rc= 0; Field* *field; Field *fp; @@ -2101,7 +2098,7 @@ int ha_connect::ScanRecord(PGLOBAL g, const uchar *) { char attr_buffer[1024]; char data_buffer[1024]; - char *fmt; + PCSZ fmt; int rc= 0; PCOL 
colp; PVAL value, sdvalin; @@ -2274,7 +2271,7 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, const uchar *ptr; //uint i, rem, len, klen, stlen; uint i, rem, len, stlen; - bool nq, both, oom= false; + bool nq, both, oom; OPVAL op; Field *fp; const key_range *ranges[2]; @@ -2302,9 +2299,9 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, continue; if (both && i > 0) - oom|= qry->Append(") AND ("); + qry->Append(") AND ("); else - oom|= qry->Append(" WHERE ("); + qry->Append(" WHERE ("); // klen= len= ranges[i]->length; len= ranges[i]->length; @@ -2317,14 +2314,14 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, nq= fp->str_needs_quotes(); if (kpart != kfp->key_part) - oom|= qry->Append(" AND "); + qry->Append(" AND "); if (q) { - oom|= qry->Append(q); - oom|= qry->Append((PSZ)fp->field_name.str); - oom|= qry->Append(q); + qry->Append(q); + qry->Append((PSZ)fp->field_name.str); + qry->Append(q); } else - oom|= qry->Append((PSZ)fp->field_name.str); + qry->Append((PSZ)fp->field_name.str); switch (ranges[i]->flag) { case HA_READ_KEY_EXACT: @@ -2349,10 +2346,10 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, goto err; } // endswitch flag - oom|= qry->Append((PSZ)GetValStr(op, false)); + qry->Append((PSZ)GetValStr(op, false)); if (nq) - oom|= qry->Append('\''); + qry->Append('\''); if (kpart->key_part_flag & HA_VAR_LENGTH_PART) { String varchar; @@ -2360,17 +2357,17 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, varchar.set_quick((char*)ptr + HA_KEY_BLOB_LENGTH, var_length, &my_charset_bin); - oom|= qry->Append(varchar.ptr(), varchar.length(), nq); + qry->Append(varchar.ptr(), varchar.length(), nq); } else { char strbuff[MAX_FIELD_WIDTH]; String str(strbuff, sizeof(strbuff), kpart->field->charset()), *res; res= fp->val_str(&str, ptr); - oom|= qry->Append(res->ptr(), res->length(), nq); + qry->Append(res->ptr(), res->length(), nq); } // endif flag 
if (nq) - oom |= qry->Append('\''); + qry->Append('\''); if (stlen >= len) break; @@ -2385,7 +2382,9 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, } // endfor i - if ((oom|= qry->Append(")"))) + qry->Append(')'); + + if ((oom= qry->IsTruncated())) strcpy(g->Message, "Out of memory"); dbug_tmp_restore_column_map(table->write_set, old_map); @@ -2698,6 +2697,8 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond) if (x) return NULL; + else + pb0= pb1= pb2= ph0= ph1= ph2= NULL; if (trace) htrc("Cond: Ftype=%d name=%s\n", cond_item->functype(), @@ -2800,6 +2801,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond) case Item_func::LIKE_FUNC: vop= OP_LIKE; break; case Item_func::ISNOTNULL_FUNC: neg = true; + /* fall through */ case Item_func::ISNULL_FUNC: vop= OP_NULL; break; case Item_func::IN_FUNC: vop= OP_IN; case Item_func::BETWEEN: @@ -3053,7 +3055,6 @@ const COND *ha_connect::cond_push(const COND *cond) DBUG_ENTER("ha_connect::cond_push"); if (tdbp) { - int rc; PGLOBAL& g= xp->g; AMT tty= tdbp->GetAmType(); bool x= (tty == TYPE_AM_MYX || tty == TYPE_AM_XDBC); @@ -3061,58 +3062,66 @@ const COND *ha_connect::cond_push(const COND *cond) tty == TYPE_AM_TBL || tty == TYPE_AM_MYSQL || tty == TYPE_AM_PLG || tty == TYPE_AM_JDBC || x); - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - DBUG_RETURN(cond); - } // endif jump_level + // This should never happen but is done to avoid crashing + try { + if (b) { + PCFIL filp; + int rc; - // This should never happen but is done to avoid crashing - if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) - goto fin; + if ((filp = tdbp->GetCondFil()) && tdbp->GetCond() == cond && + filp->Idx == active_index && filp->Type == tty) + goto fin; - if (b) { - PCFIL filp; - int rc; + filp = new(g) CONDFIL(active_index, tty); + rc = filp->Init(g, this); - if ((filp= tdbp->GetCondFil()) && 
filp->Cond == cond && - filp->Idx == active_index && filp->Type == tty) - goto fin; // Already done + if (rc == RC_INFO) { + filp->Having = (char*)PlugSubAlloc(g, NULL, 256); + *filp->Having = 0; + } else if (rc == RC_FX) + goto fin; - filp= new(g) CONDFIL(cond, active_index, tty); - rc = filp->Init(g, this); + filp->Body = (char*)PlugSubAlloc(g, NULL, (x) ? 128 : 0); + *filp->Body = 0; - if (rc == RC_INFO) { - filp->Having = (char*)PlugSubAlloc(g, NULL, 256); - *filp->Having = 0; - } else if (rc == RC_FX) - goto fin; + if (CheckCond(g, filp, cond)) { + if (filp->Having && strlen(filp->Having) > 255) + goto fin; // Memory collapse - filp->Body = (char*)PlugSubAlloc(g, NULL, (x) ? 128 : 0); - *filp->Body = 0; + if (trace) + htrc("cond_push: %s\n", filp->Body); - if (CheckCond(g, filp, cond)) { - if (filp->Having && strlen(filp->Having) > 255) - goto fin; // Memory collapse + tdbp->SetCond(cond); - if (trace) - htrc("cond_push: %s\n", filp->Body); + if (!x) + PlugSubAlloc(g, NULL, strlen(filp->Body) + 1); + else + cond = NULL; // Does this work? - if (!x) - PlugSubAlloc(g, NULL, strlen(filp->Body) + 1); - else - cond= NULL; // Does this work? 
+ tdbp->SetCondFil(filp); + } else if (x && cond) + tdbp->SetCondFil(filp); // Wrong filter - tdbp->SetCondFil(filp); - } else if (x && cond) - tdbp->SetCondFil(filp); // Wrong filter + } else if (tdbp->CanBeFiltered()) { + if (!tdbp->GetCond() || tdbp->GetCond() != cond) { + tdbp->SetFilter(CondFilter(g, (Item *)cond)); - } else if (tty != TYPE_AM_JSN && tty != TYPE_AM_JSON) - tdbp->SetFilter(CondFilter(g, (Item *)cond)); + if (tdbp->GetFilter()) + tdbp->SetCond(cond); - fin: - g->jump_level--; - } // endif tdbp + } // endif cond + + } // endif tty + + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + } catch (const char *msg) { + strcpy(g->Message, msg); + } // end catch + + fin:; + } // endif tdbp // Let MySQL do the filtering DBUG_RETURN(cond); @@ -3262,26 +3271,36 @@ int ha_connect::optimize(THD* thd, HA_CHECK_OPT*) PGLOBAL& g= xp->g; PDBUSER dup= PlgGetUser(g); - // Ignore error on the opt file - dup->Check &= ~CHK_OPT; - tdbp= GetTDB(g); - dup->Check |= CHK_OPT; + try { + // Ignore error on the opt file + dup->Check &= ~CHK_OPT; + tdbp = GetTDB(g); + dup->Check |= CHK_OPT; - if (tdbp && !tdbp->IsRemote()) { - bool dop= IsTypeIndexable(GetRealType(NULL)); - bool dox= (tdbp->GetDef()->Indexable() == 1); + if (tdbp && !tdbp->IsRemote()) { + bool dop = IsTypeIndexable(GetRealType(NULL)); + bool dox = (tdbp->GetDef()->Indexable() == 1); - if ((rc= ((PTDBASE)tdbp)->ResetTableOpt(g, dop, dox))) { - if (rc == RC_INFO) { - push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); - rc= 0; - } else - rc= HA_ERR_INTERNAL_ERROR; + if ((rc = ((PTDBASE)tdbp)->ResetTableOpt(g, dop, dox))) { + if (rc == RC_INFO) { + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); + rc = 0; + } else + rc = HA_ERR_INTERNAL_ERROR; - } // endif rc + } // endif rc - } else if (!tdbp) - rc= HA_ERR_INTERNAL_ERROR; + } else if (!tdbp) + rc = HA_ERR_INTERNAL_ERROR; + + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + 
rc = HA_ERR_INTERNAL_ERROR; + } catch (const char *msg) { + strcpy(g->Message, msg); + rc = HA_ERR_INTERNAL_ERROR; + } // end catch return rc; } // end of optimize @@ -3976,8 +3995,12 @@ int ha_connect::rnd_pos(uchar *buf, uchar *pos) tdbp->SetFilter(NULL); rc= rnd_next(buf); - } else - rc= HA_ERR_KEY_NOT_FOUND; + } else { + PGLOBAL g = GetPlug((table) ? table->in_use : NULL, xp); + strcpy(g->Message, "Not supported by this table type"); + my_message(ER_ILLEGAL_HA, g->Message, MYF(0)); + rc= HA_ERR_INTERNAL_ERROR; + } // endif SetRecpos DBUG_RETURN(rc); } // end of rnd_pos @@ -4043,9 +4066,13 @@ int ha_connect::info(uint flag) } // endif xmod // This is necessary for getting file length - if (table) - SetDataPath(g, table->s->db.str); - else + if (table) { + if (SetDataPath(g, table->s->db.str)) { + my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } // endif SetDataPath + + } else DBUG_RETURN(HA_ERR_INTERNAL_ERROR); // Should never happen if (!(tdbp= GetTDB(g))) @@ -4195,35 +4222,36 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, const char *dbn, bool case TAB_INI: case TAB_VEC: case TAB_JSON: - if (options->filename && *options->filename) { - if (!quick) { - char *s, path[FN_REFLEN], dbpath[FN_REFLEN]; + if (options->filename && *options->filename) { + if (!quick) { + char path[FN_REFLEN], dbpath[FN_REFLEN]; + + strcpy(dbpath, mysql_real_data_home); + + if (db) #if defined(__WIN__) - s= "\\"; + strcat(strcat(dbpath, db), "\\"); #else // !__WIN__ - s= "/"; + strcat(strcat(dbpath, db), "/"); #endif // !__WIN__ - strcpy(dbpath, mysql_real_data_home); - - if (db) - strcat(strcat(dbpath, db), s); - (void) fn_format(path, options->filename, dbpath, "", - MY_RELATIVE_PATH | MY_UNPACK_FILENAME); + (void)fn_format(path, options->filename, dbpath, "", + MY_RELATIVE_PATH | MY_UNPACK_FILENAME); - if (!is_secure_file_path(path)) { - my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--secure-file-priv"); - return true; - } // 
endif path - } - } else + if (!is_secure_file_path(path)) { + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--secure-file-priv"); + return true; + } // endif path + } + } else return false; - /* Fall through to check FILE_ACL */ + /* check FILE_ACL */ + /* fall through */ case TAB_ODBC: case TAB_JDBC: case TAB_MYSQL: - case TAB_DIR: + case TAB_DIR: case TAB_MAC: case TAB_WMI: case TAB_ZIP: @@ -4528,9 +4556,11 @@ int ha_connect::external_lock(THD *thd, int lock_type) DBUG_RETURN(0); } else if (g->Xchk) { if (!tdbp) { - if (!(tdbp= GetTDB(g))) - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); - else if (!tdbp->GetDef()->Indexable()) { + if (!(tdbp = GetTDB(g))) { +// DBUG_RETURN(HA_ERR_INTERNAL_ERROR); causes assert error + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); + DBUG_RETURN(0); + } else if (!tdbp->GetDef()->Indexable()) { sprintf(g->Message, "external_lock: Table %s is not indexable", tdbp->GetName()); // DBUG_RETURN(HA_ERR_INTERNAL_ERROR); causes assert error push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); @@ -4614,7 +4644,9 @@ int ha_connect::external_lock(THD *thd, int lock_type) push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); rc= 0; - } // endif MakeIndex + //my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + //rc= HA_ERR_INTERNAL_ERROR; + } // endif MakeIndex } else if (tdbp->GetDef()->Indexable() == 3) { if (CheckVirtualIndex(NULL)) { @@ -4635,9 +4667,12 @@ int ha_connect::external_lock(THD *thd, int lock_type) // Make it a warning to avoid crash push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); rc= 0; - } // endif Close + //my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + //rc = HA_ERR_INTERNAL_ERROR; + } // endif Close locked= 0; +// m_lock_type= lock_type; xmod= MODE_ANY; // For info commands DBUG_RETURN(rc); } // endif MODE_ANY @@ -4995,8 +5030,8 @@ ha_rows ha_connect::records_in_range(uint inx, key_range *min_key, } // end of records_in_range // Used to check whether a MYSQL table 
is created on itself -bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host, - const char *db, char *tab, const char *src, int port) +bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, PCSZ host, + PCSZ db, PCSZ tab, PCSZ src, int port) { if (src) return false; @@ -5232,41 +5267,41 @@ static int connect_assisted_discovery(handlerton *, THD* thd, TABLE_SHARE *table_s, HA_CREATE_INFO *create_info) { - char v=0; - const char *fncn= "?"; - const char *user, *fn, *db, *host, *pwd, *sep, *tbl, *src; - const char *col, *ocl, *rnk, *pic, *fcl, *skc, *zfn; - char *tab, *dsn, *shm, *dpath; + char v=0; + PCSZ fncn= "?"; + PCSZ user, fn, db, host, pwd, sep, tbl, src; + PCSZ col, ocl, rnk, pic, fcl, skc, zfn; + char *tab, *dsn, *shm, *dpath; #if defined(__WIN__) - char *nsp= NULL, *cls= NULL; + PCSZ nsp= NULL, cls= NULL; #endif // __WIN__ -//int hdr, mxe; - int port = 0, mxr = 0, rc = 0, mul = 0, lrecl = 0; +//int hdr, mxe; + int port = 0, mxr = 0, rc = 0, mul = 0, lrecl = 0; #if defined(ODBC_SUPPORT) - POPARM sop= NULL; - char *ucnc= NULL; - bool cnc= false; - int cto= -1, qto= -1; + POPARM sop= NULL; + PCSZ ucnc= NULL; + bool cnc= false; + int cto= -1, qto= -1; #endif // ODBC_SUPPORT #if defined(JDBC_SUPPORT) - PJPARM sjp= NULL; - char *driver= NULL; - char *url= NULL; -//char *prop= NULL; - char *tabtyp= NULL; + PJPARM sjp= NULL; + PCSZ driver= NULL; + char *url= NULL; +//char *prop= NULL; + PCSZ tabtyp= NULL; #endif // JDBC_SUPPORT - uint tm, fnc= FNC_NO, supfnc= (FNC_NO | FNC_COL); - bool bif, ok= false, dbf= false; - TABTYPE ttp= TAB_UNDEF; - PQRYRES qrp= NULL; - PCOLRES crp; - PCONNECT xp= NULL; - PGLOBAL g= GetPlug(thd, xp); - PDBUSER dup= PlgGetUser(g); - PCATLG cat= (dup) ? 
dup->Catalog : NULL; - PTOS topt= table_s->option_struct; - char buf[1024]; - String sql(buf, sizeof(buf), system_charset_info); + uint tm, fnc= FNC_NO, supfnc= (FNC_NO | FNC_COL); + bool bif, ok= false, dbf= false; + TABTYPE ttp= TAB_UNDEF; + PQRYRES qrp= NULL; + PCOLRES crp; + PCONNECT xp= NULL; + PGLOBAL g= GetPlug(thd, xp); + PDBUSER dup= PlgGetUser(g); + PCATLG cat= (dup) ? dup->Catalog : NULL; + PTOS topt= table_s->option_struct; + char buf[1024]; + String sql(buf, sizeof(buf), system_charset_info); sql.copy(STRING_WITH_LEN("CREATE TABLE whatever ("), system_charset_info); @@ -5291,7 +5326,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, if (topt->oplist) { host= GetListOption(g, "host", topt->oplist, "localhost"); user= GetListOption(g, "user", topt->oplist, - (ttp == TAB_ODBC ? NULL : "root")); + ((ttp == TAB_ODBC || ttp == TAB_JDBC) ? NULL : "root")); // Default value db can come from the DBNAME=xxx option. db= GetListOption(g, "database", topt->oplist, db); col= GetListOption(g, "colist", topt->oplist, col); @@ -5328,132 +5363,122 @@ static int connect_assisted_discovery(handlerton *, THD* thd, #endif // ZIP_SUPPORT } else { host= "localhost"; - user= (ttp == TAB_ODBC ? NULL : "root"); + user= ((ttp == TAB_ODBC || ttp == TAB_JDBC) ? NULL : "root"); } // endif option_list if (!(shm= (char*)db)) db= table_s->db.str; // Default value - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - goto jer; - } // endif jump_level - - if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) { - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - goto err; - } // endif rc - - // Check table type - if (ttp == TAB_UNDEF) { - topt->type= (src) ? "MYSQL" : (tab) ? "PROXY" : "DOS"; - ttp= GetTypeID(topt->type); - sprintf(g->Message, "No table_type. 
Was set to %s", topt->type); - push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); - } else if (ttp == TAB_NIY) { - sprintf(g->Message, "Unsupported table type %s", topt->type); - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - goto err; - } // endif ttp - - if (!tab) { - if (ttp == TAB_TBL) { - // Make tab the first table of the list - char *p; - - if (!tbl) { - strcpy(g->Message, "Missing table list"); - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - goto err; - } // endif tbl - - tab= PlugDup(g, tbl); - - if ((p= strchr(tab, ','))) - *p= 0; - - if ((p=strchr(tab, '.'))) { - *p= 0; - db= tab; - tab= p + 1; - } // endif p - - } else if (ttp != TAB_ODBC || !(fnc & (FNC_TABLE | FNC_COL))) - tab= (char*) table_s->table_name.str; // Default value - - } // endif tab - - switch (ttp) { + try { + // Check table type + if (ttp == TAB_UNDEF) { + topt->type = (src) ? "MYSQL" : (tab) ? "PROXY" : "DOS"; + ttp = GetTypeID(topt->type); + sprintf(g->Message, "No table_type. Was set to %s", topt->type); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); + } else if (ttp == TAB_NIY) { + sprintf(g->Message, "Unsupported table type %s", topt->type); + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif ttp + + if (!tab) { + if (ttp == TAB_TBL) { + // Make tab the first table of the list + char *p; + + if (!tbl) { + strcpy(g->Message, "Missing table list"); + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif tbl + + tab = PlugDup(g, tbl); + + if ((p = strchr(tab, ','))) + *p = 0; + + if ((p = strchr(tab, '.'))) { + *p = 0; + db = tab; + tab = p + 1; + } // endif p + + } else if (ttp != TAB_ODBC || !(fnc & (FNC_TABLE | FNC_COL))) + tab = (char *) table_s->table_name.str; // Default value + + } // endif tab + + switch (ttp) { #if defined(ODBC_SUPPORT) - case TAB_ODBC: - dsn= strz(g, create_info->connect_string); + case TAB_ODBC: + dsn = strz(g, create_info->connect_string); - if (fnc & (FNC_DSN | FNC_DRIVER)) { - ok= true; + if (fnc & 
(FNC_DSN | FNC_DRIVER)) { + ok = true; #if defined(PROMPT_OK) - } else if (!stricmp(thd->main_security_ctx.host, "localhost") - && cop == 1) { - if ((dsn = ODBCCheckConnection(g, dsn, cop)) != NULL) { - thd->make_lex_string(&create_info->connect_string, dsn, strlen(dsn)); - ok= true; - } // endif dsn + } else if (!stricmp(thd->main_security_ctx.host, "localhost") + && cop == 1) { + if ((dsn = ODBCCheckConnection(g, dsn, cop)) != NULL) { + thd->make_lex_string(&create_info->connect_string, dsn, strlen(dsn)); + ok = true; + } // endif dsn #endif // PROMPT_OK - } else if (!dsn) { - sprintf(g->Message, "Missing %s connection string", topt->type); - } else { - // Store ODBC additional parameters - sop= (POPARM)PlugSubAlloc(g, NULL, sizeof(ODBCPARM)); - sop->User= (char*)user; - sop->Pwd= (char*)pwd; - sop->Cto= cto; - sop->Qto= qto; - sop->UseCnc= cnc; - ok= true; - } // endif's - - supfnc |= (FNC_TABLE | FNC_DSN | FNC_DRIVER); - break; + } else if (!dsn) { + sprintf(g->Message, "Missing %s connection string", topt->type); + } else { + // Store ODBC additional parameters + sop = (POPARM)PlugSubAlloc(g, NULL, sizeof(ODBCPARM)); + sop->User = (char*)user; + sop->Pwd = (char*)pwd; + sop->Cto = cto; + sop->Qto = qto; + sop->UseCnc = cnc; + ok = true; + } // endif's + + supfnc |= (FNC_TABLE | FNC_DSN | FNC_DRIVER); + break; #endif // ODBC_SUPPORT #if defined(JDBC_SUPPORT) - case TAB_JDBC: - if (fnc & FNC_DRIVER) { - ok= true; - } else if (!(url= strz(g, create_info->connect_string))) { - strcpy(g->Message, "Missing URL"); - } else { - // Store JDBC additional parameters - int rc; - PJDBCDEF jdef= new(g) JDBCDEF(); - - jdef->SetName(create_info->alias); - sjp= (PJPARM)PlugSubAlloc(g, NULL, sizeof(JDBCPARM)); - sjp->Driver= driver; -// sjp->Properties = prop; - sjp->Fsize= 0; - sjp->Scrollable= false; - - if ((rc = jdef->ParseURL(g, url, false)) == RC_OK) { - sjp->Url= url; - sjp->User= (char*)user; - sjp->Pwd= (char*)pwd; - ok= true; - } else if (rc == RC_NF) { - if 
(jdef->GetTabname()) - tab= jdef->GetTabname(); - - ok= jdef->SetParms(sjp); - } // endif rc - - } // endif's - - supfnc |= (FNC_DRIVER | FNC_TABLE); - break; + case TAB_JDBC: + if (fnc & FNC_DRIVER) { + ok = true; + } else if (!(url = strz(g, create_info->connect_string))) { + strcpy(g->Message, "Missing URL"); + } else { + // Store JDBC additional parameters + int rc; + PJDBCDEF jdef = new(g) JDBCDEF(); + + jdef->SetName(create_info->alias); + sjp = (PJPARM)PlugSubAlloc(g, NULL, sizeof(JDBCPARM)); + sjp->Driver = driver; + // sjp->Properties = prop; + sjp->Fsize = 0; + sjp->Scrollable = false; + + if ((rc = jdef->ParseURL(g, url, false)) == RC_OK) { + sjp->Url = url; + sjp->User = (char*)user; + sjp->Pwd = (char*)pwd; + ok = true; + } else if (rc == RC_NF) { + if (jdef->GetTabname()) + tab = (char*)jdef->GetTabname(); + + ok = jdef->SetParms(sjp); + } // endif rc + + } // endif's + + supfnc |= (FNC_DRIVER | FNC_TABLE); + break; #endif // JDBC_SUPPORT case TAB_DBF: dbf= true; - // Passthru + // fall through case TAB_CSV: if (!fn && fnc != FNC_NO) sprintf(g->Message, "Missing %s file name", topt->type); @@ -5462,415 +5487,372 @@ static int connect_assisted_discovery(handlerton *, THD* thd, else ok= true; - break; - case TAB_MYSQL: - ok= true; + break; + case TAB_MYSQL: + ok = true; - if (create_info->connect_string.str && - create_info->connect_string.length) { - PMYDEF mydef= new(g) MYSQLDEF(); + if (create_info->connect_string.str && + create_info->connect_string.length) { + PMYDEF mydef = new(g) MYSQLDEF(); - dsn= strz(g, create_info->connect_string); - mydef->SetName(create_info->alias); + dsn = strz(g, create_info->connect_string); + mydef->SetName(create_info->alias); - if (!mydef->ParseURL(g, dsn, false)) { - if (mydef->GetHostname()) - host= mydef->GetHostname(); + if (!mydef->ParseURL(g, dsn, false)) { + if (mydef->GetHostname()) + host = mydef->GetHostname(); - if (mydef->GetUsername()) - user= mydef->GetUsername(); + if (mydef->GetUsername()) + user = 
mydef->GetUsername(); - if (mydef->GetPassword()) - pwd= mydef->GetPassword(); + if (mydef->GetPassword()) + pwd = mydef->GetPassword(); - if (mydef->GetTabschema()) - db = mydef->GetTabschema(); + if (mydef->GetTabschema()) + db = mydef->GetTabschema(); - if (mydef->GetTabname()) - tab= mydef->GetTabname(); + if (mydef->GetTabname()) + tab = (char*)mydef->GetTabname(); - if (mydef->GetPortnumber()) - port= mydef->GetPortnumber(); + if (mydef->GetPortnumber()) + port = mydef->GetPortnumber(); - } else - ok= false; + } else + ok = false; - } else if (!user) - user= "root"; + } else if (!user) + user = "root"; - if (ok && CheckSelf(g, table_s, host, db, tab, src, port)) - ok= false; + if (ok && CheckSelf(g, table_s, host, db, tab, src, port)) + ok = false; - break; + break; #if defined(__WIN__) - case TAB_WMI: - ok= true; - break; + case TAB_WMI: + ok = true; + break; #endif // __WIN__ #if defined(PIVOT_SUPPORT) - case TAB_PIVOT: - supfnc= FNC_NO; + case TAB_PIVOT: + supfnc = FNC_NO; #endif // PIVOT_SUPPORT - case TAB_PRX: - case TAB_TBL: - case TAB_XCL: - case TAB_OCCUR: - if (!src && !stricmp(tab, create_info->alias) && - (!db || !stricmp(db, table_s->db.str))) - sprintf(g->Message, "A %s table cannot refer to itself", topt->type); - else - ok= true; + case TAB_PRX: + case TAB_TBL: + case TAB_XCL: + case TAB_OCCUR: + if (!src && !stricmp(tab, create_info->alias) && + (!db || !stricmp(db, table_s->db.str))) + sprintf(g->Message, "A %s table cannot refer to itself", topt->type); + else + ok = true; - break; - case TAB_OEM: - if (topt->module && topt->subtype) - ok= true; - else - strcpy(g->Message, "Missing OEM module or subtype"); + break; + case TAB_OEM: + if (topt->module && topt->subtype) + ok = true; + else + strcpy(g->Message, "Missing OEM module or subtype"); - break; + break; #if defined(LIBXML2_SUPPORT) || defined(DOMDOC_SUPPORT) - case TAB_XML: + case TAB_XML: #endif // LIBXML2_SUPPORT || DOMDOC_SUPPORT - case TAB_JSON: - if (!fn && !zfn && !mul) - 
sprintf(g->Message, "Missing %s file name", topt->type); - else - ok= true; - - break; - case TAB_VIR: - ok= true; - break; - default: - sprintf(g->Message, "Cannot get column info for table type %s", topt->type); - break; - } // endif ttp + case TAB_JSON: + if (!fn && !zfn && !mul) + sprintf(g->Message, "Missing %s file name", topt->type); + else + ok = true; - // Check for supported catalog function - if (ok && !(supfnc & fnc)) { - sprintf(g->Message, "Unsupported catalog function %s for table type %s", - fncn, topt->type); - ok= false; - } // endif supfnc - - if (src && fnc != FNC_NO) { - strcpy(g->Message, "Cannot make catalog table from srcdef"); - ok= false; - } // endif src - - if (ok) { - char *cnm, *rem, *dft, *xtra, *key, *fmt; - int i, len, prec, dec, typ, flg; - -// if (cat) -// cat->SetDataPath(g, table_s->db.str); -// else -// return HA_ERR_INTERNAL_ERROR; // Should never happen - - dpath= SetPath(g, table_s->db.str); + break; + case TAB_VIR: + ok = true; + break; + default: + sprintf(g->Message, "Cannot get column info for table type %s", topt->type); + break; + } // endif ttp + + // Check for supported catalog function + if (ok && !(supfnc & fnc)) { + sprintf(g->Message, "Unsupported catalog function %s for table type %s", + fncn, topt->type); + ok = false; + } // endif supfnc + + if (src && fnc != FNC_NO) { + strcpy(g->Message, "Cannot make catalog table from srcdef"); + ok = false; + } // endif src + + if (ok) { + const char *cnm, *rem; + char *dft, *xtra, *key, *fmt; + int i, len, prec, dec, typ, flg; + + if (!(dpath = SetPath(g, table_s->db.str))) { + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif dpath - if (src && ttp != TAB_PIVOT && ttp != TAB_ODBC && ttp != TAB_JDBC) { - qrp= SrcColumns(g, host, db, user, pwd, src, port); + if (src && ttp != TAB_PIVOT && ttp != TAB_ODBC && ttp != TAB_JDBC) { + qrp = SrcColumns(g, host, db, user, pwd, src, port); - if (qrp && ttp == TAB_OCCUR) - if (OcrSrcCols(g, qrp, col, ocl, rnk)) { - 
my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - goto err; - } // endif OcrSrcCols + if (qrp && ttp == TAB_OCCUR) + if (OcrSrcCols(g, qrp, col, ocl, rnk)) { + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif OcrSrcCols - } else switch (ttp) { - case TAB_DBF: - qrp= DBFColumns(g, dpath, fn, fnc == FNC_COL); - break; + } else switch (ttp) { + case TAB_DBF: + qrp = DBFColumns(g, dpath, fn, fnc == FNC_COL); + break; #if defined(ODBC_SUPPORT) - case TAB_ODBC: - switch (fnc) { - case FNC_NO: - case FNC_COL: - if (src) { - qrp= ODBCSrcCols(g, dsn, (char*)src, sop); - src= NULL; // for next tests - } else - qrp= ODBCColumns(g, dsn, shm, tab, NULL, - mxr, fnc == FNC_COL, sop); + case TAB_ODBC: + switch (fnc) { + case FNC_NO: + case FNC_COL: + if (src) { + qrp = ODBCSrcCols(g, dsn, (char*)src, sop); + src = NULL; // for next tests + } else + qrp = ODBCColumns(g, dsn, shm, tab, NULL, + mxr, fnc == FNC_COL, sop); - break; - case FNC_TABLE: - qrp= ODBCTables(g, dsn, shm, tab, NULL, mxr, true, sop); - break; - case FNC_DSN: - qrp= ODBCDataSources(g, mxr, true); - break; - case FNC_DRIVER: - qrp= ODBCDrivers(g, mxr, true); - break; - default: - sprintf(g->Message, "invalid catfunc %s", fncn); - break; - } // endswitch info + break; + case FNC_TABLE: + qrp = ODBCTables(g, dsn, shm, tab, NULL, mxr, true, sop); + break; + case FNC_DSN: + qrp = ODBCDataSources(g, mxr, true); + break; + case FNC_DRIVER: + qrp = ODBCDrivers(g, mxr, true); + break; + default: + sprintf(g->Message, "invalid catfunc %s", fncn); + break; + } // endswitch info - break; + break; #endif // ODBC_SUPPORT #if defined(JDBC_SUPPORT) - case TAB_JDBC: - switch (fnc) { - case FNC_NO: - case FNC_COL: - if (src) { - qrp= JDBCSrcCols(g, (char*)src, sjp); - src= NULL; // for next tests - } else - qrp= JDBCColumns(g, shm, tab, NULL, mxr, fnc == FNC_COL, sjp); + case TAB_JDBC: + switch (fnc) { + case FNC_NO: + case FNC_COL: + if (src) { + qrp = JDBCSrcCols(g, (char*)src, sjp); + src = NULL; // for next tests + } 
else + qrp = JDBCColumns(g, shm, tab, NULL, mxr, fnc == FNC_COL, sjp); - break; - case FNC_TABLE: - qrp= JDBCTables(g, shm, tab, tabtyp, mxr, true, sjp); - break; + break; + case FNC_TABLE: + qrp = JDBCTables(g, shm, tab, tabtyp, mxr, true, sjp); + break; #if 0 - case FNC_DSN: - qrp= JDBCDataSources(g, mxr, true); - break; + case FNC_DSN: + qrp = JDBCDataSources(g, mxr, true); + break; #endif // 0 - case FNC_DRIVER: - qrp= JDBCDrivers(g, mxr, true); - break; - default: - sprintf(g->Message, "invalid catfunc %s", fncn); - break; - } // endswitch info + case FNC_DRIVER: + qrp = JDBCDrivers(g, mxr, true); + break; + default: + sprintf(g->Message, "invalid catfunc %s", fncn); + break; + } // endswitch info - break; + break; #endif // JDBC_SUPPORT - case TAB_MYSQL: - qrp= MyColumns(g, thd, host, db, user, pwd, tab, - NULL, port, fnc == FNC_COL); - break; - case TAB_CSV: - qrp = CSVColumns(g, dpath, topt, fnc == FNC_COL); - break; + case TAB_MYSQL: + qrp = MyColumns(g, thd, host, db, user, pwd, tab, + NULL, port, fnc == FNC_COL); + break; + case TAB_CSV: + qrp = CSVColumns(g, dpath, topt, fnc == FNC_COL); + break; #if defined(__WIN__) - case TAB_WMI: - qrp= WMIColumns(g, nsp, cls, fnc == FNC_COL); - break; + case TAB_WMI: + qrp = WMIColumns(g, nsp, cls, fnc == FNC_COL); + break; #endif // __WIN__ - case TAB_PRX: - case TAB_TBL: - case TAB_XCL: - case TAB_OCCUR: - bif= fnc == FNC_COL; - qrp= TabColumns(g, thd, db, tab, bif); - - if (!qrp && bif && fnc != FNC_COL) // tab is a view - qrp= MyColumns(g, thd, host, db, user, pwd, tab, NULL, port, false); - - if (qrp && ttp == TAB_OCCUR && fnc != FNC_COL) - if (OcrColumns(g, qrp, col, ocl, rnk)) { - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - goto err; - } // endif OcrColumns + case TAB_PRX: + case TAB_TBL: + case TAB_XCL: + case TAB_OCCUR: + bif = fnc == FNC_COL; + qrp = TabColumns(g, thd, db, tab, bif); + + if (!qrp && bif && fnc != FNC_COL) // tab is a view + qrp = MyColumns(g, thd, host, db, user, pwd, tab, NULL, 
port, false); + + if (qrp && ttp == TAB_OCCUR && fnc != FNC_COL) + if (OcrColumns(g, qrp, col, ocl, rnk)) { + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif OcrColumns - break; + break; #if defined(PIVOT_SUPPORT) - case TAB_PIVOT: - qrp= PivotColumns(g, tab, src, pic, fcl, skc, host, db, user, pwd, port); - break; + case TAB_PIVOT: + qrp = PivotColumns(g, tab, src, pic, fcl, skc, host, db, user, pwd, port); + break; #endif // PIVOT_SUPPORT - case TAB_VIR: - qrp= VirColumns(g, fnc == FNC_COL); - break; - case TAB_JSON: - qrp= JSONColumns(g, (char*)db, topt, fnc == FNC_COL); - break; + case TAB_VIR: + qrp = VirColumns(g, fnc == FNC_COL); + break; + case TAB_JSON: + qrp = JSONColumns(g, (char*)db, topt, fnc == FNC_COL); + break; #if defined(LIBXML2_SUPPORT) || defined(DOMDOC_SUPPORT) - case TAB_XML: - qrp= XMLColumns(g, (char*)db, tab, topt, fnc == FNC_COL); - break; + case TAB_XML: + qrp = XMLColumns(g, (char*)db, tab, topt, fnc == FNC_COL); + break; #endif // LIBXML2_SUPPORT || DOMDOC_SUPPORT - case TAB_OEM: - qrp= OEMColumns(g, topt, tab, (char*)db, fnc == FNC_COL); - break; - default: - strcpy(g->Message, "System error during assisted discovery"); - break; - } // endswitch ttp - - if (!qrp) { - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - goto err; - } // endif !qrp - - if (fnc != FNC_NO || src || ttp == TAB_PIVOT) { - // Catalog like table - for (crp= qrp->Colresp; !rc && crp; crp= crp->Next) { - cnm= (ttp == TAB_PIVOT) ? crp->Name : encode(g, crp->Name); - typ= crp->Type; - len= crp->Length; - dec= crp->Prec; - flg= crp->Flag; - v= (crp->Kdata->IsUnsigned()) ? 'U' : crp->Var; - tm= (crp->Kdata->IsNullable()) ? 
0 : NOT_NULL_FLAG; - - if (!len && typ == TYPE_STRING) - len= 256; // STRBLK's have 0 length - - // Now add the field - if (add_field(&sql, cnm, typ, len, dec, NULL, tm, - NULL, NULL, NULL, NULL, flg, dbf, v)) - rc= HA_ERR_OUT_OF_MEM; - } // endfor crp - - } else { - char *schem= NULL; - char *tn= NULL; - - // Not a catalog table - if (!qrp->Nblin) { - if (tab) - sprintf(g->Message, "Cannot get columns from %s", tab); - else - strcpy(g->Message, "Fail to retrieve columns"); - - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - goto err; - } // endif !nblin - - for (i= 0; !rc && i < qrp->Nblin; i++) { - typ= len= prec= dec= 0; - tm= NOT_NULL_FLAG; - cnm= (char*)"noname"; - dft= xtra= key= fmt= tn= NULL; - v= ' '; - rem= NULL; - - for (crp= qrp->Colresp; crp; crp= crp->Next) - switch (crp->Fld) { - case FLD_NAME: - if (ttp == TAB_PRX || - (ttp == TAB_CSV && topt->data_charset && - (!stricmp(topt->data_charset, "UTF8") || - !stricmp(topt->data_charset, "UTF-8")))) - cnm= crp->Kdata->GetCharValue(i); - else - cnm= encode(g, crp->Kdata->GetCharValue(i)); - - break; - case FLD_TYPE: - typ= crp->Kdata->GetIntValue(i); - v = (crp->Nulls) ? crp->Nulls[i] : 0; - break; - case FLD_TYPENAME: - tn= crp->Kdata->GetCharValue(i); - break; - case FLD_PREC: - // PREC must be always before LENGTH - len= prec= crp->Kdata->GetIntValue(i); - break; - case FLD_LENGTH: - len= crp->Kdata->GetIntValue(i); - break; - case FLD_SCALE: - dec = (!crp->Kdata->IsNull(i)) ? crp->Kdata->GetIntValue(i) : -1; - break; - case FLD_NULL: - if (crp->Kdata->GetIntValue(i)) - tm= 0; // Nullable - - break; - case FLD_FORMAT: - fmt= (crp->Kdata) ? 
crp->Kdata->GetCharValue(i) : NULL; - break; - case FLD_REM: - rem= crp->Kdata->GetCharValue(i); - break; -// case FLD_CHARSET: - // No good because remote table is already translated -// if (*(csn= crp->Kdata->GetCharValue(i))) -// cs= get_charset_by_name(csn, 0); - -// break; - case FLD_DEFAULT: - dft= crp->Kdata->GetCharValue(i); - break; - case FLD_EXTRA: - xtra= crp->Kdata->GetCharValue(i); - - // Auto_increment is not supported yet - if (!stricmp(xtra, "AUTO_INCREMENT")) - xtra= NULL; - - break; - case FLD_KEY: - if (ttp == TAB_VIR) - key= crp->Kdata->GetCharValue(i); + case TAB_OEM: + qrp = OEMColumns(g, topt, tab, (char*)db, fnc == FNC_COL); + break; + default: + strcpy(g->Message, "System error during assisted discovery"); + break; + } // endswitch ttp - break; - case FLD_SCHEM: + if (!qrp) { + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif !qrp + + if (fnc != FNC_NO || src || ttp == TAB_PIVOT) { + // Catalog like table + for (crp = qrp->Colresp; !rc && crp; crp = crp->Next) { + cnm = (ttp == TAB_PIVOT) ? crp->Name : encode(g, crp->Name); + typ = crp->Type; + len = crp->Length; + dec = crp->Prec; + flg = crp->Flag; + v = (crp->Kdata->IsUnsigned()) ? 'U' : crp->Var; + tm = (crp->Kdata->IsNullable()) ? 
0 : NOT_NULL_FLAG; + + if (!len && typ == TYPE_STRING) + len = 256; // STRBLK's have 0 length + + // Now add the field + if (add_field(&sql, cnm, typ, len, dec, NULL, tm, + NULL, NULL, NULL, NULL, flg, dbf, v)) + rc = HA_ERR_OUT_OF_MEM; + } // endfor crp + + } else { + char *schem = NULL; + char *tn = NULL; + + // Not a catalog table + if (!qrp->Nblin) { + if (tab) + sprintf(g->Message, "Cannot get columns from %s", tab); + else + strcpy(g->Message, "Fail to retrieve columns"); + + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif !nblin + + for (i = 0; !rc && i < qrp->Nblin; i++) { + typ = len = prec = dec = 0; + tm = NOT_NULL_FLAG; + cnm = (char*)"noname"; + dft = xtra = key = fmt = tn = NULL; + v = ' '; + rem = NULL; + + for (crp = qrp->Colresp; crp; crp = crp->Next) + switch (crp->Fld) { + case FLD_NAME: + if (ttp == TAB_PRX || + (ttp == TAB_CSV && topt->data_charset && + (!stricmp(topt->data_charset, "UTF8") || + !stricmp(topt->data_charset, "UTF-8")))) + cnm = crp->Kdata->GetCharValue(i); + else + cnm = encode(g, crp->Kdata->GetCharValue(i)); + + break; + case FLD_TYPE: + typ = crp->Kdata->GetIntValue(i); + v = (crp->Nulls) ? crp->Nulls[i] : 0; + break; + case FLD_TYPENAME: + tn = crp->Kdata->GetCharValue(i); + break; + case FLD_PREC: + // PREC must be always before LENGTH + len = prec = crp->Kdata->GetIntValue(i); + break; + case FLD_LENGTH: + len = crp->Kdata->GetIntValue(i); + break; + case FLD_SCALE: + dec = (!crp->Kdata->IsNull(i)) ? crp->Kdata->GetIntValue(i) : -1; + break; + case FLD_NULL: + if (crp->Kdata->GetIntValue(i)) + tm = 0; // Nullable + + break; + case FLD_FORMAT: + fmt = (crp->Kdata) ? 
crp->Kdata->GetCharValue(i) : NULL; + break; + case FLD_REM: + rem = crp->Kdata->GetCharValue(i); + break; + // case FLD_CHARSET: + // No good because remote table is already translated + // if (*(csn= crp->Kdata->GetCharValue(i))) + // cs= get_charset_by_name(csn, 0); + + // break; + case FLD_DEFAULT: + dft = crp->Kdata->GetCharValue(i); + break; + case FLD_EXTRA: + xtra = crp->Kdata->GetCharValue(i); + + // Auto_increment is not supported yet + if (!stricmp(xtra, "AUTO_INCREMENT")) + xtra = NULL; + + break; + case FLD_KEY: + if (ttp == TAB_VIR) + key = crp->Kdata->GetCharValue(i); + + break; + case FLD_SCHEM: #if defined(ODBC_SUPPORT) || defined(JDBC_SUPPORT) - if ((ttp == TAB_ODBC || ttp == TAB_JDBC) && crp->Kdata) { - if (schem && stricmp(schem, crp->Kdata->GetCharValue(i))) { - sprintf(g->Message, - "Several %s tables found, specify DBNAME", tab); - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - goto err; - } else if (!schem) - schem= crp->Kdata->GetCharValue(i); - - } // endif ttp + if ((ttp == TAB_ODBC || ttp == TAB_JDBC) && crp->Kdata) { + if (schem && stricmp(schem, crp->Kdata->GetCharValue(i))) { + sprintf(g->Message, + "Several %s tables found, specify DBNAME", tab); + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } else if (!schem) + schem = crp->Kdata->GetCharValue(i); + + } // endif ttp #endif // ODBC_SUPPORT || JDBC_SUPPORT - default: - break; // Ignore - } // endswitch Fld + default: + break; // Ignore + } // endswitch Fld #if defined(ODBC_SUPPORT) - if (ttp == TAB_ODBC) { - int plgtyp; - bool w= false; // Wide character type - - // typ must be PLG type, not SQL type - if (!(plgtyp= TranslateSQLType(typ, dec, prec, v, w))) { - if (GetTypeConv() == TPC_SKIP) { - // Skip this column - sprintf(g->Message, "Column %s skipped (unsupported type %d)", - cnm, typ); - push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); - continue; - } else { - sprintf(g->Message, "Unsupported SQL type %d", typ); - my_message(ER_UNKNOWN_ERROR, g->Message, 
MYF(0)); - goto err; - } // endif type_conv - - } else - typ= plgtyp; - - switch (typ) { - case TYPE_STRING: - if (w) { - sprintf(g->Message, "Column %s is wide characters", cnm); - push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, 0, g->Message); - } // endif w - - break; - case TYPE_DOUBLE: - // Some data sources do not count dec in length (prec) - prec += (dec + 2); // To be safe - break; - case TYPE_DECIM: - prec= len; - break; - default: - dec= 0; - } // endswitch typ - - } else -#endif // ODBC_SUPPORT -#if defined(JDBC_SUPPORT) - if (ttp == TAB_JDBC) { + if (ttp == TAB_ODBC) { int plgtyp; + bool w = false; // Wide character type // typ must be PLG type, not SQL type - if (!(plgtyp= TranslateJDBCType(typ, tn, dec, prec, v))) { + if (!(plgtyp = TranslateSQLType(typ, dec, prec, v, w))) { if (GetTypeConv() == TPC_SKIP) { // Skip this column sprintf(g->Message, "Column %s skipped (unsupported type %d)", @@ -5879,55 +5861,107 @@ static int connect_assisted_discovery(handlerton *, THD* thd, continue; } else { sprintf(g->Message, "Unsupported SQL type %d", typ); - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + rc = HA_ERR_INTERNAL_ERROR; goto err; } // endif type_conv } else - typ= plgtyp; + typ = plgtyp; switch (typ) { - case TYPE_DOUBLE: - case TYPE_DECIM: - // Some data sources do not count dec in length (prec) - prec += (dec + 2); // To be safe - break; - default: - dec= 0; + case TYPE_STRING: + if (w) { + sprintf(g->Message, "Column %s is wide characters", cnm); + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, 0, g->Message); + } // endif w + + break; + case TYPE_DOUBLE: + // Some data sources do not count dec in length (prec) + prec += (dec + 2); // To be safe + break; + case TYPE_DECIM: + prec = len; + break; + default: + dec = 0; } // endswitch typ } else #endif // ODBC_SUPPORT - // Make the arguments as required by add_fields - if (typ == TYPE_DOUBLE) - prec= len; +#if defined(JDBC_SUPPORT) + if (ttp == TAB_JDBC) { + int plgtyp; + + // typ must be 
PLG type, not SQL type + if (!(plgtyp = TranslateJDBCType(typ, tn, dec, prec, v))) { + if (GetTypeConv() == TPC_SKIP) { + // Skip this column + sprintf(g->Message, "Column %s skipped (unsupported type %d)", + cnm, typ); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); + continue; + } else { + sprintf(g->Message, "Unsupported SQL type %d", typ); + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif type_conv + + } else + typ = plgtyp; + + switch (typ) { + case TYPE_DOUBLE: + case TYPE_DECIM: + // Some data sources do not count dec in length (prec) + prec += (dec + 2); // To be safe + break; + default: + dec = 0; + } // endswitch typ - if (typ == TYPE_DATE) - prec= 0; + } else +#endif // ODBC_SUPPORT + // Make the arguments as required by add_fields + if (typ == TYPE_DOUBLE) + prec = len; - // Now add the field - if (add_field(&sql, cnm, typ, prec, dec, key, tm, rem, dft, xtra, - fmt, 0, dbf, v)) - rc= HA_ERR_OUT_OF_MEM; - } // endfor i + if (typ == TYPE_DATE) + prec = 0; - } // endif fnc + // Now add the field + if (add_field(&sql, cnm, typ, prec, dec, key, tm, rem, dft, xtra, + fmt, 0, dbf, v)) + rc = HA_ERR_OUT_OF_MEM; + } // endfor i - if (!rc) - rc= init_table_share(thd, table_s, create_info, &sql); + } // endif fnc - g->jump_level--; - PopUser(xp); - return rc; - } // endif ok + if (!rc) + rc = init_table_share(thd, table_s, create_info, &sql); - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + //g->jump_level--; + //PopUser(xp); + //return rc; + } else { + rc = HA_ERR_UNSUPPORTED; + } // endif ok + + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + rc = HA_ERR_INTERNAL_ERROR; + } catch (const char *msg) { + strcpy(g->Message, msg); + rc = HA_ERR_INTERNAL_ERROR; + } // end catch err: - g->jump_level--; - jer: + if (rc) + my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + PopUser(xp); - return HA_ERR_INTERNAL_ERROR; + return rc; } // end of connect_assisted_discovery /** @@ -6092,8 +6126,8 @@ int 
ha_connect::create(const char *name, TABLE *table_arg, if (!part_info) #endif // WITH_PARTITION_STORAGE_ENGINE {const char *src= options->srcdef; - char *host, *db, *tab= (char*)options->tabname; - int port; + PCSZ host, db, tab= options->tabname; + int port; host= GetListOption(g, "host", options->oplist, NULL); db= GetStringOption("database", NULL); @@ -6137,8 +6171,8 @@ int ha_connect::create(const char *name, TABLE *table_arg, } // endswitch ttp if (type == TAB_XML) { - bool dom; // True: MS-DOM, False libxml2 - char *xsup= GetListOption(g, "Xmlsup", options->oplist, "*"); + bool dom; // True: MS-DOM, False libxml2 + PCSZ xsup= GetListOption(g, "Xmlsup", options->oplist, "*"); // Note that if no support is specified, the default is MS-DOM // on Windows and libxml2 otherwise @@ -6398,15 +6432,15 @@ int ha_connect::create(const char *name, TABLE *table_arg, if (options->zipped) { // Check whether the zip entry must be made from a file - char *fn = GetListOption(g, "Load", options->oplist, NULL); + PCSZ fn = GetListOption(g, "Load", options->oplist, NULL); if (fn) { - char zbuf[_MAX_PATH], buf[_MAX_PATH], dbpath[_MAX_PATH]; - char *entry = GetListOption(g, "Entry", options->oplist, NULL); - char *a = GetListOption(g, "Append", options->oplist, "NO"); - bool append = *a == '1' || *a == 'Y' || *a == 'y' || !stricmp(a, "ON"); - char *m = GetListOption(g, "Mulentries", options->oplist, "NO"); - bool mul = *m == '1' || *m == 'Y' || *m == 'y' || !stricmp(m, "ON"); + char zbuf[_MAX_PATH], buf[_MAX_PATH], dbpath[_MAX_PATH]; + PCSZ entry = GetListOption(g, "Entry", options->oplist, NULL); + PCSZ a = GetListOption(g, "Append", options->oplist, "NO"); + bool append = *a == '1' || *a == 'Y' || *a == 'y' || !stricmp(a, "ON"); + PCSZ m = GetListOption(g, "Mulentries", options->oplist, "NO"); + bool mul = *m == '1' || *m == 'Y' || *m == 'y' || !stricmp(m, "ON"); if (!entry && !mul) { my_message(ER_UNKNOWN_ERROR, "Missing entry name", MYF(0)); @@ -6474,11 +6508,10 @@ int 
ha_connect::create(const char *name, TABLE *table_arg, PDBUSER dup= PlgGetUser(g); PCATLG cat= (dup) ? dup->Catalog : NULL; - SetDataPath(g, table_arg->s->db.str); - - if (cat) { -// cat->SetDataPath(g, table_arg->s->db.str); - + if (SetDataPath(g, table_arg->s->db.str)) { + my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + rc = HA_ERR_INTERNAL_ERROR; + } else if (cat) { #if defined(WITH_PARTITION_STORAGE_ENGINE) if (part_info) strncpy(partname, @@ -6534,8 +6567,9 @@ bool ha_connect::FileExists(const char *fn, bool bf) return true; if (table) { - char *s, tfn[_MAX_PATH], filename[_MAX_PATH], path[_MAX_PATH]; - bool b= false; + const char *s; + char tfn[_MAX_PATH], filename[_MAX_PATH], path[_MAX_PATH]; + bool b= false; int n; struct stat info; @@ -6592,9 +6626,9 @@ bool ha_connect::CheckString(const char *str1, const char *str2) /** check whether a string option have changed */ -bool ha_connect::SameString(TABLE *tab, char *opn) +bool ha_connect::SameString(TABLE *tab, PCSZ opn) { - char *str1, *str2; + PCSZ str1, str2; tshp= tab->s; // The altered table str1= GetStringOption(opn); @@ -6606,7 +6640,7 @@ bool ha_connect::SameString(TABLE *tab, char *opn) /** check whether a Boolean option have changed */ -bool ha_connect::SameBool(TABLE *tab, char *opn) +bool ha_connect::SameBool(TABLE *tab, PCSZ opn) { bool b1, b2; @@ -6620,7 +6654,7 @@ bool ha_connect::SameBool(TABLE *tab, char *opn) /** check whether an integer option have changed */ -bool ha_connect::SameInt(TABLE *tab, char *opn) +bool ha_connect::SameInt(TABLE *tab, PCSZ opn) { int i1, i2; @@ -6799,7 +6833,7 @@ ha_connect::check_if_supported_inplace_alter(TABLE *altered_table, // Conversion to outward table is only allowed for file based // tables whose file does not exist. 
tshp= altered_table->s; - char *fn= GetStringOption("filename"); + PCSZ fn= GetStringOption("filename"); tshp= NULL; if (FileExists(fn, false)) { @@ -7044,10 +7078,10 @@ maria_declare_plugin(connect) PLUGIN_LICENSE_GPL, connect_init_func, /* Plugin Init */ connect_done_func, /* Plugin Deinit */ - 0x0105, /* version number (1.05) */ + 0x0106, /* version number (1.05) */ NULL, /* status variables */ connect_system_variables, /* system variables */ - "1.05.0003", /* string version */ - MariaDB_PLUGIN_MATURITY_GAMMA /* maturity */ + "1.06.0001", /* string version */ + MariaDB_PLUGIN_MATURITY_BETA /* maturity */ } maria_declare_plugin_end; diff --git a/storage/connect/ha_connect.h b/storage/connect/ha_connect.h index de735668133..3788a3882b6 100644 --- a/storage/connect/ha_connect.h +++ b/storage/connect/ha_connect.h @@ -61,7 +61,7 @@ public: oldopn= newopn= NULL; oldpix= newpix= NULL;} - inline char *SetName(PGLOBAL g, char *name) {return PlugDup(g, name);} + inline char *SetName(PGLOBAL g, PCSZ name) {return PlugDup(g, name);} bool oldsep; // Sepindex before create/alter bool newsep; // Sepindex after create/alter @@ -168,18 +168,18 @@ public: static bool connect_init(void); static bool connect_end(void); TABTYPE GetRealType(PTOS pos= NULL); - char *GetRealString(const char *s); - char *GetStringOption(char *opname, char *sdef= NULL); + char *GetRealString(PCSZ s); + PCSZ GetStringOption(PCSZ opname, PCSZ sdef= NULL); PTOS GetTableOptionStruct(TABLE_SHARE *s= NULL); - bool GetBooleanOption(char *opname, bool bdef); - bool SetBooleanOption(char *opname, bool b); - int GetIntegerOption(char *opname); - bool GetIndexOption(KEY *kp, char *opname); - bool CheckString(const char *str1, const char *str2); - bool SameString(TABLE *tab, char *opn); - bool SetIntegerOption(char *opname, int n); - bool SameInt(TABLE *tab, char *opn); - bool SameBool(TABLE *tab, char *opn); + bool GetBooleanOption(PCSZ opname, bool bdef); + bool SetBooleanOption(PCSZ opname, bool b); + int 
GetIntegerOption(PCSZ opname); + bool GetIndexOption(KEY *kp, PCSZ opname); + bool CheckString(PCSZ str1, PCSZ str2); + bool SameString(TABLE *tab, PCSZ opn); + bool SetIntegerOption(PCSZ opname, int n); + bool SameInt(TABLE *tab, PCSZ opn); + bool SameBool(TABLE *tab, PCSZ opn); bool FileExists(const char *fn, bool bf); bool NoFieldOptionChange(TABLE *tab); PFOS GetFieldOptionStruct(Field *fp); @@ -187,8 +187,8 @@ public: PXOS GetIndexOptionStruct(KEY *kp); PIXDEF GetIndexInfo(TABLE_SHARE *s= NULL); bool CheckVirtualIndex(TABLE_SHARE *s); - const char *GetDBName(const char *name); - const char *GetTableName(void); + PCSZ GetDBName(PCSZ name); + PCSZ GetTableName(void); char *GetPartName(void); //int GetColNameLen(Field *fp); //char *GetColName(Field *fp); @@ -197,9 +197,9 @@ public: bool IsSameIndex(PIXDEF xp1, PIXDEF xp2); bool IsPartitioned(void); bool IsUnique(uint n); - char *GetDataPath(void) {return (char*)datapath;} + PCSZ GetDataPath(void) {return datapath;} - void SetDataPath(PGLOBAL g, const char *path); + bool SetDataPath(PGLOBAL g, PCSZ path); PTDB GetTDB(PGLOBAL g); int OpenTable(PGLOBAL g, bool del= false); bool CheckColumnList(PGLOBAL g); @@ -513,7 +513,7 @@ protected: ulong hnum; // The number of this handler query_id_t valid_query_id; // The one when tdbp was allocated query_id_t creat_query_id; // The one when handler was allocated - char *datapath; // Is the Path of DB data directory + PCSZ datapath; // Is the Path of DB data directory PTDB tdbp; // To table class object PVAL sdvalin1; // Used to convert date values PVAL sdvalin2; // Used to convert date values diff --git a/storage/connect/jdbccat.h b/storage/connect/jdbccat.h index 7108aa376ce..0b87df8bb51 100644 --- a/storage/connect/jdbccat.h +++ b/storage/connect/jdbccat.h @@ -4,10 +4,10 @@ typedef struct jdbc_parms { int CheckSize(int rows); - char *Driver; // JDBC driver - char *Url; // Driver URL - char *User; // User connect info - char *Pwd; // Password connect info + PCSZ Driver; // 
JDBC driver + PCSZ Url; // Driver URL + PCSZ User; // User connect info + PCSZ Pwd; // Password connect info //char *Properties; // Connection property list //int Cto; // Connect timeout //int Qto; // Query timeout @@ -19,12 +19,12 @@ typedef struct jdbc_parms { /* JDBC catalog function prototypes. */ /***********************************************************************/ #if defined(PROMPT_OK) -char *JDBCCheckConnection(PGLOBAL g, char *dsn, int cop); +char *JDBCCheckConnection(PGLOBAL g, PCSZ dsn, int cop); #endif // PROMPT_OK //PQRYRES JDBCDataSources(PGLOBAL g, int maxres, bool info); -PQRYRES JDBCColumns(PGLOBAL g, char *db, char *table, - char *colpat, int maxres, bool info, PJPARM sop); -PQRYRES JDBCSrcCols(PGLOBAL g, char *src, PJPARM sop); -PQRYRES JDBCTables(PGLOBAL g, char *db, char *tabpat, - char *tabtyp, int maxres, bool info, PJPARM sop); +PQRYRES JDBCColumns(PGLOBAL g, PCSZ db, PCSZ table, + PCSZ colpat, int maxres, bool info, PJPARM sop); +PQRYRES JDBCSrcCols(PGLOBAL g, PCSZ src, PJPARM sop); +PQRYRES JDBCTables(PGLOBAL g, PCSZ db, PCSZ tabpat, + PCSZ tabtyp, int maxres, bool info, PJPARM sop); PQRYRES JDBCDrivers(PGLOBAL g, int maxres, bool info); diff --git a/storage/connect/jdbconn.cpp b/storage/connect/jdbconn.cpp index c1d077406b7..f162a7ae645 100644 --- a/storage/connect/jdbconn.cpp +++ b/storage/connect/jdbconn.cpp @@ -189,8 +189,8 @@ int TranslateJDBCType(int stp, char *tn, int prec, int& len, char& v) /***********************************************************************/ /* Allocate the structure used to refer to the result set. 
*/ /***********************************************************************/ -static JCATPARM *AllocCatInfo(PGLOBAL g, JCATINFO fid, char *db, - char *tab, PQRYRES qrp) +static JCATPARM *AllocCatInfo(PGLOBAL g, JCATINFO fid, PCSZ db, + PCSZ tab, PQRYRES qrp) { JCATPARM *cap; @@ -213,7 +213,7 @@ static JCATPARM *AllocCatInfo(PGLOBAL g, JCATINFO fid, char *db, /* JDBCColumns: constructs the result blocks containing all columns */ /* of a JDBC table that will be retrieved by GetData commands. */ /***********************************************************************/ -PQRYRES JDBCColumns(PGLOBAL g, char *db, char *table, char *colpat, +PQRYRES JDBCColumns(PGLOBAL g, PCSZ db, PCSZ table, PCSZ colpat, int maxres, bool info, PJPARM sjp) { int buftyp[] = {TYPE_STRING, TYPE_STRING, TYPE_STRING, TYPE_STRING, @@ -316,7 +316,7 @@ PQRYRES JDBCColumns(PGLOBAL g, char *db, char *table, char *colpat, /* JDBCSrcCols: constructs the result blocks containing the */ /* description of all the columns of a Srcdef option. */ /**************************************************************************/ -PQRYRES JDBCSrcCols(PGLOBAL g, char *src, PJPARM sjp) +PQRYRES JDBCSrcCols(PGLOBAL g, PCSZ src, PJPARM sjp) { char *sqry; PQRYRES qrp; @@ -330,7 +330,7 @@ PQRYRES JDBCSrcCols(PGLOBAL g, char *src, PJPARM sjp) sqry = (char*)PlugSubAlloc(g, NULL, strlen(src) + 2); sprintf(sqry, src, "1=1"); // dummy where clause } else - sqry = src; + sqry = (char*)src; qrp = jcp->GetMetaData(g, sqry); jcp->Close(); @@ -341,7 +341,7 @@ PQRYRES JDBCSrcCols(PGLOBAL g, char *src, PJPARM sjp) /* JDBCTables: constructs the result blocks containing all tables in */ /* an JDBC database that will be retrieved by GetData commands. 
*/ /**************************************************************************/ -PQRYRES JDBCTables(PGLOBAL g, char *db, char *tabpat, char *tabtyp, +PQRYRES JDBCTables(PGLOBAL g, PCSZ db, PCSZ tabpat, PCSZ tabtyp, int maxres, bool info, PJPARM sjp) { int buftyp[] = {TYPE_STRING, TYPE_STRING, TYPE_STRING, @@ -1059,7 +1059,7 @@ int JDBConn::Open(PJPARM sop) /***********************************************************************/ /* Execute an SQL command. */ /***********************************************************************/ -int JDBConn::ExecSQLcommand(char *sql) +int JDBConn::ExecSQLcommand(PCSZ sql) { int rc; jint n; @@ -1142,7 +1142,7 @@ int JDBConn::Fetch(int pos) /***********************************************************************/ /* Restart from beginning of result set */ /***********************************************************************/ -int JDBConn::Rewind(char *sql) +int JDBConn::Rewind(PCSZ sql) { int rbuf = -1; @@ -1200,7 +1200,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) if (rank == 0) if (!name || (jn = env->NewStringUTF(name)) == nullptr) { sprintf(g->Message, "Fail to allocate jstring %s", SVP(name)); - longjmp(g->jumper[g->jump_level], TYPE_AM_JDBC); + throw TYPE_AM_JDBC; } // endif name // Returns 666 is case of error @@ -1208,7 +1208,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) if (Check((ctyp == 666) ? 
-1 : 1)) { sprintf(g->Message, "Getting ctyp: %s", Msg); - longjmp(g->jumper[g->jump_level], TYPE_AM_JDBC); + throw TYPE_AM_JDBC; } // endif Check if (val->GetNullable()) @@ -1227,7 +1227,8 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) case 12: // VARCHAR case -1: // LONGVARCHAR case 1: // CHAR - if (jb) + case 3: // DECIMAL + if (jb && ctyp != 3) cn = (jstring)jb; else if (!gmID(g, chrfldid, "StringField", "(ILjava/lang/String;)Ljava/lang/String;")) cn = (jstring)env->CallObjectMethod(job, chrfldid, (jint)rank, jn); @@ -1253,7 +1254,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) break; case 8: // DOUBLE case 2: // NUMERIC - case 3: // DECIMAL +//case 3: // DECIMAL if (!gmID(g, dblfldid, "DoubleField", "(ILjava/lang/String;)D")) val->SetValue((double)env->CallDoubleMethod(job, dblfldid, rank, jn)); else @@ -1314,7 +1315,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) env->DeleteLocalRef(jn); sprintf(g->Message, "SetColumnValue: %s rank=%d ctyp=%d", Msg, rank, (int)ctyp); - longjmp(g->jumper[g->jump_level], TYPE_AM_JDBC); + throw TYPE_AM_JDBC; } // endif Check if (rank == 0) @@ -1325,7 +1326,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) /***********************************************************************/ /* Prepare an SQL statement for insert. */ /***********************************************************************/ -bool JDBConn::PrepareSQL(char *sql) +bool JDBConn::PrepareSQL(PCSZ sql) { bool b = true; PGLOBAL& g = m_G; @@ -1348,7 +1349,7 @@ bool JDBConn::PrepareSQL(char *sql) /***********************************************************************/ /* Execute an SQL query that returns a result set. 
*/ /***********************************************************************/ -int JDBConn::ExecuteQuery(char *sql) +int JDBConn::ExecuteQuery(PCSZ sql) { int rc = RC_FX; jint ncol; @@ -1376,7 +1377,7 @@ int JDBConn::ExecuteQuery(char *sql) /***********************************************************************/ /* Execute an SQL query and get the affected rows. */ /***********************************************************************/ -int JDBConn::ExecuteUpdate(char *sql) +int JDBConn::ExecuteUpdate(PCSZ sql) { int rc = RC_FX; jint n; @@ -1404,7 +1405,7 @@ int JDBConn::ExecuteUpdate(char *sql) /***********************************************************************/ /* Get the number of lines of the result set. */ /***********************************************************************/ -int JDBConn::GetResultSize(char *sql, JDBCCOL *colp) +int JDBConn::GetResultSize(PCSZ sql, JDBCCOL *colp) { int rc, n = 0; @@ -1642,7 +1643,7 @@ bool JDBConn::SetParam(JDBCCOL *colp) /* GetMetaData: constructs the result blocks containing the */ /* description of all the columns of an SQL command. 
*/ /**************************************************************************/ - PQRYRES JDBConn::GetMetaData(PGLOBAL g, char *src) + PQRYRES JDBConn::GetMetaData(PGLOBAL g, PCSZ src) { static int buftyp[] = {TYPE_STRING, TYPE_INT, TYPE_INT, TYPE_INT, TYPE_INT}; @@ -1844,7 +1845,7 @@ bool JDBConn::SetParam(JDBCCOL *colp) PGLOBAL& g = m_G; // void *buffer; int i, ncol; - PSZ fnc = "Unknown"; + PCSZ fnc = "Unknown"; uint n; short len, tp; int crow = 0; diff --git a/storage/connect/jdbconn.h b/storage/connect/jdbconn.h index 9d428142839..73271c8f5be 100644 --- a/storage/connect/jdbconn.h +++ b/storage/connect/jdbconn.h @@ -46,9 +46,9 @@ enum JCATINFO { typedef struct tagJCATPARM { JCATINFO Id; // Id to indicate function PQRYRES Qrp; // Result set pointer - char *DB; // Database (Schema) - char *Tab; // Table name or pattern - char *Pat; // Table type or column pattern + PCSZ DB; // Database (Schema) + PCSZ Tab; // Table name or pattern + PCSZ Pat; // Table type or column pattern } JCATPARM; typedef jint(JNICALL *CRTJVM) (JavaVM **, void **, void *); @@ -77,7 +77,7 @@ public: JDBConn(PGLOBAL g, TDBJDBC *tdbp); int Open(PJPARM sop); - int Rewind(char *sql); + int Rewind(PCSZ sql); void Close(void); PQRYRES AllocateResult(PGLOBAL g); @@ -96,19 +96,19 @@ public: //void SetQueryTimeout(DWORD sec) {m_QueryTimeout = sec;} //void SetUserName(PSZ user) {m_User = user;} //void SetUserPwd(PSZ pwd) {m_Pwd = pwd;} - int GetResultSize(char *sql, JDBCCOL *colp); - int ExecuteQuery(char *sql); - int ExecuteUpdate(char *sql); + int GetResultSize(PCSZ sql, JDBCCOL *colp); + int ExecuteQuery(PCSZ sql); + int ExecuteUpdate(PCSZ sql); int Fetch(int pos = 0); - bool PrepareSQL(char *sql); + bool PrepareSQL(PCSZ sql); int ExecuteSQL(void); bool SetParam(JDBCCOL *colp); - int ExecSQLcommand(char *sql); + int ExecSQLcommand(PCSZ sql); void SetColumnValue(int rank, PSZ name, PVAL val); int GetCatInfo(JCATPARM *cap); //bool GetDataSources(PQRYRES qrp); bool GetDrivers(PQRYRES qrp); - PQRYRES 
GetMetaData(PGLOBAL g, char *src); + PQRYRES GetMetaData(PGLOBAL g, PCSZ src); public: // Set static variables @@ -174,16 +174,10 @@ protected: jmethodID timfldid; // The TimeField method ID jmethodID tspfldid; // The TimestampField method ID jmethodID bigfldid; // The BigintField method ID - //DWORD m_LoginTimeout; -//DWORD m_QueryTimeout; -//DWORD m_UpdateOptions; - char *Msg; + PCSZ Msg; char *m_Wrap; char m_IDQuoteChar[2]; -//PSZ m_Driver; -//PSZ m_Url; -//PSZ m_User; - PSZ m_Pwd; + PCSZ m_Pwd; int m_Ncol; int m_Aff; int m_Rows; diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index b473871e9f7..f8b1caa13e2 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -60,7 +60,7 @@ char *GetExceptionDesc(PGLOBAL g, unsigned int e); /***********************************************************************/ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma) { - int i, rc, pretty = (ptyp) ? *ptyp : 3; + int i, pretty = (ptyp) ? *ptyp : 3; bool b = false, pty[3] = {true, true, true}; PJSON jsp = NULL; STRG src; @@ -81,117 +81,91 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma) if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n'))) pty[0] = false; - - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return NULL; - } // endif jump_level - -#if defined(SE_CATCH) - // Let's try to recover from any kind of interrupt - _se_translator_function f = _set_se_translator(trans_func); - try { -#endif // SE_CATCH --------------------- try section -------------------- - if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) { - goto err; - } // endif rc - -#if defined(SE_CATCH) // ------------- end of try section ----------------- - } catch (SE_Exception e) { - sprintf(g->Message, "ParseJson: exception doing setjmp: %s (rc=%hd)", - GetExceptionDesc(g, e.nSE), e.nSE); - _set_se_translator(f); - goto err; - } 
catch (...) { - strcpy(g->Message, "Exception doing setjmp"); - _set_se_translator(f); - goto err; - } // end of try-catches - - _set_se_translator(f); -#endif // SE_CATCH - - for (i = 0; i < len; i++) - switch (s[i]) { - case '[': - if (jsp) - goto tryit; - else if (!(jsp = ParseArray(g, ++i, src, pty))) - goto err; - - break; - case '{': - if (jsp) - goto tryit; - else if (!(jsp = ParseObject(g, ++i, src, pty))) - goto err; - - break; - case ' ': - case '\t': - case '\n': - case '\r': - break; - case ',': - if (jsp && (pretty == 1 || pretty == 3)) { - if (comma) - *comma = true; - - pty[0] = pty[2] = false; - break; - } // endif pretty - - sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty); - goto err; - case '(': - b = true; - break; - case ')': - if (b) { - b = false; - break; - } // endif b - - default: - if (jsp) - goto tryit; - else if (!(jsp = ParseValue(g, i, src, pty))) - goto err; - - break; - }; // endswitch s[i] - - if (!jsp) - sprintf(g->Message, "Invalid Json string '%.*s'", 50, s); - else if (ptyp && pretty == 3) { - *ptyp = 3; // Not recognized pretty - - for (i = 0; i < 3; i++) - if (pty[i]) { - *ptyp = i; - break; - } // endif pty - - } // endif ptyp - - g->jump_level--; - return jsp; + for (i = 0; i < len; i++) + switch (s[i]) { + case '[': + if (jsp) + goto tryit; + else if (!(jsp = ParseArray(g, ++i, src, pty))) + throw 1; + + break; + case '{': + if (jsp) + goto tryit; + else if (!(jsp = ParseObject(g, ++i, src, pty))) + throw 2; + + break; + case ' ': + case '\t': + case '\n': + case '\r': + break; + case ',': + if (jsp && (pretty == 1 || pretty == 3)) { + if (comma) + *comma = true; + + pty[0] = pty[2] = false; + break; + } // endif pretty + + sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty); + throw 3; + case '(': + b = true; + break; + case ')': + if (b) { + b = false; + break; + } // endif b + + default: + if (jsp) + goto tryit; + else if (!(jsp = ParseValue(g, i, src, pty))) + throw 4; + + break; + }; // endswitch s[i] + 
+ if (!jsp) + sprintf(g->Message, "Invalid Json string '%.*s'", 50, s); + else if (ptyp && pretty == 3) { + *ptyp = 3; // Not recognized pretty + + for (i = 0; i < 3; i++) + if (pty[i]) { + *ptyp = i; + break; + } // endif pty + + } // endif ptyp + + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + jsp = NULL; + } catch (const char *msg) { + strcpy(g->Message, msg); + jsp = NULL; + } // end catch + + return jsp; tryit: if (pty[0] && (!pretty || pretty > 2)) { if ((jsp = ParseArray(g, (i = 0), src, pty)) && ptyp && pretty == 3) *ptyp = (pty[0]) ? 0 : 3; - g->jump_level--; return jsp; } else strcpy(g->Message, "More than one item in file"); -err: - g->jump_level--; - return NULL; + return NULL; } // end of ParseJson /***********************************************************************/ @@ -335,16 +309,16 @@ PJVAL ParseValue(PGLOBAL g, int& i, STRG& src, bool *pty) PJVAL jvp = new(g) JVALUE; for (; i < len; i++) - switch (s[i]) { - case '\n': - pty[0] = pty[1] = false; - case '\r': - case ' ': - case '\t': - break; - default: - goto suite; - } // endswitch + switch (s[i]) { + case '\n': + pty[0] = pty[1] = false; + case '\r': + case ' ': + case '\t': + break; + default: + goto suite; + } // endswitch suite: switch (s[i]) { @@ -533,7 +507,7 @@ PVAL ParseNumeric(PGLOBAL g, int& i, STRG& src) if (!has_e) goto err; - // passthru + // fall through case '-': if (found_digit) goto err; @@ -585,78 +559,75 @@ PVAL ParseNumeric(PGLOBAL g, int& i, STRG& src) PSZ Serialize(PGLOBAL g, PJSON jsp, char *fn, int pretty) { PSZ str = NULL; - bool b = false, err = true; - JOUT *jp; + bool b = false, err = true; + JOUT *jp; FILE *fs = NULL; g->Message[0] = 0; - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return NULL; - } // endif jump_level - - if (setjmp(g->jumper[++g->jump_level])) { - str = NULL; - goto fin; - } // endif jmp - - if (!jsp) { - 
strcpy(g->Message, "Null json tree"); - goto fin; - } else if (!fn) { - // Serialize to a string - jp = new(g) JOUTSTR(g); - b = pretty == 1; - } else { - if (!(fs = fopen(fn, "wb"))) { - sprintf(g->Message, MSG(OPEN_MODE_ERROR), - "w", (int)errno, fn); - strcat(strcat(g->Message, ": "), strerror(errno)); - goto fin;; - } else if (pretty >= 2) { - // Serialize to a pretty file - jp = new(g)JOUTPRT(g, fs); + try { + if (!jsp) { + strcpy(g->Message, "Null json tree"); + throw 1; + } else if (!fn) { + // Serialize to a string + jp = new(g) JOUTSTR(g); + b = pretty == 1; } else { - // Serialize to a flat file - b = true; - jp = new(g)JOUTFILE(g, fs, pretty); - } // endif's - - } // endif's - - switch (jsp->GetType()) { - case TYPE_JAR: - err = SerializeArray(jp, (PJAR)jsp, b); - break; - case TYPE_JOB: - err = ((b && jp->Prty()) && jp->WriteChr('\t')); - err |= SerializeObject(jp, (PJOB)jsp); - break; - case TYPE_JVAL: - err = SerializeValue(jp, (PJVAL)jsp); - break; - default: - strcpy(g->Message, "Invalid json tree"); - } // endswitch Type + if (!(fs = fopen(fn, "wb"))) { + sprintf(g->Message, MSG(OPEN_MODE_ERROR), + "w", (int)errno, fn); + strcat(strcat(g->Message, ": "), strerror(errno)); + throw 2; + } else if (pretty >= 2) { + // Serialize to a pretty file + jp = new(g)JOUTPRT(g, fs); + } else { + // Serialize to a flat file + b = true; + jp = new(g)JOUTFILE(g, fs, pretty); + } // endif's + + } // endif's + + switch (jsp->GetType()) { + case TYPE_JAR: + err = SerializeArray(jp, (PJAR)jsp, b); + break; + case TYPE_JOB: + err = ((b && jp->Prty()) && jp->WriteChr('\t')); + err |= SerializeObject(jp, (PJOB)jsp); + break; + case TYPE_JVAL: + err = SerializeValue(jp, (PJVAL)jsp); + break; + default: + strcpy(g->Message, "Invalid json tree"); + } // endswitch Type + + if (fs) { + fputs(EL, fs); + fclose(fs); + str = (err) ? 
NULL : strcpy(g->Message, "Ok"); + } else if (!err) { + str = ((JOUTSTR*)jp)->Strp; + jp->WriteChr('\0'); + PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N); + } else { + if (!g->Message[0]) + strcpy(g->Message, "Error in Serialize"); - if (fs) { - fputs(EL, fs); - fclose(fs); - str = (err) ? NULL : strcpy(g->Message, "Ok"); - } else if (!err) { - str = ((JOUTSTR*)jp)->Strp; - jp->WriteChr('\0'); - PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N); - } else { - if (!g->Message[0]) - strcpy(g->Message, "Error in Serialize"); + } // endif's - } // endif's + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + str = NULL; + } catch (const char *msg) { + strcpy(g->Message, msg); + str = NULL; + } // end catch -fin: - g->jump_level--; return str; } // end of Serialize @@ -826,7 +797,7 @@ bool JOUTSTR::Escape(const char *s) case '\r': case '\b': case '\f': WriteChr('\\'); - // passthru + // fall through default: WriteChr(s[i]); break; @@ -965,7 +936,7 @@ return false; /***********************************************************************/ /* Add a new pair to an Object. */ /***********************************************************************/ -PJPR JOBJECT::AddPair(PGLOBAL g, PSZ key) +PJPR JOBJECT::AddPair(PGLOBAL g, PCSZ key) { PJPR jpp = new(g) JPAIR(key); @@ -1051,7 +1022,7 @@ bool JOBJECT::Merge(PGLOBAL g, PJSON jsp) /***********************************************************************/ /* Set or add a value corresponding to the given key. */ /***********************************************************************/ -void JOBJECT::SetValue(PGLOBAL g, PJVAL jvp, PSZ key) +void JOBJECT::SetValue(PGLOBAL g, PJVAL jvp, PCSZ key) { PJPR jp; @@ -1071,7 +1042,7 @@ void JOBJECT::SetValue(PGLOBAL g, PJVAL jvp, PSZ key) /***********************************************************************/ /* Delete a value corresponding to the given key. 
*/ /***********************************************************************/ -void JOBJECT::DeleteKey(PSZ key) +void JOBJECT::DeleteKey(PCSZ key) { PJPR jp, *pjp = &First; @@ -1250,10 +1221,10 @@ JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON() /***********************************************************************/ /* Constructor for a given string. */ /***********************************************************************/ -JVALUE::JVALUE(PGLOBAL g, PSZ strp) : JSON() +JVALUE::JVALUE(PGLOBAL g, PCSZ strp) : JSON() { Jsp = NULL; - Value = AllocateValue(g, strp, TYPE_STRING); + Value = AllocateValue(g, (void*)strp, TYPE_STRING); Next = NULL; Del = false; } // end of JVALUE constructor @@ -1374,7 +1345,7 @@ void JVALUE::SetTiny(PGLOBAL g, char n) { Value = AllocateValue(g, &n, TYPE_TINY); Jsp = NULL; -} // end of SetInteger +} // end of SetTiny /***********************************************************************/ /* Set the Value's value as the given big integer. */ @@ -1408,6 +1379,6 @@ void JVALUE::SetString(PGLOBAL g, PSZ s, short c) /***********************************************************************/ bool JVALUE::IsNull(void) { - return (Jsp) ? Jsp->IsNull() : (Value) ? Value->IsZero() : true; + return (Jsp) ? Jsp->IsNull() : (Value) ? 
Value->IsNull() : true; } // end of IsNull diff --git a/storage/connect/json.h b/storage/connect/json.h index 4ea169e1b18..49675ce8559 100644 --- a/storage/connect/json.h +++ b/storage/connect/json.h @@ -125,14 +125,14 @@ class JPAIR : public BLOCK { friend PJOB ParseObject(PGLOBAL, int&, STRG&, bool*); friend bool SerializeObject(JOUT *, PJOB); public: - JPAIR(PSZ key) : BLOCK() {Key = key; Val = NULL; Next = NULL;} + JPAIR(PCSZ key) : BLOCK() {Key = key; Val = NULL; Next = NULL;} - inline PSZ GetKey(void) {return Key;} + inline PCSZ GetKey(void) {return Key;} inline PJVAL GetVal(void) {return Val;} inline PJPR GetNext(void) {return Next;} protected: - PSZ Key; // This pair key name + PCSZ Key; // This pair key name PJVAL Val; // To the value of the pair PJPR Next; // To the next pair }; // end of class JPAIR @@ -150,7 +150,7 @@ class JSON : public BLOCK { virtual JTYP GetValType(void) {X return TYPE_JSON;} virtual void InitArray(PGLOBAL g) {X} //virtual PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL, int *x = NULL) {X return NULL;} - virtual PJPR AddPair(PGLOBAL g, PSZ key) {X return NULL;} + virtual PJPR AddPair(PGLOBAL g, PCSZ key) {X return NULL;} virtual PJAR GetKeyList(PGLOBAL g) {X return NULL;} virtual PJVAL GetValue(const char *key) {X return NULL;} virtual PJOB GetObject(void) {return NULL;} @@ -166,13 +166,13 @@ class JSON : public BLOCK { virtual PSZ GetText(PGLOBAL g, PSZ text) {X return NULL;} virtual bool Merge(PGLOBAL g, PJSON jsp) { X return true; } virtual bool SetValue(PGLOBAL g, PJVAL jvp, int i) { X return true; } - virtual void SetValue(PGLOBAL g, PJVAL jvp, PSZ key) {X} + virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key) {X} virtual void SetValue(PVAL valp) {X} virtual void SetValue(PJSON jsp) {X} virtual void SetString(PGLOBAL g, PSZ s, short c) {X} virtual void SetInteger(PGLOBAL g, int n) {X} virtual void SetFloat(PGLOBAL g, double f) {X} - virtual void DeleteKey(char *k) {X} + virtual void DeleteKey(PCSZ k) {X} virtual bool 
DeleteValue(int i) {X return true;} virtual bool IsNull(void) {X return true;} @@ -195,14 +195,14 @@ class JOBJECT : public JSON { virtual void Clear(void) {First = Last = NULL; Size = 0;} virtual JTYP GetType(void) {return TYPE_JOB;} virtual PJPR GetFirst(void) {return First;} - virtual PJPR AddPair(PGLOBAL g, PSZ key); + virtual PJPR AddPair(PGLOBAL g, PCSZ key); virtual PJOB GetObject(void) {return this;} virtual PJVAL GetValue(const char* key); virtual PJAR GetKeyList(PGLOBAL g); virtual PSZ GetText(PGLOBAL g, PSZ text); virtual bool Merge(PGLOBAL g, PJSON jsp); - virtual void SetValue(PGLOBAL g, PJVAL jvp, PSZ key); - virtual void DeleteKey(char *k); + virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key); + virtual void DeleteKey(PCSZ k); virtual bool IsNull(void); protected: @@ -253,7 +253,7 @@ class JVALUE : public JSON { JVALUE(PJSON jsp) : JSON() {Jsp = jsp; Value = NULL; Next = NULL; Del = false;} JVALUE(PGLOBAL g, PVAL valp); - JVALUE(PGLOBAL g, PSZ strp); + JVALUE(PGLOBAL g, PCSZ strp); using JSON::GetValue; using JSON::SetValue; diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index 360d0d1a82a..87e818e6108 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -1104,7 +1104,7 @@ static my_bool JsonInit(UDF_INIT *initid, UDF_ARGS *args, } // endif g g->Mrr = (args->arg_count && args->args[0]) ? 
1 : 0; - g->ActivityStart = (PACTIVITY)more; + g->More = more; initid->maybe_null = mbn; initid->max_length = reslen; initid->ptr = (char*)g; @@ -1448,13 +1448,13 @@ static my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n, } // endif b - ml += (unsigned long)g->ActivityStart; // more + ml += g->More; if (ml > g->Sarea_Size) { free(g->Sarea); if (!(g->Sarea = PlugAllocMem(g, ml))) { - char errmsg[256]; + char errmsg[MAX_STR]; sprintf(errmsg, MSG(WORK_AREA), g->Message); strcpy(g->Message, errmsg); @@ -1495,7 +1495,7 @@ static PSZ MakePSZ(PGLOBAL g, UDF_ARGS *args, int i) /*********************************************************************************/ /* Make a valid key from the passed argument. */ /*********************************************************************************/ -static PSZ MakeKey(PGLOBAL g, UDF_ARGS *args, int i) +static PCSZ MakeKey(PGLOBAL g, UDF_ARGS *args, int i) { if (args->arg_count > (unsigned)i) { int j = 0, n = args->attribute_lengths[i]; @@ -2253,7 +2253,8 @@ my_bool json_object_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message) char *json_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *error) { - char *key, *str = NULL; + PCSZ key; + char *str = NULL; PGLOBAL g = (PGLOBAL)initid->ptr; if (g->Xchk) { @@ -2358,7 +2359,7 @@ char *json_object_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, } // endif Xchk if (!CheckMemory(g, initid, args, 1, false, true, true)) { - char *key; + PCSZ key; PJOB jobp; PJSON jsp, top; PJVAL jvp = MakeValue(g, args, 0, &top); @@ -2914,7 +2915,6 @@ char *jsonget_string(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *) { char *p, *path, *str = NULL; - int rc; PJSON jsp; PJSNX jsx; PJVAL jvp; @@ -2922,68 +2922,64 @@ char *jsonget_string(UDF_INIT *initid, UDF_ARGS *args, char *result, if (g->N) { str = (char*)g->Activityp; - goto fin; + goto err; } else if 
(initid->const_item) g->N = 1; - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - PUSH_WARNING(MSG(TOO_MANY_JUMPS)); - *is_null = 1; - return NULL; - } // endif jump_level + try { + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true)) { + PUSH_WARNING("CheckMemory error"); + goto err; + } else + jvp = MakeValue(g, args, 0); - if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) { - PUSH_WARNING(g->Message); - str = NULL; - goto err; - } // endif rc + if ((p = jvp->GetString())) { + if (!(jsp = ParseJson(g, p, strlen(p)))) { + PUSH_WARNING(g->Message); + goto err; + } // endif jsp - if (!g->Xchk) { - if (CheckMemory(g, initid, args, 1, true)) { - PUSH_WARNING("CheckMemory error"); - goto err; - } else - jvp = MakeValue(g, args, 0); + } else + jsp = jvp->GetJson(); - if ((p = jvp->GetString())) { - if (!(jsp = ParseJson(g, p, strlen(p)))) { - PUSH_WARNING(g->Message); - goto err; - } // endif jsp + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + JsonMemSave(g); + } // endif Mrr } else - jsp = jvp->GetJson(); + jsp = (PJSON)g->Xchk; - if (g->Mrr) { // First argument is a constant - g->Xchk = jsp; - JsonMemSave(g); - } // endif Mrr + path = MakePSZ(g, args, 1); + jsx = new(g) JSNX(g, jsp, TYPE_STRING, initid->max_length); - } else - jsp = (PJSON)g->Xchk; + if (jsx->SetJpath(g, path)) { + PUSH_WARNING(g->Message); + goto err; + } // endif SetJpath - path = MakePSZ(g, args, 1); - jsx = new(g) JSNX(g, jsp, TYPE_STRING, initid->max_length); + jsx->ReadValue(g); - if (jsx->SetJpath(g, path)) { - PUSH_WARNING(g->Message); - goto err; - } // endif SetJpath + if (!jsx->GetValue()->IsNull()) + str = jsx->GetValue()->GetCharValue(); - jsx->ReadValue(g); - - if (!jsx->GetValue()->IsNull()) - str = jsx->GetValue()->GetCharValue(); + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)str; - if (initid->const_item) - // Keep result of constant function - g->Activityp = 
(PACTIVITY)str; + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + str = NULL; + } catch (const char *msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + str = NULL; + } // end catch err: - g->jump_level--; - - fin: if (!str) { *is_null = 1; *res_length = 0; @@ -3254,7 +3250,7 @@ char *jsonlocate(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *error) { char *p, *path = NULL; - int k, rc; + int k; PJVAL jvp, jvp2; PJSON jsp; PJSNX jsx; @@ -3274,61 +3270,58 @@ char *jsonlocate(UDF_INIT *initid, UDF_ARGS *args, char *result, } else if (initid->const_item) g->N = 1; - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - PUSH_WARNING(MSG(TOO_MANY_JUMPS)); - *error = 1; - *is_null = 1; - return NULL; - } // endif jump_level + try { + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, !g->Xchk)) { + PUSH_WARNING("CheckMemory error"); + *error = 1; + goto err; + } else + jvp = MakeValue(g, args, 0); - if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) { - PUSH_WARNING(g->Message); - *error = 1; - path = NULL; - goto err; - } // endif rc + if ((p = jvp->GetString())) { + if (!(jsp = ParseJson(g, p, strlen(p)))) { + PUSH_WARNING(g->Message); + goto err; + } // endif jsp - if (!g->Xchk) { - if (CheckMemory(g, initid, args, 1, !g->Xchk)) { - PUSH_WARNING("CheckMemory error"); - *error = 1; - goto err; - } else - jvp = MakeValue(g, args, 0); + } else + jsp = jvp->GetJson(); - if ((p = jvp->GetString())) { - if (!(jsp = ParseJson(g, p, strlen(p)))) { - PUSH_WARNING(g->Message); - goto err; - } // endif jsp + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + JsonMemSave(g); + } // endif Mrr } else - jsp = jvp->GetJson(); + jsp = (PJSON)g->Xchk; - if (g->Mrr) { // First argument is a constant - g->Xchk = jsp; - JsonMemSave(g); - } // endif Mrr - - } else - jsp = (PJSON)g->Xchk; + // The item to 
locate + jvp2 = MakeValue(g, args, 1); - // The item to locate - jvp2 = MakeValue(g, args, 1); + k = (args->arg_count > 2) ? (int)*(long long*)args->args[2] : 1; - k = (args->arg_count > 2) ? (int)*(long long*)args->args[2] : 1; + jsx = new(g) JSNX(g, jsp, TYPE_STRING); + path = jsx->Locate(g, jsp, jvp2, k); - jsx = new(g) JSNX(g, jsp, TYPE_STRING); - path = jsx->Locate(g, jsp, jvp2, k); + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)path; - if (initid->const_item) - // Keep result of constant function - g->Activityp = (PACTIVITY)path; + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } catch (const char *msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } // end catch err: - g->jump_level--; - if (!path) { *res_length = 0; *is_null = 1; @@ -3379,7 +3372,7 @@ char *json_locate_all(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *error) { char *p, *path = NULL; - int rc, mx = 10; + int mx = 10; PJVAL jvp, jvp2; PJSON jsp; PJSNX jsx; @@ -3400,62 +3393,59 @@ char *json_locate_all(UDF_INIT *initid, UDF_ARGS *args, char *result, } else if (initid->const_item) g->N = 1; - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - PUSH_WARNING(MSG(TOO_MANY_JUMPS)); - *error = 1; - *is_null = 1; - return NULL; - } // endif jump_level + try { + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true)) { + PUSH_WARNING("CheckMemory error"); + *error = 1; + goto err; + } else + jvp = MakeValue(g, args, 0); - if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) { - PUSH_WARNING(g->Message); - *error = 1; - path = NULL; - goto err; - } // endif rc + if ((p = jvp->GetString())) { + if (!(jsp = ParseJson(g, p, strlen(p)))) { + PUSH_WARNING(g->Message); + goto err; + } // endif jsp - if (!g->Xchk) { - if 
(CheckMemory(g, initid, args, 1, true)) { - PUSH_WARNING("CheckMemory error"); - *error = 1; - goto err; - } else - jvp = MakeValue(g, args, 0); + } else + jsp = jvp->GetJson(); - if ((p = jvp->GetString())) { - if (!(jsp = ParseJson(g, p, strlen(p)))) { - PUSH_WARNING(g->Message); - goto err; - } // endif jsp + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + JsonMemSave(g); + } // endif Mrr } else - jsp = jvp->GetJson(); - - if (g->Mrr) { // First argument is a constant - g->Xchk = jsp; - JsonMemSave(g); - } // endif Mrr + jsp = (PJSON)g->Xchk; - } else - jsp = (PJSON)g->Xchk; + // The item to locate + jvp2 = MakeValue(g, args, 1); - // The item to locate - jvp2 = MakeValue(g, args, 1); + if (args->arg_count > 2) + mx = (int)*(long long*)args->args[2]; - if (args->arg_count > 2) - mx = (int)*(long long*)args->args[2]; + jsx = new(g) JSNX(g, jsp, TYPE_STRING); + path = jsx->LocateAll(g, jsp, jvp2, mx); - jsx = new(g) JSNX(g, jsp, TYPE_STRING); - path = jsx->LocateAll(g, jsp, jvp2, mx); + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)path; - if (initid->const_item) - // Keep result of constant function - g->Activityp = (PACTIVITY)path; + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } catch (const char *msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } // end catch err: - g->jump_level--; - if (!path) { *res_length = 0; *is_null = 1; @@ -3637,7 +3627,7 @@ static char *handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *error) { char *p, *path, *str = NULL; - int w, rc; + int w; my_bool b = true; PJSON jsp; PJSNX jsx; @@ -3659,78 +3649,73 @@ static char *handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, w = 2; else { PUSH_WARNING("Logical error, please contact CONNECT developer"); - goto err; + goto fin; } // 
endelse - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - PUSH_WARNING(MSG(TOO_MANY_JUMPS)); - *error = 1; - goto fin; - } // endif jump_level + try { + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true, false, true)) { + PUSH_WARNING("CheckMemory error"); + throw 1; + } else + jvp = MakeValue(g, args, 0); - if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) { - PUSH_WARNING(g->Message); - str = NULL; - goto err; - } // endif rc + if ((p = jvp->GetString())) { + if (!(jsp = ParseJson(g, p, strlen(p)))) { + throw 2; + } // endif jsp - if (!g->Xchk) { - if (CheckMemory(g, initid, args, 1, true, false, true)) { - PUSH_WARNING("CheckMemory error"); - goto err; - } else - jvp = MakeValue(g, args, 0); + } else + jsp = jvp->GetJson(); - if ((p = jvp->GetString())) { - if (!(jsp = ParseJson(g, p, strlen(p)))) { - PUSH_WARNING(g->Message); - goto err; - } // endif jsp + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + JsonMemSave(g); + } // endif Mrr } else - jsp = jvp->GetJson(); + jsp = (PJSON)g->Xchk; - if (g->Mrr) { // First argument is a constant - g->Xchk = jsp; - JsonMemSave(g); - } // endif Mrr + jsx = new(g)JSNX(g, jsp, TYPE_STRING, initid->max_length, 0, true); - } else - jsp = (PJSON)g->Xchk; - - jsx = new(g)JSNX(g, jsp, TYPE_STRING, initid->max_length, 0, true); - - for (uint i = 1; i+1 < args->arg_count; i += 2) { - jvp = MakeValue(gb, args, i); - path = MakePSZ(g, args, i+1); + for (uint i = 1; i + 1 < args->arg_count; i += 2) { + jvp = MakeValue(gb, args, i); + path = MakePSZ(g, args, i + 1); - if (jsx->SetJpath(g, path, false)) { - PUSH_WARNING(g->Message); - continue; - } // endif SetJpath + if (jsx->SetJpath(g, path, false)) { + PUSH_WARNING(g->Message); + continue; + } // endif SetJpath - if (w) { - jsx->ReadValue(g); - b = jsx->GetValue()->IsNull(); - b = (w == 1) ? b : !b; - } // endif w + if (w) { + jsx->ReadValue(g); + b = jsx->GetValue()->IsNull(); + b = (w == 1) ? 
b : !b; + } // endif w - if (b && jsx->WriteValue(gb, jvp)) - PUSH_WARNING(g->Message); + if (b && jsx->WriteValue(gb, jvp)) + PUSH_WARNING(g->Message); - } // endfor i + } // endfor i - // In case of error or file, return unchanged argument - if (!(str = MakeResult(g, args, jsp, INT_MAX32))) - str = MakePSZ(g, args, 0); + // In case of error or file, return unchanged argument + if (!(str = MakeResult(g, args, jsp, INT_MAX32))) + str = MakePSZ(g, args, 0); - if (g->N) - // Keep result of constant function - g->Activityp = (PACTIVITY)str; + if (g->N) + // Keep result of constant function + g->Activityp = (PACTIVITY)str; -err: - g->jump_level--; + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + str = NULL; + } catch (const char *msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + str = NULL; + } // end catch fin: if (!str) { @@ -4557,7 +4542,7 @@ char *jbin_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result, } // endif bsp if (!CheckMemory(g, initid, args, 2, false, true, true)) { - char *key; + PCSZ key; PJOB jobp; PJVAL jvp = MakeValue(g, args, 0, &top); PJSON jsp = jvp->GetJson(); @@ -4637,7 +4622,7 @@ char *jbin_object_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, } // endif bsp if (!CheckMemory(g, initid, args, 1, false, true, true)) { - char *key; + PCSZ key; PJOB jobp; PJVAL jvp = MakeValue(g, args, 0, &top); PJSON jsp = jvp->GetJson(); diff --git a/storage/connect/jsonudf.h b/storage/connect/jsonudf.h index d2890421c62..5f4b98a0652 100644 --- a/storage/connect/jsonudf.h +++ b/storage/connect/jsonudf.h @@ -232,7 +232,7 @@ extern "C" { /*********************************************************************************/ typedef struct _jpn { enum JTYP Type; - PSZ Key; + PCSZ Key; int N; } JPN, *PJPN; diff --git a/storage/connect/libdoc.cpp b/storage/connect/libdoc.cpp index 2470d37c353..700d247da38 100644 --- a/storage/connect/libdoc.cpp +++ b/storage/connect/libdoc.cpp @@ -68,9 
+68,9 @@ class LIBXMLDOC : public XMLDOCUMENT { virtual void SetNofree(bool b) {Nofreelist = b;} // Methods - virtual bool Initialize(PGLOBAL g, char *entry, bool zipped); + virtual bool Initialize(PGLOBAL g, PCSZ entry, bool zipped); virtual bool ParseFile(PGLOBAL g, char *fn); - virtual bool NewDoc(PGLOBAL g, char *ver); + virtual bool NewDoc(PGLOBAL g, PCSZ ver); virtual void AddComment(PGLOBAL g, char *com); virtual PXNODE GetRoot(PGLOBAL g); virtual PXNODE NewRoot(PGLOBAL g, char *name); @@ -119,9 +119,9 @@ class XML2NODE : public XMLNODE { virtual PXLIST SelectNodes(PGLOBAL g, char *xp, PXLIST lp); virtual PXNODE SelectSingleNode(PGLOBAL g, char *xp, PXNODE np); virtual PXATTR GetAttribute(PGLOBAL g, char *name, PXATTR ap); - virtual PXNODE AddChildNode(PGLOBAL g, char *name, PXNODE np); + virtual PXNODE AddChildNode(PGLOBAL g, PCSZ name, PXNODE np); virtual PXATTR AddProperty(PGLOBAL g, char *name, PXATTR ap); - virtual void AddText(PGLOBAL g, char *txtp); + virtual void AddText(PGLOBAL g, PCSZ txtp); virtual void DeleteChild(PGLOBAL g, PXNODE dnp); protected: @@ -373,7 +373,7 @@ LIBXMLDOC::LIBXMLDOC(char *nsl, char *nsdf, char *enc, PFBLOCK fp) /******************************************************************/ /* Initialize XML parser and check library compatibility. */ /******************************************************************/ -bool LIBXMLDOC::Initialize(PGLOBAL g, char *entry, bool zipped) +bool LIBXMLDOC::Initialize(PGLOBAL g, PCSZ entry, bool zipped) { if (zipped && InitZip(g, entry)) return true; @@ -434,7 +434,7 @@ PFBLOCK LIBXMLDOC::LinkXblock(PGLOBAL g, MODE m, int rc, char *fn) /******************************************************************/ /* Construct and add the XML processing instruction node. 
*/ /******************************************************************/ -bool LIBXMLDOC::NewDoc(PGLOBAL g, char *ver) +bool LIBXMLDOC::NewDoc(PGLOBAL g, PCSZ ver) { if (trace) htrc("NewDoc\n"); @@ -863,14 +863,13 @@ RCODE XML2NODE::GetContent(PGLOBAL g, char *buf, int len) xmlFree(Content); if ((Content = xmlNodeGetContent(Nodep))) { - char *extra = " \t\r\n"; char *p1 = (char*)Content, *p2 = buf; bool b = false; // Copy content eliminating extra characters for (; *p1; p1++) if ((p2 - buf) < len) { - if (strchr(extra, *p1)) { + if (strchr(" \t\r\n", *p1)) { if (b) { // This to have one blank between sub-nodes *p2++ = ' '; @@ -1020,19 +1019,19 @@ PXATTR XML2NODE::GetAttribute(PGLOBAL g, char *name, PXATTR ap) /******************************************************************/ /* Add a new child node to this node and return it. */ /******************************************************************/ -PXNODE XML2NODE::AddChildNode(PGLOBAL g, char *name, PXNODE np) +PXNODE XML2NODE::AddChildNode(PGLOBAL g, PCSZ name, PXNODE np) { - char *p, *pn, *pf = NULL; + char *p, *pn, *pf = NULL, *nmp = PlugDup(g, name); if (trace) htrc("AddChildNode: %s\n", name); // Is a prefix specified - if ((pn = strchr(name, ':'))) { - pf = name; + if ((pn = strchr(nmp, ':'))) { + pf = nmp; *pn++ = '\0'; // Separate name from prefix } else - pn = name; + pn = nmp; // If name has the format m[n] only m is taken as node name if ((p = strchr(pn, '['))) @@ -1096,7 +1095,7 @@ PXATTR XML2NODE::AddProperty(PGLOBAL g, char *name, PXATTR ap) /******************************************************************/ /* Add a new text node to this node. 
*/ /******************************************************************/ -void XML2NODE::AddText(PGLOBAL g, char *txtp) +void XML2NODE::AddText(PGLOBAL g, PCSZ txtp) { if (trace) htrc("AddText: %s\n", txtp); diff --git a/storage/connect/macutil.cpp b/storage/connect/macutil.cpp index f5d3bb11fe9..b9600bdac2e 100644 --- a/storage/connect/macutil.cpp +++ b/storage/connect/macutil.cpp @@ -192,7 +192,7 @@ bool MACINFO::GetOneInfo(PGLOBAL g, int flag, void *v, int lv) case 23: break; default: - p = ""; + p = PlugDup(g, ""); } // endswitch flag } else switch (flag) { diff --git a/storage/connect/mycat.cc b/storage/connect/mycat.cc index 1fcd8ac78da..750cf3c0639 100644 --- a/storage/connect/mycat.cc +++ b/storage/connect/mycat.cc @@ -161,7 +161,7 @@ TABTYPE GetTypeID(const char *type) #ifdef ZIP_SUPPORT : (!stricmp(type, "ZIP")) ? TAB_ZIP #endif - : (!stricmp(type, "OEM")) ? TAB_OEM : TAB_NIY; + : (!stricmp(type, "OEM")) ? TAB_OEM : TAB_NIY; } // end of GetTypeID /***********************************************************************/ @@ -477,39 +477,6 @@ void MYCAT::Reset(void) { } // end of Reset -#if 0 -/***********************************************************************/ -/* This function sets the current database path. */ -/***********************************************************************/ -void MYCAT::SetPath(PGLOBAL g, LPCSTR *datapath, const char *path) - { - if (path) { - size_t len= strlen(path) + (*path != '.' ? 
4 : 1); - char *buf= (char*)PlugSubAlloc(g, NULL, len); - - if (PlugIsAbsolutePath(path)) - { - strcpy(buf, path); - *datapath= buf; - return; - } - - if (*path != '.') { -#if defined(__WIN__) - char *s= "\\"; -#else // !__WIN__ - char *s= "/"; -#endif // !__WIN__ - strcat(strcat(strcat(strcpy(buf, "."), s), path), s); - } else - strcpy(buf, path); - - *datapath= buf; - } // endif path - - } // end of SetDataPath -#endif // 0 - /***********************************************************************/ /* GetTableDesc: retrieve a table descriptor. */ /* Look for a table descriptor matching the name and type. */ diff --git a/storage/connect/mycat.h b/storage/connect/mycat.h index a3682b31f17..b6bdd5e5e11 100644 --- a/storage/connect/mycat.h +++ b/storage/connect/mycat.h @@ -98,10 +98,7 @@ class MYCAT : public CATALOG { // Methods void Reset(void); -//void SetDataPath(PGLOBAL g, const char *path) -// {SetPath(g, &DataPath, path);} bool StoreIndex(PGLOBAL, PTABDEF) {return false;} // Temporary -// PRELDEF GetTableDesc(PGLOBAL g, LPCSTR name, PRELDEF GetTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR type, PRELDEF *prp = NULL); PTDB GetTable(PGLOBAL g, PTABLE tablep, @@ -109,9 +106,7 @@ class MYCAT : public CATALOG { void ClearDB(PGLOBAL g); protected: -// PRELDEF MakeTableDesc(PGLOBAL g, LPCSTR name, LPCSTR am); PRELDEF MakeTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR am); - //void SetPath(PGLOBAL g, LPCSTR *datapath, const char *path); // Members ha_connect *Hc; // The Connect handler diff --git a/storage/connect/myconn.cpp b/storage/connect/myconn.cpp index d05254a32a6..e68489faad5 100644 --- a/storage/connect/myconn.cpp +++ b/storage/connect/myconn.cpp @@ -135,10 +135,13 @@ PQRYRES MyColumns(PGLOBAL g, THD *thd, const char *host, const char *db, FLD_KEY, FLD_SCALE, FLD_RADIX, FLD_NULL, FLD_REM, FLD_NO, FLD_DEFAULT, FLD_EXTRA, FLD_CHARSET}; - unsigned int length[] = {0, 4, 16, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0}; - char *fld, *colname, *chset, *fmt, v, buf[128], uns[16], zero[16]; 
+ //unsigned int length[] = {0, 4, 16, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0}; + unsigned int length[] = {0, 4, 0, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0}; + PCSZ fmt; + char *fld, *colname, *chset, v, buf[128], uns[16], zero[16]; int i, n, nf, ncol = sizeof(buftyp) / sizeof(int); int len, type, prec, rc, k = 0; + bool b; PQRYRES qrp; PCOLRES crp; MYSQLC myc; @@ -157,7 +160,7 @@ PQRYRES MyColumns(PGLOBAL g, THD *thd, const char *host, const char *db, /* Do an evaluation of the result size. */ /********************************************************************/ STRING cmd(g, 64, "SHOW FULL COLUMNS FROM "); - bool b = cmd.Append((PSZ)table); + b = cmd.Append((PSZ)table); b |= cmd.Append(" FROM "); b |= cmd.Append((PSZ)(db ? db : PlgGetUser(g)->DBName)); @@ -232,11 +235,31 @@ PQRYRES MyColumns(PGLOBAL g, THD *thd, const char *host, const char *db, fld = myc.GetCharField(1); prec = 0; len = 0; - v = (chset && !strcmp(chset, "binary")) ? 'B' : 0; +// v = (chset && !strcmp(chset, "binary")) ? 'B' : 0; + v = 0; *uns = 0; *zero = 0; - - switch ((nf = sscanf(fld, "%[^(](%d,%d", buf, &len, &prec))) { + b = false; + + if (!strnicmp(fld, "enum", 4)) { + char *p2, *p1 = fld + 6; // to skip enum(' + + while (true) { + p2 = strchr(p1, '\''); + len = MY_MAX(len, p2 - p1); + if (*++p2 != ',') break; + p1 = p2 + 2; + } // endwhile + + v = (len > 255) ? 
'V' : 0; + strcpy(buf, "enum"); + b = true; + } else if (!strnicmp(fld, "set", 3)) { + len = (int)strlen(fld) - 2; + v = 'V'; + strcpy(buf, "set"); + b = true; + } else switch ((nf = sscanf(fld, "%[^(](%d,%d", buf, &len, &prec))) { case 3: nf = sscanf(fld, "%[^(](%d,%d) %s %s", buf, &len, &prec, uns, zero); break; @@ -271,7 +294,7 @@ PQRYRES MyColumns(PGLOBAL g, THD *thd, const char *host, const char *db, colname, len); PushWarning(g, thd); v = 'V'; - } else + } else len = MY_MIN(len, 4096); } // endif type @@ -286,6 +309,9 @@ PQRYRES MyColumns(PGLOBAL g, THD *thd, const char *host, const char *db, default: crp->Nulls[i] = v; break; } // endswitch nf + if (b) // enum or set + nf = sscanf(fld, "%s ", buf); // get values + crp = crp->Next; // Type_Name crp->Kdata->SetValue(buf, i); @@ -849,7 +875,8 @@ MYSQL_FIELD *MYSQLC::GetNextField(void) /***********************************************************************/ PQRYRES MYSQLC::GetResult(PGLOBAL g, bool pdb) { - char *fmt, v; + PCSZ fmt; + char *name, v; int n; bool uns; PCOLRES *pcrp, crp; @@ -887,8 +914,9 @@ PQRYRES MYSQLC::GetResult(PGLOBAL g, bool pdb) memset(crp, 0, sizeof(COLRES)); crp->Ncol = ++qrp->Nbcol; - crp->Name = (char*)PlugSubAlloc(g, NULL, fld->name_length + 1); - strcpy(crp->Name, fld->name); + name = (char*)PlugSubAlloc(g, NULL, fld->name_length + 1); + strcpy(name, fld->name); + crp->Name = name; if ((crp->Type = MYSQLtoPLG(fld->type, &v)) == TYPE_ERROR) { sprintf(g->Message, "Type %d not supported for column %s", diff --git a/storage/connect/mysql-test/connect/disabled.def b/storage/connect/mysql-test/connect/disabled.def index 0e5a5fc64e3..64d7ece3fe1 100644 --- a/storage/connect/mysql-test/connect/disabled.def +++ b/storage/connect/mysql-test/connect/disabled.def @@ -9,8 +9,8 @@ # Do not use any TAB characters for whitespace. 
# ############################################################################## -#jdbc : Variable settings depend on machine configuration -#jdbc_new : Variable settings depend on machine configuration +jdbc : Variable settings depend on machine configuration +jdbc_new : Variable settings depend on machine configuration jdbc_oracle : Variable settings depend on machine configuration jdbc_postgresql : Variable settings depend on machine configuration json : TABLE_TYPE = JSON conflicts with the SQL syntax diff --git a/storage/connect/mysql-test/connect/r/jdbc_new.result b/storage/connect/mysql-test/connect/r/jdbc_new.result index 5cc4826213d..6f977166598 100644 --- a/storage/connect/mysql-test/connect/r/jdbc_new.result +++ b/storage/connect/mysql-test/connect/r/jdbc_new.result @@ -14,9 +14,7 @@ NULL NULL SET GLOBAL time_zone='+1:00'; CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=unknown'; -SELECT * FROM t1; -ERROR HY000: Got error 174 'Connecting: java.sql.SQLException: Access denied for user 'unknown'@'localhost' (using password: NO) rc=-2' from CONNECT -DROP TABLE t1; +ERROR HY000: Connecting: java.sql.SQLException: Access denied for user 'unknown'@'localhost' (using password: NO) rc=-2 CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/unknown?user=root'; ERROR HY000: Connecting: java.sql.SQLSyntaxErrorException: Unknown database 'unknown' rc=-2 diff --git a/storage/connect/mysql-test/connect/t/jdbc_new.test b/storage/connect/mysql-test/connect/t/jdbc_new.test index 5586cf8c027..86c4ad57c5f 100644 --- a/storage/connect/mysql-test/connect/t/jdbc_new.test +++ b/storage/connect/mysql-test/connect/t/jdbc_new.test @@ -24,11 +24,9 @@ SET GLOBAL time_zone='+1:00'; # Bad user name # Suppress "mysql_real_connect failed:" (printed in _DEBUG build) --replace_result $SLAVE_MYPORT SLAVE_PORT "mysql_real_connect failed: " "" +--error ER_UNKNOWN_ERROR eval CREATE TABLE t1 
ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:mysql://127.0.0.1:$SLAVE_MYPORT/test?user=unknown'; ---error ER_GET_ERRMSG -SELECT * FROM t1; -DROP TABLE t1; # Bad database name --replace_result $SLAVE_MYPORT SLAVE_PORT "mysql_real_connect failed: " "" diff --git a/storage/connect/myutil.cpp b/storage/connect/myutil.cpp index d4416e188c8..c2053f1c832 100644 --- a/storage/connect/myutil.cpp +++ b/storage/connect/myutil.cpp @@ -42,7 +42,8 @@ int MYSQLtoPLG(char *typname, char *var) type = TYPE_INT; else if (!stricmp(typname, "smallint")) type = TYPE_SHORT; - else if (!stricmp(typname, "char") || !stricmp(typname, "varchar")) + else if (!stricmp(typname, "char") || !stricmp(typname, "varchar") || + !stricmp(typname, "enum") || !stricmp(typname, "set")) type = TYPE_STRING; else if (!stricmp(typname, "double") || !stricmp(typname, "float") || !stricmp(typname, "real")) @@ -87,10 +88,12 @@ int MYSQLtoPLG(char *typname, char *var) else if (!stricmp(typname, "year")) *var = 'Y'; - } else if (type == TYPE_STRING && !stricmp(typname, "varchar")) - // This is to make the difference between CHAR and VARCHAR - *var = 'V'; - else if (type == TYPE_ERROR && xconv == TPC_SKIP) + } else if (type == TYPE_STRING) { + if (!stricmp(typname, "varchar")) + // This is to make the difference between CHAR and VARCHAR + *var = 'V'; + + } else if (type == TYPE_ERROR && xconv == TPC_SKIP) *var = 'K'; else *var = 0; @@ -266,9 +269,9 @@ int MYSQLtoPLG(int mytype, char *var) /************************************************************************/ /* Returns the format corresponding to a MySQL date type number. */ /************************************************************************/ -char *MyDateFmt(int mytype) +PCSZ MyDateFmt(int mytype) { - char *fmt; + PCSZ fmt; switch (mytype) { case MYSQL_TYPE_TIMESTAMP: @@ -294,9 +297,9 @@ char *MyDateFmt(int mytype) /************************************************************************/ /* Returns the format corresponding to a MySQL date type name. 
*/ /************************************************************************/ -char *MyDateFmt(char *typname) +PCSZ MyDateFmt(char *typname) { - char *fmt; + PCSZ fmt; if (!stricmp(typname, "datetime") || !stricmp(typname, "timestamp")) fmt = "YYYY-MM-DD hh:mm:ss"; diff --git a/storage/connect/myutil.h b/storage/connect/myutil.h index 9c22cfef118..6991172b39e 100644 --- a/storage/connect/myutil.h +++ b/storage/connect/myutil.h @@ -6,9 +6,9 @@ enum enum_field_types PLGtoMYSQL(int type, bool dbf, char var = 0); const char *PLGtoMYSQLtype(int type, bool dbf, char var = 0); -int MYSQLtoPLG(char *typname, char *var = NULL); -int MYSQLtoPLG(int mytype, char *var = NULL); -char *MyDateFmt(int mytype); -char *MyDateFmt(char *typname); +int MYSQLtoPLG(char *typname, char *var = NULL); +int MYSQLtoPLG(int mytype, char *var = NULL); +PCSZ MyDateFmt(int mytype); +PCSZ MyDateFmt(char *typname); #endif // __MYUTIL__H diff --git a/storage/connect/odbccat.h b/storage/connect/odbccat.h index 3b729bcb4bb..05b82e49727 100644 --- a/storage/connect/odbccat.h +++ b/storage/connect/odbccat.h @@ -3,11 +3,11 @@ #define DEFAULT_QUERY_TIMEOUT -1 // means do not set typedef struct odbc_parms { - char *User; // User connect info - char *Pwd; // Password connect info - int Cto; // Connect timeout - int Qto; // Query timeout - bool UseCnc; // Use SQLConnect (!SQLDriverConnect) + PCSZ User; // User connect info + PCSZ Pwd; // Password connect info + int Cto; // Connect timeout + int Qto; // Query timeout + bool UseCnc; // Use SQLConnect (!SQLDriverConnect) } ODBCPARM, *POPARM; /***********************************************************************/ @@ -17,9 +17,9 @@ typedef struct odbc_parms { char *ODBCCheckConnection(PGLOBAL g, char *dsn, int cop); #endif // PROMPT_OK PQRYRES ODBCDataSources(PGLOBAL g, int maxres, bool info); -PQRYRES ODBCColumns(PGLOBAL g, char *dsn, char *db, char *table, - char *colpat, int maxres, bool info, POPARM sop); +PQRYRES ODBCColumns(PGLOBAL g, PCSZ dsn, PCSZ db, 
PCSZ table, + PCSZ colpat, int maxres, bool info, POPARM sop); PQRYRES ODBCSrcCols(PGLOBAL g, char *dsn, char *src, POPARM sop); -PQRYRES ODBCTables(PGLOBAL g, char *dsn, char *db, char *tabpat, - char *tabtyp, int maxres, bool info, POPARM sop); +PQRYRES ODBCTables(PGLOBAL g, PCSZ dsn, PCSZ db, PCSZ tabpat, + PCSZ tabtyp, int maxres, bool info, POPARM sop); PQRYRES ODBCDrivers(PGLOBAL g, int maxres, bool info); diff --git a/storage/connect/odbconn.cpp b/storage/connect/odbconn.cpp index 433e392eace..3dbc2d577d5 100644 --- a/storage/connect/odbconn.cpp +++ b/storage/connect/odbconn.cpp @@ -1,7 +1,7 @@ -/************ Odbconn C++ Functions Source Code File (.CPP) ************/ -/* Name: ODBCONN.CPP Version 2.2 */ +/***********************************************************************/ +/* Name: ODBCONN.CPP Version 2.3 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 1998-2016 */ +/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */ /* */ /* This file contains the ODBC connection classes functions. */ /***********************************************************************/ @@ -239,47 +239,43 @@ char *ODBCCheckConnection(PGLOBAL g, char *dsn, int cop) /***********************************************************************/ /* Allocate the structure used to refer to the result set. 
*/ /***********************************************************************/ -static CATPARM *AllocCatInfo(PGLOBAL g, CATINFO fid, char *db, - char *tab, PQRYRES qrp) - { - size_t i, m, n; - CATPARM *cap; +static CATPARM *AllocCatInfo(PGLOBAL g, CATINFO fid, PCSZ db, + PCSZ tab, PQRYRES qrp) +{ + size_t i, m, n; + CATPARM *cap; #if defined(_DEBUG) - assert(qrp); + assert(qrp); #endif - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return NULL; - } // endif jump_level - - if (setjmp(g->jumper[++g->jump_level]) != 0) { - printf("%s\n", g->Message); - cap = NULL; - goto fin; - } // endif rc - - m = (size_t)qrp->Maxres; - n = (size_t)qrp->Nbcol; - cap = (CATPARM *)PlugSubAlloc(g, NULL, sizeof(CATPARM)); - memset(cap, 0, sizeof(CATPARM)); - cap->Id = fid; - cap->Qrp = qrp; - cap->DB = (PUCHAR)db; - cap->Tab = (PUCHAR)tab; - cap->Vlen = (SQLLEN* *)PlugSubAlloc(g, NULL, n * sizeof(SQLLEN *)); - - for (i = 0; i < n; i++) - cap->Vlen[i] = (SQLLEN *)PlugSubAlloc(g, NULL, m * sizeof(SQLLEN)); - - cap->Status = (UWORD *)PlugSubAlloc(g, NULL, m * sizeof(UWORD)); - - fin: - g->jump_level--; - return cap; - } // end of AllocCatInfo + try { + m = (size_t)qrp->Maxres; + n = (size_t)qrp->Nbcol; + cap = (CATPARM *)PlugSubAlloc(g, NULL, sizeof(CATPARM)); + memset(cap, 0, sizeof(CATPARM)); + cap->Id = fid; + cap->Qrp = qrp; + cap->DB = db; + cap->Tab = tab; + cap->Vlen = (SQLLEN* *)PlugSubAlloc(g, NULL, n * sizeof(SQLLEN *)); + + for (i = 0; i < n; i++) + cap->Vlen[i] = (SQLLEN *)PlugSubAlloc(g, NULL, m * sizeof(SQLLEN)); + + cap->Status = (UWORD *)PlugSubAlloc(g, NULL, m * sizeof(UWORD)); + + } catch (int n) { + htrc("Exeption %d: %s\n", n, g->Message); + cap = NULL; + } catch (const char *msg) { + htrc(g->Message, msg); + printf("%s\n", g->Message); + cap = NULL; + } // end catch + + return cap; +} // end of AllocCatInfo #if 0 
/***********************************************************************/ @@ -309,8 +305,8 @@ static void ResetNullValues(CATPARM *cap) /* ODBCColumns: constructs the result blocks containing all columns */ /* of an ODBC table that will be retrieved by GetData commands. */ /***********************************************************************/ -PQRYRES ODBCColumns(PGLOBAL g, char *dsn, char *db, char *table, - char *colpat, int maxres, bool info, POPARM sop) +PQRYRES ODBCColumns(PGLOBAL g, PCSZ dsn, PCSZ db, PCSZ table, + PCSZ colpat, int maxres, bool info, POPARM sop) { int buftyp[] = {TYPE_STRING, TYPE_STRING, TYPE_STRING, TYPE_STRING, TYPE_SHORT, TYPE_STRING, TYPE_INT, TYPE_INT, @@ -383,7 +379,7 @@ PQRYRES ODBCColumns(PGLOBAL g, char *dsn, char *db, char *table, if (!(cap = AllocCatInfo(g, CAT_COL, db, table, qrp))) return NULL; - cap->Pat = (PUCHAR)colpat; + cap->Pat = colpat; /************************************************************************/ /* Now get the results into blocks. */ @@ -618,8 +614,8 @@ PQRYRES ODBCDataSources(PGLOBAL g, int maxres, bool info) /* ODBCTables: constructs the result blocks containing all tables in */ /* an ODBC database that will be retrieved by GetData commands. 
*/ /**************************************************************************/ -PQRYRES ODBCTables(PGLOBAL g, char *dsn, char *db, char *tabpat, - char *tabtyp, int maxres, bool info, POPARM sop) +PQRYRES ODBCTables(PGLOBAL g, PCSZ dsn, PCSZ db, PCSZ tabpat, PCSZ tabtyp, + int maxres, bool info, POPARM sop) { int buftyp[] = {TYPE_STRING, TYPE_STRING, TYPE_STRING, TYPE_STRING, TYPE_STRING}; @@ -681,7 +677,7 @@ PQRYRES ODBCTables(PGLOBAL g, char *dsn, char *db, char *tabpat, if (!(cap = AllocCatInfo(g, CAT_TAB, db, tabpat, qrp))) return NULL; - cap->Pat = (PUCHAR)tabtyp; + cap->Pat = tabtyp; if (trace) htrc("Getting table results ncol=%d\n", cap->Qrp->Nbcol); @@ -879,7 +875,7 @@ PQRYRES ODBCStatistics(PGLOBAL g, ODBConn *op, char *dsn, char *pat, /***********************************************************************/ /* Implementation of DBX class. */ /***********************************************************************/ -DBX::DBX(RETCODE rc, PSZ msg) +DBX::DBX(RETCODE rc, PCSZ msg) { m_RC = rc; m_Msg = msg; @@ -1020,7 +1016,7 @@ bool ODBConn::Check(RETCODE rc) /***********************************************************************/ /* DB exception throw routines. */ /***********************************************************************/ -void ODBConn::ThrowDBX(RETCODE rc, PSZ msg, HSTMT hstmt) +void ODBConn::ThrowDBX(RETCODE rc, PCSZ msg, HSTMT hstmt) { DBX* xp = new(m_G) DBX(rc, msg); @@ -1030,7 +1026,7 @@ void ODBConn::ThrowDBX(RETCODE rc, PSZ msg, HSTMT hstmt) } // end of ThrowDBX -void ODBConn::ThrowDBX(PSZ msg) +void ODBConn::ThrowDBX(PCSZ msg) { DBX* xp = new(m_G) DBX(0, "Error"); @@ -1110,7 +1106,7 @@ void ODBConn::OnSetOptions(HSTMT hstmt) /***********************************************************************/ /* Open: connect to a data source. 
*/ /***********************************************************************/ -int ODBConn::Open(PSZ ConnectString, POPARM sop, DWORD options) +int ODBConn::Open(PCSZ ConnectString, POPARM sop, DWORD options) { PGLOBAL& g = m_G; //ASSERT_VALID(this); @@ -1192,7 +1188,7 @@ void ODBConn::AllocConnect(DWORD Options) #if defined(_DEBUG) if (Options & traceSQL) { - SQLSetConnectOption(m_hdbc, SQL_OPT_TRACEFILE, (DWORD)"xodbc.out"); + SQLSetConnectOption(m_hdbc, SQL_OPT_TRACEFILE, (SQLULEN)"xodbc.out"); SQLSetConnectOption(m_hdbc, SQL_OPT_TRACE, 1); } // endif #endif // _DEBUG @@ -1215,7 +1211,7 @@ void ODBConn::AllocConnect(DWORD Options) // Turn on cursor lib support if (Options & useCursorLib) - rc = SQLSetConnectOption(m_hdbc, SQL_ODBC_CURSORS, SQL_CUR_USE_ODBC); + rc = SQLSetConnectOption(m_hdbc, SQL_ODBC_CURSORS, SQL_CUR_USE_DRIVER); return; } // end of AllocConnect @@ -1921,7 +1917,7 @@ bool ODBConn::ExecSQLcommand(char *sql) /* GetMetaData: constructs the result blocks containing the */ /* description of all the columns of an SQL command. */ /**************************************************************************/ -PQRYRES ODBConn::GetMetaData(PGLOBAL g, char *dsn, char *src) +PQRYRES ODBConn::GetMetaData(PGLOBAL g, PCSZ dsn, PCSZ src) { static int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_INT, TYPE_SHORT, TYPE_SHORT}; @@ -2244,7 +2240,7 @@ int ODBConn::GetCatInfo(CATPARM *cap) void *buffer; int i, irc; bool b; - PSZ fnc = "Unknown"; + PCSZ fnc = "Unknown"; UWORD n; SWORD ncol, len, tp; SQLULEN crow = 0; @@ -2283,22 +2279,20 @@ int ODBConn::GetCatInfo(CATPARM *cap) // Now do call the proper ODBC API switch (cap->Id) { case CAT_TAB: -// rc = SQLSetStmtAttr(hstmt, SQL_ATTR_METADATA_ID, -// (SQLPOINTER)false, 0); fnc = "SQLTables"; rc = SQLTables(hstmt, name.ptr(2), name.length(2), name.ptr(1), name.length(1), name.ptr(0), name.length(0), - cap->Pat, cap->Pat ? SQL_NTS : 0); + (SQLCHAR *)cap->Pat, + cap->Pat ? 
SQL_NTS : 0); break; case CAT_COL: -// rc = SQLSetStmtAttr(hstmt, SQL_ATTR_METADATA_ID, -// (SQLPOINTER)true, 0); fnc = "SQLColumns"; rc = SQLColumns(hstmt, name.ptr(2), name.length(2), name.ptr(1), name.length(1), name.ptr(0), name.length(0), - cap->Pat, cap->Pat ? SQL_NTS : 0); + (SQLCHAR *)cap->Pat, + cap->Pat ? SQL_NTS : 0); break; case CAT_KEY: fnc = "SQLPrimaryKeys"; diff --git a/storage/connect/odbconn.h b/storage/connect/odbconn.h index 063985218ec..5abb8354160 100644 --- a/storage/connect/odbconn.h +++ b/storage/connect/odbconn.h @@ -54,9 +54,9 @@ enum CATINFO {CAT_TAB = 1, /* SQLTables */ typedef struct tagCATPARM { CATINFO Id; // Id to indicate function PQRYRES Qrp; // Result set pointer - PUCHAR DB; // Database (Schema) - PUCHAR Tab; // Table name or pattern - PUCHAR Pat; // Table type or column pattern + PCSZ DB; // Database (Schema) + PCSZ Tab; // Table name or pattern + PCSZ Pat; // Table type or column pattern SQLLEN* *Vlen; // To array of indicator values UWORD *Status; // To status block // For SQLStatistics @@ -80,23 +80,23 @@ class DBX : public BLOCK { friend class ODBConn; // Construction (by ThrowDBX only) -- destruction protected: - DBX(RETCODE rc, PSZ msg = NULL); + DBX(RETCODE rc, PCSZ msg = NULL); public: //virtual ~DBX() {} //void operator delete(void*, PGLOBAL, void*) {}; // Implementation (use ThrowDBX to create) RETCODE GetRC(void) {return m_RC;} - PSZ GetMsg(void) {return m_Msg;} - const char *GetErrorMessage(int i); + PCSZ GetMsg(void) {return m_Msg;} + PCSZ GetErrorMessage(int i); protected: bool BuildErrorMessage(ODBConn* pdb, HSTMT hstmt = SQL_NULL_HSTMT); // Attributes RETCODE m_RC; - PSZ m_Msg; - PSZ m_ErrMsg[MAX_NUM_OF_MSG]; + PCSZ m_Msg; + PCSZ m_ErrMsg[MAX_NUM_OF_MSG]; }; // end of DBX class definition /***********************************************************************/ @@ -119,7 +119,7 @@ class ODBConn : public BLOCK { noOdbcDialog = 0x0008, // Don't display ODBC Connect dialog forceOdbcDialog = 0x0010}; // Always 
display ODBC connect dialog - int Open(PSZ ConnectString, POPARM sop, DWORD Options = 0); + int Open(PCSZ ConnectString, POPARM sop, DWORD Options = 0); int Rewind(char *sql, ODBCCOL *tocols); void Close(void); PQRYRES AllocateResult(PGLOBAL g); @@ -131,7 +131,7 @@ class ODBConn : public BLOCK { bool IsOpen(void) {return m_hdbc != SQL_NULL_HDBC;} PSZ GetStringInfo(ushort infotype); int GetMaxValue(ushort infotype); - PSZ GetConnect(void) {return m_Connect;} + PCSZ GetConnect(void) {return m_Connect;} public: // Operations @@ -149,7 +149,7 @@ class ODBConn : public BLOCK { int GetCatInfo(CATPARM *cap); bool GetDataSources(PQRYRES qrp); bool GetDrivers(PQRYRES qrp); - PQRYRES GetMetaData(PGLOBAL g, char *dsn, char *src); + PQRYRES GetMetaData(PGLOBAL g, PCSZ dsn, PCSZ src); public: // Set special options @@ -162,8 +162,8 @@ class ODBConn : public BLOCK { // ODBC operations protected: bool Check(RETCODE rc); - void ThrowDBX(RETCODE rc, PSZ msg, HSTMT hstmt = SQL_NULL_HSTMT); - void ThrowDBX(PSZ msg); + void ThrowDBX(RETCODE rc, PCSZ msg, HSTMT hstmt = SQL_NULL_HSTMT); + void ThrowDBX(PCSZ msg); void AllocConnect(DWORD dwOptions); void Connect(void); bool DriverConnect(DWORD Options); @@ -187,9 +187,9 @@ class ODBConn : public BLOCK { DWORD m_UpdateOptions; DWORD m_RowsetSize; char m_IDQuoteChar[2]; - PSZ m_Connect; - PSZ m_User; - PSZ m_Pwd; + PCSZ m_Connect; + PCSZ m_User; + PCSZ m_Pwd; int m_Catver; int m_Rows; int m_Fetch; diff --git a/storage/connect/os.h b/storage/connect/os.h index 2dc603fdcda..8056a272990 100644 --- a/storage/connect/os.h +++ b/storage/connect/os.h @@ -17,13 +17,16 @@ typedef off_t off64_t; #if defined(__WIN__) typedef __int64 BIGINT; +typedef _Null_terminated_ const char *PCSZ; #else // !__WIN__ typedef longlong BIGINT; #define FILE_BEGIN SEEK_SET #define FILE_CURRENT SEEK_CUR #define FILE_END SEEK_END +typedef const char *PCSZ; #endif // !__WIN__ + #if !defined(__WIN__) typedef const void *LPCVOID; typedef const char *LPCTSTR; diff --git 
a/storage/connect/osutil.c b/storage/connect/osutil.c index 2e9e120b0c8..66743c7403b 100644 --- a/storage/connect/osutil.c +++ b/storage/connect/osutil.c @@ -43,34 +43,6 @@ PSZ strlwr(PSZ p) return (p); } /* end of strlwr */ -#if defined(NOT_USED) /*&& !defined(sun) && !defined(LINUX) && !defined(AIX)*/ -/***********************************************************************/ -/* Define stricmp function not existing in some UNIX libraries. */ -/***********************************************************************/ -int stricmp(char *str1, char *str2) - { - register int i; - int n; - char c; - char *sup1 = malloc(strlen(str1) + 1); - char *sup2 = malloc(strlen(str2) + 1); - - for (i = 0; c = str1[i]; i++) - sup1[i] = toupper(c); - - sup1[i] = 0; - - for (i = 0; c = str2[i]; i++) - sup2[i] = toupper(c); - - sup2[i] = 0; - n = strcmp(sup1, sup2); - free(sup1); - free(sup2); - return (n); - } /* end of stricmp */ -#endif /* sun */ - /***********************************************************************/ /* Define the splitpath function not existing in the UNIX library. */ /***********************************************************************/ @@ -143,13 +115,6 @@ my_bool CloseFileHandle(HANDLE h) return (close(h)) ? TRUE : FALSE; } /* end of CloseFileHandle */ -#if 0 -void Sleep(DWORD time) - { - //FIXME: TODO - } /* end of Sleep */ -#endif - int GetLastError() { return errno; @@ -210,21 +175,4 @@ BOOL MessageBeep(uint i __attribute__((unused))) return TRUE; } /* end of MessageBeep */ -#if 0 -/* This function is ridiculous and should be revisited */ -DWORD FormatMessage(DWORD dwFlags, LPCVOID lpSource, DWORD dwMessageId, - DWORD dwLanguageId, LPSTR lpBuffer, DWORD nSize, ...) 
- { - char buff[32]; - int n; - -//if (dwFlags & FORMAT_MESSAGE_ALLOCATE_BUFFER) -// return 0; /* means error */ - - n = sprintf(buff, "Error code: %d", (int) dwMessageId); - strncpy(lpBuffer, buff, nSize); - return min(n, nSize); - } /* end of FormatMessage */ -#endif - #endif // UNIX diff --git a/storage/connect/plgdbsem.h b/storage/connect/plgdbsem.h index 800b1098d50..2198c44c200 100644 --- a/storage/connect/plgdbsem.h +++ b/storage/connect/plgdbsem.h @@ -80,7 +80,8 @@ enum TABTYPE {TAB_UNDEF = 0, /* Table of undefined type */ TAB_DMY = 25, /* DMY Dummy tables NIY */ TAB_JDBC = 26, /* Table accessed via JDBC */ TAB_ZIP = 27, /* ZIP file info table */ - TAB_NIY = 28}; /* Table not implemented yet */ +// TAB_MONGO = 28, /* Table retrieved from MongoDB */ + TAB_NIY = 30}; /* Table not implemented yet */ enum AMT {TYPE_AM_ERROR = 0, /* Type not defined */ TYPE_AM_ROWID = 1, /* ROWID type (special column) */ @@ -143,7 +144,8 @@ enum AMT {TYPE_AM_ERROR = 0, /* Type not defined */ TYPE_AM_MYX = 193, /* MYSQL EXEC access method type */ TYPE_AM_CAT = 195, /* Catalog access method type no */ TYPE_AM_ZIP = 198, /* ZIP access method type no */ - TYPE_AM_OUT = 200}; /* Output relations (storage) */ + TYPE_AM_MGO = 199, /* MGO access method type no */ + TYPE_AM_OUT = 200}; /* Output relations (storage) */ enum RECFM {RECFM_NAF = -2, /* Not a file */ RECFM_OEM = -1, /* OEM file access method */ @@ -553,7 +555,7 @@ typedef struct _qryres { typedef struct _colres { PCOLRES Next; /* To next result column */ PCOL Colp; /* To matching column block */ - PSZ Name; /* Column header */ + PCSZ Name; /* Column header */ PVBLK Kdata; /* Column block of values */ char *Nulls; /* Column null value array */ int Type; /* Internal type */ @@ -583,7 +585,7 @@ void PlugLineDB(PGLOBAL, PSZ, short, void *, uint); char *SetPath(PGLOBAL g, const char *path); char *ExtractFromPath(PGLOBAL, char *, char *, OPVAL); void AddPointer(PTABS, void *); -PDTP MakeDateFormat(PGLOBAL, PSZ, bool, bool, int); 
+PDTP MakeDateFormat(PGLOBAL, PCSZ, bool, bool, int); int ExtractDate(char *, PDTP, int, int val[6]); /**************************************************************************/ @@ -615,11 +617,10 @@ DllExport void *PlgDBrealloc(PGLOBAL, void *, MBLOCK&, size_t); DllExport void NewPointer(PTABS, void *, void *); //lExport char *GetIni(int n= 0); // Not used anymore DllExport void SetTrc(void); -DllExport char *GetListOption(PGLOBAL, const char *, const char *, - const char *def=NULL); -DllExport char *GetStringTableOption(PGLOBAL, PTOS, char *, char *); -DllExport bool GetBooleanTableOption(PGLOBAL, PTOS, char *, bool); -DllExport int GetIntegerTableOption(PGLOBAL, PTOS, char *, int); +DllExport PCSZ GetListOption(PGLOBAL, PCSZ, PCSZ, PCSZ def=NULL); +DllExport PCSZ GetStringTableOption(PGLOBAL, PTOS, PCSZ, PCSZ); +DllExport bool GetBooleanTableOption(PGLOBAL, PTOS, PCSZ, bool); +DllExport int GetIntegerTableOption(PGLOBAL, PTOS, PCSZ, int); #define MSGID_NONE 0 #define MSGID_CANNOT_OPEN 1 diff --git a/storage/connect/plgdbutl.cpp b/storage/connect/plgdbutl.cpp index 1910cdcdec8..b6f59bac8cf 100644 --- a/storage/connect/plgdbutl.cpp +++ b/storage/connect/plgdbutl.cpp @@ -238,88 +238,84 @@ void ptrc(char const *fmt, ...) PQRYRES PlgAllocResult(PGLOBAL g, int ncol, int maxres, int ids, int *buftyp, XFLD *fldtyp, unsigned int *length, bool blank, bool nonull) - { +{ char cname[NAM_LEN+1]; int i; PCOLRES *pcrp, crp; PQRYRES qrp; - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return NULL; - } // endif jump_level - - if (setjmp(g->jumper[++g->jump_level]) != 0) { - printf("%s\n", g->Message); - qrp = NULL; - goto fin; - } // endif rc - - /************************************************************************/ - /* Allocate the structure used to contain the result set. 
*/ - /************************************************************************/ - qrp = (PQRYRES)PlugSubAlloc(g, NULL, sizeof(QRYRES)); - pcrp = &qrp->Colresp; - qrp->Continued = false; - qrp->Truncated = false; - qrp->Info = false; - qrp->Suball = true; - qrp->Maxres = maxres; - qrp->Maxsize = 0; - qrp->Nblin = 0; - qrp->Nbcol = 0; // will be ncol - qrp->Cursor = 0; - qrp->BadLines = 0; - - for (i = 0; i < ncol; i++) { - *pcrp = (PCOLRES)PlugSubAlloc(g, NULL, sizeof(COLRES)); - crp = *pcrp; - pcrp = &crp->Next; - memset(crp, 0, sizeof(COLRES)); - crp->Colp = NULL; - crp->Ncol = ++qrp->Nbcol; - crp->Type = buftyp[i]; - crp->Length = length[i]; - crp->Clen = GetTypeSize(crp->Type, length[i]); - crp->Prec = 0; - - if (ids > 0) { + try { + /**********************************************************************/ + /* Allocate the structure used to contain the result set. */ + /**********************************************************************/ + qrp = (PQRYRES)PlugSubAlloc(g, NULL, sizeof(QRYRES)); + pcrp = &qrp->Colresp; + qrp->Continued = false; + qrp->Truncated = false; + qrp->Info = false; + qrp->Suball = true; + qrp->Maxres = maxres; + qrp->Maxsize = 0; + qrp->Nblin = 0; + qrp->Nbcol = 0; // will be ncol + qrp->Cursor = 0; + qrp->BadLines = 0; + + for (i = 0; i < ncol; i++) { + *pcrp = (PCOLRES)PlugSubAlloc(g, NULL, sizeof(COLRES)); + crp = *pcrp; + pcrp = &crp->Next; + memset(crp, 0, sizeof(COLRES)); + crp->Colp = NULL; + crp->Ncol = ++qrp->Nbcol; + crp->Type = buftyp[i]; + crp->Length = length[i]; + crp->Clen = GetTypeSize(crp->Type, length[i]); + crp->Prec = 0; + + if (ids > 0) { #if defined(XMSG) - // Get header from message file - strncpy(cname, PlugReadMessage(g, ids + crp->Ncol, NULL), NAM_LEN); - cname[NAM_LEN] = 0; // for truncated long names + // Get header from message file + strncpy(cname, PlugReadMessage(g, ids + crp->Ncol, NULL), NAM_LEN); + cname[NAM_LEN] = 0; // for truncated long names #else // !XMSG - GetRcString(ids + crp->Ncol, cname, 
sizeof(cname)); + GetRcString(ids + crp->Ncol, cname, sizeof(cname)); #endif // !XMSG - crp->Name = (PSZ)PlugDup(g, cname); - } else - crp->Name = NULL; // Will be set by caller + crp->Name = (PSZ)PlugDup(g, cname); + } else + crp->Name = NULL; // Will be set by caller - if (fldtyp) - crp->Fld = fldtyp[i]; - else - crp->Fld = FLD_NO; + if (fldtyp) + crp->Fld = fldtyp[i]; + else + crp->Fld = FLD_NO; - // Allocate the Value Block that will contain data - if (crp->Length || nonull) - crp->Kdata = AllocValBlock(g, NULL, crp->Type, maxres, - crp->Length, 0, true, blank, false); - else - crp->Kdata = NULL; + // Allocate the Value Block that will contain data + if (crp->Length || nonull) + crp->Kdata = AllocValBlock(g, NULL, crp->Type, maxres, + crp->Length, 0, true, blank, false); + else + crp->Kdata = NULL; - if (trace) - htrc("Column(%d) %s type=%d len=%d value=%p\n", - crp->Ncol, crp->Name, crp->Type, crp->Length, crp->Kdata); + if (trace) + htrc("Column(%d) %s type=%d len=%d value=%p\n", + crp->Ncol, crp->Name, crp->Type, crp->Length, crp->Kdata); - } // endfor i + } // endfor i - *pcrp = NULL; + *pcrp = NULL; - fin: - g->jump_level--; - return qrp; - } // end of PlgAllocResult + } catch (int n) { + htrc("Exception %d: %s\n", n, g->Message); + qrp = NULL; + } catch (const char *msg) { + strcpy(g->Message, msg); + htrc("%s\n", g->Message); + qrp = NULL; + } // end catch + + return qrp; +} // end of PlgAllocResult /***********************************************************************/ /* Allocate and initialize the new DB User Block. 
*/ @@ -365,8 +361,8 @@ PCATLG PlgGetCatalog(PGLOBAL g, bool jump) if (!cat && jump) { // Raise exception so caller doesn't have to check return value strcpy(g->Message, MSG(NO_ACTIVE_DB)); - longjmp(g->jumper[g->jump_level], 1); - } // endif cat + throw 1; + } // endif cat return cat; } // end of PlgGetCatalog @@ -391,26 +387,27 @@ char *SetPath(PGLOBAL g, const char *path) char *buf= NULL; if (path) { - size_t len= strlen(path) + (*path != '.' ? 4 : 1); + size_t len = strlen(path) + (*path != '.' ? 4 : 1); + + if (!(buf = (char*)PlgDBSubAlloc(g, NULL, len))) + return NULL; - buf= (char*)PlugSubAlloc(g, NULL, len); - if (PlugIsAbsolutePath(path)) { - strcpy(buf, path); - return buf; - } // endif path + strcpy(buf, path); + return buf; + } // endif path if (*path != '.') { #if defined(__WIN__) - char *s= "\\"; + const char *s = "\\"; #else // !__WIN__ - char *s= "/"; + const char *s = "/"; #endif // !__WIN__ strcat(strcat(strcat(strcpy(buf, "."), s), path), s); } else strcpy(buf, path); - } // endif path + } // endif path return buf; } // end of SetPath @@ -448,7 +445,7 @@ char *ExtractFromPath(PGLOBAL g, char *pBuff, char *FileName, OPVAL op) static bool PlugCheckPattern(PGLOBAL g, LPCSTR string, LPCSTR pat) { if (pat && strlen(pat)) { - // This leaves 512 bytes (MAX_STR / 2) for each components + // This leaves 2048 bytes (MAX_STR / 2) for each components LPSTR name = g->Message + MAX_STR / 2; strlwr(strcpy(name, string)); @@ -476,8 +473,8 @@ bool PlugEvalLike(PGLOBAL g, LPCSTR strg, LPCSTR pat, bool ci) tp = g->Message; else if (!(tp = new char[strlen(pat) + strlen(strg) + 2])) { strcpy(g->Message, MSG(NEW_RETURN_NULL)); - longjmp(g->jumper[g->jump_level], OP_LIKE); - } /* endif tp */ + throw OP_LIKE; + } /* endif tp */ sp = tp + strlen(pat) + 1; strlwr(strcpy(tp, pat)); /* Make a lower case copy of pat */ @@ -487,8 +484,8 @@ bool PlugEvalLike(PGLOBAL g, LPCSTR strg, LPCSTR pat, bool ci) tp = g->Message; /* Use this as temporary work space. 
*/ else if (!(tp = new char[strlen(pat) + 1])) { strcpy(g->Message, MSG(NEW_RETURN_NULL)); - longjmp(g->jumper[g->jump_level], OP_LIKE); - } /* endif tp */ + throw OP_LIKE; + } /* endif tp */ strcpy(tp, pat); /* Make a copy to be worked into */ sp = (char*)strg; @@ -676,7 +673,7 @@ void PlugConvertConstant(PGLOBAL g, void* & value, short& type) /* format and a Strftime output format. Flag if not 0 indicates that */ /* non quoted blanks are not included in the output format. */ /***********************************************************************/ -PDTP MakeDateFormat(PGLOBAL g, PSZ dfmt, bool in, bool out, int flag) +PDTP MakeDateFormat(PGLOBAL g, PCSZ dfmt, bool in, bool out, int flag) { int rc; PDTP pdp = (PDTP)PlugSubAlloc(g, NULL, sizeof(DATPAR)); @@ -685,7 +682,7 @@ PDTP MakeDateFormat(PGLOBAL g, PSZ dfmt, bool in, bool out, int flag) htrc("MakeDateFormat: dfmt=%s\n", dfmt); memset(pdp, 0, sizeof(DATPAR)); - pdp->Format = pdp->Curp = dfmt; + pdp->Format = pdp->Curp = PlugDup(g, dfmt); pdp->Outsize = 2 * strlen(dfmt) + 1; if (in) @@ -727,10 +724,11 @@ PDTP MakeDateFormat(PGLOBAL g, PSZ dfmt, bool in, bool out, int flag) /***********************************************************************/ int ExtractDate(char *dts, PDTP pdp, int defy, int val[6]) { - char *fmt, c, d, e, W[8][12]; - int i, k, m, numval; - int n, y = 30; - bool b = true; // true for null dates + PCSZ fmt; + char c, d, e, W[8][12]; + int i, k, m, numval; + int n, y = 30; + bool b = true; // true for null dates if (pdp) fmt = pdp->InFmt; @@ -917,7 +915,7 @@ int PlugCloseFile(PGLOBAL g __attribute__((unused)), PFBLOCK fp, bool all) fp->Memory = NULL; fp->Mode = MODE_ANY; - // Passthru + // fall through case TYPE_FB_HANDLE: if (fp->Handle && fp->Handle != INVALID_HANDLE_VALUE) if (CloseFileHandle(fp->Handle)) @@ -1255,7 +1253,7 @@ void *PlgDBalloc(PGLOBAL g, void *area, MBLOCK& mp) // in the area, do allocate from virtual storage. 
#if defined(__WIN__) if (mp.Size >= BIGMEM) - mp.Memp = VirtualAlloc(NULL, mp.Size, MEM_COMMIT, PAGE_READWRITE); + mp.Memp = VirtualAlloc(NULL, mp.Size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); else #endif mp.Memp = malloc(mp.Size); @@ -1494,7 +1492,7 @@ void PlugPutOut(PGLOBAL g, FILE *f, short t, void *v, uint n) case TYPE_TABLE: case TYPE_TDB: case TYPE_XOBJECT: - ((PBLOCK)v)->Print(g, f, n-2); + ((PBLOCK)v)->Printf(g, f, n-2); break; default: @@ -1520,8 +1518,8 @@ DllExport void NewPointer(PTABS t, void *oldv, void *newv) PGLOBAL g = t->G; sprintf(g->Message, "NewPointer: %s", MSG(MEM_ALLOC_ERROR)); - longjmp(g->jumper[g->jump_level], 3); - } else { + throw 3; + } else { tp->Next = t->P1; tp->Num = 0; t->P1 = tp; @@ -1557,15 +1555,15 @@ int FileComp(PGLOBAL g, char *file1, char *file2) sprintf(g->Message, MSG(OPEN_MODE_ERROR), "rb", (int)errno, fn[i]); strcat(strcat(g->Message, ": "), strerror(errno)); - longjmp(g->jumper[g->jump_level], 666); -// } else + throw 666; + // } else // len[i] = 0; // File does not exist yet } else { if ((len[i] = _filelength(h[i])) < 0) { sprintf(g->Message, MSG(FILELEN_ERROR), "_filelength", fn[i]); - longjmp(g->jumper[g->jump_level], 666); - } // endif len + throw 666; + } // endif len } // endif h diff --git a/storage/connect/plgxml.cpp b/storage/connect/plgxml.cpp index eb31e24235b..f3d3a010266 100644 --- a/storage/connect/plgxml.cpp +++ b/storage/connect/plgxml.cpp @@ -45,7 +45,7 @@ XMLDOCUMENT::XMLDOCUMENT(char *nsl, char *nsdf, char *enc) /******************************************************************/ /* Initialize zipped file processing. */ /******************************************************************/ -bool XMLDOCUMENT::InitZip(PGLOBAL g, char *entry) +bool XMLDOCUMENT::InitZip(PGLOBAL g, PCSZ entry) { #if defined(ZIP_SUPPORT) bool mul = (entry) ? 
strchr(entry, '*') || strchr(entry, '?') : false; @@ -173,7 +173,7 @@ void XMLNODE::Delete(PXNODE dnp) /******************************************************************/ /* Store a string in Buf, enventually reallocating it. */ /******************************************************************/ -char *XMLNODE::BufAlloc(PGLOBAL g, char *p, int n) +char *XMLNODE::BufAlloc(PGLOBAL g, const char *p, int n) { if (Len < n) { Len = n; diff --git a/storage/connect/plgxml.h b/storage/connect/plgxml.h index 6870764c503..82629e4c7db 100644 --- a/storage/connect/plgxml.h +++ b/storage/connect/plgxml.h @@ -76,9 +76,9 @@ class XMLDOCUMENT : public BLOCK { virtual void SetNofree(bool b) = 0; // Methods - virtual bool Initialize(PGLOBAL, char *, bool) = 0; + virtual bool Initialize(PGLOBAL, PCSZ, bool) = 0; virtual bool ParseFile(PGLOBAL, char *) = 0; - virtual bool NewDoc(PGLOBAL, char *) = 0; + virtual bool NewDoc(PGLOBAL, PCSZ) = 0; virtual void AddComment(PGLOBAL, char *) = 0; virtual PXNODE GetRoot(PGLOBAL) = 0; virtual PXNODE NewRoot(PGLOBAL, char *) = 0; @@ -95,7 +95,7 @@ class XMLDOCUMENT : public BLOCK { // Utility bool MakeNSlist(PGLOBAL g); - bool InitZip(PGLOBAL g, char *entry); + bool InitZip(PGLOBAL g, PCSZ entry); char *GetMemDoc(PGLOBAL g, char *fn); void CloseZip(void); @@ -131,15 +131,15 @@ class XMLNODE : public BLOCK { virtual PXLIST SelectNodes(PGLOBAL, char *, PXLIST = NULL) = 0; virtual PXNODE SelectSingleNode(PGLOBAL, char *, PXNODE = NULL) = 0; virtual PXATTR GetAttribute(PGLOBAL, char *, PXATTR = NULL) = 0; - virtual PXNODE AddChildNode(PGLOBAL, char *, PXNODE = NULL) = 0; + virtual PXNODE AddChildNode(PGLOBAL, PCSZ, PXNODE = NULL) = 0; virtual PXATTR AddProperty(PGLOBAL, char *, PXATTR = NULL) = 0; - virtual void AddText(PGLOBAL, char *) = 0; + virtual void AddText(PGLOBAL, PCSZ) = 0; virtual void DeleteChild(PGLOBAL, PXNODE) = 0; protected: PXNODE NewChild(PXNODE ncp); void Delete(PXNODE dnp); - char *BufAlloc(PGLOBAL g, char *p, int n); + char 
*BufAlloc(PGLOBAL g, const char *p, int n); // Constructor XMLNODE(PXDOC dp); diff --git a/storage/connect/plugutil.c b/storage/connect/plugutil.cpp index bfac8a5fd99..f0822526b98 100644 --- a/storage/connect/plugutil.c +++ b/storage/connect/plugutil.cpp @@ -139,31 +139,39 @@ PGLOBAL PlugInit(LPCSTR Language, uint worksize) htrc("PlugInit: Language='%s'\n", ((!Language) ? "Null" : (char*)Language)); - if (!(g = malloc(sizeof(GLOBAL)))) { - fprintf(stderr, MSG(GLOBAL_ERROR), (int)sizeof(GLOBAL)); - return NULL; - } else { + try { + g = new GLOBAL; + } catch (...) { + fprintf(stderr, MSG(GLOBAL_ERROR), (int)sizeof(GLOBAL)); + return NULL; + } // end try/catch + + //if (!(g = (PGLOBAL)malloc(sizeof(GLOBAL)))) { + // fprintf(stderr, MSG(GLOBAL_ERROR), (int)sizeof(GLOBAL)); + // return NULL; + // } else { g->Sarea = NULL; g->Createas = 0; g->Alchecked = 0; g->Mrr = 0; - g->Activityp = g->ActivityStart = NULL; + g->Activityp = NULL; g->Xchk = NULL; g->N = 0; + g->More = 0; strcpy(g->Message, ""); /*******************************************************************/ /* Allocate the main work segment. 
*/ /*******************************************************************/ if (worksize && !(g->Sarea = PlugAllocMem(g, worksize))) { - char errmsg[256]; + char errmsg[MAX_STR]; sprintf(errmsg, MSG(WORK_AREA), g->Message); strcpy(g->Message, errmsg); g->Sarea_Size = 0; } else g->Sarea_Size = worksize; - } /* endif g */ + //} /* endif g */ g->jump_level = -1; /* New setting to allow recursive call of Plug */ return(g); @@ -182,7 +190,7 @@ int PlugExit(PGLOBAL g) if (g->Sarea) free(g->Sarea); - free(g); + delete g; return rc; } /* end of PlugExit */ @@ -475,7 +483,7 @@ void *PlugAllocMem(PGLOBAL g, uint size) /***********************************************************************/ BOOL PlugSubSet(PGLOBAL g __attribute__((unused)), void *memp, uint size) { - PPOOLHEADER pph = memp; + PPOOLHEADER pph = (PPOOLHEADER)memp; pph->To_Free = (OFFSET)sizeof(POOLHEADER); pph->FreeBlk = size - pph->To_Free; @@ -501,7 +509,6 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size) /*******************************************************************/ memp = g->Sarea; -//size = ((size + 3) / 4) * 4; /* Round up size to multiple of 4 */ size = ((size + 7) / 8) * 8; /* Round up size to multiple of 8 */ pph = (PPOOLHEADER)memp; @@ -510,26 +517,24 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size) memp, size, pph->To_Free, pph->FreeBlk); if ((uint)size > pph->FreeBlk) { /* Not enough memory left in pool */ - char *pname = "Work"; + PCSZ pname = "Work"; sprintf(g->Message, "Not enough memory in %s area for request of %u (used=%d free=%d)", - pname, (uint) size, pph->To_Free, pph->FreeBlk); + pname, (uint)size, pph->To_Free, pph->FreeBlk); if (trace) htrc("PlugSubAlloc: %s\n", g->Message); - /* Nothing we can do if longjmp is not initialized. */ - assert(g->jump_level >= 0); - longjmp(g->jumper[g->jump_level], 1); + throw 1234; } /* endif size OS32 code */ /*********************************************************************/ /* Do the suballocation the simplest way. 
*/ /*********************************************************************/ memp = MakePtr(memp, pph->To_Free); /* Points to suballocated block */ - pph->To_Free += size; /* New offset of pool free block */ - pph->FreeBlk -= size; /* New size of pool free block */ + pph->To_Free += (OFFSET)size; /* New offset of pool free block */ + pph->FreeBlk -= (uint)size; /* New size of pool free block */ if (trace > 3) htrc("Done memp=%p used=%d free=%d\n", diff --git a/storage/connect/preparse.h b/storage/connect/preparse.h index 2892a958bdd..f16624548fb 100644 --- a/storage/connect/preparse.h +++ b/storage/connect/preparse.h @@ -7,14 +7,14 @@ /* Struct of variables used by the date format pre-parser. */ /***********************************************************************/ typedef struct _datpar { - char *Format; // Points to format to decode + const char *Format; // Points to format to decode char *Curp; // Points to current parsing position char *InFmt; // Start of input format char *OutFmt; // Start of output format int Index[8]; // Indexes of date values int Num; // Number of values to retrieve int Flag; // 1: Input, 2: Output, 4: no output blank - int Outsize; // Size of output buffers + int Outsize; // Size of output buffers } DATPAR, *PDTP; /***********************************************************************/ diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp index c6878737f1d..5284e2ef856 100644 --- a/storage/connect/reldef.cpp +++ b/storage/connect/reldef.cpp @@ -88,7 +88,7 @@ PTOS RELDEF::GetTopt(void) /***********************************************************************/ /* This function sets an integer table information. 
*/ /***********************************************************************/ -bool RELDEF::SetIntCatInfo(PSZ what, int n) +bool RELDEF::SetIntCatInfo(PCSZ what, int n) { return Hc->SetIntegerOption(what, n); } // end of SetIntCatInfo @@ -96,7 +96,7 @@ bool RELDEF::SetIntCatInfo(PSZ what, int n) /***********************************************************************/ /* This function returns integer table information. */ /***********************************************************************/ -int RELDEF::GetIntCatInfo(PSZ what, int idef) +int RELDEF::GetIntCatInfo(PCSZ what, int idef) { int n= Hc->GetIntegerOption(what); @@ -106,7 +106,7 @@ int RELDEF::GetIntCatInfo(PSZ what, int idef) /***********************************************************************/ /* This function returns Boolean table information. */ /***********************************************************************/ -bool RELDEF::GetBoolCatInfo(PSZ what, bool bdef) +bool RELDEF::GetBoolCatInfo(PCSZ what, bool bdef) { bool b= Hc->GetBooleanOption(what, bdef); @@ -116,9 +116,10 @@ bool RELDEF::GetBoolCatInfo(PSZ what, bool bdef) /***********************************************************************/ /* This function returns size catalog information. */ /***********************************************************************/ -int RELDEF::GetSizeCatInfo(PSZ what, PSZ sdef) +int RELDEF::GetSizeCatInfo(PCSZ what, PCSZ sdef) { - char * s, c; + char c; + PCSZ s; int i, n= 0; if (!(s= Hc->GetStringOption(what))) @@ -128,6 +129,7 @@ int RELDEF::GetSizeCatInfo(PSZ what, PSZ sdef) switch (toupper(c)) { case 'M': n *= 1024; + // fall through case 'K': n *= 1024; } // endswitch c @@ -138,9 +140,9 @@ int RELDEF::GetSizeCatInfo(PSZ what, PSZ sdef) /***********************************************************************/ /* This function sets char table information in buf. 
*/ /***********************************************************************/ -int RELDEF::GetCharCatInfo(PSZ what, PSZ sdef, char *buf, int size) +int RELDEF::GetCharCatInfo(PCSZ what, PCSZ sdef, char *buf, int size) { - char *s= Hc->GetStringOption(what); + PCSZ s= Hc->GetStringOption(what); strncpy(buf, ((s) ? s : sdef), size); return size; @@ -158,9 +160,10 @@ bool RELDEF::Partitioned(void) /* This function returns string table information. */ /* Default parameter is "*" to get the handler default. */ /***********************************************************************/ -char *RELDEF::GetStringCatInfo(PGLOBAL g, PSZ what, PSZ sdef) +char *RELDEF::GetStringCatInfo(PGLOBAL g, PCSZ what, PCSZ sdef) { - char *name, *sval= NULL, *s= Hc->GetStringOption(what, sdef); + char *sval = NULL; + PCSZ name, s= Hc->GetStringOption(what, sdef); if (s) { if (!Hc->IsPartitioned() || @@ -168,12 +171,12 @@ char *RELDEF::GetStringCatInfo(PGLOBAL g, PSZ what, PSZ sdef) && stricmp(what, "connect"))) sval= PlugDup(g, s); else - sval= s; + sval= (char*)s; } else if (!stricmp(what, "filename")) { // Return default file name - char *ftype= Hc->GetStringOption("Type", "*"); - int i, n; + PCSZ ftype= Hc->GetStringOption("Type", "*"); + int i, n; if (IsFileType(GetTypeID(ftype))) { name= Hc->GetPartName(); @@ -251,9 +254,9 @@ bool TABDEF::Define(PGLOBAL g, PCATLG cat, /***********************************************************************/ /* This function returns the database data path. */ /***********************************************************************/ -PSZ TABDEF::GetPath(void) +PCSZ TABDEF::GetPath(void) { - return (Database) ? (PSZ)Database : (Hc) ? Hc->GetDataPath() : NULL; + return (Database) ? Database : (Hc) ? 
Hc->GetDataPath() : NULL; } // end of GetPath /***********************************************************************/ @@ -277,8 +280,13 @@ int TABDEF::GetColCatInfo(PGLOBAL g) // Take care of the column definitions i= poff= nof= nlg= 0; +#if defined(__WIN__) // Offsets of HTML and DIR tables start from 0, DBF at 1 - loff= (tc == TAB_DBF) ? 1 : (tc == TAB_XML || tc == TAB_DIR) ? -1 : 0; + loff= (tc == TAB_DBF) ? 1 : (tc == TAB_XML || tc == TAB_DIR) ? -1 : 0; +#else // !__WIN__ + // Offsets of HTML tables start from 0, DIR and DBF at 1 + loff = (tc == TAB_DBF || tc == TAB_DIR) ? 1 : (tc == TAB_XML) ? -1 : 0; +#endif // !__WIN__ while (true) { // Default Offset depends on table type @@ -610,9 +618,10 @@ bool OEMDEF::DefineAM(PGLOBAL g, LPCSTR, int) if (!*Module) Module = Subtype; - Desc = (char*)PlugSubAlloc(g, NULL, strlen(Module) - + strlen(Subtype) + 3); - sprintf(Desc, "%s(%s)", Module, Subtype); + char *desc = (char*)PlugSubAlloc(g, NULL, strlen(Module) + + strlen(Subtype) + 3); + sprintf(desc, "%s(%s)", Module, Subtype); + Desc = desc; return false; } // end of DefineAM diff --git a/storage/connect/reldef.h b/storage/connect/reldef.h index 52a131dbf3d..8b19a413ade 100644 --- a/storage/connect/reldef.h +++ b/storage/connect/reldef.h @@ -42,13 +42,13 @@ class DllExport RELDEF : public BLOCK { // Relation definition block // Methods PTOS GetTopt(void); - bool GetBoolCatInfo(PSZ what, bool bdef); - bool SetIntCatInfo(PSZ what, int ival); + bool GetBoolCatInfo(PCSZ what, bool bdef); + bool SetIntCatInfo(PCSZ what, int ival); bool Partitioned(void); - int GetIntCatInfo(PSZ what, int idef); - int GetSizeCatInfo(PSZ what, PSZ sdef); - int GetCharCatInfo(PSZ what, PSZ sdef, char *buf, int size); - char *GetStringCatInfo(PGLOBAL g, PSZ what, PSZ sdef); + int GetIntCatInfo(PCSZ what, int idef); + int GetSizeCatInfo(PCSZ what, PCSZ sdef); + int GetCharCatInfo(PCSZ what, PCSZ sdef, char *buf, int size); + char *GetStringCatInfo(PGLOBAL g, PCSZ what, PCSZ sdef); virtual int 
Indexable(void) {return 0;} virtual bool Define(PGLOBAL g, PCATLG cat, LPCSTR name, LPCSTR schema, LPCSTR am) = 0; @@ -84,7 +84,7 @@ public: void SetNext(PTABDEF tdfp) {Next = tdfp;} int GetMultiple(void) {return Multiple;} int GetPseudo(void) {return Pseudo;} - PSZ GetPath(void); + PCSZ GetPath(void); //PSZ GetPath(void) // {return (Database) ? (PSZ)Database : Cat->GetDataPath();} bool SepIndex(void) {return GetBoolCatInfo("SepIndex", false);} @@ -105,8 +105,8 @@ public: protected: // Members - PSZ Schema; /* Table schema (for ODBC) */ - PSZ Desc; /* Table description */ + PCSZ Schema; /* Table schema (for ODBC) */ + PCSZ Desc; /* Table description */ uint Catfunc; /* Catalog function ID */ int Card; /* (max) number of rows in table */ int Elemt; /* Number of rows in blocks or rowset */ diff --git a/storage/connect/tabcol.cpp b/storage/connect/tabcol.cpp index fde1baa6317..2740864a69b 100644 --- a/storage/connect/tabcol.cpp +++ b/storage/connect/tabcol.cpp @@ -73,7 +73,7 @@ PTABLE XTAB::Link(PTABLE tab2) /***********************************************************************/ /* Make file output of XTAB contents. */ /***********************************************************************/ -void XTAB::Print(PGLOBAL g, FILE *f, uint n) +void XTAB::Printf(PGLOBAL g, FILE *f, uint n) { char m[64]; @@ -91,7 +91,7 @@ void XTAB::Print(PGLOBAL g, FILE *f, uint n) /***********************************************************************/ /* Make string output of XTAB contents. */ /***********************************************************************/ -void XTAB::Print(PGLOBAL, char *ps, uint z) +void XTAB::Prints(PGLOBAL, char *ps, uint z) { char buf[128]; int i, n = (int)z - 1; @@ -134,7 +134,7 @@ bool COLUMN::SetFormat(PGLOBAL g, FORMAT&) /***********************************************************************/ /* Make file output of COLUMN contents. 
*/ /***********************************************************************/ -void COLUMN::Print(PGLOBAL g, FILE *f, uint n) +void COLUMN::Printf(PGLOBAL g, FILE *f, uint n) { char m[64]; @@ -154,7 +154,7 @@ void COLUMN::Print(PGLOBAL g, FILE *f, uint n) /***********************************************************************/ /* Make string output of COLUMN contents. */ /***********************************************************************/ -void COLUMN::Print(PGLOBAL, char *ps, uint z) +void COLUMN::Prints(PGLOBAL, char *ps, uint z) { char buf[80]; diff --git a/storage/connect/tabcol.h b/storage/connect/tabcol.h index 3bfc37e69c1..e4657e2f261 100644 --- a/storage/connect/tabcol.h +++ b/storage/connect/tabcol.h @@ -38,8 +38,8 @@ class DllExport XTAB: public BLOCK { // Table Name-Schema-Srcdef block. // Methods PTABLE Link(PTABLE); - void Print(PGLOBAL g, FILE *f, uint n); - void Print(PGLOBAL g, char *ps, uint z); + void Printf(PGLOBAL g, FILE *f, uint n); + void Prints(PGLOBAL g, char *ps, uint z); protected: // Members @@ -78,8 +78,8 @@ class DllExport COLUMN: public XOBJECT { // Column Name/Qualifier block. 
void SetTo_Col(PCOL colp) {To_Col = colp;} // Methods - virtual void Print(PGLOBAL g, FILE *f, uint n); - virtual void Print(PGLOBAL g, char *ps, uint z); + virtual void Printf(PGLOBAL g, FILE *f, uint n); + virtual void Prints(PGLOBAL g, char *ps, uint z); // All methods below should never be used for COLUMN's virtual void Reset(void) {assert(false);} virtual bool Compare(PXOB) {assert(false); return false;} diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp index d2bb3d7a4af..468966e79d9 100644 --- a/storage/connect/tabdos.cpp +++ b/storage/connect/tabdos.cpp @@ -1,11 +1,11 @@ /************* TabDos C++ Program Source Code File (.CPP) **************/ /* PROGRAM NAME: TABDOS */ /* ------------- */ -/* Version 4.9.2 */ +/* Version 4.9.3 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 1998-2016 */ +/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -98,6 +98,7 @@ DOSDEF::DOSDEF(void) Ofn = NULL; Entry = NULL; To_Indx = NULL; + Pwd = NULL; Recfm = RECFM_VAR; Mapped = false; Zipped = false; @@ -131,7 +132,7 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int) bool map = (am && (*am == 'M' || *am == 'm')); LPCSTR dfm = (am && (*am == 'F' || *am == 'f')) ? "F" : (am && (*am == 'B' || *am == 'b')) ? "B" - : (am && !stricmp(am, "DBF")) ? "D" : "V"; + : (am && !stricmp(am, "DBF")) ? "D" : "V"; if ((Zipped = GetBoolCatInfo("Zipped", false))) { Entry = GetStringCatInfo(g, "Entry", NULL); @@ -139,14 +140,15 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int) : false; Mulentries = GetBoolCatInfo("Mulentries", Mulentries); Append = GetBoolCatInfo("Append", false); - } + Pwd = GetStringCatInfo(g, "Password", NULL); + } // endif Zipped Desc = Fn = GetStringCatInfo(g, "Filename", NULL); Ofn = GetStringCatInfo(g, "Optname", Fn); GetCharCatInfo("Recfm", (PSZ)dfm, buf, sizeof(buf)); Recfm = (toupper(*buf) == 'F') ? 
RECFM_FIX : (toupper(*buf) == 'B') ? RECFM_BIN : - (toupper(*buf) == 'D') ? RECFM_DBF : RECFM_VAR; + (toupper(*buf) == 'D') ? RECFM_DBF : RECFM_VAR; Lrecl = GetIntCatInfo("Lrecl", 0); if (Recfm != RECFM_DBF) @@ -180,7 +182,7 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int) /***********************************************************************/ bool DOSDEF::GetOptFileName(PGLOBAL g, char *filename) { - char *ftype; + PCSZ ftype; switch (Recfm) { case RECFM_VAR: ftype = ".dop"; break; @@ -237,9 +239,9 @@ void DOSDEF::RemoveOptValues(PGLOBAL g) /***********************************************************************/ bool DOSDEF::DeleteIndexFile(PGLOBAL g, PIXDEF pxdf) { - char *ftype; - char filename[_MAX_PATH]; - bool sep, rc = false; + PCSZ ftype; + char filename[_MAX_PATH]; + bool sep, rc = false; if (!To_Indx) return false; // No index @@ -351,7 +353,7 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode) if (Zipped) { #if defined(ZIP_SUPPORT) if (Recfm == RECFM_VAR) { - if (mode == MODE_READ || mode == MODE_ANY) { + if (mode == MODE_READ || mode == MODE_ANY || mode == MODE_ALTER) { txfp = new(g) UNZFAM(this); } else if (mode == MODE_INSERT) { txfp = new(g) ZIPFAM(this); @@ -362,7 +364,7 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode) tdbp = new(g) TDBDOS(this, txfp); } else { - if (mode == MODE_READ || mode == MODE_ANY) { + if (mode == MODE_READ || mode == MODE_ANY || mode == MODE_ALTER) { txfp = new(g) UZXFAM(this); } else if (mode == MODE_INSERT) { txfp = new(g) ZPXFAM(this); @@ -1307,6 +1309,7 @@ PBF TDBDOS::InitBlockFilter(PGLOBAL g, PFIL filp) } // endif !opm // if opm, pass thru + /* fall through */ case OP_IN: if (filp->GetArgType(0) == TYPE_COLBLK && filp->GetArgType(1) == TYPE_ARRAY) { @@ -1509,8 +1512,8 @@ PBF TDBDOS::CheckBlockFilari(PGLOBAL g, PXOB *arg, int op, bool *cnv) if (n == 8 && ctype != TYPE_LIST) { // Should never happen strcpy(g->Message, "Block opt: bad constant"); - longjmp(g->jumper[g->jump_level], 99); - } // endif Conv + throw 99; + } // 
endif Conv if (type[0] == 1) { // Make it always as Column-op-Value @@ -1790,8 +1793,8 @@ err: /* Make a dynamic index. */ /***********************************************************************/ bool TDBDOS::InitialyzeIndex(PGLOBAL g, volatile PIXDEF xdp, bool sorted) - { - int k, rc; +{ + int k; volatile bool dynamic; bool brc; PCOL colp; @@ -1861,13 +1864,7 @@ bool TDBDOS::InitialyzeIndex(PGLOBAL g, volatile PIXDEF xdp, bool sorted) } else // Column contains same values as ROWID kxp = new(g) XXROW(this); - // Prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return true; - } // endif - - if (!(rc = setjmp(g->jumper[++g->jump_level])) != 0) { + try { if (dynamic) { ResetBlockFilter(g); kxp->SetDynamic(dynamic); @@ -1892,12 +1889,17 @@ bool TDBDOS::InitialyzeIndex(PGLOBAL g, volatile PIXDEF xdp, bool sorted) } // endif brc - } else - brc = true; + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + brc = true; + } catch (const char *msg) { + strcpy(g->Message, msg); + brc = true; + } // end catch - g->jump_level--; - return brc; - } // end of InitialyzeIndex + return brc; +} // end of InitialyzeIndex /***********************************************************************/ /* DOS GetProgMax: get the max value for progress information. */ @@ -2118,7 +2120,8 @@ bool TDBDOS::OpenDB(PGLOBAL g) return false; } // endif use - if (Mode == MODE_DELETE && !Next && Txfp->GetAmType() != TYPE_AM_DOS) { + if (Mode == MODE_DELETE && !Next && Txfp->GetAmType() != TYPE_AM_DOS + && Txfp->GetAmType() != TYPE_AM_MGO) { // Delete all lines. Not handled in MAP or block mode Txfp = new(g) DOSFAM((PDOSDEF)To_Def); Txfp->SetTdbp(this); @@ -2156,16 +2159,18 @@ bool TDBDOS::OpenDB(PGLOBAL g) To_BlkFil = InitBlockFilter(g, To_Filter); /*********************************************************************/ - /* Allocate the line buffer plus a null character. 
*/ - /*********************************************************************/ - To_Line = (char*)PlugSubAlloc(g, NULL, Lrecl + 1); + /* Lrecl does not include line ending */ + /*********************************************************************/ + size_t linelen = Lrecl + ((PDOSDEF)To_Def)->Ending + 1; + + To_Line = (char*)PlugSubAlloc(g, NULL, linelen); if (Mode == MODE_INSERT) { // Spaces between fields must be filled with blanks memset(To_Line, ' ', Lrecl); To_Line[Lrecl] = '\0'; } else - memset(To_Line, 0, Lrecl + 1); + memset(To_Line, 0, linelen); if (trace) htrc("OpenDos: R%hd mode=%d To_Line=%p\n", Tdb_No, Mode, To_Line); @@ -2304,8 +2309,8 @@ void TDBDOS::CloseDB(PGLOBAL g) /***********************************************************************/ /* DOSCOL public constructor (also called by MAPCOL). */ /***********************************************************************/ -DOSCOL::DOSCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am) - : COLBLK(cdp, tp, i) +DOSCOL::DOSCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PCSZ am) + : COLBLK(cdp, tp, i) { char *p; int prec = Format.Prec; @@ -2335,7 +2340,7 @@ DOSCOL::DOSCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am) Dval = NULL; Buf = NULL; - if (txfp->Blocked && Opt && (cdp->GetMin() || cdp->GetDval())) { + if (txfp && txfp->Blocked && Opt && (cdp->GetMin() || cdp->GetDval())) { int nblk = txfp->GetBlock(); Clustered = (cdp->GetXdb2()) ? 
2 : 1; @@ -2514,8 +2519,8 @@ void DOSCOL::ReadColumn(PGLOBAL g) if (rc == RC_EF) sprintf(g->Message, MSG(INV_DEF_READ), rc); - longjmp(g->jumper[g->jump_level], 11); - } // endif + throw 11; + } // endif p = tdbp->To_Line + Deplac; field = Long; @@ -2570,8 +2575,8 @@ void DOSCOL::ReadColumn(PGLOBAL g) break; default: sprintf(g->Message, MSG(BAD_RECFM), tdbp->Ftype); - longjmp(g->jumper[g->jump_level], 34); - } // endswitch Ftype + throw 34; + } // endswitch Ftype // Set null when applicable if (Nullable) @@ -2679,8 +2684,8 @@ void DOSCOL::WriteColumn(PGLOBAL g) break; default: sprintf(g->Message, "Invalid field format for column %s", Name); - longjmp(g->jumper[g->jump_level], 31); - } // endswitch BufType + throw 31; + } // endswitch BufType p2 = Buf; } else // Standard CONNECT format @@ -2691,8 +2696,8 @@ void DOSCOL::WriteColumn(PGLOBAL g) if ((len = strlen(p2)) > field) { sprintf(g->Message, MSG(VALUE_TOO_LONG), p2, Name, field); - longjmp(g->jumper[g->jump_level], 31); - } else if (Dsp) + throw 31; + } else if (Dsp) for (i = 0; i < len; i++) if (p2[i] == '.') p2[i] = Dsp; @@ -2866,9 +2871,9 @@ bool DOSCOL::AddDistinctValue(PGLOBAL g) /***********************************************************************/ /* Make file output of a Dos column descriptor block. 
*/ /***********************************************************************/ -void DOSCOL::Print(PGLOBAL g, FILE *f, uint n) +void DOSCOL::Printf(PGLOBAL g, FILE *f, uint n) { - COLBLK::Print(g, f, n); + COLBLK::Printf(g, f, n); } // end of Print /* ------------------------------------------------------------------- */ diff --git a/storage/connect/tabdos.h b/storage/connect/tabdos.h index 922d52ee399..9722cd3777d 100644 --- a/storage/connect/tabdos.h +++ b/storage/connect/tabdos.h @@ -39,9 +39,9 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */ virtual PIXDEF GetIndx(void) {return To_Indx;} virtual void SetIndx(PIXDEF xdp) {To_Indx = xdp;} virtual bool IsHuge(void) {return Huge;} - PSZ GetFn(void) {return Fn;} - PSZ GetOfn(void) {return Ofn;} - PSZ GetEntry(void) {return Entry;} + PCSZ GetFn(void) {return Fn;} + PCSZ GetOfn(void) {return Ofn;} + PCSZ GetEntry(void) {return Entry;} bool GetMul(void) {return Mulentries;} bool GetAppend(void) {return Append;} void SetBlock(int block) { Block = block; } @@ -74,9 +74,10 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */ //virtual bool Erase(char *filename); // Members - PSZ Fn; /* Path/Name of corresponding file */ - PSZ Ofn; /* Base Path/Name of matching index files*/ - PSZ Entry; /* Zip entry name or pattern */ + PCSZ Fn; /* Path/Name of corresponding file */ + PCSZ Ofn; /* Base Path/Name of matching index files*/ + PCSZ Entry; /* Zip entry name or pattern */ + PCSZ Pwd; /* Zip password */ PIXDEF To_Indx; /* To index definitions blocks */ RECFM Recfm; /* 0:VAR, 1:FIX, 2:BIN, 3:VCT, 6:DBF */ bool Mapped; /* 0: disk file, 1: memory mapped file */ @@ -132,8 +133,8 @@ class DllExport TDBDOS : public TDBASE { // Implementation virtual AMT GetAmType(void) {return Txfp->GetAmType();} - virtual PSZ GetFile(PGLOBAL) {return Txfp->To_File;} - virtual void SetFile(PGLOBAL, PSZ fn) {Txfp->To_File = fn;} + virtual PCSZ GetFile(PGLOBAL) {return Txfp->To_File;} + virtual void 
SetFile(PGLOBAL, PCSZ fn) {Txfp->To_File = fn;} virtual void SetAbort(bool b) {Abort = b;} virtual RECFM GetFtype(void) {return Ftype;} virtual bool SkipHeader(PGLOBAL) {return false;} @@ -213,7 +214,7 @@ class DllExport DOSCOL : public COLBLK { friend class TDBFIX; public: // Constructors - DOSCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am = "DOS"); + DOSCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PCSZ am = "DOS"); DOSCOL(DOSCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation @@ -231,12 +232,12 @@ class DllExport DOSCOL : public COLBLK { virtual PVBLK GetDval(void) {return Dval;} // Methods - using COLBLK::Print; + //using COLBLK::Print; virtual bool VarSize(void); virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check); virtual void ReadColumn(PGLOBAL g); virtual void WriteColumn(PGLOBAL g); - virtual void Print(PGLOBAL g, FILE *, uint); + virtual void Printf(PGLOBAL g, FILE *, uint); protected: virtual bool SetMinMax(PGLOBAL g); diff --git a/storage/connect/tabext.cpp b/storage/connect/tabext.cpp index b099321eff5..4ef88fec746 100644 --- a/storage/connect/tabext.cpp +++ b/storage/connect/tabext.cpp @@ -35,9 +35,9 @@ /***********************************************************************/ /* CONDFIL Constructor. */ /***********************************************************************/ -CONDFIL::CONDFIL(const Item *cond, uint idx, AMT type) +CONDFIL::CONDFIL(uint idx, AMT type) { - Cond = cond; +//Cond = cond; Idx = idx; Type = type; Op = OP_XX; @@ -61,7 +61,7 @@ int CONDFIL::Init(PGLOBAL g, PHC hc) bool h; if (options) - alt = GetListOption(g, "Alias", options->oplist, NULL); + alt = (char*)GetListOption(g, "Alias", options->oplist, NULL); while (alt) { if (!(p = strchr(alt, '='))) { @@ -267,7 +267,7 @@ TDBEXT::TDBEXT(PTDBEXT tdbp) : TDB(tdbp) /******************************************************************/ /* Convert an UTF-8 string to latin characters. 
*/ /******************************************************************/ -int TDBEXT::Decode(char *txt, char *buf, size_t n) +int TDBEXT::Decode(PCSZ txt, char *buf, size_t n) { uint dummy_errors; uint32 len = copy_and_convert(buf, n, &my_charset_latin1, @@ -285,16 +285,17 @@ int TDBEXT::Decode(char *txt, char *buf, size_t n) /***********************************************************************/ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) { - char *schmp = NULL, *catp = NULL, buf[NAM_LEN * 3]; + PCSZ schmp = NULL; + char *catp = NULL, buf[NAM_LEN * 3]; int len; - bool oom = false, first = true; + bool first = true; PTABLE tablep = To_Table; PCOL colp; if (Srcdef) { if ((catp = strstr(Srcdef, "%s"))) { char *fil1= 0, *fil2; - PSZ ph = ((EXTDEF*)To_Def)->Phpos; + PCSZ ph = ((EXTDEF*)To_Def)->Phpos; if (!ph) ph = (strstr(catp + 2, "%s")) ? const_cast<char*>("WH") : @@ -342,7 +343,7 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) for (colp = Columns; colp; colp = colp->GetNext()) if (!colp->IsSpecial()) { if (!first) - oom |= Query->Append(", "); + Query->Append(", "); else first = false; @@ -351,11 +352,11 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) if (Quote) { // Put column name between identifier quotes in case in contains blanks - oom |= Query->Append(Quote); - oom |= Query->Append(buf); - oom |= Query->Append(Quote); + Query->Append(Quote); + Query->Append(buf); + Query->Append(Quote); } else - oom |= Query->Append(buf); + Query->Append(buf); ((PEXTCOL)colp)->SetRank(++Ncol); } // endif colp @@ -363,13 +364,13 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) } else // !Columns can occur for queries such that sql count(*) from... // for which we will count the rows from sql * from... 
- oom |= Query->Append('*'); + Query->Append('*'); } else // SQL statement used to retrieve the size of the result - oom |= Query->Append("count(*)"); + Query->Append("count(*)"); - oom |= Query->Append(" FROM "); + Query->Append(" FROM "); if (Catalog && *Catalog) catp = Catalog; @@ -381,17 +382,17 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) schmp = Schema; if (catp) { - oom |= Query->Append(catp); + Query->Append(catp); if (schmp) { - oom |= Query->Append('.'); - oom |= Query->Append(schmp); + Query->Append('.'); + Query->Append(schmp); } // endif schmp - oom |= Query->Append('.'); + Query->Append('.'); } else if (schmp) { - oom |= Query->Append(schmp); - oom |= Query->Append('.'); + Query->Append(schmp); + Query->Append('.'); } // endif schmp // Table name can be encoded in UTF-8 @@ -399,18 +400,18 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) if (Quote) { // Put table name between identifier quotes in case in contains blanks - oom |= Query->Append(Quote); - oom |= Query->Append(buf); - oom |= Query->Append(Quote); + Query->Append(Quote); + Query->Append(buf); + Query->Append(Quote); } else - oom |= Query->Append(buf); + Query->Append(buf); len = Query->GetLength(); if (To_CondFil) { if (Mode == MODE_READ) { - oom |= Query->Append(" WHERE "); - oom |= Query->Append(To_CondFil->Body); + Query->Append(" WHERE "); + Query->Append(To_CondFil->Body); len = Query->GetLength() + 1; } else len += (strlen(To_CondFil->Body) + 256); @@ -418,10 +419,11 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) } else len += ((Mode == MODE_READX) ? 
256 : 1); - if (oom || Query->Resize(len)) { + if (Query->IsTruncated()) { strcpy(g->Message, "MakeSQL: Out of memory"); return true; - } // endif oom + } else + Query->Resize(len); if (trace) htrc("Query=%s\n", Query->GetStr()); @@ -435,15 +437,17 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) /***********************************************************************/ bool TDBEXT::MakeCommand(PGLOBAL g) { - char *p, *stmt, name[68], *body = NULL; + PCSZ schmp = NULL; + char *p, *stmt, name[132], *body = NULL; char *qrystr = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 1); bool qtd = Quoted > 0; + char q = qtd ? *Quote : ' '; int i = 0, k = 0; // Make a lower case copy of the originale query and change // back ticks to the data source identifier quoting character do { - qrystr[i] = (Qrystr[i] == '`') ? *Quote : tolower(Qrystr[i]); + qrystr[i] = (Qrystr[i] == '`') ? q : tolower(Qrystr[i]); } while (Qrystr[i++]); if (To_CondFil && (p = strstr(qrystr, " where "))) { @@ -460,27 +464,50 @@ bool TDBEXT::MakeCommand(PGLOBAL g) strlwr(strcat(strcat(strcpy(name, " "), Name), " ")); if (strstr(" update delete low_priority ignore quick from ", name)) { - strlwr(strcat(strcat(strcpy(name, Quote), Name), Quote)); - k += 2; + if (Quote) { + strlwr(strcat(strcat(strcpy(name, Quote), Name), Quote)); + k += 2; + } else { + strcpy(g->Message, "Quoted must be specified"); + return true; + } // endif Quote + } else strlwr(strcpy(name, Name)); // Not a keyword if ((p = strstr(qrystr, name))) { for (i = 0; i < p - qrystr; i++) - stmt[i] = (Qrystr[i] == '`') ? *Quote : Qrystr[i]; + stmt[i] = (Qrystr[i] == '`') ? 
q : Qrystr[i]; stmt[i] = 0; + k += i + (int)strlen(Name); - if (qtd && *(p - 1) == ' ') + if (Schema && *Schema) + schmp = Schema; + + if (qtd && *(p - 1) == ' ') { + if (schmp) + strcat(strcat(stmt, schmp), "."); + strcat(strcat(strcat(stmt, Quote), TableName), Quote); - else + } else { + if (schmp) { + if (qtd && *(p - 1) != ' ') { + stmt[i - 1] = 0; + strcat(strcat(strcat(stmt, schmp), "."), Quote); + } else + strcat(strcat(stmt, schmp), "."); + + } // endif schmp + strcat(stmt, TableName); + } // endif's i = (int)strlen(stmt); do { - stmt[i++] = (Qrystr[k] == '`') ? *Quote : Qrystr[k]; + stmt[i++] = (Qrystr[k] == '`') ? q : Qrystr[k]; } while (Qrystr[k++]); if (body) @@ -539,7 +566,7 @@ int TDBEXT::GetProgMax(PGLOBAL g) /***********************************************************************/ /* EXTCOL public constructor. */ /***********************************************************************/ -EXTCOL::EXTCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) +EXTCOL::EXTCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) : COLBLK(cdp, tdbp, i) { if (cprec) { diff --git a/storage/connect/tabext.h b/storage/connect/tabext.h index 2ef20c89f2c..497b6074d48 100644 --- a/storage/connect/tabext.h +++ b/storage/connect/tabext.h @@ -7,7 +7,7 @@ /***********************************************************************/ #ifndef __TABEXT_H -#define __TABEXTF_H +#define __TABEXT_H #include "reldef.h" @@ -28,14 +28,14 @@ class ALIAS : public BLOCK { class CONDFIL : public BLOCK { public: // Constructor - CONDFIL(const Item *cond, uint idx, AMT type); + CONDFIL(uint idx, AMT type); // Functions int Init(PGLOBAL g, PHC hc); const char *Chk(const char *cln, bool *h); // Members - const Item *Cond; +//const Item *Cond; AMT Type; uint Idx; OPVAL Op; @@ -60,10 +60,10 @@ public: // Implementation virtual const char *GetType(void) { return "EXT"; } - inline PSZ GetTabname(void) { return Tabname; } - inline PSZ GetTabschema(void) { return Tabschema; } - inline PSZ 
GetUsername(void) { return Username; }; - inline PSZ GetPassword(void) { return Password; }; + inline PCSZ GetTabname(void) { return Tabname; } + inline PCSZ GetTabschema(void) { return Tabschema; } + inline PCSZ GetUsername(void) { return Username; }; + inline PCSZ GetPassword(void) { return Password; }; inline PSZ GetTabcat(void) { return Tabcat; } inline PSZ GetSrcdef(void) { return Srcdef; } inline char GetSep(void) { return (Sep) ? *Sep : 0; } @@ -76,10 +76,10 @@ public: protected: // Members - PSZ Tabname; /* External table name */ - PSZ Tabschema; /* External table schema */ - PSZ Username; /* User connect name */ - PSZ Password; /* Password connect info */ + PCSZ Tabname; /* External table name */ + PCSZ Tabschema; /* External table schema */ + PCSZ Username; /* User connect name */ + PCSZ Password; /* Password connect info */ PSZ Tabcat; /* External table catalog */ PSZ Tabtyp; /* Catalog table type */ PSZ Colpat; /* Catalog column pattern */ @@ -115,7 +115,7 @@ public: virtual bool IsRemote(void) { return true; } // Methods - virtual PSZ GetServer(void) { return "Remote"; } + virtual PCSZ GetServer(void) { return "Remote"; } virtual int GetRecpos(void); // Database routines @@ -127,19 +127,19 @@ protected: virtual bool MakeSQL(PGLOBAL g, bool cnt); //virtual bool MakeInsert(PGLOBAL g); virtual bool MakeCommand(PGLOBAL g); - int Decode(char *utf, char *buf, size_t n); + int Decode(PCSZ utf, char *buf, size_t n); // Members PQRYRES Qrp; // Points to storage result PSTRG Query; // Constructed SQL query - char *TableName; // Points to ODBC table name - char *Schema; // Points to ODBC table Schema - char *User; // User connect info - char *Pwd; // Password connect info + PCSZ TableName; // Points to ODBC table name + PCSZ Schema; // Points to ODBC table Schema + PCSZ User; // User connect info + PCSZ Pwd; // Password connect info char *Catalog; // Points to ODBC table Catalog char *Srcdef; // The source table SQL definition char *Count; // Points to count(*) 
SQL statement - //char *Where; // Points to local where clause + //char *Where; // Points to local where clause char *Quote; // The identifier quoting character char *MulConn; // Used for multiple ODBC tables char *DBQ; // The address part of Connect string @@ -170,7 +170,7 @@ class DllExport EXTCOL : public COLBLK { friend class TDBEXT; public: // Constructor - EXTCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am); + EXTCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am); EXTCOL(PEXTCOL colp, PTDB tdbp); // Constructor used in copy process // Implementation diff --git a/storage/connect/tabfix.cpp b/storage/connect/tabfix.cpp index bf123cd36c8..a78d5861e53 100644 --- a/storage/connect/tabfix.cpp +++ b/storage/connect/tabfix.cpp @@ -1,11 +1,11 @@ /************* TabFix C++ Program Source Code File (.CPP) **************/ /* PROGRAM NAME: TABFIX */ /* ------------- */ -/* Version 4.9.1 */ +/* Version 4.9.2 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 1998-2016 */ +/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -373,7 +373,7 @@ int TDBFIX::WriteDB(PGLOBAL g) /***********************************************************************/ /* BINCOL public constructor. 
*/ /***********************************************************************/ -BINCOL::BINCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am) +BINCOL::BINCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PCSZ am) : DOSCOL(g, cdp, tp, cp, i, am) { char c, *fmt = cdp->GetFmt(); @@ -411,8 +411,8 @@ BINCOL::BINCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am) case 'D': M = sizeof(double); break; default: sprintf(g->Message, MSG(BAD_BIN_FMT), Fmt, Name); - longjmp(g->jumper[g->jump_level], 11); - } // endswitch Fmt + throw 11; + } // endswitch Fmt } else if (IsTypeChar(Buf_Type)) Eds = 0; @@ -486,8 +486,8 @@ void BINCOL::ReadColumn(PGLOBAL g) if (rc == RC_EF) sprintf(g->Message, MSG(INV_DEF_READ), rc); - longjmp(g->jumper[g->jump_level], 11); - } // endif + throw 11; + } // endif p = tdbp->To_Line + Deplac; @@ -545,8 +545,8 @@ void BINCOL::ReadColumn(PGLOBAL g) break; default: sprintf(g->Message, MSG(BAD_BIN_FMT), Fmt, Name); - longjmp(g->jumper[g->jump_level], 11); - } // endswitch Fmt + throw 11; + } // endswitch Fmt // Set null when applicable if (Nullable) @@ -595,8 +595,8 @@ void BINCOL::WriteColumn(PGLOBAL g) } else if (Value->GetBinValue(p, Long, Status)) { sprintf(g->Message, MSG(BIN_F_TOO_LONG), Name, Value->GetSize(), Long); - longjmp(g->jumper[g->jump_level], 31); - } // endif p + throw 31; + } // endif p break; case 'S': // Short integer @@ -604,8 +604,8 @@ void BINCOL::WriteColumn(PGLOBAL g) if (n > 32767LL || n < -32768LL) { sprintf(g->Message, MSG(VALUE_TOO_BIG), n, Name); - longjmp(g->jumper[g->jump_level], 31); - } else if (Status) + throw 31; + } else if (Status) Value->GetValueNonAligned<short>(p, (short)n); break; @@ -614,8 +614,8 @@ void BINCOL::WriteColumn(PGLOBAL g) if (n > 255LL || n < -256LL) { sprintf(g->Message, MSG(VALUE_TOO_BIG), n, Name); - longjmp(g->jumper[g->jump_level], 31); - } else if (Status) + throw 31; + } else if (Status) *p = (char)n; break; @@ -624,8 +624,8 @@ void BINCOL::WriteColumn(PGLOBAL g) if (n > 
INT_MAX || n < INT_MIN) { sprintf(g->Message, MSG(VALUE_TOO_BIG), n, Name); - longjmp(g->jumper[g->jump_level], 31); - } else if (Status) + throw 31; + } else if (Status) Value->GetValueNonAligned<int>(p, (int)n); break; @@ -648,8 +648,8 @@ void BINCOL::WriteColumn(PGLOBAL g) case 'C': // Characters if ((n = (signed)strlen(Value->GetCharString(Buf))) > Long) { sprintf(g->Message, MSG(BIN_F_TOO_LONG), Name, (int) n, Long); - longjmp(g->jumper[g->jump_level], 31); - } // endif n + throw 31; + } // endif n if (Status) { s = Value->GetCharString(Buf); @@ -660,8 +660,8 @@ void BINCOL::WriteColumn(PGLOBAL g) break; default: sprintf(g->Message, MSG(BAD_BIN_FMT), Fmt, Name); - longjmp(g->jumper[g->jump_level], 11); - } // endswitch Fmt + throw 31; + } // endswitch Fmt if (Eds && Status) { p = tdbp->To_Line + Deplac; diff --git a/storage/connect/tabfix.h b/storage/connect/tabfix.h index 4b9f9689992..53c0af1c422 100644 --- a/storage/connect/tabfix.h +++ b/storage/connect/tabfix.h @@ -65,7 +65,7 @@ class DllExport BINCOL : public DOSCOL { friend class TDBFIX; public: // Constructors - BINCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am = "BIN"); + BINCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PCSZ am = "BIN"); BINCOL(BINCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation @@ -108,7 +108,7 @@ class TDBDCL : public TDBCAT { {return DBFColumns(g, ((PTABDEF)To_Def)->GetPath(), Fn, false);} // Members - char *Fn; // The DBF file (path) name + PCSZ Fn; // The DBF file (path) name }; // end of class TDBOCL diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp index 1a75d572ecd..13fbe7d33dd 100644 --- a/storage/connect/tabfmt.cpp +++ b/storage/connect/tabfmt.cpp @@ -81,7 +81,7 @@ USETEMP UseTemp(void); /* of types (TYPE_STRING < TYPE_DOUBLE < TYPE_INT) (1 < 2 < 7). */ /* If these values are changed, this will have to be revisited. 
*/ /***********************************************************************/ -PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info) +PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info) { static int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_STRING, TYPE_INT, TYPE_INT, TYPE_SHORT}; @@ -153,7 +153,7 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info) tdp->Lrecl = 4096; tdp->Multiple = GetIntegerTableOption(g, topt, "Multiple", 0); - p = GetStringTableOption(g, topt, "Separator", ","); + p = (char*)GetStringTableOption(g, topt, "Separator", ","); tdp->Sep = (strlen(p) == 2 && p[0] == '\\' && p[1] == 't') ? '\t' : *p; #if defined(__WIN__) @@ -167,7 +167,7 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info) sep = tdp->Sep; tdp->Quoted = GetIntegerTableOption(g, topt, "Quoted", -1); - p = GetStringTableOption(g, topt, "Qchar", ""); + p = (char*)GetStringTableOption(g, topt, "Qchar", ""); tdp->Qot = *p; if (tdp->Qot && tdp->Quoted < 0) @@ -517,7 +517,7 @@ PTDB CSVDEF::GetTable(PGLOBAL g, MODE mode) /*******************************************************************/ if (Zipped) { #if defined(ZIP_SUPPORT) - if (mode == MODE_READ || mode == MODE_ANY) { + if (mode == MODE_READ || mode == MODE_ANY || mode == MODE_ALTER) { txfp = new(g) UNZFAM(this); } else if (mode == MODE_INSERT) { txfp = new(g) ZIPFAM(this); @@ -1435,8 +1435,8 @@ void CSVCOL::ReadColumn(PGLOBAL g) if (rc == RC_EF) sprintf(g->Message, MSG(INV_DEF_READ), rc); - longjmp(g->jumper[g->jump_level], 34); - } // endif + throw 34; + } // endif if (tdbp->Mode != MODE_UPDATE) { int colen = Long; // Column length @@ -1453,8 +1453,8 @@ void CSVCOL::ReadColumn(PGLOBAL g) Long = colen; // Restore column length sprintf(g->Message, MSG(FLD_TOO_LNG_FOR), Fldnum + 1, Name, To_Tdb->RowNumber(g), tdbp->GetFile(g)); - longjmp(g->jumper[g->jump_level], 34); - } // endif Long + throw 34; + } // endif Long // Now do the reading DOSCOL::ReadColumn(g); @@ -1516,8 +1516,8 @@ void 
CSVCOL::WriteColumn(PGLOBAL g) if ((signed)strlen(p) > flen) { sprintf(g->Message, MSG(BAD_FLD_LENGTH), Name, p, flen, tdbp->RowNumber(g), tdbp->GetFile(g)); - longjmp(g->jumper[g->jump_level], 34); - } else if (Dsp) + throw 34; + } else if (Dsp) for (int i = 0; p[i]; i++) if (p[i] == '.') p[i] = Dsp; @@ -1532,8 +1532,8 @@ void CSVCOL::WriteColumn(PGLOBAL g) if (Fldnum < 0) { // This can happen for wrong offset value in XDB files sprintf(g->Message, MSG(BAD_FIELD_RANK), Fldnum + 1, Name); - longjmp(g->jumper[g->jump_level], 34); - } else + throw 34; + } else strncpy(tdbp->Field[Fldnum], p, flen); if (trace > 1) diff --git a/storage/connect/tabfmt.h b/storage/connect/tabfmt.h index e5655435be7..396bba568ff 100644 --- a/storage/connect/tabfmt.h +++ b/storage/connect/tabfmt.h @@ -13,7 +13,7 @@ typedef class TDBFMT *PTDBFMT; /***********************************************************************/ /* Functions used externally. */ /***********************************************************************/ -PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info); +PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info); /***********************************************************************/ /* CSV table. 
*/ @@ -21,7 +21,7 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info); class DllExport CSVDEF : public DOSDEF { /* Logical table description */ friend class TDBCSV; friend class TDBCCL; - friend PQRYRES CSVColumns(PGLOBAL, char *, PTOS, bool); + friend PQRYRES CSVColumns(PGLOBAL, PCSZ, PTOS, bool); public: // Constructor CSVDEF(void); @@ -53,7 +53,7 @@ public: class DllExport TDBCSV : public TDBDOS { friend class CSVCOL; friend class MAPFAM; - friend PQRYRES CSVColumns(PGLOBAL, char *, PTOS, bool); + friend PQRYRES CSVColumns(PGLOBAL, PCSZ, PTOS, bool); public: // Constructor TDBCSV(PCSVDEF tdp, PTXF txfp); diff --git a/storage/connect/tabjdbc.cpp b/storage/connect/tabjdbc.cpp index 5431e35e0ec..7c82a2fc138 100644 --- a/storage/connect/tabjdbc.cpp +++ b/storage/connect/tabjdbc.cpp @@ -227,41 +227,8 @@ bool JDBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) if (rc == RC_FX) // Error return true; -//else if (rc == RC_OK) { // Url was not a server name -// Tabname = GetStringCatInfo(g, "Name", -// (Catfunc & (FNC_TABLE | FNC_COL)) ? 
NULL : Name); -// Tabname = GetStringCatInfo(g, "Tabname", Tabname); -// Username = GetStringCatInfo(g, "User", NULL); -// Password = GetStringCatInfo(g, "Password", NULL); -//} // endif rc - -//if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL))) -// Read_Only = true; Wrapname = GetStringCatInfo(g, "Wrapper", NULL); -//Prop = GetStringCatInfo(g, "Properties", NULL); -//Tabcat = GetStringCatInfo(g, "Qualifier", NULL); -//Tabcat = GetStringCatInfo(g, "Catalog", Tabcat); -//Tabschema = GetStringCatInfo(g, "Dbname", NULL); -//Tabschema = GetStringCatInfo(g, "Schema", Tabschema); - -//if (Catfunc == FNC_COL) -// Colpat = GetStringCatInfo(g, "Colpat", NULL); - -//if (Catfunc == FNC_TABLE) -// Tabtyp = GetStringCatInfo(g, "Tabtype", NULL); - -//Qrystr = GetStringCatInfo(g, "Query_String", "?"); -//Sep = GetStringCatInfo(g, "Separator", NULL); -//Xsrc = GetBoolCatInfo("Execsrc", FALSE); -//Maxerr = GetIntCatInfo("Maxerr", 0); -//Maxres = GetIntCatInfo("Maxres", 0); -//Quoted = GetIntCatInfo("Quoted", 0); -// Cto= GetIntCatInfo("ConnectTimeout", DEFAULT_LOGIN_TIMEOUT); -// Qto= GetIntCatInfo("QueryTimeout", DEFAULT_QUERY_TIMEOUT); -//Scrollable = GetBoolCatInfo("Scrollable", false); -//Memory = GetIntCatInfo("Memory", 0); -//Pseudo = 2; // FILID is Ok but not ROWID return false; } // end of DefineAM @@ -341,9 +308,6 @@ TDBJDBC::TDBJDBC(PJDBCDEF tdp) : TDBEXT(tdp) WrapName = tdp->Wrapname; Ops.User = tdp->Username; Ops.Pwd = tdp->Password; -// Ops.Properties = tdp->Prop; -// Ops.Cto = tdp->Cto; -// Ops.Qto = tdp->Qto; Ops.Scrollable = tdp->Scrollable; } else { WrapName = NULL; @@ -351,13 +315,9 @@ TDBJDBC::TDBJDBC(PJDBCDEF tdp) : TDBEXT(tdp) Ops.Url = NULL; Ops.User = NULL; Ops.Pwd = NULL; -// Ops.Properties = NULL; -// Ops.Cto = DEFAULT_LOGIN_TIMEOUT; -// Ops.Qto = DEFAULT_QUERY_TIMEOUT; Ops.Scrollable = false; } // endif tdp -//Ncol = 0; Prepared = false; Werr = false; Rerr = false; @@ -370,7 +330,6 @@ TDBJDBC::TDBJDBC(PTDBJDBC tdbp) : TDBEXT(tdbp) Cnp = tdbp->Cnp; 
WrapName = tdbp->WrapName; Ops = tdbp->Ops; -//Ncol = tdbp->Ncol; Prepared = tdbp->Prepared; Werr = tdbp->Werr; Rerr = tdbp->Rerr; @@ -406,10 +365,11 @@ PCOL TDBJDBC::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) /***********************************************************************/ bool TDBJDBC::MakeInsert(PGLOBAL g) { - char *schmp = NULL, *catp = NULL, buf[NAM_LEN * 3]; + PCSZ schmp = NULL; + char *catp = NULL, buf[NAM_LEN * 3]; int len = 0; uint pos; - bool b = false, oom = false; + bool b = false; PTABLE tablep = To_Table; PCOL colp; @@ -446,32 +406,32 @@ bool TDBJDBC::MakeInsert(PGLOBAL g) Query = new(g)STRING(g, len, "INSERT INTO "); if (catp) { - oom |= Query->Append(catp); + Query->Append(catp); if (schmp) { - oom |= Query->Append('.'); - oom |= Query->Append(schmp); + Query->Append('.'); + Query->Append(schmp); } // endif schmp - oom |= Query->Append('.'); + Query->Append('.'); } else if (schmp) { - oom |= Query->Append(schmp); - oom |= Query->Append('.'); + Query->Append(schmp); + Query->Append('.'); } // endif schmp if (Quote) { // Put table name between identifier quotes in case in contains blanks - oom |= Query->Append(Quote); - oom |= Query->Append(buf); - oom |= Query->Append(Quote); + Query->Append(Quote); + Query->Append(buf); + Query->Append(Quote); } else - oom |= Query->Append(buf); + Query->Append(buf); - oom |= Query->Append('('); + Query->Append('('); for (colp = Columns; colp; colp = colp->GetNext()) { if (b) - oom |= Query->Append(", "); + Query->Append(", "); else b = true; @@ -480,15 +440,15 @@ bool TDBJDBC::MakeInsert(PGLOBAL g) if (Quote) { // Put column name between identifier quotes in case in contains blanks - oom |= Query->Append(Quote); - oom |= Query->Append(buf); - oom |= Query->Append(Quote); + Query->Append(Quote); + Query->Append(buf); + Query->Append(Quote); } else - oom |= Query->Append(buf); + Query->Append(buf); } // endfor colp - if ((oom |= Query->Append(") VALUES ("))) { + if ((Query->Append(") VALUES ("))) { 
strcpy(g->Message, "MakeInsert: Out of memory"); return true; } else // in case prepared statement fails @@ -496,9 +456,9 @@ bool TDBJDBC::MakeInsert(PGLOBAL g) // Make prepared statement for (int i = 0; i < Nparm; i++) - oom |= Query->Append("?,"); + Query->Append("?,"); - if (oom) { + if (Query->IsTruncated()) { strcpy(g->Message, "MakeInsert: Out of memory"); return true; } else @@ -737,18 +697,12 @@ bool TDBJDBC::SetRecpos(PGLOBAL g, int recpos) { if (Jcp->m_Full) { Fpos = 0; -// CurNum = 0; CurNum = 1; } else if (Memory == 3) { -// Fpos = recpos; -// CurNum = -1; Fpos = 0; CurNum = recpos; } else if (Ops.Scrollable) { // Is new position in the current row set? -// if (recpos >= Curpos && recpos < Curpos + Rbuf) { -// CurNum = recpos - Curpos; -// Fpos = 0; if (recpos > 0 && recpos <= Rbuf) { CurNum = recpos; Fpos = recpos; @@ -797,7 +751,7 @@ bool TDBJDBC::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr) To_CondFil->Body= (char*)PlugSubAlloc(g, NULL, 0); *To_CondFil->Body= 0; - if ((To_CondFil = hc->CheckCond(g, To_CondFil, To_CondFil->Cond))) + if ((To_CondFil = hc->CheckCond(g, To_CondFil, Cond))) PlugSubAlloc(g, NULL, strlen(To_CondFil->Body) + 1); } // endif active_index @@ -911,7 +865,6 @@ int TDBJDBC::WriteDB(PGLOBAL g) // an insert query for each line to insert uint len = Query->GetLength(); char buf[64]; - bool oom = false; // Make the Insert command value list for (PCOL colp = Columns; colp; colp = colp->GetNext()) { @@ -919,28 +872,28 @@ int TDBJDBC::WriteDB(PGLOBAL g) char *s = colp->GetValue()->GetCharString(buf); if (colp->GetResultType() == TYPE_STRING) - oom |= Query->Append_quoted(s); + Query->Append_quoted(s); else if (colp->GetResultType() == TYPE_DATE) { DTVAL *dtv = (DTVAL*)colp->GetValue(); if (dtv->IsFormatted()) - oom |= Query->Append_quoted(s); + Query->Append_quoted(s); else - oom |= Query->Append(s); + Query->Append(s); } else - oom |= Query->Append(s); + Query->Append(s); } else - oom |= Query->Append("NULL"); + 
Query->Append("NULL"); - oom |= Query->Append(','); + Query->Append(','); } // endfor colp - if (unlikely(oom)) { + if (unlikely(Query->IsTruncated())) { strcpy(g->Message, "WriteDB: Out of memory"); return RC_FX; - } // endif oom + } // endif Query Query->RepLast(')'); @@ -990,11 +943,6 @@ int TDBJDBC::DeleteDB(PGLOBAL g, int irc) /***********************************************************************/ void TDBJDBC::CloseDB(PGLOBAL g) { - //if (To_Kindex) { - // To_Kindex->Close(); - // To_Kindex = NULL; - // } // endif - if (Jcp) Jcp->Close(); @@ -1019,7 +967,7 @@ void TDBJDBC::CloseDB(PGLOBAL g) /***********************************************************************/ /* JDBCCOL public constructor. */ /***********************************************************************/ -JDBCCOL::JDBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) +JDBCCOL::JDBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) : EXTCOL(cdp, tdbp, cprec, i, am) { } // end of JDBCCOL constructor @@ -1039,54 +987,6 @@ JDBCCOL::JDBCCOL(JDBCCOL *col1, PTDB tdbp) : EXTCOL(col1, tdbp) { } // end of JDBCCOL copy constructor -#if 0 -/***********************************************************************/ -/* SetBuffer: prepare a column block for write operation. */ -/***********************************************************************/ -bool JDBCCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check) -{ - if (!(To_Val = value)) { - sprintf(g->Message, MSG(VALUE_ERROR), Name); - return true; - } else if (Buf_Type == value->GetType()) { - // Values are of the (good) column type - if (Buf_Type == TYPE_DATE) { - // If any of the date values is formatted - // output format must be set for the receiving table - if (GetDomain() || ((DTVAL *)value)->IsFormatted()) - goto newval; // This will make a new value; - - } else if (Buf_Type == TYPE_DOUBLE) - // Float values must be written with the correct (column) precision - // Note: maybe this should be forced by ShowValue instead of this ? 
- value->SetPrec(GetScale()); - - Value = value; // Directly access the external value - } else { - // Values are not of the (good) column type - if (check) { - sprintf(g->Message, MSG(TYPE_VALUE_ERR), Name, - GetTypeName(Buf_Type), GetTypeName(value->GetType())); - return true; - } // endif check - - newval: - if (InitValue(g)) // Allocate the matching value block - return true; - - } // endif's Value, Buf_Type - - // Because Colblk's have been made from a copy of the original TDB in - // case of Update, we must reset them to point to the original one. - if (To_Tdb->GetOrig()) - To_Tdb = (PTDB)To_Tdb->GetOrig(); - - // Set the Column - Status = (ok) ? BUF_EMPTY : BUF_NO; - return false; -} // end of SetBuffer -#endif // 0 - /***********************************************************************/ /* ReadColumn: when SQLFetch is used there is nothing to do as the */ /* column buffer was bind to the record set. This is also the case */ @@ -1196,26 +1096,6 @@ PCMD TDBXJDC::MakeCMD(PGLOBAL g) return xcmd; } // end of MakeCMD -#if 0 -/***********************************************************************/ -/* JDBC Bind Parameter function. */ -/***********************************************************************/ -bool TDBXJDC::BindParameters(PGLOBAL g) -{ - PJDBCCOL colp; - - for (colp = (PJDBCCOL)Columns; colp; colp = (PJDBCCOL)colp->Next) { - colp->AllocateBuffers(g, 0); - - if (Jcp->BindParam(colp)) - return true; - - } // endfor colp - - return false; -} // end of BindParameters -#endif // 0 - /***********************************************************************/ /* XDBC GetMaxSize: returns table size (not always one row). */ /***********************************************************************/ @@ -1332,8 +1212,8 @@ int TDBXJDC::DeleteDB(PGLOBAL g, int irc) /***********************************************************************/ /* JSRCCOL public constructor. 
*/ /***********************************************************************/ -JSRCCOL::JSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) - : JDBCCOL(cdp, tdbp, cprec, i, am) +JSRCCOL::JSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) + : JDBCCOL(cdp, tdbp, cprec, i, am) { // Set additional JDBC access method information for column. Flag = cdp->GetOffset(); @@ -1416,17 +1296,3 @@ PQRYRES TDBJDBCL::GetResult(PGLOBAL g) { return JDBCColumns(g, Schema, Tab, Colpat, Maxres, false, &Ops); } // end of GetResult - -#if 0 -/* ---------------------------TDBJSRC class -------------------------- */ - -/***********************************************************************/ -/* GetResult: Get the list of JDBC data sources. */ -/***********************************************************************/ -PQRYRES TDBJSRC::GetResult(PGLOBAL g) -{ - return JDBCDataSources(g, Maxres, false); -} // end of GetResult - -/* ------------------------ End of TabJDBC --------------------------- */ -#endif // 0 diff --git a/storage/connect/tabjdbc.h b/storage/connect/tabjdbc.h index 46d2073e923..d8ec65d02d8 100644 --- a/storage/connect/tabjdbc.h +++ b/storage/connect/tabjdbc.h @@ -1,7 +1,7 @@ /*************** Tabjdbc H Declares Source Code File (.H) **************/ -/* Name: TABJDBC.H Version 1.0 */ +/* Name: TABJDBC.H Version 1.1 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2016 */ +/* (C) Copyright to the author Olivier BERTRAND 2016-2017 */ /* */ /* This file contains the TDBJDBC classes declares. */ /***********************************************************************/ @@ -14,9 +14,6 @@ typedef class TDBJDBC *PTDBJDBC; typedef class JDBCCOL *PJDBCCOL; typedef class TDBXJDC *PTDBXJDC; typedef class JSRCCOL *PJSRCCOL; -//typedef class TDBOIF *PTDBOIF; -//typedef class OIFCOL *POIFCOL; -//typedef class TDBJSRC *PTDBJSRC; /***********************************************************************/ /* JDBC table. 
*/ @@ -68,20 +65,14 @@ public: // Methods virtual PTDB Clone(PTABS t); -//virtual int GetRecpos(void); virtual bool SetRecpos(PGLOBAL g, int recpos); -//virtual PSZ GetFile(PGLOBAL g); -//virtual void SetFile(PGLOBAL g, PSZ fn); virtual void ResetSize(void); -//virtual int GetAffectedRows(void) {return AftRows;} - virtual PSZ GetServer(void) { return "JDBC"; } + virtual PCSZ GetServer(void) { return "JDBC"; } virtual int Indexable(void) { return 2; } // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); virtual int Cardinality(PGLOBAL g); -//virtual int GetMaxSize(PGLOBAL g); -//virtual int GetProgMax(PGLOBAL g); virtual bool OpenDB(PGLOBAL g); virtual int ReadDB(PGLOBAL g); virtual int WriteDB(PGLOBAL g); @@ -91,21 +82,14 @@ public: protected: // Internal functions -//int Decode(char *utf, char *buf, size_t n); -//bool MakeSQL(PGLOBAL g, bool cnt); bool MakeInsert(PGLOBAL g); -//virtual bool MakeCommand(PGLOBAL g); -//bool MakeFilter(PGLOBAL g, bool c); bool SetParameters(PGLOBAL g); -//char *MakeUpdate(PGLOBAL g); -//char *MakeDelete(PGLOBAL g); // Members JDBConn *Jcp; // Points to a JDBC connection class JDBCCOL *Cnp; // Points to count(*) column JDBCPARM Ops; // Additional parameters char *WrapName; // Points to Java wrapper name -//int Ncol; // The column number bool Prepared; // True when using prepared statement bool Werr; // Write error bool Rerr; // Rewind error @@ -119,7 +103,7 @@ class JDBCCOL : public EXTCOL { friend class TDBJDBC; public: // Constructors - JDBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "JDBC"); + JDBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "JDBC"); JDBCCOL(JDBCCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation @@ -152,12 +136,6 @@ public: virtual AMT GetAmType(void) {return TYPE_AM_XDBC;} // Methods - //virtual int GetRecpos(void); - //virtual PSZ GetFile(PGLOBAL g); - //virtual void SetFile(PGLOBAL g, PSZ fn); - //virtual void ResetSize(void); - 
//virtual int GetAffectedRows(void) {return AftRows;} - //virtual PSZ GetServer(void) {return "JDBC";} // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); @@ -172,7 +150,6 @@ public: protected: // Internal functions PCMD MakeCMD(PGLOBAL g); - //bool BindParameters(PGLOBAL g); // Members PCMD Cmdlist; // The commands to execute @@ -188,7 +165,7 @@ class JSRCCOL : public JDBCCOL { friend class TDBXJDC; public: // Constructors - JSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "JDBC"); + JSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "JDBC"); // Implementation virtual int GetAmType(void) {return TYPE_AM_JDBC;} @@ -196,7 +173,7 @@ public: // Methods virtual void ReadColumn(PGLOBAL g); virtual void WriteColumn(PGLOBAL g); - // void Print(PGLOBAL g, FILE *, uint); + // void Printf(PGLOBAL g, FILE *, uint); protected: // Members @@ -233,9 +210,9 @@ protected: virtual PQRYRES GetResult(PGLOBAL g); // Members - char *Schema; // Points to schema name or NULL - char *Tab; // Points to JDBC table name or pattern - char *Tabtype; // Points to JDBC table type + PCSZ Schema; // Points to schema name or NULL + PCSZ Tab; // Points to JDBC table name or pattern + PCSZ Tabtype; // Points to JDBC table type JDBCPARM Ops; // Additional parameters }; // end of class TDBJTB @@ -252,24 +229,7 @@ protected: virtual PQRYRES GetResult(PGLOBAL g); // Members - char *Colpat; // Points to catalog column pattern + PCSZ Colpat; // Points to catalog column pattern }; // end of class TDBJDBCL -#if 0 -/***********************************************************************/ -/* This is the class declaration for the Data Sources catalog table. 
*/ -/***********************************************************************/ -class TDBJSRC : public TDBJDRV { -public: - // Constructor - TDBJSRC(PJDBCDEF tdp) : TDBJDRV(tdp) {} - -protected: - // Specific routines - virtual PQRYRES GetResult(PGLOBAL g); - - // No additional Members -}; // end of class TDBJSRC -#endif // 0 - #endif // !NJDBC diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index 1e11d454cfc..063115c7a60 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -1,6 +1,6 @@ /************* tabjson C++ Program Source Code File (.CPP) *************/ -/* PROGRAM NAME: tabjson Version 1.3 */ -/* (C) Copyright to the author Olivier BERTRAND 2014 - 2016 */ +/* PROGRAM NAME: tabjson Version 1.4 */ +/* (C) Copyright to the author Olivier BERTRAND 2014 - 2017 */ /* This program are the JSON class DB execution routines. */ /***********************************************************************/ @@ -117,7 +117,9 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info) return NULL; } // endif Fn - tdp->Database = SetPath(g, db); + if (!(tdp->Database = SetPath(g, db))) + return NULL; + tdp->Objname = GetStringTableOption(g, topt, "Object", NULL); tdp->Base = GetIntegerTableOption(g, topt, "Base", 0) ? 
1 : 0; tdp->Pretty = GetIntegerTableOption(g, topt, "Pretty", 2); @@ -151,7 +153,7 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info) if (tdp->Zipped) { #if defined(ZIP_SUPPORT) - tjnp = new(g)TDBJSN(tdp, new(g)UNZFAM(tdp)); + tjnp = new(g)TDBJSN(tdp, new(g) UNZFAM(tdp)); #else // !ZIP_SUPPORT sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); return NULL; @@ -168,7 +170,7 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info) G->Sarea_Size = tdp->Lrecl * 10; G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size); PlugSubSet(G, G->Sarea, G->Sarea_Size); - G->jump_level = -1; + G->jump_level = 0; tjnp->SetG(G); #else tjnp->SetG(g); @@ -262,8 +264,13 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info) break; if (jcp) { - if (jcp->Type != jcol.Type) - jcp->Type = TYPE_STRING; + if (jcp->Type != jcol.Type) { + if (jcp->Type == TYPE_UNKNOWN) + jcp->Type = jcol.Type; + else if (jcol.Type != TYPE_UNKNOWN) + jcp->Type = TYPE_STRING; + + } // endif Type if (*fmt && (!jcp->Fmt || strlen(jcp->Fmt) < strlen(fmt))) { jcp->Fmt = PlugDup(g, fmt); @@ -336,7 +343,7 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info) skipit: if (trace) - htrc("CSVColumns: n=%d len=%d\n", n, length[0]); + htrc("JSONColumns: n=%d len=%d\n", n, length[0]); /*********************************************************************/ /* Allocate the structures used to refer to the result set. */ @@ -417,7 +424,7 @@ bool JSONDEF::DefineAM(PGLOBAL g, LPCSTR, int poff) Pretty = GetIntCatInfo("Pretty", 2); Limit = GetIntCatInfo("Limit", 10); Base = GetIntCatInfo("Base", 0) ? 
1 : 0; - return DOSDEF::DefineAM(g, "DOS", poff); + return DOSDEF::DefineAM(g, "DOS", poff); } // end of DefineAM /***********************************************************************/ @@ -441,7 +448,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) if (Zipped) { #if defined(ZIP_SUPPORT) - if (m == MODE_READ || m == MODE_UPDATE) { + if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) { txfp = new(g) UNZFAM(this); } else if (m == MODE_INSERT) { txfp = new(g) ZIPFAM(this); @@ -463,7 +470,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "GZ"); return NULL; #endif // !GZ_SUPPORT - } else if (map) + } else if (map) txfp = new(g) MAPFAM(this); else txfp = new(g) DOSFAM(this); @@ -478,7 +485,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) G->Sarea_Size = Lrecl * 10; G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size); PlugSubSet(G, G->Sarea, G->Sarea_Size); - G->jump_level = -1; + G->jump_level = 0; ((TDBJSN*)tdbp)->G = G; #else ((TDBJSN*)tdbp)->G = g; @@ -486,7 +493,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) } else { if (Zipped) { #if defined(ZIP_SUPPORT) - if (m == MODE_READ || m == MODE_UPDATE) { + if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) { txfp = new(g) UNZFAM(this); } else if (m == MODE_INSERT) { strcpy(g->Message, "INSERT supported only for zipped JSON when pretty=0"); @@ -535,7 +542,7 @@ TDBJSN::TDBJSN(PJDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp) } else { Jmode = MODE_OBJECT; Objname = NULL; - Xcol = NULL; + Xcol = NULL; Limit = 1; Pretty = 0; B = 0; @@ -695,6 +702,9 @@ bool TDBJSN::OpenDB(PGLOBAL g) return true; } // endswitch Jmode + if (Xcol && Txfp->GetAmType() != TYPE_AM_MGO) + To_Filter = NULL; // Imcompatible + } // endif Use return TDBDOS::OpenDB(g); @@ -865,24 +875,21 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp) } // end of PrepareWriting - /***********************************************************************/ - /* WriteDB: Data Base write routine for DOS access method. 
*/ - /***********************************************************************/ - int TDBJSN::WriteDB(PGLOBAL g) +/***********************************************************************/ +/* WriteDB: Data Base write routine for DOS access method. */ +/***********************************************************************/ +int TDBJSN::WriteDB(PGLOBAL g) { int rc = TDBDOS::WriteDB(g); #if USE_G - if (rc == RC_FX) - strcpy(g->Message, G->Message); - PlugSubSet(G, G->Sarea, G->Sarea_Size); #endif Row->Clear(); return rc; } // end of WriteDB - /* ---------------------------- JSONCOL ------------------------------ */ +/* ---------------------------- JSONCOL ------------------------------ */ /***********************************************************************/ /* JSONCOL public constructor. */ @@ -1147,11 +1154,61 @@ bool JSONCOL::ParseJpath(PGLOBAL g) } // end of ParseJpath /***********************************************************************/ +/* Get Jpath converted to Mongo path. */ +/***********************************************************************/ +char *JSONCOL::GetJpath(PGLOBAL g, bool proj) +{ + if (Jpath) { + char *p1, *p2, *mgopath; + int i = 0; + + if (strcmp(Jpath, "*")) + mgopath = PlugDup(g, Jpath); + else + return NULL; + + for (p1 = p2 = mgopath; *p1; p1++) + if (i) { // Inside [] + if (isdigit(*p1)) { + if (!proj) + *p2++ = *p1; + + i = 2; + } else if (*p1 == ']' && i == 2) { + if (proj && *(p1 + 1) == ':') + p1++; + + i = 0; + } else if (proj) + i = 2; + else + return NULL; + + } else switch (*p1) { + case ':': *p2++ = '.'; break; + case '[': i = 1; break; + case '*': + if (*(p2 - 1) == '.' && !*(p1 + 1)) { + p2--; // Suppress last :* + break; + } // endif p2 + + default: *p2++ = *p1; break; + } // endswitch p1; + + *p2 = 0; + return mgopath; + } else + return NULL; + +} // end of GetJpath + +/***********************************************************************/ /* MakeJson: Serialize the json item and set value to it. 
*/ /***********************************************************************/ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp) - { - if (Value->IsTypeNum()) { +{ + if (Value->IsTypeNum()) { strcpy(g->Message, "Cannot make Json for a numeric column"); Value->Reset(); } else @@ -1171,7 +1228,8 @@ void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n) case TYPE_INTG: case TYPE_BINT: case TYPE_DBL: - vp->SetValue_pval(val->GetValue()); + case TYPE_DATE: + vp->SetValue_pval(val->GetValue()); break; case TYPE_BOOL: if (vp->IsTypeNum()) @@ -1190,11 +1248,14 @@ void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n) // } // endif Type default: - vp->Reset(); - } // endswitch Type + vp->Reset(); + vp->SetNull(true); + } // endswitch Type - } else - vp->Reset(); + } else { + vp->Reset(); + vp->SetNull(true); + } // endif val } // end of SetJsonValue @@ -1207,8 +1268,8 @@ void JSONCOL::ReadColumn(PGLOBAL g) Value->SetValue_pval(GetColumnValue(g, Tjp->Row, 0)); // Set null when applicable - if (Nullable) - Value->SetNull(Value->IsZero()); + if (!Nullable) + Value->SetNull(false); } // end of ReadColumn @@ -1289,8 +1350,8 @@ PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n) if (!(jvp = arp->GetValue((Nodes[n].Rx = Nodes[n].Nx)))) { strcpy(g->Message, "Logical error expanding array"); - longjmp(g->jumper[g->jump_level], 666); - } // endif jvp + throw 666; + } // endif jvp if (n < Nod - 1 && jvp->GetJson()) { jval.SetValue(GetColumnValue(g, jvp->GetJson(), n + 1)); @@ -1475,8 +1536,8 @@ void JSONCOL::WriteColumn(PGLOBAL g) { if (Xpd && Tjp->Pretty < 2) { strcpy(g->Message, "Cannot write expanded column when Pretty is not 2"); - longjmp(g->jumper[g->jump_level], 666); - } // endif Xpd + throw 666; + } // endif Xpd /*********************************************************************/ /* Check whether this node must be written. 
*/ @@ -1510,8 +1571,8 @@ void JSONCOL::WriteColumn(PGLOBAL g) if (!(jsp = ParseJson(G, s, (int)strlen(s)))) { strcpy(g->Message, s); - longjmp(g->jumper[g->jump_level], 666); - } // endif jsp + throw 666; + } // endif jsp if (arp) { if (Nod > 1 && Nodes[Nod-2].Op == OP_EQ) @@ -1530,9 +1591,10 @@ void JSONCOL::WriteColumn(PGLOBAL g) break; } // endif Op - // Passthru + // fall through case TYPE_DATE: case TYPE_INT: + case TYPE_TINY: case TYPE_SHORT: case TYPE_BIGINT: case TYPE_DOUBLE: @@ -1860,8 +1922,11 @@ bool TDBJSON::OpenDB(PGLOBAL g) return true; } // endswitch Jmode - Use = USE_OPEN; - return false; + if (Xcol) + To_Filter = NULL; // Imcompatible + + Use = USE_OPEN; + return false; } // end of OpenDB /***********************************************************************/ @@ -1871,7 +1936,7 @@ int TDBJSON::ReadDB(PGLOBAL) { int rc; - N++; + N++; if (NextSame) { SameRow = NextSame; diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h index 924ce387900..c16cf6846b6 100644 --- a/storage/connect/tabjson.h +++ b/storage/connect/tabjson.h @@ -1,7 +1,7 @@ /*************** tabjson H Declares Source Code File (.H) **************/ -/* Name: tabjson.h Version 1.1 */ +/* Name: tabjson.h Version 1.3 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2014 - 2015 */ +/* (C) Copyright to the author Olivier BERTRAND 2014 - 2017 */ /* */ /* This file contains the JSON classes declares. */ /***********************************************************************/ @@ -32,12 +32,12 @@ typedef struct _jnode { /***********************************************************************/ /* JSON table. 
*/ /***********************************************************************/ -class JSONDEF : public DOSDEF { /* Table description */ +class DllExport JSONDEF : public DOSDEF { /* Table description */ friend class TDBJSON; friend class TDBJSN; friend class TDBJCL; friend PQRYRES JSONColumns(PGLOBAL, char*, PTOS, bool); - public: +public: // Constructor JSONDEF(void); @@ -51,12 +51,12 @@ class JSONDEF : public DOSDEF { /* Table description */ protected: // Members JMODE Jmode; /* MODE_OBJECT by default */ - char *Objname; /* Name of first level object */ - char *Xcol; /* Name of expandable column */ + PCSZ Objname; /* Name of first level object */ + PCSZ Xcol; /* Name of expandable column */ int Limit; /* Limit of multiple values */ int Pretty; /* Depends on file structure */ int Level; /* Used for catalog table */ - int Base; /* Tne array index base */ + int Base; /* The array index base */ bool Strict; /* Strict syntax checking */ }; // end of JSONDEF @@ -66,7 +66,7 @@ class JSONDEF : public DOSDEF { /* Table description */ /* This is the JSN Access Method class declaration. */ /* The table is a DOS file, each record being a JSON object. */ /***********************************************************************/ -class TDBJSN : public TDBDOS { +class DllExport TDBJSN : public TDBDOS { friend class JSONCOL; friend class JSONDEF; public: @@ -87,6 +87,8 @@ public: virtual PCOL InsertSpecialColumn(PCOL colp); virtual int RowNumber(PGLOBAL g, bool b = FALSE) {return (b) ? 
M : N;} + virtual bool CanBeFiltered(void) + {return Txfp->GetAmType() == TYPE_AM_MGO || !Xcol;} // Database routines virtual int Cardinality(PGLOBAL g); @@ -107,8 +109,8 @@ public: PJSON Val; // The value of the current row PJCOL Colp; // The multiple column JMODE Jmode; // MODE_OBJECT by default - char *Objname; // The table object name - char *Xcol; // Name of expandable column + PCSZ Objname; // The table object name + PCSZ Xcol; // Name of expandable column int Fpos; // The current row index int N; // The current Rownum int M; // Index of multiple value @@ -127,9 +129,10 @@ public: /***********************************************************************/ /* Class JSONCOL: JSON access method column descriptor. */ /***********************************************************************/ -class JSONCOL : public DOSCOL { +class DllExport JSONCOL : public DOSCOL { friend class TDBJSN; friend class TDBJSON; + friend class MGOFAM; public: // Constructors JSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i); @@ -139,20 +142,21 @@ class JSONCOL : public DOSCOL { virtual int GetAmType(void) {return Tjp->GetAmType();} // Methods - virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check); - bool ParseJpath(PGLOBAL g); - virtual void ReadColumn(PGLOBAL g); - virtual void WriteColumn(PGLOBAL g); + virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check); + bool ParseJpath(PGLOBAL g); + char *GetJpath(PGLOBAL g, bool proj); + virtual void ReadColumn(PGLOBAL g); + virtual void WriteColumn(PGLOBAL g); protected: - bool CheckExpand(PGLOBAL g, int i, PSZ nm, bool b); - bool SetArrayOptions(PGLOBAL g, char *p, int i, PSZ nm); - PVAL GetColumnValue(PGLOBAL g, PJSON row, int i); - PVAL ExpandArray(PGLOBAL g, PJAR arp, int n); - PVAL CalculateArray(PGLOBAL g, PJAR arp, int n); - PVAL MakeJson(PGLOBAL g, PJSON jsp); - void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n); - PJSON GetRow(PGLOBAL g); + bool CheckExpand(PGLOBAL g, int i, PSZ nm, bool b); + 
bool SetArrayOptions(PGLOBAL g, char *p, int i, PSZ nm); + PVAL GetColumnValue(PGLOBAL g, PJSON row, int i); + PVAL ExpandArray(PGLOBAL g, PJAR arp, int n); + PVAL CalculateArray(PGLOBAL g, PJAR arp, int n); + PVAL MakeJson(PGLOBAL g, PJSON jsp); + void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n); + PJSON GetRow(PGLOBAL g); // Default constructor not to be used JSONCOL(void) {} @@ -174,7 +178,7 @@ class JSONCOL : public DOSCOL { /***********************************************************************/ /* This is the JSON Access Method class declaration. */ /***********************************************************************/ -class TDBJSON : public TDBJSN { +class DllExport TDBJSON : public TDBJSN { friend class JSONDEF; friend class JSONCOL; public: @@ -221,7 +225,7 @@ class TDBJSON : public TDBJSN { /***********************************************************************/ /* This is the class declaration for the JSON catalog table. */ /***********************************************************************/ -class TDBJCL : public TDBCAT { +class DllExport TDBJCL : public TDBCAT { public: // Constructor TDBJCL(PJDEF tdp); @@ -233,4 +237,5 @@ class TDBJCL : public TDBCAT { // Members PTOS Topt; char *Db; + char *Dsn; }; // end of class TDBJCL diff --git a/storage/connect/table.cpp b/storage/connect/table.cpp index 916449be6c6..d39837a7b5a 100644 --- a/storage/connect/table.cpp +++ b/storage/connect/table.cpp @@ -47,6 +47,7 @@ TDB::TDB(PTABDEF tdp) : Tdb_No(++Tnum) To_Orig = NULL; To_Filter = NULL; To_CondFil = NULL; + Cond = NULL; Next = NULL; Name = (tdp) ? 
tdp->GetName() : NULL; To_Table = NULL; @@ -68,6 +69,7 @@ TDB::TDB(PTDB tdbp) : Tdb_No(++Tnum) To_Orig = tdbp; To_Filter = NULL; To_CondFil = NULL; + Cond = NULL; Next = NULL; Name = tdbp->Name; To_Table = tdbp->To_Table; @@ -97,7 +99,7 @@ CHARSET_INFO *TDB::data_charset(void) /***********************************************************************/ /* Return the datapath of the DB this table belongs to. */ /***********************************************************************/ -PSZ TDB::GetPath(void) +PCSZ TDB::GetPath(void) { return To_Def->GetPath(); } // end of GetPath @@ -297,7 +299,7 @@ bool TDB::SetRecpos(PGLOBAL g, int) return true; } // end of SetRecpos -void TDB::Print(PGLOBAL g, FILE *f, uint n) +void TDB::Printf(PGLOBAL g, FILE *f, uint n) { PCOL cp; char m[64]; @@ -313,13 +315,13 @@ void TDB::Print(PGLOBAL g, FILE *f, uint n) fprintf(f, "%s Columns (deg=%d):\n", m, tp->Degree); for (cp = tp->Columns; cp; cp = cp->GetNext()) - cp->Print(g, f, n); + cp->Printf(g, f, n); } /* endfor tp */ } // end of Print -void TDB::Print(PGLOBAL, char *ps, uint) +void TDB::Prints(PGLOBAL, char *ps, uint) { sprintf(ps, "R%d.%s", Tdb_No, Name); } // end of Print diff --git a/storage/connect/tabmac.cpp b/storage/connect/tabmac.cpp index bbaba591540..a28b5d7108c 100644 --- a/storage/connect/tabmac.cpp +++ b/storage/connect/tabmac.cpp @@ -329,7 +329,7 @@ void MACCOL::ReadColumn(PGLOBAL g) n = 0; break; default: - p = ""; + p = PlugDup(g, ""); } // endswitch Flag } else switch (Flag) { diff --git a/storage/connect/tabmul.cpp b/storage/connect/tabmul.cpp index 78adde81d12..5c41f9094ac 100644 --- a/storage/connect/tabmul.cpp +++ b/storage/connect/tabmul.cpp @@ -1,7 +1,7 @@ /************* TabMul C++ Program Source Code File (.CPP) **************/ /* PROGRAM NAME: TABMUL */ /* ------------- */ -/* Version 1.8 */ +/* Version 1.9 */ /* */ /* COPYRIGHT: */ /* ---------- */ @@ -44,6 +44,11 @@ #define __MFC_COMPAT__ // To define min/max as macro #endif //#include <windows.h> +#if 
defined(PATHMATCHSPEC) +#include "Shlwapi.h" +//using namespace std; +#pragma comment(lib,"shlwapi.lib") +#endif // PATHMATCHSPEC #else #if defined(UNIX) #include <fnmatch.h> @@ -124,9 +129,10 @@ bool TDBMUL::InitFileNames(PGLOBAL g) { #define PFNZ 4096 #define FNSZ (_MAX_DRIVE+_MAX_DIR+_MAX_FNAME+_MAX_EXT) - char *pfn[PFNZ]; - char *filename; - int rc, n = 0; + PTDBDIR dirp; + PSZ pfn[PFNZ]; + PSZ filename; + int rc, n = 0; if (trace) htrc("in InitFileName: fn[]=%d\n", FNSZ); @@ -141,115 +147,39 @@ bool TDBMUL::InitFileNames(PGLOBAL g) if (trace) htrc("InitFileName: fn='%s'\n", filename); - if (Mul == 1) { + if (Mul != 2) { /*******************************************************************/ /* To_File is a multiple name with special characters */ /*******************************************************************/ -#if defined(__WIN__) - char drive[_MAX_DRIVE], direc[_MAX_DIR]; - WIN32_FIND_DATA FileData; - HANDLE hSearch; - - _splitpath(filename, drive, direc, NULL, NULL); - - // Start searching files in the target directory. 
- hSearch = FindFirstFile(filename, &FileData); - - if (hSearch == INVALID_HANDLE_VALUE) { - rc = GetLastError(); - - if (rc != ERROR_FILE_NOT_FOUND) { - FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | - FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, GetLastError(), 0, - (LPTSTR)&filename, sizeof(filename), NULL); - sprintf(g->Message, MSG(BAD_FILE_HANDLE), filename); - return true; - } // endif rc - - goto suite; - } // endif hSearch + if (Mul == 1) + dirp = new(g) TDBDIR(PlugDup(g, filename)); + else // Mul == 3 (Subdir) + dirp = new(g) TDBSDR(PlugDup(g, filename)); - while (n < PFNZ) { - if (!(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) { - strcat(strcat(strcpy(filename, drive), direc), FileData.cFileName); - pfn[n++] = PlugDup(g, filename); - } // endif dwFileAttributes - - if (!FindNextFile(hSearch, &FileData)) { - rc = GetLastError(); - - if (rc != ERROR_NO_MORE_FILES) { - sprintf(g->Message, MSG(NEXT_FILE_ERROR), rc); - FindClose(hSearch); - return true; - } // endif rc - - break; - } // endif FindNextFile - - } // endwhile n + if (dirp->OpenDB(g)) + return true; - // Close the search handle. - if (!FindClose(hSearch)) { - strcpy(g->Message, MSG(SRCH_CLOSE_ERR)); - return true; - } // endif FindClose + if (trace && Mul == 3) { + int nf = ((PTDBSDR)dirp)->FindInDir(g); + htrc("Number of files = %d\n", nf); + } // endif trace + while (true) + if ((rc = dirp->ReadDB(g)) == RC_OK) { +#if defined(__WIN__) + strcat(strcpy(filename, dirp->Drive), dirp->Direc); #else // !__WIN__ - struct stat fileinfo; - char fn[FN_REFLEN], direc[FN_REFLEN], pattern[FN_HEADLEN], ftype[FN_EXTLEN]; - DIR *dir; - struct dirent *entry; - - _splitpath(filename, NULL, direc, pattern, ftype); - strcat(pattern, ftype); - - if (trace) - htrc("direc=%s pattern=%s ftype=%s\n", direc, pattern, ftype); - - // Start searching files in the target directory. 
- if (!(dir = opendir(direc))) { - sprintf(g->Message, MSG(BAD_DIRECTORY), direc, strerror(errno)); - - if (trace) - htrc("%s\n", g->Message); - - return true; - } // endif dir - - if (trace) - htrc("dir opened: reading files\n"); - - while ((entry = readdir(dir)) && n < PFNZ) { - strcat(strcpy(fn, direc), entry->d_name); - - if (trace) - htrc("%s read\n", fn); - - if (lstat(fn, &fileinfo) < 0) { - sprintf(g->Message, "%s: %s", fn, strerror(errno)); - return true; - } else if (!S_ISREG(fileinfo.st_mode)) - continue; // Not a regular file (should test for links) - - /*******************************************************************/ - /* Test whether the file name matches the table name filter. */ - /*******************************************************************/ - if (fnmatch(pattern, entry->d_name, 0)) - continue; // Not a match - - strcat(strcpy(filename, direc), entry->d_name); - pfn[n++] = PlugDup(g, filename); - - if (trace) - htrc("Adding pfn[%d] %s\n", n, filename); + strcpy(filename, dirp->Direc); +#endif // !__WIN__ + strcat(strcat(filename, dirp->Fname), dirp->Ftype); + pfn[n++] = PlugDup(g, filename); + } else + break; - } // endwhile readdir + dirp->CloseDB(g); - // Close the dir handle. - closedir(dir); -#endif // !__WIN__ + if (rc == RC_FX) + return true; } else { /*******************************************************************/ @@ -297,10 +227,6 @@ bool TDBMUL::InitFileNames(PGLOBAL g) } // endif Mul -#if defined(__WIN__) - suite: -#endif - if (n) { Filenames = (char**)PlugSubAlloc(g, NULL, n * sizeof(char*)); @@ -581,7 +507,95 @@ void TDBMUL::CloseDB(PGLOBAL g) } // end of CloseDB -/* --------------------------- Class DIRDEF -------------------------- */ +#if 0 +/* ------------------------- Class TDBMSD ---------------------------- */ + + // Method +PTDB TDBMSD::Clone(PTABS t) +{ + PTDBMSD tp; + PGLOBAL g = t->G; // Is this really useful ??? 
+ + tp = new(g) TDBMSD(this); + tp->Tdbp = Tdbp->Clone(t); + tp->Columns = tp->Tdbp->GetColumns(); + return tp; +} // end of Clone + +PTDB TDBMSD::Duplicate(PGLOBAL g) +{ + PTDBMSD tmup = new(g) TDBMSD(this); + + tmup->Tdbp = Tdbp->Duplicate(g); + return tmup; +} // end of Duplicate + +/***********************************************************************/ +/* Initializes the table filename list. */ +/* Note: tables created by concatenating the file components without */ +/* specifying the LRECL value (that should be restricted to _MAX_PATH)*/ +/* have a LRECL that is the sum of the lengths of all components. */ +/* This is why we use a big filename array to take care of that. */ +/***********************************************************************/ +bool TDBMSD::InitFileNames(PGLOBAL g) +{ +#define PFNZ 4096 +#define FNSZ (_MAX_DRIVE+_MAX_DIR+_MAX_FNAME+_MAX_EXT) + PTDBSDR dirp; + PSZ pfn[PFNZ]; + PSZ filename; + int rc, n = 0; + + if (trace) + htrc("in InitFileName: fn[]=%d\n", FNSZ); + + filename = (char*)PlugSubAlloc(g, NULL, FNSZ); + + // The sub table may need to refer to the Table original block + Tdbp->SetTable(To_Table); // Was not set at construction + + PlugSetPath(filename, Tdbp->GetFile(g), Tdbp->GetPath()); + + if (trace) + htrc("InitFileName: fn='%s'\n", filename); + + dirp = new(g) TDBSDR(filename); + + if (dirp->OpenDB(g)) + return true; + + while (true) + if ((rc = dirp->ReadDB(g)) == RC_OK) { +#if defined(__WIN__) + strcat(strcpy(filename, dirp->Drive), dirp->Direc); +#else // !__WIN__ + strcpy(filename, dirp->Direc); +#endif // !__WIN__ + strcat(strcat(filename, dirp->Fname), dirp->Ftype); + pfn[n++] = PlugDup(g, filename); + } else + break; + + if (rc == RC_FX) + return true; + + if (n) { + Filenames = (char**)PlugSubAlloc(g, NULL, n * sizeof(char*)); + + for (int i = 0; i < n; i++) + Filenames[i] = pfn[i]; + + } else { + Filenames = (char**)PlugSubAlloc(g, NULL, sizeof(char*)); + Filenames[0] = NULL; + } // endif n + + NumFiles = n; + 
return false; +} // end of InitFileNames +#endif // 0 + + /* --------------------------- Class DIRDEF -------------------------- */ /***********************************************************************/ /* DefineAM: define specific AM block values from XDB file. */ @@ -589,8 +603,9 @@ void TDBMUL::CloseDB(PGLOBAL g) bool DIRDEF::DefineAM(PGLOBAL g, LPCSTR, int) { Desc = Fn = GetStringCatInfo(g, "Filename", NULL); - Incl = (GetIntCatInfo("Subdir", 0) != 0); - Huge = (GetIntCatInfo("Huge", 0) != 0); + Incl = GetBoolCatInfo("Subdir", false); + Huge = GetBoolCatInfo("Huge", false); + Nodir = GetBoolCatInfo("Nodir", true); return false; } // end of DefineAM @@ -616,57 +631,40 @@ PTDB DIRDEF::GetTable(PGLOBAL g, MODE) /***********************************************************************/ /* TABDIR constructors. */ /***********************************************************************/ -TDBDIR::TDBDIR(PDIRDEF tdp) : TDBASE(tdp) - { - To_File = tdp->Fn; - iFile = 0; +void TDBDIR::Init(void) +{ + iFile = 0; #if defined(__WIN__) - memset(&FileData, 0, sizeof(_finddata_t)); - Hsearch = -1; - *Drive = '\0'; -#else // !__WIN__ - memset(&Fileinfo, 0, sizeof(struct stat)); - Entry = NULL; - Dir = NULL; - Done = false; - *Pattern = '\0'; -#endif // !__WIN__ - *Fpath = '\0'; - *Direc = '\0'; - *Fname = '\0'; - *Ftype = '\0'; - } // end of TDBDIR standard constructor - -TDBDIR::TDBDIR(PTDBDIR tdbp) : TDBASE(tdbp) - { - To_File = tdbp->To_File; - iFile = tdbp->iFile; -#if defined(__WIN__) - FileData = tdbp->FileData; - Hsearch = tdbp->Hsearch; - strcpy(Drive, tdbp->Drive); + Dvalp = NULL; + memset(&FileData, 0, sizeof(_finddata_t)); + hSearch = INVALID_HANDLE_VALUE; + *Drive = '\0'; #else // !__WIN__ - Fileinfo = tdbp->Fileinfo; - Entry = tdbp->Entry; - Dir = tdbp->Dir; - Done = tdbp->Done; - strcpy(Pattern, tdbp->Pattern); + memset(&Fileinfo, 0, sizeof(struct stat)); + Entry = NULL; + Dir = NULL; + Done = false; + *Pattern = '\0'; #endif // !__WIN__ - strcpy(Direc, 
tdbp->Direc); - strcpy(Fname, tdbp->Fname); - strcpy(Ftype, tdbp->Ftype); - } // end of TDBDIR copy constructor + *Fpath = '\0'; + *Direc = '\0'; + *Fname = '\0'; + *Ftype = '\0'; +} // end of Init -// Method -PTDB TDBDIR::Clone(PTABS t) - { - PTDB tp; - PGLOBAL g = t->G; // Is this really useful ??? +TDBDIR::TDBDIR(PDIRDEF tdp) : TDBASE(tdp) +{ + To_File = tdp->Fn; + Nodir = tdp->Nodir; + Init(); +} // end of TDBDIR standard constructor - tp = new(g) TDBDIR(this); - tp->SetColumns(Columns); - return tp; - } // end of Clone +TDBDIR::TDBDIR(PSZ fpat) : TDBASE((PTABDEF)NULL) +{ + To_File = fpat; + Nodir = true; + Init(); +} // end of TDBDIR constructor /***********************************************************************/ /* Initialize/get the components of the search file pattern. */ @@ -674,18 +672,19 @@ PTDB TDBDIR::Clone(PTABS t) char* TDBDIR::Path(PGLOBAL g) { PCATLG cat = PlgGetCatalog(g); + PTABDEF defp = (PTABDEF)To_Def; #if defined(__WIN__) if (!*Drive) { - PlugSetPath(Fpath, To_File, ((PTABDEF)To_Def)->GetPath()); + PlugSetPath(Fpath, To_File, defp ? defp->GetPath() : NULL); _splitpath(Fpath, Drive, Direc, Fname, Ftype); } else - _makepath(Fpath, Drive, Direc, Fname, Ftype); // Usefull ??? + _makepath(Fpath, Drive, Direc, Fname, Ftype); // Usefull for TDBSDR return Fpath; #else // !__WIN__ if (!Done) { - PlugSetPath(Fpath, To_File, ((PTABDEF)To_Def)->GetPath()); + PlugSetPath(Fpath, To_File, defp ? defp->GetPath() : NULL); _splitpath(Fpath, NULL, Direc, Fname, Ftype); strcat(strcpy(Pattern, Fname), Ftype); Done = true; @@ -709,23 +708,48 @@ PCOL TDBDIR::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) int TDBDIR::GetMaxSize(PGLOBAL g) { if (MaxSize < 0) { - int n = -1; + int rc, n = -1; #if defined(__WIN__) - int h; // Start searching files in the target directory. 
- h = _findfirst(Path(g), &FileData); + hSearch = FindFirstFile(Path(g), &FileData); - if (h != -1) { - for (n = 1;; n++) - if (_findnext(h, &FileData)) - break; + if (hSearch == INVALID_HANDLE_VALUE) { + rc = GetLastError(); - // Close the search handle. - _findclose(h); - } else - n = 0; + if (rc != ERROR_FILE_NOT_FOUND) { + char buf[512]; + + FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, GetLastError(), 0, (LPTSTR)&buf, sizeof(buf), NULL); + sprintf(g->Message, MSG(BAD_FILE_HANDLE), buf); + return -1; + } // endif rc + + return 0; + } // endif hSearch + + while (true) { + if (!(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) + n++; + + if (!FindNextFile(hSearch, &FileData)) { + rc = GetLastError(); + + if (rc != ERROR_NO_MORE_FILES) { + sprintf(g->Message, MSG(NEXT_FILE_ERROR), rc); + FindClose(hSearch); + return -1; + } // endif rc + + break; + } // endif Next + + } // endwhile + // Close the search handle. + FindClose(hSearch); #else // !__WIN__ Path(g); @@ -791,30 +815,35 @@ int TDBDIR::ReadDB(PGLOBAL g) int rc = RC_OK; #if defined(__WIN__) - if (Hsearch == -1) { - /*******************************************************************/ - /* Start searching files in the target directory. The use of the */ - /* Path function is required when called from TDBSDR. */ - /*******************************************************************/ - Hsearch = _findfirst(Path(g), &FileData); - - if (Hsearch == -1) - rc = RC_EF; - else - iFile++; - - } else { - if (_findnext(Hsearch, &FileData)) { - // Restore file name and type pattern - _splitpath(To_File, NULL, NULL, Fname, Ftype); - rc = RC_EF; - } else - iFile++; - - } // endif Hsearch + do { + if (hSearch == INVALID_HANDLE_VALUE) { + /*****************************************************************/ + /* Start searching files in the target directory. The use of */ + /* the Path function is required when called from TDBSDR. 
*/ + /*****************************************************************/ + hSearch = FindFirstFile(Path(g), &FileData); + + if (hSearch == INVALID_HANDLE_VALUE) { + rc = RC_EF; + break; + } else + iFile++; + + } else { + if (!FindNextFile(hSearch, &FileData)) { + // Restore file name and type pattern + _splitpath(To_File, NULL, NULL, Fname, Ftype); + rc = RC_EF; + break; + } else + iFile++; + + } // endif hSearch + + } while (Nodir && FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY); if (rc == RC_OK) - _splitpath(FileData.name, NULL, NULL, Fname, Ftype); + _splitpath(FileData.cFileName, NULL, NULL, Fname, Ftype); #else // !Win32 rc = RC_NF; @@ -878,8 +907,8 @@ void TDBDIR::CloseDB(PGLOBAL) { #if defined(__WIN__) // Close the search handle. - _findclose(Hsearch); - Hsearch = -1; + FindClose(hSearch); + hSearch = INVALID_HANDLE_VALUE; #else // !__WIN__ // Close the DIR handle if (Dir) { @@ -895,7 +924,7 @@ void TDBDIR::CloseDB(PGLOBAL) /***********************************************************************/ /* DIRCOL public constructor. */ /***********************************************************************/ -DIRCOL::DIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ) +DIRCOL::DIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ) : COLBLK(cdp, tdbp, i) { if (cprec) { @@ -907,6 +936,7 @@ DIRCOL::DIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ) } // endif cprec // Set additional DIR access method information for column. + Tdbp = (PTDBDIR)tdbp; N = cdp->GetOffset(); } // end of DIRCOL constructor @@ -916,75 +946,84 @@ DIRCOL::DIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ) /***********************************************************************/ DIRCOL::DIRCOL(DIRCOL *col1, PTDB tdbp) : COLBLK(col1, tdbp) { - N = col1->N; + Tdbp = (PTDBDIR)tdbp; + N = col1->N; } // end of DIRCOL copy constructor +#if defined(__WIN__) +/***********************************************************************/ +/* Retrieve time information from FileData. 
*/ +/***********************************************************************/ +void DIRCOL::SetTimeValue(PGLOBAL g, FILETIME& ftime) +{ + char tsp[24]; + SYSTEMTIME stp; + + if (FileTimeToSystemTime(&ftime, &stp)) { + sprintf(tsp, "%04d-%02d-%02d %02d:%02d:%02d", + stp.wYear, stp.wMonth, stp.wDay, stp.wHour, stp.wMinute, stp.wSecond); + + if (Value->GetType() != TYPE_STRING) { + if (!Tdbp->Dvalp) + Tdbp->Dvalp = AllocateValue(g, TYPE_DATE, 20, 0, false, + "YYYY-MM-DD hh:mm:ss"); + + Tdbp->Dvalp->SetValue_psz(tsp); + Value->SetValue_pval(Tdbp->Dvalp); + } else + Value->SetValue_psz(tsp); + + } else + Value->Reset(); + +} // end of SetTimeValue +#endif // __WIN__ + /***********************************************************************/ /* ReadColumn: what this routine does is to access the information */ /* corresponding to this column and convert it to buffer type. */ /***********************************************************************/ void DIRCOL::ReadColumn(PGLOBAL g) - { - PTDBDIR tdbp = (PTDBDIR)To_Tdb; - + { if (trace) htrc("DIR ReadColumn: col %s R%d use=%.4X status=%.4X type=%d N=%d\n", - Name, tdbp->GetTdb_No(), ColUse, Status, Buf_Type, N); + Name, Tdbp->GetTdb_No(), ColUse, Status, Buf_Type, N); /*********************************************************************/ /* Retrieve the information corresponding to the column number. 
*/ /*********************************************************************/ switch (N) { #if defined(__WIN__) - case 0: Value->SetValue_psz(tdbp->Drive); break; + case 0: Value->SetValue_psz(Tdbp->Drive); break; #endif // __WIN__ - case 1: Value->SetValue_psz(tdbp->Direc); break; - case 2: Value->SetValue_psz(tdbp->Fname); break; - case 3: Value->SetValue_psz(tdbp->Ftype); break; + case 1: Value->SetValue_psz(Tdbp->Direc); break; + case 2: Value->SetValue_psz(Tdbp->Fname); break; + case 3: Value->SetValue_psz(Tdbp->Ftype); break; #if defined(__WIN__) - case 4: Value->SetValue((int)tdbp->FileData.attrib); break; - case 5: Value->SetValue((int)tdbp->FileData.size); break; - case 6: Value->SetValue((int)tdbp->FileData.time_write); break; - case 7: Value->SetValue((int)tdbp->FileData.time_create); break; - case 8: Value->SetValue((int)tdbp->FileData.time_access); break; + case 4: Value->SetValue((int)Tdbp->FileData.dwFileAttributes); break; + case 5: Value->SetValue((int)Tdbp->FileData.nFileSizeLow); break; + case 6: SetTimeValue(g, Tdbp->FileData.ftLastWriteTime); break; + case 7: SetTimeValue(g, Tdbp->FileData.ftCreationTime); break; + case 8: SetTimeValue(g, Tdbp->FileData.ftLastAccessTime); break; #else // !__WIN__ - case 4: Value->SetValue((int)tdbp->Fileinfo.st_mode); break; - case 5: Value->SetValue((int)tdbp->Fileinfo.st_size); break; - case 6: Value->SetValue((int)tdbp->Fileinfo.st_mtime); break; - case 7: Value->SetValue((int)tdbp->Fileinfo.st_ctime); break; - case 8: Value->SetValue((int)tdbp->Fileinfo.st_atime); break; - case 9: Value->SetValue((int)tdbp->Fileinfo.st_uid); break; - case 10: Value->SetValue((int)tdbp->Fileinfo.st_gid); break; + case 4: Value->SetValue((int)Tdbp->Fileinfo.st_mode); break; + case 5: Value->SetValue((int)Tdbp->Fileinfo.st_size); break; + case 6: Value->SetValue((int)Tdbp->Fileinfo.st_mtime); break; + case 7: Value->SetValue((int)Tdbp->Fileinfo.st_ctime); break; + case 8: Value->SetValue((int)Tdbp->Fileinfo.st_atime); break; + 
case 9: Value->SetValue((int)Tdbp->Fileinfo.st_uid); break; + case 10: Value->SetValue((int)Tdbp->Fileinfo.st_gid); break; #endif // !__WIN__ default: sprintf(g->Message, MSG(INV_DIRCOL_OFST), N); - longjmp(g->jumper[g->jump_level], GetAmType()); - } // endswitch N + throw GetAmType(); + } // endswitch N } // end of ReadColumn /* ------------------------- Class TDBSDR ---------------------------- */ /***********************************************************************/ -/* TABSDR copy constructors. */ -/***********************************************************************/ -TDBSDR::TDBSDR(PTDBSDR tdbp) : TDBDIR(tdbp) - { - Sub = tdbp->Sub; - } // end of TDBSDR copy constructor - -// Method -PTDB TDBSDR::Clone(PTABS t) - { - PTDB tp; - PGLOBAL g = t->G; // Is this really useful ??? - - tp = new(g) TDBSDR(this); - tp->SetColumns(Columns); - return tp; - } // end of Clone - -/***********************************************************************/ /* SDR GetMaxSize: returns the number of retrieved files. */ /***********************************************************************/ int TDBSDR::GetMaxSize(PGLOBAL g) @@ -998,47 +1037,124 @@ int TDBSDR::GetMaxSize(PGLOBAL g) } // end of GetMaxSize /***********************************************************************/ -/* SDR GetMaxSize: returns the number of retrieved files. */ +/* SDR FindInDir: returns the number of retrieved files. */ /***********************************************************************/ int TDBSDR::FindInDir(PGLOBAL g) { - int n = 0; + int rc, n = 0; size_t m = strlen(Direc); // Start searching files in the target directory. #if defined(__WIN__) - int h = _findfirst(Path(g), &FileData); + HANDLE h; - if (h != -1) { - for (n = 1;; n++) - if (_findnext(h, &FileData)) - break; +#if defined(PATHMATCHSPEC) + if (!*Drive) + Path(g); - // Close the search handle. - _findclose(h); - } // endif h + _makepath(Fpath, Drive, Direc, "*", "*"); - // Now search files in sub-directories. 
- _makepath(Fpath, Drive, Direc, "*", ""); - h = _findfirst(Fpath, &FileData); + h = FindFirstFile(Fpath, &FileData); - if (h != -1) { - while (true) { - if (FileData.attrib & _A_SUBDIR && *FileData.name != '.') { - // Look in the name sub-directory - strcat(strcat(Direc, FileData.name), "\\"); - n += FindInDir(g); - Direc[m] = '\0'; // Restore path - } // endif SUBDIR + if (h == INVALID_HANDLE_VALUE) { + rc = GetLastError(); - if (_findnext(h, &FileData)) - break; + if (rc != ERROR_FILE_NOT_FOUND) { + char buf[512]; - } // endwhile + FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, GetLastError(), 0, (LPTSTR)&buf, sizeof(buf), NULL); + sprintf(g->Message, MSG(BAD_FILE_HANDLE), buf); + return -1; + } // endif rc - // Close the search handle. - _findclose(h); - } // endif h + return 0; + } // endif h + + while (true) { + if ((FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) && + *FileData.cFileName != '.') { + // Look in the name sub-directory + strcat(strcat(Direc, FileData.cFileName), "/"); + n += FindInDir(g); + Direc[m] = '\0'; // Restore path + } else if (PathMatchSpec(FileData.cFileName, Fpath)) + n++; + + if (!FindNextFile(h, &FileData)) { + rc = GetLastError(); + + if (rc != ERROR_NO_MORE_FILES) { + sprintf(g->Message, MSG(NEXT_FILE_ERROR), rc); + FindClose(h); + return -1; + } // endif rc + + break; + } // endif Next + + } // endwhile +#else // !PATHMATCHSPEC + h = FindFirstFile(Path(g), &FileData); + + if (h == INVALID_HANDLE_VALUE) { + rc = GetLastError(); + + if (rc != ERROR_FILE_NOT_FOUND) { + char buf[512]; + + FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, GetLastError(), 0, (LPTSTR)&buf, sizeof(buf), NULL); + sprintf(g->Message, MSG(BAD_FILE_HANDLE), buf); + return -1; + } // endif rc + + return 0; + } // endif hSearch + + while (true) { + n++; + + if (!FindNextFile(h, &FileData)) { + rc = GetLastError(); + + if (rc != ERROR_NO_MORE_FILES) { + sprintf(g->Message, 
MSG(NEXT_FILE_ERROR), rc); + FindClose(h); + return -1; + } // endif rc + + break; + } // endif Next + + } // endwhile + + // Now search files in sub-directories. + _makepath(Fpath, Drive, Direc, "*", "."); + h = FindFirstFile(Fpath, &FileData); + + if (h != INVALID_HANDLE_VALUE) { + while (true) { + if ((FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) && + *FileData.cFileName != '.') { + // Look in the name sub-directory + strcat(strcat(Direc, FileData.cFileName), "/"); + n += FindInDir(g); + Direc[m] = '\0'; // Restore path + } // endif SUBDIR + + if (!FindNextFile(h, &FileData)) + break; + + } // endwhile + + } // endif h +#endif // !PATHMATCHSPEC + + // Close the search handle. + FindClose(h); #else // !__WIN__ int k; DIR *dir = opendir(Direc); @@ -1090,7 +1206,7 @@ bool TDBSDR::OpenDB(PGLOBAL g) Sub->Next = NULL; Sub->Prev = NULL; #if defined(__WIN__) - Sub->H = -1; + Sub->H = INVALID_HANDLE_VALUE; Sub->Len = strlen(Direc); #else // !__WIN__ Sub->D = NULL; @@ -1116,18 +1232,22 @@ int TDBSDR::ReadDB(PGLOBAL g) // Are there more files in sub-directories retry: do { - if (Sub->H == -1) { - _makepath(Fpath, Drive, Direc, "*", ""); - Sub->H = _findfirst(Fpath, &FileData); - } else if (_findnext(Sub->H, &FileData)) { - _findclose(Sub->H); - Sub->H = -1; - *FileData.name = '\0'; - } // endif findnext - - } while(*FileData.name == '.'); - - if (Sub->H == -1) { + if (Sub->H == INVALID_HANDLE_VALUE) { +// _makepath(Fpath, Drive, Direc, "*", "."); why was this made? + _makepath(Fpath, Drive, Direc, "*", NULL); + Sub->H = FindFirstFile(Fpath, &FileData); + } else if (!FindNextFile(Sub->H, &FileData)) { + FindClose(Sub->H); + Sub->H = INVALID_HANDLE_VALUE; + *FileData.cFileName= '\0'; + break; + } // endif findnext + + } while(!(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) || + (*FileData.cFileName == '.' && + (!FileData.cFileName[1] || FileData.cFileName[1] == '.'))); + + if (Sub->H == INVALID_HANDLE_VALUE) { // No more sub-directories. 
Are we in a sub-directory? if (!Sub->Prev) return rc; // No, all is finished @@ -1145,17 +1265,17 @@ int TDBSDR::ReadDB(PGLOBAL g) sup = (PSUBDIR)PlugSubAlloc(g, NULL, sizeof(SUBDIR)); sup->Next = NULL; sup->Prev = Sub; - sup->H = -1; + sup->H = INVALID_HANDLE_VALUE; Sub->Next = sup; } // endif Next Sub = Sub->Next; - strcat(strcat(Direc, FileData.name), "\\"); + strcat(strcat(Direc, FileData.cFileName), "/"); Sub->Len = strlen(Direc); // Reset Hsearch used by TDBDIR::ReadDB - _findclose(Hsearch); - Hsearch = -1; + FindClose(hSearch); + hSearch = INVALID_HANDLE_VALUE; goto again; } // endif H @@ -1179,7 +1299,8 @@ int TDBSDR::ReadDB(PGLOBAL g) if (lstat(Fpath, &Fileinfo) < 0) { sprintf(g->Message, "%s: %s", Fpath, strerror(errno)); rc = RC_FX; - } else if (S_ISDIR(Fileinfo.st_mode) && *Entry->d_name != '.') { + } else if (S_ISDIR(Fileinfo.st_mode) && strcmp(Entry->d_name, ".") + && strcmp(Entry->d_name, "..")) { // Look in the name sub-directory if (!Sub->Next) { PSUBDIR sup; @@ -1423,8 +1544,8 @@ void TDBDHR::CloseDB(PGLOBAL g) // Close the search handle. if (!FindClose(Hsearch)) { strcpy(g->Message, MSG(SRCH_CLOSE_ERR)); - longjmp(g->jumper[g->jump_level], GetAmType()); - } // endif FindClose + throw GetAmType(); + } // endif FindClose iFile = 0; Hsearch = INVALID_HANDLE_VALUE; @@ -1435,8 +1556,8 @@ void TDBDHR::CloseDB(PGLOBAL g) /***********************************************************************/ /* DHRCOL public constructor. 
*/ /***********************************************************************/ -DHRCOL::DHRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) - : COLBLK(cdp, tdbp, i) +DHRCOL::DHRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) + : COLBLK(cdp, tdbp, i) { if (cprec) { Next = cprec->GetNext(); diff --git a/storage/connect/tabmul.h b/storage/connect/tabmul.h index 51fa7f9000a..8a95a772c41 100644 --- a/storage/connect/tabmul.h +++ b/storage/connect/tabmul.h @@ -39,7 +39,7 @@ class DllExport TDBMUL : public TDBASE { virtual void ResetDB(void); virtual PTDB Clone(PTABS t); virtual bool IsSame(PTDB tp) {return tp == (PTDB)Tdbp;} - virtual PSZ GetFile(PGLOBAL g) {return Tdbp->GetFile(g);} + virtual PCSZ GetFile(PGLOBAL g) {return Tdbp->GetFile(g);} virtual int GetRecpos(void) {return 0;} virtual PCOL ColDB(PGLOBAL g, PSZ name, int num); bool InitFileNames(PGLOBAL g); @@ -69,6 +69,34 @@ class DllExport TDBMUL : public TDBASE { int iFile; // Index of currently processed file }; // end of class TDBMUL +#if 0 +/***********************************************************************/ +/* This is the MSD Access Method class declaration for files that are */ +/* physically split in multiple files having the same format. */ +/* This sub-class also include files of the sub-directories. */ +/***********************************************************************/ +class DllExport TDBMSD : public TDBMUL { + //friend class MULCOL; +public: + // Constructor + TDBMSD(PTDB tdbp) : TDBMUL(tdbp) {} + TDBMSD(PTDBMSD tdbp) : TDBMUL(tdbp) {} + + // Implementation + virtual PTDB Duplicate(PGLOBAL g); + + // Methods + virtual PTDB Clone(PTABS t); + bool InitFileNames(PGLOBAL g); + + // Database routines + +protected: + + // Members +}; // end of class TDBMSD +#endif + /***********************************************************************/ /* Directory listing table. 
*/ /***********************************************************************/ @@ -90,7 +118,8 @@ class DllExport DIRDEF : public TABDEF { /* Directory listing table */ // Members PSZ Fn; /* Path/Name of file search */ bool Incl; /* true to include sub-directories */ - bool Huge; /* true if files can be larger than 2GB */ + bool Huge; /* true if files can be larger than 2GB */ + bool Nodir; /* true to exclude directories */ }; // end of DIRDEF /***********************************************************************/ @@ -101,18 +130,16 @@ class DllExport DIRDEF : public TABDEF { /* Directory listing table */ /***********************************************************************/ class TDBDIR : public TDBASE { friend class DIRCOL; - public: + friend class TDBMUL; +public: // Constructor TDBDIR(PDIRDEF tdp); - TDBDIR(PTDBDIR tdbp); + TDBDIR(PSZ fpat); // Implementation virtual AMT GetAmType(void) {return TYPE_AM_DIR;} - virtual PTDB Duplicate(PGLOBAL g) - {return (PTDB)new(g) TDBDIR(this);} // Methods - virtual PTDB Clone(PTABS t); virtual int GetRecpos(void) {return iFile;} // Database routines @@ -127,14 +154,16 @@ class TDBDIR : public TDBASE { virtual void CloseDB(PGLOBAL g); protected: + void Init(void); char *Path(PGLOBAL g); // Members PSZ To_File; // Points to file search pathname int iFile; // Index of currently retrieved file #if defined(__WIN__) - _finddata_t FileData; // Find data structure - intptr_t Hsearch; // Search handle + PVAL Dvalp; // Used to retrieve file date values + WIN32_FIND_DATA FileData; // Find data structure + HANDLE hSearch; // Search handle char Drive[_MAX_DRIVE]; // Drive name #else // !__WIN__ struct stat Fileinfo; // File info structure @@ -147,6 +176,7 @@ class TDBDIR : public TDBASE { char Direc[_MAX_DIR]; // Search path char Fname[_MAX_FNAME]; // File name char Ftype[_MAX_EXT]; // File extention + bool Nodir; // Exclude directories from file list }; // end of class TDBDIR 
/***********************************************************************/ @@ -158,17 +188,11 @@ class TDBDIR : public TDBASE { /***********************************************************************/ class TDBSDR : public TDBDIR { friend class DIRCOL; + friend class TDBMUL; public: // Constructors TDBSDR(PDIRDEF tdp) : TDBDIR(tdp) {Sub = NULL;} - TDBSDR(PTDBSDR tdbp); - - // Implementation - virtual PTDB Duplicate(PGLOBAL g) - {return (PTDB)new(g) TDBSDR(this);} - - // Methods - virtual PTDB Clone(PTABS t); + TDBSDR(PSZ fpat) : TDBDIR(fpat) {Sub = NULL;} // Database routines virtual int GetMaxSize(PGLOBAL g); @@ -184,7 +208,7 @@ class TDBSDR : public TDBDIR { struct _Sub_Dir *Next; struct _Sub_Dir *Prev; #if defined(__WIN__) - intptr_t H; // Search handle + HANDLE H; // Search handle #else // !__WIN__ DIR *D; #endif // !__WIN__ @@ -202,7 +226,7 @@ class TDBSDR : public TDBDIR { class DIRCOL : public COLBLK { public: // Constructors - DIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "DIR"); + DIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "DIR"); DIRCOL(DIRCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation @@ -214,7 +238,11 @@ class DIRCOL : public COLBLK { protected: // Default constructor not to be used DIRCOL(void) {} +#if defined(__WIN__) + void SetTimeValue(PGLOBAL g, FILETIME& ftime); +#endif // __WIN__ // Members + PTDBDIR Tdbp; // To DIR table int N; // Column number }; // end of class DIRCOL diff --git a/storage/connect/tabmysql.cpp b/storage/connect/tabmysql.cpp index 1a715819fc8..bdddcf64ca8 100644 --- a/storage/connect/tabmysql.cpp +++ b/storage/connect/tabmysql.cpp @@ -68,8 +68,8 @@ void PrintResult(PGLOBAL, PSEM, PQRYRES); #endif // _CONSOLE // Used to check whether a MYSQL table is created on itself -bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host, - const char *db, char *tab, const char *src, int port); +bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, PCSZ host, PCSZ db, + PCSZ tab, PCSZ src, int 
port); /***********************************************************************/ /* External function. */ @@ -125,7 +125,7 @@ bool MYSQLDEF::GetServerInfo(PGLOBAL g, const char *server_name) } // endif server DBUG_PRINT("info", ("get_server_by_name returned server at %lx", - (long unsigned int) server)); + (size_t) server)); // TODO: We need to examine which of these can really be NULL Hostname = PlugDup(g, server->host); @@ -183,19 +183,22 @@ bool MYSQLDEF::GetServerInfo(PGLOBAL g, const char *server_name) /***********************************************************************/ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b) { + char *tabn, *pwd, *schema; + if ((!strstr(url, "://") && (!strchr(url, '@')))) { // No :// or @ in connection string. Must be a straight // connection name of either "server" or "server/table" // ok, so we do a little parsing, but not completely! - if ((Tabname= strchr(url, '/'))) { + if ((tabn= strchr(url, '/'))) { // If there is a single '/' in the connection string, // this means the user is specifying a table name - *Tabname++= '\0'; + *tabn++= '\0'; // there better not be any more '/'s ! - if (strchr(Tabname, '/')) + if (strchr(tabn, '/')) return true; + Tabname = tabn; } else // Otherwise, straight server name, Tabname = (b) ? 
GetStringCatInfo(g, "Tabname", Name) : NULL; @@ -223,7 +226,7 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b) Username += 3; - if (!(Hostname = strchr(Username, '@'))) { + if (!(Hostname = (char*)strchr(Username, '@'))) { strcpy(g->Message, "No host specified in URL"); return true; } else { @@ -231,11 +234,11 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b) Server = Hostname; } // endif Hostname - if ((Password = strchr(Username, ':'))) { - *Password++ = 0; // End username + if ((pwd = (char*)strchr(Username, ':'))) { + *pwd++ = 0; // End username - // Make sure there isn't an extra / or @ - if ((strchr(Password, '/') || strchr(Hostname, '@'))) { + // Make sure there isn't an extra / + if (strchr(pwd, '/')) { strcpy(g->Message, "Syntax error in URL"); return true; } // endif @@ -243,8 +246,10 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b) // Found that if the string is: // user:@hostname:port/db/table // Then password is a null string, so set to NULL - if ((Password[0] == 0)) - Password = NULL; + if ((pwd[0] == 0)) + Password = NULL; + else + Password = pwd; } // endif password @@ -254,21 +259,23 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b) return true; } // endif - if ((Tabschema = strchr(Hostname, '/'))) { - *Tabschema++ = 0; + if ((schema = strchr(Hostname, '/'))) { + *schema++ = 0; - if ((Tabname = strchr(Tabschema, '/'))) { - *Tabname++ = 0; + if ((tabn = strchr(schema, '/'))) { + *tabn++ = 0; // Make sure there's not an extra / - if ((strchr(Tabname, '/'))) { + if ((strchr(tabn, '/'))) { strcpy(g->Message, "Syntax error in URL"); return true; } // endif / + Tabname = tabn; } // endif TableName - } // endif database + Tabschema = schema; + } // endif database if ((sport = strchr(Hostname, ':'))) *sport++ = 0; @@ -349,7 +356,7 @@ bool MYSQLDEF::DefineAM(PGLOBAL g, LPCSTR am, int) Portnumber = GetIntCatInfo("Port", GetDefaultPort()); Server = Hostname; } else { - char *locdb = Tabschema; + PCSZ locdb = Tabschema; if 
(ParseURL(g, url)) return true; @@ -495,11 +502,11 @@ PCOL TDBMYSQL::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) /* filter should be removed from column list. */ /***********************************************************************/ bool TDBMYSQL::MakeSelect(PGLOBAL g, bool mx) - { +{ //char *tk = "`"; char tk = '`'; int len = 0, rank = 0; - bool b = false, oom = false; + bool b = false; PCOL colp; //PDBUSER dup = PlgGetUser(g); @@ -526,13 +533,13 @@ bool TDBMYSQL::MakeSelect(PGLOBAL g, bool mx) for (colp = Columns; colp; colp = colp->GetNext()) if (!colp->IsSpecial()) { if (b) - oom |= Query->Append(", "); + Query->Append(", "); else b = true; - oom |= Query->Append(tk); - oom |= Query->Append(colp->GetName()); - oom |= Query->Append(tk); + Query->Append(tk); + Query->Append(colp->GetName()); + Query->Append(tk); ((PMYCOL)colp)->Rank = rank++; } // endif colp @@ -542,22 +549,22 @@ bool TDBMYSQL::MakeSelect(PGLOBAL g, bool mx) // Query '*' from... // (the use of a char constant minimize the result storage) if (Isview) - oom |= Query->Append('*'); + Query->Append('*'); else - oom |= Query->Append("'*'"); + Query->Append("'*'"); } // endif ncol - oom |= Query->Append(" FROM "); - oom |= Query->Append(tk); - oom |= Query->Append(TableName); - oom |= Query->Append(tk); + Query->Append(" FROM "); + Query->Append(tk); + Query->Append(TableName); + Query->Append(tk); len = Query->GetLength(); if (To_CondFil) { if (!mx) { - oom |= Query->Append(" WHERE "); - oom |= Query->Append(To_CondFil->Body); + Query->Append(" WHERE "); + Query->Append(To_CondFil->Body); len = Query->GetLength() + 1; } else len += (strlen(To_CondFil->Body) + 256); @@ -565,25 +572,25 @@ bool TDBMYSQL::MakeSelect(PGLOBAL g, bool mx) } else len += (mx ? 
256 : 1); - if (oom || Query->Resize(len)) { + if (Query->IsTruncated() || Query->Resize(len)) { strcpy(g->Message, "MakeSelect: Out of memory"); return true; - } // endif oom + } // endif Query if (trace) htrc("Query=%s\n", Query->GetStr()); return false; - } // end of MakeSelect +} // end of MakeSelect /***********************************************************************/ /* MakeInsert: make the Insert statement used with MySQL connection. */ /***********************************************************************/ bool TDBMYSQL::MakeInsert(PGLOBAL g) { - char *tk = "`"; + const char *tk = "`"; uint len = 0; - bool b = false, oom; + bool oom, b = false; PCOL colp; if (Query) @@ -622,38 +629,38 @@ bool TDBMYSQL::MakeInsert(PGLOBAL g) Query = new(g) STRING(g, len); if (Delayed) - oom = Query->Set("INSERT DELAYED INTO "); + Query->Set("INSERT DELAYED INTO "); else - oom = Query->Set("INSERT INTO "); + Query->Set("INSERT INTO "); - oom |= Query->Append(tk); - oom |= Query->Append(TableName); - oom |= Query->Append("` ("); + Query->Append(tk); + Query->Append(TableName); + Query->Append("` ("); for (colp = Columns; colp; colp = colp->GetNext()) { if (b) - oom |= Query->Append(", "); + Query->Append(", "); else b = true; - oom |= Query->Append(tk); - oom |= Query->Append(colp->GetName()); - oom |= Query->Append(tk); + Query->Append(tk); + Query->Append(colp->GetName()); + Query->Append(tk); } // endfor colp - oom |= Query->Append(") VALUES ("); + Query->Append(") VALUES ("); #if defined(MYSQL_PREPARED_STATEMENTS) if (Prep) { for (int i = 0; i < Nparm; i++) - oom |= Query->Append("?,"); + Query->Append("?,"); Query->RepLast(')'); Query->Trim(); } // endif Prep #endif // MYSQL_PREPARED_STATEMENTS - if (oom) + if ((oom = Query->IsTruncated())) strcpy(g->Message, "MakeInsert: Out of memory"); return oom; @@ -684,18 +691,18 @@ bool TDBMYSQL::MakeCommand(PGLOBAL g) strlwr(strcpy(name, Name)); // Not a keyword if ((p = strstr(qrystr, name))) { - bool oom = 
Query->Set(Qrystr, p - qrystr); + Query->Set(Qrystr, p - qrystr); if (qtd && *(p-1) == ' ') { - oom |= Query->Append('`'); - oom |= Query->Append(TableName); - oom |= Query->Append('`'); + Query->Append('`'); + Query->Append(TableName); + Query->Append('`'); } else - oom |= Query->Append(TableName); + Query->Append(TableName); - oom |= Query->Append(Qrystr + (p - qrystr) + strlen(name)); + Query->Append(Qrystr + (p - qrystr) + strlen(name)); - if (oom) { + if (Query->IsTruncated()) { strcpy(g->Message, "MakeCommand: Out of memory"); return true; } else @@ -1096,7 +1103,7 @@ bool TDBMYSQL::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr) To_CondFil->Body= (char*)PlugSubAlloc(g, NULL, 0); *To_CondFil->Body= 0; - if ((To_CondFil = hc->CheckCond(g, To_CondFil, To_CondFil->Cond))) + if ((To_CondFil = hc->CheckCond(g, To_CondFil, Cond))) PlugSubAlloc(g, NULL, strlen(To_CondFil->Body) + 1); } // endif active_index @@ -1161,24 +1168,23 @@ int TDBMYSQL::WriteDB(PGLOBAL g) int rc; uint len = Query->GetLength(); char buf[64]; - bool oom = false; // Make the Insert command value list for (PCOL colp = Columns; colp; colp = colp->GetNext()) { if (!colp->GetValue()->IsNull()) { if (colp->GetResultType() == TYPE_STRING || colp->GetResultType() == TYPE_DATE) - oom |= Query->Append_quoted(colp->GetValue()->GetCharString(buf)); + Query->Append_quoted(colp->GetValue()->GetCharString(buf)); else - oom |= Query->Append(colp->GetValue()->GetCharString(buf)); + Query->Append(colp->GetValue()->GetCharString(buf)); } else - oom |= Query->Append("NULL"); + Query->Append("NULL"); - oom |= Query->Append(','); + Query->Append(','); } // endfor colp - if (unlikely(oom)) { + if (unlikely(Query->IsTruncated())) { strcpy(g->Message, "WriteDB: Out of memory"); rc = RC_FX; } else { @@ -1186,7 +1192,7 @@ int TDBMYSQL::WriteDB(PGLOBAL g) Myc.m_Rows = -1; // To execute the query rc = Myc.ExecSQL(g, Query->GetStr()); Query->Truncate(len); // Restore query - } // endif oom + } // endif Query return (rc 
== RC_NF) ? RC_OK : rc; // RC_NF is Ok } // end of WriteDB @@ -1234,7 +1240,7 @@ void TDBMYSQL::CloseDB(PGLOBAL g) /***********************************************************************/ /* MYSQLCOL public constructor. */ /***********************************************************************/ -MYSQLCOL::MYSQLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) +MYSQLCOL::MYSQLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) : COLBLK(cdp, tdbp, i) { if (cprec) { @@ -1260,7 +1266,7 @@ MYSQLCOL::MYSQLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) /***********************************************************************/ /* MYSQLCOL public constructor. */ /***********************************************************************/ -MYSQLCOL::MYSQLCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PSZ am) +MYSQLCOL::MYSQLCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PCSZ am) : COLBLK(NULL, tdbp, i) { const char *chset = get_charset_name(fld->charsetnr); @@ -1407,8 +1413,8 @@ void MYSQLCOL::ReadColumn(PGLOBAL g) if (rc == RC_EF) sprintf(g->Message, MSG(INV_DEF_READ), rc); - longjmp(g->jumper[g->jump_level], 11); - } else + throw 11; + } else tdbp->Fetched = true; if ((buf = ((PTDBMY)To_Tdb)->Myc.GetCharField(Rank))) { @@ -1669,7 +1675,7 @@ int TDBMYEXC::WriteDB(PGLOBAL g) /***********************************************************************/ /* MYXCOL public constructor. */ /***********************************************************************/ -MYXCOL::MYXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) +MYXCOL::MYXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) : MYSQLCOL(cdp, tdbp, cprec, i, am) { // Set additional EXEC MYSQL access method information for column. @@ -1679,7 +1685,7 @@ MYXCOL::MYXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) /***********************************************************************/ /* MYSQLCOL public constructor. 
*/ /***********************************************************************/ -MYXCOL::MYXCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PSZ am) +MYXCOL::MYXCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PCSZ am) : MYSQLCOL(fld, tdbp, i, am) { if (trace) diff --git a/storage/connect/tabmysql.h b/storage/connect/tabmysql.h index 050fa59259b..3c37ae5bf3b 100644 --- a/storage/connect/tabmysql.h +++ b/storage/connect/tabmysql.h @@ -86,7 +86,7 @@ class TDBMYSQL : public TDBEXT { virtual void ResetDB(void) {N = 0;} virtual int RowNumber(PGLOBAL g, bool b = false); virtual bool IsView(void) {return Isview;} - virtual PSZ GetServer(void) {return Server;} + virtual PCSZ GetServer(void) {return Server;} void SetDatabase(LPCSTR db) {Schema = (char*)db;} // Schema routines @@ -109,7 +109,7 @@ class TDBMYSQL : public TDBEXT { // Internal functions bool MakeSelect(PGLOBAL g, bool mx); bool MakeInsert(PGLOBAL g); - int BindColumns(PGLOBAL g); + int BindColumns(PGLOBAL g __attribute__((unused))); virtual bool MakeCommand(PGLOBAL g); //int MakeUpdate(PGLOBAL g); //int MakeDelete(PGLOBAL g); @@ -146,8 +146,8 @@ class MYSQLCOL : public COLBLK { friend class TDBMYSQL; public: // Constructors - MYSQLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "MYSQL"); - MYSQLCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PSZ am = "MYSQL"); + MYSQLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "MYSQL"); + MYSQLCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PCSZ am = "MYSQL"); MYSQLCOL(MYSQLCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation @@ -215,8 +215,8 @@ class MYXCOL : public MYSQLCOL { friend class TDBMYEXC; public: // Constructors - MYXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "MYSQL"); - MYXCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PSZ am = "MYSQL"); + MYXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "MYSQL"); + MYXCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PCSZ am = "MYSQL"); MYXCOL(MYXCOL *colp, PTDB tdbp); // Constructor used in copy process // Methods 
@@ -242,10 +242,10 @@ class TDBMCL : public TDBCAT { virtual PQRYRES GetResult(PGLOBAL g); // Members - PSZ Host; // Host machine to use - PSZ Db; // Database to be used by server - PSZ Tab; // External table name - PSZ User; // User logon name - PSZ Pwd; // Password logon info - int Port; // MySQL port number (0 = default) + PCSZ Host; // Host machine to use + PCSZ Db; // Database to be used by server + PCSZ Tab; // External table name + PCSZ User; // User logon name + PCSZ Pwd; // Password logon info + int Port; // MySQL port number (0 = default) }; // end of class TDBMCL diff --git a/storage/connect/tabodbc.cpp b/storage/connect/tabodbc.cpp index 488acdd330d..34711d584f1 100644 --- a/storage/connect/tabodbc.cpp +++ b/storage/connect/tabodbc.cpp @@ -1,11 +1,11 @@ /************* Tabodbc C++ Program Source Code File (.CPP) *************/ /* PROGRAM NAME: TABODBC */ /* ------------- */ -/* Version 3.1 */ +/* Version 3.2 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2000-2016 */ +/* (C) Copyright to the author Olivier BERTRAND 2000-2017 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -116,47 +116,12 @@ bool ODBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) if (EXTDEF::DefineAM(g, am, poff)) return true; - // Tabname = GetStringCatInfo(g, "Name", - // (Catfunc & (FNC_TABLE | FNC_COL)) ? 
NULL : Name); - // Tabname = GetStringCatInfo(g, "Tabname", Tabname); - // Tabschema = GetStringCatInfo(g, "Dbname", NULL); - // Tabschema = GetStringCatInfo(g, "Schema", Tabschema); - // Tabcat = GetStringCatInfo(g, "Qualifier", NULL); - // Tabcat = GetStringCatInfo(g, "Catalog", Tabcat); - //Username = GetStringCatInfo(g, "User", NULL); - // Password = GetStringCatInfo(g, "Password", NULL); - - // if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL))) - // Read_Only = true; - - // Qrystr = GetStringCatInfo(g, "Query_String", "?"); - // Sep = GetStringCatInfo(g, "Separator", NULL); Catver = GetIntCatInfo("Catver", 2); - //Xsrc = GetBoolCatInfo("Execsrc", FALSE); - //Maxerr = GetIntCatInfo("Maxerr", 0); - //Maxres = GetIntCatInfo("Maxres", 0); - //Quoted = GetIntCatInfo("Quoted", 0); Options = ODBConn::noOdbcDialog; //Options = ODBConn::noOdbcDialog | ODBConn::useCursorLib; Cto= GetIntCatInfo("ConnectTimeout", DEFAULT_LOGIN_TIMEOUT); Qto= GetIntCatInfo("QueryTimeout", DEFAULT_QUERY_TIMEOUT); - - //if ((Scrollable = GetBoolCatInfo("Scrollable", false)) && !Elemt) - // Elemt = 1; // Cannot merge SQLFetch and SQLExtendedFetch - - //if (Catfunc == FNC_COL) - // Colpat = GetStringCatInfo(g, "Colpat", NULL); - - //if (Catfunc == FNC_TABLE) - // Tabtyp = GetStringCatInfo(g, "Tabtype", NULL); - UseCnc = GetBoolCatInfo("UseDSN", false); - - // Memory was Boolean, it is now integer - //if (!(Memory = GetIntCatInfo("Memory", 0))) - // Memory = GetBoolCatInfo("Memory", false) ? 
1 : 0; - - //Pseudo = 2; // FILID is Ok but not ROWID return false; } // end of DefineAM @@ -210,59 +175,22 @@ TDBODBC::TDBODBC(PODEF tdp) : TDBEXT(tdp) if (tdp) { Connect = tdp->Connect; - //TableName = tdp->Tabname; - //Schema = tdp->Tabschema; Ops.User = tdp->Username; Ops.Pwd = tdp->Password; - //Catalog = tdp->Tabcat; - //Srcdef = tdp->Srcdef; - //Qrystr = tdp->Qrystr; - //Sep = tdp->GetSep(); - //Options = tdp->Options; Ops.Cto = tdp->Cto; Ops.Qto = tdp->Qto; - //Quoted = MY_MAX(0, tdp->GetQuoted()); - //Rows = tdp->GetElemt(); Catver = tdp->Catver; - //Memory = tdp->Memory; - //Scrollable = tdp->Scrollable; Ops.UseCnc = tdp->UseCnc; } else { Connect = NULL; - //TableName = NULL; - //Schema = NULL; Ops.User = NULL; Ops.Pwd = NULL; - //Catalog = NULL; - //Srcdef = NULL; - //Qrystr = NULL; - //Sep = 0; - //Options = 0; Ops.Cto = DEFAULT_LOGIN_TIMEOUT; Ops.Qto = DEFAULT_QUERY_TIMEOUT; - //Quoted = 0; - //Rows = 0; Catver = 0; - //Memory = 0; - //Scrollable = false; Ops.UseCnc = false; } // endif tdp - //Quote = NULL; - //Query = NULL; - //Count = NULL; -//Where = NULL; - //MulConn = NULL; - //DBQ = NULL; - //Qrp = NULL; - //Fpos = 0; - //Curpos = 0; - //AftRows = 0; - //CurNum = 0; - //Rbuf = 0; - //BufSize = 0; - //Nparm = 0; - //Placed = false; } // end of TDBODBC standard constructor TDBODBC::TDBODBC(PTDBODBC tdbp) : TDBEXT(tdbp) @@ -270,32 +198,7 @@ TDBODBC::TDBODBC(PTDBODBC tdbp) : TDBEXT(tdbp) Ocp = tdbp->Ocp; // is that right ? 
Cnp = tdbp->Cnp; Connect = tdbp->Connect; - //TableName = tdbp->TableName; - //Schema = tdbp->Schema; Ops = tdbp->Ops; - //Catalog = tdbp->Catalog; - //Srcdef = tdbp->Srcdef; - //Qrystr = tdbp->Qrystr; - //Memory = tdbp->Memory; - //Scrollable = tdbp->Scrollable; - //Quote = tdbp->Quote; - //Query = tdbp->Query; - //Count = tdbp->Count; -//Where = tdbp->Where; - //MulConn = tdbp->MulConn; - //DBQ = tdbp->DBQ; - //Options = tdbp->Options; - //Quoted = tdbp->Quoted; - //Rows = tdbp->Rows; - //Fpos = 0; - //Curpos = 0; - //AftRows = 0; - //CurNum = 0; - //Rbuf = 0; - //BufSize = tdbp->BufSize; - //Nparm = tdbp->Nparm; - //Qrp = tdbp->Qrp; - //Placed = false; } // end of TDBODBC copy constructor // Method @@ -328,7 +231,7 @@ PCOL TDBODBC::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) /* This used for Multiple(1) tables. Also prepare a connect string */ /* with a place holder to be used by SetFile. */ /***********************************************************************/ -PSZ TDBODBC::GetFile(PGLOBAL g) +PCSZ TDBODBC::GetFile(PGLOBAL g) { if (Connect) { char *p1, *p2; @@ -389,152 +292,15 @@ void TDBODBC::SetFile(PGLOBAL g, PSZ fn) DBQ = fn; } // end of SetFile -#if 0 -/******************************************************************/ -/* Convert an UTF-8 string to latin characters. */ -/******************************************************************/ -int TDBODBC::Decode(char *txt, char *buf, size_t n) -{ - uint dummy_errors; - uint32 len= copy_and_convert(buf, n, &my_charset_latin1, - txt, strlen(txt), - &my_charset_utf8_general_ci, - &dummy_errors); - buf[len]= '\0'; - return 0; -} // end of Decode - -/***********************************************************************/ -/* MakeSQL: make the SQL statement use with ODBC connection. */ -/* Note: when implementing EOM filtering, column only used in local */ -/* filter should be removed from column list. 
*/ -/***********************************************************************/ -bool TDBODBC::MakeSQL(PGLOBAL g, bool cnt) - { - char *schmp = NULL, *catp = NULL, buf[NAM_LEN * 3]; - int len; - bool oom = false, first = true; - PTABLE tablep = To_Table; - PCOL colp; - - if (Srcdef) { - if (strstr(Srcdef, "%s")) { - char *fil; - - fil = (To_CondFil) ? To_CondFil->Body : PlugDup(g, "1=1"); - Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil)); - Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil)); - } else - Query = new(g)STRING(g, 0, Srcdef); - - return false; - } // endif Srcdef - - // Allocate the string used to contain the Query - Query = new(g)STRING(g, 1023, "SELECT "); - - if (!cnt) { - if (Columns) { - // Normal SQL statement to retrieve results - for (colp = Columns; colp; colp = colp->GetNext()) - if (!colp->IsSpecial()) { - if (!first) - oom |= Query->Append(", "); - else - first = false; - - // Column name can be encoded in UTF-8 - Decode(colp->GetName(), buf, sizeof(buf)); - - if (Quote) { - // Put column name between identifier quotes in case in contains blanks - oom |= Query->Append(Quote); - oom |= Query->Append(buf); - oom |= Query->Append(Quote); - } else - oom |= Query->Append(buf); - - ((PEXTCOL)colp)->SetRank(++Ncol); - } // endif colp - - } else - // !Columns can occur for queries such that sql count(*) from... - // for which we will count the rows from sql * from... 
- oom |= Query->Append('*'); - - } else - // SQL statement used to retrieve the size of the result - oom |= Query->Append("count(*)"); - - oom |= Query->Append(" FROM "); - - if (Catalog && *Catalog) - catp = Catalog; - - //if (tablep->GetSchema()) - // schmp = (char*)tablep->GetSchema(); - //else - if (Schema && *Schema) - schmp = Schema; - - if (catp) { - oom |= Query->Append(catp); - - if (schmp) { - oom |= Query->Append('.'); - oom |= Query->Append(schmp); - } // endif schmp - - oom |= Query->Append('.'); - } else if (schmp) { - oom |= Query->Append(schmp); - oom |= Query->Append('.'); - } // endif schmp - - // Table name can be encoded in UTF-8 - Decode(TableName, buf, sizeof(buf)); - - if (Quote) { - // Put table name between identifier quotes in case in contains blanks - oom |= Query->Append(Quote); - oom |= Query->Append(buf); - oom |= Query->Append(Quote); - } else - oom |= Query->Append(buf); - - len = Query->GetLength(); - - if (To_CondFil) { - if (Mode == MODE_READ) { - oom |= Query->Append(" WHERE "); - oom |= Query->Append(To_CondFil->Body); - len = Query->GetLength() + 1; - } else - len += (strlen(To_CondFil->Body) + 256); - - } else - len += ((Mode == MODE_READX) ? 256 : 1); - - if (oom || Query->Resize(len)) { - strcpy(g->Message, "MakeSQL: Out of memory"); - return true; - } // endif oom - - if (trace) - htrc("Query=%s\n", Query->GetStr()); - - return false; - } // end of MakeSQL -#endif // 0 - /***********************************************************************/ /* MakeInsert: make the Insert statement used with ODBC connection. 
*/ /***********************************************************************/ bool TDBODBC::MakeInsert(PGLOBAL g) { - char *schmp = NULL, *catp = NULL, buf[NAM_LEN * 3]; + PCSZ schmp = NULL; + char *catp = NULL, buf[NAM_LEN * 3]; int len = 0; - bool b = false, oom = false; + bool oom, b = false; PTABLE tablep = To_Table; PCOL colp; @@ -571,32 +337,32 @@ bool TDBODBC::MakeInsert(PGLOBAL g) Query = new(g) STRING(g, len, "INSERT INTO "); if (catp) { - oom |= Query->Append(catp); + Query->Append(catp); if (schmp) { - oom |= Query->Append('.'); - oom |= Query->Append(schmp); + Query->Append('.'); + Query->Append(schmp); } // endif schmp - oom |= Query->Append('.'); + Query->Append('.'); } else if (schmp) { - oom |= Query->Append(schmp); - oom |= Query->Append('.'); + Query->Append(schmp); + Query->Append('.'); } // endif schmp if (Quote) { // Put table name between identifier quotes in case in contains blanks - oom |= Query->Append(Quote); - oom |= Query->Append(buf); - oom |= Query->Append(Quote); + Query->Append(Quote); + Query->Append(buf); + Query->Append(Quote); } else - oom |= Query->Append(buf); + Query->Append(buf); - oom |= Query->Append('('); + Query->Append('('); for (colp = Columns; colp; colp = colp->GetNext()) { if (b) - oom |= Query->Append(", "); + Query->Append(", "); else b = true; @@ -605,20 +371,20 @@ bool TDBODBC::MakeInsert(PGLOBAL g) if (Quote) { // Put column name between identifier quotes in case in contains blanks - oom |= Query->Append(Quote); - oom |= Query->Append(buf); - oom |= Query->Append(Quote); + Query->Append(Quote); + Query->Append(buf); + Query->Append(Quote); } else - oom |= Query->Append(buf); + Query->Append(buf); } // endfor colp - oom |= Query->Append(") VALUES ("); + Query->Append(") VALUES ("); for (int i = 0; i < Nparm; i++) - oom |= Query->Append("?,"); + Query->Append("?,"); - if (oom) + if ((oom = Query->IsTruncated())) strcpy(g->Message, "MakeInsert: Out of memory"); else Query->RepLast(')'); @@ -646,73 +412,6 @@ bool 
TDBODBC::BindParameters(PGLOBAL g) #if 0 /***********************************************************************/ -/* MakeCommand: make the Update or Delete statement to send to the */ -/* MySQL server. Limited to remote values and filtering. */ -/***********************************************************************/ -bool TDBODBC::MakeCommand(PGLOBAL g) - { - char *p, *stmt, name[68], *body = NULL, *qc = Ocp->GetQuoteChar(); - char *qrystr = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 1); - bool qtd = Quoted > 0; - int i = 0, k = 0; - - // Make a lower case copy of the originale query and change - // back ticks to the data source identifier quoting character - do { - qrystr[i] = (Qrystr[i] == '`') ? *qc : tolower(Qrystr[i]); - } while (Qrystr[i++]); - - if (To_CondFil && (p = strstr(qrystr, " where "))) { - p[7] = 0; // Remove where clause - Qrystr[(p - qrystr) + 7] = 0; - body = To_CondFil->Body; - stmt = (char*)PlugSubAlloc(g, NULL, strlen(qrystr) - + strlen(body) + 64); - } else - stmt = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 64); - - // Check whether the table name is equal to a keyword - // If so, it must be quoted in the original query - strlwr(strcat(strcat(strcpy(name, " "), Name), " ")); - - if (strstr(" update delete low_priority ignore quick from ", name)) { - strlwr(strcat(strcat(strcpy(name, qc), Name), qc)); - k += 2; - } else - strlwr(strcpy(name, Name)); // Not a keyword - - if ((p = strstr(qrystr, name))) { - for (i = 0; i < p - qrystr; i++) - stmt[i] = (Qrystr[i] == '`') ? *qc : Qrystr[i]; - - stmt[i] = 0; - k += i + (int)strlen(Name); - - if (qtd && *(p - 1) == ' ') - strcat(strcat(strcat(stmt, qc), TableName), qc); - else - strcat(stmt, TableName); - - i = (int)strlen(stmt); - - do { - stmt[i++] = (Qrystr[k] == '`') ? *qc : Qrystr[k]; - } while (Qrystr[k++]); - - if (body) - strcat(stmt, body); - - } else { - sprintf(g->Message, "Cannot use this %s command", - (Mode == MODE_UPDATE) ? 
"UPDATE" : "DELETE"); - return true; - } // endif p - - Query = new(g) STRING(g, 0, stmt); - return (!Query->GetSize()); - } // end of MakeCommand - -/***********************************************************************/ /* MakeUpdate: make the SQL statement to send to ODBC connection. */ /***********************************************************************/ char *TDBODBC::MakeUpdate(PGLOBAL g) @@ -829,35 +528,6 @@ int TDBODBC::Cardinality(PGLOBAL g) return Cardinal; } // end of Cardinality -#if 0 -/***********************************************************************/ -/* ODBC GetMaxSize: returns table size estimate in number of lines. */ -/***********************************************************************/ -int TDBODBC::GetMaxSize(PGLOBAL g) - { - if (MaxSize < 0) { - if (Mode == MODE_DELETE) - // Return 0 in mode DELETE in case of delete all. - MaxSize = 0; - else if (!Cardinality(NULL)) - MaxSize = 10; // To make MySQL happy - else if ((MaxSize = Cardinality(g)) < 0) - MaxSize = 12; // So we can see an error occurred - - } // endif MaxSize - - return MaxSize; - } // end of GetMaxSize - -/***********************************************************************/ -/* Return max size value. */ -/***********************************************************************/ -int TDBODBC::GetProgMax(PGLOBAL g) - { - return GetMaxSize(g); - } // end of GetProgMax -#endif // 0 - /***********************************************************************/ /* ODBC Access Method opening routine. 
*/ /* New method now that this routine is called recursively (last table */ @@ -1064,7 +734,7 @@ bool TDBODBC::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr) To_CondFil->Body= (char*)PlugSubAlloc(g, NULL, 0); *To_CondFil->Body= 0; - if ((To_CondFil = hc->CheckCond(g, To_CondFil, To_CondFil->Cond))) + if ((To_CondFil = hc->CheckCond(g, To_CondFil, Cond))) PlugSubAlloc(g, NULL, strlen(To_CondFil->Body) + 1); } // endif active_index @@ -1097,8 +767,6 @@ int TDBODBC::ReadDB(PGLOBAL g) if (trace > 1) htrc("ODBC ReadDB: R%d Mode=%d\n", GetTdb_No(), Mode); - //htrc("ODBC ReadDB: R%d Mode=%d key=%p link=%p Kindex=%p\n", - // GetTdb_No(), Mode, To_Key_Col, To_Link, To_Kindex); if (Mode == MODE_UPDATE || Mode == MODE_DELETE) { if (!Query && MakeCommand(g)) @@ -1118,12 +786,6 @@ int TDBODBC::ReadDB(PGLOBAL g) } // endif Mode - //if (To_Kindex) { - // // Direct access of ODBC tables is not implemented yet - // strcpy(g->Message, MSG(NO_ODBC_DIRECT)); - // return RC_FX; - // } // endif To_Kindex - /*********************************************************************/ /* Now start the reading process. */ /* Here is the place to fetch the line(s). */ @@ -1208,11 +870,6 @@ int TDBODBC::DeleteDB(PGLOBAL g, int irc) /***********************************************************************/ void TDBODBC::CloseDB(PGLOBAL g) { -//if (To_Kindex) { -// To_Kindex->Close(); -// To_Kindex = NULL; -// } // endif - if (Ocp) Ocp->Close(); @@ -1227,20 +884,13 @@ void TDBODBC::CloseDB(PGLOBAL g) /***********************************************************************/ /* ODBCCOL public constructor. */ /***********************************************************************/ -ODBCCOL::ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) +ODBCCOL::ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) : EXTCOL(cdp, tdbp, cprec, i, am) { // Set additional ODBC access method information for column. 
-//Crp = NULL; -//Long = Precision; -//strcpy(F_Date, cdp->F_Date); -//To_Val = NULL; Slen = 0; StrLen = &Slen; Sqlbuf = NULL; -//Bufp = NULL; -//Blkp = NULL; -//Rank = 0; // Not known yet } // end of ODBCCOL constructor /***********************************************************************/ @@ -1248,17 +898,9 @@ ODBCCOL::ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) /***********************************************************************/ ODBCCOL::ODBCCOL(void) : EXTCOL() { -//Crp = NULL; -//Buf_Type = TYPE_INT; // This is a count(*) column -//// Set additional Dos access method information for column. -//Long = sizeof(int); -//To_Val = NULL; Slen = 0; StrLen = &Slen; Sqlbuf = NULL; -//Bufp = NULL; -//Blkp = NULL; -//Rank = 1; } // end of ODBCCOL constructor /***********************************************************************/ @@ -1267,66 +909,11 @@ ODBCCOL::ODBCCOL(void) : EXTCOL() /***********************************************************************/ ODBCCOL::ODBCCOL(ODBCCOL *col1, PTDB tdbp) : EXTCOL(col1, tdbp) { -//Crp = col1->Crp; -//Long = col1->Long; -//strcpy(F_Date, col1->F_Date); -//To_Val = col1->To_Val; Slen = col1->Slen; StrLen = col1->StrLen; Sqlbuf = col1->Sqlbuf; -//Bufp = col1->Bufp; -//Blkp = col1->Blkp; -//Rank = col1->Rank; } // end of ODBCCOL copy constructor -#if 0 -/***********************************************************************/ -/* SetBuffer: prepare a column block for write operation. 
*/ -/***********************************************************************/ -bool ODBCCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check) - { - if (!(To_Val = value)) { - sprintf(g->Message, MSG(VALUE_ERROR), Name); - return true; - } else if (Buf_Type == value->GetType()) { - // Values are of the (good) column type - if (Buf_Type == TYPE_DATE) { - // If any of the date values is formatted - // output format must be set for the receiving table - if (GetDomain() || ((DTVAL *)value)->IsFormatted()) - goto newval; // This will make a new value; - - } else if (Buf_Type == TYPE_DOUBLE) - // Float values must be written with the correct (column) precision - // Note: maybe this should be forced by ShowValue instead of this ? - value->SetPrec(GetScale()); - - Value = value; // Directly access the external value - } else { - // Values are not of the (good) column type - if (check) { - sprintf(g->Message, MSG(TYPE_VALUE_ERR), Name, - GetTypeName(Buf_Type), GetTypeName(value->GetType())); - return true; - } // endif check - - newval: - if (InitValue(g)) // Allocate the matching value block - return true; - - } // endif's Value, Buf_Type - - // Because Colblk's have been made from a copy of the original TDB in - // case of Update, we must reset them to point to the original one. - if (To_Tdb->GetOrig()) - To_Tdb = (PTDB)To_Tdb->GetOrig(); - - // Set the Column - Status = (ok) ? BUF_EMPTY : BUF_NO; - return false; - } // end of SetBuffer -#endif // 0 - /***********************************************************************/ /* ReadColumn: when SQLFetch is used there is nothing to do as the */ /* column buffer was bind to the record set. This is also the case */ @@ -1715,7 +1302,7 @@ int TDBXDBC::DeleteDB(PGLOBAL g, int irc) /***********************************************************************/ /* XSRCCOL public constructor. 
*/ /***********************************************************************/ -XSRCCOL::XSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) +XSRCCOL::XSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) : ODBCCOL(cdp, tdbp, cprec, i, am) { // Set additional ODBC access method information for column. diff --git a/storage/connect/tabodbc.h b/storage/connect/tabodbc.h index fcefad5647b..0ca88b60858 100644 --- a/storage/connect/tabodbc.h +++ b/storage/connect/tabodbc.h @@ -1,7 +1,7 @@ /*************** Tabodbc H Declares Source Code File (.H) **************/ -/* Name: TABODBC.H Version 1.8 */ +/* Name: TABODBC.H Version 1.9 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2000-2015 */ +/* (C) Copyright to the author Olivier BERTRAND 2000-2017 */ /* */ /* This file contains the TDBODBC classes declares. */ /***********************************************************************/ @@ -33,14 +33,7 @@ public: // Implementation virtual const char *GetType(void) {return "ODBC";} PSZ GetConnect(void) {return Connect;} - //PSZ GetTabname(void) {return Tabname;} - //PSZ GetTabschema(void) {return Tabschema;} - //PSZ GetTabcat(void) {return Tabcat;} - //PSZ GetSrcdef(void) {return Srcdef;} - //char GetSep(void) {return (Sep) ? 
*Sep : 0;} - //int GetQuoted(void) {return Quoted;} int GetCatver(void) {return Catver;} - //int GetOptions(void) {return Options;} // Methods virtual int Indexable(void) {return 2;} @@ -50,27 +43,7 @@ public: protected: // Members PSZ Connect; /* ODBC connection string */ - //PSZ Tabname; /* External table name */ - //PSZ Tabschema; /* External table schema */ - //PSZ Username; /* User connect name */ - //PSZ Password; /* Password connect info */ - //PSZ Tabcat; /* External table catalog */ - //PSZ Tabtyp; /* Catalog table type */ - //PSZ Colpat; /* Catalog column pattern */ - //PSZ Srcdef; /* The source table SQL definition */ - //PSZ Qchar; /* Identifier quoting character */ - //PSZ Qrystr; /* The original query */ - //PSZ Sep; /* Decimal separator */ int Catver; /* ODBC version for catalog functions */ - //int Options; /* Open connection options */ - //int Cto; /* Open connection timeout */ - //int Qto; /* Query (command) timeout */ - //int Quoted; /* Identifier quoting level */ - //int Maxerr; /* Maxerr for an Exec table */ - //int Maxres; /* Maxres for a catalog table */ - //int Memory; /* Put result set in memory */ - //bool Scrollable; /* Use scrollable cursor */ - //bool Xsrc; /* Execution type */ bool UseCnc; /* Use SQLConnect (!SQLDriverConnect) */ }; // end of ODBCDEF @@ -96,20 +69,16 @@ class TDBODBC : public TDBEXT { // Methods virtual PTDB Clone(PTABS t); -//virtual int GetRecpos(void); virtual bool SetRecpos(PGLOBAL g, int recpos); - virtual PSZ GetFile(PGLOBAL g); + virtual PCSZ GetFile(PGLOBAL g); virtual void SetFile(PGLOBAL g, PSZ fn); virtual void ResetSize(void); -//virtual int GetAffectedRows(void) {return AftRows;} - virtual PSZ GetServer(void) {return "ODBC";} + virtual PCSZ GetServer(void) {return "ODBC";} virtual int Indexable(void) {return 2;} // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); virtual int Cardinality(PGLOBAL g); -//virtual int GetMaxSize(PGLOBAL g); -//virtual int GetProgMax(PGLOBAL g); 
virtual bool OpenDB(PGLOBAL g); virtual int ReadDB(PGLOBAL g); virtual int WriteDB(PGLOBAL g); @@ -119,14 +88,8 @@ class TDBODBC : public TDBEXT { protected: // Internal functions -//int Decode(char *utf, char *buf, size_t n); -//bool MakeSQL(PGLOBAL g, bool cnt); bool MakeInsert(PGLOBAL g); -//virtual bool MakeCommand(PGLOBAL g); -//bool MakeFilter(PGLOBAL g, bool c); bool BindParameters(PGLOBAL g); -//char *MakeUpdate(PGLOBAL g); -//char *MakeDelete(PGLOBAL g); // Members ODBConn *Ocp; // Points to an ODBC connection class @@ -145,15 +108,12 @@ class ODBCCOL : public EXTCOL { friend class TDBODBC; public: // Constructors - ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "ODBC"); + ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "ODBC"); ODBCCOL(ODBCCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation virtual int GetAmType(void) {return TYPE_AM_ODBC;} SQLLEN *GetStrLen(void) {return StrLen;} -// int GetRank(void) {return Rank;} -// PVBLK GetBlkp(void) {return Blkp;} -// void SetCrp(PCOLRES crp) {Crp = crp;} // Methods //virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check); @@ -162,7 +122,6 @@ class ODBCCOL : public EXTCOL { void AllocateBuffers(PGLOBAL g, int rows); void *GetBuffer(DWORD rows); SWORD GetBuflen(void); -// void Print(PGLOBAL g, FILE *, uint); protected: // Constructor for count(*) column @@ -170,14 +129,8 @@ class ODBCCOL : public EXTCOL { // Members TIMESTAMP_STRUCT *Sqlbuf; // To get SQL_TIMESTAMP's -//PCOLRES Crp; // To storage result -//void *Bufp; // To extended buffer -//PVBLK Blkp; // To Value Block -//char F_Date[12]; // Internal Date format -//PVAL To_Val; // To value used for Insert SQLLEN *StrLen; // As returned by ODBC SQLLEN Slen; // Used with Fetch -//int Rank; // Rank (position) number in the query }; // end of class ODBCCOL /***********************************************************************/ @@ -226,16 +179,15 @@ class XSRCCOL : public ODBCCOL { friend class TDBXDBC; 
public: // Constructors - XSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "ODBC"); + XSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "ODBC"); XSRCCOL(XSRCCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation -//virtual int GetAmType(void) {return TYPE_AM_ODBC;} // Methods virtual void ReadColumn(PGLOBAL g); virtual void WriteColumn(PGLOBAL g); -// void Print(PGLOBAL g, FILE *, uint); +// void Printf(PGLOBAL g, FILE *, uint); protected: // Members @@ -287,10 +239,10 @@ class TDBOTB : public TDBDRV { virtual PQRYRES GetResult(PGLOBAL g); // Members - char *Dsn; // Points to connection string - char *Schema; // Points to schema name or NULL - char *Tab; // Points to ODBC table name or pattern - char *Tabtyp; // Points to ODBC table type + PCSZ Dsn; // Points to connection string + PCSZ Schema; // Points to schema name or NULL + PCSZ Tab; // Points to ODBC table name or pattern + PCSZ Tabtyp; // Points to ODBC table type ODBCPARM Ops; // Additional parameters }; // end of class TDBOTB diff --git a/storage/connect/tabpivot.cpp b/storage/connect/tabpivot.cpp index c6d32884417..76a46e6899b 100644 --- a/storage/connect/tabpivot.cpp +++ b/storage/connect/tabpivot.cpp @@ -106,214 +106,211 @@ bool PIVAID::SkipColumn(PCOLRES crp, char *skc) /* Make the Pivot table column list. */ /***********************************************************************/ PQRYRES PIVAID::MakePivotColumns(PGLOBAL g) - { +{ char *p, *query, *colname, *skc, buf[64]; - int rc, ndif, nblin, w = 0; + int ndif, nblin, w = 0; bool b = false; PVAL valp; PQRYRES qrp; PCOLRES *pcrp, crp, fncrp = NULL; - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return NULL; - } // endif jump_level - - if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) { - goto err; - } // endif rc - - // Are there columns to skip? 
- if (Skcol) { - uint n = strlen(Skcol); - - skc = (char*)PlugSubAlloc(g, NULL, n + 2); - strcpy(skc, Skcol); - skc[n + 1] = 0; - - // Replace ; by nulls in skc - for (p = strchr(skc, ';'); p; p = strchr(p, ';')) - *p++ = 0; - - } else - skc = NULL; - - if (!Tabsrc && Tabname) { - // Locate the query - query = (char*)PlugSubAlloc(g, NULL, strlen(Tabname) + 26); - sprintf(query, "SELECT * FROM `%s` LIMIT 1", Tabname); - } else if (!Tabsrc) { - strcpy(g->Message, MSG(SRC_TABLE_UNDEF)); - return NULL; - } else - query = Tabsrc; - - // Open a MySQL connection for this table - if (!Myc.Open(g, Host, Database, User, Pwd, Port)) { - b = true; - - // Returned values must be in their original character set - if (Myc.ExecSQL(g, "SET character_set_results=NULL", &w) == RC_FX) - goto err; - else - Myc.FreeResult(); - - } else - return NULL; - - // Send the source command to MySQL - if (Myc.ExecSQL(g, query, &w) == RC_FX) - goto err; - - // We must have a storage query to get pivot column values - if (!(Qryp = Myc.GetResult(g, true))) - goto err; - - if (!Fncol) { - for (crp = Qryp->Colresp; crp; crp = crp->Next) - if ((!Picol || stricmp(Picol, crp->Name)) && !SkipColumn(crp, skc)) - Fncol = crp->Name; - - if (!Fncol) { - strcpy(g->Message, MSG(NO_DEF_FNCCOL)); - goto err; - } // endif Fncol - - } // endif Fncol - - if (!Picol) { - // Find default Picol as the last one not equal to Fncol - for (crp = Qryp->Colresp; crp; crp = crp->Next) - if (stricmp(Fncol, crp->Name) && !SkipColumn(crp, skc)) - Picol = crp->Name; - - if (!Picol) { - strcpy(g->Message, MSG(NO_DEF_PIVOTCOL)); - goto err; - } // endif Picol - - } // endif picol - - // Prepare the column list - for (pcrp = &Qryp->Colresp; crp = *pcrp; ) - if (SkipColumn(crp, skc)) { - // Ignore this column - *pcrp = crp->Next; - } else if (!stricmp(Picol, crp->Name)) { - if (crp->Nulls) { - sprintf(g->Message, "Pivot column %s cannot be nullable", Picol); - goto err; - } // endif Nulls - - Rblkp = crp->Kdata; - *pcrp = crp->Next; 
- } else if (!stricmp(Fncol, crp->Name)) { - fncrp = crp; - *pcrp = crp->Next; - } else - pcrp = &crp->Next; - - if (!Rblkp) { - strcpy(g->Message, MSG(NO_DEF_PIVOTCOL)); - goto err; - } else if (!fncrp) { - strcpy(g->Message, MSG(NO_DEF_FNCCOL)); - goto err; - } // endif - - if (Tabsrc) { - Myc.Close(); - b = false; - - // Before calling sort, initialize all - nblin = Qryp->Nblin; - - Index.Size = nblin * sizeof(int); - Index.Sub = TRUE; // Should be small enough - - if (!PlgDBalloc(g, NULL, Index)) - return NULL; - - Offset.Size = (nblin + 1) * sizeof(int); - Offset.Sub = TRUE; // Should be small enough - - if (!PlgDBalloc(g, NULL, Offset)) - return NULL; - - ndif = Qsort(g, nblin); - - if (ndif < 0) // error - return NULL; - - } else { - // The query was limited, we must get pivot column values - // Returned values must be in their original character set -// if (Myc.ExecSQL(g, "SET character_set_results=NULL", &w) == RC_FX) -// goto err; - - query = (char*)PlugSubAlloc(g, NULL, 0); - sprintf(query, "SELECT DISTINCT `%s` FROM `%s`", Picol, Tabname); - PlugSubAlloc(g, NULL, strlen(query) + 1); - Myc.FreeResult(); - - // Send the source command to MySQL - if (Myc.ExecSQL(g, query, &w) == RC_FX) - goto err; - - // We must have a storage query to get pivot column values - if (!(qrp = Myc.GetResult(g, true))) - goto err; - - Myc.Close(); - b = false; - - // Get the column list - crp = qrp->Colresp; - Rblkp = crp->Kdata; - ndif = qrp->Nblin; - } // endif Tabsrc - - // Allocate the Value used to retieve column names - if (!(valp = AllocateValue(g, Rblkp->GetType(), - Rblkp->GetVlen(), - Rblkp->GetPrec()))) - return NULL; - - // Now make the functional columns - for (int i = 0; i < ndif; i++) { - if (i) { - crp = (PCOLRES)PlugSubAlloc(g, NULL, sizeof(COLRES)); - memcpy(crp, fncrp, sizeof(COLRES)); - } else - crp = fncrp; - - // Get the value that will be the generated column name - if (Tabsrc) - valp->SetValue_pvblk(Rblkp, Pex[Pof[i]]); - else - 
valp->SetValue_pvblk(Rblkp, i); - - colname = valp->GetCharString(buf); - crp->Name = PlugDup(g, colname); - crp->Flag = 1; - - // Add this column - *pcrp = crp; - crp->Next = NULL; - pcrp = &crp->Next; - } // endfor i - - // We added ndif columns and removed 2 (picol and fncol) - Qryp->Nbcol += (ndif - 2); - return Qryp; + try { + // Are there columns to skip? + if (Skcol) { + uint n = strlen(Skcol); + + skc = (char*)PlugSubAlloc(g, NULL, n + 2); + strcpy(skc, Skcol); + skc[n + 1] = 0; + + // Replace ; by nulls in skc + for (p = strchr(skc, ';'); p; p = strchr(p, ';')) + *p++ = 0; + + } else + skc = NULL; + + if (!Tabsrc && Tabname) { + // Locate the query + query = (char*)PlugSubAlloc(g, NULL, strlen(Tabname) + 26); + sprintf(query, "SELECT * FROM `%s` LIMIT 1", Tabname); + } else if (!Tabsrc) { + strcpy(g->Message, MSG(SRC_TABLE_UNDEF)); + goto err; + } else + query = (char*)Tabsrc; + + // Open a MySQL connection for this table + if (!Myc.Open(g, Host, Database, User, Pwd, Port)) { + b = true; + + // Returned values must be in their original character set + if (Myc.ExecSQL(g, "SET character_set_results=NULL", &w) == RC_FX) + goto err; + else + Myc.FreeResult(); + + } else + goto err; + + // Send the source command to MySQL + if (Myc.ExecSQL(g, query, &w) == RC_FX) + goto err; + + // We must have a storage query to get pivot column values + if (!(Qryp = Myc.GetResult(g, true))) + goto err; + + if (!Fncol) { + for (crp = Qryp->Colresp; crp; crp = crp->Next) + if ((!Picol || stricmp(Picol, crp->Name)) && !SkipColumn(crp, skc)) + Fncol = crp->Name; + + if (!Fncol) { + strcpy(g->Message, MSG(NO_DEF_FNCCOL)); + goto err; + } // endif Fncol + + } // endif Fncol + + if (!Picol) { + // Find default Picol as the last one not equal to Fncol + for (crp = Qryp->Colresp; crp; crp = crp->Next) + if (stricmp(Fncol, crp->Name) && !SkipColumn(crp, skc)) + Picol = crp->Name; + + if (!Picol) { + strcpy(g->Message, MSG(NO_DEF_PIVOTCOL)); + goto err; + } // endif Picol + + } // endif 
picol + + // Prepare the column list + for (pcrp = &Qryp->Colresp; crp = *pcrp; ) + if (SkipColumn(crp, skc)) { + // Ignore this column + *pcrp = crp->Next; + } else if (!stricmp(Picol, crp->Name)) { + if (crp->Nulls) { + sprintf(g->Message, "Pivot column %s cannot be nullable", Picol); + goto err; + } // endif Nulls + + Rblkp = crp->Kdata; + *pcrp = crp->Next; + } else if (!stricmp(Fncol, crp->Name)) { + fncrp = crp; + *pcrp = crp->Next; + } else + pcrp = &crp->Next; + + if (!Rblkp) { + strcpy(g->Message, MSG(NO_DEF_PIVOTCOL)); + goto err; + } else if (!fncrp) { + strcpy(g->Message, MSG(NO_DEF_FNCCOL)); + goto err; + } // endif + + if (Tabsrc) { + Myc.Close(); + b = false; + + // Before calling sort, initialize all + nblin = Qryp->Nblin; + + Index.Size = nblin * sizeof(int); + Index.Sub = TRUE; // Should be small enough + + if (!PlgDBalloc(g, NULL, Index)) + goto err; + + Offset.Size = (nblin + 1) * sizeof(int); + Offset.Sub = TRUE; // Should be small enough + + if (!PlgDBalloc(g, NULL, Offset)) + goto err; + + ndif = Qsort(g, nblin); + + if (ndif < 0) // error + goto err; + + } else { + // The query was limited, we must get pivot column values + // Returned values must be in their original character set + // if (Myc.ExecSQL(g, "SET character_set_results=NULL", &w) == RC_FX) + // goto err; + + query = (char*)PlugSubAlloc(g, NULL, 0); + sprintf(query, "SELECT DISTINCT `%s` FROM `%s`", Picol, Tabname); + PlugSubAlloc(g, NULL, strlen(query) + 1); + Myc.FreeResult(); + + // Send the source command to MySQL + if (Myc.ExecSQL(g, query, &w) == RC_FX) + goto err; + + // We must have a storage query to get pivot column values + if (!(qrp = Myc.GetResult(g, true))) + goto err; + + Myc.Close(); + b = false; + + // Get the column list + crp = qrp->Colresp; + Rblkp = crp->Kdata; + ndif = qrp->Nblin; + } // endif Tabsrc + + // Allocate the Value used to retieve column names + if (!(valp = AllocateValue(g, Rblkp->GetType(), + Rblkp->GetVlen(), + Rblkp->GetPrec()))) + goto err; + 
+ // Now make the functional columns + for (int i = 0; i < ndif; i++) { + if (i) { + crp = (PCOLRES)PlugSubAlloc(g, NULL, sizeof(COLRES)); + memcpy(crp, fncrp, sizeof(COLRES)); + } else + crp = fncrp; + + // Get the value that will be the generated column name + if (Tabsrc) + valp->SetValue_pvblk(Rblkp, Pex[Pof[i]]); + else + valp->SetValue_pvblk(Rblkp, i); + + colname = valp->GetCharString(buf); + crp->Name = PlugDup(g, colname); + crp->Flag = 1; + + // Add this column + *pcrp = crp; + crp->Next = NULL; + pcrp = &crp->Next; + } // endfor i + + // We added ndif columns and removed 2 (picol and fncol) + Qryp->Nbcol += (ndif - 2); + return Qryp; + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + } catch (const char *msg) { + strcpy(g->Message, msg); + } // end catch err: if (b) Myc.Close(); return NULL; - } // end of MakePivotColumns +} // end of MakePivotColumns /***********************************************************************/ /* PIVAID: Compare routine for sorting pivot column values. 
*/ diff --git a/storage/connect/tabpivot.h b/storage/connect/tabpivot.h index 07d5c3e456b..6c2d53e9527 100644 --- a/storage/connect/tabpivot.h +++ b/storage/connect/tabpivot.h @@ -32,16 +32,16 @@ class PIVAID : public CSORT { protected: // Members MYSQLC Myc; // MySQL connection class - char *Host; // Host machine to use - char *User; // User logon info - char *Pwd; // Password logon info - char *Database; // Database to be used by server + PCSZ Host; // Host machine to use + PCSZ User; // User logon info + PCSZ Pwd; // Password logon info + PCSZ Database; // Database to be used by server PQRYRES Qryp; // Points to Query result block - char *Tabname; // Name of source table - char *Tabsrc; // SQL of source table - char *Picol; // Pivot column name - char *Fncol; // Function column name - char *Skcol; // Skipped columns + PCSZ Tabname; // Name of source table + PCSZ Tabsrc; // SQL of source table + PCSZ Picol; // Pivot column name + PCSZ Fncol; // Function column name + PCSZ Skcol; // Skipped columns PVBLK Rblkp; // The value block of the pivot column int Port; // MySQL port number }; // end of class PIVAID diff --git a/storage/connect/tabsys.cpp b/storage/connect/tabsys.cpp index 2ddd1c3c753..7f0d9881298 100644 --- a/storage/connect/tabsys.cpp +++ b/storage/connect/tabsys.cpp @@ -1,9 +1,9 @@ /************* TabSys C++ Program Source Code File (.CPP) **************/ /* PROGRAM NAME: TABSYS */ /* ------------- */ -/* Version 2.3 */ +/* Version 2.4 */ /* */ -/* Author Olivier BERTRAND 2004-2015 */ +/* Author Olivier BERTRAND 2004-2017 */ /* */ /* This program are the INI/CFG tables classes. */ /***********************************************************************/ @@ -355,7 +355,7 @@ void TDBINI::CloseDB(PGLOBAL) /***********************************************************************/ /* INICOL public constructor. 
*/ /***********************************************************************/ -INICOL::INICOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ) +INICOL::INICOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ) : COLBLK(cdp, tdbp, i) { if (cprec) { @@ -511,12 +511,12 @@ void INICOL::WriteColumn(PGLOBAL g) if (strlen(p) > (unsigned)Long) { sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long); - longjmp(g->jumper[g->jump_level], 31); - } else if (Flag == 1) { + throw 31; + } else if (Flag == 1) { if (tdbp->Mode == MODE_UPDATE) { strcpy(g->Message, MSG(NO_SEC_UPDATE)); - longjmp(g->jumper[g->jump_level], 31); - } else if (*p) { + throw 31; + } else if (*p) { tdbp->Section = p; } else tdbp->Section = NULL; @@ -524,8 +524,8 @@ void INICOL::WriteColumn(PGLOBAL g) return; } else if (!tdbp->Section) { strcpy(g->Message, MSG(SEC_NAME_FIRST)); - longjmp(g->jumper[g->jump_level], 31); - } // endif's + throw 31; + } // endif's /*********************************************************************/ /* Updating must be done only when not in checking pass. */ @@ -536,8 +536,8 @@ void INICOL::WriteColumn(PGLOBAL g) if (!rc) { sprintf(g->Message, "Error %d writing to %s", GetLastError(), tdbp->Ifile); - longjmp(g->jumper[g->jump_level], 31); - } // endif rc + throw 31; + } // endif rc } // endif Status @@ -769,7 +769,7 @@ int TDBXIN::DeleteDB(PGLOBAL g, int irc) /***********************************************************************/ /* XINCOL public constructor. 
*/ /***********************************************************************/ -XINCOL::XINCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) +XINCOL::XINCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) : INICOL(cdp, tdbp, cprec, i, am) { } // end of XINCOL constructor @@ -837,12 +837,12 @@ void XINCOL::WriteColumn(PGLOBAL g) if (strlen(p) > (unsigned)Long) { sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long); - longjmp(g->jumper[g->jump_level], 31); - } else if (Flag == 1) { + throw 31; + } else if (Flag == 1) { if (tdbp->Mode == MODE_UPDATE) { strcpy(g->Message, MSG(NO_SEC_UPDATE)); - longjmp(g->jumper[g->jump_level], 31); - } else if (*p) { + throw 31; + } else if (*p) { tdbp->Section = p; } else tdbp->Section = NULL; @@ -851,8 +851,8 @@ void XINCOL::WriteColumn(PGLOBAL g) } else if (Flag == 2) { if (tdbp->Mode == MODE_UPDATE) { strcpy(g->Message, MSG(NO_KEY_UPDATE)); - longjmp(g->jumper[g->jump_level], 31); - } else if (*p) { + throw 31; + } else if (*p) { tdbp->Keycur = p; } else tdbp->Keycur = NULL; @@ -860,8 +860,8 @@ void XINCOL::WriteColumn(PGLOBAL g) return; } else if (!tdbp->Section || !tdbp->Keycur) { strcpy(g->Message, MSG(SEC_KEY_FIRST)); - longjmp(g->jumper[g->jump_level], 31); - } // endif's + throw 31; + } // endif's /*********************************************************************/ /* Updating must be done only when not in checking pass. 
*/ @@ -872,8 +872,8 @@ void XINCOL::WriteColumn(PGLOBAL g) if (!rc) { sprintf(g->Message, "Error %d writing to %s", GetLastError(), tdbp->Ifile); - longjmp(g->jumper[g->jump_level], 31); - } // endif rc + throw 31; + } // endif rc } // endif Status diff --git a/storage/connect/tabsys.h b/storage/connect/tabsys.h index ff1b8335690..0c6017af177 100644 --- a/storage/connect/tabsys.h +++ b/storage/connect/tabsys.h @@ -61,8 +61,8 @@ class TDBINI : public TDBASE { virtual int GetRecpos(void) {return N;} virtual int GetProgCur(void) {return N;} //virtual int GetAffectedRows(void) {return 0;} - virtual PSZ GetFile(PGLOBAL g) {return Ifile;} - virtual void SetFile(PGLOBAL g, PSZ fn) {Ifile = fn;} + virtual PCSZ GetFile(PGLOBAL g) {return Ifile;} + virtual void SetFile(PGLOBAL g, PCSZ fn) {Ifile = fn;} virtual void ResetDB(void) {Seclist = Section = NULL; N = 0;} virtual void ResetSize(void) {MaxSize = -1; Seclist = NULL;} virtual int RowNumber(PGLOBAL g, bool b = false) {return N;} @@ -80,7 +80,7 @@ class TDBINI : public TDBASE { protected: // Members - char *Ifile; // The INI file + PCSZ Ifile; // The INI file char *Seclist; // The section list char *Section; // The current section int Seclen; // Length of seclist buffer @@ -93,7 +93,7 @@ class TDBINI : public TDBASE { class INICOL : public COLBLK { public: // Constructors - INICOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "INI"); + INICOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "INI"); INICOL(INICOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation @@ -165,7 +165,7 @@ class TDBXIN : public TDBINI { class XINCOL : public INICOL { public: // Constructors - XINCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "INI"); + XINCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "INI"); XINCOL(XINCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation diff --git a/storage/connect/tabutil.cpp b/storage/connect/tabutil.cpp index ad939db86be..158cf744a4a 
100644 --- a/storage/connect/tabutil.cpp +++ b/storage/connect/tabutil.cpp @@ -119,7 +119,8 @@ PQRYRES TabColumns(PGLOBAL g, THD *thd, const char *db, FLD_LENGTH, FLD_SCALE, FLD_RADIX, FLD_NULL, FLD_REM, FLD_NO, FLD_CHARSET}; unsigned int length[] = {0, 4, 16, 4, 4, 4, 4, 4, 0, 32, 32}; - char *pn, *tn, *fld, *colname, *chset, *fmt, v; + PCSZ fmt; + char *pn, *tn, *fld, *colname, *chset, v; int i, n, ncol = sizeof(buftyp) / sizeof(int); int prec, len, type, scale; int zconv = GetConvSize(); @@ -227,7 +228,7 @@ PQRYRES TabColumns(PGLOBAL g, THD *thd, const char *db, fmt = MyDateFmt(fp->type()); prec = len = strlen(fmt); } else { - fmt = (char*)fp->option_struct->dateformat; + fmt = (PCSZ)fp->option_struct->dateformat; prec = len = fp->field_length; } // endif mysql @@ -314,7 +315,7 @@ bool PRXDEF::DefineAM(PGLOBAL g, LPCSTR, int) strcpy(g->Message, "Missing object table definition"); return true; } else - tab = "Noname"; + tab = PlugDup(g, "Noname"); } else // Analyze the table name, it may have the format: [dbname.]tabname @@ -626,7 +627,7 @@ void TDBPRX::RemoveNext(PTABLE tp) /***********************************************************************/ /* PRXCOL public constructor. 
*/ /***********************************************************************/ -PRXCOL::PRXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) +PRXCOL::PRXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) : COLBLK(cdp, tdbp, i) { if (cprec) { @@ -741,7 +742,14 @@ void PRXCOL::ReadColumn(PGLOBAL g) if (Nullable) Value->SetNull(Value->IsNull()); - } // endif Colp + } else { + Value->Reset(); + + // Set null when applicable + if (Nullable) + Value->SetNull(true); + + } // endif Colp } // end of ReadColumn diff --git a/storage/connect/tabutil.h b/storage/connect/tabutil.h index 8e56aecff86..62678508ca1 100644 --- a/storage/connect/tabutil.h +++ b/storage/connect/tabutil.h @@ -71,7 +71,7 @@ class DllExport TDBPRX : public TDBASE { virtual int GetRecpos(void) {return Tdbp->GetRecpos();} virtual void ResetDB(void) {Tdbp->ResetDB();} virtual int RowNumber(PGLOBAL g, bool b = FALSE); - virtual PSZ GetServer(void) {return (Tdbp) ? Tdbp->GetServer() : (PSZ)"?";} + virtual PCSZ GetServer(void) {return (Tdbp) ? 
Tdbp->GetServer() : (PSZ)"?";} // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); @@ -101,7 +101,7 @@ class DllExport PRXCOL : public COLBLK { friend class TDBOCCUR; public: // Constructors - PRXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "PRX"); + PRXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "PRX"); PRXCOL(PRXCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation diff --git a/storage/connect/tabvct.cpp b/storage/connect/tabvct.cpp index 282fb55a43c..533986e44da 100644 --- a/storage/connect/tabvct.cpp +++ b/storage/connect/tabvct.cpp @@ -1,11 +1,11 @@ /************* TabVct C++ Program Source Code File (.CPP) **************/ /* PROGRAM NAME: TABVCT */ /* ------------- */ -/* Version 3.8 */ +/* Version 3.9 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 1999-2015 */ +/* (C) Copyright to the author Olivier BERTRAND 1999-2017 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -174,7 +174,7 @@ bool VCTDEF::Erase(char *filename) /***********************************************************************/ int VCTDEF::MakeFnPattern(char *fpat) { - char pat[8]; + char pat[16]; #if defined(__WIN__) char drive[_MAX_DRIVE]; #else @@ -490,15 +490,15 @@ void VCTCOL::ReadBlock(PGLOBAL g) #if defined(_DEBUG) if (!Blk) { strcpy(g->Message, MSG(TO_BLK_IS_NULL)); - longjmp(g->jumper[g->jump_level], 58); - } // endif + throw 58; + } // endif #endif /*********************************************************************/ /* Read column block according to used access method. 
*/ /*********************************************************************/ if (txfp->ReadBlock(g, this)) - longjmp(g->jumper[g->jump_level], 6); + throw 6; ColBlk = txfp->CurBlk; ColPos = -1; // Any invalid position @@ -518,15 +518,15 @@ void VCTCOL::WriteBlock(PGLOBAL g) #if defined(_DEBUG) if (!Blk) { strcpy(g->Message, MSG(BLK_IS_NULL)); - longjmp(g->jumper[g->jump_level], 56); - } // endif + throw 56; + } // endif #endif /*******************************************************************/ /* Write column block according to used access method. */ /*******************************************************************/ if (txfp->WriteBlock(g, this)) - longjmp(g->jumper[g->jump_level], 6); + throw 6; Modif = 0; } // endif Modif diff --git a/storage/connect/tabvir.cpp b/storage/connect/tabvir.cpp index 155c71fe268..84b3dd1787b 100644 --- a/storage/connect/tabvir.cpp +++ b/storage/connect/tabvir.cpp @@ -1,6 +1,6 @@ /************* tdbvir C++ Program Source Code File (.CPP) **************/ -/* PROGRAM NAME: tdbvir.cpp Version 1.1 */ -/* (C) Copyright to the author Olivier BERTRAND 2014 */ +/* PROGRAM NAME: tdbvir.cpp Version 1.2 */ +/* (C) Copyright to the author Olivier BERTRAND 2014-2017 */ /* This program are the VIR classes DB execution routines. */ /***********************************************************************/ @@ -269,7 +269,7 @@ int TDBVIR::DeleteDB(PGLOBAL g, int) /***********************************************************************/ /* VIRCOL public constructor. 
*/ /***********************************************************************/ -VIRCOL::VIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ) +VIRCOL::VIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ) : COLBLK(cdp, tdbp, i) { if (cprec) { @@ -289,8 +289,8 @@ void VIRCOL::ReadColumn(PGLOBAL g) { // This should never be called sprintf(g->Message, "ReadColumn: Column %s is not virtual", Name); - longjmp(g->jumper[g->jump_level], TYPE_COLBLK); - } // end of ReadColumn + throw TYPE_COLBLK; +} // end of ReadColumn /* ---------------------------TDBVICL class -------------------------- */ diff --git a/storage/connect/tabvir.h b/storage/connect/tabvir.h index a53aceaeceb..e7313bbae67 100644 --- a/storage/connect/tabvir.h +++ b/storage/connect/tabvir.h @@ -76,7 +76,7 @@ class VIRCOL : public COLBLK { friend class TDBVIR; public: // Constructors - VIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "VIRTUAL"); + VIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "VIRTUAL"); // Implementation virtual int GetAmType(void) {return TYPE_AM_VIR;} diff --git a/storage/connect/tabwmi.cpp b/storage/connect/tabwmi.cpp index 4871a1d66dc..335ffce5d7f 100644 --- a/storage/connect/tabwmi.cpp +++ b/storage/connect/tabwmi.cpp @@ -27,7 +27,7 @@ /***********************************************************************/ /* Initialize WMI operations. */ /***********************************************************************/ -PWMIUT InitWMI(PGLOBAL g, char *nsp, char *classname) +PWMIUT InitWMI(PGLOBAL g, PCSZ nsp, PCSZ classname) { IWbemLocator *loc; char *p; @@ -132,7 +132,7 @@ PWMIUT InitWMI(PGLOBAL g, char *nsp, char *classname) /* WMIColumns: constructs the result blocks containing the description */ /* of all the columns of a WMI table of a specified class. 
*/ /***********************************************************************/ -PQRYRES WMIColumns(PGLOBAL g, char *nsp, char *cls, bool info) +PQRYRES WMIColumns(PGLOBAL g, PCSZ nsp, PCSZ cls, bool info) { static int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_STRING, TYPE_INT, TYPE_INT, TYPE_SHORT}; diff --git a/storage/connect/tabwmi.h b/storage/connect/tabwmi.h index 6abb85453a1..7a18453374e 100644 --- a/storage/connect/tabwmi.h +++ b/storage/connect/tabwmi.h @@ -27,7 +27,7 @@ typedef struct _WMIutil { /***********************************************************************/ /* Functions used externally. */ /***********************************************************************/ -PQRYRES WMIColumns(PGLOBAL g, char *nsp, char *cls, bool info); +PQRYRES WMIColumns(PGLOBAL g, PCSZ nsp, PCSZ cls, bool info); /* -------------------------- WMI classes ---------------------------- */ diff --git a/storage/connect/tabxml.cpp b/storage/connect/tabxml.cpp index 52cf3d3812f..80d4395058e 100644 --- a/storage/connect/tabxml.cpp +++ b/storage/connect/tabxml.cpp @@ -1,9 +1,9 @@ /************* Tabxml C++ Program Source Code File (.CPP) **************/ /* PROGRAM NAME: TABXML */ /* ------------- */ -/* Version 2.9 */ +/* Version 3.0 */ /* */ -/* Author Olivier BERTRAND 2007 - 2016 */ +/* Author Olivier BERTRAND 2007 - 2017 */ /* */ /* This program are the XML tables classes using MS-DOM or libxml2. 
*/ /***********************************************************************/ @@ -118,10 +118,11 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info) static XFLD fldtyp[] = {FLD_NAME, FLD_TYPE, FLD_TYPENAME, FLD_PREC, FLD_LENGTH, FLD_SCALE, FLD_NULL, FLD_FORMAT}; static unsigned int length[] = {0, 6, 8, 10, 10, 6, 6, 0}; - char *fn, *op, colname[65], fmt[129], buf[512]; + char colname[65], fmt[129], buf[512]; int i, j, lvl, n = 0; int ncol = sizeof(buftyp) / sizeof(int); bool ok = true; + PCSZ fn, op; PXCL xcol, xcp, fxcp = NULL, pxcp = NULL; PLVL *lvlp, vp; PXNODE node = NULL; @@ -157,7 +158,10 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info) tdp = new(g) XMLDEF; tdp->Fn = fn; - tdp->Database = SetPath(g, db); + + if (!(tdp->Database = SetPath(g, db))) + return NULL; + tdp->Tabname = tab; tdp->Zipped = GetBooleanTableOption(g, topt, "Zipped", false); tdp->Entry = GetStringTableOption(g, topt, "Entry", NULL); @@ -359,7 +363,7 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info) skipit: if (trace) - htrc("CSVColumns: n=%d len=%d\n", n, length[0]); + htrc("XMLColumns: n=%d len=%d\n", n, length[0]); /*********************************************************************/ /* Allocate the structures used to refer to the result set. 
*/ @@ -448,7 +452,8 @@ XMLDEF::XMLDEF(void) /***********************************************************************/ bool XMLDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) { - char *defrow, *defcol, buf[10]; + PCSZ defrow, defcol; + char buf[10]; Fn = GetStringCatInfo(g, "Filename", NULL); Encoding = GetStringCatInfo(g, "Encoding", "UTF-8"); @@ -1314,8 +1319,8 @@ void TDBXML::CloseDB(PGLOBAL g) Docp->CloseDoc(g, To_Xb); // This causes a crash in Diagnostics_area::set_error_status -// longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } // endif DumpDoc +// throw TYPE_AM_XML; + } // endif DumpDoc } // endif Changed @@ -1357,8 +1362,8 @@ void TDBXML::CloseDB(PGLOBAL g) /***********************************************************************/ /* XMLCOL public constructor. */ /***********************************************************************/ -XMLCOL::XMLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) - : COLBLK(cdp, tdbp, i) +XMLCOL::XMLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) + : COLBLK(cdp, tdbp, i) { if (cprec) { Next = cprec->GetNext(); @@ -1637,8 +1642,8 @@ void XMLCOL::ReadColumn(PGLOBAL g) if (ValNode->GetType() != XML_ELEMENT_NODE && ValNode->GetType() != XML_ATTRIBUTE_NODE) { sprintf(g->Message, MSG(BAD_VALNODE), ValNode->GetType(), Name); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } // endif type + throw TYPE_AM_XML; + } // endif type // Get the Xname value from the XML file switch (ValNode->GetContent(g, Valbuf, Long + 1)) { @@ -1648,8 +1653,8 @@ void XMLCOL::ReadColumn(PGLOBAL g) PushWarning(g, Tdbp); break; default: - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } // endswitch + throw TYPE_AM_XML; + } // endswitch Value->SetValue_psz(Valbuf); } else { @@ -1699,7 +1704,7 @@ void XMLCOL::WriteColumn(PGLOBAL g) /* For columns having an Xpath, the Clist must be updated. 
*/ /*********************************************************************/ if (Tdbp->CheckRow(g, Nod || Tdbp->Colname)) - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); + throw TYPE_AM_XML; /*********************************************************************/ /* Null values are represented by no node. */ @@ -1771,8 +1776,8 @@ void XMLCOL::WriteColumn(PGLOBAL g) if (ColNode == NULL) { strcpy(g->Message, MSG(COL_ALLOC_ERR)); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } // endif ColNode + throw TYPE_AM_XML; + } // endif ColNode } // endif ColNode @@ -1800,8 +1805,8 @@ void XMLCOL::WriteColumn(PGLOBAL g) if (strlen(p) > (unsigned)Long) { sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } else + throw TYPE_AM_XML; + } else strcpy(Valbuf, p); /*********************************************************************/ @@ -1850,8 +1855,8 @@ void XMULCOL::ReadColumn(PGLOBAL g) if (ValNode->GetType() != XML_ELEMENT_NODE && ValNode->GetType() != XML_ATTRIBUTE_NODE) { sprintf(g->Message, MSG(BAD_VALNODE), ValNode->GetType(), Name); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } // endif type + throw TYPE_AM_XML; + } // endif type // Get the Xname value from the XML file switch (ValNode->GetContent(g, p, (b ? Long : len))) { @@ -1936,7 +1941,7 @@ void XMULCOL::WriteColumn(PGLOBAL g) /* For columns having an Xpath, the Clist must be updated. */ /*********************************************************************/ if (Tdbp->CheckRow(g, Nod)) - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); + throw TYPE_AM_XML; /*********************************************************************/ /* Find the column and value nodes to update or insert. 
*/ @@ -1985,8 +1990,8 @@ void XMULCOL::WriteColumn(PGLOBAL g) if (len > 1 && !Tdbp->Xpand) { sprintf(g->Message, MSG(BAD_VAL_UPDATE), Name); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } else + throw TYPE_AM_XML; + } else ValNode = Nlx->GetItem(g, Tdbp->Nsub, Vxnp); } else // Inod != Nod @@ -2027,8 +2032,8 @@ void XMULCOL::WriteColumn(PGLOBAL g) if (ColNode == NULL) { strcpy(g->Message, MSG(COL_ALLOC_ERR)); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } // endif ColNode + throw TYPE_AM_XML; + } // endif ColNode } // endif ColNode @@ -2056,8 +2061,8 @@ void XMULCOL::WriteColumn(PGLOBAL g) if (strlen(p) > (unsigned)Long) { sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } else + throw TYPE_AM_XML; + } else strcpy(Valbuf, p); /*********************************************************************/ @@ -2088,8 +2093,8 @@ void XPOSCOL::ReadColumn(PGLOBAL g) if (Tdbp->Clist == NULL) { strcpy(g->Message, MSG(MIS_TAG_LIST)); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } // endif Clist + throw TYPE_AM_XML; + } // endif Clist if ((ValNode = Tdbp->Clist->GetItem(g, Rank, Vxnp))) { // Get the column value from the XML file @@ -2100,8 +2105,8 @@ void XPOSCOL::ReadColumn(PGLOBAL g) PushWarning(g, Tdbp); break; default: - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } // endswitch + throw TYPE_AM_XML; + } // endswitch Value->SetValue_psz(Valbuf); } else { @@ -2151,15 +2156,15 @@ void XPOSCOL::WriteColumn(PGLOBAL g) /* For all columns the Clist must be updated. */ /*********************************************************************/ if (Tdbp->CheckRow(g, true)) - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); + throw TYPE_AM_XML; /*********************************************************************/ /* Find the column and value nodes to update or insert. 
*/ /*********************************************************************/ if (Tdbp->Clist == NULL) { strcpy(g->Message, MSG(MIS_TAG_LIST)); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } // endif Clist + throw TYPE_AM_XML; + } // endif Clist n = Tdbp->Clist->GetLength(); k = Rank; @@ -2183,8 +2188,8 @@ void XPOSCOL::WriteColumn(PGLOBAL g) if (strlen(p) > (unsigned)Long) { sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } else + throw TYPE_AM_XML; + } else strcpy(Valbuf, p); /*********************************************************************/ diff --git a/storage/connect/tabxml.h b/storage/connect/tabxml.h index 65b353072cb..813f62dde52 100644 --- a/storage/connect/tabxml.h +++ b/storage/connect/tabxml.h @@ -31,7 +31,7 @@ class DllExport XMLDEF : public TABDEF { /* Logical table description */ protected: // Members - char *Fn; /* Path/Name of corresponding file */ + PCSZ Fn; /* Path/Name of corresponding file */ char *Encoding; /* New XML table file encoding */ char *Tabname; /* Name of Table node */ char *Rowname; /* Name of first level nodes */ @@ -42,7 +42,7 @@ class DllExport XMLDEF : public TABDEF { /* Logical table description */ char *DefNs; /* Dummy name of default namespace */ char *Attrib; /* Table node attributes */ char *Hdattr; /* Header node attributes */ - char *Entry; /* Zip entry name or pattern */ + PCSZ Entry; /* Zip entry name or pattern */ int Coltype; /* Default column type */ int Limit; /* Limit of multiple values */ int Header; /* n first rows are header rows */ @@ -74,8 +74,8 @@ class DllExport TDBXML : public TDBASE { virtual PTDB Clone(PTABS t); virtual int GetRecpos(void); virtual int GetProgCur(void) {return N;} - virtual PSZ GetFile(PGLOBAL g) {return Xfile;} - virtual void SetFile(PGLOBAL g, PSZ fn) {Xfile = fn;} + virtual PCSZ GetFile(PGLOBAL g) {return Xfile;} + virtual void SetFile(PGLOBAL g, PCSZ fn) {Xfile = fn;} virtual void ResetDB(void) {N = 0;} virtual 
void ResetSize(void) {MaxSize = -1;} virtual int RowNumber(PGLOBAL g, bool b = false); @@ -127,7 +127,7 @@ class DllExport TDBXML : public TDBASE { bool Void; // True if the file does not exist bool Zipped; // True if Zipped XML file(s) bool Mulentries; // True if multiple entries in zip file - char *Xfile; // The XML file + PCSZ Xfile; // The XML file char *Enc; // New XML table file encoding char *Tabname; // Name of Table node char *Rowname; // Name of first level nodes @@ -138,7 +138,7 @@ class DllExport TDBXML : public TDBASE { char *DefNs; // Dummy name of default namespace char *Attrib; // Table node attribut(s) char *Hdattr; // Header node attribut(s) - char *Entry; // Zip entry name or pattern + PCSZ Entry; // Zip entry name or pattern int Coltype; // Default column type int Limit; // Limit of multiple values int Header; // n first rows are header rows @@ -155,7 +155,7 @@ class DllExport TDBXML : public TDBASE { class XMLCOL : public COLBLK { public: // Constructors - XMLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "XML"); + XMLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "XML"); XMLCOL(XMLCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation diff --git a/storage/connect/tabzip.cpp b/storage/connect/tabzip.cpp index b91059a3843..c026744dba8 100644 --- a/storage/connect/tabzip.cpp +++ b/storage/connect/tabzip.cpp @@ -195,8 +195,8 @@ void TDBZIP::CloseDB(PGLOBAL g) /***********************************************************************/ /* ZIPCOL public constructor. 
*/ /***********************************************************************/ -ZIPCOL::ZIPCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) - : COLBLK(cdp, tdbp, i) +ZIPCOL::ZIPCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) + : COLBLK(cdp, tdbp, i) { if (cprec) { Next = cprec->GetNext(); diff --git a/storage/connect/tabzip.h b/storage/connect/tabzip.h index dcec3475371..32b15281f81 100644 --- a/storage/connect/tabzip.h +++ b/storage/connect/tabzip.h @@ -34,7 +34,7 @@ public: protected: // Members - PSZ target; // The inside file to query + PCSZ target; // The inside file to query }; // end of ZIPDEF /***********************************************************************/ @@ -68,7 +68,7 @@ protected: // Members unzFile zipfile; // The ZIP container file - PSZ zfn; // The ZIP file name + PCSZ zfn; // The ZIP file name //PSZ target; unz_file_info64 finfo; // The current file info char fn[FILENAME_MAX]; // The current file name @@ -82,7 +82,7 @@ class DllExport ZIPCOL : public COLBLK { friend class TDBZIP; public: // Constructors - ZIPCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "ZIP"); + ZIPCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "ZIP"); // Implementation virtual int GetAmType(void) { return TYPE_AM_ZIP; } diff --git a/storage/connect/valblk.cpp b/storage/connect/valblk.cpp index 5fefcba5856..5b98f3eb425 100644 --- a/storage/connect/valblk.cpp +++ b/storage/connect/valblk.cpp @@ -1,7 +1,7 @@ /************ Valblk C++ Functions Source Code File (.CPP) *************/ -/* Name: VALBLK.CPP Version 2.1 */ +/* Name: VALBLK.CPP Version 2.3 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2015 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */ /* */ /* This file contains the VALBLK and derived classes functions. 
*/ /* Second family is VALBLK, representing simple suballocated arrays */ @@ -138,14 +138,14 @@ PSZ VALBLK::GetCharValue(int) assert(g); sprintf(g->Message, MSG(NO_CHAR_FROM), Type); - longjmp(g->jumper[g->jump_level], Type); - return NULL; + throw Type; + return NULL; } // end of GetCharValue /***********************************************************************/ /* Set format so formatted dates can be converted on input. */ /***********************************************************************/ -bool VALBLK::SetFormat(PGLOBAL g, PSZ, int, int) +bool VALBLK::SetFormat(PGLOBAL g, PCSZ, int, int) { sprintf(g->Message, MSG(NO_DATE_FMT), Type); return true; @@ -206,8 +206,8 @@ void VALBLK::ChkIndx(int n) if (n < 0 || n >= Nval) { PGLOBAL& g = Global; strcpy(g->Message, MSG(BAD_VALBLK_INDX)); - longjmp(g->jumper[g->jump_level], Type); - } // endif n + throw Type; + } // endif n } // end of ChkIndx @@ -216,8 +216,8 @@ void VALBLK::ChkTyp(PVAL v) if (Check && (Type != v->GetType() || Unsigned != v->IsUnsigned())) { PGLOBAL& g = Global; strcpy(g->Message, MSG(VALTYPE_NOMATCH)); - longjmp(g->jumper[g->jump_level], Type); - } // endif Type + throw Type; + } // endif Type } // end of ChkTyp @@ -226,8 +226,8 @@ void VALBLK::ChkTyp(PVBLK vb) if (Check && (Type != vb->GetType() || Unsigned != vb->IsUnsigned())) { PGLOBAL& g = Global; strcpy(g->Message, MSG(VALTYPE_NOMATCH)); - longjmp(g->jumper[g->jump_level], Type); - } // endif Type + throw Type; + } // endif Type } // end of ChkTyp @@ -335,15 +335,15 @@ uchar TYPBLK<uchar>::GetTypedValue(PVAL valp) /* Set one value in a block from a zero terminated string. 
*/ /***********************************************************************/ template <class TYPE> -void TYPBLK<TYPE>::SetValue(PSZ p, int n) +void TYPBLK<TYPE>::SetValue(PCSZ p, int n) { ChkIndx(n); if (Check) { PGLOBAL& g = Global; strcpy(g->Message, MSG(BAD_SET_STRING)); - longjmp(g->jumper[g->jump_level], Type); - } // endif Check + throw Type; + } // endif Check bool minus; ulonglong maxval = MaxVal(); @@ -385,15 +385,15 @@ template <> ulonglong TYPBLK<ulonglong>::MaxVal(void) {return ULONGLONG_MAX;} template <> -void TYPBLK<double>::SetValue(PSZ p, int n) +void TYPBLK<double>::SetValue(PCSZ p, int n) { ChkIndx(n); if (Check) { PGLOBAL& g = Global; strcpy(g->Message, MSG(BAD_SET_STRING)); - longjmp(g->jumper[g->jump_level], Type); - } // endif Check + throw Type; + } // endif Check Typp[n] = atof(p); SetNull(n, false); @@ -403,7 +403,7 @@ void TYPBLK<double>::SetValue(PSZ p, int n) /* Set one value in a block from an array of characters. */ /***********************************************************************/ template <class TYPE> -void TYPBLK<TYPE>::SetValue(char *sp, uint len, int n) +void TYPBLK<TYPE>::SetValue(PCSZ sp, uint len, int n) { PGLOBAL& g = Global; PSZ spz = (PSZ)PlugSubAlloc(g, NULL, 0); // Temporary @@ -778,7 +778,7 @@ void CHRBLK::SetValue(PVAL valp, int n) /***********************************************************************/ /* Set one value in a block from a zero terminated string. */ /***********************************************************************/ -void CHRBLK::SetValue(PSZ sp, int n) +void CHRBLK::SetValue(PCSZ sp, int n) { uint len = (sp) ? strlen(sp) : 0; SetValue(sp, len, n); @@ -787,7 +787,7 @@ void CHRBLK::SetValue(PSZ sp, int n) /***********************************************************************/ /* Set one value in a block from an array of characters. 
*/ /***********************************************************************/ -void CHRBLK::SetValue(char *sp, uint len, int n) +void CHRBLK::SetValue(const char *sp, uint len, int n) { char *p = Chrp + n * Long; @@ -795,8 +795,8 @@ void CHRBLK::SetValue(char *sp, uint len, int n) if (Check && (signed)len > Long) { PGLOBAL& g = Global; strcpy(g->Message, MSG(SET_STR_TRUNC)); - longjmp(g->jumper[g->jump_level], Type); - } // endif Check + throw Type; + } // endif Check #endif // _DEBUG if (sp) @@ -823,8 +823,8 @@ void CHRBLK::SetValue(PVBLK pv, int n1, int n2) if (Type != pv->GetType() || Long != ((CHRBLK*)pv)->Long) { PGLOBAL& g = Global; strcpy(g->Message, MSG(BLKTYPLEN_MISM)); - longjmp(g->jumper[g->jump_level], Type); - } // endif Type + throw Type; + } // endif Type if (!(b = pv->IsNull(n2))) memcpy(Chrp + n1 * Long, ((CHRBLK*)pv)->Chrp + n2 * Long, Long); @@ -874,8 +874,8 @@ void CHRBLK::SetValues(PVBLK pv, int k, int n) if (Type != pv->GetType() || Long != ((CHRBLK*)pv)->Long) { PGLOBAL& g = Global; strcpy(g->Message, MSG(BLKTYPLEN_MISM)); - longjmp(g->jumper[g->jump_level], Type); - } // endif Type + throw Type; + } // endif Type #endif // _DEBUG char *p = ((CHRBLK*)pv)->Chrp; @@ -1152,7 +1152,7 @@ void STRBLK::SetValue(PVAL valp, int n) /***********************************************************************/ /* Set one value in a block from a zero terminated string. */ /***********************************************************************/ -void STRBLK::SetValue(PSZ p, int n) +void STRBLK::SetValue(PCSZ p, int n) { if (p) { if (!Sorted || !n || !Strp[n-1] || strcmp(p, Strp[n-1])) @@ -1168,7 +1168,7 @@ void STRBLK::SetValue(PSZ p, int n) /***********************************************************************/ /* Set one value in a block from an array of characters. 
*/ /***********************************************************************/ -void STRBLK::SetValue(char *sp, uint len, int n) +void STRBLK::SetValue(const char *sp, uint len, int n) { PSZ p; @@ -1316,7 +1316,7 @@ DATBLK::DATBLK(void *mp, int nval) : TYPBLK<int>(mp, nval, TYPE_INT) /***********************************************************************/ /* Set format so formatted dates can be converted on input. */ /***********************************************************************/ -bool DATBLK::SetFormat(PGLOBAL g, PSZ fmt, int len, int year) +bool DATBLK::SetFormat(PGLOBAL g, PCSZ fmt, int len, int year) { if (!(Dvalp = AllocateValue(g, TYPE_DATE, len, year, false, fmt))) return true; @@ -1343,7 +1343,7 @@ char *DATBLK::GetCharString(char *p, int n) /***********************************************************************/ /* Set one value in a block from a char string. */ /***********************************************************************/ -void DATBLK::SetValue(PSZ p, int n) +void DATBLK::SetValue(PCSZ p, int n) { if (Dvalp) { // Decode the string according to format diff --git a/storage/connect/valblk.h b/storage/connect/valblk.h index c3cad79b234..38a73424985 100644 --- a/storage/connect/valblk.h +++ b/storage/connect/valblk.h @@ -91,7 +91,7 @@ class VALBLK : public BLOCK { virtual char *GetCharString(char *p, int n) = 0; virtual void ReAlloc(void *mp, int n) {Blkp = mp; Nval = n;} virtual void Reset(int n) = 0; - virtual bool SetFormat(PGLOBAL g, PSZ fmt, int len, int year = 0); + virtual bool SetFormat(PGLOBAL g, PCSZ fmt, int len, int year = 0); virtual void SetPrec(int p) {} virtual bool IsCi(void) {return false;} @@ -105,8 +105,8 @@ class VALBLK : public BLOCK { virtual void SetValue(double, int) {assert(false);} virtual void SetValue(char, int) {assert(false);} virtual void SetValue(uchar, int) {assert(false);} - virtual void SetValue(PSZ, int) {assert(false);} - virtual void SetValue(char *, uint, int) {assert(false);} + virtual void 
SetValue(PCSZ, int) {assert(false);} + virtual void SetValue(const char *, uint, int) {assert(false);} virtual void SetValue(PVAL valp, int n) = 0; virtual void SetValue(PVBLK pv, int n1, int n2) = 0; virtual void SetMin(PVAL valp, int n) = 0; @@ -165,8 +165,8 @@ class TYPBLK : public VALBLK { // Methods using VALBLK::SetValue; - virtual void SetValue(PSZ sp, int n); - virtual void SetValue(char *sp, uint len, int n); + virtual void SetValue(PCSZ sp, int n); + virtual void SetValue(const char *sp, uint len, int n); virtual void SetValue(short sval, int n) {Typp[n] = (TYPE)sval; SetNull(n, false);} virtual void SetValue(ushort sval, int n) @@ -236,8 +236,8 @@ class CHRBLK : public VALBLK { // Methods using VALBLK::SetValue; - virtual void SetValue(PSZ sp, int n); - virtual void SetValue(char *sp, uint len, int n); + virtual void SetValue(PCSZ sp, int n); + virtual void SetValue(const char *sp, uint len, int n); virtual void SetValue(PVAL valp, int n); virtual void SetValue(PVBLK pv, int n1, int n2); virtual void SetMin(PVAL valp, int n); @@ -290,8 +290,8 @@ class STRBLK : public VALBLK { // Methods using VALBLK::SetValue; - virtual void SetValue(PSZ sp, int n); - virtual void SetValue(char *sp, uint len, int n); + virtual void SetValue(PCSZ sp, int n); + virtual void SetValue(const char *sp, uint len, int n); virtual void SetValue(PVAL valp, int n); virtual void SetValue(PVBLK pv, int n1, int n2); virtual void SetMin(PVAL valp, int n); @@ -322,12 +322,12 @@ class DATBLK : public TYPBLK<int> { DATBLK(void *mp, int size); // Implementation - virtual bool SetFormat(PGLOBAL g, PSZ fmt, int len, int year = 0); + virtual bool SetFormat(PGLOBAL g, PCSZ fmt, int len, int year = 0); virtual char *GetCharString(char *p, int n); // Methods using TYPBLK<int>::SetValue; - virtual void SetValue(PSZ sp, int n); + virtual void SetValue(PCSZ sp, int n); protected: // Members @@ -352,7 +352,7 @@ class PTRBLK : public STRBLK { // Methods using STRBLK::SetValue; using STRBLK::CompVal; 
- virtual void SetValue(PSZ p, int n) {Strp[n] = p;} + virtual void SetValue(PCSZ p, int n) {Strp[n] = (char*)p;} virtual int CompVal(int i1, int i2); protected: diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp index ced690e77c0..b6c63bdadd3 100644 --- a/storage/connect/value.cpp +++ b/storage/connect/value.cpp @@ -1,7 +1,7 @@ /************* Value C++ Functions Source Code File (.CPP) *************/ -/* Name: VALUE.CPP Version 2.6 */ +/* Name: VALUE.CPP Version 2.8 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2001-2016 */ +/* (C) Copyright to the author Olivier BERTRAND 2001-2017 */ /* */ /* This file contains the VALUE and derived classes family functions. */ /* These classes contain values of different types. They are used so */ @@ -60,7 +60,7 @@ #define CheckType(V) if (Type != V->GetType()) { \ PGLOBAL& g = Global; \ strcpy(g->Message, MSG(VALTYPE_NOMATCH)); \ - longjmp(g->jumper[g->jump_level], Type); } + throw Type; #else #define CheckType(V) #endif @@ -94,12 +94,12 @@ PSZ strlwr(PSZ s); /* OUT minus: Set to true if the number is negative */ /* Returned val: The resulting number */ /***********************************************************************/ -ulonglong CharToNumber(char *p, int n, ulonglong maxval, +ulonglong CharToNumber(const char *p, int n, ulonglong maxval, bool un, bool *minus, bool *rc) { - char *p2; - uchar c; - ulonglong val; + const char *p2; + uchar c; + ulonglong val; if (minus) *minus = false; if (rc) *rc = false; @@ -118,7 +118,7 @@ ulonglong CharToNumber(char *p, int n, ulonglong maxval, maxval++; if (minus) *minus = true; } // endif Unsigned - + /* fall through */ case '+': p++; break; @@ -138,9 +138,9 @@ ulonglong CharToNumber(char *p, int n, ulonglong maxval, /***********************************************************************/ /* GetTypeName: returns the PlugDB internal type name. 
*/ /***********************************************************************/ -PSZ GetTypeName(int type) +PCSZ GetTypeName(int type) { - PSZ name; + PCSZ name; switch (type) { case TYPE_STRING: name = "CHAR"; break; @@ -184,9 +184,9 @@ int GetTypeSize(int type, int len) /***********************************************************************/ /* GetFormatType: returns the FORMAT character(s) according to type. */ /***********************************************************************/ -char *GetFormatType(int type) +const char *GetFormatType(int type) { - char *c = "X"; + const char *c = "X"; switch (type) { case TYPE_STRING: c = "C"; break; @@ -370,7 +370,7 @@ PVAL AllocateValue(PGLOBAL g, void *value, short type, short prec) /* Allocate a variable Value according to type, length and precision. */ /***********************************************************************/ PVAL AllocateValue(PGLOBAL g, int type, int len, int prec, - bool uns, PSZ fmt) + bool uns, PCSZ fmt) { PVAL valp; @@ -558,6 +558,38 @@ bool VALUE::Compute(PGLOBAL g, PVAL *, int, OPVAL) return true; } // end of Compute +/***********************************************************************/ +/* Make file output of an object value. */ +/***********************************************************************/ +void VALUE::Printf(PGLOBAL g, FILE *f, uint n) +{ + char m[64], buf[64]; + + memset(m, ' ', n); /* Make margin string */ + m[n] = '\0'; + + if (Null) + fprintf(f, "%s<null>\n", m); + else + fprintf(f, "%s%s%s", GetCharString(buf), "\n", m); + +} /* end of Print */ + +/***********************************************************************/ +/* Make string output of an object value. 
*/ +/***********************************************************************/ +void VALUE::Prints(PGLOBAL g, char *ps, uint z) +{ + char *p, buf[64]; + + if (Null) + p = strcpy(buf, "<null>"); + else + p = GetCharString(buf); + + strncpy(ps, p, z); +} // end of Print + /* -------------------------- Class TYPVAL ---------------------------- */ /***********************************************************************/ @@ -682,7 +714,7 @@ uchar TYPVAL<uchar>::GetTypedValue(PVAL valp) /* TYPVAL SetValue: convert chars extracted from a line to TYPE value.*/ /***********************************************************************/ template <class TYPE> -bool TYPVAL<TYPE>::SetValue_char(char *p, int n) +bool TYPVAL<TYPE>::SetValue_char(const char *p, int n) { bool rc, minus; ulonglong maxval = MaxVal(); @@ -704,7 +736,7 @@ bool TYPVAL<TYPE>::SetValue_char(char *p, int n) } // end of SetValue template <> -bool TYPVAL<double>::SetValue_char(char *p, int n) +bool TYPVAL<double>::SetValue_char(const char *p, int n) { if (p && n > 0) { char buf[64]; @@ -732,7 +764,7 @@ bool TYPVAL<double>::SetValue_char(char *p, int n) /* TYPVAL SetValue: fill a typed value from a string. 
*/ /***********************************************************************/ template <class TYPE> -void TYPVAL<TYPE>::SetValue_psz(PSZ s) +void TYPVAL<TYPE>::SetValue_psz(PCSZ s) { if (s) { SetValue_char(s, (int)strlen(s)); @@ -1019,12 +1051,12 @@ TYPE TYPVAL<TYPE>::SafeAdd(TYPE n1, TYPE n2) if ((n2 > 0) && (n < n1)) { // Overflow strcpy(g->Message, MSG(FIX_OVFLW_ADD)); - longjmp(g->jumper[g->jump_level], 138); - } else if ((n2 < 0) && (n > n1)) { + throw 138; + } else if ((n2 < 0) && (n > n1)) { // Underflow strcpy(g->Message, MSG(FIX_UNFLW_ADD)); - longjmp(g->jumper[g->jump_level], 138); - } // endif's n2 + throw 138; + } // endif's n2 return n; } // end of SafeAdd @@ -1047,12 +1079,12 @@ TYPE TYPVAL<TYPE>::SafeMult(TYPE n1, TYPE n2) if (n > MinMaxVal(true)) { // Overflow strcpy(g->Message, MSG(FIX_OVFLW_TIMES)); - longjmp(g->jumper[g->jump_level], 138); - } else if (n < MinMaxVal(false)) { + throw 138; + } else if (n < MinMaxVal(false)) { // Underflow strcpy(g->Message, MSG(FIX_UNFLW_TIMES)); - longjmp(g->jumper[g->jump_level], 138); - } // endif's n2 + throw 138; + } // endif's n2 return (TYPE)n; } // end of SafeMult @@ -1170,7 +1202,7 @@ bool TYPVAL<TYPE>::Compall(PGLOBAL g, PVAL *vp, int np, OPVAL op) /* This function assumes that the format matches the value type. */ /***********************************************************************/ template <class TYPE> -bool TYPVAL<TYPE>::FormatValue(PVAL vp, char *fmt) +bool TYPVAL<TYPE>::FormatValue(PVAL vp, PCSZ fmt) { char *buf = (char*)vp->GetTo_Val(); // Should be big enough int n = sprintf(buf, fmt, Tval); @@ -1192,37 +1224,6 @@ bool TYPVAL<TYPE>::SetConstFormat(PGLOBAL g, FORMAT& fmt) return false; } // end of SetConstFormat -/***********************************************************************/ -/* Make file output of a typed object. 
*/ -/***********************************************************************/ -template <class TYPE> -void TYPVAL<TYPE>::Print(PGLOBAL g, FILE *f, uint n) - { - char m[64], buf[12]; - - memset(m, ' ', n); /* Make margin string */ - m[n] = '\0'; - - if (Null) - fprintf(f, "%s<null>\n", m); - else - fprintf(f, strcat(strcat(strcpy(buf, "%s"), Fmt), "\n"), m, Tval); - - } /* end of Print */ - -/***********************************************************************/ -/* Make string output of a int object. */ -/***********************************************************************/ -template <class TYPE> -void TYPVAL<TYPE>::Print(PGLOBAL g, char *ps, uint z) - { - if (Null) - strcpy(ps, "<null>"); - else - sprintf(ps, Fmt, Tval); - - } /* end of Print */ - /* -------------------------- Class STRING --------------------------- */ /***********************************************************************/ @@ -1361,25 +1362,25 @@ bool TYPVAL<PSZ>::SetValue_pval(PVAL valp, bool chktype) /***********************************************************************/ /* STRING SetValue: fill string with chars extracted from a line. 
*/ /***********************************************************************/ -bool TYPVAL<PSZ>::SetValue_char(char *p, int n) +bool TYPVAL<PSZ>::SetValue_char(const char *cp, int n) { bool rc = false; - if (!p || n == 0) { + if (!cp || n == 0) { Reset(); Null = Nullable; - } else if (p != Strp) { - rc = n > Len; + } else if (cp != Strp) { + const char *p = cp + n - 1; - if ((n = MY_MIN(n, Len))) { - strncpy(Strp, p, n); + for (p; p >= cp; p--, n--) + if (*p && *p != ' ') + break; -// for (p = Strp + n - 1; p >= Strp && (*p == ' ' || *p == '\0'); p--) ; - for (p = Strp + n - 1; p >= Strp; p--) - if (*p && *p != ' ') - break; + rc = n > Len; - *(++p) = '\0'; + if ((n = MY_MIN(n, Len))) { + strncpy(Strp, cp, n); + Strp[n] = '\0'; if (trace > 1) htrc(" Setting string to: '%s'\n", Strp); @@ -1396,7 +1397,7 @@ bool TYPVAL<PSZ>::SetValue_char(char *p, int n) /***********************************************************************/ /* STRING SetValue: fill string with another string. */ /***********************************************************************/ -void TYPVAL<PSZ>::SetValue_psz(PSZ s) +void TYPVAL<PSZ>::SetValue_psz(PCSZ s) { if (!s) { Reset(); @@ -1432,8 +1433,8 @@ void TYPVAL<PSZ>::SetValue(int n) if (k > Len) { sprintf(g->Message, MSG(VALSTR_TOO_LONG), buf, Len); - longjmp(g->jumper[g->jump_level], 138); - } else + throw 138; + } else SetValue_psz(buf); Null = false; @@ -1486,8 +1487,8 @@ void TYPVAL<PSZ>::SetValue(longlong n) if (k > Len) { sprintf(g->Message, MSG(VALSTR_TOO_LONG), buf, Len); - longjmp(g->jumper[g->jump_level], 138); - } else + throw 138; + } else SetValue_psz(buf); Null = false; @@ -1529,8 +1530,8 @@ void TYPVAL<PSZ>::SetValue(double f) if (k > Len) { sprintf(g->Message, MSG(VALSTR_TOO_LONG), buf, Len); - longjmp(g->jumper[g->jump_level], 138); - } else + throw 138; + } else SetValue_psz(buf); Null = false; @@ -1559,7 +1560,7 @@ void TYPVAL<PSZ>::SetValue(uchar c) /***********************************************************************/ 
void TYPVAL<PSZ>::SetBinValue(void *p) { - SetValue_char((char *)p, Len); + SetValue_char((const char *)p, Len); } // end of SetBinValue /***********************************************************************/ @@ -1689,7 +1690,7 @@ bool TYPVAL<PSZ>::Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op) /* constructed from its own value formated using the fmt format. */ /* This function assumes that the format matches the value type. */ /***********************************************************************/ -bool TYPVAL<PSZ>::FormatValue(PVAL vp, char *fmt) +bool TYPVAL<PSZ>::FormatValue(PVAL vp, PCSZ fmt) { char *buf = (char*)vp->GetTo_Val(); // Should be big enough int n = sprintf(buf, fmt, Strp); @@ -1708,6 +1709,18 @@ bool TYPVAL<PSZ>::SetConstFormat(PGLOBAL, FORMAT& fmt) return false; } // end of SetConstFormat +/***********************************************************************/ +/* Make string output of an object value. */ +/***********************************************************************/ +void TYPVAL<PSZ>::Prints(PGLOBAL g, char *ps, uint z) +{ + if (Null) + strncpy(ps, "null", z); + else + strcat(strncat(strncpy(ps, "\"", z), Strp, z-2), "\""); + +} // end of Print + /* -------------------------- Class DECIMAL -------------------------- */ /***********************************************************************/ @@ -1797,102 +1810,6 @@ bool DECVAL::GetBinValue(void *buf, int buflen, bool go) return false; } // end of GetBinValue -#if 0 -/***********************************************************************/ -/* DECIMAL SetValue: copy the value of another Value object. 
*/ -/***********************************************************************/ -bool DECVAL::SetValue_pval(PVAL valp, bool chktype) - { - if (chktype && (valp->GetType() != Type || valp->GetSize() > Len)) - return true; - - char buf[64]; - - if (!(Null = valp->IsNull() && Nullable)) - strncpy(Strp, valp->GetCharString(buf), Len); - else - Reset(); - - return false; - } // end of SetValue_pval - -/***********************************************************************/ -/* DECIMAL SetValue: fill string with chars extracted from a line. */ -/***********************************************************************/ -bool DECVAL::SetValue_char(char *p, int n) - { - bool rc; - - if (p && n > 0) { - rc = n > Len; - - if ((n = MY_MIN(n, Len))) { - strncpy(Strp, p, n); - -// for (p = Strp + n - 1; p >= Strp && (*p == ' ' || *p == '\0'); p--) ; - for (p = Strp + n - 1; p >= Strp; p--) - if (*p && *p != ' ') - break; - - *(++p) = '\0'; - - if (trace > 1) - htrc(" Setting string to: '%s'\n", Strp); - - } else - Reset(); - - Null = false; - } else { - rc = false; - Reset(); - Null = Nullable; - } // endif p - - return rc; - } // end of SetValue_char - -/***********************************************************************/ -/* DECIMAL SetValue: fill string with another string. 
*/ -/***********************************************************************/ -void DECVAL::SetValue_psz(PSZ s) - { - if (s) { - strncpy(Strp, s, Len); - Null = false; - } else { - Reset(); - Null = Nullable; - } // endif s - - } // end of SetValue_psz - -/***********************************************************************/ -/* DECIMAL SetValue: fill string with a string extracted from a block.*/ -/***********************************************************************/ -void DECVAL::SetValue_pvblk(PVBLK blk, int n) - { - // STRBLK's can return a NULL pointer - SetValue_psz(blk->GetCharValue(n)); - } // end of SetValue_pvblk - -/***********************************************************************/ -/* DECIMAL SetBinValue: fill string with chars extracted from a line. */ -/***********************************************************************/ -void DECVAL::SetBinValue(void *p) - { - SetValue_char((char *)p, Len); - } // end of SetBinValue - -/***********************************************************************/ -/* DECIMAL GetCharString: get string representation of a char value. */ -/***********************************************************************/ -char *DECVAL::GetCharString(char *p) - { - return Strp; - } // end of GetCharString -#endif // 0 - /***********************************************************************/ /* DECIMAL compare value with another Value. */ /***********************************************************************/ @@ -1927,32 +1844,6 @@ int DECVAL::CompareValue(PVAL vp) return (f > n) ? 1 : (f < n) ? (-1) : 0; } // end of CompareValue -#if 0 -/***********************************************************************/ -/* FormatValue: This function set vp (a STRING value) to the string */ -/* constructed from its own value formated using the fmt format. */ -/* This function assumes that the format matches the value type. 
*/ -/***********************************************************************/ -bool DECVAL::FormatValue(PVAL vp, char *fmt) - { - char *buf = (char*)vp->GetTo_Val(); // Should be big enough - int n = sprintf(buf, fmt, Strp); - - return (n > vp->GetValLen()); - } // end of FormatValue - -/***********************************************************************/ -/* DECIMAL SetFormat function (used to set SELECT output format). */ -/***********************************************************************/ -bool DECVAL::SetConstFormat(PGLOBAL g, FORMAT& fmt) - { - fmt.Type[0] = 'C'; - fmt.Length = Len; - fmt.Prec = 0; - return false; - } // end of SetConstFormat -#endif // 0 - /* -------------------------- Class BINVAL --------------------------- */ /***********************************************************************/ @@ -2110,7 +2001,7 @@ bool BINVAL::SetValue_pval(PVAL valp, bool chktype) /***********************************************************************/ /* BINVAL SetValue: fill value with chars extracted from a line. */ /***********************************************************************/ -bool BINVAL::SetValue_char(char *p, int n) +bool BINVAL::SetValue_char(const char *p, int n) { bool rc; @@ -2131,7 +2022,7 @@ bool BINVAL::SetValue_char(char *p, int n) /***********************************************************************/ /* BINVAL SetValue: fill value with another string. */ /***********************************************************************/ -void BINVAL::SetValue_psz(PSZ s) +void BINVAL::SetValue_psz(PCSZ s) { if (s) { Len = MY_MIN(Clen, (signed)strlen(s)); @@ -2357,7 +2248,7 @@ bool BINVAL::IsEqual(PVAL vp, bool chktype) /* constructed from its own value formated using the fmt format. */ /* This function assumes that the format matches the value type. 
*/ /***********************************************************************/ -bool BINVAL::FormatValue(PVAL vp, char *fmt) +bool BINVAL::FormatValue(PVAL vp, PCSZ fmt) { char *buf = (char*)vp->GetTo_Val(); // Should be big enough int n = sprintf(buf, fmt, Len, Binp); @@ -2381,7 +2272,7 @@ bool BINVAL::SetConstFormat(PGLOBAL, FORMAT& fmt) /***********************************************************************/ /* DTVAL public constructor for new void values. */ /***********************************************************************/ -DTVAL::DTVAL(PGLOBAL g, int n, int prec, PSZ fmt) +DTVAL::DTVAL(PGLOBAL g, int n, int prec, PCSZ fmt) : TYPVAL<int>((int)0, TYPE_DATE) { if (!fmt) { @@ -2410,7 +2301,7 @@ DTVAL::DTVAL(int n) : TYPVAL<int>(n, TYPE_DATE) /***********************************************************************/ /* Set format so formatted dates can be converted on input/output. */ /***********************************************************************/ -bool DTVAL::SetFormat(PGLOBAL g, PSZ fmt, int len, int year) +bool DTVAL::SetFormat(PGLOBAL g, PCSZ fmt, int len, int year) { Pdtp = MakeDateFormat(g, fmt, true, true, (year > 9999) ? 1 : 0); Sdate = (char*)PlugSubAlloc(g, NULL, len + 1); @@ -2668,7 +2559,11 @@ bool DTVAL::SetValue_pval(PVAL valp, bool chktype) ndv = ExtractDate(valp->GetCharValue(), Pdtp, DefYear, dval); MakeDate(NULL, dval, ndv); - } else + } else if (valp->GetType() == TYPE_BIGINT && + !(valp->GetBigintValue() % 1000)) { + // Assuming that this timestamp is in milliseconds + Tval = valp->GetBigintValue() / 1000; + } else Tval = valp->GetIntValue(); } else @@ -2682,14 +2577,14 @@ bool DTVAL::SetValue_pval(PVAL valp, bool chktype) /***********************************************************************/ /* SetValue: convert chars extracted from a line to date value. 
*/ /***********************************************************************/ -bool DTVAL::SetValue_char(char *p, int n) +bool DTVAL::SetValue_char(const char *p, int n) { bool rc= 0; if (Pdtp) { - char *p2; - int ndv; - int dval[6]; + const char *p2; + int ndv; + int dval[6]; if (n > 0) { // Trim trailing blanks @@ -2721,11 +2616,11 @@ bool DTVAL::SetValue_char(char *p, int n) /***********************************************************************/ /* SetValue: convert a char string to date value. */ /***********************************************************************/ -void DTVAL::SetValue_psz(PSZ p) +void DTVAL::SetValue_psz(PCSZ p) { if (Pdtp) { - int ndv; - int dval[6]; + int ndv; + int dval[6]; strncpy(Sdate, p, Len); Sdate[Len] = '\0'; @@ -2815,8 +2710,10 @@ char *DTVAL::ShowValue(char *buf, int len) strncat(p, "Error", m); } // endif n - } else - p = ""; // DEFAULT VALUE ??? + } else { + p = buf; + *p = '\0'; // DEFAULT VALUE ??? + } // endif Null return p; } else @@ -2881,7 +2778,7 @@ bool DTVAL::WeekNum(PGLOBAL g, int& nval) /* constructed from its own value formated using the fmt format. */ /* This function assumes that the format matches the value type. */ /***********************************************************************/ -bool DTVAL::FormatValue(PVAL vp, char *fmt) +bool DTVAL::FormatValue(PVAL vp, PCSZ fmt) { char *buf = (char*)vp->GetTo_Val(); // Should be big enough struct tm tm, *ptm = GetGmTime(&tm); diff --git a/storage/connect/value.h b/storage/connect/value.h index 14a568c3549..2754c761815 100644 --- a/storage/connect/value.h +++ b/storage/connect/value.h @@ -1,7 +1,7 @@ /**************** Value H Declares Source Code File (.H) ***************/ -/* Name: VALUE.H Version 2.2 */ +/* Name: VALUE.H Version 2.3 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2001-2016 */ +/* (C) Copyright to the author Olivier BERTRAND 2001-2017 */ /* */ /* This file contains the VALUE and derived classes declares. 
*/ /***********************************************************************/ @@ -40,14 +40,14 @@ typedef struct _datpar *PDTP; // For DTVAL /* Utilities used to test types and to allocated values. */ /***********************************************************************/ // Exported functions -DllExport PSZ GetTypeName(int); +DllExport PCSZ GetTypeName(int); DllExport int GetTypeSize(int, int); #ifdef ODBC_SUPPORT /* This function is exported for use in OEM table type DLLs */ DllExport int TranslateSQLType(int stp, int prec, int& len, char& v, bool& w); #endif -DllExport char *GetFormatType(int); +DllExport const char *GetFormatType(int); DllExport int GetFormatType(char); DllExport bool IsTypeChar(int type); DllExport bool IsTypeNum(int type); @@ -55,8 +55,8 @@ DllExport int ConvertType(int, int, CONV, bool match = false); DllExport PVAL AllocateValue(PGLOBAL, void *, short, short = 2); DllExport PVAL AllocateValue(PGLOBAL, PVAL, int = TYPE_VOID, int = 0); DllExport PVAL AllocateValue(PGLOBAL, int, int len = 0, int prec = 0, - bool uns = false, PSZ fmt = NULL); -DllExport ulonglong CharToNumber(char *, int, ulonglong, bool, + bool uns = false, PCSZ fmt = NULL); +DllExport ulonglong CharToNumber(PCSZ, int, ulonglong, bool, bool *minus = NULL, bool *rc = NULL); DllExport BYTE OpBmp(PGLOBAL g, OPVAL opc); @@ -100,8 +100,8 @@ class DllExport VALUE : public BLOCK { // Methods virtual bool SetValue_pval(PVAL valp, bool chktype = false) = 0; - virtual bool SetValue_char(char *p, int n) = 0; - virtual void SetValue_psz(PSZ s) = 0; + virtual bool SetValue_char(const char *p, int n) = 0; + virtual void SetValue_psz(PCSZ s) = 0; virtual void SetValue_bool(bool) {assert(FALSE);} virtual int CompareValue(PVAL vp) = 0; virtual BYTE TestValue(PVAL vp); @@ -121,7 +121,9 @@ class DllExport VALUE : public BLOCK { virtual char *GetCharString(char *p) = 0; virtual bool IsEqual(PVAL vp, bool chktype) = 0; virtual bool Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op); - virtual bool 
FormatValue(PVAL vp, char *fmt) = 0; + virtual bool FormatValue(PVAL vp, PCSZ fmt) = 0; + virtual void Printf(PGLOBAL g, FILE *, uint); + virtual void Prints(PGLOBAL g, char *ps, uint z); /** Set value from a non-aligned in-memory value in the machine byte order. @@ -211,8 +213,8 @@ class DllExport TYPVAL : public VALUE { // Methods virtual bool SetValue_pval(PVAL valp, bool chktype); - virtual bool SetValue_char(char *p, int n); - virtual void SetValue_psz(PSZ s); + virtual bool SetValue_char(const char *p, int n); + virtual void SetValue_psz(PCSZ s); virtual void SetValue_bool(bool b) {Tval = (b) ? 1 : 0;} virtual int CompareValue(PVAL vp); virtual void SetValue(char c) {Tval = (TYPE)c; Null = false;} @@ -232,9 +234,7 @@ class DllExport TYPVAL : public VALUE { virtual bool IsEqual(PVAL vp, bool chktype); virtual bool Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op); virtual bool SetConstFormat(PGLOBAL, FORMAT&); - virtual bool FormatValue(PVAL vp, char *fmt); - virtual void Print(PGLOBAL g, FILE *, uint); - virtual void Print(PGLOBAL g, char *, uint); + virtual bool FormatValue(PVAL vp, PCSZ fmt); protected: static TYPE MinMaxVal(bool b); @@ -287,8 +287,8 @@ class DllExport TYPVAL<PSZ>: public VALUE { // Methods virtual bool SetValue_pval(PVAL valp, bool chktype); - virtual bool SetValue_char(char *p, int n); - virtual void SetValue_psz(PSZ s); + virtual bool SetValue_char(const char *p, int n); + virtual void SetValue_psz(PCSZ s); virtual void SetValue_pvblk(PVBLK blk, int n); virtual void SetValue(char c); virtual void SetValue(uchar c); @@ -306,8 +306,9 @@ class DllExport TYPVAL<PSZ>: public VALUE { virtual char *GetCharString(char *p); virtual bool IsEqual(PVAL vp, bool chktype); virtual bool Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op); - virtual bool FormatValue(PVAL vp, char *fmt); + virtual bool FormatValue(PVAL vp, PCSZ fmt); virtual bool SetConstFormat(PGLOBAL, FORMAT&); + virtual void Prints(PGLOBAL g, char *ps, uint z); protected: // Members @@ -371,8 
+372,8 @@ class DllExport BINVAL: public VALUE { // Methods virtual bool SetValue_pval(PVAL valp, bool chktype); - virtual bool SetValue_char(char *p, int n); - virtual void SetValue_psz(PSZ s); + virtual bool SetValue_char(const char *p, int n); + virtual void SetValue_psz(PCSZ s); virtual void SetValue_pvblk(PVBLK blk, int n); virtual void SetValue(char c); virtual void SetValue(uchar c); @@ -389,7 +390,7 @@ class DllExport BINVAL: public VALUE { virtual char *ShowValue(char *buf, int); virtual char *GetCharString(char *p); virtual bool IsEqual(PVAL vp, bool chktype); - virtual bool FormatValue(PVAL vp, char *fmt); + virtual bool FormatValue(PVAL vp, PCSZ fmt); virtual bool SetConstFormat(PGLOBAL, FORMAT&); protected: @@ -405,18 +406,18 @@ class DllExport BINVAL: public VALUE { class DllExport DTVAL : public TYPVAL<int> { public: // Constructors - DTVAL(PGLOBAL g, int n, int p, PSZ fmt); + DTVAL(PGLOBAL g, int n, int p, PCSZ fmt); DTVAL(int n); // Implementation virtual bool SetValue_pval(PVAL valp, bool chktype); - virtual bool SetValue_char(char *p, int n); - virtual void SetValue_psz(PSZ s); + virtual bool SetValue_char(const char *p, int n); + virtual void SetValue_psz(PCSZ s); virtual void SetValue_pvblk(PVBLK blk, int n); virtual char *GetCharString(char *p); virtual char *ShowValue(char *buf, int); - virtual bool FormatValue(PVAL vp, char *fmt); - bool SetFormat(PGLOBAL g, PSZ fmt, int len, int year = 0); + virtual bool FormatValue(PVAL vp, PCSZ fmt); + bool SetFormat(PGLOBAL g, PCSZ fmt, int len, int year = 0); bool SetFormat(PGLOBAL g, PVAL valp); bool IsFormatted(void) {return Pdtp != NULL;} bool MakeTime(struct tm *ptm); diff --git a/storage/connect/xindex.cpp b/storage/connect/xindex.cpp index 15fb71ab88a..3e4db8080ae 100755 --- a/storage/connect/xindex.cpp +++ b/storage/connect/xindex.cpp @@ -1,7 +1,7 @@ /***************** Xindex C++ Class Xindex Code (.CPP) *****************/ -/* Name: XINDEX.CPP Version 2.9 */ +/* Name: XINDEX.CPP Version 3.0 */ /* 
*/ -/* (C) Copyright to the author Olivier BERTRAND 2004-2015 */ +/* (C) Copyright to the author Olivier BERTRAND 2004-2017 */ /* */ /* This file contains the class XINDEX implementation code. */ /***********************************************************************/ @@ -181,7 +181,7 @@ XXBASE::XXBASE(PTDBDOS tbxp, bool b) : CSORT(b), /***********************************************************************/ /* Make file output of XINDEX contents. */ /***********************************************************************/ -void XXBASE::Print(PGLOBAL, FILE *f, uint n) +void XXBASE::Printf(PGLOBAL, FILE *f, uint n) { char m[64]; @@ -193,7 +193,7 @@ void XXBASE::Print(PGLOBAL, FILE *f, uint n) /***********************************************************************/ /* Make string output of XINDEX contents. */ /***********************************************************************/ -void XXBASE::Print(PGLOBAL, char *ps, uint z) +void XXBASE::Prints(PGLOBAL, char *ps, uint z) { *ps = '\0'; strncat(ps, "Xindex", z); @@ -446,8 +446,8 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) #if 0 if (!dup->Step) { strcpy(g->Message, MSG(QUERY_CANCELLED)); - longjmp(g->jumper[g->jump_level], 99); - } // endif Step + throw 99; + } // endif Step #endif // 0 /*******************************************************************/ @@ -464,7 +464,7 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) if (ApplyFilter(g, filp)) break; - // passthru + // fall through case RC_NF: continue; case RC_EF: @@ -819,7 +819,7 @@ bool XINDEX::Reorder(PGLOBAL g __attribute__((unused))) /***********************************************************************/ bool XINDEX::SaveIndex(PGLOBAL g, PIXDEF sxp) { - char *ftype; + PCSZ ftype; char fn[_MAX_PATH]; int n[NZ], nof = (Mul) ? (Ndif + 1) : 0; int id = -1, size = 0; @@ -948,7 +948,7 @@ bool XINDEX::Init(PGLOBAL g) /* Table will be accessed through an index table. */ /* If sorting is required, this will be done later. 
*/ /*********************************************************************/ - char *ftype; + PCSZ ftype; char fn[_MAX_PATH]; int k, n, nv[NZ], id = -1; bool estim = false; @@ -965,7 +965,7 @@ bool XINDEX::Init(PGLOBAL g) // For DBF tables, Cardinality includes bad or soft deleted lines // that are not included in the index, and can be larger then the // index size. - estim = (Tdbp->Ftype == RECFM_DBF); + estim = (Tdbp->Ftype == RECFM_DBF || Tdbp->Txfp->GetAmType() == TYPE_AM_ZIP); n = Tdbp->Cardinality(g); // n is exact table size } else { // Variable table not optimized @@ -1412,7 +1412,7 @@ err: /***********************************************************************/ bool XINDEX::GetAllSizes(PGLOBAL g,/* int &ndif,*/ int &numk) { - char *ftype; + PCSZ ftype; char fn[_MAX_PATH]; int nv[NZ], id = -1; // n //bool estim = false; @@ -2320,9 +2320,9 @@ XFILE::XFILE(void) : XLOAD() /***********************************************************************/ bool XFILE::Open(PGLOBAL g, char *filename, int id, MODE mode) { - char *pmod; - bool rc; - IOFF noff[MAX_INDX]; + PCSZ pmod; + bool rc; + IOFF noff[MAX_INDX]; /*********************************************************************/ /* Open the index file according to mode. 
*/ @@ -3032,7 +3032,7 @@ bool KXYCOL::Init(PGLOBAL g, PCOL colp, int n, bool sm, int kln) return true; Klen = Valp->GetClen(); - Keys.Size = n * Klen; + Keys.Size = (size_t)n * (size_t)Klen; if (!PlgDBalloc(g, NULL, Keys)) { sprintf(g->Message, MSG(KEY_ALLOC_ERROR), Klen, n); diff --git a/storage/connect/xindex.h b/storage/connect/xindex.h index 2d10d72722e..339d7e68b75 100644 --- a/storage/connect/xindex.h +++ b/storage/connect/xindex.h @@ -200,8 +200,8 @@ class DllExport XXBASE : public CSORT, public BLOCK { void FreeIndex(void) {PlgDBfree(Index);} // Methods - virtual void Print(PGLOBAL g, FILE *f, uint n); - virtual void Print(PGLOBAL g, char *ps, uint z); + virtual void Printf(PGLOBAL g, FILE *f, uint n); + virtual void Prints(PGLOBAL g, char *ps, uint z); virtual bool Init(PGLOBAL g) = 0; virtual bool Make(PGLOBAL g, PIXDEF sxp) = 0; #if defined(XMAP) diff --git a/storage/connect/xobject.cpp b/storage/connect/xobject.cpp index a0b7849543d..205edc12d0c 100644 --- a/storage/connect/xobject.cpp +++ b/storage/connect/xobject.cpp @@ -1,7 +1,7 @@ /************ Xobject C++ Functions Source Code File (.CPP) ************/ -/* Name: XOBJECT.CPP Version 2.4 */ +/* Name: XOBJECT.CPP Version 2.5 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 1998-2014 */ +/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */ /* */ /* This file contains base XOBJECT class functions. */ /* Also here is the implementation of the CONSTANT class. 
*/ @@ -84,7 +84,7 @@ double XOBJECT::GetFloatValue(void) CONSTANT::CONSTANT(PGLOBAL g, void *value, short type) { if (!(Value = AllocateValue(g, value, (int)type))) - longjmp(g->jumper[g->jump_level], TYPE_CONST); + throw TYPE_CONST; Constant = true; } // end of CONSTANT constructor @@ -95,7 +95,7 @@ CONSTANT::CONSTANT(PGLOBAL g, void *value, short type) CONSTANT::CONSTANT(PGLOBAL g, int n) { if (!(Value = AllocateValue(g, &n, TYPE_INT))) - longjmp(g->jumper[g->jump_level], TYPE_CONST); + throw TYPE_CONST; Constant = true; } // end of CONSTANT constructor @@ -117,7 +117,7 @@ void CONSTANT::Convert(PGLOBAL g, int newtype) { if (Value->GetType() != newtype) if (!(Value = AllocateValue(g, Value, newtype))) - longjmp(g->jumper[g->jump_level], TYPE_CONST); + throw TYPE_CONST; } // end of Convert @@ -173,17 +173,17 @@ bool CONSTANT::Rephrase(PGLOBAL g, PSZ work) /***********************************************************************/ /* Make file output of a constant object. */ /***********************************************************************/ -void CONSTANT::Print(PGLOBAL g, FILE *f, uint n) +void CONSTANT::Printf(PGLOBAL g, FILE *f, uint n) { - Value->Print(g, f, n); + Value->Printf(g, f, n); } /* end of Print */ /***********************************************************************/ /* Make string output of a constant object. */ /***********************************************************************/ -void CONSTANT::Print(PGLOBAL g, char *ps, uint z) +void CONSTANT::Prints(PGLOBAL g, char *ps, uint z) { - Value->Print(g, ps, z); + Value->Prints(g, ps, z); } /* end of Print */ /* -------------------------- Class STRING --------------------------- */ @@ -192,7 +192,7 @@ void CONSTANT::Print(PGLOBAL g, char *ps, uint z) /* STRING public constructor for new char values. Alloc Size must be */ /* calculated because PlugSubAlloc rounds up size to multiple of 8. 
*/ /***********************************************************************/ -STRING::STRING(PGLOBAL g, uint n, char *str) +STRING::STRING(PGLOBAL g, uint n, PCSZ str) { G = g; Length = (str) ? strlen(str) : 0; @@ -205,10 +205,12 @@ STRING::STRING(PGLOBAL g, uint n, char *str) Next = GetNext(); Size = Next - Strp; + Trc = false; } else { // This should normally never happen Next = NULL; Size = 0; + Trc = true; } // endif Strp } // end of STRING constructor @@ -229,6 +231,7 @@ char *STRING::Realloc(uint len) if (!p) { // No more room in Sarea; this is very unlikely strcpy(G->Message, "No more room in work area"); + Trc = true; return NULL; } // endif p @@ -243,7 +246,7 @@ char *STRING::Realloc(uint len) /***********************************************************************/ /* Set a STRING new PSZ value. */ /***********************************************************************/ -bool STRING::Set(PSZ s) +bool STRING::Set(PCSZ s) { if (!s) return false; @@ -333,9 +336,9 @@ bool STRING::Append(const char *s, uint ln, bool nq) } // end of Append /***********************************************************************/ -/* Append a PSZ to a STRING. */ +/* Append a PCSZ to a STRING. */ /***********************************************************************/ -bool STRING::Append(PSZ s) +bool STRING::Append(PCSZ s) { if (!s) return false; @@ -392,11 +395,11 @@ bool STRING::Append(char c) /***********************************************************************/ /* Append a quoted PSZ to a STRING. 
*/ /***********************************************************************/ -bool STRING::Append_quoted(PSZ s) +bool STRING::Append_quoted(PCSZ s) { bool b = Append('\''); - if (s) for (char *p = s; !b && *p; p++) + if (s) for (const char *p = s; !b && *p; p++) switch (*p) { case '\'': case '\\': @@ -405,7 +408,7 @@ bool STRING::Append_quoted(PSZ s) case '\r': case '\b': case '\f': b |= Append('\\'); - // passthru + // fall through default: b |= Append(*p); break; diff --git a/storage/connect/xobject.h b/storage/connect/xobject.h index 8f6c23c4aeb..bc5912d3054 100644 --- a/storage/connect/xobject.h +++ b/storage/connect/xobject.h @@ -112,8 +112,8 @@ class DllExport CONSTANT : public XOBJECT { {return Value->SetConstFormat(g, fmt);} void Convert(PGLOBAL g, int newtype); void SetValue(PVAL vp) {Value = vp;} - virtual void Print(PGLOBAL g, FILE *, uint); - virtual void Print(PGLOBAL g, char *, uint); + virtual void Printf(PGLOBAL g, FILE *, uint); + virtual void Prints(PGLOBAL g, char *, uint); }; // end of class CONSTANT /***********************************************************************/ @@ -123,24 +123,25 @@ class DllExport CONSTANT : public XOBJECT { class DllExport STRING : public BLOCK { public: // Constructor - STRING(PGLOBAL g, uint n, PSZ str = NULL); + STRING(PGLOBAL g, uint n, PCSZ str = NULL); // Implementation inline int GetLength(void) {return (int)Length;} inline void SetLength(uint n) {Length = n;} inline PSZ GetStr(void) {return Strp;} inline uint32 GetSize(void) {return Size;} + inline bool IsTruncated(void) {return Trc;} // Methods inline void Reset(void) {*Strp = 0;} - bool Set(PSZ s); + bool Set(PCSZ s); bool Set(char *s, uint n); bool Append(const char *s, uint ln, bool nq = false); - bool Append(PSZ s); + bool Append(PCSZ s); bool Append(STRING &str); bool Append(char c); bool Resize(uint n); - bool Append_quoted(PSZ s); + bool Append_quoted(PCSZ s); inline void Trim(void) {(void)Resize(Length + 1);} inline void Chop(void) {if (Length) 
Strp[--Length] = 0;} inline void RepLast(char c) {if (Length) Strp[Length-1] = c;} @@ -156,6 +157,7 @@ class DllExport STRING : public BLOCK { PSZ Strp; // The char string uint Length; // String length uint Size; // Allocated size + bool Trc; // When truncated char *Next; // Next alloc position }; // end of class STRING diff --git a/storage/connect/xtable.h b/storage/connect/xtable.h index 4aeea05946a..ebef7a2549a 100644 --- a/storage/connect/xtable.h +++ b/storage/connect/xtable.h @@ -33,29 +33,6 @@ class CMD : public BLOCK { char *Cmd; }; // end of class CMD -#if 0 -// Condition filter structure -class CONDFIL : public BLOCK { - public: - // Constructor - CONDFIL(const Item *cond, uint idx, AMT type) - { - Cond = cond; Idx = idx; Type = type; Op = OP_XX; - Cmds = NULL; All = true; Body = NULL, Having = NULL; - } - - // Members - const Item *Cond; - AMT Type; - uint Idx; - OPVAL Op; - PCMD Cmds; - bool All; - char *Body; - char *Having; -}; // end of class CONDFIL -#endif // 0 - typedef class EXTCOL *PEXTCOL; typedef class CONDFIL *PCFIL; typedef class TDBCAT *PTDBCAT; @@ -84,7 +61,6 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. inline PFIL GetFilter(void) {return To_Filter;} inline PCOL GetSetCols(void) {return To_SetCols;} inline void SetSetCols(PCOL colp) {To_SetCols = colp;} - inline void SetFilter(PFIL fp) {To_Filter = fp;} inline void SetOrig(PTDB txp) {To_Orig = txp;} inline void SetUse(TUSE n) {Use = n;} inline void SetCondFil(PCFIL cfp) {To_CondFil = cfp;} @@ -94,11 +70,14 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. 
inline void SetColumns(PCOL colp) {Columns = colp;} inline void SetDegree(int degree) {Degree = degree;} inline void SetMode(MODE mode) {Mode = mode;} + inline const Item *GetCond(void) {return Cond;} + inline void SetCond(const Item *cond) {Cond = cond;} // Properties virtual AMT GetAmType(void) {return TYPE_AM_ERROR;} virtual bool IsRemote(void) {return false;} virtual bool IsIndexed(void) {return false;} + virtual void SetFilter(PFIL fp) {To_Filter = fp;} virtual int GetTdb_No(void) {return Tdb_No;} virtual PTDB GetNext(void) {return Next;} virtual PCATLG GetCat(void) {return NULL;} @@ -110,7 +89,7 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. virtual bool IsSpecial(PSZ name); virtual bool IsReadOnly(void) {return Read_Only;} virtual bool IsView(void) {return FALSE;} - virtual PSZ GetPath(void); + virtual PCSZ GetPath(void); virtual RECFM GetFtype(void) {return RECFM_NAF;} virtual bool GetBlockValues(PGLOBAL) { return false; } virtual int Cardinality(PGLOBAL) {return 0;} @@ -119,19 +98,20 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. 
virtual int GetMaxSize(PGLOBAL) = 0; virtual int GetProgMax(PGLOBAL) = 0; virtual int GetProgCur(void) {return GetRecpos();} - virtual PSZ GetFile(PGLOBAL) {return "Not a file";} - virtual void SetFile(PGLOBAL, PSZ) {} + virtual PCSZ GetFile(PGLOBAL) {return "Not a file";} + virtual void SetFile(PGLOBAL, PCSZ) {} virtual void ResetDB(void) {} virtual void ResetSize(void) {MaxSize = -1;} virtual int RowNumber(PGLOBAL g, bool b = false); + virtual bool CanBeFiltered(void) {return true;} virtual PTDB Duplicate(PGLOBAL) {return NULL;} virtual PTDB Clone(PTABS) {return this;} virtual PTDB Copy(PTABS t); virtual void PrintAM(FILE *f, char *m) {fprintf(f, "%s AM(%d)\n", m, GetAmType());} - virtual void Print(PGLOBAL g, FILE *f, uint n); - virtual void Print(PGLOBAL g, char *ps, uint z); - virtual PSZ GetServer(void) = 0; + virtual void Printf(PGLOBAL g, FILE *f, uint n); + virtual void Prints(PGLOBAL g, char *ps, uint z); + virtual PCSZ GetServer(void) = 0; virtual int GetBadLines(void) {return 0;} virtual CHARSET_INFO *data_charset(void); @@ -157,6 +137,7 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. 
TUSE Use; PFIL To_Filter; PCFIL To_CondFil; // To condition filter structure + const Item *Cond; // The condition used to make filters static int Tnum; // Used to generate Tdb_no's const int Tdb_No; // GetTdb_No() is always 0 for OPJOIN PTDB Next; // Next in linearized queries @@ -187,9 +168,6 @@ class DllExport TDBASE : public TDB { // Implementation inline int GetKnum(void) {return Knum;} -//inline PTABDEF GetDef(void) {return To_Def;} -//inline PCOL GetSetCols(void) {return To_SetCols;} -//inline void SetSetCols(PCOL colp) {To_SetCols = colp;} inline void SetKey_Col(PCOL *cpp) {To_Key_Col = cpp;} inline void SetXdp(PIXDEF xdp) {To_Xdp = xdp;} inline void SetKindex(PKXBASE kxp) {To_Kindex = kxp;} @@ -201,36 +179,14 @@ class DllExport TDBASE : public TDB { // Methods virtual bool IsUsingTemp(PGLOBAL) {return false;} -//virtual bool IsIndexed(void) {return false;} -//virtual bool IsSpecial(PSZ name); virtual PCATLG GetCat(void); -//virtual PSZ GetPath(void); virtual void PrintAM(FILE *f, char *m); -//virtual RECFM GetFtype(void) {return RECFM_NAF;} -//virtual int GetAffectedRows(void) {return -1;} -//virtual int GetRecpos(void) = 0; -//virtual bool SetRecpos(PGLOBAL g, int recpos); -//virtual bool IsReadOnly(void) {return Read_Only;} -//virtual bool IsView(void) {return FALSE;} -//virtual CHARSET_INFO *data_charset(void); virtual int GetProgMax(PGLOBAL g) {return GetMaxSize(g);} -//virtual int GetProgCur(void) {return GetRecpos();} -//virtual PSZ GetFile(PGLOBAL) {return "Not a file";} -//virtual int GetRemote(void) {return 0;} -//virtual void SetFile(PGLOBAL, PSZ) {} -//virtual void ResetDB(void) {} -//virtual void ResetSize(void) {MaxSize = -1;} virtual void RestoreNrec(void) {} virtual int ResetTableOpt(PGLOBAL g, bool dop, bool dox); - virtual PSZ GetServer(void) {return "Current";} + virtual PCSZ GetServer(void) {return "Current";} // Database routines -//virtual PCOL ColDB(PGLOBAL g, PSZ name, int num); -//virtual PCOL MakeCol(PGLOBAL, PCOLDEF, PCOL, int) -// 
{assert(false); return NULL;} -//virtual PCOL InsertSpecialColumn(PCOL colp); -//virtual PCOL InsertSpcBlk(PGLOBAL g, PCOLDEF cdp); -//virtual void MarkDB(PGLOBAL g, PTDB tdb2); virtual int MakeIndex(PGLOBAL g, PIXDEF, bool) {strcpy(g->Message, "Remote index"); return RC_INFO;} virtual bool ReadKey(PGLOBAL, OPVAL, const key_range *) @@ -241,18 +197,12 @@ class DllExport TDBASE : public TDB { "This function should not be called for this table"); return true;} // Members -//PTABDEF To_Def; // Points to catalog description block PXOB *To_Link; // Points to column of previous relations PCOL *To_Key_Col; // Points to key columns in current file PKXBASE To_Kindex; // Points to table key index PIXDEF To_Xdp; // To the index definition block -//PCOL To_SetCols; // Points to updated columns RECFM Ftype; // File type: 0-var 1-fixed 2-binary (VCT) -//int MaxSize; // Max size in number of lines int Knum; // Size of key arrays -//bool Read_Only; // True for read only tables -//const CHARSET_INFO *m_data_charset; -//const char *csname; // Table charset name }; // end of class TDBASE /***********************************************************************/ diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc index 363fb993fe7..f00fe0e201f 100644 --- a/storage/csv/ha_tina.cc +++ b/storage/csv/ha_tina.cc @@ -300,7 +300,7 @@ static int read_meta_file(File meta_file, ha_rows *rows) mysql_file_seek(meta_file, 0, MY_SEEK_SET, MYF(0)); if (mysql_file_read(meta_file, (uchar*)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE) - DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + DBUG_RETURN(my_errno= HA_ERR_CRASHED_ON_USAGE); /* Parse out the meta data, we ignore version at the moment @@ -429,10 +429,13 @@ static int free_share(TINA_SHARE *share) int result_code= 0; if (!--share->use_count){ /* Write the meta file. Mark it as crashed if needed. */ - (void)write_meta_file(share->meta_file, share->rows_recorded, - share->crashed ? 
TRUE :FALSE); - if (mysql_file_close(share->meta_file, MYF(0))) - result_code= 1; + if (share->meta_file != -1) + { + (void)write_meta_file(share->meta_file, share->rows_recorded, + share->crashed ? TRUE :FALSE); + if (mysql_file_close(share->meta_file, MYF(0))) + result_code= 1; + } if (share->tina_write_opened) { if (mysql_file_close(share->tina_write_filedes, MYF(0))) @@ -954,7 +957,7 @@ int ha_tina::open(const char *name, int mode, uint open_options) if (share->crashed && !(open_options & HA_OPEN_FOR_REPAIR)) { free_share(share); - DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + DBUG_RETURN(my_errno ? my_errno : HA_ERR_CRASHED_ON_USAGE); } local_data_file_version= share->data_file_version; @@ -1505,13 +1508,13 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt) /* Don't assert in field::val() functions */ table->use_all_columns(); - if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME)))) - DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* position buffer to the start of the file */ if (init_data_file()) DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR); + if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME)))) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* Local_saved_data_file_length is initialized during the lock phase. Sometimes this is not getting executed before ::repair (e.g. for @@ -1595,9 +1598,9 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt) DBUG_RETURN(my_errno ? 
my_errno : -1); share->tina_write_opened= FALSE; } - if (mysql_file_close(data_file, MYF(0)) || - mysql_file_close(repair_file, MYF(0)) || - mysql_file_rename(csv_key_file_data, + mysql_file_close(data_file, MYF(0)); + mysql_file_close(repair_file, MYF(0)); + if (mysql_file_rename(csv_key_file_data, repaired_fname, share->data_file_name, MYF(0))) DBUG_RETURN(-1); @@ -1719,13 +1722,14 @@ int ha_tina::check(THD* thd, HA_CHECK_OPT* check_opt) DBUG_ENTER("ha_tina::check"); old_proc_info= thd_proc_info(thd, "Checking table"); - if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME)))) - DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* position buffer to the start of the file */ if (init_data_file()) DBUG_RETURN(HA_ERR_CRASHED); + if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME)))) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + /* Local_saved_data_file_length is initialized during the lock phase. Check does not use store_lock in certain cases. So, we set it diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index f22bb6cb758..a3121f56d7c 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -1,4 +1,5 @@ /* Copyright (c) 2004, 2015, Oracle and/or its affiliates. + Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -2943,6 +2944,7 @@ int ha_federated::extra(ha_extra_function operation) break; case HA_EXTRA_PREPARE_FOR_DROP: table_will_be_deleted = TRUE; + break; default: /* do nothing */ DBUG_PRINT("info",("unhandled operation: %d", (uint) operation)); diff --git a/storage/federatedx/ha_federatedx.h b/storage/federatedx/ha_federatedx.h index 759ddfdfe71..3345bbc2180 100644 --- a/storage/federatedx/ha_federatedx.h +++ b/storage/federatedx/ha_federatedx.h @@ -330,7 +330,7 @@ public: return (HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED | HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_CAN_INDEX_BLOBS | HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE | HA_CAN_REPAIR | - HA_NO_PREFIX_CHAR_KEYS | HA_PRIMARY_KEY_REQUIRED_FOR_DELETE | + HA_PRIMARY_KEY_REQUIRED_FOR_DELETE | HA_PARTIAL_COLUMN_READ | HA_NULL_IN_KEY); } /* diff --git a/storage/heap/hp_create.c b/storage/heap/hp_create.c index bb5537c9363..431e992e75b 100644 --- a/storage/heap/hp_create.c +++ b/storage/heap/hp_create.c @@ -1,5 +1,5 @@ /* Copyright (c) 2000, 2011, Oracle and/or its affiliates. - Copyright (c) 2010, 2014, SkySQL Ab. + Copyright (c) 2010, 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -94,7 +94,7 @@ int heap_create(const char *name, HP_CREATE_INFO *create_info, case HA_KEYTYPE_VARBINARY1: /* Case-insensitiveness is handled in coll->hash_sort */ keyinfo->seg[j].type= HA_KEYTYPE_VARTEXT1; - /* fall_through */ + /* fall through */ case HA_KEYTYPE_VARTEXT1: keyinfo->flag|= HA_VAR_LENGTH_KEY; length+= 2; diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc index 3d778c49012..89ebe5b8783 100644 --- a/storage/innobase/btr/btr0btr.cc +++ b/storage/innobase/btr/btr0btr.cc @@ -171,7 +171,8 @@ btr_root_block_get( if (index && index->table) { index->table->file_unreadable = true; - ib_push_warning(index->table->thd, DB_DECRYPTION_FAILED, + ib_push_warning( + static_cast<THD*>(NULL), DB_DECRYPTION_FAILED, "Table %s in tablespace %lu is encrypted but encryption service or" " used key_id is not available. " " Can't continue reading table.", @@ -1133,9 +1134,7 @@ btr_create( const btr_create_t* btr_redo_create_info, mtr_t* mtr) { - ulint page_no; buf_block_t* block; - buf_frame_t* frame; page_t* page; page_zip_des_t* page_zip; @@ -1170,33 +1169,28 @@ btr_create( + IBUF_HEADER + IBUF_TREE_SEG_HEADER, IBUF_TREE_ROOT_PAGE_NO, FSP_UP, mtr); - ut_ad(block->page.id.page_no() == IBUF_TREE_ROOT_PAGE_NO); - } else { - block = fseg_create(space, 0, - PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr); - } - if (block == NULL) { - - return(FIL_NULL); - } + if (block == NULL) { + return(FIL_NULL); + } - page_no = block->page.id.page_no(); - frame = buf_block_get_frame(block); + ut_ad(block->page.id.page_no() == IBUF_TREE_ROOT_PAGE_NO); - if (type & DICT_IBUF) { - /* It is an insert buffer tree: initialize the free list */ buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW); - ut_ad(page_no == IBUF_TREE_ROOT_PAGE_NO); - - flst_init(frame + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, mtr); + flst_init(block->frame + 
PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, + mtr); } else { - /* It is a non-ibuf tree: create a file segment for leaf - pages */ + block = fseg_create(space, 0, + PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr); + + if (block == NULL) { + return(FIL_NULL); + } + buf_block_dbg_add_level(block, SYNC_TREE_NODE_NEW); - if (!fseg_create(space, page_no, + if (!fseg_create(space, block->page.id.page_no(), PAGE_HEADER + PAGE_BTR_SEG_LEAF, mtr)) { /* Not enough space for new segment, free root segment before return. */ @@ -1287,7 +1281,7 @@ btr_create( ut_ad(page_get_max_insert_size(page, 2) > 2 * BTR_PAGE_MAX_REC_SIZE); - return(page_no); + return(block->page.id.page_no()); } /** Free a B-tree except the root page. The root page MUST be freed after diff --git a/storage/innobase/btr/btr0bulk.cc b/storage/innobase/btr/btr0bulk.cc index 9ff3bc5f6d1..8a954f9d3c3 100644 --- a/storage/innobase/btr/btr0bulk.cc +++ b/storage/innobase/btr/btr0bulk.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2014, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -991,11 +992,7 @@ BtrBulk::finish(dberr_t err) ut_ad(err == DB_SUCCESS); } -#ifdef UNIV_DEBUG - dict_sync_check check(true); - - ut_ad(!sync_check_iterate(check)); -#endif /* UNIV_DEBUG */ + ut_ad(!sync_check_iterate(dict_sync_check())); ut_ad(err != DB_SUCCESS || btr_validate_index(m_index, NULL, false)); return(err); diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index f3d62f79a81..9e7d4f6a0bb 100644 --- a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -2813,18 +2813,21 @@ btr_cur_ins_lock_and_undo( } if (err != DB_SUCCESS + || !(~flags | (BTR_NO_UNDO_LOG_FLAG | BTR_KEEP_SYS_FLAG)) || !dict_index_is_clust(index) || dict_index_is_ibuf(index)) { return(err); } - err = trx_undo_report_row_operation(flags, TRX_UNDO_INSERT_OP, - thr, index, entry, - NULL, 0, NULL, NULL, - &roll_ptr); - if (err != DB_SUCCESS) { - - return(err); + if (flags & BTR_NO_UNDO_LOG_FLAG) { + roll_ptr = 0; + } else { + err = trx_undo_report_row_operation(thr, index, entry, + NULL, 0, NULL, NULL, + &roll_ptr); + if (err != DB_SUCCESS) { + return(err); + } } /* Now we can fill in the roll ptr field in entry */ @@ -2884,15 +2887,17 @@ btr_cur_optimistic_insert( btr_cur_t* cursor, /*!< in: cursor on page after which to insert; cursor stays valid */ ulint** offsets,/*!< out: offsets on *rec */ - mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */ + mem_heap_t** heap, /*!< in/out: pointer to memory heap */ dtuple_t* entry, /*!< in/out: entry to insert */ rec_t** rec, /*!< out: pointer to inserted record if succeed */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or - NULL */ + be stored externally by the caller */ ulint n_ext, /*!< in: number of externally stored columns */ - que_thr_t* thr, /*!< in: query thread or 
NULL */ + que_thr_t* thr, /*!< in/out: query thread; can be NULL if + !(~flags + & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) */ mtr_t* mtr) /*!< in/out: mini-transaction; if this function returns DB_SUCCESS on a leaf page of a secondary index in a @@ -2912,6 +2917,7 @@ btr_cur_optimistic_insert( ulint rec_size; dberr_t err; + ut_ad(thr || !(~flags & (BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG))); *big_rec = NULL; block = btr_cur_get_block(cursor); @@ -3147,15 +3153,17 @@ btr_cur_pessimistic_insert( cursor stays valid */ ulint** offsets,/*!< out: offsets on *rec */ mem_heap_t** heap, /*!< in/out: pointer to memory heap - that can be emptied, or NULL */ + that can be emptied */ dtuple_t* entry, /*!< in/out: entry to insert */ rec_t** rec, /*!< out: pointer to inserted record if succeed */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or - NULL */ + be stored externally by the caller */ ulint n_ext, /*!< in: number of externally stored columns */ - que_thr_t* thr, /*!< in: query thread or NULL */ + que_thr_t* thr, /*!< in/out: query thread; can be NULL if + !(~flags + & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) */ mtr_t* mtr) /*!< in/out: mini-transaction */ { dict_index_t* index = cursor->index; @@ -3166,6 +3174,7 @@ btr_cur_pessimistic_insert( ulint n_reserved = 0; ut_ad(dtuple_check_typed(entry)); + ut_ad(thr || !(~flags & (BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG))); *big_rec = NULL; @@ -3353,9 +3362,10 @@ btr_cur_upd_lock_and_undo( /* Append the info about the update in the undo log */ - return(trx_undo_report_row_operation( - flags, TRX_UNDO_MODIFY_OP, thr, - index, NULL, update, + return((flags & BTR_NO_UNDO_LOG_FLAG) + ? 
DB_SUCCESS + : trx_undo_report_row_operation( + thr, index, NULL, update, cmpl_info, rec, offsets, roll_ptr)); } @@ -4069,12 +4079,12 @@ btr_cur_pessimistic_update( ulint** offsets,/*!< out: offsets on cursor->page_cur.rec */ mem_heap_t** offsets_heap, /*!< in/out: pointer to memory heap - that can be emptied, or NULL */ + that can be emptied */ mem_heap_t* entry_heap, /*!< in/out: memory heap for allocating big_rec and the index tuple */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or NULL */ + be stored externally by the caller */ upd_t* update, /*!< in/out: update vector; this is allowed to also contain trx id and roll ptr fields. Non-updated columns that are moved offpage will @@ -4601,7 +4611,6 @@ undo log record created. dberr_t btr_cur_del_mark_set_clust_rec( /*===========================*/ - ulint flags, /*!< in: undo logging and locking flags */ buf_block_t* block, /*!< in/out: buffer block of the record */ rec_t* rec, /*!< in/out: record */ dict_index_t* index, /*!< in: clustered index of the record */ @@ -4637,8 +4646,8 @@ btr_cur_del_mark_set_clust_rec( return(err); } - err = trx_undo_report_row_operation(flags, TRX_UNDO_MODIFY_OP, thr, - index, entry, NULL, 0, rec, offsets, + err = trx_undo_report_row_operation(thr, index, + entry, NULL, 0, rec, offsets, &roll_ptr); if (err != DB_SUCCESS) { diff --git a/storage/innobase/btr/btr0defragment.cc b/storage/innobase/btr/btr0defragment.cc index 6913124cea1..d4b83930191 100644 --- a/storage/innobase/btr/btr0defragment.cc +++ b/storage/innobase/btr/btr0defragment.cc @@ -154,7 +154,6 @@ btr_defragment_init() srv_defragment_interval = ut_microseconds_to_timer( (ulonglong) (1000000.0 / srv_defragment_frequency)); mutex_create(LATCH_ID_BTR_DEFRAGMENT_MUTEX, &btr_defragment_mutex); - os_thread_create(btr_defragment_thread, NULL, NULL); } /******************************************************************//** @@ -736,14 +735,13 @@ btr_defragment_n_pages( 
return current_block; } -/******************************************************************//** -Thread that merges consecutive b-tree pages into fewer pages to defragment -the index. */ +/** Whether btr_defragment_thread is active */ +bool btr_defragment_thread_active; + +/** Merge consecutive b-tree pages into fewer pages to defragment indexes */ extern "C" UNIV_INTERN os_thread_ret_t -DECLARE_THREAD(btr_defragment_thread)( -/*==========================================*/ - void* arg) /*!< in: work queue */ +DECLARE_THREAD(btr_defragment_thread)(void*) { btr_pcur_t* pcur; btr_cur_t* cursor; @@ -753,6 +751,8 @@ DECLARE_THREAD(btr_defragment_thread)( buf_block_t* last_block; while (srv_shutdown_state == SRV_SHUTDOWN_NONE) { + ut_ad(btr_defragment_thread_active); + /* If defragmentation is disabled, sleep before checking whether it's enabled. */ if (!srv_defragment) { @@ -844,7 +844,8 @@ DECLARE_THREAD(btr_defragment_thread)( btr_defragment_remove_item(item); } } - btr_defragment_shutdown(); + + btr_defragment_thread_active = false; os_thread_exit(); OS_THREAD_DUMMY_RETURN; } diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index e63c234b2f6..8d6d95a020e 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -79,6 +79,10 @@ Created 11/5/1995 Heikki Tuuri #include "ut0byte.h" #include <new> +#ifdef UNIV_LINUX +#include <stdlib.h> +#endif + #ifdef HAVE_LZO #include "lzo/lzo1x.h" #endif @@ -126,6 +130,30 @@ struct set_numa_interleave_t #define NUMA_MEMPOLICY_INTERLEAVE_IN_SCOPE #endif /* HAVE_LIBNUMA */ +#ifdef HAVE_SNAPPY +#include "snappy-c.h" +#endif + +inline void* aligned_malloc(size_t size, size_t align) { + void *result; +#ifdef _MSC_VER + result = _aligned_malloc(size, align); +#else + if(posix_memalign(&result, align, size)) { + result = 0; + } +#endif + return result; +} + +inline void aligned_free(void *ptr) { +#ifdef _MSC_VER + _aligned_free(ptr); +#else + free(ptr); +#endif +} + /* IMPLEMENTATION 
OF THE BUFFER POOL ================================= @@ -1943,20 +1971,14 @@ buf_pool_free_instance( if (buf_pool->tmp_arr) { for(ulint i = 0; i < buf_pool->tmp_arr->n_slots; i++) { buf_tmp_buffer_t* slot = &(buf_pool->tmp_arr->slots[i]); -#ifdef HAVE_LZO - if (slot && slot->lzo_mem) { - ut_free(slot->lzo_mem); - slot->lzo_mem = NULL; - } -#endif - if (slot && slot->crypt_buf_free) { - ut_free(slot->crypt_buf_free); - slot->crypt_buf_free = NULL; + if (slot && slot->crypt_buf) { + aligned_free(slot->crypt_buf); + slot->crypt_buf = NULL; } - if (slot && slot->comp_buf_free) { - ut_free(slot->comp_buf_free); - slot->comp_buf_free = NULL; + if (slot && slot->comp_buf) { + aligned_free(slot->comp_buf); + slot->comp_buf = NULL; } } @@ -3850,14 +3872,25 @@ buf_zip_decompress( { const byte* frame = block->page.zip.data; ulint size = page_zip_get_size(&block->page.zip); + /* The tablespace will not be found if this function is called + during IMPORT. */ + fil_space_t* space = fil_space_acquire_for_io(block->page.id.space()); + const unsigned key_version = mach_read_from_4( + frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); + fil_space_crypt_t* crypt_data = space ? space->crypt_data : NULL; + const bool encrypted = crypt_data + && crypt_data->type != CRYPT_SCHEME_UNENCRYPTED + && (!crypt_data->is_default_encryption() + || srv_encrypt_tables); ut_ad(block->page.size.is_compressed()); ut_a(block->page.id.space() != 0); if (UNIV_UNLIKELY(check && !page_zip_verify_checksum(frame, size))) { - ib::error() << "Compressed page checksum mismatch " - << block->page.id << "): stored: " + ib::error() << "Compressed page checksum mismatch for " + << (space ? 
space->chain.start->name : "") + << block->page.id << ": stored: " << mach_read_from_4(frame + FIL_PAGE_SPACE_OR_CHKSUM) << ", crc32: " << page_zip_calc_checksum( @@ -3873,7 +3906,7 @@ buf_zip_decompress( << page_zip_calc_checksum( frame, size, SRV_CHECKSUM_ALGORITHM_NONE); - return(FALSE); + goto err_exit; } switch (fil_page_get_type(frame)) { @@ -3881,15 +3914,16 @@ buf_zip_decompress( case FIL_PAGE_RTREE: if (page_zip_decompress(&block->page.zip, block->frame, TRUE)) { + if (space) { + fil_space_release_for_io(space); + } return(TRUE); } - ib::error() << "Unable to decompress space " - << block->page.id.space() - << " page " << block->page.id.page_no(); - - return(FALSE); - + ib::error() << "Unable to decompress " + << (space ? space->chain.start->name : "") + << block->page.id; + goto err_exit; case FIL_PAGE_TYPE_ALLOCATED: case FIL_PAGE_INODE: case FIL_PAGE_IBUF_BITMAP: @@ -3899,11 +3933,31 @@ buf_zip_decompress( case FIL_PAGE_TYPE_ZBLOB2: /* Copy to uncompressed storage. */ memcpy(block->frame, frame, block->page.size.physical()); + if (space) { + fil_space_release_for_io(space); + } + return(TRUE); } ib::error() << "Unknown compressed page type " - << fil_page_get_type(frame); + << fil_page_get_type(frame) + << " in " << (space ? space->chain.start->name : "") + << block->page.id; + +err_exit: + if (encrypted) { + ib::info() << "Row compressed page could be encrypted" + " with key_version " << key_version; + block->page.encrypted = true; + dict_set_encrypted_by_space(block->page.id.space()); + } else { + dict_set_corrupted_by_space(block->page.id.space()); + } + + if (space) { + fil_space_release_for_io(space); + } return(FALSE); } @@ -4504,12 +4558,21 @@ got_block: /* Decompress the page while not holding buf_pool->mutex or block->mutex. */ - /* Page checksum verification is already done when - the page is read from disk. Hence page checksum - verification is not necessary when decompressing the page. 
*/ { - bool success = buf_zip_decompress(block, FALSE); - ut_a(success); + bool success = buf_zip_decompress(block, TRUE); + + if (!success) { + buf_pool_mutex_enter(buf_pool); + buf_page_mutex_enter(fix_block); + buf_block_set_io_fix(fix_block, BUF_IO_NONE); + buf_page_mutex_exit(fix_block); + + --buf_pool->n_pend_unzip; + buf_block_unfix(fix_block); + buf_pool_mutex_exit(buf_pool); + rw_lock_x_unlock(&fix_block->lock); + return NULL; + } } if (!recv_no_ibuf_operations) { @@ -4612,19 +4675,12 @@ got_block: goto loop; } - ib::info() << "innodb_change_buffering_debug evict " - << page_id; - return(NULL); } buf_page_mutex_enter(fix_block); if (buf_flush_page_try(buf_pool, fix_block)) { - - ib::info() << "innodb_change_buffering_debug flush " - << page_id; - guess = fix_block; goto loop; @@ -5541,15 +5597,11 @@ buf_page_create( memset(frame + FIL_PAGE_NEXT, 0xff, 4); mach_write_to_2(frame + FIL_PAGE_TYPE, FIL_PAGE_TYPE_ALLOCATED); - /* These 8 bytes are also repurposed for PageIO compression and must - be reset when the frame is assigned to a new page id. See fil0fil.h. - - - FIL_PAGE_FILE_FLUSH_LSN is used on the following pages: + /* FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION is only used on the + following pages: (1) The first page of the InnoDB system tablespace (page 0:0) - (2) FIL_RTREE_SPLIT_SEQ_NUM on R-tree pages . - - Therefore we don't transparently compress such pages. 
*/ + (2) FIL_RTREE_SPLIT_SEQ_NUM on R-tree pages + (3) key_version on encrypted pages (not page 0:0) */ memset(frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 0, 8); @@ -7220,22 +7272,27 @@ buf_pool_reserve_tmp_slot( buf_pool_mutex_exit(buf_pool); /* Allocate temporary memory for encryption/decryption */ - if (free_slot->crypt_buf_free == NULL) { - free_slot->crypt_buf_free = static_cast<byte *>(ut_malloc_nokey(UNIV_PAGE_SIZE*2)); - free_slot->crypt_buf = static_cast<byte *>(ut_align(free_slot->crypt_buf_free, UNIV_PAGE_SIZE)); - memset(free_slot->crypt_buf_free, 0, UNIV_PAGE_SIZE *2); + if (free_slot->crypt_buf == NULL) { + free_slot->crypt_buf = static_cast<byte*>(aligned_malloc(UNIV_PAGE_SIZE, UNIV_PAGE_SIZE)); + memset(free_slot->crypt_buf, 0, UNIV_PAGE_SIZE); } /* For page compressed tables allocate temporary memory for compression/decompression */ - if (compressed && free_slot->comp_buf_free == NULL) { - free_slot->comp_buf_free = static_cast<byte *>(ut_malloc_nokey(UNIV_PAGE_SIZE*2)); - free_slot->comp_buf = static_cast<byte *>(ut_align(free_slot->comp_buf_free, UNIV_PAGE_SIZE)); - memset(free_slot->comp_buf_free, 0, UNIV_PAGE_SIZE *2); -#ifdef HAVE_LZO - free_slot->lzo_mem = static_cast<byte *>(ut_malloc_nokey(LZO1X_1_15_MEM_COMPRESS)); - memset(free_slot->lzo_mem, 0, LZO1X_1_15_MEM_COMPRESS); + if (compressed && free_slot->comp_buf == NULL) { + ulint size = UNIV_PAGE_SIZE; + + /* Both snappy and lzo compression methods require that + output buffer used for compression is bigger than input + buffer. Increase the allocated buffer size accordingly. 
*/ +#if HAVE_SNAPPY + size = snappy_max_compressed_length(size); +#endif +#if HAVE_LZO + size += LZO1X_1_15_MEM_COMPRESS; #endif + free_slot->comp_buf = static_cast<byte*>(aligned_malloc(size, UNIV_PAGE_SIZE)); + memset(free_slot->comp_buf, 0, size); } return (free_slot); @@ -7320,8 +7377,7 @@ buf_page_encrypt_before_write( fsp_flags_get_page_compression_level(space->flags), fil_space_get_block_size(space, bpage->id.page_no()), encrypted, - &out_len, - IF_LZO(slot->lzo_mem, NULL)); + &out_len); bpage->real_size = out_len; diff --git a/storage/innobase/buf/buf0dblwr.cc b/storage/innobase/buf/buf0dblwr.cc index f99fc6434de..b770e8483d9 100644 --- a/storage/innobase/buf/buf0dblwr.cc +++ b/storage/innobase/buf/buf0dblwr.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2013, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under @@ -162,11 +162,11 @@ buf_dblwr_init( ut_zalloc_nokey(buf_size * sizeof(void*))); } -/****************************************************************//** -Creates the doublewrite buffer to a new InnoDB installation. The header of the -doublewrite buffer is placed on the trx system header page. -@return true if successful, false if not. */ -MY_ATTRIBUTE((warn_unused_result)) +/** Create the doublewrite buffer if the doublewrite buffer header +is not present in the TRX_SYS page. 
+@return whether the operation succeeded +@retval true if the doublewrite buffer exists or was created +@retval false if the creation failed (too small first data file) */ bool buf_dblwr_create() { @@ -181,12 +181,11 @@ buf_dblwr_create() if (buf_dblwr) { /* Already inited */ - return(true); } start_again: - mtr_start(&mtr); + mtr.start(); buf_dblwr_being_created = TRUE; doublewrite = buf_dblwr_get(&mtr); @@ -198,33 +197,49 @@ start_again: buf_dblwr_init(doublewrite); - mtr_commit(&mtr); + mtr.commit(); buf_dblwr_being_created = FALSE; return(true); - } + } else { + fil_space_t* space = fil_space_acquire(TRX_SYS_SPACE); + const bool fail = UT_LIST_GET_FIRST(space->chain)->size + < 3 * FSP_EXTENT_SIZE; + fil_space_release(space); - ib::info() << "Doublewrite buffer not found: creating new"; + if (fail) { + goto too_small; + } + } block2 = fseg_create(TRX_SYS_SPACE, TRX_SYS_PAGE_NO, TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_FSEG, &mtr); - /* fseg_create acquires a second latch on the page, - therefore we must declare it: */ - - buf_block_dbg_add_level(block2, SYNC_NO_ORDER_CHECK); - if (block2 == NULL) { - ib::error() << "Cannot create doublewrite buffer: you must" - " increase your tablespace size." - " Cannot continue operation."; - - /* The mini-transaction did not write anything yet; - we merely failed to allocate a page. */ +too_small: + ib::error() + << "Cannot create doublewrite buffer: " + "the first file in innodb_data_file_path" + " must be at least " + << (3 * (FSP_EXTENT_SIZE * UNIV_PAGE_SIZE) >> 20) + << "M."; mtr.commit(); return(false); } + ib::info() << "Doublewrite buffer not found: creating new"; + + /* FIXME: After this point, the doublewrite buffer creation + is not atomic. The doublewrite buffer should not exist in + the InnoDB system tablespace file in the first place. + It could be located in separate optional file(s) in a + user-specified location. 
*/ + + /* fseg_create acquires a second latch on the page, + therefore we must declare it: */ + + buf_block_dbg_add_level(block2, SYNC_NO_ORDER_CHECK); + fseg_header = doublewrite + TRX_SYS_DOUBLEWRITE_FSEG; prev_page_no = 0; @@ -338,7 +353,7 @@ recovery, this function loads the pages from double write buffer into memory. @return DB_SUCCESS or error code */ dberr_t buf_dblwr_init_or_load_pages( - os_file_t file, + pfs_os_file_t file, const char* path) { byte* buf; @@ -516,6 +531,10 @@ buf_dblwr_process() byte* unaligned_read_buf; recv_dblwr_t& recv_dblwr = recv_sys->dblwr; + if (!buf_dblwr) { + return; + } + unaligned_read_buf = static_cast<byte*>( ut_malloc_nokey(2 * UNIV_PAGE_SIZE)); diff --git a/storage/innobase/buf/buf0dump.cc b/storage/innobase/buf/buf0dump.cc index ce7488e3d1f..b318d1e9a3a 100644 --- a/storage/innobase/buf/buf0dump.cc +++ b/storage/innobase/buf/buf0dump.cc @@ -394,7 +394,7 @@ buf_dump( buf_dump_status( STATUS_VERBOSE, "Dumping buffer pool" - " " ULINTPF "/" ULINTPF "," + " " ULINTPF "/%lu," " page " ULINTPF "/" ULINTPF, i + 1, srv_buf_pool_instances, j + 1, n_pages); @@ -595,8 +595,8 @@ buf_load() if (dump == NULL) { fclose(f); buf_load_status(STATUS_ERR, - "Cannot allocate %lu bytes: %s", - (ulint) (dump_n * sizeof(*dump)), + "Cannot allocate " ULINTPF " bytes: %s", + dump_n * sizeof(*dump), strerror(errno)); return; } diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 2a9acc8c298..791939f32dd 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2013, 2017, MariaDB Corporation. 
Copyright (c) 2013, 2014, Fusion-io @@ -70,7 +70,7 @@ is set to TRUE by the page_cleaner thread when it is spawned and is set back to FALSE at shutdown by the page_cleaner as well. Therefore no need to protect it by a mutex. It is only ever read by the thread doing the shutdown */ -bool buf_page_cleaner_is_active = false; +bool buf_page_cleaner_is_active; /** Factor for scan length to determine n_pages for intended oldest LSN progress */ @@ -269,6 +269,7 @@ buf_flush_insert_in_flush_rbt( buf_page_t* prev = NULL; buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); + ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE); ut_ad(buf_flush_list_mutex_own(buf_pool)); /* Insert this buffer into the rbt. */ @@ -480,6 +481,7 @@ buf_flush_insert_sorted_into_flush_list( buf_page_t* prev_b; buf_page_t* b; + ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE); ut_ad(!buf_pool_mutex_own(buf_pool)); ut_ad(log_flush_order_mutex_own()); ut_ad(buf_page_mutex_own(block)); @@ -789,6 +791,7 @@ buf_flush_write_complete( flush_type = buf_page_get_flush_type(bpage); buf_pool->n_flush[flush_type]--; + ut_ad(buf_pool->n_flush[flush_type] != ULINT_MAX); ut_ad(buf_pool_mutex_own(buf_pool)); @@ -1214,6 +1217,7 @@ buf_flush_page( } ++buf_pool->n_flush[flush_type]; + ut_ad(buf_pool->n_flush[flush_type] != 0); mutex_exit(block_mutex); @@ -1866,15 +1870,8 @@ buf_flush_batch( counts */ { ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST); - -#ifdef UNIV_DEBUG - { - dict_sync_check check(true); - - ut_ad(flush_type != BUF_FLUSH_LIST - || !sync_check_iterate(check)); - } -#endif /* UNIV_DEBUG */ + ut_ad(flush_type == BUF_FLUSH_LRU + || !sync_check_iterate(dict_sync_check())); buf_pool_mutex_enter(buf_pool); @@ -2697,6 +2694,11 @@ pc_sleep_if_needed( ulint next_loop_time, int64_t sig_count) { + /* No sleep if we are cleaning the buffer pool during the shutdown + with everything else finished */ + if (srv_shutdown_state == SRV_SHUTDOWN_FLUSH_PHASE) + return OS_SYNC_TIME_EXCEEDED; + 
ulint cur_time = ut_time_ms(); if (next_loop_time > cur_time) { @@ -3121,6 +3123,7 @@ DECLARE_THREAD(buf_flush_page_cleaner_coordinator)( /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); ulint next_loop_time = ut_time_ms() + 1000; ulint n_flushed = 0; ulint last_activity = srv_get_activity_count(); @@ -3150,8 +3153,6 @@ DECLARE_THREAD(buf_flush_page_cleaner_coordinator)( } #endif /* UNIV_LINUX */ - buf_page_cleaner_is_active = true; - while (!srv_read_only_mode && srv_shutdown_state == SRV_SHUTDOWN_NONE && recv_sys->heap != NULL) { @@ -3479,7 +3480,6 @@ thread_exit: buf_page_cleaner_is_active = false; my_thread_end(); - /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit and not use return() to exit. */ os_thread_exit(); diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc index 12775c74daf..20603021072 100644 --- a/storage/innobase/buf/buf0rea.cc +++ b/storage/innobase/buf/buf0rea.cc @@ -752,14 +752,9 @@ buf_read_ahead_linear( switch (err) { case DB_SUCCESS: case DB_TABLESPACE_TRUNCATED: + case DB_TABLESPACE_DELETED: case DB_ERROR: break; - case DB_TABLESPACE_DELETED: - ib::info() << "linear readahead trying to" - " access page " - << page_id_t(page_id.space(), i) - << " in nonexisting or being-dropped" - " tablespace"; case DB_DECRYPTION_FAILED: ib::error() << "linear readahead failed to" " decrypt page " @@ -778,11 +773,11 @@ buf_read_ahead_linear( os_aio_simulated_wake_handler_threads(); if (count) { - DBUG_PRINT("ib_buf", ("linear read-ahead %lu pages, " - "%lu:%lu", + DBUG_PRINT("ib_buf", ("linear read-ahead " ULINTPF " pages, " + "%u:%u", count, - (ulint)page_id.space(), - (ulint)page_id.page_no())); + page_id.space(), + page_id.page_no())); } /* Read ahead is considered one I/O operation for the purpose of diff --git a/storage/innobase/data/data0type.cc b/storage/innobase/data/data0type.cc index 315b12e135c..a40724b303d 100644 --- 
a/storage/innobase/data/data0type.cc +++ b/storage/innobase/data/data0type.cc @@ -199,6 +199,7 @@ dtype_print(const dtype_t* type) case DATA_VAR_POINT: fputs("DATA_VAR_POINT", stderr); + break; case DATA_GEOMETRY: fputs("DATA_GEOMETRY", stderr); diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc index 092c0cb50a1..7cc1c876589 100644 --- a/storage/innobase/dict/dict0crea.cc +++ b/storage/innobase/dict/dict0crea.cc @@ -459,14 +459,9 @@ dict_build_tablespace_for_table( mtr_start(&mtr); mtr.set_named_space(table->space); - bool ret = fsp_header_init(table->space, - FIL_IBD_FILE_INITIAL_SIZE, - &mtr); + fsp_header_init(table->space, FIL_IBD_FILE_INITIAL_SIZE, &mtr); mtr_commit(&mtr); - if (!ret) { - return(DB_ERROR); - } } else { ut_ad(dict_tf_get_rec_format(table->flags) != REC_FORMAT_COMPRESSED); diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc index b733ac580d9..df36a320616 100644 --- a/storage/innobase/dict/dict0dict.cc +++ b/storage/innobase/dict/dict0dict.cc @@ -4367,7 +4367,7 @@ dict_table_get_highest_foreign_id( } DBUG_PRINT("dict_table_get_highest_foreign_id", - ("id: %lu", biggest_id)); + ("id: " ULINTPF, biggest_id)); DBUG_RETURN(biggest_id); } @@ -6114,14 +6114,7 @@ dict_set_corrupted( ut_ad(mutex_own(&dict_sys->mutex)); ut_ad(!dict_table_is_comp(dict_sys->sys_tables)); ut_ad(!dict_table_is_comp(dict_sys->sys_indexes)); - -#ifdef UNIV_DEBUG - { - dict_sync_check check(true); - - ut_ad(!sync_check_iterate(check)); - } -#endif /* UNIV_DEBUG */ + ut_ad(!sync_check_iterate(dict_sync_check())); /* Mark the table as corrupted only if the clustered index is corrupted */ @@ -6612,7 +6605,8 @@ dict_table_schema_check( if ((ulint) table->n_def - n_sys_cols != req_schema->n_cols) { /* the table has a different number of columns than required */ ut_snprintf(errstr, errstr_sz, - "%s has %lu columns but should have " ULINTPF ".", + "%s has " ULINTPF " columns but should have " + ULINTPF ".", 
ut_format_name(req_schema->table_name, buf, sizeof(buf)), table->n_def - n_sys_cols, diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc index a40bd9ec0a6..3ef3c9f329e 100644 --- a/storage/innobase/dict/dict0load.cc +++ b/storage/innobase/dict/dict0load.cc @@ -185,17 +185,6 @@ dict_load_field_low( for temporary storage */ const rec_t* rec); /*!< in: SYS_FIELDS record */ -/** Load a table definition from a SYS_TABLES record to dict_table_t. -Do not load any columns or indexes. -@param[in] name Table name -@param[in] rec SYS_TABLES record -@param[out,own] table table, or NULL -@return error message -@retval NULL on success */ -static -const char* -dict_load_table_low(table_name_t& name, const rec_t* rec, dict_table_t** table); - /* If this flag is TRUE, then we will load the cluster index's (and tables') metadata even if it is marked as "corrupted". */ my_bool srv_load_corrupted; @@ -1140,6 +1129,67 @@ dict_sys_tablespaces_rec_read( return(true); } +/** Check if SYS_TABLES.TYPE is valid +@param[in] type SYS_TABLES.TYPE +@param[in] not_redundant whether ROW_FORMAT=REDUNDANT is not used +@return whether the SYS_TABLES.TYPE value is valid */ +static +bool +dict_sys_tables_type_valid(ulint type, bool not_redundant) +{ + /* The DATA_DIRECTORY flag can be assigned fully independently + of all other persistent table flags. */ + type &= ~DICT_TF_MASK_DATA_DIR; + + if (type == 1) { + return(true); /* ROW_FORMAT=REDUNDANT or ROW_FORMAT=COMPACT */ + } + + if (!(type & 1)) { + /* For ROW_FORMAT=REDUNDANT and ROW_FORMAT=COMPACT, + SYS_TABLES.TYPE=1. Else, it is the same as + dict_table_t::flags, and the least significant bit + would be set. So, the bit never can be 0. */ + return(false); + } + + if (!not_redundant) { + /* SYS_TABLES.TYPE must be 1 for ROW_FORMAT=REDUNDANT. */ + return(false); + } + + if (type >= 1U << DICT_TF_POS_UNUSED) { + /* Some unknown bits are set. 
*/ + return(false); + } + + return(dict_tf_is_valid_not_redundant(type)); +} + +/** Convert SYS_TABLES.TYPE to dict_table_t::flags. +@param[in] type SYS_TABLES.TYPE +@param[in] not_redundant whether ROW_FORMAT=REDUNDANT is not used +@return table flags */ +static +ulint +dict_sys_tables_type_to_tf(ulint type, bool not_redundant) +{ + ut_ad(dict_sys_tables_type_valid(type, not_redundant)); + ulint flags = not_redundant ? 1 : 0; + + /* ZIP_SSIZE, ATOMIC_BLOBS, DATA_DIR, PAGE_COMPRESSION, + PAGE_COMPRESSION_LEVEL are the same. */ + flags |= type & (DICT_TF_MASK_ZIP_SSIZE + | DICT_TF_MASK_ATOMIC_BLOBS + | DICT_TF_MASK_DATA_DIR + | DICT_TF_MASK_PAGE_COMPRESSION + | DICT_TF_MASK_PAGE_COMPRESSION_LEVEL + | DICT_TF_MASK_NO_ROLLBACK); + + ut_ad(dict_tf_is_valid(flags)); + return(flags); +} + /** Read and return 5 integer fields from a SYS_TABLES record. @param[in] rec A record of SYS_TABLES @param[in] name Table Name, the same as SYS_TABLES.NAME @@ -1149,6 +1199,7 @@ dict_sys_tablespaces_rec_read( @param[out] flags Pointer to table flags @param[out] flags2 Pointer to table flags2 @return true if the record was read correctly, false if not. */ +MY_ATTRIBUTE((warn_unused_result)) static bool dict_sys_tables_rec_read( @@ -1164,8 +1215,6 @@ dict_sys_tables_rec_read( ulint len; ulint type; - *flags2 = 0; - field = rec_get_nth_field_old( rec, DICT_FLD__SYS_TABLES__ID, &len); ut_ad(len == 8); @@ -1182,6 +1231,79 @@ dict_sys_tables_rec_read( ut_a(len == 4); type = mach_read_from_4(field); + /* Handle MDEV-12873 InnoDB SYS_TABLES.TYPE incompatibility + for PAGE_COMPRESSED=YES in MariaDB 10.2.2 to 10.2.6. + + MariaDB 10.2.2 introduced the SHARED_SPACE flag from MySQL 5.7, + shifting the flags PAGE_COMPRESSION, PAGE_COMPRESSION_LEVEL, + ATOMIC_WRITES (repurposed to NO_ROLLBACK in 10.3.1) by one bit. + The SHARED_SPACE flag would always + be written as 0 by MariaDB, because MariaDB does not support + CREATE TABLESPACE or CREATE TABLE...TABLESPACE for InnoDB. 
+ + So, instead of the bits AALLLLCxxxxxxx we would have + AALLLLC0xxxxxxx if the table was created with MariaDB 10.2.2 + to 10.2.6. (AA=ATOMIC_WRITES, LLLL=PAGE_COMPRESSION_LEVEL, + C=PAGE_COMPRESSED, xxxxxxx=7 bits that were not moved.) + + The case LLLLC=00000 is not a problem. The problem is the case + AALLLL10DB00001 where D is the (mostly ignored) DATA_DIRECTORY + flag and B is the ATOMIC_BLOBS flag (1 for ROW_FORMAT=DYNAMIC + and 0 for ROW_FORMAT=COMPACT in this case). Other low-order + bits must be so, because PAGE_COMPRESSED=YES is only allowed + for ROW_FORMAT=DYNAMIC and ROW_FORMAT=COMPACT, not for + ROW_FORMAT=REDUNDANT or ROW_FORMAT=COMPRESSED. + + Starting with MariaDB 10.2.4, the flags would be + 00LLLL10DB00001, because ATOMIC_WRITES is always written as 0. + + We will concentrate on the PAGE_COMPRESSION_LEVEL and + PAGE_COMPRESSED=YES. PAGE_COMPRESSED=NO implies + PAGE_COMPRESSION_LEVEL=0, and in that case all the affected + bits will be 0. For PAGE_COMPRESSED=YES, the values 1..9 are + allowed for PAGE_COMPRESSION_LEVEL. That is, we must interpret + the bits AALLLL10DB00001 as AALLLL1DB00001. + + If someone created a table in MariaDB 10.2.2 or 10.2.3 with + the attribute ATOMIC_WRITES=OFF (value 2) and without + PAGE_COMPRESSED=YES or PAGE_COMPRESSION_LEVEL, that should be + rejected. The value ATOMIC_WRITES=ON (1) would look like + ATOMIC_WRITES=OFF, but it would be ignored starting with + MariaDB 10.2.4. */ + compile_time_assert(DICT_TF_POS_PAGE_COMPRESSION == 7); + compile_time_assert(DICT_TF_POS_UNUSED == 14); + + if ((type & 0x19f) != 0x101) { + /* The table cannot have been created with MariaDB + 10.2.2 to 10.2.6, because they would write the + low-order bits of SYS_TABLES.TYPE as 0b10xx00001 for + PAGE_COMPRESSED=YES. No adjustment is applicable. */ + } else if (type >= 3 << 13) { + /* 10.2.2 and 10.2.3 write ATOMIC_WRITES less than 3, + and no other flags above that can be set for the + SYS_TABLES.TYPE to be in the 10.2.2..10.2.6 format. 
+ This would in any case be invalid format for 10.2 and + earlier releases. */ + ut_ad(!dict_sys_tables_type_valid(type, true)); + } else { + /* SYS_TABLES.TYPE is of the form AALLLL10DB00001. We + must still validate that the LLLL bits are between 0 + and 9 before we can discard the extraneous 0 bit. */ + ut_ad(!DICT_TF_GET_PAGE_COMPRESSION(type)); + + if ((((type >> 9) & 0xf) - 1) < 9) { + ut_ad(DICT_TF_GET_PAGE_COMPRESSION_LEVEL(type) & 1); + + type = (type & 0x7fU) | (type >> 1 & ~0x7fU); + + ut_ad(DICT_TF_GET_PAGE_COMPRESSION(type)); + ut_ad(DICT_TF_GET_PAGE_COMPRESSION_LEVEL(type) >= 1); + ut_ad(DICT_TF_GET_PAGE_COMPRESSION_LEVEL(type) <= 9); + } else { + ut_ad(!dict_sys_tables_type_valid(type, true)); + } + } + /* The low order bit of SYS_TABLES.TYPE is always set to 1. But in dict_table_t::flags the low order bit is used to determine if the ROW_FORMAT=REDUNDANT (0) or anything else (1). @@ -1193,31 +1315,48 @@ dict_sys_tables_rec_read( ut_a(len == 4); *n_cols = mach_read_from_4(field); - /* This validation function also combines the DICT_N_COLS_COMPACT - flag in n_cols into the type field to effectively make it a - dict_table_t::flags. */ + const bool not_redundant = 0 != (*n_cols & DICT_N_COLS_COMPACT); - if (ULINT_UNDEFINED == dict_sys_tables_type_validate(type, *n_cols)) { + if (!dict_sys_tables_type_valid(type, not_redundant)) { ib::error() << "Table " << table_name << " in InnoDB" " data dictionary contains invalid flags." " SYS_TABLES.TYPE=" << type << " SYS_TABLES.N_COLS=" << *n_cols; - *flags = ULINT_UNDEFINED; return(false); } - *flags = dict_sys_tables_type_to_tf(type, *n_cols); + *flags = dict_sys_tables_type_to_tf(type, not_redundant); - /* Get flags2 from SYS_TABLES.MIX_LEN */ - field = rec_get_nth_field_old( - rec, DICT_FLD__SYS_TABLES__MIX_LEN, &len); - *flags2 = mach_read_from_4(field); + /* For tables created before MySQL 4.1, there may be + garbage in SYS_TABLES.MIX_LEN where flags2 are found. 
Such tables + would always be in ROW_FORMAT=REDUNDANT which do not have the + high bit set in n_cols, and flags would be zero. + MySQL 4.1 was the first version to support innodb_file_per_table, + that is, *space_id != 0. */ + if (not_redundant || *space_id != 0 || *n_cols & DICT_N_COLS_COMPACT) { + + /* Get flags2 from SYS_TABLES.MIX_LEN */ + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_TABLES__MIX_LEN, &len); + *flags2 = mach_read_from_4(field); + + if (!dict_tf2_is_valid(*flags, *flags2)) { + ib::error() << "Table " << table_name << " in InnoDB" + " data dictionary contains invalid flags." + " SYS_TABLES.TYPE=" << type + << " SYS_TABLES.MIX_LEN=" << *flags2; + return(false); + } - /* DICT_TF2_FTS will be set when indexes are being loaded */ - *flags2 &= ~DICT_TF2_FTS; + /* DICT_TF2_FTS will be set when indexes are being loaded */ + *flags2 &= ~DICT_TF2_FTS; + + /* Now that we have used this bit, unset it. */ + *n_cols &= ~DICT_N_COLS_COMPACT; + } else { + *flags2 = 0; + } - /* Now that we have used this bit, unset it. */ - *n_cols &= ~DICT_N_COLS_COMPACT; return(true); } @@ -1280,11 +1419,10 @@ dict_check_sys_tables( ("name: %p, '%s'", table_name.m_name, table_name.m_name)); - dict_sys_tables_rec_read(rec, table_name, - &table_id, &space_id, - &n_cols, &flags, &flags2); - if (flags == ULINT_UNDEFINED - || is_system_tablespace(space_id)) { + if (!dict_sys_tables_rec_read(rec, table_name, + &table_id, &space_id, + &n_cols, &flags, &flags2) + || space_id == TRX_SYS_SPACE) { ut_free(table_name.m_name); continue; } @@ -1307,7 +1445,7 @@ dict_check_sys_tables( look to see if it is already in the tablespace cache. */ if (fil_space_for_table_exists_in_mem( space_id, table_name.m_name, - false, true, NULL, 0, NULL, flags)) { + false, true, NULL, 0, flags)) { /* Recovery can open a datafile that does not match SYS_DATAFILES. If they don't match, update SYS_DATAFILES. 
*/ @@ -1339,8 +1477,7 @@ dict_check_sys_tables( FIL_TYPE_TABLESPACE, space_id, dict_tf_to_fsp_flags(flags), table_name.m_name, - filepath, - NULL); + filepath); if (err != DB_SUCCESS) { ib::warn() << "Ignoring tablespace for " @@ -2091,6 +2228,8 @@ func_exit: static const char* dict_load_index_del = "delete-marked record in SYS_INDEXES"; /** Error message for table->id mismatch in dict_load_index_low() */ static const char* dict_load_index_id_err = "SYS_INDEXES.TABLE_ID mismatch"; +/** Error message for SYS_TABLES flags mismatch in dict_load_table_low() */ +static const char* dict_load_table_flags = "incorrect flags in SYS_TABLES"; /** Load an index definition from a SYS_INDEXES record to dict_index_t. If allocate=TRUE, we will create a dict_index_t structure and fill it @@ -2534,16 +2673,13 @@ dict_load_table_low(table_name_t& name, const rec_t* rec, dict_table_t** table) ulint flags2; ulint n_v_col; - const char* error_text = dict_sys_tables_rec_check(rec); - if (error_text != NULL) { + if (const char* error_text = dict_sys_tables_rec_check(rec)) { return(error_text); } - dict_sys_tables_rec_read(rec, name, &table_id, &space_id, - &t_num, &flags, &flags2); - - if (flags == ULINT_UNDEFINED) { - return("incorrect flags in SYS_TABLES"); + if (!dict_sys_tables_rec_read(rec, name, &table_id, &space_id, + &t_num, &flags, &flags2)) { + return(dict_load_table_flags); } dict_table_decode_n_col(t_num, &n_cols, &n_v_col); @@ -2718,7 +2854,7 @@ dict_load_tablespace( /* The tablespace may already be open. 
*/ if (fil_space_for_table_exists_in_mem( table->space, space_name, false, - true, heap, table->id, table, table->flags)) { + true, heap, table->id, table->flags)) { return; } @@ -2750,7 +2886,7 @@ dict_load_tablespace( dberr_t err = fil_ibd_open( true, false, FIL_TYPE_TABLESPACE, table->space, dict_tf_to_fsp_flags(table->flags), - space_name, filepath, table); + space_name, filepath); if (err != DB_SUCCESS) { /* We failed to find a sensible tablespace file */ @@ -2797,7 +2933,6 @@ dict_load_table_one( const rec_t* rec; const byte* field; ulint len; - const char* err_msg; mtr_t mtr; DBUG_ENTER("dict_load_table_one"); @@ -2854,11 +2989,10 @@ err_exit: goto err_exit; } - err_msg = dict_load_table_low(name, rec, &table); - - if (err_msg) { - - ib::error() << err_msg; + if (const char* err_msg = dict_load_table_low(name, rec, &table)) { + if (err_msg != dict_load_table_flags) { + ib::error() << err_msg; + } goto err_exit; } @@ -2879,6 +3013,8 @@ err_exit: mem_heap_empty(heap); + ut_ad(dict_tf2_is_valid(table->flags, table->flags2)); + /* If there is no tablespace for the table then we only need to load the index definitions. So that we can IMPORT the tablespace later. When recovering table locks for resurrected incomplete @@ -2914,32 +3050,6 @@ err_exit: } } - /* We don't trust the table->flags2(retrieved from SYS_TABLES.MIX_LEN - field) if the datafiles are from 3.23.52 version. To identify this - version, we do the below check and reset the flags. */ - if (!DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID) - && table->space == srv_sys_space.space_id() - && table->flags == 0) { - table->flags2 = 0; - } - - DBUG_EXECUTE_IF("ib_table_invalid_flags", - if(strcmp(table->name.m_name, "test/t1") == 0) { - table->flags2 = 255; - table->flags = 255; - }); - - if (!dict_tf2_is_valid(table->flags, table->flags2)) { - ib::error() << "Table " << table->name << " in InnoDB" - " data dictionary contains invalid flags." 
- " SYS_TABLES.MIX_LEN=" << table->flags2; - table->flags2 &= ~DICT_TF2_TEMPORARY; - dict_table_remove_from_cache(table); - table = NULL; - err = DB_FAIL; - goto func_exit; - } - /* Initialize table foreign_child value. Its value could be changed when dict_load_foreigns() is called below */ table->fk_max_recusive_level = 0; diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc index 54988b910d8..9350b5d400d 100644 --- a/storage/innobase/dict/dict0stats.cc +++ b/storage/innobase/dict/dict0stats.cc @@ -1159,10 +1159,11 @@ dict_stats_analyze_index_level( leaf-level delete marks because delete marks on non-leaf level do not make sense. */ - if (level == 0 && (srv_stats_include_delete_marked ? 0: - rec_get_deleted_flag( + if (level == 0 + && !srv_stats_include_delete_marked + && rec_get_deleted_flag( rec, - page_is_comp(btr_pcur_get_page(&pcur))))) { + page_is_comp(btr_pcur_get_page(&pcur)))) { if (rec_is_last_on_page && !prev_rec_is_copied @@ -1336,16 +1337,11 @@ dict_stats_analyze_index_level( /* aux enum for controlling the behavior of dict_stats_scan_page() @{ */ enum page_scan_method_t { - COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED,/* scan all records on - the given page and count the number of - distinct ones, also ignore delete marked - records */ - QUIT_ON_FIRST_NON_BORING,/* quit when the first record that differs - from its right neighbor is found */ - COUNT_ALL_NON_BORING_INCLUDE_DEL_MARKED/* scan all records on - the given page and count the number of - distinct ones, include delete marked - records */ + /** scan the records on the given page, counting the number + of distinct ones; @see srv_stats_include_delete_marked */ + COUNT_ALL_NON_BORING, + /** quit on the first record that differs from its right neighbor */ + QUIT_ON_FIRST_NON_BORING }; /* @} */ @@ -1392,13 +1388,10 @@ dict_stats_scan_page( Because offsets1,offsets2 should be big enough, this memory heap should never be used. 
*/ mem_heap_t* heap = NULL; - const rec_t* (*get_next)(const rec_t*); - - if (scan_method == COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED) { - get_next = page_rec_get_next_non_del_marked; - } else { - get_next = page_rec_get_next_const; - } + const rec_t* (*get_next)(const rec_t*) + = srv_stats_include_delete_marked + ? page_rec_get_next_const + : page_rec_get_next_non_del_marked; const bool should_count_external_pages = n_external_pages != NULL; @@ -1618,9 +1611,7 @@ dict_stats_analyze_index_below_cur( offsets_rec = dict_stats_scan_page( &rec, offsets1, offsets2, index, page, n_prefix, - srv_stats_include_delete_marked ? - COUNT_ALL_NON_BORING_INCLUDE_DEL_MARKED: - COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED, n_diff, + COUNT_ALL_NON_BORING, n_diff, n_external_pages); #if 0 diff --git a/storage/innobase/dict/dict0stats_bg.cc b/storage/innobase/dict/dict0stats_bg.cc index 876d1bcb342..f2ef18b116d 100644 --- a/storage/innobase/dict/dict0stats_bg.cc +++ b/storage/innobase/dict/dict0stats_bg.cc @@ -38,8 +38,6 @@ Created Apr 25, 2012 Vasil Dimov /** Minimum time interval between stats recalc for a given table */ #define MIN_RECALC_INTERVAL 10 /* seconds */ -#define SHUTTING_DOWN() (srv_shutdown_state != SRV_SHUTDOWN_NONE) - /** Event to wake up dict_stats_thread on dict_stats_recalc_pool_add() or shutdown. Not protected by any mutex. */ os_event_t dict_stats_event; @@ -120,6 +118,7 @@ background stats gathering thread. Only the table id is added to the list, so the table can be closed after being enqueued and it will be opened when needed. If the table does not exist later (has been DROPped), then it will be removed from the pool and skipped. */ +static void dict_stats_recalc_pool_add( /*=======================*/ @@ -147,6 +146,44 @@ dict_stats_recalc_pool_add( os_event_set(dict_stats_event); } +/** Update the table modification counter and if necessary, +schedule new estimates for table and index statistics to be calculated. 
+@param[in,out] table persistent or temporary table */ +void +dict_stats_update_if_needed(dict_table_t* table) +{ + ut_ad(table->stat_initialized); + ut_ad(!mutex_own(&dict_sys->mutex)); + + ulonglong counter = table->stat_modified_counter++; + ulonglong n_rows = dict_table_get_n_rows(table); + + if (dict_stats_is_persistent_enabled(table)) { + if (counter > n_rows / 10 /* 10% */ + && dict_stats_auto_recalc_is_enabled(table)) { + + dict_stats_recalc_pool_add(table); + table->stat_modified_counter = 0; + } + return; + } + + /* Calculate new statistics if 1 / 16 of table has been modified + since the last time a statistics batch was run. + We calculate statistics at most every 16th round, since we may have + a counter table which is very small and updated very often. */ + ulonglong threshold = 16 + n_rows / 16; /* 6.25% */ + + if (srv_stats_modified_counter) { + threshold = std::min(srv_stats_modified_counter, threshold); + } + + if (counter > threshold) { + /* this will reset table->stat_modified_counter to 0 */ + dict_stats_update(table, DICT_STATS_RECALC_TRANSIENT); + } +} + /*****************************************************************//** Get a table from the auto recalc pool. The returned table id is removed from the pool. @@ -231,7 +268,6 @@ Initialize global variables needed for the operation of dict_stats_thread() Must be called before dict_stats_thread() is started. 
*/ void dict_stats_thread_init() -/*====================*/ { ut_a(!srv_read_only_mode); @@ -276,15 +312,9 @@ dict_stats_thread_deinit() mutex_free(&recalc_pool_mutex); -#ifdef UNIV_DEBUG - os_event_destroy(dict_stats_disabled_event); - dict_stats_disabled_event = NULL; -#endif /* UNIV_DEBUG */ - + ut_d(os_event_destroy(dict_stats_disabled_event)); os_event_destroy(dict_stats_event); os_event_destroy(dict_stats_shutdown_event); - dict_stats_event = NULL; - dict_stats_shutdown_event = NULL; dict_stats_start_shutdown = false; } @@ -401,6 +431,7 @@ extern "C" os_thread_ret_t DECLARE_THREAD(dict_stats_thread)(void*) { + my_thread_init(); ut_a(!srv_read_only_mode); #ifdef UNIV_PFS_THREAD @@ -452,7 +483,7 @@ DECLARE_THREAD(dict_stats_thread)(void*) OS_THREAD_DUMMY_RETURN; } -/** Shutdown the dict stats thread. */ +/** Shut down the dict_stats_thread. */ void dict_stats_shutdown() { diff --git a/storage/innobase/fil/fil0crypt.cc b/storage/innobase/fil/fil0crypt.cc index 3216e6ef99e..7917cbb528b 100644 --- a/storage/innobase/fil/fil0crypt.cc +++ b/storage/innobase/fil/fil0crypt.cc @@ -831,7 +831,7 @@ fil_space_decrypt( Calculate post encryption checksum @param[in] page_size page size @param[in] dst_frame Block where checksum is calculated -@return page checksum or BUF_NO_CHECKSUM_MAGIC +@return page checksum not needed. 
*/ UNIV_INTERN uint32_t @@ -839,34 +839,11 @@ fil_crypt_calculate_checksum( const page_size_t& page_size, const byte* dst_frame) { - uint32_t checksum = 0; - srv_checksum_algorithm_t algorithm = - static_cast<srv_checksum_algorithm_t>(srv_checksum_algorithm); - - if (!page_size.is_compressed()) { - switch (algorithm) { - case SRV_CHECKSUM_ALGORITHM_CRC32: - case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32: - checksum = buf_calc_page_crc32(dst_frame); - break; - case SRV_CHECKSUM_ALGORITHM_INNODB: - case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB: - checksum = (ib_uint32_t) buf_calc_page_new_checksum( - dst_frame); - break; - case SRV_CHECKSUM_ALGORITHM_NONE: - case SRV_CHECKSUM_ALGORITHM_STRICT_NONE: - checksum = BUF_NO_CHECKSUM_MAGIC; - break; - /* no default so the compiler will emit a warning - * if new enum is added and not handled here */ - } - } else { - checksum = page_zip_calc_checksum(dst_frame, page_size.physical(), - algorithm); - } - - return checksum; + /* For encrypted tables we use only crc32 and strict_crc32 */ + return page_size.is_compressed() + ? page_zip_calc_checksum(dst_frame, page_size.physical(), + SRV_CHECKSUM_ALGORITHM_CRC32) + : buf_calc_page_crc32(dst_frame); } /***********************************************************************/ @@ -945,6 +922,34 @@ fil_crypt_needs_rotation( return false; } +/** Read page 0 and possible crypt data from there. +@param[in,out] space Tablespace */ +static inline +void +fil_crypt_read_crypt_data(fil_space_t* space) +{ + if (space->crypt_data || space->size) { + /* The encryption metadata has already been read, or + the tablespace is not encrypted and the file has been + opened already. 
*/ + return; + } + + const page_size_t page_size(space->flags); + mtr_t mtr; + mtr.start(); + if (buf_block_t* block = buf_page_get(page_id_t(space->id, 0), + page_size, RW_S_LATCH, &mtr)) { + mutex_enter(&fil_system->mutex); + if (!space->crypt_data) { + space->crypt_data = fil_space_read_crypt_data( + page_size, block->frame); + } + mutex_exit(&fil_system->mutex); + } + mtr.commit(); +} + /*********************************************************************** Start encrypting a space @param[in,out] space Tablespace @@ -955,6 +960,7 @@ fil_crypt_start_encrypting_space( fil_space_t* space) { bool recheck = false; + mutex_enter(&fil_crypt_threads_mutex); fil_space_crypt_t *crypt_data = space->crypt_data; @@ -1097,12 +1103,12 @@ struct rotate_thread_t { bool should_shutdown() const { switch (srv_shutdown_state) { case SRV_SHUTDOWN_NONE: - case SRV_SHUTDOWN_CLEANUP: return thread_no >= srv_n_fil_crypt_threads; case SRV_SHUTDOWN_EXIT_THREADS: /* srv_init_abort() must have been invoked */ - case SRV_SHUTDOWN_FLUSH_PHASE: + case SRV_SHUTDOWN_CLEANUP: return true; + case SRV_SHUTDOWN_FLUSH_PHASE: case SRV_SHUTDOWN_LAST_PHASE: break; } @@ -1451,6 +1457,13 @@ fil_crypt_find_space_to_rotate( } while (!state->should_shutdown() && state->space) { + /* If there is no crypt data and we have not yet read + page 0 for this tablespace, we need to read it before + we can continue. 
*/ + if (!state->space->crypt_data) { + fil_crypt_read_crypt_data(state->space); + } + if (fil_crypt_space_needs_rotation(state, key_state, recheck)) { ut_ad(key_state->key_id); /* init state->min_key_version_found before @@ -2152,8 +2165,10 @@ DECLARE_THREAD(fil_crypt_thread)( while (!thr.should_shutdown() && fil_crypt_find_page_to_rotate(&new_state, &thr)) { - /* rotate a (set) of pages */ - fil_crypt_rotate_pages(&new_state, &thr); + if (!thr.space->is_stopping()) { + /* rotate a (set) of pages */ + fil_crypt_rotate_pages(&new_state, &thr); + } /* If space is marked as stopping, release space and stop rotation. */ @@ -2380,6 +2395,14 @@ fil_space_crypt_get_status( memset(status, 0, sizeof(*status)); ut_ad(space->n_pending_ops > 0); + + /* If there is no crypt data and we have not yet read + page 0 for this tablespace, we need to read it before + we can continue. */ + if (!space->crypt_data) { + fil_crypt_read_crypt_data(const_cast<fil_space_t*>(space)); + } + fil_space_crypt_t* crypt_data = space->crypt_data; status->space = space->id; @@ -2492,15 +2515,8 @@ fil_space_verify_crypt_checksum( return false; } - srv_checksum_algorithm_t algorithm = - static_cast<srv_checksum_algorithm_t>(srv_checksum_algorithm); - /* If no checksum is used, can't continue checking. */ - if (algorithm == SRV_CHECKSUM_ALGORITHM_NONE) { - return(true); - } - /* Read stored post encryption checksum. */ - ib_uint32_t checksum = mach_read_from_4( + uint32_t checksum = mach_read_from_4( page + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + 4); /* Declare empty pages non-corrupted */ diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index ce417b1e511..4ce1e7755e3 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -167,13 +167,13 @@ UNIV_INTERN extern ib_mutex_t fil_crypt_threads_mutex; /** Determine if the space id is a user tablespace id or not. 
@param[in] space_id Space ID to check @return true if it is a user tablespace ID */ -UNIV_INLINE +inline bool -fil_is_user_tablespace_id( - ulint space_id) +fil_is_user_tablespace_id(ulint space_id) { - return(space_id > srv_undo_tablespaces_open - && space_id != SRV_TMP_SPACE_ID); + return(space_id != TRX_SYS_SPACE + && space_id != SRV_TMP_SPACE_ID + && !srv_is_undo_tablespace(space_id)); } #ifdef UNIV_DEBUG @@ -655,12 +655,8 @@ retry: FSP_HEADER_OFFSET + FSP_FREE + page); /* Try to read crypt_data from page 0 if it is not yet - read. FIXME: Remove page_0_crypt_read, and simply ensure in - fil_space_t object creation that node->size==0 if and only - if the crypt_data is not known and must be read. */ - if (!space->page_0_crypt_read) { - space->page_0_crypt_read = true; - ut_ad(space->crypt_data == NULL); + read. */ + if (!space->crypt_data) { space->crypt_data = fil_space_read_crypt_data( page_size_t(space->flags), page); } @@ -673,7 +669,7 @@ retry: if (cflags == ULINT_UNDEFINED) { ib::error() << "Expected tablespace flags " - << ib::hex(flags) + << ib::hex(space->flags) << " but found " << ib::hex(flags) << " in the file " << node->name; return(false); @@ -1591,7 +1587,6 @@ Error messages are issued to the server log. 
@param[in] flags tablespace flags @param[in] purpose tablespace purpose @param[in,out] crypt_data encryption information -@param[in] create_table whether this is CREATE TABLE @param[in] mode encryption mode @return pointer to created tablespace, to be filled in with fil_node_create() @retval NULL on failure (such as when the same tablespace exists) */ @@ -1602,7 +1597,6 @@ fil_space_create( ulint flags, fil_type_t purpose, fil_space_crypt_t* crypt_data, - bool create_table, fil_encryption_t mode) { fil_space_t* space; @@ -1667,16 +1661,8 @@ fil_space_create( space->magic_n = FIL_SPACE_MAGIC_N; space->crypt_data = crypt_data; - /* In create table we write page 0 so we have already - "read" it and for system tablespaces we have read - crypt data at startup. */ - if (create_table || crypt_data != NULL) { - space->page_0_crypt_read = true; - } - DBUG_LOG("tablespace", - "Tablespace for space " << id << " name " << name - << (create_table ? " created" : " opened")); + "Created metadata for " << id << " name " << name); if (crypt_data) { DBUG_LOG("crypt", "Tablespace " << id << " name " << name @@ -3796,7 +3782,7 @@ fil_ibd_create( fil_encryption_t mode, uint32_t key_id) { - os_file_t file; + pfs_os_file_t file; dberr_t err; byte* buf2; byte* page; @@ -3833,18 +3819,12 @@ fil_ibd_create( ib::error() << "Cannot create file '" << path << "'"; if (error == OS_FILE_ALREADY_EXISTS) { - ib::error() << "The file '" << path << "'" + ib::info() << "The file '" << path << "'" " already exists though the" " corresponding table did not exist" " in the InnoDB data dictionary." - " Have you moved InnoDB .ibd files" - " around without using the SQL commands" - " DISCARD TABLESPACE and IMPORT TABLESPACE," - " or did mysqld crash in the middle of" - " CREATE TABLE?" 
" You can resolve the problem by removing" - " the file '" << path - << "' under the 'datadir' of MySQL."; + " the file."; return(DB_TABLESPACE_EXISTS); } @@ -4004,7 +3984,7 @@ fil_ibd_create( } space = fil_space_create(name, space_id, flags, FIL_TYPE_TABLESPACE, - crypt_data, true, mode); + crypt_data, mode); fil_node_t* node = NULL; @@ -4085,8 +4065,7 @@ fil_ibd_open( ulint id, ulint flags, const char* space_name, - const char* path_in, - dict_table_t* table) + const char* path_in) { dberr_t err = DB_SUCCESS; bool dict_filepath_same_as_default = false; @@ -4131,10 +4110,6 @@ fil_ibd_open( validate = true; ++tablespaces_found; link_file_found = true; - if (table) { - table->crypt_data = df_remote.get_crypt_info(); - table->page_0_read = true; - } } else if (df_remote.filepath() != NULL) { /* An ISL file was found but contained a bad filepath in it. Better validate anything we do find. */ @@ -4153,11 +4128,6 @@ fil_ibd_open( if (df_dict.open_read_only(true) == DB_SUCCESS) { ut_ad(df_dict.is_open()); ++tablespaces_found; - - if (table) { - table->crypt_data = df_dict.get_crypt_info(); - table->page_0_read = true; - } } } } @@ -4169,10 +4139,6 @@ fil_ibd_open( if (df_default.open_read_only(strict) == DB_SUCCESS) { ut_ad(df_default.is_open()); ++tablespaces_found; - if (table) { - table->crypt_data = df_default.get_crypt_info(); - table->page_0_read = true; - } } /* Check if multiple locations point to the same file. */ @@ -4282,12 +4248,14 @@ fil_ibd_open( df_default.close(); tablespaces_found--; } + if (df_dict.is_open() && !df_dict.is_valid()) { df_dict.close(); /* Leave dict.filepath so that SYS_DATAFILES can be corrected below. */ tablespaces_found--; } + if (df_remote.is_open() && !df_remote.is_valid()) { df_remote.close(); tablespaces_found--; @@ -4370,7 +4338,7 @@ skip_validate: space_name, id, flags, purpose, df_remote.is_open() ? df_remote.get_crypt_info() : df_dict.is_open() ? 
df_dict.get_crypt_info() : - df_default.get_crypt_info(), false); + df_default.get_crypt_info()); /* We do not measure the size of the file, that is why we pass the 0 below */ @@ -4690,7 +4658,7 @@ fil_ibd_load( space = fil_space_create( file.name(), space_id, flags, FIL_TYPE_TABLESPACE, - file.get_crypt_info(), false); + file.get_crypt_info()); if (space == NULL) { return(FIL_LOAD_INVALID); @@ -4807,7 +4775,6 @@ error log if a matching tablespace is not found from memory. @param[in] adjust_space Whether to adjust space id on mismatch @param[in] heap Heap memory @param[in] table_id table id -@param[in] table table @param[in] table_flags table flags @return true if a matching tablespace exists in the memory cache */ bool @@ -4818,7 +4785,6 @@ fil_space_for_table_exists_in_mem( bool adjust_space, mem_heap_t* heap, table_id_t table_id, - dict_table_t* table, ulint table_flags) { fil_space_t* fnamespace; @@ -4839,10 +4805,6 @@ fil_space_for_table_exists_in_mem( bool valid = space && !((space->flags ^ expected_flags) & ~FSP_FLAGS_MEM_MASK); - if (valid && table && !table->crypt_data) { - table->crypt_data = space->crypt_data; - } - if (!space) { } else if (!valid || space == fnamespace) { /* Found with the same file name, or got a flag mismatch. */ @@ -5151,23 +5113,16 @@ fil_report_invalid_page_access( ulint len, /*!< in: I/O length */ bool is_read) /*!< in: I/O type */ { - ib::error() - << "Trying to access page number " << block_offset << " in" + ib::fatal() + << "Trying to " << (is_read ? "read" : "write") + << " page number " << block_offset << " in" " space " << space_id << ", space name " << space_name << "," " which is outside the tablespace bounds. Byte offset " - << byte_offset << ", len " << len << ", i/o type " << - (is_read ? "read" : "write") - << ". 
If you get this error at mysqld startup, please check" - " that your my.cnf matches the ibdata files that you have in" - " the MySQL server."; - - ib::error() << "Server exits" -#ifdef UNIV_DEBUG - << " at " << __FILE__ << "[" << __LINE__ << "]" -#endif - << "."; - - _exit(1); + << byte_offset << ", len " << len << + (space_id == 0 && !srv_was_started + ? "Please check that the configuration matches" + " the InnoDB system tablespace location (ibdata files)" + : ""); } /** Reads or writes data. This operation could be asynchronous (aio). @@ -5833,7 +5788,7 @@ fil_buf_block_init( } struct fil_iterator_t { - os_file_t file; /*!< File handle */ + pfs_os_file_t file; /*!< File handle */ const char* filepath; /*!< File path name */ os_offset_t start; /*!< From where to start */ os_offset_t end; /*!< Where to stop */ @@ -5875,18 +5830,15 @@ fil_iterate( ut_ad(!srv_read_only_mode); - /* For old style compressed tables we do a lot of useless copying - for non-index pages. Unfortunately, it is required by - buf_zip_decompress() */ - - ulint read_type = IORequest::READ; - ulint write_type = IORequest::WRITE; + /* TODO: For compressed tables we do a lot of useless + copying for non-index pages. Unfortunately, it is + required by buf_zip_decompress() */ + const bool row_compressed + = callback.get_page_size().is_compressed(); for (offset = iter.start; offset < iter.end; offset += n_bytes) { byte* io_buffer = iter.io_buffer; - const bool row_compressed - = callback.get_page_size().is_compressed(); block->frame = io_buffer; @@ -5906,8 +5858,6 @@ fil_iterate( /* Zip IO is done in the compressed page buffer. */ io_buffer = block->page.zip.data; - } else { - io_buffer = iter.io_buffer; } /* We have to read the exact number of bytes. 
Otherwise the @@ -5920,22 +5870,14 @@ fil_iterate( ut_ad(n_bytes > 0); ut_ad(!(n_bytes % iter.page_size)); - dberr_t err = DB_SUCCESS; - IORequest read_request(read_type); - - byte* readptr = io_buffer; - byte* writeptr = io_buffer; - bool encrypted = false; - + const bool encrypted = iter.crypt_data != NULL + && iter.crypt_data->should_encrypt(); /* Use additional crypt io buffer if tablespace is encrypted */ - if (iter.crypt_data != NULL && iter.crypt_data->should_encrypt()) { - - encrypted = true; - readptr = iter.crypt_io_buffer; - writeptr = iter.crypt_io_buffer; - } - - err = os_file_read( + byte* const readptr = encrypted + ? iter.crypt_io_buffer : io_buffer; + byte* const writeptr = readptr; + IORequest read_request(IORequest::READ); + dberr_t err = os_file_read( read_request, iter.file, readptr, offset, (ulint) n_bytes); @@ -5960,9 +5902,9 @@ fil_iterate( ulint page_type = mach_read_from_2(src+FIL_PAGE_TYPE); - bool page_compressed = - (page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED - || page_type == FIL_PAGE_PAGE_COMPRESSED); + const bool page_compressed + = page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED + || page_type == FIL_PAGE_PAGE_COMPRESSED; /* If tablespace is encrypted, we need to decrypt the page. Note that tablespaces are not in @@ -6064,8 +6006,7 @@ fil_iterate( dict_table_page_compression_level(iter.table), 512,/* FIXME: use proper block size */ encrypted, - &len, - NULL); + &len); if (len != size) { memset(res+len, 0, size-len); @@ -6107,7 +6048,7 @@ fil_iterate( block->frame += iter.page_size; } - IORequest write_request(write_type); + IORequest write_request(IORequest::WRITE); /* A page was updated in the set, write back to disk. 
Note: We don't have the compression algorithm, we write @@ -6144,7 +6085,7 @@ fil_tablespace_iterate( PageCallback& callback) { dberr_t err; - os_file_t file; + pfs_os_file_t file; char* filepath; bool success; diff --git a/storage/innobase/fil/fil0pagecompress.cc b/storage/innobase/fil/fil0pagecompress.cc index 5f7ce530776..83191c98b37 100644 --- a/storage/innobase/fil/fil0pagecompress.cc +++ b/storage/innobase/fil/fil0pagecompress.cc @@ -93,17 +93,16 @@ fil_compress_page( ulint level, /* in: compression level */ ulint block_size, /*!< in: block size */ bool encrypted, /*!< in: is page also encrypted */ - ulint* out_len, /*!< out: actual length of compressed + ulint* out_len) /*!< out: actual length of compressed page */ - byte* lzo_mem) /*!< in: temporal memory used by LZO */ { int err = Z_OK; int comp_level = int(level); ulint header_len = FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE; - ulint write_size=0; + ulint write_size = 0; /* Cache to avoid change during function execution */ ulint comp_method = innodb_compression_algorithm; - bool allocated=false; + bool allocated = false; /* page_compression does not apply to tables or tablespaces that use ROW_FORMAT=COMPRESSED */ @@ -115,13 +114,23 @@ fil_compress_page( if (!out_buf) { allocated = true; - out_buf = static_cast<byte *>(ut_malloc_nokey(UNIV_PAGE_SIZE)); -#ifdef HAVE_LZO + ulint size = UNIV_PAGE_SIZE; + + /* Both snappy and lzo compression methods require that + output buffer used for compression is bigger than input + buffer. Increase the allocated buffer size accordingly. 
*/ +#if HAVE_SNAPPY + if (comp_method == PAGE_SNAPPY_ALGORITHM) { + size = snappy_max_compressed_length(size); + } +#endif +#if HAVE_LZO if (comp_method == PAGE_LZO_ALGORITHM) { - lzo_mem = static_cast<byte *>(ut_malloc_nokey(LZO1X_1_15_MEM_COMPRESS)); - memset(lzo_mem, 0, LZO1X_1_15_MEM_COMPRESS); + size += LZO1X_1_15_MEM_COMPRESS; } #endif + + out_buf = static_cast<byte *>(ut_malloc_nokey(size)); } ut_ad(buf); @@ -173,7 +182,7 @@ fil_compress_page( #ifdef HAVE_LZO case PAGE_LZO_ALGORITHM: err = lzo1x_1_15_compress( - buf, len, out_buf+header_len, &write_size, lzo_mem); + buf, len, out_buf+header_len, &write_size, out_buf+UNIV_PAGE_SIZE); if (err != LZO_E_OK || write_size > UNIV_PAGE_SIZE-header_len) { goto err_exit; @@ -229,6 +238,7 @@ fil_compress_page( case PAGE_SNAPPY_ALGORITHM: { snappy_status cstatus; + write_size = snappy_max_compressed_length(UNIV_PAGE_SIZE); cstatus = snappy_compress( (const char *)buf, @@ -373,11 +383,6 @@ err_exit: exit_free: if (allocated) { ut_free(out_buf); -#ifdef HAVE_LZO - if (comp_method == PAGE_LZO_ALGORITHM) { - ut_free(lzo_mem); - } -#endif } return (buf); @@ -436,13 +441,14 @@ fil_decompress_page( /* Before actual decompress, make sure that page type is correct */ - if (mach_read_from_4(buf+FIL_PAGE_SPACE_OR_CHKSUM) != BUF_NO_CHECKSUM_MAGIC) { - ib::error() << "Corruption: We try to uncompress corrupted page:" - << " CRC " - << mach_read_from_4(buf+FIL_PAGE_SPACE_OR_CHKSUM) - << " page_type " - << mach_read_from_2(buf+FIL_PAGE_TYPE) - << " page len " << len << "."; + if (mach_read_from_4(buf+FIL_PAGE_SPACE_OR_CHKSUM) + != BUF_NO_CHECKSUM_MAGIC + || (ptype != FIL_PAGE_PAGE_COMPRESSED + && ptype != FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED)) { + ib::error() << "Corruption: We try to uncompress corrupted " + "page CRC " + << mach_read_from_4(buf+FIL_PAGE_SPACE_OR_CHKSUM) + << " type " << ptype << " len " << len << "."; if (return_error) { goto error_return; @@ -508,7 +514,7 @@ fil_decompress_page( #endif /* HAVE_LZ4 */ #ifdef HAVE_LZO 
case PAGE_LZO_ALGORITHM: { - ulint olen=0; + ulint olen = 0; err = lzo1x_decompress((const unsigned char *)buf+header_len, actual_size,(unsigned char *)in_buf, &olen, NULL); @@ -579,7 +585,7 @@ fil_decompress_page( case PAGE_SNAPPY_ALGORITHM: { snappy_status cstatus; - ulint olen = 0; + ulint olen = UNIV_PAGE_SIZE; cstatus = snappy_uncompress( (const char *)(buf+header_len), @@ -595,6 +601,7 @@ fil_decompress_page( goto error_return; } } + break; } #endif /* HAVE_SNAPPY */ @@ -613,8 +620,7 @@ fil_decompress_page( memcpy(buf, in_buf, len); error_return: - // Need to free temporal buffer if no buffer was given - if (page_buf == NULL) { + if (page_buf != in_buf) { ut_free(in_buf); } diff --git a/storage/innobase/fsp/fsp0file.cc b/storage/innobase/fsp/fsp0file.cc index b8ad49a254f..346962567d6 100644 --- a/storage/innobase/fsp/fsp0file.cc +++ b/storage/innobase/fsp/fsp0file.cc @@ -425,13 +425,10 @@ Datafile::validate_to_dd(ulint space_id, ulint flags) /* else do not use this tablespace. */ m_is_valid = false; - ib::error() << "In file '" << m_filepath << "', tablespace id and" - " flags are " << m_space_id << " and " << ib::hex(m_flags) - << ", but in the InnoDB data dictionary they are " - << space_id << " and " << ib::hex(flags) - << ". Have you moved InnoDB .ibd files around without" - " using the commands DISCARD TABLESPACE and IMPORT TABLESPACE?" - " " << TROUBLESHOOT_DATADICT_MSG; + ib::error() << "Refusing to load '" << m_filepath << "' (id=" + << m_space_id << ", flags=" << ib::hex(m_flags) + << "); dictionary contains id=" + << space_id << ", flags=" << ib::hex(flags); return(DB_ERROR); } diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc index 57b6c8de825..bf86d83a8c7 100644 --- a/storage/innobase/fsp/fsp0fsp.cc +++ b/storage/innobase/fsp/fsp0fsp.cc @@ -690,17 +690,12 @@ fsp_header_init_fields( flags); } -/** Initializes the space header of a new created space and creates also the -insert buffer tree root if space == 0. 
+/** Initialize a tablespace header. @param[in] space_id space id @param[in] size current size in blocks -@param[in,out] mtr min-transaction -@return true on success, otherwise false. */ -bool -fsp_header_init( - ulint space_id, - ulint size, - mtr_t* mtr) +@param[in,out] mtr mini-transaction */ +void +fsp_header_init(ulint space_id, ulint size, mtr_t* mtr) { fsp_header_t* header; buf_block_t* block; @@ -752,19 +747,9 @@ fsp_header_init( fsp_fill_free_list(!is_system_tablespace(space_id), space, header, mtr); - if (space_id == srv_sys_space.space_id()) { - if (btr_create(DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF, - 0, univ_page_size, DICT_IBUF_ID_MIN + space_id, - dict_ind_redundant, NULL, mtr) == FIL_NULL) { - return(false); - } - } - if (space->crypt_data) { space->crypt_data->write_page0(space, page, mtr); } - - return(true); } /**********************************************************************//** @@ -889,14 +874,12 @@ fsp_try_extend_data_file_with_pages( @param[in,out] space tablespace @param[in,out] header tablespace header @param[in,out] mtr mini-transaction -@return whether the tablespace was extended */ -static UNIV_COLD MY_ATTRIBUTE((nonnull)) +@return number of pages added +@retval 0 if the tablespace was not extended */ +UNIV_COLD MY_ATTRIBUTE((nonnull)) +static ulint -fsp_try_extend_data_file( - fil_space_t* space, - fsp_header_t* header, - mtr_t* mtr, - ulint* n_pages_added) +fsp_try_extend_data_file(fil_space_t* space, fsp_header_t* header, mtr_t* mtr) { ulint size; /* current number of pages in the datafile */ ulint size_increase; /* number of pages to extend this file */ @@ -919,7 +902,7 @@ fsp_try_extend_data_file( << " innodb_data_file_path."; srv_sys_space.set_tablespace_full_status(true); } - return(false); + return(0); } else if (fsp_is_system_temporary(space->id) && !srv_tmp_space.can_auto_extend_last_file()) { @@ -933,7 +916,7 @@ fsp_try_extend_data_file( << " innodb_temp_data_file_path."; 
srv_tmp_space.set_tablespace_full_status(true); } - return(false); + return(0); } size = mach_read_from_4(header + FSP_SIZE); @@ -956,7 +939,7 @@ fsp_try_extend_data_file( /* Let us first extend the file to extent_size */ if (!fsp_try_extend_data_file_with_pages( space, extent_pages - 1, header, mtr)) { - return(false); + return(0); } size = extent_pages; @@ -966,16 +949,13 @@ fsp_try_extend_data_file( } if (size_increase == 0) { - - return(false); + return(0); } if (!fil_space_extend(space, size + size_increase)) { - return(false); + return(0); } - *n_pages_added = size_increase; - /* We ignore any fragments of a full megabyte when storing the size to the space header */ @@ -985,7 +965,7 @@ fsp_try_extend_data_file( mlog_write_ulint( header + FSP_SIZE, space->size_in_header, MLOG_4BYTES, mtr); - return(true); + return(size_increase); } /** Calculate the number of pages to extend a datafile. @@ -1077,8 +1057,7 @@ fsp_fill_free_list( } if (!skip_resize) { - ulint n_pages = 0; - fsp_try_extend_data_file(space, header, mtr, &n_pages); + fsp_try_extend_data_file(space, header, mtr); size = space->size_in_header; } } @@ -2096,7 +2075,6 @@ fseg_create_general( inode = fsp_alloc_seg_inode(space, space_header, mtr); if (inode == NULL) { - goto funct_exit; } @@ -2800,8 +2778,6 @@ fsp_reserve_free_extents( ulint n_free_up; ulint reserve; size_t total_reserved = 0; - ulint rounds = 0; - ulint n_pages_added = 0; ut_ad(mtr); *n_reserved = n_ext; @@ -2882,23 +2858,8 @@ try_again: return(true); } try_to_extend: - n_pages_added = 0; - - if (fsp_try_extend_data_file(space, space_header, mtr, &n_pages_added)) { - - rounds++; - total_reserved += n_pages_added; - - if (rounds > 10) { - ib::info() << "Space id: " - << space << " trying to reserve: " - << n_ext << " extents actually reserved: " - << n_pages_added << " reserve: " - << reserve << " free: " << n_free - << " size: " << size - << " rounds: " << rounds - << " total_reserved: " << total_reserved << "."; - } + if (ulint n = 
fsp_try_extend_data_file(space, space_header, mtr)) { + total_reserved += n; goto try_again; } diff --git a/storage/innobase/fsp/fsp0space.cc b/storage/innobase/fsp/fsp0space.cc index 76269a749f9..7ca81898f70 100644 --- a/storage/innobase/fsp/fsp0space.cc +++ b/storage/innobase/fsp/fsp0space.cc @@ -132,8 +132,7 @@ Tablespace::open_or_create(bool is_temp) m_name, m_space_id, FSP_FLAGS_PAGE_SSIZE(), is_temp ? FIL_TYPE_TEMPORARY : FIL_TYPE_TABLESPACE, - it->m_crypt_info, - false); + it->m_crypt_info); } ut_a(fil_validate()); diff --git a/storage/innobase/fsp/fsp0sysspace.cc b/storage/innobase/fsp/fsp0sysspace.cc index 974140fe565..e4bb11c9a22 100644 --- a/storage/innobase/fsp/fsp0sysspace.cc +++ b/storage/innobase/fsp/fsp0sysspace.cc @@ -834,17 +834,6 @@ SysTablespace::check_file_spec( } } - /* We assume doublewirte blocks in the first data file. */ - if (err == DB_SUCCESS && *create_new_db - && begin->m_size < TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * 3) { - ib::error() << "The " << name() << " data file " - << "'" << begin->name() << "' must be at least " - << TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * 3 * UNIV_PAGE_SIZE - / (1024 * 1024) << " MB"; - - err = DB_ERROR; - } - return(err); } @@ -941,8 +930,8 @@ SysTablespace::open_or_create( space = fil_space_create( name(), space_id(), flags(), is_temp - ? FIL_TYPE_TEMPORARY : FIL_TYPE_TABLESPACE, m_crypt_info, - false); + ? 
FIL_TYPE_TEMPORARY : FIL_TYPE_TABLESPACE, + m_crypt_info); } ut_a(fil_validate()); diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index 79020fb4442..60cc3c91fef 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -1200,7 +1200,6 @@ fts_tokenizer_word_get( /* If it is a stopword, do not index it */ if (!fts_check_token(text, cache->stopword_info.cached_stopword, - index_cache->index->is_ngram, index_cache->charset)) { return(NULL); @@ -1716,21 +1715,6 @@ fts_drop_tables( return(error); } -/** Extract only the required flags from table->flags2 for FTS Aux -tables. -@param[in] in_flags2 Table flags2 -@return extracted flags2 for FTS aux tables */ -static inline -ulint -fts_get_table_flags2_for_aux_tables( - ulint flags2) -{ - /* Extract the file_per_table flag & temporary file flag - from the main FTS table flags2 */ - return((flags2 & DICT_TF2_USE_FILE_PER_TABLE) | - (flags2 & DICT_TF2_TEMPORARY)); -} - /** Create dict_table_t object for FTS Aux tables. @param[in] aux_table_name FTS Aux table name @param[in] table table object of FTS Index @@ -1745,7 +1729,9 @@ fts_create_in_mem_aux_table( { dict_table_t* new_table = dict_mem_table_create( aux_table_name, table->space, n_cols, 0, table->flags, - fts_get_table_flags2_for_aux_tables(table->flags2)); + table->space == TRX_SYS_SPACE + ? 0 : table->space == SRV_TMP_SPACE_ID + ? 
DICT_TF2_TEMPORARY : DICT_TF2_USE_FILE_PER_TABLE); if (DICT_TF_HAS_DATA_DIR(table->flags)) { ut_ad(table->data_dir_path != NULL); @@ -3241,7 +3227,6 @@ fts_query_expansion_fetch_doc( } doc.charset = doc_charset; - doc.is_ngram = result_doc->is_ngram; if (dfield_is_ext(dfield)) { /* We ignore columns that are stored externally, this @@ -3347,7 +3332,6 @@ fts_fetch_doc_from_rec( doc->found = TRUE; doc->charset = get_doc->index_cache->charset; - doc->is_ngram = index->is_ngram; /* Null Field */ if (doc->text.f_len == UNIV_SQL_NULL || doc->text.f_len == 0) { @@ -4379,13 +4363,10 @@ fts_sync_table( return(err); } -/** Check fts token -1. for ngram token, check whether the token contains any words in stopwords -2. for non-ngram token, check if it's stopword or less than fts_min_token_size +/** Check if a fts token is a stopword or less than fts_min_token_size or greater than fts_max_token_size. @param[in] token token string @param[in] stopwords stopwords rb tree -@param[in] is_ngram is ngram parser @param[in] cs token charset @retval true if it is not stopword and length in range @retval false if it is stopword or lenght not in range */ @@ -4393,96 +4374,16 @@ bool fts_check_token( const fts_string_t* token, const ib_rbt_t* stopwords, - bool is_ngram, const CHARSET_INFO* cs) { ut_ad(cs != NULL || stopwords == NULL); - if (!is_ngram) { - ib_rbt_bound_t parent; - - if (token->f_n_char < fts_min_token_size - || token->f_n_char > fts_max_token_size - || (stopwords != NULL - && rbt_search(stopwords, &parent, token) == 0)) { - return(false); - } else { - return(true); - } - } - - /* Check token for ngram. */ - DBUG_EXECUTE_IF( - "fts_instrument_ignore_ngram_check", - return(true); - ); - - /* We ignore fts_min_token_size when ngram */ - ut_ad(token->f_n_char > 0 - && token->f_n_char <= fts_max_token_size); - - if (stopwords == NULL) { - return(true); - } - - /*Ngram checks whether the token contains any words in stopwords. 
- We can't simply use CONTAIN to search in stopwords, because it's - built on COMPARE. So we need to tokenize the token into words - from unigram to f_n_char, and check them separately. */ - for (ulint ngram_token_size = 1; ngram_token_size <= token->f_n_char; - ngram_token_size ++) { - const char* start; - const char* next; - const char* end; - ulint char_len; - ulint n_chars; + ib_rbt_bound_t parent; - start = reinterpret_cast<char*>(token->f_str); - next = start; - end = start + token->f_len; - n_chars = 0; - - while (next < end) { - char_len = my_charlen(cs, next, end); - - if (next + char_len > end || char_len == 0) { - break; - } else { - /* Skip SPACE */ - if (char_len == 1 && *next == ' ') { - start = next + 1; - next = start; - n_chars = 0; - - continue; - } - - next += char_len; - n_chars++; - } - - if (n_chars == ngram_token_size) { - fts_string_t ngram_token; - ngram_token.f_str = - reinterpret_cast<byte*>( - const_cast<char*>(start)); - ngram_token.f_len = next - start; - ngram_token.f_n_char = ngram_token_size; - - ib_rbt_bound_t parent; - if (rbt_search(stopwords, &parent, - &ngram_token) == 0) { - return(false); - } - - /* Move a char forward */ - start += my_charlen(cs, start, end); - n_chars = ngram_token_size - 1; - } - } - } - - return(true); + return(token->f_n_char >= fts_min_token_size + && token->f_n_char <= fts_max_token_size + && (stopwords == NULL + || rbt_search(stopwords, &parent, token) != 0)); } /** Add the token and its start position to the token's list of positions. 
@@ -4499,8 +4400,7 @@ fts_add_token( /* Ignore string whose character number is less than "fts_min_token_size" or more than "fts_max_token_size" */ - if (fts_check_token(&str, NULL, result_doc->is_ngram, - result_doc->charset)) { + if (fts_check_token(&str, NULL, result_doc->charset)) { mem_heap_t* heap; fts_string_t t_str; @@ -7487,7 +7387,6 @@ fts_init_recover_doc( } doc.charset = get_doc->index_cache->charset; - doc.is_ngram = get_doc->index_cache->index->is_ngram; if (dfield_is_ext(dfield)) { dict_table_t* table = cache->sync->table; diff --git a/storage/innobase/fts/fts0plugin.cc b/storage/innobase/fts/fts0plugin.cc index e78dcdacfb9..b7a05deeb34 100644 --- a/storage/innobase/fts/fts0plugin.cc +++ b/storage/innobase/fts/fts0plugin.cc @@ -130,6 +130,7 @@ fts_query_add_word_for_parser( if (cur_node->type != FTS_AST_PARSER_PHRASE_LIST) { break; } + /* fall through */ case FT_TOKEN_WORD: term_node = fts_ast_create_node_term_for_parser( diff --git a/storage/innobase/fts/fts0que.cc b/storage/innobase/fts/fts0que.cc index bc1d173cc29..594f337c978 100644 --- a/storage/innobase/fts/fts0que.cc +++ b/storage/innobase/fts/fts0que.cc @@ -2430,7 +2430,8 @@ fts_query_terms_in_document( /*****************************************************************//** Retrieve the document and match the phrase tokens. @return DB_SUCCESS or error code */ -static MY_ATTRIBUTE((nonnull, warn_unused_result)) +MY_ATTRIBUTE((nonnull(1,2,3,6), warn_unused_result)) +static dberr_t fts_query_match_document( /*=====================*/ @@ -2692,7 +2693,6 @@ fts_query_phrase_split( if (fts_check_token( &result_str, cache->stopword_info.cached_stopword, - query->index->is_ngram, query->fts_index_table.charset)) { /* Add the word to the RB tree so that we can calculate it's frequencey within a document. 
*/ @@ -4277,7 +4277,6 @@ fts_expand_query( result_doc.charset = index_cache->charset; result_doc.parser = index_cache->index->parser; - result_doc.is_ngram = index_cache->index->is_ngram; query->total_size += SIZEOF_RBT_CREATE; diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index fcbd187fd06..4d1b40052dc 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2000, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2000, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2013, 2017, MariaDB Corporation. Copyright (c) 2008, 2009 Google Inc. Copyright (c) 2009, Percona Inc. @@ -329,12 +329,12 @@ thd_destructor_proxy(void *) while (trx_sys_any_active_transactions()) { os_thread_sleep(1000); } - - /* Some background threads might generate undo pages that will - need to be purged, so they have to be shut down before purge - threads if slow shutdown is requested. */ - srv_shutdown_bg_undo_sources(); } + + /* Some background threads might generate undo pages that will + need to be purged, so they have to be shut down before purge + threads if slow shutdown is requested. */ + srv_shutdown_bg_undo_sources(); srv_purge_wakeup(); destroy_thd(thd); @@ -1372,14 +1372,11 @@ innobase_drop_database( handlerton* hton, char* path); -/*******************************************************************//** -Closes an InnoDB database. */ +/** Shut down the InnoDB storage engine. +@return 0 */ static int -innobase_end( -/*=========*/ - handlerton* hton, /* in: InnoDB handlerton */ - ha_panic_function type); +innobase_end(handlerton*, ha_panic_function); /*****************************************************************//** Creates an InnoDB transaction struct for the thd if it does not yet have one. 
@@ -1796,6 +1793,8 @@ void innobase_srv_conc_exit_innodb( row_prebuilt_t* prebuilt) { + ut_ad(!sync_check_iterate(sync_check())); + #ifdef WITH_WSREP if (wsrep_on(prebuilt->trx->mysql_thd) && wsrep_thd_is_BF(prebuilt->trx->mysql_thd, FALSE)) { @@ -1804,13 +1803,6 @@ innobase_srv_conc_exit_innodb( #endif /* WITH_WSREP */ trx_t* trx = prebuilt->trx; -#ifdef BTR_CUR_HASH_ADAPT -# ifdef UNIV_DEBUG - btrsea_sync_check check(trx->has_search_latch); - - ut_ad(!sync_check_iterate(check)); -# endif /* UNIV_DEBUG */ -#endif /* BTR_CUR_HASH_ADAPT */ /* This is to avoid making an unnecessary function call. */ if (trx->declared_to_be_inside_innodb @@ -1828,13 +1820,7 @@ innobase_srv_conc_force_exit_innodb( /*================================*/ trx_t* trx) /*!< in: transaction handle */ { -#ifdef BTR_CUR_HASH_ADAPT -# ifdef UNIV_DEBUG - btrsea_sync_check check(trx->has_search_latch); - - ut_ad(!sync_check_iterate(check)); -# endif /* UNIV_DEBUG */ -#endif /* BTR_CUR_HASH_ADAPT */ + ut_ad(!sync_check_iterate(sync_check())); /* This is to avoid making an unnecessary function call. */ if (trx->declared_to_be_inside_innodb) { @@ -1921,13 +1907,7 @@ const char* thd_innodb_tmpdir( THD* thd) { -#ifdef BTR_CUR_HASH_ADAPT -# ifdef UNIV_DEBUG - trx_t* trx = thd_to_trx(thd); - btrsea_sync_check check(trx->has_search_latch); - ut_ad(!sync_check_iterate(check)); -# endif /* UNIV_DEBUG */ -#endif /* BTR_CUR_HASH_ADAPT */ + ut_ad(!sync_check_iterate(sync_check())); const char* tmp_dir = THDVAR(thd, tmpdir); @@ -1963,28 +1943,6 @@ thd_to_trx_id( #endif /* WITH_WSREP */ /********************************************************************//** -Call this function when mysqld passes control to the client. That is to -avoid deadlocks on the adaptive hash S-latch possibly held by thd. For more -documentation, see handler.cc. 
-@return 0 */ -inline -int -innobase_release_temporary_latches( -/*===============================*/ - handlerton* hton, /*!< in: handlerton */ - THD* thd) /*!< in: MySQL thread */ -{ - DBUG_ASSERT(hton == innodb_hton_ptr); - - if (!srv_was_started) { - } else if (trx_t* trx __attribute__((unused))= thd_to_trx(thd)) { - trx_assert_no_search_latch(trx); - } - - return(0); -} - -/********************************************************************//** Increments innobase_active_counter and every INNOBASE_WAKE_INTERVALth time calls srv_active_wake_master_thread. This function should be used when a single database operation may introduce a small need for @@ -2403,25 +2361,16 @@ Thread unsafe, can only be called from the thread owning the THD. @return SQL statement string */ const char* innobase_get_stmt_unsafe( -/*=====================*/ THD* thd, size_t* length) { - LEX_STRING* stmt; - const char* query=NULL; - - stmt = thd ? thd_query_string(thd) : NULL; - // MySQL 5.7 - //stmt = thd_query_unsafe(thd); - - if (stmt && stmt->str) { + if (const LEX_STRING *stmt = thd_query_string(thd)) { *length = stmt->length; - query = stmt->str; - } else { - *length = 0; + return stmt->str; } - return(query); + *length = 0; + return NULL; } /** Determines the current SQL statement. @@ -2433,7 +2382,6 @@ into the provided buffer. 
@return Length of the SQL statement */ size_t innobase_get_stmt_safe( -/*===================*/ THD* thd, char* buf, size_t buflen) @@ -3321,7 +3269,6 @@ innobase_query_caching_of_table_permitted( return(false); } - trx_assert_no_search_latch(trx); innobase_srv_conc_force_exit_innodb(trx); if (!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) @@ -3629,8 +3576,6 @@ ha_innobase::init_table_handle_for_HANDLER(void) /* Initialize the m_prebuilt struct much like it would be inited in external_lock */ - trx_assert_no_search_latch(m_prebuilt->trx); - innobase_srv_conc_force_exit_innodb(m_prebuilt->trx); /* If the transaction is not started yet, start it */ @@ -3685,10 +3630,7 @@ innobase_space_shutdown() srv_tmp_space.shutdown(); #ifdef WITH_INNODB_DISALLOW_WRITES - if (srv_allow_writes_event) { - os_event_destroy(srv_allow_writes_event); - srv_allow_writes_event = NULL; - } + os_event_destroy(srv_allow_writes_event); #endif /* WITH_INNODB_DISALLOW_WRITES */ DBUG_VOID_RETURN; @@ -3818,9 +3760,6 @@ innobase_init( innobase_hton->flags = HTON_SUPPORTS_EXTENDED_KEYS | HTON_SUPPORTS_FOREIGN_KEYS; - innobase_hton->release_temporary_latches = - innobase_release_temporary_latches; - #ifdef MYSQL_REPLACE_TRX_IN_THD innobase_hton->replace_native_transaction_in_thd = innodb_replace_trx_in_thd; @@ -4386,21 +4325,13 @@ error: DBUG_RETURN(1); } -/*******************************************************************//** -Closes an InnoDB database. -@return TRUE if error */ +/** Shut down the InnoDB storage engine. 
+@return 0 */ static int -innobase_end( -/*=========*/ - handlerton* hton, /*!< in/out: InnoDB handlerton */ - ha_panic_function type MY_ATTRIBUTE((unused))) - /*!< in: ha_panic() parameter */ +innobase_end(handlerton*, ha_panic_function) { - int err= 0; - DBUG_ENTER("innobase_end"); - DBUG_ASSERT(hton == innodb_hton_ptr); if (srv_was_started) { THD *thd= current_thd; @@ -4433,7 +4364,7 @@ innobase_end( mysql_mutex_destroy(&pending_checkpoint_mutex); } - DBUG_RETURN(err); + DBUG_RETURN(0); } /*****************************************************************//** @@ -4631,12 +4562,6 @@ innobase_commit_ordered( trx = check_trx_exists(thd); TrxInInnoDB trx_in_innodb(trx); - /* Since we will reserve the kernel mutex, we must not be holding the - search system latch, or we will disobey the latching order. But we - already released it in innobase_xa_prepare() (if not before), so just - have an assert here.*/ - trx_assert_no_search_latch(trx); - if (!trx_is_registered_for_2pc(trx) && trx_is_started(trx)) { /* We cannot throw error here; instead we will catch this error again in innobase_commit() and report it from there. */ @@ -4848,12 +4773,6 @@ innobase_rollback_trx( DBUG_ENTER("innobase_rollback_trx"); DBUG_PRINT("trans", ("aborting transaction")); - /* Release a possible FIFO ticket and search latch. Since we will - reserve the trx_sys->mutex, we have to release the search system - latch first to obey the latching order. */ - - trx_assert_no_search_latch(trx); - innobase_srv_conc_force_exit_innodb(trx); /* If we had reserved the auto-inc lock for some table (if @@ -6363,37 +6282,19 @@ initialize_auto_increment(dict_table_t* table, const Field* field) dict_table_autoinc_unlock(table); } -/*****************************************************************//** -Creates and opens a handle to a table which already exists in an InnoDB -database. 
-@return 1 if error, 0 if success */ - +/** Open an InnoDB table +@param[in] name table name +@return error code +@retval 0 on success */ int -ha_innobase::open( -/*==============*/ - const char* name, /*!< in: table name */ - int mode, /*!< in: not used */ - uint test_if_locked) /*!< in: not used */ +ha_innobase::open(const char* name, int, uint) { dict_table_t* ib_table; char norm_name[FN_REFLEN]; - THD* thd; dict_err_ignore_t ignore_err = DICT_ERR_IGNORE_NONE; DBUG_ENTER("ha_innobase::open"); - UT_NOT_USED(mode); - UT_NOT_USED(test_if_locked); - - thd = ha_thd(); - - /* Under some cases MySQL seems to call this function while - holding search latch(es). This breaks the latching order as - we acquire dict_sys->mutex below and leads to a deadlock. */ - if (thd != NULL) { - innobase_release_temporary_latches(ht, thd); - } - normalize_table_name(norm_name, name); m_user_thd = NULL; @@ -6408,6 +6309,7 @@ ha_innobase::open( m_upd_buf_size = 0; char* is_part = is_partition(norm_name); + THD* thd = ha_thd(); /* Check whether FOREIGN_KEY_CHECKS is set to 0. If so, the table can be opened even if some FK indexes are missing. If not, the table @@ -6449,10 +6351,6 @@ ha_innobase::open( norm_name); } - ib::warn() << "Cannot open table " << norm_name << " from the" - " internal data dictionary of InnoDB though the .frm" - " file for the table exists. " << TROUBLESHOOTING_MSG; - free_share(m_share); set_my_errno(ENOENT); @@ -6461,8 +6359,6 @@ ha_innobase::open( innobase_copy_frm_flags_from_table_share(ib_table, table->s); - ib_table->thd = (void*)thd; - /* No point to init any statistics if tablespace is still encrypted. 
*/ if (ib_table->is_readable()) { dict_stats_init(ib_table); @@ -6717,11 +6613,6 @@ ha_innobase::open( static_cast<st_mysql_ftparser *>( plugin_decl(parser)->info); - index->is_ngram = strncmp( - plugin_name(parser)->str, - FTS_NGRAM_PARSER_NAME, - plugin_name(parser)->length) == 0; - DBUG_EXECUTE_IF("fts_instrument_use_default_parser", index->parser = &fts_default_parser;); } @@ -6878,12 +6769,6 @@ ha_innobase::close() { DBUG_ENTER("ha_innobase::close"); - THD* thd = ha_thd(); - - if (thd != NULL) { - innobase_release_temporary_latches(ht, thd); - } - row_prebuilt_free(m_prebuilt, FALSE); if (m_upd_buf != NULL) { @@ -8313,8 +8198,8 @@ ha_innobase::innobase_lock_autoinc(void) break; } } - /* Fall through to old style locking. */ - + /* Use old style locking. */ + /* fall through */ case AUTOINC_OLD_STYLE_LOCKING: DBUG_EXECUTE_IF("die_if_autoinc_old_lock_style_used", ut_ad(0);); @@ -8984,8 +8869,8 @@ calc_row_difference( } } - if (o_len != n_len || (o_len != UNIV_SQL_NULL && - 0 != memcmp(o_ptr, n_ptr, o_len))) { + if (o_len != n_len || (o_len != 0 && o_len != UNIV_SQL_NULL + && 0 != memcmp(o_ptr, n_ptr, o_len))) { /* The field has changed */ ufield = uvect->fields + n_changed; @@ -12636,7 +12521,8 @@ index_bad: break; } zip_allowed = false; - /* fall through to set row_type = DYNAMIC */ + /* Set ROW_FORMAT = COMPACT */ + /* fall through */ case ROW_TYPE_NOT_USED: case ROW_TYPE_FIXED: case ROW_TYPE_PAGE: @@ -12644,6 +12530,7 @@ index_bad: m_thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: assuming ROW_FORMAT=DYNAMIC."); + /* fall through */ case ROW_TYPE_DYNAMIC: innodb_row_format = REC_FORMAT_DYNAMIC; break; @@ -12684,7 +12571,7 @@ index_bad: 0); if (m_form->s->table_type == TABLE_TYPE_SEQUENCE) { - m_flags |= 1U << DICT_TF_POS_NO_ROLLBACK; + m_flags |= DICT_TF_MASK_NO_ROLLBACK; } /* Set the flags2 when create table or alter tables */ @@ -12920,10 +12807,6 @@ create_table_info_t::initialize() parent_trx = check_trx_exists(m_thd); - /* In 
case MySQL calls this in the middle of a SELECT query, release - possible adaptive hash latch to avoid deadlocks of threads */ - - trx_assert_no_search_latch(parent_trx); DBUG_RETURN(0); } @@ -13767,12 +13650,6 @@ innobase_drop_database( THD* thd = current_thd; - /* In case MySQL calls this in the middle of a SELECT - query, release possible adaptive hash latch to avoid - deadlocks of threads */ - - trx_assert_no_search_latch(check_trx_exists(thd)); - ulint len = 0; char* ptr = strend(path) - 2; @@ -14506,12 +14383,7 @@ ha_innobase::info_low( update_thd(ha_thd()); - /* In case MySQL calls this in the middle of a SELECT query, release - possible adaptive hash latch to avoid deadlocks of threads */ - - m_prebuilt->trx->op_info = (char*)"returning various info to MariaDB"; - - trx_assert_no_search_latch(m_prebuilt->trx); + m_prebuilt->trx->op_info = "returning various info to MariaDB"; ib_table = m_prebuilt->table; DBUG_ASSERT(ib_table->n_ref_count > 0); @@ -15358,12 +15230,7 @@ ha_innobase::update_table_comment( update_thd(ha_thd()); - m_prebuilt->trx->op_info = (char*)"returning table comment"; - - /* In case MySQL calls this in the middle of a SELECT query, release - possible adaptive hash latch to avoid deadlocks of threads */ - - trx_assert_no_search_latch(m_prebuilt->trx); + m_prebuilt->trx->op_info = "returning table comment"; #define SSTR( x ) reinterpret_cast< std::ostringstream & >( \ ( std::ostringstream() << std::dec << x ) ).str() @@ -15419,22 +15286,14 @@ ha_innobase::get_foreign_key_create_info(void) update_thd(ha_thd()); - m_prebuilt->trx->op_info = (char*)"getting info on foreign keys"; - - /* In case MySQL calls this in the middle of a SELECT query, - release possible adaptive hash latch to avoid - deadlocks of threads */ - - trx_assert_no_search_latch(m_prebuilt->trx); - - + m_prebuilt->trx->op_info = "getting info on foreign keys"; /* Output the data to a temporary string */ std::string str = dict_print_info_on_foreign_keys( TRUE, m_prebuilt->trx, 
m_prebuilt->table); - m_prebuilt->trx->op_info = (char*)""; + m_prebuilt->trx->op_info = ""; /* Allocate buffer for the string */ char* fk_str = (char*) my_malloc(str.length() + 1, MYF(0)); @@ -16437,8 +16296,6 @@ innodb_show_status( trx_t* trx = check_trx_exists(thd); - trx_assert_no_search_latch(trx); - innobase_srv_conc_force_exit_innodb(trx); TrxInInnoDB trx_in_innodb(trx); @@ -17754,12 +17611,6 @@ innobase_xa_prepare( thd_get_xid(thd, (MYSQL_XID*) trx->xid); - /* Release a possible FIFO ticket and search latch. Since we will - reserve the trx_sys->mutex, we have to release the search system - latch first to obey the latching order. */ - - trx_assert_no_search_latch(trx); - innobase_srv_conc_force_exit_innodb(trx); TrxInInnoDB trx_in_innodb(trx); @@ -20736,7 +20587,7 @@ static MYSQL_SYSVAR_LONG(log_buffer_size, innobase_log_buffer_size, static MYSQL_SYSVAR_LONGLONG(log_file_size, innobase_log_file_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Size of each log file in a log group.", - NULL, NULL, 48*1024*1024L, 4*1024*1024L, LLONG_MAX, 1024*1024L); + NULL, NULL, 48 << 20, 1 << 20, 512ULL << 30, 1 << 20); static MYSQL_SYSVAR_ULONG(log_files_in_group, srv_n_log_files, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h index 64147291338..f9ad2007a5f 100644 --- a/storage/innobase/handler/ha_innodb.h +++ b/storage/innobase/handler/ha_innodb.h @@ -619,8 +619,6 @@ extern "C" void wsrep_thd_set_wsrep_last_query_id(THD *thd, query_id_t id); extern const struct _ft_vft ft_vft_result; -#define FTS_NGRAM_PARSER_NAME "ngram" - /** Structure Returned by ha_innobase::ft_init_ext() */ typedef struct new_ft_info { @@ -656,14 +654,11 @@ and returns true. 
@return true if the index name matches the reserved name */ bool innobase_index_name_is_reserved( - THD* thd, /*!< in/out: MySQL connection */ - const KEY* key_info, /*!< in: Indexes to be - created */ - ulint num_of_keys) /*!< in: Number of indexes to - be created. */ - MY_ATTRIBUTE((warn_unused_result)); - -extern const char reserved_file_per_table_space_name[]; + THD* thd, /*!< in/out: MySQL connection */ + const KEY* key_info, /*!< in: Indexes to be created */ + ulint num_of_keys) /*!< in: Number of indexes to + be created. */ + MY_ATTRIBUTE((nonnull(1), warn_unused_result)); #ifdef WITH_WSREP //extern "C" int wsrep_trx_is_aborting(void *thd_ptr); @@ -911,18 +906,6 @@ innodb_base_col_setup_for_stored( /** whether this is a computed virtual column */ #define innobase_is_v_fld(field) ((field)->vcol_info && !(field)->stored_in_db()) -/** Release temporary latches. -Call this function when mysqld passes control to the client. That is to -avoid deadlocks on the adaptive hash S-latch possibly held by thd. For more -documentation, see handler.cc. -@param[in] hton Handlerton. -@param[in] thd MySQL thread. -@return 0 */ -int -innobase_release_temporary_latches( - handlerton* hton, - THD* thd); - /** Always normalize table name to lower case on Windows */ #ifdef _WIN32 #define normalize_table_name(norm_name, name) \ diff --git a/storage/innobase/handler/ha_innopart.cc b/storage/innobase/handler/ha_innopart.cc index 30c7cf79eef..f62b08015ca 100644 --- a/storage/innobase/handler/ha_innopart.cc +++ b/storage/innobase/handler/ha_innopart.cc @@ -930,7 +930,6 @@ ha_innopart::open( { dict_table_t* ib_table; char norm_name[FN_REFLEN]; - THD* thd; DBUG_ENTER("ha_innopart::open"); @@ -940,16 +939,11 @@ ha_innopart::open( ut_ad(table->part_info != NULL); m_part_info = table->part_info; } - thd = ha_thd(); /* Under some cases MySQL seems to call this function while holding search latch(es). This breaks the latching order as we acquire dict_sys->mutex below and leads to a deadlock. 
*/ - if (thd != NULL) { - innobase_release_temporary_latches(ht, thd); - } - normalize_table_name(norm_name, name); m_user_thd = NULL; @@ -1015,6 +1009,7 @@ share_error: MONITOR_INC(MONITOR_TABLE_OPEN); bool no_tablespace; + THD* thd = ha_thd(); /* TODO: Should we do this check for every partition during ::open()? */ /* TODO: refactor this in ha_innobase so it can increase code reuse. */ @@ -1361,15 +1356,8 @@ void ha_innopart::clear_ins_upd_nodes() int ha_innopart::close() { - THD* thd; - DBUG_ENTER("ha_innopart::close"); - thd = ha_thd(); - if (thd != NULL) { - innobase_release_temporary_latches(ht, thd); - } - ut_ad(m_pcur_parts == NULL); ut_ad(m_clust_pcur_parts == NULL); close_partitioning(); @@ -3021,11 +3009,6 @@ ha_innopart::records_in_range( m_prebuilt->trx->op_info = (char*)"estimating records in index range"; - /* In case MySQL calls this in the middle of a SELECT query, release - possible adaptive hash latch to avoid deadlocks of threads. */ - - trx_assert_no_search_latch(m_prebuilt->trx); - active_index = keynr; key = table->key_info + active_index; @@ -3160,11 +3143,6 @@ ha_innopart::estimate_rows_upper_bound() m_prebuilt->trx->op_info = "calculating upper bound for table rows"; - /* In case MySQL calls this in the middle of a SELECT query, release - possible adaptive hash latch to avoid deadlocks of threads. */ - - trx_assert_no_search_latch(m_prebuilt->trx); - for (uint i = m_part_info->get_first_used_partition(); i < m_tot_parts; i = m_part_info->get_next_used_partition(i)) { @@ -3276,12 +3254,7 @@ ha_innopart::info_low( update_thd(ha_thd()); - /* In case MySQL calls this in the middle of a SELECT query, release - possible adaptive hash latch to avoid deadlocks of threads. 
*/ - - m_prebuilt->trx->op_info = (char*)"returning various info to MySQL"; - - trx_assert_no_search_latch(m_prebuilt->trx); + m_prebuilt->trx->op_info = "returning various info to MySQL"; ut_ad(m_part_share->get_table_part(0)->n_ref_count > 0); diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index 60e742f27f7..1eec22874d1 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -591,7 +591,6 @@ ha_innobase::check_if_supported_inplace_alter( } update_thd(); - trx_assert_no_search_latch(m_prebuilt->trx); /* Change on engine specific table options require rebuild of the table */ @@ -2203,7 +2202,6 @@ innobase_create_index_def( memset(index->fields, 0, n_fields * sizeof *index->fields); index->parser = NULL; - index->is_ngram = false; index->key_number = key_number; index->n_fields = n_fields; index->name = mem_heap_strdup(heap, key->name); @@ -2237,12 +2235,6 @@ innobase_create_index_def( static_cast<st_mysql_ftparser*>( plugin_decl(parser)->info); - index->is_ngram = strncmp( - plugin_name(parser)->str, - FTS_NGRAM_PARSER_NAME, - plugin_name(parser)->length) - == 0; - break; } } @@ -2830,10 +2822,10 @@ online_retry_drop_indexes_with_trx( @param drop_fk constraints being dropped @param n_drop_fk number of constraints that are being dropped @return whether the constraint is being dropped */ -inline MY_ATTRIBUTE((warn_unused_result)) +MY_ATTRIBUTE((pure, nonnull(1), warn_unused_result)) +inline bool innobase_dropping_foreign( -/*======================*/ const dict_foreign_t* foreign, dict_foreign_t** drop_fk, ulint n_drop_fk) @@ -2857,10 +2849,10 @@ column that is being dropped or modified to NOT NULL. 
@retval true Not allowed (will call my_error()) @retval false Allowed */ -static MY_ATTRIBUTE((warn_unused_result)) +MY_ATTRIBUTE((pure, nonnull(1,4), warn_unused_result)) +static bool innobase_check_foreigns_low( -/*========================*/ const dict_table_t* user_table, dict_foreign_t** drop_fk, ulint n_drop_fk, @@ -2957,10 +2949,10 @@ column that is being dropped or modified to NOT NULL. @retval true Not allowed (will call my_error()) @retval false Allowed */ -static MY_ATTRIBUTE((warn_unused_result)) +MY_ATTRIBUTE((pure, nonnull(1,2,3,4), warn_unused_result)) +static bool innobase_check_foreigns( -/*====================*/ Alter_inplace_info* ha_alter_info, const TABLE* altered_table, const TABLE* old_table, diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc index 0d5029973d3..d1c1dae4783 100644 --- a/storage/innobase/handler/i_s.cc +++ b/storage/innobase/handler/i_s.cc @@ -1,7 +1,7 @@ /***************************************************************************** -Copyright (c) 2007, 2016, Oracle and/or its affiliates. -Copyrigth (c) 2014, 2017, MariaDB Corporation +Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2014, 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -699,8 +699,7 @@ fill_innodb_trx_from_cache( #ifdef BTR_CUR_HASH_ADAPT /* trx_adaptive_hash_latched */ - OK(fields[IDX_TRX_ADAPTIVE_HASH_LATCHED]->store( - row->trx_has_search_latch, true)); + OK(fields[IDX_TRX_ADAPTIVE_HASH_LATCHED]->store(0, true)); #endif /* BTR_CUR_HASH_ADAPT */ /* trx_is_read_only*/ @@ -2971,14 +2970,16 @@ i_s_fts_deleted_generic_fill( fields = table->field; + int ret = 0; + for (ulint j = 0; j < ib_vector_size(deleted->doc_ids); ++j) { doc_id_t doc_id; doc_id = *(doc_id_t*) ib_vector_get_const(deleted->doc_ids, j); - OK(fields[I_S_FTS_DOC_ID]->store(doc_id, true)); + BREAK_IF(ret = fields[I_S_FTS_DOC_ID]->store(doc_id, true)); - OK(schema_table_store_record(thd, table)); + BREAK_IF(ret = schema_table_store_record(thd, table)); } trx_free_for_background(trx); @@ -2989,7 +2990,7 @@ i_s_fts_deleted_generic_fill( rw_lock_s_unlock(dict_operation_lock); - DBUG_RETURN(0); + DBUG_RETURN(ret); } /*******************************************************************//** @@ -3229,13 +3230,13 @@ i_s_fts_index_cache_fill_one_index( /*===============================*/ fts_index_cache_t* index_cache, /*!< in: FTS index cache */ THD* thd, /*!< in: thread */ + fts_string_t* conv_str, /*!< in/out: buffer */ TABLE_LIST* tables) /*!< in/out: tables to fill */ { TABLE* table = (TABLE*) tables->table; Field** fields; CHARSET_INFO* index_charset; const ib_rbt_node_t* rbt_node; - fts_string_t conv_str; uint dummy_errors; char* word_str; @@ -3244,10 +3245,9 @@ i_s_fts_index_cache_fill_one_index( fields = table->field; index_charset = index_cache->charset; - conv_str.f_len = system_charset_info->mbmaxlen - * FTS_MAX_WORD_LEN_IN_CHAR; - conv_str.f_str = static_cast<byte*>(ut_malloc_nokey(conv_str.f_len)); - conv_str.f_n_char = 0; + conv_str->f_n_char = 0; + + int ret = 0; /* Go through each word in the index cache 
*/ for (rbt_node = rbt_first(index_cache->words); @@ -3259,16 +3259,16 @@ i_s_fts_index_cache_fill_one_index( /* Convert word from index charset to system_charset_info */ if (index_charset->cset != system_charset_info->cset) { - conv_str.f_n_char = my_convert( - reinterpret_cast<char*>(conv_str.f_str), - static_cast<uint32>(conv_str.f_len), + conv_str->f_n_char = my_convert( + reinterpret_cast<char*>(conv_str->f_str), + static_cast<uint32>(conv_str->f_len), system_charset_info, reinterpret_cast<char*>(word->text.f_str), static_cast<uint32>(word->text.f_len), index_charset, &dummy_errors); - ut_ad(conv_str.f_n_char <= conv_str.f_len); - conv_str.f_str[conv_str.f_n_char] = 0; - word_str = reinterpret_cast<char*>(conv_str.f_str); + ut_ad(conv_str->f_n_char <= conv_str->f_len); + conv_str->f_str[conv_str->f_n_char] = 0; + word_str = reinterpret_cast<char*>(conv_str->f_str); } else { word_str = reinterpret_cast<char*>(word->text.f_str); } @@ -3326,9 +3326,7 @@ i_s_fts_index_cache_fill_one_index( } } - ut_free(conv_str.f_str); - - DBUG_RETURN(0); + DBUG_RETURN(ret); } /*******************************************************************//** Fill the dynamic table INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHED @@ -3372,18 +3370,27 @@ i_s_fts_index_cache_fill( ut_a(cache); + int ret = 0; + fts_string_t conv_str; + conv_str.f_len = system_charset_info->mbmaxlen + * FTS_MAX_WORD_LEN_IN_CHAR; + conv_str.f_str = static_cast<byte*>(ut_malloc_nokey(conv_str.f_len)); + for (ulint i = 0; i < ib_vector_size(cache->indexes); i++) { fts_index_cache_t* index_cache; index_cache = static_cast<fts_index_cache_t*> ( ib_vector_get(cache->indexes, i)); - i_s_fts_index_cache_fill_one_index(index_cache, thd, tables); + BREAK_IF(ret = i_s_fts_index_cache_fill_one_index( + index_cache, thd, &conv_str, tables)); } + ut_free(conv_str.f_str); + dict_table_close(user_table, FALSE, FALSE); - DBUG_RETURN(0); + DBUG_RETURN(ret); } /*******************************************************************//** @@ 
-3685,8 +3692,6 @@ i_s_fts_index_table_fill_one_fetch( } } - i_s_fts_index_table_free_one_fetch(words); - DBUG_RETURN(ret); } @@ -3700,12 +3705,12 @@ i_s_fts_index_table_fill_one_index( /*===============================*/ dict_index_t* index, /*!< in: FTS index */ THD* thd, /*!< in: thread */ + fts_string_t* conv_str, /*!< in/out: buffer */ TABLE_LIST* tables) /*!< in/out: tables to fill */ { ib_vector_t* words; mem_heap_t* heap; CHARSET_INFO* index_charset; - fts_string_t conv_str; dberr_t error; int ret = 0; @@ -3718,10 +3723,6 @@ i_s_fts_index_table_fill_one_index( sizeof(fts_word_t), 256); index_charset = fts_index_get_charset(index); - conv_str.f_len = system_charset_info->mbmaxlen - * FTS_MAX_WORD_LEN_IN_CHAR; - conv_str.f_str = static_cast<byte*>(ut_malloc_nokey(conv_str.f_len)); - conv_str.f_n_char = 0; /* Iterate through each auxiliary table as described in fts_index_selector */ @@ -3759,17 +3760,17 @@ i_s_fts_index_table_fill_one_index( /* Fill into tables */ ret = i_s_fts_index_table_fill_one_fetch( - index_charset, thd, tables, words, &conv_str, has_more); + index_charset, thd, tables, words, conv_str, + has_more); + i_s_fts_index_table_free_one_fetch(words); if (ret != 0) { - i_s_fts_index_table_free_one_fetch(words); goto func_exit; } } while (has_more); } func_exit: - ut_free(conv_str.f_str); mem_heap_free(heap); DBUG_RETURN(ret); @@ -3811,10 +3812,17 @@ i_s_fts_index_table_fill( DBUG_RETURN(0); } + int ret = 0; + fts_string_t conv_str; + conv_str.f_len = system_charset_info->mbmaxlen + * FTS_MAX_WORD_LEN_IN_CHAR; + conv_str.f_str = static_cast<byte*>(ut_malloc_nokey(conv_str.f_len)); + for (index = dict_table_get_first_index(user_table); index; index = dict_table_get_next_index(index)) { if (index->type & DICT_FTS) { - i_s_fts_index_table_fill_one_index(index, thd, tables); + BREAK_IF(ret = i_s_fts_index_table_fill_one_index( + index, thd, &conv_str, tables)); } } @@ -3822,7 +3830,9 @@ i_s_fts_index_table_fill( rw_lock_s_unlock(dict_operation_lock); 
- DBUG_RETURN(0); + ut_free(conv_str.f_str); + + DBUG_RETURN(ret); } /*******************************************************************//** @@ -3988,6 +3998,8 @@ i_s_fts_config_fill( DBUG_ASSERT(!dict_index_is_online_ddl(index)); } + int ret = 0; + while (fts_config_key[i]) { fts_string_t value; char* key_name; @@ -4012,13 +4024,14 @@ i_s_fts_config_fill( ut_free(key_name); } - OK(field_store_string( - fields[FTS_CONFIG_KEY], fts_config_key[i])); + BREAK_IF(ret = field_store_string( + fields[FTS_CONFIG_KEY], fts_config_key[i])); - OK(field_store_string( - fields[FTS_CONFIG_VALUE], (const char*) value.f_str)); + BREAK_IF(ret = field_store_string( + fields[FTS_CONFIG_VALUE], + reinterpret_cast<const char*>(value.f_str))); - OK(schema_table_store_record(thd, table)); + BREAK_IF(ret = schema_table_store_record(thd, table)); i++; } @@ -4031,7 +4044,7 @@ i_s_fts_config_fill( rw_lock_s_unlock(dict_operation_lock); - DBUG_RETURN(0); + DBUG_RETURN(ret); } /*******************************************************************//** @@ -4889,15 +4902,14 @@ i_s_innodb_buffer_page_fill( i_s_page_type[page_info->page_type].type_str)); OK(fields[IDX_BUFFER_PAGE_FLUSH_TYPE]->store( - page_info->flush_type)); + page_info->flush_type, true)); OK(fields[IDX_BUFFER_PAGE_FIX_COUNT]->store( - page_info->fix_count)); + page_info->fix_count, true)); #ifdef BTR_CUR_HASH_ADAPT - OK(field_store_string( - fields[IDX_BUFFER_PAGE_HASHED], - page_info->hashed ? "YES" : "NO")); + OK(field_store_string(fields[IDX_BUFFER_PAGE_HASHED], + page_info->hashed ? 
"YES" : "NO")); #endif /* BTR_CUR_HASH_ADAPT */ OK(fields[IDX_BUFFER_PAGE_NEWEST_MOD]->store( @@ -4907,7 +4919,7 @@ i_s_innodb_buffer_page_fill( page_info->oldest_mod, true)); OK(fields[IDX_BUFFER_PAGE_ACCESS_TIME]->store( - page_info->access_time)); + page_info->access_time, true)); fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_null(); @@ -4916,32 +4928,36 @@ i_s_innodb_buffer_page_fill( /* If this is an index page, fetch the index name and table name */ if (page_info->page_type == I_S_PAGE_TYPE_INDEX) { - const dict_index_t* index; + bool ret = false; mutex_enter(&dict_sys->mutex); - index = dict_index_get_if_in_cache_low( - page_info->index_id); - - if (index) { + if (const dict_index_t* index = + dict_index_get_if_in_cache_low( + page_info->index_id)) { table_name_end = innobase_convert_name( table_name, sizeof(table_name), index->table_name, strlen(index->table_name), thd); - OK(fields[IDX_BUFFER_PAGE_TABLE_NAME]->store( - table_name, - uint(table_name_end - table_name), - system_charset_info)); - fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_notnull(); - - OK(field_store_index_name( - fields[IDX_BUFFER_PAGE_INDEX_NAME], - index->name)); + ret = fields[IDX_BUFFER_PAGE_TABLE_NAME] + ->store(table_name, + static_cast<uint>( + table_name_end + - table_name), + system_charset_info) + || field_store_index_name( + fields + [IDX_BUFFER_PAGE_INDEX_NAME], + index->name); } mutex_exit(&dict_sys->mutex); + + OK(ret); + + fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_notnull(); } OK(fields[IDX_BUFFER_PAGE_NUM_RECS]->store( @@ -4991,32 +5007,29 @@ i_s_innodb_buffer_page_fill( switch (page_info->io_fix) { case BUF_IO_NONE: - OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], - "IO_NONE")); + state_str = "IO_NONE"; break; case BUF_IO_READ: - OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], - "IO_READ")); + state_str = "IO_READ"; break; case BUF_IO_WRITE: - OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], - "IO_WRITE")); + state_str = "IO_WRITE"; break; case BUF_IO_PIN: - 
OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], - "IO_PIN")); + state_str = "IO_PIN"; break; } + OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], + state_str)); + OK(field_store_string(fields[IDX_BUFFER_PAGE_IS_OLD], (page_info->is_old) ? "YES" : "NO")); OK(fields[IDX_BUFFER_PAGE_FREE_CLOCK]->store( page_info->freed_page_clock, true)); - if (schema_table_store_record(thd, table)) { - DBUG_RETURN(1); - } + OK(schema_table_store_record(thd, table)); } DBUG_RETURN(0); @@ -5572,17 +5585,10 @@ i_s_innodb_buf_page_lru_fill( ulint num_page) /*!< in: number of page info cached */ { - TABLE* table; - Field** fields; - mem_heap_t* heap; - DBUG_ENTER("i_s_innodb_buf_page_lru_fill"); - table = tables->table; - - fields = table->field; - - heap = mem_heap_create(1000); + TABLE* table = tables->table; + Field** fields = table->field; /* Iterate through the cached array and fill the I_S table rows */ for (ulint i = 0; i < num_page; i++) { @@ -5619,9 +5625,8 @@ i_s_innodb_buf_page_lru_fill( page_info->fix_count, true)); #ifdef BTR_CUR_HASH_ADAPT - OK(field_store_string( - fields[IDX_BUF_LRU_PAGE_HASHED], - page_info->hashed ? "YES" : "NO")); + OK(field_store_string(fields[IDX_BUF_LRU_PAGE_HASHED], + page_info->hashed ? 
"YES" : "NO")); #endif /* BTR_CUR_HASH_ADAPT */ OK(fields[IDX_BUF_LRU_PAGE_NEWEST_MOD]->store( @@ -5640,32 +5645,36 @@ i_s_innodb_buf_page_lru_fill( /* If this is an index page, fetch the index name and table name */ if (page_info->page_type == I_S_PAGE_TYPE_INDEX) { - const dict_index_t* index; + bool ret = false; mutex_enter(&dict_sys->mutex); - index = dict_index_get_if_in_cache_low( - page_info->index_id); - - if (index) { + if (const dict_index_t* index = + dict_index_get_if_in_cache_low( + page_info->index_id)) { table_name_end = innobase_convert_name( table_name, sizeof(table_name), index->table_name, strlen(index->table_name), thd); - OK(fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->store( - table_name, - uint(table_name_end - table_name), - system_charset_info)); - fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->set_notnull(); - - OK(field_store_index_name( - fields[IDX_BUF_LRU_PAGE_INDEX_NAME], - index->name)); + ret = fields[IDX_BUF_LRU_PAGE_TABLE_NAME] + ->store(table_name, + static_cast<uint>( + table_name_end + - table_name), + system_charset_info) + || field_store_index_name( + fields + [IDX_BUF_LRU_PAGE_INDEX_NAME], + index->name); } mutex_exit(&dict_sys->mutex); + + OK(ret); + + fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->set_notnull(); } OK(fields[IDX_BUF_LRU_PAGE_NUM_RECS]->store( @@ -5675,8 +5684,8 @@ i_s_innodb_buf_page_lru_fill( page_info->data_size, true)); OK(fields[IDX_BUF_LRU_PAGE_ZIP_SIZE]->store( - page_info->zip_ssize ? - 512 << page_info->zip_ssize : 0, true)); + page_info->zip_ssize + ? 
512 << page_info->zip_ssize : 0, true)); state = static_cast<enum buf_page_state>(page_info->page_state); @@ -5705,35 +5714,31 @@ i_s_innodb_buf_page_lru_fill( switch (page_info->io_fix) { case BUF_IO_NONE: - OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX], - "IO_NONE")); + state_str = "IO_NONE"; break; case BUF_IO_READ: - OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX], - "IO_READ")); + state_str = "IO_READ"; break; case BUF_IO_WRITE: - OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX], - "IO_WRITE")); + state_str = "IO_WRITE"; + break; + case BUF_IO_PIN: + state_str = "IO_PIN"; break; } + OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX], + state_str)); + OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IS_OLD], - (page_info->is_old) ? "YES" : "NO")); + page_info->is_old ? "YES" : "NO")); OK(fields[IDX_BUF_LRU_PAGE_FREE_CLOCK]->store( page_info->freed_page_clock, true)); - if (schema_table_store_record(thd, table)) { - mem_heap_free(heap); - DBUG_RETURN(1); - } - - mem_heap_empty(heap); + OK(schema_table_store_record(thd, table)); } - mem_heap_free(heap); - DBUG_RETURN(0); } @@ -6069,10 +6074,10 @@ i_s_dict_fill_sys_tables( OK(field_store_string(fields[SYS_TABLES_ROW_FORMAT], row_format)); - OK(fields[SYS_TABLES_ZIP_PAGE_SIZE]->store(static_cast<double>( + OK(fields[SYS_TABLES_ZIP_PAGE_SIZE]->store( page_size.is_compressed() ? 
page_size.physical() - : 0))); + : 0, true)); OK(field_store_string(fields[SYS_TABLES_SPACE_TYPE], space_type)); @@ -6372,7 +6377,7 @@ i_s_dict_fill_sys_tablestats( OK(fields[SYS_TABLESTATS_AUTONINC]->store(table->autoinc, true)); - OK(fields[SYS_TABLESTATS_TABLE_REF_COUNT]->store(static_cast<double>(ref_count))); + OK(fields[SYS_TABLESTATS_TABLE_REF_COUNT]->store(ref_count, true)); OK(schema_table_store_record(thd, table_to_fill)); @@ -7308,11 +7313,11 @@ i_s_dict_fill_sys_fields( fields = table_to_fill->field; - OK(fields[SYS_FIELD_INDEX_ID]->store((longlong) index_id, TRUE)); + OK(fields[SYS_FIELD_INDEX_ID]->store(index_id, true)); OK(field_store_string(fields[SYS_FIELD_NAME], field->name)); - OK(fields[SYS_FIELD_POS]->store(static_cast<double>(pos))); + OK(fields[SYS_FIELD_POS]->store(pos, true)); OK(schema_table_store_record(thd, table_to_fill)); diff --git a/storage/innobase/handler/i_s.h b/storage/innobase/handler/i_s.h index 8d34fbf8fbb..e07fe49f7fa 100644 --- a/storage/innobase/handler/i_s.h +++ b/storage/innobase/handler/i_s.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved. -Copyrigth (c) 2014, 2017, MariaDB Corporation +Copyright (c) 2014, 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -73,6 +73,8 @@ extern struct st_maria_plugin i_s_innodb_sys_semaphore_waits; DBUG_RETURN(1); \ } +#define BREAK_IF(expr) if ((expr)) break + #define RETURN_IF_INNODB_NOT_STARTED(plugin_name) \ do { \ if (!srv_was_started) { \ diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc index 014d9f1a1a1..b4cbe7d4480 100644 --- a/storage/innobase/ibuf/ibuf0ibuf.cc +++ b/storage/innobase/ibuf/ibuf0ibuf.cc @@ -3601,7 +3601,7 @@ fail_exit: if (mode == BTR_MODIFY_PREV) { err = btr_cur_optimistic_insert( - BTR_NO_LOCKING_FLAG, + BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG, cursor, &offsets, &offsets_heap, ibuf_entry, &ins_rec, &dummy_big_rec, 0, thr, &mtr); diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h index e1f5286e122..e62a5e90ce2 100644 --- a/storage/innobase/include/btr0cur.h +++ b/storage/innobase/include/btr0cur.h @@ -249,15 +249,17 @@ btr_cur_optimistic_insert( btr_cur_t* cursor, /*!< in: cursor on page after which to insert; cursor stays valid */ ulint** offsets,/*!< out: offsets on *rec */ - mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */ + mem_heap_t** heap, /*!< in/out: pointer to memory heap */ dtuple_t* entry, /*!< in/out: entry to insert */ rec_t** rec, /*!< out: pointer to inserted record if succeed */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or - NULL */ + be stored externally by the caller */ ulint n_ext, /*!< in: number of externally stored columns */ - que_thr_t* thr, /*!< in: query thread or NULL */ + que_thr_t* thr, /*!< in/out: query thread; can be NULL if + !(~flags + & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) */ mtr_t* mtr) /*!< in/out: mini-transaction; if this function returns DB_SUCCESS on a leaf page of a secondary index in a @@ -284,15 
+286,17 @@ btr_cur_pessimistic_insert( cursor stays valid */ ulint** offsets,/*!< out: offsets on *rec */ mem_heap_t** heap, /*!< in/out: pointer to memory heap - that can be emptied, or NULL */ + that can be emptied */ dtuple_t* entry, /*!< in/out: entry to insert */ rec_t** rec, /*!< out: pointer to inserted record if succeed */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or - NULL */ + be stored externally by the caller */ ulint n_ext, /*!< in: number of externally stored columns */ - que_thr_t* thr, /*!< in: query thread or NULL */ + que_thr_t* thr, /*!< in/out: query thread; can be NULL if + !(~flags + & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) */ mtr_t* mtr) /*!< in/out: mini-transaction */ MY_ATTRIBUTE((nonnull(2,3,4,5,6,7,10), warn_unused_result)); /*************************************************************//** @@ -413,12 +417,12 @@ btr_cur_pessimistic_update( ulint** offsets,/*!< out: offsets on cursor->page_cur.rec */ mem_heap_t** offsets_heap, /*!< in/out: pointer to memory heap - that can be emptied, or NULL */ + that can be emptied */ mem_heap_t* entry_heap, /*!< in/out: memory heap for allocating big_rec and the index tuple */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or NULL */ + be stored externally by the caller */ upd_t* update, /*!< in/out: update vector; this is allowed to also contain trx id and roll ptr fields. Non-updated columns that are moved offpage will @@ -439,7 +443,6 @@ undo log record created. 
dberr_t btr_cur_del_mark_set_clust_rec( /*===========================*/ - ulint flags, /*!< in: undo logging and locking flags */ buf_block_t* block, /*!< in/out: buffer block of the record */ rec_t* rec, /*!< in/out: record */ dict_index_t* index, /*!< in: clustered index of the record */ diff --git a/storage/innobase/include/btr0defragment.h b/storage/innobase/include/btr0defragment.h index 21ba6d9f426..9c78ec412a2 100644 --- a/storage/innobase/include/btr0defragment.h +++ b/storage/innobase/include/btr0defragment.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (C) 2013, 2014 Facebook, Inc. All Rights Reserved. -Copyright (C) 2014, 2015, MariaDB Corporation. +Copyright (C) 2014, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -85,13 +85,13 @@ UNIV_INTERN void btr_defragment_save_defrag_stats_if_needed( dict_index_t* index); /*!< in: index */ -/******************************************************************//** -Thread that merges consecutive b-tree pages into fewer pages to defragment -the index. */ + +/** Merge consecutive b-tree pages into fewer pages to defragment indexes */ extern "C" UNIV_INTERN os_thread_ret_t -DECLARE_THREAD(btr_defragment_thread)( -/*==========================================*/ - void* arg); /*!< in: a dummy parameter required by - os_thread_create */ +DECLARE_THREAD(btr_defragment_thread)(void*); + +/** Whether btr_defragment_thread is active */ +extern bool btr_defragment_thread_active; + #endif diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h index 928521e789e..9d5f373f5de 100644 --- a/storage/innobase/include/buf0buf.h +++ b/storage/innobase/include/buf0buf.h @@ -1519,20 +1519,13 @@ directory (buf) to see it. Do not use from outside! 
*/ typedef struct { bool reserved; /*!< true if this slot is reserved */ -#ifdef HAVE_LZO - byte* lzo_mem; /*!< Temporal memory used by LZO */ -#endif byte* crypt_buf; /*!< for encryption the data needs to be copied to a separate buffer before it's encrypted&written. this as a page can be read while it's being flushed */ - byte* crypt_buf_free; /*!< for encryption, allocated buffer - that is then alligned */ byte* comp_buf; /*!< for compression we need temporal buffer because page can be read while it's being flushed */ - byte* comp_buf_free; /*!< for compression, allocated - buffer that is then alligned */ byte* out_buf; /*!< resulting buffer after encryption/compression. This is a pointer and not allocated. */ diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic index f22dcc48a01..38c52d5e608 100644 --- a/storage/innobase/include/buf0buf.ic +++ b/storage/innobase/include/buf0buf.ic @@ -1286,9 +1286,8 @@ buf_page_release_zip( rw_lock_s_unlock(&block->debug_latch); } } - /* Fall through */ #endif /* UNIV_DEBUG */ - + /* Fall through */ case BUF_BLOCK_ZIP_PAGE: case BUF_BLOCK_ZIP_DIRTY: buf_block_unfix(reinterpret_cast<buf_block_t*>(bpage)); diff --git a/storage/innobase/include/buf0dblwr.h b/storage/innobase/include/buf0dblwr.h index e1ecb6baf56..598609e2be4 100644 --- a/storage/innobase/include/buf0dblwr.h +++ b/storage/innobase/include/buf0dblwr.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under @@ -38,24 +38,27 @@ extern buf_dblwr_t* buf_dblwr; /** Set to TRUE when the doublewrite buffer is being created */ extern ibool buf_dblwr_being_created; -/****************************************************************//** -Creates the doublewrite buffer to a new InnoDB installation. The header of the -doublewrite buffer is placed on the trx system header page. -@return true if successful, false if not. */ +/** Create the doublewrite buffer if the doublewrite buffer header +is not present in the TRX_SYS page. +@return whether the operation succeeded +@retval true if the doublewrite buffer exists or was created +@retval false if the creation failed (too small first data file) */ MY_ATTRIBUTE((warn_unused_result)) bool buf_dblwr_create(); -/****************************************************************//** -At a database startup initializes the doublewrite buffer memory structure if +/** +At database startup initializes the doublewrite buffer memory structure if we already have a doublewrite buffer created in the data files. If we are upgrading to an InnoDB version which supports multiple tablespaces, then this function performs the necessary update operations. If we are in a crash recovery, this function loads the pages from double write buffer into memory. +@param[in] file File handle +@param[in] path Path name of file @return DB_SUCCESS or error code */ dberr_t buf_dblwr_init_or_load_pages( - os_file_t file, + pfs_os_file_t file, const char* path); /** Process and remove the double write buffer pages for all tablespaces. */ diff --git a/storage/innobase/include/data0type.ic b/storage/innobase/include/data0type.ic index 9ec85dfaf50..b5a467455b9 100644 --- a/storage/innobase/include/data0type.ic +++ b/storage/innobase/include/data0type.ic @@ -527,7 +527,8 @@ dtype_get_fixed_size_low( return(len); } } - /* fall through for variable-length charsets */ + /* Treat as variable-length. 
*/ + /* Fall through */ case DATA_VARCHAR: case DATA_BINARY: case DATA_DECIMAL: diff --git a/storage/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic index 41f556788a9..8f56c15f95b 100644 --- a/storage/innobase/include/dict0dict.ic +++ b/storage/innobase/include/dict0dict.ic @@ -638,83 +638,69 @@ dict_table_has_fts_index( return(DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS)); } -/** Validate the table flags. -@param[in] flags Table flags -@return true if valid. */ -UNIV_INLINE +/** Validate the flags for tables that are not ROW_FORMAT=REDUNDANT. +@param[in] flags table flags +@return whether the flags are valid */ +inline bool -dict_tf_is_valid( - ulint flags) +dict_tf_is_valid_not_redundant(ulint flags) { - bool compact = DICT_TF_GET_COMPACT(flags); - ulint zip_ssize = DICT_TF_GET_ZIP_SSIZE(flags); - bool atomic_blobs = DICT_TF_HAS_ATOMIC_BLOBS(flags); - bool data_dir = DICT_TF_HAS_DATA_DIR(flags); - ulint unused = DICT_TF_GET_UNUSED(flags); - bool page_compression = DICT_TF_GET_PAGE_COMPRESSION(flags); - ulint page_compression_level = DICT_TF_GET_PAGE_COMPRESSION_LEVEL(flags); - bool flags_corrupt = false; - - /* Make sure there are no bits that we do not know about. */ - if (unused != 0) { - flags_corrupt = true; - } + const bool atomic_blobs = DICT_TF_HAS_ATOMIC_BLOBS(flags); - if (atomic_blobs) { - /* ROW_FORMAT=COMPRESSED and ROW_FORMAT=DYNAMIC both use - atomic_blobs, which build on the page structure introduced - for the COMPACT row format by allowing keys in secondary - indexes to be made from data stored off-page in the - clustered index. */ - - if (!compact) { - flags_corrupt = true; - } + ulint zip_ssize = DICT_TF_GET_ZIP_SSIZE(flags); - } else if (zip_ssize) { - /* ROW_FORMAT=COMPRESSED implies atomic blobs. 
*/ - flags_corrupt = true; + if (!zip_ssize) { + /* Not ROW_FORMAT=COMPRESSED */ + } else if (!atomic_blobs) { + /* ROW_FORMAT=COMPRESSED implies ROW_FORMAT=DYNAMIC + for the uncompressed page format */ + return(false); + } else if (zip_ssize > PAGE_ZIP_SSIZE_MAX + || zip_ssize > UNIV_PAGE_SIZE_SHIFT + || UNIV_PAGE_SIZE_SHIFT > UNIV_ZIP_SIZE_SHIFT_MAX) { + /* KEY_BLOCK_SIZE is out of bounds, or + ROW_FORMAT=COMPRESSED is not supported with this + innodb_page_size (only up to 16KiB) */ + return(false); } - if (zip_ssize) { - - /* COMPRESSED row format must have compact and atomic_blobs - bits set and validate the number is within allowed range. */ - - if (!compact - || !atomic_blobs - || zip_ssize > PAGE_ZIP_SSIZE_MAX) { - flags_corrupt = true; - } + switch (DICT_TF_GET_PAGE_COMPRESSION_LEVEL(flags)) { + case 0: + /* PAGE_COMPRESSION_LEVEL=0 should imply PAGE_COMPRESSED=NO */ + return(!DICT_TF_GET_PAGE_COMPRESSION(flags)); + case 1: case 2: case 3: case 4: case 5: case 6: case 7: case 8: case 9: + /* PAGE_COMPRESSION_LEVEL requires + ROW_FORMAT=COMPACT or ROW_FORMAT=DYNAMIC + (not ROW_FORMAT=COMPRESSED or ROW_FORMAT=REDUNDANT) + and PAGE_COMPRESSED=YES */ + return(!zip_ssize && DICT_TF_GET_PAGE_COMPRESSION(flags)); + default: + /* Invalid PAGE_COMPRESSION_LEVEL value */ + return(false); } +} - if (page_compression || page_compression_level) { - /* Page compression format must have compact and - atomic_blobs and page_compression_level requires - page_compression */ - if (!compact - || !page_compression - || !atomic_blobs) { - flags_corrupt = true; - } +/** Validate the table flags. +@param[in] flags Table flags +@return true if valid. */ +UNIV_INLINE +bool +dict_tf_is_valid( + ulint flags) +{ + ut_ad(flags < 1U << DICT_TF_BITS); + /* The DATA_DIRECTORY flag can be assigned fully independently + of all other persistent table flags. */ + flags &= ~DICT_TF_MASK_DATA_DIR; + if (!(flags & 1)) { + /* Only ROW_FORMAT=REDUNDANT has 0 in the least significant + bit. 
For ROW_FORMAT=REDUNDANT, only the DATA_DIR flag + (which we cleared above) can be set. If any other flags + are set, the flags are invalid. */ + return(flags == 0); } - - if (flags_corrupt) { - ib::error() - << "InnoDB: Error: table unused flags are:" << flags - << " in the data dictionary and are corrupted:" - << " compact:" << compact - << " atomic_blobs:" << atomic_blobs - << " unused:" << unused - << " data_dir:" << data_dir - << " zip_ssize:" << zip_ssize - << " page_compression:" << page_compression - << " page_compression_level:" << page_compression_level; - return (false); - } else { - return(true); - } + return(dict_tf_is_valid_not_redundant(flags)); } /** Validate both table flags and table flags2 and make sure they @@ -740,115 +726,9 @@ dict_tf2_is_valid( } /********************************************************************//** -Validate a SYS_TABLES TYPE field and return it. -@return Same as input after validating it as a SYS_TABLES TYPE field. -If there is an error, return ULINT_UNDEFINED. */ -UNIV_INLINE -ulint -dict_sys_tables_type_validate( -/*==========================*/ - ulint type, /*!< in: SYS_TABLES.TYPE */ - ulint n_cols) /*!< in: SYS_TABLES.N_COLS */ -{ - ulint low_order_bit = DICT_TF_GET_COMPACT(type); - ulint redundant = !(n_cols & DICT_N_COLS_COMPACT); - ulint zip_ssize = DICT_TF_GET_ZIP_SSIZE(type); - ulint atomic_blobs = DICT_TF_HAS_ATOMIC_BLOBS(type); - ulint unused = DICT_TF_GET_UNUSED(type); - bool page_compression = DICT_TF_GET_PAGE_COMPRESSION(type); - ulint page_compression_level = DICT_TF_GET_PAGE_COMPRESSION_LEVEL(type); - - /* The low order bit of SYS_TABLES.TYPE is always set to 1. - If !atomic_blobs, this field is the same - as dict_table_t::flags. Zero is not allowed here. 
*/ - if (!low_order_bit) { - return(ULINT_UNDEFINED); - } - - if (redundant) { - if (zip_ssize || atomic_blobs) { - ib::error() - << "SYS_TABLES::TYPE=Redundant, zip_ssize:" << zip_ssize - << " atomic_blobs:" << atomic_blobs; - return(ULINT_UNDEFINED); - } - } - - /* Make sure there are no bits that we do not know about. */ - if (unused) { - ib::error() - << "SYS_TABLES::TYPE=" << type << " unused:" << unused; - return(ULINT_UNDEFINED); - } - - if (atomic_blobs) { - /* ROW_FORMAT=COMPRESSED and ROW_FORMAT=DYNAMIC build on - the page structure introduced for the COMPACT row format - by allowing keys in secondary indexes to be made from - data stored off-page in the clustered index. - - The DICT_N_COLS_COMPACT flag should be in N_COLS, - but we already know that. */ - - } else if (zip_ssize) { - /* ROW_FORMAT=COMPRESSED implies atomic blobs. */ - ib::error() - << "SYS_TABLES::TYPE=" << type - << ", zip_ssize:" << zip_ssize; - - return(ULINT_UNDEFINED); - } - - if (zip_ssize) { - /* COMPRESSED row format must have low_order_bit and - atomic_blobs bits set and the DICT_N_COLS_COMPACT flag - should be in N_COLS, but we already know about the - low_order_bit and DICT_N_COLS_COMPACT flags. */ - if (!atomic_blobs) { - ib::error() << "SYS_TABLES::TYPE=" << type - << ", zip_ssize:" << zip_ssize - << ", atomic_blobs:" << atomic_blobs; - return(ULINT_UNDEFINED); - } - - /* Validate that the number is within allowed range. */ - if (zip_ssize > PAGE_ZIP_SSIZE_MAX) { - ib::error() << "SYS_TABLES::TYPE=" << type - << ", zip_ssize:" << zip_ssize - << " > " << PAGE_ZIP_SSIZE_MAX; - return(ULINT_UNDEFINED); - } - } - - /* There is nothing to validate for the data_dir field. - CREATE TABLE ... DATA DIRECTORY is supported for any row - format, so the DATA_DIR flag is compatible with any other - table flags. However, it is not used with TEMPORARY tables. 
*/ - - if (page_compression || page_compression_level) { - /* page compressed row format must have low_order_bit and - atomic_blobs bits set and the DICT_N_COLS_COMPACT flag - should be in N_COLS, but we already know about the - low_order_bit and DICT_N_COLS_COMPACT flags. */ - - if (!atomic_blobs || !page_compression) { - ib::error() << "SYS_TABLES::TYPE=" << type - << " page_compression:" << page_compression - << " page_compression_level:" << page_compression_level - << " atomic_blobs:" << atomic_blobs; - - return(ULINT_UNDEFINED); - } - } - - /* Return the validated SYS_TABLES.TYPE. */ - return(type); -} - -/********************************************************************//** -Determine the page format from dict_table_t::flags +Determine the file format from dict_table_t::flags The low order bit will be zero for REDUNDANT and 1 for COMPACT. For any -other row_format, flags is nonzero and DICT_TF_COMPACT will also be set. +other row_format, file_format is > 0 and DICT_TF_COMPACT will also be set. @return file format version */ UNIV_INLINE rec_format_t @@ -948,7 +828,6 @@ dict_tf_to_fsp_flags(ulint table_flags) ulint fsp_flags; ulint page_compression_level = DICT_TF_GET_PAGE_COMPRESSION_LEVEL( table_flags); - ulint atomic_writes = DICT_TF_GET_ATOMIC_WRITES(table_flags); ut_ad((DICT_TF_GET_PAGE_COMPRESSION(table_flags) == 0) == (page_compression_level == 0)); @@ -975,48 +854,12 @@ dict_tf_to_fsp_flags(ulint table_flags) fsp_flags |= 1U << FSP_FLAGS_MEM_DATA_DIR; } - fsp_flags |= atomic_writes << FSP_FLAGS_MEM_ATOMIC_WRITES; fsp_flags |= page_compression_level << FSP_FLAGS_MEM_COMPRESSION_LEVEL; return(fsp_flags); } /********************************************************************//** -Convert a 32 bit integer from SYS_TABLES.TYPE to dict_table_t::flags -The following chart shows the translation of the low order bit. -Other bits are the same. 
-========================= Low order bit ========================== - | REDUNDANT | COMPACT | COMPRESSED and DYNAMIC -SYS_TABLES.TYPE | 1 | 1 | 1 -dict_table_t::flags | 0 | 1 | 1 -================================================================== -@return ulint containing SYS_TABLES.TYPE */ -UNIV_INLINE -ulint -dict_sys_tables_type_to_tf( -/*=======================*/ - ulint type, /*!< in: SYS_TABLES.TYPE field */ - ulint n_cols) /*!< in: SYS_TABLES.N_COLS field */ -{ - ulint flags; - ulint redundant = !(n_cols & DICT_N_COLS_COMPACT); - - /* Adjust bit zero. */ - flags = redundant ? 0 : 1; - - /* ZIP_SSIZE, ATOMIC_BLOBS, DATA_DIR, PAGE_COMPRESSION, - PAGE_COMPRESSION_LEVEL are the same. */ - flags |= type & (DICT_TF_MASK_ZIP_SSIZE - | DICT_TF_MASK_ATOMIC_BLOBS - | DICT_TF_MASK_DATA_DIR - | DICT_TF_MASK_PAGE_COMPRESSION - | DICT_TF_MASK_PAGE_COMPRESSION_LEVEL); - - ut_ad(!DICT_TF_GET_ZIP_SSIZE(flags) || DICT_TF_HAS_ATOMIC_BLOBS(flags)); - return(flags); -} - -/********************************************************************//** Convert a 32 bit integer table flags to the 32bit integer that is written to a SYS_TABLES.TYPE field. The following chart shows the translation of the low order bit. Other bits are the same. @@ -1045,7 +888,8 @@ dict_tf_to_sys_tables_type( | DICT_TF_MASK_ATOMIC_BLOBS | DICT_TF_MASK_DATA_DIR | DICT_TF_MASK_PAGE_COMPRESSION - | DICT_TF_MASK_PAGE_COMPRESSION_LEVEL); + | DICT_TF_MASK_PAGE_COMPRESSION_LEVEL + | DICT_TF_MASK_NO_ROLLBACK); return(type); } diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h index 7135a5a3bb4..04c4af36aa9 100644 --- a/storage/innobase/include/dict0mem.h +++ b/storage/innobase/include/dict0mem.h @@ -138,10 +138,6 @@ This flag prevents older engines from attempting to open the table and allows InnoDB to update_create_info() accordingly. */ #define DICT_TF_WIDTH_DATA_DIR 1 -/** Width of the SHARED tablespace flag (Oracle MYSQL 5.7). -Not supported by MariaDB. 
*/ -#define DICT_TF_WIDTH_SHARED_SPACE 1 - /** Width of the page compression flag */ @@ -149,36 +145,20 @@ Width of the page compression flag #define DICT_TF_WIDTH_PAGE_COMPRESSION_LEVEL 4 /** -Width of atomic writes flag -DEFAULT=0, ON = 1, OFF = 2 -*/ -#define DICT_TF_WIDTH_ATOMIC_WRITES 2 - -/** -Width of the page encryption flag +The NO_ROLLBACK flag (3=yes; the values 1,2 used stand for +ATOMIC_WRITES=ON and ATOMIC_WRITES=OFF between MariaDB 10.1.0 and 10.2.3) */ -#define DICT_TF_WIDTH_PAGE_ENCRYPTION 1 -#define DICT_TF_WIDTH_PAGE_ENCRYPTION_KEY 8 - -/** Width of the NO_ROLLBACK flag */ -#define DICT_TF_WIDTH_NO_ROLLBACK 1 +#define DICT_TF_WIDTH_NO_ROLLBACK 2 /** Width of all the currently known table flags */ #define DICT_TF_BITS (DICT_TF_WIDTH_COMPACT \ + DICT_TF_WIDTH_ZIP_SSIZE \ + DICT_TF_WIDTH_ATOMIC_BLOBS \ + DICT_TF_WIDTH_DATA_DIR \ - + DICT_TF_WIDTH_SHARED_SPACE \ + DICT_TF_WIDTH_PAGE_COMPRESSION \ + DICT_TF_WIDTH_PAGE_COMPRESSION_LEVEL \ - + DICT_TF_WIDTH_ATOMIC_WRITES \ - + DICT_TF_WIDTH_PAGE_ENCRYPTION \ - + DICT_TF_WIDTH_PAGE_ENCRYPTION_KEY \ + DICT_TF_WIDTH_NO_ROLLBACK) -/** A mask of all the known/used bits in table flags */ -#define DICT_TF_BIT_MASK (~(~0U << DICT_TF_BITS)) - /** Zero relative shift position of the COMPACT field */ #define DICT_TF_POS_COMPACT 0 /** Zero relative shift position of the ZIP_SSIZE field */ @@ -190,29 +170,18 @@ Width of the page encryption flag /** Zero relative shift position of the DATA_DIR field */ #define DICT_TF_POS_DATA_DIR (DICT_TF_POS_ATOMIC_BLOBS \ + DICT_TF_WIDTH_ATOMIC_BLOBS) -/** Zero relative shift position of the SHARED TABLESPACE field */ -#define DICT_TF_POS_SHARED_SPACE (DICT_TF_POS_DATA_DIR \ - + DICT_TF_WIDTH_DATA_DIR) /** Zero relative shift position of the PAGE_COMPRESSION field */ -#define DICT_TF_POS_PAGE_COMPRESSION (DICT_TF_POS_SHARED_SPACE \ - + DICT_TF_WIDTH_SHARED_SPACE) +#define DICT_TF_POS_PAGE_COMPRESSION (DICT_TF_POS_DATA_DIR \ + + DICT_TF_WIDTH_DATA_DIR) /** Zero relative shift 
position of the PAGE_COMPRESSION_LEVEL field */ #define DICT_TF_POS_PAGE_COMPRESSION_LEVEL (DICT_TF_POS_PAGE_COMPRESSION \ + DICT_TF_WIDTH_PAGE_COMPRESSION) -/** Zero relative shift position of the ATOMIC_WRITES field */ -#define DICT_TF_POS_ATOMIC_WRITES (DICT_TF_POS_PAGE_COMPRESSION_LEVEL \ - + DICT_TF_WIDTH_PAGE_COMPRESSION_LEVEL) -/** Zero relative shift position of the PAGE_ENCRYPTION field */ -#define DICT_TF_POS_PAGE_ENCRYPTION (DICT_TF_POS_ATOMIC_WRITES \ - + DICT_TF_WIDTH_ATOMIC_WRITES) -/** Zero relative shift position of the PAGE_ENCRYPTION_KEY field */ -#define DICT_TF_POS_PAGE_ENCRYPTION_KEY (DICT_TF_POS_PAGE_ENCRYPTION \ - + DICT_TF_WIDTH_PAGE_ENCRYPTION) /** Zero relative shift position of the NO_ROLLBACK field */ -#define DICT_TF_POS_NO_ROLLBACK (DICT_TF_POS_PAGE_ENCRYPTION_KEY \ - + DICT_TF_WIDTH_PAGE_ENCRYPTION_KEY) -#define DICT_TF_POS_UNUSED (DICT_TF_POS_NO_ROLLBACK \ +#define DICT_TF_POS_NO_ROLLBACK (DICT_TF_POS_PAGE_COMPRESSION_LEVEL \ + + DICT_TF_WIDTH_PAGE_COMPRESSION_LEVEL) +#define DICT_TF_POS_UNUSED (DICT_TF_POS_NO_ROLLBACK \ + DICT_TF_WIDTH_NO_ROLLBACK) + /** Bit mask of the COMPACT field */ #define DICT_TF_MASK_COMPACT \ ((~(~0U << DICT_TF_WIDTH_COMPACT)) \ @@ -237,18 +206,10 @@ Width of the page encryption flag #define DICT_TF_MASK_PAGE_COMPRESSION_LEVEL \ ((~(~0U << DICT_TF_WIDTH_PAGE_COMPRESSION_LEVEL)) \ << DICT_TF_POS_PAGE_COMPRESSION_LEVEL) -/** Bit mask of the ATOMIC_WRITES field */ -#define DICT_TF_MASK_ATOMIC_WRITES \ - ((~(~0U << DICT_TF_WIDTH_ATOMIC_WRITES)) \ - << DICT_TF_POS_ATOMIC_WRITES) -/** Bit mask of the PAGE_ENCRYPTION field */ -#define DICT_TF_MASK_PAGE_ENCRYPTION \ - ((~(~0U << DICT_TF_WIDTH_PAGE_ENCRYPTION)) \ - << DICT_TF_POS_PAGE_ENCRYPTION) -/** Bit mask of the PAGE_ENCRYPTION_KEY field */ -#define DICT_TF_MASK_PAGE_ENCRYPTION_KEY \ - ((~(~0U << DICT_TF_WIDTH_PAGE_ENCRYPTION_KEY)) \ - << DICT_TF_POS_PAGE_ENCRYPTION_KEY) +/** Bit mask of the NO_ROLLBACK field */ +#define DICT_TF_MASK_NO_ROLLBACK \ + ((~(~0U << 
DICT_TF_WIDTH_NO_ROLLBACK)) \ + << DICT_TF_POS_NO_ROLLBACK) /** Return the value of the COMPACT field */ #define DICT_TF_GET_COMPACT(flags) \ @@ -274,22 +235,7 @@ Width of the page encryption flag #define DICT_TF_GET_PAGE_COMPRESSION_LEVEL(flags) \ ((flags & DICT_TF_MASK_PAGE_COMPRESSION_LEVEL) \ >> DICT_TF_POS_PAGE_COMPRESSION_LEVEL) -/** Return the value of the ATOMIC_WRITES field */ -#define DICT_TF_GET_ATOMIC_WRITES(flags) \ - ((flags & DICT_TF_MASK_ATOMIC_WRITES) \ - >> DICT_TF_POS_ATOMIC_WRITES) -/** Return the contents of the PAGE_ENCRYPTION field */ -#define DICT_TF_GET_PAGE_ENCRYPTION(flags) \ - ((flags & DICT_TF_MASK_PAGE_ENCRYPTION) \ - >> DICT_TF_POS_PAGE_ENCRYPTION) -/** Return the contents of the PAGE_ENCRYPTION KEY field */ -#define DICT_TF_GET_PAGE_ENCRYPTION_KEY(flags) \ - ((flags & DICT_TF_MASK_PAGE_ENCRYPTION_KEY) \ - >> DICT_TF_POS_PAGE_ENCRYPTION_KEY) - -/** Return the contents of the UNUSED bits */ -#define DICT_TF_GET_UNUSED(flags) \ - (flags >> DICT_TF_POS_UNUSED) + /* @} */ /** @brief Table Flags set number 2. @@ -301,9 +247,8 @@ ROW_FORMAT=REDUNDANT. InnoDB engines do not check these flags for unknown bits in order to protect backward incompatibility. */ /* @{ */ /** Total number of bits in table->flags2. */ -#define DICT_TF2_BITS 9 -#define DICT_TF2_UNUSED_BIT_MASK (~0U << DICT_TF2_BITS | \ - 1U << DICT_TF_POS_SHARED_SPACE) +#define DICT_TF2_BITS 7 +#define DICT_TF2_UNUSED_BIT_MASK (~0U << DICT_TF2_BITS) #define DICT_TF2_BIT_MASK ~DICT_TF2_UNUSED_BIT_MASK /** TEMPORARY; TRUE for tables from CREATE TEMPORARY TABLE. */ @@ -926,8 +871,6 @@ struct dict_index_t{ dict_field_t* fields; /*!< array of field descriptions */ st_mysql_ftparser* parser; /*!< fulltext parser plugin */ - bool is_ngram; - /*!< true if it's ngram parser */ bool has_new_v_col; /*!< whether it has a newly added virtual column in ALTER */ @@ -1364,18 +1307,13 @@ struct dict_table_t { /** Acquire the table handle. 
*/ inline void acquire(); - void* thd; /*!< thd */ - bool page_0_read; /*!< true if page 0 has - been already read */ - fil_space_crypt_t *crypt_data; /*!< crypt data if present */ - /** Release the table handle. */ inline void release(); /** @return whether the table supports transactions */ bool no_rollback() const { - return flags & (1U << DICT_TF_POS_NO_ROLLBACK); + return !(~flags & DICT_TF_MASK_NO_ROLLBACK); } /** @return whether this table is readable @retval true normally @@ -1428,8 +1366,6 @@ struct dict_table_t { 5 whether the table is being created its own tablespace, 6 whether the table has been DISCARDed, 7 whether the aux FTS tables names are in hex. - 8 whether the table is instinc table. - 9 whether the table has encryption setting. Use DICT_TF2_FLAG_IS_SET() to parse this flag. */ unsigned flags2:DICT_TF2_BITS; diff --git a/storage/innobase/include/dict0stats.h b/storage/innobase/include/dict0stats.h index 752c197f8c3..8846aeda7fd 100644 --- a/storage/innobase/include/dict0stats.h +++ b/storage/innobase/include/dict0stats.h @@ -110,6 +110,13 @@ dict_stats_deinit( dict_table_t* table) /*!< in/out: table */ MY_ATTRIBUTE((nonnull)); +/** Update the table modification counter and if necessary, +schedule new estimates for table and index statistics to be calculated. +@param[in,out] table persistent or temporary table */ +void +dict_stats_update_if_needed(dict_table_t* table) + MY_ATTRIBUTE((nonnull)); + /*********************************************************************//** Calculates new estimates for table and index statistics. The statistics are used in query optimization. 
diff --git a/storage/innobase/include/dict0stats_bg.h b/storage/innobase/include/dict0stats_bg.h index b7bf1b0c170..f99cbeed780 100644 --- a/storage/innobase/include/dict0stats_bg.h +++ b/storage/innobase/include/dict0stats_bg.h @@ -47,17 +47,6 @@ extern my_bool innodb_dict_stats_disabled_debug; #endif /* UNIV_DEBUG */ /*****************************************************************//** -Add a table to the recalc pool, which is processed by the -background stats gathering thread. Only the table id is added to the -list, so the table can be closed after being enqueued and it will be -opened when needed. If the table does not exist later (has been DROPped), -then it will be removed from the pool and skipped. */ -void -dict_stats_recalc_pool_add( -/*=======================*/ - const dict_table_t* table); /*!< in: table to add */ - -/*****************************************************************//** Delete a given table from the auto recalc pool. dict_stats_recalc_pool_del() */ void @@ -151,7 +140,7 @@ DECLARE_THREAD(dict_stats_thread)( void* arg); /*!< in: a dummy parameter required by os_thread_create */ -/** Shutdown the dict stats thread. */ +/** Shut down the dict_stats_thread. */ void dict_stats_shutdown(); diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index d8b6cf33675..8b87a5a2c90 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -180,9 +180,6 @@ struct fil_space_t { /** MariaDB encryption data */ fil_space_crypt_t* crypt_data; - /** tablespace crypt data has been read */ - bool page_0_crypt_read; - /** True if we have already printed compression failure */ bool printed_compression_failure; @@ -217,7 +214,7 @@ struct fil_node_t { /** file name; protected by fil_system->mutex and log_sys->mutex. 
*/ char* name; /** file handle (valid if is_open) */ - os_file_t handle; + pfs_os_file_t handle; /** event that groups and serializes calls to fsync; os_event_set() and os_event_reset() are protected by fil_system_t::mutex */ @@ -586,7 +583,6 @@ Error messages are issued to the server log. @param[in] flags tablespace flags @param[in] purpose tablespace purpose @param[in,out] crypt_data encryption information -@param[in] create_table whether this is CREATE TABLE @param[in] mode encryption mode @return pointer to created tablespace, to be filled in with fil_node_create() @retval NULL on failure (such as when the same tablespace exists) */ @@ -597,7 +593,6 @@ fil_space_create( ulint flags, fil_type_t purpose, fil_space_crypt_t* crypt_data, - bool create_table, fil_encryption_t mode = FIL_ENCRYPTION_DEFAULT) MY_ATTRIBUTE((warn_unused_result)); @@ -1064,7 +1059,7 @@ fil_ibd_create( ulint size, fil_encryption_t mode, uint32_t key_id) - MY_ATTRIBUTE((warn_unused_result)); + MY_ATTRIBUTE((nonnull(2), warn_unused_result)); /** Try to adjust FSP_SPACE_FLAGS if they differ from the expectations. (Typically when upgrading from MariaDB 10.1.0..10.1.20.) @@ -1111,8 +1106,7 @@ fil_ibd_open( ulint id, ulint flags, const char* tablename, - const char* path_in, - dict_table_t* table) /*!< in: table */ + const char* path_in) MY_ATTRIBUTE((warn_unused_result)); enum fil_load_status { @@ -1174,7 +1168,6 @@ fil_space_for_table_exists_in_mem( when find table space mismatch */ mem_heap_t* heap, /*!< in: heap memory */ table_id_t table_id, /*!< in: table id */ - dict_table_t* table, /*!< in: table or NULL */ ulint table_flags); /*!< in: table flags */ /** Try to extend a tablespace if it is smaller than the specified size. @@ -1404,19 +1397,19 @@ struct PageCallback { /** Called for every page in the tablespace. If the page was not updated then its state must be set to BUF_PAGE_NOT_USED. 
For compressed tables the page descriptor memory will be at offset: - block->frame + UNIV_PAGE_SIZE; + block->frame + UNIV_PAGE_SIZE; @param offset physical offset within the file @param block block read from file, note it is not from the buffer pool @retval DB_SUCCESS or error code. */ virtual dberr_t operator()( - os_offset_t offset, + os_offset_t offset, buf_block_t* block) UNIV_NOTHROW = 0; /** Set the name of the physical file and the file handle that is used to open it for the file that is being iterated over. - @param filename then physical name of the tablespace file. + @param filename the name of the tablespace file @param file OS file handle */ - void set_file(const char* filename, os_file_t file) UNIV_NOTHROW + void set_file(const char* filename, pfs_os_file_t file) UNIV_NOTHROW { m_file = file; m_filepath = filename; @@ -1441,7 +1434,7 @@ struct PageCallback { page_size_t m_page_size; /** File handle to the tablespace */ - os_file_t m_file; + pfs_os_file_t m_file; /** Physical file path. 
*/ const char* m_filepath; diff --git a/storage/innobase/include/fil0pagecompress.h b/storage/innobase/include/fil0pagecompress.h index 67ff7895b02..be10f99d0f0 100644 --- a/storage/innobase/include/fil0pagecompress.h +++ b/storage/innobase/include/fil0pagecompress.h @@ -46,9 +46,8 @@ fil_compress_page( ulint level, /* in: compression level */ ulint block_size, /*!< in: block size */ bool encrypted, /*!< in: is page also encrypted */ - ulint* out_len, /*!< out: actual length of compressed + ulint* out_len); /*!< out: actual length of compressed page */ - byte* lzo_mem); /*!< in: temporal memory used by LZO */ /****************************************************************//** For page compressed pages decompress the page after actual read diff --git a/storage/innobase/include/fsp0file.h b/storage/innobase/include/fsp0file.h index 1c4ac8152c9..974de9c75ed 100644 --- a/storage/innobase/include/fsp0file.h +++ b/storage/innobase/include/fsp0file.h @@ -54,7 +54,7 @@ public: m_name(), m_filepath(), m_filename(), - m_handle(OS_FILE_CLOSED), + m_handle(), m_open_flags(OS_FILE_OPEN), m_size(), m_order(), @@ -77,7 +77,7 @@ public: m_name(mem_strdup(name)), m_filepath(), m_filename(), - m_handle(OS_FILE_CLOSED), + m_handle(), m_open_flags(OS_FILE_OPEN), m_size(size), m_order(order), @@ -274,7 +274,7 @@ public: /** Get Datafile::m_handle. 
@return m_handle */ - os_file_t handle() const + pfs_os_file_t handle() const { return(m_handle); } @@ -416,7 +416,7 @@ private: char* m_filename; /** Open file handle */ - os_file_t m_handle; + pfs_os_file_t m_handle; /** Flags to use for opening the data file */ os_file_create_t m_open_flags; diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h index f6fee72300c..63fc211c88d 100644 --- a/storage/innobase/include/fsp0fsp.h +++ b/storage/innobase/include/fsp0fsp.h @@ -397,18 +397,12 @@ fsp_header_init_fields( ulint space_id, /*!< in: space id */ ulint flags); /*!< in: tablespace flags (FSP_SPACE_FLAGS): 0, or table->flags if newer than COMPACT */ - -/** Initializes the space header of a new created space and creates also the -insert buffer tree root if space == 0. +/** Initialize a tablespace header. @param[in] space_id space id @param[in] size current size in blocks -@param[in,out] mtr min-transaction -@return true on success, otherwise false. */ -bool -fsp_header_init( - ulint space_id, - ulint size, - mtr_t* mtr); +@param[in,out] mtr mini-transaction */ +void +fsp_header_init(ulint space_id, ulint size, mtr_t* mtr); /**********************************************************************//** Increases the space size field of a space. 
*/ diff --git a/storage/innobase/include/fsp0types.h b/storage/innobase/include/fsp0types.h index e1070ee6e06..6eac78c04b2 100644 --- a/storage/innobase/include/fsp0types.h +++ b/storage/innobase/include/fsp0types.h @@ -278,10 +278,8 @@ The flags below only exist in fil_space_t::flags, not in FSP_SPACE_FLAGS: /** Zero relative shift position of the DATA_DIR flag */ #define FSP_FLAGS_MEM_DATA_DIR 25 -/** Zero relative shift position of the ATOMIC_WRITES field */ -#define FSP_FLAGS_MEM_ATOMIC_WRITES 26 /** Zero relative shift position of the COMPRESSION_LEVEL field */ -#define FSP_FLAGS_MEM_COMPRESSION_LEVEL 28 +#define FSP_FLAGS_MEM_COMPRESSION_LEVEL 26 /** Zero relative shift position of the POST_ANTELOPE field */ #define FSP_FLAGS_POS_POST_ANTELOPE 0 @@ -327,10 +325,6 @@ these are only used in MySQL 5.7 and used for compatibility. */ ((~(~0U << FSP_FLAGS_WIDTH_PAGE_COMPRESSION)) \ << FSP_FLAGS_POS_PAGE_COMPRESSION) -/** Bit mask of the in-memory ATOMIC_WRITES field */ -#define FSP_FLAGS_MASK_MEM_ATOMIC_WRITES \ - (3U << FSP_FLAGS_MEM_ATOMIC_WRITES) - /** Bit mask of the in-memory COMPRESSION_LEVEL field */ #define FSP_FLAGS_MASK_MEM_COMPRESSION_LEVEL \ (15U << FSP_FLAGS_MEM_COMPRESSION_LEVEL) @@ -371,10 +365,6 @@ these are only used in MySQL 5.7 and used for compatibility. */ #define FSP_FLAGS_GET_PAGE_COMPRESSION_LEVEL(flags) \ ((flags & FSP_FLAGS_MASK_MEM_COMPRESSION_LEVEL) \ >> FSP_FLAGS_MEM_COMPRESSION_LEVEL) -/** @return the ATOMIC_WRITES field */ -#define FSP_FLAGS_GET_ATOMIC_WRITES(flags) \ - ((flags & FSP_FLAGS_MASK_MEM_ATOMIC_WRITES) \ - >> FSP_FLAGS_MEM_ATOMIC_WRITES) /* @} */ diff --git a/storage/innobase/include/fts0priv.h b/storage/innobase/include/fts0priv.h index 80ebcf09d6d..f9d5d07a44c 100644 --- a/storage/innobase/include/fts0priv.h +++ b/storage/innobase/include/fts0priv.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved. 
+Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -215,13 +216,10 @@ fts_write_node( fts_node_t* node) /*!< in: node columns */ MY_ATTRIBUTE((warn_unused_result)); -/** Check fts token -1. for ngram token, check whether the token contains any words in stopwords -2. for non-ngram token, check if it's stopword or less than fts_min_token_size +/** Check if a fts token is a stopword or less than fts_min_token_size or greater than fts_max_token_size. @param[in] token token string @param[in] stopwords stopwords rb tree -@param[in] is_ngram is ngram parser @param[in] cs token charset @retval true if it is not stopword and length in range @retval false if it is stopword or length not in range */ @@ -229,7 +227,6 @@ bool fts_check_token( const fts_string_t* token, const ib_rbt_t* stopwords, - bool is_ngram, const CHARSET_INFO* cs); /******************************************************************//** diff --git a/storage/innobase/include/fts0types.h b/storage/innobase/include/fts0types.h index c1db160602f..55a698e8b66 100644 --- a/storage/innobase/include/fts0types.h +++ b/storage/innobase/include/fts0types.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -277,8 +277,6 @@ struct fts_doc_t { st_mysql_ftparser* parser; /*!< fts plugin parser */ - bool is_ngram; /*!< Whether it is a ngram parser */ - ib_rbt_t* stopwords; /*!< Stopwords */ }; diff --git a/storage/innobase/include/fts0types.ic b/storage/innobase/include/fts0types.ic index 417a1010919..a8712751412 100644 --- a/storage/innobase/include/fts0types.ic +++ b/storage/innobase/include/fts0types.ic @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -115,19 +116,14 @@ bool fts_is_charset_cjk( const CHARSET_INFO* cs) { - if (strcmp(cs->name, "gb2312_chinese_ci") == 0 - || strcmp(cs->name, "gbk_chinese_ci") == 0 - || strcmp(cs->name, "big5_chinese_ci") == 0 - || strcmp(cs->name, "gb18030_chinese_ci") == 0 - || strcmp(cs->name, "ujis_japanese_ci") == 0 - || strcmp(cs->name, "sjis_japanese_ci") == 0 - || strcmp(cs->name, "cp932_japanese_ci") == 0 - || strcmp(cs->name, "eucjpms_japanese_ci") == 0 - || strcmp(cs->name, "euckr_korean_ci") == 0) { - return(true); - } else { - return(false); - } + return cs == &my_charset_gb2312_chinese_ci + || cs == &my_charset_gbk_chinese_ci + || cs == &my_charset_big5_chinese_ci + || cs == &my_charset_ujis_japanese_ci + || cs == &my_charset_sjis_japanese_ci + || cs == &my_charset_cp932_japanese_ci + || cs == &my_charset_eucjpms_japanese_ci + || cs == &my_charset_euckr_korean_ci; } /** Select the FTS auxiliary index for the given character by range. 
diff --git a/storage/innobase/include/log0log.h b/storage/innobase/include/log0log.h index d1aae64227e..05e53e23f28 100644 --- a/storage/innobase/include/log0log.h +++ b/storage/innobase/include/log0log.h @@ -151,24 +151,24 @@ UNIV_INLINE lsn_t log_get_max_modified_age_async(void); /*================================*/ -/******************************************************//** -Initializes the log. */ +/** Initializes the redo logging subsystem. */ void -log_init(void); -/*==========*/ -/******************************************************************//** -Inits a log group to the log system. -@return true if success, false if not */ -MY_ATTRIBUTE((warn_unused_result)) +log_sys_init(); + +/** Initialize the redo log. +@param[in] n_files number of files +@param[in] file_size file size in bytes */ +void +log_init(ulint n_files, lsn_t file_size); +/** Calculate the recommended highest values for lsn - last_checkpoint_lsn +and lsn - buf_get_oldest_modification(). +@retval true on success +@retval false if the smallest log group is too small to +accommodate the number of OS threads in the database server */ bool -log_group_init( -/*===========*/ - ulint id, /*!< in: group id */ - ulint n_files, /*!< in: number of log files */ - lsn_t file_size, /*!< in: log file size in bytes */ - ulint space_id); /*!< in: space id of the file space - which contains the log files of this - group */ +log_set_capacity() + MY_ATTRIBUTE((warn_unused_result)); + /******************************************************//** Completes an i/o to a log file. */ void @@ -552,16 +552,12 @@ Currently, this is only protected by log_sys->mutex. However, in the case of log_write_up_to(), we will access some members only with the protection of log_sys->write_mutex, which should affect nothing for now. 
*/ struct log_group_t{ - /** log group identifier (always 0) */ - ulint id; /** number of files in the group */ ulint n_files; /** format of the redo log: e.g., LOG_HEADER_FORMAT_CURRENT */ ulint format; /** individual log file size in bytes, including the header */ - lsn_t file_size - /** file space which implements the log group */; - ulint space_id; + lsn_t file_size; /** corruption status */ log_group_state_t state; /** lsn used to fix coordinates within the log group */ @@ -580,8 +576,6 @@ struct log_group_t{ byte* checkpoint_buf_ptr; /** buffer for writing a checkpoint header */ byte* checkpoint_buf; - /** list of log groups */ - UT_LIST_NODE_T(log_group_t) log_groups; /** @return whether the redo log is encrypted */ bool is_encrypted() const @@ -639,8 +633,8 @@ struct log_t{ max_checkpoint_age; this flag is peeked at by log_free_check(), which does not reserve the log mutex */ - UT_LIST_BASE_NODE_T(log_group_t) - log_groups; /*!< log groups */ + /** the redo log */ + log_group_t log; /** The fields involved in the log buffer flush @{ */ @@ -729,7 +723,7 @@ struct log_t{ /** @return whether the redo log is encrypted */ bool is_encrypted() const { - return(UT_LIST_GET_FIRST(log_groups)->is_encrypted()); + return(log.is_encrypted()); } }; diff --git a/storage/innobase/include/log0log.ic b/storage/innobase/include/log0log.ic index 82a94265776..1e09c263975 100644 --- a/storage/innobase/include/log0log.ic +++ b/storage/innobase/include/log0log.ic @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -479,11 +480,11 @@ void log_free_check(void) /*================*/ { -#ifdef UNIV_DEBUG /* During row_log_table_apply(), this function will be called while we are holding some latches. This is OK, as long as we are not holding any latches on buffer blocks. */ +#ifdef UNIV_DEBUG static const latch_level_t latches[] = { SYNC_DICT, /* dict_sys->mutex during commit_try_rebuild() */ @@ -491,13 +492,12 @@ log_free_check(void) commit_try_rebuild() */ SYNC_INDEX_TREE /* index->lock */ }; - - sync_allowed_latches check( - latches, latches + sizeof(latches)/sizeof(*latches)); - - ut_ad(!sync_check_iterate(check)); #endif /* UNIV_DEBUG */ + ut_ad(!sync_check_iterate( + sync_allowed_latches(latches, + latches + UT_ARR_SIZE(latches)))); + if (log_sys->check_flush_or_checkpoint) { log_check_margins(); diff --git a/storage/innobase/include/log0recv.h b/storage/innobase/include/log0recv.h index 74ea6c95036..784699279d4 100644 --- a/storage/innobase/include/log0recv.h +++ b/storage/innobase/include/log0recv.h @@ -41,6 +41,13 @@ Created 9/20/1997 Heikki Tuuri /** @return whether recovery is currently running. */ #define recv_recovery_is_on() recv_recovery_on +/** Find the latest checkpoint in the log header. +@param[out] max_field LOG_CHECKPOINT_1 or LOG_CHECKPOINT_2 +@return error code or DB_SUCCESS */ +dberr_t +recv_find_max_checkpoint(ulint* max_field) + MY_ATTRIBUTE((nonnull, warn_unused_result)); + /** Apply the hashed log records to the page, if the page lsn is less than the lsn of a log record. 
@param just_read_in whether the page recently arrived to the I/O handler diff --git a/storage/innobase/include/mach0data.ic b/storage/innobase/include/mach0data.ic index 34d375aa1e8..c89e4960480 100644 --- a/storage/innobase/include/mach0data.ic +++ b/storage/innobase/include/mach0data.ic @@ -827,13 +827,13 @@ mach_swap_byte_order( dest += len; switch (len & 0x7) { - case 0: *--dest = *from++; - case 7: *--dest = *from++; - case 6: *--dest = *from++; - case 5: *--dest = *from++; - case 4: *--dest = *from++; - case 3: *--dest = *from++; - case 2: *--dest = *from++; + case 0: *--dest = *from++; /* fall through */ + case 7: *--dest = *from++; /* fall through */ + case 6: *--dest = *from++; /* fall through */ + case 5: *--dest = *from++; /* fall through */ + case 4: *--dest = *from++; /* fall through */ + case 3: *--dest = *from++; /* fall through */ + case 2: *--dest = *from++; /* fall through */ case 1: *--dest = *from; } } diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h index 452f6a0b64f..1852dbdf1df 100644 --- a/storage/innobase/include/os0file.h +++ b/storage/innobase/include/os0file.h @@ -1,6 +1,6 @@ /*********************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2009, Percona Inc. Copyright (c) 2013, 2017, MariaDB Corporation. @@ -89,6 +89,30 @@ typedef int os_file_t; static const os_file_t OS_FILE_CLOSED = os_file_t(~0); +/** File descriptor with optional PERFORMANCE_SCHEMA instrumentation */ +struct pfs_os_file_t +{ + /** Default constructor */ + pfs_os_file_t(os_file_t file = OS_FILE_CLOSED) : m_file(file) +#ifdef UNIV_PFS_IO + , m_psi(NULL) +#endif + {} + + /** The wrapped file handle */ + os_file_t m_file; +#ifdef UNIV_PFS_IO + /** PERFORMANCE_SCHEMA descriptor */ + struct PSI_file *m_psi; +#endif + /** Implicit type conversion. 
+ @return the wrapped file handle */ + operator os_file_t() const { return m_file; } + /** Assignment operator. + @param[in] file file handle to be assigned */ + void operator=(os_file_t file) { m_file = file; } +}; + /** The next value should be smaller or equal to the smallest sector size used on any disk. A log block is required to be a portion of disk which is written so that if the start and the end of a block get written to disk, then the @@ -583,7 +607,7 @@ A simple function to open or create a file. @param[out] success true if succeed, false if error @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ -os_file_t +pfs_os_file_t os_file_create_simple_func( const char* name, ulint create_mode, @@ -603,7 +627,7 @@ A simple function to open or create a file. @param[out] success true if succeeded @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ -os_file_t +pfs_os_file_t os_file_create_simple_no_error_handling_func( const char* name, ulint create_mode, @@ -641,7 +665,7 @@ Opens an existing file or creates a new. @param[in] success true if succeeded @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ -os_file_t +pfs_os_file_t os_file_create_func( const char* name, ulint create_mode, @@ -694,6 +718,8 @@ extern mysql_pfs_key_t innodb_temp_file_key; various file I/O operations with performance schema. 1) register_pfs_file_open_begin() and register_pfs_file_open_end() are used to register file creation, opening, closing and renaming. 
+2) register_pfs_file_rename_begin() and register_pfs_file_rename_end() +are used to register file renaming 2) register_pfs_file_io_begin() and register_pfs_file_io_end() are used to register actual file read, write and flush 3) register_pfs_file_close_begin() and register_pfs_file_close_end() @@ -709,11 +735,23 @@ do { \ } \ } while (0) -# define register_pfs_file_open_end(locker, file) \ +# define register_pfs_file_open_end(locker, file, result) \ do { \ if (locker != NULL) { \ - PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)(\ - locker, file); \ + file.m_psi = PSI_FILE_CALL(end_file_open_wait)( \ + locker, result); \ + } \ +} while (0) + +# define register_pfs_file_rename_begin(state, locker, key, op, name, \ + src_file, src_line) \ + register_pfs_file_open_begin(state, locker, key, op, name, \ + src_file, src_line) \ + +# define register_pfs_file_rename_end(locker, result) \ +do { \ + if (locker != NULL) { \ + PSI_FILE_CALL(end_file_open_wait)(locker, result); \ } \ } while (0) @@ -739,8 +777,8 @@ do { \ # define register_pfs_file_io_begin(state, locker, file, count, op, \ src_file, src_line) \ do { \ - locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( \ - state, file, op); \ + locker = PSI_FILE_CALL(get_thread_file_stream_locker)( \ + state, file.m_psi, op); \ if (locker != NULL) { \ PSI_FILE_CALL(start_file_wait)( \ locker, count, src_file, src_line); \ @@ -766,7 +804,9 @@ os_file_rename os_aio os_file_read os_file_read_no_error_handling +os_file_read_no_error_handling_int_fd os_file_write +os_file_write_int_fd The wrapper functions have the prefix of "innodb_". */ @@ -802,11 +842,19 @@ The wrapper functions have the prefix of "innodb_". 
*/ pfs_os_file_read_no_error_handling_func( \ type, file, buf, offset, n, o, __FILE__, __LINE__) +# define os_file_read_no_error_handling_int_fd(type, file, buf, offset, n) \ + pfs_os_file_read_no_error_handling_int_fd_func( \ + type, file, buf, offset, n, __FILE__, __LINE__) + # define os_file_write(type, name, file, buf, offset, n) \ pfs_os_file_write_func(type, name, file, buf, offset, \ - n,__FILE__, __LINE__) + n, __FILE__, __LINE__) + +# define os_file_write_int_fd(type, name, file, buf, offset, n) \ + pfs_os_file_write_int_fd_func(type, name, file, buf, offset, \ + n, __FILE__, __LINE__) -# define os_file_flush(file) \ +# define os_file_flush(file) \ pfs_os_file_flush_func(file, __FILE__, __LINE__) # define os_file_rename(key, oldpath, newpath) \ @@ -834,7 +882,7 @@ os_file_create_simple() which opens or creates a file. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_simple_func( mysql_pfs_key_t key, const char* name, @@ -865,7 +913,7 @@ monitor file creation/open. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_simple_no_error_handling_func( mysql_pfs_key_t key, const char* name, @@ -898,7 +946,7 @@ Add instrumentation to monitor file creation/open. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_func( mysql_pfs_key_t key, const char* name, @@ -921,7 +969,7 @@ A performance schema instrumented wrapper function for os_file_close(). 
UNIV_INLINE bool pfs_os_file_close_func( - os_file_t file, + pfs_os_file_t file, const char* src_file, uint src_line); @@ -941,7 +989,7 @@ UNIV_INLINE dberr_t pfs_os_file_read_func( IORequest& type, - os_file_t file, + pfs_os_file_t file, void* buf, os_offset_t offset, ulint n, @@ -966,7 +1014,7 @@ UNIV_INLINE dberr_t pfs_os_file_read_no_error_handling_func( IORequest& type, - os_file_t file, + pfs_os_file_t file, void* buf, os_offset_t offset, ulint n, @@ -1002,7 +1050,7 @@ pfs_os_aio_func( IORequest& type, ulint mode, const char* name, - os_file_t file, + pfs_os_file_t file, void* buf, os_offset_t offset, ulint n, @@ -1031,7 +1079,7 @@ dberr_t pfs_os_file_write_func( IORequest& type, const char* name, - os_file_t file, + pfs_os_file_t file, const void* buf, os_offset_t offset, ulint n, @@ -1050,7 +1098,7 @@ Flushes the write buffers of a given file to the disk. UNIV_INLINE bool pfs_os_file_flush_func( - os_file_t file, + pfs_os_file_t file, const char* src_file, uint src_line); @@ -1142,9 +1190,12 @@ to original un-instrumented file I/O APIs */ # define os_file_read_no_error_handling(type, file, buf, offset, n, o) \ os_file_read_no_error_handling_func(type, file, buf, offset, n, o) +# define os_file_read_no_error_handling_int_fd(type, file, buf, offset, n) \ + os_file_read_no_error_handling_func(type, file, buf, offset, n, NULL) # define os_file_write(type, name, file, buf, offset, n) \ os_file_write_func(type, name, file, buf, offset, n) +# define os_file_write_int_fd os_file_write_func # define os_file_flush(file) os_file_flush_func(file) @@ -1400,7 +1451,7 @@ os_aio_func( IORequest& type, ulint mode, const char* name, - os_file_t file, + pfs_os_file_t file, void* buf, os_offset_t offset, ulint n, @@ -1536,19 +1587,6 @@ os_is_sparse_file_supported( @return DB_SUCCESS or error code */ dberr_t os_file_punch_hole( - IORequest& type, - os_file_t fh, - os_offset_t off, - os_offset_t len) - MY_ATTRIBUTE((warn_unused_result)); - -/** Free storage space associated with 
a section of the file. -@param[in] fh Open file handle -@param[in] off Starting offset (SEEK_SET) -@param[in] len Size of the hole -@return DB_SUCCESS or error code */ -dberr_t -os_file_punch_hole( os_file_t fh, os_offset_t off, os_offset_t len) diff --git a/storage/innobase/include/os0file.ic b/storage/innobase/include/os0file.ic index 807d3254b9d..07c2b6212ed 100644 --- a/storage/innobase/include/os0file.ic +++ b/storage/innobase/include/os0file.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2010, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2010, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2013, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under @@ -43,7 +43,7 @@ os_file_create_simple() which opens or creates a file. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_simple_func( mysql_pfs_key_t key, const char* name, @@ -64,11 +64,12 @@ pfs_os_file_create_simple_func( ? PSI_FILE_CREATE : PSI_FILE_OPEN, name, src_file, src_line); - os_file_t file = os_file_create_simple_func( + pfs_os_file_t file = os_file_create_simple_func( name, create_mode, access_type, read_only, success); - /* Regsiter the returning "file" value with the system */ - register_pfs_file_open_end(locker, file); + /* Register psi value for the file */ + register_pfs_file_open_end(locker, file, + (*success == TRUE ? success : 0)); return(file); } @@ -92,7 +93,7 @@ monitor file creation/open. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_simple_no_error_handling_func( mysql_pfs_key_t key, const char* name, @@ -113,10 +114,11 @@ pfs_os_file_create_simple_no_error_handling_func( ? 
PSI_FILE_CREATE : PSI_FILE_OPEN, name, src_file, src_line); - os_file_t file = os_file_create_simple_no_error_handling_func( + pfs_os_file_t file = os_file_create_simple_no_error_handling_func( name, create_mode, access_type, read_only, success); - register_pfs_file_open_end(locker, file); + register_pfs_file_open_end(locker, file, + (*success == TRUE ? success : 0)); return(file); } @@ -142,7 +144,7 @@ Add instrumentation to monitor file creation/open. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_func( mysql_pfs_key_t key, const char* name, @@ -164,10 +166,11 @@ pfs_os_file_create_func( ? PSI_FILE_CREATE : PSI_FILE_OPEN, name, src_file, src_line); - os_file_t file = os_file_create_func( + pfs_os_file_t file = os_file_create_func( name, create_mode, purpose, type, read_only, success); - register_pfs_file_open_end(locker, file); + register_pfs_file_open_end(locker, file, + (*success == TRUE ? success : 0)); return(file); } @@ -182,7 +185,7 @@ A performance schema instrumented wrapper function for os_file_close(). UNIV_INLINE bool pfs_os_file_close_func( - os_file_t file, + pfs_os_file_t file, const char* src_file, uint src_line) { @@ -228,7 +231,7 @@ pfs_os_aio_func( IORequest& type, ulint mode, const char* name, - os_file_t file, + pfs_os_file_t file, void* buf, os_offset_t offset, ulint n, @@ -273,7 +276,7 @@ UNIV_INLINE dberr_t pfs_os_file_read_func( IORequest& type, - os_file_t file, + pfs_os_file_t file, void* buf, os_offset_t offset, ulint n, @@ -315,7 +318,7 @@ UNIV_INLINE dberr_t pfs_os_file_read_no_error_handling_func( IORequest& type, - os_file_t file, + pfs_os_file_t file, void* buf, os_offset_t offset, ulint n, @@ -337,6 +340,49 @@ pfs_os_file_read_no_error_handling_func( return(result); } +/** NOTE! Please use the corresponding macro +os_file_read_no_error_handling_int_fd() to request +a synchronous read operation. 
+@param[in,out] type read request +@param[in] file file handle +@param[out] buf buffer where to read +@param[in] offset file offset where to read +@param[in] n number of bytes to read +@param[in] src_file caller file name +@param[in] src_line caller line number +@return whether the request was successful */ +UNIV_INLINE +bool +pfs_os_file_read_no_error_handling_int_fd_func( + IORequest& type, + int file, + void* buf, + os_offset_t offset, + ulint n, + const char* src_file, + uint src_line) +{ + PSI_file_locker_state state; + + PSI_file_locker* locker = PSI_FILE_CALL( + get_thread_file_descriptor_locker)( + &state, file, PSI_FILE_READ); + if (locker != NULL) { + PSI_FILE_CALL(start_file_wait)( + locker, n, + __FILE__, __LINE__); + } + ulint fulfilled; + bool success = DB_SUCCESS == os_file_read_no_error_handling_func( + type, OS_FILE_FROM_FD(file), buf, offset, n, &fulfilled); + + if (locker != NULL) { + PSI_FILE_CALL(end_file_wait)(locker, n); + } + + return(success); +} + /** NOTE! Please use the corresponding macro os_file_write(), not directly this function! This is the performance schema instrumented wrapper function for @@ -350,13 +396,14 @@ os_file_write() which requests a synchronous write operation. @param[in] n number of bytes to read @param[in] src_file file name where func invoked @param[in] src_line line where the func invoked -@return DB_SUCCESS if request was successful */ +@return error code +@retval DB_SUCCESS if the request was successfully fulfilled */ UNIV_INLINE dberr_t pfs_os_file_write_func( IORequest& type, const char* name, - os_file_t file, + pfs_os_file_t file, const void* buf, os_offset_t offset, ulint n, @@ -378,6 +425,52 @@ pfs_os_file_write_func( return(result); } +/** NOTE! Please use the corresponding macro os_file_write_int_fd(), +not directly this function! +This is the performance schema instrumented wrapper function for +os_file_write_int_fd() which requests a synchronous write operation. 
+@param[in,out] type write request +@param[in] name file name +@param[in] file file handle +@param[in] buf buffer to write +@param[in] offset file offset +@param[in] n number of bytes +@param[in] src_file file name where func invoked +@param[in] src_line line where the func invoked +@return whether the request was successful */ +UNIV_INLINE +bool +pfs_os_file_write_int_fd_func( + IORequest& type, + const char* name, + int file, + const void* buf, + os_offset_t offset, + ulint n, + const char* src_file, + uint src_line) +{ + PSI_file_locker_state state; + struct PSI_file_locker* locker; + + locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( + &state, file, PSI_FILE_WRITE); + if (locker != NULL) { + PSI_FILE_CALL(start_file_wait)( + locker, n, + __FILE__, __LINE__); + } + + bool success = DB_SUCCESS == os_file_write_func( + type, name, OS_FILE_FROM_FD(file), buf, offset, n); + + if (locker != NULL) { + PSI_FILE_CALL(end_file_wait)(locker, n); + } + + return(success); +} + /** NOTE! Please use the corresponding macro os_file_flush(), not directly this function! This is the performance schema instrumented wrapper function for @@ -390,7 +483,7 @@ Flushes the write buffers of a given file to the disk. UNIV_INLINE bool pfs_os_file_flush_func( - os_file_t file, + pfs_os_file_t file, const char* src_file, uint src_line) { @@ -436,7 +529,7 @@ pfs_os_file_rename_func( bool result = os_file_rename_func(oldpath, newpath); - register_pfs_file_open_end(locker, 0); + register_pfs_file_rename_end(locker, 0); return(result); } diff --git a/storage/innobase/include/os0thread.h b/storage/innobase/include/os0thread.h index 6f521b5a2ec..071e7422894 100644 --- a/storage/innobase/include/os0thread.h +++ b/storage/innobase/include/os0thread.h @@ -151,23 +151,10 @@ os_thread_sleep( /*============*/ ulint tm); /*!< in: time in microseconds */ -/** -Initializes OS thread management data structures. 
*/ -void -os_thread_init(); -/*============*/ - -/** -Frees OS thread management data structures. */ -void -os_thread_free(); -/*============*/ - /*****************************************************************//** Check if there are threads active. @return true if the thread count > 0. */ bool os_thread_active(); -/*==============*/ #endif diff --git a/storage/innobase/include/page0zip.ic b/storage/innobase/include/page0zip.ic index 5f754e1f993..fa03279f9bc 100644 --- a/storage/innobase/include/page0zip.ic +++ b/storage/innobase/include/page0zip.ic @@ -164,7 +164,8 @@ page_zip_rec_needs_ext( ulint n_fields, const page_size_t& page_size) { - ut_ad(rec_size > comp ? REC_N_NEW_EXTRA_BYTES : REC_N_OLD_EXTRA_BYTES); + ut_ad(rec_size + > ulint(comp ? REC_N_NEW_EXTRA_BYTES : REC_N_OLD_EXTRA_BYTES)); ut_ad(comp || !page_size.is_compressed()); #if UNIV_PAGE_SIZE_MAX > REC_MAX_DATA_SIZE diff --git a/storage/innobase/include/rem0cmp.h b/storage/innobase/include/rem0cmp.h index 245fefae944..216e3a7655b 100644 --- a/storage/innobase/include/rem0cmp.h +++ b/storage/innobase/include/rem0cmp.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -62,7 +63,7 @@ cmp_data_data( ulint len1, const byte* data2, ulint len2) - MY_ATTRIBUTE((nonnull, warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /** Compare two data fields. 
@param[in] dfield1 data field; must have type field set diff --git a/storage/innobase/include/row0merge.h b/storage/innobase/include/row0merge.h index 1b61c475c6f..50c3361a3f9 100644 --- a/storage/innobase/include/row0merge.h +++ b/storage/innobase/include/row0merge.h @@ -129,7 +129,6 @@ struct index_def_t { index_field_t* fields; /*!< field definitions */ st_mysql_ftparser* parser; /*!< fulltext parser plugin */ - bool is_ngram; /*!< true if it's ngram parser */ }; /** Structure for reporting duplicate records. */ @@ -195,7 +194,7 @@ row_merge_drop_temp_indexes(void); /** Create temporary merge files in the given paramater path, and if UNIV_PFS_IO defined, register the file descriptor with Performance Schema. -@param[in] path location for creating temporary merge files. +@param[in] path location for creating temporary merge files, or NULL @return File descriptor */ int row_merge_file_create_low( @@ -398,13 +397,13 @@ row_merge_buf_empty( /** Create a merge file in the given location. @param[out] merge_file merge file structure -@param[in] path location for creating temporary file +@param[in] path location for creating temporary file, or NULL @return file descriptor, or -1 on failure */ int row_merge_file_create( merge_file_t* merge_file, const char* path) - MY_ATTRIBUTE((warn_unused_result, nonnull)); + MY_ATTRIBUTE((warn_unused_result, nonnull(1))); /** Merge disk files. @param[in] trx transaction @@ -464,10 +463,9 @@ row_merge_file_destroy( merge_file_t* merge_file) /*!< in/out: merge file structure */ MY_ATTRIBUTE((nonnull)); -/********************************************************************//** -Read a merge block from the file system. -@return TRUE if request was successful, FALSE if fail */ -ibool +/** Read a merge block from the file system. 
+@return whether the request was successful */ +bool row_merge_read( /*===========*/ int fd, /*!< in: file descriptor */ diff --git a/storage/innobase/include/row0mysql.h b/storage/innobase/include/row0mysql.h index 7507c96ea5f..6164366628e 100644 --- a/storage/innobase/include/row0mysql.h +++ b/storage/innobase/include/row0mysql.h @@ -204,6 +204,7 @@ row_update_prebuilt_trx( row_prebuilt_t* prebuilt, /*!< in/out: prebuilt struct in MySQL handle */ trx_t* trx); /*!< in: transaction handle */ + /*********************************************************************//** Sets an AUTO_INC type lock on the table mentioned in prebuilt. The AUTO_INC lock gives exclusive access to the auto-inc counter of the diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h index 8010bcea25c..344507aae20 100644 --- a/storage/innobase/include/srv0srv.h +++ b/storage/innobase/include/srv0srv.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All rights reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2008, 2009, Google Inc. Copyright (c) 2009, Percona Inc. Copyright (c) 2013, 2017, MariaDB Corporation. diff --git a/storage/innobase/include/srv0start.h b/storage/innobase/include/srv0start.h index 4f2f4a312ff..47b42725541 100644 --- a/storage/innobase/include/srv0start.h +++ b/storage/innobase/include/srv0start.h @@ -1,7 +1,7 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -38,43 +38,20 @@ struct dict_table_t; only one buffer pool instance is used. */ #define BUF_POOL_SIZE_THRESHOLD (1024 * 1024 * 1024) -/*********************************************************************//** -Parse temporary tablespace configuration. -@return true if ok, false on parse error */ -bool -srv_parse_temp_data_file_paths_and_sizes( -/*=====================================*/ - char* str); /*!< in/out: the data file path string */ -/*********************************************************************//** -Frees the memory allocated by srv_parse_data_file_paths_and_sizes() -and srv_parse_log_group_home_dirs(). */ -void -srv_free_paths_and_sizes(void); -/*==========================*/ -/*********************************************************************//** -Adds a slash or a backslash to the end of a string if it is missing -and the string is not empty. -@return string which has the separator if the string is not empty */ -char* -srv_add_path_separator_if_needed( -/*=============================*/ - char* str); /*!< in: null-terminated character string */ - /****************************************************************//** Starts Innobase and creates a new database if database files are not found and the user wants. @return DB_SUCCESS or error code */ dberr_t -innobase_start_or_create_for_mysql(void); -/*====================================*/ +innobase_start_or_create_for_mysql(); + /** Shut down InnoDB. */ void innodb_shutdown(); -/****************************************************************//** -Shuts down background threads that can generate undo pages. */ +/** Shut down background threads that can generate undo log. 
*/ void -srv_shutdown_bg_undo_sources(void); +srv_shutdown_bg_undo_sources(); /*************************************************************//** Copy the file path component of the physical file to parameter. It will @@ -128,6 +105,22 @@ extern bool srv_startup_is_before_trx_rollback_phase; /** TRUE if a raw partition is in use */ extern ibool srv_start_raw_disk_in_use; +/** Undo tablespaces starts with space_id. */ +extern ulint srv_undo_space_id_start; + +/** Check whether given space id is undo tablespace id +@param[in] space_id space id to check +@return true if it is undo tablespace else false. */ +inline +bool +srv_is_undo_tablespace(ulint space_id) +{ + return srv_undo_space_id_start > 0 + && space_id >= srv_undo_space_id_start + && space_id < (srv_undo_space_id_start + + srv_undo_tablespaces_open); +} + /** Shutdown state */ enum srv_shutdown_t { SRV_SHUTDOWN_NONE = 0, /*!< Database running normally */ @@ -144,6 +137,9 @@ enum srv_shutdown_t { SRV_SHUTDOWN_EXIT_THREADS/*!< Exit all threads */ }; +/** Whether any undo log records can be generated */ +extern bool srv_undo_sources; + /** At a shutdown this value climbs from SRV_SHUTDOWN_NONE to SRV_SHUTDOWN_CLEANUP and then to SRV_SHUTDOWN_LAST_PHASE, and so on */ extern enum srv_shutdown_t srv_shutdown_state; diff --git a/storage/innobase/include/sync0debug.h b/storage/innobase/include/sync0debug.h index ba697b70e13..ecc742918f0 100644 --- a/storage/innobase/include/sync0debug.h +++ b/storage/innobase/include/sync0debug.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2013, 2015, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. 
Those modifications are gratefully acknowledged and are described @@ -84,10 +84,10 @@ sync_check_find(latch_level_t level); /** Checks that the level array for the current thread is empty. Terminate iteration if the functor returns true. -@param[in,out] functor called for each element. -@return true if the functor returns true */ +@param[in] functor called for each element. +@return true if the functor returns true for any element */ bool -sync_check_iterate(sync_check_functor_t& functor); +sync_check_iterate(const sync_check_functor_t& functor); /** Acquires the debug mutex. We cannot use the mutex defined in sync0sync, because the debug mutex is also acquired in sync0arr while holding the OS diff --git a/storage/innobase/include/sync0types.h b/storage/innobase/include/sync0types.h index 044a36f0c63..b2a82ec5fe7 100644 --- a/storage/innobase/include/sync0types.h +++ b/storage/innobase/include/sync0types.h @@ -352,7 +352,6 @@ enum latch_id_t { LATCH_ID_EVENT_MANAGER, LATCH_ID_EVENT_MUTEX, LATCH_ID_SYNC_ARRAY_MUTEX, - LATCH_ID_THREAD_MUTEX, LATCH_ID_ZIP_PAD_MUTEX, LATCH_ID_OS_AIO_READ_MUTEX, LATCH_ID_OS_AIO_WRITE_MUTEX, @@ -1077,108 +1076,43 @@ struct latch_t { /** Subclass this to iterate over a thread's acquired latch levels. */ struct sync_check_functor_t { - virtual ~sync_check_functor_t() { } - virtual bool operator()(const latch_level_t) = 0; - virtual bool result() const = 0; + virtual bool operator()(const latch_level_t) const = 0; }; -#ifdef BTR_CUR_HASH_ADAPT -/** Functor to check whether the calling thread owns the btr search mutex. */ -struct btrsea_sync_check : public sync_check_functor_t { - - /** Constructor - @param[in] has_search_latch true if owns the latch */ - explicit btrsea_sync_check(bool has_search_latch) - : - m_result(), - m_has_search_latch(has_search_latch) { } - - /** Destructor */ - virtual ~btrsea_sync_check() { } - - /** Called for every latch owned by the calling thread. 
- @param[in] level Level of the existing latch - @return true if the predicate check is successful */ - virtual bool operator()(const latch_level_t level) - { - /* If calling thread doesn't hold search latch then - check if there are latch level exception provided. */ - - if (!m_has_search_latch - && (level != SYNC_SEARCH_SYS - && level != SYNC_FTS_CACHE)) { - - m_result = true; - - return(m_result); - } - - return(false); - } - - /** @return result from the check */ - virtual bool result() const - { - return(m_result); - } - -private: - /** True if all OK */ - bool m_result; - - /** If the caller owns the search latch */ - const bool m_has_search_latch; -}; -#endif /* BTR_CUR_HASH_ADAPT */ - -/** Functor to check for dictionay latching constraints. */ -struct dict_sync_check : public sync_check_functor_t { - - /** Constructor - @param[in] dict_mutex_allow true if the dict mutex - is allowed */ - explicit dict_sync_check(bool dict_mutex_allowed) - : - m_result(), - m_dict_mutex_allowed(dict_mutex_allowed) { } - - /** Destructor */ - virtual ~dict_sync_check() { } - +/** Check that no latch is being held. +@tparam some_allowed whether some latches are allowed to be held */ +template<bool some_allowed = false> +struct sync_checker : public sync_check_functor_t +{ /** Check the latching constraints - @param[in] level The level held by the thread */ - virtual bool operator()(const latch_level_t level) + @param[in] level The level held by the thread + @return whether a latch violation was detected */ + bool operator()(const latch_level_t level) const { - if (!m_dict_mutex_allowed - || (level != SYNC_DICT - && level != SYNC_DICT_OPERATION - && level != SYNC_FTS_CACHE - /* This only happens in recv_apply_hashed_log_recs. */ - && level != SYNC_RECV_WRITER - && level != SYNC_NO_ORDER_CHECK)) { - - m_result = true; - - return(true); + if (some_allowed) { + switch (level) { + case SYNC_RECV_WRITER: + /* This only happens in + recv_apply_hashed_log_recs. 
*/ + case SYNC_DICT: + case SYNC_DICT_OPERATION: + case SYNC_FTS_CACHE: + case SYNC_NO_ORDER_CHECK: + return(false); + default: + return(true); + } } - return(false); - } - - /** @return the result of the check */ - virtual bool result() const - { - return(m_result); + return(true); } - -private: - /** True if all OK */ - bool m_result; - - /** True if it is OK to hold the dict mutex */ - const bool m_dict_mutex_allowed; }; +/** The strict latch checker (no InnoDB latches may be held) */ +typedef struct sync_checker<false> sync_check; +/** The sloppy latch checker (can hold InnoDB dictionary or SQL latches) */ +typedef struct sync_checker<true> dict_sync_check; + /** Functor to check for given latching constraints. */ struct sync_allowed_latches : public sync_check_functor_t { @@ -1188,9 +1122,7 @@ struct sync_allowed_latches : public sync_check_functor_t { sync_allowed_latches( const latch_level_t* from, const latch_level_t* to) - : - m_result(), - m_latches(from, to) { } + : begin(from), end(to) { } /** Checks whether the given latch_t violates the latch constraint. This object maintains a list of allowed latch levels, and if the given @@ -1198,41 +1130,17 @@ struct sync_allowed_latches : public sync_check_functor_t { then it is a violation. 
@param[in] latch The latch level to check - @return true if there is a latch ordering violation */ - virtual bool operator()(const latch_level_t level) + @return true if there is a latch violation */ + bool operator()(const latch_level_t level) const { - for (latches_t::const_iterator it = m_latches.begin(); - it != m_latches.end(); - ++it) { - - if (level == *it) { - - m_result = false; - - /* No violation */ - return(false); - } - } - - return(true); - } - - /** @return the result of the check */ - virtual bool result() const - { - return(m_result); + return(std::find(begin, end, level) == end); } private: - /** Save the result of validation check here - True if all OK */ - bool m_result; - - typedef std::vector<latch_level_t, ut_allocator<latch_level_t> > - latches_t; - - /** List of latch levels that are allowed to be held */ - latches_t m_latches; + /** First element in an array of allowed latch levels */ + const latch_level_t* const begin; + /** First element after the end of the array of allowed latch levels */ + const latch_level_t* const end; }; /** Get the latch id from a latch name. @@ -1282,7 +1190,10 @@ struct MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) simple_counter { compile_time_assert(!atomic || sizeof(Type) == sizeof(lint)); if (atomic) { - return Type(my_atomic_addlint(&m_counter, i)); + /* Silence MSVS warnings when instantiating + this template with atomic=false. */ + return Type(my_atomic_addlint(reinterpret_cast<lint*> + (&m_counter), i)); } else { return m_counter += i; } diff --git a/storage/innobase/include/trx0i_s.h b/storage/innobase/include/trx0i_s.h index 17a297527af..e02c5d88a29 100644 --- a/storage/innobase/include/trx0i_s.h +++ b/storage/innobase/include/trx0i_s.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -162,10 +163,6 @@ struct i_s_trx_row_t { /*!< check_foreigns in trx_t */ const char* trx_foreign_key_error; /*!< detailed_error in trx_t */ -#ifdef BTR_CUR_HASH_ADAPT - ibool trx_has_search_latch; - /*!< has_search_latch in trx_t */ -#endif /* BTR_CUR_HASH_ADAPT */ ulint trx_is_read_only; /*!< trx_t::read_only */ ulint trx_is_autocommit_non_locking; diff --git a/storage/innobase/include/trx0rec.h b/storage/innobase/include/trx0rec.h index 50304ce3631..3b4a195735b 100644 --- a/storage/innobase/include/trx0rec.h +++ b/storage/innobase/include/trx0rec.h @@ -188,28 +188,28 @@ transaction. dberr_t trx_undo_report_row_operation( /*==========================*/ - ulint flags, /*!< in: if BTR_NO_UNDO_LOG_FLAG bit is - set, does nothing */ - ulint op_type, /*!< in: TRX_UNDO_INSERT_OP or - TRX_UNDO_MODIFY_OP */ que_thr_t* thr, /*!< in: query thread */ dict_index_t* index, /*!< in: clustered index */ const dtuple_t* clust_entry, /*!< in: in the case of an insert, index entry to insert into the - clustered index, otherwise NULL */ + clustered index; in updates, + may contain a clustered index + record tuple that also contains + virtual columns of the table; + otherwise, NULL */ const upd_t* update, /*!< in: in the case of an update, the update vector, otherwise NULL */ ulint cmpl_info, /*!< in: compiler info on secondary index updates */ const rec_t* rec, /*!< in: case of an update or delete marking, the record in the clustered - index, otherwise NULL */ + index; NULL if insert */ const ulint* offsets, /*!< in: rec_get_offsets(rec) */ roll_ptr_t* roll_ptr) /*!< out: rollback pointer to the inserted undo log record, 0 if BTR_NO_UNDO_LOG flag was specified */ - MY_ATTRIBUTE((nonnull(3,4,10), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,2,8), warn_unused_result)); /** status bit used for trx_undo_prev_version_build() */ 
@@ -341,10 +341,6 @@ record */ storage fields: used by purge to free the external storage */ -/* Operation type flags used in trx_undo_report_row_operation */ -#define TRX_UNDO_INSERT_OP 1U -#define TRX_UNDO_MODIFY_OP 2U - #include "trx0rec.ic" #endif /* trx0rec_h */ diff --git a/storage/innobase/include/trx0rseg.h b/storage/innobase/include/trx0rseg.h index 4c162526384..3078aa8faf1 100644 --- a/storage/innobase/include/trx0rseg.h +++ b/storage/innobase/include/trx0rseg.h @@ -110,9 +110,12 @@ void trx_rseg_mem_free(trx_rseg_t* rseg); /** Create a persistent rollback segment. -@param[in] space_id system or undo tablespace id */ +@param[in] space_id system or undo tablespace id +@return pointer to new rollback segment +@retval NULL on failure */ trx_rseg_t* -trx_rseg_create(ulint space_id); +trx_rseg_create(ulint space_id) + MY_ATTRIBUTE((warn_unused_result)); /** Create the temporary rollback segments. */ void diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index 75753b53467..b2d4952318c 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2016, 2017, MariaDB Corporation. +Copyright (c) 2015, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -58,15 +58,6 @@ class FlushObserver; /** Dummy session used currently in MySQL interface */ extern sess_t* trx_dummy_sess; -#ifdef BTR_CUR_HASH_ADAPT -/** Assert that the transaction is not holding the adaptive hash index latch. 
-@param[in] trx transaction */ -# define trx_assert_no_search_latch(trx) \ - ut_ad(!trx->has_search_latch) -#else /* BTR_CUR_HASH_ADAPT */ -# define trx_assert_no_search_latch(trx) -#endif - /** Set flush observer for the transaction @param[in/out] trx transaction struct @param[in] observer flush observer */ @@ -1072,11 +1063,6 @@ struct trx_t { flush the log in trx_commit_complete_for_mysql() */ ulint duplicates; /*!< TRX_DUP_IGNORE | TRX_DUP_REPLACE */ -#ifdef BTR_CUR_HASH_ADAPT - bool has_search_latch; - /*!< TRUE if this trx has latched the - search system latch in S-mode */ -#endif /* BTR_CUR_HASH_ADAPT */ trx_dict_op_t dict_operation; /**< @see enum trx_dict_op_t */ /* Fields protected by the srv_conc_mutex. */ @@ -1508,17 +1494,11 @@ private: } /* Avoid excessive mutex acquire/release */ - if (++trx->in_depth > 1) { + if (trx->in_depth++) { /* The transaction is already inside InnoDB. */ - ut_ad(trx->in_depth > 1); return; } - /* Only the owning thread should release the latch. */ - - ut_ad(trx->in_depth == 1); - trx_assert_no_search_latch(trx); - trx_mutex_enter(trx); wait(trx); @@ -1543,16 +1523,10 @@ private: ut_ad(trx->in_depth > 0); - if (--trx->in_depth > 0) { - ut_ad(trx->in_depth); + if (--trx->in_depth) { return; } - /* Only the owning thread should release the latch. */ - - ut_ad(trx->in_depth == 0); - trx_assert_no_search_latch(trx); - trx_mutex_enter(trx); ut_ad((trx->in_innodb & TRX_FORCE_ROLLBACK_MASK) > 0); diff --git a/storage/innobase/include/trx0xa.h b/storage/innobase/include/trx0xa.h index b333f32cd73..4d5adc68dcd 100644 --- a/storage/innobase/include/trx0xa.h +++ b/storage/innobase/include/trx0xa.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -24,12 +24,7 @@ this program; if not, write to the Free Software Foundation, Inc., #ifndef XA_H #define XA_H -/* Missing MySQL 5.7 header */ -#ifdef HAVE_XA_H -#include "xa.h" -#else #include "handler.h" -#endif /* * Transaction branch identification: XID and NULLXID: diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i index e71ce92e1f7..bc95d386a86 100644 --- a/storage/innobase/include/univ.i +++ b/storage/innobase/include/univ.i @@ -41,7 +41,7 @@ Created 1/20/1994 Heikki Tuuri #define INNODB_VERSION_MAJOR 5 #define INNODB_VERSION_MINOR 7 -#define INNODB_VERSION_BUGFIX 14 +#define INNODB_VERSION_BUGFIX 18 /* The following is the InnoDB version as shown in SELECT plugin_version FROM information_schema.plugins; @@ -121,14 +121,7 @@ HAVE_PSI_INTERFACE is defined. */ #ifdef HAVE_PSI_INTERFACE # define UNIV_PFS_MUTEX # define UNIV_PFS_RWLOCK -/* For I/O instrumentation, performance schema rely -on a native descriptor to identify the file, this -descriptor could conflict with our OS level descriptor. -Disable IO instrumentation on Windows until this is -resolved */ -# ifndef _WIN32 -# define UNIV_PFS_IO -# endif +# define UNIV_PFS_IO # define UNIV_PFS_THREAD // JAN: TODO: MySQL 5.7 PSI @@ -191,9 +184,9 @@ command. 
*/ #define UNIV_ENABLE_UNIT_TEST_ROW_RAW_FORMAT_INT */ -#if defined HAVE_valgrind && defined HAVE_VALGRIND +#if defined HAVE_valgrind && defined HAVE_VALGRIND_MEMCHECK_H # define UNIV_DEBUG_VALGRIND -#endif /* HAVE_VALGRIND */ +#endif #ifdef DBUG_OFF # undef UNIV_DEBUG diff --git a/storage/innobase/include/ut0new.h b/storage/innobase/include/ut0new.h index 5a9022e8a77..955e7b026c7 100644 --- a/storage/innobase/include/ut0new.h +++ b/storage/innobase/include/ut0new.h @@ -235,8 +235,10 @@ struct ut_new_pfx_t { #endif }; -/** Allocator class for allocating memory from inside std::* containers. */ -template <class T> +/** Allocator class for allocating memory from inside std::* containers. +@tparam T type of allocated object +@tparam oom_fatal whether to commit suicide when running out of memory */ +template <class T, bool oom_fatal = true> class ut_allocator { public: typedef T* pointer; @@ -249,13 +251,10 @@ public: /** Default constructor. */ explicit - ut_allocator( - PSI_memory_key key = PSI_NOT_INSTRUMENTED) - : + ut_allocator(PSI_memory_key key = PSI_NOT_INSTRUMENTED) #ifdef UNIV_PFS_MEMORY - m_key(key), + : m_key(key) #endif /* UNIV_PFS_MEMORY */ - m_oom_fatal(true) { } @@ -263,30 +262,10 @@ public: template <class U> ut_allocator( const ut_allocator<U>& other) - : m_oom_fatal(other.is_oom_fatal()) - { #ifdef UNIV_PFS_MEMORY - const PSI_memory_key other_key = other.get_mem_key(NULL); - - m_key = (other_key != mem_key_std) - ? other_key - : PSI_NOT_INSTRUMENTED; + : m_key(other.m_key) #endif /* UNIV_PFS_MEMORY */ - } - - /** When out of memory (OOM) happens, report error and do not - make it fatal. - @return a reference to the allocator. */ - ut_allocator& - set_oom_not_fatal() { - m_oom_fatal = false; - return(*this); - } - - /** Check if allocation failure is a fatal error. - @return true if allocation failure is fatal, false otherwise. 
*/ - bool is_oom_fatal() const { - return(m_oom_fatal); + { } /** Return the maximum number of objects that can be allocated by @@ -364,7 +343,7 @@ public: } if (ptr == NULL) { - ib::fatal_or_error(m_oom_fatal) + ib::fatal_or_error(oom_fatal) << "Cannot allocate " << total_bytes << " bytes of memory after " << alloc_max_retries << " retries over " @@ -499,14 +478,13 @@ public: } if (pfx_new == NULL) { - ib::fatal_or_error(m_oom_fatal) + ib::fatal_or_error(oom_fatal) << "Cannot reallocate " << total_bytes << " bytes of memory after " << alloc_max_retries << " retries over " << alloc_max_retries << " seconds. OS error: " << strerror(errno) << " (" << errno << "). " << OUT_OF_MEMORY_MSG; - /* not reached */ return(NULL); } @@ -739,10 +717,6 @@ private: void operator=( const ut_allocator<U>&); - - /** A flag to indicate whether out of memory (OOM) error is considered - fatal. If true, it is fatal. */ - bool m_oom_fatal; }; /** Compare two allocators of the same type. @@ -882,9 +856,8 @@ ut_delete_array( n_bytes, NULL, __FILE__, true, false)) #define ut_zalloc_nokey_nofatal(n_bytes) static_cast<void*>( \ - ut_allocator<byte>(PSI_NOT_INSTRUMENTED). \ - set_oom_not_fatal(). \ - allocate(n_bytes, NULL, __FILE__, true, false)) + ut_allocator<byte, false>(PSI_NOT_INSTRUMENTED).allocate( \ + n_bytes, NULL, __FILE__, true, false)) #define ut_realloc(ptr, n_bytes) static_cast<void*>( \ ut_allocator<byte>(PSI_NOT_INSTRUMENTED).reallocate( \ diff --git a/storage/innobase/include/ut0rnd.ic b/storage/innobase/include/ut0rnd.ic index 503c9482ea3..16dccb545d8 100644 --- a/storage/innobase/include/ut0rnd.ic +++ b/storage/innobase/include/ut0rnd.ic @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -217,16 +218,22 @@ ut_fold_binary( switch (len & 0x7) { case 7: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 6: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 5: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 4: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 3: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 2: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 1: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); } diff --git a/storage/innobase/innodb.cmake b/storage/innobase/innodb.cmake index 9d6ac0eb0e8..fe2d537c50e 100644 --- a/storage/innobase/innodb.cmake +++ b/storage/innobase/innodb.cmake @@ -24,12 +24,14 @@ INCLUDE(lzo.cmake) INCLUDE(lzma.cmake) INCLUDE(bzip2.cmake) INCLUDE(snappy.cmake) +INCLUDE(numa) MYSQL_CHECK_LZ4() MYSQL_CHECK_LZO() MYSQL_CHECK_LZMA() MYSQL_CHECK_BZIP2() MYSQL_CHECK_SNAPPY() +MYSQL_CHECK_NUMA() IF(CMAKE_CROSSCOMPILING) # Use CHECK_C_SOURCE_COMPILES instead of CHECK_C_SOURCE_RUNS when @@ -123,8 +125,8 @@ ENDIF() OPTION(WITH_INNODB_EXTRA_DEBUG "Enable extra InnoDB debug checks" OFF) IF(WITH_INNODB_EXTRA_DEBUG) - IF(NOT WITH_DEBUG) - MESSAGE(FATAL_ERROR "WITH_INNODB_EXTRA_DEBUG can be enabled only when WITH_DEBUG is enabled") + IF(NOT CMAKE_BUILD_TYPE STREQUAL "Debug") + MESSAGE(FATAL_ERROR "WITH_INNODB_EXTRA_DEBUG can be enabled only in debug builds") ENDIF() SET(EXTRA_DEBUG_FLAGS "") diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 2dbd4172a38..c4017bcd15b 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -795,12 +795,19 @@ lock_reset_lock_and_trx_wait( const char* stmt2=NULL; size_t stmt_len; trx_id_t trx_id = 0; - stmt = 
innobase_get_stmt_unsafe(lock->trx->mysql_thd, &stmt_len); + stmt = lock->trx->mysql_thd + ? innobase_get_stmt_unsafe( + lock->trx->mysql_thd, &stmt_len) + : NULL; if (lock->trx->lock.wait_lock && lock->trx->lock.wait_lock->trx) { trx_id = lock->trx->lock.wait_lock->trx->id; - stmt2 = innobase_get_stmt_unsafe(lock->trx->lock.wait_lock->trx->mysql_thd, &stmt_len); + stmt2 = lock->trx->lock.wait_lock->trx->mysql_thd + ? innobase_get_stmt_unsafe( + lock->trx->lock.wait_lock + ->trx->mysql_thd, &stmt_len) + : NULL; } ib::error() << @@ -5059,8 +5066,6 @@ lock_rec_unlock( lock_t* first_lock; lock_t* lock; ulint heap_no; - const char* stmt; - size_t stmt_len; ut_ad(trx); ut_ad(rec); @@ -5088,13 +5093,15 @@ lock_rec_unlock( lock_mutex_exit(); trx_mutex_exit(trx); - stmt = innobase_get_stmt_unsafe(trx->mysql_thd, &stmt_len); - { ib::error err; err << "Unlock row could not find a " << lock_mode << " mode lock on the record. Current statement: "; - err.write(stmt, stmt_len); + size_t stmt_len; + if (const char* stmt = innobase_get_stmt_unsafe( + trx->mysql_thd, &stmt_len)) { + err.write(stmt, stmt_len); + } } return; diff --git a/storage/innobase/log/log0crypt.cc b/storage/innobase/log/log0crypt.cc index 79301254a0a..69cfec10fed 100644 --- a/storage/innobase/log/log0crypt.cc +++ b/storage/innobase/log/log0crypt.cc @@ -120,7 +120,8 @@ log_crypt(byte* buf, ulint size, bool decrypt) for (const byte* const end = buf + size; buf != end; buf += OS_FILE_LOG_BLOCK_SIZE) { - byte dst[OS_FILE_LOG_BLOCK_SIZE - LOG_CRYPT_HDR_SIZE]; + uint32_t dst[(OS_FILE_LOG_BLOCK_SIZE - LOG_CRYPT_HDR_SIZE) + / sizeof(uint32_t)]; const ulint log_block_no = log_block_get_hdr_no(buf); /* The log block number is not encrypted. 
*/ @@ -130,8 +131,7 @@ log_crypt(byte* buf, ulint size, bool decrypt) #else ~(LOG_BLOCK_FLUSH_BIT_MASK >> 24) #endif - & (*reinterpret_cast<uint32_t*>(dst) - = *reinterpret_cast<const uint32_t*>( + & (*dst = *reinterpret_cast<const uint32_t*>( buf + LOG_BLOCK_HDR_NO)); #if LOG_BLOCK_HDR_NO + 4 != LOG_CRYPT_HDR_SIZE # error "LOG_BLOCK_HDR_NO has been moved; redo log format affected!" @@ -143,7 +143,8 @@ log_crypt(byte* buf, ulint size, bool decrypt) log_block_no)); int rc = encryption_crypt( - buf + LOG_CRYPT_HDR_SIZE, sizeof dst, dst, &dst_len, + buf + LOG_CRYPT_HDR_SIZE, sizeof dst, + reinterpret_cast<byte*>(dst), &dst_len, const_cast<byte*>(info.crypt_key.bytes), sizeof info.crypt_key, reinterpret_cast<byte*>(aes_ctr_iv), sizeof aes_ctr_iv, @@ -155,19 +156,6 @@ log_crypt(byte* buf, ulint size, bool decrypt) ut_a(rc == MY_AES_OK); ut_a(dst_len == sizeof dst); - if (decrypt) { - std::ostringstream s; - ut_print_buf_hex(s, buf + LOG_CRYPT_HDR_SIZE, - OS_FILE_LOG_BLOCK_SIZE - - LOG_CRYPT_HDR_SIZE); - ib::info() << "S: " << s.str(); - std::ostringstream d; - ut_print_buf_hex(d, dst, - OS_FILE_LOG_BLOCK_SIZE - - LOG_CRYPT_HDR_SIZE); - ib::info() << "c: " << d.str(); - } - memcpy(buf + LOG_CRYPT_HDR_SIZE, dst, sizeof dst); } } diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc index 7d4e0459610..4f924e064a8 100644 --- a/storage/innobase/log/log0log.cc +++ b/storage/innobase/log/log0log.cc @@ -44,6 +44,7 @@ Created 12/9/1995 Heikki Tuuri #include "fil0fil.h" #include "dict0boot.h" #include "dict0stats_bg.h" +#include "btr0defragment.h" #include "srv0srv.h" #include "srv0start.h" #include "trx0sys.h" @@ -666,42 +667,23 @@ log_group_set_fields( group->lsn = lsn; } -/*****************************************************************//** -Calculates the recommended highest values for lsn - last_checkpoint_lsn +/** Calculate the recommended highest values for lsn - last_checkpoint_lsn and lsn - buf_get_oldest_modification(). 
@retval true on success @retval false if the smallest log group is too small to accommodate the number of OS threads in the database server */ -static MY_ATTRIBUTE((warn_unused_result)) bool -log_calc_max_ages(void) -/*===================*/ +log_set_capacity() { - log_group_t* group; lsn_t margin; ulint free; - bool success = true; - lsn_t smallest_capacity; - - log_mutex_enter(); - - group = UT_LIST_GET_FIRST(log_sys->log_groups); - - ut_ad(group); - - smallest_capacity = LSN_MAX; - - while (group) { - if (log_group_get_capacity(group) < smallest_capacity) { - - smallest_capacity = log_group_get_capacity(group); - } - - group = UT_LIST_GET_NEXT(log_groups, group); - } + lsn_t smallest_capacity = ((srv_log_file_size_requested + << srv_page_size_shift) + - LOG_FILE_HDR_SIZE) + * srv_n_log_files; /* Add extra safety */ - smallest_capacity = smallest_capacity - smallest_capacity / 10; + smallest_capacity -= smallest_capacity / 10; /* For each OS thread we must reserve so much free space in the smallest log group that it can accommodate the log entries produced @@ -711,15 +693,20 @@ log_calc_max_ages(void) free = LOG_CHECKPOINT_FREE_PER_THREAD * (10 + srv_thread_concurrency) + LOG_CHECKPOINT_EXTRA_FREE; if (free >= smallest_capacity / 2) { - success = false; - - goto failure; - } else { - margin = smallest_capacity - free; + ib::error() << "Cannot continue operation. ib_logfiles are too" + " small for innodb_thread_concurrency=" + << srv_thread_concurrency << ". The combined size of" + " ib_logfiles should be bigger than" + " 200 kB * innodb_thread_concurrency. 
" + << INNODB_PARAMETERS_MSG; + return(false); } + margin = smallest_capacity - free; margin = margin - margin / 10; /* Add still some extra safety */ + log_mutex_enter(); + log_sys->log_group_capacity = smallest_capacity; log_sys->max_modified_age_async = margin @@ -731,26 +718,14 @@ log_calc_max_ages(void) / LOG_POOL_CHECKPOINT_RATIO_ASYNC; log_sys->max_checkpoint_age = margin; -failure: log_mutex_exit(); - if (!success) { - ib::error() << "Cannot continue operation. ib_logfiles are too" - " small for innodb_thread_concurrency=" - << srv_thread_concurrency << ". The combined size of" - " ib_logfiles should be bigger than" - " 200 kB * innodb_thread_concurrency. " - << INNODB_PARAMETERS_MSG; - } - - return(success); + return(true); } -/******************************************************//** -Initializes the log. */ +/** Initializes the redo logging subsystem. */ void -log_init(void) -/*==========*/ +log_sys_init() { log_sys = static_cast<log_t*>(ut_zalloc_nokey(sizeof(log_t))); @@ -779,7 +754,6 @@ log_init(void) log_sys->max_buf_free = log_sys->buf_size / LOG_BUF_FLUSH_RATIO - LOG_BUF_FLUSH_MARGIN; log_sys->check_flush_or_checkpoint = true; - UT_LIST_INIT(log_sys->log_groups, &log_group_t::log_groups); log_sys->n_log_ios_old = log_sys->n_log_ios; log_sys->last_printout_time = time(NULL); @@ -823,32 +797,20 @@ log_init(void) } } -/******************************************************************//** -Inits a log group to the log system. -@return true if success, false if not */ -MY_ATTRIBUTE((warn_unused_result)) -bool -log_group_init( -/*===========*/ - ulint id, /*!< in: group id */ - ulint n_files, /*!< in: number of log files */ - lsn_t file_size, /*!< in: log file size in bytes */ - ulint space_id) /*!< in: space id of the file space - which contains the log files of this - group */ +/** Initialize the redo log. 
+@param[in] n_files number of files +@param[in] file_size file size in bytes */ +void +log_init(ulint n_files, lsn_t file_size) { ulint i; - log_group_t* group; - - group = static_cast<log_group_t*>(ut_malloc_nokey(sizeof(log_group_t))); + log_group_t* group = &log_sys->log; - group->id = id; group->n_files = n_files; group->format = srv_encrypt_log ? LOG_HEADER_FORMAT_CURRENT | LOG_HEADER_FORMAT_ENCRYPTED : LOG_HEADER_FORMAT_CURRENT; group->file_size = file_size; - group->space_id = space_id; group->state = LOG_GROUP_OK; group->lsn = LOG_START_LSN; group->lsn_offset = LOG_FILE_HDR_SIZE; @@ -874,9 +836,6 @@ log_group_init( group->checkpoint_buf = static_cast<byte*>( ut_align(group->checkpoint_buf_ptr,OS_FILE_LOG_BLOCK_SIZE)); - - UT_LIST_ADD_LAST(log_sys->log_groups, group); - return(log_calc_max_ages()); } /******************************************************//** @@ -899,12 +858,11 @@ log_io_complete( case SRV_O_DIRECT: case SRV_O_DIRECT_NO_FSYNC: case SRV_ALL_O_DIRECT_FSYNC: - fil_flush(group->space_id); + fil_flush(SRV_LOG_SPACE_FIRST_ID); } - DBUG_PRINT("ib_log", ("checkpoint info written to group %u", - unsigned(group->id))); + DBUG_PRINT("ib_log", ("checkpoint info written")); log_io_complete_checkpoint(); return; @@ -931,7 +889,6 @@ log_group_file_header_flush( ut_ad(log_write_mutex_own()); ut_ad(!recv_no_log_write); - ut_ad(group->id == 0); ut_a(nth_file < group->n_files); ut_ad((group->format & ~LOG_HEADER_FORMAT_ENCRYPTED) == LOG_HEADER_FORMAT_CURRENT); @@ -950,9 +907,8 @@ log_group_file_header_flush( dest_offset = nth_file * group->file_size; DBUG_PRINT("ib_log", ("write " LSN_PF - " group " ULINTPF " file " ULINTPF " header", - start_lsn, group->id, nth_file)); + start_lsn, nth_file)); log_sys->n_log_ios++; @@ -964,7 +920,7 @@ log_group_file_header_flush( = (ulint) (dest_offset / univ_page_size.physical()); fil_io(IORequestLogWrite, true, - page_id_t(group->space_id, page_no), + page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no), univ_page_size, (ulint) 
(dest_offset % univ_page_size.physical()), OS_FILE_LOG_BLOCK_SIZE, buf, group); @@ -1050,10 +1006,10 @@ loop: DBUG_PRINT("ib_log", ("write " LSN_PF " to " LSN_PF - ": group " ULINTPF " len " ULINTPF + ": len " ULINTPF " blocks " ULINTPF ".." ULINTPF, start_lsn, next_offset, - group->id, write_len, + write_len, log_block_get_hdr_no(buf), log_block_get_hdr_no( buf + write_len @@ -1091,7 +1047,7 @@ loop: = (ulint) (next_offset / univ_page_size.physical()); fil_io(IORequestLogWrite, true, - page_id_t(group->space_id, page_no), + page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no), univ_page_size, (ulint) (next_offset % UNIV_PAGE_SIZE), write_len, buf, group); @@ -1259,7 +1215,6 @@ loop: return; } - log_group_t* group; ulint start_offset; ulint end_offset; ulint area_start; @@ -1303,9 +1258,7 @@ loop: log_buffer_switch(); - group = UT_LIST_GET_FIRST(log_sys->log_groups); - - log_group_set_fields(group, log_sys->write_lsn); + log_group_set_fields(&log_sys->log, log_sys->write_lsn); log_mutex_exit(); /* Calculate pad_size if needed. */ @@ -1316,7 +1269,7 @@ loop: end_offset = log_group_calc_lsn_offset( ut_uint64_align_up(write_lsn, OS_FILE_LOG_BLOCK_SIZE), - group); + &log_sys->log); end_offset_in_unit = (ulint) (end_offset % write_ahead_size); if (end_offset_in_unit > 0 @@ -1335,7 +1288,7 @@ loop: } /* Do the write to the log files */ log_group_write_buf( - group, write_buf + area_start, + &log_sys->log, write_buf + area_start, area_end - area_start + pad_size, #ifdef UNIV_DEBUG pad_size, @@ -1538,11 +1491,10 @@ log_io_complete_checkpoint(void) } /** Write checkpoint info to the log header. 
-@param[in,out] group redo log @param[in] end_lsn start LSN of the MLOG_CHECKPOINT mini-transaction */ static void -log_group_checkpoint(log_group_t* group, lsn_t end_lsn) +log_group_checkpoint(lsn_t end_lsn) { lsn_t lsn_offset; byte* buf; @@ -1555,10 +1507,11 @@ log_group_checkpoint(log_group_t* group, lsn_t end_lsn) || srv_shutdown_state != SRV_SHUTDOWN_NONE); DBUG_PRINT("ib_log", ("checkpoint " UINT64PF " at " LSN_PF - " written to group " ULINTPF, + " written", log_sys->next_checkpoint_no, - log_sys->next_checkpoint_lsn, - group->id)); + log_sys->next_checkpoint_lsn)); + + log_group_t* group = &log_sys->log; buf = group->checkpoint_buf; memset(buf, 0, OS_FILE_LOG_BLOCK_SIZE); @@ -1600,7 +1553,7 @@ log_group_checkpoint(log_group_t* group, lsn_t end_lsn) file write and a checkpoint field write */ fil_io(IORequestLogWrite, false, - page_id_t(group->space_id, 0), + page_id_t(SRV_LOG_SPACE_FIRST_ID, 0), univ_page_size, (log_sys->next_checkpoint_no & 1) ? LOG_CHECKPOINT_2 : LOG_CHECKPOINT_1, @@ -1625,7 +1578,8 @@ log_group_header_read( MONITOR_INC(MONITOR_LOG_IO); fil_io(IORequestLogRead, true, - page_id_t(group->space_id, header / univ_page_size.physical()), + page_id_t(SRV_LOG_SPACE_FIRST_ID, + header / univ_page_size.physical()), univ_page_size, header % univ_page_size.physical(), OS_FILE_LOG_BLOCK_SIZE, log_sys->checkpoint_buf, NULL); } @@ -1639,12 +1593,7 @@ log_write_checkpoint_info(bool sync, lsn_t end_lsn) ut_ad(log_mutex_own()); ut_ad(!srv_read_only_mode); - for (log_group_t* group = UT_LIST_GET_FIRST(log_sys->log_groups); - group; - group = UT_LIST_GET_NEXT(log_groups, group)) { - - log_group_checkpoint(group, end_lsn); - } + log_group_checkpoint(end_lsn); log_mutex_exit(); @@ -2010,6 +1959,8 @@ loop: thread_name = "lock_wait_timeout_thread"; } else if (srv_buf_dump_thread_active) { thread_name = "buf_dump_thread"; + } else if (btr_defragment_thread_active) { + thread_name = "btr_defragment_thread"; } else if (srv_fast_shutdown != 2 && 
trx_rollback_or_clean_is_active) { thread_name = "rollback of recovered transactions"; } else { @@ -2031,8 +1982,8 @@ wait_suspend_loop: switch (srv_get_active_thread_type()) { case SRV_NONE: - srv_shutdown_state = SRV_SHUTDOWN_FLUSH_PHASE; if (!srv_n_fil_crypt_threads_started) { + srv_shutdown_state = SRV_SHUTDOWN_FLUSH_PHASE; break; } os_event_set(fil_crypt_threads_event); @@ -2280,13 +2231,11 @@ log_refresh_stats(void) log_sys->last_printout_time = time(NULL); } -/********************************************************//** -Closes a log group. */ +/** Close a log group. +@param[in,out] group log group to close */ static void -log_group_close( -/*===========*/ - log_group_t* group) /* in,own: log group to close */ +log_group_close(log_group_t* group) { ulint i; @@ -2297,7 +2246,10 @@ log_group_close( ut_free(group->file_header_bufs_ptr); ut_free(group->file_header_bufs); ut_free(group->checkpoint_buf_ptr); - ut_free(group); + group->n_files = 0; + group->file_header_bufs_ptr = NULL; + group->file_header_bufs = NULL; + group->checkpoint_buf_ptr = NULL; } /********************************************************//** @@ -2306,19 +2258,7 @@ void log_group_close_all(void) /*=====================*/ { - log_group_t* group; - - group = UT_LIST_GET_FIRST(log_sys->log_groups); - - while (UT_LIST_GET_LEN(log_sys->log_groups) > 0) { - log_group_t* prev_group = group; - - group = UT_LIST_GET_NEXT(log_groups, group); - - UT_LIST_REMOVE(log_sys->log_groups, prev_group); - - log_group_close(prev_group); - } + log_group_close(&log_sys->log); } /********************************************************//** diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index f3a00e7b5e6..e48e185274a 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -717,7 +717,7 @@ loop: = (ulint) (source_offset / univ_page_size.physical()); fil_io(IORequestLogRead, true, - page_id_t(group->space_id, page_no), + 
page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no), univ_page_size, (ulint) (source_offset % univ_page_size.physical()), len, buf, NULL); @@ -787,20 +787,13 @@ recv_synchronize_groups() const lsn_t start_lsn = ut_uint64_align_down(recovered_lsn, OS_FILE_LOG_BLOCK_SIZE); - log_group_read_log_seg(log_sys->buf, - UT_LIST_GET_FIRST(log_sys->log_groups), + log_group_read_log_seg(log_sys->buf, &log_sys->log, start_lsn, start_lsn + OS_FILE_LOG_BLOCK_SIZE); - ut_ad(UT_LIST_GET_LEN(log_sys->log_groups) == 1); + /* Update the fields in the group struct to correspond to + recovered_lsn */ - for (log_group_t* group = UT_LIST_GET_FIRST(log_sys->log_groups); - group; - group = UT_LIST_GET_NEXT(log_groups, group)) { - /* Update the fields in the group struct to correspond to - recovered_lsn */ - - log_group_set_fields(group, recovered_lsn); - } + log_group_set_fields(&log_sys->log, recovered_lsn); /* Copy the checkpoint info to the log; remember that we have incremented checkpoint_no by one, and the info will not be written @@ -831,17 +824,14 @@ recv_check_log_header_checksum( @return error code or DB_SUCCESS */ static MY_ATTRIBUTE((warn_unused_result)) dberr_t -recv_find_max_checkpoint_0( - log_group_t** max_group, - ulint* max_field) +recv_find_max_checkpoint_0(log_group_t** max_group, ulint* max_field) { - log_group_t* group = UT_LIST_GET_FIRST(log_sys->log_groups); + log_group_t* group = &log_sys->log; ib_uint64_t max_no = 0; ib_uint64_t checkpoint_no; byte* buf = log_sys->checkpoint_buf; ut_ad(group->format == 0); - ut_ad(UT_LIST_GET_NEXT(log_groups, group) == NULL); /** Offset of the first checkpoint checksum */ static const uint CHECKSUM_1 = 288; @@ -852,6 +842,8 @@ recv_find_max_checkpoint_0( /** Least significant bits of the checkpoint offset */ static const uint OFFSET_LOW32 = 16; + *max_group = NULL; + for (ulint field = LOG_CHECKPOINT_1; field <= LOG_CHECKPOINT_2; field += LOG_CHECKPOINT_2 - LOG_CHECKPOINT_1) { log_group_header_read(group, field); @@ -883,9 +875,8 @@ 
recv_find_max_checkpoint_0( } DBUG_PRINT("ib_log", - ("checkpoint " UINT64PF " at " LSN_PF - " found in group " ULINTPF, - checkpoint_no, group->lsn, group->id)); + ("checkpoint " UINT64PF " at " LSN_PF " found", + checkpoint_no, group->lsn)); if (checkpoint_no >= max_no) { *max_group = group; @@ -916,7 +907,7 @@ dberr_t recv_log_format_0_recover(lsn_t lsn) { log_mutex_enter(); - log_group_t* group = UT_LIST_GET_FIRST(log_sys->log_groups); + log_group_t* group = &log_sys->log; const lsn_t source_offset = log_group_calc_lsn_offset(lsn, group); log_mutex_exit(); @@ -932,7 +923,7 @@ recv_log_format_0_recover(lsn_t lsn) REFMAN "upgrading.html"; fil_io(IORequestLogRead, true, - page_id_t(group->space_id, page_no), + page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no), univ_page_size, (ulint) ((source_offset & ~(OS_FILE_LOG_BLOCK_SIZE - 1)) % univ_page_size.physical()), @@ -968,14 +959,10 @@ recv_log_format_0_recover(lsn_t lsn) } /** Find the latest checkpoint in the log header. -@param[out] max_group log group, or NULL @param[out] max_field LOG_CHECKPOINT_1 or LOG_CHECKPOINT_2 @return error code or DB_SUCCESS */ -static MY_ATTRIBUTE((warn_unused_result)) dberr_t -recv_find_max_checkpoint( - log_group_t** max_group, - ulint* max_field) +recv_find_max_checkpoint(ulint* max_field) { log_group_t* group; ib_uint64_t max_no; @@ -983,101 +970,92 @@ recv_find_max_checkpoint( ulint field; byte* buf; - group = UT_LIST_GET_FIRST(log_sys->log_groups); + group = &log_sys->log; max_no = 0; - *max_group = NULL; *max_field = 0; buf = log_sys->checkpoint_buf; - while (group) { - group->state = LOG_GROUP_CORRUPTED; + group->state = LOG_GROUP_CORRUPTED; - log_group_header_read(group, 0); - /* Check the header page checksum. There was no - checksum in the first redo log format (version 0). 
*/ - group->format = mach_read_from_4(buf + LOG_HEADER_FORMAT); - if (group->format != 0 - && !recv_check_log_header_checksum(buf)) { - ib::error() << "Invalid redo log header checksum."; - return(DB_CORRUPTION); - } + log_group_header_read(group, 0); + /* Check the header page checksum. There was no + checksum in the first redo log format (version 0). */ + group->format = mach_read_from_4(buf + LOG_HEADER_FORMAT); + if (group->format != 0 + && !recv_check_log_header_checksum(buf)) { + ib::error() << "Invalid redo log header checksum."; + return(DB_CORRUPTION); + } - switch (group->format) { - case 0: - return(recv_find_max_checkpoint_0( - max_group, max_field)); - case LOG_HEADER_FORMAT_CURRENT: - case LOG_HEADER_FORMAT_CURRENT | LOG_HEADER_FORMAT_ENCRYPTED: - break; - default: - /* Ensure that the string is NUL-terminated. */ - buf[LOG_HEADER_CREATOR_END] = 0; - ib::error() << "Unsupported redo log format." - " The redo log was created" - " with " << buf + LOG_HEADER_CREATOR << - ". Please follow the instructions at " - REFMAN "upgrading-downgrading.html"; - /* Do not issue a message about a possibility - to cleanly shut down the newer server version - and to remove the redo logs, because the - format of the system data structures may - radically change after MySQL 5.7. */ - return(DB_ERROR); - } + switch (group->format) { + case 0: + return(recv_find_max_checkpoint_0(&group, max_field)); + case LOG_HEADER_FORMAT_CURRENT: + case LOG_HEADER_FORMAT_CURRENT | LOG_HEADER_FORMAT_ENCRYPTED: + break; + default: + /* Ensure that the string is NUL-terminated. */ + buf[LOG_HEADER_CREATOR_END] = 0; + ib::error() << "Unsupported redo log format." + " The redo log was created" + " with " << buf + LOG_HEADER_CREATOR << + ". 
Please follow the instructions at " + REFMAN "upgrading-downgrading.html"; + /* Do not issue a message about a possibility + to cleanly shut down the newer server version + and to remove the redo logs, because the + format of the system data structures may + radically change after MySQL 5.7. */ + return(DB_ERROR); + } - for (field = LOG_CHECKPOINT_1; field <= LOG_CHECKPOINT_2; - field += LOG_CHECKPOINT_2 - LOG_CHECKPOINT_1) { + for (field = LOG_CHECKPOINT_1; field <= LOG_CHECKPOINT_2; + field += LOG_CHECKPOINT_2 - LOG_CHECKPOINT_1) { - log_group_header_read(group, field); + log_group_header_read(group, field); - const ulint crc32 = log_block_calc_checksum_crc32(buf); - const ulint cksum = log_block_get_checksum(buf); + const ulint crc32 = log_block_calc_checksum_crc32(buf); + const ulint cksum = log_block_get_checksum(buf); - if (crc32 != cksum) { - DBUG_PRINT("ib_log", - ("invalid checkpoint," - " group " ULINTPF " at " ULINTPF - ", checksum %x expected %x", - group->id, field, - (unsigned) cksum, - (unsigned) crc32)); - continue; - } + if (crc32 != cksum) { + DBUG_PRINT("ib_log", + ("invalid checkpoint," + " at " ULINTPF + ", checksum " ULINTPFx + " expected " ULINTPFx, + field, cksum, crc32)); + continue; + } - if (group->is_encrypted() - && !log_crypt_read_checkpoint_buf(buf)) { - ib::error() << "Reading checkpoint" - " encryption info failed."; - continue; - } + if (group->is_encrypted() + && !log_crypt_read_checkpoint_buf(buf)) { + ib::error() << "Reading checkpoint" + " encryption info failed."; + continue; + } + + group->state = LOG_GROUP_OK; - group->state = LOG_GROUP_OK; + group->lsn = mach_read_from_8( + buf + LOG_CHECKPOINT_LSN); + group->lsn_offset = mach_read_from_8( + buf + LOG_CHECKPOINT_OFFSET); + checkpoint_no = mach_read_from_8( + buf + LOG_CHECKPOINT_NO); - group->lsn = mach_read_from_8( - buf + LOG_CHECKPOINT_LSN); - group->lsn_offset = mach_read_from_8( - buf + LOG_CHECKPOINT_OFFSET); - checkpoint_no = mach_read_from_8( - buf + 
LOG_CHECKPOINT_NO); + DBUG_PRINT("ib_log", + ("checkpoint " UINT64PF " at " LSN_PF " found ", + checkpoint_no, group->lsn)); - DBUG_PRINT("ib_log", - ("checkpoint " UINT64PF " at " LSN_PF - " found in group " ULINTPF, - checkpoint_no, group->lsn, group->id)); - - if (checkpoint_no >= max_no) { - *max_group = group; - *max_field = field; - max_no = checkpoint_no; - } + if (checkpoint_no >= max_no) { + *max_field = field; + max_no = checkpoint_no; } - - group = UT_LIST_GET_NEXT(log_groups, group); } - if (*max_group == NULL) { + if (*max_field == 0) { /* Before 5.7.9, we could get here during database initialization if we created an ib_logfile0 file that was filled with zeroes, and were killed. After @@ -1798,8 +1776,7 @@ recv_recover_page(bool just_read_in, buf_block_t* block) while (recv) { end_lsn = recv->end_lsn; - ut_ad(end_lsn - <= UT_LIST_GET_FIRST(log_sys->log_groups)->scanned_lsn); + ut_ad(end_lsn <= log_sys->log.scanned_lsn); if (recv->len > RECV_DATA_BLOCK_SIZE) { /* We have to copy the record body to a separate @@ -2988,10 +2965,9 @@ recv_group_scan_log_recs( DBUG_RETURN(false); } - DBUG_PRINT("ib_log", ("%s " LSN_PF - " completed for log group " ULINTPF, + DBUG_PRINT("ib_log", ("%s " LSN_PF " completed", last_phase ? 
"rescan" : "scan", - group->scanned_lsn, group->id)); + group->scanned_lsn)); DBUG_RETURN(store_to_hash == STORE_NO); } @@ -3142,11 +3118,9 @@ recv_init_crash_recovery_spaces() of first system tablespace page @return error code or DB_SUCCESS */ dberr_t -recv_recovery_from_checkpoint_start( - lsn_t flush_lsn) +recv_recovery_from_checkpoint_start(lsn_t flush_lsn) { log_group_t* group; - log_group_t* max_cp_group; ulint max_cp_field; lsn_t checkpoint_lsn; bool rescan; @@ -3172,14 +3146,14 @@ recv_recovery_from_checkpoint_start( /* Look for the latest checkpoint from any of the log groups */ - err = recv_find_max_checkpoint(&max_cp_group, &max_cp_field); + err = recv_find_max_checkpoint(&max_cp_field); if (err != DB_SUCCESS) { log_mutex_exit(); return(err); } - log_group_header_read(max_cp_group, max_cp_field); + log_group_header_read(&log_sys->log, max_cp_field); buf = log_sys->checkpoint_buf; @@ -3194,8 +3168,7 @@ recv_recovery_from_checkpoint_start( ut_ad(RECV_SCAN_SIZE <= log_sys->buf_size); - ut_ad(UT_LIST_GET_LEN(log_sys->log_groups) == 1); - group = UT_LIST_GET_FIRST(log_sys->log_groups); + group = &log_sys->log; const lsn_t end_lsn = mach_read_from_8( buf + LOG_CHECKPOINT_END_LSN); @@ -3483,11 +3456,8 @@ recv_reset_logs( log_sys->lsn = ut_uint64_align_up(lsn, OS_FILE_LOG_BLOCK_SIZE); - for (log_group_t* group = UT_LIST_GET_FIRST(log_sys->log_groups); - group; group = UT_LIST_GET_NEXT(log_groups, group)) { - group->lsn = log_sys->lsn; - group->lsn_offset = LOG_FILE_HDR_SIZE; - } + log_sys->log.lsn = log_sys->lsn; + log_sys->log.lsn_offset = LOG_FILE_HDR_SIZE; log_sys->buf_next_to_write = 0; log_sys->write_lsn = log_sys->lsn; diff --git a/storage/innobase/mysql-test/storage_engine/disabled.def b/storage/innobase/mysql-test/storage_engine/disabled.def index bad10099bbf..1d67f9311ca 100644 --- a/storage/innobase/mysql-test/storage_engine/disabled.def +++ b/storage/innobase/mysql-test/storage_engine/disabled.def @@ -4,4 +4,6 @@ insert_high_prio : InnoDB does not use 
table-level locking insert_low_prio : InnoDB does not use table-level locking select_high_prio : InnoDB does not use table-level locking update_low_prio : InnoDB does not use table-level locking - +insert_delayed : MDEV-12880 - INSERT DELAYED is not detected as inapplicable to a table under lock +lock_concurrent : MDEV-12882 - Assertion failure +tbl_opt_index_dir : INDEX DIRECTORY option is not supported anymore diff --git a/storage/innobase/mysql-test/storage_engine/repair_table.rdiff b/storage/innobase/mysql-test/storage_engine/repair_table.rdiff index be3709c5833..717d437b2d1 100644 --- a/storage/innobase/mysql-test/storage_engine/repair_table.rdiff +++ b/storage/innobase/mysql-test/storage_engine/repair_table.rdiff @@ -111,7 +111,7 @@ -test.t1 check error Corrupt +test.t1 check status OK SELECT a,b FROM t1; --ERROR HY000: Incorrect key file for table 't1'; try to repair it +-ERROR HY000: Index for table 't1' is corrupt; try to repair it -# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). 
-# If you got a difference in error message, just add it to rdiff file -INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o'); diff --git a/storage/innobase/mysql-test/storage_engine/suite.opt b/storage/innobase/mysql-test/storage_engine/suite.opt index 034b58f2628..627becdbfb5 100644 --- a/storage/innobase/mysql-test/storage_engine/suite.opt +++ b/storage/innobase/mysql-test/storage_engine/suite.opt @@ -1 +1 @@ ---innodb --ignore-builtin-innodb --plugin-load=ha_innodb +--innodb diff --git a/storage/innobase/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff b/storage/innobase/mysql-test/storage_engine/tbl_opt_index_dir.rdiff index e09e50b17ec..e09e50b17ec 100644 --- a/storage/innobase/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff +++ b/storage/innobase/mysql-test/storage_engine/tbl_opt_index_dir.rdiff diff --git a/storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.rdiff b/storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.rdiff index a6572ffa7f0..daa5fc67dec 100644 --- a/storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.rdiff +++ b/storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.rdiff @@ -1,10 +1,44 @@ ---- suite/storage_engine/tbl_opt_row_format.result 2012-06-24 23:55:19.539380000 +0400 -+++ suite/storage_engine/tbl_opt_row_format.reject 2012-07-15 19:26:02.235049157 +0400 -@@ -1,5 +1,7 @@ - DROP TABLE IF EXISTS t1; - CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> ROW_FORMAT=FIXED; -+Warnings: -+Warning 1478 <STORAGE_ENGINE>: assuming ROW_FORMAT=COMPACT. 
+--- ../storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.result~ 2017-05-24 00:40:12.854181048 +0300 ++++ ../storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.reject 2017-05-24 00:49:06.578191030 +0300 +@@ -7,19 +7,39 @@ + `b` char(8) DEFAULT NULL + ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC + ALTER TABLE t1 ROW_FORMAT=FIXED; ++ERROR HY000: Table storage engine '<STORAGE_ENGINE>' does not support the create option 'ROW_TYPE' ++# ERROR: Statement ended with errno 1478, errname ER_ILLEGAL_HA_CREATE_OPTION (expected to succeed) ++# ------------ UNEXPECTED RESULT ------------ ++# [ ALTER TABLE t1 ROW_FORMAT=FIXED ] ++# The statement|command finished with ER_ILLEGAL_HA_CREATE_OPTION. ++# ALTER TABLE or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors. ++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def. ++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped. ++# Also, this problem may cause a chain effect (more errors of different kinds in the test). ++# ------------------------------------------- SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED ++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC + ALTER TABLE t1 ROW_FORMAT=PAGE; ++ERROR HY000: Table storage engine '<STORAGE_ENGINE>' does not support the create option 'ROW_TYPE' ++# ERROR: Statement ended with errno 1478, errname ER_ILLEGAL_HA_CREATE_OPTION (expected to succeed) ++# ------------ UNEXPECTED RESULT ------------ ++# [ ALTER TABLE t1 ROW_FORMAT=PAGE ] ++# The statement|command finished with ER_ILLEGAL_HA_CREATE_OPTION. ++# ALTER TABLE or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors. 
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def. ++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped. ++# Also, this problem may cause a chain effect (more errors of different kinds in the test). ++# ------------------------------------------- + SHOW CREATE TABLE t1; + Table Create Table + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=PAGE ++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC + ALTER TABLE t1 ROW_FORMAT=COMPACT; + SHOW CREATE TABLE t1; + Table Create Table diff --git a/storage/innobase/mysql-test/storage_engine/type_spatial_indexes.rdiff b/storage/innobase/mysql-test/storage_engine/type_spatial_indexes.rdiff deleted file mode 100644 index 154116b748c..00000000000 --- a/storage/innobase/mysql-test/storage_engine/type_spatial_indexes.rdiff +++ /dev/null @@ -1,712 +0,0 @@ ---- suite/storage_engine/type_spatial_indexes.result 2013-08-05 18:08:49.000000000 +0400 -+++ suite/storage_engine/type_spatial_indexes.reject 2013-08-05 18:25:24.000000000 +0400 -@@ -702,699 +702,15 @@ - DROP DATABASE IF EXISTS gis_ogs; - CREATE DATABASE gis_ogs; - CREATE TABLE gis_point (fid <INT_COLUMN>, g POINT NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; --CREATE TABLE gis_line (fid <INT_COLUMN>, g LINESTRING NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; --CREATE TABLE gis_polygon (fid <INT_COLUMN>, g POLYGON NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; --CREATE TABLE gis_multi_point (fid <INT_COLUMN>, g MULTIPOINT NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; --CREATE TABLE gis_multi_line (fid <INT_COLUMN>, g MULTILINESTRING NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; --CREATE TABLE gis_multi_polygon (fid 
<INT_COLUMN>, g MULTIPOLYGON NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; --CREATE TABLE gis_geometrycollection (fid <INT_COLUMN>, g GEOMETRYCOLLECTION NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; --CREATE TABLE gis_geometry (fid <INT_COLUMN>, g GEOMETRY NOT NULL) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; --USE gis_ogs; --CREATE TABLE lakes (fid INT <CUSTOM_COL_OPTIONS>, --name CHAR(64) <CUSTOM_COL_OPTIONS>, --shore POLYGON NOT NULL, SPATIAL INDEX s(shore)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; --CREATE TABLE road_segments (fid INT <CUSTOM_COL_OPTIONS>, --name CHAR(64) <CUSTOM_COL_OPTIONS>, --aliases CHAR(64) <CUSTOM_COL_OPTIONS>, --num_lanes INT <CUSTOM_COL_OPTIONS>, --centerline LINESTRING NOT NULL, SPATIAL INDEX c(centerline)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; --CREATE TABLE divided_routes (fid INT <CUSTOM_COL_OPTIONS>, --name CHAR(64) <CUSTOM_COL_OPTIONS>, --num_lanes INT <CUSTOM_COL_OPTIONS>, --centerlines MULTILINESTRING NOT NULL, SPATIAL INDEX c(centerlines)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; --CREATE TABLE forests (fid INT <CUSTOM_COL_OPTIONS>, --name CHAR(64) <CUSTOM_COL_OPTIONS>, --boundary MULTIPOLYGON NOT NULL, SPATIAL INDEX b(boundary)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; --CREATE TABLE bridges (fid INT <CUSTOM_COL_OPTIONS>, --name CHAR(64) <CUSTOM_COL_OPTIONS>, --position POINT NOT NULL, SPATIAL INDEX p(position)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; --CREATE TABLE streams (fid INT <CUSTOM_COL_OPTIONS>, --name CHAR(64) <CUSTOM_COL_OPTIONS>, --centerline LINESTRING NOT NULL, SPATIAL INDEX c(centerline)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; --CREATE TABLE buildings (fid INT <CUSTOM_COL_OPTIONS>, --name CHAR(64) <CUSTOM_COL_OPTIONS>, --position POINT NOT NULL, --footprint POLYGON NOT NULL, SPATIAL INDEX p(position), SPATIAL INDEX f(footprint)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; --CREATE TABLE ponds (fid 
INT <CUSTOM_COL_OPTIONS>, --name CHAR(64) <CUSTOM_COL_OPTIONS>, --type CHAR(64) <CUSTOM_COL_OPTIONS>, --shores MULTIPOLYGON NOT NULL, SPATIAL INDEX s(shores)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; --CREATE TABLE named_places (fid INT <CUSTOM_COL_OPTIONS>, --name CHAR(64) <CUSTOM_COL_OPTIONS>, --boundary POLYGON NOT NULL, SPATIAL INDEX b(boundary)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; --CREATE TABLE map_neatlines (fid INT <CUSTOM_COL_OPTIONS>, --neatline POLYGON NOT NULL, SPATIAL INDEX n(neatline)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; --USE test; --SHOW FIELDS FROM gis_point; --Field Type Null Key Default Extra --fid int(11) YES NULL --g point NO MUL NULL --SHOW FIELDS FROM gis_line; --Field Type Null Key Default Extra --fid int(11) YES NULL --g linestring NO MUL NULL --SHOW FIELDS FROM gis_polygon; --Field Type Null Key Default Extra --fid int(11) YES NULL --g polygon NO MUL NULL --SHOW FIELDS FROM gis_multi_point; --Field Type Null Key Default Extra --fid int(11) YES NULL --g multipoint NO MUL NULL --SHOW FIELDS FROM gis_multi_line; --Field Type Null Key Default Extra --fid int(11) YES NULL --g multilinestring NO MUL NULL --SHOW FIELDS FROM gis_multi_polygon; --Field Type Null Key Default Extra --fid int(11) YES NULL --g multipolygon NO MUL NULL --SHOW FIELDS FROM gis_geometrycollection; --Field Type Null Key Default Extra --fid int(11) YES NULL --g geometrycollection NO MUL NULL --SHOW FIELDS FROM gis_geometry; --Field Type Null Key Default Extra --fid int(11) YES NULL --g geometry NO NULL --INSERT INTO gis_point (fid,g) VALUES --(101, PointFromText('POINT(10 10)')), --(102, PointFromText('POINT(20 10)')), --(103, PointFromText('POINT(20 20)')), --(104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)')))); --INSERT INTO gis_line (fid,g) VALUES --(105, LineFromText('LINESTRING(0 0,0 10,10 0)')), --(106, LineStringFromText('LINESTRING(10 10,20 10,20 20,10 20,10 10)')), --(107, LineStringFromWKB(AsWKB(LineString(Point(10, 10), 
Point(40, 10))))); --INSERT INTO gis_polygon (fid,g) VALUES --(108, PolygonFromText('POLYGON((10 10,20 10,20 20,10 20,10 10))')), --(109, PolyFromText('POLYGON((0 0,50 0,50 50,0 50,0 0), (10 10,20 10,20 20,10 20,10 10))')), --(110, PolyFromWKB(AsWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(0, 0)))))); --INSERT INTO gis_multi_point (fid,g) VALUES --(111, MultiPointFromText('MULTIPOINT(0 0,10 10,10 20,20 20)')), --(112, MPointFromText('MULTIPOINT(1 1,11 11,11 21,21 21)')), --(113, MPointFromWKB(AsWKB(MultiPoint(Point(3, 6), Point(4, 10))))); --INSERT INTO gis_multi_line (fid,g) VALUES --(114, MultiLineStringFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))')), --(115, MLineFromText('MULTILINESTRING((10 48,10 21,10 0))')), --(116, MLineFromWKB(AsWKB(MultiLineString(LineString(Point(1, 2), Point(3, 5)), LineString(Point(2, 5), Point(5, 8), Point(21, 7)))))); --INSERT INTO gis_multi_polygon (fid,g) VALUES --(117, MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')), --(118, MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')), --(119, MPolyFromWKB(AsWKB(MultiPolygon(Polygon(LineString(Point(0, 3), Point(3, 3), Point(3, 0), Point(0, 3))))))); --INSERT INTO gis_geometrycollection (fid,g) VALUES --(120, GeomCollFromText('GEOMETRYCOLLECTION(POINT(0 0), LINESTRING(0 0,10 10))')), --(121, GeometryFromWKB(AsWKB(GeometryCollection(Point(44, 6), LineString(Point(3, 6), Point(7, 9)))))), --(122, GeomFromText('GeometryCollection()')), --(123, GeomFromText('GeometryCollection EMPTY')); --INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_point; --INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_line; --INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_polygon; --INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_point; --INSERT into gis_geometry (fid,g) SELECT 
fid,g FROM gis_multi_line; --INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_polygon; --INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_geometrycollection; --SELECT fid, AsText(g) FROM gis_point; --fid AsText(g) --101 POINT(10 10) --102 POINT(20 10) --103 POINT(20 20) --104 POINT(10 20) --SELECT fid, AsText(g) FROM gis_line; --fid AsText(g) --105 LINESTRING(0 0,0 10,10 0) --106 LINESTRING(10 10,20 10,20 20,10 20,10 10) --107 LINESTRING(10 10,40 10) --SELECT fid, AsText(g) FROM gis_polygon; --fid AsText(g) --108 POLYGON((10 10,20 10,20 20,10 20,10 10)) --109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10)) --110 POLYGON((0 0,30 0,30 30,0 0)) --SELECT fid, AsText(g) FROM gis_multi_point; --fid AsText(g) --111 MULTIPOINT(0 0,10 10,10 20,20 20) --112 MULTIPOINT(1 1,11 11,11 21,21 21) --113 MULTIPOINT(3 6,4 10) --SELECT fid, AsText(g) FROM gis_multi_line; --fid AsText(g) --114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48)) --115 MULTILINESTRING((10 48,10 21,10 0)) --116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7)) --SELECT fid, AsText(g) FROM gis_multi_polygon; --fid AsText(g) --117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18))) --118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18))) --119 MULTIPOLYGON(((0 3,3 3,3 0,0 3))) --SELECT fid, AsText(g) FROM gis_geometrycollection; --fid AsText(g) --120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10)) --121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9)) --122 GEOMETRYCOLLECTION EMPTY --123 GEOMETRYCOLLECTION EMPTY --SELECT fid, AsText(g) FROM gis_geometry; --fid AsText(g) --101 POINT(10 10) --102 POINT(20 10) --103 POINT(20 20) --104 POINT(10 20) --105 LINESTRING(0 0,0 10,10 0) --106 LINESTRING(10 10,20 10,20 20,10 20,10 10) --107 LINESTRING(10 10,40 10) --108 POLYGON((10 10,20 10,20 20,10 20,10 10)) --109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 
20,10 20,10 10)) --110 POLYGON((0 0,30 0,30 30,0 0)) --111 MULTIPOINT(0 0,10 10,10 20,20 20) --112 MULTIPOINT(1 1,11 11,11 21,21 21) --113 MULTIPOINT(3 6,4 10) --114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48)) --115 MULTILINESTRING((10 48,10 21,10 0)) --116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7)) --117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18))) --118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18))) --119 MULTIPOLYGON(((0 3,3 3,3 0,0 3))) --120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10)) --121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9)) --122 GEOMETRYCOLLECTION EMPTY --123 GEOMETRYCOLLECTION EMPTY --SELECT fid, Dimension(g) FROM gis_geometry; --fid Dimension(g) --101 0 --102 0 --103 0 --104 0 --105 1 --106 1 --107 1 --108 2 --109 2 --110 2 --111 0 --112 0 --113 0 --114 1 --115 1 --116 1 --117 2 --118 2 --119 2 --120 1 --121 1 --122 0 --123 0 --SELECT fid, GeometryType(g) FROM gis_geometry; --fid GeometryType(g) --101 POINT --102 POINT --103 POINT --104 POINT --105 LINESTRING --106 LINESTRING --107 LINESTRING --108 POLYGON --109 POLYGON --110 POLYGON --111 MULTIPOINT --112 MULTIPOINT --113 MULTIPOINT --114 MULTILINESTRING --115 MULTILINESTRING --116 MULTILINESTRING --117 MULTIPOLYGON --118 MULTIPOLYGON --119 MULTIPOLYGON --120 GEOMETRYCOLLECTION --121 GEOMETRYCOLLECTION --122 GEOMETRYCOLLECTION --123 GEOMETRYCOLLECTION --SELECT fid, IsEmpty(g) FROM gis_geometry; --fid IsEmpty(g) --101 0 --102 0 --103 0 --104 0 --105 0 --106 0 --107 0 --108 0 --109 0 --110 0 --111 0 --112 0 --113 0 --114 0 --115 0 --116 0 --117 0 --118 0 --119 0 --120 0 --121 0 --122 0 --123 0 --SELECT fid, AsText(Envelope(g)) FROM gis_geometry; --fid AsText(Envelope(g)) --101 POLYGON((10 10,10 10,10 10,10 10,10 10)) --102 POLYGON((20 10,20 10,20 10,20 10,20 10)) --103 POLYGON((20 20,20 20,20 20,20 20,20 20)) --104 POLYGON((10 20,10 20,10 20,10 
20,10 20)) --105 POLYGON((0 0,10 0,10 10,0 10,0 0)) --106 POLYGON((10 10,20 10,20 20,10 20,10 10)) --107 POLYGON((10 10,40 10,40 10,10 10,10 10)) --108 POLYGON((10 10,20 10,20 20,10 20,10 10)) --109 POLYGON((0 0,50 0,50 50,0 50,0 0)) --110 POLYGON((0 0,30 0,30 30,0 30,0 0)) --111 POLYGON((0 0,20 0,20 20,0 20,0 0)) --112 POLYGON((1 1,21 1,21 21,1 21,1 1)) --113 POLYGON((3 6,4 6,4 10,3 10,3 6)) --114 POLYGON((10 0,16 0,16 48,10 48,10 0)) --115 POLYGON((10 0,10 0,10 48,10 48,10 0)) --116 POLYGON((1 2,21 2,21 8,1 8,1 2)) --117 POLYGON((28 0,84 0,84 42,28 42,28 0)) --118 POLYGON((28 0,84 0,84 42,28 42,28 0)) --119 POLYGON((0 0,3 0,3 3,0 3,0 0)) --120 POLYGON((0 0,10 0,10 10,0 10,0 0)) --121 POLYGON((3 6,44 6,44 9,3 9,3 6)) --122 GEOMETRYCOLLECTION EMPTY --123 GEOMETRYCOLLECTION EMPTY --SELECT fid, X(g) FROM gis_point; --fid X(g) --101 10 --102 20 --103 20 --104 10 --SELECT fid, Y(g) FROM gis_point; --fid Y(g) --101 10 --102 10 --103 20 --104 20 --SELECT fid, AsText(StartPoint(g)) FROM gis_line; --fid AsText(StartPoint(g)) --105 POINT(0 0) --106 POINT(10 10) --107 POINT(10 10) --SELECT fid, AsText(EndPoint(g)) FROM gis_line; --fid AsText(EndPoint(g)) --105 POINT(10 0) --106 POINT(10 10) --107 POINT(40 10) --SELECT fid, GLength(g) FROM gis_line; --fid GLength(g) --105 24.14213562373095 --106 40 --107 30 --SELECT fid, NumPoints(g) FROM gis_line; --fid NumPoints(g) --105 3 --106 5 --107 2 --SELECT fid, AsText(PointN(g, 2)) FROM gis_line; --fid AsText(PointN(g, 2)) --105 POINT(0 10) --106 POINT(20 10) --107 POINT(40 10) --SELECT fid, IsClosed(g) FROM gis_line; --fid IsClosed(g) --105 0 --106 1 --107 0 --SELECT fid, AsText(Centroid(g)) FROM gis_polygon; --fid AsText(Centroid(g)) --108 POINT(15 15) --109 POINT(25.416666666666668 25.416666666666668) --110 POINT(20 10) --SELECT fid, Area(g) FROM gis_polygon; --fid Area(g) --108 100 --109 2400 --110 450 --SELECT fid, AsText(ExteriorRing(g)) FROM gis_polygon; --fid AsText(ExteriorRing(g)) --108 LINESTRING(10 10,20 10,20 20,10 
20,10 10) --109 LINESTRING(0 0,50 0,50 50,0 50,0 0) --110 LINESTRING(0 0,30 0,30 30,0 0) --SELECT fid, NumInteriorRings(g) FROM gis_polygon; --fid NumInteriorRings(g) --108 0 --109 1 --110 0 --SELECT fid, AsText(InteriorRingN(g, 1)) FROM gis_polygon; --fid AsText(InteriorRingN(g, 1)) --108 NULL --109 LINESTRING(10 10,20 10,20 20,10 20,10 10) --110 NULL --SELECT fid, IsClosed(g) FROM gis_multi_line; --fid IsClosed(g) --114 0 --115 0 --116 0 --SELECT fid, AsText(Centroid(g)) FROM gis_multi_polygon; --fid AsText(Centroid(g)) --117 POINT(55.58852775304245 17.426536064113982) --118 POINT(55.58852775304245 17.426536064113982) --119 POINT(2 2) --SELECT fid, Area(g) FROM gis_multi_polygon; --fid Area(g) --117 1684.5 --118 1684.5 --119 4.5 --SELECT fid, NumGeometries(g) from gis_multi_point; --fid NumGeometries(g) --111 4 --112 4 --113 2 --SELECT fid, NumGeometries(g) from gis_multi_line; --fid NumGeometries(g) --114 2 --115 1 --116 2 --SELECT fid, NumGeometries(g) from gis_multi_polygon; --fid NumGeometries(g) --117 2 --118 2 --119 1 --SELECT fid, NumGeometries(g) from gis_geometrycollection; --fid NumGeometries(g) --120 2 --121 2 --122 0 --123 0 --SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_point; --fid AsText(GeometryN(g, 2)) --111 POINT(10 10) --112 POINT(11 11) --113 POINT(4 10) --SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_line; --fid AsText(GeometryN(g, 2)) --114 LINESTRING(16 0,16 23,16 48) --115 NULL --116 LINESTRING(2 5,5 8,21 7) --SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_polygon; --fid AsText(GeometryN(g, 2)) --117 POLYGON((59 18,67 18,67 13,59 13,59 18)) --118 POLYGON((59 18,67 18,67 13,59 13,59 18)) --119 NULL --SELECT fid, AsText(GeometryN(g, 2)) from gis_geometrycollection; --fid AsText(GeometryN(g, 2)) --120 LINESTRING(0 0,10 10) --121 LINESTRING(3 6,7 9) --122 NULL --123 NULL --SELECT fid, AsText(GeometryN(g, 1)) from gis_geometrycollection; --fid AsText(GeometryN(g, 1)) --120 POINT(0 0) --121 POINT(44 6) --122 NULL --123 NULL 
--SELECT g1.fid as first, g2.fid as second, --Within(g1.g, g2.g) as w, Contains(g1.g, g2.g) as c, Overlaps(g1.g, g2.g) as o, --Equals(g1.g, g2.g) as e, Disjoint(g1.g, g2.g) as d, Touches(g1.g, g2.g) as t, --Intersects(g1.g, g2.g) as i, Crosses(g1.g, g2.g) as r --FROM gis_geometrycollection g1, gis_geometrycollection g2 ORDER BY first, second; --first second w c o e d t i r --120 120 1 1 0 1 0 0 1 0 --120 121 0 0 1 0 0 0 1 0 --120 122 NULL NULL NULL NULL NULL NULL NULL NULL --120 123 NULL NULL NULL NULL NULL NULL NULL NULL --121 120 0 0 1 0 0 0 1 0 --121 121 1 1 0 1 0 0 1 0 --121 122 NULL NULL NULL NULL NULL NULL NULL NULL --121 123 NULL NULL NULL NULL NULL NULL NULL NULL --122 120 NULL NULL NULL NULL NULL NULL NULL NULL --122 121 NULL NULL NULL NULL NULL NULL NULL NULL --122 122 NULL NULL NULL NULL NULL NULL NULL NULL --122 123 NULL NULL NULL NULL NULL NULL NULL NULL --123 120 NULL NULL NULL NULL NULL NULL NULL NULL --123 121 NULL NULL NULL NULL NULL NULL NULL NULL --123 122 NULL NULL NULL NULL NULL NULL NULL NULL --123 123 NULL NULL NULL NULL NULL NULL NULL NULL --DROP TABLE gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry; --USE gis_ogs; --# Lakes --INSERT INTO lakes (fid,name,shore) VALUES ( --101, 'BLUE LAKE', --PolyFromText( --'POLYGON( -- (52 18,66 23,73 9,48 6,52 18), -- (59 18,67 18,67 13,59 13,59 18) -- )', --101)); --# Road Segments --INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(102, 'Route 5', NULL, 2, --LineFromText( --'LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' ,101)); --INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(103, 'Route 5', 'Main Street', 4, --LineFromText( --'LINESTRING( 44 31, 56 34, 70 38 )' ,101)); --INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(104, 'Route 5', NULL, 2, --LineFromText( --'LINESTRING( 70 38, 72 48 )' ,101)); --INSERT INTO road_segments 
(fid,name,aliases,num_lanes,centerline) VALUES(105, 'Main Street', NULL, 4, --LineFromText( --'LINESTRING( 70 38, 84 42 )' ,101)); --INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(106, 'Dirt Road by Green Forest', NULL, --1, --LineFromText( --'LINESTRING( 28 26, 28 0 )',101)); --# DividedRoutes --INSERT INTO divided_routes (fid,name,num_lanes,centerlines) VALUES(119, 'Route 75', 4, --MLineFromText( --'MULTILINESTRING((10 48,10 21,10 0), -- (16 0,16 23,16 48))', 101)); --# Forests --INSERT INTO forests (fid,name,boundary) VALUES(109, 'Green Forest', --MPolyFromText( --'MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26), -- (52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))', --101)); --# Bridges --INSERT INTO bridges (fid,name,position) VALUES(110, 'Cam Bridge', PointFromText( --'POINT( 44 31 )', 101)); --# Streams --INSERT INTO streams (fid,name,centerline) VALUES(111, 'Cam Stream', --LineFromText( --'LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )', 101)); --INSERT INTO streams (fid,name,centerline) VALUES(112, NULL, --LineFromText( --'LINESTRING( 76 0, 78 4, 73 9 )', 101)); --# Buildings --INSERT INTO buildings (fid,name,position,footprint) VALUES(113, '123 Main Street', --PointFromText( --'POINT( 52 30 )', 101), --PolyFromText( --'POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )', 101)); --INSERT INTO buildings (fid,name,position,footprint) VALUES(114, '215 Main Street', --PointFromText( --'POINT( 64 33 )', 101), --PolyFromText( --'POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )', 101)); --# Ponds --INSERT INTO ponds (fid,name,type,shores) VALUES(120, NULL, 'Stock Pond', --MPolyFromText( --'MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ), -- ( ( 26 44, 26 40, 28 42, 26 44) ) )', 101)); --# Named Places --INSERT INTO named_places (fid,name,boundary) VALUES(117, 'Ashton', --PolyFromText( --'POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )', 101)); --INSERT INTO named_places (fid,name,boundary) VALUES(118, 'Goose Island', 
--PolyFromText( --'POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )', 101)); --# Map Neatlines --INSERT INTO map_neatlines (fid,neatline) VALUES(115, --PolyFromText( --'POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )', 101)); --SELECT Dimension(shore) --FROM lakes --WHERE name = 'Blue Lake'; --Dimension(shore) --2 --SELECT GeometryType(centerlines) --FROM divided_routes --WHERE name = 'Route 75'; --GeometryType(centerlines) --MULTILINESTRING --SELECT AsText(boundary) --FROM named_places --WHERE name = 'Goose Island'; --AsText(boundary) --POLYGON((67 13,67 18,59 18,59 13,67 13)) --SELECT AsText(PolyFromWKB(AsBinary(boundary),101)) --FROM named_places --WHERE name = 'Goose Island'; --AsText(PolyFromWKB(AsBinary(boundary),101)) --POLYGON((67 13,67 18,59 18,59 13,67 13)) --SELECT SRID(boundary) --FROM named_places --WHERE name = 'Goose Island'; --SRID(boundary) --101 --SELECT IsEmpty(centerline) --FROM road_segments --WHERE name = 'Route 5' --AND aliases = 'Main Street'; --IsEmpty(centerline) --0 --SELECT AsText(Envelope(boundary)) --FROM named_places --WHERE name = 'Goose Island'; --AsText(Envelope(boundary)) --POLYGON((59 13,67 13,67 18,59 18,59 13)) --SELECT X(position) --FROM bridges --WHERE name = 'Cam Bridge'; --X(position) --44 --SELECT Y(position) --FROM bridges --WHERE name = 'Cam Bridge'; --Y(position) --31 --SELECT AsText(StartPoint(centerline)) --FROM road_segments --WHERE fid = 102; --AsText(StartPoint(centerline)) --POINT(0 18) --SELECT AsText(EndPoint(centerline)) --FROM road_segments --WHERE fid = 102; --AsText(EndPoint(centerline)) --POINT(44 31) --SELECT GLength(centerline) --FROM road_segments --WHERE fid = 106; --GLength(centerline) --26 --SELECT NumPoints(centerline) --FROM road_segments --WHERE fid = 102; --NumPoints(centerline) --5 --SELECT AsText(PointN(centerline, 1)) --FROM road_segments --WHERE fid = 102; --AsText(PointN(centerline, 1)) --POINT(0 18) --SELECT AsText(Centroid(boundary)) --FROM named_places --WHERE name = 'Goose Island'; 
--AsText(Centroid(boundary)) --POINT(63 15.5) --SELECT Area(boundary) --FROM named_places --WHERE name = 'Goose Island'; --Area(boundary) --40 --SELECT AsText(ExteriorRing(shore)) --FROM lakes --WHERE name = 'Blue Lake'; --AsText(ExteriorRing(shore)) --LINESTRING(52 18,66 23,73 9,48 6,52 18) --SELECT NumInteriorRings(shore) --FROM lakes --WHERE name = 'Blue Lake'; --NumInteriorRings(shore) --1 --SELECT AsText(InteriorRingN(shore, 1)) --FROM lakes --WHERE name = 'Blue Lake'; --AsText(InteriorRingN(shore, 1)) --LINESTRING(59 18,67 18,67 13,59 13,59 18) --SELECT NumGeometries(centerlines) --FROM divided_routes --WHERE name = 'Route 75'; --NumGeometries(centerlines) --2 --SELECT AsText(GeometryN(centerlines, 2)) --FROM divided_routes --WHERE name = 'Route 75'; --AsText(GeometryN(centerlines, 2)) --LINESTRING(16 0,16 23,16 48) --SELECT IsClosed(centerlines) --FROM divided_routes --WHERE name = 'Route 75'; --IsClosed(centerlines) --0 --SELECT GLength(centerlines) --FROM divided_routes --WHERE name = 'Route 75'; --GLength(centerlines) --96 --SELECT AsText(Centroid(shores)) --FROM ponds --WHERE fid = 120; --AsText(Centroid(shores)) --POINT(25 42) --SELECT Area(shores) --FROM ponds --WHERE fid = 120; --Area(shores) --8 --SELECT ST_Equals(boundary, --PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1)) --FROM named_places --WHERE name = 'Goose Island'; --ST_Equals(boundary, --PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1)) --1 --SELECT ST_Disjoint(centerlines, boundary) --FROM divided_routes, named_places --WHERE divided_routes.name = 'Route 75' --AND named_places.name = 'Ashton'; --ST_Disjoint(centerlines, boundary) --1 --SELECT ST_Touches(centerline, shore) --FROM streams, lakes --WHERE streams.name = 'Cam Stream' --AND lakes.name = 'Blue Lake'; --ST_Touches(centerline, shore) --1 --SELECT Crosses(road_segments.centerline, divided_routes.centerlines) --FROM road_segments, divided_routes --WHERE road_segments.fid = 102 --AND 
divided_routes.name = 'Route 75'; --Crosses(road_segments.centerline, divided_routes.centerlines) --1 --SELECT ST_Intersects(road_segments.centerline, divided_routes.centerlines) --FROM road_segments, divided_routes --WHERE road_segments.fid = 102 --AND divided_routes.name = 'Route 75'; --ST_Intersects(road_segments.centerline, divided_routes.centerlines) --1 --SELECT ST_Contains(forests.boundary, named_places.boundary) --FROM forests, named_places --WHERE forests.name = 'Green Forest' --AND named_places.name = 'Ashton'; --ST_Contains(forests.boundary, named_places.boundary) --0 --SELECT ST_Distance(position, boundary) --FROM bridges, named_places --WHERE bridges.name = 'Cam Bridge' --AND named_places.name = 'Ashton'; --ST_Distance(position, boundary) --12 --SELECT AsText(ST_Difference(named_places.boundary, forests.boundary)) --FROM named_places, forests --WHERE named_places.name = 'Ashton' --AND forests.name = 'Green Forest'; --AsText(ST_Difference(named_places.boundary, forests.boundary)) --POLYGON((56 34,62 48,84 48,84 42,56 34)) --SELECT AsText(ST_Union(shore, boundary)) --FROM lakes, named_places --WHERE lakes.name = 'Blue Lake' --AND named_places.name = 'Goose Island'; --AsText(ST_Union(shore, boundary)) --POLYGON((48 6,52 18,66 23,73 9,48 6)) --SELECT AsText(ST_SymDifference(shore, boundary)) --FROM lakes, named_places --WHERE lakes.name = 'Blue Lake' --AND named_places.name = 'Ashton'; --AsText(ST_SymDifference(shore, boundary)) --MULTIPOLYGON(((48 6,52 18,66 23,73 9,48 6),(59 13,59 18,67 18,67 13,59 13)),((56 30,56 34,62 48,84 48,84 30,56 30))) --SELECT count(*) --FROM buildings, bridges --WHERE ST_Contains(ST_Buffer(bridges.position, 15.0), buildings.footprint) = 1; --count(*) --1 -+ERROR HY000: The storage engine <STORAGE_ENGINE> doesn't support SPATIAL indexes -+# ERROR: Statement ended with errno 1464, errname ER_TABLE_CANT_HANDLE_SPKEYS (expected to succeed) -+# ------------ UNEXPECTED RESULT ------------ -+# [ CREATE TABLE gis_point (fid INT(11) 
/*!*/ /*Custom column options*/, g POINT NOT NULL, SPATIAL INDEX(g)) ENGINE=InnoDB /*!*/ /*Custom table options*/ ] -+# The statement|command finished with ER_TABLE_CANT_HANDLE_SPKEYS. -+# Geometry types or spatial indexes or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors. -+# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def. -+# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped. -+# Also, this problem may cause a chain effect (more errors of different kinds in the test). -+# ------------------------------------------- - DROP DATABASE gis_ogs; - USE test; diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index e3dd592052d..94021a5b0f4 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -1,6 +1,6 @@ /*********************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2009, Percona Inc. Copyright (c) 2013, 2017, MariaDB Corporation. @@ -226,7 +226,7 @@ struct Slot { os_offset_t offset; /** file where to read or write */ - os_file_t file; + pfs_os_file_t file; /** file name or path */ const char* name; @@ -319,7 +319,7 @@ public: IORequest& type, fil_node_t* m1, void* m2, - os_file_t file, + pfs_os_file_t file, const char* name, void* buf, os_offset_t offset, @@ -2248,7 +2248,7 @@ AIO::is_linux_native_aio_supported() strcpy(name + dirnamelen, "ib_logfile0"); - fd = ::open(name, O_RDONLY); + fd = open(name, O_RDONLY); if (fd == -1) { @@ -2578,7 +2578,7 @@ A simple function to open or create a file. 
@param[out] success true if succeed, false if error @return handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ -os_file_t +pfs_os_file_t os_file_create_simple_func( const char* name, ulint create_mode, @@ -2586,7 +2586,7 @@ os_file_create_simple_func( bool read_only, bool* success) { - os_file_t file; + pfs_os_file_t file; *success = false; @@ -2656,7 +2656,7 @@ os_file_create_simple_func( bool retry; do { - file = ::open(name, create_flag, os_innodb_umask); + file = open(name, create_flag, os_innodb_umask); if (file == -1) { *success = false; @@ -2871,7 +2871,7 @@ Opens an existing file or creates a new. @param[in] success true if succeeded @return handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ -os_file_t +pfs_os_file_t os_file_create_func( const char* name, ulint create_mode, @@ -2958,7 +2958,7 @@ os_file_create_func( bool retry; do { - file = ::open(name, create_flag, os_innodb_umask); + file = open(name, create_flag, os_innodb_umask); if (file == -1) { const char* operation; @@ -3037,7 +3037,7 @@ A simple function to open or create a file. @param[out] success true if succeeded @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ -os_file_t +pfs_os_file_t os_file_create_simple_no_error_handling_func( const char* name, ulint create_mode, @@ -3092,7 +3092,7 @@ os_file_create_simple_no_error_handling_func( return(OS_FILE_CLOSED); } - file = ::open(name, create_flag, os_innodb_umask); + file = open(name, create_flag, os_innodb_umask); *success = (file != -1); @@ -3324,8 +3324,8 @@ os_file_get_status_posix( && (stat_info->type == OS_FILE_TYPE_FILE || stat_info->type == OS_FILE_TYPE_BLOCK)) { - int access = !read_only ? O_RDWR : O_RDONLY; - int fh = ::open(path, access, os_innodb_umask); + int fh = open(path, read_only ? 
O_RDONLY : O_RDWR, + os_innodb_umask); if (fh == -1) { stat_info->rw_perm = false; @@ -3788,7 +3788,7 @@ A simple function to open or create a file. @param[out] success true if succeed, false if error @return handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ -os_file_t +pfs_os_file_t os_file_create_simple_func( const char* name, ulint create_mode, @@ -4105,7 +4105,7 @@ Opens an existing file or creates a new. @param[in] success true if succeeded @return handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ -os_file_t +pfs_os_file_t os_file_create_func( const char* name, ulint create_mode, @@ -4320,7 +4320,7 @@ A simple function to open or create a file. @param[out] success true if succeeded @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ -os_file_t +pfs_os_file_t os_file_create_simple_no_error_handling_func( const char* name, ulint create_mode, @@ -6163,7 +6163,7 @@ AIO::reserve_slot( IORequest& type, fil_node_t* m1, void* m2, - os_file_t file, + pfs_os_file_t file, const char* name, void* buf, os_offset_t offset, @@ -6555,10 +6555,11 @@ os_aio_windows_handler( /* This read/write does not go through os_file_read and os_file_write APIs, need to register with performance schema explicitly here. */ + PSI_file_locker_state state; struct PSI_file_locker* locker = NULL; register_pfs_file_io_begin( - locker, slot->file, slot->len, + &state, locker, slot->file, slot->len, slot->type.is_write() ? 
PSI_FILE_WRITE : PSI_FILE_READ, __FILE__, __LINE__); #endif /* UNIV_PFS_IO */ @@ -6616,7 +6617,7 @@ os_aio_func( IORequest& type, ulint mode, const char* name, - os_file_t file, + pfs_os_file_t file, void* buf, os_offset_t offset, ulint n, @@ -6672,7 +6673,6 @@ try_again: ret = ReadFile( file, slot->ptr, slot->len, &slot->n_bytes, &slot->control); - #elif defined(LINUX_NATIVE_AIO) if (!array->linux_dispatch(slot)) { goto err_exit; @@ -6691,7 +6691,6 @@ try_again: ret = WriteFile( file, slot->ptr, slot->len, &slot->n_bytes, &slot->control); - #elif defined(LINUX_NATIVE_AIO) if (!array->linux_dispatch(slot)) { goto err_exit; @@ -7551,7 +7550,7 @@ AIO::to_file(FILE* file) const fprintf(file, "%s IO for %s (offset=" UINT64PF - ", size=%lu)\n", + ", size=" ULINTPF ")\n", slot.type.is_read() ? "read" : "write", slot.name, slot.offset, slot.len); } diff --git a/storage/innobase/os/os0thread.cc b/storage/innobase/os/os0thread.cc index 72199b4cf0b..0462c62f4b2 100644 --- a/storage/innobase/os/os0thread.cc +++ b/storage/innobase/os/os0thread.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -31,14 +32,9 @@ Created 9/8/1995 Heikki Tuuri #include "os0event.h" #include <map> -/** Mutex that tracks the thread count. Used by innorwlocktest.cc -FIXME: the unit tests should use APIs */ -SysMutex thread_mutex; - /** Number of threads active. */ ulint os_thread_count; - /***************************************************************//** Compares two thread ids for equality. 
@return TRUE if equal */ @@ -127,11 +123,7 @@ os_thread_create_func( CloseHandle(handle); - mutex_enter(&thread_mutex); - - os_thread_count++; - - mutex_exit(&thread_mutex); + my_atomic_addlint(&os_thread_count, 1); return((os_thread_t)new_thread_id); #else /* _WIN32 else */ @@ -140,9 +132,7 @@ os_thread_create_func( pthread_attr_init(&attr); - mutex_enter(&thread_mutex); - ++os_thread_count; - mutex_exit(&thread_mutex); + my_atomic_addlint(&os_thread_count, 1); int ret = pthread_create(&new_thread_id, &attr, func, arg); @@ -197,16 +187,11 @@ os_thread_exit( pfs_delete_thread(); #endif - mutex_enter(&thread_mutex); - - os_thread_count--; + my_atomic_addlint(&os_thread_count, -1); #ifdef _WIN32 - mutex_exit(&thread_mutex); - ExitThread(0); #else - mutex_exit(&thread_mutex); if (detach) { pthread_detach(pthread_self()); } @@ -260,10 +245,6 @@ bool os_thread_active() /*==============*/ { - mutex_enter(&thread_mutex); - - bool active = (os_thread_count > 0); - /* All the threads have exited or are just exiting; NOTE that the threads may not have completed their exit yet. Should we use pthread_join() to make sure @@ -272,30 +253,5 @@ os_thread_active() os_thread_exit(). Now we just sleep 0.1 seconds and hope that is enough! */ - mutex_exit(&thread_mutex); - - return(active); -} - -/** -Initializes OS thread management data structures. */ -void -os_thread_init() -/*============*/ -{ - mutex_create(LATCH_ID_THREAD_MUTEX, &thread_mutex); -} - -/** -Frees OS thread management data structures. 
*/ -void -os_thread_free() -/*============*/ -{ - if (os_thread_count != 0) { - ib::warn() << "Some (" << os_thread_count << ") threads are" - " still active"; - } - - mutex_destroy(&thread_mutex); + return(my_atomic_loadlint(&os_thread_count) > 0); } diff --git a/storage/innobase/que/que0que.cc b/storage/innobase/que/que0que.cc index fa0a429c283..5a3af9dfaeb 100644 --- a/storage/innobase/que/que0que.cc +++ b/storage/innobase/que/que0que.cc @@ -417,7 +417,8 @@ que_graph_free_recursive( } DBUG_PRINT("que_graph_free_recursive", - ("node: %p, type: %lu", node, que_node_get_type(node))); + ("node: %p, type: " ULINTPF, node, + que_node_get_type(node))); switch (que_node_get_type(node)) { diff --git a/storage/innobase/rem/rem0rec.cc b/storage/innobase/rem/rem0rec.cc index 26bb12e8a03..c78df489179 100644 --- a/storage/innobase/rem/rem0rec.cc +++ b/storage/innobase/rem/rem0rec.cc @@ -1354,8 +1354,10 @@ rec_convert_dtuple_to_rec_comp( } } - memcpy(end, dfield_get_data(field), len); - end += len; + if (len) { + memcpy(end, dfield_get_data(field), len); + end += len; + } } if (!num_v) { diff --git a/storage/innobase/row/row0ftsort.cc b/storage/innobase/row/row0ftsort.cc index fca6ae5a1bf..321b55e9894 100644 --- a/storage/innobase/row/row0ftsort.cc +++ b/storage/innobase/row/row0ftsort.cc @@ -94,7 +94,6 @@ row_merge_create_fts_sort_index( new_index->n_def = FTS_NUM_FIELDS_SORT; new_index->cached = TRUE; new_index->parser = index->parser; - new_index->is_ngram = index->is_ngram; idx_field = dict_index_get_nth_field(index, 0); charset = fts_index_get_charset(index); @@ -515,7 +514,6 @@ row_merge_fts_doc_tokenize( ulint data_size[FTS_NUM_AUX_INDEX]; ulint n_tuple[FTS_NUM_AUX_INDEX]; st_mysql_ftparser* parser; - bool is_ngram; t_str.f_n_char = 0; t_ctx->buf_used = 0; @@ -524,7 +522,6 @@ row_merge_fts_doc_tokenize( memset(data_size, 0, FTS_NUM_AUX_INDEX * sizeof(ulint)); parser = sort_buf[0]->index->parser; - is_ngram = sort_buf[0]->index->is_ngram; /* Tokenize the data and add 
each word string, its corresponding doc id and position to sort buffer */ @@ -570,7 +567,7 @@ row_merge_fts_doc_tokenize( /* Ignore string whose character number is less than "fts_min_token_size" or more than "fts_max_token_size" */ - if (!fts_check_token(&str, NULL, is_ngram, NULL)) { + if (!fts_check_token(&str, NULL, NULL)) { if (parser != NULL) { UT_LIST_REMOVE(t_ctx->fts_token_list, fts_token); ut_free(fts_token); @@ -589,7 +586,7 @@ row_merge_fts_doc_tokenize( /* if "cached_stopword" is defined, ignore words in the stopword list */ - if (!fts_check_token(&str, t_ctx->cached_stopword, is_ngram, + if (!fts_check_token(&str, t_ctx->cached_stopword, doc->charset)) { if (parser != NULL) { UT_LIST_REMOVE(t_ctx->fts_token_list, fts_token); diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index a2773baa34e..b2ac33a209b 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -1926,6 +1926,7 @@ PageConverter::update_page( case FIL_PAGE_TYPE_XDES: err = set_current_xdes( block->page.id.page_no(), get_frame(block)); + /* fall through */ case FIL_PAGE_INODE: case FIL_PAGE_TYPE_TRX_SYS: case FIL_PAGE_IBUF_FREE_LIST: @@ -3001,21 +3002,19 @@ row_import_read_v1( cfg->m_n_cols = mach_read_from_4(ptr); if (!dict_tf_is_valid(cfg->m_flags)) { + ib_errf(thd, IB_LOG_LEVEL_ERROR, + ER_TABLE_SCHEMA_MISMATCH, + "Invalid table flags: " ULINTPF, cfg->m_flags); return(DB_CORRUPTION); + } - } else if ((err = row_import_read_columns(file, thd, cfg)) - != DB_SUCCESS) { - - return(err); - - } else if ((err = row_import_read_indexes(file, thd, cfg)) - != DB_SUCCESS) { + err = row_import_read_columns(file, thd, cfg); - return(err); + if (err == DB_SUCCESS) { + err = row_import_read_indexes(file, thd, cfg); } - ut_a(err == DB_SUCCESS); return(err); } @@ -3555,7 +3554,7 @@ row_import_for_mysql( err = fil_ibd_open( true, true, FIL_TYPE_IMPORT, table->space, - fsp_flags, table->name.m_name, filepath, table); + fsp_flags, 
table->name.m_name, filepath); DBUG_EXECUTE_IF("ib_import_open_tablespace_failure", err = DB_TABLESPACE_NOT_FOUND;); diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index b7133a3e1ee..87c2b61a3cb 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -2277,18 +2277,14 @@ for a clustered index! @retval DB_SUCCESS if no error @retval DB_DUPLICATE_KEY if error, @retval DB_LOCK_WAIT if we have to wait for a lock on a possible duplicate -record -@retval DB_SUCCESS_LOCKED_REC if an exact match of the record was found -in online table rebuild (flags & (BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG)) */ +record */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_ins_duplicate_error_in_clust( -/*=============================*/ ulint flags, /*!< in: undo logging and locking flags */ btr_cur_t* cursor, /*!< in: B-tree cursor */ const dtuple_t* entry, /*!< in: entry to insert */ - que_thr_t* thr, /*!< in: query thread */ - mtr_t* mtr) /*!< in: mtr */ + que_thr_t* thr) /*!< in: query thread */ { dberr_t err; rec_t* rec; @@ -2299,8 +2295,6 @@ row_ins_duplicate_error_in_clust( ulint* offsets = offsets_; rec_offs_init(offsets_); - UT_NOT_USED(mtr); - ut_ad(dict_index_is_clust(cursor->index)); /* NOTE: For unique non-clustered indexes there may be any number @@ -2662,7 +2656,7 @@ row_ins_clust_index_entry_low( DB_LOCK_WAIT */ err = row_ins_duplicate_error_in_clust( - flags, cursor, entry, thr, &mtr); + flags, cursor, entry, thr); } if (err != DB_SUCCESS) { diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc index 31c4d6cf77f..f561a04e413 100644 --- a/storage/innobase/row/row0log.cc +++ b/storage/innobase/row/row0log.cc @@ -354,11 +354,10 @@ row_log_online_op( b += size; if (mrec_size >= avail_size) { - dberr_t err; - IORequest request(IORequest::WRITE); const os_offset_t byte_offset = (os_offset_t) log->tail.blocks * srv_sort_buf_size; + IORequest request(IORequest::WRITE); if 
(byte_offset + srv_sort_buf_size >= srv_online_max_size) { goto write_failed; @@ -379,13 +378,12 @@ row_log_online_op( goto err_exit; } - err = os_file_write( - request, - "(modification log)", - OS_FILE_FROM_FD(log->fd), - log->tail.block, byte_offset, srv_sort_buf_size); log->tail.blocks++; - if (err != DB_SUCCESS) { + if (!os_file_write_int_fd( + request, + "(modification log)", + log->fd, + log->tail.block, byte_offset, srv_sort_buf_size)) { write_failed: /* We set the flag directly instead of invoking dict_set_corrupted_index_cache_only(index) here, @@ -472,11 +470,10 @@ row_log_table_close_func( ut_ad(mutex_own(&log->mutex)); if (size >= avail) { - dberr_t err; - IORequest request(IORequest::WRITE); const os_offset_t byte_offset = (os_offset_t) log->tail.blocks * srv_sort_buf_size; + IORequest request(IORequest::WRITE); if (byte_offset + srv_sort_buf_size >= srv_online_max_size) { goto write_failed; @@ -497,13 +494,12 @@ row_log_table_close_func( goto err_exit; } - err = os_file_write( - request, - "(modification log)", - OS_FILE_FROM_FD(log->fd), - log->tail.block, byte_offset, srv_sort_buf_size); log->tail.blocks++; - if (err != DB_SUCCESS) { + if (!os_file_write_int_fd( + request, + "(modification log)", + log->fd, + log->tail.block, byte_offset, srv_sort_buf_size)) { write_failed: log->error = DB_ONLINE_LOG_TOO_BIG; } @@ -2038,6 +2034,7 @@ row_log_table_apply_update( When applying the subsequent ROW_T_DELETE, no matching record will be found. 
*/ + /* fall through */ case DB_SUCCESS: ut_ad(row != NULL); break; @@ -2888,16 +2885,14 @@ all_done: goto func_exit; } - IORequest request; + IORequest request(IORequest::READ); - dberr_t err = os_file_read_no_error_handling( - request, - OS_FILE_FROM_FD(index->online_log->fd), - index->online_log->head.block, ofs, - srv_sort_buf_size, - NULL); - if (err != DB_SUCCESS) { + if (!os_file_read_no_error_handling_int_fd( + request, + index->online_log->fd, + index->online_log->head.block, ofs, + srv_sort_buf_size)) { ib::error() << "Unable to read temporary file" " for table " << index->table_name; @@ -3707,10 +3702,10 @@ all_done: goto func_exit; } } else { - os_offset_t ofs; - - ofs = (os_offset_t) index->online_log->head.blocks + os_offset_t ofs = static_cast<os_offset_t>( + index->online_log->head.blocks) * srv_sort_buf_size; + IORequest request(IORequest::READ); ut_ad(has_index_lock); has_index_lock = false; @@ -3723,16 +3718,11 @@ all_done: goto func_exit; } - IORequest request; - - dberr_t err = os_file_read_no_error_handling( - request, - OS_FILE_FROM_FD(index->online_log->fd), - index->online_log->head.block, ofs, - srv_sort_buf_size, - NULL); - - if (err != DB_SUCCESS) { + if (!os_file_read_no_error_handling_int_fd( + request, + index->online_log->fd, + index->online_log->head.block, ofs, + srv_sort_buf_size)) { ib::error() << "Unable to read temporary file" " for index " << index->name; diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index 62cab870e9e..ea332adfdc3 100644 --- a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -1155,10 +1155,9 @@ row_merge_heap_create( return(heap); } -/********************************************************************//** -Read a merge block from the file system. -@return TRUE if request was successful, FALSE if fail */ -ibool +/** Read a merge block from the file system. 
+@return whether the request was successful */ +bool row_merge_read( /*===========*/ int fd, /*!< in: file descriptor */ @@ -1176,11 +1175,9 @@ row_merge_read( DBUG_LOG("ib_merge_sort", "fd=" << fd << " ofs=" << ofs); DBUG_EXECUTE_IF("row_merge_read_failure", DBUG_RETURN(FALSE);); - IORequest request; - - dberr_t err = os_file_read_no_error_handling( - request, - OS_FILE_FROM_FD(fd), buf, ofs, srv_sort_buf_size, NULL); + IORequest request(IORequest::READ); + const bool success = os_file_read_no_error_handling_int_fd( + request, fd, buf, ofs, srv_sort_buf_size); /* For encrypted tables, decrypt data after reading and copy data */ if (crypt_data && crypt_buf) { @@ -1194,11 +1191,11 @@ row_merge_read( posix_fadvise(fd, ofs, srv_sort_buf_size, POSIX_FADV_DONTNEED); #endif /* POSIX_FADV_DONTNEED */ - if (err != DB_SUCCESS) { + if (!success) { ib::error() << "Failed to read merge block at " << ofs; } - DBUG_RETURN(err == DB_SUCCESS); + DBUG_RETURN(success); } /********************************************************************//** @@ -1223,7 +1220,6 @@ row_merge_write( DBUG_LOG("ib_merge_sort", "fd=" << fd << " ofs=" << ofs); DBUG_EXECUTE_IF("row_merge_write_failure", DBUG_RETURN(FALSE);); - IORequest request(IORequest::WRITE); if (crypt_data && crypt_buf) { row_merge_encrypt_buf(crypt_data, offset, space, (const byte *)buf, (byte *)crypt_buf); out_buf = crypt_buf; @@ -1232,9 +1228,9 @@ row_merge_write( mach_write_to_4((byte *)out_buf, 0); } - dberr_t err = os_file_write( - request, - "(merge)", OS_FILE_FROM_FD(fd), out_buf, ofs, buf_len); + IORequest request(IORequest::WRITE); + const bool success = os_file_write_int_fd( + request, "(merge)", fd, out_buf, ofs, buf_len); #ifdef POSIX_FADV_DONTNEED /* The block will be needed on the next merge pass, @@ -1242,7 +1238,7 @@ row_merge_write( posix_fadvise(fd, ofs, buf_len, POSIX_FADV_DONTNEED); #endif /* POSIX_FADV_DONTNEED */ - DBUG_RETURN(err == DB_SUCCESS); + DBUG_RETURN(success); } 
/********************************************************************//** @@ -4030,7 +4026,7 @@ row_merge_drop_temp_indexes(void) /** Create temporary merge files in the given paramater path, and if UNIV_PFS_IO defined, register the file descriptor with Performance Schema. -@param[in] path location for creating temporary merge files. +@param[in] path location for creating temporary merge files, or NULL @return File descriptor */ int row_merge_file_create_low( @@ -4041,16 +4037,23 @@ row_merge_file_create_low( /* This temp file open does not go through normal file APIs, add instrumentation to register with performance schema */ - struct PSI_file_locker* locker = NULL; + struct PSI_file_locker* locker; PSI_file_locker_state state; - register_pfs_file_open_begin(&state, locker, innodb_temp_file_key, - PSI_FILE_OPEN, - "Innodb Merge Temp File", - __FILE__, __LINE__); + locker = PSI_FILE_CALL(get_thread_file_name_locker)( + &state, innodb_temp_file_key, PSI_FILE_OPEN, + "Innodb Merge Temp File", &locker); + if (locker != NULL) { + PSI_FILE_CALL(start_file_open_wait)(locker, + __FILE__, + __LINE__); + } #endif fd = innobase_mysql_tmpfile(path); #ifdef UNIV_PFS_IO - register_pfs_file_open_end(locker, fd); + if (locker != NULL) { + PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)( + locker, fd); + } #endif if (fd < 0) { @@ -4063,7 +4066,7 @@ row_merge_file_create_low( /** Create a merge file in the given location. 
@param[out] merge_file merge file structure -@param[in] path location for creating temporary file +@param[in] path location for creating temporary file, or NULL @return file descriptor, or -1 on failure */ int row_merge_file_create( @@ -4094,15 +4097,20 @@ row_merge_file_destroy_low( #ifdef UNIV_PFS_IO struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; - register_pfs_file_io_begin(&state, locker, - fd, 0, PSI_FILE_CLOSE, - __FILE__, __LINE__); + locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( + &state, fd, PSI_FILE_CLOSE); + if (locker != NULL) { + PSI_FILE_CALL(start_file_wait)( + locker, 0, __FILE__, __LINE__); + } #endif if (fd >= 0) { close(fd); } #ifdef UNIV_PFS_IO - register_pfs_file_io_end(locker, 0); + if (locker != NULL) { + PSI_FILE_CALL(end_file_wait)(locker, 0); + } #endif } /*********************************************************************//** @@ -4373,7 +4381,8 @@ row_merge_rename_tables_dict( @param[in,out] index index @param[in] add_v new virtual columns added along with add index call @return DB_SUCCESS or error code */ -static MY_ATTRIBUTE((nonnull, warn_unused_result)) +MY_ATTRIBUTE((nonnull(1,2,3), warn_unused_result)) +static dberr_t row_merge_create_index_graph( trx_t* trx, @@ -4495,7 +4504,6 @@ row_merge_create_index( ut_a(index); index->parser = index_def->parser; - index->is_ngram = index_def->is_ngram; index->has_new_v_col = has_new_v_col; /* Note the id of the transaction that created this @@ -4729,6 +4737,7 @@ row_merge_build_indexes( for (i = 0; i < n_indexes; i++) { merge_files[i].fd = -1; + merge_files[i].offset = 0; } total_static_cost = COST_BUILD_INDEX_STATIC * n_indexes + COST_READ_CLUSTERED_INDEX; diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 57ba35a57f6..6d314f09bb0 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -1197,58 +1197,6 @@ row_get_prebuilt_insert_row( } 
/*********************************************************************//** -Updates the table modification counter and calculates new estimates -for table and index statistics if necessary. */ -UNIV_INLINE -void -row_update_statistics_if_needed( -/*============================*/ - dict_table_t* table) /*!< in: table */ -{ - ib_uint64_t counter; - ib_uint64_t n_rows; - - if (!table->stat_initialized) { - DBUG_EXECUTE_IF( - "test_upd_stats_if_needed_not_inited", - fprintf(stderr, "test_upd_stats_if_needed_not_inited" - " was executed\n"); - ); - return; - } - - counter = table->stat_modified_counter++; - n_rows = dict_table_get_n_rows(table); - - if (dict_stats_is_persistent_enabled(table)) { - if (counter > n_rows / 10 /* 10% */ - && dict_stats_auto_recalc_is_enabled(table)) { - - dict_stats_recalc_pool_add(table); - table->stat_modified_counter = 0; - } - return; - } - - /* Calculate new statistics if 1 / 16 of table has been modified - since the last time a statistics batch was run. - We calculate statistics at most every 16th round, since we may have - a counter table which is very small and updated very often. */ - ib_uint64_t threshold= 16 + n_rows / 16; /* 6.25% */ - - if (srv_stats_modified_counter) { - threshold= ut_min((ib_uint64_t)srv_stats_modified_counter, threshold); - } - - if (counter > threshold) { - - ut_ad(!mutex_own(&dict_sys->mutex)); - /* this will reset table->stat_modified_counter to 0 */ - dict_stats_update(table, DICT_STATS_RECALC_TRANSIENT); - } -} - -/*********************************************************************//** Sets an AUTO_INC type lock on the table mentioned in prebuilt. The AUTO_INC lock gives exclusive access to the auto-inc counter of the table. The lock is reserved only for the duration of an SQL statement. 
@@ -1649,7 +1597,7 @@ error_exit: ut_memcpy(prebuilt->row_id, node->row_id_buf, DATA_ROW_ID_LEN); } - row_update_statistics_if_needed(table); + dict_stats_update_if_needed(table); trx->op_info = ""; if (blob_heap != NULL) { @@ -1895,6 +1843,7 @@ row_update_for_mysql_using_upd_graph( ut_ad(trx); ut_a(prebuilt->magic_n == ROW_PREBUILT_ALLOCATED); ut_a(prebuilt->magic_n2 == ROW_PREBUILT_ALLOCATED); + ut_ad(table->stat_initialized); UT_NOT_USED(mysql_rec); if (!table->is_readable()) { @@ -1931,6 +1880,8 @@ row_update_for_mysql_using_upd_graph( } node = prebuilt->upd_node; + const bool is_delete = node->is_delete; + ut_ad(node->table == table); if (node->cascade_heap) { mem_heap_empty(node->cascade_heap); @@ -2101,8 +2052,11 @@ run_again: thr->fk_cascade_depth = 0; - /* Update the statistics only after completing all cascaded - operations */ + /* Update the statistics of each involved table + only after completing all operations, including + FOREIGN KEY...ON...CASCADE|SET NULL. */ + bool update_statistics; + for (upd_cascade_t::iterator i = processed_cascades->begin(); i != processed_cascades->end(); ++i) { @@ -2116,16 +2070,25 @@ run_again: than protecting the following code with a latch. */ dict_table_n_rows_dec(node->table); + update_statistics = !srv_stats_include_delete_marked; srv_stats.n_rows_deleted.inc(size_t(trx->id)); } else { + update_statistics + = !(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE); srv_stats.n_rows_updated.inc(size_t(trx->id)); } - row_update_statistics_if_needed(node->table); + if (update_statistics) { + dict_stats_update_if_needed(node->table); + } else { + /* Always update the table modification counter. 
*/ + node->table->stat_modified_counter++; + } + que_graph_free_recursive(node); } - if (node->is_delete) { + if (is_delete) { /* Not protected by dict_table_stats_lock() for performance reasons, we would rather get garbage in stat_n_rows (which is just an estimate anyway) than protecting the following code @@ -2137,25 +2100,24 @@ run_again: } else { srv_stats.n_rows_deleted.inc(size_t(trx->id)); } + + update_statistics = !srv_stats_include_delete_marked; } else { if (table->is_system_db) { srv_stats.n_system_rows_updated.inc(size_t(trx->id)); } else { srv_stats.n_rows_updated.inc(size_t(trx->id)); } + + update_statistics + = !(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE); } - /* We update table statistics only if it is a DELETE or UPDATE - that changes indexed columns, UPDATEs that change only non-indexed - columns would not affect statistics. */ - if (node->is_delete || !(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE)) { - row_update_statistics_if_needed(prebuilt->table); + if (update_statistics) { + dict_stats_update_if_needed(prebuilt->table); } else { - /* Update the table modification counter even when - non-indexed columns change if statistics is initialized. */ - if (prebuilt->table->stat_initialized) { - prebuilt->table->stat_modified_counter++; - } + /* Always update the table modification counter. */ + prebuilt->table->stat_modified_counter++; } trx->op_info = ""; @@ -3596,8 +3558,7 @@ row_drop_single_table_tablespace( /* If the tablespace is not in the cache, just delete the file. */ if (!fil_space_for_table_exists_in_mem( - space_id, tablename, true, false, NULL, 0, NULL, - table_flags)) { + space_id, tablename, true, false, NULL, 0, table_flags)) { /* Force a delete of any discarded or temporary files. */ fil_delete_file(filepath); @@ -3935,19 +3896,6 @@ row_drop_table_for_mysql( we need to avoid running removal of these entries. 
*/ if (!dict_table_is_temporary(table)) { - /* If table has not yet have crypt_data, try to read it to - make freeing the table easier. */ - if (!table->crypt_data) { - if (fil_space_t* space = fil_space_acquire_silent( - table->space)) { - /* We use crypt data in dict_table_t - in ha_innodb.cc to push warnings to - user thread. */ - table->crypt_data = space->crypt_data; - fil_space_release(space); - } - } - /* We use the private SQL parser of Innobase to generate the query graphs needed in deleting the dictionary data from system tables in Innobase. Deleting a row from SYS_INDEXES table also @@ -5122,12 +5070,10 @@ loop: case DB_INTERRUPTED: goto func_exit; default: - { - const char* doing = "CHECK TABLE"; - ib::warn() << doing << " on index " << index->name << " of" + ib::warn() << "CHECK TABLE on index " << index->name << " of" " table " << index->table->name << " returned " << ret; - /* fall through (this error is ignored by CHECK TABLE) */ - } + /* (this error is ignored by CHECK TABLE) */ + /* fall through */ case DB_END_OF_INDEX: ret = DB_SUCCESS; func_exit: diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc index e49fd7f0f8c..86a9e1259ac 100644 --- a/storage/innobase/row/row0purge.cc +++ b/storage/innobase/row/row0purge.cc @@ -27,6 +27,7 @@ Created 3/14/1997 Heikki Tuuri #include "row0purge.h" #include "fsp0fsp.h" #include "mach0data.h" +#include "dict0stats.h" #include "trx0rseg.h" #include "trx0trx.h" #include "trx0roll.h" @@ -536,8 +537,9 @@ row_purge_remove_sec_if_poss_leaf( success = false; } } - /* fall through (the index entry is still needed, + /* (The index entry is still needed, or the deletion succeeded) */ + /* fall through */ case ROW_NOT_DELETED_REF: /* The index entry is still needed. 
*/ case ROW_BUFFERED: @@ -952,10 +954,13 @@ row_purge_record_func( switch (node->rec_type) { case TRX_UNDO_DEL_MARK_REC: purged = row_purge_del_mark(node); - if (!purged) { - break; + if (purged) { + if (node->table->stat_initialized + && srv_stats_include_delete_marked) { + dict_stats_update_if_needed(node->table); + } + MONITOR_INC(MONITOR_N_DEL_ROW_PURGE); } - MONITOR_INC(MONITOR_N_DEL_ROW_PURGE); break; default: if (!updated_extern) { diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index ef7a4b917c0..0b7d9bb33f3 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -2231,16 +2231,7 @@ stop_for_a_while: btr_pcur_store_position(&(plan->pcur), &mtr); mtr_commit(&mtr); - -#ifdef BTR_CUR_HASH_ADAPT -# ifdef UNIV_DEBUG - { - btrsea_sync_check check(true); - - ut_ad(!sync_check_iterate(check)); - } -# endif /* UNIV_DEBUG */ -#endif /* BTR_CUR_HASH_ADAPT */ + ut_ad(!sync_check_iterate(sync_check())); err = DB_SUCCESS; goto func_exit; @@ -2258,14 +2249,7 @@ commit_mtr_for_a_while: mtr_commit(&mtr); mtr_has_extra_clust_latch = FALSE; - -#ifdef UNIV_DEBUG - { - dict_sync_check check(true); - - ut_ad(!sync_check_iterate(check)); - } -#endif /* UNIV_DEBUG */ + ut_ad(!sync_check_iterate(dict_sync_check())); goto table_loop; @@ -2280,20 +2264,13 @@ lock_wait_or_error: mtr_commit(&mtr); -#ifdef UNIV_DEBUG - { - dict_sync_check check(true); - - ut_ad(!sync_check_iterate(check)); - } -#endif /* UNIV_DEBUG */ - func_exit: #ifdef BTR_CUR_HASH_ADAPT if (search_latch_locked) { btr_search_s_unlock(index); } #endif /* BTR_CUR_HASH_ADAPT */ + ut_ad(!sync_check_iterate(dict_sync_check())); if (heap != NULL) { mem_heap_free(heap); @@ -2995,27 +2972,32 @@ row_sel_field_store_in_mysql_format_func( # define row_sel_store_mysql_field(m,p,r,i,o,f,t) \ row_sel_store_mysql_field_func(m,p,r,o,f,t) #endif /* UNIV_DEBUG */ -/**************************************************************//** -Convert a field in the Innobase format to a 
field in the MySQL format. */ +/** Convert a field in the Innobase format to a field in the MySQL format. +@param[out] mysql_rec record in the MySQL format +@param[in,out] prebuilt prebuilt struct +@param[in] rec InnoDB record; must be protected + by a page latch +@param[in] index index of rec +@param[in] offsets array returned by rec_get_offsets() +@param[in] field_no templ->rec_field_no or + templ->clust_rec_field_no + or templ->icp_rec_field_no + or sec field no if clust_templ_for_sec + is TRUE +@param[in] templ row template +*/ static MY_ATTRIBUTE((warn_unused_result)) ibool row_sel_store_mysql_field_func( -/*===========================*/ - byte* mysql_rec, /*!< out: record in the - MySQL format */ - row_prebuilt_t* prebuilt, /*!< in/out: prebuilt struct */ - const rec_t* rec, /*!< in: InnoDB record; - must be protected by - a page latch */ + byte* mysql_rec, + row_prebuilt_t* prebuilt, + const rec_t* rec, #ifdef UNIV_DEBUG - const dict_index_t* index, /*!< in: index of rec */ -#endif /* UNIV_DEBUG */ - const ulint* offsets, /*!< in: array returned by - rec_get_offsets() */ - ulint field_no, /*!< in: templ->rec_field_no or - templ->clust_rec_field_no or - templ->icp_rec_field_no */ - const mysql_row_templ_t*templ) /*!< in: row template */ + const dict_index_t* index, +#endif + const ulint* offsets, + ulint field_no, + const mysql_row_templ_t*templ) { DBUG_ENTER("row_sel_store_mysql_field_func"); @@ -3036,7 +3018,6 @@ row_sel_store_mysql_field_func( mem_heap_t* heap; /* Copy an externally stored field to a temporary heap */ - trx_assert_no_search_latch(prebuilt->trx); ut_ad(field_no == templ->clust_rec_field_no); ut_ad(templ->type != DATA_POINT); @@ -3151,31 +3132,36 @@ row_sel_store_mysql_field_func( DBUG_RETURN(TRUE); } -/**************************************************************//** -Convert a row in the Innobase format to a row in the MySQL format. +/** Convert a row in the Innobase format to a row in the MySQL format. 
Note that the template in prebuilt may advise us to copy only a few columns to mysql_rec, other columns are left blank. All columns may not be needed in the query. +@param[out] mysql_rec row in the MySQL format +@param[in] prebuilt prebuilt structure +@param[in] rec Innobase record in the index + which was described in prebuilt's + template, or in the clustered index; + must be protected by a page latch +@param[in] vrow virtual columns +@param[in] rec_clust whether the rec in the clustered index +@param[in] index index of rec +@param[in] offsets array returned by rec_get_offsets(rec) +@param[in] clust_templ_for_sec TRUE if rec belongs to secondary index + but the prebuilt->template is in + clustered index format and it is + used only for end range comparison @return TRUE on success, FALSE if not all columns could be retrieved */ static MY_ATTRIBUTE((warn_unused_result)) ibool row_sel_store_mysql_rec( -/*====================*/ - byte* mysql_rec, /*!< out: row in the MySQL format */ - row_prebuilt_t* prebuilt, /*!< in: prebuilt struct */ - const rec_t* rec, /*!< in: Innobase record in the index - which was described in prebuilt's - template, or in the clustered index; - must be protected by a page latch */ - const dtuple_t* vrow, /*!< in: virtual columns */ - ibool rec_clust, /*!< in: TRUE if rec is in the - clustered index instead of - prebuilt->index */ - const dict_index_t* index, /*!< in: index of rec */ - const ulint* offsets) /*!< in: array returned by - rec_get_offsets(rec) */ + byte* mysql_rec, + row_prebuilt_t* prebuilt, + const rec_t* rec, + const dtuple_t* vrow, + bool rec_clust, + const dict_index_t* index, + const ulint* offsets) { - ulint i; DBUG_ENTER("row_sel_store_mysql_rec"); ut_ad(rec_clust || index == prebuilt->index); @@ -3185,7 +3171,7 @@ row_sel_store_mysql_rec( row_mysql_prebuilt_free_blob_heap(prebuilt); } - for (i = 0; i < prebuilt->n_template; i++) { + for (ulint i = 0; i < prebuilt->n_template; i++) { const mysql_row_templ_t*templ = 
&prebuilt->mysql_template[i]; if (templ->is_virtual && dict_index_is_clust(index)) { @@ -3918,11 +3904,7 @@ row_sel_try_search_shortcut_for_mysql( ut_ad(!prebuilt->templ_contains_blob); btr_pcur_open_with_no_init(index, search_tuple, PAGE_CUR_GE, - BTR_SEARCH_LEAF, pcur, - (trx->has_search_latch) - ? RW_S_LATCH - : 0, - mtr); + BTR_SEARCH_LEAF, pcur, RW_S_LATCH, mtr); rec = btr_pcur_get_rec(pcur); if (!page_rec_is_user_rec(rec)) { @@ -4027,7 +4009,7 @@ row_search_idx_cond_check( if (!prebuilt->need_to_access_clustered || dict_index_is_clust(prebuilt->index)) { if (!row_sel_store_mysql_rec( - mysql_rec, prebuilt, rec, NULL, FALSE, + mysql_rec, prebuilt, rec, NULL, false, prebuilt->index, offsets)) { ut_ad(dict_index_is_clust(prebuilt->index)); return(ICP_NO_MATCH); @@ -4180,14 +4162,7 @@ row_search_mvcc( DBUG_RETURN(DB_END_OF_INDEX); } -#ifdef BTR_CUR_HASH_ADAPT -# ifdef UNIV_DEBUG - { - btrsea_sync_check check(trx->has_search_latch); - ut_ad(!sync_check_iterate(check)); - } -# endif /* UNIV_DEBUG */ -#endif /* BTR_CUR_HASH_ADAPT */ + ut_ad(!sync_check_iterate(sync_check())); if (dict_table_is_discarded(prebuilt->table)) { @@ -4212,8 +4187,6 @@ row_search_mvcc( && (prebuilt->read_just_key || prebuilt->m_read_virtual_key); - trx_assert_no_search_latch(trx); - /* Reset the new record lock info if srv_locks_unsafe_for_binlog is set or session is using a READ COMMITED isolation level. Then we are able to remove the record locks set here on an individual @@ -4368,9 +4341,7 @@ row_search_mvcc( and if we try that, we can deadlock on the adaptive hash index semaphore! 
*/ - trx_assert_no_search_latch(trx); rw_lock_s_lock(btr_get_search_latch(index)); - trx->has_search_latch = true; switch (row_sel_try_search_shortcut_for_mysql( &rec, prebuilt, &offsets, &heap, @@ -4399,7 +4370,8 @@ row_search_mvcc( if (!row_sel_store_mysql_rec( buf, prebuilt, - rec, NULL, FALSE, index, offsets)) { + rec, NULL, false, index, + offsets)) { /* Only fresh inserts may contain incomplete externally stored columns. Pretend that such @@ -4424,7 +4396,6 @@ row_search_mvcc( err = DB_SUCCESS; rw_lock_s_unlock(btr_get_search_latch(index)); - trx->has_search_latch = false; goto func_exit; @@ -4435,7 +4406,6 @@ row_search_mvcc( err = DB_RECORD_NOT_FOUND; rw_lock_s_unlock(btr_get_search_latch(index)); - trx->has_search_latch = false; /* NOTE that we do NOT store the cursor position */ @@ -4453,7 +4423,6 @@ row_search_mvcc( mtr_start(&mtr); rw_lock_s_unlock(btr_get_search_latch(index)); - trx->has_search_latch = false; } } #endif /* BTR_CUR_HASH_ADAPT */ @@ -4461,8 +4430,6 @@ row_search_mvcc( /*-------------------------------------------------------------*/ /* PHASE 3: Open or restore index cursor position */ - trx_assert_no_search_latch(trx); - spatial_search = dict_index_is_spatial(index) && mode >= PAGE_CUR_CONTAIN; @@ -5371,7 +5338,7 @@ requires_clust_rec: appropriate version of the clustered index record. 
*/ if (!row_sel_store_mysql_rec( buf, prebuilt, result_rec, vrow, - TRUE, clust_index, offsets)) { + true, clust_index, offsets)) { goto next_rec; } } @@ -5776,15 +5743,7 @@ func_exit: } } -#ifdef BTR_CUR_HASH_ADAPT -# ifdef UNIV_DEBUG - { - btrsea_sync_check check(trx->has_search_latch); - - ut_ad(!sync_check_iterate(check)); - } -# endif /* UNIV_DEBUG */ -#endif /* BTR_CUR_HASH_ADAPT */ + ut_ad(!sync_check_iterate(sync_check())); DEBUG_SYNC_C("innodb_row_search_for_mysql_exit"); diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc index 9288adb21a4..934b5ad5a7a 100644 --- a/storage/innobase/row/row0uins.cc +++ b/storage/innobase/row/row0uins.cc @@ -25,6 +25,7 @@ Created 2/25/1997 Heikki Tuuri #include "row0uins.h" #include "dict0dict.h" +#include "dict0stats.h" #include "dict0boot.h" #include "dict0crea.h" #include "trx0undo.h" @@ -508,6 +509,23 @@ row_undo_ins( mutex_exit(&dict_sys->mutex); } + + if (err == DB_SUCCESS && node->table->stat_initialized) { + /* Not protected by dict_table_stats_lock() for + performance reasons, we would rather get garbage + in stat_n_rows (which is just an estimate anyway) + than protecting the following code with a latch. */ + dict_table_n_rows_dec(node->table); + + /* Do not attempt to update statistics when + executing ROLLBACK in the InnoDB SQL + interpreter, because in that case we would + already be holding dict_sys->mutex, which + would be acquired when updating statistics. */ + if (!dict_locked) { + dict_stats_update_if_needed(node->table); + } + } } dict_table_close(node->table, dict_locked, FALSE); diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc index 9de108ba141..c6e188c1ba3 100644 --- a/storage/innobase/row/row0umod.cc +++ b/storage/innobase/row/row0umod.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved. 
+Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -27,6 +28,7 @@ Created 2/27/1997 Heikki Tuuri #include "row0umod.h" #include "dict0dict.h" +#include "dict0stats.h" #include "dict0boot.h" #include "trx0undo.h" #include "trx0roll.h" @@ -1250,8 +1252,38 @@ row_undo_mod( } if (err == DB_SUCCESS) { - err = row_undo_mod_clust(node, thr); + + bool update_statistics + = !(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE); + + if (err == DB_SUCCESS && node->table->stat_initialized) { + switch (node->rec_type) { + case TRX_UNDO_UPD_EXIST_REC: + break; + case TRX_UNDO_DEL_MARK_REC: + dict_table_n_rows_inc(node->table); + update_statistics = update_statistics + || !srv_stats_include_delete_marked; + break; + case TRX_UNDO_UPD_DEL_REC: + dict_table_n_rows_dec(node->table); + update_statistics = update_statistics + || !srv_stats_include_delete_marked; + break; + } + + /* Do not attempt to update statistics when + executing ROLLBACK in the InnoDB SQL + interpreter, because in that case we would + already be holding dict_sys->mutex, which + would be acquired when updating statistics. 
*/ + if (update_statistics && !dict_locked) { + dict_stats_update_if_needed(node->table); + } else { + node->table->stat_modified_counter++; + } + } } dict_table_close(node->table, dict_locked, FALSE); diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc index d251b2b71a4..176af0400ce 100644 --- a/storage/innobase/row/row0upd.cc +++ b/storage/innobase/row/row0upd.cc @@ -2603,7 +2603,6 @@ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_upd_clust_rec_by_insert( /*========================*/ - ulint flags, /*!< in: undo logging and locking flags */ upd_node_t* node, /*!< in/out: row update node */ dict_index_t* index, /*!< in: clustered index of the record */ que_thr_t* thr, /*!< in: query thread */ @@ -2672,7 +2671,7 @@ row_upd_clust_rec_by_insert( } err = btr_cur_del_mark_set_clust_rec( - flags, btr_cur_get_block(btr_cur), rec, index, offsets, + btr_cur_get_block(btr_cur), rec, index, offsets, thr, node->row, mtr); if (err != DB_SUCCESS) { err_exit: @@ -2913,7 +2912,6 @@ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_upd_del_mark_clust_rec( /*=======================*/ - ulint flags, /*!< in: undo logging and locking flags */ upd_node_t* node, /*!< in: row update node */ dict_index_t* index, /*!< in: clustered index */ ulint* offsets,/*!< in/out: rec_get_offsets() for the @@ -2951,7 +2949,7 @@ row_upd_del_mark_clust_rec( rec = btr_cur_get_rec(btr_cur); err = btr_cur_del_mark_set_clust_rec( - flags, btr_cur_get_block(btr_cur), rec, + btr_cur_get_block(btr_cur), rec, index, offsets, thr, node->row, mtr); if (err == DB_SUCCESS && referenced) { @@ -3119,9 +3117,9 @@ row_upd_clust_step( offsets = rec_get_offsets(rec, index, offsets_, ULINT_UNDEFINED, &heap); - if (!node->has_clust_rec_x_lock) { + if (!flags && !node->has_clust_rec_x_lock) { err = lock_clust_rec_modify_check_and_lock( - flags, btr_pcur_get_block(pcur), + 0, btr_pcur_get_block(pcur), rec, index, offsets, thr); if (err != DB_SUCCESS) { mtr_commit(&mtr); 
@@ -3138,7 +3136,7 @@ row_upd_clust_step( if (node->is_delete) { err = row_upd_del_mark_clust_rec( - flags, node, index, offsets, thr, referenced, foreign, &mtr); + node, index, offsets, thr, referenced, foreign, &mtr); if (err == DB_SUCCESS) { node->state = UPD_NODE_UPDATE_ALL_SEC; @@ -3184,7 +3182,7 @@ row_upd_clust_step( externally! */ err = row_upd_clust_rec_by_insert( - flags, node, index, thr, referenced, foreign, &mtr); + node, index, thr, referenced, foreign, &mtr); if (err != DB_SUCCESS) { goto exit_func; @@ -3231,7 +3229,7 @@ row_upd( ut_ad(!thr_get_trx(thr)->in_rollback); DBUG_PRINT("row_upd", ("table: %s", node->table->name.m_name)); - DBUG_PRINT("row_upd", ("info bits in update vector: 0x%lx", + DBUG_PRINT("row_upd", ("info bits in update vector: 0x" ULINTPFx, node->update ? node->update->info_bits: 0)); DBUG_PRINT("row_upd", ("foreign_id: %s", node->foreign ? node->foreign->id: "NULL")); diff --git a/storage/innobase/srv/srv0conc.cc b/storage/innobase/srv/srv0conc.cc index bf8a326a633..9f589b57d9c 100644 --- a/storage/innobase/srv/srv0conc.cc +++ b/storage/innobase/srv/srv0conc.cc @@ -197,11 +197,6 @@ srv_conc_enter_innodb_with_atomics( (void) my_atomic_addlint( &srv_conc.n_waiting, 1); - /* Release possible search system latch this - thread has */ - - trx_assert_no_search_latch(trx); - thd_wait_begin(trx->mysql_thd, THD_WAIT_USER_LOCK); notified_mysql = TRUE; @@ -257,15 +252,7 @@ srv_conc_enter_innodb( { trx_t* trx = prebuilt->trx; -#ifdef BTR_CUR_HASH_ADAPT -# ifdef UNIV_DEBUG - { - btrsea_sync_check check(trx->has_search_latch); - - ut_ad(!sync_check_iterate(check)); - } -# endif /* UNIV_DEBUG */ -#endif /* BTR_CUR_HASH_ADAPT */ + ut_ad(!sync_check_iterate(sync_check())); srv_conc_enter_innodb_with_atomics(trx); } @@ -279,15 +266,7 @@ srv_conc_force_enter_innodb( trx_t* trx) /*!< in: transaction object associated with the thread */ { -#ifdef BTR_CUR_HASH_ADAPT -# ifdef UNIV_DEBUG - { - btrsea_sync_check check(trx->has_search_latch); - - 
ut_ad(!sync_check_iterate(check)); - } -# endif /* UNIV_DEBUG */ -#endif /* BTR_CUR_HASH_ADAPT */ + ut_ad(!sync_check_iterate(sync_check())); if (!srv_thread_concurrency) { @@ -320,15 +299,7 @@ srv_conc_force_exit_innodb( srv_conc_exit_innodb_with_atomics(trx); -#ifdef BTR_CUR_HASH_ADAPT -# ifdef UNIV_DEBUG - { - btrsea_sync_check check(trx->has_search_latch); - - ut_ad(!sync_check_iterate(check)); - } -# endif /* UNIV_DEBUG */ -#endif /* BTR_CUR_HASH_ADAPT */ + ut_ad(!sync_check_iterate(sync_check())); } /*********************************************************************//** diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index 9ad1d2a7103..c89930ecb2c 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, 2009 Google Inc. Copyright (c) 2009, Percona Inc. Copyright (c) 2013, 2017, MariaDB Corporation. @@ -182,9 +182,6 @@ my_bool srv_use_mtflush; my_bool srv_master_thread_disabled_debug; /** Event used to inform that master thread is disabled. */ static os_event_t srv_master_thread_disabled_event; -/** Debug variable to find if any background threads are adding -to purge during slow shutdown. */ -extern bool trx_commit_disallowed; #endif /* UNIV_DEBUG */ /*------------------------- LOG FILES ------------------------ */ @@ -526,16 +523,16 @@ UNIV_INTERN ulong srv_buf_dump_status_frequency; /** Acquire the system_mutex. */ #define srv_sys_mutex_enter() do { \ - mutex_enter(&srv_sys->mutex); \ + mutex_enter(&srv_sys.mutex); \ } while (0) /** Test if the system mutex is owned. 
*/ -#define srv_sys_mutex_own() (mutex_own(&srv_sys->mutex) \ +#define srv_sys_mutex_own() (mutex_own(&srv_sys.mutex) \ && !srv_read_only_mode) /** Release the system mutex. */ #define srv_sys_mutex_exit() do { \ - mutex_exit(&srv_sys->mutex); \ + mutex_exit(&srv_sys.mutex); \ } while (0) /* @@ -622,7 +619,7 @@ struct srv_sys_t{ ulint n_sys_threads; /*!< size of the sys_threads array */ - srv_slot_t* sys_threads; /*!< server thread table; + srv_slot_t sys_threads[32 + 1]; /*!< server thread table; os_event_set() and os_event_reset() on sys_threads[]->event are @@ -639,7 +636,7 @@ struct srv_sys_t{ activity */ }; -static srv_sys_t* srv_sys; +static srv_sys_t srv_sys; /** Event to signal srv_monitor_thread. Not protected by a mutex. Set after setting srv_print_innodb_monitor. */ @@ -664,10 +661,10 @@ and/or load it during startup. */ char srv_buffer_pool_dump_at_shutdown = TRUE; char srv_buffer_pool_load_at_startup = TRUE; -/** Slot index in the srv_sys->sys_threads array for the purge thread. */ +/** Slot index in the srv_sys.sys_threads array for the purge thread. */ static const ulint SRV_PURGE_SLOT = 1; -/** Slot index in the srv_sys->sys_threads array for the master thread. */ +/** Slot index in the srv_sys.sys_threads array for the master thread. */ static const ulint SRV_MASTER_SLOT = 0; #ifdef HAVE_PSI_STAGE_INTERFACE @@ -807,21 +804,21 @@ srv_reserve_slot( switch (type) { case SRV_MASTER: - slot = &srv_sys->sys_threads[SRV_MASTER_SLOT]; + slot = &srv_sys.sys_threads[SRV_MASTER_SLOT]; break; case SRV_PURGE: - slot = &srv_sys->sys_threads[SRV_PURGE_SLOT]; + slot = &srv_sys.sys_threads[SRV_PURGE_SLOT]; break; case SRV_WORKER: /* Find an empty slot, skip the master and purge slots. 
*/ - for (slot = &srv_sys->sys_threads[2]; + for (slot = &srv_sys.sys_threads[2]; slot->in_use; ++slot) { - ut_a(slot < &srv_sys->sys_threads[ - srv_sys->n_sys_threads]); + ut_a(slot < &srv_sys.sys_threads[ + srv_sys.n_sys_threads]); } break; @@ -837,7 +834,7 @@ srv_reserve_slot( ut_ad(srv_slot_get_type(slot) == type); - my_atomic_addlint(&srv_sys->n_threads_active[type], 1); + my_atomic_addlint(&srv_sys.n_threads_active[type], 1); srv_sys_mutex_exit(); @@ -867,13 +864,13 @@ srv_suspend_thread_low( case SRV_MASTER: /* We have only one master thread and it should be the first entry always. */ - ut_a(srv_sys->n_threads_active[type] == 1); + ut_a(srv_sys.n_threads_active[type] == 1); break; case SRV_PURGE: /* We have only one purge coordinator thread and it should be the second entry always. */ - ut_a(srv_sys->n_threads_active[type] == 1); + ut_a(srv_sys.n_threads_active[type] == 1); break; case SRV_WORKER: @@ -884,7 +881,7 @@ srv_suspend_thread_low( ut_a(!slot->suspended); slot->suspended = TRUE; - if (my_atomic_addlint(&srv_sys->n_threads_active[type], -1) < 0) { + if (my_atomic_addlint(&srv_sys.n_threads_active[type], -1) < 0) { ut_error; } @@ -941,7 +938,7 @@ srv_resume_thread(srv_slot_t* slot, int64_t sig_count = 0, bool wait = true, ut_ad(slot->suspended); slot->suspended = FALSE; - my_atomic_addlint(&srv_sys->n_threads_active[slot->type], 1); + my_atomic_addlint(&srv_sys.n_threads_active[slot->type], 1); srv_sys_mutex_exit(); return(timeout); } @@ -963,8 +960,8 @@ srv_release_threads(enum srv_thread_type type, ulint n) srv_sys_mutex_enter(); - for (ulint i = 0; i < srv_sys->n_sys_threads; i++) { - srv_slot_t* slot = &srv_sys->sys_threads[i]; + for (ulint i = 0; i < srv_sys.n_sys_threads; i++) { + srv_slot_t* slot = &srv_sys.sys_threads[i]; if (!slot->in_use || srv_slot_get_type(slot) != type) { continue; @@ -984,7 +981,7 @@ srv_release_threads(enum srv_thread_type type, ulint n) should be the first entry always. 
*/ ut_a(n == 1); ut_a(i == SRV_MASTER_SLOT); - ut_a(srv_sys->n_threads_active[type] == 0); + ut_a(srv_sys.n_threads_active[type] == 0); break; case SRV_PURGE: @@ -993,12 +990,12 @@ srv_release_threads(enum srv_thread_type type, ulint n) ut_a(n == 1); ut_a(i == SRV_PURGE_SLOT); ut_a(srv_n_purge_threads > 0); - ut_a(srv_sys->n_threads_active[type] == 0); + ut_a(srv_sys.n_threads_active[type] == 0); break; case SRV_WORKER: ut_a(srv_n_purge_threads > 1); - ut_a(srv_sys->n_threads_active[type] + ut_a(srv_sys.n_threads_active[type] < srv_n_purge_threads - 1); break; } @@ -1034,32 +1031,19 @@ static void srv_init() { - ulint n_sys_threads = 0; - ulint srv_sys_sz = sizeof(*srv_sys); - mutex_create(LATCH_ID_SRV_INNODB_MONITOR, &srv_innodb_monitor_mutex); - if (!srv_read_only_mode) { - - /* Number of purge threads + master thread */ - n_sys_threads = srv_n_purge_threads + 1; - - srv_sys_sz += n_sys_threads * sizeof(*srv_sys->sys_threads); - } - - srv_sys = static_cast<srv_sys_t*>(ut_zalloc_nokey(srv_sys_sz)); - - srv_sys->n_sys_threads = n_sys_threads; + srv_sys.n_sys_threads = srv_read_only_mode + ? 
0 + : srv_n_purge_threads + 1/* purge coordinator */; if (!srv_read_only_mode) { - mutex_create(LATCH_ID_SRV_SYS, &srv_sys->mutex); + mutex_create(LATCH_ID_SRV_SYS, &srv_sys.mutex); - mutex_create(LATCH_ID_SRV_SYS_TASKS, &srv_sys->tasks_mutex); + mutex_create(LATCH_ID_SRV_SYS_TASKS, &srv_sys.tasks_mutex); - srv_sys->sys_threads = (srv_slot_t*) &srv_sys[1]; - - for (ulint i = 0; i < srv_sys->n_sys_threads; ++i) { - srv_slot_t* slot = &srv_sys->sys_threads[i]; + for (ulint i = 0; i < srv_sys.n_sys_threads; ++i) { + srv_slot_t* slot = &srv_sys.sys_threads[i]; slot->event = os_event_create(0); @@ -1074,7 +1058,7 @@ srv_init() buf_flush_event = os_event_create("buf_flush_event"); - UT_LIST_INIT(srv_sys->tasks, &que_thr_t::queue); + UT_LIST_INIT(srv_sys.tasks, &que_thr_t::queue); } srv_buf_resize_event = os_event_create(0); @@ -1118,7 +1102,7 @@ void srv_free(void) /*==========*/ { - if (!srv_sys) { + if (!srv_buf_resize_event) { return; } @@ -1126,13 +1110,11 @@ srv_free(void) mutex_free(&page_zip_stat_per_index_mutex); if (!srv_read_only_mode) { - mutex_free(&srv_sys->mutex); - mutex_free(&srv_sys->tasks_mutex); - - for (ulint i = 0; i < srv_sys->n_sys_threads; ++i) { - srv_slot_t* slot = &srv_sys->sys_threads[i]; + mutex_free(&srv_sys.mutex); + mutex_free(&srv_sys.tasks_mutex); - os_event_destroy(slot->event); + for (ulint i = 0; i < srv_sys.n_sys_threads; ++i) { + os_event_destroy(srv_sys.sys_threads[i].event); } os_event_destroy(srv_error_event); @@ -1143,18 +1125,11 @@ srv_free(void) os_event_destroy(srv_buf_resize_event); -#ifdef UNIV_DEBUG - os_event_destroy(srv_master_thread_disabled_event); - srv_master_thread_disabled_event = NULL; -#endif /* UNIV_DEBUG */ + ut_d(os_event_destroy(srv_master_thread_disabled_event)); dict_ind_free(); trx_i_s_cache_free(trx_i_s_cache); - - ut_free(srv_sys); - - srv_sys = 0; } /*********************************************************************//** @@ -1187,7 +1162,6 @@ srv_boot(void) srv_normalize_init_values(); 
sync_check_init(); - os_thread_init(); /* Reset the system variables in the recovery module. */ recv_sys_var_init(); trx_pool_init(); @@ -1484,8 +1458,10 @@ srv_export_innodb_status(void) buf_get_total_stat(&stat); buf_get_total_list_len(&LRU_len, &free_len, &flush_list_len); buf_get_total_list_size_in_bytes(&buf_pools_list_size); - fil_crypt_total_stat(&crypt_stat); - btr_scrub_total_stat(&scrub_stat); + if (!srv_read_only_mode) { + fil_crypt_total_stat(&crypt_stat); + btr_scrub_total_stat(&scrub_stat); + } mutex_enter(&srv_innodb_monitor_mutex); @@ -1683,6 +1659,7 @@ srv_export_innodb_status(void) export_vars.innodb_sec_rec_cluster_reads_avoided = srv_stats.n_sec_rec_cluster_reads_avoided; + if (!srv_read_only_mode) { export_vars.innodb_encryption_rotation_pages_read_from_cache = crypt_stat.pages_read_from_cache; export_vars.innodb_encryption_rotation_pages_read_from_disk = @@ -1711,6 +1688,7 @@ srv_export_innodb_status(void) export_vars.innodb_scrub_page_split_failures_unknown = scrub_stat.page_split_failures_unknown; export_vars.innodb_scrub_log = srv_stats.n_log_scrubs; + } mutex_exit(&srv_innodb_monitor_mutex); } @@ -1860,7 +1838,7 @@ loop: } } - if (srv_shutdown_state >= SRV_SHUTDOWN_CLEANUP) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { goto exit_func; } @@ -1981,7 +1959,7 @@ loop: os_event_wait_time_low(srv_error_event, 1000000, sig_count); - if (srv_shutdown_state < SRV_SHUTDOWN_CLEANUP) { + if (srv_shutdown_state == SRV_SHUTDOWN_NONE) { goto loop; } @@ -2002,7 +1980,7 @@ void srv_inc_activity_count(void) /*========================*/ { - srv_sys->activity_count.inc(); + srv_sys.activity_count.inc(); } /**********************************************************************//** @@ -2023,7 +2001,7 @@ srv_get_active_thread_type(void) srv_sys_mutex_enter(); for (ulint i = SRV_WORKER; i <= SRV_MASTER; ++i) { - if (srv_sys->n_threads_active[i] != 0) { + if (srv_sys.n_threads_active[i] != 0) { ret = static_cast<srv_thread_type>(i); break; } @@ -2058,12 
+2036,12 @@ srv_active_wake_master_thread_low() srv_inc_activity_count(); - if (my_atomic_loadlint(&srv_sys->n_threads_active[SRV_MASTER]) == 0) { + if (my_atomic_loadlint(&srv_sys.n_threads_active[SRV_MASTER]) == 0) { srv_slot_t* slot; srv_sys_mutex_enter(); - slot = &srv_sys->sys_threads[SRV_MASTER_SLOT]; + slot = &srv_sys.sys_threads[SRV_MASTER_SLOT]; /* Only if the master thread has been started. */ @@ -2083,7 +2061,7 @@ srv_wake_purge_thread_if_not_active() ut_ad(!srv_sys_mutex_own()); if (purge_sys->state == PURGE_STATE_RUN - && !my_atomic_loadlint(&srv_sys->n_threads_active[SRV_PURGE]) + && !my_atomic_loadlint(&srv_sys.n_threads_active[SRV_PURGE]) && my_atomic_loadlint(&trx_sys->rseg_history_len)) { srv_release_threads(SRV_PURGE, 1); @@ -2106,7 +2084,7 @@ ulint srv_get_activity_count(void) /*========================*/ { - return(srv_sys->activity_count); + return(srv_sys.activity_count); } /*******************************************************************//** @@ -2117,7 +2095,7 @@ srv_check_activity( /*===============*/ ulint old_activity_count) /*!< in: old activity count */ { - return(srv_sys->activity_count != old_activity_count); + return(srv_sys.activity_count != old_activity_count); } /********************************************************************//** @@ -2187,7 +2165,7 @@ srv_shutdown_print_master_pending( time_elapsed = ut_difftime(current_time, *last_print_time); if (time_elapsed > 60) { - *last_print_time = ut_time(); + *last_print_time = current_time; if (n_tables_to_drop) { ib::info() << "Waiting for " << n_tables_to_drop @@ -2290,7 +2268,7 @@ srv_master_do_active_tasks(void) ut_d(srv_master_do_disabled_loop()); - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2315,11 +2293,7 @@ srv_master_do_active_tasks(void) /* Now see if various tasks that are performed at defined intervals need to be performed. 
*/ - if (srv_shutdown_state > 0) { - return; - } - - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2334,7 +2308,7 @@ srv_master_do_active_tasks(void) MONITOR_SRV_DICT_LRU_MICROSECOND, counter_time); } - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2379,7 +2353,7 @@ srv_master_do_idle_tasks(void) ut_d(srv_master_do_disabled_loop()); - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2395,7 +2369,7 @@ srv_master_do_idle_tasks(void) MONITOR_INC_TIME_IN_MICRO_SECS( MONITOR_SRV_IBUF_MERGE_MICROSECOND, counter_time); - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2413,7 +2387,7 @@ srv_master_do_idle_tasks(void) MONITOR_INC_TIME_IN_MICRO_SECS( MONITOR_SRV_LOG_FLUSH_MICROSECOND, counter_time); - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2424,70 +2398,42 @@ srv_master_do_idle_tasks(void) counter_time); } -/*********************************************************************//** -Perform the tasks during shutdown. The tasks that we do at shutdown -depend on srv_fast_shutdown: -2 => very fast shutdown => do no book keeping -1 => normal shutdown => clear drop table queue and make checkpoint -0 => slow shutdown => in addition to above do complete purge and ibuf -merge -@return TRUE if some work was done. FALSE otherwise */ +/** Perform shutdown tasks. 
+@param[in] ibuf_merge whether to complete the change buffer merge */ static -ibool -srv_master_do_shutdown_tasks( -/*=========================*/ - ib_time_t* last_print_time)/*!< last time the function - print the message */ +void +srv_shutdown(bool ibuf_merge) { - ulint n_bytes_merged = 0; - ulint n_tables_to_drop = 0; - - ut_ad(!srv_read_only_mode); + ulint n_bytes_merged = 0; + ulint n_tables_to_drop; + ib_time_t now = ut_time(); - ++srv_main_shutdown_loops; - - ut_a(srv_shutdown_state > 0); - - /* In very fast shutdown none of the following is necessary */ - if (srv_fast_shutdown == 2) { - return(FALSE); - } - - /* ALTER TABLE in MySQL requires on Unix that the table handler - can drop tables lazily after there no longer are SELECT - queries to them. */ - srv_main_thread_op_info = "doing background drop tables"; - n_tables_to_drop = row_drop_tables_for_mysql_in_background(); - - /* make sure that there is enough reusable space in the redo - log files */ - srv_main_thread_op_info = "checking free log space"; - log_free_check(); - - /* In case of normal shutdown we don't do ibuf merge or purge */ - if (srv_fast_shutdown == 1) { - goto func_exit; - } - - /* Do an ibuf merge */ - srv_main_thread_op_info = "doing insert buffer merge"; - n_bytes_merged = ibuf_merge_in_background(true); - - /* Flush logs if needed */ - srv_sync_log_buffer_in_background(); - -func_exit: - /* Make a new checkpoint about once in 10 seconds */ - srv_main_thread_op_info = "making checkpoint"; - log_checkpoint(TRUE, FALSE); - - /* Print progress message every 60 seconds during shutdown */ - if (srv_shutdown_state > 0 && srv_print_verbose_log) { - srv_shutdown_print_master_pending( - last_print_time, n_tables_to_drop, n_bytes_merged); - } + do { + ut_ad(!srv_read_only_mode); + ut_ad(srv_shutdown_state == SRV_SHUTDOWN_CLEANUP); + ++srv_main_shutdown_loops; + + /* FIXME: Remove the background DROP TABLE queue; it is not + crash-safe and breaks ACID. 
*/ + srv_main_thread_op_info = "doing background drop tables"; + n_tables_to_drop = row_drop_tables_for_mysql_in_background(); + + if (ibuf_merge) { + srv_main_thread_op_info = "checking free log space"; + log_free_check(); + srv_main_thread_op_info = "doing insert buffer merge"; + n_bytes_merged = ibuf_merge_in_background(true); + + /* Flush logs if needed */ + srv_sync_log_buffer_in_background(); + } - return(n_bytes_merged || n_tables_to_drop); + /* Print progress message every 60 seconds during shutdown */ + if (srv_print_verbose_log) { + srv_shutdown_print_master_pending( + &now, n_tables_to_drop, n_bytes_merged); + } + } while (n_bytes_merged || n_tables_to_drop); } /*********************************************************************//** @@ -2520,7 +2466,6 @@ DECLARE_THREAD(srv_master_thread)( srv_slot_t* slot; ulint old_activity_count = srv_get_activity_count(); - ib_time_t last_print_time; ut_ad(!srv_read_only_mode); @@ -2537,9 +2482,8 @@ DECLARE_THREAD(srv_master_thread)( srv_main_thread_id = os_thread_pf(os_thread_get_curr_id()); slot = srv_reserve_slot(SRV_MASTER); - ut_a(slot == srv_sys->sys_threads); + ut_a(slot == srv_sys.sys_threads); - last_print_time = ut_time(); loop: if (srv_force_recovery >= SRV_FORCE_NO_BACKGROUND) { goto suspend_thread; @@ -2559,14 +2503,26 @@ loop: } } - while (srv_shutdown_state != SRV_SHUTDOWN_EXIT_THREADS - && srv_master_do_shutdown_tasks(&last_print_time)) { - - /* Shouldn't loop here in case of very fast shutdown */ - ut_ad(srv_fast_shutdown < 2); +suspend_thread: + switch (srv_shutdown_state) { + case SRV_SHUTDOWN_NONE: + break; + case SRV_SHUTDOWN_FLUSH_PHASE: + case SRV_SHUTDOWN_LAST_PHASE: + ut_ad(0); + /* fall through */ + case SRV_SHUTDOWN_EXIT_THREADS: + /* srv_init_abort() must have been invoked */ + case SRV_SHUTDOWN_CLEANUP: + if (srv_shutdown_state == SRV_SHUTDOWN_CLEANUP + && srv_fast_shutdown < 2) { + srv_shutdown(srv_fast_shutdown == 0); + } + srv_suspend_thread(slot); + my_thread_end(); + 
os_thread_exit(); } -suspend_thread: srv_main_thread_op_info = "suspending"; srv_suspend_thread(slot); @@ -2578,44 +2534,32 @@ suspend_thread: srv_main_thread_op_info = "waiting for server activity"; srv_resume_thread(slot); - - if (srv_shutdown_state != SRV_SHUTDOWN_EXIT_THREADS) { - goto loop; - } - - my_thread_end(); - os_thread_exit(); - DBUG_RETURN(0); + goto loop; } -/** -Check if purge should stop. -@return true if it should shutdown. */ +/** Check if purge should stop. +@param[in] n_purged pages purged in the last batch +@return whether purge should exit */ static bool -srv_purge_should_exit( - MYSQL_THD thd, - ulint n_purged) /*!< in: pages purged in last batch */ +srv_purge_should_exit(ulint n_purged) { - switch (srv_shutdown_state) { - case SRV_SHUTDOWN_NONE: - if ((!srv_was_started || srv_running) - && !thd_kill_level(thd)) { - /* Normal operation. */ - break; - } - /* close_connections() was called; fall through */ - case SRV_SHUTDOWN_CLEANUP: - case SRV_SHUTDOWN_EXIT_THREADS: - /* Exit unless slow shutdown requested or all done. */ - return(srv_fast_shutdown != 0 || n_purged == 0); + ut_ad(srv_shutdown_state == SRV_SHUTDOWN_NONE + || srv_shutdown_state == SRV_SHUTDOWN_CLEANUP); - case SRV_SHUTDOWN_LAST_PHASE: - case SRV_SHUTDOWN_FLUSH_PHASE: - ut_error; + if (srv_undo_sources) { + return(false); } - - return(false); + if (srv_fast_shutdown) { + return(true); + } + /* Slow shutdown was requested. */ + if (n_purged) { + /* The previous round still did some work. */ + return(false); + } + /* Exit if there are no active transactions to roll back. 
*/ + return(trx_sys_any_active_transactions() == 0); } /*********************************************************************//** @@ -2631,18 +2575,18 @@ srv_task_execute(void) ut_ad(!srv_read_only_mode); ut_a(srv_force_recovery < SRV_FORCE_NO_BACKGROUND); - mutex_enter(&srv_sys->tasks_mutex); + mutex_enter(&srv_sys.tasks_mutex); - if (UT_LIST_GET_LEN(srv_sys->tasks) > 0) { + if (UT_LIST_GET_LEN(srv_sys.tasks) > 0) { - thr = UT_LIST_GET_FIRST(srv_sys->tasks); + thr = UT_LIST_GET_FIRST(srv_sys.tasks); ut_a(que_node_get_type(thr->child) == QUE_NODE_PURGE); - UT_LIST_REMOVE(srv_sys->tasks, thr); + UT_LIST_REMOVE(srv_sys.tasks, thr); } - mutex_exit(&srv_sys->tasks_mutex); + mutex_exit(&srv_sys.tasks_mutex); if (thr != NULL) { @@ -2665,6 +2609,8 @@ DECLARE_THREAD(srv_worker_thread)( void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); + srv_slot_t* slot; ut_ad(!srv_read_only_mode); @@ -2680,7 +2626,7 @@ DECLARE_THREAD(srv_worker_thread)( slot = srv_reserve_slot(SRV_WORKER); ut_a(srv_n_purge_threads > 1); - ut_a(my_atomic_loadlint(&srv_sys->n_threads_active[SRV_WORKER]) + ut_a(my_atomic_loadlint(&srv_sys.n_threads_active[SRV_WORKER]) < static_cast<lint>(srv_n_purge_threads)); /* We need to ensure that the worker threads exit after the @@ -2726,16 +2672,12 @@ DECLARE_THREAD(srv_worker_thread)( OS_THREAD_DUMMY_RETURN; /* Not reached, avoid compiler warning */ } -/*********************************************************************//** -Do the actual purge operation. +/** Do the actual purge operation. +@param[in,out] n_total_purged total number of purged pages @return length of history list before the last purge batch. 
*/ static ulint -srv_do_purge( -/*=========*/ - MYSQL_THD thd, - ulint n_threads, /*!< in: number of threads to use */ - ulint* n_total_purged) /*!< in/out: total pages purged */ +srv_do_purge(ulint* n_total_purged) { ulint n_pages_purged; @@ -2743,6 +2685,7 @@ srv_do_purge( static ulint n_use_threads = 0; static ulint rseg_history_len = 0; ulint old_activity_count = srv_get_activity_count(); + const ulint n_threads = srv_n_purge_threads; ut_a(n_threads > 0); ut_ad(!srv_read_only_mode); @@ -2804,7 +2747,7 @@ srv_do_purge( *n_total_purged += n_pages_purged; - } while (!srv_purge_should_exit(thd, n_pages_purged) + } while (!srv_purge_should_exit(n_pages_purged) && n_pages_purged > 0 && purge_sys->state == PURGE_STATE_RUN); @@ -2817,7 +2760,6 @@ static void srv_purge_coordinator_suspend( /*==========================*/ - MYSQL_THD thd, srv_slot_t* slot, /*!< in/out: Purge coordinator thread slot */ ulint rseg_history_len) /*!< in: history list length @@ -2879,7 +2821,7 @@ srv_purge_coordinator_suspend( } rw_lock_x_unlock(&purge_sys->latch); - } while (stop && !thd_kill_level(thd)); + } while (stop && srv_undo_sources); srv_resume_thread(slot, 0, false); } @@ -2929,51 +2871,23 @@ DECLARE_THREAD(srv_purge_coordinator_thread)( purge didn't purge any records then wait for activity. */ if (srv_shutdown_state == SRV_SHUTDOWN_NONE - && !thd_kill_level(thd) + && srv_undo_sources && (purge_sys->state == PURGE_STATE_STOP || n_total_purged == 0)) { - srv_purge_coordinator_suspend(thd, slot, rseg_history_len); + srv_purge_coordinator_suspend(slot, rseg_history_len); } ut_ad(!slot->suspended); - if (srv_purge_should_exit(thd, n_total_purged)) { + if (srv_purge_should_exit(n_total_purged)) { break; } n_total_purged = 0; - rseg_history_len = srv_do_purge( - thd, srv_n_purge_threads, &n_total_purged); - - } while (!srv_purge_should_exit(thd, n_total_purged)); - - /* Ensure that we don't jump out of the loop unless the - exit condition is satisfied. 
*/ - - ut_a(srv_purge_should_exit(thd, n_total_purged)); - - /* Ensure that all records are purged on slow shutdown. */ - while (srv_fast_shutdown == 0 - && trx_purge(1, srv_purge_batch_size, false)); - -#ifdef UNIV_DEBUG - if (srv_fast_shutdown == 0) { - trx_commit_disallowed = true; - } -#endif /* UNIV_DEBUG */ - - /* This trx_purge is called to remove any undo records (added by - background threads) after completion of the above loop. When - srv_fast_shutdown != 0, a large batch size can cause significant - delay in shutdown ,so reducing the batch size to magic number 20 - (which was default in 5.5), which we hope will be sufficient to - remove all the undo records */ - - if (trx_purge(1, std::min(srv_purge_batch_size, 20UL), true)) { - ut_a(srv_fast_shutdown); - } + rseg_history_len = srv_do_purge(&n_total_purged); + } while (!srv_purge_should_exit(n_total_purged)); /* The task queue should always be empty, independent of fast shutdown state. */ @@ -3022,11 +2936,11 @@ srv_que_task_enqueue_low( que_thr_t* thr) /*!< in: query thread */ { ut_ad(!srv_read_only_mode); - mutex_enter(&srv_sys->tasks_mutex); + mutex_enter(&srv_sys.tasks_mutex); - UT_LIST_ADD_LAST(srv_sys->tasks, thr); + UT_LIST_ADD_LAST(srv_sys.tasks, thr); - mutex_exit(&srv_sys->tasks_mutex); + mutex_exit(&srv_sys.tasks_mutex); srv_release_threads(SRV_WORKER, 1); } @@ -3042,11 +2956,11 @@ srv_get_task_queue_length(void) ut_ad(!srv_read_only_mode); - mutex_enter(&srv_sys->tasks_mutex); + mutex_enter(&srv_sys.tasks_mutex); - n_tasks = UT_LIST_GET_LEN(srv_sys->tasks); + n_tasks = UT_LIST_GET_LEN(srv_sys.tasks); - mutex_exit(&srv_sys->tasks_mutex); + mutex_exit(&srv_sys.tasks_mutex); return(n_tasks); } diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index 761e9d2d20d..3fb54c1280b 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright 
(c) 1996, 2016, Oracle and/or its affiliates. All rights reserved. +Copyright (c) 1996, 2017, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2008, Google Inc. Copyright (c) 2009, Percona Inc. Copyright (c) 2013, 2017, MariaDB Corporation. @@ -126,6 +126,9 @@ ibool srv_start_raw_disk_in_use; /** Number of IO threads to use */ ulint srv_n_file_io_threads; +/** UNDO tablespaces starts with space id. */ +ulint srv_undo_space_id_start; + /** TRUE if the server is being started, before rolling back any incomplete transactions */ bool srv_startup_is_before_trx_rollback_phase; @@ -136,7 +139,11 @@ bool srv_sys_tablespaces_open; /** TRUE if the server was successfully started */ bool srv_was_started; /** TRUE if innobase_start_or_create_for_mysql() has been called */ -static bool srv_start_has_been_called; +static bool srv_start_has_been_called; + +/** Whether any undo log records can be generated */ +UNIV_INTERN bool srv_undo_sources; + #ifdef UNIV_DEBUG /** InnoDB system tablespace to set during recovery */ UNIV_INTERN uint srv_sys_space_size_debug; @@ -146,15 +153,23 @@ UNIV_INTERN uint srv_sys_space_size_debug; determine which threads need to be stopped if we need to abort during the initialisation step. */ enum srv_start_state_t { + /** No thread started */ SRV_START_STATE_NONE = 0, /*!< No thread started */ + /** lock_wait_timeout_thread started */ SRV_START_STATE_LOCK_SYS = 1, /*!< Started lock-timeout thread. */ - SRV_START_STATE_IO = 2, /*!< Started IO threads */ - SRV_START_STATE_MONITOR = 4, /*!< Started montior thread */ - SRV_START_STATE_MASTER = 8, /*!< Started master threadd. */ - SRV_START_STATE_PURGE = 16, /*!< Started purge thread(s) */ - SRV_START_STATE_STAT = 32 /*!< Started bufdump + dict stat - and FTS optimize thread. 
*/ + /** buf_flush_page_cleaner_coordinator, + buf_flush_page_cleaner_worker started */ + SRV_START_STATE_IO = 2, + /** srv_error_monitor_thread, srv_monitor_thread started */ + SRV_START_STATE_MONITOR = 4, + /** srv_master_thread started */ + SRV_START_STATE_MASTER = 8, + /** srv_purge_coordinator_thread, srv_worker_thread started */ + SRV_START_STATE_PURGE = 16, + /** fil_crypt_thread, btr_defragment_thread started + (all background threads that can generate redo log but not undo log */ + SRV_START_STATE_REDO = 32 }; /** Track server thrd starting phases */ @@ -165,7 +180,7 @@ SRV_SHUTDOWN_CLEANUP and then to SRV_SHUTDOWN_LAST_PHASE, and so on */ enum srv_shutdown_t srv_shutdown_state = SRV_SHUTDOWN_NONE; /** Files comprising the system tablespace */ -static os_file_t files[1000]; +static pfs_os_file_t files[1000]; /** io_handler_thread parameters for thread identification */ static ulint n[SRV_MAX_N_IO_THREADS + 6]; @@ -182,9 +197,6 @@ static os_thread_t buf_dump_thread_handle; static os_thread_t dict_stats_thread_handle; /** Status variables, is thread started ?*/ static bool thread_started[SRV_MAX_N_IO_THREADS + 6 + 32] = {false}; -static bool buf_dump_thread_started = false; -static bool dict_stats_thread_started = false; -static bool buf_flush_page_cleaner_thread_started = false; /** Name of srv_monitor_file */ static char* srv_monitor_file_name; @@ -344,7 +356,7 @@ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t create_log_file( /*============*/ - os_file_t* file, /*!< out: file handle */ + pfs_os_file_t* file, /*!< out: file handle */ const char* name) /*!< in: log file name */ { bool ret; @@ -448,8 +460,7 @@ create_log_files( fil_space_t* log_space = fil_space_create( "innodb_redo_log", SRV_LOG_SPACE_FIRST_ID, 0, FIL_TYPE_LOG, - NULL, /* innodb_encrypt_log works at a different level */ - true /* this is create */); + NULL/* innodb_encrypt_log works at a different level */); ut_a(fil_validate()); ut_a(log_space != NULL); @@ -475,9 +486,8 @@ 
create_log_files( } } - if (!log_group_init(0, srv_n_log_files, - srv_log_file_size * UNIV_PAGE_SIZE, - SRV_LOG_SPACE_FIRST_ID)) { + log_init(srv_n_log_files, srv_log_file_size * UNIV_PAGE_SIZE); + if (!log_set_capacity()) { return(DB_ERROR); } @@ -557,7 +567,7 @@ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t open_log_file( /*==========*/ - os_file_t* file, /*!< out: file handle */ + pfs_os_file_t* file, /*!< out: file handle */ const char* name, /*!< in: log file name */ os_offset_t* size) /*!< out: file size */ { @@ -588,7 +598,7 @@ srv_undo_tablespace_create( const char* name, /*!< in: tablespace name */ ulint size) /*!< in: tablespace size in pages */ { - os_file_t fh; + pfs_os_file_t fh; bool ret; dberr_t err = DB_SUCCESS; @@ -657,7 +667,7 @@ srv_undo_tablespace_open( const char* name, /*!< in: tablespace file name */ ulint space_id) /*!< in: tablespace id */ { - os_file_t fh; + pfs_os_file_t fh; bool ret; dberr_t err = DB_ERROR; char undo_name[sizeof "innodb_undo000"]; @@ -705,7 +715,7 @@ srv_undo_tablespace_open( space = fil_space_create( undo_name, space_id, FSP_FLAGS_PAGE_SSIZE(), - FIL_TYPE_TABLESPACE, NULL, true); + FIL_TYPE_TABLESPACE, NULL); ut_a(fil_validate()); ut_a(space); @@ -833,13 +843,23 @@ srv_undo_tablespaces_init(bool create_new_db) for (i = 0; create_new_db && i < srv_undo_tablespaces; ++i) { char name[OS_FILE_MAX_PATH]; + ulint space_id = i + 1; + + DBUG_EXECUTE_IF("innodb_undo_upgrade", + space_id = i + 3;); ut_snprintf( name, sizeof(name), "%s%cundo%03zu", - srv_undo_dir, OS_PATH_SEPARATOR, i + 1); + srv_undo_dir, OS_PATH_SEPARATOR, space_id); + + if (i == 0) { + srv_undo_space_id_start = space_id; + prev_space_id = srv_undo_space_id_start - 1; + } + + undo_tablespace_ids[i] = space_id; - /* Undo space ids start from 1. 
*/ err = srv_undo_tablespace_create( name, SRV_UNDO_TABLESPACE_SIZE_IN_PAGES); @@ -897,11 +917,10 @@ srv_undo_tablespaces_init(bool create_new_db) srv_undo_tablespaces_active = srv_undo_tablespaces; n_undo_tablespaces = srv_undo_tablespaces; - for (i = 1; i <= n_undo_tablespaces; ++i) { - undo_tablespace_ids[i - 1] = i; + if (n_undo_tablespaces != 0) { + srv_undo_space_id_start = undo_tablespace_ids[0]; + prev_space_id = srv_undo_space_id_start - 1; } - - undo_tablespace_ids[i] = ULINT_UNDEFINED; } /* Open all the undo tablespaces that are currently in use. If we @@ -925,8 +944,6 @@ srv_undo_tablespaces_init(bool create_new_db) ut_a(undo_tablespace_ids[i] != 0); ut_a(undo_tablespace_ids[i] != ULINT_UNDEFINED); - /* Undo space ids start from 1. */ - err = srv_undo_tablespace_open(name, undo_tablespace_ids[i]); if (err != DB_SUCCESS) { @@ -937,6 +954,12 @@ srv_undo_tablespaces_init(bool create_new_db) prev_space_id = undo_tablespace_ids[i]; + /* Note the first undo tablespace id in case of + no active undo tablespace. */ + if (0 == srv_undo_tablespaces_open++) { + srv_undo_space_id_start = undo_tablespace_ids[i]; + } + ++srv_undo_tablespaces_open; } @@ -964,6 +987,12 @@ srv_undo_tablespaces_init(bool create_new_db) ++srv_undo_tablespaces_open; } + /* Initialize srv_undo_space_id_start=0 when there are no + dedicated undo tablespaces. */ + if (n_undo_tablespaces == 0) { + srv_undo_space_id_start = 0; + } + /* If the user says that there are fewer than what we find we tolerate that discrepancy but not the inverse. Because there could be unused undo tablespaces for future use. 
*/ @@ -993,10 +1022,11 @@ srv_undo_tablespaces_init(bool create_new_db) mtr_start(&mtr); /* The undo log tablespace */ - for (i = 1; i <= n_undo_tablespaces; ++i) { + for (i = 0; i < n_undo_tablespaces; ++i) { fsp_header_init( - i, SRV_UNDO_TABLESPACE_SIZE_IN_PAGES, &mtr); + undo_tablespace_ids[i], + SRV_UNDO_TABLESPACE_SIZE_IN_PAGES, &mtr); } mtr_commit(&mtr); @@ -1203,10 +1233,6 @@ srv_shutdown_all_bg_threads() { srv_shutdown_state = SRV_SHUTDOWN_EXIT_THREADS; - if (!srv_start_state) { - return; - } - /* All threads end up waiting for certain events. Put those events to the signaled state. Then the threads will exit themselves after os_event_wait(). */ @@ -1312,7 +1338,7 @@ srv_init_abort_low( dberr_t err) { if (create_new_db) { - ib::error() << "InnoDB Database creation was aborted" + ib::error() << "Database creation was aborted" #ifdef UNIV_DEBUG " at " << innobase_basename(file) << "[" << line << "]" #endif /* UNIV_DEBUG */ @@ -1429,8 +1455,7 @@ Starts InnoDB and creates a new database if database files are not found and the user wants. 
@return DB_SUCCESS or error code */ dberr_t -innobase_start_or_create_for_mysql(void) -/*====================================*/ +innobase_start_or_create_for_mysql() { bool create_new_db = false; lsn_t flushed_lsn; @@ -1446,6 +1471,10 @@ innobase_start_or_create_for_mysql(void) srv_read_only_mode = true; } + if (srv_force_recovery == SRV_FORCE_NO_LOG_REDO) { + srv_read_only_mode = 1; + } + high_level_read_only = srv_read_only_mode || srv_force_recovery > SRV_FORCE_NO_TRX_UNDO; @@ -1808,12 +1837,11 @@ innobase_start_or_create_for_mysql(void) #endif /* UNIV_DEBUG */ fsp_init(); - log_init(); + log_sys_init(); recv_sys_create(); recv_sys_init(buf_pool_get_curr_size()); lock_sys_create(srv_lock_table_size); - srv_start_state_set(SRV_START_STATE_LOCK_SYS); /* Create i/o-handler threads: */ @@ -1828,21 +1856,15 @@ innobase_start_or_create_for_mysql(void) if (!srv_read_only_mode) { buf_flush_page_cleaner_init(); + buf_page_cleaner_is_active = true; os_thread_create(buf_flush_page_cleaner_coordinator, NULL, NULL); - buf_flush_page_cleaner_thread_started = true; - for (i = 1; i < srv_n_page_cleaners; ++i) { os_thread_create(buf_flush_page_cleaner_worker, NULL, NULL); } - /* Make sure page cleaner is active. 
*/ - while (!buf_page_cleaner_is_active) { - os_thread_sleep(10000); - } - srv_start_state_set(SRV_START_STATE_IO); } @@ -2044,8 +2066,7 @@ innobase_start_or_create_for_mysql(void) "innodb_redo_log", SRV_LOG_SPACE_FIRST_ID, 0, FIL_TYPE_LOG, - NULL /* no encryption yet */, - true /* create */); + NULL /* no encryption yet */); ut_a(fil_validate()); ut_a(log_space); @@ -2064,8 +2085,9 @@ innobase_start_or_create_for_mysql(void) } } - if (!log_group_init(0, i, srv_log_file_size * UNIV_PAGE_SIZE, - SRV_LOG_SPACE_FIRST_ID)) { + log_init(i, srv_log_file_size * UNIV_PAGE_SIZE); + + if (!log_set_capacity()) { return(srv_init_abort(DB_ERROR)); } } @@ -2102,14 +2124,24 @@ files_checked: mtr_start(&mtr); - bool ret = fsp_header_init(0, sum_of_new_sizes, &mtr); + fsp_header_init(0, sum_of_new_sizes, &mtr); + + compile_time_assert(TRX_SYS_SPACE == 0); + compile_time_assert(IBUF_SPACE_ID == 0); + + ulint ibuf_root = btr_create( + DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF, + 0, univ_page_size, DICT_IBUF_ID_MIN, + dict_ind_redundant, NULL, &mtr); mtr_commit(&mtr); - if (!ret) { + if (ibuf_root == FIL_NULL) { return(srv_init_abort(DB_ERROR)); } + ut_ad(ibuf_root == IBUF_TREE_ROOT_PAGE_NO); + /* To maintain backward compatibility we create only the first rollback segment before the double write buffer. All the remaining rollback segments will be created later, @@ -2237,7 +2269,7 @@ files_checked: const ulint sum_of_data_file_sizes = srv_sys_space.get_sum_of_sizes(); /* Compare the system tablespace file size to what is - stored in FSP_SIZE. In open_or_create_data_files() + stored in FSP_SIZE. In srv_sys_space.open_or_create() we already checked that the file sizes match the innodb_data_file_path specification. */ if (srv_read_only_mode @@ -2344,7 +2376,6 @@ files_checked: } } - /* Validate a few system page types that were left uninitialized by older versions of MySQL. 
*/ if (!high_level_read_only) { @@ -2441,6 +2472,7 @@ files_checked: } recv_recovery_rollback_active(); + srv_startup_is_before_trx_rollback_phase = FALSE; } ut_ad(err == DB_SUCCESS); @@ -2496,7 +2528,8 @@ files_checked: srv_monitor_thread, NULL, thread_ids + 4 + SRV_MAX_N_IO_THREADS); thread_started[4 + SRV_MAX_N_IO_THREADS] = true; - srv_start_state_set(SRV_START_STATE_MONITOR); + srv_start_state |= SRV_START_STATE_LOCK_SYS + | SRV_START_STATE_MONITOR; } /* Create the SYS_FOREIGN and SYS_FOREIGN_COLS system tables */ @@ -2546,6 +2579,15 @@ files_checked: NULL, thread_ids + (1 + SRV_MAX_N_IO_THREADS)); thread_started[1 + SRV_MAX_N_IO_THREADS] = true; srv_start_state_set(SRV_START_STATE_MASTER); + + srv_undo_sources = true; + /* Create the dict stats gathering thread */ + srv_dict_stats_thread_active = true; + dict_stats_thread_handle = os_thread_create( + dict_stats_thread, NULL, NULL); + + /* Create the thread that will optimize the FTS sub-system. */ + fts_optimize_init(); } if (!srv_read_only_mode @@ -2626,11 +2668,10 @@ files_checked: if (!wsrep_recovery) { #endif /* WITH_WSREP */ /* Create the buffer pool dump/load thread */ + srv_buf_dump_thread_active = true; buf_dump_thread_handle= os_thread_create(buf_dump_thread, NULL, NULL); - srv_buf_dump_thread_active = true; - buf_dump_thread_started = true; #ifdef WITH_WSREP } else { ib::warn() << @@ -2654,22 +2695,15 @@ files_checked: */ log_make_checkpoint_at(LSN_MAX, TRUE); - /* Create the dict stats gathering thread */ - dict_stats_thread_handle = os_thread_create( - dict_stats_thread, NULL, NULL); - srv_dict_stats_thread_active = true; - dict_stats_thread_started = true; - - /* Create the thread that will optimize the FTS sub-system. */ - fts_optimize_init(); - /* Init data for datafile scrub threads */ btr_scrub_init(); /* Initialize online defragmentation. 
*/ btr_defragment_init(); + btr_defragment_thread_active = true; + os_thread_create(btr_defragment_thread, NULL, NULL); - srv_start_state_set(SRV_START_STATE_STAT); + srv_start_state |= SRV_START_STATE_REDO; } /* Create the buffer pool resize thread */ @@ -2709,16 +2743,19 @@ srv_fts_close(void) } #endif -/****************************************************************//** -Shuts down background threads that can generate undo pages. */ +/** Shut down background threads that can generate undo log. */ void -srv_shutdown_bg_undo_sources(void) -/*===========================*/ +srv_shutdown_bg_undo_sources() { - if (srv_start_state_is_set(SRV_START_STATE_STAT)) { + if (srv_undo_sources) { ut_ad(!srv_read_only_mode); fts_optimize_shutdown(); dict_stats_shutdown(); + while (row_get_background_drop_list_len_low()) { + srv_wake_master_thread(); + os_thread_yield(); + } + srv_undo_sources = false; } } @@ -2727,10 +2764,7 @@ void innodb_shutdown() { ut_ad(!srv_running); - - if (srv_fast_shutdown) { - srv_shutdown_bg_undo_sources(); - } + ut_ad(!srv_undo_sources); /* 1. Flush the buffer pool to disk, write the current lsn to the tablespace header(s), and copy all log data to archive. @@ -2783,14 +2817,14 @@ innodb_shutdown() dict_stats_thread_deinit(); } - if (srv_start_state_is_set(SRV_START_STATE_STAT)) { + if (srv_start_state_is_set(SRV_START_STATE_REDO)) { ut_ad(!srv_read_only_mode); /* srv_shutdown_bg_undo_sources() already invoked fts_optimize_shutdown(); dict_stats_shutdown(); */ fil_crypt_threads_cleanup(); btr_scrub_cleanup(); - /* FIXME: call btr_defragment_shutdown(); */ + btr_defragment_shutdown(); } /* This must be disabled before closing the buffer pool @@ -2855,10 +2889,6 @@ innodb_shutdown() buf_pool_free(srv_buf_pool_instances); } - /* 6. Free the thread management resoruces. */ - os_thread_free(); - - /* 7. Free the synchronisation infrastructure. 
*/ sync_check_close(); if (dict_foreign_err_file) { @@ -2875,85 +2905,6 @@ innodb_shutdown() srv_start_has_been_called = false; } -#if 0 // TODO: Enable this in WL#6608 -/******************************************************************** -Signal all per-table background threads to shutdown, and wait for them to do -so. */ -static -void -srv_shutdown_table_bg_threads(void) -/*===============================*/ -{ - dict_table_t* table; - dict_table_t* first; - dict_table_t* last = NULL; - - mutex_enter(&dict_sys->mutex); - - /* Signal all threads that they should stop. */ - table = UT_LIST_GET_FIRST(dict_sys->table_LRU); - first = table; - while (table) { - dict_table_t* next; - fts_t* fts = table->fts; - - if (fts != NULL) { - fts_start_shutdown(table, fts); - } - - next = UT_LIST_GET_NEXT(table_LRU, table); - - if (!next) { - last = table; - } - - table = next; - } - - /* We must release dict_sys->mutex here; if we hold on to it in the - loop below, we will deadlock if any of the background threads try to - acquire it (for example, the FTS thread by calling que_eval_sql). - - Releasing it here and going through dict_sys->table_LRU without - holding it is safe because: - - a) MySQL only starts the shutdown procedure after all client - threads have been disconnected and no new ones are accepted, so no - new tables are added or old ones dropped. - - b) Despite its name, the list is not LRU, and the order stays - fixed. - - To safeguard against the above assumptions ever changing, we store - the first and last items in the list above, and then check that - they've stayed the same below. */ - - mutex_exit(&dict_sys->mutex); - - /* Wait for the threads of each table to stop. This is not inside - the above loop, because by signaling all the threads first we can - overlap their shutting down delays. 
*/ - table = UT_LIST_GET_FIRST(dict_sys->table_LRU); - ut_a(first == table); - while (table) { - dict_table_t* next; - fts_t* fts = table->fts; - - if (fts != NULL) { - fts_shutdown(table, fts); - } - - next = UT_LIST_GET_NEXT(table_LRU, table); - - if (table == last) { - ut_a(!next); - } - - table = next; - } -} -#endif - /** Get the meta-data filename from the table name for a single-table tablespace. @param[in] table table object diff --git a/storage/innobase/sync/sync0debug.cc b/storage/innobase/sync/sync0debug.cc index d0e55bda207..0d2dcea7605 100644 --- a/storage/innobase/sync/sync0debug.cc +++ b/storage/innobase/sync/sync0debug.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2014, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -298,29 +298,23 @@ struct LatchDebug { } /** Iterate over a thread's latches. - @param[in,out] functor The callback + @param[in] functor The callback @return true if the functor returns true. 
*/ - bool for_each(sync_check_functor_t& functor) + bool for_each(const sync_check_functor_t& functor) UNIV_NOTHROW { - const Latches* latches = thread_latches(); - - if (latches == 0) { - return(functor.result()); - } - - Latches::const_iterator end = latches->end(); - - for (Latches::const_iterator it = latches->begin(); - it != end; - ++it) { - - if (functor(it->m_level)) { - break; + if (const Latches* latches = thread_latches()) { + Latches::const_iterator end = latches->end(); + for (Latches::const_iterator it = latches->begin(); + it != end; ++it) { + + if (functor(it->m_level)) { + return(true); + } } } - return(functor.result()); + return(false); } /** Removes a latch from the thread level array if it is found there. @@ -1213,13 +1207,12 @@ sync_check_find(latch_level_t level) /** Iterate over the thread's latches. @param[in,out] functor called for each element. -@return false if the sync debug hasn't been initialised -@return the value returned by the functor */ +@return true if the functor returns true for any element */ bool -sync_check_iterate(sync_check_functor_t& functor) +sync_check_iterate(const sync_check_functor_t& functor) { - if (LatchDebug::instance() != NULL) { - return(LatchDebug::instance()->for_each(functor)); + if (LatchDebug* debug = LatchDebug::instance()) { + return(debug->for_each(functor)); } return(false); @@ -1493,8 +1486,6 @@ sync_latch_meta_init() LATCH_ADD_MUTEX(SYNC_ARRAY_MUTEX, SYNC_NO_ORDER_CHECK, sync_array_mutex_key); - LATCH_ADD_MUTEX(THREAD_MUTEX, SYNC_NO_ORDER_CHECK, thread_mutex_key); - LATCH_ADD_MUTEX(ZIP_PAD_MUTEX, SYNC_NO_ORDER_CHECK, zip_pad_mutex_key); LATCH_ADD_MUTEX(OS_AIO_READ_MUTEX, SYNC_NO_ORDER_CHECK, diff --git a/storage/innobase/trx/trx0i_s.cc b/storage/innobase/trx/trx0i_s.cc index fcc50b8c76d..7854ad2ab5a 100644 --- a/storage/innobase/trx/trx0i_s.cc +++ b/storage/innobase/trx/trx0i_s.cc @@ -589,10 +589,6 @@ thd_done: row->trx_foreign_key_error = NULL; } -#ifdef BTR_CUR_HASH_ADAPT - 
row->trx_has_search_latch = (ibool) trx->has_search_latch; -#endif /* BTR_CUR_HASH_ADAPT */ - row->trx_is_read_only = trx->read_only; row->trx_is_autocommit_non_locking = trx_is_autocommit_non_locking(trx); diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc index b21ec75c3a6..e317a38815b 100644 --- a/storage/innobase/trx/trx0purge.cc +++ b/storage/innobase/trx/trx0purge.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under @@ -61,7 +61,6 @@ trx_undo_rec_t trx_purge_dummy_rec; #ifdef UNIV_DEBUG my_bool srv_purge_view_update_only_debug; -bool trx_commit_disallowed = false; #endif /* UNIV_DEBUG */ /** Sentinel value */ @@ -148,12 +147,10 @@ TrxUndoRsegsIterator::set_next() ut_a(purge_sys->rseg->last_page_no != FIL_NULL); ut_ad(purge_sys->rseg->last_trx_no == m_trx_undo_rsegs.get_trx_no()); - /* We assume in purge of externally stored fields that - space id is in the range of UNDO tablespace space ids - unless space is system tablespace */ - ut_a(purge_sys->rseg->space <= srv_undo_tablespaces_open - || is_system_tablespace( - purge_sys->rseg->space)); + /* We assume in purge of externally stored fields that space id is + in the range of UNDO tablespace space ids */ + ut_a(purge_sys->rseg->space == TRX_SYS_SPACE + || srv_is_undo_tablespace(purge_sys->rseg->space)); ut_a(purge_sys->iter.trx_no <= purge_sys->rseg->last_trx_no); @@ -284,7 +281,18 @@ trx_purge_add_update_undo_to_history( hist_size + undo->size, MLOG_4BYTES, mtr); } - ut_ad(!trx_commit_disallowed); + /* Before any transaction-generating background threads or the + purge have been started, recv_recovery_rollback_active() can + start transactions in 
row_merge_drop_temp_indexes() and + fts_drop_orphaned_tables(), and roll back recovered transactions. + After the purge thread has been given permission to exit, + in fast shutdown, we may roll back transactions (trx->undo_no==0) + in THD::cleanup() invoked from unlink_thd(). */ + ut_ad(srv_undo_sources + || ((srv_startup_is_before_trx_rollback_phase + || trx_rollback_or_clean_is_active) + && purge_sys->state == PURGE_STATE_INIT) + || (trx->undo_no == 0 && srv_fast_shutdown)); /* Add the log as the first in the history list */ flst_add_first(rseg_header + TRX_RSEG_HISTORY, diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc index 0ed51a1f80d..735e25a7e1e 100644 --- a/storage/innobase/trx/trx0rec.cc +++ b/storage/innobase/trx/trx0rec.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1838,22 +1838,22 @@ transaction. 
dberr_t trx_undo_report_row_operation( /*==========================*/ - ulint flags, /*!< in: if BTR_NO_UNDO_LOG_FLAG bit is - set, does nothing */ - ulint op_type, /*!< in: TRX_UNDO_INSERT_OP or - TRX_UNDO_MODIFY_OP */ que_thr_t* thr, /*!< in: query thread */ dict_index_t* index, /*!< in: clustered index */ const dtuple_t* clust_entry, /*!< in: in the case of an insert, index entry to insert into the - clustered index, otherwise NULL */ + clustered index; in updates, + may contain a clustered index + record tuple that also contains + virtual columns of the table; + otherwise, NULL */ const upd_t* update, /*!< in: in the case of an update, the update vector, otherwise NULL */ ulint cmpl_info, /*!< in: compiler info on secondary index updates */ - const rec_t* rec, /*!< in: in case of an update or delete + const rec_t* rec, /*!< in: case of an update or delete marking, the record in the clustered - index, otherwise NULL */ + index; NULL if insert */ const ulint* offsets, /*!< in: rec_get_offsets(rec) */ roll_ptr_t* roll_ptr) /*!< out: rollback pointer to the inserted undo log record, @@ -1869,18 +1869,9 @@ trx_undo_report_row_operation( #endif /* UNIV_DEBUG */ ut_a(dict_index_is_clust(index)); + ut_ad(!update || rec); ut_ad(!rec || rec_offs_validate(rec, index, offsets)); ut_ad(!srv_read_only_mode); - ut_ad(op_type == TRX_UNDO_INSERT_OP || op_type == TRX_UNDO_MODIFY_OP); - ut_ad((op_type != TRX_UNDO_INSERT_OP) - || (clust_entry && !update && !rec)); - - if (flags & BTR_NO_UNDO_LOG_FLAG) { - - *roll_ptr = 0; - - return(DB_SUCCESS); - } trx = thr_get_trx(thr); @@ -1901,7 +1892,7 @@ trx_undo_report_row_operation( not listed there. */ trx->mod_tables.insert(index->table); - pundo = op_type == TRX_UNDO_INSERT_OP + pundo = !rec ? 
&trx->rsegs.m_redo.insert_undo : &trx->rsegs.m_redo.update_undo; rseg = trx->rsegs.m_redo.rseg; @@ -1912,7 +1903,7 @@ trx_undo_report_row_operation( if (*pundo) { err = DB_SUCCESS; - } else if (op_type == TRX_UNDO_INSERT_OP || is_temp) { + } else if (!rec || is_temp) { err = trx_undo_assign_undo(trx, rseg, pundo, TRX_UNDO_INSERT); } else { err = trx_undo_assign_undo(trx, rseg, pundo, TRX_UNDO_UPDATE); @@ -1936,23 +1927,14 @@ trx_undo_report_row_operation( buf_block_dbg_add_level(undo_block, SYNC_TRX_UNDO_PAGE); do { - page_t* undo_page; - ulint offset; - - undo_page = buf_block_get_frame(undo_block); ut_ad(page_no == undo_block->page.id.page_no()); - - switch (op_type) { - case TRX_UNDO_INSERT_OP: - offset = trx_undo_page_report_insert( - undo_page, trx, index, clust_entry, &mtr); - break; - default: - ut_ad(op_type == TRX_UNDO_MODIFY_OP); - offset = trx_undo_page_report_modify( + page_t* undo_page = buf_block_get_frame(undo_block); + ulint offset = !rec + ? trx_undo_page_report_insert( + undo_page, trx, index, clust_entry, &mtr) + : trx_undo_page_report_modify( undo_page, trx, index, rec, offsets, update, cmpl_info, clust_entry, &mtr); - } if (UNIV_UNLIKELY(offset == 0)) { /* The record did not fit on the page. 
We erase the @@ -2006,8 +1988,7 @@ trx_undo_report_row_operation( mutex_exit(&trx->undo_mutex); *roll_ptr = trx_undo_build_roll_ptr( - op_type == TRX_UNDO_INSERT_OP, - rseg->id, page_no, offset); + !rec, rseg->id, page_no, offset); return(DB_SUCCESS); } diff --git a/storage/innobase/trx/trx0roll.cc b/storage/innobase/trx/trx0roll.cc index 69f01e64b59..d6857b892da 100644 --- a/storage/innobase/trx/trx0roll.cc +++ b/storage/innobase/trx/trx0roll.cc @@ -869,7 +869,6 @@ DECLARE_THREAD(trx_rollback_or_clean_all_recovered)( os_thread_create */ { my_thread_init(); - ut_ad(!srv_read_only_mode); #ifdef UNIV_PFS_THREAD diff --git a/storage/innobase/trx/trx0rseg.cc b/storage/innobase/trx/trx0rseg.cc index 663566cf26f..b48f3e18f1b 100644 --- a/storage/innobase/trx/trx0rseg.cc +++ b/storage/innobase/trx/trx0rseg.cc @@ -263,7 +263,9 @@ trx_rseg_array_init() } /** Create a persistent rollback segment. -@param[in] space_id system or undo tablespace id */ +@param[in] space_id system or undo tablespace id +@return pointer to new rollback segment +@retval NULL on failure */ trx_rseg_t* trx_rseg_create(ulint space_id) { diff --git a/storage/innobase/trx/trx0sys.cc b/storage/innobase/trx/trx0sys.cc index d9d163a4008..ff99900b5b9 100644 --- a/storage/innobase/trx/trx0sys.cc +++ b/storage/innobase/trx/trx0sys.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under @@ -608,7 +608,8 @@ trx_sys_create_rsegs() /* Tablespace 0 is the system tablespace. Dedicated undo log tablespaces start from 1. */ ulint space = srv_undo_tablespaces > 0 - ? (i % srv_undo_tablespaces) + 1 + ? 
(i % srv_undo_tablespaces) + + srv_undo_space_id_start : TRX_SYS_SPACE; if (!trx_rseg_create(space)) { diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index 315697403c4..7fdbd808a60 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -272,8 +272,6 @@ struct TrxFactory { ut_a(trx->lock.wait_lock == NULL); ut_a(trx->lock.wait_thr == NULL); - - trx_assert_no_search_latch(trx); ut_a(trx->dict_operation_lock_mode == 0); if (trx->lock.lock_heap != NULL) { @@ -341,9 +339,6 @@ struct TrxFactory { ut_a(trx->lock.wait_thr == NULL); ut_a(trx->lock.wait_lock == NULL); - - trx_assert_no_search_latch(trx); - ut_a(trx->dict_operation_lock_mode == 0); ut_a(UT_LIST_GET_LEN(trx->lock.trx_locks) == 0); @@ -2413,13 +2408,6 @@ state_ok: (ulong) n_rec_locks); } -#ifdef BTR_CUR_HASH_ADAPT - if (trx->has_search_latch) { - newline = TRUE; - fputs(", holds adaptive hash latch", f); - } -#endif /* BTR_CUR_HASH_ADAPT */ - if (trx->undo_no != 0) { newline = TRUE; fprintf(f, ", undo log entries " TRX_ID_FMT, trx->undo_no); @@ -2551,11 +2539,6 @@ state_ok: fprintf(f, "que state %lu ", (ulong) trx->lock.que_state); } - if (trx->has_search_latch) { - newline = TRUE; - fputs(", holds adaptive hash latch", f); - } - if (trx->undo_no != 0) { newline = TRUE; fprintf(f, ", undo log entries " TRX_ID_FMT, trx->undo_no); diff --git a/storage/innobase/usr/usr0sess.cc b/storage/innobase/usr/usr0sess.cc index 85eca604d80..55ce9500e5c 100644 --- a/storage/innobase/usr/usr0sess.cc +++ b/storage/innobase/usr/usr0sess.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software diff --git a/storage/innobase/ut/ut0ut.cc b/storage/innobase/ut/ut0ut.cc index e7489861473..4eb9d45b0dc 100644 --- a/storage/innobase/ut/ut0ut.cc +++ b/storage/innobase/ut/ut0ut.cc @@ -38,8 +38,6 @@ Created 5/11/1994 Heikki Tuuri #include <string> #include "log.h" -/** A constant to prevent the compiler from optimizing ut_delay() away. */ -ibool ut_always_false = FALSE; #ifdef _WIN32 /*****************************************************************//** NOTE: The Windows epoch starts from 1601/01/01 whereas the Unix @@ -839,7 +837,7 @@ error::~error() fatal::~fatal() { sql_print_error("[FATAL] InnoDB: %s", m_oss.str().c_str()); - ut_error; + abort(); } error_or_warn::~error_or_warn() @@ -853,8 +851,11 @@ error_or_warn::~error_or_warn() fatal_or_error::~fatal_or_error() { - sql_print_error("InnoDB: %s", m_oss.str().c_str()); - ut_a(!m_fatal); + sql_print_error(m_fatal ? 
"[FATAL] InnoDB: %s" : "InnoDB: %s", + m_oss.str().c_str()); + if (m_fatal) { + abort(); + } } } // namespace ib diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc index cc721b6b04e..fb4fcaed817 100644 --- a/storage/maria/ha_maria.cc +++ b/storage/maria/ha_maria.cc @@ -834,7 +834,10 @@ extern "C" { int _ma_killed_ptr(HA_CHECK *param) { - return thd_killed((THD*)param->thd); + if (likely(thd_killed((THD*)param->thd)) == 0) + return 0; + my_errno= HA_ERR_ABORTED_BY_USER; + return 1; } @@ -1669,8 +1672,11 @@ int ha_maria::repair(THD *thd, HA_CHECK *param, bool do_optimize) } if (error && file->create_unique_index_by_sort && share->state.dupp_key != MAX_KEY) + { + my_errno= HA_ERR_FOUND_DUPP_KEY; print_keydup_error(table, &table->key_info[share->state.dupp_key], MYF(0)); + } } else { diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c index f92774a0321..84fba63a2f3 100644 --- a/storage/maria/ma_check.c +++ b/storage/maria/ma_check.c @@ -1364,6 +1364,7 @@ static int check_dynamic_record(HA_CHECK *param, MARIA_HA *info, int extend, pos=block_info.filepos+block_info.block_len; if (block_info.rec_len > (uint) share->base.max_pack_length) { + my_errno= HA_ERR_WRONG_IN_RECORD; _ma_check_print_error(param,"Found too long record (%lu) at %s", (ulong) block_info.rec_len, llstr(start_recpos,llbuff)); @@ -4220,6 +4221,7 @@ int maria_repair_parallel(HA_CHECK *param, register MARIA_HA *info, printf("Data records: %s\n", llstr(start_records, llbuff)); } + bzero(&new_data_cache, sizeof(new_data_cache)); if (initialize_variables_for_repair(param, &sort_info, &tmp_sort_param, info, rep_quick, &backup_share)) goto err; @@ -4995,6 +4997,7 @@ static int sort_get_next_record(MARIA_SORT_PARAM *sort_param) param->error_printed=1; param->retry_repair=1; param->testflag|=T_RETRY_WITHOUT_QUICK; + my_errno= HA_ERR_WRONG_IN_RECORD; DBUG_RETURN(1); /* Something wrong with data */ } b_type= _ma_get_block_info(info, &block_info,-1,pos); @@ -5268,6 +5271,7 @@ static int 
sort_get_next_record(MARIA_SORT_PARAM *sort_param) param->error_printed=1; param->retry_repair=1; param->testflag|=T_RETRY_WITHOUT_QUICK; + my_errno= HA_ERR_WRONG_IN_RECORD; DBUG_RETURN(1); /* Something wrong with data */ } sort_param->start_recpos=sort_param->pos; diff --git a/storage/maria/ma_extra.c b/storage/maria/ma_extra.c index 0cf5b2822b1..1db81a0ee1b 100644 --- a/storage/maria/ma_extra.c +++ b/storage/maria/ma_extra.c @@ -157,6 +157,7 @@ int maria_extra(MARIA_HA *info, enum ha_extra_function function, if (info->s->data_file_type != DYNAMIC_RECORD) break; /* Remove read/write cache if dynamic rows */ + /* fall through */ case HA_EXTRA_NO_CACHE: if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { @@ -313,7 +314,7 @@ int maria_extra(MARIA_HA *info, enum ha_extra_function function, share->state.open_count= 1; share->changed= 1; _ma_mark_file_changed_now(share); - /* Fall trough */ + /* Fall through */ case HA_EXTRA_PREPARE_FOR_RENAME: { my_bool do_flush= MY_TEST(function != HA_EXTRA_PREPARE_FOR_DROP); diff --git a/storage/maria/ma_loghandler.c b/storage/maria/ma_loghandler.c index 86e51cc3526..e143ed793e4 100644 --- a/storage/maria/ma_loghandler.c +++ b/storage/maria/ma_loghandler.c @@ -946,6 +946,7 @@ static File create_logfile_by_number_no_cache(uint32 file_no) { DBUG_PRINT("error", ("Error %d during syncing directory '%s'", errno, log_descriptor.directory)); + mysql_file_close(file, MYF(0)); translog_stop_writing(); DBUG_RETURN(-1); } @@ -1447,17 +1448,16 @@ LSN translog_get_file_max_lsn_stored(uint32 file) if (translog_read_file_header(&info, fd)) { DBUG_PRINT("error", ("Can't read file header")); - DBUG_RETURN(LSN_ERROR); + info.max_lsn= LSN_ERROR; } if (mysql_file_close(fd, MYF(MY_WME))) { DBUG_PRINT("error", ("Can't close file")); - DBUG_RETURN(LSN_ERROR); + info.max_lsn= LSN_ERROR; } - DBUG_PRINT("info", ("Max lsn: (%lu,0x%lx)", - LSN_IN_PARTS(info.max_lsn))); + DBUG_PRINT("info", ("Max lsn: (%lu,0x%lx)", LSN_IN_PARTS(info.max_lsn))); 
DBUG_RETURN(info.max_lsn); } } @@ -1621,13 +1621,15 @@ static my_bool translog_create_new_file() if (allocate_dynamic(&log_descriptor.open_files, log_descriptor.max_file - log_descriptor.min_file + 2)) goto error_lock; - if ((file->handler.file= - create_logfile_by_number_no_cache(file_no)) == -1) + + /* this call just expand the array */ + if (insert_dynamic(&log_descriptor.open_files, (uchar*)&file)) + goto error_lock; + + if ((file->handler.file= create_logfile_by_number_no_cache(file_no)) == -1) goto error_lock; translog_file_init(file, file_no, 0); - /* this call just expand the array */ - insert_dynamic(&log_descriptor.open_files, (uchar*)&file); log_descriptor.max_file++; { char *start= (char*) dynamic_element(&log_descriptor.open_files, 0, @@ -1661,6 +1663,7 @@ error_lock: mysql_rwlock_unlock(&log_descriptor.open_files_lock); error: translog_stop_writing(); + my_free(file); DBUG_RETURN(1); } @@ -3962,11 +3965,14 @@ my_bool translog_init_with_table(const char *directory, /* Start new log system from scratch */ log_descriptor.horizon= MAKE_LSN(start_file_num, TRANSLOG_PAGE_SIZE); /* header page */ - if ((file->handler.file= - create_logfile_by_number_no_cache(start_file_num)) == -1) - goto err; translog_file_init(file, start_file_num, 0); if (insert_dynamic(&log_descriptor.open_files, (uchar*)&file)) + { + my_free(file); + goto err; + } + if ((file->handler.file= + create_logfile_by_number_no_cache(start_file_num)) == -1) goto err; log_descriptor.min_file= log_descriptor.max_file= start_file_num; if (translog_write_file_header()) @@ -7789,8 +7795,24 @@ void translog_flush_buffers(TRANSLOG_ADDRESS *lsn, translog_force_current_buffer_to_finish(); translog_buffer_unlock(buffer); } - else if (log_descriptor.bc.buffer->prev_last_lsn != LSN_IMPOSSIBLE) + else { + if (log_descriptor.bc.buffer->last_lsn == LSN_IMPOSSIBLE) + { + /* + In this case both last_lsn & prev_last_lsn are LSN_IMPOSSIBLE + otherwise it will go in the first IF because LSN_IMPOSSIBLE less + then 
any real LSN and cmp_translog_addr(*lsn, + log_descriptor.bc.buffer->prev_last_lsn) will be TRUE + */ + DBUG_ASSERT(log_descriptor.bc.buffer->prev_last_lsn == + LSN_IMPOSSIBLE); + DBUG_PRINT("info", ("There is no LSNs yet generated => do nothing")); + translog_unlock(); + DBUG_VOID_RETURN; + } + + DBUG_ASSERT(log_descriptor.bc.buffer->prev_last_lsn != LSN_IMPOSSIBLE); /* fix lsn if it was horizon */ *lsn= log_descriptor.bc.buffer->prev_last_lsn; DBUG_PRINT("info", ("LSN to flush fixed to prev last lsn: (%lu,0x%lx)", @@ -7799,13 +7821,6 @@ void translog_flush_buffers(TRANSLOG_ADDRESS *lsn, TRANSLOG_BUFFERS_NO); translog_unlock(); } - else if (log_descriptor.bc.buffer->last_lsn == LSN_IMPOSSIBLE) - { - DBUG_PRINT("info", ("There is no LSNs yet generated => do nothing")); - translog_unlock(); - DBUG_VOID_RETURN; - } - /* flush buffers */ *sent_to_disk= translog_get_sent_to_disk(); if (cmp_translog_addr(*lsn, *sent_to_disk) > 0) diff --git a/storage/maria/ma_packrec.c b/storage/maria/ma_packrec.c index 5243d55428c..861023a0064 100644 --- a/storage/maria/ma_packrec.c +++ b/storage/maria/ma_packrec.c @@ -1445,7 +1445,7 @@ uint _ma_pack_get_block_info(MARIA_HA *maria, MARIA_BIT_BUFF *bit_buff, maria->blob_length=info->blob_len; } info->filepos=filepos+head_length; - if (file > 0) + if (file >= 0) { info->offset=MY_MIN(info->rec_len, ref_length - head_length); memcpy(*rec_buff_p, header + head_length, info->offset); diff --git a/storage/maria/ma_pagecache.c b/storage/maria/ma_pagecache.c index 1eebfac03f8..caaaacfc9b1 100644 --- a/storage/maria/ma_pagecache.c +++ b/storage/maria/ma_pagecache.c @@ -1187,14 +1187,14 @@ void end_pagecache(PAGECACHE *pagecache, my_bool cleanup) pagecache->blocks_changed= 0; } - DBUG_PRINT("status", ("used: %zu changed: %zu w_requests: %lu " - "writes: %lu r_requests: %lu reads: %lu", - (ulong) pagecache->blocks_used, - (ulong) pagecache->global_blocks_changed, - (ulong) pagecache->global_cache_w_requests, - (ulong) 
pagecache->global_cache_write, - (ulong) pagecache->global_cache_r_requests, - (ulong) pagecache->global_cache_read)); + DBUG_PRINT("status", ("used: %zu changed: %zu w_requests: %llu " + "writes: %llu r_requests: %llu reads: %llu", + pagecache->blocks_used, + pagecache->global_blocks_changed, + pagecache->global_cache_w_requests, + pagecache->global_cache_write, + pagecache->global_cache_r_requests, + pagecache->global_cache_read)); if (cleanup) { diff --git a/storage/maria/ma_recovery.c b/storage/maria/ma_recovery.c index 08d306a89be..d0e0210db64 100644 --- a/storage/maria/ma_recovery.c +++ b/storage/maria/ma_recovery.c @@ -1988,7 +1988,7 @@ prototype_redo_exec_hook(UNDO_KEY_INSERT) const HA_KEYSEG *keyseg= info->s->keyinfo[keynr].seg; ulonglong value; char llbuf[22]; - uchar *to; + uchar reversed[MARIA_MAX_KEY_BUFF], *to; tprint(tracef, " state older than record\n"); /* we read the record to find the auto_increment value */ enlarge_buffer(rec); @@ -2005,7 +2005,6 @@ prototype_redo_exec_hook(UNDO_KEY_INSERT) if (keyseg->flag & HA_SWAP_KEY) { /* We put key from log record to "data record" packing format... 
*/ - uchar reversed[MARIA_MAX_KEY_BUFF]; uchar *key_ptr= to; uchar *key_end= key_ptr + keyseg->length; to= reversed + keyseg->length; @@ -3061,7 +3060,7 @@ static MARIA_HA *get_MARIA_HA_from_REDO_record(const case LOGREC_REDO_INDEX: case LOGREC_REDO_INDEX_FREE_PAGE: index_page_redo_entry= 1; - /* Fall trough*/ + /* Fall through */ case LOGREC_REDO_INSERT_ROW_HEAD: case LOGREC_REDO_INSERT_ROW_TAIL: case LOGREC_REDO_PURGE_ROW_HEAD: diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index 3c6684623e9..2c9f9a2a315 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -577,7 +577,10 @@ extern "C" { int killed_ptr(HA_CHECK *param) { - return thd_killed((THD*)param->thd); + if (likely(thd_killed((THD*)param->thd)) == 0) + return 0; + my_errno= HA_ERR_ABORTED_BY_USER; + return 1; } void mi_check_print_error(HA_CHECK *param, const char *fmt,...) @@ -850,6 +853,10 @@ int ha_myisam::open(const char *name, int mode, uint test_if_locked) /* Count statistics of usage for newly open normal files */ if (file->s->reopen == 1 && ! 
(test_if_locked & HA_OPEN_TMP_TABLE)) { + /* use delay_key_write from .frm, not .MYI */ + file->s->delay_key_write= delay_key_write_options == DELAY_KEY_WRITE_ALL || + (delay_key_write_options == DELAY_KEY_WRITE_ON && + table->s->db_create_options & HA_OPTION_DELAY_KEY_WRITE); if (file->s->delay_key_write) feature_files_opened_with_delayed_keys++; } @@ -1176,9 +1183,6 @@ int ha_myisam::repair(THD *thd, HA_CHECK ¶m, bool do_optimize) share->state.dupp_key= MI_MAX_KEY; strmov(fixed_name,file->filename); - // Release latches since this can take a long time - ha_release_temporary_latches(thd); - /* Don't lock tables if we have used LOCK TABLE or if we come from enable_index() @@ -1214,6 +1218,11 @@ int ha_myisam::repair(THD *thd, HA_CHECK ¶m, bool do_optimize) if (remap) mi_munmap_file(file); #endif + /* + The following is to catch errors when my_errno is no set properly + during repairt + */ + my_errno= 0; if (mi_test_if_sort_rep(file,file->state->records,tmp_key_map,0) && (local_testflag & T_REP_BY_SORT)) { @@ -1236,8 +1245,11 @@ int ha_myisam::repair(THD *thd, HA_CHECK ¶m, bool do_optimize) } if (error && file->create_unique_index_by_sort && share->state.dupp_key != MAX_KEY) + { + my_errno= HA_ERR_FOUND_DUPP_KEY; print_keydup_error(table, &table->key_info[share->state.dupp_key], MYF(0)); + } } else { @@ -1328,6 +1340,7 @@ int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt) { KEY_CACHE *new_key_cache= check_opt->key_cache; const char *errmsg= 0; + char buf[STRING_BUFFER_USUAL_SIZE]; int error= HA_ADMIN_OK; ulonglong map; TABLE_LIST *table_list= table->pos_in_table_list; @@ -1344,7 +1357,6 @@ int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt) if ((error= mi_assign_to_key_cache(file, map, new_key_cache))) { - char buf[STRING_BUFFER_USUAL_SIZE]; my_snprintf(buf, sizeof(buf), "Failed to flush to index file (errno: %d)", error); errmsg= buf; @@ -2369,10 +2381,8 @@ bool ha_myisam::check_if_incompatible_data(HA_CREATE_INFO *create_info, 
table_changes & IS_EQUAL_PACK_LENGTH) // Not implemented yet return COMPATIBLE_DATA_NO; - if ((options & (HA_OPTION_PACK_RECORD | HA_OPTION_CHECKSUM | - HA_OPTION_DELAY_KEY_WRITE)) != - (create_info->table_options & (HA_OPTION_PACK_RECORD | HA_OPTION_CHECKSUM | - HA_OPTION_DELAY_KEY_WRITE))) + if ((options & (HA_OPTION_PACK_RECORD | HA_OPTION_CHECKSUM)) != + (create_info->table_options & (HA_OPTION_PACK_RECORD | HA_OPTION_CHECKSUM))) return COMPATIBLE_DATA_NO; return COMPATIBLE_DATA_YES; } diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c index e0016eca43f..b65bb8b78bd 100644 --- a/storage/myisam/mi_check.c +++ b/storage/myisam/mi_check.c @@ -3126,6 +3126,7 @@ static int sort_key_read(MI_SORT_PARAM *sort_param, void *key) } if (info->state->records == sort_info->max_records) { + my_errno= HA_ERR_WRONG_IN_RECORD; mi_check_print_error(sort_info->param, "Key %d - Found too many records; Can't continue", sort_param->key+1); @@ -3332,6 +3333,7 @@ static int sort_get_next_record(MI_SORT_PARAM *sort_param) param->error_printed=1; param->retry_repair=1; param->testflag|=T_RETRY_WITHOUT_QUICK; + my_errno= HA_ERR_WRONG_IN_RECORD; DBUG_RETURN(1); /* Something wrong with data */ } b_type=_mi_get_block_info(&block_info,-1,pos); @@ -3590,6 +3592,7 @@ static int sort_get_next_record(MI_SORT_PARAM *sort_param) param->error_printed=1; param->retry_repair=1; param->testflag|=T_RETRY_WITHOUT_QUICK; + my_errno= HA_ERR_WRONG_IN_RECORD; DBUG_RETURN(1); /* Something wrong with data */ } sort_param->start_recpos=sort_param->pos; diff --git a/storage/myisam/mi_extra.c b/storage/myisam/mi_extra.c index 8a2bc82d166..d0ec858439c 100644 --- a/storage/myisam/mi_extra.c +++ b/storage/myisam/mi_extra.c @@ -150,6 +150,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) if (info->s->data_file_type != DYNAMIC_RECORD) break; /* Remove read/write cache if dynamic rows */ + /* fall through */ case HA_EXTRA_NO_CACHE: if (info->opt_flag & (READ_CACHE_USED | 
WRITE_CACHE_USED)) { @@ -262,7 +263,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) share->deleting= TRUE; share->global_changed= FALSE; /* force writing changed flag */ _mi_mark_file_changed(info); - /* Fall trough */ + /* Fall through */ case HA_EXTRA_PREPARE_FOR_RENAME: DBUG_ASSERT(!share->temporary); mysql_mutex_lock(&THR_LOCK_myisam); diff --git a/storage/myisam/mi_locking.c b/storage/myisam/mi_locking.c index 531b800c63e..1921926463e 100644 --- a/storage/myisam/mi_locking.c +++ b/storage/myisam/mi_locking.c @@ -29,7 +29,7 @@ static void mi_update_status_with_lock(MI_INFO *info); int mi_lock_database(MI_INFO *info, int lock_type) { - int error; + int error, mark_crashed= 0; uint count; MYISAM_SHARE *share=info->s; DBUG_ENTER("mi_lock_database"); @@ -52,6 +52,7 @@ int mi_lock_database(MI_INFO *info, int lock_type) } error= 0; + DBUG_EXECUTE_IF ("mi_lock_database_failure", error= EINVAL;); mysql_mutex_lock(&share->intern_lock); if (share->kfile >= 0) /* May only be false on windows */ { @@ -75,17 +76,15 @@ int mi_lock_database(MI_INFO *info, int lock_type) &share->dirty_part_map, FLUSH_KEEP)) { - error=my_errno; + mark_crashed= error=my_errno; mi_print_error(info->s, HA_ERR_CRASHED); - mi_mark_crashed(info); /* Mark that table must be checked */ } if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { if (end_io_cache(&info->rec_cache)) { - error=my_errno; + mark_crashed= error=my_errno; mi_print_error(info->s, HA_ERR_CRASHED); - mi_mark_crashed(info); } } if (!count) @@ -110,22 +109,19 @@ int mi_lock_database(MI_INFO *info, int lock_type) share->state.unique= info->last_unique= info->this_unique; share->state.update_count= info->last_loop= ++info->this_loop; if (mi_state_info_write(share->kfile, &share->state, 1)) - error=my_errno; + mark_crashed= error=my_errno; share->changed=0; if (myisam_flush) { if (mysql_file_sync(share->kfile, MYF(0))) - error= my_errno; + mark_crashed= error= my_errno; if 
(mysql_file_sync(info->dfile, MYF(0))) - error= my_errno; + mark_crashed= error= my_errno; } else share->not_flushed=1; if (error) - { mi_print_error(info->s, HA_ERR_CRASHED); - mi_mark_crashed(info); - } } if (info->lock_type != F_EXTRA_LCK) { @@ -260,6 +256,8 @@ int mi_lock_database(MI_INFO *info, int lock_type) } #endif mysql_mutex_unlock(&share->intern_lock); + if (mark_crashed) + mi_mark_crashed(info); DBUG_RETURN(error); } /* mi_lock_database */ diff --git a/storage/myisam/mi_open.c b/storage/myisam/mi_open.c index ef47265a18b..41b0e18da02 100644 --- a/storage/myisam/mi_open.c +++ b/storage/myisam/mi_open.c @@ -151,7 +151,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) } share->mode=open_mode; errpos=1; - if (mysql_file_read(kfile, share->state.header.file_version, head_length, + if (mysql_file_read(kfile, (uchar*)&share->state.header, head_length, MYF(MY_NABP))) { my_errno= HA_ERR_NOT_A_TABLE; diff --git a/storage/myisam/mysql-test/storage_engine/alter_tablespace.rdiff b/storage/myisam/mysql-test/storage_engine/alter_tablespace.rdiff index 4215af58011..a8c78b117a9 100644 --- a/storage/myisam/mysql-test/storage_engine/alter_tablespace.rdiff +++ b/storage/myisam/mysql-test/storage_engine/alter_tablespace.rdiff @@ -13,7 +13,7 @@ -2 -ALTER TABLE t1 DISCARD TABLESPACE; -SELECT a FROM t1; --ERROR HY000: Tablespace has been discarded for table 't1' +-ERROR HY000: Tablespace has been discarded for table `t1` -ALTER TABLE t1 IMPORT TABLESPACE; -Warnings: -Warning 1810 IO Read error: (2, No such file or directory) Error opening './test/t1.cfg', will attempt to import without schema verification diff --git a/storage/myisammrg/mysql-test/storage_engine/alter_tablespace.rdiff b/storage/myisammrg/mysql-test/storage_engine/alter_tablespace.rdiff index 19ca1a1b6e1..e5462f8cb1f 100644 --- a/storage/myisammrg/mysql-test/storage_engine/alter_tablespace.rdiff +++ b/storage/myisammrg/mysql-test/storage_engine/alter_tablespace.rdiff @@ -13,7 +13,7 @@ -2 -ALTER 
TABLE t1 DISCARD TABLESPACE; -SELECT a FROM t1; --ERROR HY000: Tablespace has been discarded for table 't1' +-ERROR HY000: Tablespace has been discarded for table `t1` -ALTER TABLE t1 IMPORT TABLESPACE; -Warnings: -Warning 1810 IO Read error: (2, No such file or directory) Error opening './test/t1.cfg', will attempt to import without schema verification diff --git a/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff index 585e5c915ba..4c98e62625a 100644 --- a/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff +++ b/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff @@ -29,14 +29,14 @@ -SHOW CREATE TABLE t1; -Table Create Table -t1 CREATE TABLE `t1` ( -- `1` bigint(20) NOT NULL DEFAULT '0' +- `1` bigint(20) NOT NULL DEFAULT 0 -) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 -SELECT * FROM t1; -1 -1 -2 -DROP TABLE t1; -+ERROR HY000: 'test.t1' is not BASE TABLE ++ERROR HY000: 'test.t1' is not of type 'BASE TABLE' +# ERROR: Statement ended with errno 1347, errname ER_WRONG_OBJECT (expected to succeed) +# ------------ UNEXPECTED RESULT ------------ +# The statement|command finished with ER_WRONG_OBJECT. 
diff --git a/storage/myisammrg/mysql-test/storage_engine/disabled.def b/storage/myisammrg/mysql-test/storage_engine/disabled.def new file mode 100644 index 00000000000..227e33029d8 --- /dev/null +++ b/storage/myisammrg/mysql-test/storage_engine/disabled.def @@ -0,0 +1,3 @@ +insert_delayed : MDEV-12880 - INSERT DELAYED is not detected as inapplicable to a table under lock +lock_concurrent : MDEV-12882 - Assertion failure +select_high_prio : MDEV-12885 - MDL_SHARED_READ_ONLY is taken instead of MDL_SHARED_READ diff --git a/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff index d7bf99fd674..4346545abcf 100644 --- a/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff +++ b/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff @@ -1,6 +1,6 @@ ---- repair_table.result 2013-01-23 01:35:44.388267080 +0400 -+++ repair_table.reject 2013-01-23 03:16:26.468307847 +0400 -@@ -1,234 +1,114 @@ +--- suite/storage_engine/parts/repair_table.result 2017-05-20 03:58:19.451939791 +0300 ++++ ../storage/myisammrg/mysql-test/storage_engine/parts/repair_table.reject 2017-05-24 02:42:31.130318292 +0300 +@@ -1,234 +1,115 @@ call mtr.add_suppression("Table '.*t1.*' is marked as crashed and should be repaired"); DROP TABLE IF EXISTS t1, t2; CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2; @@ -144,6 +144,7 @@ call mtr.add_suppression("MySQL thread id .*, query id .* localhost.*root Checking table"); call mtr.add_suppression(" '\..test.t1'"); call mtr.add_suppression("Couldn't repair table: test.t1"); ++call mtr.add_suppression("Table 't1' is marked as crashed.*"); CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>, <CUSTOM_INDEX> (a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2; +ERROR HY000: Engine cannot be used in partitioned tables +# ERROR: Statement 
ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed) diff --git a/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff index 9ff8f906511..79f6c7040e0 100644 --- a/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff +++ b/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff @@ -1,5 +1,5 @@ ---- repair_table.result 2013-01-23 01:26:05.995538460 +0400 -+++ repair_table.reject 2013-01-23 02:50:55.035560564 +0400 +--- suite/storage_engine/repair_table.result 2017-05-24 01:09:07.274213486 +0300 ++++ suite/storage_engine/repair_table.reject 2017-05-24 01:10:25.466214949 +0300 @@ -4,56 +4,50 @@ CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; REPAIR TABLE t1; @@ -71,7 +71,7 @@ DROP TABLE t1, t2; call mtr.add_suppression("Got an error from thread_id=.*"); call mtr.add_suppression("MySQL thread id .*, query id .* localhost.*root Checking table"); -@@ -62,45 +56,32 @@ +@@ -63,45 +57,32 @@ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>, <CUSTOM_INDEX> (a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; REPAIR TABLE t1; Table Op Msg_type Msg_text @@ -104,7 +104,7 @@ -test.t1 check error Corrupt +test.t1 check status OK SELECT a,b FROM t1; --ERROR HY000: Incorrect key file for table 't1'; try to repair it +-ERROR HY000: Index for table 't1' is corrupt; try to repair it -# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). 
-# If you got a difference in error message, just add it to rdiff file -INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o'); diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_dir.rdiff index e6055278b3c..671e26ec617 100644 --- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff +++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_dir.rdiff @@ -1,18 +1,18 @@ ---- tbl_opt_data_index_dir.result 2013-01-22 22:05:05.246633000 +0400 -+++ tbl_opt_data_index_dir.reject 2013-01-23 02:50:59.951498762 +0400 -@@ -4,7 +4,7 @@ +--- suite/storage_engine/tbl_opt_data_dir.result 2017-05-24 00:21:15.550159778 +0300 ++++ ../storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_dir.reject 2017-05-24 00:25:45.506164827 +0300 +@@ -5,7 +5,7 @@ t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` char(8) DEFAULT NULL --) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>' INDEX DIRECTORY='<INDEX_DIR>' +-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR_1>' +) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`) + # For ALTER TABLE the option is ignored + # Running ALTER TABLE .. 
DATA DIRECTORY = <> Warnings: - Warning 1618 <INDEX DIRECTORY> option ignored - SHOW CREATE TABLE t1; -@@ -12,5 +12,5 @@ +@@ -15,5 +15,5 @@ t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` char(8) DEFAULT NULL --) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>' INDEX DIRECTORY='<INDEX_DIR>' +-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR_1>' +) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`) DROP TABLE t1; diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_index_dir.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_index_dir.rdiff new file mode 100644 index 00000000000..ca025861f68 --- /dev/null +++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_index_dir.rdiff @@ -0,0 +1,18 @@ +--- suite/storage_engine/tbl_opt_index_dir.result 2017-05-24 00:21:15.550159778 +0300 ++++ ../storage/myisammrg/mysql-test/storage_engine/tbl_opt_index_dir.reject 2017-05-24 00:25:45.506164827 +0300 +@@ -5,7 +5,7 @@ + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INDEX DIRECTORY='<INDEX_DIR_1>' ++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`) + # For ALTER TABLE the option is ignored + # Running ALTER TABLE .. 
INDEX DIRECTORY = <> + Warnings: +@@ -15,5 +15,5 @@ + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INDEX DIRECTORY='<INDEX_DIR_1>' ++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`) + DROP TABLE t1; diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.rdiff index f7e0905d4e7..6c756e7b8e1 100644 --- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.rdiff +++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.rdiff @@ -1,17 +1,33 @@ ---- tbl_opt_row_format.result 2013-01-22 22:05:05.246633000 +0400 -+++ tbl_opt_row_format.reject 2013-01-23 02:51:04.743438518 +0400 -@@ -5,12 +5,12 @@ +--- ../storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.result~ 2017-05-24 00:50:44.254192857 +0300 ++++ ../storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.reject 2017-05-24 00:50:44.334192859 +0300 +@@ -5,26 +5,26 @@ + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC ++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC INSERT_METHOD=LAST UNION=(`mrg`.`t1`) + ALTER TABLE t1 ROW_FORMAT=FIXED; + SHOW CREATE TABLE t1; + Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` char(8) DEFAULT NULL -) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED +) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED INSERT_METHOD=LAST UNION=(`mrg`.`t1`) - ALTER TABLE t1 ROW_FORMAT=DYNAMIC; + ALTER TABLE t1 ROW_FORMAT=PAGE; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` char(8) DEFAULT NULL --) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC -+) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC 
INSERT_METHOD=LAST UNION=(`mrg`.`t1`) +-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=PAGE ++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=PAGE INSERT_METHOD=LAST UNION=(`mrg`.`t1`) + ALTER TABLE t1 ROW_FORMAT=COMPACT; + SHOW CREATE TABLE t1; + Table Create Table + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT ++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT INSERT_METHOD=LAST UNION=(`mrg`.`t1`) DROP TABLE t1; diff --git a/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff b/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff index c7372326fad..d537967ef99 100644 --- a/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff +++ b/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff @@ -6,12 +6,12 @@ -SHOW COLUMNS IN t1; -Field Type Null Key Default Extra -a int(11) # # --b int(11) # # VIRTUAL +-b int(11) # # VIRTUAL GENERATED -INSERT INTO t1 (a) VALUES (1),(2); -INSERT INTO t1 (a,b) VALUES (3,3),(4,4); -Warnings: --Warning 1906 The value specified for computed column 'b' in table 't1' ignored --Warning 1906 The value specified for computed column 'b' in table 't1' ignored +-Warning 1906 The value specified for generated column 'b' in table 't1' ignored +-Warning 1906 The value specified for generated column 'b' in table 't1' ignored -SELECT a,b FROM t1; -a b -1 2 @@ -23,12 +23,12 @@ -SHOW COLUMNS IN t1; -Field Type Null Key Default Extra -a int(11) # # --b int(11) # # PERSISTENT +-b int(11) # # STORED GENERATED -INSERT INTO t1 (a) VALUES (1),(2); -INSERT INTO t1 (a,b) VALUES (3,3),(4,4); -Warnings: --Warning 1906 The value specified for computed column 'b' in table 't1' ignored --Warning 1906 The value specified for computed column 'b' in table 't1' ignored +-Warning 1906 The value specified for generated column 'b' in table 't1' ignored +-Warning 1906 The value specified for generated column 'b' in table 
't1' ignored -SELECT a,b FROM t1; -a b -1 2 @@ -40,12 +40,12 @@ -SHOW COLUMNS IN t1; -Field Type Null Key Default Extra -a int(11) # # --b int(11) # # VIRTUAL +-b int(11) # # VIRTUAL GENERATED -INSERT INTO t1 (a) VALUES (1),(2); -INSERT INTO t1 (a,b) VALUES (3,3),(4,4); -Warnings: --Warning 1906 The value specified for computed column 'b' in table 't1' ignored --Warning 1906 The value specified for computed column 'b' in table 't1' ignored +-Warning 1906 The value specified for generated column 'b' in table 't1' ignored +-Warning 1906 The value specified for generated column 'b' in table 't1' ignored -SELECT a,b FROM t1; -a b -1 2 @@ -57,12 +57,12 @@ -SHOW COLUMNS IN t1; -Field Type Null Key Default Extra -a int(11) # # --b int(11) # # PERSISTENT +-b int(11) # # STORED GENERATED -INSERT INTO t1 (a) VALUES (1),(2); -INSERT INTO t1 (a,b) VALUES (3,3),(4,4); -Warnings: --Warning 1906 The value specified for computed column 'b' in table 't1' ignored --Warning 1906 The value specified for computed column 'b' in table 't1' ignored +-Warning 1906 The value specified for generated column 'b' in table 't1' ignored +-Warning 1906 The value specified for generated column 'b' in table 't1' ignored -SELECT a,b FROM t1; -a b -1 2 @@ -70,11 +70,11 @@ -3 4 -4 5 -DROP TABLE t1; -+ERROR HY000: MRG_MyISAM storage engine does not support computed columns -+# ERROR: Statement ended with errno 1910, errname ER_UNSUPPORTED_ENGINE_FOR_VIRTUAL_COLUMNS (expected to succeed) ++ERROR HY000: MRG_MyISAM storage engine does not support generated columns ++# ERROR: Statement ended with errno 1910, errname ER_UNSUPPORTED_ENGINE_FOR_GENERATED_COLUMNS (expected to succeed) +# ------------ UNEXPECTED RESULT ------------ +# [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b INT(11) /*!*/ /*Custom column options*/ GENERATED ALWAYS AS (a+1)) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST ] -+# The statement|command finished with 
ER_UNSUPPORTED_ENGINE_FOR_VIRTUAL_COLUMNS. ++# The statement|command finished with ER_UNSUPPORTED_ENGINE_FOR_GENERATED_COLUMNS. +# Virtual columns or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors. +# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def. +# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped. diff --git a/storage/perfschema/ha_perfschema.cc b/storage/perfschema/ha_perfschema.cc index ecce3c5e627..e8e9581d54b 100644 --- a/storage/perfschema/ha_perfschema.cc +++ b/storage/perfschema/ha_perfschema.cc @@ -214,7 +214,7 @@ maria_declare_plugin(perfschema) 0x0001, pfs_status_vars, NULL, - "5.6.33", + "5.6.36", MariaDB_PLUGIN_MATURITY_STABLE } maria_declare_plugin_end; diff --git a/storage/perfschema/pfs.cc b/storage/perfschema/pfs.cc index 58704c87b74..c411025d148 100644 --- a/storage/perfschema/pfs.cc +++ b/storage/perfschema/pfs.cc @@ -3953,9 +3953,11 @@ static PSI_file* end_file_open_wait_v1(PSI_file_locker *locker, switch (state->m_operation) { case PSI_FILE_STAT: + case PSI_FILE_RENAME: break; case PSI_FILE_STREAM_OPEN: case PSI_FILE_CREATE: + case PSI_FILE_OPEN: if (result != NULL) { PFS_file_class *klass= reinterpret_cast<PFS_file_class*> (state->m_class); @@ -3966,7 +3968,6 @@ static PSI_file* end_file_open_wait_v1(PSI_file_locker *locker, state->m_file= reinterpret_cast<PSI_file*> (pfs_file); } break; - case PSI_FILE_OPEN: default: DBUG_ASSERT(false); break; diff --git a/storage/perfschema/pfs_digest.cc b/storage/perfschema/pfs_digest.cc index 3330c29795f..86b05f37fd2 100644 --- a/storage/perfschema/pfs_digest.cc +++ b/storage/perfschema/pfs_digest.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -257,10 +257,11 @@ search: if (safe_index == 0) { /* Record [0] is reserved. */ - safe_index= 1; + continue; } /* Add a new record in digest stat array. */ + DBUG_ASSERT(safe_index < digest_max); pfs= &statements_digest_stat_array[safe_index]; if (pfs->m_lock.is_free()) diff --git a/storage/perfschema/unittest/pfs-t.cc b/storage/perfschema/unittest/pfs-t.cc index f76b1aa2e75..b8814f2ad2d 100644 --- a/storage/perfschema/unittest/pfs-t.cc +++ b/storage/perfschema/unittest/pfs-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -27,6 +27,8 @@ #include "stub_print_error.h" #include "stub_pfs_defaults.h" +void unload_performance_schema(); + /* test helpers, to simulate the setup */ void setup_thread(PSI_thread *t, bool enabled) @@ -126,7 +128,7 @@ void test_bootstrap() psi_2= boot->get_interface(PSI_VERSION_2); ok(psi_2 == NULL, "version 2"); - shutdown_performance_schema(); + unload_performance_schema(); } /* @@ -183,6 +185,27 @@ PSI * load_perfschema() return (PSI*) psi; } +void unload_performance_schema() +{ + cleanup_table_share(); + cleanup_instruments(); + cleanup_sync_class(); + cleanup_thread_class(); + cleanup_table_share(); + cleanup_file_class(); + cleanup_stage_class(); + cleanup_statement_class(); + cleanup_socket_class(); + cleanup_events_waits_history_long(); + cleanup_events_stages_history_long(); + cleanup_events_statements_history_long(); + cleanup_table_share_hash(); + cleanup_file_hash(); + cleanup_digest(); + + shutdown_performance_schema(); +} + void test_bad_registration() { PSI *psi; @@ -581,8 +604,7 @@ void test_bad_registration() 
psi->register_socket("X", bad_socket_3, 1); ok(dummy_socket_key == 2, "assigned key"); - - shutdown_performance_schema(); + unload_performance_schema(); } void test_init_disabled() @@ -1016,7 +1038,7 @@ void test_init_disabled() socket_A1= psi->init_socket(99, NULL, NULL, 0); ok(socket_A1 == NULL, "broken socket key not instrumented"); - shutdown_performance_schema(); + unload_performance_schema(); } void test_locker_disabled() @@ -1316,14 +1338,14 @@ void test_locker_disabled() /* Pretend the socket does not have a thread owner */ /* ---------------------------------------------- */ - psi->delete_current_thread(); socket_class_A->m_enabled= true; socket_A1= psi->init_socket(socket_key_A, NULL, NULL, 0); ok(socket_A1 != NULL, "instrumented"); /* Socket thread owner has not been set */ socket_locker= psi->start_socket_wait(&socket_state, socket_A1, PSI_SOCKET_SEND, 12, "foo.cc", 12); - ok(socket_locker == NULL, "no locker (no thread owner)"); - + ok(socket_locker != NULL, "locker (owner not used)"); + psi->end_socket_wait(socket_locker, 10); + /* Pretend the running thread is not instrumented */ /* ---------------------------------------------- */ @@ -1351,7 +1373,7 @@ void test_locker_disabled() socket_locker= psi->start_socket_wait(&socket_state, socket_A1, PSI_SOCKET_SEND, 12, "foo.cc", 12); ok(socket_locker == NULL, "no locker"); - shutdown_performance_schema(); + unload_performance_schema(); } void test_file_instrumentation_leak() @@ -1438,7 +1460,7 @@ void test_file_instrumentation_leak() file_locker= psi->get_thread_file_descriptor_locker(&file_state, (File) 12, PSI_FILE_WRITE); ok(file_locker == NULL, "no locker, no leak"); - shutdown_performance_schema(); + unload_performance_schema(); } void test_enabled() @@ -1474,7 +1496,7 @@ void test_enabled() { & cond_key_B, "C-B", 0} }; - shutdown_performance_schema(); + unload_performance_schema(); #endif } @@ -1644,5 +1666,5 @@ int main(int argc, char **argv) MY_INIT(argv[0]); do_all_tests(); my_end(0); - return 
exit_status(); + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_account-oom-t.cc b/storage/perfschema/unittest/pfs_account-oom-t.cc index f1cd5069b54..a87588487cb 100644 --- a/storage/perfschema/unittest/pfs_account-oom-t.cc +++ b/storage/perfschema/unittest/pfs_account-oom-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -108,6 +108,6 @@ int main(int, char **) MY_INIT("pfs_account-oom-t"); do_all_tests(); my_end(0); - return exit_status(); + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_connect_attr-t.cc b/storage/perfschema/unittest/pfs_connect_attr-t.cc index 7bee1d063a1..ecf790eeede 100644 --- a/storage/perfschema/unittest/pfs_connect_attr-t.cc +++ b/storage/perfschema/unittest/pfs_connect_attr-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -341,5 +341,5 @@ int main(int, char **) diag("skipping the cp1251 tests : missing character set"); plan(59 + (cs_cp1251 ? 10 : 0)); do_all_tests(); - return 0; + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_host-oom-t.cc b/storage/perfschema/unittest/pfs_host-oom-t.cc index c72162038ca..c089083e4ae 100644 --- a/storage/perfschema/unittest/pfs_host-oom-t.cc +++ b/storage/perfschema/unittest/pfs_host-oom-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -108,6 +108,6 @@ int main(int, char **) MY_INIT("pfs_host-oom-t"); do_all_tests(); my_end(0); - return exit_status(); + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_instr-oom-t.cc b/storage/perfschema/unittest/pfs_instr-oom-t.cc index 93cddb0de6c..888acfab744 100644 --- a/storage/perfschema/unittest/pfs_instr-oom-t.cc +++ b/storage/perfschema/unittest/pfs_instr-oom-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -355,6 +355,11 @@ void test_oom() rc= init_instruments(& param); ok(rc == 1, "oom (per thread wait)"); + cleanup_sync_class(); + cleanup_thread_class(); + cleanup_file_class(); + cleanup_instruments(); + param.m_enabled= true; param.m_mutex_class_sizing= 0; param.m_rwlock_class_sizing= 0; @@ -432,6 +437,8 @@ void test_oom() init_event_name_sizing(& param); rc= init_instruments(& param); ok(rc == 1, "oom (thread stages history sizing)"); + + cleanup_thread_class(); cleanup_instruments(); param.m_enabled= true; @@ -467,6 +474,9 @@ void test_oom() init_event_name_sizing(& param); rc= init_instruments(& param); ok(rc == 1, "oom (per thread stages)"); + + cleanup_stage_class(); + cleanup_thread_class(); cleanup_instruments(); param.m_enabled= true; @@ -502,6 +512,8 @@ void test_oom() init_event_name_sizing(& param); rc= init_instruments(& param); ok(rc == 1, "oom (thread statements history sizing)"); + + cleanup_thread_class(); cleanup_instruments(); param.m_enabled= true; @@ -537,6 +549,9 @@ void test_oom() init_event_name_sizing(& param); rc= init_instruments(& param); ok(rc == 1, "oom (per thread statements)"); + + 
cleanup_statement_class(); + cleanup_thread_class(); cleanup_instruments(); param.m_enabled= true; @@ -572,6 +587,8 @@ void test_oom() init_event_name_sizing(& param); rc= init_instruments(& param); ok(rc == 1, "oom (global waits)"); + + cleanup_sync_class(); cleanup_instruments(); param.m_enabled= true; @@ -609,8 +626,10 @@ void test_oom() ok(rc == 0, "init stage class"); rc= init_instruments(& param); ok(rc == 1, "oom (global stages)"); - cleanup_instruments(); + + cleanup_sync_class(); cleanup_stage_class(); + cleanup_instruments(); param.m_enabled= true; param.m_mutex_class_sizing= 10; @@ -647,8 +666,10 @@ void test_oom() ok(rc == 0, "init statement class"); rc= init_instruments(& param); ok(rc == 1, "oom (global statements)"); - cleanup_instruments(); + + cleanup_sync_class(); cleanup_statement_class(); + cleanup_instruments(); } void do_all_tests() @@ -662,6 +683,6 @@ int main(int argc, char **argv) MY_INIT(argv[0]); do_all_tests(); my_end(0); - return exit_status(); + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_instr-t.cc b/storage/perfschema/unittest/pfs_instr-t.cc index c191b3dc41a..659f410e283 100644 --- a/storage/perfschema/unittest/pfs_instr-t.cc +++ b/storage/perfschema/unittest/pfs_instr-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -23,10 +23,11 @@ #include <memory.h> +PFS_global_param param; + void test_no_instruments() { int rc; - PFS_global_param param; memset(& param, 0xFF, sizeof(param)); param.m_enabled= true; @@ -86,7 +87,6 @@ void test_no_instances() PFS_file *file; PFS_socket *socket; PFS_table *table; - PFS_global_param param; memset(& param, 0xFF, sizeof(param)); param.m_enabled= true; @@ -227,7 +227,6 @@ void test_with_instances() PFS_socket *socket_2; PFS_table *table_1; PFS_table *table_2; - PFS_global_param param; memset(& param, 0xFF, sizeof(param)); param.m_enabled= true; @@ -414,6 +413,6 @@ int main(int argc, char **argv) MY_INIT(argv[0]); do_all_tests(); my_end(0); - return exit_status(); + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_instr_class-oom-t.cc b/storage/perfschema/unittest/pfs_instr_class-oom-t.cc index 708e08be6d4..c9b87b9cf1e 100644 --- a/storage/perfschema/unittest/pfs_instr_class-oom-t.cc +++ b/storage/perfschema/unittest/pfs_instr_class-oom-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -64,6 +64,6 @@ int main(int argc, char **argv) MY_INIT(argv[0]); do_all_tests(); my_end(0); - return exit_status(); + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_instr_class-t.cc b/storage/perfschema/unittest/pfs_instr_class-t.cc index 19a8989b34e..dcbc3927eae 100644 --- a/storage/perfschema/unittest/pfs_instr_class-t.cc +++ b/storage/perfschema/unittest/pfs_instr_class-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved. 
+/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -670,5 +670,5 @@ int main(int argc, char **argv) MY_INIT(argv[0]); do_all_tests(); my_end(0); - return exit_status(); + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_misc-t.cc b/storage/perfschema/unittest/pfs_misc-t.cc index a4b11b9a727..eed9039dfb2 100644 --- a/storage/perfschema/unittest/pfs_misc-t.cc +++ b/storage/perfschema/unittest/pfs_misc-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -67,6 +67,6 @@ int main(int, char **) MY_INIT("pfs_misc-t"); do_all_tests(); my_end(0); - return exit_status(); + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_timer-t.cc b/storage/perfschema/unittest/pfs_timer-t.cc index 8fb3a206ebf..55113860532 100644 --- a/storage/perfschema/unittest/pfs_timer-t.cc +++ b/storage/perfschema/unittest/pfs_timer-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -115,6 +115,6 @@ int main(int, char **) MY_INIT("pfs_timer-t"); do_all_tests(); my_end(0); - return exit_status(); + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_user-oom-t.cc b/storage/perfschema/unittest/pfs_user-oom-t.cc index 9fb64d130ae..e153b39cbd2 100644 --- a/storage/perfschema/unittest/pfs_user-oom-t.cc +++ b/storage/perfschema/unittest/pfs_user-oom-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -107,6 +107,6 @@ int main(int, char **) MY_INIT("pfs_user-oom-t"); do_all_tests(); my_end(0); - return exit_status(); + return (exit_status()); } diff --git a/storage/rocksdb/rdb_datadic.h b/storage/rocksdb/rdb_datadic.h index 8742a0794a4..9974baaeed0 100644 --- a/storage/rocksdb/rdb_datadic.h +++ b/storage/rocksdb/rdb_datadic.h @@ -751,7 +751,7 @@ public: interface Rdb_tables_scanner { virtual int add_table(Rdb_tbl_def * tdef) = 0; - virtual ~Rdb_tables_scanner() {} + virtual ~Rdb_tables_scanner() {} /* Keep the compiler happy */ }; /* diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc index e9eeb802568..be57c04806b 100644 --- a/storage/sphinx/ha_sphinx.cc +++ b/storage/sphinx/ha_sphinx.cc @@ -216,7 +216,9 @@ enum ESphGroupBy SPH_GROUPBY_WEEK = 1, ///< group by week SPH_GROUPBY_MONTH = 2, ///< group by month SPH_GROUPBY_YEAR = 3, ///< group by year - SPH_GROUPBY_ATTR = 4 ///< group by attribute value + SPH_GROUPBY_ATTR = 4, ///< group by attribute value + SPH_GROUPBY_ATTRPAIR = 5, ///< group by sequential attrs pair (rendered redundant by 64bit attrs support; removed) + SPH_GROUPBY_MULTIPLE = 6 ///< group by 
on multiple attribute values }; /// known attribute types @@ -911,7 +913,7 @@ bool sphinx_show_status ( THD * thd ) } // show last error or warning (either in addition to stats, or on their own) - if ( pTls && pTls->m_pHeadTable && pTls->m_pHeadTable->m_tStats.m_sLastMessage && pTls->m_pHeadTable->m_tStats.m_sLastMessage[0] ) + if ( pTls && pTls->m_pHeadTable && pTls->m_pHeadTable->m_tStats.m_sLastMessage[0] ) { const char * sMessageType = pTls->m_pHeadTable->m_tStats.m_bLastError ? "error" : "warning"; @@ -1563,6 +1565,7 @@ bool CSphSEQuery::ParseField ( char * sField ) { "month:", SPH_GROUPBY_MONTH }, { "year:", SPH_GROUPBY_YEAR }, { "attr:", SPH_GROUPBY_ATTR }, + { "multi:", SPH_GROUPBY_MULTIPLE } }; int i; diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc index 96d323a3492..ce425e9a65e 100644 --- a/storage/spider/spd_db_conn.cc +++ b/storage/spider/spd_db_conn.cc @@ -1846,8 +1846,8 @@ int spider_db_append_key_where_internal( #if defined(MARIADB_BASE_VERSION) && MYSQL_VERSION_ID >= 100000 case HA_READ_PREFIX_LAST: result_list->desc_flg = TRUE; - /* fall through */ #endif + /* fall through */ case HA_READ_KEY_EXACT: if (sql_kind == SPIDER_SQL_KIND_SQL) { diff --git a/storage/spider/spd_table.cc b/storage/spider/spd_table.cc index 7724eb067e9..56931f47f24 100644 --- a/storage/spider/spd_table.cc +++ b/storage/spider/spd_table.cc @@ -516,7 +516,6 @@ int spider_free_share_alloc( ) { int roop_count; DBUG_ENTER("spider_free_share_alloc"); - if (share->dbton_bitmap) { for (roop_count = SPIDER_DBTON_SIZE - 1; roop_count >= 0; roop_count--) { diff --git a/storage/tokudb/CMakeLists.txt b/storage/tokudb/CMakeLists.txt index ac8e5a11e2a..c02375ceb2b 100644 --- a/storage/tokudb/CMakeLists.txt +++ b/storage/tokudb/CMakeLists.txt @@ -1,4 +1,4 @@ -SET(TOKUDB_VERSION 5.6.35-80.0) +SET(TOKUDB_VERSION 5.6.36-82.0) # PerconaFT only supports x86-64 and cmake-2.8.9+ IF(CMAKE_VERSION VERSION_LESS "2.8.9") MESSAGE(STATUS "CMake 2.8.9 or higher is required by 
TokuDB") @@ -21,7 +21,8 @@ SET(TOKUDB_SOURCES tokudb_background.cc tokudb_information_schema.cc tokudb_sysvars.cc - tokudb_thread.cc) + tokudb_thread.cc + tokudb_dir_cmd.cc) MYSQL_ADD_PLUGIN(tokudb ${TOKUDB_SOURCES} STORAGE_ENGINE MODULE_ONLY COMPONENT tokudb-engine) diff --git a/storage/tokudb/PerconaFT/buildheader/make_tdb.cc b/storage/tokudb/PerconaFT/buildheader/make_tdb.cc index 0145d631839..6f0b7c5f419 100644 --- a/storage/tokudb/PerconaFT/buildheader/make_tdb.cc +++ b/storage/tokudb/PerconaFT/buildheader/make_tdb.cc @@ -426,6 +426,9 @@ static void print_db_env_struct (void) { "bool (*set_dir_per_db)(DB_ENV *, bool new_val)", "bool (*get_dir_per_db)(DB_ENV *)", "const char *(*get_data_dir)(DB_ENV *env)", + "int (*dirtool_attach)(DB_ENV *, DB_TXN *, const char *, const char *)", + "int (*dirtool_detach)(DB_ENV *, DB_TXN *, const char *)", + "int (*dirtool_move)(DB_ENV *, DB_TXN *, const char *, const char *)", "void (*kill_waiter)(DB_ENV *, void *extra)", NULL}; diff --git a/storage/tokudb/PerconaFT/ft/ft-ops.cc b/storage/tokudb/PerconaFT/ft/ft-ops.cc index eba9aa33e9f..d3a5a6c9e91 100644 --- a/storage/tokudb/PerconaFT/ft/ft-ops.cc +++ b/storage/tokudb/PerconaFT/ft/ft-ops.cc @@ -3901,25 +3901,34 @@ struct keyrange_compare_s { }; // TODO: Remove me, I'm boring -static int keyrange_compare(DBT const &kdbt, const struct keyrange_compare_s &s) { +static int keyrange_compare(DBT const &kdbt, + const struct keyrange_compare_s &s) { return s.ft->cmp(&kdbt, s.key); } -static void -keysrange_in_leaf_partition (FT_HANDLE ft_handle, FTNODE node, - DBT* key_left, DBT* key_right, - int left_child_number, int right_child_number, uint64_t estimated_num_rows, - uint64_t *less, uint64_t* equal_left, uint64_t* middle, - uint64_t* equal_right, uint64_t* greater, bool* single_basement_node) +static void keysrange_in_leaf_partition(FT_HANDLE ft_handle, + FTNODE node, + DBT *key_left, + DBT *key_right, + int left_child_number, + int right_child_number, + uint64_t 
estimated_num_rows, + uint64_t *less, + uint64_t *equal_left, + uint64_t *middle, + uint64_t *equal_right, + uint64_t *greater, + bool *single_basement_node) // If the partition is in main memory then estimate the number // Treat key_left == NULL as negative infinity // Treat key_right == NULL as positive infinity { - paranoid_invariant(node->height == 0); // we are in a leaf + paranoid_invariant(node->height == 0); // we are in a leaf paranoid_invariant(!(key_left == NULL && key_right != NULL)); paranoid_invariant(left_child_number <= right_child_number); bool single_basement = left_child_number == right_child_number; - paranoid_invariant(!single_basement || (BP_STATE(node, left_child_number) == PT_AVAIL)); + paranoid_invariant(!single_basement || + (BP_STATE(node, left_child_number) == PT_AVAIL)); if (BP_STATE(node, left_child_number) == PT_AVAIL) { int r; // The partition is in main memory then get an exact count. @@ -3927,29 +3936,35 @@ keysrange_in_leaf_partition (FT_HANDLE ft_handle, FTNODE node, BASEMENTNODE bn = BLB(node, left_child_number); uint32_t idx_left = 0; // if key_left is NULL then set r==-1 and idx==0. - r = key_left ? bn->data_buffer.find_zero<decltype(s_left), keyrange_compare>(s_left, nullptr, nullptr, nullptr, &idx_left) : -1; + r = key_left + ? bn->data_buffer.find_zero<decltype(s_left), keyrange_compare>( + s_left, nullptr, nullptr, nullptr, &idx_left) + : -1; *less = idx_left; - *equal_left = (r==0) ? 1 : 0; + *equal_left = (r == 0) ? 1 : 0; uint32_t size = bn->data_buffer.num_klpairs(); uint32_t idx_right = size; r = -1; if (single_basement && key_right) { struct keyrange_compare_s s_right = {ft_handle->ft, key_right}; - r = bn->data_buffer.find_zero<decltype(s_right), keyrange_compare>(s_right, nullptr, nullptr, nullptr, &idx_right); + r = bn->data_buffer.find_zero<decltype(s_right), keyrange_compare>( + s_right, nullptr, nullptr, nullptr, &idx_right); } *middle = idx_right - idx_left - *equal_left; - *equal_right = (r==0) ? 
1 : 0; + *equal_right = (r == 0) ? 1 : 0; *greater = size - idx_right - *equal_right; } else { paranoid_invariant(!single_basement); uint32_t idx_left = estimated_num_rows / 2; if (!key_left) { - //Both nullptr, assume key_left belongs before leftmost entry, key_right belongs after rightmost entry + // Both nullptr, assume key_left belongs before leftmost entry, + // key_right belongs after rightmost entry idx_left = 0; paranoid_invariant(!key_right); } - // Assume idx_left and idx_right point to where key_left and key_right belong, (but are not there). + // Assume idx_left and idx_right point to where key_left and key_right + // belong, (but are not there). *less = idx_left; *equal_left = 0; *middle = estimated_num_rows - idx_left; @@ -3959,44 +3974,76 @@ keysrange_in_leaf_partition (FT_HANDLE ft_handle, FTNODE node, *single_basement_node = single_basement; } -static int -toku_ft_keysrange_internal (FT_HANDLE ft_handle, FTNODE node, - DBT* key_left, DBT* key_right, bool may_find_right, - uint64_t* less, uint64_t* equal_left, uint64_t* middle, - uint64_t* equal_right, uint64_t* greater, bool* single_basement_node, - uint64_t estimated_num_rows, - ftnode_fetch_extra *min_bfe, // set up to read a minimal read. - ftnode_fetch_extra *match_bfe, // set up to read a basement node iff both keys in it - struct unlockers *unlockers, ANCESTORS ancestors, const pivot_bounds &bounds) -// Implementation note: Assign values to less, equal, and greater, and then on the way out (returning up the stack) we add more values in. +static int toku_ft_keysrange_internal( + FT_HANDLE ft_handle, + FTNODE node, + DBT *key_left, + DBT *key_right, + bool may_find_right, + uint64_t *less, + uint64_t *equal_left, + uint64_t *middle, + uint64_t *equal_right, + uint64_t *greater, + bool *single_basement_node, + uint64_t estimated_num_rows, + ftnode_fetch_extra *min_bfe, // set up to read a minimal read. 
+ ftnode_fetch_extra + *match_bfe, // set up to read a basement node iff both keys in it + struct unlockers *unlockers, + ANCESTORS ancestors, + const pivot_bounds &bounds) +// Implementation note: Assign values to less, equal, and greater, and then on +// the way out (returning up the stack) we add more values in. { int r = 0; // if KEY is NULL then use the leftmost key. - int left_child_number = key_left ? toku_ftnode_which_child (node, key_left, ft_handle->ft->cmp) : 0; - int right_child_number = node->n_children; // Sentinel that does not equal left_child_number. + int left_child_number = + key_left ? toku_ftnode_which_child(node, key_left, ft_handle->ft->cmp) + : 0; + int right_child_number = + node->n_children; // Sentinel that does not equal left_child_number. if (may_find_right) { - right_child_number = key_right ? toku_ftnode_which_child (node, key_right, ft_handle->ft->cmp) : node->n_children - 1; + right_child_number = + key_right + ? toku_ftnode_which_child(node, key_right, ft_handle->ft->cmp) + : node->n_children - 1; } uint64_t rows_per_child = estimated_num_rows / node->n_children; if (node->height == 0) { - keysrange_in_leaf_partition(ft_handle, node, key_left, key_right, left_child_number, right_child_number, - rows_per_child, less, equal_left, middle, equal_right, greater, single_basement_node); - - *less += rows_per_child * left_child_number; + keysrange_in_leaf_partition(ft_handle, + node, + key_left, + key_right, + left_child_number, + right_child_number, + rows_per_child, + less, + equal_left, + middle, + equal_right, + greater, + single_basement_node); + + *less += rows_per_child * left_child_number; if (*single_basement_node) { - *greater += rows_per_child * (node->n_children - left_child_number - 1); + *greater += + rows_per_child * (node->n_children - left_child_number - 1); } else { - *middle += rows_per_child * (node->n_children - left_child_number - 1); + *middle += + rows_per_child * (node->n_children - left_child_number - 1); } } else 
{ // do the child. struct ancestors next_ancestors = {node, left_child_number, ancestors}; BLOCKNUM childblocknum = BP_BLOCKNUM(node, left_child_number); - uint32_t fullhash = compute_child_fullhash(ft_handle->ft->cf, node, left_child_number); + uint32_t fullhash = + compute_child_fullhash(ft_handle->ft->cf, node, left_child_number); FTNODE childnode; bool msgs_applied = false; - bool child_may_find_right = may_find_right && left_child_number == right_child_number; + bool child_may_find_right = + may_find_right && left_child_number == right_child_number; r = toku_pin_ftnode_for_query( ft_handle, childblocknum, @@ -4007,27 +4054,45 @@ toku_ft_keysrange_internal (FT_HANDLE ft_handle, FTNODE node, child_may_find_right ? match_bfe : min_bfe, false, &childnode, - &msgs_applied - ); + &msgs_applied); paranoid_invariant(!msgs_applied); if (r != TOKUDB_TRY_AGAIN) { assert_zero(r); - struct unlock_ftnode_extra unlock_extra = {ft_handle,childnode,false}; - struct unlockers next_unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, unlockers}; - const pivot_bounds next_bounds = bounds.next_bounds(node, left_child_number); - - r = toku_ft_keysrange_internal(ft_handle, childnode, key_left, key_right, child_may_find_right, - less, equal_left, middle, equal_right, greater, single_basement_node, - rows_per_child, min_bfe, match_bfe, &next_unlockers, &next_ancestors, next_bounds); + struct unlock_ftnode_extra unlock_extra = { + ft_handle, childnode, false}; + struct unlockers next_unlockers = { + true, unlock_ftnode_fun, (void *)&unlock_extra, unlockers}; + const pivot_bounds next_bounds = + bounds.next_bounds(node, left_child_number); + + r = toku_ft_keysrange_internal(ft_handle, + childnode, + key_left, + key_right, + child_may_find_right, + less, + equal_left, + middle, + equal_right, + greater, + single_basement_node, + rows_per_child, + min_bfe, + match_bfe, + &next_unlockers, + &next_ancestors, + next_bounds); if (r != TOKUDB_TRY_AGAIN) { assert_zero(r); - *less += 
rows_per_child * left_child_number; + *less += rows_per_child * left_child_number; if (*single_basement_node) { - *greater += rows_per_child * (node->n_children - left_child_number - 1); + *greater += rows_per_child * + (node->n_children - left_child_number - 1); } else { - *middle += rows_per_child * (node->n_children - left_child_number - 1); + *middle += rows_per_child * + (node->n_children - left_child_number - 1); } assert(unlockers->locked); @@ -4038,10 +4103,21 @@ toku_ft_keysrange_internal (FT_HANDLE ft_handle, FTNODE node, return r; } -void toku_ft_keysrange(FT_HANDLE ft_handle, DBT* key_left, DBT* key_right, uint64_t *less_p, uint64_t* equal_left_p, uint64_t* middle_p, uint64_t* equal_right_p, uint64_t* greater_p, bool* middle_3_exact_p) -// Effect: Return an estimate of the number of keys to the left, the number equal (to left key), number between keys, number equal to right key, and the number to the right of both keys. +void toku_ft_keysrange(FT_HANDLE ft_handle, + DBT *key_left, + DBT *key_right, + uint64_t *less_p, + uint64_t *equal_left_p, + uint64_t *middle_p, + uint64_t *equal_right_p, + uint64_t *greater_p, + bool *middle_3_exact_p) +// Effect: Return an estimate of the number of keys to the left, the number +// equal (to left key), number between keys, number equal to right key, and the +// number to the right of both keys. // The values are an estimate. -// If you perform a keyrange on two keys that are in the same basement, equal_less, middle, and equal_right will be exact. +// If you perform a keyrange on two keys that are in the same basement, +// equal_less, middle, and equal_right will be exact. // 4184: What to do with a NULL key? 
// key_left==NULL is treated as -infinity // key_right==NULL is treated as +infinity @@ -4049,10 +4125,21 @@ void toku_ft_keysrange(FT_HANDLE ft_handle, DBT* key_left, DBT* key_right, uint6 // key_right can be non-null only if key_left is non-null; { if (!key_left && key_right) { - // Simplify internals by only supporting key_right != null when key_left != null - // If key_right != null and key_left == null, then swap them and fix up numbers. - uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, greater = 0; - toku_ft_keysrange(ft_handle, key_right, nullptr, &less, &equal_left, &middle, &equal_right, &greater, middle_3_exact_p); + // Simplify internals by only supporting key_right != null when key_left + // != null + // If key_right != null and key_left == null, then swap them and fix up + // numbers. + uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, + greater = 0; + toku_ft_keysrange(ft_handle, + key_right, + nullptr, + &less, + &equal_left, + &middle, + &equal_right, + &greater, + middle_3_exact_p); *less_p = 0; *equal_left_p = 0; *middle_p = less; @@ -4065,98 +4152,132 @@ void toku_ft_keysrange(FT_HANDLE ft_handle, DBT* key_left, DBT* key_right, uint6 paranoid_invariant(!(!key_left && key_right)); ftnode_fetch_extra min_bfe; ftnode_fetch_extra match_bfe; - min_bfe.create_for_min_read(ft_handle->ft); // read pivot keys but not message buffers - match_bfe.create_for_keymatch(ft_handle->ft, key_left, key_right, false, false); // read basement node only if both keys in it. -try_again: + min_bfe.create_for_min_read( + ft_handle->ft); // read pivot keys but not message buffers + match_bfe.create_for_keymatch( + ft_handle->ft, + key_left, + key_right, + false, + false); // read basement node only if both keys in it. 
+try_again : { + uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, greater = 0; + bool single_basement_node = false; + FTNODE node = NULL; { - uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, greater = 0; - bool single_basement_node = false; - FTNODE node = NULL; - { - uint32_t fullhash; - CACHEKEY root_key; - toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash); - toku_pin_ftnode( - ft_handle->ft, - root_key, - fullhash, - &match_bfe, - PL_READ, // may_modify_node, cannot change root during keyrange - &node, - true - ); - } + uint32_t fullhash; + CACHEKEY root_key; + toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash); + toku_pin_ftnode( + ft_handle->ft, + root_key, + fullhash, + &match_bfe, + PL_READ, // may_modify_node, cannot change root during keyrange + &node, + true); + } - struct unlock_ftnode_extra unlock_extra = {ft_handle,node,false}; - struct unlockers unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, (UNLOCKERS)NULL}; + struct unlock_ftnode_extra unlock_extra = {ft_handle, node, false}; + struct unlockers unlockers = { + true, unlock_ftnode_fun, (void *)&unlock_extra, (UNLOCKERS)NULL}; - { - int r; - int64_t numrows = ft_handle->ft->in_memory_stats.numrows; - if (numrows < 0) - numrows = 0; // prevent appearance of a negative number - r = toku_ft_keysrange_internal (ft_handle, node, key_left, key_right, true, - &less, &equal_left, &middle, &equal_right, &greater, - &single_basement_node, numrows, - &min_bfe, &match_bfe, &unlockers, (ANCESTORS)NULL, pivot_bounds::infinite_bounds()); + { + int r; + int64_t numrows = ft_handle->ft->in_memory_logical_rows; + if (numrows < 0) + numrows = 0; // prevent appearance of a negative number + r = toku_ft_keysrange_internal(ft_handle, + node, + key_left, + key_right, + true, + &less, + &equal_left, + &middle, + &equal_right, + &greater, + &single_basement_node, + numrows, + &min_bfe, + &match_bfe, + &unlockers, + (ANCESTORS)NULL, + 
pivot_bounds::infinite_bounds()); + assert(r == 0 || r == TOKUDB_TRY_AGAIN); + if (r == TOKUDB_TRY_AGAIN) { + assert(!unlockers.locked); + goto try_again; + } + // May need to do a second query. + if (!single_basement_node && key_right != nullptr) { + // "greater" is stored in "middle" + invariant_zero(equal_right); + invariant_zero(greater); + uint64_t less2 = 0, equal_left2 = 0, middle2 = 0, equal_right2 = 0, + greater2 = 0; + bool ignore; + r = toku_ft_keysrange_internal(ft_handle, + node, + key_right, + nullptr, + false, + &less2, + &equal_left2, + &middle2, + &equal_right2, + &greater2, + &ignore, + numrows, + &min_bfe, + &match_bfe, + &unlockers, + (ANCESTORS) nullptr, + pivot_bounds::infinite_bounds()); assert(r == 0 || r == TOKUDB_TRY_AGAIN); if (r == TOKUDB_TRY_AGAIN) { assert(!unlockers.locked); goto try_again; } - // May need to do a second query. - if (!single_basement_node && key_right != nullptr) { - // "greater" is stored in "middle" - invariant_zero(equal_right); - invariant_zero(greater); - uint64_t less2 = 0, equal_left2 = 0, middle2 = 0, equal_right2 = 0, greater2 = 0; - bool ignore; - r = toku_ft_keysrange_internal (ft_handle, node, key_right, nullptr, false, - &less2, &equal_left2, &middle2, &equal_right2, &greater2, - &ignore, numrows, - &min_bfe, &match_bfe, &unlockers, (ANCESTORS)nullptr, pivot_bounds::infinite_bounds()); - assert(r == 0 || r == TOKUDB_TRY_AGAIN); - if (r == TOKUDB_TRY_AGAIN) { - assert(!unlockers.locked); - goto try_again; - } - invariant_zero(equal_right2); - invariant_zero(greater2); - // Update numbers. - // less is already correct. - // equal_left is already correct. - - // "middle" currently holds everything greater than left_key in first query - // 'middle2' currently holds everything greater than right_key in second query - // 'equal_left2' is how many match right_key - - // Prevent underflow. 
- if (middle >= equal_left2 + middle2) { - middle -= equal_left2 + middle2; - } else { - middle = 0; - } - equal_right = equal_left2; - greater = middle2; + invariant_zero(equal_right2); + invariant_zero(greater2); + // Update numbers. + // less is already correct. + // equal_left is already correct. + + // "middle" currently holds everything greater than left_key in + // first query + // 'middle2' currently holds everything greater than right_key in + // second query + // 'equal_left2' is how many match right_key + + // Prevent underflow. + if (middle >= equal_left2 + middle2) { + middle -= equal_left2 + middle2; + } else { + middle = 0; } + equal_right = equal_left2; + greater = middle2; } - assert(unlockers.locked); - toku_unpin_ftnode_read_only(ft_handle->ft, node); - if (!key_right) { - paranoid_invariant_zero(equal_right); - paranoid_invariant_zero(greater); - } - if (!key_left) { - paranoid_invariant_zero(less); - paranoid_invariant_zero(equal_left); - } - *less_p = less; - *equal_left_p = equal_left; - *middle_p = middle; - *equal_right_p = equal_right; - *greater_p = greater; - *middle_3_exact_p = single_basement_node; } + assert(unlockers.locked); + toku_unpin_ftnode_read_only(ft_handle->ft, node); + if (!key_right) { + paranoid_invariant_zero(equal_right); + paranoid_invariant_zero(greater); + } + if (!key_left) { + paranoid_invariant_zero(less); + paranoid_invariant_zero(equal_left); + } + *less_p = less; + *equal_left_p = equal_left; + *middle_p = middle; + *equal_right_p = equal_right; + *greater_p = greater; + *middle_3_exact_p = single_basement_node; +} } struct get_key_after_bytes_iterate_extra { diff --git a/storage/tokudb/PerconaFT/src/ydb.cc b/storage/tokudb/PerconaFT/src/ydb.cc index 45385ef9120..6eb138f15d7 100644 --- a/storage/tokudb/PerconaFT/src/ydb.cc +++ b/storage/tokudb/PerconaFT/src/ydb.cc @@ -71,6 +71,8 @@ const char *toku_copyright_string = "Copyright (c) 2006, 2015, Percona and/or it #include "util/status.h" #include "util/context.h" 
+#include <functional> + // Include ydb_lib.cc here so that its constructor/destructor gets put into // ydb.o, to make sure they don't get erased at link time (when linking to // a static libtokufractaltree.a that was compiled with gcc). See #5094. @@ -1315,6 +1317,159 @@ static const char *env_get_data_dir(DB_ENV *env) { return env->i->real_data_dir; } +static int env_dirtool_attach(DB_ENV *env, + DB_TXN *txn, + const char *dname, + const char *iname) { + int r; + DBT dname_dbt; + DBT iname_dbt; + + HANDLE_PANICKED_ENV(env); + if (!env_opened(env)) { + return EINVAL; + } + HANDLE_READ_ONLY_TXN(txn); + toku_fill_dbt(&dname_dbt, dname, strlen(dname) + 1); + toku_fill_dbt(&iname_dbt, iname, strlen(iname) + 1); + + r = toku_db_put(env->i->directory, + txn, + &dname_dbt, + &iname_dbt, + 0, + true); + return r; +} + +static int env_dirtool_detach(DB_ENV *env, + DB_TXN *txn, + const char *dname) { + int r; + DBT dname_dbt; + DBT old_iname_dbt; + + HANDLE_PANICKED_ENV(env); + if (!env_opened(env)) { + return EINVAL; + } + HANDLE_READ_ONLY_TXN(txn); + + toku_fill_dbt(&dname_dbt, dname, strlen(dname) + 1); + toku_init_dbt_flags(&old_iname_dbt, DB_DBT_REALLOC); + + r = toku_db_get(env->i->directory, + txn, + &dname_dbt, + &old_iname_dbt, + DB_SERIALIZABLE); // allocates memory for iname + if (r == DB_NOTFOUND) + return EEXIST; + toku_free(old_iname_dbt.data); + + r = toku_db_del(env->i->directory, txn, &dname_dbt, DB_DELETE_ANY, true); + + return r; +} + +static int env_dirtool_move(DB_ENV *env, + DB_TXN *txn, + const char *old_dname, + const char *new_dname) { + int r; + DBT old_dname_dbt; + DBT new_dname_dbt; + DBT iname_dbt; + + HANDLE_PANICKED_ENV(env); + if (!env_opened(env)) { + return EINVAL; + } + HANDLE_READ_ONLY_TXN(txn); + + toku_fill_dbt(&old_dname_dbt, old_dname, strlen(old_dname) + 1); + toku_fill_dbt(&new_dname_dbt, new_dname, strlen(new_dname) + 1); + toku_init_dbt_flags(&iname_dbt, DB_DBT_REALLOC); + + r = toku_db_get(env->i->directory, + txn, + 
&old_dname_dbt, + &iname_dbt, + DB_SERIALIZABLE); // allocates memory for iname + if (r == DB_NOTFOUND) + return EEXIST; + + r = toku_db_del( + env->i->directory, txn, &old_dname_dbt, DB_DELETE_ANY, true); + if (r != 0) + goto exit; + + r = toku_db_put( + env->i->directory, txn, &new_dname_dbt, &iname_dbt, 0, true); + +exit: + toku_free(iname_dbt.data); + return r; +} + +static int locked_env_op(DB_ENV *env, + DB_TXN *txn, + std::function<int(DB_TXN *)> f) { + int ret, r; + HANDLE_READ_ONLY_TXN(txn); + HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn); + + DB_TXN *child_txn = NULL; + int using_txns = env->i->open_flags & DB_INIT_TXN; + if (using_txns) { + ret = toku_txn_begin(env, txn, &child_txn, 0); + lazy_assert_zero(ret); + } + + // cannot begin a checkpoint + toku_multi_operation_client_lock(); + r = f(child_txn); + toku_multi_operation_client_unlock(); + + if (using_txns) { + if (r == 0) { + ret = locked_txn_commit(child_txn, 0); + lazy_assert_zero(ret); + } else { + ret = locked_txn_abort(child_txn); + lazy_assert_zero(ret); + } + } + return r; + +} + +static int locked_env_dirtool_attach(DB_ENV *env, + DB_TXN *txn, + const char *dname, + const char *iname) { + auto f = std::bind( + env_dirtool_attach, env, std::placeholders::_1, dname, iname); + return locked_env_op(env, txn, f); +} + +static int locked_env_dirtool_detach(DB_ENV *env, + DB_TXN *txn, + const char *dname) { + auto f = std::bind( + env_dirtool_detach, env, std::placeholders::_1, dname); + return locked_env_op(env, txn, f); +} + +static int locked_env_dirtool_move(DB_ENV *env, + DB_TXN *txn, + const char *old_dname, + const char *new_dname) { + auto f = std::bind( + env_dirtool_move, env, std::placeholders::_1, old_dname, new_dname); + return locked_env_op(env, txn, f); +} + static int env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, uint32_t flags); static int @@ -2657,6 +2812,9 @@ toku_env_create(DB_ENV ** envp, uint32_t flags) { #define SENV(name) result->name = 
locked_env_ ## name SENV(dbremove); SENV(dbrename); + SENV(dirtool_attach); + SENV(dirtool_detach); + SENV(dirtool_move); //SENV(set_noticecall); #undef SENV #define USENV(name) result->name = env_ ## name @@ -2988,8 +3146,10 @@ env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, u if (txn && r) { if (r == EMFILE || r == ENFILE) r = toku_ydb_do_error(env, r, "toku dbremove failed because open file limit reached\n"); - else + else if (r != ENOENT) r = toku_ydb_do_error(env, r, "toku dbremove failed\n"); + else + r = 0; goto exit; } if (txn) { diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc index 9db14dbb443..1a7c42e970f 100644 --- a/storage/tokudb/ha_tokudb.cc +++ b/storage/tokudb/ha_tokudb.cc @@ -5248,17 +5248,17 @@ int ha_tokudb::fill_range_query_buf( DEBUG_SYNC(ha_thd(), "tokudb_icp_asc_scan_out_of_range"); goto cleanup; } else if (result == ICP_NO_MATCH) { - // if we are performing a DESC ICP scan and have no end_range - // to compare to stop using ICP filtering as there isn't much more - // that we can do without going through contortions with remembering - // and comparing key parts. + // Optimizer change for MyRocks also benefits us here in TokuDB as + // opt_range.cc QUICK_SELECT::get_next now sets end_range during + // descending scan. 
We should not ever hit this condition, but + // leaving this code in to prevent any possibility of a descending + // scan to the beginning of an index and catch any possibility + // in debug builds with an assertion + assert_debug(!(!end_range && direction < 0)); if (!end_range && direction < 0) { - cancel_pushed_idx_cond(); - DEBUG_SYNC(ha_thd(), "tokudb_icp_desc_scan_invalidate"); } - error = TOKUDB_CURSOR_CONTINUE; goto cleanup; } @@ -6108,7 +6108,6 @@ int ha_tokudb::info(uint flag) { stats.records = share->row_count() + share->rows_from_locked_table; stats.deleted = 0; if (!(flag & HA_STATUS_NO_LOCK)) { - uint64_t num_rows = 0; error = txn_begin(db_env, NULL, &txn, DB_READ_UNCOMMITTED, ha_thd()); if (error) { @@ -6118,20 +6117,13 @@ int ha_tokudb::info(uint flag) { // we should always have a primary key assert_always(share->file != NULL); - error = estimate_num_rows(share->file, &num_rows, txn); - if (error == 0) { - share->set_row_count(num_rows, false); - stats.records = num_rows; - } else { - goto cleanup; - } - DB_BTREE_STAT64 dict_stats; error = share->file->stat64(share->file, txn, &dict_stats); if (error) { goto cleanup; } - + share->set_row_count(dict_stats.bt_ndata, false); + stats.records = dict_stats.bt_ndata; stats.create_time = dict_stats.bt_create_time_sec; stats.update_time = dict_stats.bt_modify_time_sec; stats.check_time = dict_stats.bt_verify_time_sec; @@ -7835,7 +7827,7 @@ ha_rows ha_tokudb::records_in_range(uint keynr, key_range* start_key, key_range* // As a result, equal may be 0 and greater may actually be equal+greater // So, we call key_range64 on the key, and the key that is after it. 
if (!start_key && !end_key) { - error = estimate_num_rows(kfile, &rows, transaction); + error = estimate_num_rows(share->file, &rows, transaction); if (error) { ret_val = HA_TOKUDB_RANGE_COUNT; goto cleanup; diff --git a/storage/tokudb/mysql-test/tokudb/r/bug-1657908.result b/storage/tokudb/mysql-test/tokudb/r/bug-1657908.result new file mode 100644 index 00000000000..1d86478d833 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/bug-1657908.result @@ -0,0 +1,70 @@ +SET GLOBAL tokudb_dir_per_db=ON; +CREATE PROCEDURE create_table() +BEGIN +CREATE TABLE test.t1 ( +a INT +) ENGINE = TokuDB +PARTITION BY RANGE (a) +(PARTITION p100 VALUES LESS THAN (100) ENGINE = TokuDB, +PARTITION p_to_del VALUES LESS THAN (200) ENGINE = TokuDB, +PARTITION p300 VALUES LESS THAN (300) ENGINE = TokuDB, +PARTITION p400 VALUES LESS THAN (400) ENGINE = TokuDB +); +END| +### Create partitioned table +CALL create_table(); +## Looking for *.tokudb files in data_dir +## Looking for *.tokudb files in data_dir/test +t1_P_p100_main_id.tokudb +t1_P_p100_status_id.tokudb +t1_P_p300_main_id.tokudb +t1_P_p300_status_id.tokudb +t1_P_p400_main_id.tokudb +t1_P_p400_status_id.tokudb +t1_P_p_to_del_main_id.tokudb +t1_P_p_to_del_status_id.tokudb +### Stop server +### Remove 'main' file of one of the partitions +### Start server +### Make sure 'main' partition file is deleted +## Looking for *.tokudb files in data_dir +## Looking for *.tokudb files in data_dir/test +t1_P_p100_main_id.tokudb +t1_P_p100_status_id.tokudb +t1_P_p300_main_id.tokudb +t1_P_p300_status_id.tokudb +t1_P_p400_main_id.tokudb +t1_P_p400_status_id.tokudb +t1_P_p_to_del_status_id.tokudb +### Make sure the table still exists +SHOW TABLES; +Tables_in_test +t1 +### Drop table +DROP TABLE t1; +### Make sure the table is dropped +SHOW TABLES; +Tables_in_test +### Check what files still exist after DROP TABLE +## Looking for *.tokudb files in data_dir +## Looking for *.tokudb files in data_dir/test +### Remove the rest of the files +### Make 
sure there are no tokudb files +## Looking for *.tokudb files in data_dir +## Looking for *.tokudb files in data_dir/test +### Create the same table once more +CALL create_table(); +## Looking for *.tokudb files in data_dir +## Looking for *.tokudb files in data_dir/test +t1_P_p100_main_id.tokudb +t1_P_p100_status_id.tokudb +t1_P_p300_main_id.tokudb +t1_P_p300_status_id.tokudb +t1_P_p400_main_id.tokudb +t1_P_p400_status_id.tokudb +t1_P_p_to_del_main_id.tokudb +t1_P_p_to_del_status_id.tokudb +### Restore state +DROP TABLE t1; +DROP PROCEDURE create_table; +SET GLOBAL tokudb_dir_per_db=default; diff --git a/storage/tokudb/mysql-test/tokudb/r/dir_cmd.result b/storage/tokudb/mysql-test/tokudb/r/dir_cmd.result new file mode 100644 index 00000000000..dd3b693db49 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/dir_cmd.result @@ -0,0 +1,58 @@ +SET GLOBAL tokudb_dir_per_db = ON; +SET tokudb_dir_cmd = "attach test_dname_1 test_iname_1"; +SET tokudb_dir_cmd = "attach test_dname_2 test_iname_2"; +SELECT dictionary_name, internal_file_name +FROM information_schema.TokuDB_file_map; +dictionary_name internal_file_name +test_dname_1 test_iname_1 +test_dname_2 test_iname_2 +SET tokudb_dir_cmd = "detach test_dname_1"; +SELECT dictionary_name, internal_file_name +FROM information_schema.TokuDB_file_map; +dictionary_name internal_file_name +test_dname_2 test_iname_2 +SET tokudb_dir_cmd = "move test_dname_2 test_dname_3"; +SELECT dictionary_name, internal_file_name +FROM information_schema.TokuDB_file_map; +dictionary_name internal_file_name +test_dname_3 test_iname_2 +SET tokudb_dir_cmd = "detach test_dname_3"; +SELECT dictionary_name, internal_file_name +FROM information_schema.TokuDB_file_map; +dictionary_name internal_file_name +CREATE TABLE t1(a int) ENGINE=tokudb; +INSERT INTO t1 (a) VALUES (10); +SELECT dictionary_name, internal_file_name +FROM information_schema.TokuDB_file_map; +dictionary_name internal_file_name +./test/t1-main ./test/t1_main_id.tokudb 
+./test/t1-status ./test/t1_status_id.tokudb +SET tokudb_dir_cmd = "attach ./test/t1-main test/t1-main-renamed.tokudb"; +SELECT dictionary_name, internal_file_name +FROM information_schema.TokuDB_file_map; +dictionary_name internal_file_name +./test/t1-main test/t1-main-renamed.tokudb +./test/t1-status ./test/t1_status_id.tokudb +### rename t1_main_id.tokudb to t1-main-renamed.tokudb +SELECT * FROM t1; +a +10 +### Test for errors notification +SET tokudb_dir_cmd = "detach foo"; +ERROR 42000: Variable 'tokudb_dir_cmd' can't be set to the value of 'detach foo' +SELECT @@tokudb_dir_cmd_last_error; +@@tokudb_dir_cmd_last_error +17 +SELECT @@tokudb_dir_cmd_last_error_string; +@@tokudb_dir_cmd_last_error_string +detach command error +SET @@tokudb_dir_cmd_last_error_string = "blablabla"; +SELECT @@tokudb_dir_cmd_last_error_string; +@@tokudb_dir_cmd_last_error_string +blablabla +SET STATEMENT tokudb_dir_cmd_last_error_string = "statement_blablabla" FOR +SELECT @@tokudb_dir_cmd_last_error_string; +@@tokudb_dir_cmd_last_error_string +statement_blablabla +DROP TABLE t1; +SET GLOBAL tokudb_dir_per_db = default; diff --git a/storage/tokudb/mysql-test/tokudb/t/background_job_manager.opt b/storage/tokudb/mysql-test/tokudb/t/background_job_manager.opt deleted file mode 100644 index 3cc9ea3009e..00000000000 --- a/storage/tokudb/mysql-test/tokudb/t/background_job_manager.opt +++ /dev/null @@ -1 +0,0 @@ ---tokudb-background-job-status diff --git a/storage/tokudb/mysql-test/tokudb/t/background_job_manager.test b/storage/tokudb/mysql-test/tokudb/t/background_job_manager.test index 933814442e0..709fc463696 100644 --- a/storage/tokudb/mysql-test/tokudb/t/background_job_manager.test +++ b/storage/tokudb/mysql-test/tokudb/t/background_job_manager.test @@ -1,3 +1,4 @@ +skip Background Job Manager not supported in MariaDB; # This is a comprehensive test for the background job manager and # the information_schema.tokudb_background_job_status table # diff --git 
a/storage/tokudb/mysql-test/tokudb/t/bug-1657908.test b/storage/tokudb/mysql-test/tokudb/t/bug-1657908.test new file mode 100644 index 00000000000..adcf4ef55f6 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/bug-1657908.test @@ -0,0 +1,73 @@ +--source include/have_partition.inc +# See https://bugs.launchpad.net/percona-server/+bug/1657908 + +source include/have_tokudb.inc; + +SET GLOBAL tokudb_dir_per_db=ON; + +--let $DB= test +--let $DATADIR= `SELECT @@datadir` + +--delimiter | +CREATE PROCEDURE create_table() +BEGIN +CREATE TABLE test.t1 ( + a INT +) ENGINE = TokuDB +PARTITION BY RANGE (a) +(PARTITION p100 VALUES LESS THAN (100) ENGINE = TokuDB, + PARTITION p_to_del VALUES LESS THAN (200) ENGINE = TokuDB, + PARTITION p300 VALUES LESS THAN (300) ENGINE = TokuDB, + PARTITION p400 VALUES LESS THAN (400) ENGINE = TokuDB +); +END| +--delimiter ; + +--echo ### Create partitioned table +CALL create_table(); +--source dir_per_db_show_table_files.inc + +--echo ### Stop server +--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--shutdown_server +--source include/wait_until_disconnected.inc + +--echo ### Remove 'main' file of one of the partitions +--remove_files_wildcard $DATADIR/$DB t1_P_p_to_del_main_*.tokudb + +--echo ### Start server +--enable_reconnect +--exec echo "restart: --loose-tokudb-dir-per-db=ON" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--source include/wait_until_connected_again.inc + +--echo ### Make sure 'main' partition file is deleted +--source dir_per_db_show_table_files.inc + +--echo ### Make sure the table still exists +SHOW TABLES; + +--echo ### Drop table +# error 1051 was here before the fix +DROP TABLE t1; + +--echo ### Make sure the table is dropped +SHOW TABLES; + +--echo ### Check what files still exist after DROP TABLE +--source dir_per_db_show_table_files.inc + +--echo ### Remove the rest of the files +--remove_files_wildcard $DATADIR/$DB *.tokudb + +--echo ### Make sure there are no tokudb files +--source 
dir_per_db_show_table_files.inc + +--echo ### Create the same table once more +# engine error 17 (EEXIST) was here before the fix +CALL create_table(); +--source dir_per_db_show_table_files.inc + +--echo ### Restore state +DROP TABLE t1; +DROP PROCEDURE create_table; +SET GLOBAL tokudb_dir_per_db=default; diff --git a/storage/tokudb/mysql-test/tokudb/t/card_auto_analyze_lots.test b/storage/tokudb/mysql-test/tokudb/t/card_auto_analyze_lots.test index ec74a4a28bc..9675449372b 100644 --- a/storage/tokudb/mysql-test/tokudb/t/card_auto_analyze_lots.test +++ b/storage/tokudb/mysql-test/tokudb/t/card_auto_analyze_lots.test @@ -1,3 +1,4 @@ +skip Background Job Manager not supported in MariaDB; # Test the auto analyze on lots of tables -- source include/have_tokudb.inc diff --git a/storage/tokudb/mysql-test/tokudb/t/dir_cmd.test b/storage/tokudb/mysql-test/tokudb/t/dir_cmd.test new file mode 100644 index 00000000000..b9d8c80de65 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/dir_cmd.test @@ -0,0 +1,52 @@ +skip TokuDB dir CMD disabled in MariaDB; +source include/have_tokudb.inc; + +--let $MYSQL_DATADIR=`select @@datadir` + +SET GLOBAL tokudb_dir_per_db = ON; + +SET tokudb_dir_cmd = "attach test_dname_1 test_iname_1"; +SET tokudb_dir_cmd = "attach test_dname_2 test_iname_2"; +SELECT dictionary_name, internal_file_name + FROM information_schema.TokuDB_file_map; + +SET tokudb_dir_cmd = "detach test_dname_1"; +SELECT dictionary_name, internal_file_name + FROM information_schema.TokuDB_file_map; + +SET tokudb_dir_cmd = "move test_dname_2 test_dname_3"; +SELECT dictionary_name, internal_file_name + FROM information_schema.TokuDB_file_map; + +SET tokudb_dir_cmd = "detach test_dname_3"; +SELECT dictionary_name, internal_file_name + FROM information_schema.TokuDB_file_map; + +CREATE TABLE t1(a int) ENGINE=tokudb; +INSERT INTO t1 (a) VALUES (10); +--source include/table_files_replace_pattern.inc +SELECT dictionary_name, internal_file_name + FROM 
information_schema.TokuDB_file_map; + +SET tokudb_dir_cmd = "attach ./test/t1-main test/t1-main-renamed.tokudb"; +--source include/table_files_replace_pattern.inc +SELECT dictionary_name, internal_file_name + FROM information_schema.TokuDB_file_map; + +--echo ### rename t1_main_id.tokudb to t1-main-renamed.tokudb +--exec mv $MYSQL_DATADIR/test/t1_main_*.tokudb $MYSQL_DATADIR/test/t1-main-renamed.tokudb + +SELECT * FROM t1; + +--echo ### Test for errors notification +--error 1231 +SET tokudb_dir_cmd = "detach foo"; +SELECT @@tokudb_dir_cmd_last_error; +SELECT @@tokudb_dir_cmd_last_error_string; +SET @@tokudb_dir_cmd_last_error_string = "blablabla"; +SELECT @@tokudb_dir_cmd_last_error_string; +SET STATEMENT tokudb_dir_cmd_last_error_string = "statement_blablabla" FOR + SELECT @@tokudb_dir_cmd_last_error_string; + +DROP TABLE t1; +SET GLOBAL tokudb_dir_per_db = default; diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test index 42fb548814f..8fe5e66a9b3 100644 --- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test +++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test @@ -1,3 +1,4 @@ +skip Tokudb Lock Waits not in I_S in MariaDB; # verify that tokudb_locks and tokudb_lock_waits contents for 2 conflicting transactions with a lock timeout source include/have_tokudb.inc; diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test index 8f205ad7f45..59d04ead386 100644 --- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test +++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test @@ -1,3 +1,4 @@ +skip Tokudb Lock Waits not in I_S in MariaDB; # verify that information_schema.tokudb_locks gets populated with locks for 2 clients source include/have_tokudb.inc; diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test 
b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test index 517280391c4..b4ab64be962 100644 --- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test +++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test @@ -1,3 +1,4 @@ +skip Tokudb trx not in I_S in MariaDB; # verify that information_schema.tokudb_trx gets populated with transactions source include/have_tokudb.inc; diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/backup_master_info.result b/storage/tokudb/mysql-test/tokudb_backup/r/backup_master_info.result new file mode 100644 index 00000000000..992a828e287 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/r/backup_master_info.result @@ -0,0 +1,26 @@ +### +# Test for binlog position +##### +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +CREATE TABLE t1(a INT) ENGINE=TokuDB; +DROP TABLE t1; +Backup +include/filter_file.inc +### tokubackup_slave_info content: +host: #.#.#.#, user: ####, port: ####, master log file: ####, relay log file: ####, exec master log pos: ####, executed gtid set: , channel name: +### +# Test for gtid set +##### +include/rpl_set_gtid_mode.inc +CREATE TABLE t1(a INT) ENGINE=TokuDB; +DROP TABLE t1; +Backup +include/filter_file.inc +### tokubackup_slave_info content: +host: #.#.#.#, user: ####, port: ####, master log file: ####, relay log file: ####, exec master log pos: ####, executed gtid set: ####, channel name: +include/rpl_set_gtid_mode.inc +include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/backup_master_state.result b/storage/tokudb/mysql-test/tokudb_backup/r/backup_master_state.result new file mode 100644 index 00000000000..072dfff448b --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/r/backup_master_state.result @@ -0,0 +1,36 @@ +### Create backup dir +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +### Check for settings +SELECT @@gtid_mode; +@@gtid_mode +OFF +### Generate some binlog events +CREATE TABLE t1(a INT) ENGINE=TokuDB; +DROP TABLE t1; +### Master backup +include/filter_file.inc +### tokubackup_binlog_info content: +filename: ####, position: ####, gtid_mode: OFF, GTID of last change: +### Delete backup dir +### Create backup dir +### GTID-mode on +include/rpl_set_gtid_mode.inc +### Check for settings +SELECT @@gtid_mode; +@@gtid_mode +ON +### Generate some binlog events +CREATE TABLE t1(a INT) ENGINE=TokuDB; +DROP TABLE t1; +### Master backup +include/filter_file.inc +### tokubackup_binlog_info content: +filename: ####, position: ####, gtid_mode: ON, GTID of last change: ##### +### Delete backup dir +### GTID-mode off +include/rpl_set_gtid_mode.inc +include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/empty_slave_info_file.result b/storage/tokudb/mysql-test/tokudb_backup/r/empty_slave_info_file.result new file mode 100644 index 00000000000..a0af40d80cc --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/r/empty_slave_info_file.result @@ -0,0 +1 @@ +Backup diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/innodb_use_native_aio_enabled.result b/storage/tokudb/mysql-test/tokudb_backup/r/innodb_use_native_aio_enabled.result new file mode 100644 index 00000000000..94e113fc87d --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/r/innodb_use_native_aio_enabled.result @@ -0,0 +1,5 @@ +SELECT @@innodb_use_native_aio; +@@innodb_use_native_aio +1 +SET SESSION tokudb_backup_dir='MYSQL_TMP_DIR/tokudb_backup'; +ERROR 42000: Variable 'tokudb_backup_dir' can't be set to the value of 'MYSQL_TMP_DIR/tokudb_backup' diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/rpl_safe_slave.result b/storage/tokudb/mysql-test/tokudb_backup/r/rpl_safe_slave.result new file mode 100644 index 00000000000..13b5915354f --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/r/rpl_safe_slave.result @@ -0,0 +1,77 
@@ +### +# Master-slave test +#### +include/rpl_init.inc [topology=1->2] +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +### Create temp table on master +CREATE TEMPORARY TABLE t1 (a INT); +include/sync_slave_sql_with_master.inc +### Setup debug_sync points and prepare for slave backup +SET SESSION debug="+d,debug_sync_abort_on_timeout"; +SHOW STATUS LIKE 'Slave_open_temp_tables'; +Variable_name Value +Slave_open_temp_tables 1 +SET DEBUG_SYNC= 'tokudb_backup_wait_for_safe_slave_entered SIGNAL sse WAIT_FOR sse_continue'; +SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_begin SIGNAL ttlb WAIT_FOR ttlb_continue'; +SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_slave_started SIGNAL ttlss WAIT_FOR ttlss_continue EXECUTE 2'; +SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_end SIGNAL ttle WAIT_FOR ttle_continue'; +### Turn-on safe-slave option +SET GLOBAL tokudb_backup_safe_slave=ON; +SET GLOBAL tokudb_backup_safe_slave_timeout=30; +### Start slave backup +SET SESSION debug="+d,debug_sync_abort_on_timeout"; +### Wait for safe slave function to start +SET DEBUG_SYNC = "now WAIT_FOR sse"; +SHOW STATUS LIKE 'Slave_open_temp_tables'; +Variable_name Value +Slave_open_temp_tables 1 +### Wait for safe slave loop start +SET DEBUG_SYNC = "now SIGNAL sse_continue WAIT_FOR ttlb"; +SHOW STATUS LIKE 'Slave_open_temp_tables'; +Variable_name Value +Slave_open_temp_tables 1 +### Wait for safe thread loop point just after slave sql thread start 1 +SET DEBUG_SYNC = "now SIGNAL ttlb_continue WAIT_FOR ttlss"; +SHOW STATUS LIKE 'Slave_open_temp_tables'; +Variable_name Value +Slave_open_temp_tables 1 +### Wait for safe thread loop end 
+SET DEBUG_SYNC = "now SIGNAL ttlss_continue WAIT_FOR ttle"; +SHOW STATUS LIKE 'Slave_open_temp_tables'; +Variable_name Value +Slave_open_temp_tables 1 +### Wait for safe thread loop point just after slave sql thread start 2 +SET DEBUG_SYNC = "now SIGNAL ttle_continue WAIT_FOR ttlss"; +### Drop temp table on master +DROP TABLE t1; +### and syncronize slave +include/sync_slave_sql_with_master.inc +SHOW STATUS LIKE 'Slave_open_temp_tables'; +Variable_name Value +Slave_open_temp_tables 0 +### Continue backup +SET DEBUG_SYNC = "now SIGNAL ttlss_continue"; +## Reset debug_sync points +SET DEBUG_SYNC = "RESET"; +### Wait for backup finish +include/filter_file.inc +### Slave tokubackup_slave_info content: +host: #.#.#.#, user: ####, port: ####, master log file: ####, relay log file: ####, exec master log pos: ####, executed gtid set: , channel name: +### Delete slave backup dir +### Turn-off safe-slave option for slave +SET GLOBAL tokudb_backup_safe_slave=default; +SET GLOBAL tokudb_backup_safe_slave_timeout=default; +### Turn-on safe-slave option for master +SET GLOBAL tokudb_backup_safe_slave=ON; +SET GLOBAL tokudb_backup_safe_slave_timeout=30; +### Backup master +### Turn-off safe-slave option for master +SET GLOBAL tokudb_backup_safe_slave=default; +SET GLOBAL tokudb_backup_safe_slave_timeout=default; +include/filter_file.inc +### Master tokubackup_binlog_info content: +filename: ####, position: ####, gtid_mode: OFF, GTID of last change: +### Delete master backup dir +include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/rpl_tokudb_commit_sync.result b/storage/tokudb/mysql-test/tokudb_backup/r/rpl_tokudb_commit_sync.result new file mode 100644 index 00000000000..50508f073ab --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/r/rpl_tokudb_commit_sync.result @@ -0,0 +1,59 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. 
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +[connection master] +### Create some data on master +CREATE TABLE t1(a INT, b INT, PRIMARY KEY (a)) ENGINE=TokuDB; +INSERT INTO t1 SET a=100, b=100; +INSERT INTO t1 SET a=200, b=100; +INSERT INTO t1 SET a=300, b=100; +INSERT INTO t1 SET a=400, b=100; +INSERT INTO t1 SET a=500, b=100; +UPDATE t1 SET b = 200 WHERE a = 200; +DELETE FROM t1 WHERE a = 100; +SELECT * FROM t1; +a b +200 200 +300 100 +400 100 +500 100 +### Check for slave options +SELECT @@tokudb_commit_sync; +@@tokudb_commit_sync +0 +SELECT @@tokudb_fsync_log_period; +@@tokudb_fsync_log_period +1000000 +### Check data on slave after sync +SELECT * FROM t1; +a b +200 200 +300 100 +400 100 +500 100 +### Do backup on slave +### Check for errors +SELECT @@session.tokudb_backup_last_error; +@@session.tokudb_backup_last_error +0 +SELECT @@session.tokudb_backup_last_error_string; +@@session.tokudb_backup_last_error_string +NULL +### Stop slave server +include/rpl_stop_server.inc [server_number=2] +### Restore backup +### Start slave server and slave threads +include/rpl_start_server.inc [server_number=2] +include/start_slave.inc +### Sync slave with master +### Check data on slave +SELECT * FROM t1; +a b +200 200 +300 100 +400 100 +500 100 +### Cleanup +DROP TABLE t1; +include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/backup_master_info.test b/storage/tokudb/mysql-test/tokudb_backup/t/backup_master_info.test new file mode 100644 index 00000000000..8e9f6df4b1d --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/backup_master_info.test @@ -0,0 +1,94 @@ +--source include/have_tokudb_backup.inc +--source include/not_gtid_enabled.inc + + +--let $SLAVE_INFO_FILE=tokubackup_slave_info +--let 
$BACKUP_DIR_SLAVE=$MYSQL_TMP_DIR/tokudb_backup_slave +--let $SLAVE_INFO_FILE_PATH=$BACKUP_DIR_SLAVE/$SLAVE_INFO_FILE +--let DDIR=$BACKUP_DIR_SLAVE + +# Settings for include/filter_file.inc + +--delimiter | +let $script= + s{host: [^,]+,}{host: #.#.#.#,}; + s{user: [^,]+,}{user: ####,}; + s{port: [^,]+,}{port: ####,}; + s{master log file: [^,]+,}{master log file: ####,}; + s{relay log file: [^,]+,}{relay log file: ####,}; + s{exec master log pos: [^,]+,}{exec master log pos: ####,}; + s{executed gtid set: [^,]+, }{executed gtid set: ####, }; + s{executed gtid set: [^,]+,[^,]+, }{executed gtid set: ####,####, }; +| +--delimiter ; +--let $input_file = $SLAVE_INFO_FILE_PATH +--let $skip_column_names= 1 + +--echo ### +--echo # Test for binlog position +--echo ##### + +--mkdir $BACKUP_DIR_SLAVE + +--source include/master-slave.inc + +--connection master +CREATE TABLE t1(a INT) ENGINE=TokuDB; +DROP TABLE t1; + +--sync_slave_with_master + +--connection slave +--echo Backup +--disable_query_log +--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE' +--enable_query_log + +--source include/filter_file.inc +--echo ### $SLAVE_INFO_FILE content: +--cat_file $SLAVE_INFO_FILE_PATH + +--perl +use File::Path 'rmtree'; +$DDIR=$ENV{"DDIR"}; +rmtree([ "$DDIR" ]); +EOF + +--echo ### +--echo # Test for gtid set +--echo ##### + +--mkdir $BACKUP_DIR_SLAVE + +--let $rpl_server_numbers= 1,2 +--let $rpl_set_enforce_gtid_consistency= 1 +--source include/rpl_set_gtid_mode.inc + +--connection master +CREATE TABLE t1(a INT) ENGINE=TokuDB; +DROP TABLE t1; + +--sync_slave_with_master + +--connection slave +--echo Backup +--disable_query_log +--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE' +--enable_query_log + +--source include/filter_file.inc +--echo ### $SLAVE_INFO_FILE content: +--cat_file $SLAVE_INFO_FILE_PATH + +--perl +use File::Path 'rmtree'; +$DDIR=$ENV{"DDIR"}; +rmtree([ "$DDIR" ]); +EOF + +--let $rpl_gtid_mode= OFF +--let $rpl_set_enforce_gtid_consistency= 0 +--let 
$rpl_server_numbers= 1,2 +--let $rpl_skip_sync= 1 +--source include/rpl_set_gtid_mode.inc +--source include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/backup_master_state.test b/storage/tokudb/mysql-test/tokudb_backup/t/backup_master_state.test new file mode 100644 index 00000000000..c301d55f8fa --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/backup_master_state.test @@ -0,0 +1,87 @@ +--source include/have_tokudb_backup.inc +--source include/not_gtid_enabled.inc + +--let $MASTER_STATE_FILE=tokubackup_binlog_info +--let $BACKUP_DIR_MASTER=$MYSQL_TMP_DIR/tokudb_backup_master +--let $MASTER_STATE_FILE_PATH=$BACKUP_DIR_MASTER/$MASTER_STATE_FILE +--let DDIR=$BACKUP_DIR_MASTER + +# Settings for include/filter_file.inc +--delimiter | +let $script= + s{filename: [^,]+,}{filename: ####,}; + s{position: [^,]+,}{position: ####,}; + s{GTID of last change: [^ ]+}{GTID of last change: #####}; +| +--delimiter ; +--let $input_file = $MASTER_STATE_FILE_PATH +--let $skip_column_names= 1 + +--echo ### Create backup dir +--mkdir $BACKUP_DIR_MASTER + +--source include/master-slave.inc + +--connection master + +--echo ### Check for settings +SELECT @@gtid_mode; + +--echo ### Generate some binlog events +CREATE TABLE t1(a INT) ENGINE=TokuDB; +DROP TABLE t1; + +--echo ### Master backup +--disable_query_log +--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_MASTER' +--enable_query_log + +--source include/filter_file.inc +--echo ### $MASTER_STATE_FILE content: +--cat_file $MASTER_STATE_FILE_PATH + +--echo ### Delete backup dir +--perl +use File::Path 'rmtree'; +$DDIR=$ENV{"DDIR"}; +rmtree([ "$DDIR" ]); +EOF + +--echo ### Create backup dir +--mkdir $BACKUP_DIR_MASTER + +--echo ### GTID-mode on +--let $rpl_server_numbers= 1,2 +--let $rpl_set_enforce_gtid_consistency= 1 +--source include/rpl_set_gtid_mode.inc + +--echo ### Check for settings +SELECT @@gtid_mode; + +--echo ### Generate some binlog events +CREATE TABLE t1(a INT) ENGINE=TokuDB; +DROP TABLE 
t1; + +--echo ### Master backup +--disable_query_log +--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_MASTER' +--enable_query_log + +--source include/filter_file.inc +--echo ### $MASTER_STATE_FILE content: +--cat_file $MASTER_STATE_FILE_PATH + +--echo ### Delete backup dir +--perl +use File::Path 'rmtree'; +$DDIR=$ENV{"DDIR"}; +rmtree([ "$DDIR" ]); +EOF + +--echo ### GTID-mode off +--let $rpl_gtid_mode= OFF +--let $rpl_set_enforce_gtid_consistency= 0 +--let $rpl_server_numbers= 1,2 +--source include/rpl_set_gtid_mode.inc + +--source include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/empty_slave_info_file.test b/storage/tokudb/mysql-test/tokudb_backup/t/empty_slave_info_file.test new file mode 100644 index 00000000000..53592903a27 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/empty_slave_info_file.test @@ -0,0 +1,23 @@ +--source include/have_tokudb_backup.inc +--source include/not_gtid_enabled.inc + + +--let $SLAVE_INFO_FILE=tokubackup_slave_info +--let $BACKUP_DIR_SLAVE=$MYSQL_TMP_DIR/tokudb_backup_slave +--let DDIR=$BACKUP_DIR_SLAVE + +--mkdir $BACKUP_DIR_SLAVE + +--echo Backup +--disable_query_log +--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE' +--enable_query_log + +--list_files $BACKUP_DIR_SLAVE $SLAVE_INFO_FILE + +--perl +use File::Path 'rmtree'; +$DDIR=$ENV{"DDIR"}; +rmtree([ "$DDIR" ]); +EOF + diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled-master.opt b/storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled-master.opt new file mode 100644 index 00000000000..5f5dbb9c7c6 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled-master.opt @@ -0,0 +1 @@ +--innodb_use_native_aio=on diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled.test b/storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled.test new file mode 100644 index 00000000000..3e09b465c02 --- /dev/null +++ 
b/storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled.test @@ -0,0 +1,19 @@ +# Check if tokudb hot backup is prevented if innodb_use_native_aio enabled +--source include/have_tokudb_backup.inc +--source include/have_innodb.inc + +SELECT @@innodb_use_native_aio; + +--let BACKUP_DIR= $MYSQL_TMP_DIR/tokudb_backup + +--mkdir $BACKUP_DIR + +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +--error ER_WRONG_VALUE_FOR_VAR +--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR' + +--perl +use File::Path 'rmtree'; +$DDIR=$ENV{"BACKUP_DIR"}; +rmtree([ "$DDIR" ]); +EOF diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-master.opt b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-master.opt new file mode 100644 index 00000000000..af3a211967b --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-master.opt @@ -0,0 +1 @@ +--binlog-format=statement diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-slave.opt b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-slave.opt new file mode 100644 index 00000000000..49405b1aec3 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-slave.opt @@ -0,0 +1 @@ +--master-info-repository=TABLE --relay-log-info-repository=TABLE diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.cnf b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.cnf new file mode 100644 index 00000000000..321be4ab2fc --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.cnf @@ -0,0 +1,14 @@ +!include ../../rpl/my.cnf + +[mysqld.1] + +[mysqld.2] + +[mysqld.3] +master-info-repository=TABLE +relay-log-info-repository=TABLE + +[ENV] +SERVER_MYPORT_3= @mysqld.3.port +SERVER_MYSOCK_3= @mysqld.3.socket + diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.inc b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.inc new file mode 100644 index 00000000000..e0732ee63fc --- /dev/null +++ 
b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.inc @@ -0,0 +1,112 @@ +--connection server_1 +--echo ### Create temp table on master +CREATE TEMPORARY TABLE t1 (a INT); + +--let $sync_slave_connection= server_2 +--source include/sync_slave_sql_with_master.inc + +--echo ### Setup debug_sync points and prepare for slave backup +--connection slave_2 +SET SESSION debug="+d,debug_sync_abort_on_timeout"; + +SHOW STATUS LIKE 'Slave_open_temp_tables'; + +SET DEBUG_SYNC= 'tokudb_backup_wait_for_safe_slave_entered SIGNAL sse WAIT_FOR sse_continue'; +SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_begin SIGNAL ttlb WAIT_FOR ttlb_continue'; +SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_slave_started SIGNAL ttlss WAIT_FOR ttlss_continue EXECUTE 2'; +SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_end SIGNAL ttle WAIT_FOR ttle_continue'; + +--mkdir $BACKUP_DIR_SLAVE + +--echo ### Turn-on safe-slave option +SET GLOBAL tokudb_backup_safe_slave=ON; +SET GLOBAL tokudb_backup_safe_slave_timeout=30; + +--echo ### Start slave backup +--disable_query_log +--send_eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE' +--enable_query_log + +--connection server_2 +SET SESSION debug="+d,debug_sync_abort_on_timeout"; + +--echo ### Wait for safe slave function to start +SET DEBUG_SYNC = "now WAIT_FOR sse"; +SHOW STATUS LIKE 'Slave_open_temp_tables'; +--echo ### Wait for safe slave loop start +SET DEBUG_SYNC = "now SIGNAL sse_continue WAIT_FOR ttlb"; +SHOW STATUS LIKE 'Slave_open_temp_tables'; +--echo ### Wait for safe thread loop point just after slave sql thread start 1 +SET DEBUG_SYNC = "now SIGNAL ttlb_continue WAIT_FOR ttlss"; +SHOW STATUS LIKE 'Slave_open_temp_tables'; +--echo ### Wait for safe thread loop end +SET DEBUG_SYNC = "now SIGNAL ttlss_continue WAIT_FOR ttle"; +SHOW STATUS LIKE 'Slave_open_temp_tables'; + +--echo ### Wait for safe thread loop point just after slave sql thread start 2 +SET DEBUG_SYNC = "now SIGNAL ttle_continue WAIT_FOR 
ttlss"; + +--connection server_1 +--echo ### Drop temp table on master +DROP TABLE t1; + +--echo ### and syncronize slave +--let $sync_slave_connection= server_2 +--source include/sync_slave_sql_with_master.inc + +SHOW STATUS LIKE 'Slave_open_temp_tables'; + +--echo ### Continue backup +SET DEBUG_SYNC = "now SIGNAL ttlss_continue"; + +--echo ## Reset debug_sync points +SET DEBUG_SYNC = "RESET"; + +--connection slave_2 +--echo ### Wait for backup finish +--reap + +--let $input_file = $S_SLAVE_INFO_FILE_PATH +--source include/filter_file.inc +--echo ### Slave $SLAVE_INFO_FILE content: +--cat_file $S_SLAVE_INFO_FILE_PATH + +--echo ### Delete slave backup dir +--perl +use File::Path 'rmtree'; +$DDIR=$ENV{"BACKUP_DIR_SLAVE"}; +rmtree([ "$DDIR" ]); +EOF + +--echo ### Turn-off safe-slave option for slave +SET GLOBAL tokudb_backup_safe_slave=default; +SET GLOBAL tokudb_backup_safe_slave_timeout=default; + +--connection server_1 + +--echo ### Turn-on safe-slave option for master +SET GLOBAL tokudb_backup_safe_slave=ON; +SET GLOBAL tokudb_backup_safe_slave_timeout=30; + +--echo ### Backup master +--mkdir $BACKUP_DIR_MASTER +--disable_query_log +--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_MASTER' +--enable_query_log + +--echo ### Turn-off safe-slave option for master +SET GLOBAL tokudb_backup_safe_slave=default; +SET GLOBAL tokudb_backup_safe_slave_timeout=default; + +--let $input_file = $M_MASTER_INFO_FILE_PATH +--source include/filter_file.inc +--echo ### Master $MASTER_INFO_FILE content: +--cat_file $M_MASTER_INFO_FILE_PATH + +--echo ### Delete master backup dir +--perl +use File::Path 'rmtree'; +$DDIR=$ENV{"BACKUP_DIR_MASTER"}; +rmtree([ "$DDIR" ]); +EOF + diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.test b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.test new file mode 100644 index 00000000000..15ba1d8bb66 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.test @@ -0,0 +1,49 @@ +--source 
include/have_tokudb_backup.inc +--source include/have_binlog_format_statement.inc +--source include/have_debug_sync.inc + +--let $SLAVE_INFO_FILE=tokubackup_slave_info +--let $MASTER_INFO_FILE=tokubackup_binlog_info + +--let BACKUP_DIR_SLAVE=$MYSQL_TMP_DIR/tokudb_backup_slave +--let $S_SLAVE_INFO_FILE_PATH=$BACKUP_DIR_SLAVE/$SLAVE_INFO_FILE + +--let BACKUP_DIR_MASTER=$MYSQL_TMP_DIR/tokudb_backup_master +--let $M_MASTER_INFO_FILE_PATH=$BACKUP_DIR_MASTER/$MASTER_INFO_FILE + +# Settings for include/filter_file.inc +--delimiter | +let $script= + s{filename: [^,]+,}{filename: ####,}; + s{position: [^,]+,}{position: ####,}; + s{GTID of last change: [^ ]+}{GTID of last change: #####}; + s{host: [^,]+,}{host: #.#.#.#,}; + s{user: [^,]+,}{user: ####,}; + s{port: [^,]+,}{port: ####,}; + s{master log file: [^,]+,}{master log file: ####,}; + s{relay log file: [^,]+,}{relay log file: ####,}; + s{exec master log pos: [^,]+,}{exec master log pos: ####,}; + s{executed gtid set: [^,]+, }{executed gtid set: ####, }; + s{executed gtid set: [^,]+,[^,]+, }{executed gtid set: ####,####, }; +| +--delimiter ; +--let $skip_column_names= 1 + +--disable_query_log +CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT"); +CALL mtr.add_suppression("Sending passwords in plain text without SSL/TLS is extremely insecure"); +--enable_query_log + +--echo ### +--echo # Master-slave test +--echo #### + +--let $rpl_server_count=3 +--let $rpl_topology=1->2 +--source include/rpl_init.inc + +--connect (slave_2,localhost,root,,test,$SLAVE_MYPORT,$SLAVE_MYSOCK) + +--source rpl_safe_slave.inc + +--source include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync-slave.opt b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync-slave.opt new file mode 100644 index 00000000000..263e1aef0ab --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync-slave.opt @@ -0,0 
+1 @@ +--loose-tokudb-commit-sync=OFF --loose-tokudb-fsync-log-period=1000000 diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync.test b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync.test new file mode 100644 index 00000000000..5bd53cacdab --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync.test @@ -0,0 +1,72 @@ +# if --tokudb-commit-sync is off on slave tokudb log must be flushed on backup +# to provide the ability to restore replication after backup restoring + +--source include/have_tokudb_backup.inc + +--let $BACKUP_DIR_SLAVE= $MYSQL_TMP_DIR/tokudb_backup_slave +--let $BACKUP_MYSQL_DATA_DIR= $BACKUP_DIR_SLAVE/mysql_data_dir + +--mkdir $BACKUP_DIR_SLAVE + +--source include/master-slave.inc + +--echo ### Create some data on master +--connection master +CREATE TABLE t1(a INT, b INT, PRIMARY KEY (a)) ENGINE=TokuDB; +INSERT INTO t1 SET a=100, b=100; +INSERT INTO t1 SET a=200, b=100; +INSERT INTO t1 SET a=300, b=100; +INSERT INTO t1 SET a=400, b=100; +INSERT INTO t1 SET a=500, b=100; +UPDATE t1 SET b = 200 WHERE a = 200; +DELETE FROM t1 WHERE a = 100; + +SELECT * FROM t1; + +--sync_slave_with_master +--let $SLAVE_DATA_DIR=`SELECT @@DATADIR` + +--echo ### Check for slave options +SELECT @@tokudb_commit_sync; +SELECT @@tokudb_fsync_log_period; + +--echo ### Check data on slave after sync +SELECT * FROM t1; + + +--echo ### Do backup on slave +--disable_query_log +--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE' +--enable_query_log + +--echo ### Check for errors +SELECT @@session.tokudb_backup_last_error; +SELECT @@session.tokudb_backup_last_error_string; + +--echo ### Stop slave server +--connection slave +--let $rpl_server_number= 2 +--let $rpl_force_stop= 1 +--source include/rpl_stop_server.inc + +--echo ### Restore backup +--exec rm -rf $SLAVE_DATA_DIR; +--exec mv $BACKUP_MYSQL_DATA_DIR $SLAVE_DATA_DIR; + +--echo ### Start slave server and slave threads +--connection slave +--source 
include/rpl_start_server.inc +--source include/start_slave.inc + +--echo ### Sync slave with master +--connection master +--sync_slave_with_master + +--echo ### Check data on slave +SELECT * FROM t1; + +--echo ### Cleanup +--connection master +DROP TABLE t1; + +--source include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt b/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt index 5d4cb245e27..a624d6895cc 100644 --- a/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt +++ b/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt @@ -1 +1 @@ -$TOKUDB_OPT $TOKUDB_LOAD_ADD_PATH $TOKUDB_BACKUP_OPT $TOKUDB_BACKUP_LOAD_ADD_PATH --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M +$TOKUDB_OPT $TOKUDB_LOAD_ADD_PATH $TOKUDB_BACKUP_OPT $TOKUDB_BACKUP_LOAD_ADD_PATH --loose-innodb_use_native_aio=off --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db233.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db233.result index e5808f52e69..a7cdbcae1e2 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/r/db233.result +++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db233.result @@ -14,16 +14,6 @@ INSERT INTO t1 VALUES(1, 1, '1', '1'), (2, 2, '2', '2'), (3, 3, '3', '3'), (4, 4 ANALYZE TABLE t1; Table Op Msg_type Msg_text test.t1 analyze status OK -set DEBUG_SYNC = 'tokudb_icp_desc_scan_invalidate SIGNAL hit1 WAIT_FOR done1'; -SELECT c FROM t1 WHERE id BETWEEN 5 AND 8 ORDER BY id DESC; -set DEBUG_SYNC = 'now WAIT_FOR hit1'; -set DEBUG_SYNC = 'now SIGNAL done1'; -c -8 -7 -6 -6 -5 set DEBUG_SYNC = 'tokudb_icp_asc_scan_out_of_range SIGNAL hit2 WAIT_FOR done2'; SELECT c FROM t1 WHERE id BETWEEN 5 AND 8 ORDER BY id ASC; set DEBUG_SYNC = 'now WAIT_FOR hit2'; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db233.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db233.test index 8e4c3b73c09..fec11bf0553 100644 --- 
a/storage/tokudb/mysql-test/tokudb_bugs/t/db233.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db233.test @@ -29,24 +29,6 @@ ANALYZE TABLE t1; # lets flip to another connection connect(conn1, localhost, root); -# set up the DEBUG_SYNC point -set DEBUG_SYNC = 'tokudb_icp_desc_scan_invalidate SIGNAL hit1 WAIT_FOR done1'; - -# send the query -send SELECT c FROM t1 WHERE id BETWEEN 5 AND 8 ORDER BY id DESC; - -# back to default connection -connection default; - -# wait for the ICP reverse scan to invalidate -set DEBUG_SYNC = 'now WAIT_FOR hit1'; - -# lets release and clean up -set DEBUG_SYNC = 'now SIGNAL done1'; - -connection conn1; -reap; - # set up the DEBUG_SYNC point again, but for the out of range set DEBUG_SYNC = 'tokudb_icp_asc_scan_out_of_range SIGNAL hit2 WAIT_FOR done2'; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test index dfd935b17ff..0502b35bc2c 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test @@ -1,3 +1,4 @@ +skip Tokudb Lock Waits not in I_S in MariaDB; # check for any locking weirdness on DELETE triggers source include/have_tokudb.inc; set default_storage_engine='tokudb'; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test index ffe2face9f2..313b1f96b52 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test @@ -1,3 +1,4 @@ +skip Tokudb Lock Waits not in I_S in MariaDB; # check for any locking weirdness on INSERT triggers source include/have_tokudb.inc; set default_storage_engine='tokudb'; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test index 063a88cb4ab..ce1eb6bddd1 
100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test @@ -1,3 +1,4 @@ +skip Tokudb Lock Waits not in I_S in MariaDB; # check for any locking weirdness on UPDATE triggers source include/have_tokudb.inc; set default_storage_engine='tokudb'; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test index 50434a79a00..d65bf3d95de 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test @@ -1,3 +1,4 @@ +skip Background Job Manager not supported in MariaDB; # This test for DB-938 tests a race condition where a scheduled background job # (analyze) ends up operating on a set of DB* key_file[] in TOKUDB_SHARE that # were set to NULL during a TRUNCATE TABLE operation. diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db945.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db945.test index c7774877291..50dc91829d8 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/db945.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db945.test @@ -1,3 +1,4 @@ +skip Background Job Manager not supported in MariaDB; source include/have_tokudb.inc; set default_storage_engine='tokudb'; disable_warnings; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/leak172.test b/storage/tokudb/mysql-test/tokudb_bugs/t/leak172.test index 9d82604e4b1..28cfff14770 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/leak172.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/leak172.test @@ -132,6 +132,7 @@ CREATE TABLE `t2` ( ); LOAD DATA INFILE 'leak172_t1.data' INTO TABLE `t1` fields terminated by ','; +remove_file $MYSQLD_DATADIR/test/leak172_t1.data; connect(conn1,localhost,root,,); set session debug_dbug="+d,tokudb_end_bulk_insert_sleep"; @@ -145,6 +146,7 @@ UPDATE t1, t2 SET t1.`c5` = 4 WHERE t1.`c6` <= 'o'; connection conn1; reap; +remove_file $MYSQLD_DATADIR/test/leak172_t2.data; 
connection default; disconnect conn1; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb718.test b/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb718.test index 735a88afed8..e7ef3211401 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb718.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb718.test @@ -1,3 +1,4 @@ +skip Tokudb Fractal Tree info not in I_S in MariaDB; # test DB-718, a crash caused by broken error handling in tokudb's fractal_tree_info information schema source include/have_tokudb.inc; set default_storage_engine='tokudb'; diff --git a/storage/tokudb/tokudb_dir_cmd.cc b/storage/tokudb/tokudb_dir_cmd.cc new file mode 100644 index 00000000000..5431cbab7aa --- /dev/null +++ b/storage/tokudb/tokudb_dir_cmd.cc @@ -0,0 +1,331 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +/* -*- mode: C; c-basic-offset: 4 -*- */ +#ident "$Id$" +/*====== +This file is part of TokuDB + + +Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. + + TokuDBis is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License, version 2, + as published by the Free Software Foundation. + + TokuDB is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with TokuDB. If not, see <http://www.gnu.org/licenses/>. + +======= */ + +#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." 
+ +#include "hatoku_hton.h" +#include "tokudb_dir_cmd.h" +#include "my_dbug.h" +#include "sql_base.h" + +#include <vector> +#include <string> + +namespace tokudb { + +const char tokens_delimiter = ' '; +const char tokens_escape_delimiter_char = '\\'; + +static int MDL_and_TDC(THD *thd, + const char *db, + const char *table, + const dir_cmd_callbacks &cb) { + int error; + LEX_CSTRING db_arg; + LEX_CSTRING table_arg; + + db_arg.str = const_cast<char *>(db); + db_arg.length = strlen(db);; + table_arg.str = const_cast<char *>(table); + table_arg.length = strlen(table); + Table_ident table_ident(thd, &db_arg, &table_arg, true);; + thd->lex->select_lex.add_table_to_list( + thd, &table_ident, NULL, 1, TL_UNLOCK, MDL_EXCLUSIVE, 0, 0, 0); + /* The lock will be released at the end of mysq_execute_command() */ + error = lock_table_names(thd, + thd->lex->select_lex.table_list.first, + NULL, + thd->variables.lock_wait_timeout, + 0); + if (error) { + if (cb.set_error) + cb.set_error(thd, + error, + "Can't lock table '%s.%s'", + db, + table); + return error; + } + tdc_remove_table(thd, TDC_RT_REMOVE_ALL, db, table, false); + return error; +} + +static bool parse_db_and_table(const char *dname, + std::string /*out*/ &db_name, + std::string /*out*/ &table_name) { + const char *begin; + const char *end; + const char *db_name_begin; + const char *db_name_end; + + begin = strchr(dname, '/'); + if (!begin) + return false; + ++begin; + end = strchr(begin, '/'); + if (!end) + return false; + + db_name_begin = begin; + db_name_end = end; + + begin = end + 1; + + end = strchr(begin, '-'); + if (!end) + return false; + + if (strncmp(end, "-main", strlen("-main")) && + strncmp(end, "-status", strlen("-status")) && + strncmp(end, "-key", strlen("-key"))) + return false; + + db_name.assign(db_name_begin, db_name_end); + table_name.assign(begin, end); + + return true; +} + +static int attach(THD *thd, + const std::string &dname, + const std::string &iname, + const dir_cmd_callbacks &cb) { + int 
error; + DB_TXN* txn = NULL; + DB_TXN *parent_txn = NULL; + tokudb_trx_data *trx = NULL; + + std::string db_name; + std::string table_name; + + if (parse_db_and_table(dname.c_str(), db_name, table_name)) { + error = MDL_and_TDC(thd, db_name.c_str(), table_name.c_str(), cb); + if (error) + goto cleanup; + } + + trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); + if (trx && trx->sub_sp_level) + parent_txn = trx->sub_sp_level; + error = txn_begin(db_env, parent_txn, &txn, 0, thd); + if (error) + goto cleanup; + + error = db_env->dirtool_attach(db_env, + txn, + dname.c_str(), + iname.c_str()); +cleanup: + if (txn) { + if (error) { + abort_txn(txn); + } + else { + commit_txn(txn, 0); + } + } + return error; +} + +static int detach(THD *thd, + const std::string &dname, + const dir_cmd_callbacks &cb) { + int error; + DB_TXN* txn = NULL; + DB_TXN *parent_txn = NULL; + tokudb_trx_data *trx = NULL; + + std::string db_name; + std::string table_name; + + if (parse_db_and_table(dname.c_str(), db_name, table_name)) { + error = MDL_and_TDC(thd, db_name.c_str(), table_name.c_str(), cb); + if (error) + goto cleanup; + } + + trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); + if (trx && trx->sub_sp_level) + parent_txn = trx->sub_sp_level; + error = txn_begin(db_env, parent_txn, &txn, 0, thd); + if (error) + goto cleanup; + + error = db_env->dirtool_detach(db_env, + txn, + dname.c_str()); +cleanup: + if (txn) { + if (error) { + abort_txn(txn); + } + else { + commit_txn(txn, 0); + } + } + return error; +} + +static int move(THD *thd, + const std::string &old_dname, + const std::string &new_dname, + const dir_cmd_callbacks &cb) { + int error; + DB_TXN* txn = NULL; + DB_TXN *parent_txn = NULL; + tokudb_trx_data *trx = NULL; + + std::string db_name; + std::string table_name; + + if (parse_db_and_table(old_dname.c_str(), db_name, table_name)) { + error = MDL_and_TDC(thd, db_name.c_str(), table_name.c_str(), cb); + if (error) + goto cleanup; + } + + trx = 
(tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); + if (trx && trx->sub_sp_level) + parent_txn = trx->sub_sp_level; + error = txn_begin(db_env, parent_txn, &txn, 0, thd); + if (error) + goto cleanup; + + error = db_env->dirtool_move(db_env, + txn, + old_dname.c_str(), + new_dname.c_str()); +cleanup: + if (txn) { + if (error) { + abort_txn(txn); + } + else { + commit_txn(txn, 0); + } + } + return error; +} + +static void tokenize(const char *cmd_str, + std::vector<std::string> /*out*/ &tokens) { + DBUG_ASSERT(cmd_str); + + bool was_escape = false; + const char *token_begin = cmd_str; + const char *token_end = token_begin; + + while (*token_end) { + if (*token_end == tokens_escape_delimiter_char) { + was_escape = true; + } + else if (*token_end == tokens_delimiter) { + if (was_escape) + was_escape = false; + else { + if (token_begin == token_end) + ++token_begin; + else { + tokens.push_back(std::string(token_begin, token_end)); + token_begin = token_end + 1; + } + } + } + else { + was_escape = false; + } + ++token_end; + } + + if (token_begin != token_end) + tokens.push_back(std::string(token_begin, token_end)); +} + +void process_dir_cmd(THD *thd, + const char *cmd_str, + const dir_cmd_callbacks &cb) { + + DBUG_ASSERT(thd); + DBUG_ASSERT(cmd_str); + + std::vector<std::string> tokens; + tokenize(cmd_str, tokens); + + if (tokens.empty()) + return; + + const std::string &cmd = tokens[0]; + + if (!cmd.compare("attach")) { + if (tokens.size() != 3) { + if (cb.set_error) + cb.set_error(thd, + EINVAL, + "attach command requires two arguments"); + } + else { + int r = attach(thd, tokens[1], tokens[2], cb); + if (r && cb.set_error) + cb.set_error(thd, r, "Attach command error"); + } + } + else if (!cmd.compare("detach")) { + if (tokens.size() != 2) { + if (cb.set_error) + cb.set_error(thd, + EINVAL, + "detach command requires one argument"); + } + else { + int r = detach(thd, tokens[1], cb); + if (r && cb.set_error) + cb.set_error(thd, r, "detach command error"); + } + 
} + else if (!cmd.compare("move")) { + if (tokens.size() != 3) { + if (cb.set_error) + cb.set_error(thd, + EINVAL, + "move command requires two arguments"); + } + else { + int r = move(thd, tokens[1], tokens[2], cb); + if (r && cb.set_error) + cb.set_error(thd, r, "move command error"); + } + } + else { + if (cb.set_error) + cb.set_error(thd, + ENOENT, + "Unknown command '%s'", + cmd.c_str()); + } + + return; +}; + + +} // namespace tokudb diff --git a/storage/tokudb/tokudb_dir_cmd.h b/storage/tokudb/tokudb_dir_cmd.h new file mode 100644 index 00000000000..b39caadc7c3 --- /dev/null +++ b/storage/tokudb/tokudb_dir_cmd.h @@ -0,0 +1,46 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id$" +/*====== +This file is part of TokuDB + + +Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. + + TokuDBis is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License, version 2, + as published by the Free Software Foundation. + + TokuDB is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with TokuDB. If not, see <http://www.gnu.org/licenses/>. + +======= */ + +#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." 
+ +#ifndef _TOKUDB_DIR_CMD_H +#define _TOKUDB_DIR_CMD_H + +#include <sql_class.h> + +namespace tokudb { + +struct dir_cmd_callbacks { + void (*set_error)(THD *thd, + int error, + const char *error_fmt, + ...); +}; + +void process_dir_cmd(THD *thd, + const char *cmd_str, + const dir_cmd_callbacks &cb); + +}; + +#endif // _TOKUDB_DIR_CMD_H diff --git a/storage/tokudb/tokudb_sysvars.cc b/storage/tokudb/tokudb_sysvars.cc index b758929c10e..bbc39dc550a 100644 --- a/storage/tokudb/tokudb_sysvars.cc +++ b/storage/tokudb/tokudb_sysvars.cc @@ -25,6 +25,9 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." #include "hatoku_hton.h" +#include "sql_acl.h" +#include "tokudb_dir_cmd.h" +#include "sql_parse.h" namespace tokudb { namespace sysvars { @@ -40,6 +43,7 @@ namespace sysvars { #define TOKUDB_VERSION_STR NULL #endif + ulonglong cache_size = 0; uint cachetable_pool_threads = 0; int cardinality_scale_percent = 0; @@ -918,8 +922,6 @@ static MYSQL_THDVAR_BOOL( true); #endif - - //****************************************************************************** // all system variables //****************************************************************************** @@ -949,7 +951,6 @@ st_mysql_sys_var* system_variables[] = { MYSQL_SYSVAR(version), MYSQL_SYSVAR(write_status_frequency), MYSQL_SYSVAR(dir_per_db), - #if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL MYSQL_SYSVAR(gdb_path), MYSQL_SYSVAR(gdb_on_fatal), diff --git a/storage/xtradb/btr/btr0btr.cc b/storage/xtradb/btr/btr0btr.cc index d84c93f8b3e..85a083aaee0 100644 --- a/storage/xtradb/btr/btr0btr.cc +++ b/storage/xtradb/btr/btr0btr.cc @@ -2,7 +2,7 @@ Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2014, 2017, MariaDB Corporation +Copyright (c) 2014, 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -745,7 +745,8 @@ btr_root_block_get( if (index && index->table) { index->table->file_unreadable = true; - ib_push_warning(index->table->thd, DB_DECRYPTION_FAILED, + ib_push_warning( + static_cast<THD*>(NULL), DB_DECRYPTION_FAILED, "Table %s in tablespace %lu is encrypted but encryption service or" " used key_id is not available. " " Can't continue reading table.", @@ -1703,9 +1704,7 @@ btr_create( dict_index_t* index, /*!< in: index */ mtr_t* mtr) /*!< in: mini-transaction handle */ { - ulint page_no; buf_block_t* block; - buf_frame_t* frame; page_t* page; page_zip_des_t* page_zip; @@ -1720,6 +1719,10 @@ btr_create( space, 0, IBUF_HEADER + IBUF_TREE_SEG_HEADER, mtr); + if (ibuf_hdr_block == NULL) { + return(FIL_NULL); + } + buf_block_dbg_add_level( ibuf_hdr_block, SYNC_IBUF_TREE_NODE_NEW); @@ -1733,7 +1736,17 @@ btr_create( + IBUF_HEADER + IBUF_TREE_SEG_HEADER, IBUF_TREE_ROOT_PAGE_NO, FSP_UP, mtr); + + if (block == NULL) { + return(FIL_NULL); + } + ut_ad(buf_block_get_page_no(block) == IBUF_TREE_ROOT_PAGE_NO); + + buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW); + + flst_init(block->frame + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, + mtr); } else { #ifdef UNIV_BLOB_DEBUG if ((type & DICT_CLUSTERED) && !index->blobs) { @@ -1745,34 +1758,19 @@ btr_create( #endif /* UNIV_BLOB_DEBUG */ block = fseg_create(space, 0, PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr); - } - if (block == NULL) { - - return(FIL_NULL); - } - - page_no = buf_block_get_page_no(block); - frame = buf_block_get_frame(block); - - if (type & DICT_IBUF) { - /* It is an insert buffer tree: initialize the free list */ - buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW); - - ut_ad(page_no == IBUF_TREE_ROOT_PAGE_NO); + if (block == NULL) { + return(FIL_NULL); + } - flst_init(frame + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, mtr); - } else { - /* It is 
a non-ibuf tree: create a file segment for leaf - pages */ buf_block_dbg_add_level(block, SYNC_TREE_NODE_NEW); - if (!fseg_create(space, page_no, + if (!fseg_create(space, buf_block_get_page_no(block), PAGE_HEADER + PAGE_BTR_SEG_LEAF, mtr)) { /* Not enough space for new segment, free root segment before return. */ - btr_free_root(space, zip_size, page_no, mtr); - + btr_free_root(space, zip_size, + buf_block_get_page_no(block), mtr); return(FIL_NULL); } @@ -1816,7 +1814,7 @@ btr_create( ut_ad(page_get_max_insert_size(page, 2) > 2 * BTR_PAGE_MAX_REC_SIZE); - return(page_no); + return(buf_block_get_page_no(block)); } /************************************************************//** diff --git a/storage/xtradb/btr/btr0cur.cc b/storage/xtradb/btr/btr0cur.cc index 1ea1ec0696b..ffd7ebc7504 100644 --- a/storage/xtradb/btr/btr0cur.cc +++ b/storage/xtradb/btr/btr0cur.cc @@ -1329,18 +1329,21 @@ btr_cur_ins_lock_and_undo( index, thr, mtr, inherit); if (err != DB_SUCCESS + || !(~flags | (BTR_NO_UNDO_LOG_FLAG | BTR_KEEP_SYS_FLAG)) || !dict_index_is_clust(index) || dict_index_is_ibuf(index)) { return(err); } - err = trx_undo_report_row_operation(flags, TRX_UNDO_INSERT_OP, - thr, index, entry, - NULL, 0, NULL, NULL, - &roll_ptr); - if (err != DB_SUCCESS) { - - return(err); + if (flags & BTR_NO_UNDO_LOG_FLAG) { + roll_ptr = 0; + } else { + err = trx_undo_report_row_operation(thr, index, entry, + NULL, 0, NULL, NULL, + &roll_ptr); + if (err != DB_SUCCESS) { + return(err); + } } /* Now we can fill in the roll ptr field in entry */ @@ -1389,15 +1392,17 @@ btr_cur_optimistic_insert( btr_cur_t* cursor, /*!< in: cursor on page after which to insert; cursor stays valid */ ulint** offsets,/*!< out: offsets on *rec */ - mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */ + mem_heap_t** heap, /*!< in/out: pointer to memory heap */ dtuple_t* entry, /*!< in/out: entry to insert */ rec_t** rec, /*!< out: pointer to inserted record if succeed */ big_rec_t** big_rec,/*!< out: big 
rec vector whose fields have to - be stored externally by the caller, or - NULL */ + be stored externally by the caller */ ulint n_ext, /*!< in: number of externally stored columns */ - que_thr_t* thr, /*!< in: query thread or NULL */ + que_thr_t* thr, /*!< in/out: query thread; can be NULL if + !(~flags + & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) */ mtr_t* mtr) /*!< in/out: mini-transaction; if this function returns DB_SUCCESS on a leaf page of a secondary index in a @@ -1418,6 +1423,7 @@ btr_cur_optimistic_insert( ulint rec_size; dberr_t err; + ut_ad(thr || !(~flags & (BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG))); *big_rec = NULL; block = btr_cur_get_block(cursor); @@ -1427,7 +1433,10 @@ btr_cur_optimistic_insert( page = buf_block_get_frame(block); index = cursor->index; - ut_ad((thr && thr_get_trx(thr)->fake_changes) + const bool fake_changes = (~flags & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) + && thr_get_trx(thr)->fake_changes; + ut_ad(fake_changes || mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); ut_ad(!dict_index_is_online_ddl(index) || dict_index_is_clust(index) @@ -1568,7 +1577,7 @@ fail_err: goto fail_err; } - if (UNIV_UNLIKELY(thr && thr_get_trx(thr)->fake_changes)) { + if (UNIV_UNLIKELY(fake_changes)) { /* skip CHANGE, LOG */ *big_rec = big_rec_vec; return(err); /* == DB_SUCCESS */ @@ -1686,15 +1695,17 @@ btr_cur_pessimistic_insert( cursor stays valid */ ulint** offsets,/*!< out: offsets on *rec */ mem_heap_t** heap, /*!< in/out: pointer to memory heap - that can be emptied, or NULL */ + that can be emptied */ dtuple_t* entry, /*!< in/out: entry to insert */ rec_t** rec, /*!< out: pointer to inserted record if succeed */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or - NULL */ + be stored externally by the caller */ ulint n_ext, /*!< in: number of externally stored columns */ - que_thr_t* thr, /*!< in: query thread or NULL */ + que_thr_t* thr, /*!< in/out: query thread; 
can be NULL if + !(~flags + & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) */ mtr_t* mtr) /*!< in/out: mini-transaction */ { dict_index_t* index = cursor->index; @@ -1706,13 +1717,17 @@ btr_cur_pessimistic_insert( ulint n_reserved = 0; ut_ad(dtuple_check_typed(entry)); + ut_ad(thr || !(~flags & (BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG))); *big_rec = NULL; - ut_ad((thr && thr_get_trx(thr)->fake_changes) || mtr_memo_contains(mtr, + const bool fake_changes = (~flags & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) + && thr_get_trx(thr)->fake_changes; + ut_ad(fake_changes || mtr_memo_contains(mtr, dict_index_get_lock(btr_cur_get_index(cursor)), MTR_MEMO_X_LOCK)); - ut_ad((thr && thr_get_trx(thr)->fake_changes) || mtr_memo_contains(mtr, btr_cur_get_block(cursor), + ut_ad(fake_changes || mtr_memo_contains(mtr, btr_cur_get_block(cursor), MTR_MEMO_PAGE_X_FIX)); ut_ad(!dict_index_is_online_ddl(index) || dict_index_is_clust(index) @@ -1773,7 +1788,7 @@ btr_cur_pessimistic_insert( } } - if (UNIV_UNLIKELY(thr && thr_get_trx(thr)->fake_changes)) { + if (UNIV_UNLIKELY(fake_changes)) { /* skip CHANGE, LOG */ if (n_reserved > 0) { fil_space_release_free_extents(index->space, @@ -1871,7 +1886,9 @@ btr_cur_upd_lock_and_undo( const rec_t* rec; dberr_t err; - if (UNIV_UNLIKELY(thr_get_trx(thr)->fake_changes)) { + ut_ad((thr != NULL) || (flags & BTR_NO_LOCKING_FLAG)); + + if (!(flags & BTR_NO_LOCKING_FLAG) && thr_get_trx(thr)->fake_changes) { /* skip LOCK, UNDO */ return(DB_SUCCESS); } @@ -1906,9 +1923,10 @@ btr_cur_upd_lock_and_undo( /* Append the info about the update in the undo log */ - return(trx_undo_report_row_operation( - flags, TRX_UNDO_MODIFY_OP, thr, - index, NULL, update, + return((flags & BTR_NO_UNDO_LOG_FLAG) + ? 
DB_SUCCESS + : trx_undo_report_row_operation( + thr, index, NULL, update, cmpl_info, rec, offsets, roll_ptr)); } @@ -2659,12 +2677,12 @@ btr_cur_pessimistic_update( ulint** offsets,/*!< out: offsets on cursor->page_cur.rec */ mem_heap_t** offsets_heap, /*!< in/out: pointer to memory heap - that can be emptied, or NULL */ + that can be emptied */ mem_heap_t* entry_heap, /*!< in/out: memory heap for allocating big_rec and the index tuple */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or NULL */ + be stored externally by the caller */ const upd_t* update, /*!< in: update vector; this is allowed also contain trx id and roll ptr fields, but the values in update vector have no effect */ @@ -3239,7 +3257,7 @@ btr_cur_del_mark_set_clust_rec( return(err); } - err = trx_undo_report_row_operation(0, TRX_UNDO_MODIFY_OP, thr, + err = trx_undo_report_row_operation(thr, index, NULL, NULL, 0, rec, offsets, &roll_ptr); if (err != DB_SUCCESS) { diff --git a/storage/xtradb/btr/btr0defragment.cc b/storage/xtradb/btr/btr0defragment.cc index 64dc077d582..c2f58a8e1cf 100644 --- a/storage/xtradb/btr/btr0defragment.cc +++ b/storage/xtradb/btr/btr0defragment.cc @@ -154,7 +154,6 @@ btr_defragment_init() (ulonglong) (1000000.0 / srv_defragment_frequency)); mutex_create(btr_defragment_mutex_key, &btr_defragment_mutex, SYNC_ANY_LATCH); - os_thread_create(btr_defragment_thread, NULL, NULL); } /******************************************************************//** @@ -735,14 +734,13 @@ btr_defragment_n_pages( return current_block; } -/******************************************************************//** -Thread that merges consecutive b-tree pages into fewer pages to defragment -the index. 
*/ +/** Whether btr_defragment_thread is active */ +bool btr_defragment_thread_active; + +/** Merge consecutive b-tree pages into fewer pages to defragment indexes */ extern "C" UNIV_INTERN os_thread_ret_t -DECLARE_THREAD(btr_defragment_thread)( -/*==========================================*/ - void* arg) /*!< in: work queue */ +DECLARE_THREAD(btr_defragment_thread)(void*) { btr_pcur_t* pcur; btr_cur_t* cursor; @@ -752,6 +750,8 @@ DECLARE_THREAD(btr_defragment_thread)( buf_block_t* last_block; while (srv_shutdown_state == SRV_SHUTDOWN_NONE) { + ut_ad(btr_defragment_thread_active); + /* If defragmentation is disabled, sleep before checking whether it's enabled. */ if (!srv_defragment) { @@ -825,9 +825,9 @@ DECLARE_THREAD(btr_defragment_thread)( btr_defragment_remove_item(item); } } - btr_defragment_shutdown(); + + btr_defragment_thread_active = false; os_thread_exit(NULL); OS_THREAD_DUMMY_RETURN; } - #endif /* !UNIV_HOTBACKUP */ diff --git a/storage/xtradb/btr/btr0sea.cc b/storage/xtradb/btr/btr0sea.cc index 68dbcdf1fa7..2f0428747d5 100644 --- a/storage/xtradb/btr/btr0sea.cc +++ b/storage/xtradb/btr/btr0sea.cc @@ -192,7 +192,7 @@ btr_search_sys_create( &btr_search_latch_arr[i], SYNC_SEARCH_SYS); btr_search_sys->hash_tables[i] - = ha_create(hash_size, 0, MEM_HEAP_FOR_BTR_SEARCH, 0); + = ib_create(hash_size, 0, MEM_HEAP_FOR_BTR_SEARCH, 0); #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG btr_search_sys->hash_tables[i]->adaptive = TRUE; diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc index c57dab79ef7..01bec11d2ed 100644 --- a/storage/xtradb/buf/buf0buf.cc +++ b/storage/xtradb/buf/buf0buf.cc @@ -2,7 +2,7 @@ Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. -Copyright (c) 2013, 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2013, 2017, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. 
Those modifications are gratefully acknowledged and are described @@ -65,6 +65,18 @@ Created 11/5/1995 Heikki Tuuri #include "fil0pagecompress.h" #include "ha_prototypes.h" +#ifdef UNIV_LINUX +#include <stdlib.h> +#endif + +#ifdef HAVE_LZO +#include "lzo/lzo1x.h" +#endif + +#ifdef HAVE_SNAPPY +#include "snappy-c.h" +#endif + /** Decrypt a page. @param[in,out] bpage Page control block @param[in,out] space tablespace @@ -74,9 +86,38 @@ bool buf_page_decrypt_after_read(buf_page_t* bpage, fil_space_t* space) MY_ATTRIBUTE((nonnull)); +/********************************************************************//** +Mark a table with the specified space pointed by bpage->space corrupted. +Also remove the bpage from LRU list. +@param[in,out] bpage Block */ +static +void +buf_mark_space_corrupt( + buf_page_t* bpage); + /* prototypes for new functions added to ha_innodb.cc */ trx_t* innobase_get_trx(); +inline void* aligned_malloc(size_t size, size_t align) { + void *result; +#ifdef _MSC_VER + result = _aligned_malloc(size, align); +#else + if(posix_memalign(&result, align, size)) { + result = 0; + } +#endif + return result; +} + +inline void aligned_free(void *ptr) { +#ifdef _MSC_VER + _aligned_free(ptr); +#else + free(ptr); +#endif +} + static inline void _increment_page_get_statistics(buf_block_t* block, trx_t* trx) @@ -108,10 +149,6 @@ _increment_page_get_statistics(buf_block_t* block, trx_t* trx) return; } -#ifdef HAVE_LZO -#include "lzo/lzo1x.h" -#endif - /* IMPLEMENTATION OF THE BUFFER POOL ================================= @@ -1510,8 +1547,6 @@ buf_pool_init_instance( buf_pool->chunks = chunk = (buf_chunk_t*) mem_zalloc(sizeof *chunk); - UT_LIST_INIT(buf_pool->free); - if (!buf_chunk_init(buf_pool, chunk, buf_pool_size)) { mem_free(chunk); mem_free(buf_pool); @@ -1533,7 +1568,7 @@ buf_pool_init_instance( ut_a(srv_n_page_hash_locks != 0); ut_a(srv_n_page_hash_locks <= MAX_PAGE_HASH_LOCKS); - buf_pool->page_hash = ha_create(2 * buf_pool->curr_size, + buf_pool->page_hash = 
ib_create(2 * buf_pool->curr_size, srv_n_page_hash_locks, MEM_HEAP_FOR_PAGE_HASH, SYNC_BUF_PAGE_HASH); @@ -1642,20 +1677,14 @@ buf_pool_free_instance( if (buf_pool->tmp_arr) { for(ulint i = 0; i < buf_pool->tmp_arr->n_slots; i++) { buf_tmp_buffer_t* slot = &(buf_pool->tmp_arr->slots[i]); -#ifdef HAVE_LZO - if (slot && slot->lzo_mem) { - ut_free(slot->lzo_mem); - slot->lzo_mem = NULL; - } -#endif - if (slot && slot->crypt_buf_free) { - ut_free(slot->crypt_buf_free); - slot->crypt_buf_free = NULL; + if (slot && slot->crypt_buf) { + aligned_free(slot->crypt_buf); + slot->crypt_buf = NULL; } - if (slot && slot->comp_buf_free) { - ut_free(slot->comp_buf_free); - slot->comp_buf_free = NULL; + if (slot && slot->comp_buf) { + aligned_free(slot->comp_buf); + slot->comp_buf = NULL; } } } @@ -2521,17 +2550,26 @@ buf_zip_decompress( { const byte* frame = block->page.zip.data; ulint size = page_zip_get_size(&block->page.zip); + /* Space is not found if this function is called during IMPORT */ + fil_space_t* space = fil_space_acquire_for_io(block->page.space); + const unsigned key_version = mach_read_from_4(frame + + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); + fil_space_crypt_t* crypt_data = space ? space->crypt_data : NULL; + const bool encrypted = crypt_data + && crypt_data->type != CRYPT_SCHEME_UNENCRYPTED + && (!crypt_data->is_default_encryption() + || srv_encrypt_tables); ut_ad(buf_block_get_zip_size(block)); ut_a(buf_block_get_space(block) != 0); if (UNIV_UNLIKELY(check && !page_zip_verify_checksum(frame, size))) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: compressed page checksum mismatch" - " (space %u page %u): stored: %lu, crc32: %lu " - "innodb: %lu, none: %lu\n", + ib_logf(IB_LOG_LEVEL_ERROR, + "Compressed page checksum mismatch" + " for %s [%u:%u]: stored: " ULINTPF ", crc32: " ULINTPF + " innodb: " ULINTPF ", none: " ULINTPF ".", + space ? 
space->chain.start->name : "N/A", block->page.space, block->page.offset, mach_read_from_4(frame + FIL_PAGE_SPACE_OR_CHKSUM), page_zip_calc_checksum(frame, size, @@ -2540,22 +2578,28 @@ buf_zip_decompress( SRV_CHECKSUM_ALGORITHM_INNODB), page_zip_calc_checksum(frame, size, SRV_CHECKSUM_ALGORITHM_NONE)); - return(FALSE); + goto err_exit; } switch (fil_page_get_type(frame)) { - case FIL_PAGE_INDEX: + case FIL_PAGE_INDEX: { + if (page_zip_decompress(&block->page.zip, block->frame, TRUE)) { + if (space) { + fil_space_release_for_io(space); + } return(TRUE); } - fprintf(stderr, - "InnoDB: unable to decompress space %u page %u\n", + ib_logf(IB_LOG_LEVEL_ERROR, + "Unable to decompress space %s [%u:%u]", + space ? space->chain.start->name : "N/A", block->page.space, block->page.offset); - return(FALSE); + goto err_exit; + } case FIL_PAGE_TYPE_ALLOCATED: case FIL_PAGE_INODE: case FIL_PAGE_IBUF_BITMAP: @@ -2566,14 +2610,36 @@ buf_zip_decompress( /* Copy to uncompressed storage. */ memcpy(block->frame, frame, buf_block_get_zip_size(block)); + + if (space) { + fil_space_release_for_io(space); + } + return(TRUE); } - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: unknown compressed page" - " type %lu\n", - fil_page_get_type(frame)); + ib_logf(IB_LOG_LEVEL_ERROR, + "Unknown compressed page in %s [%u:%u]" + " type %s [" ULINTPF "].", + space ? 
space->chain.start->name : "N/A", + block->page.space, block->page.offset, + fil_get_page_type_name(fil_page_get_type(frame)), fil_page_get_type(frame)); + +err_exit: + if (encrypted) { + ib_logf(IB_LOG_LEVEL_INFO, + "Row compressed page could be encrypted with key_version %u.", + key_version); + block->page.encrypted = true; + dict_set_encrypted_by_space(block->page.space); + } else { + dict_set_corrupted_by_space(block->page.space); + } + + if (space) { + fil_space_release_for_io(space); + } + return(FALSE); } @@ -3056,9 +3122,9 @@ loop: } ib_logf(IB_LOG_LEVEL_FATAL, "Unable" - " to read tablespace %lu page no" - " %lu into the buffer pool after" - " %lu attempts" + " to read tablespace " ULINTPF " page no " + ULINTPF " into the buffer pool after " + ULINTPF " attempts." " The most probable cause" " of this error may be that the" " table has been corrupted." @@ -3271,12 +3337,21 @@ got_block: /* Decompress the page while not holding any buf_pool or block->mutex. */ - /* Page checksum verification is already done when - the page is read from disk. Hence page checksum - verification is not necessary when decompressing the page. 
*/ { - bool success = buf_zip_decompress(block, FALSE); - ut_a(success); + bool success = buf_zip_decompress(block, TRUE); + + if (!success) { + buf_block_mutex_enter(fix_block); + buf_block_set_io_fix(fix_block, BUF_IO_NONE); + buf_block_mutex_exit(fix_block); + + os_atomic_decrement_ulint(&buf_pool->n_pend_unzip, 1); + rw_lock_x_unlock(&fix_block->lock); + mutex_enter(&buf_pool->LRU_list_mutex); + buf_block_unfix(fix_block); + mutex_exit(&buf_pool->LRU_list_mutex); + return NULL; + } } if (!recv_no_ibuf_operations) { @@ -3374,16 +3449,10 @@ got_block: goto loop; } - fprintf(stderr, - "innodb_change_buffering_debug evict %u %u\n", - (unsigned) space, (unsigned) offset); return(NULL); } if (buf_flush_page_try(buf_pool, fix_block)) { - fprintf(stderr, - "innodb_change_buffering_debug flush %u %u\n", - (unsigned) space, (unsigned) offset); guess = fix_block; goto loop; } @@ -4354,11 +4423,11 @@ buf_page_create( memset(frame + FIL_PAGE_NEXT, 0xff, 4); mach_write_to_2(frame + FIL_PAGE_TYPE, FIL_PAGE_TYPE_ALLOCATED); - /* Reset to zero the file flush lsn field in the page; if the first - page of an ibdata file is 'created' in this function into the buffer - pool then we lose the original contents of the file flush lsn stamp. - Then InnoDB could in a crash recovery print a big, false, corruption - warning if the stamp contains an lsn bigger than the ib_logfile lsn. */ + /* FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION is only used on the + following pages: + (1) The first page of the InnoDB system tablespace (page 0:0) + (2) FIL_RTREE_SPLIT_SEQ_NUM on R-tree pages + (3) key_version on encrypted pages (not page 0:0) */ memset(frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 0, 8); @@ -4570,6 +4639,7 @@ buf_page_check_corrupt(buf_page_t* bpage, fil_space_t* space) !bpage->encrypted && fil_space_verify_crypt_checksum(dst_frame, zip_size, space, bpage->offset)); + if (!still_encrypted) { /* If traditional checksums match, we assume that page is not anymore encrypted. 
*/ @@ -6176,22 +6246,27 @@ buf_pool_reserve_tmp_slot( buf_pool_mutex_exit(buf_pool); /* Allocate temporary memory for encryption/decryption */ - if (free_slot->crypt_buf_free == NULL) { - free_slot->crypt_buf_free = static_cast<byte *>(ut_malloc(UNIV_PAGE_SIZE*2)); - free_slot->crypt_buf = static_cast<byte *>(ut_align(free_slot->crypt_buf_free, UNIV_PAGE_SIZE)); - memset(free_slot->crypt_buf_free, 0, UNIV_PAGE_SIZE *2); + if (free_slot->crypt_buf == NULL) { + free_slot->crypt_buf = static_cast<byte*>(aligned_malloc(UNIV_PAGE_SIZE, UNIV_PAGE_SIZE)); + memset(free_slot->crypt_buf, 0, UNIV_PAGE_SIZE); } /* For page compressed tables allocate temporary memory for compression/decompression */ - if (compressed && free_slot->comp_buf_free == NULL) { - free_slot->comp_buf_free = static_cast<byte *>(ut_malloc(UNIV_PAGE_SIZE*2)); - free_slot->comp_buf = static_cast<byte *>(ut_align(free_slot->comp_buf_free, UNIV_PAGE_SIZE)); - memset(free_slot->comp_buf_free, 0, UNIV_PAGE_SIZE *2); -#ifdef HAVE_LZO - free_slot->lzo_mem = static_cast<byte *>(ut_malloc(LZO1X_1_15_MEM_COMPRESS)); - memset(free_slot->lzo_mem, 0, LZO1X_1_15_MEM_COMPRESS); + if (compressed && free_slot->comp_buf == NULL) { + ulint size = UNIV_PAGE_SIZE; + + /* Both snappy and lzo compression methods require that + output buffer used for compression is bigger than input + buffer. Increase the allocated buffer size accordingly. 
*/ +#if HAVE_SNAPPY + size = snappy_max_compressed_length(size); +#endif +#if HAVE_LZO + size += LZO1X_1_15_MEM_COMPRESS; #endif + free_slot->comp_buf = static_cast<byte*>(aligned_malloc(size, UNIV_PAGE_SIZE)); + memset(free_slot->comp_buf, 0, size); } return (free_slot); @@ -6279,8 +6354,7 @@ buf_page_encrypt_before_write( fsp_flags_get_page_compression_level(space->flags), fil_space_get_block_size(space, bpage->offset), encrypted, - &out_len, - IF_LZO(slot->lzo_mem, NULL)); + &out_len); bpage->real_size = out_len; diff --git a/storage/xtradb/buf/buf0dblwr.cc b/storage/xtradb/buf/buf0dblwr.cc index 1c9646c0bd6..49371f9a6f1 100644 --- a/storage/xtradb/buf/buf0dblwr.cc +++ b/storage/xtradb/buf/buf0dblwr.cc @@ -1,7 +1,7 @@ /***************************************************************************** -Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2013, 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2013, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -175,13 +175,14 @@ buf_dblwr_init( mem_zalloc(buf_size * sizeof(void*))); } -/****************************************************************//** -Creates the doublewrite buffer to a new InnoDB installation. The header of the -doublewrite buffer is placed on the trx system header page. */ +/** Create the doublewrite buffer if the doublewrite buffer header +is not present in the TRX_SYS page. 
+@return whether the operation succeeded +@retval true if the doublewrite buffer exists or was created +@retval false if the creation failed (too small first data file) */ UNIV_INTERN -void -buf_dblwr_create(void) -/*==================*/ +bool +buf_dblwr_create() { buf_block_t* block2; buf_block_t* new_block; @@ -194,8 +195,7 @@ buf_dblwr_create(void) if (buf_dblwr) { /* Already inited */ - - return; + return(true); } start_again: @@ -213,39 +213,59 @@ start_again: mtr_commit(&mtr); buf_dblwr_being_created = FALSE; - return; + return(true); } - ib_logf(IB_LOG_LEVEL_INFO, - "Doublewrite buffer not found: creating new"); - if (buf_pool_get_curr_size() < ((TRX_SYS_DOUBLEWRITE_BLOCKS * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE + FSP_EXTENT_SIZE / 2 + 100) * UNIV_PAGE_SIZE)) { - ib_logf(IB_LOG_LEVEL_FATAL, - "Cannot create doublewrite buffer: you must " - "increase your buffer pool size. Cannot continue " - "operation."); + ib_logf(IB_LOG_LEVEL_ERROR, + "Cannot create doublewrite buffer: " + "innodb_buffer_pool_size is too small."); + mtr_commit(&mtr); + return(false); + } else { + fil_space_t* space = fil_space_acquire(TRX_SYS_SPACE); + const bool fail = UT_LIST_GET_FIRST(space->chain)->size + < 3 * FSP_EXTENT_SIZE; + fil_space_release(space); + + if (fail) { + goto too_small; + } } block2 = fseg_create(TRX_SYS_SPACE, TRX_SYS_PAGE_NO, TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_FSEG, &mtr); + if (block2 == NULL) { +too_small: + ib_logf(IB_LOG_LEVEL_ERROR, + "Cannot create doublewrite buffer: " + "the first file in innodb_data_file_path" + " must be at least %luM.", + 3 * (FSP_EXTENT_SIZE * UNIV_PAGE_SIZE) >> 20); + mtr_commit(&mtr); + return(false); + } + + ib_logf(IB_LOG_LEVEL_INFO, + "Doublewrite buffer not found: creating new"); + + /* FIXME: After this point, the doublewrite buffer creation + is not atomic. The doublewrite buffer should not exist in + the InnoDB system tablespace file in the first place. 
+ It could be located in separate optional file(s) in a + user-specified location. */ + /* fseg_create acquires a second latch on the page, therefore we must declare it: */ buf_block_dbg_add_level(block2, SYNC_NO_ORDER_CHECK); - if (block2 == NULL) { - ib_logf(IB_LOG_LEVEL_FATAL, - "Cannot create doublewrite buffer: you must " - "increase your tablespace size. " - "Cannot continue operation."); - } - fseg_header = doublewrite + TRX_SYS_DOUBLEWRITE_FSEG; prev_page_no = 0; @@ -351,7 +371,7 @@ recovery, this function loads the pages from double write buffer into memory. */ void buf_dblwr_init_or_load_pages( /*=========================*/ - os_file_t file, + pfs_os_file_t file, char* path, bool load_corrupt_pages) { @@ -482,6 +502,14 @@ buf_dblwr_process() byte* unaligned_read_buf; recv_dblwr_t& recv_dblwr = recv_sys->dblwr; + if (!buf_dblwr) { + return; + } + + ib_logf(IB_LOG_LEVEL_INFO, + "Restoring possible half-written data pages " + "from the doublewrite buffer..."); + unaligned_read_buf = static_cast<byte*>(ut_malloc(2 * UNIV_PAGE_SIZE)); read_buf = static_cast<byte*>( diff --git a/storage/xtradb/buf/buf0dump.cc b/storage/xtradb/buf/buf0dump.cc index e728636042b..71b97b770e1 100644 --- a/storage/xtradb/buf/buf0dump.cc +++ b/storage/xtradb/buf/buf0dump.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2011, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under @@ -612,6 +612,7 @@ buf_load() if (dump_n == 0) { ut_free(dump); + ut_free(dump_tmp); ut_sprintf_timestamp(now); buf_load_status(STATUS_NOTICE, "Buffer pool(s) load completed at %s " @@ -685,6 +686,7 @@ extern "C" UNIV_INTERN os_thread_ret_t DECLARE_THREAD(buf_dump_thread)(void*) { + my_thread_init(); ut_ad(!srv_read_only_mode); buf_dump_status(STATUS_INFO, "Dumping buffer pool(s) not yet started"); @@ -721,6 +723,7 @@ DECLARE_THREAD(buf_dump_thread)(void*) srv_buf_dump_thread_active = false; + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit and not use return() to exit. */ os_thread_exit(NULL); diff --git a/storage/xtradb/buf/buf0flu.cc b/storage/xtradb/buf/buf0flu.cc index 1f5c3993be7..84eea3bc692 100644 --- a/storage/xtradb/buf/buf0flu.cc +++ b/storage/xtradb/buf/buf0flu.cc @@ -1,7 +1,7 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2013, 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2013, 2017, MariaDB Corporation. Copyright (c) 2013, 2014, Fusion-io This program is free software; you can redistribute it and/or modify it under @@ -62,10 +62,10 @@ is set to TRUE by the page_cleaner thread when it is spawned and is set back to FALSE at shutdown by the page_cleaner as well. Therefore no need to protect it by a mutex. It is only ever read by the thread doing the shutdown */ -UNIV_INTERN ibool buf_page_cleaner_is_active = FALSE; +UNIV_INTERN bool buf_page_cleaner_is_active; /** Flag indicating if the lru_manager is in active state. 
*/ -UNIV_INTERN bool buf_lru_manager_is_active = false; +UNIV_INTERN bool buf_lru_manager_is_active; #ifdef UNIV_PFS_THREAD UNIV_INTERN mysql_pfs_key_t buf_page_cleaner_thread_key; @@ -352,6 +352,7 @@ buf_flush_insert_into_flush_list( buf_block_t* block, /*!< in/out: block which is modified */ lsn_t lsn) /*!< in: oldest modification */ { + ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE); ut_ad(log_flush_order_mutex_own()); ut_ad(mutex_own(&block->mutex)); @@ -410,6 +411,7 @@ buf_flush_insert_sorted_into_flush_list( buf_page_t* prev_b; buf_page_t* b; + ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE); ut_ad(log_flush_order_mutex_own()); ut_ad(mutex_own(&block->mutex)); ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); @@ -715,6 +717,7 @@ buf_flush_write_complete( buf_page_set_io_fix(bpage, BUF_IO_NONE); buf_pool->n_flush[flush_type]--; + ut_ad(buf_pool->n_flush[flush_type] != ULINT_MAX); #ifdef UNIV_MTFLUSH_DEBUG fprintf(stderr, "n pending flush %lu\n", @@ -1099,6 +1102,7 @@ buf_flush_page( } ++buf_pool->n_flush[flush_type]; + ut_ad(buf_pool->n_flush[flush_type] != 0); mutex_exit(&buf_pool->flush_state_mutex); @@ -2264,13 +2268,14 @@ Clears up tail of the LRU lists: * Flush dirty pages at the tail of LRU to the disk The depth to which we scan each buffer pool is controlled by dynamic config parameter innodb_LRU_scan_depth. 
-@return number of pages flushed */ +@return number of flushed and evicted pages */ UNIV_INTERN ulint buf_flush_LRU_tail(void) /*====================*/ { ulint total_flushed = 0; + ulint total_evicted = 0; ulint start_time = ut_time_ms(); ulint scan_depth[MAX_BUFFER_POOLS]; ulint requested_pages[MAX_BUFFER_POOLS]; @@ -2341,6 +2346,7 @@ buf_flush_LRU_tail(void) limited_scan[i] = (previous_evicted[i] > n.evicted); previous_evicted[i] = n.evicted; + total_evicted += n.evicted; requested_pages[i] += lru_chunk_size; @@ -2381,7 +2387,7 @@ buf_flush_LRU_tail(void) } } - return(total_flushed); + return(total_flushed + total_evicted); } /*********************************************************************//** @@ -2682,6 +2688,23 @@ buf_get_total_free_list_length(void) return result; } +/** Returns the aggregate LRU list length over all buffer pool instances. +@return total LRU list length. */ +MY_ATTRIBUTE((warn_unused_result)) +static +ulint +buf_get_total_LRU_list_length(void) +{ + ulint result = 0; + + for (ulint i = 0; i < srv_buf_pool_instances; i++) { + + result += UT_LIST_GET_LEN(buf_pool_from_array(i)->LRU); + } + + return result; +} + /*********************************************************************//** Adjust the desired page cleaner thread sleep time for LRU flushes. 
*/ MY_ATTRIBUTE((nonnull)) @@ -2694,8 +2717,9 @@ page_cleaner_adapt_lru_sleep_time( ulint lru_n_flushed) /*!< in: number of flushed in previous batch */ { - ulint free_len = buf_get_total_free_list_length(); - ulint max_free_len = srv_LRU_scan_depth * srv_buf_pool_instances; + ulint free_len = buf_get_total_free_list_length(); + ulint max_free_len = ut_min(buf_get_total_LRU_list_length(), + srv_LRU_scan_depth * srv_buf_pool_instances); if (free_len < max_free_len / 100 && lru_n_flushed) { @@ -2707,7 +2731,7 @@ page_cleaner_adapt_lru_sleep_time( /* Free lists filled more than 20% or no pages flushed in previous batch, sleep a bit more */ - *lru_sleep_time += 50; + *lru_sleep_time += 1; if (*lru_sleep_time > srv_cleaner_max_lru_time) *lru_sleep_time = srv_cleaner_max_lru_time; } else if (free_len < max_free_len / 20 && *lru_sleep_time >= 50) { @@ -2754,6 +2778,7 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)( /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); ulint next_loop_time = ut_time_ms() + 1000; ulint n_flushed = 0; ulint last_activity = srv_get_activity_count(); @@ -2774,8 +2799,6 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)( os_thread_pf(os_thread_get_curr_id())); #endif /* UNIV_DEBUG_THREAD_CREATION */ - buf_page_cleaner_is_active = TRUE; - while (srv_shutdown_state == SRV_SHUTDOWN_NONE) { ulint page_cleaner_sleep_time; @@ -2884,8 +2907,9 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)( /* We have lived our life. Time to die. */ thread_exit: - buf_page_cleaner_is_active = FALSE; + buf_page_cleaner_is_active = false; + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit and not use return() to exit. 
*/ os_thread_exit(NULL); @@ -2924,8 +2948,6 @@ DECLARE_THREAD(buf_flush_lru_manager_thread)( os_thread_pf(os_thread_get_curr_id())); #endif /* UNIV_DEBUG_THREAD_CREATION */ - buf_lru_manager_is_active = true; - /* On server shutdown, the LRU manager thread runs through cleanup phase to provide free pages for the master and purge threads. */ while (srv_shutdown_state == SRV_SHUTDOWN_NONE diff --git a/storage/xtradb/dict/dict0dict.cc b/storage/xtradb/dict/dict0dict.cc index bc46bcab63b..a1cfeb3860f 100644 --- a/storage/xtradb/dict/dict0dict.cc +++ b/storage/xtradb/dict/dict0dict.cc @@ -888,6 +888,12 @@ dict_index_get_nth_col_or_prefix_pos( ut_ad(index); ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); + ut_ad((inc_prefix && !prefix_col_pos) || (!inc_prefix)); + + if (!prefix_col_pos) { + prefix_col_pos = &prefixed_pos_dummy; + } + *prefix_col_pos = ULINT_UNDEFINED; if (!prefix_col_pos) { prefix_col_pos = &prefixed_pos_dummy; diff --git a/storage/xtradb/dict/dict0load.cc b/storage/xtradb/dict/dict0load.cc index 4991c4f3fcc..4c3dd47761f 100644 --- a/storage/xtradb/dict/dict0load.cc +++ b/storage/xtradb/dict/dict0load.cc @@ -1193,7 +1193,7 @@ loop: dberr_t err = fil_open_single_table_tablespace( read_page_0, srv_read_only_mode ? 
false : true, space_id, dict_tf_to_fsp_flags(flags), - name, filepath, NULL); + name, filepath); if (err != DB_SUCCESS) { ib_logf(IB_LOG_LEVEL_ERROR, @@ -2437,7 +2437,7 @@ err_exit: err = fil_open_single_table_tablespace( true, false, table->space, dict_tf_to_fsp_flags(table->flags), - name, filepath, table); + name, filepath); if (err != DB_SUCCESS) { /* We failed to find a sensible diff --git a/storage/xtradb/dict/dict0mem.cc b/storage/xtradb/dict/dict0mem.cc index fa7177c5137..125d7d78a1f 100644 --- a/storage/xtradb/dict/dict0mem.cc +++ b/storage/xtradb/dict/dict0mem.cc @@ -320,8 +320,8 @@ dict_mem_table_col_rename_low( ut_ad(from_len <= NAME_LEN); ut_ad(to_len <= NAME_LEN); - char from[NAME_LEN]; - strncpy(from, s, NAME_LEN); + char from[NAME_LEN + 1]; + strncpy(from, s, NAME_LEN + 1); if (from_len == to_len) { /* The easy case: simply replace the column name in diff --git a/storage/xtradb/dict/dict0stats.cc b/storage/xtradb/dict/dict0stats.cc index d2e9a2618c0..c1463e98ce0 100644 --- a/storage/xtradb/dict/dict0stats.cc +++ b/storage/xtradb/dict/dict0stats.cc @@ -1168,8 +1168,9 @@ dict_stats_analyze_index_level( leaf-level delete marks because delete marks on non-leaf level do not make sense. */ - if (level == 0 && srv_stats_include_delete_marked? 0: - rec_get_deleted_flag( + if (level == 0 + && !srv_stats_include_delete_marked + && rec_get_deleted_flag( rec, page_is_comp(btr_pcur_get_page(&pcur)))) { diff --git a/storage/xtradb/dict/dict0stats_bg.cc b/storage/xtradb/dict/dict0stats_bg.cc index 55d34ff6ae1..ba6fd115551 100644 --- a/storage/xtradb/dict/dict0stats_bg.cc +++ b/storage/xtradb/dict/dict0stats_bg.cc @@ -1,7 +1,7 @@ /***************************************************************************** -Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2012, 2017, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -39,12 +39,18 @@ Created Apr 25, 2012 Vasil Dimov /** Minimum time interval between stats recalc for a given table */ #define MIN_RECALC_INTERVAL 10 /* seconds */ -#define SHUTTING_DOWN() (srv_shutdown_state != SRV_SHUTDOWN_NONE) - /** Event to wake up dict_stats_thread on dict_stats_recalc_pool_add() or shutdown. Not protected by any mutex. */ UNIV_INTERN os_event_t dict_stats_event; +/** Variable to initiate shutdown the dict stats thread. Note we don't +use 'srv_shutdown_state' because we want to shutdown dict stats thread +before purge thread. */ +static bool dict_stats_start_shutdown; + +/** Event to wait for shutdown of the dict stats thread */ +static os_event_t dict_stats_shutdown_event; + /** This mutex protects the "recalc_pool" variable. */ static ib_mutex_t recalc_pool_mutex; static ib_mutex_t defrag_pool_mutex; @@ -341,11 +347,11 @@ Must be called before dict_stats_thread() is started. */ UNIV_INTERN void dict_stats_thread_init() -/*====================*/ { ut_a(!srv_read_only_mode); dict_stats_event = os_event_create(); + dict_stats_shutdown_event = os_event_create(); /* The recalc_pool_mutex is acquired from: 1) the background stats gathering thread before any other latch @@ -390,6 +396,9 @@ dict_stats_thread_deinit() os_event_free(dict_stats_event); dict_stats_event = NULL; + os_event_free(dict_stats_shutdown_event); + dict_stats_shutdown_event = NULL; + dict_stats_start_shutdown = false; } /*****************************************************************//** @@ -530,9 +539,10 @@ extern "C" UNIV_INTERN os_thread_ret_t DECLARE_THREAD(dict_stats_thread)(void*) { + my_thread_init(); ut_a(!srv_read_only_mode); - while (!SHUTTING_DOWN()) { + while (!dict_stats_start_shutdown) { /* Wake up periodically even if not signaled. 
This is because we may lose an event - if the below call to @@ -542,7 +552,7 @@ DECLARE_THREAD(dict_stats_thread)(void*) os_event_wait_time( dict_stats_event, MIN_RECALC_INTERVAL * 1000000); - if (SHUTTING_DOWN()) { + if (dict_stats_start_shutdown) { break; } @@ -556,9 +566,20 @@ DECLARE_THREAD(dict_stats_thread)(void*) srv_dict_stats_thread_active = false; + os_event_set(dict_stats_shutdown_event); + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit instead of return(). */ os_thread_exit(NULL); OS_THREAD_DUMMY_RETURN; } + +/** Shut down the dict_stats_thread. */ +void +dict_stats_shutdown() +{ + dict_stats_start_shutdown = true; + os_event_set(dict_stats_event); + os_event_wait(dict_stats_shutdown_event); +} diff --git a/storage/xtradb/fil/fil0crypt.cc b/storage/xtradb/fil/fil0crypt.cc index e27e93244ae..e73d600d2ca 100644 --- a/storage/xtradb/fil/fil0crypt.cc +++ b/storage/xtradb/fil/fil0crypt.cc @@ -887,7 +887,7 @@ fil_space_decrypt( Calculate post encryption checksum @param[in] zip_size zip_size or 0 @param[in] dst_frame Block where checksum is calculated -@return page checksum or BUF_NO_CHECKSUM_MAGIC +@return page checksum not needed. 
*/ UNIV_INTERN ulint @@ -896,30 +896,13 @@ fil_crypt_calculate_checksum( const byte* dst_frame) { ib_uint32_t checksum = 0; - srv_checksum_algorithm_t algorithm = - static_cast<srv_checksum_algorithm_t>(srv_checksum_algorithm); + /* For encrypted tables we use only crc32 and strict_crc32 */ if (zip_size == 0) { - switch (algorithm) { - case SRV_CHECKSUM_ALGORITHM_CRC32: - case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32: - checksum = buf_calc_page_crc32(dst_frame); - break; - case SRV_CHECKSUM_ALGORITHM_INNODB: - case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB: - checksum = (ib_uint32_t) buf_calc_page_new_checksum( - dst_frame); - break; - case SRV_CHECKSUM_ALGORITHM_NONE: - case SRV_CHECKSUM_ALGORITHM_STRICT_NONE: - checksum = BUF_NO_CHECKSUM_MAGIC; - break; - /* no default so the compiler will emit a warning - * if new enum is added and not handled here */ - } + checksum = buf_calc_page_crc32(dst_frame); } else { checksum = page_zip_calc_checksum(dst_frame, zip_size, - algorithm); + SRV_CHECKSUM_ALGORITHM_CRC32); } return checksum; @@ -953,14 +936,6 @@ fil_space_verify_crypt_checksum( return(false); } - srv_checksum_algorithm_t algorithm = - static_cast<srv_checksum_algorithm_t>(srv_checksum_algorithm); - - /* If no checksum is used, can't continue checking. */ - if (algorithm == SRV_CHECKSUM_ALGORITHM_NONE) { - return(true); - } - /* Read stored post encryption checksum. */ ib_uint32_t checksum = mach_read_from_4( page + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + 4); @@ -1044,7 +1019,6 @@ fil_space_verify_crypt_checksum( checksum1 = mach_read_from_4( page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM); valid = (buf_page_is_checksum_valid_crc32(page,checksum1,checksum2) - || buf_page_is_checksum_valid_none(page,checksum1,checksum2) || buf_page_is_checksum_valid_innodb(page,checksum1, checksum2)); } @@ -1141,6 +1115,36 @@ fil_crypt_needs_rotation( return false; } +/** Read page 0 and possible crypt data from there. 
+@param[in,out] space Tablespace */ +static inline +void +fil_crypt_read_crypt_data(fil_space_t* space) +{ + if (space->crypt_data || space->size) { + /* The encryption metadata has already been read, or + the tablespace is not encrypted and the file has been + opened already. */ + return; + } + + mtr_t mtr; + mtr_start(&mtr); + ulint zip_size = fsp_flags_get_zip_size(space->flags); + ulint offset = fsp_header_get_crypt_offset(zip_size); + if (buf_block_t* block = buf_page_get(space->id, zip_size, 0, + RW_S_LATCH, &mtr)) { + mutex_enter(&fil_system->mutex); + if (!space->crypt_data) { + space->crypt_data = fil_space_read_crypt_data( + space->id, block->frame, offset); + } + mutex_exit(&fil_system->mutex); + } + + mtr_commit(&mtr); +} + /*********************************************************************** Start encrypting a space @param[in,out] space Tablespace @@ -1151,6 +1155,7 @@ fil_crypt_start_encrypting_space( fil_space_t* space) { bool recheck = false; + mutex_enter(&fil_crypt_threads_mutex); fil_space_crypt_t *crypt_data = space->crypt_data; @@ -1217,8 +1222,6 @@ fil_crypt_start_encrypting_space( byte* frame = buf_block_get_frame(block); crypt_data->type = CRYPT_SCHEME_1; crypt_data->write_page0(frame, &mtr); - - mtr_commit(&mtr); /* record lsn of update */ @@ -1294,10 +1297,10 @@ struct rotate_thread_t { bool should_shutdown() const { switch (srv_shutdown_state) { case SRV_SHUTDOWN_NONE: - case SRV_SHUTDOWN_CLEANUP: return thread_no >= srv_n_fil_crypt_threads; - case SRV_SHUTDOWN_FLUSH_PHASE: + case SRV_SHUTDOWN_CLEANUP: return true; + case SRV_SHUTDOWN_FLUSH_PHASE: case SRV_SHUTDOWN_LAST_PHASE: case SRV_SHUTDOWN_EXIT_THREADS: break; @@ -1646,6 +1649,8 @@ fil_crypt_find_space_to_rotate( } while (!state->should_shutdown() && state->space) { + fil_crypt_read_crypt_data(state->space); + if (fil_crypt_space_needs_rotation(state, key_state, recheck)) { ut_ad(key_state->key_id); /* init state->min_key_version_found before @@ -2340,8 +2345,10 @@ 
DECLARE_THREAD(fil_crypt_thread)( while (!thr.should_shutdown() && fil_crypt_find_page_to_rotate(&new_state, &thr)) { - /* rotate a (set) of pages */ - fil_crypt_rotate_pages(&new_state, &thr); + if (!thr.space->is_stopping()) { + /* rotate a (set) of pages */ + fil_crypt_rotate_pages(&new_state, &thr); + } /* If space is marked as stopping, release space and stop rotation. */ @@ -2571,10 +2578,10 @@ fil_space_crypt_get_status( memset(status, 0, sizeof(*status)); ut_ad(space->n_pending_ops > 0); - fil_space_crypt_t* crypt_data = space->crypt_data; + fil_crypt_read_crypt_data(const_cast<fil_space_t*>(space)); status->space = space->id; - if (crypt_data != NULL) { + if (fil_space_crypt_t* crypt_data = space->crypt_data) { mutex_enter(&crypt_data->mutex); status->scheme = crypt_data->type; status->keyserver_requests = crypt_data->keyserver_requests; diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index e39be46840c..fdd09a6034e 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2014, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under @@ -166,7 +166,8 @@ UNIV_INTERN extern uint srv_fil_crypt_rotate_key_age; UNIV_INTERN extern ib_mutex_t fil_crypt_threads_mutex; /** Determine if (i) is a user tablespace id or not. */ -# define fil_is_user_tablespace_id(i) ((i) > srv_undo_tablespaces_open) +# define fil_is_user_tablespace_id(i) (i != 0 \ + && !srv_is_undo_tablespace(i)) /** Determine if user has explicitly disabled fsync(). 
*/ #ifndef __WIN__ @@ -621,7 +622,8 @@ fil_node_open_file( size_bytes = os_file_get_size(node->handle); ut_a(size_bytes != (os_offset_t) -1); - node->file_block_size = os_file_get_block_size(node->handle, node->name); + node->file_block_size = os_file_get_block_size( + node->handle, node->name); space->file_block_size = node->file_block_size; #ifdef UNIV_HOTBACKUP @@ -659,12 +661,10 @@ fil_node_open_file( /* Try to read crypt_data from page 0 if it is not yet read. */ - if (!node->space->page_0_crypt_read) { - ulint offset = fsp_header_get_crypt_offset( - fsp_flags_get_zip_size(flags)); - ut_ad(node->space->crypt_data == NULL); + if (!node->space->crypt_data) { + const ulint offset = fsp_header_get_crypt_offset( + fsp_flags_get_zip_size(flags)); node->space->crypt_data = fil_space_read_crypt_data(space_id, page, offset); - node->space->page_0_crypt_read = true; } ut_free(buf2); @@ -731,7 +731,8 @@ add_size: } if (node->file_block_size == 0) { - node->file_block_size = os_file_get_block_size(node->handle, node->name); + node->file_block_size = os_file_get_block_size( + node->handle, node->name); space->file_block_size = node->file_block_size; } @@ -1597,22 +1598,6 @@ fil_space_create( space->magic_n = FIL_SPACE_MAGIC_N; space->crypt_data = crypt_data; - /* In create table we write page 0 so we have already - "read" it and for system tablespaces we have read - crypt data at startup. */ - if (create_table || crypt_data != NULL) { - space->page_0_crypt_read = true; - } - -#ifdef UNIV_DEBUG - ib_logf(IB_LOG_LEVEL_INFO, - "Created tablespace for space %lu name %s key_id %u encryption %d.", - space->id, - space->name, - space->crypt_data ? space->crypt_data->key_id : 0, - space->crypt_data ? 
space->crypt_data->encryption : 0); -#endif - rw_lock_create(fil_space_latch_key, &space->latch, SYNC_FSP); HASH_INSERT(fil_space_t, hash, fil_system->spaces, id, space); @@ -2063,8 +2048,6 @@ fil_init( fil_system->spaces = hash_create(hash_size); fil_system->name_hash = hash_create(hash_size); - UT_LIST_INIT(fil_system->LRU); - fil_system->max_n_open = max_n_open; fil_space_crypt_init(); @@ -2264,99 +2247,70 @@ fil_set_max_space_id_if_bigger( mutex_exit(&fil_system->mutex); } -/****************************************************************//** -Writes the flushed lsn and the latest archived log number to the page header -of the first page of a data file of the system tablespace (space 0), -which is uncompressed. */ -static MY_ATTRIBUTE((warn_unused_result)) +/** Write the flushed LSN to the page header of the first page in the +system tablespace. +@param[in] lsn flushed LSN +@return DB_SUCCESS or error number */ dberr_t -fil_write_lsn_and_arch_no_to_file( -/*==============================*/ - ulint space, /*!< in: space to write to */ - ulint sum_of_sizes, /*!< in: combined size of previous files - in space, in database pages */ - lsn_t lsn, /*!< in: lsn to write */ - ulint arch_log_no MY_ATTRIBUTE((unused))) - /*!< in: archived log number to write */ +fil_write_flushed_lsn( + lsn_t lsn) { byte* buf1; byte* buf; dberr_t err; - buf1 = static_cast<byte*>(mem_alloc(2 * UNIV_PAGE_SIZE)); + buf1 = static_cast<byte*>(ut_malloc(2 * UNIV_PAGE_SIZE)); buf = static_cast<byte*>(ut_align(buf1, UNIV_PAGE_SIZE)); - err = fil_read(TRUE, space, 0, sum_of_sizes, 0, - UNIV_PAGE_SIZE, buf, NULL, 0); - if (err == DB_SUCCESS) { - mach_write_to_8(buf + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, - lsn); + /* Acquire system tablespace */ + fil_space_t* space = fil_space_acquire(0); - err = fil_write(TRUE, space, 0, sum_of_sizes, 0, - UNIV_PAGE_SIZE, buf, NULL, 0); - } - - mem_free(buf1); + /* If tablespace is not encrypted, stamp flush_lsn to + first page of all system tablespace 
datafiles to avoid + unnecessary error messages on possible downgrade. */ + if (space->crypt_data->min_key_version == 0) { + fil_node_t* node; + ulint sum_of_sizes = 0; - return(err); -} - -/****************************************************************//** -Writes the flushed lsn and the latest archived log number to the page -header of the first page of each data file in the system tablespace. -@return DB_SUCCESS or error number */ -UNIV_INTERN -dberr_t -fil_write_flushed_lsn_to_data_files( -/*================================*/ - lsn_t lsn, /*!< in: lsn to write */ - ulint arch_log_no) /*!< in: latest archived log file number */ -{ - fil_space_t* space; - fil_node_t* node; - dberr_t err; - - mutex_enter(&fil_system->mutex); - - for (space = UT_LIST_GET_FIRST(fil_system->space_list); - space != NULL; - space = UT_LIST_GET_NEXT(space_list, space)) { - - /* We only write the lsn to all existing data files which have - been open during the lifetime of the mysqld process; they are - represented by the space objects in the tablespace memory - cache. Note that all data files in the system tablespace 0 - and the UNDO log tablespaces (if separate) are always open. 
*/ - - if (space->purpose == FIL_TABLESPACE - && !fil_is_user_tablespace_id(space->id)) { - ulint sum_of_sizes = 0; - - for (node = UT_LIST_GET_FIRST(space->chain); - node != NULL; - node = UT_LIST_GET_NEXT(chain, node)) { - - mutex_exit(&fil_system->mutex); - - err = fil_write_lsn_and_arch_no_to_file( - space->id, sum_of_sizes, lsn, - arch_log_no); + for (node = UT_LIST_GET_FIRST(space->chain); + node != NULL; + node = UT_LIST_GET_NEXT(chain, node)) { - if (err != DB_SUCCESS) { + err = fil_read(TRUE, 0, 0, sum_of_sizes, 0, + UNIV_PAGE_SIZE, buf, NULL, 0); - return(err); - } + if (err == DB_SUCCESS) { + mach_write_to_8(buf + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, + lsn); - mutex_enter(&fil_system->mutex); + err = fil_write(TRUE, 0, 0, sum_of_sizes, 0, + UNIV_PAGE_SIZE, buf, NULL, 0); sum_of_sizes += node->size; } } + } else { + /* When system tablespace is encrypted stamp flush_lsn to + only the first page of the first datafile (rest of pages + are encrypted). */ + err = fil_read(TRUE, 0, 0, 0, 0, + UNIV_PAGE_SIZE, buf, NULL, 0); + + if (err == DB_SUCCESS) { + mach_write_to_8(buf + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, + lsn); + + err = fil_write(TRUE, 0, 0, 0, 0, + UNIV_PAGE_SIZE, buf, NULL, 0); + } } - mutex_exit(&fil_system->mutex); + fil_flush_file_spaces(FIL_TABLESPACE); + fil_space_release(space); - return(DB_SUCCESS); + ut_free(buf1); + + return(err); } /** Check the consistency of the first data page of a tablespace @@ -2409,30 +2363,29 @@ fil_check_first_page(const page_t* page, ulint space_id, ulint flags) return("inconsistent data in space header"); } -/*******************************************************************//** -Reads the flushed lsn, arch no, space_id and tablespace flag fields from -the first page of a data file at database startup. +/** Reads the flushed lsn, arch no, space_id and tablespace flag fields from +the first page of a first data file at database startup. 
+@param[in] data_file open data file +@param[in] one_read_only true if first datafile is already + read +@param[out] flags FSP_SPACE_FLAGS +@param[out] space_id tablepspace ID +@param[out] flushed_lsn flushed lsn value +@param[out] crypt_data encryption crypt data @retval NULL on success, or if innodb_force_recovery is set @return pointer to an error message string */ UNIV_INTERN const char* fil_read_first_page( -/*================*/ - os_file_t data_file, /*!< in: open data file */ - ibool one_read_already, /*!< in: TRUE if min and max - parameters below already - contain sensible data */ - ulint* flags, /*!< out: FSP_SPACE_FLAGS */ - ulint* space_id, /*!< out: tablespace ID */ - lsn_t* min_flushed_lsn, /*!< out: min of flushed - lsn values in data files */ - lsn_t* max_flushed_lsn, /*!< out: max of flushed - lsn values in data files */ - fil_space_crypt_t** crypt_data) /*< out: crypt data */ + pfs_os_file_t data_file, + ibool one_read_already, + ulint* flags, + ulint* space_id, + lsn_t* flushed_lsn, + fil_space_crypt_t** crypt_data) { byte* buf; byte* page; - lsn_t flushed_lsn; const char* check_msg = NULL; fil_space_crypt_t* cdata; @@ -2449,6 +2402,7 @@ fil_read_first_page( return "File size is less than minimum"; } } + buf = static_cast<byte*>(ut_malloc(2 * UNIV_PAGE_SIZE)); /* Align the memory for a possible read from a raw device */ @@ -2467,6 +2421,11 @@ fil_read_first_page( *space_id = fsp_header_get_space_id(page); *flags = fsp_header_get_flags(page); + if (flushed_lsn) { + *flushed_lsn = mach_read_from_8(page + + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); + } + if (!fsp_flags_is_valid(*flags)) { ulint cflags = fsp_flags_convert_from_101(*flags); if (cflags == ULINT_UNDEFINED) { @@ -2479,37 +2438,36 @@ fil_read_first_page( } } - if (!(IS_XTRABACKUP() && srv_backup_mode)) { - check_msg = fil_check_first_page(page, *space_id, *flags); + if (!(IS_XTRABACKUP() && srv_backup_mode)) { + check_msg = fil_check_first_page(page, *space_id, *flags); } - } - - 
flushed_lsn = mach_read_from_8(page + - FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); + /* Possible encryption crypt data is also stored only to first page + of the first datafile. */ - ulint space = fsp_header_get_space_id(page); - ulint offset = fsp_header_get_crypt_offset( - fsp_flags_get_zip_size(*flags)); + const ulint offset = fsp_header_get_crypt_offset( + fsp_flags_get_zip_size(*flags)); - cdata = fil_space_read_crypt_data(space, page, offset); + cdata = fil_space_read_crypt_data(*space_id, page, offset); - if (crypt_data) { - *crypt_data = cdata; - } + if (crypt_data) { + *crypt_data = cdata; + } - /* If file space is encrypted we need to have at least some - encryption service available where to get keys */ - if (cdata && cdata->should_encrypt()) { + /* If file space is encrypted we need to have at least some + encryption service available where to get keys */ + if (cdata && cdata->should_encrypt()) { - if (!encryption_key_id_exists(cdata->key_id)) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Tablespace id %ld is encrypted but encryption service" - " or used key_id %u is not available. Can't continue opening tablespace.", - space, cdata->key_id); - - return ("table encrypted but encryption service not available."); + if (!encryption_key_id_exists(cdata->key_id)) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Tablespace id " ULINTPF + " is encrypted but encryption service" + " or used key_id %u is not available. 
" + "Can't continue opening tablespace.", + *space_id, cdata->key_id); + return ("table encrypted but encryption service not available."); + } } } @@ -2519,20 +2477,6 @@ fil_read_first_page( return(check_msg); } - if (!one_read_already) { - *min_flushed_lsn = flushed_lsn; - *max_flushed_lsn = flushed_lsn; - - return(NULL); - } - - if (*min_flushed_lsn > flushed_lsn) { - *min_flushed_lsn = flushed_lsn; - } - if (*max_flushed_lsn < flushed_lsn) { - *max_flushed_lsn = flushed_lsn; - } - return(NULL); } @@ -2791,14 +2735,12 @@ fil_op_log_parse_or_replay( } else if (log_flags & MLOG_FILE_FLAG_TEMP) { /* Temporary table, do nothing */ } else { - const char* path = NULL; - /* Create the database directory for name, if it does not exist yet */ fil_create_directory_for_tablename(name); if (fil_create_new_single_table_tablespace( - space_id, name, path, flags, + space_id, name, NULL, flags, DICT_TF2_USE_TABLESPACE, FIL_IBD_FILE_INITIAL_SIZE, FIL_ENCRYPTION_DEFAULT, @@ -3744,7 +3686,7 @@ fil_open_linked_file( /*===============*/ const char* tablename, /*!< in: database/tablename */ char** remote_filepath,/*!< out: remote filepath */ - os_file_t* remote_file, /*!< out: remote file handle */ + pfs_os_file_t* remote_file, /*!< out: remote file handle */ ulint atomic_writes) /*!< in: atomic writes table option value */ { @@ -3807,7 +3749,8 @@ fil_create_new_single_table_tablespace( fil_encryption_t mode, /*!< in: encryption mode */ ulint key_id) /*!< in: encryption key_id */ { - os_file_t file; + pfs_os_file_t file; + ibool ret; dberr_t err; byte* buf2; @@ -4250,6 +4193,7 @@ fsp_flags_try_adjust(ulint space_id, ulint flags) flags, MLOG_4BYTES, &mtr); } } + mtr_commit(&mtr); } @@ -4285,8 +4229,7 @@ fil_open_single_table_tablespace( ulint flags, /*!< in: expected FSP_SPACE_FLAGS */ const char* tablename, /*!< in: table name in the databasename/tablename format */ - const char* path_in, /*!< in: tablespace filepath */ - dict_table_t* table) /*!< in: table */ + const char* path_in) 
/*!< in: table */ { dberr_t err = DB_SUCCESS; bool dict_filepath_same_as_default = false; @@ -4377,6 +4320,7 @@ fil_open_single_table_tablespace( def.file = os_file_create_simple_no_error_handling( innodb_file_data_key, def.filepath, OS_FILE_OPEN, OS_FILE_READ_ONLY, &def.success, atomic_writes); + if (def.success) { tablespaces_found++; } @@ -4391,16 +4335,12 @@ fil_open_single_table_tablespace( /* Read the first page of the datadir tablespace, if found. */ if (def.success) { def.check_msg = fil_read_first_page( - def.file, FALSE, &def.flags, &def.id, - &def.lsn, &def.lsn, &def.crypt_data); - - if (table) { - table->crypt_data = def.crypt_data; - table->page_0_read = true; - } + def.file, false, &def.flags, &def.id, + NULL, &def.crypt_data); def.valid = !def.check_msg && def.id == id && fsp_flags_match(flags, def.flags); + if (def.valid) { valid_tablespaces_found++; } else { @@ -4414,17 +4354,13 @@ fil_open_single_table_tablespace( /* Read the first page of the remote tablespace */ if (remote.success) { remote.check_msg = fil_read_first_page( - remote.file, FALSE, &remote.flags, &remote.id, - &remote.lsn, &remote.lsn, &remote.crypt_data); - - if (table) { - table->crypt_data = remote.crypt_data; - table->page_0_read = true; - } + remote.file, false, &remote.flags, &remote.id, + NULL, &remote.crypt_data); /* Validate this single-table-tablespace with SYS_TABLES. */ remote.valid = !remote.check_msg && remote.id == id && fsp_flags_match(flags, remote.flags); + if (remote.valid) { valid_tablespaces_found++; } else { @@ -4439,13 +4375,8 @@ fil_open_single_table_tablespace( /* Read the first page of the datadir tablespace, if found. 
*/ if (dict.success) { dict.check_msg = fil_read_first_page( - dict.file, FALSE, &dict.flags, &dict.id, - &dict.lsn, &dict.lsn, &dict.crypt_data); - - if (table) { - table->crypt_data = dict.crypt_data; - table->page_0_read = true; - } + dict.file, false, &dict.flags, &dict.id, + NULL, &dict.crypt_data); /* Validate this single-table-tablespace with SYS_TABLES. */ dict.valid = !dict.check_msg && dict.id == id @@ -4472,14 +4403,16 @@ fil_open_single_table_tablespace( "See " REFMAN "innodb-troubleshooting-datadict.html " "for how to resolve the issue.", tablename); + if (IS_XTRABACKUP() && fix_dict) { ib_logf(IB_LOG_LEVEL_WARN, - "It will be removed from the data dictionary."); + "It will be removed from the data dictionary."); if (purge_sys) { fil_remove_invalid_table_from_data_dict(tablename); } } + err = DB_CORRUPTION; goto cleanup_and_exit; @@ -4491,26 +4424,32 @@ fil_open_single_table_tablespace( ib_logf(IB_LOG_LEVEL_ERROR, "A tablespace for %s has been found in " "multiple places;", tablename); + if (def.success) { ib_logf(IB_LOG_LEVEL_ERROR, - "Default location; %s, LSN=" LSN_PF - ", Space ID=%lu, Flags=%lu", - def.filepath, def.lsn, - (ulong) def.id, (ulong) def.flags); + "Default location; %s" + ", Space ID=" ULINTPF " , Flags=" ULINTPF " .", + def.filepath, + def.id, + def.flags); } + if (remote.success) { ib_logf(IB_LOG_LEVEL_ERROR, - "Remote location; %s, LSN=" LSN_PF - ", Space ID=%lu, Flags=%lu", - remote.filepath, remote.lsn, - (ulong) remote.id, (ulong) remote.flags); + "Remote location; %s" + ", Space ID=" ULINTPF " , Flags=" ULINTPF " .", + remote.filepath, + remote.id, + remote.flags); } + if (dict.success) { ib_logf(IB_LOG_LEVEL_ERROR, - "Dictionary location; %s, LSN=" LSN_PF - ", Space ID=%lu, Flags=%lu", - dict.filepath, dict.lsn, - (ulong) dict.id, (ulong) dict.flags); + "Dictionary location; %s" + ", Space ID=" ULINTPF " , Flags=" ULINTPF " .", + dict.filepath, + dict.id, + dict.flags); } /* Force-recovery will allow some tablespaces to be @@ 
-4543,6 +4482,7 @@ fil_open_single_table_tablespace( os_file_close(def.file); tablespaces_found--; } + if (dict.success && !dict.valid) { dict.success = false; os_file_close(dict.file); @@ -4658,7 +4598,17 @@ cleanup_and_exit: mem_free(def.filepath); - if (err == DB_SUCCESS && !srv_read_only_mode) { + /* We need to check fsp flags when no errors has happened and + server was not started on read only mode and tablespace validation + was requested or flags contain other table options except + low order bits to FSP_FLAGS_POS_PAGE_SSIZE position. + Note that flag comparison is pessimistic. Adjust is required + only when flags contain buggy MariaDB 10.1.0 - + MariaDB 10.1.20 flags. */ + if (err == DB_SUCCESS + && !srv_read_only_mode + && (validate + || flags >= (1U << FSP_FLAGS_POS_PAGE_SSIZE))) { fsp_flags_try_adjust(id, flags & ~FSP_FLAGS_MEM_MASK); } @@ -4895,8 +4845,8 @@ fil_validate_single_table_tablespace( check_first_page: fsp->success = TRUE; if (const char* check_msg = fil_read_first_page( - fsp->file, FALSE, &fsp->flags, &fsp->id, - &fsp->lsn, &fsp->lsn, &fsp->crypt_data)) { + fsp->file, false, &fsp->flags, &fsp->id, + NULL, &fsp->crypt_data)) { ib_logf(IB_LOG_LEVEL_ERROR, "%s in tablespace %s (table %s)", check_msg, fsp->filepath, tablename); @@ -4909,6 +4859,7 @@ check_first_page: in Xtrabackup, this does not work.*/ return; } + if (!restore_attempted) { if (!fil_user_tablespace_find_space_id(fsp)) { return; @@ -5152,11 +5103,11 @@ will_not_choose: if (def.success && remote.success) { ib_logf(IB_LOG_LEVEL_ERROR, "Tablespaces for %s have been found in two places;\n" - "Location 1: SpaceID: %lu LSN: %lu File: %s\n" - "Location 2: SpaceID: %lu LSN: %lu File: %s\n" + "Location 1: SpaceID: " ULINTPF " File: %s\n" + "Location 2: SpaceID: " ULINTPF " File: %s\n" "You must delete one of them.", - tablename, (ulong) def.id, (ulong) def.lsn, - def.filepath, (ulong) remote.id, (ulong) remote.lsn, + tablename, def.id, + def.filepath, remote.id, remote.filepath); 
def.success = FALSE; @@ -6141,19 +6092,19 @@ fil_report_invalid_page_access( ulint len, /*!< in: I/O length */ ulint type) /*!< in: I/O type */ { - fprintf(stderr, - "InnoDB: Error: trying to access page number %lu" - " in space %lu,\n" - "InnoDB: space name %s,\n" - "InnoDB: which is outside the tablespace bounds.\n" - "InnoDB: Byte offset %lu, len %lu, i/o type %lu.\n" - "InnoDB: If you get this error at mysqld startup," - " please check that\n" - "InnoDB: your my.cnf matches the ibdata files" - " that you have in the\n" - "InnoDB: MySQL server.\n", - (ulong) block_offset, (ulong) space_id, space_name, - (ulong) byte_offset, (ulong) len, (ulong) type); + ib_logf(IB_LOG_LEVEL_FATAL, + "Trying to access page number " ULINTPF + " in space " ULINTPF + " space name %s," + " which is outside the tablespace bounds." + " Byte offset " ULINTPF ", len " ULINTPF + " i/o type " ULINTPF ".%s", + block_offset, space_id, space_name, + byte_offset, len, type, + space_id == 0 && !srv_was_started + ? "Please check that the configuration matches" + " the InnoDB system tablespace location (ibdata files)" + : ""); } /********************************************************************//** @@ -6373,11 +6324,10 @@ _fil_io( mutex_exit(&fil_system->mutex); return(DB_ERROR); } + fil_report_invalid_page_access( block_offset, space_id, space->name, byte_offset, len, type); - - ut_error; } /* Open file if closed */ @@ -6389,10 +6339,11 @@ _fil_io( ib_logf(IB_LOG_LEVEL_ERROR, "Trying to do i/o to a tablespace which " "exists without .ibd data file. 
" - "i/o type %lu, space id %lu, page no %lu, " - "i/o length %lu bytes", - (ulong) type, (ulong) space_id, - (ulong) block_offset, (ulong) len); + "i/o type " ULINTPF ", space id " + ULINTPF ", page no " ULINTPF ", " + "i/o length " ULINTPF " bytes", + type, space_id, + block_offset, len); return(DB_TABLESPACE_DELETED); } @@ -6412,8 +6363,6 @@ _fil_io( fil_report_invalid_page_access( block_offset, space_id, space->name, byte_offset, len, type); - - ut_error; } /* Now we have made the changes in the data structures of fil_system */ @@ -6886,7 +6835,7 @@ fil_buf_block_init( } struct fil_iterator_t { - os_file_t file; /*!< File handle */ + pfs_os_file_t file; /*!< File handle */ const char* filepath; /*!< File path name */ os_offset_t start; /*!< From where to start */ os_offset_t end; /*!< Where to stop */ @@ -6929,15 +6878,15 @@ fil_iterate( /* TODO: For compressed tables we do a lot of useless copying for non-index pages. Unfortunately, it is required by buf_zip_decompress() */ + const bool row_compressed = callback.get_zip_size() > 0; for (offset = iter.start; offset < iter.end; offset += n_bytes) { byte* io_buffer = iter.io_buffer; - bool row_compressed = false; block->frame = io_buffer; - if (callback.get_zip_size() > 0) { + if (row_compressed) { page_zip_des_init(&block->page.zip); page_zip_set_size(&block->page.zip, iter.page_size); block->page.zip.data = block->frame + UNIV_PAGE_SIZE; @@ -6946,9 +6895,6 @@ fil_iterate( /* Zip IO is done in the compressed page buffer. */ io_buffer = block->page.zip.data; - row_compressed = true; - } else { - io_buffer = iter.io_buffer; } /* We have to read the exact number of bytes. 
Otherwise the @@ -6961,16 +6907,12 @@ fil_iterate( ut_ad(n_bytes > 0); ut_ad(!(n_bytes % iter.page_size)); - byte* readptr = io_buffer; - byte* writeptr = io_buffer; - bool encrypted = false; - + const bool encrypted = iter.crypt_data != NULL + && iter.crypt_data->should_encrypt(); /* Use additional crypt io buffer if tablespace is encrypted */ - if (iter.crypt_data != NULL && iter.crypt_data->should_encrypt()) { - encrypted = true; - readptr = iter.crypt_io_buffer; - writeptr = iter.crypt_io_buffer; - } + byte* const readptr = encrypted + ? iter.crypt_io_buffer : io_buffer; + byte* const writeptr = readptr; if (!os_file_read(iter.file, readptr, offset, (ulint) n_bytes)) { @@ -6993,8 +6935,9 @@ fil_iterate( ulint page_type = mach_read_from_2(src+FIL_PAGE_TYPE); - bool page_compressed = (page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED || - page_type == FIL_PAGE_PAGE_COMPRESSED); + const bool page_compressed + = page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED + || page_type == FIL_PAGE_PAGE_COMPRESSED; /* If tablespace is encrypted, we need to decrypt the page. 
Note that tablespaces are not in @@ -7096,8 +7039,7 @@ fil_iterate( 0,/* FIXME: compression level */ 512,/* FIXME: use proper block size */ encrypted, - &len, - NULL); + &len); updated = true; } @@ -7165,7 +7107,7 @@ fil_tablespace_iterate( PageCallback& callback) { dberr_t err; - os_file_t file; + pfs_os_file_t file; char* filepath; ut_a(n_io_buffers > 0); diff --git a/storage/xtradb/fil/fil0pagecompress.cc b/storage/xtradb/fil/fil0pagecompress.cc index 8b2449983df..2b6ae95640f 100644 --- a/storage/xtradb/fil/fil0pagecompress.cc +++ b/storage/xtradb/fil/fil0pagecompress.cc @@ -99,17 +99,16 @@ fil_compress_page( ulint level, /* in: compression level */ ulint block_size, /*!< in: block size */ bool encrypted, /*!< in: is page also encrypted */ - ulint* out_len, /*!< out: actual length of compressed + ulint* out_len) /*!< out: actual length of compressed page */ - byte* lzo_mem) /*!< in: temporal memory used by LZO */ { int err = Z_OK; int comp_level = level; ulint header_len = FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE; - ulint write_size=0; + ulint write_size = 0; /* Cache to avoid change during function execution */ ulint comp_method = innodb_compression_algorithm; - bool allocated=false; + bool allocated = false; /* page_compression does not apply to tables or tablespaces that use ROW_FORMAT=COMPRESSED */ @@ -121,13 +120,23 @@ fil_compress_page( if (!out_buf) { allocated = true; - out_buf = static_cast<byte *>(ut_malloc(UNIV_PAGE_SIZE)); -#ifdef HAVE_LZO + ulint size = UNIV_PAGE_SIZE; + + /* Both snappy and lzo compression methods require that + output buffer used for compression is bigger than input + buffer. Increase the allocated buffer size accordingly. 
*/ +#if HAVE_SNAPPY + if (comp_method == PAGE_SNAPPY_ALGORITHM) { + size = snappy_max_compressed_length(size); + } +#endif +#if HAVE_LZO if (comp_method == PAGE_LZO_ALGORITHM) { - lzo_mem = static_cast<byte *>(ut_malloc(LZO1X_1_15_MEM_COMPRESS)); - memset(lzo_mem, 0, LZO1X_1_15_MEM_COMPRESS); + size += LZO1X_1_15_MEM_COMPRESS; } #endif + + out_buf = static_cast<byte *>(ut_malloc(size)); } ut_ad(buf); @@ -163,8 +172,14 @@ fil_compress_page( switch(comp_method) { #ifdef HAVE_LZ4 case PAGE_LZ4_ALGORITHM: + +#ifdef HAVE_LZ4_COMPRESS_DEFAULT + err = LZ4_compress_default((const char *)buf, + (char *)out_buf+header_len, len, write_size); +#else err = LZ4_compress_limitedOutput((const char *)buf, (char *)out_buf+header_len, len, write_size); +#endif /* HAVE_LZ4_COMPRESS_DEFAULT */ write_size = err; if (err == 0) { @@ -192,7 +207,7 @@ fil_compress_page( #ifdef HAVE_LZO case PAGE_LZO_ALGORITHM: err = lzo1x_1_15_compress( - buf, len, out_buf+header_len, &write_size, lzo_mem); + buf, len, out_buf+header_len, &write_size, out_buf+UNIV_PAGE_SIZE); if (err != LZO_E_OK || write_size > UNIV_PAGE_SIZE-header_len) { if (space && !space->printed_compression_failure) { @@ -283,6 +298,7 @@ fil_compress_page( case PAGE_SNAPPY_ALGORITHM: { snappy_status cstatus; + write_size = snappy_max_compressed_length(UNIV_PAGE_SIZE); cstatus = snappy_compress( (const char *)buf, @@ -438,11 +454,6 @@ fil_compress_page( err_exit: if (allocated) { ut_free(out_buf); -#ifdef HAVE_LZO - if (comp_method == PAGE_LZO_ALGORITHM) { - ut_free(lzo_mem); - } -#endif } return (buf); @@ -504,7 +515,7 @@ fil_decompress_page( ptype != FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED)) { ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: We try to uncompress corrupted page" - " CRC %lu type %lu len %lu.", + " CRC " ULINTPF " type " ULINTPF " len " ULINTPF ".", mach_read_from_4(buf+FIL_PAGE_SPACE_OR_CHKSUM), mach_read_from_2(buf+FIL_PAGE_TYPE), len); @@ -528,7 +539,7 @@ fil_decompress_page( if (actual_size == 0 || actual_size > UNIV_PAGE_SIZE) 
{ ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: We try to uncompress corrupted page" - " actual size %lu compression %s.", + " actual size " ULINTPF " compression %s.", actual_size, fil_get_compression_alg_name(compression_alg)); fflush(stderr); if (return_error) { @@ -543,12 +554,9 @@ fil_decompress_page( *write_size = actual_size; } -#ifdef UNIV_PAGECOMPRESS_DEBUG - ib_logf(IB_LOG_LEVEL_INFO, - "Preparing for decompress for len %lu\n", - actual_size); -#endif /* UNIV_PAGECOMPRESS_DEBUG */ - + DBUG_PRINT("compress", + ("Preparing for decompress for len " ULINTPF ".", + actual_size)); switch(compression_alg) { case PAGE_ZLIB_ALGORITHM: @@ -560,7 +568,7 @@ fil_decompress_page( ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: Page is marked as compressed" " but uncompress failed with error %d " - " size %lu len %lu.", + " size " ULINTPF " len " ULINTPF ".", err, actual_size, len); fflush(stderr); @@ -579,9 +587,10 @@ fil_decompress_page( if (err != (int)actual_size) { ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: Page is marked as compressed" - " but decompression read only %d bytes " - " size %lu len %lu.", + " but uncompress failed with error %d " + " size " ULINTPF " len " ULINTPF ".", err, actual_size, len); + fflush(stderr); if (return_error) { @@ -593,16 +602,17 @@ fil_decompress_page( #endif /* HAVE_LZ4 */ #ifdef HAVE_LZO case PAGE_LZO_ALGORITHM: { - ulint olen=0; + ulint olen = 0; err = lzo1x_decompress((const unsigned char *)buf+header_len, actual_size,(unsigned char *)in_buf, &olen, NULL); if (err != LZO_E_OK || (olen == 0 || olen > UNIV_PAGE_SIZE)) { ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: Page is marked as compressed" - " but decompression read only %ld bytes" - " size %lu len %lu.", - olen, actual_size, len); + " but uncompress failed with error %d " + " size " ULINTPF " len " ULINTPF ".", + err, actual_size, len); + fflush(stderr); if (return_error) { @@ -637,7 +647,7 @@ fil_decompress_page( ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: Page is marked as compressed" " but 
decompression read only %ld bytes" - " size %lu len %lu.", + " size " ULINTPF "len " ULINTPF ".", dst_pos, actual_size, len); fflush(stderr); @@ -666,7 +676,7 @@ fil_decompress_page( ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: Page is marked as compressed" " but decompression read only %du bytes" - " size %lu len %lu err %d.", + " size " ULINTPF " len " ULINTPF " err %d.", dst_pos, actual_size, len, err); fflush(stderr); @@ -682,7 +692,7 @@ fil_decompress_page( case PAGE_SNAPPY_ALGORITHM: { snappy_status cstatus; - ulint olen = 0; + ulint olen = UNIV_PAGE_SIZE; cstatus = snappy_uncompress( (const char *)(buf+header_len), @@ -690,11 +700,11 @@ fil_decompress_page( (char *)in_buf, (size_t*)&olen); - if (cstatus != SNAPPY_OK || (olen == 0 || olen > UNIV_PAGE_SIZE)) { + if (cstatus != SNAPPY_OK || olen != UNIV_PAGE_SIZE) { ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: Page is marked as compressed" - " but decompression read only %lu bytes" - " size %lu len %lu err %d.", + " but decompression read only " ULINTPF " bytes" + " size " ULINTPF " len " ULINTPF " err %d.", olen, actual_size, len, (int)cstatus); fflush(stderr); @@ -703,6 +713,7 @@ fil_decompress_page( } ut_error; } + break; } #endif /* HAVE_SNAPPY */ @@ -728,8 +739,7 @@ fil_decompress_page( memcpy(buf, in_buf, len); error_return: - // Need to free temporal buffer if no buffer was given - if (page_buf == NULL) { + if (page_buf != in_buf) { ut_free(in_buf); } } diff --git a/storage/xtradb/fsp/fsp0fsp.cc b/storage/xtradb/fsp/fsp0fsp.cc index 934824c6462..df8c6ffe222 100644 --- a/storage/xtradb/fsp/fsp0fsp.cc +++ b/storage/xtradb/fsp/fsp0fsp.cc @@ -673,16 +673,13 @@ fsp_header_init_fields( } #ifndef UNIV_HOTBACKUP -/**********************************************************************//** -Initializes the space header of a new created space and creates also the -insert buffer tree root if space == 0. */ +/** Initialize a tablespace header. 
+@param[in] space_id space id +@param[in] size current size in blocks +@param[in,out] mtr mini-transaction */ UNIV_INTERN void -fsp_header_init( -/*============*/ - ulint space_id, /*!< in: space id */ - ulint size, /*!< in: current size in blocks */ - mtr_t* mtr) /*!< in/out: mini-transaction */ +fsp_header_init(ulint space_id, ulint size, mtr_t* mtr) { fsp_header_t* header; buf_block_t* block; @@ -725,14 +722,8 @@ fsp_header_init( flst_init(header + FSP_SEG_INODES_FREE, mtr); mlog_write_ull(header + FSP_SEG_ID, 1, mtr); - if (space_id == 0) { - fsp_fill_free_list(FALSE, space_id, header, mtr); - btr_create(DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF, - 0, 0, DICT_IBUF_ID_MIN + space_id, - dict_ind_redundant, mtr); - } else { - fsp_fill_free_list(TRUE, space_id, header, mtr); - } + + fsp_fill_free_list(space_id != TRX_SYS_SPACE, space_id, header, mtr); fil_space_t* space = fil_space_acquire(space_id); ut_ad(space); @@ -2075,7 +2066,6 @@ fseg_create_general( inode = fsp_alloc_seg_inode(space_header, mtr); if (inode == NULL) { - goto funct_exit; } @@ -2750,7 +2740,6 @@ fsp_reserve_free_extents( ibool success; ulint n_pages_added; size_t total_reserved = 0; - ulint rounds = 0; ut_ad(mtr); *n_reserved = n_ext; @@ -2829,17 +2818,7 @@ try_to_extend: success = fsp_try_extend_data_file(&n_pages_added, space, space_header, mtr); if (success && n_pages_added > 0) { - - rounds++; total_reserved += n_pages_added; - - if (rounds > 50) { - ib_logf(IB_LOG_LEVEL_INFO, - "Space id %lu trying to reserve %lu extents actually reserved %lu " - " reserve %lu free %lu size %lu rounds %lu total_reserved %llu", - space, n_ext, n_pages_added, reserve, n_free, size, rounds, (ullint) total_reserved); - } - goto try_again; } @@ -4160,20 +4139,8 @@ ulint fsp_header_get_crypt_offset( const ulint zip_size) { - ulint pageno = 0; - /* compute first page_no that will have xdes stored on page != 0*/ - for (ulint i = 0; - (pageno = xdes_calc_descriptor_page(zip_size, i)) == 0; ) - i++; - - /* use 
pageno prior to this...i.e last page on page 0 */ - ut_ad(pageno > 0); - pageno--; - - ulint iv_offset = XDES_ARR_OFFSET + - XDES_SIZE * (1 + xdes_calc_descriptor_index(zip_size, pageno)); - - return FSP_HEADER_OFFSET + iv_offset; + return (FSP_HEADER_OFFSET + (XDES_ARR_OFFSET + XDES_SIZE * + (zip_size ? zip_size : UNIV_PAGE_SIZE) / FSP_EXTENT_SIZE)); } /**********************************************************************//** diff --git a/storage/xtradb/fts/fts0que.cc b/storage/xtradb/fts/fts0que.cc index 2e335c1c255..f24973e26fb 100644 --- a/storage/xtradb/fts/fts0que.cc +++ b/storage/xtradb/fts/fts0que.cc @@ -953,6 +953,18 @@ fts_query_free_doc_ids( query->total_size -= SIZEOF_RBT_CREATE; } +/** +Free the query intersection +@param[in] query query instance */ +static +void +fts_query_free_intersection( + fts_query_t* query) +{ + fts_query_free_doc_ids(query, query->intersection); + query->intersection = NULL; +} + /*******************************************************************//** Add the word to the documents "list" of matching words from the query. We make a copy of the word from the query heap. 
*/ @@ -1311,6 +1323,7 @@ fts_query_intersect( /* error is passed by 'query->error' */ if (query->error != DB_SUCCESS) { ut_ad(query->error == DB_FTS_EXCEED_RESULT_CACHE_LIMIT); + fts_query_free_intersection(query); return(query->error); } @@ -1339,6 +1352,8 @@ fts_query_intersect( ut_a(!query->multi_exist || (query->multi_exist && rbt_size(query->doc_ids) <= n_doc_ids)); + } else if (query->intersection != NULL) { + fts_query_free_intersection(query); } } @@ -1557,6 +1572,11 @@ fts_merge_doc_ids( query, ranking->doc_id, ranking->rank); if (query->error != DB_SUCCESS) { + if (query->intersection != NULL) + { + ut_a(query->oper == FTS_EXIST); + fts_query_free_intersection(query); + } DBUG_RETURN(query->error); } diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 49b0fedb3b4..6097f87b43d 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2000, 2016, Oracle and/or its affiliates. +Copyright (c) 2000, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2013, 2017, MariaDB Corporation. Copyright (c) 2008, 2009 Google Inc. Copyright (c) 2009, Percona Inc. @@ -876,17 +876,31 @@ innobase_purge_changed_page_bitmaps( /*================================*/ ulonglong lsn) __attribute__((unused)); /*!< in: LSN to purge files up to */ +/** Empty free list algorithm. +Checks if buffer pool is big enough to enable backoff algorithm. +InnoDB empty free list algorithm backoff requires free pages +from LRU for the best performance. +buf_LRU_buf_pool_running_out cancels query if 1/4 of +buffer pool belongs to LRU or freelist. +At the same time buf_flush_LRU_list_batch +keeps up to BUF_LRU_MIN_LEN in LRU. 
+In order to avoid deadlock backoff requires buffer pool +to be at least 4*BUF_LRU_MIN_LEN, +but flush performance is bad because of thrashing +and additional BUF_LRU_MIN_LEN pages are requested. +@param[in] algorithm desired algorithm from srv_empty_free_list_t +@return true if it's possible to enable backoff. */ +static inline +bool +innodb_empty_free_list_algorithm_allowed( + srv_empty_free_list_t algorithm) +{ + long long buf_pool_pages = srv_buf_pool_size / srv_page_size + / srv_buf_pool_instances; -/*****************************************************************//** -Check whether this is a fake change transaction. -@return TRUE if a fake change transaction */ -static -my_bool -innobase_is_fake_change( -/*====================*/ - handlerton *hton, /*!< in: InnoDB handlerton */ - THD* thd) __attribute__((unused)); /*!< in: MySQL thread handle of the user for - whom the transaction is being committed */ + return(buf_pool_pages >= BUF_LRU_MIN_LEN * (4 + 1) + || algorithm != SRV_EMPTY_FREE_LIST_BACKOFF); +} /** Get the list of foreign keys referencing a specified table table. @@ -1601,14 +1615,11 @@ innobase_drop_database( the path is used as the database name: for example, in 'mysql/data/test' the database name is 'test' */ -/*******************************************************************//** -Closes an InnoDB database. */ +/** Shut down the InnoDB storage engine. +@return 0 */ static int -innobase_end( -/*=========*/ - handlerton* hton, /* in: Innodb handlerton */ - ha_panic_function type); +innobase_end(handlerton*, ha_panic_function); #if NOT_USED /*****************************************************************//** 
-InnoDB empty free list algorithm backoff requires free pages -from LRU for the best performance. -buf_LRU_buf_pool_running_out cancels query if 1/4 of -buffer pool belongs to LRU or freelist. -At the same time buf_flush_LRU_list_batch -keeps up to BUF_LRU_MIN_LEN in LRU. -In order to avoid deadlock baclkoff requires buffer pool -to be at least 4*BUF_LRU_MIN_LEN, -but flush peformance is bad because of trashing -and additional BUF_LRU_MIN_LEN pages are requested. -@return true if it's possible to enable backoff. */ -static -bool -innodb_empty_free_list_algorithm_backoff_allowed( - srv_empty_free_list_t - algorithm, /*!< in: desired algorithm - from srv_empty_free_list_t */ - long long buf_pool_pages); /*!< in: total number - of pages inside buffer pool */ - #ifdef NOT_USED /*************************************************************//** Removes old archived transaction log files. @@ -2134,39 +2123,6 @@ ha_innobase::is_fake_change_enabled(THD* thd) } /********************************************************************//** -In XtraDB it is impossible for a transaction to own a search latch outside of -InnoDB code, so there is nothing to release on demand. We keep this function to -simplify maintenance. -@return 0 */ -static -int -innobase_release_temporary_latches( -/*===============================*/ - handlerton* hton MY_ATTRIBUTE((unused)), /*!< in: handlerton */ - THD* thd MY_ATTRIBUTE((unused))) /*!< in: MySQL thread */ -{ -#ifdef UNIV_DEBUG - DBUG_ASSERT(hton == innodb_hton_ptr); - - if (!innodb_inited || thd == NULL) { - - return(0); - } - - trx_t* trx = thd_to_trx(thd); - - if (trx != NULL) { -#ifdef UNIV_SYNC_DEBUG - ut_ad(!btr_search_own_any()); -#endif - trx_search_latch_release_if_reserved(trx); - } -#endif - - return(0); -} - -/********************************************************************//** Increments innobase_active_counter and every INNOBASE_WAKE_INTERVALth time calls srv_active_wake_master_thread. 
This function should be used when a single database operation may introduce a small need for @@ -2596,16 +2552,11 @@ innobase_get_stmt( THD* thd, /*!< in: MySQL thread handle */ size_t* length) /*!< out: length of the SQL statement */ { - const char* query = NULL; - LEX_STRING *stmt = NULL; - if (thd) { - stmt = thd_query_string(thd); - if (stmt) { - *length = stmt->length; - query = stmt->str; - } + if (const LEX_STRING *stmt = thd_query_string(thd)) { + *length = stmt->length; + return stmt->str; } - return (query); + return NULL; } /**********************************************************************//** @@ -3505,13 +3456,13 @@ innobase_convert_identifier( ibool file_id)/*!< in: TRUE=id is a table or database name; FALSE=id is an UTF-8 string */ { + char nz2[MAX_TABLE_NAME_LEN + 1]; const char* s = id; int q; if (file_id) { char nz[MAX_TABLE_NAME_LEN + 1]; - char nz2[MAX_TABLE_NAME_LEN + 1]; /* Decode the table name. The MySQL function expects a NUL-terminated string. The input and output strings @@ -3829,9 +3780,6 @@ innobase_init( innobase_hton->flags = HTON_SUPPORTS_EXTENDED_KEYS | HTON_SUPPORTS_FOREIGN_KEYS; - innobase_hton->release_temporary_latches = - innobase_release_temporary_latches; - innobase_hton->kill_query = innobase_kill_connection; if (srv_file_per_table) @@ -4374,10 +4322,9 @@ innobase_change_buffering_inited_ok: srv_use_posix_fallocate = (ibool) innobase_use_fallocate; #endif /* Do not enable backoff algorithm for small buffer pool. */ - if (!innodb_empty_free_list_algorithm_backoff_allowed( + if (!innodb_empty_free_list_algorithm_allowed( static_cast<srv_empty_free_list_t>( - srv_empty_free_list_algorithm), - innobase_buffer_pool_size / srv_page_size)) { + srv_empty_free_list_algorithm))) { sql_print_information( "InnoDB: innodb_empty_free_list_algorithm " "has been changed to legacy " @@ -4512,21 +4459,13 @@ error: DBUG_RETURN(TRUE); } -/*******************************************************************//** -Closes an InnoDB database. 
-@return TRUE if error */ +/** Shut down the InnoDB storage engine. +@return 0 */ static int -innobase_end( -/*=========*/ - handlerton* hton, /*!< in/out: InnoDB handlerton */ - ha_panic_function type MY_ATTRIBUTE((unused))) - /*!< in: ha_panic() parameter */ +innobase_end(handlerton*, ha_panic_function) { - int err= 0; - DBUG_ENTER("innobase_end"); - DBUG_ASSERT(hton == innodb_hton_ptr); if (innodb_inited) { @@ -4543,9 +4482,7 @@ innobase_end( innodb_inited = 0; hash_table_free(innobase_open_tables); innobase_open_tables = NULL; - if (innobase_shutdown_for_mysql() != DB_SUCCESS) { - err = 1; - } + innodb_shutdown(); srv_free_paths_and_sizes(); my_free(internal_innobase_data_file_path); mysql_mutex_destroy(&innobase_share_mutex); @@ -4554,7 +4491,7 @@ innobase_end( mysql_mutex_destroy(&pending_checkpoint_mutex); } - DBUG_RETURN(err); + DBUG_RETURN(0); } /****************************************************************//** @@ -4609,22 +4546,6 @@ innobase_purge_changed_page_bitmaps( } /*****************************************************************//** -Check whether this is a fake change transaction. -@return TRUE if a fake change transaction */ -static -my_bool -innobase_is_fake_change( -/*====================*/ - handlerton *hton MY_ATTRIBUTE((unused)), - /*!< in: InnoDB handlerton */ - THD* thd) /*!< in: MySQL thread handle of the user for - whom the transaction is being committed */ -{ - trx_t* trx = check_trx_exists(thd); - return UNIV_UNLIKELY(trx->fake_changes); -} - -/*****************************************************************//** Commits a transaction in an InnoDB database. 
*/ static void @@ -6305,9 +6226,6 @@ ha_innobase::open( thd = ha_thd(); - /* No-op in XtraDB */ - innobase_release_temporary_latches(ht, thd); - normalize_table_name(norm_name, name); user_thd = NULL; @@ -6477,8 +6395,6 @@ table_opened: innobase_copy_frm_flags_from_table_share(ib_table, table->s); - ib_table->thd = (void*)thd; - /* No point to init any statistics if tablespace is still encrypted. */ if (ib_table->is_readable()) { dict_stats_init(ib_table); @@ -6769,9 +6685,6 @@ ha_innobase::close() thd = ha_thd(); - /* No-op in XtraDB */ - innobase_release_temporary_latches(ht, thd); - row_prebuilt_free(prebuilt, FALSE); if (upd_buf != NULL) { @@ -8172,17 +8085,31 @@ build_template_field( templ->rec_field_is_prefix = FALSE; if (dict_index_is_clust(index)) { + templ->rec_field_is_prefix = false; templ->rec_field_no = templ->clust_rec_field_no; templ->rec_prefix_field_no = ULINT_UNDEFINED; } else { - /* If we're in a secondary index, keep track - * of the original index position even if this - * is just a prefix index; we will use this - * later to avoid a cluster index lookup in - * some cases.*/ + /* If we're in a secondary index, keep track of the original + index position even if this is just a prefix index; we will use + this later to avoid a cluster index lookup in some cases.*/ templ->rec_field_no = dict_index_get_nth_col_pos(index, i, &templ->rec_prefix_field_no); + templ->rec_field_is_prefix + = (templ->rec_field_no == ULINT_UNDEFINED) + && (templ->rec_prefix_field_no != ULINT_UNDEFINED); +#ifdef UNIV_DEBUG + if (templ->rec_prefix_field_no != ULINT_UNDEFINED) + { + const dict_field_t* field = dict_index_get_nth_field( + index, + templ->rec_prefix_field_no); + ut_ad(templ->rec_field_is_prefix + == (field->prefix_len != 0)); + } else { + ut_ad(!templ->rec_field_is_prefix); + } +#endif } if (field->real_maybe_null()) { @@ -8572,8 +8499,8 @@ ha_innobase::innobase_lock_autoinc(void) break; } } - /* Fall through to old style locking. 
*/ - + /* Use old style locking. */ + /* fall through */ case AUTOINC_OLD_STYLE_LOCKING: DBUG_EXECUTE_IF("die_if_autoinc_old_lock_style_used", ut_ad(0);); @@ -9150,8 +9077,8 @@ calc_row_difference( } } - if (o_len != n_len || (o_len != UNIV_SQL_NULL && - 0 != memcmp(o_ptr, n_ptr, o_len))) { + if (o_len != n_len || (o_len != 0 && o_len != UNIV_SQL_NULL + && 0 != memcmp(o_ptr, n_ptr, o_len))) { /* The field has changed */ ufield = uvect->fields + n_changed; @@ -11875,7 +11802,8 @@ create_options_are_invalid( case ROW_TYPE_DYNAMIC: CHECK_ERROR_ROW_TYPE_NEEDS_FILE_PER_TABLE(use_tablespace); CHECK_ERROR_ROW_TYPE_NEEDS_GT_ANTELOPE; - /* fall through since dynamic also shuns KBS */ + /* ROW_FORMAT=DYNAMIC also shuns KEY_BLOCK_SIZE */ + /* fall through */ case ROW_TYPE_COMPACT: case ROW_TYPE_REDUNDANT: if (kbs_specified) { @@ -12285,7 +12213,8 @@ index_bad: break; /* Correct row_format */ } zip_allowed = FALSE; - /* fall through to set row_format = COMPACT */ + /* Set ROW_FORMAT = COMPACT */ + /* fall through */ case ROW_TYPE_NOT_USED: case ROW_TYPE_FIXED: case ROW_TYPE_PAGE: @@ -12294,6 +12223,7 @@ index_bad: thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: assuming ROW_FORMAT=COMPACT."); + /* fall through */ case ROW_TYPE_DEFAULT: /* If we fell through, set row format to Compact. 
*/ row_format = ROW_TYPE_COMPACT; @@ -13109,7 +13039,8 @@ ha_innobase::delete_table( extension, in contrast to ::create */ normalize_table_name(norm_name, name); - if (srv_read_only_mode) { + if (srv_read_only_mode + || srv_force_recovery >= SRV_FORCE_NO_UNDO_LOG_SCAN) { DBUG_RETURN(HA_ERR_TABLE_READONLY); } else if (row_is_magic_monitor_table(norm_name) && check_global_access(thd, PROCESS_ACL)) { @@ -15078,7 +15009,8 @@ fill_foreign_key_list(THD* thd, { ut_ad(mutex_own(&dict_sys->mutex)); - for (dict_foreign_set::iterator it = table->referenced_set.begin(); + for (dict_foreign_set::const_iterator it + = table->referenced_set.begin(); it != table->referenced_set.end(); ++it) { dict_foreign_t* foreign = *it; @@ -18633,15 +18565,17 @@ innodb_buffer_pool_evict_uncompressed(void) ut_ad(block->page.in_LRU_list); mutex_enter(&block->mutex); - if (!buf_LRU_free_page(&block->page, false)) { - mutex_exit(&block->mutex); - all_evicted = false; - } else { - mutex_exit(&block->mutex); + all_evicted = buf_LRU_free_page(&block->page, false); + mutex_exit(&block->mutex); + + if (all_evicted) { + mutex_enter(&buf_pool->LRU_list_mutex); - } + block = UT_LIST_GET_LAST(buf_pool->unzip_LRU); + } else { - block = prev_block; + block = prev_block; + } } mutex_exit(&buf_pool->LRU_list_mutex); @@ -18832,6 +18766,10 @@ innodb_sched_priority_cleaner_update( const void* save) /*!< in: immediate result from check function */ { + if (srv_read_only_mode) { + return; + } + ulint priority = *static_cast<const ulint *>(save); ulint actual_priority; ulint nice = 0; @@ -18858,10 +18796,6 @@ innodb_sched_priority_cleaner_update( } /* Set the priority for the page cleaner thread */ - if (srv_read_only_mode) { - - return; - } ut_ad(buf_page_cleaner_is_active); nice = os_thread_get_priority(srv_cleaner_tid); @@ -19252,8 +19186,15 @@ checkpoint_now_set( log_make_checkpoint_at(LSN_MAX, TRUE); fil_flush_file_spaces(FIL_LOG); } - fil_write_flushed_lsn_to_data_files(log_sys->lsn, 0); - 
fil_flush_file_spaces(FIL_TABLESPACE); + + dberr_t err = fil_write_flushed_lsn(log_sys->lsn); + + if (err != DB_SUCCESS) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Failed to write flush lsn to the " + "system tablespace at checkpoint err=%s", + ut_strerr(err)); + } } } @@ -19855,32 +19796,6 @@ wsrep_fake_trx_id( /*************************************************************//** -Empty free list algorithm. -Checks if buffer pool is big enough to enable backoff algorithm. -InnoDB empty free list algorithm backoff requires free pages -from LRU for the best performance. -buf_LRU_buf_pool_running_out cancels query if 1/4 of -buffer pool belongs to LRU or freelist. -At the same time buf_flush_LRU_list_batch -keeps up to BUF_LRU_MIN_LEN in LRU. -In order to avoid deadlock baclkoff requires buffer pool -to be at least 4*BUF_LRU_MIN_LEN, -but flush peformance is bad because of trashing -and additional BUF_LRU_MIN_LEN pages are requested. -@return true if it's possible to enable backoff. */ -static -bool -innodb_empty_free_list_algorithm_backoff_allowed( - srv_empty_free_list_t algorithm, /*!< in: desired algorithm - from srv_empty_free_list_t */ - long long buf_pool_pages) /*!< in: total number - of pages inside buffer pool */ -{ - return(buf_pool_pages >= BUF_LRU_MIN_LEN * (4 + 1) - || algorithm != SRV_EMPTY_FREE_LIST_BACKOFF); -} - -/*************************************************************//** Empty free list algorithm. This function is registered as a callback with MySQL. 
@return 0 for valid algorithm */ @@ -19921,13 +19836,11 @@ innodb_srv_empty_free_list_algorithm_validate( return(1); algorithm = static_cast<srv_empty_free_list_t>(algo); - if (!innodb_empty_free_list_algorithm_backoff_allowed( - algorithm, - innobase_buffer_pool_size / srv_page_size)) { + if (!innodb_empty_free_list_algorithm_allowed(algorithm)) { sql_print_warning( "InnoDB: innodb_empty_free_list_algorithm " "= 'backoff' requires at least" - " 20MB buffer pool.\n"); + " 20MB buffer pool instances.\n"); return(1); } diff --git a/storage/xtradb/handler/ha_innodb.h b/storage/xtradb/handler/ha_innodb.h index f6f2f1b0eee..3bb67532954 100644 --- a/storage/xtradb/handler/ha_innodb.h +++ b/storage/xtradb/handler/ha_innodb.h @@ -540,7 +540,7 @@ innobase_index_name_is_reserved( const KEY* key_info, /*!< in: Indexes to be created */ ulint num_of_keys) /*!< in: Number of indexes to be created. */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull(1), warn_unused_result)); /*****************************************************************//** #ifdef WITH_WSREP diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc index ef8c62849ae..0e7cc9a655b 100644 --- a/storage/xtradb/handler/handler0alter.cc +++ b/storage/xtradb/handler/handler0alter.cc @@ -2307,10 +2307,10 @@ online_retry_drop_indexes_with_trx( @param drop_fk constraints being dropped @param n_drop_fk number of constraints that are being dropped @return whether the constraint is being dropped */ -inline MY_ATTRIBUTE((pure, nonnull, warn_unused_result)) +MY_ATTRIBUTE((pure, nonnull(1), warn_unused_result)) +inline bool innobase_dropping_foreign( -/*======================*/ const dict_foreign_t* foreign, dict_foreign_t** drop_fk, ulint n_drop_fk) @@ -2334,10 +2334,10 @@ column that is being dropped or modified to NOT NULL. 
@retval true Not allowed (will call my_error()) @retval false Allowed */ -static MY_ATTRIBUTE((pure, nonnull, warn_unused_result)) +MY_ATTRIBUTE((pure, nonnull(1,4), warn_unused_result)) +static bool innobase_check_foreigns_low( -/*========================*/ const dict_table_t* user_table, dict_foreign_t** drop_fk, ulint n_drop_fk, @@ -2434,10 +2434,10 @@ column that is being dropped or modified to NOT NULL. @retval true Not allowed (will call my_error()) @retval false Allowed */ -static MY_ATTRIBUTE((pure, nonnull, warn_unused_result)) +MY_ATTRIBUTE((pure, nonnull(1,2,3,4), warn_unused_result)) +static bool innobase_check_foreigns( -/*====================*/ Alter_inplace_info* ha_alter_info, const TABLE* altered_table, const TABLE* old_table, diff --git a/storage/xtradb/handler/i_s.cc b/storage/xtradb/handler/i_s.cc index 086d5642dbb..9cef04c4244 100644 --- a/storage/xtradb/handler/i_s.cc +++ b/storage/xtradb/handler/i_s.cc @@ -1,7 +1,7 @@ /***************************************************************************** -Copyright (c) 2007, 2016, Oracle and/or its affiliates. -Copyrigth (c) 2014, 2017, MariaDB Corporation +Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2014, 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2966,14 +2966,16 @@ i_s_fts_deleted_generic_fill( fields = table->field; + int ret = 0; + for (ulint j = 0; j < ib_vector_size(deleted->doc_ids); ++j) { doc_id_t doc_id; doc_id = *(doc_id_t*) ib_vector_get_const(deleted->doc_ids, j); - OK(fields[I_S_FTS_DOC_ID]->store((longlong) doc_id, true)); + BREAK_IF(ret = fields[I_S_FTS_DOC_ID]->store(doc_id, true)); - OK(schema_table_store_record(thd, table)); + BREAK_IF(ret = schema_table_store_record(thd, table)); } trx_free_for_background(trx); @@ -2984,7 +2986,7 @@ i_s_fts_deleted_generic_fill( rw_lock_s_unlock(&dict_operation_lock); - DBUG_RETURN(0); + DBUG_RETURN(ret); } /*******************************************************************//** @@ -3222,13 +3224,13 @@ i_s_fts_index_cache_fill_one_index( /*===============================*/ fts_index_cache_t* index_cache, /*!< in: FTS index cache */ THD* thd, /*!< in: thread */ + fts_string_t* conv_str, /*!< in/out: buffer */ TABLE_LIST* tables) /*!< in/out: tables to fill */ { TABLE* table = (TABLE*) tables->table; Field** fields; CHARSET_INFO* index_charset; const ib_rbt_node_t* rbt_node; - fts_string_t conv_str; uint dummy_errors; char* word_str; @@ -3237,10 +3239,9 @@ i_s_fts_index_cache_fill_one_index( fields = table->field; index_charset = index_cache->charset; - conv_str.f_len = system_charset_info->mbmaxlen - * FTS_MAX_WORD_LEN_IN_CHAR; - conv_str.f_str = static_cast<byte*>(ut_malloc(conv_str.f_len)); - conv_str.f_n_char = 0; + conv_str->f_n_char = 0; + + int ret = 0; /* Go through each word in the index cache */ for (rbt_node = rbt_first(index_cache->words); @@ -3252,16 +3253,16 @@ i_s_fts_index_cache_fill_one_index( /* Convert word from index charset to system_charset_info */ if (index_charset->cset != system_charset_info->cset) { - conv_str.f_n_char = my_convert( - reinterpret_cast<char*>(conv_str.f_str), 
- static_cast<uint32>(conv_str.f_len), + conv_str->f_n_char = my_convert( + reinterpret_cast<char*>(conv_str->f_str), + static_cast<uint32>(conv_str->f_len), system_charset_info, reinterpret_cast<char*>(word->text.f_str), static_cast<uint32>(word->text.f_len), index_charset, &dummy_errors); - ut_ad(conv_str.f_n_char <= conv_str.f_len); - conv_str.f_str[conv_str.f_n_char] = 0; - word_str = reinterpret_cast<char*>(conv_str.f_str); + ut_ad(conv_str->f_n_char <= conv_str->f_len); + conv_str->f_str[conv_str->f_n_char] = 0; + word_str = reinterpret_cast<char*>(conv_str->f_str); } else { word_str = reinterpret_cast<char*>(word->text.f_str); } @@ -3319,9 +3320,7 @@ i_s_fts_index_cache_fill_one_index( } } - ut_free(conv_str.f_str); - - DBUG_RETURN(0); + DBUG_RETURN(ret); } /*******************************************************************//** Fill the dynamic table INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHED @@ -3365,18 +3364,27 @@ i_s_fts_index_cache_fill( ut_a(cache); + int ret = 0; + fts_string_t conv_str; + conv_str.f_len = system_charset_info->mbmaxlen + * FTS_MAX_WORD_LEN_IN_CHAR; + conv_str.f_str = static_cast<byte*>(ut_malloc(conv_str.f_len)); + for (ulint i = 0; i < ib_vector_size(cache->indexes); i++) { fts_index_cache_t* index_cache; index_cache = static_cast<fts_index_cache_t*> ( ib_vector_get(cache->indexes, i)); - i_s_fts_index_cache_fill_one_index(index_cache, thd, tables); + BREAK_IF(ret = i_s_fts_index_cache_fill_one_index( + index_cache, thd, &conv_str, tables)); } + ut_free(conv_str.f_str); + dict_table_close(user_table, FALSE, FALSE); - DBUG_RETURN(0); + DBUG_RETURN(ret); } /*******************************************************************//** @@ -3679,8 +3687,6 @@ i_s_fts_index_table_fill_one_fetch( } } - i_s_fts_index_table_free_one_fetch(words); - DBUG_RETURN(ret); } @@ -3694,13 +3700,13 @@ i_s_fts_index_table_fill_one_index( /*===============================*/ dict_index_t* index, /*!< in: FTS index */ THD* thd, /*!< in: thread */ + fts_string_t* 
conv_str, /*!< in/out: buffer */ TABLE_LIST* tables) /*!< in/out: tables to fill */ { ib_vector_t* words; mem_heap_t* heap; fts_string_t word; CHARSET_INFO* index_charset; - fts_string_t conv_str; dberr_t error; int ret = 0; @@ -3717,10 +3723,6 @@ i_s_fts_index_table_fill_one_index( word.f_n_char = 0; index_charset = fts_index_get_charset(index); - conv_str.f_len = system_charset_info->mbmaxlen - * FTS_MAX_WORD_LEN_IN_CHAR; - conv_str.f_str = static_cast<byte*>(ut_malloc(conv_str.f_len)); - conv_str.f_n_char = 0; /* Iterate through each auxiliary table as described in fts_index_selector */ @@ -3754,17 +3756,17 @@ i_s_fts_index_table_fill_one_index( /* Fill into tables */ ret = i_s_fts_index_table_fill_one_fetch( - index_charset, thd, tables, words, &conv_str, has_more); + index_charset, thd, tables, words, conv_str, + has_more); + i_s_fts_index_table_free_one_fetch(words); if (ret != 0) { - i_s_fts_index_table_free_one_fetch(words); goto func_exit; } } while (has_more); } func_exit: - ut_free(conv_str.f_str); mem_heap_free(heap); DBUG_RETURN(ret); @@ -3806,10 +3808,17 @@ i_s_fts_index_table_fill( DBUG_RETURN(0); } + int ret = 0; + fts_string_t conv_str; + conv_str.f_len = system_charset_info->mbmaxlen + * FTS_MAX_WORD_LEN_IN_CHAR; + conv_str.f_str = static_cast<byte*>(ut_malloc(conv_str.f_len)); + for (index = dict_table_get_first_index(user_table); index; index = dict_table_get_next_index(index)) { if (index->type & DICT_FTS) { - i_s_fts_index_table_fill_one_index(index, thd, tables); + BREAK_IF(ret = i_s_fts_index_table_fill_one_index( + index, thd, &conv_str, tables)); } } @@ -3817,7 +3826,9 @@ i_s_fts_index_table_fill( rw_lock_s_unlock(&dict_operation_lock); - DBUG_RETURN(0); + ut_free(conv_str.f_str); + + DBUG_RETURN(ret); } /*******************************************************************//** @@ -3982,6 +3993,8 @@ i_s_fts_config_fill( DBUG_ASSERT(!dict_index_is_online_ddl(index)); } + int ret = 0; + while (fts_config_key[i]) { fts_string_t value; char* 
key_name; @@ -4006,13 +4019,14 @@ i_s_fts_config_fill( ut_free(key_name); } - OK(field_store_string( - fields[FTS_CONFIG_KEY], fts_config_key[i])); + BREAK_IF(ret = field_store_string( + fields[FTS_CONFIG_KEY], fts_config_key[i])); - OK(field_store_string( - fields[FTS_CONFIG_VALUE], (const char*) value.f_str)); + BREAK_IF(ret = field_store_string( + fields[FTS_CONFIG_VALUE], + reinterpret_cast<const char*>(value.f_str))); - OK(schema_table_store_record(thd, table)); + BREAK_IF(ret = schema_table_store_record(thd, table)); i++; } @@ -4025,7 +4039,7 @@ i_s_fts_config_fill( rw_lock_s_unlock(&dict_operation_lock); - DBUG_RETURN(0); + DBUG_RETURN(ret); } /*******************************************************************//** @@ -4864,34 +4878,29 @@ i_s_innodb_buffer_page_fill( state_str = NULL; OK(fields[IDX_BUFFER_POOL_ID]->store( - static_cast<double>(page_info->pool_id))); + page_info->pool_id, true)); OK(fields[IDX_BUFFER_BLOCK_ID]->store( - static_cast<double>(page_info->block_id))); + page_info->block_id, true)); OK(fields[IDX_BUFFER_PAGE_SPACE]->store( - static_cast<double>(page_info->space_id))); + page_info->space_id, true)); OK(fields[IDX_BUFFER_PAGE_NUM]->store( - static_cast<double>(page_info->page_num))); + page_info->page_num, true)); OK(field_store_string( fields[IDX_BUFFER_PAGE_TYPE], i_s_page_type[page_info->page_type].type_str)); OK(fields[IDX_BUFFER_PAGE_FLUSH_TYPE]->store( - page_info->flush_type)); + page_info->flush_type, true)); OK(fields[IDX_BUFFER_PAGE_FIX_COUNT]->store( - page_info->fix_count)); + page_info->fix_count, true)); - if (page_info->hashed) { - OK(field_store_string( - fields[IDX_BUFFER_PAGE_HASHED], "YES")); - } else { - OK(field_store_string( - fields[IDX_BUFFER_PAGE_HASHED], "NO")); - } + OK(field_store_string(fields[IDX_BUFFER_PAGE_HASHED], + page_info->hashed ? 
"YES" : "NO")); OK(fields[IDX_BUFFER_PAGE_NEWEST_MOD]->store( (longlong) page_info->newest_mod, true)); @@ -4900,7 +4909,7 @@ i_s_innodb_buffer_page_fill( (longlong) page_info->oldest_mod, true)); OK(fields[IDX_BUFFER_PAGE_ACCESS_TIME]->store( - page_info->access_time)); + page_info->access_time, true)); fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_null(); @@ -4909,44 +4918,48 @@ i_s_innodb_buffer_page_fill( /* If this is an index page, fetch the index name and table name */ if (page_info->page_type == I_S_PAGE_TYPE_INDEX) { - const dict_index_t* index; + bool ret = false; mutex_enter(&dict_sys->mutex); - index = dict_index_get_if_in_cache_low( - page_info->index_id); - - if (index) { + if (const dict_index_t* index = + dict_index_get_if_in_cache_low( + page_info->index_id)) { table_name_end = innobase_convert_name( table_name, sizeof(table_name), index->table_name, strlen(index->table_name), thd, TRUE); - OK(fields[IDX_BUFFER_PAGE_TABLE_NAME]->store( - table_name, - static_cast<uint>(table_name_end - table_name), - system_charset_info)); - fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_notnull(); - - OK(field_store_index_name( - fields[IDX_BUFFER_PAGE_INDEX_NAME], - index->name)); + ret = fields[IDX_BUFFER_PAGE_TABLE_NAME] + ->store(table_name, + static_cast<uint>( + table_name_end + - table_name), + system_charset_info) + || field_store_index_name( + fields + [IDX_BUFFER_PAGE_INDEX_NAME], + index->name); } mutex_exit(&dict_sys->mutex); + + OK(ret); + + fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_notnull(); } OK(fields[IDX_BUFFER_PAGE_NUM_RECS]->store( - page_info->num_recs)); + page_info->num_recs, true)); OK(fields[IDX_BUFFER_PAGE_DATA_SIZE]->store( - page_info->data_size)); + page_info->data_size, true)); OK(fields[IDX_BUFFER_PAGE_ZIP_SIZE]->store( - page_info->zip_ssize - ? (UNIV_ZIP_SIZE_MIN >> 1) << page_info->zip_ssize - : 0)); + page_info->zip_ssize + ? 
(UNIV_ZIP_SIZE_MIN >> 1) << page_info->zip_ssize + : 0, true)); #if BUF_PAGE_STATE_BITS > 3 # error "BUF_PAGE_STATE_BITS > 3, please ensure that all 1<<BUF_PAGE_STATE_BITS values are checked for" @@ -4984,32 +4997,29 @@ i_s_innodb_buffer_page_fill( switch (page_info->io_fix) { case BUF_IO_NONE: - OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], - "IO_NONE")); + state_str = "IO_NONE"; break; case BUF_IO_READ: - OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], - "IO_READ")); + state_str = "IO_READ"; break; case BUF_IO_WRITE: - OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], - "IO_WRITE")); + state_str = "IO_WRITE"; break; case BUF_IO_PIN: - OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], - "IO_PIN")); + state_str = "IO_PIN"; break; } + OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], + state_str)); + OK(field_store_string(fields[IDX_BUFFER_PAGE_IS_OLD], (page_info->is_old) ? "YES" : "NO")); OK(fields[IDX_BUFFER_PAGE_FREE_CLOCK]->store( page_info->freed_page_clock)); - if (schema_table_store_record(thd, table)) { - DBUG_RETURN(1); - } + OK(schema_table_store_record(thd, table)); } DBUG_RETURN(0); @@ -5550,17 +5560,10 @@ i_s_innodb_buf_page_lru_fill( ulint num_page) /*!< in: number of page info cached */ { - TABLE* table; - Field** fields; - mem_heap_t* heap; - DBUG_ENTER("i_s_innodb_buf_page_lru_fill"); - table = tables->table; - - fields = table->field; - - heap = mem_heap_create(1000); + TABLE* table = tables->table; + Field** fields = table->field; /* Iterate through the cached array and fill the I_S table rows */ for (ulint i = 0; i < num_page; i++) { @@ -5575,43 +5578,37 @@ i_s_innodb_buf_page_lru_fill( page_info = info_array + i; OK(fields[IDX_BUF_LRU_POOL_ID]->store( - static_cast<double>(page_info->pool_id))); - + page_info->pool_id, true)); OK(fields[IDX_BUF_LRU_POS]->store( - static_cast<double>(page_info->block_id))); + page_info->block_id, true)); OK(fields[IDX_BUF_LRU_PAGE_SPACE]->store( - 
static_cast<double>(page_info->space_id))); + page_info->space_id, true)); OK(fields[IDX_BUF_LRU_PAGE_NUM]->store( - static_cast<double>(page_info->page_num))); + page_info->page_num, true)); OK(field_store_string( - fields[IDX_BUF_LRU_PAGE_TYPE], - i_s_page_type[page_info->page_type].type_str)); + fields[IDX_BUF_LRU_PAGE_TYPE], + i_s_page_type[page_info->page_type].type_str)); OK(fields[IDX_BUF_LRU_PAGE_FLUSH_TYPE]->store( - static_cast<double>(page_info->flush_type))); + page_info->flush_type, true)); OK(fields[IDX_BUF_LRU_PAGE_FIX_COUNT]->store( - static_cast<double>(page_info->fix_count))); + page_info->fix_count, true)); - if (page_info->hashed) { - OK(field_store_string( - fields[IDX_BUF_LRU_PAGE_HASHED], "YES")); - } else { - OK(field_store_string( - fields[IDX_BUF_LRU_PAGE_HASHED], "NO")); - } + OK(field_store_string(fields[IDX_BUF_LRU_PAGE_HASHED], + page_info->hashed ? "YES" : "NO")); OK(fields[IDX_BUF_LRU_PAGE_NEWEST_MOD]->store( - page_info->newest_mod, true)); + page_info->newest_mod, true)); OK(fields[IDX_BUF_LRU_PAGE_OLDEST_MOD]->store( - page_info->oldest_mod, true)); + page_info->oldest_mod, true)); OK(fields[IDX_BUF_LRU_PAGE_ACCESS_TIME]->store( - page_info->access_time)); + page_info->access_time, true)); fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->set_null(); @@ -5620,43 +5617,47 @@ i_s_innodb_buf_page_lru_fill( /* If this is an index page, fetch the index name and table name */ if (page_info->page_type == I_S_PAGE_TYPE_INDEX) { - const dict_index_t* index; + bool ret = false; mutex_enter(&dict_sys->mutex); - index = dict_index_get_if_in_cache_low( - page_info->index_id); - - if (index) { + if (const dict_index_t* index = + dict_index_get_if_in_cache_low( + page_info->index_id)) { table_name_end = innobase_convert_name( table_name, sizeof(table_name), index->table_name, strlen(index->table_name), thd, TRUE); - OK(fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->store( - table_name, - static_cast<uint>(table_name_end - table_name), - system_charset_info)); - 
fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->set_notnull(); - - OK(field_store_index_name( - fields[IDX_BUF_LRU_PAGE_INDEX_NAME], - index->name)); + ret = fields[IDX_BUF_LRU_PAGE_TABLE_NAME] + ->store(table_name, + static_cast<uint>( + table_name_end + - table_name), + system_charset_info) + || field_store_index_name( + fields + [IDX_BUF_LRU_PAGE_INDEX_NAME], + index->name); } mutex_exit(&dict_sys->mutex); + + OK(ret); + + fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->set_notnull(); } OK(fields[IDX_BUF_LRU_PAGE_NUM_RECS]->store( - page_info->num_recs)); + page_info->num_recs, true)); OK(fields[IDX_BUF_LRU_PAGE_DATA_SIZE]->store( - page_info->data_size)); + page_info->data_size, true)); OK(fields[IDX_BUF_LRU_PAGE_ZIP_SIZE]->store( - page_info->zip_ssize ? - 512 << page_info->zip_ssize : 0)); + page_info->zip_ssize + ? 512 << page_info->zip_ssize : 0, true)); state = static_cast<enum buf_page_state>(page_info->page_state); @@ -5685,35 +5686,31 @@ i_s_innodb_buf_page_lru_fill( switch (page_info->io_fix) { case BUF_IO_NONE: - OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX], - "IO_NONE")); + state_str = "IO_NONE"; break; case BUF_IO_READ: - OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX], - "IO_READ")); + state_str = "IO_READ"; break; case BUF_IO_WRITE: - OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX], - "IO_WRITE")); + state_str = "IO_WRITE"; + break; + case BUF_IO_PIN: + state_str = "IO_PIN"; break; } + OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX], + state_str)); + OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IS_OLD], - (page_info->is_old) ? "YES" : "NO")); + page_info->is_old ? 
"YES" : "NO")); OK(fields[IDX_BUF_LRU_PAGE_FREE_CLOCK]->store( - page_info->freed_page_clock)); - - if (schema_table_store_record(thd, table)) { - mem_heap_free(heap); - DBUG_RETURN(1); - } + page_info->freed_page_clock, true)); - mem_heap_empty(heap); + OK(schema_table_store_record(thd, table)); } - mem_heap_free(heap); - DBUG_RETURN(0); } diff --git a/storage/xtradb/handler/i_s.h b/storage/xtradb/handler/i_s.h index 55ef6e7ea42..4bb3ea33462 100644 --- a/storage/xtradb/handler/i_s.h +++ b/storage/xtradb/handler/i_s.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2007, 2013, Oracle and/or its affiliates. All Rights Reserved. -Copyrigth (c) 2014, 2015, MariaDB Corporation +Copyrigth (c) 2014, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -77,6 +77,8 @@ extern struct st_mysql_plugin i_s_innodb_changed_page_bitmaps; DBUG_RETURN(1); \ } +#define BREAK_IF(expr) if ((expr)) break + #define RETURN_IF_INNODB_NOT_STARTED(plugin_name) \ do { \ if (!srv_was_started) { \ diff --git a/storage/xtradb/ibuf/ibuf0ibuf.cc b/storage/xtradb/ibuf/ibuf0ibuf.cc index e66568565e1..0445bb557e1 100644 --- a/storage/xtradb/ibuf/ibuf0ibuf.cc +++ b/storage/xtradb/ibuf/ibuf0ibuf.cc @@ -2963,8 +2963,7 @@ ibuf_get_volume_buffered_hash( fold = ut_fold_binary(data, len); hash += (fold / (CHAR_BIT * sizeof *hash)) % size; - bitmask = static_cast<ulint>( - 1 << (fold % (CHAR_BIT * sizeof(*hash)))); + bitmask = static_cast<ulint>(1) << (fold % (CHAR_BIT * sizeof(*hash))); if (*hash & bitmask) { @@ -3733,7 +3732,7 @@ fail_exit: if (mode == BTR_MODIFY_PREV) { err = btr_cur_optimistic_insert( - BTR_NO_LOCKING_FLAG, + BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG, cursor, &offsets, &offsets_heap, ibuf_entry, &ins_rec, &dummy_big_rec, 0, thr, &mtr); diff --git a/storage/xtradb/include/btr0cur.h 
b/storage/xtradb/include/btr0cur.h index 960bd55d3d9..e478b33bf8e 100644 --- a/storage/xtradb/include/btr0cur.h +++ b/storage/xtradb/include/btr0cur.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -220,15 +221,17 @@ btr_cur_optimistic_insert( btr_cur_t* cursor, /*!< in: cursor on page after which to insert; cursor stays valid */ ulint** offsets,/*!< out: offsets on *rec */ - mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */ + mem_heap_t** heap, /*!< in/out: pointer to memory heap */ dtuple_t* entry, /*!< in/out: entry to insert */ rec_t** rec, /*!< out: pointer to inserted record if succeed */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or - NULL */ + be stored externally by the caller */ ulint n_ext, /*!< in: number of externally stored columns */ - que_thr_t* thr, /*!< in: query thread or NULL */ + que_thr_t* thr, /*!< in/out: query thread; can be NULL if + !(~flags + & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) */ mtr_t* mtr) /*!< in/out: mini-transaction; if this function returns DB_SUCCESS on a leaf page of a secondary index in a @@ -256,15 +259,17 @@ btr_cur_pessimistic_insert( cursor stays valid */ ulint** offsets,/*!< out: offsets on *rec */ mem_heap_t** heap, /*!< in/out: pointer to memory heap - that can be emptied, or NULL */ + that can be emptied */ dtuple_t* entry, /*!< in/out: entry to insert */ rec_t** rec, /*!< out: pointer to inserted record if succeed */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or - NULL */ + be stored externally by the caller */ ulint n_ext, /*!< in: number of 
externally stored columns */ - que_thr_t* thr, /*!< in: query thread or NULL */ + que_thr_t* thr, /*!< in/out: query thread; can be NULL if + !(~flags + & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) */ mtr_t* mtr) /*!< in/out: mini-transaction */ MY_ATTRIBUTE((nonnull(2,3,4,5,6,7,10), warn_unused_result)); /*************************************************************//** @@ -392,12 +397,12 @@ btr_cur_pessimistic_update( ulint** offsets,/*!< out: offsets on cursor->page_cur.rec */ mem_heap_t** offsets_heap, /*!< in/out: pointer to memory heap - that can be emptied, or NULL */ + that can be emptied */ mem_heap_t* entry_heap, /*!< in/out: memory heap for allocating big_rec and the index tuple */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or NULL */ + be stored externally by the caller */ const upd_t* update, /*!< in: update vector; this is allowed also contain trx id and roll ptr fields, but the values in update vector have no effect */ diff --git a/storage/xtradb/include/btr0defragment.h b/storage/xtradb/include/btr0defragment.h index 5c54b898e37..477824c1a35 100644 --- a/storage/xtradb/include/btr0defragment.h +++ b/storage/xtradb/include/btr0defragment.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (C) 2013, 2014 Facebook, Inc. All Rights Reserved. -Copyright (C) 2014, 2015, MariaDB Corporation. +Copyright (C) 2014, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -89,15 +89,14 @@ UNIV_INTERN void btr_defragment_save_defrag_stats_if_needed( dict_index_t* index); /*!< in: index */ -/******************************************************************//** -Thread that merges consecutive b-tree pages into fewer pages to defragment -the index. 
*/ + +/** Merge consecutive b-tree pages into fewer pages to defragment indexes */ extern "C" UNIV_INTERN os_thread_ret_t -DECLARE_THREAD(btr_defragment_thread)( -/*==========================================*/ - void* arg); /*!< in: a dummy parameter required by - os_thread_create */ +DECLARE_THREAD(btr_defragment_thread)(void*); + +/** Whether btr_defragment_thread is active */ +extern bool btr_defragment_thread_active; #endif /* !UNIV_HOTBACKUP */ #endif diff --git a/storage/xtradb/include/buf0buf.h b/storage/xtradb/include/buf0buf.h index 9b4276efaa8..1899165ace0 100644 --- a/storage/xtradb/include/buf0buf.h +++ b/storage/xtradb/include/buf0buf.h @@ -1577,20 +1577,13 @@ directory (buf) to see it. Do not use from outside! */ typedef struct { bool reserved; /*!< true if this slot is reserved */ -#ifdef HAVE_LZO - byte* lzo_mem; /*!< Temporal memory used by LZO */ -#endif byte* crypt_buf; /*!< for encryption the data needs to be copied to a separate buffer before it's encrypted&written. this as a page can be read while it's being flushed */ - byte* crypt_buf_free; /*!< for encryption, allocated buffer - that is then alligned */ byte* comp_buf; /*!< for compression we need temporal buffer because page can be read while it's being flushed */ - byte* comp_buf_free; /*!< for compression, allocated - buffer that is then alligned */ byte* out_buf; /*!< resulting buffer after encryption/compression. This is a pointer and not allocated. */ diff --git a/storage/xtradb/include/buf0dblwr.h b/storage/xtradb/include/buf0dblwr.h index 5582778825c..7b7464761cc 100644 --- a/storage/xtradb/include/buf0dblwr.h +++ b/storage/xtradb/include/buf0dblwr.h @@ -1,7 +1,7 @@ /***************************************************************************** -Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. 
+Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -39,13 +39,15 @@ extern buf_dblwr_t* buf_dblwr; /** Set to TRUE when the doublewrite buffer is being created */ extern ibool buf_dblwr_being_created; -/****************************************************************//** -Creates the doublewrite buffer to a new InnoDB installation. The header of the -doublewrite buffer is placed on the trx system header page. */ +/** Create the doublewrite buffer if the doublewrite buffer header +is not present in the TRX_SYS page. +@return whether the operation succeeded +@retval true if the doublewrite buffer exists or was created +@retval false if the creation failed (too small first data file) */ UNIV_INTERN -void -buf_dblwr_create(void); -/*==================*/ +bool +buf_dblwr_create() + MY_ATTRIBUTE((warn_unused_result)); /****************************************************************//** At a database startup initializes the doublewrite buffer memory structure if @@ -56,7 +58,7 @@ recovery, this function loads the pages from double write buffer into memory. */ void buf_dblwr_init_or_load_pages( /*=========================*/ - os_file_t file, + pfs_os_file_t file, char* path, bool load_corrupt_pages); diff --git a/storage/xtradb/include/buf0flu.h b/storage/xtradb/include/buf0flu.h index af50a5498ef..6089baf81e8 100644 --- a/storage/xtradb/include/buf0flu.h +++ b/storage/xtradb/include/buf0flu.h @@ -34,7 +34,7 @@ Created 11/5/1995 Heikki Tuuri #include "buf0types.h" /** Flag indicating if the page_cleaner is in active state. */ -extern ibool buf_page_cleaner_is_active; +extern bool buf_page_cleaner_is_active; /** Flag indicating if the lru_manager is in active state. 
*/ extern bool buf_lru_manager_is_active; diff --git a/storage/xtradb/include/data0type.ic b/storage/xtradb/include/data0type.ic index 555852474aa..8f5cee0fd5f 100644 --- a/storage/xtradb/include/data0type.ic +++ b/storage/xtradb/include/data0type.ic @@ -576,7 +576,8 @@ dtype_get_fixed_size_low( #else /* !UNIV_HOTBACKUP */ return(len); #endif /* !UNIV_HOTBACKUP */ - /* fall through for variable-length charsets */ + /* Treat as variable-length. */ + /* Fall through */ case DATA_VARCHAR: case DATA_BINARY: case DATA_DECIMAL: diff --git a/storage/xtradb/include/dict0dict.h b/storage/xtradb/include/dict0dict.h index 6da8eb892d9..0290b884ece 100644 --- a/storage/xtradb/include/dict0dict.h +++ b/storage/xtradb/include/dict0dict.h @@ -1192,7 +1192,7 @@ dict_index_get_nth_col_pos( const dict_index_t* index, /*!< in: index */ ulint n, /*!< in: column number */ ulint* prefix_col_pos) /*!< out: col num if prefix */ - __attribute__((nonnull(1), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1), warn_unused_result)); /********************************************************************//** Looks for column n in an index. @return position in internal representation of the index; @@ -1207,7 +1207,7 @@ dict_index_get_nth_col_or_prefix_pos( column prefixes too */ ulint* prefix_col_pos) /*!< out: col num if prefix */ - __attribute__((nonnull(1), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1), warn_unused_result)); /********************************************************************//** Returns TRUE if the index contains a column or a prefix of that column. 
@return TRUE if contains the column or its prefix */ diff --git a/storage/xtradb/include/dict0dict.ic b/storage/xtradb/include/dict0dict.ic index 4ed1afc8094..f68d4e176da 100644 --- a/storage/xtradb/include/dict0dict.ic +++ b/storage/xtradb/include/dict0dict.ic @@ -705,8 +705,6 @@ dict_sys_tables_type_validate( ulint page_compression_level = DICT_TF_GET_PAGE_COMPRESSION_LEVEL(type); ulint atomic_writes = DICT_TF_GET_ATOMIC_WRITES(type); - ut_a(atomic_writes <= ATOMIC_WRITES_OFF); - /* The low order bit of SYS_TABLES.TYPE is always set to 1. If the format is UNIV_FORMAT_B or higher, this field is the same as dict_table_t::flags. Zero is not allowed here. */ @@ -716,16 +714,12 @@ dict_sys_tables_type_validate( if (redundant) { if (zip_ssize || atomic_blobs) { - fprintf(stderr, "InnoDB: Error: SYS_TABLES::TYPE=Redundant, zip_ssize " ULINTPF " atomic_blobs " ULINTPF "\n", - zip_ssize, atomic_blobs); return(ULINT_UNDEFINED); } } /* Make sure there are no bits that we do not know about. */ if (unused) { - fprintf(stderr, "InnoDB: Error: SYS_TABLES::TYPE=" ULINTPF ", unused " ULINTPF "\n", - type, unused); return(ULINT_UNDEFINED); } @@ -737,11 +731,8 @@ dict_sys_tables_type_validate( The DICT_N_COLS_COMPACT flag should be in N_COLS, but we already know that. */ - } else if (zip_ssize) { /* Antelope does not support COMPRESSED format. */ - fprintf(stderr, "InnoDB: Error: SYS_TABLES::TYPE=" ULINTPF ", zip_ssize " ULINTPF "\n", - type, zip_ssize); return(ULINT_UNDEFINED); } @@ -751,15 +742,11 @@ dict_sys_tables_type_validate( should be in N_COLS, but we already know about the low_order_bit and DICT_N_COLS_COMPACT flags. */ if (!atomic_blobs) { - fprintf(stderr, "InnoDB: Error: SYS_TABLES::TYPE=" ULINTPF ", zip_ssize " ULINTPF " atomic_blobs " ULINTPF "\n", - type, zip_ssize, atomic_blobs); return(ULINT_UNDEFINED); } /* Validate that the number is within allowed range. 
*/ if (zip_ssize > PAGE_ZIP_SSIZE_MAX) { - fprintf(stderr, "InnoDB: Error: SYS_TABLES::TYPE=" ULINTPF ", zip_ssize " ULINTPF " max %d\n", - type, zip_ssize, PAGE_ZIP_SSIZE_MAX); return(ULINT_UNDEFINED); } } @@ -776,18 +763,13 @@ dict_sys_tables_type_validate( low_order_bit and DICT_N_COLS_COMPACT flags. */ if (!atomic_blobs || !page_compression) { - fprintf(stderr, "InnoDB: Error: SYS_TABLES::TYPE=" ULINTPF ", page_compression " ULINTPF " page_compression_level " ULINTPF "\n" - "InnoDB: Error: atomic_blobs " ULINTPF "\n", - type, page_compression, page_compression_level, atomic_blobs); return(ULINT_UNDEFINED); } } /* Validate that the atomic writes number is within allowed range. */ if (atomic_writes > ATOMIC_WRITES_OFF) { - fprintf(stderr, "InnoDB: Error: SYS_TABLES::TYPE=" ULINTPF ", atomic_writes " ULINTPF "\n", - type, atomic_writes); - return(ULINT_UNDEFINED); + return(ULINT_UNDEFINED); } /* Return the validated SYS_TABLES.TYPE. */ diff --git a/storage/xtradb/include/dict0mem.h b/storage/xtradb/include/dict0mem.h index a32581a0e90..2a4422fc18b 100644 --- a/storage/xtradb/include/dict0mem.h +++ b/storage/xtradb/include/dict0mem.h @@ -1048,10 +1048,6 @@ struct dict_table_t{ table_id_t id; /*!< id of the table */ mem_heap_t* heap; /*!< memory heap */ char* name; /*!< table name */ - void* thd; /*!< thd */ - bool page_0_read; /*!< true if page 0 has - been already read */ - fil_space_crypt_t *crypt_data; /*!< crypt data if present */ const char* dir_path_of_temp_table;/*!< NULL or the directory path where a TEMPORARY table that was explicitly created by a user should be placed if diff --git a/storage/xtradb/include/dict0stats_bg.h b/storage/xtradb/include/dict0stats_bg.h index d5f0870718d..8f3385eb22b 100644 --- a/storage/xtradb/include/dict0stats_bg.h +++ b/storage/xtradb/include/dict0stats_bg.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2012, 2016, Oracle and/or its affiliates. 
All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -144,6 +144,10 @@ DECLARE_THREAD(dict_stats_thread)( void* arg); /*!< in: a dummy parameter required by os_thread_create */ +/** Shut down the dict_stats_thread. */ +void +dict_stats_shutdown(); + # ifndef UNIV_NONINL # include "dict0stats_bg.ic" # endif diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h index 2f03d2aa0f5..6eab5db6883 100644 --- a/storage/xtradb/include/fil0fil.h +++ b/storage/xtradb/include/fil0fil.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2013, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under @@ -210,9 +210,8 @@ struct fsp_open_info { ibool success; /*!< Has the tablespace been opened? */ const char* check_msg; /*!< fil_check_first_page() message */ ibool valid; /*!< Is the tablespace valid? 
*/ - os_file_t file; /*!< File handle */ + pfs_os_file_t file; /*!< File handle */ char* filepath; /*!< File path to open */ - lsn_t lsn; /*!< Flushed LSN from header page */ ulint id; /*!< Space ID */ ulint flags; /*!< Tablespace flags */ ulint encryption_error; /*!< if an encryption error occurs */ @@ -228,7 +227,7 @@ struct fil_node_t { belongs */ char* name; /*!< path to the file */ ibool open; /*!< TRUE if file open */ - os_file_t handle; /*!< OS handle to the file, if file open */ + pfs_os_file_t handle; /*!< OS handle to the file, if file open */ os_event_t sync_event;/*!< Condition event to group and serialize calls to fsync; os_event_set() and os_event_reset() @@ -351,9 +350,6 @@ struct fil_space_t { compression failure */ fil_space_crypt_t* crypt_data; /*!< tablespace crypt data or NULL */ - bool page_0_crypt_read; - /*!< tablespace crypt data has been - read */ ulint file_block_size; /*!< file system block size */ @@ -643,17 +639,17 @@ void fil_set_max_space_id_if_bigger( /*===========================*/ ulint max_id);/*!< in: maximum known id */ + #ifndef UNIV_HOTBACKUP -/****************************************************************//** -Writes the flushed lsn and the latest archived log number to the page -header of the first page of each data file in the system tablespace. -@return DB_SUCCESS or error number */ -UNIV_INTERN + +/** Write the flushed LSN to the page header of the first page in the +system tablespace. +@param[in] lsn flushed LSN +@return DB_SUCCESS or error number */ dberr_t -fil_write_flushed_lsn_to_data_files( -/*================================*/ - lsn_t lsn, /*!< in: lsn to write */ - ulint arch_log_no); /*!< in: latest archived log file number */ +fil_write_flushed_lsn( + lsn_t lsn) + MY_ATTRIBUTE((warn_unused_result)); /** Acquire a tablespace when it could be dropped concurrently. 
Used by background threads that do not necessarily hold proper locks @@ -799,28 +795,28 @@ private: fil_space_t* m_space; }; -/*******************************************************************//** -Reads the flushed lsn, arch no, and tablespace flag fields from a data -file at database startup. +/** Reads the flushed lsn, arch no, space_id and tablespace flag fields from +the first page of a first data file at database startup. +@param[in] data_file open data file +@param[in] one_read_only true if first datafile is already + read +@param[out] flags FSP_SPACE_FLAGS +@param[out] space_id tablepspace ID +@param[out] flushed_lsn flushed lsn value +@param[out] crypt_data encryption crypt data @retval NULL on success, or if innodb_force_recovery is set @return pointer to an error message string */ UNIV_INTERN const char* fil_read_first_page( -/*================*/ - os_file_t data_file, /*!< in: open data file */ - ibool one_read_already, /*!< in: TRUE if min and max - parameters below already - contain sensible data */ - ulint* flags, /*!< out: FSP_SPACE_FLAGS */ - ulint* space_id, /*!< out: tablespace ID */ - lsn_t* min_flushed_lsn, /*!< out: min of flushed - lsn values in data files */ - lsn_t* max_flushed_lsn, /*!< out: max of flushed - lsn values in data files */ - fil_space_crypt_t** crypt_data) /*!< out: crypt data */ - - __attribute__((warn_unused_result)); + pfs_os_file_t data_file, + ibool one_read_already, + ulint* flags, + ulint* space_id, + lsn_t* flushed_lsn, + fil_space_crypt_t** crypt_data) + MY_ATTRIBUTE((warn_unused_result)); + #endif /* !UNIV_HOTBACKUP */ /*******************************************************************//** Parses the body of a log record written about an .ibd file operation. 
That is, @@ -1006,7 +1002,7 @@ fil_create_new_single_table_tablespace( must be >= FIL_IBD_FILE_INITIAL_SIZE */ fil_encryption_t mode, /*!< in: encryption mode */ ulint key_id) /*!< in: encryption key_id */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull(2), warn_unused_result)); #ifndef UNIV_HOTBACKUP /** Try to adjust FSP_SPACE_FLAGS if they differ from the expectations. (Typically when upgrading from MariaDB 10.1.0..10.1.20.) @@ -1047,8 +1043,7 @@ fil_open_single_table_tablespace( ulint flags, /*!< in: expected FSP_SPACE_FLAGS */ const char* tablename, /*!< in: table name in the databasename/tablename format */ - const char* filepath, /*!< in: tablespace filepath */ - dict_table_t* table) /*!< in: table */ + const char* filepath) /*!< in: tablespace filepath */ __attribute__((nonnull(5), warn_unused_result)); #endif /* !UNIV_HOTBACKUP */ @@ -1340,12 +1335,12 @@ struct PageCallback { Called for every page in the tablespace. If the page was not updated then its state must be set to BUF_PAGE_NOT_USED. For compressed tables the page descriptor memory will be at offset: - block->frame + UNIV_PAGE_SIZE; + block->frame + UNIV_PAGE_SIZE; @param offset - physical offset within the file @param block - block read from file, note it is not from the buffer pool @retval DB_SUCCESS or error code. */ virtual dberr_t operator()( - os_offset_t offset, + os_offset_t offset, buf_block_t* block) UNIV_NOTHROW = 0; /** @@ -1353,7 +1348,7 @@ struct PageCallback { to open it for the file that is being iterated over. @param filename - then physical name of the tablespace file. @param file - OS file handle */ - void set_file(const char* filename, os_file_t file) UNIV_NOTHROW + void set_file(const char* filename, pfs_os_file_t file) UNIV_NOTHROW { m_file = file; m_filepath = filename; @@ -1389,7 +1384,7 @@ struct PageCallback { ulint m_page_size; /** File handle to the tablespace */ - os_file_t m_file; + pfs_os_file_t m_file; /** Physical file path. 
*/ const char* m_filepath; diff --git a/storage/xtradb/include/fil0pagecompress.h b/storage/xtradb/include/fil0pagecompress.h index 73667c5420e..03e16699ce3 100644 --- a/storage/xtradb/include/fil0pagecompress.h +++ b/storage/xtradb/include/fil0pagecompress.h @@ -65,9 +65,8 @@ fil_compress_page( ulint level, /* in: compression level */ ulint block_size, /*!< in: block size */ bool encrypted, /*!< in: is page also encrypted */ - ulint* out_len, /*!< out: actual length of compressed + ulint* out_len); /*!< out: actual length of compressed page */ - byte* lzo_mem); /*!< in: temporal memory used by LZO */ /****************************************************************//** For page compressed pages decompress the page after actual read diff --git a/storage/xtradb/include/fsp0fsp.h b/storage/xtradb/include/fsp0fsp.h index 6ed78eba6f9..715572199ab 100644 --- a/storage/xtradb/include/fsp0fsp.h +++ b/storage/xtradb/include/fsp0fsp.h @@ -519,16 +519,14 @@ fsp_header_init_fields( ulint space_id, /*!< in: space id */ ulint flags); /*!< in: tablespace flags (FSP_SPACE_FLAGS): 0, or table->flags if newer than COMPACT */ -/**********************************************************************//** -Initializes the space header of a new created space and creates also the -insert buffer tree root if space == 0. */ +/** Initialize a tablespace header. +@param[in] space_id space id +@param[in] size current size in blocks +@param[in,out] mtr mini-transaction */ UNIV_INTERN void -fsp_header_init( -/*============*/ - ulint space, /*!< in: space id */ - ulint size, /*!< in: current size in blocks */ - mtr_t* mtr); /*!< in/out: mini-transaction */ +fsp_header_init(ulint space_id, ulint size, mtr_t* mtr); + /**********************************************************************//** Increases the space size field of a space. 
*/ UNIV_INTERN diff --git a/storage/xtradb/include/ha0ha.h b/storage/xtradb/include/ha0ha.h index 7351b407e8c..58eb581e76a 100644 --- a/storage/xtradb/include/ha0ha.h +++ b/storage/xtradb/include/ha0ha.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -107,7 +107,7 @@ chosen to be a slightly bigger prime number. @param level in: level of the mutexes in the latching order @param n_m in: number of mutexes to protect the hash table; must be a power of 2, or 0 */ -# define ha_create(n_c,n_m,type,level) ha_create_func(n_c,level,n_m,type) +# define ib_create(n_c,n_m,type,level) ha_create_func(n_c,level,n_m,type) #else /* UNIV_SYNC_DEBUG */ /** Creates a hash table. @return own: created table @@ -116,7 +116,7 @@ chosen to be a slightly bigger prime number. @param level in: level of the mutexes in the latching order @param n_m in: number of mutexes to protect the hash table; must be a power of 2, or 0 */ -# define ha_create(n_c,n_m,type,level) ha_create_func(n_c,n_m,type) +# define ib_create(n_c,n_m,type,level) ha_create_func(n_c,n_m,type) #endif /* UNIV_SYNC_DEBUG */ /*************************************************************//** diff --git a/storage/xtradb/include/ha_prototypes.h b/storage/xtradb/include/ha_prototypes.h index a161ec8c06c..b053be9e61d 100644 --- a/storage/xtradb/include/ha_prototypes.h +++ b/storage/xtradb/include/ha_prototypes.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2006, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -144,7 +145,7 @@ enum durability_properties thd_requested_durability( /*=====================*/ const THD* thd) /*!< in: thread handle */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /******************************************************************//** Returns true if the transaction this thread is processing has edited diff --git a/storage/xtradb/include/log0online.h b/storage/xtradb/include/log0online.h index 722336dd6b4..5c3e7d07fd9 100644 --- a/storage/xtradb/include/log0online.h +++ b/storage/xtradb/include/log0online.h @@ -130,7 +130,7 @@ log_online_bitmap_iterator_next( /** Struct for single bitmap file information */ struct log_online_bitmap_file_struct { char name[FN_REFLEN]; /*!< Name with full path */ - os_file_t file; /*!< Handle to opened file */ + pfs_os_file_t file; /*!< Handle to opened file */ ib_uint64_t size; /*!< Size of the file */ os_offset_t offset; /*!< Offset of the next read, or count of already-read bytes diff --git a/storage/xtradb/include/log0recv.h b/storage/xtradb/include/log0recv.h index e7b6a937f01..73d53d2ddab 100644 --- a/storage/xtradb/include/log0recv.h +++ b/storage/xtradb/include/log0recv.h @@ -137,26 +137,25 @@ a freshly read page) */ # define recv_recover_page(jri, block) recv_recover_page_func(block) #endif /* !UNIV_HOTBACKUP */ -/********************************************************//** -Recovers from a checkpoint. When this function returns, the database is able + +/** Recovers from a checkpoint. When this function returns, the database is able to start processing of new user transactions, but the function recv_recovery_from_checkpoint_finish should be called later to complete the recovery and free the resources used in it. 
+@param[in] type LOG_CHECKPOINT or LOG_ARCHIVE +@param[in] limit_lsn recover up to this lsn if possible +@param[in] flushed_lsn flushed lsn from first data file @return error code or DB_SUCCESS */ UNIV_INTERN dberr_t recv_recovery_from_checkpoint_start_func( -/*=====================================*/ #ifdef UNIV_LOG_ARCHIVE - ulint type, /*!< in: LOG_CHECKPOINT or - LOG_ARCHIVE */ - lsn_t limit_lsn, /*!< in: recover up to this lsn - if possible */ + ulint type, + lsn_t limit_lsn, #endif /* UNIV_LOG_ARCHIVE */ - lsn_t min_flushed_lsn,/*!< in: min flushed lsn from - data files */ - lsn_t max_flushed_lsn);/*!< in: max flushed lsn from - data files */ + lsn_t flushed_lsn) + MY_ATTRIBUTE((warn_unused_result)); + #ifdef UNIV_LOG_ARCHIVE /** Wrapper for recv_recovery_from_checkpoint_start_func(). Recovers from a checkpoint. When this function returns, the database is able @@ -165,11 +164,10 @@ recv_recovery_from_checkpoint_finish should be called later to complete the recovery and free the resources used in it. @param type in: LOG_CHECKPOINT or LOG_ARCHIVE @param lim in: recover up to this log sequence number if possible -@param min in: minimum flushed log sequence number from data files -@param max in: maximum flushed log sequence number from data files +@param lsn in: flushed log sequence number from first data file @return error code or DB_SUCCESS */ -# define recv_recovery_from_checkpoint_start(type,lim,min,max) \ - recv_recovery_from_checkpoint_start_func(type,lim,min,max) +# define recv_recovery_from_checkpoint_start(type,lim,lsn) \ + recv_recovery_from_checkpoint_start_func(type,lim,lsn) #else /* UNIV_LOG_ARCHIVE */ /** Wrapper for recv_recovery_from_checkpoint_start_func(). Recovers from a checkpoint. When this function returns, the database is able @@ -178,12 +176,12 @@ recv_recovery_from_checkpoint_finish should be called later to complete the recovery and free the resources used in it. 
@param type ignored: LOG_CHECKPOINT or LOG_ARCHIVE @param lim ignored: recover up to this log sequence number if possible -@param min in: minimum flushed log sequence number from data files -@param max in: maximum flushed log sequence number from data files +@param lsn in: flushed log sequence number from first data file @return error code or DB_SUCCESS */ -# define recv_recovery_from_checkpoint_start(type,lim,min,max) \ - recv_recovery_from_checkpoint_start_func(min,max) +# define recv_recovery_from_checkpoint_start(type,lim,lsn) \ + recv_recovery_from_checkpoint_start_func(lsn) #endif /* UNIV_LOG_ARCHIVE */ + /********************************************************//** Completes recovery from a checkpoint. */ UNIV_INTERN diff --git a/storage/xtradb/include/mach0data.ic b/storage/xtradb/include/mach0data.ic index 3904d96c09f..3b1cf9c0378 100644 --- a/storage/xtradb/include/mach0data.ic +++ b/storage/xtradb/include/mach0data.ic @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -779,13 +780,13 @@ mach_swap_byte_order( dest += len; switch (len & 0x7) { - case 0: *--dest = *from++; - case 7: *--dest = *from++; - case 6: *--dest = *from++; - case 5: *--dest = *from++; - case 4: *--dest = *from++; - case 3: *--dest = *from++; - case 2: *--dest = *from++; + case 0: *--dest = *from++; /* fall through */ + case 7: *--dest = *from++; /* fall through */ + case 6: *--dest = *from++; /* fall through */ + case 5: *--dest = *from++; /* fall through */ + case 4: *--dest = *from++; /* fall through */ + case 3: *--dest = *from++; /* fall through */ + case 2: *--dest = *from++; /* fall through */ case 1: *--dest = *from; } } diff --git a/storage/xtradb/include/os0file.h b/storage/xtradb/include/os0file.h index 06bb6a6fbac..b17e09cf0fa 100644 --- a/storage/xtradb/include/os0file.h +++ b/storage/xtradb/include/os0file.h @@ -1,6 +1,6 @@ /*********************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2009, Percona Inc. Copyright (c) 2013, 2017, MariaDB Corporation. 
@@ -65,26 +65,54 @@ extern ibool os_aio_print_debug; /** File offset in bytes */ typedef ib_uint64_t os_offset_t; -#ifdef __WIN__ -#define SRV_PATH_SEPARATOR '\\' +#ifdef _WIN32 +# define SRV_PATH_SEPARATOR '\\' /** File handle */ -# define os_file_t HANDLE -# define os_file_invalid INVALID_HANDLE_VALUE +typedef HANDLE os_file_t; /** Convert a C file descriptor to a native file handle @param fd file descriptor @return native file handle */ -# define OS_FILE_FROM_FD(fd) (HANDLE) _get_osfhandle(fd) +# define OS_FILE_FROM_FD(fd) reinterpret_cast<HANDLE>(_get_osfhandle(fd)) #else -#define SRV_PATH_SEPARATOR '/' +# define SRV_PATH_SEPARATOR '/' /** File handle */ typedef int os_file_t; -# define os_file_invalid (-1) /** Convert a C file descriptor to a native file handle @param fd file descriptor @return native file handle */ # define OS_FILE_FROM_FD(fd) fd #endif +/** File descriptor with optional PERFORMANCE_SCHEMA instrumentation */ +struct pfs_os_file_t +{ + /** Default constructor */ + pfs_os_file_t() : m_file( +#ifdef _WIN32 + INVALID_HANDLE_VALUE +#else + -1 +#endif + ) +#ifdef UNIV_PFS_IO + , m_psi(NULL) +#endif + {} + + /** The wrapped file handle */ + os_file_t m_file; +#ifdef UNIV_PFS_IO + /** PERFORMANCE_SCHEMA descriptor */ + struct PSI_file *m_psi; +#endif + /** Implicit type conversion. + @return the wrapped file handle */ + operator os_file_t() const { return m_file; } + /** Assignment operator. 
+ @param[in] file file handle to be assigned */ + void operator=(os_file_t file) { m_file = file; } +}; + /** Umask for creating files */ extern ulint os_innodb_umask; @@ -120,6 +148,21 @@ enum os_file_create_t { ON_ERROR_NO_EXIT is set */ }; +/** Options for os_file_advise_func @{ */ +enum os_file_advise_t { + OS_FILE_ADVISE_NORMAL = 1, /*!< no advice on access pattern + (default) */ + OS_FILE_ADVISE_RANDOM = 2, /*!< access in random order */ + OS_FILE_ADVISE_SEQUENTIAL = 4, /*!< access the specified data + sequentially (with lower offsets read + before higher ones) */ + OS_FILE_ADVISE_WILLNEED = 8, /*!< specified data will be accessed + in the near future */ + OS_FILE_ADVISE_DONTNEED = 16, /*!< specified data will not be + accessed in the near future */ + OS_FILE_ADVISE_NOREUSE = 32 /*!< access only once */ +}; + #define OS_FILE_READ_ONLY 333 #define OS_FILE_READ_WRITE 444 #define OS_FILE_READ_ALLOW_DELETE 555 /* for mysqlbackup */ @@ -221,6 +264,8 @@ extern mysql_pfs_key_t innodb_file_bmp_key; various file I/O operations with performance schema. 1) register_pfs_file_open_begin() and register_pfs_file_open_end() are used to register file creation, opening, closing and renaming. 
+2) register_pfs_file_rename_begin() and register_pfs_file_rename_end() +are used to register file renaming 2) register_pfs_file_io_begin() and register_pfs_file_io_end() are used to register actual file read, write and flush 3) register_pfs_file_close_begin() and register_pfs_file_close_end() @@ -230,17 +275,30 @@ are used to register file deletion operations*/ do { \ locker = PSI_FILE_CALL(get_thread_file_name_locker)( \ state, key, op, name, &locker); \ - if (UNIV_LIKELY(locker != NULL)) { \ + if (locker != NULL) { \ PSI_FILE_CALL(start_file_open_wait)( \ locker, src_file, src_line); \ } \ } while (0) -# define register_pfs_file_open_end(locker, file) \ +# define register_pfs_file_open_end(locker, file, result) \ do { \ - if (UNIV_LIKELY(locker != NULL)) { \ - PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)(\ - locker, file); \ + if (locker != NULL) { \ + file.m_psi = PSI_FILE_CALL( \ + end_file_open_wait)( \ + locker, result); \ + } \ +} while (0) + +# define register_pfs_file_rename_begin(state, locker, key, op, name, \ + src_file, src_line) \ + register_pfs_file_open_begin(state, locker, key, op, name, \ + src_file, src_line) \ + +# define register_pfs_file_rename_end(locker, result) \ +do { \ + if (locker != NULL) { \ + PSI_FILE_CALL(end_file_open_wait)(locker, result); \ } \ } while (0) @@ -266,9 +324,9 @@ do { \ # define register_pfs_file_io_begin(state, locker, file, count, op, \ src_file, src_line) \ do { \ - locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( \ - state, file, op); \ - if (UNIV_LIKELY(locker != NULL)) { \ + locker = PSI_FILE_CALL(get_thread_file_stream_locker)( \ + state, file.m_psi, op); \ + if (locker != NULL) { \ PSI_FILE_CALL(start_file_wait)( \ locker, count, src_file, src_line); \ } \ @@ -276,7 +334,7 @@ do { \ # define register_pfs_file_io_end(locker, count) \ do { \ - if (UNIV_LIKELY(locker != NULL)) { \ + if (locker != NULL) { \ PSI_FILE_CALL(end_file_wait)(locker, count); \ } \ } while (0) @@ -290,11 +348,16 @@ 
os_file_create os_file_create_simple os_file_create_simple_no_error_handling os_file_close +os_file_close_no_error_handling os_file_rename os_aio os_file_read os_file_read_no_error_handling +os_file_read_no_error_handling_int_fd os_file_write +os_file_write_int_fd +os_file_set_eof_at +os_file_allocate The wrapper functions have the prefix of "innodb_". */ @@ -315,6 +378,9 @@ The wrapper functions have the prefix of "innodb_". */ # define os_file_close(file) \ pfs_os_file_close_func(file, __FILE__, __LINE__) +# define os_file_close_no_error_handling(file) \ + pfs_os_file_close_no_error_handling_func(file, __FILE__, __LINE__) + # define os_aio(type, is_log, mode, name, file, buf, offset, \ n, page_size, message1, message2, space_id, \ trx, write_size) \ @@ -334,9 +400,18 @@ The wrapper functions have the prefix of "innodb_". */ pfs_os_file_read_no_error_handling_func(file, buf, offset, n, \ __FILE__, __LINE__) -# define os_file_write(name, file, buf, offset, n) \ - pfs_os_file_write_func(name, file, buf, offset, n, \ - __FILE__, __LINE__) +# define os_file_read_no_error_handling_int_fd( \ + file, buf, offset, n) \ + pfs_os_file_read_no_error_handling_int_fd_func( \ + file, buf, offset, n, __FILE__, __LINE__) + +# define os_file_write(name, file, buf, offset, n) \ + pfs_os_file_write_func(name, file, buf, offset, \ + n, __FILE__, __LINE__) + +# define os_file_write_int_fd(name, file, buf, offset, n) \ + pfs_os_file_write_int_fd_func(name, file, buf, offset, \ + n, __FILE__, __LINE__) # define os_file_flush(file) \ pfs_os_file_flush_func(file, __FILE__, __LINE__) @@ -349,6 +424,15 @@ The wrapper functions have the prefix of "innodb_". 
*/ # define os_file_delete_if_exists(key, name) \ pfs_os_file_delete_if_exists_func(key, name, __FILE__, __LINE__) + +# define os_file_set_eof_at(file, new_len) \ + pfs_os_file_set_eof_at_func(file, new_len, __FILE__, __LINE__) + +# ifdef HAVE_POSIX_FALLOCATE +# define os_file_allocate(file, offset, len) \ + pfs_os_file_allocate_func(file, offset, len, __FILE__, __LINE__) +# endif + #else /* UNIV_PFS_IO */ /* If UNIV_PFS_IO is not defined, these I/O APIs point @@ -364,7 +448,11 @@ to original un-instrumented file I/O APIs */ os_file_create_simple_no_error_handling_func( \ name, create_mode, access, success, atomic_writes) -# define os_file_close(file) os_file_close_func(file) +# define os_file_close(file) \ + os_file_close_func(file) + +# define os_file_close_no_error_handling(file) \ + os_file_close_no_error_handling_func(file) # define os_aio(type, is_log, mode, name, file, buf, offset, n, page_size, message1, \ message2, space_id, trx, write_size) \ @@ -379,11 +467,17 @@ to original un-instrumented file I/O APIs */ # define os_file_read_no_error_handling(file, buf, offset, n) \ os_file_read_no_error_handling_func(file, buf, offset, n) +# define os_file_read_no_error_handling_int_fd( \ + file, buf, offset, n) \ + os_file_read_no_error_handling_func(file, buf, offset, n) +# define os_file_write_int_fd(name, file, buf, offset, n) \ + os_file_write_func(name, file, buf, offset, n) # define os_file_write(name, file, buf, offset, n) \ os_file_write_func(name, file, buf, offset, n) -# define os_file_flush(file) os_file_flush_func(file) + +# define os_file_flush(file) os_file_flush_func(file) # define os_file_rename(key, oldpath, newpath) \ os_file_rename_func(oldpath, newpath) @@ -393,6 +487,9 @@ to original un-instrumented file I/O APIs */ # define os_file_delete_if_exists(key, name) \ os_file_delete_if_exists_func(name) +# define os_file_set_eof_at(file, new_len) \ + os_file_set_eof_at_func(file, new_len) + #endif /* UNIV_PFS_IO */ /* File types for directory entry 
data type */ @@ -530,7 +627,7 @@ A simple function to open or create a file. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INTERN -os_file_t +pfs_os_file_t os_file_create_simple_no_error_handling_func( /*=========================================*/ const char* name, /*!< in: name of the file or path as a @@ -565,7 +662,7 @@ Opens an existing file or creates a new. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INTERN -os_file_t +pfs_os_file_t os_file_create_func( /*================*/ const char* name, /*!< in: name of the file or path as a @@ -626,6 +723,42 @@ ibool os_file_close_func( /*===============*/ os_file_t file); /*!< in, own: handle to a file */ +/***********************************************************************//** +NOTE! Use the corresponding macro os_file_close(), not directly this +function! +Closes a file handle. In case of error, error number can be retrieved with +os_file_get_last_error. +@return TRUE if success */ +UNIV_INTERN +bool +os_file_close_no_error_handling_func( +/*===============*/ + os_file_t file); /*!< in, own: handle to a file */ + +/***********************************************************************//** +NOTE! Please use the corresponding macro os_file_set_eof_at(), not +directly this function! +Truncates a file at the specified position. +@return TRUE if success */ +UNIV_INTERN +bool +os_file_set_eof_at_func( + os_file_t file, /*!< in: handle to a file */ + ib_uint64_t new_len);/*!< in: new file length */ + +#ifdef HAVE_POSIX_FALLOCATE +/***********************************************************************//** +NOTE! Please use the corresponding macro os_file_allocate(), not +directly this function! +Ensures that disk space is allocated for the file. 
+@return TRUE if success */ +UNIV_INTERN +bool +os_file_allocate_func( + os_file_t file, /*!< in, own: handle to a file */ + os_offset_t offset, /*!< in: file region offset */ + os_offset_t len); /*!< in: file region length */ +#endif #ifdef UNIV_PFS_IO /****************************************************************//** @@ -636,7 +769,7 @@ os_file_create_simple() which opens or creates a file. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_simple_func( /*===========================*/ mysql_pfs_key_t key, /*!< in: Performance Schema Key */ @@ -661,7 +794,7 @@ monitor file creation/open. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_simple_no_error_handling_func( /*=============================================*/ mysql_pfs_key_t key, /*!< in: Performance Schema Key */ @@ -687,7 +820,7 @@ Add instrumentation to monitor file creation/open. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_func( /*====================*/ mysql_pfs_key_t key, /*!< in: Performance Schema Key */ @@ -718,7 +851,20 @@ UNIV_INLINE ibool pfs_os_file_close_func( /*===================*/ - os_file_t file, /*!< in, own: handle to a file */ + pfs_os_file_t file, /*!< in, own: handle to a file */ + const char* src_file,/*!< in: file name where func invoked */ + ulint src_line);/*!< in: line where the func invoked */ +/***********************************************************************//** +NOTE! Please use the corresponding macro os_file_close_no_error_handling(), +not directly this function! +A performance schema instrumented wrapper function for +os_file_close_no_error_handling(). 
+@return TRUE if success */ +UNIV_INLINE +bool +pfs_os_file_close_no_error_handling_func( +/*===================*/ + pfs_os_file_t file, /*!< in, own: handle to a file */ const char* src_file,/*!< in: file name where func invoked */ ulint src_line);/*!< in: line where the func invoked */ /*******************************************************************//** @@ -731,7 +877,7 @@ UNIV_INLINE ibool pfs_os_file_read_func( /*==================*/ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read */ os_offset_t offset, /*!< in: file offset where to read */ ulint n, /*!< in: number of bytes to read */ @@ -750,7 +896,7 @@ UNIV_INLINE ibool pfs_os_file_read_no_error_handling_func( /*====================================*/ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read */ os_offset_t offset, /*!< in: file offset where to read */ ulint n, /*!< in: number of bytes to read */ @@ -772,7 +918,7 @@ pfs_os_aio_func( ulint mode, /*!< in: OS_AIO_NORMAL etc. 
I/O mode */ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read or from which to write */ os_offset_t offset, /*!< in: file offset where to read or write */ @@ -807,7 +953,7 @@ pfs_os_file_write_func( /*===================*/ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ const void* buf, /*!< in: buffer from which to write */ os_offset_t offset, /*!< in: file offset where to write */ ulint n, /*!< in: number of bytes to write */ @@ -824,7 +970,7 @@ UNIV_INLINE ibool pfs_os_file_flush_func( /*===================*/ - os_file_t file, /*!< in, own: handle to a file */ + pfs_os_file_t file, /*!< in, own: handle to a file */ const char* src_file,/*!< in: file name where func invoked */ ulint src_line);/*!< in: line where the func invoked */ @@ -876,16 +1022,66 @@ pfs_os_file_delete_if_exists_func( string */ const char* src_file,/*!< in: file name where func invoked */ ulint src_line);/*!< in: line where the func invoked */ + +/***********************************************************************//** +NOTE! Please use the corresponding macro os_file_set_eof_at(), not +directly this function! +This is the performance schema instrumented wrapper function for +os_file_set_eof_at() +@return TRUE if success */ +UNIV_INLINE +bool +pfs_os_file_set_eof_at_func( + pfs_os_file_t file, /*!< in: handle to a file */ + ib_uint64_t new_len,/*!< in: new file length */ + const char* src_file,/*!< in: file name where func invoked */ + ulint src_line);/*!< in: line where the func invoked */ + +#ifdef HAVE_POSIX_FALLOCATE +/***********************************************************************//** +NOTE! Please use the corresponding macro os_file_allocate(), not +directly this function! 
+Ensures that disk space is allocated for the file. +@return TRUE if success */ +UNIV_INLINE +bool +pfs_os_file_allocate_func( + pfs_os_file_t file, /*!< in, own: handle to a file */ + os_offset_t offset, /*!< in: file region offset */ + os_offset_t len, /*!< in: file region length */ + const char* src_file,/*!< in: file name where func invoked */ + ulint src_line);/*!< in: line where the func invoked */ +#endif + #endif /* UNIV_PFS_IO */ /***********************************************************************//** -Closes a file handle. -@return TRUE if success */ +Checks if the file is marked as invalid. +@return TRUE if invalid */ UNIV_INTERN -ibool -os_file_close_no_error_handling( -/*============================*/ - os_file_t file); /*!< in, own: handle to a file */ +bool +os_file_is_invalid( + pfs_os_file_t file); /*!< in, own: handle to a file */ + +/***********************************************************************//** +Marks the file as invalid. */ +UNIV_INTERN +void +os_file_mark_invalid( + pfs_os_file_t* file); /*!< out: pointer to a handle to a file */ + +/***********************************************************************//** +Announces an intention to access file data in a specific pattern in the +future. +@return TRUE if success */ +UNIV_INTERN +bool +os_file_advise( + pfs_os_file_t file, /*!< in, own: handle to a file */ + os_offset_t offset, /*!< in: file region offset */ + os_offset_t len, /*!< in: file region length */ + ulint advice);/*!< in: advice for access pattern */ + /***********************************************************************//** Gets a file size. @return file size, or (os_offset_t) -1 on failure */ @@ -893,7 +1089,7 @@ UNIV_INTERN os_offset_t os_file_get_size( /*=============*/ - os_file_t file) /*!< in: handle to a file */ + pfs_os_file_t file) /*!< in: handle to a file */ MY_ATTRIBUTE((warn_unused_result)); /** Set the size of a newly created file. 
@param[in] name file name @@ -905,7 +1101,7 @@ UNIV_INTERN bool os_file_set_size( const char* name, - os_file_t file, + pfs_os_file_t file, os_offset_t size, bool is_sparse = false) MY_ATTRIBUTE((nonnull, warn_unused_result)); @@ -918,14 +1114,6 @@ os_file_set_eof( /*============*/ FILE* file); /*!< in: file to be truncated */ /***********************************************************************//** -Truncates a file at the specified position. -@return TRUE if success */ -UNIV_INTERN -ibool -os_file_set_eof_at( - os_file_t file, /*!< in: handle to a file */ - ib_uint64_t new_len);/*!< in: new file length */ -/***********************************************************************//** NOTE! Use the corresponding macro os_file_flush(), not directly this function! Flushes the write buffers of a given file to the disk. @return TRUE if success */ @@ -1155,7 +1343,7 @@ os_aio_func( caution! */ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read or from which to write */ os_offset_t offset, /*!< in: file offset where to read or write */ diff --git a/storage/xtradb/include/os0file.ic b/storage/xtradb/include/os0file.ic index b284d7ea9ac..72ac9d9dd6a 100644 --- a/storage/xtradb/include/os0file.ic +++ b/storage/xtradb/include/os0file.ic @@ -1,7 +1,7 @@ /***************************************************************************** -Copyright (c) 2010, 2011, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2013, SkySQL Ab. All Rights Reserved. +Copyright (c) 2010, 2017, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2013, 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -35,7 +35,7 @@ os_file_create_simple() which opens or creates a file. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_simple_func( /*===========================*/ mysql_pfs_key_t key, /*!< in: Performance Schema Key */ @@ -50,7 +50,7 @@ pfs_os_file_create_simple_func( const char* src_file,/*!< in: file name where func invoked */ ulint src_line)/*!< in: line where the func invoked */ { - os_file_t file; + pfs_os_file_t file; struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; @@ -64,8 +64,9 @@ pfs_os_file_create_simple_func( file = os_file_create_simple_func(name, create_mode, access_type, success, atomic_writes); - /* Regsiter the returning "file" value with the system */ - register_pfs_file_open_end(locker, file); + /* Register psi value for the file */ + register_pfs_file_open_end(locker, file, + (*success == TRUE ? success : 0)); return(file); } @@ -79,7 +80,7 @@ monitor file creation/open. 
@return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_simple_no_error_handling_func( /*=============================================*/ mysql_pfs_key_t key, /*!< in: Performance Schema Key */ @@ -96,7 +97,7 @@ pfs_os_file_create_simple_no_error_handling_func( const char* src_file,/*!< in: file name where func invoked */ ulint src_line)/*!< in: line where the func invoked */ { - os_file_t file; + pfs_os_file_t file; struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; @@ -108,9 +109,10 @@ pfs_os_file_create_simple_no_error_handling_func( name, src_file, src_line); file = os_file_create_simple_no_error_handling_func( - name, create_mode, access_type, success, atomic_writes); + name, create_mode, access_type, success, atomic_writes); - register_pfs_file_open_end(locker, file); + register_pfs_file_open_end(locker, file, + (*success == TRUE ? success : 0)); return(file); } @@ -123,7 +125,7 @@ Add instrumentation to monitor file creation/open. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_func( /*====================*/ mysql_pfs_key_t key, /*!< in: Performance Schema Key */ @@ -144,7 +146,7 @@ pfs_os_file_create_func( const char* src_file,/*!< in: file name where func invoked */ ulint src_line)/*!< in: line where the func invoked */ { - os_file_t file; + pfs_os_file_t file; struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; @@ -158,7 +160,8 @@ pfs_os_file_create_func( file = os_file_create_func(name, create_mode, purpose, type, success, atomic_writes); - register_pfs_file_open_end(locker, file); + register_pfs_file_open_end(locker, file, + (*success == TRUE ? 
success : 0)); return(file); } @@ -172,7 +175,7 @@ UNIV_INLINE ibool pfs_os_file_close_func( /*===================*/ - os_file_t file, /*!< in, own: handle to a file */ + pfs_os_file_t file, /*!< in, own: handle to a file */ const char* src_file,/*!< in: file name where func invoked */ ulint src_line)/*!< in: line where the func invoked */ { @@ -190,6 +193,34 @@ pfs_os_file_close_func( return(result); } +/***********************************************************************//** +NOTE! Please use the corresponding macro os_file_close_no_error_handling(), +not directly this function! +A performance schema instrumented wrapper function for +os_file_close_no_error_handling(). +@return TRUE if success */ +UNIV_INLINE +bool +pfs_os_file_close_no_error_handling_func( +/*===================*/ + pfs_os_file_t file, /*!< in, own: handle to a file */ + const char* src_file,/*!< in: file name where func invoked */ + ulint src_line)/*!< in: line where the func invoked */ +{ + bool result; + struct PSI_file_locker* locker = NULL; + PSI_file_locker_state state; + + /* register the file close */ + register_pfs_file_io_begin(&state, locker, file, 0, PSI_FILE_CLOSE, + src_file, src_line); + + result = os_file_close_no_error_handling_func(file); + + register_pfs_file_io_end(locker, 0); + + return(result); +} /*******************************************************************//** NOTE! Please use the corresponding macro os_aio(), not directly this @@ -206,7 +237,7 @@ pfs_os_aio_func( ulint mode, /*!< in: OS_AIO_NORMAL etc. 
I/O mode */ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read or from which to write */ os_offset_t offset, /*!< in: file offset where to read or write */ @@ -260,7 +291,7 @@ UNIV_INLINE ibool pfs_os_file_read_func( /*==================*/ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read */ os_offset_t offset, /*!< in: file offset where to read */ ulint n, /*!< in: number of bytes to read */ @@ -294,7 +325,7 @@ UNIV_INLINE ibool pfs_os_file_read_no_error_handling_func( /*====================================*/ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read */ os_offset_t offset, /*!< in: file offset where to read */ ulint n, /*!< in: number of bytes to read */ @@ -315,6 +346,42 @@ pfs_os_file_read_no_error_handling_func( return(result); } +/** NOTE! Please use the corresponding macro +os_file_read_no_error_handling_int_fd(), not directly this function! +This is the performance schema instrumented wrapper function for +os_file_read_no_error_handling_int_fd_func() which requests a +synchronous read operation. 
+@return TRUE if request was successful, FALSE if fail */ +UNIV_INLINE +ibool +pfs_os_file_read_no_error_handling_int_fd_func( + int file, /*!< in: handle to a file */ + void* buf, /*!< in: buffer where to read */ + os_offset_t offset, /*!< in: file offset where to read */ + ulint n, /*!< in: number of bytes to read */ + const char* src_file,/*!< in: file name where func invoked */ + ulint src_line)/*!< in: line where the func invoked */ +{ + PSI_file_locker_state state; + struct PSI_file_locker* locker; + + locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( + &state, file, PSI_FILE_READ); + if (locker != NULL) { + PSI_FILE_CALL(start_file_wait)( + locker, n, + __FILE__, __LINE__); + } + ibool result = os_file_read_no_error_handling_func( + OS_FILE_FROM_FD(file), buf, offset, n); + + if (locker != NULL) { + PSI_FILE_CALL(end_file_wait)(locker, n); + } + + return(result); +} + /*******************************************************************//** NOTE! Please use the corresponding macro os_file_write(), not directly this function! @@ -327,7 +394,7 @@ pfs_os_file_write_func( /*===================*/ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ const void* buf, /*!< in: buffer from which to write */ os_offset_t offset, /*!< in: file offset where to write */ ulint n, /*!< in: number of bytes to write */ @@ -348,6 +415,43 @@ pfs_os_file_write_func( return(result); } +/** NOTE! Please use the corresponding macro os_file_write(), not +directly this function! +This is the performance schema instrumented wrapper function for +os_file_write() which requests a synchronous write operation. 
+@return TRUE if request was successful, FALSE if fail */ +UNIV_INLINE +ibool +pfs_os_file_write_int_fd_func( + const char* name, /*!< in: name of the file or path as a + null-terminated string */ + int file, /*!< in: handle to a file */ + const void* buf, /*!< in: buffer from which to write */ + os_offset_t offset, /*!< in: file offset where to write */ + ulint n, /*!< in: number of bytes to write */ + const char* src_file,/*!< in: file name where func invoked */ + ulint src_line)/*!< in: line where the func invoked */ +{ + PSI_file_locker_state state; + struct PSI_file_locker* locker = NULL; + + locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( + &state, file, PSI_FILE_WRITE); + if (locker != NULL) { + PSI_FILE_CALL(start_file_wait)( + locker, n, + __FILE__, __LINE__); + } + ibool result = os_file_write_func( + name, OS_FILE_FROM_FD(file), buf, offset, n); + + if (locker != NULL) { + PSI_FILE_CALL(end_file_wait)(locker, n); + } + + return(result); +} + /***********************************************************************//** NOTE! Please use the corresponding macro os_file_flush(), not directly this function! 
@@ -358,7 +462,7 @@ UNIV_INLINE ibool pfs_os_file_flush_func( /*===================*/ - os_file_t file, /*!< in, own: handle to a file */ + pfs_os_file_t file, /*!< in, own: handle to a file */ const char* src_file,/*!< in: file name where func invoked */ ulint src_line)/*!< in: line where the func invoked */ { @@ -396,12 +500,12 @@ pfs_os_file_rename_func( struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; - register_pfs_file_open_begin(&state, locker, key, PSI_FILE_RENAME, newpath, + register_pfs_file_rename_begin(&state, locker, key, PSI_FILE_RENAME, newpath, src_file, src_line); result = os_file_rename_func(oldpath, newpath); - register_pfs_file_open_end(locker, 0); + register_pfs_file_rename_end(locker, 0); return(result); } @@ -465,4 +569,61 @@ pfs_os_file_delete_if_exists_func( return(result); } + +/***********************************************************************//** +NOTE! Please use the corresponding macro os_file_set_eof_at(), not +directly this function! +This is the performance schema instrumented wrapper function for +os_file_set_eof_at() +@return TRUE if success */ +UNIV_INLINE +bool +pfs_os_file_set_eof_at_func( + pfs_os_file_t file, /*!< in: handle to a file */ + ib_uint64_t new_len,/*!< in: new file length */ + const char* src_file,/*!< in: file name where func invoked */ + ulint src_line)/*!< in: line where the func invoked */ +{ + bool result; + struct PSI_file_locker* locker = NULL; + PSI_file_locker_state state; + + register_pfs_file_io_begin(&state, locker, file, 0, PSI_FILE_CHSIZE, + src_file, src_line); + result = os_file_set_eof_at_func(file, new_len); + + register_pfs_file_io_end(locker, 0); + + return(result); +} + +#ifdef HAVE_POSIX_FALLOCATE +/***********************************************************************//** +NOTE! Please use the corresponding macro os_file_allocate(), not +directly this function! +Ensures that disk space is allocated for the file. 
+@return TRUE if success */ +UNIV_INLINE +bool +pfs_os_file_allocate_func( + pfs_os_file_t file, /*!< in, own: handle to a file */ + os_offset_t offset, /*!< in: file region offset */ + os_offset_t len, /*!< in: file region length */ + const char* src_file,/*!< in: file name where func invoked */ + ulint src_line)/*!< in: line where the func invoked */ +{ + bool result; + struct PSI_file_locker* locker = NULL; + PSI_file_locker_state state; + + register_pfs_file_io_begin(&state, locker, file, 0, PSI_FILE_CHSIZE, + src_file, src_line); + result = os_file_allocate_func(file, offset, len); + + register_pfs_file_io_end(locker, 0); + + return(result); +} +#endif + #endif /* UNIV_PFS_IO */ diff --git a/storage/xtradb/include/os0sync.h b/storage/xtradb/include/os0sync.h index 62f651413e1..ce03f6a2124 100644 --- a/storage/xtradb/include/os0sync.h +++ b/storage/xtradb/include/os0sync.h @@ -959,7 +959,14 @@ struct MY_ALIGNED(CACHE_LINE_SIZE) simple_counter { compile_time_assert(!atomic || sizeof(Type) == sizeof(ulint)); if (atomic) { - return os_atomic_increment_ulint(&m_counter, i); + /* GCC would perform a type check in this code + also in case the template is instantiated with + simple_counter<Type=not_ulint, atomic=false>. + On Solaris, os_atomic_increment_ulint() maps + to atomic_add_long_nv(), which expects the + parameter to be correctly typed. */ + return os_atomic_increment_ulint( + reinterpret_cast<ulint*>(&m_counter), i); } else { return m_counter += i; } diff --git a/storage/xtradb/include/page0zip.ic b/storage/xtradb/include/page0zip.ic index 6c7d8cd32c7..9a583086925 100644 --- a/storage/xtradb/include/page0zip.ic +++ b/storage/xtradb/include/page0zip.ic @@ -2,6 +2,7 @@ Copyright (c) 2005, 2013, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -172,7 +173,8 @@ page_zip_rec_needs_ext( ignored if zip_size == 0 */ ulint zip_size) /*!< in: compressed page size in bytes, or 0 */ { - ut_ad(rec_size > comp ? REC_N_NEW_EXTRA_BYTES : REC_N_OLD_EXTRA_BYTES); + ut_ad(rec_size + > (comp ? REC_N_NEW_EXTRA_BYTES : REC_N_OLD_EXTRA_BYTES)); ut_ad(ut_is_2pow(zip_size)); ut_ad(comp || !zip_size); diff --git a/storage/xtradb/include/row0mysql.h b/storage/xtradb/include/row0mysql.h index 2bd17980896..a8503a5cfda 100644 --- a/storage/xtradb/include/row0mysql.h +++ b/storage/xtradb/include/row0mysql.h @@ -614,7 +614,7 @@ struct mysql_row_templ_t { Innobase record in the current index; not defined if template_type is ROW_MYSQL_WHOLE_ROW */ - ibool rec_field_is_prefix; /* is this field in a prefix index? */ + bool rec_field_is_prefix; /* is this field in a prefix index? */ ulint rec_prefix_field_no; /* record field, even if just a prefix; same as rec_field_no when not a prefix, otherwise rec_field_no is diff --git a/storage/xtradb/include/srv0srv.h b/storage/xtradb/include/srv0srv.h index cf7824d91e7..4e98ce0f1cb 100644 --- a/storage/xtradb/include/srv0srv.h +++ b/storage/xtradb/include/srv0srv.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All rights reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2008, 2009, Google Inc. Copyright (c) 2009, Percona Inc. Copyright (c) 2013, 2017, MariaDB Corporation. @@ -1096,6 +1096,13 @@ UNIV_INTERN void srv_purge_wakeup(); +/** Check whether given space id is undo tablespace id +@param[in] space_id space id to check +@return true if it is undo tablespace else false. 
*/ +bool +srv_is_undo_tablespace( + ulint space_id); + /** Status variables to be passed to MySQL */ struct export_var_t{ ulint innodb_adaptive_hash_hash_searches; diff --git a/storage/xtradb/include/srv0start.h b/storage/xtradb/include/srv0start.h index 963b767f0fb..b055a9d834f 100644 --- a/storage/xtradb/include/srv0start.h +++ b/storage/xtradb/include/srv0start.h @@ -1,6 +1,7 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -75,22 +76,12 @@ are not found and the user wants. @return DB_SUCCESS or error code */ UNIV_INTERN dberr_t -innobase_start_or_create_for_mysql(void); -/*====================================*/ -/****************************************************************//** -Shuts down the Innobase database. -@return DB_SUCCESS or error code */ -UNIV_INTERN -dberr_t -innobase_shutdown_for_mysql(void); +innobase_start_or_create_for_mysql(); -/******************************************************************** -Signal all per-table background threads to shutdown, and wait for them to do -so. */ +/** Shut down InnoDB. */ UNIV_INTERN void -srv_shutdown_table_bg_threads(void); -/*=============================*/ +innodb_shutdown(); /*************************************************************//** Copy the file path component of the physical file to parameter. It will @@ -139,6 +130,8 @@ extern ibool srv_startup_is_before_trx_rollback_phase; /** TRUE if a raw partition is in use */ extern ibool srv_start_raw_disk_in_use; +/** Undo tablespaces starts with space_id. 
*/ +extern ulint srv_undo_space_id_start; /** Shutdown state */ enum srv_shutdown_state { @@ -156,6 +149,9 @@ enum srv_shutdown_state { SRV_SHUTDOWN_EXIT_THREADS/*!< Exit all threads */ }; +/** Whether any undo log records can be generated */ +extern bool srv_undo_sources; + /** At a shutdown this value climbs from SRV_SHUTDOWN_NONE to SRV_SHUTDOWN_CLEANUP and then to SRV_SHUTDOWN_LAST_PHASE, and so on */ extern enum srv_shutdown_state srv_shutdown_state; diff --git a/storage/xtradb/include/trx0rec.h b/storage/xtradb/include/trx0rec.h index 359937e3583..a6e202d04e4 100644 --- a/storage/xtradb/include/trx0rec.h +++ b/storage/xtradb/include/trx0rec.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -212,10 +213,6 @@ UNIV_INTERN dberr_t trx_undo_report_row_operation( /*==========================*/ - ulint flags, /*!< in: if BTR_NO_UNDO_LOG_FLAG bit is - set, does nothing */ - ulint op_type, /*!< in: TRX_UNDO_INSERT_OP or - TRX_UNDO_MODIFY_OP */ que_thr_t* thr, /*!< in: query thread */ dict_index_t* index, /*!< in: clustered index */ const dtuple_t* clust_entry, /*!< in: in the case of an insert, @@ -233,7 +230,7 @@ trx_undo_report_row_operation( inserted undo log record, 0 if BTR_NO_UNDO_LOG flag was specified */ - MY_ATTRIBUTE((nonnull(3,4,10), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,2,8), warn_unused_result)); /******************************************************************//** Copies an undo record to heap. This function can be called if we know that the undo log record exists. 
@@ -313,10 +310,6 @@ record */ storage fields: used by purge to free the external storage */ -/* Operation type flags used in trx_undo_report_row_operation */ -#define TRX_UNDO_INSERT_OP 1 -#define TRX_UNDO_MODIFY_OP 2 - #ifndef UNIV_NONINL #include "trx0rec.ic" #endif diff --git a/storage/xtradb/include/trx0rseg.h b/storage/xtradb/include/trx0rseg.h index b9c84ef2b06..e2853df7045 100644 --- a/storage/xtradb/include/trx0rseg.h +++ b/storage/xtradb/include/trx0rseg.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -124,13 +125,13 @@ trx_rseg_mem_free( /*==============*/ trx_rseg_t* rseg); /*!< in, own: instance to free */ -/********************************************************************* -Creates a rollback segment. */ +/** Create a rollback segment. +@param[in] space undo tablespace ID +@return pointer to new rollback segment +@retval NULL on failure */ UNIV_INTERN trx_rseg_t* -trx_rseg_create( -/*============*/ - ulint space); /*!< in: id of UNDO tablespace */ +trx_rseg_create(ulint space); /******************************************************************** Get the number of unique rollback tablespaces in use except space id 0. diff --git a/storage/xtradb/include/trx0trx.h b/storage/xtradb/include/trx0trx.h index 766d61039b4..1b490eca2af 100644 --- a/storage/xtradb/include/trx0trx.h +++ b/storage/xtradb/include/trx0trx.h @@ -1,8 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2015, 2016, MariaDB Corporation. All Rights Reserved. 
- +Copyright (c) 2015, 2017, MariaDB Corporation This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -334,6 +333,24 @@ trx_print_low( /*!< in: mem_heap_get_size(trx->lock.lock_heap) */ MY_ATTRIBUTE((nonnull)); +#ifdef WITH_WSREP +/**********************************************************************//** +Prints info about a transaction. +Transaction information may be retrieved without having trx_sys->mutex acquired +so it may not be completely accurate. The caller must own lock_sys->mutex +and the trx must have some locks to make sure that it does not escape +without locking lock_sys->mutex. */ +UNIV_INTERN +void +wsrep_trx_print_locking( +/*==============*/ + FILE* f, /*!< in: output stream */ + const trx_t* trx, /*!< in: transaction */ + ulint max_query_len) /*!< in: max query length to print, + or 0 to use the default max length */ + MY_ATTRIBUTE((nonnull)); +#endif /* WITH_WSREP */ + /**********************************************************************//** Prints info about a transaction. The caller must hold lock_sys->mutex and trx_sys->mutex. diff --git a/storage/xtradb/include/trx0xa.h b/storage/xtradb/include/trx0xa.h index 7caddfb7ba4..4d5adc68dcd 100644 --- a/storage/xtradb/include/trx0xa.h +++ b/storage/xtradb/include/trx0xa.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -24,6 +24,8 @@ this program; if not, write to the Free Software Foundation, Inc., #ifndef XA_H #define XA_H +#include "handler.h" + /* * Transaction branch identification: XID and NULLXID: */ @@ -35,17 +37,6 @@ this program; if not, write to the Free Software Foundation, Inc., #define MAXGTRIDSIZE 64 /*!< maximum size in bytes of gtrid */ #define MAXBQUALSIZE 64 /*!< maximum size in bytes of bqual */ -/** X/Open XA distributed transaction identifier */ -struct xid_t { - long formatID; /*!< format identifier; -1 - means that the XID is null */ - long gtrid_length; /*!< value from 1 through 64 */ - long bqual_length; /*!< value from 1 through 64 */ - char data[XIDDATASIZE]; /*!< distributed transaction - identifier */ -}; -/** X/Open XA distributed transaction identifier */ -typedef struct xid_t XID; #endif /** X/Open XA distributed transaction status codes */ /* @{ */ diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i index 310053b9145..23c8c0a659d 100644 --- a/storage/xtradb/include/univ.i +++ b/storage/xtradb/include/univ.i @@ -45,10 +45,10 @@ Created 1/20/1994 Heikki Tuuri #define INNODB_VERSION_MAJOR 5 #define INNODB_VERSION_MINOR 6 -#define INNODB_VERSION_BUGFIX 35 +#define INNODB_VERSION_BUGFIX 36 #ifndef PERCONA_INNODB_VERSION -#define PERCONA_INNODB_VERSION 80.0 +#define PERCONA_INNODB_VERSION 82.0 #endif /* Enable UNIV_LOG_ARCHIVE in XtraDB */ @@ -146,14 +146,8 @@ HAVE_PSI_INTERFACE is defined. */ #if defined HAVE_PSI_INTERFACE && !defined UNIV_HOTBACKUP # define UNIV_PFS_MUTEX # define UNIV_PFS_RWLOCK -/* For I/O instrumentation, performance schema rely -on a native descriptor to identify the file, this -descriptor could conflict with our OS level descriptor. 
-Disable IO instrumentation on Windows until this is -resolved */ -# ifndef __WIN__ -# define UNIV_PFS_IO -# endif + +# define UNIV_PFS_IO # define UNIV_PFS_THREAD /* There are mutexes/rwlocks that we want to exclude from diff --git a/storage/xtradb/include/ut0rnd.ic b/storage/xtradb/include/ut0rnd.ic index 024c59e553b..987dfac03c1 100644 --- a/storage/xtradb/include/ut0rnd.ic +++ b/storage/xtradb/include/ut0rnd.ic @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -237,16 +238,22 @@ ut_fold_binary( switch (len & 0x7) { case 7: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 6: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 5: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 4: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 3: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 2: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 1: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); } diff --git a/storage/xtradb/lock/lock0lock.cc b/storage/xtradb/lock/lock0lock.cc index 717fbf02536..71612f66fcd 100644 --- a/storage/xtradb/lock/lock0lock.cc +++ b/storage/xtradb/lock/lock0lock.cc @@ -921,12 +921,18 @@ lock_reset_lock_and_trx_wait( const char* stmt2=NULL; size_t stmt_len; trx_id_t trx_id = 0; - stmt = innobase_get_stmt(lock->trx->mysql_thd, &stmt_len); + stmt = lock->trx->mysql_thd + ? 
innobase_get_stmt(lock->trx->mysql_thd, &stmt_len) + : NULL; if (lock->trx->lock.wait_lock && lock->trx->lock.wait_lock->trx) { trx_id = lock->trx->lock.wait_lock->trx->id; - stmt2 = innobase_get_stmt(lock->trx->lock.wait_lock->trx->mysql_thd, &stmt_len); + stmt2 = lock->trx->lock.wait_lock->trx->mysql_thd + ? innobase_get_stmt( + lock->trx->lock.wait_lock + ->trx->mysql_thd, &stmt_len) + : NULL; } ib_logf(IB_LOG_LEVEL_INFO, @@ -5636,13 +5642,11 @@ lock_rec_unlock( trx_mutex_exit(trx); stmt = innobase_get_stmt(trx->mysql_thd, &stmt_len); - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error: unlock row could not" - " find a %lu mode lock on the record\n", - (ulong) lock_mode); - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: current statement: %.*s\n", + + ib_logf(IB_LOG_LEVEL_ERROR, + "unlock row could not find a %u mode lock on the record;" + " statement=%.*s", + lock_mode, (int) stmt_len, stmt); return; diff --git a/storage/xtradb/log/log0log.cc b/storage/xtradb/log/log0log.cc index 309de7daaf8..833f3240369 100644 --- a/storage/xtradb/log/log0log.cc +++ b/storage/xtradb/log/log0log.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2009, Google Inc. Copyright (c) 2014, 2017, MariaDB Corporation. 
@@ -55,12 +55,13 @@ Created 12/9/1995 Heikki Tuuri #include "mem0mem.h" #include "buf0buf.h" #include "buf0flu.h" -#include "srv0srv.h" #include "lock0lock.h" #include "log0recv.h" #include "fil0fil.h" #include "dict0boot.h" -#include "dict0stats_bg.h" /* dict_stats_event */ +#include "dict0stats_bg.h" +#include "dict0stats_bg.h" +#include "btr0defragment.h" #include "srv0srv.h" #include "srv0start.h" #include "trx0sys.h" @@ -859,43 +860,16 @@ ibool log_calc_max_ages(void) /*===================*/ { - log_group_t* group; lsn_t margin; ulint free; - ibool success = TRUE; - lsn_t smallest_capacity; - lsn_t archive_margin; - lsn_t smallest_archive_margin; - - mutex_enter(&(log_sys->mutex)); - - group = UT_LIST_GET_FIRST(log_sys->log_groups); - - ut_ad(group); - smallest_capacity = LSN_MAX; - smallest_archive_margin = LSN_MAX; - - while (group) { - if (log_group_get_capacity(group) < smallest_capacity) { - - smallest_capacity = log_group_get_capacity(group); - } - - archive_margin = log_group_get_capacity(group) - - (group->file_size - LOG_FILE_HDR_SIZE) - - LOG_ARCHIVE_EXTRA_MARGIN; - - if (archive_margin < smallest_archive_margin) { - - smallest_archive_margin = archive_margin; - } - - group = UT_LIST_GET_NEXT(log_groups, group); - } + lsn_t smallest_capacity = ((srv_log_file_size_requested + << srv_page_size_shift) + - LOG_FILE_HDR_SIZE) + * srv_n_log_files; /* Add extra safety */ - smallest_capacity = smallest_capacity - smallest_capacity / 10; + smallest_capacity -= smallest_capacity / 10; /* For each OS thread we must reserve so much free space in the smallest log group that it can accommodate the log entries produced @@ -905,15 +879,16 @@ log_calc_max_ages(void) free = LOG_CHECKPOINT_FREE_PER_THREAD * (10 + srv_thread_concurrency) + LOG_CHECKPOINT_EXTRA_FREE; if (free >= smallest_capacity / 2) { - success = FALSE; - - goto failure; - } else { - margin = smallest_capacity - free; + ib_logf(IB_LOG_LEVEL_FATAL, + "The combined size of ib_logfiles" + " should be 
bigger than\n" + "InnoDB: 200 kB * innodb_thread_concurrency."); } - + margin = smallest_capacity - free; margin = margin - margin / 10; /* Add still some extra safety */ + mutex_enter(&log_sys->mutex); + log_sys->log_group_capacity = smallest_capacity; log_sys->max_modified_age_async = margin @@ -926,22 +901,17 @@ log_calc_max_ages(void) log_sys->max_checkpoint_age = margin; #ifdef UNIV_LOG_ARCHIVE - log_sys->max_archived_lsn_age = smallest_archive_margin; + lsn_t archive_margin = smallest_capacity + - (srv_log_file_size_requested - LOG_FILE_HDR_SIZE) + - LOG_ARCHIVE_EXTRA_MARGIN; + log_sys->max_archived_lsn_age = archive_margin; - log_sys->max_archived_lsn_age_async = smallest_archive_margin - - smallest_archive_margin / LOG_ARCHIVE_RATIO_ASYNC; + log_sys->max_archived_lsn_age_async = archive_margin + - archive_margin / LOG_ARCHIVE_RATIO_ASYNC; #endif /* UNIV_LOG_ARCHIVE */ -failure: - mutex_exit(&(log_sys->mutex)); - - if (!success) { - ib_logf(IB_LOG_LEVEL_FATAL, - "The combined size of ib_logfiles" - " should be bigger than\n" - "InnoDB: 200 kB * innodb_thread_concurrency."); - } + mutex_exit(&log_sys->mutex); - return(success); + return(true); } /******************************************************//** @@ -2804,7 +2774,7 @@ log_group_archive( /*==============*/ log_group_t* group) /*!< in: log group */ { - os_file_t file_handle; + pfs_os_file_t file_handle; lsn_t start_lsn; lsn_t end_lsn; char name[OS_FILE_MAX_PATH]; @@ -3618,6 +3588,8 @@ loop: thread_name = "lock_wait_timeout_thread"; } else if (srv_buf_dump_thread_active) { thread_name = "buf_dump_thread"; + } else if (btr_defragment_thread_active) { + thread_name = "btr_defragment_thread"; } else if (srv_fast_shutdown != 2 && trx_rollback_or_clean_is_active) { thread_name = "rollback of recovered transactions"; } else { @@ -3639,8 +3611,8 @@ wait_suspend_loop: switch (srv_get_active_thread_type()) { case SRV_NONE: - srv_shutdown_state = SRV_SHUTDOWN_FLUSH_PHASE; if (!srv_n_fil_crypt_threads_started) { + 
srv_shutdown_state = SRV_SHUTDOWN_FLUSH_PHASE; break; } os_event_set(fil_crypt_threads_event); @@ -3820,7 +3792,8 @@ wait_suspend_loop: ut_a(freed); ut_a(lsn == log_sys->lsn); - ut_ad(lsn == log_sys->last_checkpoint_lsn); + ut_ad(srv_force_recovery >= SRV_FORCE_NO_LOG_REDO + || lsn == log_sys->last_checkpoint_lsn); if (lsn < srv_start_lsn) { ib_logf(IB_LOG_LEVEL_ERROR, @@ -3832,9 +3805,14 @@ wait_suspend_loop: srv_shutdown_lsn = lsn; if (!srv_read_only_mode) { - fil_write_flushed_lsn_to_data_files(lsn, 0); + dberr_t err = fil_write_flushed_lsn(lsn); - fil_flush_file_spaces(FIL_TABLESPACE); + if (err != DB_SUCCESS) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Failed to write flush lsn to the " + "system tablespace at shutdown err=%s", + ut_strerr(err)); + } } fil_close_all_files(); diff --git a/storage/xtradb/log/log0online.cc b/storage/xtradb/log/log0online.cc index 74f2e2360a8..27382977e5c 100644 --- a/storage/xtradb/log/log0online.cc +++ b/storage/xtradb/log/log0online.cc @@ -329,7 +329,7 @@ log_online_read_last_tracked_lsn(void) lsn_t result; os_offset_t read_offset = log_bmp_sys->out.offset; - while (!checksum_ok && read_offset > 0 && !is_last_page) + while ((!checksum_ok || !is_last_page) && read_offset > 0) { read_offset -= MODIFIED_PAGE_BLOCK_SIZE; log_bmp_sys->out.offset = read_offset; @@ -554,9 +554,9 @@ log_online_rotate_bitmap_file( lsn_t next_file_start_lsn) /*!<in: the start LSN name part */ { - if (log_bmp_sys->out.file != os_file_invalid) { + if (!os_file_is_invalid(log_bmp_sys->out.file)) { os_file_close(log_bmp_sys->out.file); - log_bmp_sys->out.file = os_file_invalid; + os_file_mark_invalid(&log_bmp_sys->out.file); } log_bmp_sys->out_seq_num++; log_online_make_bitmap_name(next_file_start_lsn); @@ -723,7 +723,11 @@ log_online_read_init(void) } last_tracked_lsn = log_online_read_last_tracked_lsn(); + /* Do not rotate if we truncated the file to zero length - we + can just start writing there */ + const bool need_rotate = (last_tracked_lsn != 0); if 
(!last_tracked_lsn) { + last_tracked_lsn = last_file_start_lsn; } @@ -735,7 +739,8 @@ log_online_read_init(void) } else { file_start_lsn = tracking_start_lsn; } - ut_a(log_online_rotate_bitmap_file(file_start_lsn)); + ut_a(!need_rotate + || log_online_rotate_bitmap_file(file_start_lsn)); if (last_tracked_lsn < tracking_start_lsn) { @@ -773,9 +778,9 @@ log_online_read_shutdown(void) ib_rbt_node_t *free_list_node = log_bmp_sys->page_free_list; - if (log_bmp_sys->out.file != os_file_invalid) { + if (!os_file_is_invalid(log_bmp_sys->out.file)) { os_file_close(log_bmp_sys->out.file); - log_bmp_sys->out.file = os_file_invalid; + os_file_mark_invalid(&log_bmp_sys->out.file); } rbt_free(log_bmp_sys->modified_pages); @@ -1114,6 +1119,18 @@ log_online_write_bitmap_page( } }); + /* A crash injection site that ensures last checkpoint LSN > last + tracked LSN, so that LSN tracking for this interval is tested. */ + DBUG_EXECUTE_IF("crash_before_bitmap_write", + { + ulint space_id + = mach_read_from_4(block + + MODIFIED_PAGE_SPACE_ID); + if (space_id > 0) + DBUG_SUICIDE(); + }); + + ibool success = os_file_write(log_bmp_sys->out.name, log_bmp_sys->out.file, block, log_bmp_sys->out.offset, @@ -1137,10 +1154,8 @@ log_online_write_bitmap_page( return FALSE; } -#ifdef UNIV_LINUX - posix_fadvise(log_bmp_sys->out.file, log_bmp_sys->out.offset, - MODIFIED_PAGE_BLOCK_SIZE, POSIX_FADV_DONTNEED); -#endif + os_file_advise(log_bmp_sys->out.file, log_bmp_sys->out.offset, + MODIFIED_PAGE_BLOCK_SIZE, OS_FILE_ADVISE_DONTNEED); log_bmp_sys->out.offset += MODIFIED_PAGE_BLOCK_SIZE; return TRUE; @@ -1262,10 +1277,6 @@ log_online_follow_redo_log(void) group = UT_LIST_GET_NEXT(log_groups, group); } - /* A crash injection site that ensures last checkpoint LSN > last - tracked LSN, so that LSN tracking for this interval is tested. 
*/ - DBUG_EXECUTE_IF("crash_before_bitmap_write", DBUG_SUICIDE();); - result = log_online_write_bitmap(); log_bmp_sys->start_lsn = log_bmp_sys->end_lsn; log_set_tracked_lsn(log_bmp_sys->start_lsn); @@ -1433,6 +1444,7 @@ log_online_setup_bitmap_file_range( if (UNIV_UNLIKELY(array_pos >= bitmap_files->count)) { log_online_diagnose_inconsistent_dir(bitmap_files); + os_file_closedir(bitmap_dir); return FALSE; } @@ -1535,10 +1547,8 @@ log_online_open_bitmap_file_read_only( bitmap_file->size = os_file_get_size(bitmap_file->file); bitmap_file->offset = 0; -#ifdef UNIV_LINUX - posix_fadvise(bitmap_file->file, 0, 0, POSIX_FADV_SEQUENTIAL); - posix_fadvise(bitmap_file->file, 0, 0, POSIX_FADV_NOREUSE); -#endif + os_file_advise(bitmap_file->file, 0, 0, OS_FILE_ADVISE_SEQUENTIAL); + os_file_advise(bitmap_file->file, 0, 0, OS_FILE_ADVISE_NOREUSE); return TRUE; } @@ -1624,7 +1634,7 @@ log_online_bitmap_iterator_init( /* Empty range */ i->in_files.count = 0; i->in_files.files = NULL; - i->in.file = os_file_invalid; + os_file_mark_invalid(&i->in.file); i->page = NULL; i->failed = FALSE; return TRUE; @@ -1642,7 +1652,7 @@ log_online_bitmap_iterator_init( if (i->in_files.count == 0) { /* Empty range */ - i->in.file = os_file_invalid; + os_file_mark_invalid(&i->in.file); i->page = NULL; i->failed = FALSE; return TRUE; @@ -1681,10 +1691,10 @@ log_online_bitmap_iterator_release( { ut_a(i); - if (i->in.file != os_file_invalid) { + if (!os_file_is_invalid(i->in.file)) { os_file_close(i->in.file); - i->in.file = os_file_invalid; + os_file_mark_invalid(&i->in.file); } if (i->in_files.files) { @@ -1738,8 +1748,9 @@ log_online_bitmap_iterator_next( /* Advance file */ i->in_i++; - success = os_file_close_no_error_handling(i->in.file); - i->in.file = os_file_invalid; + success = os_file_close_no_error_handling( + i->in.file); + os_file_mark_invalid(&i->in.file); if (UNIV_UNLIKELY(!success)) { os_file_get_last_error(TRUE); @@ -1848,7 +1859,7 @@ log_online_purge_changed_page_bitmaps( /* If we 
have to delete the current output file, close it first. */ os_file_close(log_bmp_sys->out.file); - log_bmp_sys->out.file = os_file_invalid; + os_file_mark_invalid(&log_bmp_sys->out.file); } for (i = 0; i < bitmap_files.count; i++) { diff --git a/storage/xtradb/log/log0recv.cc b/storage/xtradb/log/log0recv.cc index 978e6051711..fb64309cee4 100644 --- a/storage/xtradb/log/log0recv.cc +++ b/storage/xtradb/log/log0recv.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. Copyright (c) 2013, 2017, MariaDB Corporation. @@ -332,6 +332,7 @@ DECLARE_THREAD(recv_writer_thread)( /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); ut_ad(!srv_read_only_mode); #ifdef UNIV_PFS_THREAD @@ -362,6 +363,7 @@ DECLARE_THREAD(recv_writer_thread)( recv_writer_thread_active = false; + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit and not use return() to exit. */ @@ -3002,11 +3004,6 @@ recv_init_crash_recovery(void) possible */ if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) { - - ib_logf(IB_LOG_LEVEL_INFO, - "Restoring possible half-written data pages " - "from the doublewrite buffer..."); - buf_dblwr_process(); /* Spawn the background thread to flush dirty pages @@ -3017,22 +3014,22 @@ recv_init_crash_recovery(void) } } -/********************************************************//** -Recovers from a checkpoint. When this function returns, the database is able +/** Recovers from a checkpoint. When this function returns, the database is able to start processing of new user transactions, but the function recv_recovery_from_checkpoint_finish should be called later to complete the recovery and free the resources used in it. 
+@param[in] type LOG_CHECKPOINT or LOG_ARCHIVE +@param[in] limit_lsn recover up to this lsn if possible +@param[in] flushed_lsn flushed lsn from first data file @return error code or DB_SUCCESS */ UNIV_INTERN dberr_t recv_recovery_from_checkpoint_start_func( -/*=====================================*/ #ifdef UNIV_LOG_ARCHIVE - ulint type, /*!< in: LOG_CHECKPOINT or LOG_ARCHIVE */ - lsn_t limit_lsn, /*!< in: recover up to this lsn if possible */ + ulint type, + lsn_t limit_lsn, #endif /* UNIV_LOG_ARCHIVE */ - lsn_t min_flushed_lsn,/*!< in: min flushed lsn from data files */ - lsn_t max_flushed_lsn)/*!< in: max flushed lsn from data files */ + lsn_t flushed_lsn) { log_group_t* group; log_group_t* max_cp_group; @@ -3260,6 +3257,7 @@ recv_recovery_from_checkpoint_start_func( group = UT_LIST_GET_NEXT(log_groups, group); } + /* Done with startup scan. Clear the flag. */ recv_log_scan_is_startup_type = FALSE; @@ -3272,38 +3270,16 @@ recv_recovery_from_checkpoint_start_func( there is something wrong we will print a message to the user about recovery: */ - if (checkpoint_lsn != max_flushed_lsn - || checkpoint_lsn != min_flushed_lsn) { - - if (checkpoint_lsn < max_flushed_lsn) { - - ib_logf(IB_LOG_LEVEL_WARN, - "The log sequence number " - "in the ibdata files is higher " - "than the log sequence number " - "in the ib_logfiles! Are you sure " - "you are using the right " - "ib_logfiles to start up the database. 
" - "Log sequence number in the " - "ib_logfiles is " LSN_PF ", log" - "sequence numbers stamped " - "to ibdata file headers are between " - "" LSN_PF " and " LSN_PF ".", - checkpoint_lsn, - min_flushed_lsn, - max_flushed_lsn); - } - + if (checkpoint_lsn != flushed_lsn) { if (!recv_needed_recovery) { ib_logf(IB_LOG_LEVEL_INFO, - "The log sequence numbers " - LSN_PF " and " LSN_PF - " in ibdata files do not match" + "The log sequence number " + LSN_PF + " in ibdata file do not match" " the log sequence number " LSN_PF " in the ib_logfiles!", - min_flushed_lsn, - max_flushed_lsn, + flushed_lsn, checkpoint_lsn); if (!srv_read_only_mode) { diff --git a/storage/xtradb/mysql-test/storage_engine/suite.pm b/storage/xtradb/mysql-test/storage_engine/suite.pm new file mode 100644 index 00000000000..e186a532dcc --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/suite.pm @@ -0,0 +1,8 @@ +package My::Suite::SE::XtraDB; + +@ISA = qw(My::Suite); + +return "Need XtraDB engine"; + +bless { }; + diff --git a/storage/xtradb/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff b/storage/xtradb/mysql-test/storage_engine/tbl_opt_index_dir.rdiff index e09e50b17ec..e09e50b17ec 100644 --- a/storage/xtradb/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff +++ b/storage/xtradb/mysql-test/storage_engine/tbl_opt_index_dir.rdiff diff --git a/storage/xtradb/os/os0file.cc b/storage/xtradb/os/os0file.cc index 4f219b18428..7fbee0312ee 100644 --- a/storage/xtradb/os/os0file.cc +++ b/storage/xtradb/os/os0file.cc @@ -1,6 +1,6 @@ /*********************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2009, Percona Inc. Copyright (c) 2013, 2017, MariaDB Corporation. 
@@ -114,10 +114,12 @@ my_umask */ #ifndef __WIN__ /** Umask for creating files */ UNIV_INTERN ulint os_innodb_umask = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP; +# define os_file_invalid (-1) #else /** Umask for creating files */ UNIV_INTERN ulint os_innodb_umask = 0; -#define ECANCELED 125 +# define ECANCELED 125 +# define os_file_invalid INVALID_HANDLE_VALUE #endif /* __WIN__ */ #ifndef UNIV_HOTBACKUP @@ -221,7 +223,7 @@ struct os_aio_slot_t{ ulint page_size; /*!< UNIV_PAGE_SIZE or zip_size */ os_offset_t offset; /*!< file offset in bytes */ - os_file_t file; /*!< file where to read or write */ + pfs_os_file_t file; /*!< file where to read or write */ const char* name; /*!< file name or path */ ibool io_already_done;/*!< used only in simulated aio: TRUE if the physical i/o already @@ -1568,7 +1570,7 @@ A simple function to open or create a file. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INTERN -os_file_t +pfs_os_file_t os_file_create_simple_no_error_handling_func( /*=========================================*/ const char* name, /*!< in: name of the file or path as a @@ -1584,7 +1586,7 @@ os_file_create_simple_no_error_handling_func( ulint atomic_writes) /*! 
in: atomic writes table option value */ { - os_file_t file; + pfs_os_file_t file; atomic_writes_t awrites = (atomic_writes_t) atomic_writes; *success = FALSE; @@ -1593,7 +1595,6 @@ os_file_create_simple_no_error_handling_func( DWORD create_flag; DWORD attributes = 0; DWORD share_mode = FILE_SHARE_READ; - ut_a(name); ut_a(!(create_mode & OS_FILE_ON_ERROR_SILENT)); @@ -1610,8 +1611,8 @@ os_file_create_simple_no_error_handling_func( ib_logf(IB_LOG_LEVEL_ERROR, "Unknown file create mode (%lu) for file '%s'", create_mode, name); - - return((os_file_t) -1); + file = INVALID_HANDLE_VALUE; + return(file); } if (access_type == OS_FILE_READ_ONLY) { @@ -1635,8 +1636,8 @@ os_file_create_simple_no_error_handling_func( ib_logf(IB_LOG_LEVEL_ERROR, "Unknown file access type (%lu) for file '%s'", access_type, name); - - return((os_file_t) -1); + file = INVALID_HANDLE_VALUE; + return(file); } if (IS_XTRABACKUP()) { @@ -1668,11 +1669,10 @@ os_file_create_simple_no_error_handling_func( } } - *success = (file != INVALID_HANDLE_VALUE); + *success = file != INVALID_HANDLE_VALUE; #else /* __WIN__ */ int create_flag; const char* mode_str = NULL; - ut_a(name); if (create_mode != OS_FILE_OPEN && create_mode != OS_FILE_OPEN_RAW) WAIT_ALLOW_WRITES(); @@ -1717,13 +1717,13 @@ os_file_create_simple_no_error_handling_func( ib_logf(IB_LOG_LEVEL_ERROR, "Unknown file create mode (%lu) for file '%s'", create_mode, name); - - return((os_file_t) -1); + file = -1; + return(file); } - file = ::open(name, create_flag, os_innodb_umask); + file = open(name, create_flag, os_innodb_umask); - *success = file == -1 ? FALSE : TRUE; + *success = file != -1; /* This function is always called for data files, we should disable OS caching (O_DIRECT) here as we do in os_file_create_func(), so @@ -1872,7 +1872,7 @@ Opens an existing file or creates a new. 
@return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INTERN -os_file_t +pfs_os_file_t os_file_create_func( /*================*/ const char* name, /*!< in: name of the file or path as a @@ -1890,7 +1890,7 @@ os_file_create_func( ulint atomic_writes) /*! in: atomic writes table option value */ { - os_file_t file; + pfs_os_file_t file; ibool retry; ibool on_error_no_exit; ibool on_error_silent; @@ -1901,14 +1901,16 @@ os_file_create_func( "ib_create_table_fail_disk_full", *success = FALSE; SetLastError(ERROR_DISK_FULL); - return((os_file_t) -1); + file = INVALID_HANDLE_VALUE; + return(file); ); #else /* __WIN__ */ DBUG_EXECUTE_IF( "ib_create_table_fail_disk_full", *success = FALSE; errno = ENOSPC; - return((os_file_t) -1); + file = -1; + return(file); ); #endif /* __WIN__ */ @@ -1962,7 +1964,8 @@ os_file_create_func( "Unknown file create mode (%lu) for file '%s'", create_mode, name); - return((os_file_t) -1); + file = INVALID_HANDLE_VALUE; + return(file); } DWORD attributes = 0; @@ -1986,8 +1989,8 @@ os_file_create_func( ib_logf(IB_LOG_LEVEL_ERROR, "Unknown purpose flag (%lu) while opening file '%s'", purpose, name); - - return((os_file_t)(-1)); + file = INVALID_HANDLE_VALUE; + return(file); } #ifdef UNIV_NON_BUFFERED_IO @@ -2113,7 +2116,8 @@ os_file_create_func( "Unknown file create mode (%lu) for file '%s'", create_mode, name); - return((os_file_t) -1); + file = -1; + return(file); } ut_a(type == OS_LOG_FILE || type == OS_DATA_FILE); @@ -2133,7 +2137,7 @@ os_file_create_func( #endif /* O_SYNC */ do { - file = ::open(name, create_flag, os_innodb_umask); + file = open(name, create_flag, os_innodb_umask); if (file == -1) { const char* operation; @@ -2442,8 +2446,8 @@ os_file_close_func( Closes a file handle. 
@return TRUE if success */ UNIV_INTERN -ibool -os_file_close_no_error_handling( +bool +os_file_close_no_error_handling_func( /*============================*/ os_file_t file) /*!< in, own: handle to a file */ { @@ -2453,10 +2457,10 @@ os_file_close_no_error_handling( ret = CloseHandle(file); if (ret) { - return(TRUE); + return(true); } - return(FALSE); + return(false); #else int ret; @@ -2464,10 +2468,83 @@ os_file_close_no_error_handling( if (ret == -1) { - return(FALSE); + return(false); } - return(TRUE); + return(true); +#endif /* __WIN__ */ +} + +#ifdef HAVE_POSIX_FALLOCATE +/***********************************************************************//** +Ensures that disk space is allocated for the file. +@return TRUE if success */ +UNIV_INTERN +bool +os_file_allocate_func( + os_file_t file, /*!< in, own: handle to a file */ + os_offset_t offset, /*!< in: file region offset */ + os_offset_t len) /*!< in: file region length */ +{ + return(posix_fallocate(file, offset, len) == 0); +} +#endif + +/***********************************************************************//** +Checks if the file is marked as invalid. +@return TRUE if invalid */ +UNIV_INTERN +bool +os_file_is_invalid( + pfs_os_file_t file) /*!< in, own: handle to a file */ +{ + return(file == os_file_invalid); +} + +/***********************************************************************//** +Marks the file as invalid. */ +UNIV_INTERN +void +os_file_mark_invalid( + pfs_os_file_t* file) /*!< out: pointer to a handle to a file */ +{ + file->m_file = os_file_invalid; +} + +/***********************************************************************//** +Announces an intention to access file data in a specific pattern in the +future. 
+@return TRUE if success */ +UNIV_INTERN +bool +os_file_advise( + pfs_os_file_t file, /*!< in, own: handle to a file */ + os_offset_t offset, /*!< in: file region offset */ + os_offset_t len, /*!< in: file region length */ + ulint advice)/*!< in: advice for access pattern */ +{ +#ifdef __WIN__ + return(true); +#else +#ifdef UNIV_LINUX + int native_advice = 0; + if ((advice & OS_FILE_ADVISE_NORMAL) != 0) + native_advice |= POSIX_FADV_NORMAL; + if ((advice & OS_FILE_ADVISE_RANDOM) != 0) + native_advice |= POSIX_FADV_RANDOM; + if ((advice & OS_FILE_ADVISE_SEQUENTIAL) != 0) + native_advice |= POSIX_FADV_SEQUENTIAL; + if ((advice & OS_FILE_ADVISE_WILLNEED) != 0) + native_advice |= POSIX_FADV_WILLNEED; + if ((advice & OS_FILE_ADVISE_DONTNEED) != 0) + native_advice |= POSIX_FADV_DONTNEED; + if ((advice & OS_FILE_ADVISE_NOREUSE) != 0) + native_advice |= POSIX_FADV_NOREUSE; + + return(posix_fadvise(file, offset, len, native_advice) == 0); +#else + return(true); +#endif #endif /* __WIN__ */ } @@ -2478,7 +2555,7 @@ UNIV_INTERN os_offset_t os_file_get_size( /*=============*/ - os_file_t file) /*!< in: handle to a file */ + pfs_os_file_t file) /*!< in: handle to a file */ { #ifdef __WIN__ os_offset_t offset; @@ -2496,6 +2573,7 @@ os_file_get_size( return(offset); #else return((os_offset_t) lseek(file, 0, SEEK_END)); + #endif /* __WIN__ */ } @@ -2509,7 +2587,7 @@ UNIV_INTERN bool os_file_set_size( const char* name, - os_file_t file, + pfs_os_file_t file, os_offset_t size, bool is_sparse) { @@ -2618,8 +2696,8 @@ os_file_set_eof( Truncates a file at the specified position. 
@return TRUE if success */ UNIV_INTERN -ibool -os_file_set_eof_at( +bool +os_file_set_eof_at_func( os_file_t file, /*!< in: handle to a file */ ib_uint64_t new_len)/*!< in: new file length */ { @@ -4323,7 +4401,7 @@ os_aio_init( #ifdef _WIN32 ut_a(completion_port == 0 && read_completion_port == 0); completion_port = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0); - read_completion_port = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0); + read_completion_port = srv_read_only_mode? completion_port : CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0); ut_a(completion_port && read_completion_port); #endif @@ -4553,7 +4631,7 @@ os_aio_array_reserve_slot( the aio operation */ void* message2,/*!< in: message to be passed along with the aio operation */ - os_file_t file, /*!< in: file handle */ + pfs_os_file_t file, /*!< in: file handle */ const char* name, /*!< in: name of the file or path as a null-terminated string */ void* buf, /*!< in: buffer where to read or from which @@ -4928,7 +5006,7 @@ os_aio_func( caution! 
*/ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read or from which to write */ os_offset_t offset, /*!< in: file offset where to read or write */ @@ -4958,7 +5036,6 @@ os_aio_func( BOOL ret; #endif ulint wake_later; - ut_ad(buf); ut_ad(n > 0); ut_ad(n % OS_MIN_LOG_BLOCK_SIZE == 0); @@ -5205,7 +5282,6 @@ os_aio_windows_handle( break; } } - *message1 = slot->message1; *message2 = slot->message2; @@ -5229,12 +5305,14 @@ os_aio_windows_handle( switch (slot->type) { case OS_FILE_WRITE: - ret_val = os_file_write(slot->name, slot->file, slot->buf, - slot->offset, slot->len); + ret_val = os_file_write( + slot->name, slot->file, slot->buf, + slot->offset, slot->len); break; case OS_FILE_READ: - ret_val = os_file_read(slot->file, slot->buf, - slot->offset, slot->len); + ret_val = os_file_read( + slot->file, slot->buf, + slot->offset, slot->len); break; default: ut_error; @@ -5503,12 +5581,14 @@ found: iocb = &(slot->control); if (slot->type == OS_FILE_READ) { - io_prep_pread(&slot->control, slot->file, slot->buf, - slot->len, (off_t) slot->offset); + io_prep_pread(&slot->control, slot->file, + slot->buf, slot->len, + (off_t) slot->offset); } else { ut_a(slot->type == OS_FILE_WRITE); - io_prep_pwrite(&slot->control, slot->file, slot->buf, - slot->len, (off_t) slot->offset); + io_prep_pwrite(&slot->control, slot->file, + slot->buf, slot->len, + (off_t) slot->offset); } /* Resubmit an I/O request */ submit_ret = io_submit(array->aio_ctx[segment], 1, &iocb); @@ -5742,7 +5822,6 @@ consecutive_loop: os_aio_slot_t* slot; slot = os_aio_array_get_nth_slot(array, i + segment * n); - if (slot->reserved && slot != aio_slot && slot->offset == aio_slot->offset + aio_slot->len @@ -6296,7 +6375,9 @@ os_file_trim( #ifdef __linux__ #if defined(HAVE_FALLOC_PUNCH_HOLE_AND_KEEP_SIZE) - int ret = fallocate(slot->file, 
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, trim_len); + int ret = fallocate(slot->file, + FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, + off, trim_len); if (ret) { /* After first failure do not try to trim again */ @@ -6342,22 +6423,27 @@ os_file_trim( flt.Ranges[0].Offset = off; flt.Ranges[0].Length = trim_len; + OVERLAPPED overlapped = { 0 }; + overlapped.hEvent = win_get_syncio_event(); BOOL ret = DeviceIoControl(slot->file, FSCTL_FILE_LEVEL_TRIM, - &flt, sizeof(flt), NULL, NULL, NULL, NULL); - + &flt, sizeof(flt), NULL, NULL, NULL, &overlapped); + DWORD tmp; + if (ret) { + ret = GetOverlappedResult(slot->file, &overlapped, &tmp, FALSE); + } + else if (GetLastError() == ERROR_IO_PENDING) { + ret = GetOverlappedResult(slot->file, &overlapped, &tmp, TRUE); + } if (!ret) { + DWORD last_error = GetLastError(); /* After first failure do not try to trim again */ os_fallocate_failed = true; srv_use_trim = FALSE; ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Warning: fallocate call failed with error.\n" - " InnoDB: start: %lu len: %lu payload: %lu\n" - " InnoDB: Disabling fallocate for now.\n", off, trim_len, len); - os_file_handle_error_no_exit(slot->name, - " DeviceIOControl(FSCTL_FILE_LEVEL_TRIM) ", - FALSE, __FILE__, __LINE__); + fprintf(stderr, + " InnoDB: Warning: DeviceIoControl(FSCTL_FILE_LEVEL_TRIM) call failed with error %u%s. Disabling trimming.\n", + last_error, last_error == ERROR_NOT_SUPPORTED ? 
"(ERROR_NOT_SUPPORTED)" : ""); if (slot->write_size) { *slot->write_size = 0; diff --git a/storage/xtradb/rem/rem0rec.cc b/storage/xtradb/rem/rem0rec.cc index 6770748c38b..c62e8c90434 100644 --- a/storage/xtradb/rem/rem0rec.cc +++ b/storage/xtradb/rem/rem0rec.cc @@ -1293,8 +1293,10 @@ rec_convert_dtuple_to_rec_comp( } } - memcpy(end, dfield_get_data(field), len); - end += len; + if (len) { + memcpy(end, dfield_get_data(field), len); + end += len; + } } } diff --git a/storage/xtradb/row/row0ftsort.cc b/storage/xtradb/row/row0ftsort.cc index 4542aa31a6c..7ffcc59dc5f 100644 --- a/storage/xtradb/row/row0ftsort.cc +++ b/storage/xtradb/row/row0ftsort.cc @@ -249,9 +249,6 @@ row_fts_psort_info_init( each parallel sort thread. Each "sort bucket" holds records for a particular "FTS index partition" */ for (j = 0; j < fts_sort_pll_degree; j++) { - - UT_LIST_INIT(psort_info[j].fts_doc_list); - for (i = 0; i < FTS_NUM_AUX_INDEX; i++) { psort_info[j].merge_file[i] = diff --git a/storage/xtradb/row/row0import.cc b/storage/xtradb/row/row0import.cc index 81d6fda9e53..86b2d782b7b 100644 --- a/storage/xtradb/row/row0import.cc +++ b/storage/xtradb/row/row0import.cc @@ -1995,6 +1995,7 @@ PageConverter::update_page( case FIL_PAGE_TYPE_XDES: err = set_current_xdes( buf_block_get_page_no(block), get_frame(block)); + /* fall through */ case FIL_PAGE_INODE: case FIL_PAGE_TYPE_TRX_SYS: case FIL_PAGE_IBUF_FREE_LIST: @@ -3617,7 +3618,7 @@ row_import_for_mysql( err = fil_open_single_table_tablespace( true, true, table->space, dict_tf_to_fsp_flags(table->flags), - table->name, filepath, table); + table->name, filepath); DBUG_EXECUTE_IF("ib_import_open_tablespace_failure", err = DB_TABLESPACE_NOT_FOUND;); diff --git a/storage/xtradb/row/row0ins.cc b/storage/xtradb/row/row0ins.cc index f4f96d32c50..6072b303d3a 100644 --- a/storage/xtradb/row/row0ins.cc +++ b/storage/xtradb/row/row0ins.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 
1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2166,14 +2167,10 @@ for a clustered index! @retval DB_SUCCESS if no error @retval DB_DUPLICATE_KEY if error, @retval DB_LOCK_WAIT if we have to wait for a lock on a possible duplicate -record -@retval DB_SUCCESS_LOCKED_REC if an exact match of the record was found -in online table rebuild (flags & (BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG)) */ +record */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_ins_duplicate_error_in_clust( -/*=============================*/ - ulint flags, /*!< in: undo logging and locking flags */ btr_cur_t* cursor, /*!< in: B-tree cursor */ const dtuple_t* entry, /*!< in: entry to insert */ que_thr_t* thr, /*!< in: query thread */ @@ -2454,7 +2451,7 @@ row_ins_clust_index_entry_low( DB_LOCK_WAIT */ err = row_ins_duplicate_error_in_clust( - flags, &cursor, entry, thr, &mtr); + &cursor, entry, thr, &mtr); } if (err != DB_SUCCESS) { diff --git a/storage/xtradb/row/row0log.cc b/storage/xtradb/row/row0log.cc index 666b59b42db..2cd663fd600 100644 --- a/storage/xtradb/row/row0log.cc +++ b/storage/xtradb/row/row0log.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -367,9 +368,9 @@ row_log_online_op( goto err_exit; } - ret = os_file_write( + ret = os_file_write_int_fd( "(modification log)", - OS_FILE_FROM_FD(log->fd), + log->fd, log->tail.block, byte_offset, srv_sort_buf_size); log->tail.blocks++; if (!ret) { @@ -483,9 +484,9 @@ row_log_table_close_func( goto err_exit; } - ret = os_file_write( + ret = os_file_write_int_fd( "(modification log)", - OS_FILE_FROM_FD(log->fd), + log->fd, log->tail.block, byte_offset, srv_sort_buf_size); log->tail.blocks++; if (!ret) { @@ -1880,6 +1881,7 @@ row_log_table_apply_update( When applying the subsequent ROW_T_DELETE, no matching record will be found. */ + /* fall through */ case DB_SUCCESS: ut_ad(row != NULL); break; @@ -2617,11 +2619,10 @@ all_done: goto func_exit; } - success = os_file_read_no_error_handling( - OS_FILE_FROM_FD(index->online_log->fd), + success = os_file_read_no_error_handling_int_fd( + index->online_log->fd, index->online_log->head.block, ofs, srv_sort_buf_size); - if (!success) { fprintf(stderr, "InnoDB: unable to read temporary file" " for table %s\n", index->table_name); @@ -3444,8 +3445,8 @@ all_done: goto func_exit; } - success = os_file_read_no_error_handling( - OS_FILE_FROM_FD(index->online_log->fd), + success = os_file_read_no_error_handling_int_fd( + index->online_log->fd, index->online_log->head.block, ofs, srv_sort_buf_size); diff --git a/storage/xtradb/row/row0merge.cc b/storage/xtradb/row/row0merge.cc index 57b08801225..6a1298087eb 100644 --- a/storage/xtradb/row/row0merge.cc +++ b/storage/xtradb/row/row0merge.cc @@ -967,8 +967,8 @@ row_merge_read( } #endif /* UNIV_DEBUG */ - success = os_file_read_no_error_handling(OS_FILE_FROM_FD(fd), buf, - ofs, srv_sort_buf_size); + success = os_file_read_no_error_handling_int_fd(fd, buf, + ofs, srv_sort_buf_size); /* For encrypted tables, decrypt data after 
reading and copy data */ if (crypt_data && crypt_buf) { @@ -1023,7 +1023,7 @@ row_merge_write( mach_write_to_4((byte *)out_buf, 0); } - ret = os_file_write("(merge)", OS_FILE_FROM_FD(fd), out_buf, ofs, buf_len); + ret = os_file_write_int_fd("(merge)", fd, out_buf, ofs, buf_len); #ifdef UNIV_DEBUG if (row_merge_print_block_write) { @@ -3427,14 +3427,21 @@ row_merge_file_create_low( performance schema */ struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; - register_pfs_file_open_begin(&state, locker, innodb_file_temp_key, - PSI_FILE_OPEN, - "Innodb Merge Temp File", - __FILE__, __LINE__); + locker = PSI_FILE_CALL(get_thread_file_name_locker)( + &state, innodb_file_temp_key, PSI_FILE_OPEN, + "Innodb Merge Temp File", &locker); + if (locker != NULL) { + PSI_FILE_CALL(start_file_open_wait)(locker, + __FILE__, + __LINE__); + } #endif fd = innobase_mysql_tmpfile(path); #ifdef UNIV_PFS_IO - register_pfs_file_open_end(locker, fd); + if (locker != NULL) { + PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)( + locker, fd); + } #endif if (fd < 0) { @@ -3481,15 +3488,20 @@ row_merge_file_destroy_low( #ifdef UNIV_PFS_IO struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; - register_pfs_file_io_begin(&state, locker, - fd, 0, PSI_FILE_CLOSE, - __FILE__, __LINE__); + locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( + &state, fd, PSI_FILE_CLOSE); + if (locker != NULL) { + PSI_FILE_CALL(start_file_wait)( + locker, 0, __FILE__, __LINE__); + } #endif if (fd >= 0) { close(fd); } #ifdef UNIV_PFS_IO - register_pfs_file_io_end(locker, 0); + if (locker != NULL) { + PSI_FILE_CALL(end_file_wait)(locker, 0); + } #endif } /*********************************************************************//** @@ -4025,6 +4037,7 @@ row_merge_build_indexes( for (i = 0; i < n_indexes; i++) { merge_files[i].fd = -1; + merge_files[i].offset = 0; } total_static_cost = COST_BUILD_INDEX_STATIC * n_indexes + COST_READ_CLUSTERED_INDEX; diff --git 
a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc index 463981f51dd..0079fc79a0e 100644 --- a/storage/xtradb/row/row0mysql.cc +++ b/storage/xtradb/row/row0mysql.cc @@ -4272,18 +4272,6 @@ row_drop_table_for_mysql( rw_lock_x_unlock(dict_index_get_lock(index)); } - /* If table has not yet have crypt_data, try to read it to - make freeing the table easier. */ - if (!table->crypt_data) { - - if (fil_space_t* space = fil_space_acquire_silent(table->space)) { - /* We use crypt data in dict_table_t in ha_innodb.cc - to push warnings to user thread. */ - table->crypt_data = space->crypt_data; - fil_space_release(space); - } - } - /* We use the private SQL parser of Innobase to generate the query graphs needed in deleting the dictionary data from system tables in Innobase. Deleting a row from SYS_INDEXES table also @@ -5542,7 +5530,8 @@ loop: fputs(" InnoDB: Warning: CHECK TABLE on ", stderr); dict_index_name_print(stderr, prebuilt->trx, index); fprintf(stderr, " returned %lu\n", ret); - /* fall through (this error is ignored by CHECK TABLE) */ + /* (this error is ignored by CHECK TABLE) */ + /* fall through */ case DB_END_OF_INDEX: func_exit: mem_free(buf); diff --git a/storage/xtradb/row/row0purge.cc b/storage/xtradb/row/row0purge.cc index 8a1dbd6f69f..333677edf21 100644 --- a/storage/xtradb/row/row0purge.cc +++ b/storage/xtradb/row/row0purge.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -488,8 +489,9 @@ row_purge_remove_sec_if_poss_leaf( success = false; } } - /* fall through (the index entry is still needed, + /* (The index entry is still needed, or the deletion succeeded) */ + /* fall through */ case ROW_NOT_DELETED_REF: /* The index entry is still needed. */ case ROW_BUFFERED: diff --git a/storage/xtradb/row/row0sel.cc b/storage/xtradb/row/row0sel.cc index 7d4435eba5b..8e3ed3d1a4e 100644 --- a/storage/xtradb/row/row0sel.cc +++ b/storage/xtradb/row/row0sel.cc @@ -66,6 +66,8 @@ Created 12/19/1997 Heikki Tuuri #include "my_compare.h" /* enum icp_result */ +#include <vector> + /* Maximum number of rows to prefetch; MySQL interface has another parameter */ #define SEL_MAX_N_PREFETCH 16 @@ -2715,7 +2717,8 @@ row_sel_field_store_in_mysql_format_func( || !(templ->mysql_col_len % templ->mbmaxlen)); ut_ad(len * templ->mbmaxlen >= templ->mysql_col_len || (field_no == templ->icp_rec_field_no - && field->prefix_len > 0)); + && field->prefix_len > 0) + || templ->rec_field_is_prefix); ut_ad(!(field->prefix_len % templ->mbmaxlen)); if (templ->mbminlen == 1 && templ->mbmaxlen != 1) { @@ -2757,27 +2760,32 @@ row_sel_field_store_in_mysql_format_func( # define row_sel_store_mysql_field(m,p,r,i,o,f,t) \ row_sel_store_mysql_field_func(m,p,r,o,f,t) #endif /* UNIV_DEBUG */ -/**************************************************************//** -Convert a field in the Innobase format to a field in the MySQL format. */ +/** Convert a field in the Innobase format to a field in the MySQL format. 
+@param[out] mysql_rec record in the MySQL format +@param[in,out] prebuilt prebuilt struct +@param[in] rec InnoDB record; must be protected + by a page latch +@param[in] index index of rec +@param[in] offsets array returned by rec_get_offsets() +@param[in] field_no templ->rec_field_no or + templ->clust_rec_field_no + or templ->icp_rec_field_no + or sec field no if clust_templ_for_sec + is TRUE +@param[in] templ row template +*/ static MY_ATTRIBUTE((warn_unused_result)) ibool row_sel_store_mysql_field_func( -/*===========================*/ - byte* mysql_rec, /*!< out: record in the - MySQL format */ - row_prebuilt_t* prebuilt, /*!< in/out: prebuilt struct */ - const rec_t* rec, /*!< in: InnoDB record; - must be protected by - a page latch */ + byte* mysql_rec, + row_prebuilt_t* prebuilt, + const rec_t* rec, #ifdef UNIV_DEBUG - const dict_index_t* index, /*!< in: index of rec */ + const dict_index_t* index, #endif - const ulint* offsets, /*!< in: array returned by - rec_get_offsets() */ - ulint field_no, /*!< in: templ->rec_field_no or - templ->clust_rec_field_no or - templ->icp_rec_field_no */ - const mysql_row_templ_t*templ) /*!< in: row template */ + const ulint* offsets, + ulint field_no, + const mysql_row_templ_t*templ) { const byte* data; ulint len; @@ -2906,31 +2914,31 @@ row_sel_store_mysql_field_func( return(TRUE); } -/**************************************************************//** -Convert a row in the Innobase format to a row in the MySQL format. +/** Convert a row in the Innobase format to a row in the MySQL format. Note that the template in prebuilt may advise us to copy only a few columns to mysql_rec, other columns are left blank. All columns may not be needed in the query. 
+@param[out] mysql_rec row in the MySQL format +@param[in] prebuilt prebuilt structure +@param[in] rec Innobase record in the index + which was described in prebuilt's + template, or in the clustered index; + must be protected by a page latch +@param[in] rec_clust TRUE if the rec in the clustered index +@param[in] index index of rec +@param[in] offsets array returned by rec_get_offsets(rec) @return TRUE on success, FALSE if not all columns could be retrieved */ static MY_ATTRIBUTE((warn_unused_result)) ibool row_sel_store_mysql_rec( -/*====================*/ - byte* mysql_rec, /*!< out: row in the MySQL format */ - row_prebuilt_t* prebuilt, /*!< in: prebuilt struct */ - const rec_t* rec, /*!< in: Innobase record in the index - which was described in prebuilt's - template, or in the clustered index; - must be protected by a page latch */ - ibool rec_clust, /*!< in: TRUE if rec is in the - clustered index instead of - prebuilt->index */ - const dict_index_t* index, /*!< in: index of rec */ - const ulint* offsets) /*!< in: array returned by - rec_get_offsets(rec) */ + byte* mysql_rec, + row_prebuilt_t* prebuilt, + const rec_t* rec, + ibool rec_clust, + const dict_index_t* index, + const ulint* offsets) { ulint i; - ut_ad(rec_clust || index == prebuilt->index); ut_ad(!rec_clust || dict_index_is_clust(index)); @@ -2946,12 +2954,14 @@ row_sel_store_mysql_rec( ? templ->clust_rec_field_no : templ->rec_field_no; /* We should never deliver column prefixes to MySQL, - except for evaluating innobase_index_cond(). */ + except for evaluating innobase_index_cond() and if the prefix + index is longer than the actual row data. */ /* ...actually, we do want to do this in order to support the prefix query optimization. ut_ad(dict_index_get_nth_field(index, field_no)->prefix_len - == 0); + == 0 || templ->rec_field_is_prefix); + ...so we disable this assert. 
*/ @@ -3681,7 +3691,7 @@ row_search_for_mysql( trx_t* trx = prebuilt->trx; dict_index_t* clust_index; que_thr_t* thr; - const rec_t* rec; + const rec_t* rec = NULL; const rec_t* result_rec = NULL; const rec_t* clust_rec; dberr_t err = DB_SUCCESS; @@ -3706,7 +3716,7 @@ row_search_for_mysql( ulint* offsets = offsets_; ibool table_lock_waited = FALSE; byte* next_buf = 0; - ibool use_clustered_index = FALSE; + bool use_clustered_index = false; rec_offs_init(offsets_); @@ -3966,7 +3976,8 @@ row_search_for_mysql( if (!row_sel_store_mysql_rec( buf, prebuilt, - rec, FALSE, index, offsets)) { + rec, FALSE, index, + offsets)) { /* Only fresh inserts may contain incomplete externally stored columns. Pretend that such @@ -4248,7 +4259,6 @@ rec_loop: } if (page_rec_is_supremum(rec)) { - if (set_also_gap_locks && !(srv_locks_unsafe_for_binlog || trx->isolation_level <= TRX_ISO_READ_COMMITTED) @@ -4777,17 +4787,17 @@ locks_ok: indexes are shorter than the prefix size This optimization can avoid many IOs for certain schemas. */ - ibool row_contains_all_values = TRUE; - int i; + bool row_contains_all_values = true; + unsigned int i; for (i = 0; i < prebuilt->n_template; i++) { /* Condition (1) from above: is the field in the index (prefix or not)? 
*/ - mysql_row_templ_t* templ = + const mysql_row_templ_t* templ = prebuilt->mysql_template + i; ulint secondary_index_field_no = templ->rec_prefix_field_no; if (secondary_index_field_no == ULINT_UNDEFINED) { - row_contains_all_values = FALSE; + row_contains_all_values = false; break; } /* Condition (2) from above: if this is a @@ -4802,8 +4812,9 @@ locks_ok: index, secondary_index_field_no); ut_a(field->prefix_len > 0); - if (record_size >= field->prefix_len) { - row_contains_all_values = FALSE; + if (record_size >= field->prefix_len + / templ->mbmaxlen) { + row_contains_all_values = false; break; } } @@ -4819,7 +4830,7 @@ locks_ok: templ->rec_prefix_field_no; ut_a(templ->rec_field_no != ULINT_UNDEFINED); } - use_clustered_index = FALSE; + use_clustered_index = false; srv_stats.n_sec_rec_cluster_reads_avoided.inc(); } } diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc index bf4b9124da7..cc5d1320142 100644 --- a/storage/xtradb/srv/srv0srv.cc +++ b/storage/xtradb/srv/srv0srv.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, 2009 Google Inc. Copyright (c) 2009, Percona Inc. Copyright (c) 2013, 2017, MariaDB Corporation. @@ -171,7 +171,8 @@ UNIV_INTERN unsigned long long srv_online_max_size; OS (provided we compiled Innobase with it in), otherwise we will use simulated aio we build below with threads. 
Currently we support native aio on windows and linux */ -UNIV_INTERN my_bool srv_use_native_aio = TRUE; +/* make srv_use_native_aio to be visible for other plugins */ +my_bool srv_use_native_aio = TRUE; UNIV_INTERN my_bool srv_numa_interleave = FALSE; /* Default compression level if page compression is used and no compression @@ -450,7 +451,7 @@ UNIV_INTERN my_bool srv_cleaner_thread_priority = FALSE; UNIV_INTERN my_bool srv_master_thread_priority = FALSE; /* The number of purge threads to use.*/ -UNIV_INTERN ulong srv_n_purge_threads = 1; +UNIV_INTERN ulong srv_n_purge_threads; /* the number of pages to purge in one batch */ UNIV_INTERN ulong srv_purge_batch_size = 20; @@ -689,16 +690,16 @@ UNIV_INTERN ulong srv_buf_dump_status_frequency = 0; /** Acquire the system_mutex. */ #define srv_sys_mutex_enter() do { \ - mutex_enter(&srv_sys->mutex); \ + mutex_enter(&srv_sys.mutex); \ } while (0) /** Test if the system mutex is owned. */ -#define srv_sys_mutex_own() (mutex_own(&srv_sys->mutex) \ +#define srv_sys_mutex_own() (mutex_own(&srv_sys.mutex) \ && !srv_read_only_mode) /** Release the system mutex. */ #define srv_sys_mutex_exit() do { \ - mutex_exit(&srv_sys->mutex); \ + mutex_exit(&srv_sys.mutex); \ } while (0) #define fetch_lock_wait_timeout(trx) \ @@ -793,7 +794,7 @@ struct srv_sys_t{ ulint n_sys_threads; /*!< size of the sys_threads array */ - srv_slot_t* sys_threads; /*!< server thread table; + srv_slot_t sys_threads[32 + 1]; /*!< server thread table; os_event_set() and os_event_reset() on sys_threads[]->event are @@ -817,7 +818,7 @@ struct srv_sys_t{ UNIV_INTERN ib_mutex_t server_mutex; #endif /* !HAVE_ATOMIC_BUILTINS */ -static srv_sys_t* srv_sys = NULL; +static srv_sys_t srv_sys; /** Event to signal srv_monitor_thread. Not protected by a mutex. Set after setting srv_print_innodb_monitor. */ @@ -839,10 +840,10 @@ and/or load it during startup. 
*/ UNIV_INTERN char srv_buffer_pool_dump_at_shutdown = FALSE; UNIV_INTERN char srv_buffer_pool_load_at_startup = FALSE; -/** Slot index in the srv_sys->sys_threads array for the purge thread. */ +/** Slot index in the srv_sys.sys_threads array for the purge thread. */ static const ulint SRV_PURGE_SLOT = 1; -/** Slot index in the srv_sys->sys_threads array for the master thread. */ +/** Slot index in the srv_sys.sys_threads array for the master thread. */ static const ulint SRV_MASTER_SLOT = 0; UNIV_INTERN os_event_t srv_checkpoint_completed_event; @@ -952,21 +953,21 @@ srv_reserve_slot( switch (type) { case SRV_MASTER: - slot = &srv_sys->sys_threads[SRV_MASTER_SLOT]; + slot = &srv_sys.sys_threads[SRV_MASTER_SLOT]; break; case SRV_PURGE: - slot = &srv_sys->sys_threads[SRV_PURGE_SLOT]; + slot = &srv_sys.sys_threads[SRV_PURGE_SLOT]; break; case SRV_WORKER: /* Find an empty slot, skip the master and purge slots. */ - for (slot = &srv_sys->sys_threads[2]; + for (slot = &srv_sys.sys_threads[2]; slot->in_use; ++slot) { - ut_a(slot < &srv_sys->sys_threads[ - srv_sys->n_sys_threads]); + ut_a(slot < &srv_sys.sys_threads[ + srv_sys.n_sys_threads]); } break; @@ -982,7 +983,7 @@ srv_reserve_slot( ut_ad(srv_slot_get_type(slot) == type); - ++srv_sys->n_threads_active[type]; + ++srv_sys.n_threads_active[type]; srv_sys_mutex_exit(); @@ -1012,27 +1013,27 @@ srv_suspend_thread_low( case SRV_MASTER: /* We have only one master thread and it should be the first entry always. */ - ut_a(srv_sys->n_threads_active[type] == 1); + ut_a(srv_sys.n_threads_active[type] == 1); break; case SRV_PURGE: /* We have only one purge coordinator thread and it should be the second entry always. 
*/ - ut_a(srv_sys->n_threads_active[type] == 1); + ut_a(srv_sys.n_threads_active[type] == 1); break; case SRV_WORKER: ut_a(srv_n_purge_threads > 1); - ut_a(srv_sys->n_threads_active[type] > 0); + ut_a(srv_sys.n_threads_active[type] > 0); break; } ut_a(!slot->suspended); slot->suspended = TRUE; - ut_a(srv_sys->n_threads_active[type] > 0); + ut_a(srv_sys.n_threads_active[type] > 0); - srv_sys->n_threads_active[type]--; + srv_sys.n_threads_active[type]--; return(os_event_reset(slot->event)); } @@ -1087,7 +1088,7 @@ srv_resume_thread(srv_slot_t* slot, ib_int64_t sig_count = 0, bool wait = true, ut_ad(slot->suspended); slot->suspended = FALSE; - ++srv_sys->n_threads_active[slot->type]; + ++srv_sys.n_threads_active[slot->type]; srv_sys_mutex_exit(); return(timeout); } @@ -1109,8 +1110,8 @@ srv_release_threads(enum srv_thread_type type, ulint n) srv_sys_mutex_enter(); - for (ulint i = 0; i < srv_sys->n_sys_threads; i++) { - srv_slot_t* slot = &srv_sys->sys_threads[i]; + for (ulint i = 0; i < srv_sys.n_sys_threads; i++) { + srv_slot_t* slot = &srv_sys.sys_threads[i]; if (!slot->in_use || srv_slot_get_type(slot) != type) { continue; @@ -1130,7 +1131,7 @@ srv_release_threads(enum srv_thread_type type, ulint n) should be the first entry always. 
*/ ut_a(n == 1); ut_a(i == SRV_MASTER_SLOT); - ut_a(srv_sys->n_threads_active[type] == 0); + ut_a(srv_sys.n_threads_active[type] == 0); break; case SRV_PURGE: @@ -1139,12 +1140,12 @@ srv_release_threads(enum srv_thread_type type, ulint n) ut_a(n == 1); ut_a(i == SRV_PURGE_SLOT); ut_a(srv_n_purge_threads > 0); - ut_a(srv_sys->n_threads_active[type] == 0); + ut_a(srv_sys.n_threads_active[type] == 0); break; case SRV_WORKER: ut_a(srv_n_purge_threads > 1); - ut_a(srv_sys->n_threads_active[type] + ut_a(srv_sys.n_threads_active[type] < srv_n_purge_threads - 1); break; } @@ -1182,9 +1183,6 @@ void srv_init(void) /*==========*/ { - ulint n_sys_threads = 0; - ulint srv_sys_sz = sizeof(*srv_sys); - #ifndef HAVE_ATOMIC_BUILTINS mutex_create(server_mutex_key, &server_mutex, SYNC_ANY_LATCH); #endif /* !HAVE_ATOMIC_BUILTINS */ @@ -1192,29 +1190,19 @@ srv_init(void) mutex_create(srv_innodb_monitor_mutex_key, &srv_innodb_monitor_mutex, SYNC_NO_ORDER_CHECK); - if (!srv_read_only_mode) { - - /* Number of purge threads + master thread */ - n_sys_threads = srv_n_purge_threads + 1; - - srv_sys_sz += n_sys_threads * sizeof(*srv_sys->sys_threads); - } - - srv_sys = static_cast<srv_sys_t*>(mem_zalloc(srv_sys_sz)); - - srv_sys->n_sys_threads = n_sys_threads; + srv_sys.n_sys_threads = srv_read_only_mode + ? 
0 + : srv_n_purge_threads + 1/* purge coordinator */; if (!srv_read_only_mode) { - mutex_create(srv_sys_mutex_key, &srv_sys->mutex, SYNC_THREADS); + mutex_create(srv_sys_mutex_key, &srv_sys.mutex, SYNC_THREADS); mutex_create(srv_sys_tasks_mutex_key, - &srv_sys->tasks_mutex, SYNC_ANY_LATCH); - - srv_sys->sys_threads = (srv_slot_t*) &srv_sys[1]; + &srv_sys.tasks_mutex, SYNC_ANY_LATCH); - for (ulint i = 0; i < srv_sys->n_sys_threads; ++i) { - srv_slot_t* slot = &srv_sys->sys_threads[i]; + for (ulint i = 0; i < srv_sys.n_sys_threads; ++i) { + srv_slot_t* slot = &srv_sys.sys_threads[i]; slot->event = os_event_create(); @@ -1234,8 +1222,6 @@ srv_init(void) if (srv_track_changed_pages) { os_event_set(srv_redo_log_tracked_event); } - - UT_LIST_INIT(srv_sys->tasks); } /* page_zip_stat_per_index_mutex is acquired from: @@ -1283,8 +1269,8 @@ srv_free(void) if (!srv_read_only_mode) { - for (ulint i = 0; i < srv_sys->n_sys_threads; i++) - os_event_free(srv_sys->sys_threads[i].event); + for (ulint i = 0; i < srv_sys.n_sys_threads; i++) + os_event_free(srv_sys.sys_threads[i].event); os_event_free(srv_error_event); srv_error_event = NULL; @@ -1296,8 +1282,8 @@ srv_free(void) srv_checkpoint_completed_event = NULL; os_event_free(srv_redo_log_tracked_event); srv_redo_log_tracked_event = NULL; - mutex_free(&srv_sys->mutex); - mutex_free(&srv_sys->tasks_mutex); + mutex_free(&srv_sys.mutex); + mutex_free(&srv_sys.tasks_mutex); } #ifdef WITH_INNODB_DISALLOW_WRITES @@ -1311,10 +1297,10 @@ srv_free(void) mutex_free(&srv_innodb_monitor_mutex); mutex_free(&page_zip_stat_per_index_mutex); - mem_free(srv_sys); - srv_sys = NULL; - trx_i_s_cache_free(trx_i_s_cache); + + /* This is needed for Mariabackup. 
*/ + memset(&srv_sys, 0, sizeof srv_sys); } /*********************************************************************//** @@ -1793,8 +1779,10 @@ srv_export_innodb_status(void) buf_get_total_stat(&stat); buf_get_total_list_len(&LRU_len, &free_len, &flush_list_len); buf_get_total_list_size_in_bytes(&buf_pools_list_size); - fil_crypt_total_stat(&crypt_stat); - btr_scrub_total_stat(&scrub_stat); + if (!srv_read_only_mode) { + fil_crypt_total_stat(&crypt_stat); + btr_scrub_total_stat(&scrub_stat); + } mem_adaptive_hash = 0; @@ -2108,6 +2096,7 @@ srv_export_innodb_status(void) export_vars.innodb_sec_rec_cluster_reads_avoided = srv_stats.n_sec_rec_cluster_reads_avoided; + if (!srv_read_only_mode) { export_vars.innodb_encryption_rotation_pages_read_from_cache = crypt_stat.pages_read_from_cache; export_vars.innodb_encryption_rotation_pages_read_from_disk = @@ -2135,6 +2124,7 @@ srv_export_innodb_status(void) scrub_stat.page_split_failures_missing_index; export_vars.innodb_scrub_page_split_failures_unknown = scrub_stat.page_split_failures_unknown; + } mutex_exit(&srv_innodb_monitor_mutex); } @@ -2289,7 +2279,7 @@ loop: } } - if (srv_shutdown_state >= SRV_SHUTDOWN_CLEANUP) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { goto exit_func; } @@ -2427,7 +2417,7 @@ loop: os_event_wait_time_low(srv_error_event, 1000000, sig_count); - if (srv_shutdown_state < SRV_SHUTDOWN_CLEANUP) { + if (srv_shutdown_state == SRV_SHUTDOWN_NONE) { goto loop; } @@ -2452,9 +2442,9 @@ srv_inc_activity_count( is caused by the background change buffer merge */ { - srv_sys->activity_count.inc(); + srv_sys.activity_count.inc(); if (ibuf_merge_activity) - srv_sys->ibuf_merge_activity_count.inc(); + srv_sys.ibuf_merge_activity_count.inc(); } /**********************************************************************//** @@ -2476,7 +2466,7 @@ srv_get_active_thread_type(void) srv_sys_mutex_enter(); for (ulint i = SRV_WORKER; i <= SRV_MASTER; ++i) { - if (srv_sys->n_threads_active[i] != 0) { + if 
(srv_sys.n_threads_active[i] != 0) { ret = static_cast<srv_thread_type>(i); break; } @@ -2599,7 +2589,8 @@ purge_archived_logs( if (dirnamelen + strlen(fileinfo.name) + 2 > OS_FILE_MAX_PATH) continue; - snprintf(archived_log_filename + dirnamelen, OS_FILE_MAX_PATH, + snprintf(archived_log_filename + dirnamelen, + OS_FILE_MAX_PATH - dirnamelen - 1, "%s", fileinfo.name); if (before_no) { @@ -2695,12 +2686,12 @@ srv_active_wake_master_thread(void) srv_inc_activity_count(); - if (srv_sys->n_threads_active[SRV_MASTER] == 0) { + if (srv_sys.n_threads_active[SRV_MASTER] == 0) { srv_slot_t* slot; srv_sys_mutex_enter(); - slot = &srv_sys->sys_threads[SRV_MASTER_SLOT]; + slot = &srv_sys.sys_threads[SRV_MASTER_SLOT]; /* Only if the master thread has been started. */ @@ -2727,7 +2718,7 @@ srv_wake_purge_thread_if_not_active(void) ut_ad(!srv_sys_mutex_own()); if (purge_sys->state == PURGE_STATE_RUN - && srv_sys->n_threads_active[SRV_PURGE] == 0) { + && srv_sys.n_threads_active[SRV_PURGE] == 0) { srv_release_threads(SRV_PURGE, 1); } @@ -2756,7 +2747,7 @@ ulint srv_get_activity_count(void) /*========================*/ { - return(srv_sys->activity_count); + return(srv_sys.activity_count); } /** Get current server ibuf merge activity count. @@ -2765,7 +2756,7 @@ static ulint srv_get_ibuf_merge_activity_count(void) { - return(srv_sys->ibuf_merge_activity_count); + return(srv_sys.ibuf_merge_activity_count); } /*******************************************************************//** @@ -2784,14 +2775,14 @@ srv_check_activity( ULINT_UNDEFINED */ ulint old_ibuf_merge_activity_count) { - ulint new_activity_count = srv_sys->activity_count; + ulint new_activity_count = srv_sys.activity_count; if (old_ibuf_merge_activity_count == ULINT_UNDEFINED) return(new_activity_count != old_activity_count); /* If we care about ibuf merge activity, then the server is considered idle if all activity, if any, was due to ibuf merge. 
*/ ulint new_ibuf_merge_activity_count - = srv_sys->ibuf_merge_activity_count; + = srv_sys.ibuf_merge_activity_count; ut_ad(new_ibuf_merge_activity_count <= new_activity_count); ut_ad(new_ibuf_merge_activity_count >= old_ibuf_merge_activity_count); @@ -2871,7 +2862,7 @@ srv_shutdown_print_master_pending( time_elapsed = ut_difftime(current_time, *last_print_time); if (time_elapsed > 60) { - *last_print_time = ut_time(); + *last_print_time = current_time; if (n_tables_to_drop) { ut_print_timestamp(stderr); @@ -2924,7 +2915,7 @@ srv_master_do_active_tasks(void) MONITOR_INC_TIME_IN_MICRO_SECS( MONITOR_SRV_BACKGROUND_DROP_TABLE_MICROSECOND, counter_time); - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2958,11 +2949,7 @@ srv_master_do_active_tasks(void) MONITOR_SRV_MEM_VALIDATE_MICROSECOND, counter_time); } #endif - if (srv_shutdown_state > 0) { - return; - } - - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2975,7 +2962,7 @@ srv_master_do_active_tasks(void) MONITOR_SRV_DICT_LRU_MICROSECOND, counter_time); } - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -3019,7 +3006,7 @@ srv_master_do_idle_tasks(void) MONITOR_SRV_BACKGROUND_DROP_TABLE_MICROSECOND, counter_time); - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -3035,7 +3022,7 @@ srv_master_do_idle_tasks(void) MONITOR_INC_TIME_IN_MICRO_SECS( MONITOR_SRV_IBUF_MERGE_MICROSECOND, counter_time); - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -3051,7 +3038,7 @@ srv_master_do_idle_tasks(void) MONITOR_INC_TIME_IN_MICRO_SECS( MONITOR_SRV_LOG_FLUSH_MICROSECOND, counter_time); - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -3072,70 +3059,42 @@ srv_master_do_idle_tasks(void) } } -/*********************************************************************//** 
-Perform the tasks during shutdown. The tasks that we do at shutdown -depend on srv_fast_shutdown: -2 => very fast shutdown => do no book keeping -1 => normal shutdown => clear drop table queue and make checkpoint -0 => slow shutdown => in addition to above do complete purge and ibuf -merge -@return TRUE if some work was done. FALSE otherwise */ +/** Perform shutdown tasks. +@param[in] ibuf_merge whether to complete the change buffer merge */ static -ibool -srv_master_do_shutdown_tasks( -/*=========================*/ - ib_time_t* last_print_time)/*!< last time the function - print the message */ +void +srv_shutdown(bool ibuf_merge) { - ulint n_bytes_merged = 0; - ulint n_tables_to_drop = 0; + ulint n_bytes_merged = 0; + ulint n_tables_to_drop; + ib_time_t now = ut_time(); - ut_ad(!srv_read_only_mode); - - ++srv_main_shutdown_loops; - - ut_a(srv_shutdown_state > 0); - - /* In very fast shutdown none of the following is necessary */ - if (srv_fast_shutdown == 2) { - return(FALSE); - } - - /* ALTER TABLE in MySQL requires on Unix that the table handler - can drop tables lazily after there no longer are SELECT - queries to them. 
*/ - srv_main_thread_op_info = "doing background drop tables"; - n_tables_to_drop = row_drop_tables_for_mysql_in_background(); - - /* make sure that there is enough reusable space in the redo - log files */ - srv_main_thread_op_info = "checking free log space"; - log_free_check(); - - /* In case of normal shutdown we don't do ibuf merge or purge */ - if (srv_fast_shutdown == 1) { - goto func_exit; - } - - /* Do an ibuf merge */ - srv_main_thread_op_info = "doing insert buffer merge"; - n_bytes_merged = ibuf_merge_in_background(true); - - /* Flush logs if needed */ - srv_sync_log_buffer_in_background(); - -func_exit: - /* Make a new checkpoint about once in 10 seconds */ - srv_main_thread_op_info = "making checkpoint"; - log_checkpoint(TRUE, FALSE, FALSE); - - /* Print progress message every 60 seconds during shutdown */ - if (srv_shutdown_state > 0 && srv_print_verbose_log) { - srv_shutdown_print_master_pending( - last_print_time, n_tables_to_drop, n_bytes_merged); - } + do { + ut_ad(!srv_read_only_mode); + ut_ad(srv_shutdown_state == SRV_SHUTDOWN_CLEANUP); + ++srv_main_shutdown_loops; + + /* FIXME: Remove the background DROP TABLE queue; it is not + crash-safe and breaks ACID. 
*/ + srv_main_thread_op_info = "doing background drop tables"; + n_tables_to_drop = row_drop_tables_for_mysql_in_background(); + + if (ibuf_merge) { + srv_main_thread_op_info = "checking free log space"; + log_free_check(); + srv_main_thread_op_info = "doing insert buffer merge"; + n_bytes_merged = ibuf_merge_in_background(true); + + /* Flush logs if needed */ + srv_sync_log_buffer_in_background(); + } - return(n_bytes_merged || n_tables_to_drop); + /* Print progress message every 60 seconds during shutdown */ + if (srv_print_verbose_log) { + srv_shutdown_print_master_pending( + &now, n_tables_to_drop, n_bytes_merged); + } + } while (n_bytes_merged || n_tables_to_drop); } /*********************************************************************//** @@ -3163,11 +3122,12 @@ DECLARE_THREAD(srv_master_thread)( /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); + srv_slot_t* slot; ulint old_activity_count = srv_get_activity_count(); ulint old_ibuf_merge_activity_count = srv_get_ibuf_merge_activity_count(); - ib_time_t last_print_time; ut_ad(!srv_read_only_mode); @@ -3188,9 +3148,8 @@ DECLARE_THREAD(srv_master_thread)( srv_main_thread_id = os_thread_pf(os_thread_get_curr_id()); slot = srv_reserve_slot(SRV_MASTER); - ut_a(slot == srv_sys->sys_threads); + ut_a(slot == srv_sys.sys_threads); - last_print_time = ut_time(); loop: if (srv_force_recovery >= SRV_FORCE_NO_BACKGROUND) { goto suspend_thread; @@ -3216,13 +3175,26 @@ loop: } } - while (srv_master_do_shutdown_tasks(&last_print_time)) { - - /* Shouldn't loop here in case of very fast shutdown */ - ut_ad(srv_fast_shutdown < 2); +suspend_thread: + switch (srv_shutdown_state) { + case SRV_SHUTDOWN_NONE: + break; + case SRV_SHUTDOWN_FLUSH_PHASE: + case SRV_SHUTDOWN_LAST_PHASE: + ut_ad(0); + /* fall through */ + case SRV_SHUTDOWN_EXIT_THREADS: + /* srv_init_abort() must have been invoked */ + case SRV_SHUTDOWN_CLEANUP: + if (srv_shutdown_state == SRV_SHUTDOWN_CLEANUP + && srv_fast_shutdown < 2) { + 
srv_shutdown(srv_fast_shutdown == 0); + } + srv_suspend_thread(slot); + my_thread_end(); + os_thread_exit(NULL); } -suspend_thread: srv_main_thread_op_info = "suspending"; srv_suspend_thread(slot); @@ -3234,41 +3206,32 @@ suspend_thread: srv_main_thread_op_info = "waiting for server activity"; srv_resume_thread(slot); - - if (srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS) { - os_thread_exit(NULL); - } - goto loop; - - OS_THREAD_DUMMY_RETURN; /* Not reached, avoid compiler warning */ } -/*********************************************************************//** -Check if purge should stop. -@return true if it should shutdown. */ +/** Check if purge should stop. +@param[in] n_purged pages purged in the last batch +@return whether purge should exit */ static bool -srv_purge_should_exit( -/*==============*/ - ulint n_purged) /*!< in: pages purged in last batch */ +srv_purge_should_exit(ulint n_purged) { - switch (srv_shutdown_state) { - case SRV_SHUTDOWN_NONE: - /* Normal operation. */ - break; + ut_ad(srv_shutdown_state == SRV_SHUTDOWN_NONE + || srv_shutdown_state == SRV_SHUTDOWN_CLEANUP); - case SRV_SHUTDOWN_CLEANUP: - case SRV_SHUTDOWN_EXIT_THREADS: - /* Exit unless slow shutdown requested or all done. */ - return(srv_fast_shutdown != 0 || n_purged == 0); - - case SRV_SHUTDOWN_LAST_PHASE: - case SRV_SHUTDOWN_FLUSH_PHASE: - ut_error; + if (srv_undo_sources) { + return(false); } - - return(false); + if (srv_fast_shutdown) { + return(true); + } + /* Slow shutdown was requested. */ + if (n_purged) { + /* The previous round still did some work. */ + return(false); + } + /* Exit if there are no active transactions to roll back. 
*/ + return(trx_sys_any_active_transactions() == 0); } /*********************************************************************//** @@ -3284,18 +3247,18 @@ srv_task_execute(void) ut_ad(!srv_read_only_mode); ut_a(srv_force_recovery < SRV_FORCE_NO_BACKGROUND); - mutex_enter(&srv_sys->tasks_mutex); + mutex_enter(&srv_sys.tasks_mutex); - if (UT_LIST_GET_LEN(srv_sys->tasks) > 0) { + if (UT_LIST_GET_LEN(srv_sys.tasks) > 0) { - thr = UT_LIST_GET_FIRST(srv_sys->tasks); + thr = UT_LIST_GET_FIRST(srv_sys.tasks); ut_a(que_node_get_type(thr->child) == QUE_NODE_PURGE); - UT_LIST_REMOVE(queue, srv_sys->tasks, thr); + UT_LIST_REMOVE(queue, srv_sys.tasks, thr); } - mutex_exit(&srv_sys->tasks_mutex); + mutex_exit(&srv_sys.tasks_mutex); if (thr != NULL) { @@ -3322,6 +3285,8 @@ DECLARE_THREAD(srv_worker_thread)( void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); + srv_slot_t* slot; ulint tid_i = os_atomic_increment_ulint(&purge_tid_i, 1); @@ -3345,7 +3310,7 @@ DECLARE_THREAD(srv_worker_thread)( srv_sys_mutex_enter(); - ut_a(srv_sys->n_threads_active[SRV_WORKER] < srv_n_purge_threads); + ut_a(srv_sys.n_threads_active[SRV_WORKER] < srv_n_purge_threads); srv_sys_mutex_exit(); @@ -3387,6 +3352,7 @@ DECLARE_THREAD(srv_worker_thread)( os_thread_pf(os_thread_get_curr_id())); #endif /* UNIV_DEBUG_THREAD_CREATION */ + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit and not use return() to exit. 
*/ os_thread_exit(NULL); @@ -3540,7 +3506,7 @@ srv_purge_coordinator_suspend( } rw_lock_x_unlock(&purge_sys->latch); - } while (stop); + } while (stop && srv_undo_sources); srv_resume_thread(slot, 0, false); } @@ -3555,6 +3521,8 @@ DECLARE_THREAD(srv_purge_coordinator_thread)( void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); + srv_slot_t* slot; ulint n_total_purged = ULINT_UNDEFINED; @@ -3592,6 +3560,7 @@ DECLARE_THREAD(srv_purge_coordinator_thread)( purge didn't purge any records then wait for activity. */ if (srv_shutdown_state == SRV_SHUTDOWN_NONE + && srv_undo_sources && (purge_sys->state == PURGE_STATE_STOP || n_total_purged == 0)) { @@ -3612,36 +3581,8 @@ DECLARE_THREAD(srv_purge_coordinator_thread)( srv_n_purge_threads, &n_total_purged); srv_inc_activity_count(); - } while (!srv_purge_should_exit(n_total_purged)); - /* Ensure that we don't jump out of the loop unless the - exit condition is satisfied. */ - - ut_a(srv_purge_should_exit(n_total_purged)); - - ulint n_pages_purged = ULINT_MAX; - - /* Ensure that all records are purged if it is not a fast shutdown. - This covers the case where a record can be added after we exit the - loop above. */ - while (srv_fast_shutdown == 0 && n_pages_purged > 0) { - n_pages_purged = trx_purge(1, srv_purge_batch_size, false); - } - - /* This trx_purge is called to remove any undo records (added by - background threads) after completion of the above loop. When - srv_fast_shutdown != 0, a large batch size can cause significant - delay in shutdown ,so reducing the batch size to magic number 20 - (which was default in 5.5), which we hope will be sufficient to - remove all the undo records */ - const uint temp_batch_size = 20; - - n_pages_purged = trx_purge(1, srv_purge_batch_size <= temp_batch_size - ? 
srv_purge_batch_size : temp_batch_size, - true); - ut_a(n_pages_purged == 0 || srv_fast_shutdown != 0); - /* The task queue should always be empty, independent of fast shutdown state. */ ut_a(srv_get_task_queue_length() == 0); @@ -3668,6 +3609,7 @@ DECLARE_THREAD(srv_purge_coordinator_thread)( srv_release_threads(SRV_WORKER, srv_n_purge_threads - 1); } + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit and not use return() to exit. */ os_thread_exit(NULL); @@ -3685,11 +3627,11 @@ srv_que_task_enqueue_low( que_thr_t* thr) /*!< in: query thread */ { ut_ad(!srv_read_only_mode); - mutex_enter(&srv_sys->tasks_mutex); + mutex_enter(&srv_sys.tasks_mutex); - UT_LIST_ADD_LAST(queue, srv_sys->tasks, thr); + UT_LIST_ADD_LAST(queue, srv_sys.tasks, thr); - mutex_exit(&srv_sys->tasks_mutex); + mutex_exit(&srv_sys.tasks_mutex); srv_release_threads(SRV_WORKER, 1); } @@ -3706,11 +3648,11 @@ srv_get_task_queue_length(void) ut_ad(!srv_read_only_mode); - mutex_enter(&srv_sys->tasks_mutex); + mutex_enter(&srv_sys.tasks_mutex); - n_tasks = UT_LIST_GET_LEN(srv_sys->tasks); + n_tasks = UT_LIST_GET_LEN(srv_sys.tasks); - mutex_exit(&srv_sys->tasks_mutex); + mutex_exit(&srv_sys.tasks_mutex); return(n_tasks); } @@ -3733,3 +3675,19 @@ srv_purge_wakeup() } } } + +/** Check whether given space id is undo tablespace id +@param[in] space_id space id to check +@return true if it is undo tablespace else false. 
*/ +bool +srv_is_undo_tablespace( + ulint space_id) +{ + if (srv_undo_space_id_start == 0) { + return (false); + } + + return(space_id >= srv_undo_space_id_start + && space_id < (srv_undo_space_id_start + + srv_undo_tablespaces_open)); +} diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc index aa51012816d..fd129c3e55f 100644 --- a/storage/xtradb/srv/srv0start.cc +++ b/storage/xtradb/srv/srv0start.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2016, Oracle and/or its affiliates. All rights reserved. +Copyright (c) 1996, 2017, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2008, Google Inc. Copyright (c) 2009, Percona Inc. Copyright (c) 2013, 2017, MariaDB Corporation @@ -121,6 +121,9 @@ UNIV_INTERN ibool srv_have_fullfsync = FALSE; /** TRUE if a raw partition is in use */ UNIV_INTERN ibool srv_start_raw_disk_in_use = FALSE; +/** UNDO tablespaces starts with space id. */ +ulint srv_undo_space_id_start; + /** TRUE if the server is being started, before rolling back any incomplete transactions */ UNIV_INTERN ibool srv_startup_is_before_trx_rollback_phase = FALSE; @@ -129,7 +132,11 @@ UNIV_INTERN ibool srv_is_being_started = FALSE; /** TRUE if the server was successfully started */ UNIV_INTERN ibool srv_was_started = FALSE; /** TRUE if innobase_start_or_create_for_mysql() has been called */ -static ibool srv_start_has_been_called = FALSE; +static ibool srv_start_has_been_called; + +/** Whether any undo log records can be generated */ +UNIV_INTERN bool srv_undo_sources; + #ifdef UNIV_DEBUG /** InnoDB system tablespace to set during recovery */ UNIV_INTERN uint srv_sys_space_size_debug; @@ -139,8 +146,8 @@ UNIV_INTERN uint srv_sys_space_size_debug; SRV_SHUTDOWN_CLEANUP and then to SRV_SHUTDOWN_LAST_PHASE, and so on */ UNIV_INTERN enum srv_shutdown_state srv_shutdown_state = SRV_SHUTDOWN_NONE; -/** Files comprising the system tablespace */ -os_file_t 
files[1000]; +/** Files comprising the system tablespace. Also used by Mariabackup. */ +UNIV_INTERN pfs_os_file_t files[1000]; /** io_handler_thread parameters for thread identification */ static ulint n[SRV_MAX_N_IO_THREADS]; @@ -203,6 +210,39 @@ UNIV_INTERN mysql_pfs_key_t srv_purge_thread_key; UNIV_INTERN mysql_pfs_key_t srv_log_tracking_thread_key; #endif /* UNIV_PFS_THREAD */ +/** Innobase start-up aborted. Perform cleanup actions. +@param[in] create_new_db TRUE if new db is being created +@param[in] file File name +@param[in] line Line number +@param[in] err Reason for aborting InnoDB startup +@return DB_SUCCESS or error code. */ +static +dberr_t +srv_init_abort( + bool create_new_db, + const char* file, + ulint line, + dberr_t err) +{ + if (create_new_db) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Database creation was aborted" + " at %s [" ULINTPF "]" + " with error %s. You may need" + " to delete the ibdata1 file before trying to start" + " up again.", + file, line, ut_strerr(err)); + } else { + ib_logf(IB_LOG_LEVEL_ERROR, + "Plugin initialization aborted" + " at %s [" ULINTPF "]" + " with error %s.", + file, line, ut_strerr(err)); + } + + return(err); +} + /*********************************************************************//** Convert a numeric string that optionally ends in G or M or K, to a number containing megabytes. 
@@ -584,7 +624,7 @@ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t create_log_file( /*============*/ - os_file_t* file, /*!< out: file handle */ + pfs_os_file_t* file, /*!< out: file handle */ const char* name) /*!< in: log file name */ { ibool ret; @@ -802,7 +842,7 @@ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t open_log_file( /*==========*/ - os_file_t* file, /*!< out: file handle */ + pfs_os_file_t* file, /*!< out: file handle */ const char* name, /*!< in: log file name */ os_offset_t* size) /*!< out: file size */ { @@ -823,32 +863,32 @@ open_log_file( return(DB_SUCCESS); } -/*********************************************************************//** -Creates or opens database data files and closes them. + +/** Creates or opens database data files and closes them. +@param[out] create_new_db true = create new database +@param[out] min_arch_log_no min of archived log numbers in + data files +@param[out] max_arch_log_no max of archived log numbers in + data files +@param[out] flushed_lsn flushed lsn in fist datafile +@param[out] sum_of_new_sizes sum of sizes of the new files + added @return DB_SUCCESS or error code */ MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t open_or_create_data_files( -/*======================*/ - ibool* create_new_db, /*!< out: TRUE if new database should be - created */ + bool* create_new_db, #ifdef UNIV_LOG_ARCHIVE - lsn_t* min_arch_log_no,/*!< out: min of archived log - numbers in data files */ - lsn_t* max_arch_log_no,/*!< out: max of archived log - numbers in data files */ + lsn_t* min_arch_log_no, + lsn_t* max_arch_log_no, #endif /* UNIV_LOG_ARCHIVE */ - lsn_t* min_flushed_lsn,/*!< out: min of flushed lsn - values in data files */ - lsn_t* max_flushed_lsn,/*!< out: max of flushed lsn - values in data files */ - ulint* sum_of_new_sizes)/*!< out: sum of sizes of the - new files added */ + lsn_t* flushed_lsn, + ulint* sum_of_new_sizes) { ibool ret; ulint i; - ibool one_opened = FALSE; - ibool one_created = 
FALSE; + bool one_opened = false; + bool one_created = false; os_offset_t size; ulint flags; ulint space; @@ -867,7 +907,7 @@ open_or_create_data_files( *sum_of_new_sizes = 0; - *create_new_db = FALSE; + *create_new_db = false; srv_normalize_path_for_win(srv_data_home); @@ -919,7 +959,7 @@ open_or_create_data_files( && os_file_get_last_error(false) != OS_FILE_ALREADY_EXISTS #ifdef UNIV_AIX - /* AIX 5.1 after security patch ML7 may have + /* AIX 5.1 after security patch ML7 may have errno set to 0 here, which causes our function to return 100; work around that AIX problem */ @@ -955,9 +995,10 @@ open_or_create_data_files( } const char* check_msg; + check_msg = fil_read_first_page( files[i], FALSE, &flags, &space, - min_flushed_lsn, max_flushed_lsn, NULL); + flushed_lsn, NULL); /* If first page is valid, don't overwrite DB. It prevents overwriting DB when mysql_install_db @@ -988,6 +1029,7 @@ open_or_create_data_files( name); return(DB_ERROR); } + if (srv_data_file_is_raw_partition[i] == SRV_OLD_RAW) { ut_a(!srv_read_only_mode); files[i] = os_file_create( @@ -1007,7 +1049,6 @@ open_or_create_data_files( } if (!ret) { - os_file_get_last_error(true); ib_logf(IB_LOG_LEVEL_ERROR, @@ -1017,7 +1058,6 @@ open_or_create_data_files( } if (srv_data_file_is_raw_partition[i] == SRV_OLD_RAW) { - goto skip_size_check; } @@ -1044,16 +1084,15 @@ size_check: "auto-extending " "data file %s is " "of a different size " - "%lu pages (rounded " + ULINTPF " pages (rounded " "down to MB) than specified " "in the .cnf file: " - "initial %lu pages, " - "max %lu (relevant if " + "initial " ULINTPF " pages, " + "max " ULINTPF " (relevant if " "non-zero) pages!", name, - (ulong) rounded_size_pages, - (ulong) srv_data_file_sizes[i], - (ulong) + rounded_size_pages, + srv_data_file_sizes[i], srv_last_file_size_max); return(DB_ERROR); @@ -1066,12 +1105,12 @@ size_check: ib_logf(IB_LOG_LEVEL_ERROR, "Data file %s is of a different " - "size %lu pages (rounded down to MB) " + "size " ULINTPF " pages 
(rounded down to MB) " "than specified in the .cnf file " - "%lu pages!", + ULINTPF " pages!", name, - (ulong) rounded_size_pages, - (ulong) srv_data_file_sizes[i]); + rounded_size_pages, + srv_data_file_sizes[i]); return(DB_ERROR); } @@ -1090,7 +1129,7 @@ skip_size_check: check_first_page: check_msg = fil_read_first_page( files[i], one_opened, &flags, &space, - min_flushed_lsn, max_flushed_lsn, &crypt_data); + flushed_lsn, &crypt_data); if (check_msg) { @@ -1127,9 +1166,9 @@ check_first_page: != fsp_flags_get_page_size(flags)) { ib_logf(IB_LOG_LEVEL_ERROR, - "Data file \"%s\" uses page size %lu," + "Data file \"%s\" uses page size " ULINTPF " ," "but the start-up parameter " - "is --innodb-page-size=%lu", + "is --innodb-page-size=" ULINTPF " .", name, fsp_flags_get_page_size(flags), UNIV_PAGE_SIZE); @@ -1160,9 +1199,9 @@ check_first_page: } ib_logf(IB_LOG_LEVEL_INFO, - "Setting file %s size to %lu MB", + "Setting file %s size to " ULINTPF " MB", name, - (ulong) (srv_data_file_sizes[i] + (srv_data_file_sizes[i] >> (20 - UNIV_PAGE_SIZE_SHIFT))); ret = os_file_set_size( @@ -1221,7 +1260,7 @@ srv_undo_tablespace_create( const char* name, /*!< in: tablespace name */ ulint size) /*!< in: tablespace size in pages */ { - os_file_t fh; + pfs_os_file_t fh; ibool ret; dberr_t err = DB_SUCCESS; @@ -1299,7 +1338,7 @@ srv_undo_tablespace_open( const char* name, /*!< in: tablespace name */ ulint space) /*!< in: tablespace id */ { - os_file_t fh; + pfs_os_file_t fh; dberr_t err = DB_ERROR; ibool ret; ulint flags; @@ -1404,13 +1443,23 @@ srv_undo_tablespaces_init( for (i = 0; create_new_db && i < n_conf_tablespaces; ++i) { char name[OS_FILE_MAX_PATH]; + ulint space_id = i + 1; + + DBUG_EXECUTE_IF("innodb_undo_upgrade", + space_id = i + 3;); ut_snprintf( name, sizeof(name), "%s%cundo%03lu", - srv_undo_dir, SRV_PATH_SEPARATOR, i + 1); + srv_undo_dir, SRV_PATH_SEPARATOR, space_id); + + if (i == 0) { + srv_undo_space_id_start = space_id; + prev_space_id = srv_undo_space_id_start - 1; 
+ } + + undo_tablespace_ids[i] = space_id; - /* Undo space ids start from 1. */ err = srv_undo_tablespace_create( name, SRV_UNDO_TABLESPACE_SIZE_IN_PAGES); @@ -1432,14 +1481,16 @@ srv_undo_tablespaces_init( if (!create_new_db && !backup_mode) { n_undo_tablespaces = trx_rseg_get_n_undo_tablespaces( undo_tablespace_ids); - } else { - n_undo_tablespaces = n_conf_tablespaces; - for (i = 1; i <= n_undo_tablespaces; ++i) { - undo_tablespace_ids[i - 1] = i; + if (n_undo_tablespaces != 0) { + srv_undo_space_id_start = undo_tablespace_ids[0]; + prev_space_id = srv_undo_space_id_start - 1; } - undo_tablespace_ids[i] = ULINT_UNDEFINED; + } else { + n_undo_tablespaces = n_conf_tablespaces; + + undo_tablespace_ids[n_conf_tablespaces] = ULINT_UNDEFINED; } /* Open all the undo tablespaces that are currently in use. If we @@ -1463,8 +1514,6 @@ srv_undo_tablespaces_init( ut_a(undo_tablespace_ids[i] != 0); ut_a(undo_tablespace_ids[i] != ULINT_UNDEFINED); - /* Undo space ids start from 1. */ - err = srv_undo_tablespace_open(name, undo_tablespace_ids[i]); if (err != DB_SUCCESS) { @@ -1499,11 +1548,23 @@ srv_undo_tablespaces_init( break; } + /** Note the first undo tablespace id in case of + no active undo tablespace. */ + if (n_undo_tablespaces == 0) { + srv_undo_space_id_start = i; + } + ++n_undo_tablespaces; ++*n_opened; } + /** Explictly specify the srv_undo_space_id_start + as zero when there are no undo tablespaces. */ + if (n_undo_tablespaces == 0) { + srv_undo_space_id_start = 0; + } + /* If the user says that there are fewer than what we find we tolerate that discrepancy but not the inverse. Because there could be unused undo tablespaces for future use. 
*/ @@ -1548,10 +1609,11 @@ srv_undo_tablespaces_init( mtr_start(&mtr); /* The undo log tablespace */ - for (i = 1; i <= n_undo_tablespaces; ++i) { + for (i = 0; i < n_undo_tablespaces; ++i) { fsp_header_init( - i, SRV_UNDO_TABLESPACE_SIZE_IN_PAGES, &mtr); + undo_tablespace_ids[i], + SRV_UNDO_TABLESPACE_SIZE_IN_PAGES, &mtr); } mtr_commit(&mtr); @@ -1626,12 +1688,10 @@ are not found and the user wants. @return DB_SUCCESS or error code */ UNIV_INTERN dberr_t -innobase_start_or_create_for_mysql(void) -/*====================================*/ +innobase_start_or_create_for_mysql() { - ibool create_new_db; - lsn_t min_flushed_lsn; - lsn_t max_flushed_lsn; + bool create_new_db; + lsn_t flushed_lsn; #ifdef UNIV_LOG_ARCHIVE lsn_t min_arch_log_no = LSN_MAX; lsn_t max_arch_log_no = LSN_MAX; @@ -1665,6 +1725,10 @@ innobase_start_or_create_for_mysql(void) /* This should be initialized early */ ut_init_timer(); + if (srv_force_recovery == SRV_FORCE_NO_LOG_REDO) { + srv_read_only_mode = 1; + } + high_level_read_only = srv_read_only_mode || srv_force_recovery > SRV_FORCE_NO_TRX_UNDO; @@ -2176,7 +2240,7 @@ innobase_start_or_create_for_mysql(void) #ifdef UNIV_LOG_ARCHIVE &min_arch_log_no, &max_arch_log_no, #endif /* UNIV_LOG_ARCHIVE */ - &min_flushed_lsn, &max_flushed_lsn, + &flushed_lsn, &sum_of_new_sizes); if (err == DB_FAIL) { @@ -2220,12 +2284,12 @@ innobase_start_or_create_for_mysql(void) bool success = buf_flush_list(ULINT_MAX, LSN_MAX, NULL); ut_a(success); - min_flushed_lsn = max_flushed_lsn = log_get_lsn(); + flushed_lsn = log_get_lsn(); buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST); err = create_log_files(create_new_db, logfilename, dirnamelen, - max_flushed_lsn, logfile0); + flushed_lsn, logfile0); if (err != DB_SUCCESS) { return(err); @@ -2245,19 +2309,8 @@ innobase_start_or_create_for_mysql(void) if (err == DB_NOT_FOUND) { if (i == 0) { - if (max_flushed_lsn - != min_flushed_lsn) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Cannot create" - " log files because" - " data files 
are" - " corrupt or" - " not in sync" - " with each other"); - return(DB_ERROR); - } - if (max_flushed_lsn < (lsn_t) 1000) { + if (flushed_lsn < (lsn_t) 1000) { ib_logf(IB_LOG_LEVEL_ERROR, "Cannot create" " log files because" @@ -2272,14 +2325,14 @@ innobase_start_or_create_for_mysql(void) err = create_log_files( create_new_db, logfilename, - dirnamelen, max_flushed_lsn, + dirnamelen, flushed_lsn, logfile0); if (err == DB_SUCCESS) { err = create_log_files_rename( logfilename, dirnamelen, - max_flushed_lsn, + flushed_lsn, logfile0); } @@ -2289,8 +2342,7 @@ innobase_start_or_create_for_mysql(void) /* Suppress the message about crash recovery. */ - max_flushed_lsn = min_flushed_lsn - = log_get_lsn(); + flushed_lsn = log_get_lsn(); goto files_checked; } else if (i < 2 && !IS_XTRABACKUP()) { /* must have at least 2 log files */ @@ -2420,9 +2472,23 @@ files_checked: mtr_start(&mtr); fsp_header_init(0, sum_of_new_sizes, &mtr); + compile_time_assert(TRX_SYS_SPACE == 0); + compile_time_assert(IBUF_SPACE_ID == 0); + + ulint ibuf_root = btr_create( + DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF, + 0, 0, DICT_IBUF_ID_MIN, + dict_ind_redundant, &mtr); mtr_commit(&mtr); + if (ibuf_root == FIL_NULL) { + return(srv_init_abort(true, __FILE__, __LINE__, + DB_ERROR)); + } + + ut_ad(ibuf_root == IBUF_TREE_ROOT_PAGE_NO); + /* To maintain backward compatibility we create only the first rollback segment before the double write buffer. All the remaining rollback segments will be created later, @@ -2448,17 +2514,19 @@ files_checked: bool success = buf_flush_list(ULINT_MAX, LSN_MAX, NULL); ut_a(success); - min_flushed_lsn = max_flushed_lsn = log_get_lsn(); + flushed_lsn = log_get_lsn(); buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST); /* Stamp the LSN to the data files. 
*/ - fil_write_flushed_lsn_to_data_files(max_flushed_lsn, 0); + err = fil_write_flushed_lsn(flushed_lsn); - fil_flush_file_spaces(FIL_TABLESPACE); + if (err != DB_SUCCESS) { + return(err); + } err = create_log_files_rename(logfilename, dirnamelen, - max_flushed_lsn, logfile0); + flushed_lsn, logfile0); if (err != DB_SUCCESS) { return(err); @@ -2513,7 +2581,7 @@ files_checked: err = recv_recovery_from_checkpoint_start( LOG_CHECKPOINT, LSN_MAX, - min_flushed_lsn, max_flushed_lsn); + flushed_lsn); if (err != DB_SUCCESS) { return(err); @@ -2696,7 +2764,7 @@ files_checked: DBUG_EXECUTE_IF("innodb_log_abort_1", return(DB_ERROR);); - min_flushed_lsn = max_flushed_lsn = log_get_lsn(); + flushed_lsn = log_get_lsn(); ib_logf(IB_LOG_LEVEL_WARN, "Resizing redo log from %u*%u to %u*%u pages" @@ -2705,7 +2773,7 @@ files_checked: (unsigned) srv_log_file_size, (unsigned) srv_n_log_files, (unsigned) srv_log_file_size_requested, - max_flushed_lsn); + flushed_lsn); buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST); @@ -2715,7 +2783,7 @@ files_checked: we need to explicitly flush the log buffers. */ fil_flush(SRV_LOG_SPACE_FIRST_ID); - ut_ad(max_flushed_lsn == log_get_lsn()); + ut_ad(flushed_lsn == log_get_lsn()); /* Prohibit redo log writes from any other threads until creating a log checkpoint at the @@ -2727,8 +2795,7 @@ files_checked: return(DB_ERROR);); /* Stamp the LSN to the data files. */ - fil_write_flushed_lsn_to_data_files( - max_flushed_lsn, 0); + err = fil_write_flushed_lsn(flushed_lsn); DBUG_EXECUTE_IF("innodb_log_abort_4", err = DB_ERROR;); @@ -2736,8 +2803,6 @@ files_checked: return(err); } - fil_flush_file_spaces(FIL_TABLESPACE); - /* Close and free the redo log files, so that we can replace them. 
*/ fil_close_log_files(true); @@ -2754,28 +2819,23 @@ files_checked: srv_log_file_size = srv_log_file_size_requested; err = create_log_files(create_new_db, logfilename, - dirnamelen, max_flushed_lsn, + dirnamelen, flushed_lsn, logfile0); if (err != DB_SUCCESS) { return(err); } - /* create_log_files() can increase system lsn that is - why FIL_PAGE_FILE_FLUSH_LSN have to be updated */ - min_flushed_lsn = max_flushed_lsn = log_get_lsn(); - fil_write_flushed_lsn_to_data_files(min_flushed_lsn, 0); - fil_flush_file_spaces(FIL_TABLESPACE); - err = create_log_files_rename(logfilename, dirnamelen, log_get_lsn(), logfile0); + if (err != DB_SUCCESS) { return(err); } } - srv_startup_is_before_trx_rollback_phase = FALSE; recv_recovery_rollback_active(); + srv_startup_is_before_trx_rollback_phase = FALSE; /* It is possible that file_format tag has never been set. In this case we initialize it to minimum @@ -2815,10 +2875,9 @@ files_checked: /* fprintf(stderr, "Max allowed record size %lu\n", page_get_free_space_of_empty() / 2); */ - if (buf_dblwr == NULL) { - /* Create the doublewrite buffer to a new tablespace */ - - buf_dblwr_create(); + if (!buf_dblwr_create()) { + return(srv_init_abort(create_new_db, __FILE__, __LINE__, + DB_ERROR)); } /* Here the double write buffer has already been created and so @@ -2848,6 +2907,9 @@ files_checked: /* Can only happen if server is read only. */ ut_a(srv_read_only_mode); srv_undo_logs = ULONG_UNDEFINED; + } else if (srv_available_undo_logs < srv_undo_logs) { + /* Should due to out of file space. 
*/ + return (srv_init_abort(create_new_db, __FILE__, __LINE__, DB_ERROR)); } if (!srv_read_only_mode) { @@ -2905,6 +2967,16 @@ files_checked: srv_master_thread, NULL, thread_ids + (1 + SRV_MAX_N_IO_THREADS)); thread_started[1 + SRV_MAX_N_IO_THREADS] = true; + + srv_undo_sources = true; + /* Create the dict stats gathering thread */ + srv_dict_stats_thread_active = true; + dict_stats_thread_handle = os_thread_create( + dict_stats_thread, NULL, NULL); + dict_stats_thread_started = true; + + /* Create the thread that will optimize the FTS sub-system. */ + fts_optimize_init(); } if (!srv_read_only_mode @@ -2949,12 +3021,16 @@ files_checked: } - buf_flush_page_cleaner_thread_handle = os_thread_create(buf_flush_page_cleaner_thread, NULL, NULL); + buf_page_cleaner_is_active = true; + buf_flush_page_cleaner_thread_handle = os_thread_create( + buf_flush_page_cleaner_thread, NULL, NULL); buf_flush_page_cleaner_thread_started = true; - } - buf_flush_lru_manager_thread_handle = os_thread_create(buf_flush_lru_manager_thread, NULL, NULL); - buf_flush_lru_manager_thread_started = true; + buf_lru_manager_is_active = true; + buf_flush_lru_manager_thread_handle = os_thread_create( + buf_flush_lru_manager_thread, NULL, NULL); + buf_flush_lru_manager_thread_started = true; + } if (!srv_file_per_table && srv_pass_corrupt_table) { fprintf(stderr, "InnoDB: Warning:" @@ -3002,10 +3078,10 @@ files_checked: if (!wsrep_recovery) { #endif /* WITH_WSREP */ /* Create the buffer pool dump/load thread */ + srv_buf_dump_thread_active = true; buf_dump_thread_handle= os_thread_create(buf_dump_thread, NULL, NULL); - srv_buf_dump_thread_active = true; buf_dump_thread_started = true; #ifdef WITH_WSREP } else { @@ -3015,26 +3091,19 @@ files_checked: } #endif /* WITH_WSREP */ - /* Create the dict stats gathering thread */ - dict_stats_thread_handle = os_thread_create( - dict_stats_thread, NULL, NULL); - srv_dict_stats_thread_active = true; - dict_stats_thread_started = true; - - /* Create the thread that 
will optimize the FTS sub-system. */ - fts_optimize_init(); - /* Create thread(s) that handles key rotation */ fil_system_enter(); fil_crypt_threads_init(); fil_system_exit(); - } - /* Init data for datafile scrub threads */ - btr_scrub_init(); + /* Init data for datafile scrub threads */ + btr_scrub_init(); - /* Initialize online defragmentation. */ - btr_defragment_init(); + /* Initialize online defragmentation. */ + btr_defragment_init(); + btr_defragment_thread_active = true; + os_thread_create(btr_defragment_thread, NULL, NULL); + } srv_was_started = TRUE; @@ -3071,13 +3140,10 @@ srv_fts_close(void) } #endif -/****************************************************************//** -Shuts down the InnoDB database. -@return DB_SUCCESS or error code */ +/** Shut down InnoDB. */ UNIV_INTERN -dberr_t -innobase_shutdown_for_mysql(void) -/*=============================*/ +void +innodb_shutdown() { ulint i; @@ -3087,15 +3153,20 @@ innobase_shutdown_for_mysql(void) "Shutting down an improperly started, " "or created database!"); } - - return(DB_SUCCESS); } - if (!srv_read_only_mode) { + if (srv_undo_sources) { + ut_ad(!srv_read_only_mode); /* Shutdown the FTS optimize sub system. */ fts_optimize_start_shutdown(); fts_optimize_end(); + dict_stats_shutdown(); + while (row_get_background_drop_list_len_low()) { + srv_wake_master_thread(); + os_thread_yield(); + } + srv_undo_sources = false; } /* 1. 
Flush the buffer pool to disk, write the current lsn to @@ -3199,11 +3270,10 @@ innobase_shutdown_for_mysql(void) if (!srv_read_only_mode) { dict_stats_thread_deinit(); fil_crypt_threads_cleanup(); + btr_scrub_cleanup(); + btr_defragment_shutdown(); } - /* Cleanup data for datafile scrubbing */ - btr_scrub_cleanup(); - #ifdef __WIN__ /* MDEV-361: ha_innodb.dll leaks handles on Windows MDEV-7403: should not pass recv_writer_thread_handle to @@ -3311,88 +3381,9 @@ innobase_shutdown_for_mysql(void) srv_start_has_been_called = FALSE; /* reset io_tid_i, in case current process does second innodb start (xtrabackup might do that).*/ io_tid_i = 0; - return(DB_SUCCESS); } #endif /* !UNIV_HOTBACKUP */ - -/******************************************************************** -Signal all per-table background threads to shutdown, and wait for them to do -so. */ -UNIV_INTERN -void -srv_shutdown_table_bg_threads(void) -/*===============================*/ -{ - dict_table_t* table; - dict_table_t* first; - dict_table_t* last = NULL; - - mutex_enter(&dict_sys->mutex); - - /* Signal all threads that they should stop. */ - table = UT_LIST_GET_FIRST(dict_sys->table_LRU); - first = table; - while (table) { - dict_table_t* next; - fts_t* fts = table->fts; - - if (fts != NULL) { - fts_start_shutdown(table, fts); - } - - next = UT_LIST_GET_NEXT(table_LRU, table); - - if (!next) { - last = table; - } - - table = next; - } - - /* We must release dict_sys->mutex here; if we hold on to it in the - loop below, we will deadlock if any of the background threads try to - acquire it (for example, the FTS thread by calling que_eval_sql). - - Releasing it here and going through dict_sys->table_LRU without - holding it is safe because: - - a) MySQL only starts the shutdown procedure after all client - threads have been disconnected and no new ones are accepted, so no - new tables are added or old ones dropped. - - b) Despite its name, the list is not LRU, and the order stays - fixed. 
- - To safeguard against the above assumptions ever changing, we store - the first and last items in the list above, and then check that - they've stayed the same below. */ - - mutex_exit(&dict_sys->mutex); - - /* Wait for the threads of each table to stop. This is not inside - the above loop, because by signaling all the threads first we can - overlap their shutting down delays. */ - table = UT_LIST_GET_FIRST(dict_sys->table_LRU); - ut_a(first == table); - while (table) { - dict_table_t* next; - fts_t* fts = table->fts; - - if (fts != NULL) { - fts_shutdown(table, fts); - } - - next = UT_LIST_GET_NEXT(table_LRU, table); - - if (table == last) { - ut_a(!next); - } - - table = next; - } -} - /*****************************************************************//** Get the meta-data filename from the table name. */ UNIV_INTERN diff --git a/storage/xtradb/sync/sync0sync.cc b/storage/xtradb/sync/sync0sync.cc index 6692eef9fb0..37ac3c56fff 100644 --- a/storage/xtradb/sync/sync0sync.cc +++ b/storage/xtradb/sync/sync0sync.cc @@ -1236,6 +1236,7 @@ sync_thread_add_level( upgrading in innobase_start_or_create_for_mysql(). */ break; } + /* fall through */ case SYNC_MEM_POOL: case SYNC_MEM_HASH: case SYNC_RECV: @@ -1299,9 +1300,9 @@ sync_thread_add_level( } } ut_ad(found_current); - - /* fallthrough */ } + + /* fall through */ case SYNC_BUF_FLUSH_LIST: case SYNC_BUF_LRU_LIST: case SYNC_BUF_FREE_LIST: diff --git a/storage/xtradb/trx/trx0i_s.cc b/storage/xtradb/trx/trx0i_s.cc index eacd9212d2f..0c9618d98eb 100644 --- a/storage/xtradb/trx/trx0i_s.cc +++ b/storage/xtradb/trx/trx0i_s.cc @@ -507,7 +507,9 @@ fill_trx_row( row->trx_mysql_thread_id = thd_get_thread_id(trx->mysql_thd); - stmt = innobase_get_stmt(trx->mysql_thd, &stmt_len); + stmt = trx->mysql_thd + ? 
innobase_get_stmt(trx->mysql_thd, &stmt_len) + : NULL; if (stmt != NULL) { char query[TRX_I_S_TRX_QUERY_MAX_LEN + 1]; diff --git a/storage/xtradb/trx/trx0purge.cc b/storage/xtradb/trx/trx0purge.cc index 7d35bb12093..df4a3217820 100644 --- a/storage/xtradb/trx/trx0purge.cc +++ b/storage/xtradb/trx/trx0purge.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under @@ -247,6 +247,19 @@ trx_purge_add_update_undo_to_history( hist_size + undo->size, MLOG_4BYTES, mtr); } + /* Before any transaction-generating background threads or the + purge have been started, recv_recovery_rollback_active() can + start transactions in row_merge_drop_temp_indexes() and + fts_drop_orphaned_tables(), and roll back recovered transactions. + After the purge thread has been given permission to exit, + in fast shutdown, we may roll back transactions (trx->undo_no==0) + in THD::cleanup() invoked from unlink_thd(). 
*/ + ut_ad(srv_undo_sources + || ((srv_startup_is_before_trx_rollback_phase + || trx_rollback_or_clean_is_active) + && purge_sys->state == PURGE_STATE_INIT) + || (trx->undo_no == 0 && srv_fast_shutdown)); + /* Add the log as the first in the history list */ flst_add_first(rseg_header + TRX_RSEG_HISTORY, undo_header + TRX_UNDO_HISTORY_NODE, mtr); @@ -685,7 +698,8 @@ trx_purge_get_rseg_with_min_trx_id( /* We assume in purge of externally stored fields that space id is in the range of UNDO tablespace space ids */ - ut_a(purge_sys->rseg->space <= srv_undo_tablespaces_open); + ut_a(purge_sys->rseg->space == 0 + || srv_is_undo_tablespace(purge_sys->rseg->space)); zip_size = purge_sys->rseg->zip_size; diff --git a/storage/xtradb/trx/trx0rec.cc b/storage/xtradb/trx/trx0rec.cc index 74a63b60286..8c0904dd57b 100644 --- a/storage/xtradb/trx/trx0rec.cc +++ b/storage/xtradb/trx/trx0rec.cc @@ -1186,10 +1186,6 @@ UNIV_INTERN dberr_t trx_undo_report_row_operation( /*==========================*/ - ulint flags, /*!< in: if BTR_NO_UNDO_LOG_FLAG bit is - set, does nothing */ - ulint op_type, /*!< in: TRX_UNDO_INSERT_OP or - TRX_UNDO_MODIFY_OP */ que_thr_t* thr, /*!< in: query thread */ dict_index_t* index, /*!< in: clustered index */ const dtuple_t* clust_entry, /*!< in: in the case of an insert, @@ -1223,16 +1219,8 @@ trx_undo_report_row_operation( ut_a(dict_index_is_clust(index)); ut_ad(!rec || rec_offs_validate(rec, index, offsets)); - if (flags & BTR_NO_UNDO_LOG_FLAG) { - - *roll_ptr = 0; - - return(DB_SUCCESS); - } - ut_ad(thr); - ut_ad((op_type != TRX_UNDO_INSERT_OP) - || (clust_entry && !update && !rec)); + ut_ad(!clust_entry || (!update && !rec)); trx = thr_get_trx(thr); @@ -1253,8 +1241,7 @@ trx_undo_report_row_operation( /* If the undo log is not assigned yet, assign one */ - switch (op_type) { - case TRX_UNDO_INSERT_OP: + if (clust_entry) { undo = trx->insert_undo; if (undo == NULL) { @@ -1270,10 +1257,7 @@ trx_undo_report_row_operation( ut_ad(err == DB_SUCCESS); } - break; 
- default: - ut_ad(op_type == TRX_UNDO_MODIFY_OP); - + } else { undo = trx->update_undo; if (undo == NULL) { @@ -1297,23 +1281,15 @@ trx_undo_report_row_operation( buf_block_dbg_add_level(undo_block, SYNC_TRX_UNDO_PAGE); do { - page_t* undo_page; - ulint offset; - - undo_page = buf_block_get_frame(undo_block); ut_ad(page_no == buf_block_get_page_no(undo_block)); - switch (op_type) { - case TRX_UNDO_INSERT_OP: - offset = trx_undo_page_report_insert( - undo_page, trx, index, clust_entry, &mtr); - break; - default: - ut_ad(op_type == TRX_UNDO_MODIFY_OP); - offset = trx_undo_page_report_modify( + page_t* undo_page = buf_block_get_frame(undo_block); + ulint offset = clust_entry + ? trx_undo_page_report_insert( + undo_page, trx, index, clust_entry, &mtr) + : trx_undo_page_report_modify( undo_page, trx, index, rec, offsets, update, cmpl_info, &mtr); - } if (UNIV_UNLIKELY(offset == 0)) { /* The record did not fit on the page. We erase the @@ -1364,7 +1340,7 @@ trx_undo_report_row_operation( mutex_exit(&trx->undo_mutex); *roll_ptr = trx_undo_build_roll_ptr( - op_type == TRX_UNDO_INSERT_OP, + clust_entry != NULL, rseg->id, page_no, offset); return(DB_SUCCESS); } diff --git a/storage/xtradb/trx/trx0roll.cc b/storage/xtradb/trx/trx0roll.cc index d228743d300..335ef8859c4 100644 --- a/storage/xtradb/trx/trx0roll.cc +++ b/storage/xtradb/trx/trx0roll.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2016, 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under @@ -818,6 +818,7 @@ DECLARE_THREAD(trx_rollback_or_clean_all_recovered)( /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); ut_ad(!srv_read_only_mode); #ifdef UNIV_PFS_THREAD @@ -828,6 +829,7 @@ DECLARE_THREAD(trx_rollback_or_clean_all_recovered)( trx_rollback_or_clean_is_active = false; + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit and not use return() to exit. */ diff --git a/storage/xtradb/trx/trx0rseg.cc b/storage/xtradb/trx/trx0rseg.cc index 003d1036a8c..16fa334872b 100644 --- a/storage/xtradb/trx/trx0rseg.cc +++ b/storage/xtradb/trx/trx0rseg.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -293,14 +294,13 @@ trx_rseg_create_instance( } } -/********************************************************************* -Creates a rollback segment. -@return pointer to new rollback segment if create successful */ +/** Create a rollback segment. 
+@param[in] space undo tablespace ID +@return pointer to new rollback segment +@retval NULL on failure */ UNIV_INTERN trx_rseg_t* -trx_rseg_create( -/*============*/ - ulint space) /*!< in: id of UNDO tablespace */ +trx_rseg_create(ulint space) { mtr_t mtr; ulint slot_no; @@ -323,22 +323,21 @@ trx_rseg_create( page_no = trx_rseg_header_create( space, 0, ULINT_MAX, slot_no, &mtr); - ut_a(page_no != FIL_NULL); - - sys_header = trx_sysf_get(&mtr); + if (page_no != FIL_NULL) { + sys_header = trx_sysf_get(&mtr); - id = trx_sysf_rseg_get_space(sys_header, slot_no, &mtr); - ut_a(id == space); + id = trx_sysf_rseg_get_space(sys_header, slot_no, &mtr); + ut_a(id == space); - zip_size = space ? fil_space_get_zip_size(space) : 0; + zip_size = space ? fil_space_get_zip_size(space) : 0; - rseg = trx_rseg_mem_create( - slot_no, space, zip_size, page_no, - purge_sys->ib_bh, &mtr); + rseg = trx_rseg_mem_create( + slot_no, space, zip_size, page_no, + purge_sys->ib_bh, &mtr); + } } mtr_commit(&mtr); - return(rseg); } diff --git a/storage/xtradb/trx/trx0sys.cc b/storage/xtradb/trx/trx0sys.cc index 558fe8a2c49..9accb4ef303 100644 --- a/storage/xtradb/trx/trx0sys.cc +++ b/storage/xtradb/trx/trx0sys.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1064,18 +1064,12 @@ trx_sys_create_rsegs( ulint new_rsegs = n_rsegs - n_used; for (i = 0; i < new_rsegs; ++i) { - ulint space; + ulint space_id; + space_id = (n_spaces == 0) ? 0 + : (srv_undo_space_id_start + i % n_spaces); - /* Tablespace 0 is the system tablespace. All UNDO - log tablespaces start from 1. 
*/ - - if (n_spaces > 0) { - space = (i % n_spaces) + 1; - } else { - space = 0; /* System tablespace */ - } - - if (trx_rseg_create(space) != NULL) { + /* Tablespace 0 is the system tablespace. */ + if (trx_rseg_create(space_id) != NULL) { ++n_used; } else { break; diff --git a/storage/xtradb/trx/trx0trx.cc b/storage/xtradb/trx/trx0trx.cc index d0cb4a883cc..1d2f7ada54e 100644 --- a/storage/xtradb/trx/trx0trx.cc +++ b/storage/xtradb/trx/trx0trx.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2015, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2283,6 +2284,7 @@ state_ok: } } #endif /* WITH_WSREP */ + /**********************************************************************//** Prints info about a transaction. Acquires and releases lock_sys->mutex and trx_sys->mutex. */ @@ -2744,4 +2746,3 @@ trx_start_for_ddl_low( ut_error; } - diff --git a/storage/xtradb/usr/usr0sess.cc b/storage/xtradb/usr/usr0sess.cc index ab7ba6bea09..e1bd71ff1a0 100644 --- a/storage/xtradb/usr/usr0sess.cc +++ b/storage/xtradb/usr/usr0sess.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -48,8 +49,6 @@ sess_open(void) sess->trx = trx_allocate_for_background(); sess->trx->sess = sess; - UT_LIST_INIT(sess->graphs); - return(sess); } diff --git a/strings/ctype-utf8.c b/strings/ctype-utf8.c index 683c80f643d..56330bd68cb 100644 --- a/strings/ctype-utf8.c +++ b/strings/ctype-utf8.c @@ -1,5 +1,5 @@ /* Copyright (c) 2000, 2013, Oracle and/or its affiliates. - Copyright (c) 2009, 2016, MariaDB + Copyright (c) 2009, 2017, MariaDB This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public @@ -5114,7 +5114,9 @@ static int my_uni_utf8_no_range(CHARSET_INFO *cs __attribute__((unused)), { /* Fall through all cases!!! */ case 3: r[2]= (uchar) (0x80 | (wc & 0x3f)); wc= wc >> 6; wc |= 0x800; + /* fall through */ case 2: r[1]= (uchar) (0x80 | (wc & 0x3f)); wc= wc >> 6; wc |= 0xc0; + /* fall through */ case 1: r[0]= (uchar) wc; } return count; @@ -7497,8 +7499,11 @@ my_wc_mb_utf8mb4(CHARSET_INFO *cs __attribute__((unused)), switch (count) { /* Fall through all cases!!! */ case 4: r[3] = (uchar) (0x80 | (wc & 0x3f)); wc = wc >> 6; wc |= 0x10000; + /* fall through */ case 3: r[2] = (uchar) (0x80 | (wc & 0x3f)); wc = wc >> 6; wc |= 0x800; + /* fall through */ case 2: r[1] = (uchar) (0x80 | (wc & 0x3f)); wc = wc >> 6; wc |= 0xc0; + /* fall through */ case 1: r[0] = (uchar) wc; } return count; @@ -7529,8 +7534,11 @@ my_wc_mb_utf8mb4_no_range(CHARSET_INFO *cs __attribute__((unused)), { /* Fall through all cases!!! 
*/ case 4: r[3]= (uchar) (0x80 | (wc & 0x3f)); wc= wc >> 6; wc |= 0x10000; + /* fall through */ case 3: r[2]= (uchar) (0x80 | (wc & 0x3f)); wc= wc >> 6; wc |= 0x800; + /* fall through */ case 2: r[1]= (uchar) (0x80 | (wc & 0x3f)); wc= wc >> 6; wc |= 0xc0; + /* fall through */ case 1: r[0]= (uchar) wc; } return count; diff --git a/strings/ctype.c b/strings/ctype.c index 12d511162d7..0aed6c8bf52 100644 --- a/strings/ctype.c +++ b/strings/ctype.c @@ -762,7 +762,7 @@ static int cs_value(MY_XML_PARSER *st,const char *attr, size_t len) /* Rules: Context */ case _CS_CONTEXT: - if (len < sizeof(i->context) + 1) + if (len < sizeof(i->context)) { memcpy(i->context, attr, len); i->context[len]= '\0'; diff --git a/strings/dtoa.c b/strings/dtoa.c index fdf7bceddfd..a16ec93d3eb 100644 --- a/strings/dtoa.c +++ b/strings/dtoa.c @@ -1,4 +1,5 @@ /* Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. + Copyright (c) 2017, MariaDB Corporation. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public @@ -1377,7 +1378,7 @@ static double my_strtod_int(const char *s00, char **se, int *error, char *buf, s switch (*s) { case '-': sign= 1; - /* no break */ + /* fall through */ case '+': s++; goto break2; @@ -1467,6 +1468,7 @@ static double my_strtod_int(const char *s00, char **se, int *error, char *buf, s switch (c= *s) { case '-': esign= 1; + /* fall through */ case '+': c= *++s; } @@ -2360,7 +2362,7 @@ static char *dtoa(double dd, int mode, int ndigits, int *decpt, int *sign, break; case 2: leftright= 0; - /* no break */ + /* fall through */ case 4: if (ndigits <= 0) ndigits= 1; @@ -2368,7 +2370,7 @@ static char *dtoa(double dd, int mode, int ndigits, int *decpt, int *sign, break; case 3: leftright= 0; - /* no break */ + /* fall through */ case 5: i= ndigits + k + 1; ilim= i; diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index 1b98622313e..b37e0e8bc2c 100644 --- 
a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -1096,6 +1096,7 @@ static void test_wl4435_2() \ rs_metadata= mysql_stmt_result_metadata(ps); \ fields= mysql_fetch_fields(rs_metadata); \ + mysql_free_result(rs_metadata); \ \ rc= mysql_stmt_bind_result(ps, &psp); \ check_execute(ps, rc); \ diff --git a/unittest/mysys/ma_dyncol-t.c b/unittest/mysys/ma_dyncol-t.c index b3fff638b65..3b43c10a6a8 100644 --- a/unittest/mysys/ma_dyncol-t.c +++ b/unittest/mysys/ma_dyncol-t.c @@ -687,6 +687,9 @@ void test_update_many(uint *column_numbers, uint *column_values, err: ok(rc, "%s", "update_many"); /* cleanup */ + free(val); + free(upd); + free(res); mariadb_dyncol_free(&str1); mariadb_dyncol_free(&str2); } |