author    Marko Mäkelä <marko.makela@mariadb.com>    2017-09-21 08:12:19 +0300
committer Marko Mäkelä <marko.makela@mariadb.com>    2017-09-21 08:12:19 +0300
commit    e3d44f5d62672478c40294d6e46d9f066e418cac (patch)
tree      44b4d1b9d94ced780d4f369f84eddc36791470be
parent    d176be8aeaecdb20a963fbd126929ddcd5bd98f3 (diff)
parent    f70865bc9e540767d8afc3cc2cbe0e1c92a8c0be (diff)
download  mariadb-git-e3d44f5d62672478c40294d6e46d9f066e418cac.tar.gz
Merge bb-10.2-ext into 10.3
-rw-r--r--  CMakeLists.txt | 6
-rw-r--r--  client/mysql.cc | 1
-rw-r--r--  cmake/CMakeLis.txt | 0
-rw-r--r--  cmake/bison.cmake | 20
-rw-r--r--  cmake/install_macros.cmake | 64
-rw-r--r--  cmake/plugin.cmake | 11
-rw-r--r--  cmake/sign.cmake.in | 18
-rw-r--r--  cmake/submodules.cmake | 4
-rw-r--r--  debian/control | 28
-rw-r--r--  debian/libmariadb-dev.install | 2
-rw-r--r--  debian/libmariadb-dev.links | 2
-rw-r--r--  debian/libmariadb3.install | 4
-rw-r--r--  debian/libmariadbclient18.install | 1
-rw-r--r--  debian/libmysqlclient18.install | 1
-rwxr-xr-x  debian/rules | 1
-rw-r--r--  extra/mariabackup/backup_mysql.cc | 2
-rw-r--r--  extra/yassl/CMakeLists.txt | 5
-rw-r--r--  extra/yassl/include/openssl/ssl.h | 2
-rw-r--r--  extra/yassl/src/ssl.cpp | 2
-rw-r--r--  extra/yassl/taocrypt/CMakeLists.txt | 4
-rw-r--r--  include/CMakeLists.txt | 19
-rw-r--r--  include/my_handler_errors.h | 26
-rw-r--r--  include/ssl_compat.h | 23
-rw-r--r--  libmysqld/CMakeLists.txt | 9
-rwxr-xr-x  mysql-test/mysql-test-run.pl | 7
-rw-r--r--  mysql-test/r/alter_table.result | 10
-rw-r--r--  mysql-test/r/check_constraint.result | 10
-rw-r--r--  mysql-test/r/commit_1innodb.result | 2
-rw-r--r--  mysql-test/r/count_distinct.result | 25
-rw-r--r--  mysql-test/r/create_drop_binlog.result | 2
-rw-r--r--  mysql-test/r/create_drop_view.result | 2
-rw-r--r--  mysql-test/r/date_formats.result | 15
-rw-r--r--  mysql-test/r/drop.result | 8
-rw-r--r--  mysql-test/r/func_math.result | 5
-rw-r--r--  mysql-test/r/func_misc.result | 17
-rw-r--r--  mysql-test/r/get_diagnostics.result | 2
-rw-r--r--  mysql-test/r/grant.result | 2
-rw-r--r--  mysql-test/r/insert.result | 28
-rw-r--r--  mysql-test/r/locale.result | 18
-rw-r--r--  mysql-test/r/log_tables-big.result | 9
-rw-r--r--  mysql-test/r/partition_symlink.result | 6
-rw-r--r--  mysql-test/r/profiling.result | 2
-rw-r--r--  mysql-test/r/range_interrupted-13751.result | 16
-rw-r--r--  mysql-test/r/select.result | 2
-rw-r--r--  mysql-test/r/select_jcl6.result | 2
-rw-r--r--  mysql-test/r/select_pkeycache.result | 2
-rw-r--r--  mysql-test/r/signal.result | 34
-rw-r--r--  mysql-test/r/signal_demo3.result | 42
-rw-r--r--  mysql-test/r/sp-error.result | 4
-rw-r--r--  mysql-test/r/sp-group.result | 2
-rw-r--r--  mysql-test/r/sp.result | 4
-rw-r--r--  mysql-test/r/symlink.result | 12
-rw-r--r--  mysql-test/r/view.result | 2
-rw-r--r--  mysql-test/r/warnings.result | 2
-rw-r--r--  mysql-test/r/win.result | 85
-rw-r--r--  mysql-test/r/win_as_arg_to_aggregate_func.result | 114
-rw-r--r--  mysql-test/r/win_insert_select.result | 10
-rw-r--r--  mysql-test/suite/encryption/r/innodb-spatial-index.result | 10
-rw-r--r--  mysql-test/suite/encryption/t/innodb-spatial-index.test | 13
-rw-r--r--  mysql-test/suite/federated/net_thd_crash-12951.result | 11
-rw-r--r--  mysql-test/suite/federated/net_thd_crash-12951.test | 23
-rw-r--r--  mysql-test/suite/funcs_1/r/innodb_views.result | 6
-rw-r--r--  mysql-test/suite/funcs_1/r/memory_views.result | 6
-rw-r--r--  mysql-test/suite/funcs_1/r/myisam_views-big.result | 6
-rw-r--r--  mysql-test/suite/innodb/r/innodb_bug46000.result | 2
-rw-r--r--  mysql-test/suite/innodb/r/row_format_redundant.result | 2
-rw-r--r--  mysql-test/suite/innodb/t/log_data_file_size.test | 3
-rw-r--r--  mysql-test/suite/innodb_gis/r/alter_spatial_index.result | 92
-rw-r--r--  mysql-test/suite/innodb_gis/t/alter_spatial_index.test | 80
-rw-r--r--  mysql-test/suite/innodb_zip/r/create_options.result | 4
-rw-r--r--  mysql-test/suite/innodb_zip/r/wl6501_crash_4.result | 5
-rw-r--r--  mysql-test/suite/innodb_zip/t/create_options.test | 2
-rw-r--r--  mysql-test/suite/parts/inc/part_exch_drop_tabs.inc | 3
-rw-r--r--  mysql-test/suite/parts/inc/part_exch_tabs.inc | 103
-rw-r--r--  mysql-test/suite/parts/r/partition_exch_myisam_innodb.result | 55
-rw-r--r--  mysql-test/suite/parts/r/partition_exch_qa_14.result | 310
-rw-r--r--  mysql-test/suite/parts/r/partition_exch_qa_15.result | 57
-rw-r--r--  mysql-test/suite/parts/r/partition_exch_qa_2.result | 63
-rw-r--r--  mysql-test/suite/parts/r/partition_exch_qa_3.result | 155
-rw-r--r--  mysql-test/suite/parts/r/partition_exch_qa_6.result | 57
-rw-r--r--  mysql-test/suite/parts/t/partition_exch_myisam_innodb.test | 14
-rw-r--r--  mysql-test/suite/parts/t/partition_exch_qa_14.test | 142
-rw-r--r--  mysql-test/suite/parts/t/partition_exch_qa_15.test | 18
-rw-r--r--  mysql-test/suite/parts/t/partition_exch_qa_2.test | 61
-rw-r--r--  mysql-test/suite/parts/t/partition_exch_qa_3.test | 36
-rw-r--r--  mysql-test/suite/parts/t/partition_exch_qa_6.test | 36
-rw-r--r--  mysql-test/suite/perfschema/r/misc.result | 4
-rw-r--r--  mysql-test/suite/perfschema/r/privilege.result | 32
-rw-r--r--  mysql-test/suite/rpl/r/rpl_create_drop_view.result | 2
-rw-r--r--  mysql-test/suite/rpl/r/rpl_gtid_errorhandling.result | 2
-rw-r--r--  mysql-test/suite/rpl/r/rpl_sp.result | 4
-rw-r--r--  mysql-test/suite/rpl/r/rpl_sp_variables.result | 28
-rw-r--r--  mysql-test/suite/rpl/t/rpl_gtid_errorhandling.test | 2
-rw-r--r--  mysql-test/suite/rpl/t/rpl_sp_variables.test | 28
-rw-r--r--  mysql-test/suite/sql_sequence/alter.result | 2
-rw-r--r--  mysql-test/suite/sql_sequence/create.result | 14
-rw-r--r--  mysql-test/suite/vcol/r/innodb_virtual_fk.result | 12
-rw-r--r--  mysql-test/suite/vcol/r/update.result | 10
-rw-r--r--  mysql-test/suite/vcol/r/vcol_supported_sql_funcs.result | 23
-rw-r--r--  mysql-test/suite/vcol/t/innodb_virtual_fk.test | 16
-rw-r--r--  mysql-test/suite/vcol/t/update.test | 14
-rw-r--r--  mysql-test/suite/vcol/t/vcol_supported_sql_funcs_main.inc | 10
-rw-r--r--  mysql-test/t/alter_table.test | 2
-rw-r--r--  mysql-test/t/check_constraint.test | 11
-rw-r--r--  mysql-test/t/count_distinct.test | 29
-rw-r--r--  mysql-test/t/date_formats.test | 9
-rw-r--r--  mysql-test/t/func_math.test | 4
-rw-r--r--  mysql-test/t/func_misc.test | 11
-rw-r--r--  mysql-test/t/insert.test | 29
-rw-r--r--  mysql-test/t/locale.test | 17
-rw-r--r--  mysql-test/t/log_tables-big.test | 9
-rw-r--r--  mysql-test/t/partition_symlink.test | 3
-rw-r--r--  mysql-test/t/range_interrupted-13751.test | 25
-rw-r--r--  mysql-test/t/symlink.test | 25
-rw-r--r--  mysql-test/t/win.test | 71
-rw-r--r--  mysql-test/t/win_as_arg_to_aggregate_func.test | 139
-rw-r--r--  mysql-test/t/win_insert_select.test | 6
-rw-r--r--  mysys_ssl/my_crypt.cc | 2
-rw-r--r--  mysys_ssl/my_md5.cc | 2
-rw-r--r--  mysys_ssl/openssl.c | 2
-rw-r--r--  plugin/server_audit/server_audit.c | 10
-rw-r--r--  scripts/wsrep_sst_rsync.sh | 2
-rw-r--r--  sql/CMakeLists.txt | 68
-rw-r--r--  sql/ha_partition.h | 4
-rw-r--r--  sql/handler.cc | 2
-rw-r--r--  sql/handler.h | 4
-rw-r--r--  sql/item.cc | 5
-rw-r--r--  sql/item_cmpfunc.cc | 10
-rw-r--r--  sql/item_func.cc | 3
-rw-r--r--  sql/item_strfunc.cc | 14
-rw-r--r--  sql/item_strfunc.h | 18
-rw-r--r--  sql/item_sum.cc | 23
-rw-r--r--  sql/item_timefunc.cc | 16
-rw-r--r--  sql/item_timefunc.h | 6
-rw-r--r--  sql/mysqld.cc | 8
-rw-r--r--  sql/share/errmsg-utf8.txt | 3
-rw-r--r--  sql/sql_class.h | 1
-rw-r--r--  sql/sql_partition.cc | 4
-rw-r--r--  sql/sql_partition_admin.cc | 7
-rw-r--r--  sql/sql_select.cc | 13
-rw-r--r--  sql/sql_table.cc | 2
-rw-r--r--  sql/sql_update.cc | 20
-rw-r--r--  sql/sys_vars.cc | 11
-rw-r--r--  sql/table.cc | 16
-rw-r--r--  sql/uniques.cc | 16
-rw-r--r--  sql/wsrep_sst.cc | 2
-rw-r--r--  storage/connect/CMakeLists.txt | 82
-rw-r--r--  storage/connect/ha_connect.cc | 56
-rw-r--r--  storage/connect/ha_connect.h | 7
-rw-r--r--  storage/connect/jsonudf.cpp | 2
-rw-r--r--  storage/connect/mycat.cc | 11
-rw-r--r--  storage/connect/myconn.cpp | 5
-rw-r--r--  storage/connect/mysql-test/connect/r/json_java_2.result | 17
-rw-r--r--  storage/connect/mysql-test/connect/r/json_java_3.result | 17
-rw-r--r--  storage/connect/mysql-test/connect/r/json_mongo_c.result | 17
-rw-r--r--  storage/connect/mysql-test/connect/r/mongo_c.result | 2
-rw-r--r--  storage/connect/mysql-test/connect/r/mongo_java_2.result | 2
-rw-r--r--  storage/connect/mysql-test/connect/r/mongo_java_3.result | 2
-rw-r--r--  storage/connect/mysql-test/connect/r/odbc_oracle.result | 32
-rw-r--r--  storage/connect/mysql-test/connect/r/tbl_thread.result | 54
-rw-r--r--  storage/connect/mysql-test/connect/std_data/Mongo2.jar | bin 3461358 -> 623907 bytes
-rw-r--r--  storage/connect/mysql-test/connect/std_data/Mongo3.jar | bin 1705776 -> 1705776 bytes
-rw-r--r--  storage/connect/mysql-test/connect/t/mongo_test.inc | 4
-rw-r--r--  storage/connect/mysql-test/connect/t/tbl_thread.test | 34
-rw-r--r--  storage/connect/myutil.cpp | 4
-rw-r--r--  storage/connect/reldef.cpp | 2
-rw-r--r--  storage/connect/reldef.h | 1
-rw-r--r--  storage/connect/tabext.cpp | 95
-rw-r--r--  storage/connect/tabext.h | 1
-rw-r--r--  storage/connect/tabjson.cpp | 7
-rw-r--r--  storage/connect/tabmysql.cpp | 17
-rw-r--r--  storage/connect/tabmysql.h | 1
-rw-r--r--  storage/connect/tabtbl.cpp | 58
-rw-r--r--  storage/connect/tabtbl.h | 1
-rw-r--r--  storage/connect/tabutil.cpp | 3
-rw-r--r--  storage/connect/valblk.cpp | 13
-rw-r--r--  storage/connect/valblk.h | 6
-rw-r--r--  storage/connect/value.cpp | 101
-rw-r--r--  storage/federated/ha_federated.cc | 69
-rw-r--r--  storage/innobase/btr/btr0btr.cc | 57
-rw-r--r--  storage/innobase/btr/btr0bulk.cc | 28
-rw-r--r--  storage/innobase/btr/btr0cur.cc | 89
-rw-r--r--  storage/innobase/btr/btr0defragment.cc | 1
-rw-r--r--  storage/innobase/btr/btr0pcur.cc | 9
-rw-r--r--  storage/innobase/btr/btr0sea.cc | 34
-rw-r--r--  storage/innobase/data/data0data.cc | 11
-rw-r--r--  storage/innobase/dict/dict0defrag_bg.cc | 4
-rw-r--r--  storage/innobase/dict/dict0dict.cc | 28
-rw-r--r--  storage/innobase/dict/dict0stats.cc | 12
-rw-r--r--  storage/innobase/fts/fts0fts.cc | 10
-rw-r--r--  storage/innobase/gis/gis0rtree.cc | 53
-rw-r--r--  storage/innobase/gis/gis0sea.cc | 56
-rw-r--r--  storage/innobase/handler/handler0alter.cc | 58
-rw-r--r--  storage/innobase/ibuf/ibuf0ibuf.cc | 9
-rw-r--r--  storage/innobase/include/data0data.h | 10
-rw-r--r--  storage/innobase/include/dict0dict.h | 30
-rw-r--r--  storage/innobase/include/dict0dict.ic | 14
-rw-r--r--  storage/innobase/include/dict0mem.h | 4
-rw-r--r--  storage/innobase/include/gis0rtree.ic | 3
-rw-r--r--  storage/innobase/include/ha_prototypes.h | 1
-rw-r--r--  storage/innobase/include/page0cur.ic | 4
-rw-r--r--  storage/innobase/include/page0page.h | 318
-rw-r--r--  storage/innobase/include/page0page.ic | 240
-rw-r--r--  storage/innobase/include/rem0rec.h | 71
-rw-r--r--  storage/innobase/lock/lock0lock.cc | 14
-rw-r--r--  storage/innobase/mtr/mtr0log.cc | 9
-rw-r--r--  storage/innobase/page/page0cur.cc | 56
-rw-r--r--  storage/innobase/page/page0page.cc | 20
-rw-r--r--  storage/innobase/page/page0zip.cc | 24
-rw-r--r--  storage/innobase/rem/rem0cmp.cc | 1
-rw-r--r--  storage/innobase/rem/rem0rec.cc | 126
-rw-r--r--  storage/innobase/row/row0import.cc | 4
-rw-r--r--  storage/innobase/row/row0ins.cc | 20
-rw-r--r--  storage/innobase/row/row0log.cc | 14
-rw-r--r--  storage/innobase/row/row0merge.cc | 4
-rw-r--r--  storage/innobase/row/row0mysql.cc | 5
-rw-r--r--  storage/innobase/row/row0purge.cc | 7
-rw-r--r--  storage/innobase/row/row0row.cc | 6
-rw-r--r--  storage/innobase/row/row0sel.cc | 70
-rw-r--r--  storage/innobase/row/row0uins.cc | 2
-rw-r--r--  storage/innobase/row/row0umod.cc | 4
-rw-r--r--  storage/innobase/row/row0undo.cc | 2
-rw-r--r--  storage/innobase/row/row0upd.cc | 17
-rw-r--r--  storage/innobase/row/row0vers.cc | 27
-rw-r--r--  storage/innobase/srv/srv0start.cc | 2
-rw-r--r--  storage/innobase/trx/trx0i_s.cc | 3
-rw-r--r--  storage/innobase/trx/trx0rec.cc | 4
-rw-r--r--  storage/innobase/trx/trx0sys.cc | 10
-rw-r--r--  storage/myisam/ha_myisam.cc | 15
-rw-r--r--  storage/test_sql_discovery/mysql-test/sql_discovery/simple.result | 2
-rw-r--r--  storage/tokudb/CMakeLists.txt | 9
-rw-r--r--  storage/tokudb/PerconaFT/buildheader/make_tdb.cc | 2
-rw-r--r--  storage/tokudb/PerconaFT/src/ydb-internal.h | 9
-rw-r--r--  storage/tokudb/PerconaFT/src/ydb_cursor.cc | 108
-rw-r--r--  storage/tokudb/ha_tokudb.cc | 8
-rw-r--r--  storage/tokudb/mysql-test/rpl/r/rpl_row_log_tokudb.result | 315
-rw-r--r--  storage/tokudb/mysql-test/rpl/t/rpl_row_log_tokudb-master.opt | 2
-rw-r--r--  storage/tokudb/mysql-test/rpl/t/rpl_row_log_tokudb.test | 14
-rw-r--r--  storage/tokudb/mysql-test/tokudb/r/dir_per_db.result | 10
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/dir_per_db.test | 17
-rw-r--r--  strings/CMakeLists.txt | 4
-rw-r--r--  support-files/policy/apparmor/usr.sbin.mysqld | 2
-rw-r--r--  vio/CMakeLists.txt | 4
-rw-r--r--  zlib/CMakeLists.txt | 5
244 files changed, 3637 insertions, 2237 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 709ae826dc5..7b2da1b7d2a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -475,6 +475,12 @@ ENDIF()
INCLUDE(CPack)
+IF(WIN32 AND SIGNCODE)
+ # Configure post-install script for authenticode signing
+ CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/cmake/sign.cmake.in sign.cmake @ONLY)
+ INSTALL(SCRIPT ${PROJECT_BINARY_DIR}/sign.cmake)
+ENDIF()
+
IF(NON_DISTRIBUTABLE_WARNING)
MESSAGE(WARNING "
You have linked MariaDB with GPLv3 libraries! You may not distribute the resulting binary. If you do, you will put yourself into a legal problem with Free Software Foundation.")
diff --git a/client/mysql.cc b/client/mysql.cc
index 4da4b7853ec..94269ccdaf7 100644
--- a/client/mysql.cc
+++ b/client/mysql.cc
@@ -1893,6 +1893,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
usage(1);
status.exit_status= 0;
mysql_end(-1);
+ break;
case 'I':
case '?':
usage(0);
diff --git a/cmake/CMakeLis.txt b/cmake/CMakeLis.txt
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/cmake/CMakeLis.txt
diff --git a/cmake/bison.cmake b/cmake/bison.cmake
index a6a6268c644..0e4a7b58a13 100644
--- a/cmake/bison.cmake
+++ b/cmake/bison.cmake
@@ -47,6 +47,21 @@ MACRO (RUN_BISON input_yy output_cc output_h name_prefix)
ENDIF()
ENDIF()
IF(BISON_USABLE)
+ # Workaround for VS regenerating output even
+ # when outputs are up-to-date. At least, fix output timestamp
+ # after build so that files that depend on generated header are
+ # not rebuilt.
+ IF(CMAKE_GENERATOR MATCHES "Visual Studio")
+ FIND_PROGRAM(TOUCH_EXECUTABLE touch DOC "Path to touch executable"
+ PATHS "C:/Program Files/Git/usr/bin"
+ "C:/Program Files (x86)/Git/usr/bin")
+ IF(TOUCH_EXECUTABLE)
+ SET(VS_FIX_OUTPUT_TIMESTAMPS
+ COMMAND ${TOUCH_EXECUTABLE} -r ${input_yy} ${output_cc}
+ COMMAND ${TOUCH_EXECUTABLE} -r ${input_yy} ${output_h})
+ ENDIF()
+ ENDIF()
+
ADD_CUSTOM_COMMAND(
OUTPUT ${output_cc}
${output_h}
@@ -54,8 +69,9 @@ MACRO (RUN_BISON input_yy output_cc output_h name_prefix)
--output=${output_cc}
--defines=${output_h}
${input_yy}
- DEPENDS ${input_yy}
- )
+ ${VS_FIX_OUTPUT_TIMESTAMPS}
+ DEPENDS ${input_yy}
+ )
ELSE()
# Bison is missing or not usable, e.g too old
IF(EXISTS ${output_cc} AND EXISTS ${output_h})
diff --git a/cmake/install_macros.cmake b/cmake/install_macros.cmake
index 88dc2a47c6b..5ca147d0d60 100644
--- a/cmake/install_macros.cmake
+++ b/cmake/install_macros.cmake
@@ -32,17 +32,8 @@ FUNCTION (INSTALL_DEBUG_SYMBOLS)
ENDIF()
SET(targets ${ARG_UNPARSED_ARGUMENTS})
FOREACH(target ${targets})
- GET_TARGET_PROPERTY(type ${target} TYPE)
- GET_TARGET_PROPERTY(location ${target} LOCATION)
- STRING(REPLACE ".exe" ".pdb" pdb_location ${location})
- STRING(REPLACE ".dll" ".pdb" pdb_location ${pdb_location})
- STRING(REPLACE ".lib" ".pdb" pdb_location ${pdb_location})
- IF(CMAKE_GENERATOR MATCHES "Visual Studio")
- STRING(REPLACE
- "${CMAKE_CFG_INTDIR}" "\${CMAKE_INSTALL_CONFIG_NAME}"
- pdb_location ${pdb_location})
- ENDIF()
-
+ GET_TARGET_PROPERTY(target_type ${target} TYPE)
+
set(comp "")
IF(target MATCHES "mysqld" OR type MATCHES "MODULE")
@@ -61,11 +52,9 @@ FUNCTION (INSTALL_DEBUG_SYMBOLS)
IF(NOT comp)
SET(comp Debuginfo_archive_only) # not in MSI
ENDIF()
- IF(type MATCHES "STATIC")
- # PDB for static libraries might be unsupported http://public.kitware.com/Bug/view.php?id=14600
- SET(opt OPTIONAL)
- ENDIF()
- INSTALL(FILES ${pdb_location} DESTINATION ${ARG_INSTALL_LOCATION} COMPONENT ${comp} ${opt})
+ IF(NOT target_type MATCHES "STATIC")
+ INSTALL(FILES $<TARGET_PDB_FILE:${target}> DESTINATION ${ARG_INSTALL_LOCATION} COMPONENT ${comp})
+ ENDIF()
ENDFOREACH()
ENDIF()
ENDFUNCTION()
@@ -212,37 +201,22 @@ IF(WIN32)
ENDIF()
ENDIF()
-MACRO(SIGN_TARGET)
- CMAKE_PARSE_ARGUMENTS(ARG "" "COMPONENT" "" ${ARGN})
- SET(target ${ARG_UNPARSED_ARGUMENTS})
- IF(ARG_COMPONENT)
- SET(comp COMPONENT ${ARG_COMPONENT})
- ELSE()
- SET(comp)
- ENDIF()
- GET_TARGET_PROPERTY(target_type ${target} TYPE)
- IF(target_type AND NOT target_type MATCHES "STATIC")
- GET_TARGET_PROPERTY(target_location ${target} LOCATION)
- IF(CMAKE_GENERATOR MATCHES "Visual Studio")
- STRING(REPLACE "${CMAKE_CFG_INTDIR}" "\${CMAKE_INSTALL_CONFIG_NAME}"
- target_location ${target_location})
- ENDIF()
- INSTALL(CODE
- "EXECUTE_PROCESS(COMMAND
- \"${SIGNTOOL_EXECUTABLE}\" verify /pa /q \"${target_location}\"
- RESULT_VARIABLE ERR)
- IF(NOT \${ERR} EQUAL 0)
- EXECUTE_PROCESS(COMMAND
- \"${SIGNTOOL_EXECUTABLE}\" sign ${SIGNTOOL_PARAMETERS} \"${target_location}\"
- RESULT_VARIABLE ERR)
+
+FUNCTION(SIGN_TARGET target)
+ IF(NOT SIGNCODE)
+ RETURN()
ENDIF()
- IF(NOT \${ERR} EQUAL 0)
- MESSAGE(FATAL_ERROR \"Error signing '${target_location}'\")
+ GET_TARGET_PROPERTY(target_type ${target} TYPE)
+ IF((NOT target_type) OR (target_type MATCHES "STATIC"))
+ RETURN()
ENDIF()
- " ${comp})
- ENDIF()
-ENDMACRO()
-
+ # Mark executable for signing by creating empty *.signme file
+ # The actual signing happens in preinstall step
+ # (which traverses
+ ADD_CUSTOM_COMMAND(TARGET ${target} POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E touch "$<TARGET_FILE:${target}>.signme"
+ )
+ENDFUNCTION()
# Installs targets, also installs pdbs on Windows.
#
diff --git a/cmake/plugin.cmake b/cmake/plugin.cmake
index e6bb64b19c6..2a2f206dcef 100644
--- a/cmake/plugin.cmake
+++ b/cmake/plugin.cmake
@@ -219,7 +219,8 @@ MACRO(MYSQL_ADD_PLUGIN)
# Install dynamic library
IF(ARG_COMPONENT)
IF(CPACK_COMPONENTS_ALL AND
- NOT CPACK_COMPONENTS_ALL MATCHES ${ARG_COMPONENT})
+ NOT CPACK_COMPONENTS_ALL MATCHES ${ARG_COMPONENT}
+ AND INSTALL_SYSCONF2DIR)
IF (ARG_STORAGE_ENGINE)
SET(ver " = %{version}-%{release}")
ELSE()
@@ -235,9 +236,11 @@ MACRO(MYSQL_ADD_PLUGIN)
SET(CPACK_RPM_${ARG_COMPONENT}_PACKAGE_PROVIDES "cmake_bug_13248" PARENT_SCOPE)
SET(CPACK_RPM_${ARG_COMPONENT}_PACKAGE_OBSOLETES "cmake_bug_13248" PARENT_SCOPE)
SET(CPACK_RPM_${ARG_COMPONENT}_USER_FILELIST ${ignored} PARENT_SCOPE)
- IF(NOT ARG_CLIENT AND NOT ARG_CONFIG AND UNIX)
- SET(ARG_CONFIG "${CMAKE_CURRENT_BINARY_DIR}/${target}.cnf")
- FILE(WRITE ${ARG_CONFIG} "[mariadb]\nplugin-load-add=${ARG_MODULE_OUTPUT_NAME}.so\n")
+ IF(NOT ARG_CLIENT AND UNIX)
+ IF (NOT ARG_CONFIG)
+ SET(ARG_CONFIG "${CMAKE_CURRENT_BINARY_DIR}/${target}.cnf")
+ FILE(WRITE ${ARG_CONFIG} "[mariadb]\nplugin-load-add=${ARG_MODULE_OUTPUT_NAME}.so\n")
+ ENDIF()
INSTALL(FILES ${ARG_CONFIG} COMPONENT ${ARG_COMPONENT} DESTINATION ${INSTALL_SYSCONF2DIR})
SET(CPACK_RPM_${ARG_COMPONENT}_USER_FILELIST ${ignored} "%config(noreplace) ${INSTALL_SYSCONF2DIR}/*" PARENT_SCOPE)
ENDIF()
diff --git a/cmake/sign.cmake.in b/cmake/sign.cmake.in
new file mode 100644
index 00000000000..61ae38d152d
--- /dev/null
+++ b/cmake/sign.cmake.in
@@ -0,0 +1,18 @@
+FILE(GLOB_RECURSE files "@CMAKE_BINARY_DIR@/*.signme")
+MESSAGE(STATUS "signing files")
+FOREACH(f ${files})
+ STRING(REPLACE ".signme" "" exe_location "${f}")
+
+ string (REPLACE ";" " " params "@SIGNTOOL_PARAMETERS@")
+ #MESSAGE("@SIGNTOOL_EXECUTABLE@" sign ${params} "${exe_location}")
+
+ EXECUTE_PROCESS(COMMAND
+ "@SIGNTOOL_EXECUTABLE@" sign @SIGNTOOL_PARAMETERS@ "${exe_location}"
+ RESULT_VARIABLE ERR)
+ IF(NOT ${ERR} EQUAL 0)
+ MESSAGE( "Error ${ERR} signing ${exe_location}")
+ ELSE()
+ FILE(REMOVE ${f})
+ ENDIF()
+
+ENDFOREACH()
diff --git a/cmake/submodules.cmake b/cmake/submodules.cmake
index 86e3d41eb77..672a3affc1d 100644
--- a/cmake/submodules.cmake
+++ b/cmake/submodules.cmake
@@ -7,7 +7,7 @@ IF(GIT_EXECUTABLE AND EXISTS "${CMAKE_SOURCE_DIR}/.git")
IF(git_config_get_result EQUAL 128 OR cmake_update_submodules MATCHES no)
SET(update_result 0)
ELSEIF (cmake_update_submodules MATCHES force)
- MESSAGE("-- Updating submodules (forced)")
+ MESSAGE(STATUS "Updating submodules (forced)")
EXECUTE_PROCESS(COMMAND "${GIT_EXECUTABLE}" submodule update --init --force
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
RESULT_VARIABLE update_result)
@@ -16,7 +16,7 @@ IF(GIT_EXECUTABLE AND EXISTS "${CMAKE_SOURCE_DIR}/.git")
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
RESULT_VARIABLE update_result)
ELSE()
- MESSAGE("-- Updating submodules")
+ MESSAGE(STATUS "Updating submodules")
EXECUTE_PROCESS(COMMAND "${GIT_EXECUTABLE}" submodule update --init
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
RESULT_VARIABLE update_result)
diff --git a/debian/control b/debian/control
index d51ec7cf13e..5f41323bb63 100644
--- a/debian/control
+++ b/debian/control
@@ -48,14 +48,11 @@ Conflicts: mariadb-galera-server-10.0 (<< 10.0.5),
mariadb-server-5.1,
mariadb-server-5.2,
mariadb-server-5.3,
- mariadb-server-5.5 (<< 5.5.33)
-Replaces: libmariadbclient18,
- libmysqlclient18,
- libmysqlclient19,
+ mariadb-server-5.5 (<< 5.5.33),
+ libmariadbclient18 (<< 10.2.0)
+Replaces: libmysqlclient19,
libmysqlclient20
-Provides: libmariadbclient18,
- libmysqlclient18,
- libmysqlclient19,
+Provides: libmysqlclient19,
libmysqlclient20
Description: MariaDB database client library
MariaDB is a fast, stable and true multi-user, multi-threaded SQL database
@@ -69,27 +66,29 @@ Package: libmariadbclient18
Section: libs
Architecture: any
Depends: libmariadb3 (= ${binary:Version}), ${misc:Depends}
+Replaces: libmariadbclient18
+Provides: libmariadbclient18
Description: Virtual package to satisfy external depends
MariaDB is a fast, stable and true multi-user, multi-threaded SQL database
server. SQL (Structured Query Language) is the most popular database query
language in the world. The main goals of MariaDB are speed, robustness and
ease of use.
.
- This is an empty package that depends on the libmariadb3
- package.
+ This package provides compatibility symlinks for libmariadb3
Package: libmysqlclient18
Section: libs
Architecture: any
Depends: libmariadb3 (= ${binary:Version}), ${misc:Depends}
+Replaces: libmysqlclient18
+Provides: libmysqlclient18
Description: Virtual package to satisfy external depends
MariaDB is a fast, stable and true multi-user, multi-threaded SQL database
server. SQL (Structured Query Language) is the most popular database query
language in the world. The main goals of MariaDB are speed, robustness and
ease of use.
.
- This is an empty package that depends on the libmariadb3
- package.
+ This package provides compatibility symlinks for libmariadb3
Package: libmariadb-dev
Architecture: any
@@ -175,8 +174,7 @@ Description: MariaDB database common files (e.g. /etc/mysql/conf.d/mariadb.cnf)
Package: mariadb-client-core-10.3
Architecture: any
-Depends: libmariadb3 (>= ${source:Version}),
- mariadb-common (>= ${source:Version}),
+Depends: mariadb-common (>= ${source:Version}),
${misc:Depends},
${shlibs:Depends}
Conflicts: mariadb-client-10.0,
@@ -243,7 +241,6 @@ Description: MariaDB database core client binaries
Package: mariadb-client-10.3
Architecture: any
Depends: debianutils (>=1.6),
- libmariadb3 (>= ${source:Version}),
mariadb-client-core-10.3 (>= ${source:Version}),
mariadb-common,
${misc:Depends},
@@ -299,8 +296,7 @@ Description: MariaDB database client binaries
Package: mariadb-server-core-10.3
Architecture: any
-Depends: libmariadb3 (>= ${binary:Version}),
- mariadb-common (>= ${source:Version}),
+Depends: mariadb-common (>= ${source:Version}),
${misc:Depends},
${shlibs:Depends}
Conflicts: mariadb-server-core-10.0,
diff --git a/debian/libmariadb-dev.install b/debian/libmariadb-dev.install
index 25092700f15..e62aad1b43f 100644
--- a/debian/libmariadb-dev.install
+++ b/debian/libmariadb-dev.install
@@ -1,6 +1,8 @@
usr/bin/mysql_config
usr/include/mysql
usr/lib/*/libmariadb.so
+usr/lib/*/libmysqlclient.so
+usr/lib/*/libmariadbclient.so
usr/lib/*/libmariadbclient.a
usr/lib/*/libmysqlservices.a
usr/share/aclocal/mysql.m4
diff --git a/debian/libmariadb-dev.links b/debian/libmariadb-dev.links
deleted file mode 100644
index 0076791dcfa..00000000000
--- a/debian/libmariadb-dev.links
+++ /dev/null
@@ -1,2 +0,0 @@
-usr/lib/libmysqlclient.so.18 usr/lib/libmysqlclient.so
-usr/lib/libmysqlclient_r.so.18 usr/lib/libmysqlclient_r.so
diff --git a/debian/libmariadb3.install b/debian/libmariadb3.install
index cdb1e5918df..58e2bd1dcb2 100644
--- a/debian/libmariadb3.install
+++ b/debian/libmariadb3.install
@@ -1,5 +1,5 @@
-usr/lib/*/libmariadbclient.so.*
-usr/lib/*/libmysqlclient.so.*
+usr/lib/*/libmysqlclient.so.19
+usr/lib/*/libmysqlclient.so.20
usr/lib/*/libmariadb.so.*
usr/lib/mysql/plugin/dialog.so
usr/lib/mysql/plugin/mysql_clear_password.so
diff --git a/debian/libmariadbclient18.install b/debian/libmariadbclient18.install
new file mode 100644
index 00000000000..e3939fa4fc7
--- /dev/null
+++ b/debian/libmariadbclient18.install
@@ -0,0 +1 @@
+usr/lib/*/libmariadbclient.so.18
diff --git a/debian/libmysqlclient18.install b/debian/libmysqlclient18.install
new file mode 100644
index 00000000000..3193873093f
--- /dev/null
+++ b/debian/libmysqlclient18.install
@@ -0,0 +1 @@
+usr/lib/*/libmysqlclient.so.18
diff --git a/debian/rules b/debian/rules
index 09e91988207..cd910b5cc6d 100755
--- a/debian/rules
+++ b/debian/rules
@@ -162,6 +162,7 @@ override_dh_auto_install:
install -D -m 644 debian/mariadb-server-10.3.py $(TMP)/usr/share/apport/package-hooks/source_mariadb-10.3.py
# Install libmariadbclient18 compatibility links
+ ln -s /usr/lib/$(DEB_HOST_MULTIARCH)/libmariadb.so.3 $(TMP)/usr/lib/$(DEB_HOST_MULTIARCH)/libmariadbclient.so
ln -s /usr/lib/$(DEB_HOST_MULTIARCH)/libmariadb.so.3 $(TMP)/usr/lib/$(DEB_HOST_MULTIARCH)/libmariadbclient.so.18
# Install libmysqlclientclientXX compatibility links
diff --git a/extra/mariabackup/backup_mysql.cc b/extra/mariabackup/backup_mysql.cc
index cebbbf3a28c..aa683e320fc 100644
--- a/extra/mariabackup/backup_mysql.cc
+++ b/extra/mariabackup/backup_mysql.cc
@@ -1647,7 +1647,7 @@ mdl_lock_init()
}
}
-#ifndef DBUF_OFF
+#ifndef DBUG_OFF
/* Test that table is really locked, if lock_ddl_per_table is set.
The test is executed in DBUG_EXECUTE_IF block inside mdl_lock_table().
*/
diff --git a/extra/yassl/CMakeLists.txt b/extra/yassl/CMakeLists.txt
index f3232896c6a..c456af9be15 100644
--- a/extra/yassl/CMakeLists.txt
+++ b/extra/yassl/CMakeLists.txt
@@ -30,8 +30,3 @@ SET(YASSL_SOURCES src/buffer.cpp src/cert_wrapper.cpp src/crypto_wrapper.cpp sr
ADD_CONVENIENCE_LIBRARY(yassl ${YASSL_SOURCES})
RESTRICT_SYMBOL_EXPORTS(yassl)
-IF(MSVC)
- INSTALL_DEBUG_TARGET(yassl DESTINATION ${INSTALL_LIBDIR}/debug)
-ENDIF()
-
-
diff --git a/extra/yassl/include/openssl/ssl.h b/extra/yassl/include/openssl/ssl.h
index 9ec99b46c1f..ede4581fa13 100644
--- a/extra/yassl/include/openssl/ssl.h
+++ b/extra/yassl/include/openssl/ssl.h
@@ -540,7 +540,7 @@ void MD5_Final(unsigned char*, MD5_CTX*);
/* yaSSL extensions */
int SSL_set_compression(SSL*); /* turn on yaSSL zlib compression */
-char *yaSSL_ASN1_TIME_to_string(ASN1_TIME *time, char *buf, size_t len);
+char *yaSSL_ASN1_TIME_to_string(const ASN1_TIME *time, char *buf, size_t len);
#include "transport_types.h"
diff --git a/extra/yassl/src/ssl.cpp b/extra/yassl/src/ssl.cpp
index cb845a34c8a..51e1bd1ab21 100644
--- a/extra/yassl/src/ssl.cpp
+++ b/extra/yassl/src/ssl.cpp
@@ -1736,7 +1736,7 @@ unsigned long ERR_get_error()
// end stunnel needs
- char *yaSSL_ASN1_TIME_to_string(ASN1_TIME *time, char *buf, size_t len)
+ char *yaSSL_ASN1_TIME_to_string(const ASN1_TIME *time, char *buf, size_t len)
{
tm t;
static const char *month_names[12]=
diff --git a/extra/yassl/taocrypt/CMakeLists.txt b/extra/yassl/taocrypt/CMakeLists.txt
index eeed35fd6f4..7d95348c6e7 100644
--- a/extra/yassl/taocrypt/CMakeLists.txt
+++ b/extra/yassl/taocrypt/CMakeLists.txt
@@ -32,7 +32,3 @@ SET(TAOCRYPT_SOURCES src/aes.cpp src/aestables.cpp src/algebra.cpp src/arc4.cpp
ADD_CONVENIENCE_LIBRARY(taocrypt ${TAOCRYPT_SOURCES})
RESTRICT_SYMBOL_EXPORTS(taocrypt)
-IF(MSVC)
- INSTALL_DEBUG_TARGET(taocrypt DESTINATION ${INSTALL_LIBDIR}/debug)
-ENDIF()
-
diff --git a/include/CMakeLists.txt b/include/CMakeLists.txt
index e47522a4931..8af7a1281b1 100644
--- a/include/CMakeLists.txt
+++ b/include/CMakeLists.txt
@@ -93,3 +93,22 @@ IF(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR})
INSTALL_PRIVATE(${CMAKE_CURRENT_SOURCE_DIR})
ENDIF()
+MACRO(INSTALL_COMPAT_HEADER file footer)
+ INSTALL(CODE "FILE(WRITE \$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/${INSTALL_INCLUDEDIR}/${file}
+\"/* Do not edit this file directly, it was auto-generated by cmake */
+
+#warning This file should not be included by clients, include only <mysql.h>
+${footer}
+\")" COMPONENT Development)
+ENDMACRO()
+
+INSTALL_COMPAT_HEADER(my_global.h "")
+INSTALL_COMPAT_HEADER(my_config.h "")
+INSTALL_COMPAT_HEADER(my_sys.h "")
+INSTALL_COMPAT_HEADER(mysql_version.h "
+#include <mariadb_version.h>
+#define LIBMYSQL_VERSION MARIADB_CLIENT_VERSION_STR
+")
+INSTALL_COMPAT_HEADER(mysql_com.h "
+#include <mariadb_com.h>
+")
diff --git a/include/my_handler_errors.h b/include/my_handler_errors.h
index 4b85832a6de..f682861f3ab 100644
--- a/include/my_handler_errors.h
+++ b/include/my_handler_errors.h
@@ -24,11 +24,11 @@
static const char *handler_error_messages[]=
{
/* 120 */
- "Didn't find key on read or update",
+ "Didn't find the key on read or update",
"Duplicate key on write or update",
"Internal (unspecified) error in handler",
- "Someone has changed the row since it was read (while the table was locked to prevent it)",
- "Wrong index given to function",
+ "Someone has changed the row since it was read (even though the table was locked to prevent it)",
+ "Wrong index given to a function",
"Undefined handler error 125",
"Index is corrupted",
"Table file is corrupted",
@@ -36,7 +36,7 @@ static const char *handler_error_messages[]=
"Undefined handler error 129",
/* 130 */
"Incorrect file format",
- "Command not supported by database",
+ "Command not supported by the engine",
"Old database file",
"No record read before update",
"Record was already deleted (or record file crashed)",
@@ -47,7 +47,7 @@ static const char *handler_error_messages[]=
"Too big row",
/* 140 */
"Wrong create options",
- "Duplicate unique key or constraint on write or update",
+ "Duplicate unique key on write or update",
"Unknown character set used in table",
"Conflicting table definitions in sub-tables of MERGE table",
"Table is crashed and last repair failed",
@@ -62,13 +62,13 @@ static const char *handler_error_messages[]=
"Cannot delete a parent row",
"No savepoint with that name",
"Non unique key block size",
- "The table does not exist in engine",
- "The table already existed in storage engine",
- "Could not connect to storage engine",
+ "The table does not exist in the storage engine",
+ "The table already existed in the storage engine",
+ "Could not connect to the storage engine",
"Unexpected null pointer found when using spatial index",
- "The table changed in storage engine",
+ "The table changed in the storage engine",
/* 160 */
- "There's no partition in table for the given value",
+ "There's no partition in the table for the given value",
"Row-based binary logging of row failed",
"Index needed in foreign key constraint",
"Upholding foreign key constraints would lead to a duplicate key error in some other table",
@@ -77,13 +77,13 @@ static const char *handler_error_messages[]=
"Failed to get next auto increment value",
"Failed to set row auto increment value",
"Unknown (generic) error from engine",
- "Record was not update. Original values was same as new values",
+ "Record was not updated. New values were the same as original values",
/* 170 */
"It is not possible to log this statement",
"The event was corrupt, leading to illegal data being read",
"The table is of a new format not supported by this version",
"The event could not be processed. No other handler error happened",
- "Got a fatal error during initialization of handler",
+ "Fatal error during initialization of handler",
"File too short; Expected more data in file",
"Read page with wrong checksum",
"Too many active concurrent transactions",
@@ -105,7 +105,7 @@ static const char *handler_error_messages[]=
"Too many words in a FTS phrase or proximity search",
"Table encrypted but decryption failed. This could be because correct encryption management plugin is not loaded, used encryption key is not available or encryption method does not match.",
"Foreign key cascade delete/update exceeds max depth",
- "Tablespace is missing for table",
+ "Tablespace is missing for a table",
"Sequence has been run out",
"Sequence values are conflicting"
};
diff --git a/include/ssl_compat.h b/include/ssl_compat.h
index b0e3ed497cd..568af6e1db4 100644
--- a/include/ssl_compat.h
+++ b/include/ssl_compat.h
@@ -25,8 +25,8 @@
#if OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined(LIBRESSL_VERSION_NUMBER)
#define HAVE_OPENSSL11 1
+#define SSL_LIBRARY OpenSSL_version(OPENSSL_VERSION)
#define ERR_remove_state(X) ERR_clear_error()
-#define EVP_MD_CTX_cleanup(X) EVP_MD_CTX_reset(X)
#define EVP_CIPHER_CTX_SIZE 168
#define EVP_MD_CTX_SIZE 48
#undef EVP_MD_CTX_init
@@ -34,8 +34,23 @@
#undef EVP_CIPHER_CTX_init
#define EVP_CIPHER_CTX_init(X) do { bzero((X), EVP_CIPHER_CTX_SIZE); EVP_CIPHER_CTX_reset(X); } while(0)
+/*
+ Macros below are deprecated. OpenSSL 1.1 may define them or not,
+ depending on how it was built.
+*/
+#undef ERR_free_strings
+#define ERR_free_strings()
+#undef EVP_cleanup
+#define EVP_cleanup()
+#undef CRYPTO_cleanup_all_ex_data
+#define CRYPTO_cleanup_all_ex_data()
+#undef SSL_load_error_strings
+#define SSL_load_error_strings()
+
#else
#define HAVE_OPENSSL10 1
+#define SSL_LIBRARY SSLeay_version(SSLEAY_VERSION)
+
/*
Unfortunately RAND_bytes manual page does not provide any guarantees
in relation to blocking behavior. Here we explicitly use SSLeay random
@@ -51,6 +66,7 @@
#endif /* HAVE_OPENSSL11 */
#elif defined(HAVE_YASSL)
+#define SSL_LIBRARY "YaSSL " YASSL_VERSION
#define BN_free(X) do { } while(0)
#endif /* !defined(HAVE_YASSL) */
@@ -62,6 +78,11 @@
#define EVP_CIPHER_CTX_encrypting(ctx) ((ctx)->encrypt)
#define EVP_CIPHER_CTX_SIZE sizeof(EVP_CIPHER_CTX)
#define EVP_MD_CTX_SIZE sizeof(EVP_MD_CTX)
+
+#define EVP_MD_CTX_reset(X) EVP_MD_CTX_cleanup(X)
+#define EVP_CIPHER_CTX_reset(X) EVP_CIPHER_CTX_cleanup(X)
+#define X509_get0_notBefore(X) X509_get_notBefore(X)
+#define X509_get0_notAfter(X) X509_get_notAfter(X)
#endif
#ifdef __cplusplus
diff --git a/libmysqld/CMakeLists.txt b/libmysqld/CMakeLists.txt
index d85a9cb7c83..cdf1abcd89c 100644
--- a/libmysqld/CMakeLists.txt
+++ b/libmysqld/CMakeLists.txt
@@ -165,15 +165,6 @@ MERGE_LIBRARIES(mysqlserver STATIC ${EMBEDDED_LIBS}
INSTALL(FILES embedded_priv.h DESTINATION ${INSTALL_INCLUDEDIR}/server/private COMPONENT ${COMPONENT_MYSQLSERVER})
-# Visual Studio users need debug static library
-IF(MSVC)
- INSTALL_DEBUG_TARGET(mysqlserver DESTINATION ${INSTALL_LIBDIR}/debug)
-ENDIF()
-
-IF(UNIX)
- INSTALL_DEBUG_TARGET(mysqlserver DESTINATION ${INSTALL_LIBDIR} RENAME
- ${CMAKE_STATIC_LIBRARY_PREFIX}mysqld-debug)
-ENDIF()
SET(CLIENT_API_FUNCTIONS_5_1
get_tty_password
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index 1bbcfde9aa3..ee5c75cf0dd 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -739,8 +739,7 @@ sub run_test_server ($$$) {
# Repeat test $opt_repeat number of times
my $repeat= $result->{repeat} || 1;
- # Don't repeat if test was skipped
- if ($repeat < $opt_repeat && $result->{'result'} ne 'MTR_RES_SKIPPED')
+ if ($repeat < $opt_repeat)
{
$result->{retries}= 0;
$result->{rep_failures}++ if $result->{failures};
@@ -4559,8 +4558,8 @@ sub check_warnings ($) {
my $timeout= start_timer(check_timeout($tinfo));
+ my $result= 0;
while (1){
- my $result= 0;
my $proc= My::SafeProcess->wait_any_timeout($timeout);
mtr_report("Got $proc");
@@ -5779,7 +5778,7 @@ sub debugger_arguments {
$$exe= $debugger;
}
- elsif ( $debugger =~ /windbg/ )
+ elsif ( $debugger =~ /windbg|vsjitdebugger/ )
{
# windbg exe arg1 .. argn
diff --git a/mysql-test/r/alter_table.result b/mysql-test/r/alter_table.result
index f7f87b810ce..13fbe00f2e3 100644
--- a/mysql-test/r/alter_table.result
+++ b/mysql-test/r/alter_table.result
@@ -2228,12 +2228,11 @@ alter table t1 drop column a;
ERROR 42S22: Unknown column 'a' in 'CHECK'
alter table t1 drop column b, add column b bigint first;
ERROR 42S22: Unknown column 'b' in 'CHECK'
+alter table t1 drop column a, drop constraint constraint_1;
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `a` int(11) DEFAULT NULL,
- `b` int(11) DEFAULT NULL,
- CONSTRAINT `CONSTRAINT_1` CHECK (`a` > `b`)
+ `b` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
create table t1 (a int, b int, check(a>0));
@@ -2265,12 +2264,11 @@ drop table t1;
create table t1 (a int, b int, c int, unique(a,b));
alter table t1 drop column a;
ERROR 42000: Key column 'a' doesn't exist in table
+alter table t1 drop column a, drop index a;
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `a` int(11) DEFAULT NULL,
`b` int(11) DEFAULT NULL,
- `c` int(11) DEFAULT NULL,
- UNIQUE KEY `a` (`a`,`b`)
+ `c` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
diff --git a/mysql-test/r/check_constraint.result b/mysql-test/r/check_constraint.result
index 308b6a2fc2b..525140d96e7 100644
--- a/mysql-test/r/check_constraint.result
+++ b/mysql-test/r/check_constraint.result
@@ -142,3 +142,13 @@ create table t1 (a int check (@b in (select user from mysql.user)));
ERROR HY000: Function or expression 'select ...' cannot be used in the CHECK clause of `a`
create table t1 (a int check (a > @b));
ERROR HY000: Function or expression '@b' cannot be used in the CHECK clause of `a`
+create table t1 (a int check (a = 1));
+insert t1 values (1);
+insert t1 values (2);
+ERROR 23000: CONSTRAINT `a` failed for `test`.`t1`
+insert t1 values (NULL);
+select * from t1;
+a
+1
+NULL
+drop table t1;
diff --git a/mysql-test/r/commit_1innodb.result b/mysql-test/r/commit_1innodb.result
index 258926e8c4b..ade8a4f7549 100644
--- a/mysql-test/r/commit_1innodb.result
+++ b/mysql-test/r/commit_1innodb.result
@@ -230,7 +230,7 @@ insert into t2 (a) values (1023);
do (f2(23));
Warnings:
Error 1062 Duplicate entry '23' for key 'a'
-Note 4091 At line 4 in test.f2
+Note 4092 At line 4 in test.f2
select * from t2;
a
1023
diff --git a/mysql-test/r/count_distinct.result b/mysql-test/r/count_distinct.result
index d55a232c715..760b2710586 100644
--- a/mysql-test/r/count_distinct.result
+++ b/mysql-test/r/count_distinct.result
@@ -106,3 +106,28 @@ count(distinct user_id)
17
drop table t1;
set @@tmp_table_size = default;
+create table t1 (
+a VARCHAR(1020),
+b int
+);
+insert into t1 values
+( 0 , 1 ),
+( 1 , 2 ),
+( 2 , 3 ),
+( 3 , 4 ),
+( 4 , 5 ),
+( 5 , 6 ),
+( 6 , 7 ),
+( 7 , 8 ),
+( 8 , 9 ),
+( 9 , 10 ),
+( 0 , 11 ),
+( 1 , 12 ),
+( 2 , 13 ),
+( 3 , 14 );
+set @@tmp_table_size=1024;
+select count(distinct a) from t1;
+count(distinct a)
+10
+drop table t1;
+set @@tmp_table_size = default;
diff --git a/mysql-test/r/create_drop_binlog.result b/mysql-test/r/create_drop_binlog.result
index 82133cd8e1d..79e0bdf5e20 100644
--- a/mysql-test/r/create_drop_binlog.result
+++ b/mysql-test/r/create_drop_binlog.result
@@ -160,7 +160,7 @@ Note 1050 Table 'v1' already exists
DROP VIEW IF EXISTS v1;
DROP VIEW IF EXISTS v1;
Warnings:
-Note 4089 Unknown VIEW: 'test.v1'
+Note 4090 Unknown VIEW: 'test.v1'
SHOW BINLOG EVENTS;
Log_name Pos Event_type Server_id End_log_pos Info
# # Format_desc 1 # VER
diff --git a/mysql-test/r/create_drop_view.result b/mysql-test/r/create_drop_view.result
index c7185cff7d0..d23b9b713ad 100644
--- a/mysql-test/r/create_drop_view.result
+++ b/mysql-test/r/create_drop_view.result
@@ -55,5 +55,5 @@ id
DROP VIEW IF EXISTS v1;
DROP VIEW IF EXISTS v1;
Warnings:
-Note 4089 Unknown VIEW: 'test.v1'
+Note 4090 Unknown VIEW: 'test.v1'
DROP TABLE t1;
diff --git a/mysql-test/r/date_formats.result b/mysql-test/r/date_formats.result
index 8439c40fbf9..16dbdf7969c 100644
--- a/mysql-test/r/date_formats.result
+++ b/mysql-test/r/date_formats.result
@@ -558,3 +558,18 @@ SET NAMES latin1;
#
# End of 5.1 tests
#
+select time_format('2001-01-01 02:02:02', '%d.%m.%Y');
+time_format('2001-01-01 02:02:02', '%d.%m.%Y')
+NULL
+select time_format('2001-01-01 02:02:02', '%d %T');
+time_format('2001-01-01 02:02:02', '%d %T')
+NULL
+select time_format('01 02:02:02', '%d %T');
+time_format('01 02:02:02', '%d %T')
+NULL
+select time_format('01 02:02:02', '%T');
+time_format('01 02:02:02', '%T')
+26:02:02
+select time_format('2001-01-01 02:02:02', '%T');
+time_format('2001-01-01 02:02:02', '%T')
+02:02:02
diff --git a/mysql-test/r/drop.result b/mysql-test/r/drop.result
index 08e6a19a9a3..3fd5370f470 100644
--- a/mysql-test/r/drop.result
+++ b/mysql-test/r/drop.result
@@ -209,10 +209,10 @@ Note 1051 Unknown table 'test.table1'
Note 1051 Unknown table 'test.table2'
DROP VIEW IF EXISTS view1,view2,view3,view4;
Warnings:
-Note 4089 Unknown VIEW: 'test.view1'
-Note 4089 Unknown VIEW: 'test.view2'
-Note 4089 Unknown VIEW: 'test.view3'
-Note 4089 Unknown VIEW: 'test.view4'
+Note 4090 Unknown VIEW: 'test.view1'
+Note 4090 Unknown VIEW: 'test.view2'
+Note 4090 Unknown VIEW: 'test.view3'
+Note 4090 Unknown VIEW: 'test.view4'
# Test error message when trigger does not find table
CREATE TABLE table1(a int);
diff --git a/mysql-test/r/func_math.result b/mysql-test/r/func_math.result
index c99d7e62285..7f2ed1c2ff0 100644
--- a/mysql-test/r/func_math.result
+++ b/mysql-test/r/func_math.result
@@ -838,6 +838,11 @@ select 0=0, 0=-0, 0.0= -0.0, 0.0 = -(0.0), 0.0E1=-0.0E1, 0.0E1=-(0.0E1);
select CRC32(NULL), CRC32(''), CRC32('MySQL'), CRC32('mysql'), CRC32('01234567'), CRC32('012345678');
CRC32(NULL) CRC32('') CRC32('MySQL') CRC32('mysql') CRC32('01234567') CRC32('012345678')
NULL 0 3259397556 2501908538 763378421 939184570
+explain extended select (3-2)+1, (3/2)*1, 3-(2+1), 3/(2*1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+Warnings:
+Note 1003 select 3 - 2 + 1 AS `(3-2)+1`,3 / 2 * 1 AS `(3/2)*1`,3 - (2 + 1) AS `3-(2+1)`,3 / (2 * 1) AS `3/(2*1)`
#
# Start of 10.3 tests
#
diff --git a/mysql-test/r/func_misc.result b/mysql-test/r/func_misc.result
index 39f8f41d1e7..299b6344192 100644
--- a/mysql-test/r/func_misc.result
+++ b/mysql-test/r/func_misc.result
@@ -1441,6 +1441,23 @@ release_lock('test')
# -- Done.
#
+# MDEV-13685 Can not replay binary log due to Illegal mix of collations (latin1_swedish_ci,IMPLICIT) and (utf8mb4_general_ci,COERCIBLE) for operation 'concat'
+#
+SET NAMES utf8;
+SELECT COERCIBILITY(NAME_CONST('name','test'));
+COERCIBILITY(NAME_CONST('name','test'))
+2
+SELECT COERCIBILITY(NAME_CONST('name',TIME'00:00:00'));
+COERCIBILITY(NAME_CONST('name',TIME'00:00:00'))
+5
+SELECT COERCIBILITY(NAME_CONST('name',15));
+COERCIBILITY(NAME_CONST('name',15))
+5
+SELECT CONCAT(NAME_CONST('name',15),'오');
+CONCAT(NAME_CONST('name',15),'오')
+15오
+SET NAMES latin1;
+#
# Start of 10.2 tests
#
#
diff --git a/mysql-test/r/get_diagnostics.result b/mysql-test/r/get_diagnostics.result
index 01fed8c846b..a75b775297c 100644
--- a/mysql-test/r/get_diagnostics.result
+++ b/mysql-test/r/get_diagnostics.result
@@ -590,7 +590,7 @@ DROP PROCEDURE p1;
SHOW WARNINGS;
Level Code Message
Error 54321 MESSAGE_TEXT text
-Note 4091 At line 16 in test.p1
+Note 4092 At line 16 in test.p1
CREATE PROCEDURE p1()
BEGIN
DECLARE var INT;
diff --git a/mysql-test/r/grant.result b/mysql-test/r/grant.result
index 258b8e84eb4..5b239f09172 100644
--- a/mysql-test/r/grant.result
+++ b/mysql-test/r/grant.result
@@ -1428,7 +1428,7 @@ Warnings:
Note 1305 FUNCTION test.test_function does not exist
drop view if exists v1;
Warnings:
-Note 4089 Unknown VIEW: 'test.v1'
+Note 4090 Unknown VIEW: 'test.v1'
create table test (col1 varchar(30));
create function test_function() returns varchar(30)
begin
diff --git a/mysql-test/r/insert.result b/mysql-test/r/insert.result
index c9e3dc7b253..8e572c5fede 100644
--- a/mysql-test/r/insert.result
+++ b/mysql-test/r/insert.result
@@ -717,3 +717,31 @@ insert ignore into t1 values (1,12);
Warnings:
Warning 1062 Duplicate entry '1' for key 'f1'
DROP TABLE t1;
+#
+# MDEV-13290 Assertion Assertion `!is_set() || (m_status == DA_OK_BULK
+# && is_bulk_op())' or `! is_set()' failed
+#
+SET @save_mode= @@sql_mode;
+SET sql_mode= 'STRICT_ALL_TABLES';
+CREATE TABLE t1 (f1 INT DEFAULT 0, f2 INT);
+CREATE ALGORITHM = MERGE VIEW v1 AS SELECT f1, f2 FROM t1 WHERE f1 = 'x' WITH CHECK OPTION;
+REPLACE INTO v1 SET f2 = 1;
+ERROR 22007: Truncated incorrect DOUBLE value: 'x'
+SELECT * from t1;
+f1 f2
+drop view v1;
+CREATE ALGORITHM = MERGE VIEW v1 AS SELECT f1, f2 FROM t1 WHERE f1 = cast('' as decimal) WITH CHECK OPTION;
+REPLACE INTO v1 SET f2 = 1;
+ERROR 22007: Truncated incorrect DECIMAL value: ''
+SELECT * from t1;
+f1 f2
+drop view v1;
+SELECT 0,0 INTO OUTFILE 't1.txt';
+CREATE ALGORITHM = MERGE VIEW v1 AS SELECT f1, f2 FROM t1 WHERE f1 = 'x' WITH CHECK OPTION;
+LOAD DATA INFILE 't1.txt' INTO TABLE v1;
+ERROR 22007: Truncated incorrect DOUBLE value: 'x'
+SELECT * from t1;
+f1 f2
+drop view v1;
+drop table t1;
+SET @@sql_mode= @save_mode;
diff --git a/mysql-test/r/locale.result b/mysql-test/r/locale.result
index 95c1f51fd87..a02e80ed21e 100644
--- a/mysql-test/r/locale.result
+++ b/mysql-test/r/locale.result
@@ -223,5 +223,21 @@ SELECT * FROM non_existent;
ERROR 42S02: Table 'test.non_existent' doesn't exist
SET lc_time_names=@old_50915_lc_time_names;
#
-# End of 5.6 tests
+# End of 10.0 tests
+#
+#
+# End of 10.1 tests
+#
+create view v1 as select
+format(123456789,2) as b,
+format(123456789,2,'rm_CH') as b1;
+select * from v1;
+b b1
+123,456,789.00 123'456'789,00
+show create view v1;
+View Create View character_set_client collation_connection
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select format(123456789,2) AS `b`,format(123456789,2,'rm_CH') AS `b1` utf8 utf8_general_ci
+drop view v1;
+#
+# End of 10.2 tests
#
diff --git a/mysql-test/r/log_tables-big.result b/mysql-test/r/log_tables-big.result
index 0a33510fe77..392a1bce234 100644
--- a/mysql-test/r/log_tables-big.result
+++ b/mysql-test/r/log_tables-big.result
@@ -1,3 +1,4 @@
+set @log_output.saved = @@global.log_output;
set @@global.log_output = 'TABLE';
connect con1,localhost,root,,;
connect con2,localhost,root,,;
@@ -11,21 +12,21 @@ set session long_query_time=1;
select get_lock('bug27638', 2);
get_lock('bug27638', 2)
0
-select if (query_time >= '00:00:01', 'OK', 'WRONG') as qt, sql_text from mysql.slow_log
+select if (query_time >= '00:00:01', 'OK', concat('WRONG: ',query_time)) as qt, sql_text from mysql.slow_log
where sql_text = 'select get_lock(\'bug27638\', 2)';
qt sql_text
OK select get_lock('bug27638', 2)
select get_lock('bug27638', 60);
get_lock('bug27638', 60)
0
-select if (query_time >= '00:00:59', 'OK', 'WRONG') as qt, sql_text from mysql.slow_log
+select if (query_time >= '00:00:59', 'OK', concat('WRONG: ',query_time)) as qt, sql_text from mysql.slow_log
where sql_text = 'select get_lock(\'bug27638\', 60)';
qt sql_text
OK select get_lock('bug27638', 60)
select get_lock('bug27638', 101);
get_lock('bug27638', 101)
0
-select if (query_time >= '00:01:40', 'OK', 'WRONG') as qt, sql_text from mysql.slow_log
+select if (query_time >= '00:01:40', 'OK', concat('WRONG: ',query_time)) as qt, sql_text from mysql.slow_log
where sql_text = 'select get_lock(\'bug27638\', 101)';
qt sql_text
OK select get_lock('bug27638', 101)
@@ -36,4 +37,4 @@ release_lock('bug27638')
connection default;
disconnect con1;
disconnect con2;
-set @@global.log_output=default;
+set @@global.log_output = @log_output.saved;
diff --git a/mysql-test/r/partition_symlink.result b/mysql-test/r/partition_symlink.result
index 474dd5adf02..90048eb3438 100644
--- a/mysql-test/r/partition_symlink.result
+++ b/mysql-test/r/partition_symlink.result
@@ -33,11 +33,15 @@ t2 CREATE TABLE `t2` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1 DATA DIRECTORY='MYSQLTEST_VARDIR/tmp/' INDEX DIRECTORY='MYSQLTEST_VARDIR/tmp/'
INSERT INTO t1 VALUES (0), (1), (2);
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
-ERROR HY000: Tables have different definitions
ALTER TABLE t1 EXCHANGE PARTITION p2 WITH TABLE t2;
ERROR HY000: Tables have different definitions
+SELECT * FROM t1;
+a
+1
+2
SELECT * FROM t2;
a
+0
DROP TABLE t1, t2;
# Creating two non colliding tables mysqltest2.t1 and test.t1
# test.t1 have partitions in mysqltest2-directory!
diff --git a/mysql-test/r/profiling.result b/mysql-test/r/profiling.result
index f7568c1b0f7..9644a8afe8d 100644
--- a/mysql-test/r/profiling.result
+++ b/mysql-test/r/profiling.result
@@ -415,7 +415,7 @@ select @@profiling;
drop table if exists t1, t2, t3;
drop view if exists v1;
Warnings:
-Note 4089 Unknown VIEW: 'test.v1'
+Note 4090 Unknown VIEW: 'test.v1'
drop function if exists f1;
set session profiling = OFF;
set global profiling_history_size= @start_value;
diff --git a/mysql-test/r/range_interrupted-13751.result b/mysql-test/r/range_interrupted-13751.result
new file mode 100644
index 00000000000..f4f995721ad
--- /dev/null
+++ b/mysql-test/r/range_interrupted-13751.result
@@ -0,0 +1,16 @@
+CREATE TABLE t1 (i INT AUTO_INCREMENT, c VARCHAR(1), KEY(i), KEY(c,i)) ENGINE=MyISAM;
+INSERT INTO t1 (c) VALUES ('a'),('b'),('c'),('d');
+INSERT INTO t1 (c) SELECT c FROM t1;
+INSERT INTO t1 (c) SELECT c FROM t1;
+INSERT INTO t1 (c) SELECT c FROM t1;
+INSERT INTO t1 (c) SELECT c FROM t1;
+INSERT INTO t1 (c) SELECT c FROM t1;
+INSERT INTO t1 (c) SELECT c FROM t1;
+set @old_dbug=@@session.debug_dbug;
+set debug_dbug="+d,kill_join_init_read_record";
+SELECT 1 FROM t1 AS alias1, t1 AS alias2, t1 AS alias3
+WHERE alias1.c = alias2.c OR alias1.i <= 1
+;
+ERROR 70100: Query execution was interrupted
+set debug_dbug=@old_dbug;
+DROP TABLE t1;
diff --git a/mysql-test/r/select.result b/mysql-test/r/select.result
index 5859bd0fae6..c7156ddae91 100644
--- a/mysql-test/r/select.result
+++ b/mysql-test/r/select.result
@@ -4784,7 +4784,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 system NULL NULL NULL NULL 1 100.00
1 SIMPLE t1 ALL NULL NULL NULL NULL 10 100.00 Using where
Warnings:
-Note 1003 select `test`.`t1`.`a` AS `a`,2 AS `b` from `test`.`t1` where `test`.`t1`.`a` = <cache>(2 + 1 + 1)
+Note 1003 select `test`.`t1`.`a` AS `a`,2 AS `b` from `test`.`t1` where `test`.`t1`.`a` = <cache>(2 + (1 + 1))
SELECT * FROM t2 LEFT JOIN t1 ON a = b + 1;
b a
2 3
diff --git a/mysql-test/r/select_jcl6.result b/mysql-test/r/select_jcl6.result
index c9d582bb75a..92be057c62c 100644
--- a/mysql-test/r/select_jcl6.result
+++ b/mysql-test/r/select_jcl6.result
@@ -4795,7 +4795,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 system NULL NULL NULL NULL 1 100.00
1 SIMPLE t1 ALL NULL NULL NULL NULL 10 100.00 Using where
Warnings:
-Note 1003 select `test`.`t1`.`a` AS `a`,2 AS `b` from `test`.`t1` where `test`.`t1`.`a` = <cache>(2 + 1 + 1)
+Note 1003 select `test`.`t1`.`a` AS `a`,2 AS `b` from `test`.`t1` where `test`.`t1`.`a` = <cache>(2 + (1 + 1))
SELECT * FROM t2 LEFT JOIN t1 ON a = b + 1;
b a
2 3
diff --git a/mysql-test/r/select_pkeycache.result b/mysql-test/r/select_pkeycache.result
index 5859bd0fae6..c7156ddae91 100644
--- a/mysql-test/r/select_pkeycache.result
+++ b/mysql-test/r/select_pkeycache.result
@@ -4784,7 +4784,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 system NULL NULL NULL NULL 1 100.00
1 SIMPLE t1 ALL NULL NULL NULL NULL 10 100.00 Using where
Warnings:
-Note 1003 select `test`.`t1`.`a` AS `a`,2 AS `b` from `test`.`t1` where `test`.`t1`.`a` = <cache>(2 + 1 + 1)
+Note 1003 select `test`.`t1`.`a` AS `a`,2 AS `b` from `test`.`t1` where `test`.`t1`.`a` = <cache>(2 + (1 + 1))
SELECT * FROM t2 LEFT JOIN t1 ON a = b + 1;
b a
2 3
diff --git a/mysql-test/r/signal.result b/mysql-test/r/signal.result
index 9b140b5b33b..671df4b7f17 100644
--- a/mysql-test/r/signal.result
+++ b/mysql-test/r/signal.result
@@ -1715,7 +1715,7 @@ show warnings $$
Level Code Message
Warning 1012 Raising a warning
Error 5555 RESIGNAL to not found
-Note 4091 At line 9 in test.test_resignal
+Note 4092 At line 9 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@@ -1740,7 +1740,7 @@ show warnings $$
Level Code Message
Warning 1012 Raising a warning
Error 5555 RESIGNAL to error
-Note 4091 At line 9 in test.test_resignal
+Note 4092 At line 9 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@@ -1789,7 +1789,7 @@ show warnings $$
Level Code Message
Error 1012 Raising a not found
Error 5555 RESIGNAL to not found
-Note 4091 At line 9 in test.test_resignal
+Note 4092 At line 9 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@@ -1814,7 +1814,7 @@ show warnings $$
Level Code Message
Error 1012 Raising a not found
Error 5555 RESIGNAL to error
-Note 4091 At line 9 in test.test_resignal
+Note 4092 At line 9 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@@ -1863,7 +1863,7 @@ show warnings $$
Level Code Message
Error 1012 Raising an error
Error 5555 RESIGNAL to not found
-Note 4091 At line 9 in test.test_resignal
+Note 4092 At line 9 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@@ -1888,7 +1888,7 @@ show warnings $$
Level Code Message
Error 1012 Raising an error
Error 5555 RESIGNAL to error
-Note 4091 At line 9 in test.test_resignal
+Note 4092 At line 9 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@@ -1931,7 +1931,7 @@ show warnings $$
Level Code Message
Warning 1264 Out of range value for column 'a' at row 1
Error 5555 RESIGNAL to a not found
-Note 4091 At line 8 in test.test_resignal
+Note 4092 At line 8 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@@ -1953,7 +1953,7 @@ show warnings $$
Level Code Message
Warning 1264 Out of range value for column 'a' at row 1
Error 5555 RESIGNAL to an error
-Note 4091 At line 8 in test.test_resignal
+Note 4092 At line 8 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@@ -2004,7 +2004,7 @@ show warnings $$
Level Code Message
Error 1329 No data - zero rows fetched, selected, or processed
Error 5555 RESIGNAL to a not found
-Note 4091 At line 10 in test.test_resignal
+Note 4092 At line 10 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@@ -2030,7 +2030,7 @@ show warnings $$
Level Code Message
Error 1329 No data - zero rows fetched, selected, or processed
Error 5555 RESIGNAL to an error
-Note 4091 At line 10 in test.test_resignal
+Note 4092 At line 10 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@@ -2073,7 +2073,7 @@ show warnings $$
Level Code Message
Error 1051 Unknown table 'test.no_such_table'
Error 5555 RESIGNAL to a not found
-Note 4091 At line 8 in test.test_resignal
+Note 4092 At line 8 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@@ -2095,7 +2095,7 @@ show warnings $$
Level Code Message
Error 1051 Unknown table 'test.no_such_table'
Error 5555 RESIGNAL to an error
-Note 4091 At line 8 in test.test_resignal
+Note 4092 At line 8 in test.test_resignal
drop procedure test_resignal $$
#
# More complex cases
@@ -2142,7 +2142,7 @@ ERROR 42000: Hi, I am a useless error message
show warnings $$
Level Code Message
Error 9999 Hi, I am a useless error message
-Note 4091 At line 7 in test.peter_p2
+Note 4092 At line 7 in test.peter_p2
drop procedure peter_p1 $$
drop procedure peter_p2 $$
CREATE PROCEDURE peter_p1 ()
@@ -2198,16 +2198,16 @@ Level Code Message
Error 1231 Variable 'sql_mode' can't be set to the value of 'NULL'
Error 1232 Variable 'sql_mode' can't be set to the value of 'NULL'
Error 9999 Variable 'sql_mode' can't be set to the value of 'NULL'
-Note 4091 At line 8 in test.peter_p1
+Note 4092 At line 8 in test.peter_p1
ERROR 42000: Hi, I am a useless error message
show warnings $$
Level Code Message
Error 1231 Variable 'sql_mode' can't be set to the value of 'NULL'
Error 1232 Variable 'sql_mode' can't be set to the value of 'NULL'
Error 9999 Variable 'sql_mode' can't be set to the value of 'NULL'
-Note 4091 At line 8 in test.peter_p1
+Note 4092 At line 8 in test.peter_p1
Error 9999 Hi, I am a useless error message
-Note 4091 At line 10 in test.peter_p2
+Note 4092 At line 10 in test.peter_p2
drop procedure peter_p1 $$
drop procedure peter_p2 $$
drop procedure if exists peter_p3 $$
@@ -2225,7 +2225,7 @@ show warnings $$
Level Code Message
Error 1 Original
Error 2 Original
-Note 4091 At line 4 in test.peter_p3
+Note 4092 At line 4 in test.peter_p3
drop procedure peter_p3 $$
drop table t_warn;
drop table t_cursor;
diff --git a/mysql-test/r/signal_demo3.result b/mysql-test/r/signal_demo3.result
index 2e1943b546e..a98d587937c 100644
--- a/mysql-test/r/signal_demo3.result
+++ b/mysql-test/r/signal_demo3.result
@@ -79,23 +79,23 @@ show warnings;
Level Code Message
Error 1051 Unknown table 'demo.oops_it_is_not_here'
Error 1644 Oops in proc_9
-Note 4091 At line 4 in demo.proc_9
+Note 4092 At line 4 in demo.proc_9
Error 1644 Oops in proc_8
-Note 4091 At line 4 in demo.proc_8
+Note 4092 At line 4 in demo.proc_8
Error 1644 Oops in proc_7
-Note 4091 At line 4 in demo.proc_7
+Note 4092 At line 4 in demo.proc_7
Error 1644 Oops in proc_6
-Note 4091 At line 4 in demo.proc_6
+Note 4092 At line 4 in demo.proc_6
Error 1644 Oops in proc_5
-Note 4091 At line 4 in demo.proc_5
+Note 4092 At line 4 in demo.proc_5
Error 1644 Oops in proc_4
-Note 4091 At line 4 in demo.proc_4
+Note 4092 At line 4 in demo.proc_4
Error 1644 Oops in proc_3
-Note 4091 At line 4 in demo.proc_3
+Note 4092 At line 4 in demo.proc_3
Error 1644 Oops in proc_2
-Note 4091 At line 4 in demo.proc_2
+Note 4092 At line 4 in demo.proc_2
Error 1644 Oops in proc_1
-Note 4091 At line 4 in demo.proc_1
+Note 4092 At line 4 in demo.proc_1
SET @@session.max_error_count = 5;
SELECT @@session.max_error_count;
@@session.max_error_count
@@ -104,11 +104,11 @@ call proc_1();
ERROR 45000: Oops in proc_1
show warnings;
Level Code Message
-Note 4091 At line 4 in demo.proc_3
+Note 4092 At line 4 in demo.proc_3
Error 1644 Oops in proc_2
-Note 4091 At line 4 in demo.proc_2
+Note 4092 At line 4 in demo.proc_2
Error 1644 Oops in proc_1
-Note 4091 At line 4 in demo.proc_1
+Note 4092 At line 4 in demo.proc_1
SET @@session.max_error_count = 7;
SELECT @@session.max_error_count;
@@session.max_error_count
@@ -117,13 +117,13 @@ call proc_1();
ERROR 45000: Oops in proc_1
show warnings;
Level Code Message
-Note 4091 At line 4 in demo.proc_4
+Note 4092 At line 4 in demo.proc_4
Error 1644 Oops in proc_3
-Note 4091 At line 4 in demo.proc_3
+Note 4092 At line 4 in demo.proc_3
Error 1644 Oops in proc_2
-Note 4091 At line 4 in demo.proc_2
+Note 4092 At line 4 in demo.proc_2
Error 1644 Oops in proc_1
-Note 4091 At line 4 in demo.proc_1
+Note 4092 At line 4 in demo.proc_1
SET @@session.max_error_count = 9;
SELECT @@session.max_error_count;
@@session.max_error_count
@@ -132,15 +132,15 @@ call proc_1();
ERROR 45000: Oops in proc_1
show warnings;
Level Code Message
-Note 4091 At line 4 in demo.proc_5
+Note 4092 At line 4 in demo.proc_5
Error 1644 Oops in proc_4
-Note 4091 At line 4 in demo.proc_4
+Note 4092 At line 4 in demo.proc_4
Error 1644 Oops in proc_3
-Note 4091 At line 4 in demo.proc_3
+Note 4092 At line 4 in demo.proc_3
Error 1644 Oops in proc_2
-Note 4091 At line 4 in demo.proc_2
+Note 4092 At line 4 in demo.proc_2
Error 1644 Oops in proc_1
-Note 4091 At line 4 in demo.proc_1
+Note 4092 At line 4 in demo.proc_1
drop database demo;
SET @@global.max_error_count = @start_global_value;
SELECT @@global.max_error_count;
diff --git a/mysql-test/r/sp-error.result b/mysql-test/r/sp-error.result
index e3a02503ad0..f0bc1874850 100644
--- a/mysql-test/r/sp-error.result
+++ b/mysql-test/r/sp-error.result
@@ -1990,8 +1990,8 @@ Warning 1264 Out of range value for column 'a' at row 1
Note 1292 Truncated incorrect INTEGER value: '222222 '
Warning 1264 Out of range value for column 'b' at row 1
Error 1048 Column 'c' cannot be null
-Note 4091 At line 6 in test.t1_bi
-Note 4091 At line 2 in test.p1
+Note 4092 At line 6 in test.t1_bi
+Note 4092 At line 2 in test.p1
DROP TABLE t1;
DROP TABLE t2;
diff --git a/mysql-test/r/sp-group.result b/mysql-test/r/sp-group.result
index 3ed3f812267..535e67046d8 100644
--- a/mysql-test/r/sp-group.result
+++ b/mysql-test/r/sp-group.result
@@ -3,7 +3,7 @@ Warnings:
Note 1051 Unknown table 'test.t1'
drop view if exists view_t1;
Warnings:
-Note 4089 Unknown VIEW: 'test.view_t1'
+Note 4090 Unknown VIEW: 'test.view_t1'
SET sql_mode=ONLY_FULL_GROUP_BY;
CREATE TABLE t1 (
pk INT,
diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result
index a1af26cdda0..cb2237699fb 100644
--- a/mysql-test/r/sp.result
+++ b/mysql-test/r/sp.result
@@ -3211,7 +3211,7 @@ drop procedure bug10961|
DROP PROCEDURE IF EXISTS bug6866|
DROP VIEW IF EXISTS tv|
Warnings:
-Note 4089 Unknown VIEW: 'test.tv'
+Note 4090 Unknown VIEW: 'test.tv'
DROP TABLE IF EXISTS tt1,tt2,tt3|
Warnings:
Note 1051 Unknown table 'test.tt1'
@@ -7823,7 +7823,7 @@ ERROR 23000: Duplicate entry '2' for key 'PRIMARY'
show warnings;
Level Code Message
Error 1062 Duplicate entry '2' for key 'PRIMARY'
-Note 4091 At line 5 in test.p1
+Note 4092 At line 5 in test.p1
select * from t1;
id
1
diff --git a/mysql-test/r/symlink.result b/mysql-test/r/symlink.result
index 1764ea6a7dd..e8063ee736c 100644
--- a/mysql-test/r/symlink.result
+++ b/mysql-test/r/symlink.result
@@ -237,3 +237,15 @@ DROP DATABASE x;
CREATE TABLE test.t1(id INT(11)) ENGINE MYISAM
DATA DIRECTORY "MYSQLTEST_VARDIR/tmp";
DROP TABLE test.t1;
+use test;
+create table t1(c1 int, c2 int, c3 varchar(100)) engine=MyISAM data directory='MYSQL_TMP_DIR' index directory = 'MYSQL_TMP_DIR';
+insert t1 values (1,2,3), (2,3,4), (3,4,5), (4,5,6), (5,6,7), (6,7,8), (7,8,9);
+alter online table t1 delay_key_write=1;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` int(11) DEFAULT NULL,
+ `c2` int(11) DEFAULT NULL,
+ `c3` varchar(100) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 DELAY_KEY_WRITE=1 DATA DIRECTORY='MYSQL_TMP_DIR/' INDEX DIRECTORY='MYSQL_TMP_DIR/'
+drop table t1;
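The new symlink case above checks that an online, option-only ALTER (delay_key_write=1) on a MyISAM table created with DATA DIRECTORY/INDEX DIRECTORY keeps both directory clauses in SHOW CREATE TABLE. A minimal sketch of the same pattern outside the test harness, assuming the server allows symlinked MyISAM tables and using a placeholder path instead of the suite's MYSQL_TMP_DIR substitution:

    create table t1 (c1 int, c2 int, c3 varchar(100)) engine=MyISAM
      data directory='/tmp/mariadb-data' index directory='/tmp/mariadb-data';
    insert t1 values (1,2,'3'), (2,3,'4');
    alter online table t1 delay_key_write=1;  -- option change only, no rebuild expected
    show create table t1;                     -- DATA DIRECTORY and INDEX DIRECTORY should still be printed
    drop table t1;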
diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result
index 36abece7743..24c669308cd 100644
--- a/mysql-test/r/view.result
+++ b/mysql-test/r/view.result
@@ -5236,7 +5236,7 @@ CREATE TABLE t4 (i4 INT);
INSERT INTO t4 VALUES (1),(2);
DROP VIEW IF EXISTS v1;
Warnings:
-Note 4089 Unknown VIEW: 'test.v1'
+Note 4090 Unknown VIEW: 'test.v1'
CREATE VIEW v1 AS select coalesce(j1,i3) AS v1_field1 from t2 join t3 left join t1 on ( i1 = i2 );
CREATE VIEW v2 AS select v1_field1 from t4 join v1;
prepare my_stmt from "select v1_field1 from v2";
diff --git a/mysql-test/r/warnings.result b/mysql-test/r/warnings.result
index 400256f2ab7..5d805ac572a 100644
--- a/mysql-test/r/warnings.result
+++ b/mysql-test/r/warnings.result
@@ -353,7 +353,7 @@ ERROR 23000: Duplicate entry '11' for key 'a'
SHOW WARNINGS;
Level Code Message
-Note 4091 At line 4 in test.f1
+Note 4092 At line 4 in test.f1
Error 1062 Duplicate entry '11' for key 'a'
DROP TABLE t1;
diff --git a/mysql-test/r/win.result b/mysql-test/r/win.result
index 743513092e2..6434b26dba1 100644
--- a/mysql-test/r/win.result
+++ b/mysql-test/r/win.result
@@ -3186,9 +3186,6 @@ fld
2
DROP TABLE t1;
#
-# Start of 10.3 tests
-#
-#
# MDEV-13240 Wrong warning with MAX(datetime_field) OVER (...)
#
CREATE TABLE t1 (dt DATETIME);
@@ -3197,3 +3194,85 @@ SELECT MAX(dt) OVER (ORDER BY dt ROWS BETWEEN 1 FOLLOWING AND 1 FOLLOWING) FROM
MAX(dt) OVER (ORDER BY dt ROWS BETWEEN 1 FOLLOWING AND 1 FOLLOWING)
NULL
DROP TABLE t1;
+#
+# MDEV-13358 FIRST_VALUE throws SQL Error (1292): Incorrect datetime value
+#
+CREATE TABLE IF NOT EXISTS `fv_test` (
+`SOME_DATE` datetime NOT NULL
+);
+INSERT INTO `fv_test` (`SOME_DATE`) VALUES ('2017-07-20 12:47:56');
+CREATE TABLE fv_result
+SELECT
+FIRST_VALUE(SOME_DATE) OVER(ORDER BY SOME_DATE DESC) AS somedate
+FROM fv_test;
+SHOW CREATE TABLE fv_result;
+Table Create Table
+fv_result CREATE TABLE `fv_result` (
+ `somedate` datetime DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM fv_result;
+somedate
+2017-07-20 12:47:56
+DROP TABLE fv_test, fv_result;
+#
+# MDEV-13649: Server crashes in set_field_to_null_with_conversions or in Field::set_notnull
+#
+CREATE TABLE t1 (a int);
+INSERT INTO t1 VALUES (0),(1),(2);
+SELECT LEAD(a) OVER (PARTITION BY a) as lead,
+a AND LEAD(a) OVER (PARTITION BY a) AS a_and_lead_part
+FROM t1;
+lead a_and_lead_part
+NULL 0
+NULL NULL
+NULL NULL
+SELECT a OR LEAD(a) OVER (ORDER BY a) AS a_or_lead_order
+FROM t1
+ORDER BY a;
+a_or_lead_order
+1
+1
+1
+SELECT a AND LEAD(a) OVER (ORDER BY a) AS a_and_lead_order
+FROM t1
+ORDER BY a;
+a_and_lead_order
+0
+1
+NULL
+SELECT a XOR LEAD(a) OVER (ORDER BY a) AS a_xor_lead_order
+FROM t1
+ORDER BY a;
+a_xor_lead_order
+1
+0
+NULL
+SELECT NOT LEAD(a) OVER (ORDER BY a) AS not_lead_order
+FROM t1
+ORDER BY a;
+not_lead_order
+0
+0
+NULL
+SELECT LEAD(a) OVER (ORDER BY a) is not null AS is_not_null_lead_order
+FROM t1
+ORDER BY a;
+is_not_null_lead_order
+1
+1
+0
+drop table t1;
+#
+# MDEV-13354: Server crashes in find_field_in_tables upon PS with window function and subquery
+#
+CREATE TABLE t1 (i INT, a char);
+INSERT INTO t1 VALUES (1, 'a'),(2, 'b');
+PREPARE stmt FROM "SELECT row_number() over (partition by i order by i), i FROM (SELECT * from t1) as sq";
+EXECUTE stmt;
+row_number() over (partition by i order by i) i
+1 1
+1 2
+DROP TABLE t1;
+#
+# Start of 10.3 tests
+#
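The MDEV-13649 block above pins down three-valued logic when a boolean operator is applied to a window function: on the last row LEAD(a) is NULL, so the combined expression is NULL unless the other operand already decides it. A minimal sketch of that expectation, reusing the same data:

    create table t1 (a int);
    insert into t1 values (0),(1),(2);
    select a,
           lead(a) over (order by a)        as next_a,
           a and lead(a) over (order by a)  as a_and_next,
           a or  lead(a) over (order by a)  as a_or_next
    from t1 order by a;
    -- last row: next_a is NULL, so a_and_next is NULL (true AND unknown),
    -- while a_or_next is 1 (true OR unknown is true), matching the results above
    drop table t1;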
diff --git a/mysql-test/r/win_as_arg_to_aggregate_func.result b/mysql-test/r/win_as_arg_to_aggregate_func.result
new file mode 100644
index 00000000000..e3f0091d6d2
--- /dev/null
+++ b/mysql-test/r/win_as_arg_to_aggregate_func.result
@@ -0,0 +1,114 @@
+create table t1 (i int);
+insert into t1 values (5),(6),(0);
+#
+# Try out all set functions with window functions as arguments.
+# Any such usage should return an error.
+#
+select MIN( SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select MIN(DISTINCT SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select MAX( SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select MAX(DISTINCT SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select SUM( SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select SUM(DISTINCT SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select AVG( SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select AVG(DISTINCT SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select COUNT( SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select COUNT(DISTINCT SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select BIT_AND( SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select BIT_OR( SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select BIT_XOR( SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select STD( SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select STDDEV( SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select STDDEV_POP( SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select STDDEV_SAMP(SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select VARIANCE(SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select VAR_POP(SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select VAR_SAMP(SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select GROUP_CONCAT(SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+select GROUP_CONCAT(DISTINCT SUM(i) OVER (order by i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+#
+# Test that partition instead of order by in over doesn't change result.
+#
+select SUM( SUM(i) OVER (PARTITION BY i) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+#
+# Test that no arguments in OVER() clause lead to crash in this case.
+#
+select SUM( SUM(i) OVER () )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+drop table t1;
+#
+# MDEV-13774: Server Crash on Execuate of SQL Statement
+#
+create table t1 (i int);
+insert into t1 values (5),(6),(0);
+select SUM(
+IF( SUM( IF(i,1,0)) OVER (PARTITION BY i) > 0
+AND
+SUM( IF(i,1,0)) OVER (PARTITION BY i) > 0,
+1,
+0) )
+from t1;
+ERROR HY000: Window functions can not be used as arguments to group functions.
+#
+# A way to get the aggregation result.
+#
+select i, IF(SUM(IF(i,1,0)) OVER (PARTITION BY i) > 0 AND SUM( IF(i,1,0)) OVER (PARTITION BY i) > 0,1,0) AS if_col
+from t1
+order by i;
+i if_col
+0 0
+5 1
+6 1
+select sum(if_col)
+from (select IF(SUM(IF(i,1,0)) OVER (PARTITION BY i) > 0 AND SUM( IF(i,1,0)) OVER (PARTITION BY i) > 0,1,0) AS if_col
+from t1) tmp;
+sum(if_col)
+2
+drop table t1;
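The new result file establishes that a window function may not be nested directly inside a set function; every such nesting now fails with the "Window functions can not be used as arguments to group functions" error instead of crashing (MDEV-13774). As the closing queries show, the supported way to aggregate a windowed value is to materialise it in a derived table first. A condensed sketch of both the rejection and the workaround:

    create table t1 (i int);
    insert into t1 values (5),(6),(0);
    -- rejected: window function used directly as an aggregate argument
    select sum( sum(i) over (order by i) ) from t1;
    -- accepted: compute the window value in a derived table, aggregate outside
    select sum(win_sum)
    from (select sum(i) over (order by i) as win_sum from t1) dt;
    drop table t1;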
diff --git a/mysql-test/r/win_insert_select.result b/mysql-test/r/win_insert_select.result
index e04088bd96d..bb25d052840 100644
--- a/mysql-test/r/win_insert_select.result
+++ b/mysql-test/r/win_insert_select.result
@@ -1,16 +1,16 @@
CREATE TABLE t1 (c1 INT, c2 VARCHAR(30));
PREPARE populate_table FROM "INSERT into t1 values (1, 'manual_insert_1'),
(4, 'manual_insert_2')";
-INSERT INTO t1 SELECT row_number() over(), "should_have_NULL" FROM t1;
-INSERT INTO t1 SELECT 1 + row_number() over(), "should_have_2" FROM t1;
+INSERT INTO t1 SELECT row_number() over(), "should_not_add_any_rows" FROM t1;
+INSERT INTO t1 SELECT 1 + row_number() over(), "should_not_add_any_rows" FROM t1;
EXECUTE populate_table;
-INSERT INTO t1 SELECT 10 + row_number() over(), "should repeat 4 times [11-14]" FROM t1;
+INSERT INTO t1 SELECT 10 + row_number() over(), "should repeat 2 times [11-12]" FROM t1;
SELECT c1, c2 FROM t1 ORDER BY c2, c1;
c1 c2
1 manual_insert_1
4 manual_insert_2
-11 should repeat 4 times [11-14]
-12 should repeat 4 times [11-14]
+11 should repeat 2 times [11-12]
+12 should repeat 2 times [11-12]
DELETE FROM t1;
EXECUTE populate_table;
INSERT INTO t1
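The corrected expectations above reflect that INSERT ... SELECT with row_number() OVER() reads the current contents of the source table: when t1 is empty the SELECT produces no rows and nothing is inserted, and when t1 holds two rows exactly two numbered rows are added. A minimal sketch of that behaviour on a fresh table:

    create table t1 (c1 int, c2 varchar(30));
    -- t1 is empty, so nothing is inserted here
    insert into t1 select row_number() over(), 'no rows expected' from t1;
    insert into t1 values (1, 'seed_1'), (4, 'seed_2');
    -- two source rows, so exactly two rows numbered 11 and 12 are added
    insert into t1 select 10 + row_number() over(), 'two rows expected' from t1;
    select c1, c2 from t1 order by c2, c1;
    drop table t1;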
diff --git a/mysql-test/suite/encryption/r/innodb-spatial-index.result b/mysql-test/suite/encryption/r/innodb-spatial-index.result
index 852be0b9a73..c2a41ac4c2e 100644
--- a/mysql-test/suite/encryption/r/innodb-spatial-index.result
+++ b/mysql-test/suite/encryption/r/innodb-spatial-index.result
@@ -9,10 +9,14 @@ ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong creat
DROP TABLE t1;
CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
c VARCHAR(256), coordinate POINT NOT NULL) ENCRYPTED=YES ENGINE=INNODB;
-CREATE SPATIAL INDEX b on t1(coordinate);
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
-ALTER TABLE t1 ADD SPATIAL INDEX b(coordinate);
+ALTER TABLE t1 ADD SPATIAL INDEX b(coordinate), ALGORITHM=COPY;
ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+ALTER TABLE t1 ADD SPATIAL INDEX b(coordinate), FORCE, ALGORITHM=INPLACE;
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ENCRYPTED'
+ALTER TABLE t1 ADD SPATIAL INDEX(coordinate);
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ENCRYPTED'
+CREATE SPATIAL INDEX b on t1(coordinate);
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ENCRYPTED'
DROP TABLE t1;
CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
c VARCHAR(256), coordinate POINT NOT NULL) ENCRYPTED=DEFAULT ENGINE=INNODB;
diff --git a/mysql-test/suite/encryption/t/innodb-spatial-index.test b/mysql-test/suite/encryption/t/innodb-spatial-index.test
index de78461c765..6b6191c69cb 100644
--- a/mysql-test/suite/encryption/t/innodb-spatial-index.test
+++ b/mysql-test/suite/encryption/t/innodb-spatial-index.test
@@ -31,12 +31,17 @@ DROP TABLE t1;
#
CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
c VARCHAR(256), coordinate POINT NOT NULL) ENCRYPTED=YES ENGINE=INNODB;
+# FIXME: MDEV-13851 Encrypted table refuses some form of ALGORITHM=COPY,
+# but allows rebuild by FORCE
--replace_regex /#sql-[0-9a-f_]*`/#sql-temporary`/
--error ER_CANT_CREATE_TABLE
+ALTER TABLE t1 ADD SPATIAL INDEX b(coordinate), ALGORITHM=COPY;
+--error ER_ILLEGAL_HA_CREATE_OPTION
+ALTER TABLE t1 ADD SPATIAL INDEX b(coordinate), FORCE, ALGORITHM=INPLACE;
+--error ER_ILLEGAL_HA_CREATE_OPTION
+ALTER TABLE t1 ADD SPATIAL INDEX(coordinate);
+--error ER_ILLEGAL_HA_CREATE_OPTION
CREATE SPATIAL INDEX b on t1(coordinate);
---replace_regex /#sql-[0-9a-f_]*`/#sql-temporary`/
---error ER_CANT_CREATE_TABLE
-ALTER TABLE t1 ADD SPATIAL INDEX b(coordinate);
DROP TABLE t1;
CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
@@ -69,4 +74,4 @@ INSERT INTO t2 values(1, 'secret', ST_GeomFromText('POINT(903994614 180726515)')
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION > 0;
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0;
-DROP TABLE t1, t2;
\ No newline at end of file
+DROP TABLE t1, t2;
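The updated encryption test accepts two different refusals when a SPATIAL index is added to an ENCRYPTED=YES InnoDB table: ALGORITHM=COPY still fails with errno 140 "Wrong create options", while the in-place and default paths now report that InnoDB does not support the 'ENCRYPTED' create option; the FIXME records this asymmetry as MDEV-13851. A sketch of the same statements as a plain SQL session, with the outcomes taken from the result file above:

    CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
                     c VARCHAR(256), coordinate POINT NOT NULL) ENCRYPTED=YES ENGINE=INNODB;
    ALTER TABLE t1 ADD SPATIAL INDEX b(coordinate), ALGORITHM=COPY;           -- errno 140 "Wrong create options"
    ALTER TABLE t1 ADD SPATIAL INDEX b(coordinate), FORCE, ALGORITHM=INPLACE; -- does not support the create option 'ENCRYPTED'
    CREATE SPATIAL INDEX b ON t1(coordinate);                                 -- same 'ENCRYPTED' refusal
    DROP TABLE t1;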
diff --git a/mysql-test/suite/federated/net_thd_crash-12951.result b/mysql-test/suite/federated/net_thd_crash-12951.result
new file mode 100644
index 00000000000..573ac96efff
--- /dev/null
+++ b/mysql-test/suite/federated/net_thd_crash-12951.result
@@ -0,0 +1,11 @@
+set global query_cache_size= 16*1024*1024;
+set global query_cache_type= 1;
+create table t1 (i int) engine=innodb;
+create table t2 (i int) engine=federated
+CONNECTION="mysql://root@localhost:MASTER_MYPORT/test/t1";
+select * from t2;
+i
+drop table t2;
+drop table t1;
+set global query_cache_type= default;
+set global query_cache_size= default;
diff --git a/mysql-test/suite/federated/net_thd_crash-12951.test b/mysql-test/suite/federated/net_thd_crash-12951.test
new file mode 100644
index 00000000000..81cd826686e
--- /dev/null
+++ b/mysql-test/suite/federated/net_thd_crash-12951.test
@@ -0,0 +1,23 @@
+#
+# MDEV-12951 Server crash [mysqld got exception 0xc0000005]
+#
+
+--source include/have_innodb.inc
+
+set global query_cache_size= 16*1024*1024;
+set global query_cache_type= 1;
+
+create table t1 (i int) engine=innodb;
+--replace_result $MASTER_MYPORT MASTER_MYPORT
+eval create table t2 (i int) engine=federated
+ CONNECTION="mysql://root@localhost:$MASTER_MYPORT/test/t1";
+
+select * from t2;
+
+source include/restart_mysqld.inc;
+
+drop table t2;
+drop table t1;
+
+set global query_cache_type= default;
+set global query_cache_size= default;
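The new federated test for MDEV-12951 enables the query cache, selects through a FEDERATED table (which keeps its own connection and internal THD), restarts the server, and only then drops the tables; before the fix this sequence crashed the server. A sketch of the same shape as an interactive session, with the remote port left as a placeholder instead of $MASTER_MYPORT:

    set global query_cache_size= 16*1024*1024;
    set global query_cache_type= 1;
    create table t1 (i int) engine=innodb;
    create table t2 (i int) engine=federated
      connection='mysql://root@localhost:3306/test/t1';  -- placeholder port
    select * from t2;
    -- restart the server here (the test sources include/restart_mysqld.inc)
    drop table t2;
    drop table t1;
    set global query_cache_type= default;
    set global query_cache_size= default;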
diff --git a/mysql-test/suite/funcs_1/r/innodb_views.result b/mysql-test/suite/funcs_1/r/innodb_views.result
index 8b861011dd3..5a27cb65b3d 100644
--- a/mysql-test/suite/funcs_1/r/innodb_views.result
+++ b/mysql-test/suite/funcs_1/r/innodb_views.result
@@ -4314,7 +4314,7 @@ CREATE VIEW v2 AS Select * from test.v1;
ERROR 42S02: Table 'test.v1' doesn't exist
DROP VIEW IF EXISTS v2;
Warnings:
-Note 4089 Unknown VIEW: 'test.v2'
+Note 4090 Unknown VIEW: 'test.v2'
Testcase 3.3.1.25
--------------------------------------------------------------------------------
@@ -7566,7 +7566,7 @@ Call sp1() ;
ERROR 42000: PROCEDURE test.sp1 does not exist
Drop view if exists test.v1 ;
Warnings:
-Note 4089 Unknown VIEW: 'test.v1'
+Note 4090 Unknown VIEW: 'test.v1'
Drop procedure sp1 ;
ERROR 42000: PROCEDURE test.sp1 does not exist
@@ -21312,7 +21312,7 @@ CREATE VIEW v1 AS SELECT f1 FROM t1;
DROP VIEW IF EXISTS v1;
DROP VIEW IF EXISTS v1;
Warnings:
-Note 4089 Unknown VIEW: 'test.v1'
+Note 4090 Unknown VIEW: 'test.v1'
Testcase 3.3.1.68
--------------------------------------------------------------------------------
diff --git a/mysql-test/suite/funcs_1/r/memory_views.result b/mysql-test/suite/funcs_1/r/memory_views.result
index 082c8aeb5f3..a2af9082c72 100644
--- a/mysql-test/suite/funcs_1/r/memory_views.result
+++ b/mysql-test/suite/funcs_1/r/memory_views.result
@@ -4315,7 +4315,7 @@ CREATE VIEW v2 AS Select * from test.v1;
ERROR 42S02: Table 'test.v1' doesn't exist
DROP VIEW IF EXISTS v2;
Warnings:
-Note 4089 Unknown VIEW: 'test.v2'
+Note 4090 Unknown VIEW: 'test.v2'
Testcase 3.3.1.25
--------------------------------------------------------------------------------
@@ -7567,7 +7567,7 @@ Call sp1() ;
ERROR 42000: PROCEDURE test.sp1 does not exist
Drop view if exists test.v1 ;
Warnings:
-Note 4089 Unknown VIEW: 'test.v1'
+Note 4090 Unknown VIEW: 'test.v1'
Drop procedure sp1 ;
ERROR 42000: PROCEDURE test.sp1 does not exist
@@ -21314,7 +21314,7 @@ CREATE VIEW v1 AS SELECT f1 FROM t1;
DROP VIEW IF EXISTS v1;
DROP VIEW IF EXISTS v1;
Warnings:
-Note 4089 Unknown VIEW: 'test.v1'
+Note 4090 Unknown VIEW: 'test.v1'
Testcase 3.3.1.68
--------------------------------------------------------------------------------
diff --git a/mysql-test/suite/funcs_1/r/myisam_views-big.result b/mysql-test/suite/funcs_1/r/myisam_views-big.result
index 949aaabf864..3290b3dd36a 100644
--- a/mysql-test/suite/funcs_1/r/myisam_views-big.result
+++ b/mysql-test/suite/funcs_1/r/myisam_views-big.result
@@ -4784,7 +4784,7 @@ CREATE VIEW v2 AS Select * from test.v1;
ERROR 42S02: Table 'test.v1' doesn't exist
DROP VIEW IF EXISTS v2;
Warnings:
-Note 4089 Unknown VIEW: 'test.v2'
+Note 4090 Unknown VIEW: 'test.v2'
Testcase 3.3.1.25
--------------------------------------------------------------------------------
@@ -8387,7 +8387,7 @@ Call sp1() ;
ERROR 42000: PROCEDURE test.sp1 does not exist
Drop view if exists test.v1 ;
Warnings:
-Note 4089 Unknown VIEW: 'test.v1'
+Note 4090 Unknown VIEW: 'test.v1'
Drop procedure sp1 ;
ERROR 42000: PROCEDURE test.sp1 does not exist
@@ -22989,7 +22989,7 @@ CREATE VIEW v1 AS SELECT f1 FROM t1;
DROP VIEW IF EXISTS v1;
DROP VIEW IF EXISTS v1;
Warnings:
-Note 4089 Unknown VIEW: 'test.v1'
+Note 4090 Unknown VIEW: 'test.v1'
Testcase 3.3.1.68
--------------------------------------------------------------------------------
diff --git a/mysql-test/suite/innodb/r/innodb_bug46000.result b/mysql-test/suite/innodb/r/innodb_bug46000.result
index 7c5ef13f3dc..86e8766fb8f 100644
--- a/mysql-test/suite/innodb/r/innodb_bug46000.result
+++ b/mysql-test/suite/innodb/r/innodb_bug46000.result
@@ -6,7 +6,7 @@ show warnings;
Level Code Message
Warning 1280 Cannot Create Index with name 'GEN_CLUST_INDEX'. The name is reserved for the system default primary index.
Error 1280 Incorrect index name 'GEN_CLUST_INDEX'
-Warning 1030 Got error 124 "Wrong index given to function" from storage engine InnoDB
+Warning 1030 Got error 124 "Wrong index given to a function" from storage engine InnoDB
create table bug46000(id int) engine=innodb;
create index GEN_CLUST_INDEX on bug46000(id);
ERROR 42000: Incorrect index name 'GEN_CLUST_INDEX'
diff --git a/mysql-test/suite/innodb/r/row_format_redundant.result b/mysql-test/suite/innodb/r/row_format_redundant.result
index 2bc8769092f..a2d5bbef8df 100644
--- a/mysql-test/suite/innodb/r/row_format_redundant.result
+++ b/mysql-test/suite/innodb/r/row_format_redundant.result
@@ -67,7 +67,7 @@ SELECT COUNT(*) FROM t3;
COUNT(*)
0
RENAME TABLE t1 TO tee_one;
-ERROR HY000: Error on rename of './test/t1' to './test/tee_one' (errno: 155 "The table does not exist in engine")
+ERROR HY000: Error on rename of './test/t1' to './test/tee_one' (errno: 155 "The table does not exist in the storage engine")
DROP TABLE t1;
Warnings:
Warning 1932 Table 'test.t1' doesn't exist in engine
diff --git a/mysql-test/suite/innodb/t/log_data_file_size.test b/mysql-test/suite/innodb/t/log_data_file_size.test
index 7928fc45520..f01e013ddfa 100644
--- a/mysql-test/suite/innodb/t/log_data_file_size.test
+++ b/mysql-test/suite/innodb/t/log_data_file_size.test
@@ -64,6 +64,9 @@ truncate(FILE, $page_size * 4);
close FILE;
open(FILE, "+<", "$ENV{'MYSQLD_DATADIR'}test/ibd4f.ibd") or die;
truncate(FILE, $page_size * 4 + 1234);
+# Work around MDEV-12699 and ensure that the truncated page is all-zero.
+sysseek(FILE, $page_size * 4, 0);
+syswrite(FILE, chr(0) x 1234);
close FILE;
open(FILE, "+<", "$ENV{'MYSQLD_DATADIR'}test/ibd5.ibd") or die;
truncate(FILE, $page_size * 5);
diff --git a/mysql-test/suite/innodb_gis/r/alter_spatial_index.result b/mysql-test/suite/innodb_gis/r/alter_spatial_index.result
index abc3c5b5f05..17f1f7e1b06 100644
--- a/mysql-test/suite/innodb_gis/r/alter_spatial_index.result
+++ b/mysql-test/suite/innodb_gis/r/alter_spatial_index.result
@@ -47,10 +47,20 @@ VALUES(10,ST_GeomFromText('POINT(160 160)'),ST_GeomFromText('LINESTRING(140 140,
ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))'),
ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))'));
ALTER TABLE tab ADD SPATIAL INDEX idx2(c2 ASC);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE tab ADD SPATIAL KEY idx3(c3 DESC);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE tab ADD SPATIAL INDEX idx4(c4 ASC) COMMENT 'testing spatial index on Polygon';
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE tab ADD SPATIAL KEY idx5(c5 ASC) COMMENT 'testing spatial index on Geometry';
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE tab ADD INDEX idx6(c4(10)) USING BTREE;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
SELECT c1,ST_Astext(c2),ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1);
c1 ST_Astext(c2) ST_Astext(c4)
@@ -140,8 +150,14 @@ c1 ST_Astext(c2) ST_Astext(c4)
1 POINT(1000 1000) POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))
DELETE FROM tab WHERE ST_Crosses(tab.c4, @g1);
ALTER TABLE tab CHANGE COLUMN c2 c22 POINT NOT NULL;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE tab CHANGE COLUMN c3 c33 LINESTRING NOT NULL;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE tab CHANGE COLUMN c4 c44 POLYGON NOT NULL;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
SHOW CREATE TABLE tab;
Table Create Table
tab CREATE TABLE `tab` (
@@ -166,8 +182,14 @@ tab 1 idx4 1 c44 A # 32 NULL SPATIAL testing spatial index on Polygon
tab 1 idx5 1 c5 A # 32 NULL SPATIAL testing spatial index on Geometry
tab 1 idx6 1 c44 A # 10 NULL BTREE
ALTER TABLE tab CHANGE COLUMN c22 c2 POINT NOT NULL;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE tab CHANGE COLUMN c33 c3 LINESTRING NOT NULL;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE tab CHANGE COLUMN c44 c4 POLYGON NOT NULL;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
SHOW CREATE TABLE tab;
Table Create Table
tab CREATE TABLE `tab` (
@@ -210,7 +232,11 @@ DELETE FROM tab WHERE MBREquals(tab.c4, @g1);
SELECT c1,ST_Astext(c2),ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1);
c1 ST_Astext(c2) ST_Astext(c4)
ALTER TABLE tab DROP PRIMARY KEY;
+affected rows: 4
+info: Records: 4 Duplicates: 0 Warnings: 0
ALTER TABLE tab ADD PRIMARY KEY(c2) ;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
SELECT c1,ST_Astext(c2),ST_Astext(c4) FROM tab WHERE ST_Touches(tab.c4, @g1);
c1 ST_Astext(c2) ST_Astext(c4)
@@ -254,7 +280,11 @@ SELECT c1,ST_Astext(c2),ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) OR
c1 ST_Astext(c2) ST_Astext(c4)
INSERT INTO tab SELECT * FROM tab1;
ALTER TABLE tab DROP PRIMARY KEY;
+affected rows: 1
+info: Records: 1 Duplicates: 0 Warnings: 0
ALTER TABLE tab DROP INDEX idx2;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR
CREATE TEMPORARY TABLE temp_tab AS SELECT * FROM tab where c1 = c2;
ERROR HY000: Illegal parameter data types int and geometry for operation '='
@@ -309,8 +339,14 @@ tab 1 idx5 1 c5 A # 32 NULL SPATIAL testing spatial index on Geometry
tab 1 idx6 1 c4 A # 10 NULL BTREE
DELETE FROM tab;
ALTER TABLE tab ADD PRIMARY KEY(c2);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
CREATE SPATIAL INDEX idx2 ON tab(c2 ASC);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE tab ADD CONSTRAINT const_1 UNIQUE(c2);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
SHOW CREATE TABLE tab;
Table Create Table
tab CREATE TABLE `tab` (
@@ -342,9 +378,17 @@ ST_GeomFromText('POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))'),
ST_GeomFromText('POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))'));
DELETE FROM tab;
ALTER TABLE tab DROP PRIMARY KEY ;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE tab DROP KEY const_1;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE tab ADD PRIMARY KEY(c5(10));
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE tab ADD CONSTRAINT const_1 UNIQUE(c5(10));
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
SHOW CREATE TABLE tab;
Table Create Table
tab CREATE TABLE `tab` (
@@ -422,13 +466,25 @@ ANALYZE TABLE tab;
Table Op Msg_type Msg_text
test.tab analyze status OK
ALTER TABLE tab ADD SPATIAL INDEX idx2(c2 ASC);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE tab ADD SPATIAL KEY idx3(c3 DESC);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE tab ADD SPATIAL INDEX idx4(c4 ASC) COMMENT 'testing spatial index on Polygon';
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE tab ADD SPATIAL KEY idx5(c5 ASC) COMMENT 'testing spatial index on Geometry';
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE tab ADD INDEX idx6(c4(10)) USING BTREE;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE tab MODIFY COLUMN c2 GEOMETRY NOT NULL;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE tab add COLUMN c8 POINT NOT NULL, ALGORITHM = INPLACE, LOCK=NONE;
-ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: Do not support online operation on table with GIS index. Try ALGORITHM=COPY
+ERROR 0A000: LOCK=NONE is not supported. Reason: Do not support online operation on table with GIS index. Try LOCK=SHARED
SHOW CREATE TABLE tab;
Table Create Table
tab CREATE TABLE `tab` (
@@ -462,6 +518,8 @@ DELETE FROM tab WHERE MBRContains(tab.c4, @g1);
SELECT c1,ST_Astext(c2),ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c2) ST_Astext(c4)
ALTER TABLE tab MODIFY COLUMN c4 GEOMETRY NOT NULL;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
SHOW CREATE TABLE tab;
Table Create Table
tab CREATE TABLE `tab` (
@@ -510,8 +568,14 @@ test.tab analyze status OK
SET @g1 = ST_GeomFromText('POLYGON((4010 4010,4020 4020,4030 4030,4040 4030,4020 4010,4010 4010))');
SET @g2 = ST_GeomFromText('LINESTRING(1 1,2 2,3 3)');
ALTER TABLE tab MODIFY COLUMN c2 POINT NOT NULL;
+affected rows: 8
+info: Records: 8 Duplicates: 0 Warnings: 0
ALTER TABLE tab MODIFY COLUMN c3 LINESTRING NOT NULL;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE tab MODIFY COLUMN c4 POLYGON NOT NULL;
+affected rows: 8
+info: Records: 8 Duplicates: 0 Warnings: 0
SHOW CREATE TABLE tab;
Table Create Table
tab CREATE TABLE `tab` (
@@ -588,7 +652,11 @@ DROP TABLE tab;
CREATE TABLE parent (id POINT, PRIMARY KEY(id)) ENGINE=InnoDB;
CREATE TABLE child (id GEOMETRY NOT NULL, parent_id POINT NOT NULL) ENGINE=InnoDB;
ALTER TABLE parent ADD SPATIAL INDEX idx1(id ASC);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE child ADD SPATIAL INDEX idx2(parent_id ASC);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
SHOW CREATE TABLE parent;
Table Create Table
parent CREATE TABLE `parent` (
@@ -616,7 +684,11 @@ DROP table child,parent;
CREATE TABLE parent (id GEOMETRY, PRIMARY KEY(id(10))) ENGINE=InnoDB;
CREATE TABLE child (id GEOMETRY NOT NULL, parent_id GEOMETRY NOT NULL) ENGINE=InnoDB;
ALTER TABLE parent ADD SPATIAL INDEX idx1(id ASC) ;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE child ADD SPATIAL INDEX idx2(parent_id ASC);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
SHOW CREATE TABLE parent;
Table Create Table
parent CREATE TABLE `parent` (
@@ -644,11 +716,12 @@ create table t1 (c1 int) engine=innodb;
insert into t1 values(NULL);
alter table t1 add b geometry, add spatial index(b), algorithm=inplace;
ERROR 42000: All parts of a SPATIAL index must be NOT NULL
-alter table t1 add b geometry, algorithm=inplace;
-update t1 set b = st_geomfromtext('point(0 0)');
-alter table t1 add spatial index(b), algorithm=inplace;
-ERROR 42000: All parts of a SPATIAL index must be NOT NULL
-delete from t1;
+alter table t1 add b geometry not null, add spatial index(b), algorithm=inplace;
+ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
+alter table t1 add b geometry not null default st_geomfromtext('POINT(0 0)'),
+add spatial index(b), algorithm=inplace;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
DROP table t1;
create table t1 (c1 int) engine=innodb;
insert into t1 values(NULL);
@@ -656,11 +729,8 @@ alter table t1 add b geometry, add spatial index(b), algorithm=copy;
ERROR 42000: All parts of a SPATIAL index must be NOT NULL
alter table t1 add b geometry not null, add spatial index(b), algorithm=copy;
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
-update t1 set b = st_geomfromtext('point(0 0)');
-ERROR 42S22: Unknown column 'b' in 'field list'
-alter table t1 add spatial index(b), algorithm=copy;
-ERROR 42000: Key column 'b' doesn't exist in table
-delete from t1;
+alter table t1 add b geometry not null default st_geomfromtext('POINT(0 0)'),
+add spatial index(b), algorithm=copy;
DROP table t1;
#
# BUG#20111575 ALTER TABLE...ADD SPATIAL INDEX...LOCK NONE IS REFUSED
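The rewritten NULL-handling cases above encode the current rules for adding a SPATIAL index to a table that already contains rows: a nullable geometry column is rejected (all parts of a SPATIAL index must be NOT NULL), a NOT NULL geometry column without a default fails because the existing rows cannot be populated with a valid geometry, and a NOT NULL column with an explicit ST_GeomFromText default succeeds for both ALGORITHM=INPLACE and ALGORITHM=COPY. A condensed sketch:

    create table t1 (c1 int) engine=innodb;
    insert into t1 values (NULL);
    alter table t1 add b geometry, add spatial index(b), algorithm=inplace;
      -- All parts of a SPATIAL index must be NOT NULL
    alter table t1 add b geometry not null, add spatial index(b), algorithm=inplace;
      -- Cannot get geometry object from data you send to the GEOMETRY field
    alter table t1 add b geometry not null default st_geomfromtext('POINT(0 0)'),
      add spatial index(b), algorithm=inplace;  -- succeeds
    drop table t1;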
diff --git a/mysql-test/suite/innodb_gis/t/alter_spatial_index.test b/mysql-test/suite/innodb_gis/t/alter_spatial_index.test
index 48052da021f..2b834ac69a6 100644
--- a/mysql-test/suite/innodb_gis/t/alter_spatial_index.test
+++ b/mysql-test/suite/innodb_gis/t/alter_spatial_index.test
@@ -86,6 +86,7 @@ ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010
ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))'));
+--enable_info
ALTER TABLE tab ADD SPATIAL INDEX idx2(c2 ASC);
ALTER TABLE tab ADD SPATIAL KEY idx3(c3 DESC);
@@ -95,7 +96,7 @@ ALTER TABLE tab ADD SPATIAL INDEX idx4(c4 ASC) COMMENT 'testing spatial index on
ALTER TABLE tab ADD SPATIAL KEY idx5(c5 ASC) COMMENT 'testing spatial index on Geometry';
ALTER TABLE tab ADD INDEX idx6(c4(10)) USING BTREE;
-
+--disable_info
# Test the MBRContains
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
@@ -195,22 +196,26 @@ SELECT c1,ST_Astext(c2),ST_Astext(c4) FROM tab WHERE ST_Crosses(tab.c4, @g1);
DELETE FROM tab WHERE ST_Crosses(tab.c4, @g1);
+--enable_info
ALTER TABLE tab CHANGE COLUMN c2 c22 POINT NOT NULL;
ALTER TABLE tab CHANGE COLUMN c3 c33 LINESTRING NOT NULL;
ALTER TABLE tab CHANGE COLUMN c4 c44 POLYGON NOT NULL;
+--disable_info
SHOW CREATE TABLE tab;
--replace_column 7 #
SHOW INDEX FROM tab;
+--enable_info
ALTER TABLE tab CHANGE COLUMN c22 c2 POINT NOT NULL;
ALTER TABLE tab CHANGE COLUMN c33 c3 LINESTRING NOT NULL;
ALTER TABLE tab CHANGE COLUMN c44 c4 POLYGON NOT NULL;
+--disable_info
SHOW CREATE TABLE tab;
@@ -234,9 +239,11 @@ DELETE FROM tab WHERE MBREquals(tab.c4, @g1);
SELECT c1,ST_Astext(c2),ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1);
+--enable_info
ALTER TABLE tab DROP PRIMARY KEY;
ALTER TABLE tab ADD PRIMARY KEY(c2) ;
+--disable_info
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
@@ -298,9 +305,11 @@ SELECT c1,ST_Astext(c2),ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) OR
INSERT INTO tab SELECT * FROM tab1;
+--enable_info
ALTER TABLE tab DROP PRIMARY KEY;
ALTER TABLE tab DROP INDEX idx2;
+--disable_info
# Check spatial index on temp tables
--error ER_ILLEGAL_PARAMETER_DATA_TYPES2_FOR_OPERATION
@@ -346,11 +355,13 @@ SHOW INDEX FROM tab;
DELETE FROM tab;
+--enable_info
ALTER TABLE tab ADD PRIMARY KEY(c2);
CREATE SPATIAL INDEX idx2 ON tab(c2 ASC);
ALTER TABLE tab ADD CONSTRAINT const_1 UNIQUE(c2);
+--disable_info
SHOW CREATE TABLE tab;
@@ -366,6 +377,7 @@ ST_GeomFromText('POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))'));
DELETE FROM tab;
+--enable_info
ALTER TABLE tab DROP PRIMARY KEY ;
ALTER TABLE tab DROP KEY const_1;
@@ -373,6 +385,7 @@ ALTER TABLE tab DROP KEY const_1;
ALTER TABLE tab ADD PRIMARY KEY(c5(10));
ALTER TABLE tab ADD CONSTRAINT const_1 UNIQUE(c5(10));
+--disable_info
SHOW CREATE TABLE tab;
@@ -449,6 +462,7 @@ ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010
ANALYZE TABLE tab;
+--enable_info
ALTER TABLE tab ADD SPATIAL INDEX idx2(c2 ASC);
ALTER TABLE tab ADD SPATIAL KEY idx3(c3 DESC);
@@ -470,6 +484,7 @@ ALTER TABLE tab MODIFY COLUMN c2 GEOMETRY NOT NULL;
--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
ALTER TABLE tab add COLUMN c8 POINT NOT NULL, ALGORITHM = INPLACE, LOCK=NONE;
+--disable_info
SHOW CREATE TABLE tab;
@@ -487,10 +502,12 @@ DELETE FROM tab WHERE MBRContains(tab.c4, @g1);
SELECT c1,ST_Astext(c2),ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
+--enable_info
# --error ER_CANT_CREATE_GEOMETRY_OBJECT
# ALTER TABLE tab MODIFY COLUMN c2 POLYGON NOT NULL;
ALTER TABLE tab MODIFY COLUMN c4 GEOMETRY NOT NULL;
+--disable_info
SHOW CREATE TABLE tab;
@@ -524,11 +541,13 @@ SET @g1 = ST_GeomFromText('POLYGON((4010 4010,4020 4020,4030 4030,4040 4030,4020
SET @g2 = ST_GeomFromText('LINESTRING(1 1,2 2,3 3)');
# When Point type data exist in the column allow DDL operation
+--enable_info
ALTER TABLE tab MODIFY COLUMN c2 POINT NOT NULL;
ALTER TABLE tab MODIFY COLUMN c3 LINESTRING NOT NULL;
ALTER TABLE tab MODIFY COLUMN c4 POLYGON NOT NULL;
+--disable_info
SHOW CREATE TABLE tab;
@@ -575,6 +594,7 @@ DELETE FROM tab WHERE ST_Touches(tab.c4, @g1) OR ST_Touches(tab.c3,@g2);
SELECT c1,ST_Astext(c2),ST_AsText(c3),ST_Astext(c4) FROM tab WHERE ST_Touches(tab.c4, @g1)
OR ST_Touches(tab.c3,@g2);
+--enable_info
# --error ER_SPATIAL_MUST_HAVE_GEOM_COL
--error ER_WRONG_ARGUMENTS
ALTER TABLE tab MODIFY COLUMN c4 INT NOT NULL;
@@ -582,6 +602,7 @@ ALTER TABLE tab MODIFY COLUMN c4 INT NOT NULL;
# --error ER_SPATIAL_MUST_HAVE_GEOM_COL
--error ER_WRONG_ARGUMENTS
ALTER TABLE tab MODIFY COLUMN c4 BLOB NOT NULL;
+--disable_info
# Test InnoDB to Myisam to InnoDB
ALTER TABLE tab ENGINE Myisam;
@@ -619,9 +640,11 @@ CREATE TABLE parent (id POINT, PRIMARY KEY(id)) ENGINE=InnoDB;
CREATE TABLE child (id GEOMETRY NOT NULL, parent_id POINT NOT NULL) ENGINE=InnoDB;
+--enable_info
ALTER TABLE parent ADD SPATIAL INDEX idx1(id ASC);
ALTER TABLE child ADD SPATIAL INDEX idx2(parent_id ASC);
+--disable_info
SHOW CREATE TABLE parent;
@@ -650,9 +673,11 @@ CREATE TABLE parent (id GEOMETRY, PRIMARY KEY(id(10))) ENGINE=InnoDB;
CREATE TABLE child (id GEOMETRY NOT NULL, parent_id GEOMETRY NOT NULL) ENGINE=InnoDB;
+--enable_info
ALTER TABLE parent ADD SPATIAL INDEX idx1(id ASC) ;
ALTER TABLE child ADD SPATIAL INDEX idx2(parent_id ASC);
+--disable_info
SHOW CREATE TABLE parent;
@@ -682,29 +707,13 @@ alter table t1 add b geometry, add spatial index(b), algorithm=inplace;
# Add spatial index fail, since there's invalid geo data.
# The case has to be commented because it no longer fails and following cases
# don't expect the effect of such a statement.
-#--error ER_CANT_CREATE_GEOMETRY_OBJECT
-# alter table t1 add b geometry not null, add spatial index(b), algorithm=inplace;
-
-# Add a geometry column.
-alter table t1 add b geometry, algorithm=inplace;
-
-# Add spatial index fail, since there's a NULL or invalid geo data.
-# The case has to be commented because it no longer fails and following cases
-# don't expect the effect of such a statement.
-#--error ER_CANT_CREATE_GEOMETRY_OBJECT
-#alter table t1 add spatial index(b), algorithm=inplace;
-
-# Update invalide geo data to point(0 0).
-update t1 set b = st_geomfromtext('point(0 0)');
-
-# Add spatial index success.
---error ER_SPATIAL_CANT_HAVE_NULL
-alter table t1 add spatial index(b), algorithm=inplace;
-
-# Delete rows.
-delete from t1;
+--error ER_CANT_CREATE_GEOMETRY_OBJECT
+ alter table t1 add b geometry not null, add spatial index(b), algorithm=inplace;
-#cleanup
+--enable_info
+alter table t1 add b geometry not null default st_geomfromtext('POINT(0 0)'),
+add spatial index(b), algorithm=inplace;
+--disable_info
DROP table t1;
# Check add spatial index when table already has rows (copy).
@@ -716,32 +725,11 @@ insert into t1 values(NULL);
alter table t1 add b geometry, add spatial index(b), algorithm=copy;
# Add spatial index fail, since there's a NULL or invalid geo data.
-# --error ER_INVALID_USE_OF_NULL
--error ER_CANT_CREATE_GEOMETRY_OBJECT
alter table t1 add b geometry not null, add spatial index(b), algorithm=copy;
-# Add a geometry column.
-# --error ER_INVALID_USE_OF_NULL
-# alter table t1 add b geometry not null, algorithm=copy;
-
-# Add spatial index.
-# The case has to be commented because it no longer fails and following cases
-# don't expect the effect of such a statement.
-#--error ER_CANT_CREATE_GEOMETRY_OBJECT
-#alter table t1 add spatial index(b), algorithm=copy;
-
-# Update invalide geo data to point(0 0).
---error ER_BAD_FIELD_ERROR
-update t1 set b = st_geomfromtext('point(0 0)');
-
-# Add spatial index success.
---error ER_KEY_COLUMN_DOES_NOT_EXITS
-alter table t1 add spatial index(b), algorithm=copy;
-
-# Delete rows.
-delete from t1;
-
-#cleanup
+alter table t1 add b geometry not null default st_geomfromtext('POINT(0 0)'),
+add spatial index(b), algorithm=copy;
DROP table t1;
--echo #
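The --enable_info/--disable_info pairs added throughout this test make mysqltest record the "affected rows" and "info" lines that appear in the .result changes above, which is what lets the test tell metadata-only ALTERs (affected rows: 0) apart from ones that copy the rows (for example affected rows: 8 when c2 is changed back to POINT). A minimal mysqltest fragment using the same switches, with an illustrative table name:

    --enable_info
    ALTER TABLE tab MODIFY COLUMN c2 POINT NOT NULL;  # result file carries "affected rows"/"info" for this statement
    --disable_info
    SHOW CREATE TABLE tab;                            # recorded without info lines again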
diff --git a/mysql-test/suite/innodb_zip/r/create_options.result b/mysql-test/suite/innodb_zip/r/create_options.result
index 9b9283d5df1..b217388b6dc 100644
--- a/mysql-test/suite/innodb_zip/r/create_options.result
+++ b/mysql-test/suite/innodb_zip/r/create_options.result
@@ -261,14 +261,14 @@ Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB
# can be set to default values during strict mode.
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
ALTER TABLE t1 ADD COLUMN f1 INT;
+SHOW WARNINGS;
+Level Code Message
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`i` int(11) DEFAULT NULL,
`f1` int(11) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4
-SHOW WARNINGS;
-Level Code Message
ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0;
SHOW WARNINGS;
Level Code Message
diff --git a/mysql-test/suite/innodb_zip/r/wl6501_crash_4.result b/mysql-test/suite/innodb_zip/r/wl6501_crash_4.result
index 1a02c1a711a..d766ecceaac 100644
--- a/mysql-test/suite/innodb_zip/r/wl6501_crash_4.result
+++ b/mysql-test/suite/innodb_zip/r/wl6501_crash_4.result
@@ -461,10 +461,6 @@ i f c
drop table t;
set global innodb_file_per_table = 1;
call mtr.add_suppression("does not exist in the InnoDB internal");
-Warnings:
-Error 145 Table './mtr/test_suppressions' is marked as crashed and should be repaired
-Error 1194 Table 'test_suppressions' is marked as crashed and should be repaired
-Error 1034 1 client is using or hasn't closed the table properly
set global innodb_file_per_table = on;
"1. Hit crash point on completing drop of all indexes before creation"
" of index is commenced."
@@ -520,3 +516,4 @@ check table t;
Table Op Msg_type Msg_text
test.t check Error Table 'test.t' doesn't exist
test.t check status Operation failed
+set global innodb_file_per_table = 1;
diff --git a/mysql-test/suite/innodb_zip/t/create_options.test b/mysql-test/suite/innodb_zip/t/create_options.test
index f91cd7d42dd..fce64060df3 100644
--- a/mysql-test/suite/innodb_zip/t/create_options.test
+++ b/mysql-test/suite/innodb_zip/t/create_options.test
@@ -214,8 +214,8 @@ SHOW WARNINGS;
--echo # can be set to default values during strict mode.
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
ALTER TABLE t1 ADD COLUMN f1 INT;
-SHOW CREATE TABLE t1;
SHOW WARNINGS;
+SHOW CREATE TABLE t1;
ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0;
SHOW WARNINGS;
ALTER TABLE t1 ADD COLUMN f2 INT;
diff --git a/mysql-test/suite/parts/inc/part_exch_drop_tabs.inc b/mysql-test/suite/parts/inc/part_exch_drop_tabs.inc
index 5ffc5438a3e..7d6441b15ad 100644
--- a/mysql-test/suite/parts/inc/part_exch_drop_tabs.inc
+++ b/mysql-test/suite/parts/inc/part_exch_drop_tabs.inc
@@ -1,4 +1,3 @@
---disable_warnings
DROP TABLE IF EXISTS t_10;
DROP TABLE IF EXISTS t_100;
DROP TABLE IF EXISTS t_1000;
@@ -11,5 +10,3 @@ DROP TABLE IF EXISTS tsp_03;
DROP TABLE IF EXISTS tsp_04;
DROP TABLE IF EXISTS t_empty;
DROP TABLE IF EXISTS t_null;
---enable_warnings
-
diff --git a/mysql-test/suite/parts/inc/part_exch_tabs.inc b/mysql-test/suite/parts/inc/part_exch_tabs.inc
index 378e0c2278d..482c9d378e7 100644
--- a/mysql-test/suite/parts/inc/part_exch_tabs.inc
+++ b/mysql-test/suite/parts/inc/part_exch_tabs.inc
@@ -1,51 +1,27 @@
---disable_warnings
-DROP TABLE IF EXISTS t_10;
-DROP TABLE IF EXISTS t_100;
-DROP TABLE IF EXISTS t_1000;
-DROP TABLE IF EXISTS tp;
-DROP TABLE IF EXISTS tsp;
-DROP TABLE IF EXISTS t_empty;
-DROP TABLE IF EXISTS t_null;
---enable_warnings
+replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR;
+eval CREATE TABLE t_10 (a INT, b VARCHAR(55), PRIMARY KEY (a)) $data_directory $index_directory ENGINE = $engine_table;
-eval CREATE TABLE t_10 (a INT,
- b VARCHAR(55),
- PRIMARY KEY (a)) $data_directory $index_directory
-ENGINE = $engine_table;
+replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR;
+eval CREATE TABLE t_100 (a INT, b VARCHAR(55), PRIMARY KEY (a)) $data_directory $index_directory ENGINE = $engine_table;
-eval CREATE TABLE t_100 (a INT,
- b VARCHAR(55),
- PRIMARY KEY (a)) $data_directory $index_directory
-ENGINE = $engine_table;
+replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR;
+eval CREATE TABLE t_1000 (a INT, b VARCHAR(55), PRIMARY KEY (a)) $data_directory $index_directory ENGINE = $engine_table;
-eval CREATE TABLE t_1000 (a INT,
- b VARCHAR(55),
- PRIMARY KEY (a)) $data_directory $index_directory
-ENGINE = $engine_table;
+replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR;
+eval CREATE TABLE t_empty (a INT, b VARCHAR(55), PRIMARY KEY (a)) $data_directory $index_directory ENGINE = $engine_table;
-eval CREATE TABLE t_empty (a INT,
- b VARCHAR(55),
- PRIMARY KEY (a)) $data_directory $index_directory
-ENGINE = $engine_table;
+replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR;
+eval CREATE TABLE t_null (a INT, b VARCHAR(55), PRIMARY KEY (a)) $data_directory $index_directory ENGINE = $engine_table;
-eval CREATE TABLE t_null (a INT,
- b VARCHAR(55),
- PRIMARY KEY (a)) $data_directory $index_directory
-ENGINE = $engine_table;
-
-eval CREATE TABLE tp (a INT,
- b VARCHAR(55),
- PRIMARY KEY (a)) $data_directory $index_directory
-ENGINE = $engine_part
+replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR;
+eval CREATE TABLE tp (a INT, b VARCHAR(55), PRIMARY KEY (a)) $data_directory $index_directory ENGINE = $engine_part
PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (10) $p_data_directory $p_index_directory,
PARTITION p1 VALUES LESS THAN (100) $p_data_directory $p_index_directory,
PARTITION p2 VALUES LESS THAN (1000) $p_data_directory $p_index_directory);
-eval CREATE TABLE tsp (a INT,
- b VARCHAR(55),
- PRIMARY KEY (a)) $data_directory $index_directory
-ENGINE = $engine_subpart
+replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR;
+eval CREATE TABLE tsp (a INT, b VARCHAR(55), PRIMARY KEY (a)) $data_directory $index_directory ENGINE = $engine_subpart
PARTITION BY RANGE (a)
SUBPARTITION BY HASH(a)
(PARTITION p0 VALUES LESS THAN (10) $p_data_directory $p_index_directory
@@ -53,8 +29,7 @@ SUBPARTITION BY HASH(a)
SUBPARTITION sp01,
SUBPARTITION sp02,
SUBPARTITION sp03,
- SUBPARTITION sp04),
- PARTITION p1 VALUES LESS THAN (100)
+ SUBPARTITION sp04), PARTITION p1 VALUES LESS THAN (100)
(SUBPARTITION sp10 $p_data_directory $p_index_directory,
SUBPARTITION sp11 $p_data_directory $p_index_directory,
SUBPARTITION sp12 $p_data_directory $p_index_directory,
@@ -99,41 +74,13 @@ INSERT INTO tsp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four")
INSERT INTO tsp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
INSERT INTO tsp VALUES (182, "Hundred eight-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
-eval CREATE TABLE tsp_01(a INT,b VARCHAR(55),PRIMARY KEY (a))
- ENGINE = $engine_table $data_directory $index_directory
- AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 1;
-eval CREATE TABLE tsp_02(a INT,b VARCHAR(55),PRIMARY KEY (a))
- ENGINE = $engine_table $data_directory $index_directory
- AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 2;
-eval CREATE TABLE tsp_03(a INT,b VARCHAR(55),PRIMARY KEY (a))
- ENGINE = $engine_table $data_directory $index_directory
- AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 3;
-eval CREATE TABLE tsp_04(a INT,b VARCHAR(55),PRIMARY KEY (a))
- ENGINE = $engine_table $data_directory $index_directory
- AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 4;
-eval CREATE TABLE tsp_00(a INT,b VARCHAR(55),PRIMARY KEY (a))
- ENGINE = $engine_table $data_directory $index_directory
- AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 0;
-
-SHOW CREATE TABLE t_10;
-SHOW CREATE TABLE t_100;
-SHOW CREATE TABLE t_1000;
-SHOW CREATE TABLE tp;
-SHOW CREATE TABLE tsp;
-
---sorted_result
-SELECT * FROM t_10;
---sorted_result
-SELECT * FROM t_100;
---sorted_result
-SELECT * FROM t_1000;
---sorted_result
-SELECT * FROM tp;
---sorted_result
-SELECT * FROM tp WHERE a< 10;
---sorted_result
-SELECT * FROM tp WHERE a BETWEEN 11 AND 100;
---sorted_result
-SELECT * FROM tp WHERE a BETWEEN 101 AND 200;
---sorted_result
-SELECT * FROM tsp;
+replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR;
+eval CREATE TABLE tsp_01(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = $engine_table $data_directory $index_directory AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 1;
+replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR;
+eval CREATE TABLE tsp_02(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = $engine_table $data_directory $index_directory AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 2;
+replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR;
+eval CREATE TABLE tsp_03(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = $engine_table $data_directory $index_directory AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 3;
+replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR;
+eval CREATE TABLE tsp_04(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = $engine_table $data_directory $index_directory AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 4;
+replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR;
+eval CREATE TABLE tsp_00(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = $engine_table $data_directory $index_directory AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 0;
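Each eval CREATE TABLE in the rewritten include is now preceded by its own replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR; line; since replace_result applies only to the next statement, repeating it keeps the vardir-dependent DATA/INDEX DIRECTORY paths masked in every statement the .result files record. A minimal sketch of the pattern, with an illustrative table name but the include file's own variables:

    replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR;
    eval CREATE TABLE t_demo (a INT, b VARCHAR(55), PRIMARY KEY (a))
      $data_directory $index_directory ENGINE = $engine_table;  # path recorded as MYSQLTEST_VARDIR/...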
diff --git a/mysql-test/suite/parts/r/partition_exch_myisam_innodb.result b/mysql-test/suite/parts/r/partition_exch_myisam_innodb.result
index 9ff4afcfe35..a2a58c22c42 100644
--- a/mysql-test/suite/parts/r/partition_exch_myisam_innodb.result
+++ b/mysql-test/suite/parts/r/partition_exch_myisam_innodb.result
@@ -1,3 +1,58 @@
+CREATE TABLE t_10 (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_100 (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_1000 (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_empty (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_null (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE tp (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ,
+PARTITION p1 VALUES LESS THAN (100) ,
+PARTITION p2 VALUES LESS THAN (1000) );
+CREATE TABLE tsp (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = InnoDB
+PARTITION BY RANGE (a)
+SUBPARTITION BY HASH(a)
+(PARTITION p0 VALUES LESS THAN (10)
+(SUBPARTITION sp00,
+SUBPARTITION sp01,
+SUBPARTITION sp02,
+SUBPARTITION sp03,
+SUBPARTITION sp04), PARTITION p1 VALUES LESS THAN (100)
+(SUBPARTITION sp10 ,
+SUBPARTITION sp11 ,
+SUBPARTITION sp12 ,
+SUBPARTITION sp13 ,
+SUBPARTITION sp14 ),
+PARTITION p2 VALUES LESS THAN (1000)
+(SUBPARTITION sp20,
+SUBPARTITION sp21,
+SUBPARTITION sp22,
+SUBPARTITION sp23,
+SUBPARTITION sp24));
+INSERT INTO t_10 VALUES (1, "One"), (3, "Three"), (5, "Five"), (9, "Nine");
+INSERT INTO t_100 VALUES (11, "Eleven"), (13, "Thirdteen"), (15, "Fifeteen"), (19, "Nineteen");
+INSERT INTO t_100 VALUES (91, "Ninety-one"), (93, "Ninety-three"), (95, "Ninety-five"), (99, "Ninety-nine");
+INSERT INTO t_1000 VALUES (111, "Hundred elven"), (113, "Hundred thirdteen"), (115, "Hundred fiveteen"), (119, "Hundred nineteen");
+INSERT INTO t_1000 VALUES (131, "Hundred thirty-one"), (133, "Hundred thirty-three"), (135, "Hundred thirty-five"), (139, "Hundred thirty-nine");
+INSERT INTO t_1000 VALUES (151, "Hundred fifty-one"), (153, "Hundred fifty-three"), (155, "Hundred fity-five"), (159, "Hundred fifty-nine");
+INSERT INTO t_1000 VALUES (191, "Hundred ninety-one"), (193, "Hundred ninety-three"), (195, "Hundred ninety-five"), (199, "Hundred ninety-nine");
+INSERT INTO t_null VALUES (1, "NULL");
+INSERT INTO tp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tp VALUES (182, "Hundred eighty-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+INSERT INTO tsp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tsp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tsp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tsp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tsp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tsp VALUES (182, "Hundred eight-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+CREATE TABLE tsp_01(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 1;
+CREATE TABLE tsp_02(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 2;
+CREATE TABLE tsp_03(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 3;
+CREATE TABLE tsp_04(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 4;
+CREATE TABLE tsp_00(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 0;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
ERROR HY000: The mix of handlers in the partitions is not allowed in this version of MariaDB
DROP TABLE IF EXISTS t_10;
diff --git a/mysql-test/suite/parts/r/partition_exch_qa_14.result b/mysql-test/suite/parts/r/partition_exch_qa_14.result
index f6866727184..1420982436a 100644
--- a/mysql-test/suite/parts/r/partition_exch_qa_14.result
+++ b/mysql-test/suite/parts/r/partition_exch_qa_14.result
@@ -1,4 +1,198 @@
-use test;
+# === Data/Index directories are identical
+CREATE TABLE t_10 (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE t_100 (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE t_1000 (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE t_empty (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE t_null (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE tp (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir',
+PARTITION p1 VALUES LESS THAN (100) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir',
+PARTITION p2 VALUES LESS THAN (1000) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir');
+CREATE TABLE tsp (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM
+PARTITION BY RANGE (a)
+SUBPARTITION BY HASH(a)
+(PARTITION p0 VALUES LESS THAN (10) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir'
+(SUBPARTITION sp00,
+SUBPARTITION sp01,
+SUBPARTITION sp02,
+SUBPARTITION sp03,
+SUBPARTITION sp04), PARTITION p1 VALUES LESS THAN (100)
+(SUBPARTITION sp10 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir',
+SUBPARTITION sp11 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir',
+SUBPARTITION sp12 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir',
+SUBPARTITION sp13 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir',
+SUBPARTITION sp14 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir'),
+PARTITION p2 VALUES LESS THAN (1000) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir'
+(SUBPARTITION sp20,
+SUBPARTITION sp21,
+SUBPARTITION sp22,
+SUBPARTITION sp23,
+SUBPARTITION sp24));
+INSERT INTO t_10 VALUES (1, "One"), (3, "Three"), (5, "Five"), (9, "Nine");
+INSERT INTO t_100 VALUES (11, "Eleven"), (13, "Thirdteen"), (15, "Fifeteen"), (19, "Nineteen");
+INSERT INTO t_100 VALUES (91, "Ninety-one"), (93, "Ninety-three"), (95, "Ninety-five"), (99, "Ninety-nine");
+INSERT INTO t_1000 VALUES (111, "Hundred elven"), (113, "Hundred thirdteen"), (115, "Hundred fiveteen"), (119, "Hundred nineteen");
+INSERT INTO t_1000 VALUES (131, "Hundred thirty-one"), (133, "Hundred thirty-three"), (135, "Hundred thirty-five"), (139, "Hundred thirty-nine");
+INSERT INTO t_1000 VALUES (151, "Hundred fifty-one"), (153, "Hundred fifty-three"), (155, "Hundred fity-five"), (159, "Hundred fifty-nine");
+INSERT INTO t_1000 VALUES (191, "Hundred ninety-one"), (193, "Hundred ninety-three"), (195, "Hundred ninety-five"), (199, "Hundred ninety-nine");
+INSERT INTO t_null VALUES (1, "NULL");
+INSERT INTO tp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tp VALUES (182, "Hundred eighty-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+INSERT INTO tsp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tsp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tsp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tsp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tsp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tsp VALUES (182, "Hundred eight-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+CREATE TABLE tsp_01(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 1;
+CREATE TABLE tsp_02(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 2;
+CREATE TABLE tsp_03(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 3;
+CREATE TABLE tsp_04(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 4;
+CREATE TABLE tsp_00(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 0;
+ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
+DROP TABLE IF EXISTS t_10;
+DROP TABLE IF EXISTS t_100;
+DROP TABLE IF EXISTS t_1000;
+DROP TABLE IF EXISTS tp;
+DROP TABLE IF EXISTS tsp;
+DROP TABLE IF EXISTS tsp_00;
+DROP TABLE IF EXISTS tsp_01;
+DROP TABLE IF EXISTS tsp_02;
+DROP TABLE IF EXISTS tsp_03;
+DROP TABLE IF EXISTS tsp_04;
+DROP TABLE IF EXISTS t_empty;
+DROP TABLE IF EXISTS t_null;
+# === partition has directories, the table does not
+CREATE TABLE t_10 (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_100 (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_1000 (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_empty (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_null (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE tp (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir',
+PARTITION p1 VALUES LESS THAN (100) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir',
+PARTITION p2 VALUES LESS THAN (1000) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir');
+CREATE TABLE tsp (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM
+PARTITION BY RANGE (a)
+SUBPARTITION BY HASH(a)
+(PARTITION p0 VALUES LESS THAN (10) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir'
+(SUBPARTITION sp00,
+SUBPARTITION sp01,
+SUBPARTITION sp02,
+SUBPARTITION sp03,
+SUBPARTITION sp04), PARTITION p1 VALUES LESS THAN (100)
+(SUBPARTITION sp10 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir',
+SUBPARTITION sp11 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir',
+SUBPARTITION sp12 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir',
+SUBPARTITION sp13 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir',
+SUBPARTITION sp14 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir'),
+PARTITION p2 VALUES LESS THAN (1000) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir'
+(SUBPARTITION sp20,
+SUBPARTITION sp21,
+SUBPARTITION sp22,
+SUBPARTITION sp23,
+SUBPARTITION sp24));
+INSERT INTO t_10 VALUES (1, "One"), (3, "Three"), (5, "Five"), (9, "Nine");
+INSERT INTO t_100 VALUES (11, "Eleven"), (13, "Thirdteen"), (15, "Fifeteen"), (19, "Nineteen");
+INSERT INTO t_100 VALUES (91, "Ninety-one"), (93, "Ninety-three"), (95, "Ninety-five"), (99, "Ninety-nine");
+INSERT INTO t_1000 VALUES (111, "Hundred elven"), (113, "Hundred thirdteen"), (115, "Hundred fiveteen"), (119, "Hundred nineteen");
+INSERT INTO t_1000 VALUES (131, "Hundred thirty-one"), (133, "Hundred thirty-three"), (135, "Hundred thirty-five"), (139, "Hundred thirty-nine");
+INSERT INTO t_1000 VALUES (151, "Hundred fifty-one"), (153, "Hundred fifty-three"), (155, "Hundred fity-five"), (159, "Hundred fifty-nine");
+INSERT INTO t_1000 VALUES (191, "Hundred ninety-one"), (193, "Hundred ninety-three"), (195, "Hundred ninety-five"), (199, "Hundred ninety-nine");
+INSERT INTO t_null VALUES (1, "NULL");
+INSERT INTO tp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tp VALUES (182, "Hundred eighty-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+INSERT INTO tsp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tsp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tsp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tsp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tsp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tsp VALUES (182, "Hundred eight-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+CREATE TABLE tsp_01(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 1;
+CREATE TABLE tsp_02(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 2;
+CREATE TABLE tsp_03(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 3;
+CREATE TABLE tsp_04(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 4;
+CREATE TABLE tsp_00(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 0;
+ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
+ERROR HY000: Tables have different definitions
+DROP TABLE IF EXISTS t_10;
+DROP TABLE IF EXISTS t_100;
+DROP TABLE IF EXISTS t_1000;
+DROP TABLE IF EXISTS tp;
+DROP TABLE IF EXISTS tsp;
+DROP TABLE IF EXISTS tsp_00;
+DROP TABLE IF EXISTS tsp_01;
+DROP TABLE IF EXISTS tsp_02;
+DROP TABLE IF EXISTS tsp_03;
+DROP TABLE IF EXISTS tsp_04;
+DROP TABLE IF EXISTS t_empty;
+DROP TABLE IF EXISTS t_null;
+# === the table has directories, partition does not
+CREATE TABLE t_10 (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE t_100 (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE t_1000 (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE t_empty (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE t_null (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE tp (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ,
+PARTITION p1 VALUES LESS THAN (100) ,
+PARTITION p2 VALUES LESS THAN (1000) );
+CREATE TABLE tsp (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM
+PARTITION BY RANGE (a)
+SUBPARTITION BY HASH(a)
+(PARTITION p0 VALUES LESS THAN (10)
+(SUBPARTITION sp00,
+SUBPARTITION sp01,
+SUBPARTITION sp02,
+SUBPARTITION sp03,
+SUBPARTITION sp04), PARTITION p1 VALUES LESS THAN (100)
+(SUBPARTITION sp10 ,
+SUBPARTITION sp11 ,
+SUBPARTITION sp12 ,
+SUBPARTITION sp13 ,
+SUBPARTITION sp14 ),
+PARTITION p2 VALUES LESS THAN (1000)
+(SUBPARTITION sp20,
+SUBPARTITION sp21,
+SUBPARTITION sp22,
+SUBPARTITION sp23,
+SUBPARTITION sp24));
+INSERT INTO t_10 VALUES (1, "One"), (3, "Three"), (5, "Five"), (9, "Nine");
+INSERT INTO t_100 VALUES (11, "Eleven"), (13, "Thirdteen"), (15, "Fifeteen"), (19, "Nineteen");
+INSERT INTO t_100 VALUES (91, "Ninety-one"), (93, "Ninety-three"), (95, "Ninety-five"), (99, "Ninety-nine");
+INSERT INTO t_1000 VALUES (111, "Hundred elven"), (113, "Hundred thirdteen"), (115, "Hundred fiveteen"), (119, "Hundred nineteen");
+INSERT INTO t_1000 VALUES (131, "Hundred thirty-one"), (133, "Hundred thirty-three"), (135, "Hundred thirty-five"), (139, "Hundred thirty-nine");
+INSERT INTO t_1000 VALUES (151, "Hundred fifty-one"), (153, "Hundred fifty-three"), (155, "Hundred fity-five"), (159, "Hundred fifty-nine");
+INSERT INTO t_1000 VALUES (191, "Hundred ninety-one"), (193, "Hundred ninety-three"), (195, "Hundred ninety-five"), (199, "Hundred ninety-nine");
+INSERT INTO t_null VALUES (1, "NULL");
+INSERT INTO tp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tp VALUES (182, "Hundred eighty-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+INSERT INTO tsp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tsp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tsp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tsp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tsp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tsp VALUES (182, "Hundred eight-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+CREATE TABLE tsp_01(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 1;
+CREATE TABLE tsp_02(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 2;
+CREATE TABLE tsp_03(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 3;
+CREATE TABLE tsp_04(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 4;
+CREATE TABLE tsp_00(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 0;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
ERROR HY000: Tables have different definitions
DROP TABLE IF EXISTS t_10;
@@ -13,7 +207,62 @@ DROP TABLE IF EXISTS tsp_03;
DROP TABLE IF EXISTS tsp_04;
DROP TABLE IF EXISTS t_empty;
DROP TABLE IF EXISTS t_null;
-use test;
+# === data directory differs
+CREATE TABLE t_10 (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE t_100 (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE t_1000 (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE t_empty (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE t_null (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE tp (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir',
+PARTITION p1 VALUES LESS THAN (100) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir',
+PARTITION p2 VALUES LESS THAN (1000) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir');
+CREATE TABLE tsp (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM
+PARTITION BY RANGE (a)
+SUBPARTITION BY HASH(a)
+(PARTITION p0 VALUES LESS THAN (10) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir'
+(SUBPARTITION sp00,
+SUBPARTITION sp01,
+SUBPARTITION sp02,
+SUBPARTITION sp03,
+SUBPARTITION sp04), PARTITION p1 VALUES LESS THAN (100)
+(SUBPARTITION sp10 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir',
+SUBPARTITION sp11 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir',
+SUBPARTITION sp12 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir',
+SUBPARTITION sp13 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir',
+SUBPARTITION sp14 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir'),
+PARTITION p2 VALUES LESS THAN (1000) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir'
+(SUBPARTITION sp20,
+SUBPARTITION sp21,
+SUBPARTITION sp22,
+SUBPARTITION sp23,
+SUBPARTITION sp24));
+INSERT INTO t_10 VALUES (1, "One"), (3, "Three"), (5, "Five"), (9, "Nine");
+INSERT INTO t_100 VALUES (11, "Eleven"), (13, "Thirdteen"), (15, "Fifeteen"), (19, "Nineteen");
+INSERT INTO t_100 VALUES (91, "Ninety-one"), (93, "Ninety-three"), (95, "Ninety-five"), (99, "Ninety-nine");
+INSERT INTO t_1000 VALUES (111, "Hundred elven"), (113, "Hundred thirdteen"), (115, "Hundred fiveteen"), (119, "Hundred nineteen");
+INSERT INTO t_1000 VALUES (131, "Hundred thirty-one"), (133, "Hundred thirty-three"), (135, "Hundred thirty-five"), (139, "Hundred thirty-nine");
+INSERT INTO t_1000 VALUES (151, "Hundred fifty-one"), (153, "Hundred fifty-three"), (155, "Hundred fity-five"), (159, "Hundred fifty-nine");
+INSERT INTO t_1000 VALUES (191, "Hundred ninety-one"), (193, "Hundred ninety-three"), (195, "Hundred ninety-five"), (199, "Hundred ninety-nine");
+INSERT INTO t_null VALUES (1, "NULL");
+INSERT INTO tp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tp VALUES (182, "Hundred eighty-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+INSERT INTO tsp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tsp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tsp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tsp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tsp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tsp VALUES (182, "Hundred eight-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+CREATE TABLE tsp_01(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 1;
+CREATE TABLE tsp_02(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 2;
+CREATE TABLE tsp_03(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 3;
+CREATE TABLE tsp_04(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 4;
+CREATE TABLE tsp_00(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 0;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
ERROR HY000: Tables have different definitions
DROP TABLE IF EXISTS t_10;
@@ -28,7 +277,62 @@ DROP TABLE IF EXISTS tsp_03;
DROP TABLE IF EXISTS tsp_04;
DROP TABLE IF EXISTS t_empty;
DROP TABLE IF EXISTS t_null;
-use test;
+# === index directory differs
+CREATE TABLE t_10 (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE t_100 (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE t_1000 (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE t_empty (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE t_null (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM;
+CREATE TABLE tp (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir',
+PARTITION p1 VALUES LESS THAN (100) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir',
+PARTITION p2 VALUES LESS THAN (1000) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir');
+CREATE TABLE tsp (a INT, b VARCHAR(55), PRIMARY KEY (a)) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MYISAM
+PARTITION BY RANGE (a)
+SUBPARTITION BY HASH(a)
+(PARTITION p0 VALUES LESS THAN (10) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir'
+(SUBPARTITION sp00,
+SUBPARTITION sp01,
+SUBPARTITION sp02,
+SUBPARTITION sp03,
+SUBPARTITION sp04), PARTITION p1 VALUES LESS THAN (100)
+(SUBPARTITION sp10 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir',
+SUBPARTITION sp11 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir',
+SUBPARTITION sp12 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir',
+SUBPARTITION sp13 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir',
+SUBPARTITION sp14 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir'),
+PARTITION p2 VALUES LESS THAN (1000) DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir'
+(SUBPARTITION sp20,
+SUBPARTITION sp21,
+SUBPARTITION sp22,
+SUBPARTITION sp23,
+SUBPARTITION sp24));
+INSERT INTO t_10 VALUES (1, "One"), (3, "Three"), (5, "Five"), (9, "Nine");
+INSERT INTO t_100 VALUES (11, "Eleven"), (13, "Thirdteen"), (15, "Fifeteen"), (19, "Nineteen");
+INSERT INTO t_100 VALUES (91, "Ninety-one"), (93, "Ninety-three"), (95, "Ninety-five"), (99, "Ninety-nine");
+INSERT INTO t_1000 VALUES (111, "Hundred elven"), (113, "Hundred thirdteen"), (115, "Hundred fiveteen"), (119, "Hundred nineteen");
+INSERT INTO t_1000 VALUES (131, "Hundred thirty-one"), (133, "Hundred thirty-three"), (135, "Hundred thirty-five"), (139, "Hundred thirty-nine");
+INSERT INTO t_1000 VALUES (151, "Hundred fifty-one"), (153, "Hundred fifty-three"), (155, "Hundred fity-five"), (159, "Hundred fifty-nine");
+INSERT INTO t_1000 VALUES (191, "Hundred ninety-one"), (193, "Hundred ninety-three"), (195, "Hundred ninety-five"), (199, "Hundred ninety-nine");
+INSERT INTO t_null VALUES (1, "NULL");
+INSERT INTO tp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tp VALUES (182, "Hundred eighty-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+INSERT INTO tsp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tsp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tsp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tsp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tsp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tsp VALUES (182, "Hundred eight-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+CREATE TABLE tsp_01(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 1;
+CREATE TABLE tsp_02(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 2;
+CREATE TABLE tsp_03(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 3;
+CREATE TABLE tsp_04(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 4;
+CREATE TABLE tsp_00(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 0;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
ERROR HY000: Tables have different definitions
DROP TABLE IF EXISTS t_10;
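The rule exercised by partition_exch_qa_14 above can be reduced to a minimal sketch, assuming a server with symbolic-link support (the test itself requires have_symlink and not_windows); the table names and paths below are illustrative and not taken from the test suite. The standalone table and the target partition must carry identical DATA DIRECTORY and INDEX DIRECTORY options, otherwise the exchange is refused.

CREATE TABLE t_plain (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MyISAM;
CREATE TABLE tp_dirs (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MyISAM
PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (10)
   DATA DIRECTORY = '/tmp/mysql-data' INDEX DIRECTORY = '/tmp/mysql-idx',
 PARTITION p1 VALUES LESS THAN (MAXVALUE));
-- Rejected: partition p0 carries directory options that t_plain does not have.
ALTER TABLE tp_dirs EXCHANGE PARTITION p0 WITH TABLE t_plain;
-- ERROR HY000: Tables have different definitions
DROP TABLE tp_dirs, t_plain;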
diff --git a/mysql-test/suite/parts/r/partition_exch_qa_15.result b/mysql-test/suite/parts/r/partition_exch_qa_15.result
index 87671a74253..355cf43d886 100644
--- a/mysql-test/suite/parts/r/partition_exch_qa_15.result
+++ b/mysql-test/suite/parts/r/partition_exch_qa_15.result
@@ -1,10 +1,65 @@
use test;
+CREATE TABLE t_10 (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = InnoDB;
+CREATE TABLE t_100 (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = InnoDB;
+CREATE TABLE t_1000 (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = InnoDB;
+CREATE TABLE t_empty (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = InnoDB;
+CREATE TABLE t_null (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = InnoDB;
+CREATE TABLE tp (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ,
+PARTITION p1 VALUES LESS THAN (100) ,
+PARTITION p2 VALUES LESS THAN (1000) );
+CREATE TABLE tsp (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = InnoDB
+PARTITION BY RANGE (a)
+SUBPARTITION BY HASH(a)
+(PARTITION p0 VALUES LESS THAN (10)
+(SUBPARTITION sp00,
+SUBPARTITION sp01,
+SUBPARTITION sp02,
+SUBPARTITION sp03,
+SUBPARTITION sp04), PARTITION p1 VALUES LESS THAN (100)
+(SUBPARTITION sp10 ,
+SUBPARTITION sp11 ,
+SUBPARTITION sp12 ,
+SUBPARTITION sp13 ,
+SUBPARTITION sp14 ),
+PARTITION p2 VALUES LESS THAN (1000)
+(SUBPARTITION sp20,
+SUBPARTITION sp21,
+SUBPARTITION sp22,
+SUBPARTITION sp23,
+SUBPARTITION sp24));
+INSERT INTO t_10 VALUES (1, "One"), (3, "Three"), (5, "Five"), (9, "Nine");
+INSERT INTO t_100 VALUES (11, "Eleven"), (13, "Thirdteen"), (15, "Fifeteen"), (19, "Nineteen");
+INSERT INTO t_100 VALUES (91, "Ninety-one"), (93, "Ninety-three"), (95, "Ninety-five"), (99, "Ninety-nine");
+INSERT INTO t_1000 VALUES (111, "Hundred elven"), (113, "Hundred thirdteen"), (115, "Hundred fiveteen"), (119, "Hundred nineteen");
+INSERT INTO t_1000 VALUES (131, "Hundred thirty-one"), (133, "Hundred thirty-three"), (135, "Hundred thirty-five"), (139, "Hundred thirty-nine");
+INSERT INTO t_1000 VALUES (151, "Hundred fifty-one"), (153, "Hundred fifty-three"), (155, "Hundred fity-five"), (159, "Hundred fifty-nine");
+INSERT INTO t_1000 VALUES (191, "Hundred ninety-one"), (193, "Hundred ninety-three"), (195, "Hundred ninety-five"), (199, "Hundred ninety-nine");
+INSERT INTO t_null VALUES (1, "NULL");
+INSERT INTO tp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tp VALUES (182, "Hundred eighty-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+INSERT INTO tsp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tsp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tsp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tsp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tsp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tsp VALUES (182, "Hundred eight-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+CREATE TABLE tsp_01(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = InnoDB AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 1;
+CREATE TABLE tsp_02(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = InnoDB AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 2;
+CREATE TABLE tsp_03(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = InnoDB AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 3;
+CREATE TABLE tsp_04(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = InnoDB AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 4;
+CREATE TABLE tsp_00(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = InnoDB AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 0;
CREATE TABLE t_11 (a INT, b VARCHAR(55),
FOREIGN KEY (a) REFERENCES t_10 (a) ON DELETE CASCADE)
ENGINE= InnoDB;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_11;
ERROR HY000: Table to exchange with partition has foreign key references: 't_11'
-DROP TABLE IF EXISTS t_11;
+DROP TABLE t_11;
DROP TABLE IF EXISTS t_10;
DROP TABLE IF EXISTS t_100;
DROP TABLE IF EXISTS t_1000;
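partition_exch_qa_15 covers the foreign-key restriction; a minimal sketch of that check, with illustrative table names and assuming InnoDB, looks like this:

CREATE TABLE parent (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = InnoDB;
CREATE TABLE tp_fk (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = InnoDB
PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (10), PARTITION p1 VALUES LESS THAN (MAXVALUE));
CREATE TABLE t_child (a INT, b VARCHAR(55),
  FOREIGN KEY (a) REFERENCES parent (a) ON DELETE CASCADE) ENGINE = InnoDB;
-- Rejected: a table that takes part in a foreign key relationship
-- cannot be swapped into a partition.
ALTER TABLE tp_fk EXCHANGE PARTITION p0 WITH TABLE t_child;
-- ERROR HY000: Table to exchange with partition has foreign key references: 't_child'
DROP TABLE tp_fk, t_child, parent;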
diff --git a/mysql-test/suite/parts/r/partition_exch_qa_2.result b/mysql-test/suite/parts/r/partition_exch_qa_2.result
index ea4983db3dc..956cb0af695 100644
--- a/mysql-test/suite/parts/r/partition_exch_qa_2.result
+++ b/mysql-test/suite/parts/r/partition_exch_qa_2.result
@@ -1,4 +1,59 @@
use test;
+CREATE TABLE t_10 (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_100 (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_1000 (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_empty (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_null (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE tp (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ,
+PARTITION p1 VALUES LESS THAN (100) ,
+PARTITION p2 VALUES LESS THAN (1000) );
+CREATE TABLE tsp (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM
+PARTITION BY RANGE (a)
+SUBPARTITION BY HASH(a)
+(PARTITION p0 VALUES LESS THAN (10)
+(SUBPARTITION sp00,
+SUBPARTITION sp01,
+SUBPARTITION sp02,
+SUBPARTITION sp03,
+SUBPARTITION sp04), PARTITION p1 VALUES LESS THAN (100)
+(SUBPARTITION sp10 ,
+SUBPARTITION sp11 ,
+SUBPARTITION sp12 ,
+SUBPARTITION sp13 ,
+SUBPARTITION sp14 ),
+PARTITION p2 VALUES LESS THAN (1000)
+(SUBPARTITION sp20,
+SUBPARTITION sp21,
+SUBPARTITION sp22,
+SUBPARTITION sp23,
+SUBPARTITION sp24));
+INSERT INTO t_10 VALUES (1, "One"), (3, "Three"), (5, "Five"), (9, "Nine");
+INSERT INTO t_100 VALUES (11, "Eleven"), (13, "Thirdteen"), (15, "Fifeteen"), (19, "Nineteen");
+INSERT INTO t_100 VALUES (91, "Ninety-one"), (93, "Ninety-three"), (95, "Ninety-five"), (99, "Ninety-nine");
+INSERT INTO t_1000 VALUES (111, "Hundred elven"), (113, "Hundred thirdteen"), (115, "Hundred fiveteen"), (119, "Hundred nineteen");
+INSERT INTO t_1000 VALUES (131, "Hundred thirty-one"), (133, "Hundred thirty-three"), (135, "Hundred thirty-five"), (139, "Hundred thirty-nine");
+INSERT INTO t_1000 VALUES (151, "Hundred fifty-one"), (153, "Hundred fifty-three"), (155, "Hundred fity-five"), (159, "Hundred fifty-nine");
+INSERT INTO t_1000 VALUES (191, "Hundred ninety-one"), (193, "Hundred ninety-three"), (195, "Hundred ninety-five"), (199, "Hundred ninety-nine");
+INSERT INTO t_null VALUES (1, "NULL");
+INSERT INTO tp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tp VALUES (182, "Hundred eighty-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+INSERT INTO tsp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tsp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tsp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tsp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tsp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tsp VALUES (182, "Hundred eight-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+CREATE TABLE tsp_01(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 1;
+CREATE TABLE tsp_02(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 2;
+CREATE TABLE tsp_03(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 3;
+CREATE TABLE tsp_04(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 4;
+CREATE TABLE tsp_00(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 0;
SELECT * FROM t_10;
a b
1 One
@@ -102,20 +157,20 @@ a b
CREATE TABLE t_11(a INT,b VARCHAR(55)) SELECT * FROM t_10;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_11;
ERROR HY000: Tables have different definitions
-DROP TABLE IF EXISTS t_11;
+DROP TABLE t_11;
CREATE TABLE t_11(a INT,b CHAR(55),PRIMARY KEY(a)) ENGINE= MYISAM SELECT * FROM t_10;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_11;
ERROR HY000: Tables have different definitions
-DROP TABLE IF EXISTS t_11;
+DROP TABLE t_11;
CREATE TABLE t_11(a INT,b VARCHAR(55),PRIMARY KEY(a)) ENGINE= MEMORY SELECT * FROM t_10;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_11;
ERROR HY000: The mix of handlers in the partitions is not allowed in this version of MariaDB
-DROP TABLE IF EXISTS t_11;
+DROP TABLE t_11;
CREATE TABLE t_11(a INT,b CHAR(55),PRIMARY KEY(a)) ENGINE= MYISAM
PARTITION BY KEY() AS SELECT * FROM t_10;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_11;
ERROR HY000: Table to exchange with partition is partitioned: 't_11'
-DROP TABLE IF EXISTS t_11;
+DROP TABLE t_11;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE tsp;
ERROR HY000: Table to exchange with partition is partitioned: 'tsp'
ALTER TABLE tsp EXCHANGE PARTITION p0 WITH TABLE t_10;
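The refusals recorded in partition_exch_qa_2 above come down to two checks that can be sketched as follows (names are illustrative): the table named in the exchange must not itself be partitioned, and its definition, including column types and keys, must match the partitioned table exactly.

CREATE TABLE tp_r (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MyISAM
PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (10), PARTITION p1 VALUES LESS THAN (MAXVALUE));
CREATE TABLE t_part (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MyISAM
PARTITION BY KEY (a) PARTITIONS 2;
-- Rejected: the exchange target is itself partitioned.
ALTER TABLE tp_r EXCHANGE PARTITION p0 WITH TABLE t_part;
-- ERROR HY000: Table to exchange with partition is partitioned: 't_part'
CREATE TABLE t_char (a INT, b CHAR(55), PRIMARY KEY (a)) ENGINE = MyISAM;
-- Rejected: CHAR(55) does not match the VARCHAR(55) column of tp_r.
ALTER TABLE tp_r EXCHANGE PARTITION p0 WITH TABLE t_char;
-- ERROR HY000: Tables have different definitions
DROP TABLE tp_r, t_part, t_char;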
diff --git a/mysql-test/suite/parts/r/partition_exch_qa_3.result b/mysql-test/suite/parts/r/partition_exch_qa_3.result
index 9f4043a055a..791757c95f2 100644
--- a/mysql-test/suite/parts/r/partition_exch_qa_3.result
+++ b/mysql-test/suite/parts/r/partition_exch_qa_3.result
@@ -1,4 +1,159 @@
use test;
+CREATE TABLE t_10 (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_100 (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_1000 (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_empty (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_null (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE tp (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ,
+PARTITION p1 VALUES LESS THAN (100) ,
+PARTITION p2 VALUES LESS THAN (1000) );
+CREATE TABLE tsp (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = InnoDB
+PARTITION BY RANGE (a)
+SUBPARTITION BY HASH(a)
+(PARTITION p0 VALUES LESS THAN (10)
+(SUBPARTITION sp00,
+SUBPARTITION sp01,
+SUBPARTITION sp02,
+SUBPARTITION sp03,
+SUBPARTITION sp04), PARTITION p1 VALUES LESS THAN (100)
+(SUBPARTITION sp10 ,
+SUBPARTITION sp11 ,
+SUBPARTITION sp12 ,
+SUBPARTITION sp13 ,
+SUBPARTITION sp14 ),
+PARTITION p2 VALUES LESS THAN (1000)
+(SUBPARTITION sp20,
+SUBPARTITION sp21,
+SUBPARTITION sp22,
+SUBPARTITION sp23,
+SUBPARTITION sp24));
+INSERT INTO t_10 VALUES (1, "One"), (3, "Three"), (5, "Five"), (9, "Nine");
+INSERT INTO t_100 VALUES (11, "Eleven"), (13, "Thirdteen"), (15, "Fifeteen"), (19, "Nineteen");
+INSERT INTO t_100 VALUES (91, "Ninety-one"), (93, "Ninety-three"), (95, "Ninety-five"), (99, "Ninety-nine");
+INSERT INTO t_1000 VALUES (111, "Hundred elven"), (113, "Hundred thirdteen"), (115, "Hundred fiveteen"), (119, "Hundred nineteen");
+INSERT INTO t_1000 VALUES (131, "Hundred thirty-one"), (133, "Hundred thirty-three"), (135, "Hundred thirty-five"), (139, "Hundred thirty-nine");
+INSERT INTO t_1000 VALUES (151, "Hundred fifty-one"), (153, "Hundred fifty-three"), (155, "Hundred fity-five"), (159, "Hundred fifty-nine");
+INSERT INTO t_1000 VALUES (191, "Hundred ninety-one"), (193, "Hundred ninety-three"), (195, "Hundred ninety-five"), (199, "Hundred ninety-nine");
+INSERT INTO t_null VALUES (1, "NULL");
+INSERT INTO tp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tp VALUES (182, "Hundred eighty-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+INSERT INTO tsp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tsp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tsp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tsp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tsp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tsp VALUES (182, "Hundred eight-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+CREATE TABLE tsp_01(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 1;
+CREATE TABLE tsp_02(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 2;
+CREATE TABLE tsp_03(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 3;
+CREATE TABLE tsp_04(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 4;
+CREATE TABLE tsp_00(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 0;
+SELECT * FROM t_10;
+a b
+1 One
+3 Three
+5 Five
+9 Nine
+SELECT * FROM t_100;
+a b
+11 Eleven
+13 Thirdteen
+15 Fifeteen
+19 Nineteen
+91 Ninety-one
+93 Ninety-three
+95 Ninety-five
+99 Ninety-nine
+SELECT * FROM t_1000;
+a b
+111 Hundred elven
+113 Hundred thirdteen
+115 Hundred fiveteen
+119 Hundred nineteen
+131 Hundred thirty-one
+133 Hundred thirty-three
+135 Hundred thirty-five
+139 Hundred thirty-nine
+151 Hundred fifty-one
+153 Hundred fifty-three
+155 Hundred fity-five
+159 Hundred fifty-nine
+191 Hundred ninety-one
+193 Hundred ninety-three
+195 Hundred ninety-five
+199 Hundred ninety-nine
+SELECT * FROM tp;
+a b
+112 Hundred twelve
+114 Hundred fourteen
+116 Hundred sixteen
+118 Hundred eightteen
+12 twelve
+122 Hundred twenty-two
+124 Hundred twenty-four
+126 Hundred twenty-six
+128 Hundred twenty-eight
+14 Fourteen
+16 Sixteen
+162 Hundred sixty-two
+164 Hundred sixty-four
+166 Hundred sixty-six
+168 Hundred sixty-eight
+18 Eightteen
+182 Hundred eighty-two
+184 Hundred eighty-four
+186 Hundred eighty-six
+188 Hundred eighty-eight
+2 Two
+4 Four
+6 Six
+8 Eight
+SELECT * FROM tsp;
+a b
+112 Hundred twelve
+114 Hundred fourteen
+116 Hundred sixteen
+118 Hundred eightteen
+12 twelve
+122 Hundred twenty-two
+124 Hundred twenty-four
+126 Hundred twenty-six
+128 Hundred twenty-eight
+14 Fourteen
+16 Sixteen
+162 Hundred sixty-two
+164 Hundred sixty-four
+166 Hundred sixty-six
+168 Hundred sixty-eight
+18 Eightteen
+182 Hundred eight-two
+184 Hundred eighty-four
+186 Hundred eighty-six
+188 Hundred eighty-eight
+2 Two
+4 Four
+6 Six
+8 Eight
+SELECT * FROM tsp_00;
+a b
+5 Five
+SELECT * FROM tsp_01;
+a b
+1 One
+SELECT * FROM tsp_02;
+a b
+SELECT * FROM tsp_03;
+a b
+3 Three
+SELECT * FROM tsp_04;
+a b
+9 Nine
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
ERROR HY000: The mix of handlers in the partitions is not allowed in this version of MariaDB
ALTER TABLE tsp EXCHANGE PARTITION sp00 WITH TABLE tsp_00;
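partition_exch_qa_3 pairs a MyISAM standalone table with InnoDB partitions; the storage engines must match for the exchange to be allowed, as this minimal sketch (illustrative names) shows:

CREATE TABLE t_my (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MyISAM;
CREATE TABLE tp_inno (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = InnoDB
PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (10), PARTITION p1 VALUES LESS THAN (MAXVALUE));
-- Rejected: the engines differ (MyISAM vs. InnoDB), so the handlers may not be mixed.
ALTER TABLE tp_inno EXCHANGE PARTITION p0 WITH TABLE t_my;
-- ERROR HY000: The mix of handlers in the partitions is not allowed in this version of MariaDB
DROP TABLE tp_inno, t_my;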
diff --git a/mysql-test/suite/parts/r/partition_exch_qa_6.result b/mysql-test/suite/parts/r/partition_exch_qa_6.result
index bef277f695a..6de40ee614a 100644
--- a/mysql-test/suite/parts/r/partition_exch_qa_6.result
+++ b/mysql-test/suite/parts/r/partition_exch_qa_6.result
@@ -1,8 +1,62 @@
CREATE USER test2@localhost;
+CREATE TABLE t_10 (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_100 (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_1000 (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_empty (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE t_null (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM;
+CREATE TABLE tp (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ,
+PARTITION p1 VALUES LESS THAN (100) ,
+PARTITION p2 VALUES LESS THAN (1000) );
+CREATE TABLE tsp (a INT, b VARCHAR(55), PRIMARY KEY (a)) ENGINE = MYISAM
+PARTITION BY RANGE (a)
+SUBPARTITION BY HASH(a)
+(PARTITION p0 VALUES LESS THAN (10)
+(SUBPARTITION sp00,
+SUBPARTITION sp01,
+SUBPARTITION sp02,
+SUBPARTITION sp03,
+SUBPARTITION sp04), PARTITION p1 VALUES LESS THAN (100)
+(SUBPARTITION sp10 ,
+SUBPARTITION sp11 ,
+SUBPARTITION sp12 ,
+SUBPARTITION sp13 ,
+SUBPARTITION sp14 ),
+PARTITION p2 VALUES LESS THAN (1000)
+(SUBPARTITION sp20,
+SUBPARTITION sp21,
+SUBPARTITION sp22,
+SUBPARTITION sp23,
+SUBPARTITION sp24));
+INSERT INTO t_10 VALUES (1, "One"), (3, "Three"), (5, "Five"), (9, "Nine");
+INSERT INTO t_100 VALUES (11, "Eleven"), (13, "Thirdteen"), (15, "Fifeteen"), (19, "Nineteen");
+INSERT INTO t_100 VALUES (91, "Ninety-one"), (93, "Ninety-three"), (95, "Ninety-five"), (99, "Ninety-nine");
+INSERT INTO t_1000 VALUES (111, "Hundred elven"), (113, "Hundred thirdteen"), (115, "Hundred fiveteen"), (119, "Hundred nineteen");
+INSERT INTO t_1000 VALUES (131, "Hundred thirty-one"), (133, "Hundred thirty-three"), (135, "Hundred thirty-five"), (139, "Hundred thirty-nine");
+INSERT INTO t_1000 VALUES (151, "Hundred fifty-one"), (153, "Hundred fifty-three"), (155, "Hundred fity-five"), (159, "Hundred fifty-nine");
+INSERT INTO t_1000 VALUES (191, "Hundred ninety-one"), (193, "Hundred ninety-three"), (195, "Hundred ninety-five"), (199, "Hundred ninety-nine");
+INSERT INTO t_null VALUES (1, "NULL");
+INSERT INTO tp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tp VALUES (182, "Hundred eighty-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+INSERT INTO tsp VALUES (2, "Two"), (4, "Four"), (6, "Six"), (8, "Eight");
+INSERT INTO tsp VALUES (12, "twelve"), (14, "Fourteen"), (16, "Sixteen"), (18, "Eightteen");
+INSERT INTO tsp VALUES (112, "Hundred twelve"), (114, "Hundred fourteen"), (116, "Hundred sixteen"), (118, "Hundred eightteen");
+INSERT INTO tsp VALUES (122, "Hundred twenty-two"), (124, "Hundred twenty-four"), (126, "Hundred twenty-six"), (128, "Hundred twenty-eight");
+INSERT INTO tsp VALUES (162, "Hundred sixty-two"), (164, "Hundred sixty-four"), (166, "Hundred sixty-six"), (168, "Hundred sixty-eight");
+INSERT INTO tsp VALUES (182, "Hundred eight-two"), (184, "Hundred eighty-four"), (186, "Hundred eighty-six"), (188, "Hundred eighty-eight");
+CREATE TABLE tsp_01(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 1;
+CREATE TABLE tsp_02(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 2;
+CREATE TABLE tsp_03(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 3;
+CREATE TABLE tsp_04(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 4;
+CREATE TABLE tsp_00(a INT,b VARCHAR(55),PRIMARY KEY (a)) ENGINE = MYISAM AS SELECT a, b FROM t_10 WHERE MOD(a,5)= 0;
GRANT USAGE ON *.* TO test2@localhost;
GRANT CREATE, DROP, ALTER, UPDATE, INSERT, SELECT ON test.* TO test2@localhost;
connect test2,localhost,test2,,test,$MASTER_MYPORT,$MASTER_MYSOCK;
-USE test;
SHOW GRANTS FOR CURRENT_USER;
Grants for test2@localhost
GRANT USAGE ON *.* TO 'test2'@'localhost'
@@ -81,7 +135,6 @@ connection default;
GRANT CREATE ON test.* TO test2@localhost;
REVOKE DROP ON test.* FROM test2@localhost;
connect test2,localhost,test2,,test,$MASTER_MYPORT,$MASTER_MYSOCK;
-USE test;
SHOW GRANTS FOR CURRENT_USER;
Grants for test2@localhost
GRANT USAGE ON *.* TO 'test2'@'localhost'
diff --git a/mysql-test/suite/parts/t/partition_exch_myisam_innodb.test b/mysql-test/suite/parts/t/partition_exch_myisam_innodb.test
index fa956f19aec..c625ad93775 100644
--- a/mysql-test/suite/parts/t/partition_exch_myisam_innodb.test
+++ b/mysql-test/suite/parts/t/partition_exch_myisam_innodb.test
@@ -1,21 +1,17 @@
# Author: Horst Hunger
# Created: 2010-07-05
---source include/have_innodb.inc
---source include/have_partition.inc
+source include/have_innodb.inc;
+source include/have_partition.inc;
let $engine_table= MYISAM;
let $engine_part= InnoDB;
let $engine_subpart= InnoDB;
---disable_result_log
---disable_query_log
---source suite/parts/inc/part_exch_tabs.inc
---enable_result_log
---enable_query_log
+source suite/parts/inc/part_exch_tabs.inc;
---error 1497
+error ER_MIX_HANDLER_ERROR;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
---source suite/parts/inc/part_exch_drop_tabs.inc
+source suite/parts/inc/part_exch_drop_tabs.inc;
diff --git a/mysql-test/suite/parts/t/partition_exch_qa_14.test b/mysql-test/suite/parts/t/partition_exch_qa_14.test
index 7c6699a0a72..8d9f201f1db 100644
--- a/mysql-test/suite/parts/t/partition_exch_qa_14.test
+++ b/mysql-test/suite/parts/t/partition_exch_qa_14.test
@@ -1,94 +1,66 @@
# Author: Horst Hunger
# Created: 2010-07-13
---source include/not_windows.inc
---source include/have_partition.inc
---source include/have_symlink.inc
+source include/not_windows.inc;
+source include/have_partition.inc;
+source include/have_symlink.inc;
let $engine_table= MYISAM;
let $engine_part= MYISAM;
let $engine_subpart= MYISAM;
-
-# DATA DIRECTORY
-# Make directory for partition data
-let $data_dir_path= $MYSQLTEST_VARDIR/mysql-test-data-dir;
---mkdir $data_dir_path
-let $p_data_directory= DATA DIRECTORY = '$data_dir_path';
-let $data_directory= DATA DIRECTORY = '$data_dir_path';
-
-# INDEX DIRECTORY
-# Make directory for partition index
-let $idx_dir_path= $MYSQLTEST_VARDIR/mysql-test-idx-dir;
---mkdir $idx_dir_path
-let $p_index_directory= INDEX DIRECTORY = '$idx_dir_path';
-let $index_directory= INDEX DIRECTORY = '$idx_dir_path';
-
-use test;
-
---disable_result_log
---disable_query_log
---source suite/parts/inc/part_exch_tabs.inc
---enable_result_log
---enable_query_log
-
---error ER_TABLES_DIFFERENT_METADATA
-ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
-
---source suite/parts/inc/part_exch_drop_tabs.inc
---rmdir $data_dir_path
---rmdir $idx_dir_path
-
-# DATA DIRECTORY
-# Make directory for partition data
let $data_dir_path= $MYSQLTEST_VARDIR/mysql-test-data-dir;
---mkdir $data_dir_path
-let $p_data_directory= DATA DIRECTORY = '$data_dir_path';
-
-# INDEX DIRECTORY
-# Make directory for partition index
let $idx_dir_path= $MYSQLTEST_VARDIR/mysql-test-idx-dir;
---mkdir $idx_dir_path
-let $p_index_directory= INDEX DIRECTORY = '$idx_dir_path';
-
-use test;
-
---disable_result_log
---disable_query_log
---source suite/parts/inc/part_exch_tabs.inc
---enable_result_log
---enable_query_log
-
---error ER_TABLES_DIFFERENT_METADATA
-ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
-
---source suite/parts/inc/part_exch_drop_tabs.inc
---rmdir $data_dir_path
---rmdir $idx_dir_path
-
-# DATA DIRECTORY
-# Make directory for partition data
-let $data_dir_path= $MYSQLTEST_VARDIR/mysql-test-data-dir;
---mkdir $data_dir_path
-let $data_directory= DATA DIRECTORY = '$data_dir_path';
-
-# INDEX DIRECTORY
-# Make directory for partition index
-let $idx_dir_path= $MYSQLTEST_VARDIR/mysql-test-idx-dir;
---mkdir $idx_dir_path
-let $index_directory= INDEX DIRECTORY = '$idx_dir_path';
-
-use test;
-
---disable_result_log
---disable_query_log
---source suite/parts/inc/part_exch_tabs.inc
---enable_result_log
---enable_query_log
-
---error ER_TABLES_DIFFERENT_METADATA
-ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
-
---source suite/parts/inc/part_exch_drop_tabs.inc
---rmdir $data_dir_path
---rmdir $idx_dir_path
-
+mkdir $data_dir_path;
+mkdir $idx_dir_path;
+
+echo # === Data/Index directories are identical;
+ let $p_data_directory= DATA DIRECTORY = '$data_dir_path';
+ let $data_directory= DATA DIRECTORY = '$data_dir_path';
+ let $p_index_directory= INDEX DIRECTORY = '$idx_dir_path';
+ let $index_directory= INDEX DIRECTORY = '$idx_dir_path';
+ source suite/parts/inc/part_exch_tabs.inc;
+ ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
+ source suite/parts/inc/part_exch_drop_tabs.inc;
+
+echo # === partition has directories, the table does not;
+ let $p_data_directory= DATA DIRECTORY = '$data_dir_path';
+ let $data_directory= ;
+ let $p_index_directory= INDEX DIRECTORY = '$idx_dir_path';
+ let $index_directory= ;
+ source suite/parts/inc/part_exch_tabs.inc;
+ error ER_TABLES_DIFFERENT_METADATA;
+ ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
+ source suite/parts/inc/part_exch_drop_tabs.inc;
+
+echo # === the table has directories, partition does not;
+ let $p_data_directory= ;
+ let $data_directory= DATA DIRECTORY = '$data_dir_path';
+ let $p_index_directory= ;
+ let $index_directory= INDEX DIRECTORY = '$idx_dir_path';
+ source suite/parts/inc/part_exch_tabs.inc;
+ error ER_TABLES_DIFFERENT_METADATA;
+ ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
+ source suite/parts/inc/part_exch_drop_tabs.inc;
+
+echo # === data directory differs;
+ let $p_data_directory= DATA DIRECTORY = '$data_dir_path';
+ let $data_directory= DATA DIRECTORY = '$idx_dir_path';
+ let $p_index_directory= INDEX DIRECTORY = '$idx_dir_path';
+ let $index_directory= INDEX DIRECTORY = '$idx_dir_path';
+ source suite/parts/inc/part_exch_tabs.inc;
+ error ER_TABLES_DIFFERENT_METADATA;
+ ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
+ source suite/parts/inc/part_exch_drop_tabs.inc;
+
+echo # === index directory differs;
+ let $p_data_directory= DATA DIRECTORY = '$data_dir_path';
+ let $data_directory= DATA DIRECTORY = '$data_dir_path';
+ let $p_index_directory= DATA DIRECTORY = '$data_dir_path';
+ let $index_directory= DATA DIRECTORY = '$idx_dir_path';
+ source suite/parts/inc/part_exch_tabs.inc;
+ error ER_TABLES_DIFFERENT_METADATA;
+ ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
+ source suite/parts/inc/part_exch_drop_tabs.inc;
+
+rmdir $idx_dir_path;
+rmdir $data_dir_path;
diff --git a/mysql-test/suite/parts/t/partition_exch_qa_15.test b/mysql-test/suite/parts/t/partition_exch_qa_15.test
index 51d09be5ed9..8ea641c8178 100644
--- a/mysql-test/suite/parts/t/partition_exch_qa_15.test
+++ b/mysql-test/suite/parts/t/partition_exch_qa_15.test
@@ -1,8 +1,8 @@
# Author: Horst Hunger
# Created: 2010-07-15
---source include/have_innodb.inc
---source include/have_partition.inc
+source include/have_innodb.inc;
+source include/have_partition.inc;
let $engine_table= InnoDB;
let $engine_part= InnoDB;
@@ -10,11 +10,7 @@ let $engine_subpart= InnoDB;
use test;
---disable_result_log
---disable_query_log
---source suite/parts/inc/part_exch_tabs.inc
---enable_result_log
---enable_query_log
+source suite/parts/inc/part_exch_tabs.inc;
# 21) Foreign Key.
# Exchange of partition with table differing in structure.
@@ -22,10 +18,8 @@ CREATE TABLE t_11 (a INT, b VARCHAR(55),
FOREIGN KEY (a) REFERENCES t_10 (a) ON DELETE CASCADE)
ENGINE= InnoDB;
#--error ER_TABLES_DIFFERENT_METADATA
---error ER_PARTITION_EXCHANGE_FOREIGN_KEY
+error ER_PARTITION_EXCHANGE_FOREIGN_KEY;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_11;
---disable_warnings
-DROP TABLE IF EXISTS t_11;
---enable_warnings
---source suite/parts/inc/part_exch_drop_tabs.inc
+DROP TABLE t_11;
+source suite/parts/inc/part_exch_drop_tabs.inc;
diff --git a/mysql-test/suite/parts/t/partition_exch_qa_2.test b/mysql-test/suite/parts/t/partition_exch_qa_2.test
index 83dc0a81fca..1858131ce10 100644
--- a/mysql-test/suite/parts/t/partition_exch_qa_2.test
+++ b/mysql-test/suite/parts/t/partition_exch_qa_2.test
@@ -1,7 +1,7 @@
# Author: Horst Hunger
# Created: 2010-07-05
---source include/have_partition.inc
+source include/have_partition.inc;
let $engine_table= MYISAM;
let $engine_part= MYISAM;
@@ -9,72 +9,61 @@ let $engine_subpart= MYISAM;
use test;
---disable_result_log
---disable_query_log
---source suite/parts/inc/part_exch_tabs.inc
---enable_result_log
---enable_query_log
+source suite/parts/inc/part_exch_tabs.inc;
---sorted_result
+sorted_result;
SELECT * FROM t_10;
---sorted_result
+sorted_result;
SELECT * FROM t_100;
---sorted_result
+sorted_result;
SELECT * FROM t_1000;
---sorted_result
+sorted_result;
SELECT * FROM tp;
---sorted_result
+sorted_result;
SELECT * FROM tsp;
---sorted_result
+sorted_result;
SELECT * FROM tsp_00;
---sorted_result
+sorted_result;
SELECT * FROM tsp_01;
---sorted_result
+sorted_result;
SELECT * FROM tsp_02;
---sorted_result
+sorted_result;
SELECT * FROM tsp_03;
---sorted_result
+sorted_result;
SELECT * FROM tsp_04;
# 3) Invalid exchanges.
# Exchange of partition with table differing in structure.
CREATE TABLE t_11(a INT,b VARCHAR(55)) SELECT * FROM t_10;
---error ER_TABLES_DIFFERENT_METADATA
+error ER_TABLES_DIFFERENT_METADATA;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_11;
---disable_warnings
-DROP TABLE IF EXISTS t_11;
---enable_warnings
+DROP TABLE t_11;
eval CREATE TABLE t_11(a INT,b CHAR(55),PRIMARY KEY(a)) ENGINE= $engine_table SELECT * FROM t_10;
---error ER_TABLES_DIFFERENT_METADATA
+error ER_TABLES_DIFFERENT_METADATA;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_11;
---disable_warnings
-DROP TABLE IF EXISTS t_11;
+DROP TABLE t_11;
CREATE TABLE t_11(a INT,b VARCHAR(55),PRIMARY KEY(a)) ENGINE= MEMORY SELECT * FROM t_10;
---error ER_MIX_HANDLER_ERROR
+error ER_MIX_HANDLER_ERROR;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_11;
---disable_warnings
-DROP TABLE IF EXISTS t_11;
---enable_warnings
+DROP TABLE t_11;
# Exchange of partition with partitioned table.
eval CREATE TABLE t_11(a INT,b CHAR(55),PRIMARY KEY(a)) ENGINE= $engine_table
PARTITION BY KEY() AS SELECT * FROM t_10;
---error ER_PARTITION_EXCHANGE_PART_TABLE
+error ER_PARTITION_EXCHANGE_PART_TABLE;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_11;
---disable_warnings
-DROP TABLE IF EXISTS t_11;
---enable_warnings
+DROP TABLE t_11;
# Exchange of subpartition with partitioned table.
---error ER_PARTITION_EXCHANGE_PART_TABLE
+error ER_PARTITION_EXCHANGE_PART_TABLE;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE tsp;
# Exchange of subpartitioned partition with table.
---error ER_PARTITION_INSTEAD_OF_SUBPARTITION
+error ER_PARTITION_INSTEAD_OF_SUBPARTITION;
ALTER TABLE tsp EXCHANGE PARTITION p0 WITH TABLE t_10;
# Exchange of values in partition not fitting the hash.
---error ER_ROW_DOES_NOT_MATCH_PARTITION
+error ER_ROW_DOES_NOT_MATCH_PARTITION;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_100;
# Exchange of values in subpartition not fitting the hash.
---error ER_ROW_DOES_NOT_MATCH_PARTITION
+error ER_ROW_DOES_NOT_MATCH_PARTITION;
ALTER TABLE tp EXCHANGE PARTITION p2 WITH TABLE t_10;
---source suite/parts/inc/part_exch_drop_tabs.inc
+source suite/parts/inc/part_exch_drop_tabs.inc;
diff --git a/mysql-test/suite/parts/t/partition_exch_qa_3.test b/mysql-test/suite/parts/t/partition_exch_qa_3.test
index aa79e97adb6..fc49eb1da90 100644
--- a/mysql-test/suite/parts/t/partition_exch_qa_3.test
+++ b/mysql-test/suite/parts/t/partition_exch_qa_3.test
@@ -1,8 +1,8 @@
# Author: Horst Hunger
# Created: 2010-07-05
---source include/have_partition.inc
---source include/have_innodb.inc
+source include/have_partition.inc;
+source include/have_innodb.inc;
let $engine_table= MYISAM;
let $engine_part= InnoDB;
@@ -10,38 +10,34 @@ let $engine_subpart= InnoDB;
use test;
---disable_result_log
---disable_query_log
---source suite/parts/inc/part_exch_tabs.inc
+source suite/parts/inc/part_exch_tabs.inc;
---sorted_result
+sorted_result;
SELECT * FROM t_10;
---sorted_result
+sorted_result;
SELECT * FROM t_100;
---sorted_result
+sorted_result;
SELECT * FROM t_1000;
---sorted_result
+sorted_result;
SELECT * FROM tp;
---sorted_result
+sorted_result;
SELECT * FROM tsp;
---sorted_result
+sorted_result;
SELECT * FROM tsp_00;
---sorted_result
+sorted_result;
SELECT * FROM tsp_01;
---sorted_result
+sorted_result;
SELECT * FROM tsp_02;
---sorted_result
+sorted_result;
SELECT * FROM tsp_03;
---sorted_result
+sorted_result;
SELECT * FROM tsp_04;
---enable_result_log
---enable_query_log
# 5) Exchanges with different engines.
---error ER_MIX_HANDLER_ERROR
+error ER_MIX_HANDLER_ERROR;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
---error ER_MIX_HANDLER_ERROR
+error ER_MIX_HANDLER_ERROR;
ALTER TABLE tsp EXCHANGE PARTITION sp00 WITH TABLE tsp_00;
---source suite/parts/inc/part_exch_drop_tabs.inc
+source suite/parts/inc/part_exch_drop_tabs.inc;
diff --git a/mysql-test/suite/parts/t/partition_exch_qa_6.test b/mysql-test/suite/parts/t/partition_exch_qa_6.test
index 69b49a32813..ace13f26921 100644
--- a/mysql-test/suite/parts/t/partition_exch_qa_6.test
+++ b/mysql-test/suite/parts/t/partition_exch_qa_6.test
@@ -1,8 +1,8 @@
# Author: Horst Hunger
# Created: 2010-07-06
---source include/not_embedded.inc
---source include/have_partition.inc
+source include/not_embedded.inc;
+source include/have_partition.inc;
let $engine_table= MYISAM;
let $engine_part= MYISAM;
@@ -10,41 +10,36 @@ let $engine_subpart= MYISAM;
CREATE USER test2@localhost;
---disable_result_log
---disable_query_log
---source suite/parts/inc/part_exch_tabs.inc
---enable_result_log
---enable_query_log
+source suite/parts/inc/part_exch_tabs.inc;
GRANT USAGE ON *.* TO test2@localhost;
GRANT CREATE, DROP, ALTER, UPDATE, INSERT, SELECT ON test.* TO test2@localhost;
connect (test2,localhost,test2,,test,$MASTER_MYPORT,$MASTER_MYSOCK);
-USE test;
SHOW GRANTS FOR CURRENT_USER;
# 9) Exchanges with different owner.
# Privilege for ALTER and SELECT
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
---sorted_result
+sorted_result;
SELECT * FROM t_10;
---sorted_result
+sorted_result;
SELECT * FROM tp WHERE a BETWEEN 0 AND 10;
# Back to former values.
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
---sorted_result
+sorted_result;
SELECT * FROM t_10;
---sorted_result
+sorted_result;
SELECT * FROM tp WHERE a BETWEEN 0 AND 10;
ALTER TABLE tsp EXCHANGE PARTITION sp00 WITH TABLE tsp_00;
---sorted_result
+sorted_result;
SELECT * FROM tsp_00;
---sorted_result
+sorted_result;
SELECT * FROM tsp WHERE a BETWEEN 0 AND 10;
# Back to former values.
ALTER TABLE tsp EXCHANGE PARTITION sp00 WITH TABLE tsp_00;
---sorted_result
+sorted_result;
SELECT * FROM tsp_00;
---sorted_result
+sorted_result;
SELECT * FROM tsp WHERE a BETWEEN 0 AND 10;
disconnect test2;
@@ -55,7 +50,7 @@ connect (test2,localhost,test2,,test,$MASTER_MYPORT,$MASTER_MYSOCK);
USE test;
SHOW GRANTS FOR CURRENT_USER;
# Privilege for ALTER and SELECT
---error ER_TABLEACCESS_DENIED_ERROR
+error ER_TABLEACCESS_DENIED_ERROR;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
disconnect test2;
@@ -66,7 +61,7 @@ REVOKE CREATE ON test.* FROM test2@localhost;
connect (test2,localhost,test2,,test,$MASTER_MYPORT,$MASTER_MYSOCK);
USE test;
SHOW GRANTS FOR CURRENT_USER;
---error ER_TABLEACCESS_DENIED_ERROR
+error ER_TABLEACCESS_DENIED_ERROR;
ALTER TABLE tsp EXCHANGE PARTITION sp00 WITH TABLE tsp_00;
disconnect test2;
@@ -75,15 +70,14 @@ GRANT CREATE ON test.* TO test2@localhost;
REVOKE DROP ON test.* FROM test2@localhost;
connect (test2,localhost,test2,,test,$MASTER_MYPORT,$MASTER_MYSOCK);
-USE test;
SHOW GRANTS FOR CURRENT_USER;
# Privilege for ALTER and SELECT
---error ER_TABLEACCESS_DENIED_ERROR
+error ER_TABLEACCESS_DENIED_ERROR;
ALTER TABLE tp EXCHANGE PARTITION p0 WITH TABLE t_10;
disconnect test2;
connection default;
---source suite/parts/inc/part_exch_drop_tabs.inc
+source suite/parts/inc/part_exch_drop_tabs.inc;
DROP USER test2@localhost;
diff --git a/mysql-test/suite/perfschema/r/misc.result b/mysql-test/suite/perfschema/r/misc.result
index 2adf2cba851..f2d40fe90b5 100644
--- a/mysql-test/suite/perfschema/r/misc.result
+++ b/mysql-test/suite/perfschema/r/misc.result
@@ -42,12 +42,12 @@ AND EVENT_NAME IN
WHERE NAME LIKE "wait/synch/%")
LIMIT 1;
create table test.t1(a int) engine=performance_schema;
-ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
SELECT object_schema, object_name FROM performance_schema.objects_summary_global_by_type
WHERE object_schema='test';
object_schema object_name
create table test.t1 like performance_schema.events_waits_current;
-ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
SELECT object_schema, object_name FROM performance_schema.objects_summary_global_by_type
WHERE object_schema='test';
object_schema object_name
diff --git a/mysql-test/suite/perfschema/r/privilege.result b/mysql-test/suite/perfschema/r/privilege.result
index 09d32a177fd..7bb7627142e 100644
--- a/mysql-test/suite/perfschema/r/privilege.result
+++ b/mysql-test/suite/perfschema/r/privilege.result
@@ -155,13 +155,13 @@ before insert on performance_schema.file_instances
for each row begin end;
ERROR 42000: Access denied for user 'root'@'localhost' to database 'performance_schema'
create table test.t1(a int) engine=PERFORMANCE_SCHEMA;
-ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
create table test.t1 like performance_schema.setup_instruments;
-ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
create table test.t1 like performance_schema.events_waits_current;
-ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
create table test.t1 like performance_schema.file_instances;
-ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
insert into performance_schema.setup_instruments
set name="foo";
ERROR 42000: INSERT command denied to user 'root'@'localhost' for table 'setup_instruments'
@@ -254,13 +254,13 @@ before insert on performance_schema.file_instances
for each row begin end;
ERROR 42000: Access denied for user 'pfs_user_1'@'localhost' to database 'performance_schema'
create table test.t1(a int) engine=PERFORMANCE_SCHEMA;
-ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
create table test.t1 like performance_schema.setup_instruments;
-ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
create table test.t1 like performance_schema.events_waits_current;
-ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
create table test.t1 like performance_schema.file_instances;
-ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
insert into performance_schema.setup_instruments
set name="foo";
ERROR 42000: INSERT command denied to user 'pfs_user_1'@'localhost' for table 'setup_instruments'
@@ -354,13 +354,13 @@ before insert on performance_schema.file_instances
for each row begin end;
ERROR 42000: Access denied for user 'pfs_user_2'@'localhost' to database 'performance_schema'
create table test.t1(a int) engine=PERFORMANCE_SCHEMA;
-ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
create table test.t1 like performance_schema.setup_instruments;
-ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
create table test.t1 like performance_schema.events_waits_current;
-ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
create table test.t1 like performance_schema.file_instances;
-ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
insert into performance_schema.setup_instruments
set name="foo";
ERROR 42000: INSERT command denied to user 'pfs_user_2'@'localhost' for table 'setup_instruments'
@@ -454,13 +454,13 @@ before insert on performance_schema.file_instances
for each row begin end;
ERROR 42000: Access denied for user 'pfs_user_3'@'localhost' to database 'performance_schema'
create table test.t1(a int) engine=PERFORMANCE_SCHEMA;
-ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
create table test.t1 like performance_schema.setup_instruments;
-ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
create table test.t1 like performance_schema.events_waits_current;
-ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
create table test.t1 like performance_schema.file_instances;
-ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
insert into performance_schema.setup_instruments
set name="foo";
ERROR 42000: INSERT command denied to user 'pfs_user_3'@'localhost' for table 'setup_instruments'
diff --git a/mysql-test/suite/rpl/r/rpl_create_drop_view.result b/mysql-test/suite/rpl/r/rpl_create_drop_view.result
index 34f27faf98a..ebbe9efc9df 100644
--- a/mysql-test/suite/rpl/r/rpl_create_drop_view.result
+++ b/mysql-test/suite/rpl/r/rpl_create_drop_view.result
@@ -99,7 +99,7 @@ DROP VIEW v1;
ERROR 42S02: Unknown VIEW: 'test.v1'
DROP VIEW IF EXISTS v2;
Warnings:
-Note 4089 Unknown VIEW: 'test.v2'
+Note 4090 Unknown VIEW: 'test.v2'
# Syncing slave with master
connection slave;
SELECT * FROM v1;
diff --git a/mysql-test/suite/rpl/r/rpl_gtid_errorhandling.result b/mysql-test/suite/rpl/r/rpl_gtid_errorhandling.result
index 62a5b9c3531..54156685806 100644
--- a/mysql-test/suite/rpl/r/rpl_gtid_errorhandling.result
+++ b/mysql-test/suite/rpl/r/rpl_gtid_errorhandling.result
@@ -113,7 +113,7 @@ SET @old_dbug= @@GLOBAL.debug_dbug;
SET GLOBAL debug_dbug="+d,dummy_disable_default_dbug_output";
SET GLOBAL debug_dbug="+d,gtid_fail_after_record_gtid";
SET sql_log_bin=0;
-CALL mtr.add_suppression('Got error 131 "Command not supported by database" during COMMIT');
+CALL mtr.add_suppression('Got error 131 "Command not supported by the engine" during COMMIT');
SET sql_log_bin=1;
START SLAVE;
include/wait_for_slave_sql_error.inc [errno=1180]
diff --git a/mysql-test/suite/rpl/r/rpl_sp.result b/mysql-test/suite/rpl/r/rpl_sp.result
index 25287b70cf6..2849e5b7ae3 100644
--- a/mysql-test/suite/rpl/r/rpl_sp.result
+++ b/mysql-test/suite/rpl/r/rpl_sp.result
@@ -128,7 +128,7 @@ show warnings;
Level Code Message
Error 1062 Duplicate entry '20' for key 'a'
Warning 1196 Some non-transactional changed tables couldn't be rolled back
-Note 4091 At line 4 in mysqltest1.foo4
+Note 4092 At line 4 in mysqltest1.foo4
select * from t2;
a
20
@@ -291,7 +291,7 @@ end|
do fn1(100);
Warnings:
Error 1062 Duplicate entry '100' for key 'a'
-Note 4091 At line 3 in mysqltest1.fn1
+Note 4092 At line 3 in mysqltest1.fn1
Warning 1196 Some non-transactional changed tables couldn't be rolled back
select fn1(20);
ERROR 23000: Duplicate entry '20' for key 'a'
diff --git a/mysql-test/suite/rpl/r/rpl_sp_variables.result b/mysql-test/suite/rpl/r/rpl_sp_variables.result
new file mode 100644
index 00000000000..7e2ba72845c
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_sp_variables.result
@@ -0,0 +1,28 @@
+include/master-slave.inc
+[connection master]
+#
+# MDEV-13685 Can not replay binary log due to Illegal mix of collations (latin1_swedish_ci,IMPLICIT) and (utf8mb4_general_ci,COERCIBLE) for operation 'concat'
+#
+connection master;
+SET NAMES utf8;
+CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET utf8);
+CREATE PROCEDURE p1()
+BEGIN
+DECLARE v_id INT DEFAULT 2017;
+INSERT INTO test.t1 SELECT CONCAT(v_id, '오');
+END;
+$$
+CALL p1;
+SELECT * FROM t1;
+a
+2017오
+connection slave;
+SET NAMES utf8;
+SELECT * FROM t1;
+a
+2017오
+connection master;
+DROP PROCEDURE p1;
+DROP TABLE t1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_gtid_errorhandling.test b/mysql-test/suite/rpl/t/rpl_gtid_errorhandling.test
index 796f6894f19..c02e2670c92 100644
--- a/mysql-test/suite/rpl/t/rpl_gtid_errorhandling.test
+++ b/mysql-test/suite/rpl/t/rpl_gtid_errorhandling.test
@@ -132,7 +132,7 @@ SET @old_dbug= @@GLOBAL.debug_dbug;
SET GLOBAL debug_dbug="+d,dummy_disable_default_dbug_output";
SET GLOBAL debug_dbug="+d,gtid_fail_after_record_gtid";
SET sql_log_bin=0;
-CALL mtr.add_suppression('Got error 131 "Command not supported by database" during COMMIT');
+CALL mtr.add_suppression('Got error 131 "Command not supported by the engine" during COMMIT');
SET sql_log_bin=1;
START SLAVE;
--let $slave_sql_errno= 1180
diff --git a/mysql-test/suite/rpl/t/rpl_sp_variables.test b/mysql-test/suite/rpl/t/rpl_sp_variables.test
new file mode 100644
index 00000000000..87e9fe194ea
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_sp_variables.test
@@ -0,0 +1,28 @@
+source include/master-slave.inc;
+
+--echo #
+--echo # MDEV-13685 Can not replay binary log due to Illegal mix of collations (latin1_swedish_ci,IMPLICIT) and (utf8mb4_general_ci,COERCIBLE) for operation 'concat'
+--echo #
+
+connection master;
+SET NAMES utf8;
+CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET utf8);
+DELIMITER $$;
+CREATE PROCEDURE p1()
+BEGIN
+ DECLARE v_id INT DEFAULT 2017;
+ INSERT INTO test.t1 SELECT CONCAT(v_id, '오');
+END;
+$$
+DELIMITER ;$$
+CALL p1;
+SELECT * FROM t1;
+sync_slave_with_master;
+SET NAMES utf8;
+SELECT * FROM t1;
+connection master;
+DROP PROCEDURE p1;
+DROP TABLE t1;
+sync_slave_with_master;
+
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/sql_sequence/alter.result b/mysql-test/suite/sql_sequence/alter.result
index 283253b585a..0fca8b7c02b 100644
--- a/mysql-test/suite/sql_sequence/alter.result
+++ b/mysql-test/suite/sql_sequence/alter.result
@@ -212,7 +212,7 @@ ERROR 42S02: 'test.t1' is not a SEQUENCE
drop table t1;
alter sequence if exists t1 minvalue=100;
Warnings:
-Note 4088 Unknown SEQUENCE: 'test.t1'
+Note 4089 Unknown SEQUENCE: 'test.t1'
alter sequence t1 minvalue=100;
ERROR 42S02: Table 'test.t1' doesn't exist
create sequence t1;
diff --git a/mysql-test/suite/sql_sequence/create.result b/mysql-test/suite/sql_sequence/create.result
index 2697da1d8ff..a5eb64802db 100644
--- a/mysql-test/suite/sql_sequence/create.result
+++ b/mysql-test/suite/sql_sequence/create.result
@@ -165,7 +165,7 @@ drop sequence t1;
ERROR 42S02: 'test.t1' is not a SEQUENCE
drop sequence if exists t1;
Warnings:
-Note 4088 Unknown SEQUENCE: 'test.t1'
+Note 4089 Unknown SEQUENCE: 'test.t1'
create sequence t1 start with 10 maxvalue=9;
ERROR HY000: Sequence 'test.t1' values are conflicting
create sequence t1 minvalue= 100 maxvalue=10;
@@ -377,7 +377,7 @@ key key1 (next_not_cached_value)
ERROR HY000: Sequence 'test.t1' table structure is invalid (Sequence tables cannot have any keys)
drop sequence if exists t1;
Warnings:
-Note 4088 Unknown SEQUENCE: 'test.t1'
+Note 4089 Unknown SEQUENCE: 'test.t1'
create sequence t1;
create sequence t2;
create table t3 (a int) engine=myisam;
@@ -387,8 +387,8 @@ CREATE SEQUENCE s1;
drop sequence s1;
drop sequence if exists t1,t2,t3,t4;
Warnings:
-Note 4088 Unknown SEQUENCE: 'test.t3'
-Note 4088 Unknown SEQUENCE: 'test.t4'
+Note 4089 Unknown SEQUENCE: 'test.t3'
+Note 4089 Unknown SEQUENCE: 'test.t4'
drop table if exists t1,t2,t3;
Warnings:
Note 1051 Unknown table 'test.t1'
@@ -414,9 +414,9 @@ CREATE TABLE t2 (a int);
CREATE SEQUENCE s1;
drop sequence if exists t1,t2,s1,s2;
Warnings:
-Note 4088 Unknown SEQUENCE: 'test.t1'
-Note 4088 Unknown SEQUENCE: 'test.t2'
-Note 4088 Unknown SEQUENCE: 'test.s2'
+Note 4089 Unknown SEQUENCE: 'test.t1'
+Note 4089 Unknown SEQUENCE: 'test.t2'
+Note 4089 Unknown SEQUENCE: 'test.s2'
drop table if exists t1,t2;
CREATE TEMPORARY SEQUENCE s1;
DROP SEQUENCE s1;
diff --git a/mysql-test/suite/vcol/r/innodb_virtual_fk.result b/mysql-test/suite/vcol/r/innodb_virtual_fk.result
new file mode 100644
index 00000000000..58db12583e2
--- /dev/null
+++ b/mysql-test/suite/vcol/r/innodb_virtual_fk.result
@@ -0,0 +1,12 @@
+set default_storage_engine=innodb;
+create table t1 (id int primary key, id2 int as (id) virtual, key id2 (id2));
+create table t2 (id int key, constraint fk_id foreign key (id) references t1 (id) on delete cascade);
+insert into t1 (id) values (1), (2);
+insert into t2 (id) values (1), (2);
+delete from t1;
+select * from t1;
+id id2
+select * from t2;
+id
+drop table t2;
+drop table t1;
diff --git a/mysql-test/suite/vcol/r/update.result b/mysql-test/suite/vcol/r/update.result
index 95b0093ed71..5c7905cf547 100644
--- a/mysql-test/suite/vcol/r/update.result
+++ b/mysql-test/suite/vcol/r/update.result
@@ -155,3 +155,13 @@ select * from t;
a b c d e
11 11 11 11 11
drop table t, t1, t2;
+create table t (f1 int, f2 int, f3 int as (f1*2) virtual, key(f3,f2));
+insert into t (f1,f2) values (1,1),(2,2);
+create view v as
+select a2.f1, a2.f2, a1.f3
+from t a1, t a2
+where a2.f3 <> 0
+with local check option;
+update v set f3 = 52;
+drop view v;
+drop table t;
diff --git a/mysql-test/suite/vcol/r/vcol_supported_sql_funcs.result b/mysql-test/suite/vcol/r/vcol_supported_sql_funcs.result
index 86030a304d4..3fa4f6e1431 100644
--- a/mysql-test/suite/vcol/r/vcol_supported_sql_funcs.result
+++ b/mysql-test/suite/vcol/r/vcol_supported_sql_funcs.result
@@ -2911,16 +2911,31 @@ drop table t1;
set sql_warnings = 0;
# TIME_FORMAT()
set sql_warnings = 1;
-create table t1 (a datetime, b varchar(10) as (time_format(a,"%d.%m.%Y")));
+create table t1 (a datetime, b varchar(10) as (time_format(a,"%H.%i.%S")));
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` datetime DEFAULT NULL,
- `b` varchar(10) GENERATED ALWAYS AS (time_format(`a`,'%d.%m.%Y')) VIRTUAL
+ `b` varchar(10) GENERATED ALWAYS AS (time_format(`a`,'%H.%i.%S')) VIRTUAL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
-insert into t1 values ('2001-01-01 02:02:02',default);
+insert into t1 values ('2001-01-01 02:03:04',default);
select * from t1;
a b
-2001-01-01 02:02:02 01.01.2001
+2001-01-01 02:03:04 02.03.04
+drop table t1;
+set sql_warnings = 0;
+# TIME_FORMAT() STORED
+set sql_warnings = 1;
+create table t1 (a datetime, b varchar(10) as (time_format(a,"%H.%i.%S")) STORED);
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` datetime DEFAULT NULL,
+ `b` varchar(10) GENERATED ALWAYS AS (time_format(`a`,'%H.%i.%S')) STORED
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into t1 values ('2001-01-01 02:03:04',default);
+select * from t1;
+a b
+2001-01-01 02:03:04 02.03.04
drop table t1;
set sql_warnings = 0;
diff --git a/mysql-test/suite/vcol/t/innodb_virtual_fk.test b/mysql-test/suite/vcol/t/innodb_virtual_fk.test
new file mode 100644
index 00000000000..c364adaa613
--- /dev/null
+++ b/mysql-test/suite/vcol/t/innodb_virtual_fk.test
@@ -0,0 +1,16 @@
+source include/have_innodb.inc;
+set default_storage_engine=innodb;
+
+#
+# MDEV-13708 Crash with indexed virtual columns and FK cascading deletes
+#
+
+create table t1 (id int primary key, id2 int as (id) virtual, key id2 (id2));
+create table t2 (id int key, constraint fk_id foreign key (id) references t1 (id) on delete cascade);
+insert into t1 (id) values (1), (2);
+insert into t2 (id) values (1), (2);
+delete from t1;
+select * from t1;
+select * from t2;
+drop table t2;
+drop table t1;
diff --git a/mysql-test/suite/vcol/t/update.test b/mysql-test/suite/vcol/t/update.test
index 062d9736ed8..1797bdd501e 100644
--- a/mysql-test/suite/vcol/t/update.test
+++ b/mysql-test/suite/vcol/t/update.test
@@ -111,3 +111,17 @@ check table t; select * from t;
update t, t tt set t.b=11, tt.d=11 where t.a=tt.a;
check table t; select * from t;
drop table t, t1, t2;
+
+#
+# MDEV-13623 Assertion `!table || (!table->read_set || bitmap_is_set(table->read_set, field_index))' failed in virtual longlong Field_long::val_int
+#
+create table t (f1 int, f2 int, f3 int as (f1*2) virtual, key(f3,f2));
+insert into t (f1,f2) values (1,1),(2,2);
+create view v as
+ select a2.f1, a2.f2, a1.f3
+ from t a1, t a2
+ where a2.f3 <> 0
+ with local check option;
+update v set f3 = 52;
+drop view v;
+drop table t;
diff --git a/mysql-test/suite/vcol/t/vcol_supported_sql_funcs_main.inc b/mysql-test/suite/vcol/t/vcol_supported_sql_funcs_main.inc
index 4a95ea75534..dafc42098dd 100644
--- a/mysql-test/suite/vcol/t/vcol_supported_sql_funcs_main.inc
+++ b/mysql-test/suite/vcol/t/vcol_supported_sql_funcs_main.inc
@@ -1204,8 +1204,14 @@ let $rows = 1;
--source suite/vcol/inc/vcol_supported_sql_funcs.inc
--echo # TIME_FORMAT()
-let $cols = a datetime, b varchar(10) as (time_format(a,"%d.%m.%Y"));
-let $values1 = '2001-01-01 02:02:02',default;
+let $cols = a datetime, b varchar(10) as (time_format(a,"%H.%i.%S"));
+let $values1 = '2001-01-01 02:03:04',default;
+let $rows = 1;
+--source suite/vcol/inc/vcol_supported_sql_funcs.inc
+
+--echo # TIME_FORMAT() STORED
+let $cols = a datetime, b varchar(10) as (time_format(a,"%H.%i.%S")) STORED;
+let $values1 = '2001-01-01 02:03:04',default;
let $rows = 1;
--source suite/vcol/inc/vcol_supported_sql_funcs.inc
diff --git a/mysql-test/t/alter_table.test b/mysql-test/t/alter_table.test
index ca2f73db5ca..487990b61cd 100644
--- a/mysql-test/t/alter_table.test
+++ b/mysql-test/t/alter_table.test
@@ -1852,6 +1852,7 @@ create table t1 (a int, b int, check(a>b));
alter table t1 drop column a;
--error ER_BAD_FIELD_ERROR
alter table t1 drop column b, add column b bigint first;
+alter table t1 drop column a, drop constraint constraint_1;
show create table t1;
drop table t1;
@@ -1873,5 +1874,6 @@ drop table t1;
create table t1 (a int, b int, c int, unique(a,b));
--error ER_KEY_COLUMN_DOES_NOT_EXITS
alter table t1 drop column a;
+alter table t1 drop column a, drop index a;
show create table t1;
drop table t1;
diff --git a/mysql-test/t/check_constraint.test b/mysql-test/t/check_constraint.test
index 43b4417cfa3..f72ce38087e 100644
--- a/mysql-test/t/check_constraint.test
+++ b/mysql-test/t/check_constraint.test
@@ -92,3 +92,14 @@ create or replace table t1( c1 int auto_increment primary key, check( c1 > 0 or
create table t1 (a int check (@b in (select user from mysql.user)));
--error ER_GENERATED_COLUMN_FUNCTION_IS_NOT_ALLOWED
create table t1 (a int check (a > @b));
+
+#
+# MDEV-13596 CHECK constraints disallow NULL to pass through, violating SQL
+#
+create table t1 (a int check (a = 1));
+insert t1 values (1);
+--error ER_CONSTRAINT_FAILED
+insert t1 values (2);
+insert t1 values (NULL);
+select * from t1;
+drop table t1;
diff --git a/mysql-test/t/count_distinct.test b/mysql-test/t/count_distinct.test
index a00574b6cba..86045e862e7 100644
--- a/mysql-test/t/count_distinct.test
+++ b/mysql-test/t/count_distinct.test
@@ -121,5 +121,34 @@ drop table t1;
set @@tmp_table_size = default;
#
+# MDEV-13457: Wrong result for aggregate function with distinct clause when the value for
+# tmp_table_size is small
+#
+
+create table t1 (
+a VARCHAR(1020),
+b int
+);
+insert into t1 values
+( 0 , 1 ),
+( 1 , 2 ),
+( 2 , 3 ),
+( 3 , 4 ),
+( 4 , 5 ),
+( 5 , 6 ),
+( 6 , 7 ),
+( 7 , 8 ),
+( 8 , 9 ),
+( 9 , 10 ),
+( 0 , 11 ),
+( 1 , 12 ),
+( 2 , 13 ),
+( 3 , 14 );
+set @@tmp_table_size=1024;
+select count(distinct a) from t1;
+drop table t1;
+set @@tmp_table_size = default;
+
+#
# End of 5.5 tests
#
diff --git a/mysql-test/t/date_formats.test b/mysql-test/t/date_formats.test
index 972543aefc2..3bf6fabbf6d 100644
--- a/mysql-test/t/date_formats.test
+++ b/mysql-test/t/date_formats.test
@@ -366,3 +366,12 @@ SET NAMES latin1;
--echo #
--echo # End of 5.1 tests
--echo #
+
+#
+# TIME_FORMAT and non-time format specifiers
+#
+select time_format('2001-01-01 02:02:02', '%d.%m.%Y');
+select time_format('2001-01-01 02:02:02', '%d %T');
+select time_format('01 02:02:02', '%d %T');
+select time_format('01 02:02:02', '%T');
+select time_format('2001-01-01 02:02:02', '%T');
diff --git a/mysql-test/t/func_math.test b/mysql-test/t/func_math.test
index ee70102c496..83e345ec890 100644
--- a/mysql-test/t/func_math.test
+++ b/mysql-test/t/func_math.test
@@ -609,6 +609,10 @@ select 0=0, 0=-0, 0.0= -0.0, 0.0 = -(0.0), 0.0E1=-0.0E1, 0.0E1=-(0.0E1);
select CRC32(NULL), CRC32(''), CRC32('MySQL'), CRC32('mysql'), CRC32('01234567'), CRC32('012345678');
+#
+# MDEV-13673 Bad result in view
+#
+explain extended select (3-2)+1, (3/2)*1, 3-(2+1), 3/(2*1);
--echo #
--echo # Start of 10.3 tests
diff --git a/mysql-test/t/func_misc.test b/mysql-test/t/func_misc.test
index c661819424a..ac983048129 100644
--- a/mysql-test/t/func_misc.test
+++ b/mysql-test/t/func_misc.test
@@ -1092,6 +1092,17 @@ select release_lock('test');
--echo # -- Done.
--echo
+
+--echo #
+--echo # MDEV-13685 Can not replay binary log due to Illegal mix of collations (latin1_swedish_ci,IMPLICIT) and (utf8mb4_general_ci,COERCIBLE) for operation 'concat'
+--echo #
+SET NAMES utf8;
+SELECT COERCIBILITY(NAME_CONST('name','test'));
+SELECT COERCIBILITY(NAME_CONST('name',TIME'00:00:00'));
+SELECT COERCIBILITY(NAME_CONST('name',15));
+SELECT CONCAT(NAME_CONST('name',15),'오');
+SET NAMES latin1;
+
--echo #
--echo # Start of 10.2 tests
--echo #
diff --git a/mysql-test/t/insert.test b/mysql-test/t/insert.test
index 206c5553100..80caefa5a0f 100644
--- a/mysql-test/t/insert.test
+++ b/mysql-test/t/insert.test
@@ -573,3 +573,32 @@ insert ignore into t1 values (1,12) on duplicate key update f2=13;
set @@old_mode="";
insert ignore into t1 values (1,12);
DROP TABLE t1;
+
+--echo #
+--echo # MDEV-13290 Assertion Assertion `!is_set() || (m_status == DA_OK_BULK
+--echo # && is_bulk_op())' or `! is_set()' failed
+--echo #
+
+SET @save_mode= @@sql_mode;
+SET sql_mode= 'STRICT_ALL_TABLES';
+CREATE TABLE t1 (f1 INT DEFAULT 0, f2 INT);
+CREATE ALGORITHM = MERGE VIEW v1 AS SELECT f1, f2 FROM t1 WHERE f1 = 'x' WITH CHECK OPTION;
+--error ER_TRUNCATED_WRONG_VALUE
+REPLACE INTO v1 SET f2 = 1;
+SELECT * from t1;
+drop view v1;
+CREATE ALGORITHM = MERGE VIEW v1 AS SELECT f1, f2 FROM t1 WHERE f1 = cast('' as decimal) WITH CHECK OPTION;
+--error ER_TRUNCATED_WRONG_VALUE
+REPLACE INTO v1 SET f2 = 1;
+SELECT * from t1;
+drop view v1;
+SELECT 0,0 INTO OUTFILE 't1.txt';
+CREATE ALGORITHM = MERGE VIEW v1 AS SELECT f1, f2 FROM t1 WHERE f1 = 'x' WITH CHECK OPTION;
+--error ER_TRUNCATED_WRONG_VALUE
+LOAD DATA INFILE 't1.txt' INTO TABLE v1;
+SELECT * from t1;
+let $MYSQLD_DATADIR= `select @@datadir`;
+remove_file $MYSQLD_DATADIR/test/t1.txt;
+drop view v1;
+drop table t1;
+SET @@sql_mode= @save_mode;
diff --git a/mysql-test/t/locale.test b/mysql-test/t/locale.test
index b1fbc40f0c8..4570b968ecf 100644
--- a/mysql-test/t/locale.test
+++ b/mysql-test/t/locale.test
@@ -134,7 +134,22 @@ SELECT * FROM non_existent;
SET lc_time_names=@old_50915_lc_time_names;
+--echo #
+--echo # End of 10.0 tests
+--echo #
+
+--echo #
+--echo # End of 10.1 tests
+--echo #
+
+# Item::print
+create view v1 as select
+ format(123456789,2) as b,
+ format(123456789,2,'rm_CH') as b1;
+select * from v1;
+show create view v1;
+drop view v1;
--echo #
---echo # End of 5.6 tests
+--echo # End of 10.2 tests
--echo #
diff --git a/mysql-test/t/log_tables-big.test b/mysql-test/t/log_tables-big.test
index 8936a163d73..fa8810ecd3b 100644
--- a/mysql-test/t/log_tables-big.test
+++ b/mysql-test/t/log_tables-big.test
@@ -7,6 +7,7 @@
# check that CSV engine was compiled in
--source include/have_csv.inc
+set @log_output.saved = @@global.log_output;
set @@global.log_output = 'TABLE';
connect (con1,localhost,root,,);
@@ -21,13 +22,13 @@ select get_lock('bug27638', 1);
connection con2;
set session long_query_time=1;
select get_lock('bug27638', 2);
-select if (query_time >= '00:00:01', 'OK', 'WRONG') as qt, sql_text from mysql.slow_log
+select if (query_time >= '00:00:01', 'OK', concat('WRONG: ',query_time)) as qt, sql_text from mysql.slow_log
where sql_text = 'select get_lock(\'bug27638\', 2)';
select get_lock('bug27638', 60);
-select if (query_time >= '00:00:59', 'OK', 'WRONG') as qt, sql_text from mysql.slow_log
+select if (query_time >= '00:00:59', 'OK', concat('WRONG: ',query_time)) as qt, sql_text from mysql.slow_log
where sql_text = 'select get_lock(\'bug27638\', 60)';
select get_lock('bug27638', 101);
-select if (query_time >= '00:01:40', 'OK', 'WRONG') as qt, sql_text from mysql.slow_log
+select if (query_time >= '00:01:40', 'OK', concat('WRONG: ',query_time)) as qt, sql_text from mysql.slow_log
where sql_text = 'select get_lock(\'bug27638\', 101)';
connection con1;
select release_lock('bug27638');
@@ -36,4 +37,4 @@ connection default;
disconnect con1;
disconnect con2;
-set @@global.log_output=default;
+set @@global.log_output = @log_output.saved;
diff --git a/mysql-test/t/partition_symlink.test b/mysql-test/t/partition_symlink.test
index f2e3eba5de6..8f6e837299a 100644
--- a/mysql-test/t/partition_symlink.test
+++ b/mysql-test/t/partition_symlink.test
@@ -38,11 +38,10 @@ SHOW CREATE TABLE t1;
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
SHOW CREATE TABLE t2;
INSERT INTO t1 VALUES (0), (1), (2);
---error ER_TABLES_DIFFERENT_METADATA
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
--error ER_TABLES_DIFFERENT_METADATA
ALTER TABLE t1 EXCHANGE PARTITION p2 WITH TABLE t2;
---sorted_result
+SELECT * FROM t1;
SELECT * FROM t2;
DROP TABLE t1, t2;
# skipped because of bug#52354
diff --git a/mysql-test/t/range_interrupted-13751.test b/mysql-test/t/range_interrupted-13751.test
new file mode 100644
index 00000000000..000a46383e8
--- /dev/null
+++ b/mysql-test/t/range_interrupted-13751.test
@@ -0,0 +1,25 @@
+source include/have_debug.inc;
+#
+# MDEV-13751 Interrupted SELECT fails with 1030: 'Got error 1 "Operation not permitted" from storage engine MyISAM'
+#
+CREATE TABLE t1 (i INT AUTO_INCREMENT, c VARCHAR(1), KEY(i), KEY(c,i)) ENGINE=MyISAM;
+INSERT INTO t1 (c) VALUES ('a'),('b'),('c'),('d');
+INSERT INTO t1 (c) SELECT c FROM t1;
+INSERT INTO t1 (c) SELECT c FROM t1;
+INSERT INTO t1 (c) SELECT c FROM t1;
+INSERT INTO t1 (c) SELECT c FROM t1;
+INSERT INTO t1 (c) SELECT c FROM t1;
+INSERT INTO t1 (c) SELECT c FROM t1;
+
+set @old_dbug=@@session.debug_dbug;
+set debug_dbug="+d,kill_join_init_read_record";
+
+--error ER_QUERY_INTERRUPTED
+SELECT 1 FROM t1 AS alias1, t1 AS alias2, t1 AS alias3
+WHERE alias1.c = alias2.c OR alias1.i <= 1
+;
+
+set debug_dbug=@old_dbug;
+
+DROP TABLE t1;
+
diff --git a/mysql-test/t/symlink.test b/mysql-test/t/symlink.test
index e17ea07ca3c..cf95d4cb938 100644
--- a/mysql-test/t/symlink.test
+++ b/mysql-test/t/symlink.test
@@ -316,3 +316,28 @@ eval CREATE TABLE test.t1(id INT(11)) ENGINE MYISAM
DATA DIRECTORY "$MYSQLTEST_VARDIR/tmp";
DROP TABLE test.t1;
+use test;
+
+#
+# End of 5.5 tests
+#
+
+#
+# End of 10.0 tests
+#
+
+#
+# MDEV-13636 ALTER TABLE ... DELAY_KEY_WRITE=1 creates table copy for MyISAM table with DATA DIRECTORY/INDEX DIRECTORY options
+#
+replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR;
+eval
+create table t1(c1 int, c2 int, c3 varchar(100)) engine=MyISAM data directory='$MYSQL_TMP_DIR' index directory = '$MYSQL_TMP_DIR';
+insert t1 values (1,2,3), (2,3,4), (3,4,5), (4,5,6), (5,6,7), (6,7,8), (7,8,9);
+alter online table t1 delay_key_write=1;
+replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR;
+show create table t1;
+drop table t1;
+
+#
+# End of 10.1 tests
+#
diff --git a/mysql-test/t/win.test b/mysql-test/t/win.test
index c2022499acf..c353cd8b599 100644
--- a/mysql-test/t/win.test
+++ b/mysql-test/t/win.test
@@ -1966,10 +1966,6 @@ SELECT i AS fld FROM t1 UNION SELECT COUNT(*) AS fld FROM t1;
DROP TABLE t1;
--echo #
---echo # Start of 10.3 tests
---echo #
-
---echo #
--echo # MDEV-13240 Wrong warning with MAX(datetime_field) OVER (...)
--echo #
@@ -1977,3 +1973,70 @@ CREATE TABLE t1 (dt DATETIME);
INSERT INTO t1 VALUES ('2017-05-17');
SELECT MAX(dt) OVER (ORDER BY dt ROWS BETWEEN 1 FOLLOWING AND 1 FOLLOWING) FROM t1;
DROP TABLE t1;
+
+--echo #
+--echo # MDEV-13358 FIRST_VALUE throws SQL Error (1292): Incorrect datetime value
+--echo #
+CREATE TABLE IF NOT EXISTS `fv_test` (
+ `SOME_DATE` datetime NOT NULL
+ );
+
+INSERT INTO `fv_test` (`SOME_DATE`) VALUES ('2017-07-20 12:47:56');
+
+CREATE TABLE fv_result
+SELECT
+FIRST_VALUE(SOME_DATE) OVER(ORDER BY SOME_DATE DESC) AS somedate
+FROM fv_test;
+
+SHOW CREATE TABLE fv_result;
+
+SELECT * FROM fv_result;
+
+DROP TABLE fv_test, fv_result;
+
+--echo #
+--echo # MDEV-13649: Server crashes in set_field_to_null_with_conversions or in Field::set_notnull
+--echo #
+
+CREATE TABLE t1 (a int);
+INSERT INTO t1 VALUES (0),(1),(2);
+SELECT LEAD(a) OVER (PARTITION BY a) as lead,
+ a AND LEAD(a) OVER (PARTITION BY a) AS a_and_lead_part
+FROM t1;
+
+SELECT a OR LEAD(a) OVER (ORDER BY a) AS a_or_lead_order
+FROM t1
+ORDER BY a;
+
+SELECT a AND LEAD(a) OVER (ORDER BY a) AS a_and_lead_order
+FROM t1
+ORDER BY a;
+
+SELECT a XOR LEAD(a) OVER (ORDER BY a) AS a_xor_lead_order
+FROM t1
+ORDER BY a;
+
+SELECT NOT LEAD(a) OVER (ORDER BY a) AS not_lead_order
+FROM t1
+ORDER BY a;
+
+SELECT LEAD(a) OVER (ORDER BY a) is not null AS is_not_null_lead_order
+FROM t1
+ORDER BY a;
+
+drop table t1;
+
+--echo #
+--echo # MDEV-13354: Server crashes in find_field_in_tables upon PS with window function and subquery
+--echo #
+
+CREATE TABLE t1 (i INT, a char);
+INSERT INTO t1 VALUES (1, 'a'),(2, 'b');
+PREPARE stmt FROM "SELECT row_number() over (partition by i order by i), i FROM (SELECT * from t1) as sq";
+EXECUTE stmt;
+
+DROP TABLE t1;
+
+--echo #
+--echo # Start of 10.3 tests
+--echo #
diff --git a/mysql-test/t/win_as_arg_to_aggregate_func.test b/mysql-test/t/win_as_arg_to_aggregate_func.test
new file mode 100644
index 00000000000..93c9238bedf
--- /dev/null
+++ b/mysql-test/t/win_as_arg_to_aggregate_func.test
@@ -0,0 +1,139 @@
+create table t1 (i int);
+insert into t1 values (5),(6),(0);
+
+--echo #
+--echo # Try out all set functions with window functions as arguments.
+--echo # Any such usage should return an error.
+--echo #
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select MIN( SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select MIN(DISTINCT SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select MAX( SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select MAX(DISTINCT SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select SUM( SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select SUM(DISTINCT SUM(i) OVER (order by i) )
+from t1;
+
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select AVG( SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select AVG(DISTINCT SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select COUNT( SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select COUNT(DISTINCT SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select BIT_AND( SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select BIT_OR( SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select BIT_XOR( SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select STD( SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select STDDEV( SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select STDDEV_POP( SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select STDDEV_SAMP(SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select VARIANCE(SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select VAR_POP(SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select VAR_SAMP(SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select GROUP_CONCAT(SUM(i) OVER (order by i) )
+from t1;
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select GROUP_CONCAT(DISTINCT SUM(i) OVER (order by i) )
+from t1;
+
+--echo #
+--echo # Test that partition instead of order by in over doesn't change result.
+--echo #
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select SUM( SUM(i) OVER (PARTITION BY i) )
+from t1;
+
+--echo #
+--echo # Test that no arguments in OVER() clause lead to crash in this case.
+--echo #
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select SUM( SUM(i) OVER () )
+from t1;
+drop table t1;
+
+-- echo #
+-- echo # MDEV-13774: Server Crash on Execuate of SQL Statement
+-- echo #
+create table t1 (i int);
+insert into t1 values (5),(6),(0);
+
+--error ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+select SUM(
+ IF( SUM( IF(i,1,0)) OVER (PARTITION BY i) > 0
+ AND
+ SUM( IF(i,1,0)) OVER (PARTITION BY i) > 0,
+ 1,
+ 0) )
+from t1;
+
+--echo #
+--echo # A way to get the aggregation result.
+--echo #
+
+select i, IF(SUM(IF(i,1,0)) OVER (PARTITION BY i) > 0 AND SUM( IF(i,1,0)) OVER (PARTITION BY i) > 0,1,0) AS if_col
+from t1
+order by i;
+
+select sum(if_col)
+from (select IF(SUM(IF(i,1,0)) OVER (PARTITION BY i) > 0 AND SUM( IF(i,1,0)) OVER (PARTITION BY i) > 0,1,0) AS if_col
+ from t1) tmp;
+drop table t1;
diff --git a/mysql-test/t/win_insert_select.test b/mysql-test/t/win_insert_select.test
index a9e7e8f322f..6b2e0da4175 100644
--- a/mysql-test/t/win_insert_select.test
+++ b/mysql-test/t/win_insert_select.test
@@ -3,12 +3,12 @@ CREATE TABLE t1 (c1 INT, c2 VARCHAR(30));
PREPARE populate_table FROM "INSERT into t1 values (1, 'manual_insert_1'),
(4, 'manual_insert_2')";
-INSERT INTO t1 SELECT row_number() over(), "should_have_NULL" FROM t1;
-INSERT INTO t1 SELECT 1 + row_number() over(), "should_have_2" FROM t1;
+INSERT INTO t1 SELECT row_number() over(), "should_not_add_any_rows" FROM t1;
+INSERT INTO t1 SELECT 1 + row_number() over(), "should_not_add_any_rows" FROM t1;
EXECUTE populate_table;
-INSERT INTO t1 SELECT 10 + row_number() over(), "should repeat 4 times [11-14]" FROM t1;
+INSERT INTO t1 SELECT 10 + row_number() over(), "should repeat 2 times [11-12]" FROM t1;
SELECT c1, c2 FROM t1 ORDER BY c2, c1;
diff --git a/mysys_ssl/my_crypt.cc b/mysys_ssl/my_crypt.cc
index 6ad9171bfbc..92f4012d175 100644
--- a/mysys_ssl/my_crypt.cc
+++ b/mysys_ssl/my_crypt.cc
@@ -43,7 +43,7 @@ public:
}
virtual ~MyCTX()
{
- EVP_CIPHER_CTX_cleanup(ctx);
+ EVP_CIPHER_CTX_reset(ctx);
ERR_remove_state(0);
}
diff --git a/mysys_ssl/my_md5.cc b/mysys_ssl/my_md5.cc
index 0105082b7e1..582c83d0522 100644
--- a/mysys_ssl/my_md5.cc
+++ b/mysys_ssl/my_md5.cc
@@ -71,7 +71,7 @@ static void md5_input(EVP_MD_CTX *context, const uchar *buf, unsigned len)
static void md5_result(EVP_MD_CTX *context, uchar digest[MD5_HASH_SIZE])
{
EVP_DigestFinal_ex(context, digest, NULL);
- EVP_MD_CTX_cleanup(context);
+ EVP_MD_CTX_reset(context);
}
#endif /* HAVE_YASSL */
diff --git a/mysys_ssl/openssl.c b/mysys_ssl/openssl.c
index 34f4d629ba6..3f841eec92f 100644
--- a/mysys_ssl/openssl.c
+++ b/mysys_ssl/openssl.c
@@ -50,7 +50,7 @@ int check_openssl_compatibility()
EVP_CIPHER_CTX *evp_ctx;
EVP_MD_CTX *md5_ctx;
- if (!CRYPTO_set_mem_functions(coc_malloc, CRYPTO_realloc, CRYPTO_free))
+ if (!CRYPTO_set_mem_functions(coc_malloc, NULL, NULL))
return 1;
testing= 1;
diff --git a/plugin/server_audit/server_audit.c b/plugin/server_audit/server_audit.c
index af6d2d95a42..2f6c8a8afda 100644
--- a/plugin/server_audit/server_audit.c
+++ b/plugin/server_audit/server_audit.c
@@ -15,7 +15,7 @@
#define PLUGIN_VERSION 0x104
-#define PLUGIN_STR_VERSION "1.4.1"
+#define PLUGIN_STR_VERSION "1.4.2"
#define _my_thread_var loc_thread_var
@@ -1087,6 +1087,7 @@ static void setup_connection_connect(struct connection_info *cn,
const struct mysql_event_connection *event)
{
cn->query_id= 0;
+ cn->query_length= 0;
cn->log_always= 0;
cn->thread_id= event->thread_id;
get_str_n(cn->db, &cn->db_length, sizeof(cn->db),
@@ -1128,6 +1129,7 @@ static void setup_connection_initdb(struct connection_info *cn,
cn->thread_id= event->general_thread_id;
cn->query_id= 0;
+ cn->query_length= 0;
cn->log_always= 0;
get_str_n(cn->db, &cn->db_length, sizeof(cn->db),
event->general_query, event->general_query_length);
@@ -1160,6 +1162,7 @@ static void setup_connection_table(struct connection_info *cn,
cn->thread_id= event->thread_id;
cn->query_id= query_counter++;
cn->log_always= 0;
+ cn->query_length= 0;
get_str_n(cn->db, &cn->db_length, sizeof(cn->db),
event->database, event->database_length);
get_str_n(cn->user, &cn->user_length, sizeof(cn->db),
@@ -1181,6 +1184,7 @@ static void setup_connection_query(struct connection_info *cn,
cn->thread_id= event->general_thread_id;
cn->query_id= query_counter++;
cn->log_always= 0;
+ cn->query_length= 0;
get_str_n(cn->db, &cn->db_length, sizeof(cn->db), "", 0);
if (get_user_host(event->general_user, event->general_user_length,
@@ -2005,6 +2009,7 @@ void auditing(MYSQL_THD thd, unsigned int event_class, const void *ev)
event_query_command(event))
{
log_statement(cn, event, "QUERY");
+ cn->query_length= 0; /* So the log_current_query() won't log this again. */
}
}
else if (event_class == MYSQL_AUDIT_TABLE_CLASS && FILTER(EVENT_TABLE) && cn)
@@ -2520,7 +2525,8 @@ static void log_current_query(MYSQL_THD thd)
if (!thd)
return;
cn= get_loc_info(thd);
- if (!ci_needs_setup(cn) && FILTER(EVENT_QUERY) && do_log_user(cn->user))
+ if (!ci_needs_setup(cn) && cn->query_length &&
+ FILTER(EVENT_QUERY) && do_log_user(cn->user))
{
log_statement_ex(cn, cn->query_time, thd_get_thread_id(thd),
cn->query, cn->query_length, 0, "QUERY");
diff --git a/scripts/wsrep_sst_rsync.sh b/scripts/wsrep_sst_rsync.sh
index 7acdf87b660..24720ff3587 100644
--- a/scripts/wsrep_sst_rsync.sh
+++ b/scripts/wsrep_sst_rsync.sh
@@ -68,7 +68,7 @@ check_pid_and_port()
local port_info="$(sockstat -46lp ${rsync_port} 2>/dev/null | \
grep ":${rsync_port}")"
local is_rsync="$(echo $port_info | \
- grep -w '[[:space:]]\+rsync[[:space:]]\+'"$rsync_pid" 2>/dev/null)"
+ grep '[[:space:]]\+rsync[[:space:]]\+'"$rsync_pid" 2>/dev/null)"
;;
*)
if ! which lsof > /dev/null; then
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index 9522d589945..0940b394d54 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -50,24 +50,13 @@ ${CMAKE_CURRENT_BINARY_DIR}/sql_yacc.h
${CMAKE_CURRENT_BINARY_DIR}/sql_yacc.cc
${CMAKE_CURRENT_BINARY_DIR}/sql_yacc_ora.h
${CMAKE_CURRENT_BINARY_DIR}/sql_yacc_ora.cc
-${CMAKE_CURRENT_BINARY_DIR}/lex_hash.h
-)
-SET(GEN_DIGEST_SOURCES
- ${CMAKE_CURRENT_BINARY_DIR}/lex_token.h
+${CMAKE_CURRENT_BINARY_DIR}/lex_hash.h
+${CMAKE_CURRENT_BINARY_DIR}/lex_token.h
)
SET_SOURCE_FILES_PROPERTIES(${GEN_SOURCES}
- ${GEN_DIGEST_SOURCES}
PROPERTIES GENERATED 1)
-# Gen_lex_token
-# Make sure sql_yacc.h is generated before compiling gen_lex_token
-
-IF(NOT CMAKE_GENERATOR MATCHES "Visual Studio")
- SET(DEPENDS_gen_lex_token DEPENDS gen_lex_token)
- SET(DEPENDS_gen_lex_hash DEPENDS gen_lex_hash)
-ENDIF()
-
IF(NOT CMAKE_CROSSCOMPILING)
ADD_EXECUTABLE(gen_lex_token gen_lex_token.cc
@@ -77,7 +66,7 @@ ENDIF()
ADD_CUSTOM_COMMAND(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/lex_token.h
COMMAND gen_lex_token > lex_token.h
- ${DEPENDS_gen_lex_token}
+ DEPENDS gen_lex_token
)
ADD_DEFINITIONS(-DMYSQL_SERVER -DHAVE_EVENT_SCHEDULER)
@@ -158,7 +147,6 @@ SET (SQL_SOURCE
proxy_protocol.cc
${CMAKE_CURRENT_BINARY_DIR}/sql_builtin.cc
${GEN_SOURCES}
- ${GEN_DIGEST_SOURCES}
${MYSYS_LIBWRAP_SOURCE}
)
@@ -181,7 +169,6 @@ RECOMPILE_FOR_EMBEDDED)
ADD_LIBRARY(sql STATIC ${SQL_SOURCE})
ADD_DEPENDENCIES(sql GenServerSource)
-ADD_DEPENDENCIES(sql GenDigestServerSource)
DTRACE_INSTRUMENT(sql)
TARGET_LINK_LIBRARIES(sql ${MYSQLD_STATIC_PLUGIN_LIBS}
mysys mysys_ssl dbug strings vio pcre
@@ -220,7 +207,7 @@ IF(MSVC AND NOT WITHOUT_DYNAMIC_PLUGINS)
SET(MYSQLD_DEF ${CMAKE_CURRENT_BINARY_DIR}/mysqld_lib.def)
SET(MYSQLD_EXP ${CMAKE_CURRENT_BINARY_DIR}/mysqld_lib.exp)
SET(MYSQLD_LIB ${CMAKE_CURRENT_BINARY_DIR}/mysqld_lib.lib)
- SET(MYSQLD_CORELIBS sql mysys mysys_ssl dbug strings)
+ SET(MYSQLD_CORELIBS sql mysys dbug strings)
FOREACH (CORELIB ${MYSQLD_CORELIBS})
GET_TARGET_PROPERTY(LOC ${CORELIB} LOCATION)
FILE(TO_NATIVE_PATH ${LOC} LOC)
@@ -231,6 +218,29 @@ IF(MSVC AND NOT WITHOUT_DYNAMIC_PLUGINS)
IF(CMAKE_SIZEOF_VOID_P EQUAL 8)
SET(_PLATFORM x64)
ENDIF()
+ # Create a cmake script to generate import and export libs
+ # from a .def file
+ SET(CMAKE_CONFIGURABLE_FILE_CONTENT "
+ IF ((mysqld_lib.def IS_NEWER_THAN mysqld_lib.lib) OR
+ (mysqld_lib.def IS_NEWER_THAN mysqld_lib.exp))
+ FILE(REMOVE mysqld_lib.lib mysqld_lib.exp)
+ SET(ENV{VS_UNICODE_OUTPUT})
+ EXECUTE_PROCESS (
+ COMMAND \"${CMAKE_LINKER}\" /lib /NAME:mysqld.exe \"/DEF:${MYSQLD_DEF}\" /MACHINE:${_PLATFORM}
+ RESULT_VARIABLE ret)
+ IF(NOT ret EQUAL 0)
+ MESSAGE(FATAL_ERROR \"process failed ret=\${ret}\")
+ ENDIF()
+ ENDIF()
+ ")
+
+ CONFIGURE_FILE(
+ ${PROJECT_SOURCE_DIR}/cmake/configurable_file_content.in
+ make_mysqld_lib.cmake)
+
+ IF(CMAKE_VERSION VERSION_GREATER "3.2.0")
+ SET(MYSQLD_LIB_BYPRODUCTS BYPRODUCTS ${MYSQLD_DEF} ${MYSQLD_LIB} ${MYSQLD_EXP})
+ ENDIF()
# Create a cmake script to generate import and export libs
# from a .def file
@@ -252,16 +262,20 @@ IF(MSVC AND NOT WITHOUT_DYNAMIC_PLUGINS)
make_mysqld_lib.cmake)
ADD_CUSTOM_COMMAND(
- OUTPUT ${MYSQLD_DEF} ${MYSQLD_LIB} ${MYSQLD_EXP}
- COMMAND cscript ARGS //nologo ${PROJECT_SOURCE_DIR}/win/create_def_file.js
- ${_PLATFORM} /forLib ${LIB_LOCATIONS} > mysqld_lib.def.tmp
+ OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/mysqld_lib.stamp
+ ${MYSQLD_LIB_BYPRODUCTS}
+ COMMENT "Generating mysqld_lib.def, mysqld_lib.lib, mysqld_lib.exp"
+ COMMAND cscript //nologo ${PROJECT_SOURCE_DIR}/win/create_def_file.js
+ ${_PLATFORM} /forLib ${LIB_LOCATIONS} > mysqld_lib.def.tmp
COMMAND ${CMAKE_COMMAND} -E copy_if_different mysqld_lib.def.tmp mysqld_lib.def
+ COMMAND ${CMAKE_COMMAND} -E remove mysqld_lib.def.tmp
COMMAND ${CMAKE_COMMAND} -P make_mysqld_lib.cmake
+ COMMAND ${CMAKE_COMMAND} -E touch mysqld_lib.stamp
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
DEPENDS ${MYSQLD_CORELIBS}
)
- ADD_CUSTOM_TARGET(gen_mysqld_lib DEPENDS ${MYSQLD_LIB})
+ ADD_CUSTOM_TARGET(gen_mysqld_lib DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/mysqld_lib.stamp)
ADD_LIBRARY(mysqld_import_lib UNKNOWN IMPORTED GLOBAL)
SET_TARGET_PROPERTIES(mysqld_import_lib PROPERTIES IMPORTED_LOCATION ${MYSQLD_LIB})
ENDIF()
@@ -280,7 +294,6 @@ IF(NOT WITHOUT_DYNAMIC_PLUGINS)
IF(NOT MSVC)
SET_TARGET_PROPERTIES(mysqld PROPERTIES ENABLE_EXPORTS TRUE)
ENDIF()
-
GET_TARGET_PROPERTY(mysqld_link_flags mysqld LINK_FLAGS)
IF(NOT mysqld_link_flags)
SET(mysqld_link_flags)
@@ -319,10 +332,6 @@ IF(WITH_MYSQLD_LDFLAGS)
SET_TARGET_PROPERTIES(mysqld PROPERTIES LINK_FLAGS
"${MYSQLD_LINK_FLAGS} ${WITH_MYSQLD_LDFLAGS}")
ENDIF()
-INSTALL_DEBUG_TARGET(mysqld
- DESTINATION ${INSTALL_SBINDIR}
- PDB_DESTINATION ${INSTALL_SBINDIR}/debug
- RENAME mysqld-debug)
INCLUDE(${CMAKE_SOURCE_DIR}/cmake/bison.cmake)
@@ -364,7 +373,7 @@ ENDIF()
ADD_CUSTOM_COMMAND(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/lex_hash.h
COMMAND gen_lex_hash > lex_hash.h
- ${DEPENDS_gen_lex_hash}
+ DEPENDS gen_lex_hash
)
MYSQL_ADD_EXECUTABLE(mysql_tzinfo_to_sql tztime.cc COMPONENT Server)
@@ -376,11 +385,6 @@ ADD_CUSTOM_TARGET(
DEPENDS ${GEN_SOURCES}
)
-ADD_CUSTOM_TARGET(
- GenDigestServerSource
- DEPENDS ${GEN_DIGEST_SOURCES}
-)
-
#Need this only for embedded
SET_TARGET_PROPERTIES(GenServerSource PROPERTIES EXCLUDE_FROM_ALL TRUE)
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index c8aa09c23b7..0402908c640 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -345,6 +345,10 @@ public:
virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share);
virtual bool check_if_incompatible_data(HA_CREATE_INFO *create_info,
uint table_changes);
+ void update_part_create_info(HA_CREATE_INFO *create_info, uint part_id)
+ {
+ m_file[part_id]->update_create_info(create_info);
+ }
private:
int copy_partitions(ulonglong * const copied, ulonglong * const deleted);
void cleanup_new_partition(uint part_count);
diff --git a/sql/handler.cc b/sql/handler.cc
index e5c15524e1d..bcb68ca0695 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -1681,7 +1681,7 @@ int ha_rollback_trans(THD *thd, bool all)
my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err);
error=1;
#ifdef WITH_WSREP
- WSREP_WARN("handlerton rollback failed, thd %lu %lld conf %d SQL %s",
+ WSREP_WARN("handlerton rollback failed, thd %llu %lld conf %d SQL %s",
thd->thread_id, thd->query_id, thd->wsrep_conflict_state,
thd->query());
#endif /* WITH_WSREP */
diff --git a/sql/handler.h b/sql/handler.h
index 86287596ca3..5e0adfca072 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1890,10 +1890,6 @@ public:
// Add non-unique, non-primary index
static const HA_ALTER_FLAGS ADD_INDEX = 1ULL << 0;
- //
- // Adds a spatial index. At the moment all engines treat it
- // identically to the ADD_INDEX, so it gets the same code
- static const HA_ALTER_FLAGS ADD_SPATIAL_INDEX = ADD_INDEX;
// Drop non-unique, non-primary index
static const HA_ALTER_FLAGS DROP_INDEX = 1ULL << 1;
diff --git a/sql/item.cc b/sql/item.cc
index 58b3c82ff14..8e77a731400 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -2061,7 +2061,10 @@ bool Item_name_const::fix_fields(THD *thd, Item **ref)
set_name(thd, item_name->c_ptr(), (uint) item_name->length(),
system_charset_info);
}
- collation.set(value_item->collation.collation, DERIVATION_IMPLICIT);
+ if (value_item->collation.derivation == DERIVATION_NUMERIC)
+ collation.set_numeric();
+ else
+ collation.set(value_item->collation.collation, DERIVATION_IMPLICIT);
max_length= value_item->max_length;
decimals= value_item->decimals;
fixed= 1;
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 89a7cab6ac2..d6cd57770cb 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -4540,11 +4540,11 @@ Item_cond::fix_fields(THD *thd, Item **ref)
const_item_cache= FALSE;
}
- with_sum_func= with_sum_func || item->with_sum_func;
- with_field= with_field || item->with_field;
- with_subselect|= item->has_subquery();
- if (item->maybe_null)
- maybe_null=1;
+ with_sum_func|= item->with_sum_func;
+ with_field|= item->with_field;
+ with_subselect|= item->has_subquery();
+ with_window_func|= item->with_window_func;
+ maybe_null|= item->maybe_null;
}
fix_length_and_dec();
fixed= 1;
diff --git a/sql/item_func.cc b/sql/item_func.cc
index a3b2027f2fc..2af7338b3c6 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -560,7 +560,8 @@ void Item_func::print_op(String *str, enum_query_type query_type)
str->append(func_name());
str->append(' ');
}
- args[arg_count-1]->print_parenthesised(str, query_type, precedence());
+ args[arg_count-1]->print_parenthesised(str, query_type,
+ (enum precedence)(precedence() + 1));
}
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 190a927ba03..cb05516964c 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -2809,20 +2809,6 @@ String *Item_func_format::val_str_ascii(String *str)
}
-void Item_func_format::print(String *str, enum_query_type query_type)
-{
- str->append(STRING_WITH_LEN("format("));
- args[0]->print(str, query_type);
- str->append(',');
- args[1]->print(str, query_type);
- if(arg_count > 2)
- {
- str->append(',');
- args[2]->print(str,query_type);
- }
- str->append(')');
-}
-
void Item_func_elt::fix_length_and_dec()
{
uint32 char_length= 0;
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index f9b6f96fb9a..685a64816be 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -92,7 +92,7 @@ public:
{
return val_str_from_val_str_ascii(str, &ascii_buf);
}
- virtual String *val_str_ascii(String *)= 0;
+ String *val_str_ascii(String *)= 0;
};
@@ -559,7 +559,7 @@ public:
String *val_str(String *);
void fix_length_and_dec();
const char *func_name() const { return "trim"; }
- virtual void print(String *str, enum_query_type query_type);
+ void print(String *str, enum_query_type query_type);
virtual const char *mode_name() const { return "both"; }
Item *get_copy(THD *thd, MEM_ROOT *mem_root)
{ return get_item_copy<Item_func_trim>(thd, mem_root, this); }
@@ -943,7 +943,6 @@ public:
String *val_str_ascii(String *);
void fix_length_and_dec();
const char *func_name() const { return "format"; }
- virtual void print(String *str, enum_query_type query_type);
Item *get_copy(THD *thd, MEM_ROOT *mem_root)
{ return get_item_copy<Item_func_format>(thd, mem_root, this); }
};
@@ -1022,7 +1021,6 @@ public:
const char *func_name() const { return "binlog_gtid_pos"; }
bool check_vcol_func_processor(void *arg)
{
-
return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE);
}
Item *get_copy(THD *thd, MEM_ROOT *mem_root)
@@ -1207,7 +1205,7 @@ public:
collation.set(&my_charset_bin);
max_length=args[0]->max_length;
}
- virtual void print(String *str, enum_query_type query_type);
+ void print(String *str, enum_query_type query_type);
const char *func_name() const { return "cast_as_binary"; }
bool need_parentheses_in_default() { return true; }
Item *get_copy(THD *thd, MEM_ROOT *mem_root)
@@ -1351,7 +1349,7 @@ public:
}
void fix_length_and_dec();
const char *func_name() const { return "convert"; }
- virtual void print(String *str, enum_query_type query_type);
+ void print(String *str, enum_query_type query_type);
Item *get_copy(THD *thd, MEM_ROOT *mem_root)
{ return get_item_copy<Item_func_conv_charset>(thd, mem_root, this); }
};
@@ -1367,7 +1365,7 @@ public:
const char *func_name() const { return "collate"; }
enum precedence precedence() const { return COLLATE_PRECEDENCE; }
enum Functype functype() const { return COLLATE_FUNC; }
- virtual void print(String *str, enum_query_type query_type);
+ void print(String *str, enum_query_type query_type);
Item_field *field_for_view_update()
{
/* this function is transparent for view updating */
@@ -1549,8 +1547,8 @@ public:
void fix_length_and_dec();
const char *func_name() const{ return "column_create"; }
String *val_str(String *);
- virtual void print(String *str, enum_query_type query_type);
- virtual enum Functype functype() const { return DYNCOL_FUNC; }
+ void print(String *str, enum_query_type query_type);
+ enum Functype functype() const { return DYNCOL_FUNC; }
Item *get_copy(THD *thd, MEM_ROOT *mem_root)
{ return get_item_copy<Item_func_dyncol_create>(thd, mem_root, this); }
};
@@ -1564,7 +1562,7 @@ public:
{}
const char *func_name() const{ return "column_add"; }
String *val_str(String *);
- virtual void print(String *str, enum_query_type query_type);
+ void print(String *str, enum_query_type query_type);
Item *get_copy(THD *thd, MEM_ROOT *mem_root)
{ return get_item_copy<Item_func_dyncol_add>(thd, mem_root, this); }
};
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 1ad55cf9f7a..755ed7d302b 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -100,7 +100,11 @@ bool Item_sum::init_sum_func_check(THD *thd)
The method verifies whether context conditions imposed on a usage
of any set function are met for this occurrence.
- It checks whether the set function occurs in the position where it
+
+ It first checks whether any window functions are used as arguments
+ to the set function; if so, an error is returned.
+
+ Afterwards, it checks whether the set function occurs in the position where it
can be aggregated and, when it happens to occur in argument of another
set function, the method checks that these two functions are aggregated in
different subqueries.
@@ -151,6 +155,20 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref)
curr_sel->name_visibility_map);
bool invalid= FALSE;
DBUG_ASSERT(curr_sel->name_visibility_map); // should be set already
+
+ /*
+ Window functions cannot be used as arguments to sum functions.
+ Aggregation happens before window function computation, so there
+ are no values to aggregate over.
+ */
+ if (with_window_func)
+ {
+ my_message(ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG,
+ ER_THD(thd, ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG),
+ MYF(0));
+ return TRUE;
+ }
+
if (window_func_sum_expr_flag)
return false;
/*
@@ -1109,6 +1127,7 @@ Item_sum_num::fix_fields(THD *thd, Item **ref)
return TRUE;
set_if_bigger(decimals, args[i]->decimals);
with_subselect|= args[i]->with_subselect;
+ with_window_func|= args[i]->with_window_func;
}
result_field=0;
max_length=float_length(decimals);
@@ -1139,6 +1158,7 @@ Item_sum_hybrid::fix_fields(THD *thd, Item **ref)
(item= args[0])->check_cols(1))
return TRUE;
with_subselect= args[0]->with_subselect;
+ with_window_func|= args[0]->with_window_func;
fix_length_and_dec();
if (!is_window_func_sum_expr())
@@ -3416,6 +3436,7 @@ Item_func_group_concat::fix_fields(THD *thd, Item **ref)
args[i]->check_cols(1))
return TRUE;
with_subselect|= args[i]->with_subselect;
+ with_window_func|= args[i]->with_window_func;
}
/* skip charset aggregation for order columns */
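A minimal SQL sketch of the restriction the check_sum_func() hunk above enforces; the scores table and its data are illustrative only, not part of the patch:

  CREATE TABLE scores (grp INT, val INT);
  INSERT INTO scores VALUES (1, 10), (1, 20), (2, 30);

  -- Expected to fail with ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG, since the
  -- window function has no computed values yet at aggregation time:
  SELECT SUM(ROW_NUMBER() OVER (ORDER BY val)) FROM scores;

  -- Materializing the window function in a derived table first keeps the
  -- query valid:
  SELECT SUM(rn)
  FROM (SELECT ROW_NUMBER() OVER (ORDER BY val) AS rn FROM scores) dt;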
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index c05f3f5c7be..0657d483408 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -477,14 +477,14 @@ static bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time,
{
switch (*++ptr) {
case 'M':
- if (!l_time->month)
+ if (type == MYSQL_TIMESTAMP_TIME || !l_time->month)
return 1;
str->append(locale->month_names->type_names[l_time->month-1],
(uint) strlen(locale->month_names->type_names[l_time->month-1]),
system_charset_info);
break;
case 'b':
- if (!l_time->month)
+ if (type == MYSQL_TIMESTAMP_TIME || !l_time->month)
return 1;
str->append(locale->ab_month_names->type_names[l_time->month-1],
(uint) strlen(locale->ab_month_names->type_names[l_time->month-1]),
@@ -534,26 +534,38 @@ static bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time,
}
break;
case 'Y':
+ if (type == MYSQL_TIMESTAMP_TIME)
+ return 1;
length= (uint) (int10_to_str(l_time->year, intbuff, 10) - intbuff);
str->append_with_prefill(intbuff, length, 4, '0');
break;
case 'y':
+ if (type == MYSQL_TIMESTAMP_TIME)
+ return 1;
length= (uint) (int10_to_str(l_time->year%100, intbuff, 10) - intbuff);
str->append_with_prefill(intbuff, length, 2, '0');
break;
case 'm':
+ if (type == MYSQL_TIMESTAMP_TIME)
+ return 1;
length= (uint) (int10_to_str(l_time->month, intbuff, 10) - intbuff);
str->append_with_prefill(intbuff, length, 2, '0');
break;
case 'c':
+ if (type == MYSQL_TIMESTAMP_TIME)
+ return 1;
length= (uint) (int10_to_str(l_time->month, intbuff, 10) - intbuff);
str->append_with_prefill(intbuff, length, 1, '0');
break;
case 'd':
+ if (type == MYSQL_TIMESTAMP_TIME)
+ return 1;
length= (uint) (int10_to_str(l_time->day, intbuff, 10) - intbuff);
str->append_with_prefill(intbuff, length, 2, '0');
break;
case 'e':
+ if (type == MYSQL_TIMESTAMP_TIME)
+ return 1;
length= (uint) (int10_to_str(l_time->day, intbuff, 10) - intbuff);
str->append_with_prefill(intbuff, length, 1, '0');
break;
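A short SQL sketch of the make_date_time() change above, assuming the usual NULL-on-error handling when a %-specifier fails; the literal values are illustrative:

  SELECT TIME_FORMAT('11:22:33', '%H:%i');     -- still returns '11:22'
  SELECT TIME_FORMAT('11:22:33', '%Y-%m-%d');  -- date-only specifiers applied
                                               -- to a TIME value are now
                                               -- expected to yield NULL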
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index 4cdf7b63158..a752c160557 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -188,7 +188,6 @@ public:
}
bool check_vcol_func_processor(void *arg)
{
-
return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC);
}
Item *get_copy(THD *thd, MEM_ROOT *mem_root)
@@ -673,8 +672,7 @@ public:
virtual void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)=0;
bool check_vcol_func_processor(void *arg)
{
- return mark_unsupported_function(func_name(), "()", arg,
- VCOL_TIME_FUNC);
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_TIME_FUNC);
}
};
@@ -813,6 +811,8 @@ public:
bool eq(const Item *item, bool binary_cmp) const;
bool check_vcol_func_processor(void *arg)
{
+ if (is_time_format)
+ return false;
return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC);
}
Item *get_copy(THD *thd, MEM_ROOT *mem_root)
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 2fc5e3c27a5..e7240ad2cce 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -8215,7 +8215,7 @@ static int show_ssl_get_cipher_list(THD *thd, SHOW_VAR *var, char *buff,
#ifdef HAVE_YASSL
static char *
-my_asn1_time_to_string(ASN1_TIME *time, char *buf, size_t len)
+my_asn1_time_to_string(const ASN1_TIME *time, char *buf, size_t len)
{
return yaSSL_ASN1_TIME_to_string(time, buf, len);
}
@@ -8223,7 +8223,7 @@ my_asn1_time_to_string(ASN1_TIME *time, char *buf, size_t len)
#else /* openssl */
static char *
-my_asn1_time_to_string(ASN1_TIME *time, char *buf, size_t len)
+my_asn1_time_to_string(const ASN1_TIME *time, char *buf, size_t len)
{
int n_read;
char *res= NULL;
@@ -8271,7 +8271,7 @@ show_ssl_get_server_not_before(THD *thd, SHOW_VAR *var, char *buff,
{
SSL *ssl= (SSL*) thd->net.vio->ssl_arg;
X509 *cert= SSL_get_certificate(ssl);
- ASN1_TIME *not_before= X509_get_notBefore(cert);
+ const ASN1_TIME *not_before= X509_get0_notBefore(cert);
var->value= my_asn1_time_to_string(not_before, buff,
SHOW_VAR_FUNC_BUFF_SIZE);
@@ -8305,7 +8305,7 @@ show_ssl_get_server_not_after(THD *thd, SHOW_VAR *var, char *buff,
{
SSL *ssl= (SSL*) thd->net.vio->ssl_arg;
X509 *cert= SSL_get_certificate(ssl);
- ASN1_TIME *not_after= X509_get_notAfter(cert);
+ const ASN1_TIME *not_after= X509_get0_notAfter(cert);
var->value= my_asn1_time_to_string(not_after, buff,
SHOW_VAR_FUNC_BUFF_SIZE);
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index d0fb0ee1772..0204121c553 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -7740,6 +7740,9 @@ ER_RDB_TTL_DURATION_FORMAT
ER_PER_INDEX_CF_DEPRECATED
eng "The per-index column family option has been deprecated"
+ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+ eng "Window functions can not be used as arguments to group functions."
+
ER_ILLEGAL_PARAMETER_DATA_TYPES2_FOR_OPERATION
eng "Illegal parameter data types %s and %s for operation '%s'"
ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 92f28a4dc07..9cb69f2aa9a 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -5615,7 +5615,6 @@ user_var_entry *get_variable(HASH *hash, LEX_CSTRING *name,
bool create_if_not_exists);
class SORT_INFO;
-
class multi_delete :public select_result_interceptor
{
TABLE_LIST *delete_tables, *table_being_deleted;
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 2614178b622..9fc67272bfa 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -4355,10 +4355,6 @@ bool compare_partition_options(HA_CREATE_INFO *table_create_info,
option_diffs[errors++]= "MAX_ROWS";
if (part_elem->part_min_rows != table_create_info->min_rows)
option_diffs[errors++]= "MIN_ROWS";
- if (part_elem->data_file_name || table_create_info->data_file_name)
- option_diffs[errors++]= "DATA DIRECTORY";
- if (part_elem->index_file_name || table_create_info->index_file_name)
- option_diffs[errors++]= "INDEX DIRECTORY";
for (i= 0; i < errors; i++)
my_error(ER_PARTITION_EXCHANGE_DIFFERENT_OPTION, MYF(0),
diff --git a/sql/sql_partition_admin.cc b/sql/sql_partition_admin.cc
index 0512e712102..0c1c9fb02de 100644
--- a/sql/sql_partition_admin.cc
+++ b/sql/sql_partition_admin.cc
@@ -179,7 +179,8 @@ static bool check_exchange_partition(TABLE *table, TABLE *part_table)
*/
static bool compare_table_with_partition(THD *thd, TABLE *table,
TABLE *part_table,
- partition_element *part_elem)
+ partition_element *part_elem,
+ uint part_id)
{
HA_CREATE_INFO table_create_info, part_create_info;
Alter_info part_alter_info;
@@ -204,6 +205,7 @@ static bool compare_table_with_partition(THD *thd, TABLE *table,
}
/* db_type is not set in prepare_alter_table */
part_create_info.db_type= part_table->part_info->default_engine_type;
+ ((ha_partition*)(part_table->file))->update_part_create_info(&part_create_info, part_id);
/*
Since we exchange the partition with the table, allow exchanging
auto_increment value as well.
@@ -588,7 +590,8 @@ bool Sql_cmd_alter_table_exchange_partition::
DBUG_RETURN(TRUE);
}
- if (compare_table_with_partition(thd, swap_table, part_table, part_elem))
+ if (compare_table_with_partition(thd, swap_table, part_table, part_elem,
+ swap_part_id))
DBUG_RETURN(TRUE);
/* Table and partition has same structure/options, OK to exchange */
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 52544130918..ba3d2e7da2f 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -19763,7 +19763,6 @@ bool test_if_use_dynamic_range_scan(JOIN_TAB *join_tab)
int join_init_read_record(JOIN_TAB *tab)
{
- int error;
/*
Note: the query plan tree for the below operations is constructed in
save_agg_explain_data.
@@ -19773,12 +19772,18 @@ int join_init_read_record(JOIN_TAB *tab)
if (tab->filesort && tab->sort_table()) // Sort table.
return 1;
- if (tab->select && tab->select->quick && (error= tab->select->quick->reset()))
+ DBUG_EXECUTE_IF("kill_join_init_read_record",
+ tab->join->thd->set_killed(KILL_QUERY););
+ if (tab->select && tab->select->quick && tab->select->quick->reset())
{
- /* Ensures error status is propageted back to client */
- report_error(tab->table, error);
+ /* Ensures error status is propagated back to client */
+ report_error(tab->table,
+ tab->join->thd->killed ? HA_ERR_QUERY_INTERRUPTED : HA_ERR_OUT_OF_MEM);
return 1;
}
+ /* make sure we won't get ER_QUERY_INTERRUPTED from any code below */
+ DBUG_EXECUTE_IF("kill_join_init_read_record",
+ tab->join->thd->reset_killed(););
if (!tab->preread_init_done && tab->preread_init())
return 1;
if (init_read_record(&tab->read_record, tab->join->thd, tab->table,
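A sketch of how the kill_join_init_read_record injection point above is typically exercised on a debug build; the table name t_idx and the predicate are illustrative, assuming an index on k so that a range (quick) select is chosen:

  SET debug_dbug='+d,kill_join_init_read_record';
  SELECT * FROM t_idx WHERE k BETWEEN 1 AND 10;  -- expected to fail with
                                                 -- ER_QUERY_INTERRUPTED rather
                                                 -- than an out-of-memory error
  SET debug_dbug='';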
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 4006e3aec4d..9c746e470f7 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -7924,7 +7924,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
}
}
/* see if the constraint depends on *only* on dropped fields */
- if (dropped_fields)
+ if (!drop && dropped_fields)
{
table->default_column_bitmaps();
bitmap_clear_all(table->read_set);
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 02a5a4a15e6..3f39765b531 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -2311,6 +2311,26 @@ int multi_update::do_updates()
do_update= 0; // Don't retry this function
if (!found)
DBUG_RETURN(0);
+
+ /*
+ Update read_set to include all fields that virtual columns may depend on.
+ Usually they're already in the read_set, but if the previous access
+ method was keyread, only the virtual column itself will be in read_set,
+ not its dependencies.
+ */
+ while(TABLE *tbl= check_opt_it++)
+ {
+ if (tbl->vcol_set)
+ {
+ bitmap_clear_all(tbl->vcol_set);
+ for (Field **vf= tbl->vfield; *vf; vf++)
+ {
+ if (bitmap_is_set(tbl->read_set, (*vf)->field_index))
+ tbl->mark_virtual_col(*vf);
+ }
+ }
+ }
+
for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
{
bool can_compare_record;
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index a159fc87aa9..b74c12bb563 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -61,6 +61,7 @@
#include "sql_repl.h"
#include "opt_range.h"
#include "rpl_parallel.h"
+#include <ssl_compat.h>
/*
The rule for this file: everything should be 'static'. When a sys_var
@@ -3578,16 +3579,6 @@ static Sys_var_charptr Sys_malloc_library(
READ_ONLY GLOBAL_VAR(malloc_library), CMD_LINE_HELP_ONLY,
IN_SYSTEM_CHARSET, DEFAULT(guess_malloc_library()));
-#ifdef HAVE_YASSL
-#include <openssl/ssl.h>
-#define SSL_LIBRARY "YaSSL " YASSL_VERSION
-#elif HAVE_OPENSSL
-#include <openssl/crypto.h>
-#define SSL_LIBRARY SSLeay_version(SSLEAY_VERSION)
-#else
-#error No SSL?
-#endif
-
static char *ssl_library;
static Sys_var_charptr Sys_ssl_library(
"version_ssl_library", "Version of the used SSL library",
diff --git a/sql/table.cc b/sql/table.cc
index 1ef3af422be..eb9e848d549 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -5078,7 +5078,11 @@ int TABLE_LIST::view_check_option(THD *thd, bool ignore_failure)
name_db, name_table);
return ignore_failure ? VIEW_CHECK_SKIP : VIEW_CHECK_ERROR;
}
- return table->verify_constraints(ignore_failure);
+ int result= table->verify_constraints(ignore_failure);
+ /* We check thd->is_error() because it can be set by a conversion problem. */
+ if (thd->is_error())
+ return(VIEW_CHECK_ERROR);
+ return result;
}
@@ -5090,7 +5094,8 @@ int TABLE::verify_constraints(bool ignore_failure)
{
for (Virtual_column_info **chk= check_constraints ; *chk ; chk++)
{
- if ((*chk)->expr->val_int() == 0)
+ /* yes! NULL is ok, see 4.23.3.4 Table check constraints, part 2, SQL:2016 */
+ if ((*chk)->expr->val_int() == 0 && !(*chk)->expr->null_value)
{
my_error(ER_CONSTRAINT_FAILED,
MYF(ignore_failure ? ME_JUST_WARNING : 0), (*chk)->name.str,
@@ -5099,7 +5104,7 @@ int TABLE::verify_constraints(bool ignore_failure)
}
}
}
- return VIEW_CHECK_OK;
+ return(VIEW_CHECK_OK);
}
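A small SQL sketch of the SQL:2016 semantics adopted by the verify_constraints() change above; the table name t_chk is illustrative:

  CREATE TABLE t_chk (a INT, CONSTRAINT a_pos CHECK (a > 0));
  INSERT INTO t_chk VALUES (1);     -- accepted: the condition is TRUE
  INSERT INTO t_chk VALUES (NULL);  -- now accepted: the condition is UNKNOWN,
                                    -- which no longer counts as a violation
  INSERT INTO t_chk VALUES (-1);    -- still rejected with ER_CONSTRAINT_FAILED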
@@ -5766,9 +5771,10 @@ Item *create_view_field(THD *thd, TABLE_LIST *view, Item **field_ref,
{
DBUG_RETURN(field);
}
+ Name_resolution_context *context= view->view ? &view->view->select_lex.context :
+ &thd->lex->select_lex.context;
Item *item= (new (thd->mem_root)
- Item_direct_view_ref(thd, &view->view->select_lex.context,
- field_ref, view->alias,
+ Item_direct_view_ref(thd, context, field_ref, view->alias,
name, view));
/*
Force creation of nullable item for the result tmp table for outer joined
diff --git a/sql/uniques.cc b/sql/uniques.cc
index fc6437a2f64..e7dc40a5a5f 100644
--- a/sql/uniques.cc
+++ b/sql/uniques.cc
@@ -101,6 +101,9 @@ Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
*/
max_elements= (ulong) (max_in_memory_size /
ALIGN_SIZE(sizeof(TREE_ELEMENT)+size));
+ if (!max_elements)
+ max_elements= 1;
+
(void) open_cached_file(&file, mysql_tmpdir,TEMP_PREFIX, DISK_BUFFER_SIZE,
MYF(MY_WME));
}
@@ -646,11 +649,12 @@ bool Unique::walk(TABLE *table, tree_walk_action action, void *walk_action_arg)
if (flush_io_cache(&file) || reinit_io_cache(&file, READ_CACHE, 0L, 0, 0))
return 1;
/*
- merge_buffer must fit at least MERGEBUFF2 keys, because
- merge_index() can merge that many BUFFPEKs at once.
+ merge_buffer must fit at least MERGEBUFF2 + 1 keys, because
+ merge_index() can merge that many BUFFPEKs at once. The extra space for one key
+ is needed when a piece of the merge buffer is re-read; see merge_walk().
*/
- size_t buff_sz= MY_MAX(MERGEBUFF2, max_in_memory_size/full_size+1) * full_size;
- if (!(merge_buffer = (uchar *)my_malloc(buff_sz, MYF(MY_THREAD_SPECIFIC|MY_WME))))
+ size_t buff_sz= MY_MAX(MERGEBUFF2+1, max_in_memory_size/full_size+1) * full_size;
+ if (!(merge_buffer = (uchar *)my_malloc(buff_sz, MYF(MY_WME))))
return 1;
if (buff_sz < full_size * (file_ptrs.elements + 1UL))
res= merge(table, merge_buffer, buff_sz >= full_size * MERGEBUFF2) ;
@@ -708,8 +712,8 @@ bool Unique::merge(TABLE *table, uchar *buff, bool without_last_merge)
full_size;
sort_param.min_dupl_count= min_dupl_count;
sort_param.res_length= 0;
- sort_param.max_keys_per_buffer=
- (uint) (max_in_memory_size / sort_param.sort_length);
+ sort_param.max_keys_per_buffer=
+ (uint) MY_MAX((max_in_memory_size / sort_param.sort_length), MERGEBUFF2);
sort_param.not_killable= 1;
sort_param.unique_buff= buff +(sort_param.max_keys_per_buffer *
diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc
index 35bf1b251e7..9c00f9fdaf6 100644
--- a/sql/wsrep_sst.cc
+++ b/sql/wsrep_sst.cc
@@ -433,7 +433,7 @@ static int generate_binlog_opt_val(char** ret)
{
assert(opt_bin_logname);
*ret= strcmp(opt_bin_logname, "0") ?
- my_strdup(opt_bin_logname, MYF(0)) : my_strdup("", MYF(0));
+ my_strdup(opt_bin_logname, MYF(0)) : my_strdup("", MYF(0));
}
else
{
diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt
index 2fabd12dcd0..a1594f8797c 100644
--- a/storage/connect/CMakeLists.txt
+++ b/storage/connect/CMakeLists.txt
@@ -245,7 +245,7 @@ int main() {
ENDIF(CONNECT_WITH_ODBC)
#
-# JDBC and MongoDB Java Driver
+# JDBC with MongoDB Java Driver included but disabled
#
OPTION(CONNECT_WITH_MONGO "Compile CONNECT storage engine with MONGO support" ON)
OPTION(CONNECT_WITH_JDBC "Compile CONNECT storage engine with JDBC support" ON)
@@ -265,13 +265,13 @@ IF(CONNECT_WITH_JDBC)
MysqlInterface.java OracleInterface.java PostgresqlInterface.java
JavaWrappers.jar)
add_definitions(-DJDBC_SUPPORT)
- IF(CONNECT_WITH_MONGO)
+ IF(CONNECT_WITH_MONGO)
SET(CONNECT_SOURCES ${CONNECT_SOURCES}
jmgfam.cpp jmgoconn.cpp mongo.cpp tabjmg.cpp
jmgfam.h jmgoconn.h mongo.h tabjmg.h
Mongo2Interface.java Mongo3Interface.java)
- add_definitions(-DMONGO_SUPPORT)
- ENDIF()
+ add_definitions(-DMONGO_SUPPORT -DMONGO_ENABLED=0)
+ ENDIF()
ELSE()
SET(JDBC_LIBRARY "")
ENDIF()
@@ -290,33 +290,36 @@ IF(CONNECT_WITH_ZIP)
ENDIF(CONNECT_WITH_ZIP)
#
-# MONGO C Driver (CMAKE NOT YET WORKING)
+# MONGO C Driver
#
-#IF(CONNECT_WITH_MONGO)
-# IF(WIN32)
-# # Adding some typical places to search in
-# SET(PC_MONGO_INCLUDE_DIRS
-# C:/mongo-c-driver/include
-# D:/mongo-c-driver/include)
-# SET(PC_MONGO_LIBRARY_DIRS
-# C:/mongo-c-driver/lib
-# D:/mongo-c-driver/lib)
-# ENDIF(WIN32)
-# FIND_PACKAGE(libmongoc 1.7)
-# IF (MONGO_FOUND)
-# INCLUDE_DIRECTORIES(${MONGO_INCLUDE_DIR})
-# SET(MONGO_LIBRARY ${MONGO_LIBRARIES})
-# SET(CONNECT_SOURCES ${CONNECT_SOURCES}
-# cmgoconn.cpp cmgfam.cpp tabcmg.cpp
-# cmgoconn.h cmgfam.h tabcmg.h)
-# add_definitions(-DCMGO_SUPPORT)
-# IF (NOT JAVA_FOUND AND JNI_FOUND)
-# SET(CONNECT_SOURCES ${CONNECT_SOURCES} mongo.cpp mongo.h)
-# add_definitions(-DMONGO_SUPPORT)
-# ENDIF (NOT JAVA_FOUND AND JNI_FOUND)
-# ENDIF(MONGO_FOUND)
-#ENDIF(CONNECT_WITH_MONGO)
+IF(CONNECT_WITH_MONGO)
+ IF(WIN32)
+ # Adding some typical places to search in
+ SET(PC_MONGO_INCLUDE_DIRS
+ C:/mongo-c-driver/include
+ D:/mongo-c-driver/include)
+ SET(PC_MONGO_LIBRARY_DIRS
+ C:/mongo-c-driver/lib
+ D:/mongo-c-driver/lib)
+ ENDIF(WIN32)
+ FIND_PACKAGE(libmongoc-1.0 1.7)
+ IF (libmongoc-1.0_FOUND)
+ INCLUDE_DIRECTORIES(${MONGOC_INCLUDE_DIRS})
+ SET(MONGOC_LIBRARY ${MONGOC_LIBRARIES})
+ SET(CONNECT_SOURCES ${CONNECT_SOURCES}
+ cmgoconn.cpp cmgfam.cpp tabcmg.cpp
+ cmgoconn.h cmgfam.h tabcmg.h)
+ add_definitions(-DCMGO_SUPPORT)
+ IF (NOT JAVA_FOUND AND JNI_FOUND)
+ SET(CONNECT_SOURCES ${CONNECT_SOURCES} mongo.cpp mongo.h)
+ add_definitions(-DMONGO_SUPPORT -DMONGO_ENABLED=1)
+ ELSE ()
+ remove_definitions(-DMONGO_ENABLED=0)
+ add_definitions(-DMONGO_ENABLED=1)
+ ENDIF (NOT JAVA_FOUND AND JNI_FOUND)
+ ENDIF(libmongoc-1.0_FOUND)
+ENDIF(CONNECT_WITH_MONGO)
#
@@ -337,9 +340,25 @@ MYSQL_ADD_PLUGIN(connect ${CONNECT_SOURCES}
STORAGE_ENGINE
COMPONENT connect-engine
RECOMPILE_FOR_EMBEDDED
-# LINK_LIBRARIES ${ZLIB_LIBRARY} ${XML_LIBRARY} ${ICONV_LIBRARY} $(MONGO_LIBRARY)
LINK_LIBRARIES ${ZLIB_LIBRARY} ${XML_LIBRARY} ${ICONV_LIBRARY}
- ${ODBC_LIBRARY} ${JDBC_LIBRARY} ${IPHLPAPI_LIBRARY})
+ ${ODBC_LIBRARY} ${JDBC_LIBRARY} ${MONGOC_LIBRARY} ${IPHLPAPI_LIBRARY})
+
+IF(NOT TARGET connect)
+ RETURN()
+ENDIF()
+
+# Install some extra files that belong to connect engine
+IF(WIN32)
+ # install ha_connect.lib
+ GET_TARGET_PROPERTY(CONNECT_LOCATION connect LOCATION)
+ STRING(REPLACE "dll" "lib" CONNECT_LIB ${CONNECT_LOCATION})
+ IF(CMAKE_CONFIGURATION_TYPES)
+ STRING(REPLACE "${CMAKE_CFG_INTDIR}" "\${CMAKE_INSTALL_CONFIG_NAME}"
+ CONNECT_LIB ${CONNECT_LIB})
+ ENDIF()
+ INSTALL(FILES ${CONNECT_LIB}
+ DESTINATION ${INSTALL_PLUGINDIR} COMPONENT connect-engine)
+ENDIF(WIN32)
IF(NOT TARGET connect)
RETURN()
@@ -368,4 +387,3 @@ IF(CONNECT_WITH_JDBC AND JAVA_FOUND AND JNI_FOUND)
${CMAKE_CURRENT_BINARY_DIR}/JdbcInterface.jar
DESTINATION ${INSTALL_PLUGINDIR} COMPONENT connect-engine)
ENDIF()
-
diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc
index 4877f147e66..39506eae533 100644
--- a/storage/connect/ha_connect.cc
+++ b/storage/connect/ha_connect.cc
@@ -172,9 +172,9 @@
#define JSONMAX 10 // JSON Default max grp size
extern "C" {
- char version[]= "Version 1.06.0001 April 17, 2017";
+ char version[]= "Version 1.06.0004 September 03, 2017";
#if defined(__WIN__)
- char compver[]= "Version 1.06.0001 " __DATE__ " " __TIME__;
+ char compver[]= "Version 1.06.0004 " __DATE__ " " __TIME__;
char slash= '\\';
#else // !__WIN__
char slash= '/';
@@ -360,6 +360,13 @@ static MYSQL_THDVAR_STR(java_wrapper,
NULL, NULL, "wrappers/JdbcInterface");
#endif // JDBC_SUPPORT
+#if defined(MONGO_SUPPORT)
+// Enabling MONGO table type
+static MYSQL_THDVAR_BOOL(enable_mongo, PLUGIN_VAR_RQCMDARG,
+ "Enabling the MongoDB access",
+ NULL, NULL, MONGO_ENABLED);
+#endif // MONGO_SUPPORT
+
#if defined(XMSG) || defined(NEWMSG)
const char *language_names[]=
{
@@ -420,6 +427,10 @@ char *GetJavaWrapper(void)
{return connect_hton ? THDVAR(current_thd, java_wrapper) : (char*)"wrappers/JdbcInterface";}
#endif // JDBC_SUPPORT
+#if defined(MONGO_SUPPORT)
+bool MongoEnabled(void) { return THDVAR(current_thd, enable_mongo); }
+#endif // MONGO_SUPPORT
+
extern "C" const char *msglang(void)
{
#if defined(FRENCH)
@@ -1286,9 +1297,14 @@ PCSZ ha_connect::GetStringOption(PCSZ opname, PCSZ sdef)
else
opval= GetListOption(xp->g, opname, options->oplist);
- } else if (!stricmp(opname, "Query_String"))
- opval= thd_query_string(table->in_use)->str;
- else if (!stricmp(opname, "Partname"))
+ } else if (!stricmp(opname, "Query_String")) {
+// This escapes everything and returns a wrong query
+// opval = thd_query_string(table->in_use)->str;
+ opval = (PCSZ)PlugSubAlloc(xp->g, NULL,
+ thd_query_string(table->in_use)->length + 1);
+ strcpy((char*)opval, thd_query_string(table->in_use)->str);
+// sprintf((char*)opval, "%s", thd_query_string(table->in_use)->str);
+ } else if (!stricmp(opname, "Partname"))
opval= partname;
else if (!stricmp(opname, "Table_charset")) {
const CHARSET_INFO *chif= (tshp) ? tshp->table_charset
@@ -1434,7 +1450,7 @@ PFOS ha_connect::GetFieldOptionStruct(Field *fdp)
void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
{
const char *cp;
- char *chset, v;
+ char *chset, v = 0;
ha_field_option_struct *fop;
Field* fp;
Field* *fldp;
@@ -1487,7 +1503,6 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
} // endif fop
chset = (char *)fp->charset()->name;
- v = (!strcmp(chset, "binary")) ? 'B' : 0;
switch (fp->type()) {
case MYSQL_TYPE_BLOB:
@@ -1502,8 +1517,9 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
switch (pcf->Type) {
case TYPE_STRING:
- // Do something for case
- cp= fp->charset()->name;
+ case TYPE_BIN:
+ // Do something for case
+ cp= chset;
// Find if collation name ends by _ci
if (!strcmp(cp + strlen(cp) - 3, "_ci")) {
@@ -2115,6 +2131,11 @@ int ha_connect::MakeRecord(char *buf)
charset= tdbp->data_charset();
rc= fp->store(p, strlen(p), charset, CHECK_FIELD_WARN);
break;
+ case TYPE_BIN:
+ p = value->GetCharValue();
+ charset = &my_charset_bin;
+ rc = fp->store(p, strlen(p), charset, CHECK_FIELD_WARN);
+ break;
case TYPE_DOUBLE:
rc= fp->store(value->GetFloatValue());
break;
@@ -5164,7 +5185,8 @@ static bool add_field(String *sql, const char *field_name, int typ, int len,
error|= sql->append("` ");
error|= sql->append(type);
- if (len && typ != TYPE_DATE && (typ != TYPE_DOUBLE || dec >= 0)) {
+ if (typ == TYPE_STRING ||
+ (len && typ != TYPE_DATE && (typ != TYPE_DOUBLE || dec >= 0))) {
error|= sql->append('(');
error|= sql->append_ulonglong(len);
@@ -6390,6 +6412,7 @@ int ha_connect::create(const char *name, TABLE *table_arg,
case MYSQL_TYPE_VARCHAR:
case MYSQL_TYPE_VAR_STRING:
case MYSQL_TYPE_STRING:
+#if 0
if (!fp->field_length) {
sprintf(g->Message, "Unsupported 0 length for column %s",
fp->field_name.str);
@@ -6399,7 +6422,7 @@ int ha_connect::create(const char *name, TABLE *table_arg,
MYF(0), fp->field_name.str);
DBUG_RETURN(rc);
} // endif fp
-
+#endif // 0
break; // To be checked
case MYSQL_TYPE_BIT:
case MYSQL_TYPE_NULL:
@@ -7167,7 +7190,10 @@ static struct st_mysql_sys_var* connect_system_variables[]= {
MYSQL_SYSVAR(class_path),
MYSQL_SYSVAR(java_wrapper),
#endif // JDBC_SUPPORT
- NULL
+#if defined(MONGO_SUPPORT)
+ MYSQL_SYSVAR(enable_mongo),
+#endif // MONGO_SUPPORT
+NULL
};
maria_declare_plugin(connect)
@@ -7176,14 +7202,14 @@ maria_declare_plugin(connect)
&connect_storage_engine,
"CONNECT",
"Olivier Bertrand",
- "Management of External Data (SQL/MED), including many file formats",
+ "Management of External Data (SQL/NOSQL/MED), including many file formats",
PLUGIN_LICENSE_GPL,
connect_init_func, /* Plugin Init */
connect_done_func, /* Plugin Deinit */
0x0106, /* version number (1.05) */
NULL, /* status variables */
connect_system_variables, /* system variables */
- "1.06.0001", /* string version */
- MariaDB_PLUGIN_MATURITY_BETA /* maturity */
+ "1.06.0004", /* string version */
+ MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
}
maria_declare_plugin_end;
diff --git a/storage/connect/ha_connect.h b/storage/connect/ha_connect.h
index 8a54f8f16d1..c3d458094a2 100644
--- a/storage/connect/ha_connect.h
+++ b/storage/connect/ha_connect.h
@@ -348,6 +348,13 @@ const char *GetValStr(OPVAL vop, bool neg);
PFIL CondFilter(PGLOBAL g, Item *cond);
//PFIL CheckFilter(PGLOBAL g);
+/** admin commands - called from mysql_admin_table */
+virtual int check(THD* thd, HA_CHECK_OPT* check_opt)
+{
+ // TODO: implement it
+ return HA_ADMIN_OK; // Just to avoid error message with checktables
+} // end of check
+
/**
Number of rows in table. It will only be called if
(table_flags() & (HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT)) != 0
diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp
index 34a43c72016..29bccc4afeb 100644
--- a/storage/connect/jsonudf.cpp
+++ b/storage/connect/jsonudf.cpp
@@ -539,7 +539,7 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n)
SetJsonValue(g, MulVal, jvp, n);
if (!MulVal->IsNull()) {
- switch (op) {
+ switch (op) {
case OP_CNC:
if (Nodes[n].CncVal) {
val[0] = Nodes[n].CncVal;
diff --git a/storage/connect/mycat.cc b/storage/connect/mycat.cc
index 3a09b3003da..20082dcc43e 100644
--- a/storage/connect/mycat.cc
+++ b/storage/connect/mycat.cc
@@ -107,6 +107,9 @@
extern "C" HINSTANCE s_hModule; // Saved module handle
#endif // !__WIN__
+#if defined(MONGO_SUPPORT)
+bool MongoEnabled(void);
+#endif // MONGO_SUPPORT
PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info);
/***********************************************************************/
@@ -554,7 +557,13 @@ PRELDEF MYCAT::MakeTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR am)
case TAB_VIR: tdp= new(g) VIRDEF; break;
case TAB_JSON: tdp= new(g) JSONDEF; break;
#if defined(MONGO_SUPPORT)
- case TAB_MONGO: tdp = new(g) MGODEF; break;
+ case TAB_MONGO:
+ if (MongoEnabled())
+ tdp = new(g) MGODEF;
+ else
+ strcpy(g->Message, "MONGO type not enabled");
+
+ break;
#endif // MONGO_SUPPORT
#if defined(ZIP_SUPPORT)
case TAB_ZIP: tdp= new(g) ZIPDEF; break;
diff --git a/storage/connect/myconn.cpp b/storage/connect/myconn.cpp
index d2d55f33611..08bb24e14df 100644
--- a/storage/connect/myconn.cpp
+++ b/storage/connect/myconn.cpp
@@ -472,7 +472,7 @@ int MYSQLC::Open(PGLOBAL g, const char *host, const char *db,
int pt, const char *csname)
{
const char *pipe = NULL;
- uint cto = 6000, nrt = 12000;
+ uint cto = 10, nrt = 20;
my_bool my_true= 1;
m_DB = mysql_init(NULL);
@@ -525,7 +525,8 @@ int MYSQLC::Open(PGLOBAL g, const char *host, const char *db,
mysql_options(m_DB, MYSQL_OPT_USE_THREAD_SPECIFIC_MEMORY,
(char*)&my_true);
- if (!mysql_real_connect(m_DB, host, user, pwd, db, pt, pipe, CLIENT_MULTI_RESULTS)) {
+ if (!mysql_real_connect(m_DB, host, user, pwd, db, pt, pipe,
+ CLIENT_MULTI_RESULTS | CLIENT_REMEMBER_OPTIONS)) {
#if defined(_DEBUG)
sprintf(g->Message, "mysql_real_connect failed: (%d) %s",
mysql_errno(m_DB), mysql_error(m_DB));
diff --git a/storage/connect/mysql-test/connect/r/json_java_2.result b/storage/connect/mysql-test/connect/r/json_java_2.result
index 83272ec00ce..96c58221b24 100644
--- a/storage/connect/mysql-test/connect/r/json_java_2.result
+++ b/storage/connect/mysql-test/connect/r/json_java_2.result
@@ -1,4 +1,5 @@
SET GLOBAL connect_class_path='C:/MariaDB-10.2/MariaDB/storage/connect/mysql-test/connect/std_data/Mongo2.jar';
+set connect_enable_mongo=1;
#
# Test the MONGO table type
#
@@ -25,7 +26,6 @@ address_street 1 CHAR 38 38 0 0 address.street
address_zipcode 1 CHAR 5 5 0 0 address.zipcode
borough 1 CHAR 13 13 0 0
cuisine 1 CHAR 64 64 0 0
-grades 1 CHAR 0 0 0 1
grades_date 1 CHAR 256 256 0 1 grades.0.date
grades_grade 1 CHAR 14 14 0 1 grades.0.grade
grades_score 5 BIGINT 2 2 0 1 grades.0.score
@@ -72,7 +72,6 @@ t1 CREATE TABLE `t1` (
`address_zipcode` char(5) NOT NULL `FIELD_FORMAT`='address.zipcode',
`borough` char(13) NOT NULL,
`cuisine` char(64) NOT NULL,
- `grades` char(1) DEFAULT NULL,
`grades_date` varchar(256) DEFAULT NULL `FIELD_FORMAT`='grades.0.date',
`grades_grade` char(14) DEFAULT NULL `FIELD_FORMAT`='grades.0.grade',
`grades_score` bigint(2) DEFAULT NULL `FIELD_FORMAT`='grades.0.score',
@@ -80,12 +79,12 @@ t1 CREATE TABLE `t1` (
`restaurant_id` char(8) NOT NULL
) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8' `LRECL`=4096
SELECT * FROM t1 LIMIT 5;
-_id address_building address_coord address_street address_zipcode borough cuisine grades grades_date grades_grade grades_score name restaurant_id
-58ada47de5a51ddfcd5ed51c 1007 -73.856077 Morris Park Ave 10462 Bronx Bakery 2 2014-03-03T00:00:00.000Z A 2 Morris Park Bake Shop 30075445
-58ada47de5a51ddfcd5ed51d 469 -73.961704 Flatbush Avenue 11225 Brooklyn Hamburgers 2 2014-12-30T00:00:00.000Z A 8 Wendy'S 30112340
-58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 West 57 Street 10019 Manhattan Irish 2 2014-09-06T00:00:00.000Z A 2 Dj Reynolds Pub And Restaurant 30191841
-58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 Stillwell Avenue 11224 Brooklyn American 2 2014-06-10T00:00:00.000Z A 5 Riviera Caterer 40356018
-58ada47de5a51ddfcd5ed520 97-22 -73.8601152 63 Road 11374 Queens Jewish/Kosher 2 2014-11-24T00:00:00.000Z Z 20 Tov Kosher Kitchen 40356068
+_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id
+58ada47de5a51ddfcd5ed51c 1007 -73.856077 Morris Park Ave 10462 Bronx Bakery 2014-03-03T00:00:00.000Z A 2 Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 -73.961704 Flatbush Avenue 11225 Brooklyn Hamburgers 2014-12-30T00:00:00.000Z A 8 Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 West 57 Street 10019 Manhattan Irish 2014-09-06T00:00:00.000Z A 2 Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 Stillwell Avenue 11224 Brooklyn American 2014-06-10T00:00:00.000Z A 5 Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 -73.8601152 63 Road 11374 Queens Jewish/Kosher 2014-11-24T00:00:00.000Z Z 20 Tov Kosher Kitchen 40356068
DROP TABLE t1;
#
# Dropping a column
@@ -259,7 +258,6 @@ t1 CREATE TABLE `t1` (
`address_street` char(38) NOT NULL `FIELD_FORMAT`='address.street',
`address_zipcode` char(5) NOT NULL `FIELD_FORMAT`='address.zipcode',
`borough` char(13) NOT NULL,
- `grades` char(1) DEFAULT NULL,
`grades_date` char(24) DEFAULT NULL `FIELD_FORMAT`='grades.0.date',
`grades_grade` char(14) DEFAULT NULL `FIELD_FORMAT`='grades.0.grade',
`grades_score` bigint(2) DEFAULT NULL `FIELD_FORMAT`='grades.0.score',
@@ -384,3 +382,4 @@ planner 167 41.75
postcard 23 5.75
DROP TABLE t1;
true
+set connect_enable_mongo=0;
diff --git a/storage/connect/mysql-test/connect/r/json_java_3.result b/storage/connect/mysql-test/connect/r/json_java_3.result
index 563bcef7321..09901452975 100644
--- a/storage/connect/mysql-test/connect/r/json_java_3.result
+++ b/storage/connect/mysql-test/connect/r/json_java_3.result
@@ -1,4 +1,5 @@
SET GLOBAL connect_class_path='C:/MariaDB-10.2/MariaDB/storage/connect/mysql-test/connect/std_data/Mongo3.jar';
+set connect_enable_mongo=1;
#
# Test the MONGO table type
#
@@ -25,7 +26,6 @@ address_street 1 CHAR 38 38 0 0 address.street
address_zipcode 1 CHAR 5 5 0 0 address.zipcode
borough 1 CHAR 13 13 0 0
cuisine 1 CHAR 64 64 0 0
-grades 1 CHAR 0 0 0 1
grades_date 1 CHAR 256 256 0 1 grades.0.date
grades_grade 1 CHAR 14 14 0 1 grades.0.grade
grades_score 5 BIGINT 2 2 0 1 grades.0.score
@@ -72,7 +72,6 @@ t1 CREATE TABLE `t1` (
`address_zipcode` char(5) NOT NULL `FIELD_FORMAT`='address.zipcode',
`borough` char(13) NOT NULL,
`cuisine` char(64) NOT NULL,
- `grades` char(1) DEFAULT NULL,
`grades_date` varchar(256) DEFAULT NULL `FIELD_FORMAT`='grades.0.date',
`grades_grade` char(14) DEFAULT NULL `FIELD_FORMAT`='grades.0.grade',
`grades_score` bigint(2) DEFAULT NULL `FIELD_FORMAT`='grades.0.score',
@@ -80,12 +79,12 @@ t1 CREATE TABLE `t1` (
`restaurant_id` char(8) NOT NULL
) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8' `LRECL`=4096
SELECT * FROM t1 LIMIT 5;
-_id address_building address_coord address_street address_zipcode borough cuisine grades grades_date grades_grade grades_score name restaurant_id
-58ada47de5a51ddfcd5ed51c 1007 -73.856077 Morris Park Ave 10462 Bronx Bakery 1 1393804800 A 2 Morris Park Bake Shop 30075445
-58ada47de5a51ddfcd5ed51d 469 -73.961704 Flatbush Avenue 11225 Brooklyn Hamburgers 1 1419897600 A 8 Wendy'S 30112340
-58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 West 57 Street 10019 Manhattan Irish 1 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841
-58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 Stillwell Avenue 11224 Brooklyn American 1 1402358400 A 5 Riviera Caterer 40356018
-58ada47de5a51ddfcd5ed520 97-22 -73.8601152 63 Road 11374 Queens Jewish/Kosher 1 1416787200 Z 20 Tov Kosher Kitchen 40356068
+_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id
+58ada47de5a51ddfcd5ed51c 1007 -73.856077 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 -73.961704 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 -73.8601152 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068
DROP TABLE t1;
#
# Dropping a column
@@ -259,7 +258,6 @@ t1 CREATE TABLE `t1` (
`address_street` char(38) NOT NULL `FIELD_FORMAT`='address.street',
`address_zipcode` char(5) NOT NULL `FIELD_FORMAT`='address.zipcode',
`borough` char(13) NOT NULL,
- `grades` char(1) DEFAULT NULL,
`grades_date` bigint(13) DEFAULT NULL `FIELD_FORMAT`='grades.0.date',
`grades_grade` char(14) DEFAULT NULL `FIELD_FORMAT`='grades.0.grade',
`grades_score` bigint(2) DEFAULT NULL `FIELD_FORMAT`='grades.0.score',
@@ -384,3 +382,4 @@ planner 167 41.75
postcard 23 5.75
DROP TABLE t1;
true
+set connect_enable_mongo=0;
diff --git a/storage/connect/mysql-test/connect/r/json_mongo_c.result b/storage/connect/mysql-test/connect/r/json_mongo_c.result
index d3363f39eab..afcad8d2ea2 100644
--- a/storage/connect/mysql-test/connect/r/json_mongo_c.result
+++ b/storage/connect/mysql-test/connect/r/json_mongo_c.result
@@ -1,3 +1,4 @@
+set connect_enable_mongo=1;
#
# Test the MONGO table type
#
@@ -24,7 +25,6 @@ address_street 1 CHAR 38 38 0 0 address.street
address_zipcode 1 CHAR 5 5 0 0 address.zipcode
borough 1 CHAR 13 13 0 0
cuisine 1 CHAR 64 64 0 0
-grades 1 CHAR 0 0 0 1
grades_date 1 CHAR 256 256 0 1 grades.0.date
grades_grade 1 CHAR 14 14 0 1 grades.0.grade
grades_score 5 BIGINT 2 2 0 1 grades.0.score
@@ -71,7 +71,6 @@ t1 CREATE TABLE `t1` (
`address_zipcode` char(5) NOT NULL `FIELD_FORMAT`='address.zipcode',
`borough` char(13) NOT NULL,
`cuisine` char(64) NOT NULL,
- `grades` char(1) DEFAULT NULL,
`grades_date` varchar(256) DEFAULT NULL `FIELD_FORMAT`='grades.0.date',
`grades_grade` char(14) DEFAULT NULL `FIELD_FORMAT`='grades.0.grade',
`grades_score` bigint(2) DEFAULT NULL `FIELD_FORMAT`='grades.0.score',
@@ -79,12 +78,12 @@ t1 CREATE TABLE `t1` (
`restaurant_id` char(8) NOT NULL
) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=C,Version=0' `DATA_CHARSET`='utf8' `LRECL`=1024
SELECT * FROM t1 LIMIT 5;
-_id address_building address_coord address_street address_zipcode borough cuisine grades grades_date grades_grade grades_score name restaurant_id
-58ada47de5a51ddfcd5ed51c 1007 -73.856076999999999089 Morris Park Ave 10462 Bronx Bakery 1 1393804800 A 2 Morris Park Bake Shop 30075445
-58ada47de5a51ddfcd5ed51d 469 -73.96170399999999745 Flatbush Avenue 11225 Brooklyn Hamburgers 1 1419897600 A 8 Wendy'S 30112340
-58ada47de5a51ddfcd5ed51e 351 -73.985135599999992451 West 57 Street 10019 Manhattan Irish 1 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841
-58ada47de5a51ddfcd5ed51f 2780 -73.982419999999990523 Stillwell Avenue 11224 Brooklyn American 1 1402358400 A 5 Riviera Caterer 40356018
-58ada47de5a51ddfcd5ed520 97-22 -73.860115199999995639 63 Road 11374 Queens Jewish/Kosher 1 1416787200 Z 20 Tov Kosher Kitchen 40356068
+_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id
+58ada47de5a51ddfcd5ed51c 1007 -73.856076999999999089 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 -73.96170399999999745 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 -73.985135599999992451 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 -73.982419999999990523 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 -73.860115199999995639 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068
DROP TABLE t1;
#
# Dropping a column
@@ -258,7 +257,6 @@ t1 CREATE TABLE `t1` (
`address_street` char(38) NOT NULL `FIELD_FORMAT`='address.street',
`address_zipcode` char(5) NOT NULL `FIELD_FORMAT`='address.zipcode',
`borough` char(13) NOT NULL,
- `grades` char(1) DEFAULT NULL,
`grades_date` bigint(13) DEFAULT NULL `FIELD_FORMAT`='grades.0.date',
`grades_grade` char(14) DEFAULT NULL `FIELD_FORMAT`='grades.0.grade',
`grades_score` bigint(2) DEFAULT NULL `FIELD_FORMAT`='grades.0.score',
@@ -383,3 +381,4 @@ planner 167 41.75
postcard 23 5.75
DROP TABLE t1;
true
+set connect_enable_mongo=0;
diff --git a/storage/connect/mysql-test/connect/r/mongo_c.result b/storage/connect/mysql-test/connect/r/mongo_c.result
index da0832994c2..f90f3a94b44 100644
--- a/storage/connect/mysql-test/connect/r/mongo_c.result
+++ b/storage/connect/mysql-test/connect/r/mongo_c.result
@@ -1,3 +1,4 @@
+set connect_enable_mongo=1;
#
# Test the MONGO table type
#
@@ -376,3 +377,4 @@ planner 167 41.750000
postcard 23 5.750000
DROP TABLE t1;
true
+set connect_enable_mongo=0;
diff --git a/storage/connect/mysql-test/connect/r/mongo_java_2.result b/storage/connect/mysql-test/connect/r/mongo_java_2.result
index b1e9ea74f72..02b8ae09d34 100644
--- a/storage/connect/mysql-test/connect/r/mongo_java_2.result
+++ b/storage/connect/mysql-test/connect/r/mongo_java_2.result
@@ -1,4 +1,5 @@
SET GLOBAL connect_class_path='C:/MariaDB-10.2/MariaDB/storage/connect/mysql-test/connect/std_data/Mongo2.jar';
+set connect_enable_mongo=1;
#
# Test the MONGO table type
#
@@ -377,3 +378,4 @@ planner 167 41.75
postcard 23 5.75
DROP TABLE t1;
true
+set connect_enable_mongo=0;
diff --git a/storage/connect/mysql-test/connect/r/mongo_java_3.result b/storage/connect/mysql-test/connect/r/mongo_java_3.result
index e2fe584620f..c4387bfa5b1 100644
--- a/storage/connect/mysql-test/connect/r/mongo_java_3.result
+++ b/storage/connect/mysql-test/connect/r/mongo_java_3.result
@@ -1,4 +1,5 @@
SET GLOBAL connect_class_path='C:/MariaDB-10.2/MariaDB/storage/connect/mysql-test/connect/std_data/Mongo3.jar';
+set connect_enable_mongo=1;
#
# Test the MONGO table type
#
@@ -377,3 +378,4 @@ planner 167 41.75
postcard 23 5.75
DROP TABLE t1;
true
+set connect_enable_mongo=0;
diff --git a/storage/connect/mysql-test/connect/r/odbc_oracle.result b/storage/connect/mysql-test/connect/r/odbc_oracle.result
index db7f78f67cd..8dc7dc07bb1 100644
--- a/storage/connect/mysql-test/connect/r/odbc_oracle.result
+++ b/storage/connect/mysql-test/connect/r/odbc_oracle.result
@@ -72,11 +72,11 @@ TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
CATFUNC=Columns;
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
-NULL MTR T1 A 3 DECIMAL 38 40 0 10 1 NULL
-NULL MTR T1 B 6 NUMBER 38 40 NULL NULL 1 NULL
-NULL MTR T2 A 12 VARCHAR2 64 64 NULL NULL 1 NULL
-NULL MTR V1 A 3 DECIMAL 38 40 0 10 1 NULL
-NULL MTR V1 B 6 NUMBER 38 40 NULL NULL 1 NULL
+ MTR T1 A 3 DECIMAL 38 40 0 10 1
+ MTR T1 B 6 NUMBER 38 40 NULL NULL 1
+ MTR T2 A 12 VARCHAR2 64 64 NULL NULL 1
+ MTR V1 A 3 DECIMAL 38 40 0 10 1
+ MTR V1 B 6 NUMBER 38 40 NULL NULL 1
DROP TABLE t1;
# All columns in all schemas (limited with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
@@ -84,18 +84,18 @@ TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
CATFUNC=Columns TABNAME='%.%';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
-NULL MTR T1 A 3 DECIMAL 38 40 0 10 1 NULL
-NULL MTR T1 B 6 NUMBER 38 40 NULL NULL 1 NULL
-NULL MTR T2 A 12 VARCHAR2 64 64 NULL NULL 1 NULL
-NULL MTR V1 A 3 DECIMAL 38 40 0 10 1 NULL
-NULL MTR V1 B 6 NUMBER 38 40 NULL NULL 1 NULL
+ MTR T1 A 3 DECIMAL 38 40 0 10 1
+ MTR T1 B 6 NUMBER 38 40 NULL NULL 1
+ MTR T2 A 12 VARCHAR2 64 64 NULL NULL 1
+ MTR V1 A 3 DECIMAL 38 40 0 10 1
+ MTR V1 B 6 NUMBER 38 40 NULL NULL 1
DROP TABLE t1;
# All tables "T1" in all schemas (limited with WHERE)
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' CATFUNC=Columns TABNAME='%.T1';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
-NULL MTR T1 A 3 DECIMAL 38 40 0 10 1 NULL
-NULL MTR T1 B 6 NUMBER 38 40 NULL NULL 1 NULL
+ MTR T1 A 3 DECIMAL 38 40 0 10 1
+ MTR T1 B 6 NUMBER 38 40 NULL NULL 1
DROP TABLE t1;
# Table "T1" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
@@ -103,8 +103,8 @@ TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
CATFUNC=Columns TABNAME='MTR.T1';
SELECT * FROM t1 ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
-NULL MTR T1 A 3 DECIMAL 38 40 0 10 1 NULL
-NULL MTR T1 B 6 NUMBER 38 40 NULL NULL 1 NULL
+ MTR T1 A 3 DECIMAL 38 40 0 10 1
+ MTR T1 B 6 NUMBER 38 40 NULL NULL 1
DROP TABLE t1;
# All tables "T1" in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
@@ -112,8 +112,8 @@ TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
CATFUNC=Columns TABNAME='%.T1';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
-NULL MTR T1 A 3 DECIMAL 38 40 0 10 1 NULL
-NULL MTR T1 B 6 NUMBER 38 40 NULL NULL 1 NULL
+ MTR T1 A 3 DECIMAL 38 40 0 10 1
+ MTR T1 B 6 NUMBER 38 40 NULL NULL 1
DROP TABLE t1;
#
# Checking tables
diff --git a/storage/connect/mysql-test/connect/r/tbl_thread.result b/storage/connect/mysql-test/connect/r/tbl_thread.result
index ef6439462bb..f53ccd25b97 100644
--- a/storage/connect/mysql-test/connect/r/tbl_thread.result
+++ b/storage/connect/mysql-test/connect/r/tbl_thread.result
@@ -35,6 +35,22 @@ a b
9 test09
10 test10
11 test11
+CREATE TABLE rt4 (a int, b char(10));
+INSERT INTO rt4 VALUES (12,'test12'),(13,'test13'),(14,'test14'),(15,'test15');
+SELECT * FROM rt4;
+a b
+12 test12
+13 test13
+14 test14
+15 test15
+CREATE TABLE rt5 (a int, b char(10));
+INSERT INTO rt5 VALUES (16,'test16'),(17,'test17'),(18,'test18'),(19,'test19');
+SELECT * FROM rt5;
+a b
+16 test16
+17 test17
+18 test18
+19 test19
connection default;
CREATE TABLE t2 ENGINE=CONNECT TABLE_TYPE=MYSQL
CONNECTION='mysql://root@127.0.0.1:MASTER_PORT/test/rt2';
@@ -52,11 +68,36 @@ a b
9 test09
10 test10
11 test11
+CREATE TABLE t4 ENGINE=CONNECT TABLE_TYPE=MYSQL
+CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/test/rt4';
+SELECT * FROM t4;
+a b
+12 test12
+13 test13
+14 test14
+15 test15
+CREATE TABLE t5 ENGINE=CONNECT TABLE_TYPE=MYSQL
+CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/test/rt5';
+SELECT * FROM t5;
+a b
+16 test16
+17 test17
+18 test18
+19 test19
CREATE TABLE total (a int, b char(10))
-ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t1,t2,t3'
+ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t1,t2,t3,t4,t5'
OPTION_LIST='thread=yes,port=PORT';
+set connect_xtrace=1;
SELECT * FROM total order by a desc;
a b
+19 test19
+18 test18
+17 test17
+16 test16
+15 test15
+14 test14
+13 test13
+12 test12
11 test11
10 test10
9 test09
@@ -69,12 +110,13 @@ a b
2 test02
1 test01
0 test00
+set connect_xtrace=0;
connection master;
DROP TABLE rt2;
connection slave;
-DROP TABLE rt3;
+DROP TABLE rt3,rt4,rt5;
connection default;
-DROP TABLE t1,t2,t3,total;
+DROP TABLE t1,t2,t3,t4,t5,total;
#
# Old thread TBL tables test modified
#
@@ -87,13 +129,15 @@ SELECT * FROM t2;
v
22
CREATE TABLE total (v BIGINT(20) UNSIGNED NOT NULL) ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t1,t2' OPTION_LIST='thread=yes,port=PORT';;
+set connect_xtrace=1;
SELECT * FROM total order by v desc;
v
22
11
+set connect_xtrace=0;
DROP TABLE t1,t2,total;
#
-# Old thread TBL tables test not modified
+# Old thread TBL tables test not modified (suppressed until MDEV-10179 is fixed)
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MYSQL SRCDEF='select 11 as v';
SELECT * FROM t1;
@@ -104,10 +148,12 @@ SELECT * FROM t2;
v
22
CREATE TABLE total (v BIGINT(20) UNSIGNED NOT NULL) ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t1,t2' OPTION_LIST='thread=yes,port=PORT';;
+set connect_xtrace=1;
SELECT * FROM total order by v desc;
v
22
11
+set connect_xtrace=0;
DROP TABLE total;
DROP TABLE t1;
DROP TABLE t2;
diff --git a/storage/connect/mysql-test/connect/std_data/Mongo2.jar b/storage/connect/mysql-test/connect/std_data/Mongo2.jar
index d019bf6906b..9be654bd4c8 100644
--- a/storage/connect/mysql-test/connect/std_data/Mongo2.jar
+++ b/storage/connect/mysql-test/connect/std_data/Mongo2.jar
Binary files differ
diff --git a/storage/connect/mysql-test/connect/std_data/Mongo3.jar b/storage/connect/mysql-test/connect/std_data/Mongo3.jar
index 73eb3571290..2850177a668 100644
--- a/storage/connect/mysql-test/connect/std_data/Mongo3.jar
+++ b/storage/connect/mysql-test/connect/std_data/Mongo3.jar
Binary files differ
diff --git a/storage/connect/mysql-test/connect/t/mongo_test.inc b/storage/connect/mysql-test/connect/t/mongo_test.inc
index c223e70f719..c728b85fd2f 100644
--- a/storage/connect/mysql-test/connect/t/mongo_test.inc
+++ b/storage/connect/mysql-test/connect/t/mongo_test.inc
@@ -1,3 +1,5 @@
+set connect_enable_mongo=1;
+
--echo #
--echo # Test the MONGO table type
--echo #
@@ -201,3 +203,5 @@ OPTION_LIST='Driver=$DRV,Version=$VERS,Pipeline=YES' $CONN;
SELECT * FROM t1;
DROP TABLE t1;
--exec $MONGO --eval "db.testcoll.drop()" --quiet
+
+set connect_enable_mongo=0;
diff --git a/storage/connect/mysql-test/connect/t/tbl_thread.test b/storage/connect/mysql-test/connect/t/tbl_thread.test
index abc1ef34729..68a0ebcd44d 100644
--- a/storage/connect/mysql-test/connect/t/tbl_thread.test
+++ b/storage/connect/mysql-test/connect/t/tbl_thread.test
@@ -2,8 +2,6 @@
connection default;
-let $PORT= `select @@port`;
-
--echo #
--echo # Checking thread TBL tables
--echo #
@@ -24,6 +22,14 @@ CREATE TABLE rt3 (a int, b char(10));
INSERT INTO rt3 VALUES (8,'test08'),(9,'test09'),(10,'test10'),(11,'test11');
SELECT * FROM rt3;
+CREATE TABLE rt4 (a int, b char(10));
+INSERT INTO rt4 VALUES (12,'test12'),(13,'test13'),(14,'test14'),(15,'test15');
+SELECT * FROM rt4;
+
+CREATE TABLE rt5 (a int, b char(10));
+INSERT INTO rt5 VALUES (16,'test16'),(17,'test17'),(18,'test18'),(19,'test19');
+SELECT * FROM rt5;
+
connection default;
--replace_result $MASTER_MYPORT MASTER_PORT
@@ -36,11 +42,23 @@ eval CREATE TABLE t3 ENGINE=CONNECT TABLE_TYPE=MYSQL
CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/test/rt3';
SELECT * FROM t3;
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval CREATE TABLE t4 ENGINE=CONNECT TABLE_TYPE=MYSQL
+CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/test/rt4';
+SELECT * FROM t4;
+
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval CREATE TABLE t5 ENGINE=CONNECT TABLE_TYPE=MYSQL
+CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/test/rt5';
+SELECT * FROM t5;
+
--replace_result $PORT PORT
eval CREATE TABLE total (a int, b char(10))
-ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t1,t2,t3'
+ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t1,t2,t3,t4,t5'
OPTION_LIST='thread=yes,port=$PORT';
+set connect_xtrace=1;
SELECT * FROM total order by a desc;
+set connect_xtrace=0;
connection master;
@@ -48,11 +66,11 @@ DROP TABLE rt2;
connection slave;
-DROP TABLE rt3;
+DROP TABLE rt3,rt4,rt5;
connection default;
-DROP TABLE t1,t2,t3,total;
+DROP TABLE t1,t2,t3,t4,t5,total;
--echo #
--echo # Old thread TBL tables test modified
@@ -67,11 +85,13 @@ SELECT * FROM t2;
--replace_result $PORT PORT
--eval CREATE TABLE total (v BIGINT(20) UNSIGNED NOT NULL) ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t1,t2' OPTION_LIST='thread=yes,port=$PORT';
+set connect_xtrace=1;
SELECT * FROM total order by v desc;
+set connect_xtrace=0;
DROP TABLE t1,t2,total;
--echo #
---echo # Old thread TBL tables test not modified
+--echo # Old thread TBL tables test not modified (suppressed until MDEV-10179 is fixed)
--echo #
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MYSQL SRCDEF='select 11 as v';
SELECT * FROM t1;
@@ -81,7 +101,9 @@ SELECT * FROM t2;
--replace_result $PORT PORT
--eval CREATE TABLE total (v BIGINT(20) UNSIGNED NOT NULL) ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t1,t2' OPTION_LIST='thread=yes,port=$PORT';
+set connect_xtrace=1;
SELECT * FROM total order by v desc;
+set connect_xtrace=0;
DROP TABLE total;
DROP TABLE t1;
diff --git a/storage/connect/myutil.cpp b/storage/connect/myutil.cpp
index c2053f1c832..338a79d9455 100644
--- a/storage/connect/myutil.cpp
+++ b/storage/connect/myutil.cpp
@@ -218,7 +218,7 @@ int MYSQLtoPLG(int mytype, char *var)
case MYSQL_TYPE_VARCHAR:
#endif // !ALPHA)
case MYSQL_TYPE_STRING:
- type = TYPE_STRING;
+ type = (*var == 'B') ? TYPE_BIN : TYPE_STRING;
break;
case MYSQL_TYPE_BLOB:
case MYSQL_TYPE_TINY_BLOB:
@@ -232,7 +232,7 @@ int MYSQLtoPLG(int mytype, char *var)
type = TYPE_STRING;
*var = 'X';
} else
- type = TYPE_ERROR;
+ type = TYPE_BIN;
break;
case TPC_SKIP:
diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp
index 95069baf76e..031fdebe650 100644
--- a/storage/connect/reldef.cpp
+++ b/storage/connect/reldef.cpp
@@ -789,7 +789,7 @@ int COLDEF::Define(PGLOBAL g, void *, PCOLINFO cfp, int poff)
Poff = poff;
Buf_Type = cfp->Type;
- if ((Clen = GetTypeSize(Buf_Type, cfp->Length)) <= 0) {
+ if ((Clen = GetTypeSize(Buf_Type, cfp->Length)) < 0) {
sprintf(g->Message, MSG(BAD_COL_TYPE), GetTypeName(Buf_Type), Name);
return -1;
} // endswitch
diff --git a/storage/connect/reldef.h b/storage/connect/reldef.h
index 8b19a413ade..84ae2a491f0 100644
--- a/storage/connect/reldef.h
+++ b/storage/connect/reldef.h
@@ -94,6 +94,7 @@ public:
virtual void SetIndx(PIXDEF) {}
virtual bool IsHuge(void) {return false;}
const CHARSET_INFO *data_charset() {return m_data_charset;}
+ const char *GetCsName(void) {return csname;}
// Methods
int GetColCatInfo(PGLOBAL g);
diff --git a/storage/connect/tabext.cpp b/storage/connect/tabext.cpp
index 1d76e0417fa..207c8401c7b 100644
--- a/storage/connect/tabext.cpp
+++ b/storage/connect/tabext.cpp
@@ -279,10 +279,57 @@ int TDBEXT::Decode(PCSZ txt, char *buf, size_t n)
} // end of Decode
/***********************************************************************/
-/* MakeSQL: make the SQL statement use with remote connection. */
-/* TODO: when implementing remote filtering, column only used in */
-/* local filter should be removed from column list. */
+/* MakeSrcdef: make the SQL statement from SRCDEF option. */
/***********************************************************************/
+bool TDBEXT::MakeSrcdef(PGLOBAL g)
+{
+ char *catp = strstr(Srcdef, "%s");
+
+ if (catp) {
+ char *fil1= 0, *fil2;
+ PCSZ ph = ((EXTDEF*)To_Def)->Phpos;
+
+ if (!ph)
+ ph = (strstr(catp + 2, "%s")) ? "WH" : "W";
+
+ if (stricmp(ph, "H")) {
+ fil1 = (To_CondFil && *To_CondFil->Body)
+ ? To_CondFil->Body : PlugDup(g, "1=1");
+ } // endif ph
+
+ if (stricmp(ph, "W")) {
+ fil2 = (To_CondFil && To_CondFil->Having && *To_CondFil->Having)
+ ? To_CondFil->Having : PlugDup(g, "1=1");
+ } // endif ph
+
+ if (!stricmp(ph, "W")) {
+ Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil1));
+ Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil1));
+ } else if (!stricmp(ph, "WH")) {
+ Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil1) + strlen(fil2));
+ Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil1, fil2));
+ } else if (!stricmp(ph, "H")) {
+ Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil2));
+ Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil2));
+ } else if (!stricmp(ph, "HW")) {
+ Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil1) + strlen(fil2));
+ Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil2, fil1));
+ } else {
+ strcpy(g->Message, "MakeSQL: Wrong place holders specification");
+ return true;
+ } // endif's ph
+
+ } else
+ Query = new(g)STRING(g, 0, Srcdef);
+
+ return false;
+} // end of MakeSrcdef
+
+ /***********************************************************************/
+ /* MakeSQL: make the SQL statement use with remote connection. */
+ /* TODO: when implementing remote filtering, column only used in */
+ /* local filter should be removed from column list. */
+ /***********************************************************************/
bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt)
{
PCSZ schmp = NULL;
@@ -292,46 +339,8 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt)
PTABLE tablep = To_Table;
PCOL colp;
- if (Srcdef) {
- if ((catp = strstr(Srcdef, "%s"))) {
- char *fil1= 0, *fil2;
- PCSZ ph = ((EXTDEF*)To_Def)->Phpos;
-
- if (!ph)
- ph = (strstr(catp + 2, "%s")) ? "WH" : "W";
-
- if (stricmp(ph, "H")) {
- fil1 = (To_CondFil && *To_CondFil->Body)
- ? To_CondFil->Body : PlugDup(g, "1=1");
- } // endif ph
-
- if (stricmp(ph, "W")) {
- fil2 = (To_CondFil && To_CondFil->Having && *To_CondFil->Having)
- ? To_CondFil->Having : PlugDup(g, "1=1");
- } // endif ph
-
- if (!stricmp(ph, "W")) {
- Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil1));
- Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil1));
- } else if (!stricmp(ph, "WH")) {
- Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil1) + strlen(fil2));
- Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil1, fil2));
- } else if (!stricmp(ph, "H")) {
- Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil2));
- Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil2));
- } else if (!stricmp(ph, "HW")) {
- Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil1) + strlen(fil2));
- Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil2, fil1));
- } else {
- strcpy(g->Message, "MakeSQL: Wrong place holders specification");
- return true;
- } // endif's ph
-
- } else
- Query = new(g)STRING(g, 0, Srcdef);
-
- return false;
- } // endif Srcdef
+ if (Srcdef)
+ return MakeSrcdef(g);
// Allocate the string used to contain the Query
Query = new(g)STRING(g, 1023, "SELECT ");
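
For readers unfamiliar with CONNECT's SRCDEF option, the logic that the new MakeSrcdef() factors out of MakeSQL() can be pictured with the standalone sketch below. It is illustrative only: the function expand_srcdef and its arguments are invented, and the real code builds a STRING object and reads the placeholder position from the table definition (Phpos).

// Illustrative sketch, not CONNECT source: expand a SRCDEF string whose
// "%s" placeholders stand for the pushed-down WHERE and/or HAVING filter,
// following the W / H / WH / HW placement convention seen in MakeSrcdef.
// A missing filter is replaced by the harmless "1=1".
#include <cstdio>
#include <cstring>
#include <string>

static std::string expand_srcdef(const std::string& srcdef, const char* ph,
                                 const char* where_fil, const char* having_fil)
{
  const char* fil1 = (where_fil  && *where_fil)  ? where_fil  : "1=1";
  const char* fil2 = (having_fil && *having_fil) ? having_fil : "1=1";
  char buf[1024];

  if (!strcmp(ph, "W"))
    snprintf(buf, sizeof buf, srcdef.c_str(), fil1);
  else if (!strcmp(ph, "H"))
    snprintf(buf, sizeof buf, srcdef.c_str(), fil2);
  else if (!strcmp(ph, "WH"))
    snprintf(buf, sizeof buf, srcdef.c_str(), fil1, fil2);
  else if (!strcmp(ph, "HW"))
    snprintf(buf, sizeof buf, srcdef.c_str(), fil2, fil1);
  else
    return srcdef;                     // wrong placeholder specification

  return buf;
}

int main()
{
  // SRCDEF='select * from emp where %s' with a pushed-down "dept = 10"
  // becomes "select * from emp where dept = 10"; with no condition at all
  // the neutral "1=1" is substituted instead.
  std::printf("%s\n", expand_srcdef("select * from emp where %s", "W",
                                    "dept = 10", nullptr).c_str());
}
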
diff --git a/storage/connect/tabext.h b/storage/connect/tabext.h
index 7ddf2a68117..6b67c2ab5ed 100644
--- a/storage/connect/tabext.h
+++ b/storage/connect/tabext.h
@@ -126,6 +126,7 @@ public:
protected:
// Internal functions
+ virtual bool MakeSrcdef(PGLOBAL g);
virtual bool MakeSQL(PGLOBAL g, bool cnt);
//virtual bool MakeInsert(PGLOBAL g);
virtual bool MakeCommand(PGLOBAL g);
diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp
index 76a3d5e9988..401441520da 100644
--- a/storage/connect/tabjson.cpp
+++ b/storage/connect/tabjson.cpp
@@ -136,6 +136,7 @@ PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info)
tdp->Base = GetIntegerTableOption(g, topt, "Base", 0) ? 1 : 0;
tdp->Pretty = GetIntegerTableOption(g, topt, "Pretty", 2);
tdp->Xcol = GetStringTableOption(g, topt, "Expand", NULL);
+ tdp->Accept = GetBooleanTableOption(g, topt, "Accept", false);
tdp->Uri = (dsn && *dsn ? dsn : NULL);
if (!tdp->Fn && !tdp->Uri) {
@@ -365,7 +366,7 @@ PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info)
jcp->Scale = MY_MAX(jcp->Scale, jcol.Scale);
jcp->Cbn |= jcol.Cbn;
jcp->Found = true;
- } else {
+ } else if (jcol.Type != TYPE_UNKNOWN || tdp->Accept) {
// New column
jcp = (PJCL)PlugSubAlloc(g, NULL, sizeof(JCOL));
*jcp = jcol;
@@ -448,8 +449,8 @@ PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info)
/* Now get the results into blocks. */
/*********************************************************************/
for (i = 0, jcp = fjcp; jcp; i++, jcp = jcp->Next) {
- if (jcp->Type == TYPE_UNKNOWN) // Void column
- jcp->Type = TYPE_STRING;
+ if (jcp->Type == TYPE_UNKNOWN)
+ jcp->Type = TYPE_STRING; // Void column
crp = qrp->Colresp; // Column Name
crp->Kdata->SetValue(jcp->Name, i);
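
As a side note on the Accept option read above, the discovery rule it controls can be sketched in isolation: a column whose type could not be inferred from the sampled documents is only reported when Accept is set (and such a void column later defaults to a string). Every name below is invented for the example.

// Illustrative sketch (not CONNECT source) of the column-discovery rule
// the tabjson.cpp hunk introduces.
#include <cstdio>
#include <string>
#include <vector>

enum ColType { TYPE_UNKNOWN_T, TYPE_STRING_T, TYPE_INT_T };

struct DiscoveredCol { std::string name; ColType type; };

static void add_column(std::vector<DiscoveredCol>& cols,
                       const DiscoveredCol& c, bool accept)
{
  if (c.type != TYPE_UNKNOWN_T || accept)
    cols.push_back(c);          // kept; void columns become strings later
}

int main()
{
  std::vector<DiscoveredCol> cols;
  add_column(cols, {"id", TYPE_INT_T}, false);          // kept
  add_column(cols, {"mystery", TYPE_UNKNOWN_T}, false); // dropped without Accept
  add_column(cols, {"mystery", TYPE_UNKNOWN_T}, true);  // kept with Accept=1
  std::printf("%zu columns\n", cols.size());
}
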
diff --git a/storage/connect/tabmysql.cpp b/storage/connect/tabmysql.cpp
index bdddcf64ca8..d1e2ae69608 100644
--- a/storage/connect/tabmysql.cpp
+++ b/storage/connect/tabmysql.cpp
@@ -513,18 +513,8 @@ bool TDBMYSQL::MakeSelect(PGLOBAL g, bool mx)
if (Query)
return false; // already done
- if (Srcdef) {
- if (strstr(Srcdef, "%s")) {
- char *fil;
-
- fil = (To_CondFil) ? To_CondFil->Body : PlugDup(g, "1=1");
- Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil));
- Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil));
- } else
- Query = new(g)STRING(g, 0, Srcdef);
-
- return false;
- } // endif Srcdef
+ if (Srcdef)
+ return MakeSrcdef(g);
// Allocate the string used to contain Query
Query = new(g) STRING(g, 1023, "SELECT ");
@@ -1270,7 +1260,8 @@ MYSQLCOL::MYSQLCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PCSZ am)
: COLBLK(NULL, tdbp, i)
{
const char *chset = get_charset_name(fld->charsetnr);
- char v = (!strcmp(chset, "binary")) ? 'B' : 0;
+//char v = (!strcmp(chset, "binary")) ? 'B' : 0;
+ char v = 0;
Name = fld->name;
Opt = 0;
diff --git a/storage/connect/tabmysql.h b/storage/connect/tabmysql.h
index 3c37ae5bf3b..39fba87bcc9 100644
--- a/storage/connect/tabmysql.h
+++ b/storage/connect/tabmysql.h
@@ -69,6 +69,7 @@ class MYSQLDEF : public EXTDEF {/* Logical table description */
/***********************************************************************/
class TDBMYSQL : public TDBEXT {
friend class MYSQLCOL;
+ friend class TDBTBM;
public:
// Constructor
TDBMYSQL(PMYDEF tdp);
diff --git a/storage/connect/tabtbl.cpp b/storage/connect/tabtbl.cpp
index afd6b47c5a2..6e4a038ec92 100644
--- a/storage/connect/tabtbl.cpp
+++ b/storage/connect/tabtbl.cpp
@@ -578,10 +578,19 @@ pthread_handler_t ThreadOpen(void *p)
// Try to open the connection
if (!cmp->Tap->GetTo_Tdb()->OpenDB(cmp->G)) {
pthread_mutex_lock(&tblmut);
+ if (trace)
+ htrc("Table %s ready\n", cmp->Tap->GetName());
+
cmp->Ready = true;
pthread_mutex_unlock(&tblmut);
- } else
- cmp->Rc = RC_FX;
+ } else {
+ pthread_mutex_lock(&tblmut);
+ if (trace)
+ htrc("Opening %s failed\n", cmp->Tap->GetName());
+
+ cmp->Rc = RC_FX;
+ pthread_mutex_unlock(&tblmut);
+ } // endif OpenDB
my_thread_end();
} else
@@ -633,6 +642,18 @@ int TDBTBM::RowNumber(PGLOBAL g, bool b)
} // end of RowNumber
/***********************************************************************/
+/* Returns true if this MYSQL table refers to a local table. */
+/***********************************************************************/
+bool TDBTBM::IsLocal(PTABLE tbp)
+{
+ TDBMYSQL *tdbp = (TDBMYSQL*)tbp->GetTo_Tdb();
+
+ return ((!stricmp(tdbp->Host, "localhost") ||
+ !strcmp(tdbp->Host, "127.0.0.1")) &&
+ tdbp->Port == GetDefaultPort());
+} // end of IsLocal
+
+/***********************************************************************/
/* Initialyze table parallel processing. */
/***********************************************************************/
bool TDBTBM::OpenTables(PGLOBAL g)
@@ -644,10 +665,13 @@ bool TDBTBM::OpenTables(PGLOBAL g)
// Allocates the TBMT blocks for the tables
for (tabp = Tablist; tabp; tabp = tabp->Next)
- if (tabp->GetTo_Tdb()->GetAmType() == TYPE_AM_MYSQL) {
+ if (tabp->GetTo_Tdb()->GetAmType() == TYPE_AM_MYSQL && !IsLocal(tabp)) {
// Remove remote table from the local list
*ptabp = tabp->Next;
+ if (trace)
+ htrc("=====> New remote table %s\n", tabp->GetName());
+
// Make the remote table block
tp = (PTBMT)PlugSubAlloc(g, NULL, sizeof(TBMT));
memset(tp, 0, sizeof(TBMT));
@@ -671,7 +695,10 @@ bool TDBTBM::OpenTables(PGLOBAL g)
ptp = &tp->Next;
Nrc++; // Number of remote connections
} else {
- ptabp = &tabp->Next;
+ if (trace)
+ htrc("=====> Local table %s\n", tabp->GetName());
+
+ ptabp = &tabp->Next;
Nlc++; // Number of local connections
} // endif Type
@@ -788,7 +815,7 @@ int TDBTBM::ReadDB(PGLOBAL g)
/***********************************************************************/
int TDBTBM::ReadNextRemote(PGLOBAL g)
{
- bool b = false;
+ bool b;
if (Tdbp)
Tdbp->CloseDB(g);
@@ -796,17 +823,22 @@ int TDBTBM::ReadNextRemote(PGLOBAL g)
Cmp = NULL;
retry:
- // Search for a remote table having its result set
+ b = false;
+
+ // Search for a remote table having its result set
pthread_mutex_lock(&tblmut);
for (PTBMT tp = Tmp; tp; tp = tp->Next)
- if (tp->Ready) {
- if (!tp->Complete) {
- Cmp = tp;
- break;
- } // endif Complete
+ if (tp->Rc != RC_FX) {
+ if (tp->Ready) {
+ if (!tp->Complete) {
+ Cmp = tp;
+ break;
+ } // endif Complete
- } else
- b = true;
+ } else
+ b = true;
+
+ } // endif Rc
pthread_mutex_unlock(&tblmut);
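
The effect of the tabtbl.cpp changes above on remote-table scheduling can be pictured with this standalone sketch (all names are invented): connections whose opening thread failed are skipped outright, the first table with an unconsumed result set is served, and the caller retries only when something is still pending.

// Illustrative selection loop, roughly mirroring the hardened
// TDBTBM::ReadNextRemote retry logic; not CONNECT source.
#include <cstdio>
#include <vector>

enum Rc { OK, FAILED };

struct RemoteTable {
  Rc   rc = OK;
  bool ready = false;      // result set available
  bool complete = false;   // already consumed
};

static RemoteTable* pick_ready(std::vector<RemoteTable>& tabs, bool& still_pending)
{
  still_pending = false;

  for (RemoteTable& t : tabs) {
    if (t.rc == FAILED)
      continue;                        // a failed connection is never waited on
    if (t.ready) {
      if (!t.complete)
        return &t;                     // serve this one next
    } else {
      still_pending = true;            // at least one thread is still opening
    }
  }
  return nullptr;                      // nothing ready; retry iff still_pending
}

int main()
{
  std::vector<RemoteTable> tabs = {{FAILED}, {OK, true, false}, {OK}};
  bool pending;
  std::printf("picked=%d pending=%d\n",
              pick_ready(tabs, pending) != nullptr, (int) pending);
}
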
diff --git a/storage/connect/tabtbl.h b/storage/connect/tabtbl.h
index 3a5ec45d025..f02bf620aae 100644
--- a/storage/connect/tabtbl.h
+++ b/storage/connect/tabtbl.h
@@ -146,6 +146,7 @@ class DllExport TDBTBM : public TDBTBL {
protected:
// Internal functions
+ bool IsLocal(PTABLE tbp);
bool OpenTables(PGLOBAL g);
int ReadNextRemote(PGLOBAL g);
diff --git a/storage/connect/tabutil.cpp b/storage/connect/tabutil.cpp
index 158cf744a4a..9ab3d5e8806 100644
--- a/storage/connect/tabutil.cpp
+++ b/storage/connect/tabutil.cpp
@@ -186,7 +186,8 @@ PQRYRES TabColumns(PGLOBAL g, THD *thd, const char *db,
crp->Kdata->SetValue(colname, i);
chset = (char *)fp->charset()->name;
- v = (!strcmp(chset, "binary")) ? 'B' : 0;
+// v = (!strcmp(chset, "binary")) ? 'B' : 0;
+ v = 0;
if ((type = MYSQLtoPLG(fp->type(), &v)) == TYPE_ERROR) {
if (v == 'K') {
diff --git a/storage/connect/valblk.cpp b/storage/connect/valblk.cpp
index 5b98f3eb425..018c7ee3fe1 100644
--- a/storage/connect/valblk.cpp
+++ b/storage/connect/valblk.cpp
@@ -59,11 +59,12 @@ PVBLK AllocValBlock(PGLOBAL g, void *mp, int type, int nval, int len,
switch (type) {
case TYPE_STRING:
+ case TYPE_BIN:
case TYPE_DECIM:
if (len)
- blkp = new(g) CHRBLK(mp, nval, len, prec, blank);
+ blkp = new(g) CHRBLK(mp, nval, type, len, prec, blank);
else
- blkp = new(g) STRBLK(g, mp, nval);
+ blkp = new(g) STRBLK(g, mp, nval, type);
break;
case TYPE_SHORT:
@@ -615,8 +616,8 @@ int TYPBLK<TYPE>::GetMaxLength(void)
/***********************************************************************/
/* Constructor. */
/***********************************************************************/
-CHRBLK::CHRBLK(void *mp, int nval, int len, int prec, bool blank)
- : VALBLK(mp, TYPE_STRING, nval), Chrp((char*&)Blkp)
+CHRBLK::CHRBLK(void *mp, int nval, int type, int len, int prec, bool blank)
+ : VALBLK(mp, type, nval), Chrp((char*&)Blkp)
{
Valp = NULL;
Blanks = blank;
@@ -1008,8 +1009,8 @@ int CHRBLK::GetMaxLength(void)
/***********************************************************************/
/* Constructor. */
/***********************************************************************/
-STRBLK::STRBLK(PGLOBAL g, void *mp, int nval)
- : VALBLK(mp, TYPE_STRING, nval), Strp((PSZ*&)Blkp)
+STRBLK::STRBLK(PGLOBAL g, void *mp, int nval, int type)
+ : VALBLK(mp, type, nval), Strp((PSZ*&)Blkp)
{
Global = g;
Nullable = true;
diff --git a/storage/connect/valblk.h b/storage/connect/valblk.h
index 38a73424985..a3d7bf30fcf 100644
--- a/storage/connect/valblk.h
+++ b/storage/connect/valblk.h
@@ -214,7 +214,7 @@ class TYPBLK : public VALBLK {
class CHRBLK : public VALBLK {
public:
// Constructors
- CHRBLK(void *mp, int size, int len, int prec, bool b);
+ CHRBLK(void *mp, int size, int type, int len, int prec, bool b);
// Implementation
virtual bool Init(PGLOBAL g, bool check);
@@ -267,7 +267,7 @@ class CHRBLK : public VALBLK {
class STRBLK : public VALBLK {
public:
// Constructors
- STRBLK(PGLOBAL g, void *mp, int size);
+ STRBLK(PGLOBAL g, void *mp, int size, int type);
// Implementation
virtual void SetNull(int n, bool b) {if (b) {Strp[n] = NULL;}}
@@ -345,7 +345,7 @@ class PTRBLK : public STRBLK {
bool, bool, bool);
protected:
// Constructors
- PTRBLK(PGLOBAL g, void *mp, int size) : STRBLK(g, mp, size) {}
+ PTRBLK(PGLOBAL g, void *mp, int size) : STRBLK(g, mp, size, TYPE_PCHAR) {}
// Implementation
diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp
index 60d1c2f459c..a80da808548 100644
--- a/storage/connect/value.cpp
+++ b/storage/connect/value.cpp
@@ -176,7 +176,7 @@ int GetTypeSize(int type, int len)
case TYPE_DOUBLE: len = sizeof(double); break;
case TYPE_TINY: len = sizeof(char); break;
case TYPE_PCHAR: len = sizeof(char*); break;
- default: len = 0;
+ default: len = -1;
} // endswitch type
return len;
@@ -236,6 +236,7 @@ bool IsTypeChar(int type)
switch (type) {
case TYPE_STRING:
case TYPE_DECIM:
+ case TYPE_BIN:
return true;
} // endswitch type
@@ -1369,7 +1370,7 @@ bool TYPVAL<PSZ>::SetValue_char(const char *cp, int n)
if (!cp || n == 0) {
Reset();
- Null = Nullable;
+ Null = (cp) ? false : Nullable;
} else if (cp != Strp) {
const char *p = cp + n - 1;
@@ -1858,8 +1859,9 @@ int DECVAL::CompareValue(PVAL vp)
BINVAL::BINVAL(PGLOBAL g, void *p, int cl, int n) : VALUE(TYPE_BIN)
{
assert(g);
- Len = n;
- Clen = cl;
+//Len = n;
+ Len = (g) ? n : (p) ? strlen((char*)p) : 0;
+ Clen = cl;
Binp = PlugSubAlloc(g, NULL, Clen + 1);
memset(Binp, 0, Clen + 1);
@@ -1992,10 +1994,15 @@ bool BINVAL::SetValue_pval(PVAL valp, bool chktype)
return true;
if (!(Null = valp->IsNull() && Nullable)) {
- if ((rc = (Len = valp->GetSize()) > Clen))
+ int len = Len;
+
+ if ((rc = (Len = valp->GetSize()) > Clen))
Len = Clen;
+ else if (len > Len)
+ memset(Binp, 0, len);
memcpy(Binp, valp->GetTo_Val(), Len);
+ ((char*)Binp)[Len] = 0;
} else
Reset();
@@ -2012,10 +2019,15 @@ bool BINVAL::SetValue_char(const char *p, int n)
bool rc;
if (p && n > 0) {
- rc = n > Clen;
- Len = MY_MIN(n, Clen);
- memcpy(Binp, p, Len);
- Null = false;
+ int len = Len;
+
+ if (len > (Len = MY_MIN(n, Clen)))
+ memset(Binp, 0, len);
+
+ memcpy(Binp, p, Len);
+ ((char*)Binp)[Len] = 0;
+ rc = n > Clen;
+ Null = false;
} else {
rc = false;
Reset();
@@ -2031,9 +2043,14 @@ bool BINVAL::SetValue_char(const char *p, int n)
void BINVAL::SetValue_psz(PCSZ s)
{
if (s) {
- Len = MY_MIN(Clen, (signed)strlen(s));
- memcpy(Binp, s, Len);
- Null = false;
+ int len = Len;
+
+ if (len > (Len = MY_MIN(Clen, (signed)strlen(s))))
+ memset(Binp, 0, len);
+
+ memcpy(Binp, s, Len);
+ ((char*)Binp)[Len] = 0;
+ Null = false;
} else {
Reset();
Null = Nullable;
@@ -2053,14 +2070,19 @@ void BINVAL::SetValue_pvblk(PVBLK blk, int n)
Reset();
Null = Nullable;
} else if (vp != Binp) {
+ int len = Len;
+
if (blk->GetType() == TYPE_STRING)
Len = strlen((char*)vp);
else
Len = blk->GetVlen();
- Len = MY_MIN(Clen, Len);
+ if (len > (Len = MY_MIN(Clen, Len)))
+ memset(Binp, 0, len);
+
memcpy(Binp, vp, Len);
- Null = false;
+ ((char*)Binp)[Len] = 0;
+ Null = false;
} // endif vp
} // end of SetValue_pvblk
@@ -2071,7 +2093,10 @@ void BINVAL::SetValue_pvblk(PVBLK blk, int n)
void BINVAL::SetValue(int n)
{
if (Clen >= 4) {
- *((int*)Binp) = n;
+ if (Len > 4)
+ memset(Binp, 0, Len);
+
+ *((int*)Binp) = n;
Len = 4;
} else
SetValue((short)n);
@@ -2084,7 +2109,10 @@ void BINVAL::SetValue(int n)
void BINVAL::SetValue(uint n)
{
if (Clen >= 4) {
- *((uint*)Binp) = n;
+ if (Len > 4)
+ memset(Binp, 0, Len);
+
+ *((uint*)Binp) = n;
Len = 4;
} else
SetValue((ushort)n);
@@ -2097,7 +2125,10 @@ void BINVAL::SetValue(uint n)
void BINVAL::SetValue(short i)
{
if (Clen >= 2) {
- *((int*)Binp) = i;
+ if (Len > 2)
+ memset(Binp, 0, Len);
+
+ *((int*)Binp) = i;
Len = 2;
} else
SetValue((char)i);
@@ -2110,7 +2141,10 @@ void BINVAL::SetValue(short i)
void BINVAL::SetValue(ushort i)
{
if (Clen >= 2) {
- *((uint*)Binp) = i;
+ if (Len > 2)
+ memset(Binp, 0, Len);
+
+ *((uint*)Binp) = i;
Len = 2;
} else
SetValue((uchar)i);
@@ -2123,7 +2157,10 @@ void BINVAL::SetValue(ushort i)
void BINVAL::SetValue(longlong n)
{
if (Clen >= 8) {
- *((longlong*)Binp) = n;
+ if (Len > 8)
+ memset(Binp, 0, Len);
+
+ *((longlong*)Binp) = n;
Len = 8;
} else
SetValue((int)n);
@@ -2136,7 +2173,10 @@ void BINVAL::SetValue(longlong n)
void BINVAL::SetValue(ulonglong n)
{
if (Clen >= 8) {
- *((ulonglong*)Binp) = n;
+ if (Len > 8)
+ memset(Binp, 0, Len);
+
+ *((ulonglong*)Binp) = n;
Len = 8;
} else
SetValue((uint)n);
@@ -2147,6 +2187,9 @@ void BINVAL::SetValue(ulonglong n)
/***********************************************************************/
void BINVAL::SetValue(double n)
{
+ if (Len > 8)
+ memset(Binp, 0, Len);
+
if (Clen >= 8) {
*((double*)Binp) = n;
Len = 8;
@@ -2163,7 +2206,10 @@ void BINVAL::SetValue(double n)
/***********************************************************************/
void BINVAL::SetValue(char c)
{
- *((char*)Binp) = c;
+ if (Len > 1)
+ memset(Binp, 0, Len);
+
+ *((char*)Binp) = c;
Len = 1;
} // end of SetValue
@@ -2172,7 +2218,10 @@ void BINVAL::SetValue(char c)
/***********************************************************************/
void BINVAL::SetValue(uchar c)
{
- *((uchar*)Binp) = c;
+ if (Len > 1)
+ memset(Binp, 0, Len);
+
+ *((uchar*)Binp) = c;
Len = 1;
} // end of SetValue
@@ -2182,6 +2231,7 @@ void BINVAL::SetValue(uchar c)
void BINVAL::SetBinValue(void *p)
{
memcpy(Binp, p, Clen);
+ Len = Clen;
} // end of SetBinValue
/***********************************************************************/
@@ -2207,10 +2257,11 @@ bool BINVAL::GetBinValue(void *buf, int buflen, bool go)
/***********************************************************************/
char *BINVAL::ShowValue(char *buf, int len)
{
- int n = MY_MIN(Len, len / 2);
+ //int n = MY_MIN(Len, len / 2);
- sprintf(buf, GetXfmt(), n, Binp);
- return buf;
+ //sprintf(buf, GetXfmt(), n, Binp);
+ //return buf;
+ return (char*)Binp;
} // end of ShowValue
/***********************************************************************/
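
The BINVAL setters above all repeat one discipline; here it is once, in a minimal standalone form with invented names: when the new value occupies fewer bytes than the previous one, the stale tail is zeroed before copying, and the buffer is terminated at the new length.

// Minimal sketch of the zero-pad-then-copy pattern; not CONNECT source.
#include <algorithm>
#include <cstdio>
#include <cstring>

struct BinBuf {
  char   data[16] = {0};
  size_t clen = sizeof(data) - 1;   // allocated payload size
  size_t len  = 0;                  // bytes currently in use

  void set(const void* p, size_t n)
  {
    size_t old_len = len;
    len = std::min(n, clen);
    if (old_len > len)
      std::memset(data, 0, old_len); // wipe leftovers of the longer old value
    std::memcpy(data, p, len);
    data[len] = '\0';
  }
};

int main()
{
  BinBuf b;
  b.set("ABCDEFGH", 8);
  b.set("xy", 2);                    // old tail "CDEFGH" is cleared, not leaked
  std::printf("%s (len=%zu)\n", b.data, b.len);
}
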
diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc
index e35da50ba96..059113e2fa5 100644
--- a/storage/federated/ha_federated.cc
+++ b/storage/federated/ha_federated.cc
@@ -2980,6 +2980,9 @@ int ha_federated::reset(void)
}
reset_dynamic(&results);
+ if (mysql)
+ mysql->net.thd= NULL;
+
return 0;
}
@@ -3200,12 +3203,14 @@ int ha_federated::real_query(const char *query, size_t length)
int rc= 0;
DBUG_ENTER("ha_federated::real_query");
- if (!mysql && (rc= real_connect()))
+ if (!query || !length)
goto end;
- if (!query || !length)
+ if (!mysql && (rc= real_connect()))
goto end;
+ mysql->net.thd= table->in_use;
+
rc= mysql_real_query(mysql, query, (uint) length);
end:
@@ -3289,66 +3294,6 @@ int ha_federated::external_lock(THD *thd, int lock_type)
int error= 0;
DBUG_ENTER("ha_federated::external_lock");
- /*
- Support for transactions disabled until WL#2952 fixes it.
- */
-#ifdef XXX_SUPERCEDED_BY_WL2952
- if (lock_type != F_UNLCK)
- {
- ha_federated *trx= (ha_federated *)thd_get_ha_data(thd, ht);
-
- DBUG_PRINT("info",("federated not lock F_UNLCK"));
- if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
- {
- DBUG_PRINT("info",("federated autocommit"));
- /*
- This means we are doing an autocommit
- */
- error= connection_autocommit(TRUE);
- if (error)
- {
- DBUG_PRINT("info", ("error setting autocommit TRUE: %d", error));
- DBUG_RETURN(error);
- }
- trans_register_ha(thd, FALSE, ht);
- }
- else
- {
- DBUG_PRINT("info",("not autocommit"));
- if (!trx)
- {
- /*
- This is where a transaction gets its start
- */
- error= connection_autocommit(FALSE);
- if (error)
- {
- DBUG_PRINT("info", ("error setting autocommit FALSE: %d", error));
- DBUG_RETURN(error);
- }
- thd_set_ha_data(thd, ht, this);
- trans_register_ha(thd, TRUE, ht);
- /*
- Send a lock table to the remote end.
- We do not support this at the moment
- */
- if (thd->options & (OPTION_TABLE_LOCK))
- {
- DBUG_PRINT("info", ("We do not support lock table yet"));
- }
- }
- else
- {
- ha_federated *ptr;
- for (ptr= trx; ptr; ptr= ptr->trx_next)
- if (ptr == this)
- break;
- else if (!ptr->trx_next)
- ptr->trx_next= this;
- }
- }
- }
-#endif /* XXX_SUPERCEDED_BY_WL2952 */
table_will_be_deleted = FALSE;
DBUG_RETURN(error);
}
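
The reordering in ha_federated::real_query and the companion change in reset() amount to the small pattern below, shown standalone with invented names: cheap argument checks come before the lazy connect, and the calling thread's context is attached to the connection only while a query is in flight.

// Illustrative sketch only; the real handler uses MYSQL/THD objects.
#include <cstddef>
#include <cstdio>

struct Conn { const void* owner_thd = nullptr; bool connected = false; };

static int real_query(Conn& c, const char* query, std::size_t length,
                      const void* current_thd)
{
  if (!query || !length)
    return 0;                          // empty query: do not even connect

  if (!c.connected)
    c.connected = true;                // stands in for the lazy real_connect()

  c.owner_thd = current_thd;           // give callbacks the right thread context
  std::printf("executing: %.*s\n", (int) length, query);
  return 0;
}

static void reset(Conn& c) { c.owner_thd = nullptr; }  // as ha_federated::reset() does

int main()
{
  Conn c;
  int dummy_thd = 0;
  real_query(c, "SELECT 1", 8, &dummy_thd);
  reset(c);
}
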
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index 815324825bd..3e9f26ad125 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -720,8 +720,9 @@ btr_page_free_low(
ulint* offsets = NULL;
rec_t* rec = page_rec_get_next(page_get_infimum_rec(page));
while (!page_rec_is_supremum(rec)) {
- offsets = rec_get_offsets(rec, index,
- offsets, ULINT_UNDEFINED,
+ offsets = rec_get_offsets(rec, index, offsets,
+ page_is_leaf(page),
+ ULINT_UNDEFINED,
&heap);
ulint size = rec_offs_data_size(offsets);
memset(rec, 0, size);
@@ -832,7 +833,7 @@ btr_node_ptr_set_child_page_no(
ulint len;
ut_ad(rec_offs_validate(rec, NULL, offsets));
- ut_ad(!page_is_leaf(page_align(rec)));
+ ut_ad(!page_rec_is_leaf(rec));
ut_ad(!rec_offs_comp(offsets) || rec_get_node_ptr_flag(rec));
/* The child address is in the last field */
@@ -937,7 +938,7 @@ btr_page_get_father_node_ptr_func(
node_ptr = btr_cur_get_rec(cursor);
- offsets = rec_get_offsets(node_ptr, index, offsets,
+ offsets = rec_get_offsets(node_ptr, index, offsets, false,
ULINT_UNDEFINED, &heap);
if (btr_node_ptr_get_child_page_no(node_ptr, offsets) != page_no) {
@@ -953,10 +954,11 @@ btr_page_get_father_node_ptr_func(
print_rec = page_rec_get_next(
page_get_infimum_rec(page_align(user_rec)));
- offsets = rec_get_offsets(print_rec, index,
- offsets, ULINT_UNDEFINED, &heap);
+ offsets = rec_get_offsets(print_rec, index, offsets,
+ page_rec_is_leaf(user_rec),
+ ULINT_UNDEFINED, &heap);
page_rec_print(print_rec, offsets);
- offsets = rec_get_offsets(node_ptr, index, offsets,
+ offsets = rec_get_offsets(node_ptr, index, offsets, false,
ULINT_UNDEFINED, &heap);
page_rec_print(node_ptr, offsets);
@@ -2275,9 +2277,9 @@ btr_page_get_split_rec(
/* Include tuple */
incl_data += insert_size;
} else {
- offsets = rec_get_offsets(rec, cursor->index,
- offsets, ULINT_UNDEFINED,
- &heap);
+ offsets = rec_get_offsets(rec, cursor->index, offsets,
+ page_is_leaf(page),
+ ULINT_UNDEFINED, &heap);
incl_data += rec_offs_size(offsets);
}
@@ -2385,6 +2387,7 @@ btr_page_insert_fits(
space after rec is removed from page. */
*offsets = rec_get_offsets(rec, cursor->index, *offsets,
+ page_is_leaf(page),
ULINT_UNDEFINED, heap);
total_data -= rec_offs_size(*offsets);
@@ -2673,7 +2676,7 @@ btr_page_tuple_smaller(
first_rec = page_cur_get_rec(&pcur);
*offsets = rec_get_offsets(
- first_rec, cursor->index, *offsets,
+ first_rec, cursor->index, *offsets, page_is_leaf(block->frame),
n_uniq, heap);
return(cmp_dtuple_rec(tuple, first_rec, *offsets) < 0);
@@ -2980,7 +2983,7 @@ func_start:
first_rec = move_limit = split_rec;
*offsets = rec_get_offsets(split_rec, cursor->index, *offsets,
- n_uniq, heap);
+ page_is_leaf(page), n_uniq, heap);
if (tuple != NULL) {
insert_left = cmp_dtuple_rec(
@@ -3808,8 +3811,9 @@ retry:
cursor2.tree_height = cursor->tree_height;
offsets2 = rec_get_offsets(
- btr_cur_get_rec(&cursor2), index,
- NULL, ULINT_UNDEFINED, &heap);
+ btr_cur_get_rec(&cursor2), index, NULL,
+ page_is_leaf(cursor2.page_cur.block->frame),
+ ULINT_UNDEFINED, &heap);
/* Check if parent entry needs to be updated */
mbr_changed = rtr_merge_mbr_changed(
@@ -3989,8 +3993,9 @@ retry:
ulint rec_info;
offsets2 = rec_get_offsets(
- btr_cur_get_rec(&cursor2),
- index, NULL, ULINT_UNDEFINED, &heap);
+ btr_cur_get_rec(&cursor2), index, NULL,
+ page_is_leaf(cursor2.page_cur.block->frame),
+ ULINT_UNDEFINED, &heap);
ut_ad(btr_node_ptr_get_child_page_no(
btr_cur_get_rec(&cursor2), offsets2)
@@ -4468,8 +4473,9 @@ btr_print_recursive(
node_ptr = page_cur_get_rec(&cursor);
- *offsets = rec_get_offsets(node_ptr, index, *offsets,
- ULINT_UNDEFINED, heap);
+ *offsets = rec_get_offsets(
+ node_ptr, index, *offsets, false,
+ ULINT_UNDEFINED, heap);
btr_print_recursive(index,
btr_node_ptr_get_child(node_ptr,
index,
@@ -4662,7 +4668,8 @@ btr_index_rec_validate(
return(FALSE);
}
- offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap);
+ offsets = rec_get_offsets(rec, index, offsets, page_is_leaf(page),
+ ULINT_UNDEFINED, &heap);
for (i = 0; i < n; i++) {
dict_field_t* field = dict_index_get_nth_field(index, i);
@@ -4916,7 +4923,7 @@ btr_validate_level(
page_cur_move_to_next(&cursor);
node_ptr = page_cur_get_rec(&cursor);
- offsets = rec_get_offsets(node_ptr, index, offsets,
+ offsets = rec_get_offsets(node_ptr, index, offsets, false,
ULINT_UNDEFINED, &heap);
savepoint2 = mtr_set_savepoint(&mtr);
@@ -5042,10 +5049,12 @@ loop:
rec = page_rec_get_prev(page_get_supremum_rec(page));
right_rec = page_rec_get_next(page_get_infimum_rec(
right_page));
- offsets = rec_get_offsets(rec, index,
- offsets, ULINT_UNDEFINED, &heap);
- offsets2 = rec_get_offsets(right_rec, index,
- offsets2, ULINT_UNDEFINED, &heap);
+ offsets = rec_get_offsets(rec, index, offsets,
+ page_is_leaf(page),
+ ULINT_UNDEFINED, &heap);
+ offsets2 = rec_get_offsets(right_rec, index, offsets2,
+ page_is_leaf(right_page),
+ ULINT_UNDEFINED, &heap);
/* For spatial index, we cannot guarantee the key ordering
across pages, so skip the record compare verification for
diff --git a/storage/innobase/btr/btr0bulk.cc b/storage/innobase/btr/btr0bulk.cc
index 8a954f9d3c3..139e3116d06 100644
--- a/storage/innobase/btr/btr0bulk.cc
+++ b/storage/innobase/btr/btr0bulk.cc
@@ -176,7 +176,8 @@ PageBulk::insert(
if (!page_rec_is_infimum(m_cur_rec)) {
rec_t* old_rec = m_cur_rec;
ulint* old_offsets = rec_get_offsets(
- old_rec, m_index, NULL, ULINT_UNDEFINED, &m_heap);
+ old_rec, m_index, NULL, page_rec_is_leaf(old_rec),
+ ULINT_UNDEFINED, &m_heap);
ut_ad(cmp_rec_rec(rec, old_rec, offsets, old_offsets, m_index)
> 0);
@@ -377,9 +378,9 @@ PageBulk::getSplitRec()
rec = page_rec_get_next(rec);
ut_ad(page_rec_is_user_rec(rec));
- offsets = rec_get_offsets(rec, m_index,
- offsets, ULINT_UNDEFINED,
- &(m_heap));
+ offsets = rec_get_offsets(rec, m_index, offsets,
+ page_is_leaf(m_page),
+ ULINT_UNDEFINED, &m_heap);
total_recs_size += rec_offs_size(offsets);
n_recs++;
} while (total_recs_size + page_dir_calc_reserved_space(n_recs)
@@ -409,7 +410,8 @@ PageBulk::copyIn(
do {
offsets = rec_get_offsets(rec, m_index, offsets,
- ULINT_UNDEFINED, &(m_heap));
+ page_rec_is_leaf(split_rec),
+ ULINT_UNDEFINED, &m_heap);
insert(rec, offsets);
@@ -449,18 +451,18 @@ PageBulk::copyOut(
/* Set last record's next in page */
ulint* offsets = NULL;
rec = page_rec_get_prev(split_rec);
- offsets = rec_get_offsets(rec, m_index,
- offsets, ULINT_UNDEFINED,
- &(m_heap));
+ offsets = rec_get_offsets(rec, m_index, offsets,
+ page_rec_is_leaf(split_rec),
+ ULINT_UNDEFINED, &m_heap);
page_rec_set_next(rec, page_get_supremum_rec(m_page));
/* Set related members */
m_cur_rec = rec;
m_heap_top = rec_get_end(rec, offsets);
- offsets = rec_get_offsets(last_rec, m_index,
- offsets, ULINT_UNDEFINED,
- &(m_heap));
+ offsets = rec_get_offsets(last_rec, m_index, offsets,
+ page_rec_is_leaf(split_rec),
+ ULINT_UNDEFINED, &m_heap);
m_free_space += rec_get_end(last_rec, offsets)
- m_heap_top
@@ -876,8 +878,8 @@ BtrBulk::insert(
/* Convert tuple to rec. */
rec = rec_convert_dtuple_to_rec(static_cast<byte*>(mem_heap_alloc(
page_bulk->m_heap, rec_size)), m_index, tuple, n_ext);
- offsets = rec_get_offsets(rec, m_index, offsets, ULINT_UNDEFINED,
- &(page_bulk->m_heap));
+ offsets = rec_get_offsets(rec, m_index, offsets, !level,
+ ULINT_UNDEFINED, &page_bulk->m_heap);
page_bulk->insert(rec, offsets);
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 40317c54334..63eac83337f 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -1545,8 +1545,8 @@ retry_page_get:
node_ptr = page_cur_get_rec(page_cursor);
- offsets = rec_get_offsets(
- node_ptr, index, offsets, ULINT_UNDEFINED, &heap);
+ offsets = rec_get_offsets(node_ptr, index, offsets, false,
+ ULINT_UNDEFINED, &heap);
/* If the rec is the first or last in the page for
pessimistic delete intention, it might cause node_ptr insert
@@ -1662,10 +1662,8 @@ need_opposite_intention:
&& latch_mode == BTR_MODIFY_TREE
&& (up_match >= rec_offs_n_fields(offsets) - 1
|| low_match >= rec_offs_n_fields(offsets) - 1)) {
- const rec_t* first_rec
- = page_rec_get_next_const(
- page_get_infimum_rec(
- page));
+ const rec_t* first_rec = page_rec_get_next_const(
+ page_get_infimum_rec(page));
ulint matched_fields;
ut_ad(upper_rw_latch == RW_X_LATCH);
@@ -1678,7 +1676,7 @@ need_opposite_intention:
offsets2 = rec_get_offsets(
first_rec, index, offsets2,
- ULINT_UNDEFINED, &heap);
+ false, ULINT_UNDEFINED, &heap);
cmp_rec_rec_with_match(node_ptr, first_rec,
offsets, offsets2, index, FALSE,
&matched_fields);
@@ -1690,14 +1688,13 @@ need_opposite_intention:
const rec_t* last_rec;
last_rec = page_rec_get_prev_const(
- page_get_supremum_rec(
- page));
+ page_get_supremum_rec(page));
matched_fields = 0;
offsets2 = rec_get_offsets(
last_rec, index, offsets2,
- ULINT_UNDEFINED, &heap);
+ false, ULINT_UNDEFINED, &heap);
cmp_rec_rec_with_match(
node_ptr, last_rec,
offsets, offsets2, index,
@@ -1854,14 +1851,9 @@ need_opposite_intention:
cursor->rtr_info->path;
if (!path->empty() && found) {
-#ifdef UNIV_DEBUG
- node_visit_t last_visit = path->back();
-
- ut_ad(last_visit.page_no == page_id.page_no());
-#endif /* UNIV_DEBUG */
-
+ ut_ad(path->back().page_no
+ == page_id.page_no());
path->pop_back();
-
#ifdef UNIV_DEBUG
if (page_mode == PAGE_CUR_RTREE_LOCATE
&& (latch_mode != BTR_MODIFY_LEAF)) {
@@ -1873,14 +1865,13 @@ need_opposite_intention:
offsets = rec_get_offsets(
my_node_ptr, index, offsets,
- ULINT_UNDEFINED, &heap);
+ false, ULINT_UNDEFINED, &heap);
ulint my_page_no
= btr_node_ptr_get_child_page_no(
my_node_ptr, offsets);
ut_ad(page_id.page_no() == my_page_no);
-
}
#endif
}
@@ -2316,7 +2307,7 @@ btr_cur_open_at_index_side_func(
node_ptr = page_cur_get_rec(page_cursor);
offsets = rec_get_offsets(node_ptr, cursor->index, offsets,
- ULINT_UNDEFINED, &heap);
+ false, ULINT_UNDEFINED, &heap);
/* If the rec is the first or last in the page for
pessimistic delete intention, it might cause node_ptr insert
@@ -2612,7 +2603,7 @@ btr_cur_open_at_rnd_pos_func(
node_ptr = page_cur_get_rec(page_cursor);
offsets = rec_get_offsets(node_ptr, cursor->index, offsets,
- ULINT_UNDEFINED, &heap);
+ false, ULINT_UNDEFINED, &heap);
/* If the rec is the first or last in the page for
pessimistic delete intention, it might cause node_ptr insert
@@ -3086,10 +3077,11 @@ fail_err:
}
#ifdef BTR_CUR_HASH_ADAPT
+ if (!leaf) {
# ifdef MYSQL_INDEX_DISABLE_AHI
- if (index->disable_ahi); else
+ } else if (index->disable_ahi) {
# endif
- if (!reorg && leaf && (cursor->flag == BTR_CUR_HASH)) {
+ } else if (!reorg && cursor->flag == BTR_CUR_HASH) {
btr_search_update_hash_node_on_insert(cursor);
} else {
btr_search_update_hash_on_insert(cursor);
@@ -3285,15 +3277,19 @@ btr_cur_pessimistic_insert(
}
}
+ if (!page_is_leaf(btr_cur_get_page(cursor))) {
+ ut_ad(!big_rec_vec);
+ } else {
#ifdef BTR_CUR_HASH_ADAPT
# ifdef MYSQL_INDEX_DISABLE_AHI
- if (index->disable_ahi); else
+ if (index->disable_ahi); else
# endif
- btr_search_update_hash_on_insert(cursor);
+ btr_search_update_hash_on_insert(cursor);
#endif /* BTR_CUR_HASH_ADAPT */
- if (inherit && !(flags & BTR_NO_LOCKING_FLAG)) {
+ if (inherit && !(flags & BTR_NO_LOCKING_FLAG)) {
- lock_update_insert(btr_cur_get_block(cursor), *rec);
+ lock_update_insert(btr_cur_get_block(cursor), *rec);
+ }
}
if (n_reserved > 0) {
@@ -3489,7 +3485,8 @@ btr_cur_parse_update_in_place(
/* We do not need to reserve search latch, as the page is only
being recovered, and there cannot be a hash index to it. */
- offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap);
+ offsets = rec_get_offsets(rec, index, NULL, true,
+ ULINT_UNDEFINED, &heap);
if (!(flags & BTR_KEEP_SYS_FLAG)) {
row_upd_rec_sys_fields_in_recovery(rec, page_zip, offsets,
@@ -3813,7 +3810,7 @@ btr_cur_optimistic_update(
ut_ad(fil_page_index_page_check(page));
ut_ad(btr_page_get_index_id(page) == index->id);
- *offsets = rec_get_offsets(rec, index, *offsets,
+ *offsets = rec_get_offsets(rec, index, *offsets, true,
ULINT_UNDEFINED, heap);
#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
ut_a(!rec_offs_any_null_extern(rec, *offsets)
@@ -4180,7 +4177,8 @@ btr_cur_pessimistic_update(
rec = btr_cur_get_rec(cursor);
*offsets = rec_get_offsets(
- rec, index, *offsets, ULINT_UNDEFINED, offsets_heap);
+ rec, index, *offsets, page_is_leaf(page),
+ ULINT_UNDEFINED, offsets_heap);
dtuple_t* new_entry = row_rec_to_index_entry(
rec, index, *offsets, &n_ext, entry_heap);
@@ -4614,7 +4612,7 @@ btr_cur_parse_del_mark_set_clust_rec(
if (!(flags & BTR_KEEP_SYS_FLAG)) {
row_upd_rec_sys_fields_in_recovery(
rec, page_zip,
- rec_get_offsets(rec, index, offsets,
+ rec_get_offsets(rec, index, offsets, true,
pos + 2, &heap),
pos, trx_id, roll_ptr);
} else {
@@ -4623,8 +4621,8 @@ btr_cur_parse_del_mark_set_clust_rec(
ut_ad(memcmp(rec_get_nth_field(
rec,
rec_get_offsets(rec, index,
- offsets, pos,
- &heap),
+ offsets, true,
+ pos, &heap),
pos, &offset),
field_ref_zero, DATA_TRX_ID_LEN));
ut_ad(offset == DATA_TRX_ID_LEN);
@@ -4665,7 +4663,7 @@ btr_cur_del_mark_set_clust_rec(
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
ut_ad(buf_block_get_frame(block) == page_align(rec));
- ut_ad(page_is_leaf(page_align(rec)));
+ ut_ad(page_rec_is_leaf(rec));
ut_ad(mtr->is_named_space(index->space));
if (rec_get_deleted_flag(rec, rec_offs_comp(offsets))) {
@@ -4959,7 +4957,7 @@ btr_cur_optimistic_delete_func(
|| (flags & BTR_CREATE_FLAG));
rec = btr_cur_get_rec(cursor);
- offsets = rec_get_offsets(rec, cursor->index, offsets,
+ offsets = rec_get_offsets(rec, cursor->index, offsets, true,
ULINT_UNDEFINED, &heap);
no_compress_needed = !rec_offs_any_extern(offsets)
@@ -5103,7 +5101,8 @@ btr_cur_pessimistic_delete(
ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
- offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap);
+ offsets = rec_get_offsets(rec, index, NULL, page_is_leaf(page),
+ ULINT_UNDEFINED, &heap);
if (rec_offs_any_extern(offsets)) {
btr_rec_free_externally_stored_fields(index,
@@ -5134,10 +5133,9 @@ btr_cur_pessimistic_delete(
level = btr_page_get_level(page, mtr);
- if (level > 0
- && UNIV_UNLIKELY(rec == page_rec_get_next(
- page_get_infimum_rec(page)))) {
-
+ if (level == 0) {
+ btr_search_update_hash_on_delete(cursor);
+ } else if (UNIV_UNLIKELY(page_rec_is_first(rec, page))) {
rec_t* next_rec = page_rec_get_next(rec);
if (btr_page_get_prev(page, mtr) == FIL_NULL) {
@@ -5166,8 +5164,8 @@ btr_cur_pessimistic_delete(
block, mtr, NULL,
&father_cursor);
offsets = rec_get_offsets(
- btr_cur_get_rec(&father_cursor), index,
- NULL, ULINT_UNDEFINED, &heap);
+ btr_cur_get_rec(&father_cursor), index, NULL,
+ false, ULINT_UNDEFINED, &heap);
father_rec = btr_cur_get_rec(&father_cursor);
rtr_read_mbr(rec_get_nth_field(
@@ -5204,8 +5202,6 @@ btr_cur_pessimistic_delete(
}
}
- btr_search_update_hash_on_delete(cursor);
-
page_cur_delete_rec(btr_cur_get_page_cur(cursor), index, offsets, mtr);
#ifdef UNIV_ZIP_DEBUG
ut_a(!page_zip || page_zip_validate(page_zip, page, index));
@@ -6061,10 +6057,12 @@ btr_estimate_number_of_different_key_vals(
page = btr_cur_get_page(&cursor);
rec = page_rec_get_next(page_get_infimum_rec(page));
+ ut_d(const bool is_leaf = page_is_leaf(page));
if (!page_rec_is_supremum(rec)) {
not_empty_flag = 1;
offsets_rec = rec_get_offsets(rec, index, offsets_rec,
+ is_leaf,
ULINT_UNDEFINED, &heap);
if (n_not_null != NULL) {
@@ -6085,6 +6083,7 @@ btr_estimate_number_of_different_key_vals(
offsets_next_rec = rec_get_offsets(next_rec, index,
offsets_next_rec,
+ is_leaf,
ULINT_UNDEFINED,
&heap);
@@ -7353,6 +7352,8 @@ btr_rec_free_externally_stored_fields(
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(mtr_is_page_fix(mtr, rec, MTR_MEMO_PAGE_X_FIX, index->table));
+ ut_ad(dict_index_is_clust(index));
+ ut_ad(page_rec_is_leaf(rec));
/* Free possible externally stored fields in the record */
ut_ad(dict_table_is_comp(index->table) == !!rec_offs_comp(offsets));
diff --git a/storage/innobase/btr/btr0defragment.cc b/storage/innobase/btr/btr0defragment.cc
index d4b83930191..335b4fc220d 100644
--- a/storage/innobase/btr/btr0defragment.cc
+++ b/storage/innobase/btr/btr0defragment.cc
@@ -402,6 +402,7 @@ btr_defragment_calc_n_recs_for_size(
while (page_cur_get_rec(&cur) != page_get_supremum_rec(page)) {
rec_t* cur_rec = page_cur_get_rec(&cur);
offsets = rec_get_offsets(cur_rec, index, offsets,
+ page_is_leaf(page),
ULINT_UNDEFINED, &heap);
ulint rec_size = rec_offs_size(offsets);
size += rec_size;
diff --git a/storage/innobase/btr/btr0pcur.cc b/storage/innobase/btr/btr0pcur.cc
index fdde6f5d3e7..2b85c764a3b 100644
--- a/storage/innobase/btr/btr0pcur.cc
+++ b/storage/innobase/btr/btr0pcur.cc
@@ -303,10 +303,10 @@ btr_pcur_restore_position_func(
heap = mem_heap_create(256);
offsets1 = rec_get_offsets(
- cursor->old_rec, index, NULL,
+ cursor->old_rec, index, NULL, true,
cursor->old_n_fields, &heap);
offsets2 = rec_get_offsets(
- rec, index, NULL,
+ rec, index, NULL, true,
cursor->old_n_fields, &heap);
ut_ad(!cmp_rec_rec(cursor->old_rec,
@@ -331,7 +331,7 @@ btr_pcur_restore_position_func(
heap = mem_heap_create(256);
- tuple = dict_index_build_data_tuple(index, cursor->old_rec,
+ tuple = dict_index_build_data_tuple(cursor->old_rec, index, true,
cursor->old_n_fields, heap);
/* Save the old search mode of the cursor */
@@ -365,7 +365,8 @@ btr_pcur_restore_position_func(
&& btr_pcur_is_on_user_rec(cursor)
&& !cmp_dtuple_rec(tuple, btr_pcur_get_rec(cursor),
rec_get_offsets(btr_pcur_get_rec(cursor),
- index, NULL, ULINT_UNDEFINED, &heap))) {
+ index, NULL, true,
+ ULINT_UNDEFINED, &heap))) {
/* We have to store the NEW value for the modify clock,
since the cursor can now be on a different page!
diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc
index 3ae9e95819a..750c2506ff5 100644
--- a/storage/innobase/btr/btr0sea.cc
+++ b/storage/innobase/btr/btr0sea.cc
@@ -609,8 +609,8 @@ btr_search_update_hash_ref(
ut_ad(rw_lock_own(btr_get_search_latch(cursor->index), RW_LOCK_X));
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_S)
|| rw_lock_own(&(block->lock), RW_LOCK_X));
- ut_ad(page_align(btr_cur_get_rec(cursor))
- == buf_block_get_frame(block));
+ ut_ad(page_align(btr_cur_get_rec(cursor)) == block->frame);
+ ut_ad(page_is_leaf(block->frame));
assert_block_ahi_valid(block);
index = block->index;
@@ -640,7 +640,7 @@ btr_search_update_hash_ref(
}
fold = rec_fold(rec,
- rec_get_offsets(rec, index, offsets_,
+ rec_get_offsets(rec, index, offsets_, true,
ULINT_UNDEFINED, &heap),
block->curr_n_fields,
block->curr_n_bytes, index->id);
@@ -750,10 +750,11 @@ btr_search_check_guess(
rec = btr_cur_get_rec(cursor);
ut_ad(page_rec_is_user_rec(rec));
+ ut_ad(page_rec_is_leaf(rec));
match = 0;
- offsets = rec_get_offsets(rec, cursor->index, offsets,
+ offsets = rec_get_offsets(rec, cursor->index, offsets, true,
n_unique, &heap);
cmp = cmp_dtuple_rec_with_match(tuple, rec, offsets, &match);
@@ -808,7 +809,7 @@ btr_search_check_guess(
}
offsets = rec_get_offsets(prev_rec, cursor->index, offsets,
- n_unique, &heap);
+ true, n_unique, &heap);
cmp = cmp_dtuple_rec_with_match(
tuple, prev_rec, offsets, &match);
if (mode == PAGE_CUR_GE) {
@@ -837,7 +838,7 @@ btr_search_check_guess(
}
offsets = rec_get_offsets(next_rec, cursor->index, offsets,
- n_unique, &heap);
+ true, n_unique, &heap);
cmp = cmp_dtuple_rec_with_match(
tuple, next_rec, offsets, &match);
if (mode == PAGE_CUR_LE) {
@@ -1139,6 +1140,7 @@ retry:
|| buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH
|| rw_lock_own(&block->lock, RW_LOCK_S)
|| rw_lock_own(&block->lock, RW_LOCK_X));
+ ut_ad(page_is_leaf(block->frame));
/* We must not dereference index here, because it could be freed
if (index->table->n_ref_count == 0 && !mutex_own(&dict_sys->mutex)).
@@ -1229,7 +1231,7 @@ retry:
while (!page_rec_is_supremum(rec)) {
offsets = rec_get_offsets(
- rec, index, offsets,
+ rec, index, offsets, true,
btr_search_get_n_fields(n_fields, n_bytes),
&heap);
fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
@@ -1392,6 +1394,7 @@ btr_search_build_page_hash_index(
ut_ad(index);
ut_ad(block->page.id.space() == index->space);
ut_a(!dict_index_is_ibuf(index));
+ ut_ad(page_is_leaf(block->frame));
ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_X));
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_S)
@@ -1445,7 +1448,7 @@ btr_search_build_page_hash_index(
rec = page_rec_get_next(page_get_infimum_rec(page));
offsets = rec_get_offsets(
- rec, index, offsets,
+ rec, index, offsets, true,
btr_search_get_n_fields(n_fields, n_bytes),
&heap);
ut_ad(page_rec_is_supremum(rec)
@@ -1476,7 +1479,7 @@ btr_search_build_page_hash_index(
}
offsets = rec_get_offsets(
- next_rec, index, offsets,
+ next_rec, index, offsets, true,
btr_search_get_n_fields(n_fields, n_bytes), &heap);
next_fold = rec_fold(next_rec, offsets, n_fields,
n_bytes, index->id);
@@ -1630,9 +1633,11 @@ btr_search_update_hash_on_delete(btr_cur_t* cursor)
mem_heap_t* heap = NULL;
rec_offs_init(offsets_);
+ ut_ad(page_is_leaf(btr_cur_get_page(cursor)));
#ifdef MYSQL_INDEX_DISABLE_AHI
if (cursor->index->disable_ahi) return;
#endif
+
if (!btr_search_enabled) {
return;
}
@@ -1658,7 +1663,7 @@ btr_search_update_hash_on_delete(btr_cur_t* cursor)
rec = btr_cur_get_rec(cursor);
- fold = rec_fold(rec, rec_get_offsets(rec, index, offsets_,
+ fold = rec_fold(rec, rec_get_offsets(rec, index, offsets_, true,
ULINT_UNDEFINED, &heap),
block->curr_n_fields, block->curr_n_bytes, index->id);
if (UNIV_LIKELY_NULL(heap)) {
@@ -1777,6 +1782,7 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor)
ulint* offsets = offsets_;
rec_offs_init(offsets_);
+ ut_ad(page_is_leaf(btr_cur_get_page(cursor)));
#ifdef MYSQL_INDEX_DISABLE_AHI
if (cursor->index->disable_ahi) return;
#endif
@@ -1816,13 +1822,13 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor)
ins_rec = page_rec_get_next_const(rec);
next_rec = page_rec_get_next_const(ins_rec);
- offsets = rec_get_offsets(ins_rec, index, offsets,
+ offsets = rec_get_offsets(ins_rec, index, offsets, true,
ULINT_UNDEFINED, &heap);
ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index->id);
if (!page_rec_is_supremum(next_rec)) {
offsets = rec_get_offsets(
- next_rec, index, offsets,
+ next_rec, index, offsets, true,
btr_search_get_n_fields(n_fields, n_bytes), &heap);
next_fold = rec_fold(next_rec, offsets, n_fields,
n_bytes, index->id);
@@ -1830,7 +1836,7 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor)
if (!page_rec_is_infimum(rec)) {
offsets = rec_get_offsets(
- rec, index, offsets,
+ rec, index, offsets, true,
btr_search_get_n_fields(n_fields, n_bytes), &heap);
fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id);
} else {
@@ -2029,7 +2035,7 @@ btr_search_hash_table_validate(ulint hash_table_id)
page_index_id = btr_page_get_index_id(block->frame);
offsets = rec_get_offsets(
- node->data, block->index, offsets,
+ node->data, block->index, offsets, true,
btr_search_get_n_fields(block->curr_n_fields,
block->curr_n_bytes),
&heap);
diff --git a/storage/innobase/data/data0data.cc b/storage/innobase/data/data0data.cc
index e511a676054..9ed4faa8e70 100644
--- a/storage/innobase/data/data0data.cc
+++ b/storage/innobase/data/data0data.cc
@@ -803,14 +803,11 @@ big_rec_t::alloc(
return(rec);
}
-/** Create a deep copy of this object
-@param[in] heap the memory heap in which the clone will be
- created.
-
-@return the cloned object. */
+/** Create a deep copy of this object.
+@param[in,out] heap memory heap in which the clone will be created
+@return the cloned object */
dfield_t*
-dfield_t::clone(
- mem_heap_t* heap)
+dfield_t::clone(mem_heap_t* heap) const
{
const ulint size = len == UNIV_SQL_NULL ? 0 : len;
dfield_t* obj = static_cast<dfield_t*>(
diff --git a/storage/innobase/dict/dict0defrag_bg.cc b/storage/innobase/dict/dict0defrag_bg.cc
index ccb73e02f43..3d1ee3f76e9 100644
--- a/storage/innobase/dict/dict0defrag_bg.cc
+++ b/storage/innobase/dict/dict0defrag_bg.cc
@@ -290,7 +290,7 @@ dict_stats_save_defrag_summary(
dberr_t ret=DB_SUCCESS;
lint now = (lint) ut_time();
- if (dict_index_is_univ(index)) {
+ if (dict_index_is_ibuf(index)) {
return DB_SUCCESS;
}
@@ -320,7 +320,7 @@ dict_stats_save_defrag_stats(
{
dberr_t ret;
- if (dict_index_is_univ(index)) {
+ if (dict_index_is_ibuf(index)) {
return DB_SUCCESS;
}
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index 55429b2680f..ecd8839c36e 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -5654,7 +5654,7 @@ dict_index_build_node_ptr(
dtype_set(dfield_get_type(field), DATA_SYS_CHILD, DATA_NOT_NULL, 4);
- rec_copy_prefix_to_dtuple(tuple, rec, index, n_unique, heap);
+ rec_copy_prefix_to_dtuple(tuple, rec, index, !level, n_unique, heap);
dtuple_set_info_bits(tuple, dtuple_get_info_bits(tuple)
| REC_STATUS_NODE_PTR);
@@ -5686,7 +5686,7 @@ dict_index_copy_rec_order_prefix(
ut_a(!dict_table_is_comp(index->table));
n = rec_get_n_fields_old(rec);
} else {
- if (page_is_leaf(page_align(rec))) {
+ if (page_rec_is_leaf(rec)) {
n = dict_index_get_n_unique_in_tree(index);
} else {
n = dict_index_get_n_unique_in_tree_nonleaf(index);
@@ -5703,16 +5703,22 @@ dict_index_copy_rec_order_prefix(
return(rec_copy_prefix_to_buf(rec, index, n, buf, buf_size));
}
-/**********************************************************************//**
-Builds a typed data tuple out of a physical record.
+/** Convert a physical record into a search tuple.
+@param[in] rec index record (not necessarily in an index page)
+@param[in] index index
+@param[in] leaf whether rec is in a leaf page
+@param[in] n_fields number of data fields
+@param[in,out] heap memory heap for allocation
@return own: data tuple */
dtuple_t*
-dict_index_build_data_tuple(
-/*========================*/
- dict_index_t* index, /*!< in: index tree */
- rec_t* rec, /*!< in: record for which to build data tuple */
- ulint n_fields,/*!< in: number of data fields */
- mem_heap_t* heap) /*!< in: memory heap where tuple created */
+dict_index_build_data_tuple_func(
+ const rec_t* rec,
+ const dict_index_t* index,
+#ifdef UNIV_DEBUG
+ bool leaf,
+#endif /* UNIV_DEBUG */
+ ulint n_fields,
+ mem_heap_t* heap)
{
dtuple_t* tuple;
@@ -5723,7 +5729,7 @@ dict_index_build_data_tuple(
dict_index_copy_types(tuple, index, n_fields);
- rec_copy_prefix_to_dtuple(tuple, rec, index, n_fields, heap);
+ rec_copy_prefix_to_dtuple(tuple, rec, index, leaf, n_fields, heap);
ut_ad(dtuple_check_typed(tuple));
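
The new signature of dict_index_build_data_tuple_func above relies on an idiom worth spelling out: the leaf flag is compiled in only under UNIV_DEBUG, and a forwarding wrapper (not visible in this hunk, so assumed here) keeps call sites identical in both builds. A minimal standalone imitation with invented names:

// Debug-only-parameter idiom, illustrative only.
#include <cassert>
#include <cstdio>

#ifdef UNIV_DEBUG
static int build_tuple_func(const char* rec, bool leaf, int n_fields)
{
  (void) rec;
  assert(leaf || n_fields > 0);        // extra invariant checked in debug builds
  return n_fields;
}
# define build_tuple(rec, leaf, n) build_tuple_func(rec, leaf, n)
#else
static int build_tuple_func(const char* rec, int n_fields)
{
  (void) rec;
  return n_fields;
}
# define build_tuple(rec, leaf, n) build_tuple_func(rec, n)   // flag dropped
#endif

int main()
{
  // Callers always pass the flag; release builds simply discard it.
  std::printf("%d\n", build_tuple("some record", true, 3));
}
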
diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc
index 177a16a2b37..17e66b3d99c 100644
--- a/storage/innobase/dict/dict0stats.cc
+++ b/storage/innobase/dict/dict0stats.cc
@@ -1172,6 +1172,7 @@ dict_stats_analyze_index_level(
prev_rec_offsets = rec_get_offsets(
prev_rec, index, prev_rec_offsets,
+ true,
n_uniq, &heap);
prev_rec = rec_copy_prefix_to_buf(
@@ -1185,7 +1186,7 @@ dict_stats_analyze_index_level(
continue;
}
rec_offsets = rec_get_offsets(
- rec, index, rec_offsets, n_uniq, &heap);
+ rec, index, rec_offsets, !level, n_uniq, &heap);
(*total_recs)++;
@@ -1193,7 +1194,7 @@ dict_stats_analyze_index_level(
ulint matched_fields;
prev_rec_offsets = rec_get_offsets(
- prev_rec, index, prev_rec_offsets,
+ prev_rec, index, prev_rec_offsets, !level,
n_uniq, &heap);
cmp_rec_rec_with_match(rec,
@@ -1399,7 +1400,7 @@ dict_stats_scan_page(
return(NULL);
}
- offsets_rec = rec_get_offsets(rec, index, offsets_rec,
+ offsets_rec = rec_get_offsets(rec, index, offsets_rec, is_leaf,
ULINT_UNDEFINED, &heap);
if (should_count_external_pages) {
@@ -1416,7 +1417,7 @@ dict_stats_scan_page(
ulint matched_fields;
offsets_next_rec = rec_get_offsets(next_rec, index,
- offsets_next_rec,
+ offsets_next_rec, is_leaf,
ULINT_UNDEFINED,
&heap);
@@ -1522,8 +1523,9 @@ dict_stats_analyze_index_below_cur(
rec_offs_set_n_alloc(offsets2, size);
rec = btr_cur_get_rec(cur);
+ ut_ad(!page_rec_is_leaf(rec));
- offsets_rec = rec_get_offsets(rec, index, offsets1,
+ offsets_rec = rec_get_offsets(rec, index, offsets1, false,
ULINT_UNDEFINED, &heap);
page_id_t page_id(dict_index_get_space(index),
diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc
index 60cc3c91fef..dd8de511b21 100644
--- a/storage/innobase/fts/fts0fts.cc
+++ b/storage/innobase/fts/fts0fts.cc
@@ -3460,10 +3460,10 @@ fts_add_doc_by_id(
}
- offsets = rec_get_offsets(clust_rec, clust_index,
- NULL, ULINT_UNDEFINED, &heap);
+ offsets = rec_get_offsets(clust_rec, clust_index, NULL, true,
+ ULINT_UNDEFINED, &heap);
- for (ulint i = 0; i < num_idx; ++i) {
+ for (ulint i = 0; i < num_idx; ++i) {
fts_doc_t doc;
dict_table_t* table;
fts_get_doc_t* get_doc;
@@ -3633,7 +3633,7 @@ fts_get_max_doc_id(
}
offsets = rec_get_offsets(
- rec, index, offsets, ULINT_UNDEFINED, &heap);
+ rec, index, offsets, true, ULINT_UNDEFINED, &heap);
data = rec_get_nth_field(rec, offsets, 0, &len);
@@ -5120,7 +5120,7 @@ fts_get_doc_id_from_rec(
rec_offs_init(offsets_);
offsets = rec_get_offsets(
- rec, index, offsets, ULINT_UNDEFINED, &my_heap);
+ rec, index, offsets, true, ULINT_UNDEFINED, &my_heap);
col_no = dict_col_get_index_pos(
&table->cols[table->fts->doc_col], index);
diff --git a/storage/innobase/gis/gis0rtree.cc b/storage/innobase/gis/gis0rtree.cc
index d14bafff72b..b8220d73ec0 100644
--- a/storage/innobase/gis/gis0rtree.cc
+++ b/storage/innobase/gis/gis0rtree.cc
@@ -85,7 +85,8 @@ rtr_page_split_initialize_nodes(
stop = task + n_recs;
rec = page_rec_get_next(page_get_infimum_rec(page));
- *offsets = rec_get_offsets(rec, cursor->index, *offsets,
+ ut_d(const bool is_leaf = page_is_leaf(page));
+ *offsets = rec_get_offsets(rec, cursor->index, *offsets, is_leaf,
n_uniq, &heap);
source_cur = rec_get_nth_field(rec, *offsets, 0, &len);
@@ -98,7 +99,7 @@ rtr_page_split_initialize_nodes(
rec = page_rec_get_next(rec);
*offsets = rec_get_offsets(rec, cursor->index, *offsets,
- n_uniq, &heap);
+ is_leaf, n_uniq, &heap);
source_cur = rec_get_nth_field(rec, *offsets, 0, &len);
}
@@ -324,7 +325,7 @@ rtr_update_mbr_field(
if (cursor2) {
rec_t* del_rec = btr_cur_get_rec(cursor2);
offsets2 = rec_get_offsets(btr_cur_get_rec(cursor2),
- index, NULL,
+ index, NULL, false,
ULINT_UNDEFINED, &heap);
del_page_no = btr_node_ptr_get_child_page_no(del_rec, offsets2);
cur2_pos = page_rec_get_n_recs_before(btr_cur_get_rec(cursor2));
@@ -389,7 +390,7 @@ rtr_update_mbr_field(
= page_rec_get_nth(page, cur2_pos);
}
offsets2 = rec_get_offsets(btr_cur_get_rec(cursor2),
- index, NULL,
+ index, NULL, false,
ULINT_UNDEFINED, &heap);
ut_ad(del_page_no == btr_node_ptr_get_child_page_no(
cursor2->page_cur.rec,
@@ -427,8 +428,7 @@ rtr_update_mbr_field(
ut_ad(old_rec != insert_rec);
page_cur_position(old_rec, block, &page_cur);
- offsets2 = rec_get_offsets(old_rec,
- index, NULL,
+ offsets2 = rec_get_offsets(old_rec, index, NULL, !level,
ULINT_UNDEFINED, &heap);
page_cur_delete_rec(&page_cur, index, offsets2, mtr);
@@ -458,6 +458,7 @@ update_mbr:
cur2_rec = cursor2->page_cur.rec;
offsets2 = rec_get_offsets(cur2_rec, index, NULL,
+ !level,
ULINT_UNDEFINED, &heap);
cur2_rec_info = rec_get_info_bits(cur2_rec,
@@ -517,7 +518,7 @@ update_mbr:
if (ins_suc) {
btr_cur_position(index, insert_rec, block, cursor);
offsets = rec_get_offsets(insert_rec,
- index, offsets,
+ index, offsets, !level,
ULINT_UNDEFINED, &heap);
}
@@ -532,6 +533,7 @@ update_mbr:
cur2_rec = btr_cur_get_rec(cursor2);
offsets2 = rec_get_offsets(cur2_rec, index, NULL,
+ !level,
ULINT_UNDEFINED, &heap);
/* If the cursor2 position is on a wrong rec, we
@@ -545,6 +547,7 @@ update_mbr:
while (!page_rec_is_supremum(cur2_rec)) {
offsets2 = rec_get_offsets(cur2_rec, index,
NULL,
+ !level,
ULINT_UNDEFINED,
&heap);
cur2_pno = btr_node_ptr_get_child_page_no(
@@ -862,6 +865,7 @@ rtr_split_page_move_rec_list(
rec_move = static_cast<rtr_rec_move_t*>(mem_heap_alloc(
heap,
sizeof (*rec_move) * max_to_move));
+ const bool is_leaf = page_is_leaf(page);
/* Insert the recs in group 2 to new page. */
for (cur_split_node = node_array;
@@ -871,11 +875,10 @@ rtr_split_page_move_rec_list(
block, cur_split_node->key);
offsets = rec_get_offsets(cur_split_node->key,
- index, offsets,
+ index, offsets, is_leaf,
ULINT_UNDEFINED, &heap);
- ut_ad (cur_split_node->key != first_rec
- || !page_is_leaf(page));
+ ut_ad(!is_leaf || cur_split_node->key != first_rec);
rec = page_cur_insert_rec_low(
page_cur_get_rec(&new_page_cursor),
@@ -910,8 +913,7 @@ rtr_split_page_move_rec_list(
same temp-table in parallel.
max_trx_id is ignored for temp tables because it is not required
for MVCC. */
- if (page_is_leaf(page)
- && !dict_table_is_temporary(index->table)) {
+ if (is_leaf && !dict_table_is_temporary(index->table)) {
page_update_max_trx_id(new_block, NULL,
page_get_max_trx_id(page),
mtr);
@@ -964,7 +966,7 @@ rtr_split_page_move_rec_list(
block, &page_cursor);
offsets = rec_get_offsets(
page_cur_get_rec(&page_cursor), index,
- offsets, ULINT_UNDEFINED,
+ offsets, is_leaf, ULINT_UNDEFINED,
&heap);
page_cur_delete_rec(&page_cursor,
index, offsets, mtr);
@@ -1183,9 +1185,8 @@ func_start:
*offsets = rec_get_offsets(
page_cur_get_rec(page_cursor),
- cursor->index,
- *offsets, ULINT_UNDEFINED,
- heap);
+ cursor->index, *offsets, !page_level,
+ ULINT_UNDEFINED, heap);
page_cur_delete_rec(page_cursor,
cursor->index, *offsets, mtr);
@@ -1201,9 +1202,8 @@ func_start:
block, page_cursor);
*offsets = rec_get_offsets(
page_cur_get_rec(page_cursor),
- cursor->index,
- *offsets, ULINT_UNDEFINED,
- heap);
+ cursor->index, *offsets, !page_level,
+ ULINT_UNDEFINED, heap);
page_cur_delete_rec(page_cursor,
cursor->index, *offsets, mtr);
}
@@ -1461,13 +1461,14 @@ rtr_page_copy_rec_list_end_no_locks(
cur_rec = page_rec_get_next(cur_rec);
}
- offsets1 = rec_get_offsets(cur1_rec, index, offsets1,
+ offsets1 = rec_get_offsets(cur1_rec, index, offsets1, is_leaf,
ULINT_UNDEFINED, &heap);
while (!page_rec_is_supremum(cur_rec)) {
ulint cur_matched_fields = 0;
int cmp;
offsets2 = rec_get_offsets(cur_rec, index, offsets2,
+ is_leaf,
ULINT_UNDEFINED, &heap);
cmp = cmp_rec_rec_with_match(cur1_rec, cur_rec,
offsets1, offsets2,
@@ -1503,7 +1504,7 @@ rtr_page_copy_rec_list_end_no_locks(
cur_rec = page_cur_get_rec(&page_cur);
- offsets1 = rec_get_offsets(cur1_rec, index, offsets1,
+ offsets1 = rec_get_offsets(cur1_rec, index, offsets1, is_leaf,
ULINT_UNDEFINED, &heap);
ins_rec = page_cur_insert_rec_low(cur_rec, index,
@@ -1579,7 +1580,7 @@ rtr_page_copy_rec_list_start_no_locks(
cur_rec = page_rec_get_next(cur_rec);
}
- offsets1 = rec_get_offsets(cur1_rec, index, offsets1,
+ offsets1 = rec_get_offsets(cur1_rec, index, offsets1, is_leaf,
ULINT_UNDEFINED, &heap);
while (!page_rec_is_supremum(cur_rec)) {
@@ -1587,6 +1588,7 @@ rtr_page_copy_rec_list_start_no_locks(
int cmp;
offsets2 = rec_get_offsets(cur_rec, index, offsets2,
+ is_leaf,
ULINT_UNDEFINED, &heap);
cmp = cmp_rec_rec_with_match(cur1_rec, cur_rec,
offsets1, offsets2,
@@ -1624,7 +1626,7 @@ rtr_page_copy_rec_list_start_no_locks(
cur_rec = page_cur_get_rec(&page_cur);
- offsets1 = rec_get_offsets(cur1_rec, index, offsets1,
+ offsets1 = rec_get_offsets(cur1_rec, index, offsets1, is_leaf,
ULINT_UNDEFINED, &heap);
ins_rec = page_cur_insert_rec_low(cur_rec, index,
@@ -1787,7 +1789,7 @@ rtr_check_same_block(
while (!page_rec_is_supremum(rec)) {
offsets = rec_get_offsets(
- rec, index, NULL, ULINT_UNDEFINED, &heap);
+ rec, index, NULL, false, ULINT_UNDEFINED, &heap);
if (btr_node_ptr_get_child_page_no(rec, offsets) == page_no) {
btr_cur_position(index, rec, parentb, cursor);
@@ -1913,7 +1915,8 @@ rtr_estimate_n_rows_in_range(
heap = mem_heap_create(512);
rec = page_rec_get_next(page_get_infimum_rec(page));
- offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap);
+ offsets = rec_get_offsets(rec, index, offsets, page_rec_is_leaf(rec),
+ ULINT_UNDEFINED, &heap);
/* Scan records in root page and calculate area. */
double area = 0;
diff --git a/storage/innobase/gis/gis0sea.cc b/storage/innobase/gis/gis0sea.cc
index 97163cae410..87ebd9ad34a 100644
--- a/storage/innobase/gis/gis0sea.cc
+++ b/storage/innobase/gis/gis0sea.cc
@@ -533,7 +533,7 @@ rtr_compare_cursor_rec(
rec = btr_cur_get_rec(cursor);
offsets = rec_get_offsets(
- rec, index, NULL, ULINT_UNDEFINED, heap);
+ rec, index, NULL, false, ULINT_UNDEFINED, heap);
return(btr_node_ptr_get_child_page_no(rec, offsets) == page_no);
}
@@ -723,7 +723,7 @@ rtr_page_get_father_node_ptr(
user_rec = btr_cur_get_rec(cursor);
ut_a(page_rec_is_user_rec(user_rec));
- offsets = rec_get_offsets(user_rec, index, offsets,
+ offsets = rec_get_offsets(user_rec, index, offsets, !level,
ULINT_UNDEFINED, &heap);
rtr_get_mbr_from_rec(user_rec, offsets, &mbr);
@@ -740,7 +740,7 @@ rtr_page_get_father_node_ptr(
node_ptr = btr_cur_get_rec(cursor);
ut_ad(!page_rec_is_comp(node_ptr)
|| rec_get_status(node_ptr) == REC_STATUS_NODE_PTR);
- offsets = rec_get_offsets(node_ptr, index, offsets,
+ offsets = rec_get_offsets(node_ptr, index, offsets, false,
ULINT_UNDEFINED, &heap);
ulint child_page = btr_node_ptr_get_child_page_no(node_ptr, offsets);
@@ -757,13 +757,14 @@ rtr_page_get_father_node_ptr(
print_rec = page_rec_get_next(
page_get_infimum_rec(page_align(user_rec)));
- offsets = rec_get_offsets(print_rec, index,
- offsets, ULINT_UNDEFINED, &heap);
+ offsets = rec_get_offsets(print_rec, index, offsets,
+ page_rec_is_leaf(user_rec),
+ ULINT_UNDEFINED, &heap);
error << "; child ";
rec_print(error.m_oss, print_rec,
rec_get_info_bits(print_rec, rec_offs_comp(offsets)),
offsets);
- offsets = rec_get_offsets(node_ptr, index, offsets,
+ offsets = rec_get_offsets(node_ptr, index, offsets, false,
ULINT_UNDEFINED, &heap);
error << "; parent ";
rec_print(error.m_oss, print_rec,
@@ -1310,10 +1311,10 @@ rtr_cur_restore_position(
heap = mem_heap_create(256);
offsets1 = rec_get_offsets(
- r_cursor->old_rec, index, NULL,
+ r_cursor->old_rec, index, NULL, !level,
r_cursor->old_n_fields, &heap);
offsets2 = rec_get_offsets(
- rec, index, NULL,
+ rec, index, NULL, !level,
r_cursor->old_n_fields, &heap);
comp = rec_offs_comp(offsets1);
@@ -1351,7 +1352,7 @@ rtr_cur_restore_position(
heap = mem_heap_create(256);
- tuple = dict_index_build_data_tuple(index, r_cursor->old_rec,
+ tuple = dict_index_build_data_tuple(r_cursor->old_rec, index, !level,
r_cursor->old_n_fields, heap);
page_cursor = btr_pcur_get_page_cur(r_cursor);
@@ -1383,10 +1384,10 @@ search_again:
rec = btr_pcur_get_rec(r_cursor);
offsets1 = rec_get_offsets(
- r_cursor->old_rec, index, NULL,
+ r_cursor->old_rec, index, NULL, !level,
r_cursor->old_n_fields, &heap);
offsets2 = rec_get_offsets(
- rec, index, NULL,
+ rec, index, NULL, !level,
r_cursor->old_n_fields, &heap);
comp = rec_offs_comp(offsets1);
@@ -1433,6 +1434,7 @@ rtr_leaf_push_match_rec(
rtr_rec_t rtr_rec;
buf = match_rec->block.frame + match_rec->used;
+ ut_ad(page_rec_is_leaf(rec));
copy = rec_copy(buf, rec, offsets);
@@ -1661,11 +1663,9 @@ rtr_cur_search_with_match(
ulint* offsets = offsets_;
mem_heap_t* heap = NULL;
int cmp = 1;
- bool is_leaf;
double least_inc = DBL_MAX;
const rec_t* best_rec;
const rec_t* last_match_rec = NULL;
- ulint level;
bool match_init = false;
ulint space = block->page.id.space();
page_cur_mode_t orig_mode = mode;
@@ -1679,8 +1679,8 @@ rtr_cur_search_with_match(
page = buf_block_get_frame(block);
- is_leaf = page_is_leaf(page);
- level = btr_page_get_level(page, mtr);
+ const ulint level = btr_page_get_level(page, mtr);
+ const bool is_leaf = !level;
if (mode == PAGE_CUR_RTREE_LOCATE) {
ut_ad(level != 0);
@@ -1702,7 +1702,7 @@ rtr_cur_search_with_match(
ulint new_rec_size = rec_get_converted_size(index, tuple, 0);
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, is_leaf,
dtuple_get_n_fields_cmp(tuple),
&heap);
@@ -1723,7 +1723,7 @@ rtr_cur_search_with_match(
}
while (!page_rec_is_supremum(rec)) {
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, is_leaf,
dtuple_get_n_fields_cmp(tuple),
&heap);
if (!is_leaf) {
@@ -1818,7 +1818,7 @@ rtr_cur_search_with_match(
== PAGE_CUR_RTREE_GET_FATHER);
offsets = rec_get_offsets(
- rec, index, offsets,
+ rec, index, offsets, false,
ULINT_UNDEFINED, &heap);
page_no = btr_node_ptr_get_child_page_no(
@@ -1867,7 +1867,7 @@ rtr_cur_search_with_match(
/* Collect matched records on page */
offsets = rec_get_offsets(
- rec, index, offsets,
+ rec, index, offsets, true,
ULINT_UNDEFINED, &heap);
rtr_leaf_push_match_rec(
rec, rtr_info, offsets,
@@ -1899,9 +1899,8 @@ rtr_cur_search_with_match(
ulint child_no;
ut_ad(least_inc < DBL_MAX);
offsets = rec_get_offsets(
- best_rec, index,
- offsets, ULINT_UNDEFINED,
- &heap);
+ best_rec, index, offsets,
+ false, ULINT_UNDEFINED, &heap);
child_no =
btr_node_ptr_get_child_page_no(
best_rec, offsets);
@@ -1953,12 +1952,12 @@ rtr_cur_search_with_match(
/* Verify the record to be positioned is the same
as the last record in matched_rec vector */
offsets2 = rec_get_offsets(test_rec.r_rec, index,
- offsets2, ULINT_UNDEFINED,
- &heap);
+ offsets2, true,
+ ULINT_UNDEFINED, &heap);
offsets = rec_get_offsets(last_match_rec, index,
- offsets, ULINT_UNDEFINED,
- &heap);
+ offsets, true,
+ ULINT_UNDEFINED, &heap);
ut_ad(cmp_rec_rec(test_rec.r_rec, last_match_rec,
offsets2, offsets, index) == 0);
@@ -1975,7 +1974,8 @@ rtr_cur_search_with_match(
ut_ad(!last_match_rec && rec);
offsets = rec_get_offsets(
- rec, index, offsets, ULINT_UNDEFINED, &heap);
+ rec, index, offsets, false,
+ ULINT_UNDEFINED, &heap);
child_no = btr_node_ptr_get_child_page_no(rec, offsets);
@@ -1997,7 +1997,7 @@ rtr_cur_search_with_match(
&& mode != PAGE_CUR_RTREE_INSERT) {
ulint page_no;
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, false,
ULINT_UNDEFINED, &heap);
page_no = btr_node_ptr_get_child_page_no(rec, offsets);
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index d0380cd914c..2fa38b24ad9 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -71,8 +71,7 @@ static const char *MSG_UNSUPPORTED_ALTER_ONLINE_ON_VIRTUAL_COLUMN=
/** Operations for creating secondary indexes (no rebuild needed) */
static const Alter_inplace_info::HA_ALTER_FLAGS INNOBASE_ONLINE_CREATE
= Alter_inplace_info::ADD_INDEX
- | Alter_inplace_info::ADD_UNIQUE_INDEX
- | Alter_inplace_info::ADD_SPATIAL_INDEX;
+ | Alter_inplace_info::ADD_UNIQUE_INDEX;
/** Operations for rebuilding a table in place */
static const Alter_inplace_info::HA_ALTER_FLAGS INNOBASE_ALTER_REBUILD
@@ -702,11 +701,11 @@ ha_innobase::check_if_supported_inplace_alter(
codes for certain types. In some cases the signed/unsigned bit was
generated differently too.
- Online ALTER would change the mtype/unsigned_flag (to what the
+ Inplace ALTER would change the mtype/unsigned_flag (to what the
current code generates) without changing the underlying data
representation, and it might result in data corruption.
- Don't do online ALTER if mtype/unsigned_flag are wrong.
+ Don't do inplace ALTER if mtype/unsigned_flag are wrong.
*/
for (ulint i = 0, icol= 0; i < table->s->fields; i++) {
const Field* field = table->field[i];
@@ -897,29 +896,6 @@ ha_innobase::check_if_supported_inplace_alter(
DBUG_ASSERT(!m_prebuilt->table->fts || m_prebuilt->table->fts->doc_col
< dict_table_get_n_user_cols(m_prebuilt->table));
- /* Spatial indexes should use copy method for now.
- TOO: remove this when below ADD_SPATIAL_INDEX supported. */
- for (uint i = 0; i < ha_alter_info->index_add_count; i++) {
- const KEY* key =
- &ha_alter_info->key_info_buffer[
- ha_alter_info->index_add_buffer[i]];
- if (key->flags & HA_SPATIAL) {
- ha_alter_info->unsupported_reason = innobase_get_err_msg(
- ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_GIS);
-
- DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
- }
- }
-
-#ifdef MYSQL_SPATIAL_INDEX
- if (ha_alter_info->handler_flags
- & Alter_inplace_info::ADD_SPATIAL_INDEX) {
- ha_alter_info->unsupported_reason = innobase_get_err_msg(
- ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_GIS);
- online = false;
- }
-#endif
-
if (m_prebuilt->table->fts
&& innobase_fulltext_exist(altered_table)) {
/* FULLTEXT indexes are supposed to remain. */
@@ -964,7 +940,7 @@ ha_innobase::check_if_supported_inplace_alter(
operation is possible. */
} else if (((ha_alter_info->handler_flags
& Alter_inplace_info::ADD_PK_INDEX)
- || innobase_need_rebuild(ha_alter_info, table))
+ || innobase_need_rebuild(ha_alter_info, table))
&& (innobase_fulltext_exist(altered_table)
|| innobase_spatial_exist(altered_table))) {
/* Refuse to rebuild the table online, if
@@ -982,8 +958,6 @@ ha_innobase::check_if_supported_inplace_alter(
ha_alter_info->unsupported_reason =
innobase_get_err_msg(
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_GIS);
-
- DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
} else {
ha_alter_info->unsupported_reason =
innobase_get_err_msg(
@@ -991,10 +965,15 @@ ha_innobase::check_if_supported_inplace_alter(
}
} else if ((ha_alter_info->handler_flags
& Alter_inplace_info::ADD_INDEX)) {
- /* Building a full-text index requires a lock.
- We could do without a lock if the table already contains
- an FTS_DOC_ID column, but in that case we would have
- to apply the modification log to the full-text indexes. */
+ /* ADD FULLTEXT|SPATIAL INDEX requires a lock.
+
+ We could do ADD FULLTEXT INDEX without a lock if the
+ table already contains an FTS_DOC_ID column, but in
+ that case we would have to apply the modification log
+ to the full-text indexes.
+
+ We could also do ADD SPATIAL INDEX by implementing
+ row_log_apply() for it. */
for (uint i = 0; i < ha_alter_info->index_add_count; i++) {
const KEY* key =
@@ -1011,6 +990,12 @@ ha_innobase::check_if_supported_inplace_alter(
online = false;
break;
}
+ if (key->flags & HA_SPATIAL) {
+ ha_alter_info->unsupported_reason = innobase_get_err_msg(
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_GIS);
+ online = false;
+ break;
+ }
}
}
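
The handler0alter.cc hunks above stop rejecting ADD SPATIAL INDEX for ALGORITHM=INPLACE: adding a spatial (or fulltext) index is still performed in place, but it now merely disables the online, concurrent-DML path instead of failing. A minimal sketch of that decision, assuming simplified boolean inputs rather than the real Alter_inplace_info structures:

/* Hedged sketch only; the real logic lives in
ha_innobase::check_if_supported_inplace_alter(). Names are illustrative. */
#include <cstdio>

enum class inplace_result { REFUSED, LOCKED, ONLINE };

/* adds_fulltext / adds_spatial stand in for scanning the keys being added. */
static inplace_result classify_add_index(bool adds_fulltext, bool adds_spatial)
{
	if (adds_fulltext || adds_spatial) {
		/* Still built in place, but the table stays locked for the build. */
		return inplace_result::LOCKED;
	}
	/* Plain ADD INDEX / ADD UNIQUE INDEX keeps the online path. */
	return inplace_result::ONLINE;
}

int main()
{
	std::printf("%d\n", static_cast<int>(classify_add_index(false, true)));
	return 0;
}
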
@@ -5609,10 +5594,7 @@ ha_innobase::prepare_inplace_alter_table(
/* The clustered index is corrupted. */
my_error(ER_CHECK_NO_SUCH_TABLE, MYF(0));
DBUG_RETURN(true);
- }
-
- if (ha_alter_info->handler_flags
- & Alter_inplace_info::CHANGE_CREATE_OPTION) {
+ } else {
const char* invalid_opt = info.create_options_are_invalid();
/* Check engine specific table options */
diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc
index eda6684e69f..b53ede41427 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.cc
+++ b/storage/innobase/ibuf/ibuf0ibuf.cc
@@ -559,7 +559,7 @@ ibuf_init_at_db_start(void)
ibuf->index = dict_mem_index_create(
"innodb_change_buffer", "CLUST_IND",
- IBUF_SPACE_ID, DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF, 1);
+ IBUF_SPACE_ID, DICT_CLUSTERED | DICT_IBUF, 1);
ibuf->index->id = DICT_IBUF_ID_MIN + IBUF_SPACE_ID;
ibuf->index->table = dict_mem_table_create(
"innodb_change_buffer", IBUF_SPACE_ID, 1, 0, 0, 0);
@@ -1502,6 +1502,7 @@ ibuf_dummy_index_create(
/* avoid ut_ad(index->cached) in dict_index_get_n_unique_in_tree */
index->cached = TRUE;
+ ut_d(index->is_dummy = true);
return(index);
}
@@ -4000,8 +4001,8 @@ dump:
row_ins_sec_index_entry_by_modify(BTR_MODIFY_LEAF). */
ut_ad(rec_get_deleted_flag(rec, page_is_comp(page)));
- offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED,
- &heap);
+ offsets = rec_get_offsets(rec, index, NULL, true,
+ ULINT_UNDEFINED, &heap);
update = row_upd_build_sec_rec_difference_binary(
rec, index, offsets, entry, heap);
@@ -4193,7 +4194,7 @@ ibuf_delete(
rec_offs_init(offsets_);
offsets = rec_get_offsets(
- rec, index, offsets, ULINT_UNDEFINED, &heap);
+ rec, index, offsets, true, ULINT_UNDEFINED, &heap);
if (page_get_n_recs(page) <= 1
|| !(REC_INFO_DELETED_FLAG
diff --git a/storage/innobase/include/data0data.h b/storage/innobase/include/data0data.h
index 87a2228ff2d..b6187d46025 100644
--- a/storage/innobase/include/data0data.h
+++ b/storage/innobase/include/data0data.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -586,11 +587,10 @@ struct dfield_t{
unsigned len; /*!< data length; UNIV_SQL_NULL if SQL null */
dtype_t type; /*!< type of data */
- /** Create a deep copy of this object
- @param[in] heap the memory heap in which the clone will be
- created.
- @return the cloned object. */
- dfield_t* clone(mem_heap_t* heap);
+ /** Create a deep copy of this object.
+ @param[in,out] heap memory heap in which the clone will be created
+ @return the cloned object */
+ dfield_t* clone(mem_heap_t* heap) const;
};
/** Structure for an SQL data tuple of fields (logical record) */
diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h
index 7802366a149..ace0029e632 100644
--- a/storage/innobase/include/dict0dict.h
+++ b/storage/innobase/include/dict0dict.h
@@ -1451,17 +1451,31 @@ dict_index_copy_rec_order_prefix(
copied prefix, or NULL */
ulint* buf_size)/*!< in/out: buffer size */
MY_ATTRIBUTE((nonnull, warn_unused_result));
-/**********************************************************************//**
-Builds a typed data tuple out of a physical record.
+/** Convert a physical record into a search tuple.
+@param[in] rec index record (not necessarily in an index page)
+@param[in] index index
+@param[in] leaf whether rec is in a leaf page
+@param[in] n_fields number of data fields
+@param[in,out] heap memory heap for allocation
@return own: data tuple */
dtuple_t*
-dict_index_build_data_tuple(
-/*========================*/
- dict_index_t* index, /*!< in: index */
- rec_t* rec, /*!< in: record for which to build data tuple */
- ulint n_fields,/*!< in: number of data fields */
- mem_heap_t* heap) /*!< in: memory heap where tuple created */
+dict_index_build_data_tuple_func(
+ const rec_t* rec,
+ const dict_index_t* index,
+#ifdef UNIV_DEBUG
+ bool leaf,
+#endif /* UNIV_DEBUG */
+ ulint n_fields,
+ mem_heap_t* heap)
MY_ATTRIBUTE((nonnull, warn_unused_result));
+#ifdef UNIV_DEBUG
+# define dict_index_build_data_tuple(rec, index, leaf, n_fields, heap) \
+ dict_index_build_data_tuple_func(rec, index, leaf, n_fields, heap)
+#else /* UNIV_DEBUG */
+# define dict_index_build_data_tuple(rec, index, leaf, n_fields, heap) \
+ dict_index_build_data_tuple_func(rec, index, n_fields, heap)
+#endif /* UNIV_DEBUG */
+
/*********************************************************************//**
Gets the space id of the root of the index tree.
@return space id */
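
The dict0dict.h hunk above makes the new leaf argument of dict_index_build_data_tuple() a debug-only parameter: in release builds the wrapper macro drops it before calling the _func variant. The same idiom is applied to rec_get_offsets() and rec_copy_prefix_to_dtuple() elsewhere in this diff. A self-contained sketch of the pattern with generic names (not the InnoDB symbols):

#include <cassert>
#include <cstdio>

#define MY_DEBUG 1	/* stands in for UNIV_DEBUG */

static int scale_func(int value,
#if MY_DEBUG
		      bool expect_positive,	/* checked only in debug builds */
#endif
		      int factor)
{
#if MY_DEBUG
	assert(!expect_positive || value > 0);
#endif
	return value * factor;
}

/* Callers always pass the debug argument; the macro drops it in release builds. */
#if MY_DEBUG
# define scale(value, expect_positive, factor) \
	scale_func(value, expect_positive, factor)
#else
# define scale(value, expect_positive, factor) \
	scale_func(value, factor)
#endif

int main()
{
	std::printf("%d\n", scale(3, true, 7));	/* prints 21 */
	return 0;
}
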
diff --git a/storage/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic
index 9220e46e759..62561c8af4f 100644
--- a/storage/innobase/include/dict0dict.ic
+++ b/storage/innobase/include/dict0dict.ic
@@ -327,20 +327,6 @@ dict_index_is_unique(
}
/********************************************************************//**
-Check whether the index is an universal index tree.
-@return nonzero for universal tree, zero for other indexes */
-UNIV_INLINE
-ulint
-dict_index_is_univ(
-/*===============*/
- const dict_index_t* index) /*!< in: index */
-{
- ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
-
- return(index->type & DICT_UNIVERSAL);
-}
-
-/********************************************************************//**
Check whether the index is a Spatial Index.
@return nonzero for Spatial Index, zero for other indexes */
UNIV_INLINE
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index 0155d1d5d20..034729e1595 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -66,8 +66,6 @@ combination of types */
auto-generated clustered indexes,
also DICT_UNIQUE will be set */
#define DICT_UNIQUE 2 /*!< unique index */
-#define DICT_UNIVERSAL 4 /*!< index which can contain records from any
- other index */
#define DICT_IBUF 8 /*!< insert buffer tree */
#define DICT_CORRUPT 16 /*!< bit to store the corrupted flag
in SYS_INDEXES.TYPE */
@@ -864,6 +862,8 @@ struct dict_index_t{
data dictionary yet */
#ifdef UNIV_DEBUG
+ /** whether this is a dummy index object */
+ bool is_dummy;
uint32_t magic_n;/*!< magic number */
/** Value of dict_index_t::magic_n */
# define DICT_INDEX_MAGIC_N 76789786
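
dict_index_t above gains a debug-only is_dummy flag; the ut_d(index->is_dummy = true) assignments added in ibuf_dummy_index_create() and mlog_parse_index() mark the throw-away index objects used by the change buffer and by redo log parsing, so debug assertions can tell them apart from real cached indexes. A minimal sketch of the debug-only-member idiom, with placeholder names:

#include <cassert>

#define MY_DEBUG 1		/* stands in for UNIV_DEBUG */
#if MY_DEBUG
# define my_d(x) x		/* stands in for ut_d() */
#else
# define my_d(x)
#endif

struct index_t {
#if MY_DEBUG
	bool is_dummy = false;	/* set only for throw-away index objects */
#endif
	bool cached = false;
};

static index_t make_dummy_index()
{
	index_t ind;
	ind.cached = true;		/* silence "must be cached" assertions */
	my_d(ind.is_dummy = true);	/* compiled out in release builds */
	return ind;
}

int main()
{
	index_t ind = make_dummy_index();
#if MY_DEBUG
	assert(ind.is_dummy);
#endif
	return ind.cached ? 0 : 1;
}
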
diff --git a/storage/innobase/include/gis0rtree.ic b/storage/innobase/include/gis0rtree.ic
index 7f64a9b13a1..e852ebd8028 100644
--- a/storage/innobase/include/gis0rtree.ic
+++ b/storage/innobase/include/gis0rtree.ic
@@ -57,7 +57,8 @@ rtr_page_cal_mbr(
page = buf_block_get_frame(block);
rec = page_rec_get_next(page_get_infimum_rec(page));
- offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap);
+ offsets = rec_get_offsets(rec, index, offsets, page_is_leaf(page),
+ ULINT_UNDEFINED, &heap);
do {
/* The mbr address is in the first field. */
diff --git a/storage/innobase/include/ha_prototypes.h b/storage/innobase/include/ha_prototypes.h
index 7b3726b1fef..459304fc712 100644
--- a/storage/innobase/include/ha_prototypes.h
+++ b/storage/innobase/include/ha_prototypes.h
@@ -43,7 +43,6 @@ class THD;
#undef MYSQL_PFS
#undef MYSQL_RENAME_INDEX
#undef MYSQL_REPLACE_TRX_IN_THD
-#undef MYSQL_SPATIAL_INDEX
#undef MYSQL_STORE_FTS_DOC_ID
/*******************************************************************//**
diff --git a/storage/innobase/include/page0cur.ic b/storage/innobase/include/page0cur.ic
index 5eb1bc0cbc5..3e6d40cba4a 100644
--- a/storage/innobase/include/page0cur.ic
+++ b/storage/innobase/include/page0cur.ic
@@ -280,7 +280,9 @@ page_cur_tuple_insert(
rec = rec_convert_dtuple_to_rec((byte*) mem_heap_alloc(*heap, size),
index, tuple, n_ext);
- *offsets = rec_get_offsets(rec, index, *offsets, ULINT_UNDEFINED, heap);
+ *offsets = rec_get_offsets(rec, index, *offsets,
+ page_is_leaf(cursor->block->frame),
+ ULINT_UNDEFINED, heap);
if (buf_block_get_page_zip(cursor->block)) {
rec = page_cur_insert_rec_zip(
diff --git a/storage/innobase/include/page0page.h b/storage/innobase/include/page0page.h
index 9243bcaa717..3b6a0215249 100644
--- a/storage/innobase/include/page0page.h
+++ b/storage/innobase/include/page0page.h
@@ -167,25 +167,196 @@ directory. */
#define PAGE_DIR_SLOT_MIN_N_OWNED 4
extern my_bool srv_immediate_scrub_data_uncompressed;
+#endif /* UNIV_INNOCHECKSUM */
-/************************************************************//**
-Gets the start of a page.
-@return start of the page */
-UNIV_INLINE
+/** Get the start of a page frame.
+@param[in] ptr pointer within a page frame
+@return start of the page frame */
+MY_ATTRIBUTE((const))
+inline
page_t*
-page_align(
-/*=======*/
- const void* ptr) /*!< in: pointer to page frame */
- MY_ATTRIBUTE((const));
-/************************************************************//**
-Gets the offset within a page.
+page_align(const void* ptr)
+{
+ return(static_cast<page_t*>(ut_align_down(ptr, UNIV_PAGE_SIZE)));
+}
+
+/** Gets the byte offset within a page frame.
+@param[in] ptr pointer within a page frame
@return offset from the start of the page */
-UNIV_INLINE
+MY_ATTRIBUTE((const))
+inline
ulint
-page_offset(
-/*========*/
- const void* ptr) /*!< in: pointer to page frame */
- MY_ATTRIBUTE((const));
+page_offset(const void* ptr)
+{
+ return(ut_align_offset(ptr, UNIV_PAGE_SIZE));
+}
+
+/** Determine whether an index page is not in ROW_FORMAT=REDUNDANT.
+@param[in] page index page
+@return nonzero if ROW_FORMAT is one of COMPACT,DYNAMIC,COMPRESSED
+@retval 0 if ROW_FORMAT=REDUNDANT */
+inline
+byte
+page_is_comp(const page_t* page)
+{
+ ut_ad(!ut_align_offset(page, UNIV_ZIP_SIZE_MIN));
+ return(page[PAGE_HEADER + PAGE_N_HEAP] & 0x80);
+}
+
+/** Determine whether an index page is empty.
+@param[in] page index page
+@return whether the page is empty (PAGE_N_RECS = 0) */
+inline
+bool
+page_is_empty(const page_t* page)
+{
+ ut_ad(!ut_align_offset(page, UNIV_ZIP_SIZE_MIN));
+ return !*reinterpret_cast<const uint16_t*>(PAGE_HEADER + PAGE_N_RECS
+ + page);
+}
+
+/** Determine whether an index page contains garbage.
+@param[in] page index page
+@return whether the page contains garbage (PAGE_GARBAGE is not 0) */
+inline
+bool
+page_has_garbage(const page_t* page)
+{
+ ut_ad(!ut_align_offset(page, UNIV_ZIP_SIZE_MIN));
+ return *reinterpret_cast<const uint16_t*>(PAGE_HEADER + PAGE_GARBAGE
+ + page);
+}
+
+/** Determine whether a B-tree or R-tree index page is a leaf page.
+@param[in] page index page
+@return true if the page is a leaf (PAGE_LEVEL = 0) */
+inline
+bool
+page_is_leaf(const page_t* page)
+{
+ ut_ad(!ut_align_offset(page, UNIV_ZIP_SIZE_MIN));
+ return !*reinterpret_cast<const uint16_t*>(PAGE_HEADER + PAGE_LEVEL
+ + page);
+}
+
+#ifndef UNIV_INNOCHECKSUM
+/** Determine whether an index page record is not in ROW_FORMAT=REDUNDANT.
+@param[in] rec record in an index page frame (not a copy)
+@return nonzero if ROW_FORMAT is one of COMPACT,DYNAMIC,COMPRESSED
+@retval 0 if ROW_FORMAT=REDUNDANT */
+inline
+byte
+page_rec_is_comp(const byte* rec)
+{
+ return(page_is_comp(page_align(rec)));
+}
+
+/** Determine the offset of the infimum record on the page.
+@param[in] page index page
+@return offset of the infimum record in record list, relative from page */
+inline
+unsigned
+page_get_infimum_offset(const page_t* page)
+{
+ ut_ad(!page_offset(page));
+ return page_is_comp(page) ? PAGE_NEW_INFIMUM : PAGE_OLD_INFIMUM;
+}
+
+/** Determine the offset of the supremum record on the page.
+@param[in] page index page
+@return offset of the supremum record in record list, relative from page */
+inline
+unsigned
+page_get_supremum_offset(const page_t* page)
+{
+ ut_ad(!page_offset(page));
+ return page_is_comp(page) ? PAGE_NEW_SUPREMUM : PAGE_OLD_SUPREMUM;
+}
+
+/** Determine whether an index page record is a user record.
+@param[in] offset record offset in the page
+@retval true if a user record
+@retval false if the infimum or supremum pseudo-record */
+inline
+bool
+page_rec_is_user_rec_low(ulint offset)
+{
+ compile_time_assert(PAGE_OLD_INFIMUM >= PAGE_NEW_INFIMUM);
+ compile_time_assert(PAGE_OLD_SUPREMUM >= PAGE_NEW_SUPREMUM);
+ compile_time_assert(PAGE_NEW_INFIMUM < PAGE_OLD_SUPREMUM);
+ compile_time_assert(PAGE_OLD_INFIMUM < PAGE_NEW_SUPREMUM);
+ compile_time_assert(PAGE_NEW_SUPREMUM < PAGE_OLD_SUPREMUM_END);
+ compile_time_assert(PAGE_OLD_SUPREMUM < PAGE_NEW_SUPREMUM_END);
+ ut_ad(offset >= PAGE_NEW_INFIMUM);
+ ut_ad(offset <= UNIV_PAGE_SIZE - PAGE_EMPTY_DIR_START);
+
+ return(offset != PAGE_NEW_SUPREMUM
+ && offset != PAGE_NEW_INFIMUM
+ && offset != PAGE_OLD_INFIMUM
+ && offset != PAGE_OLD_SUPREMUM);
+}
+
+/** Determine if a record is the supremum record on an index page.
+@param[in] offset record offset in an index page
+@return true if the supremum record */
+inline
+bool
+page_rec_is_supremum_low(ulint offset)
+{
+ ut_ad(offset >= PAGE_NEW_INFIMUM);
+ ut_ad(offset <= UNIV_PAGE_SIZE - PAGE_EMPTY_DIR_START);
+ return(offset == PAGE_NEW_SUPREMUM || offset == PAGE_OLD_SUPREMUM);
+}
+
+/** Determine if a record is the infimum record on an index page.
+@param[in] offset record offset in an index page
+@return true if the infimum record */
+inline
+bool
+page_rec_is_infimum_low(ulint offset)
+{
+ ut_ad(offset >= PAGE_NEW_INFIMUM);
+ ut_ad(offset <= UNIV_PAGE_SIZE - PAGE_EMPTY_DIR_START);
+ return(offset == PAGE_NEW_INFIMUM || offset == PAGE_OLD_INFIMUM);
+}
+
+/** Determine whether a B-tree or R-tree index record is in a leaf page.
+@param[in] rec index record in an index page
+@return true if the record is in a leaf page */
+inline
+bool
+page_rec_is_leaf(const page_t* rec)
+{
+ const page_t* page = page_align(rec);
+ ut_ad(rec - page >= page_get_infimum_offset(page));
+ bool leaf = page_is_leaf(page);
+ ut_ad(!page_rec_is_comp(rec)
+ || !page_rec_is_user_rec_low(rec - page)
+ || leaf == !rec_get_node_ptr_flag(rec));
+ return leaf;
+}
+
+/** Determine whether an index page record is a user record.
+@param[in] rec record in an index page
+@return true if a user record */
+inline
+bool
+page_rec_is_user_rec(const rec_t* rec);
+
+/** Determine whether an index page record is the supremum record.
+@param[in] rec record in an index page
+@return true if the supremum record */
+inline
+bool
+page_rec_is_supremum(const rec_t* rec);
+
+/** Determine whether an index page record is the infimum record.
+@param[in] rec record in an index page
+@return true if the infimum record */
+inline
+bool
+page_rec_is_infimum(const rec_t* rec);
+
/*************************************************************//**
Returns the max trx id field value. */
UNIV_INLINE
@@ -321,22 +492,6 @@ page_header_reset_last_insert(
page_zip_des_t* page_zip,/*!< in/out: compressed page whose
uncompressed part will be updated, or NULL */
mtr_t* mtr); /*!< in: mtr */
-/************************************************************//**
-Gets the offset of the first record on the page.
-@return offset of the first record in record list, relative from page */
-UNIV_INLINE
-ulint
-page_get_infimum_offset(
-/*====================*/
- const page_t* page); /*!< in: page which must have record(s) */
-/************************************************************//**
-Gets the offset of the last record on the page.
-@return offset of the last record in record list, relative from page */
-UNIV_INLINE
-ulint
-page_get_supremum_offset(
-/*=====================*/
- const page_t* page); /*!< in: page which must have record(s) */
#define page_get_infimum_rec(page) ((page) + page_get_infimum_offset(page))
#define page_get_supremum_rec(page) ((page) + page_get_supremum_offset(page))
@@ -522,23 +677,7 @@ ulint
page_dir_find_owner_slot(
/*=====================*/
const rec_t* rec); /*!< in: the physical record */
-/************************************************************//**
-Determine whether the page is in new-style compact format.
-@return nonzero if the page is in compact format, zero if it is in
-old-style format */
-UNIV_INLINE
-ulint
-page_is_comp(
-/*=========*/
- const page_t* page); /*!< in: index page */
-/************************************************************//**
-TRUE if the record is on a page in compact format.
-@return nonzero if in compact format */
-UNIV_INLINE
-ulint
-page_rec_is_comp(
-/*=============*/
- const rec_t* rec); /*!< in: record */
+
/***************************************************************//**
Returns the heap number of a record.
@return heap number */
@@ -547,24 +686,6 @@ ulint
page_rec_get_heap_no(
/*=================*/
const rec_t* rec); /*!< in: the physical record */
-/************************************************************//**
-Determine whether the page is a B-tree leaf.
-@return true if the page is a B-tree leaf (PAGE_LEVEL = 0) */
-UNIV_INLINE
-bool
-page_is_leaf(
-/*=========*/
- const page_t* page) /*!< in: page */
- MY_ATTRIBUTE((warn_unused_result));
-/************************************************************//**
-Determine whether the page is empty.
-@return true if the page is empty (PAGE_N_RECS = 0) */
-UNIV_INLINE
-bool
-page_is_empty(
-/*==========*/
- const page_t* page) /*!< in: page */
- MY_ATTRIBUTE((warn_unused_result));
/** Determine whether a page is an index root page.
@param[in] page page frame
@return true if the page is a root page of an index */
@@ -574,15 +695,6 @@ page_is_root(
const page_t* page)
MY_ATTRIBUTE((warn_unused_result));
/************************************************************//**
-Determine whether the page contains garbage.
-@return true if the page contains garbage (PAGE_GARBAGE is not 0) */
-UNIV_INLINE
-bool
-page_has_garbage(
-/*=============*/
- const page_t* page) /*!< in: page */
- MY_ATTRIBUTE((warn_unused_result));
-/************************************************************//**
Gets the pointer to the next record on the page.
@return pointer to next record */
UNIV_INLINE
@@ -645,62 +757,6 @@ page_rec_get_prev(
/*==============*/
rec_t* rec); /*!< in: pointer to record,
must not be page infimum */
-/************************************************************//**
-TRUE if the record is a user record on the page.
-@return TRUE if a user record */
-UNIV_INLINE
-ibool
-page_rec_is_user_rec_low(
-/*=====================*/
- ulint offset) /*!< in: record offset on page */
- MY_ATTRIBUTE((const));
-/************************************************************//**
-TRUE if the record is the supremum record on a page.
-@return TRUE if the supremum record */
-UNIV_INLINE
-ibool
-page_rec_is_supremum_low(
-/*=====================*/
- ulint offset) /*!< in: record offset on page */
- MY_ATTRIBUTE((const));
-/************************************************************//**
-TRUE if the record is the infimum record on a page.
-@return TRUE if the infimum record */
-UNIV_INLINE
-ibool
-page_rec_is_infimum_low(
-/*====================*/
- ulint offset) /*!< in: record offset on page */
- MY_ATTRIBUTE((const));
-
-/************************************************************//**
-TRUE if the record is a user record on the page.
-@return TRUE if a user record */
-UNIV_INLINE
-ibool
-page_rec_is_user_rec(
-/*=================*/
- const rec_t* rec) /*!< in: record */
- MY_ATTRIBUTE((warn_unused_result));
-/************************************************************//**
-TRUE if the record is the supremum record on a page.
-@return TRUE if the supremum record */
-UNIV_INLINE
-ibool
-page_rec_is_supremum(
-/*=================*/
- const rec_t* rec) /*!< in: record */
- MY_ATTRIBUTE((warn_unused_result));
-
-/************************************************************//**
-TRUE if the record is the infimum record on a page.
-@return TRUE if the infimum record */
-UNIV_INLINE
-ibool
-page_rec_is_infimum(
-/*================*/
- const rec_t* rec) /*!< in: record */
- MY_ATTRIBUTE((warn_unused_result));
/************************************************************//**
true if the record is the first user record on a page.
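
The predicates that page0page.h now defines inline are single reads of fixed-offset fields in the index page header: page_is_comp() tests the high bit of PAGE_N_HEAP, page_is_leaf() tests PAGE_LEVEL for zero, and so on. A stand-alone sketch over a raw page buffer; the numeric offsets are assumptions recalled from the InnoDB header layout and should be verified against fil0fil.h/page0page.h rather than trusted:

#include <cstdint>
#include <cstdio>
#include <cstring>

static const unsigned PAGE_HEADER = 38;	/* index page header follows the FIL header */
static const unsigned PAGE_N_HEAP = 4;	/* heap record count; high bit = compact */
static const unsigned PAGE_LEVEL  = 26;	/* 0 for leaf pages */

static bool my_page_is_comp(const unsigned char* page)
{
	return page[PAGE_HEADER + PAGE_N_HEAP] & 0x80;
}

static bool my_page_is_leaf(const unsigned char* page)
{
	/* The on-disk field is big-endian, but "both bytes are zero" is
	endianness-independent, so a plain 16-bit load is enough. */
	uint16_t level;
	std::memcpy(&level, page + PAGE_HEADER + PAGE_LEVEL, 2);
	return level == 0;
}

int main()
{
	unsigned char page[16384] = {0};
	page[PAGE_HEADER + PAGE_N_HEAP] = 0x80;	/* mark as ROW_FORMAT=COMPACT */
	std::printf("comp=%d leaf=%d\n",
		    my_page_is_comp(page), my_page_is_leaf(page));
	return 0;
}
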
diff --git a/storage/innobase/include/page0page.ic b/storage/innobase/include/page0page.ic
index db98f2e6558..0062db56bfa 100644
--- a/storage/innobase/include/page0page.ic
+++ b/storage/innobase/include/page0page.ic
@@ -40,32 +40,7 @@ Created 2/2/1994 Heikki Tuuri
#undef UNIV_INLINE
#define UNIV_INLINE
#endif
-#endif /* !UNIV_INNOCHECKSUM */
-/************************************************************//**
-Gets the start of a page.
-@return start of the page */
-UNIV_INLINE
-page_t*
-page_align(
-/*=======*/
- const void* ptr) /*!< in: pointer to page frame */
-{
- return((page_t*) ut_align_down(ptr, UNIV_PAGE_SIZE));
-}
-
-#ifndef UNIV_INNOCHECKSUM
-/************************************************************//**
-Gets the offset within a page.
-@return offset from the start of the page */
-UNIV_INLINE
-ulint
-page_offset(
-/*========*/
- const void* ptr) /*!< in: pointer to page frame */
-{
- return(ut_align_offset(ptr, UNIV_PAGE_SIZE));
-}
/*************************************************************//**
Returns the max trx id field value. */
UNIV_INLINE
@@ -286,34 +261,6 @@ page_header_reset_last_insert(
}
}
-#endif /* !UNIV_INNOCHECKSUM */
-
-/************************************************************//**
-Determine whether the page is in new-style compact format.
-@return nonzero if the page is in compact format, zero if it is in
-old-style format */
-UNIV_INLINE
-ulint
-page_is_comp(
-/*=========*/
- const page_t* page) /*!< in: index page */
-{
- return(page[PAGE_HEADER + PAGE_N_HEAP] & 0x80);
-}
-
-#ifndef UNIV_INNOCHECKSUM
-/************************************************************//**
-TRUE if the record is on a page in compact format.
-@return nonzero if in compact format */
-UNIV_INLINE
-ulint
-page_rec_is_comp(
-/*=============*/
- const rec_t* rec) /*!< in: record */
-{
- return(page_is_comp(page_align(rec)));
-}
-
/***************************************************************//**
Returns the heap number of a record.
@return heap number */
@@ -330,33 +277,6 @@ page_rec_get_heap_no(
}
}
-#endif /* !UNIV_INNOCHECKSUM */
-
-/************************************************************//**
-Determine whether the page is a B-tree leaf.
-@return true if the page is a B-tree leaf (PAGE_LEVEL = 0) */
-UNIV_INLINE
-bool
-page_is_leaf(
-/*=========*/
- const page_t* page) /*!< in: page */
-{
- return(!*(const uint16*) (page + (PAGE_HEADER + PAGE_LEVEL)));
-}
-
-#ifndef UNIV_INNOCHECKSUM
-/************************************************************//**
-Determine whether the page is empty.
-@return true if the page is empty (PAGE_N_RECS = 0) */
-UNIV_INLINE
-bool
-page_is_empty(
-/*==========*/
- const page_t* page) /*!< in: page */
-{
- return(!*(const uint16*) (page + (PAGE_HEADER + PAGE_N_RECS)));
-}
-
/** Determine whether a page is an index root page.
@param[in] page page frame
@return true if the page is a root page of an index */
@@ -382,162 +302,36 @@ page_is_root(
== IB_UINT64_MAX);
}
-/************************************************************//**
-Determine whether the page contains garbage.
-@return true if the page contains garbage (PAGE_GARBAGE is not 0) */
-UNIV_INLINE
+/** Determine whether an index page record is a user record.
+@param[in] rec record in an index page
+@return true if a user record */
+inline
bool
-page_has_garbage(
-/*=============*/
- const page_t* page) /*!< in: page */
-{
- return(!!*(const uint16*) (page + (PAGE_HEADER + PAGE_GARBAGE)));
-}
-
-/************************************************************//**
-Gets the offset of the first record on the page.
-@return offset of the first record in record list, relative from page */
-UNIV_INLINE
-ulint
-page_get_infimum_offset(
-/*====================*/
- const page_t* page) /*!< in: page which must have record(s) */
-{
- ut_ad(page);
- ut_ad(!page_offset(page));
-
- if (page_is_comp(page)) {
- return(PAGE_NEW_INFIMUM);
- } else {
- return(PAGE_OLD_INFIMUM);
- }
-}
-
-/************************************************************//**
-Gets the offset of the last record on the page.
-@return offset of the last record in record list, relative from page */
-UNIV_INLINE
-ulint
-page_get_supremum_offset(
-/*=====================*/
- const page_t* page) /*!< in: page which must have record(s) */
-{
- ut_ad(page);
- ut_ad(!page_offset(page));
-
- if (page_is_comp(page)) {
- return(PAGE_NEW_SUPREMUM);
- } else {
- return(PAGE_OLD_SUPREMUM);
- }
-}
-
-/************************************************************//**
-TRUE if the record is a user record on the page.
-@return TRUE if a user record */
-UNIV_INLINE
-ibool
-page_rec_is_user_rec_low(
-/*=====================*/
- ulint offset) /*!< in: record offset on page */
-{
- ut_ad(offset >= PAGE_NEW_INFIMUM);
-#if PAGE_OLD_INFIMUM < PAGE_NEW_INFIMUM
-# error "PAGE_OLD_INFIMUM < PAGE_NEW_INFIMUM"
-#endif
-#if PAGE_OLD_SUPREMUM < PAGE_NEW_SUPREMUM
-# error "PAGE_OLD_SUPREMUM < PAGE_NEW_SUPREMUM"
-#endif
-#if PAGE_NEW_INFIMUM > PAGE_OLD_SUPREMUM
-# error "PAGE_NEW_INFIMUM > PAGE_OLD_SUPREMUM"
-#endif
-#if PAGE_OLD_INFIMUM > PAGE_NEW_SUPREMUM
-# error "PAGE_OLD_INFIMUM > PAGE_NEW_SUPREMUM"
-#endif
-#if PAGE_NEW_SUPREMUM > PAGE_OLD_SUPREMUM_END
-# error "PAGE_NEW_SUPREMUM > PAGE_OLD_SUPREMUM_END"
-#endif
-#if PAGE_OLD_SUPREMUM > PAGE_NEW_SUPREMUM_END
-# error "PAGE_OLD_SUPREMUM > PAGE_NEW_SUPREMUM_END"
-#endif
- ut_ad(offset <= UNIV_PAGE_SIZE - PAGE_EMPTY_DIR_START);
-
- return(offset != PAGE_NEW_SUPREMUM
- && offset != PAGE_NEW_INFIMUM
- && offset != PAGE_OLD_INFIMUM
- && offset != PAGE_OLD_SUPREMUM);
-}
-
-/************************************************************//**
-TRUE if the record is the supremum record on a page.
-@return TRUE if the supremum record */
-UNIV_INLINE
-ibool
-page_rec_is_supremum_low(
-/*=====================*/
- ulint offset) /*!< in: record offset on page */
-{
- ut_ad(offset >= PAGE_NEW_INFIMUM);
- ut_ad(offset <= UNIV_PAGE_SIZE - PAGE_EMPTY_DIR_START);
-
- return(offset == PAGE_NEW_SUPREMUM
- || offset == PAGE_OLD_SUPREMUM);
-}
-
-/************************************************************//**
-TRUE if the record is the infimum record on a page.
-@return TRUE if the infimum record */
-UNIV_INLINE
-ibool
-page_rec_is_infimum_low(
-/*====================*/
- ulint offset) /*!< in: record offset on page */
-{
- ut_ad(offset >= PAGE_NEW_INFIMUM);
- ut_ad(offset <= UNIV_PAGE_SIZE - PAGE_EMPTY_DIR_START);
-
- return(offset == PAGE_NEW_INFIMUM || offset == PAGE_OLD_INFIMUM);
-}
-
-/************************************************************//**
-TRUE if the record is a user record on the page.
-@return TRUE if a user record */
-UNIV_INLINE
-ibool
-page_rec_is_user_rec(
-/*=================*/
- const rec_t* rec) /*!< in: record */
+page_rec_is_user_rec(const rec_t* rec)
{
ut_ad(page_rec_check(rec));
-
return(page_rec_is_user_rec_low(page_offset(rec)));
}
-/************************************************************//**
-TRUE if the record is the supremum record on a page.
-@return TRUE if the supremum record */
-UNIV_INLINE
-ibool
-page_rec_is_supremum(
-/*=================*/
- const rec_t* rec) /*!< in: record */
+/** Determine whether an index page record is the supremum record.
+@param[in] rec record in an index page
+@return true if the supremum record */
+inline
+bool
+page_rec_is_supremum(const rec_t* rec)
{
ut_ad(page_rec_check(rec));
-
return(page_rec_is_supremum_low(page_offset(rec)));
}
-/************************************************************//**
-TRUE if the record is the infimum record on a page.
-@return TRUE if the infimum record */
-UNIV_INLINE
-ibool
-page_rec_is_infimum(
-/*================*/
- const rec_t* rec) /*!< in: record */
+/** Determine whether an index page record is the infimum record.
+@param[in] rec record in an index page
+@return true if the infimum record */
+inline
+bool
+page_rec_is_infimum(const rec_t* rec)
{
ut_ad(page_rec_check(rec));
-
return(page_rec_is_infimum_low(page_offset(rec)));
}
diff --git a/storage/innobase/include/rem0rec.h b/storage/innobase/include/rem0rec.h
index 61220d4f533..6e927da9bd9 100644
--- a/storage/innobase/include/rem0rec.h
+++ b/storage/innobase/include/rem0rec.h
@@ -444,38 +444,41 @@ rec_get_n_extern_new(
ulint n) /*!< in: number of columns to scan */
MY_ATTRIBUTE((nonnull, warn_unused_result));
-/******************************************************//**
-The following function determines the offsets to each field
-in the record. It can reuse a previously allocated array.
+/** Determine the offsets to each field in an index record.
+@param[in] rec physical record
+@param[in] index the index that the record belongs to
+@param[in,out] offsets array comprising offsets[0] allocated elements,
+ or an array from rec_get_offsets(), or NULL
+@param[in] leaf whether this is a leaf-page record
+@param[in] n_fields maximum number of offsets to compute
+ (ULINT_UNDEFINED to compute all offsets)
+@param[in,out] heap memory heap
@return the new offsets */
ulint*
rec_get_offsets_func(
-/*=================*/
- const rec_t* rec, /*!< in: physical record */
- const dict_index_t* index, /*!< in: record descriptor */
- ulint* offsets,/*!< in/out: array consisting of
- offsets[0] allocated elements,
- or an array from rec_get_offsets(),
- or NULL */
- ulint n_fields,/*!< in: maximum number of
- initialized fields
- (ULINT_UNDEFINED if all fields) */
+ const rec_t* rec,
+ const dict_index_t* index,
+ ulint* offsets,
+#ifdef UNIV_DEBUG
+ bool leaf,
+#endif /* UNIV_DEBUG */
+ ulint n_fields,
#ifdef UNIV_DEBUG
const char* file, /*!< in: file name where called */
unsigned line, /*!< in: line number where called */
#endif /* UNIV_DEBUG */
mem_heap_t** heap) /*!< in/out: memory heap */
#ifdef UNIV_DEBUG
- MY_ATTRIBUTE((nonnull(1,2,5,7),warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,2,6,8),warn_unused_result));
#else /* UNIV_DEBUG */
MY_ATTRIBUTE((nonnull(1,2,5),warn_unused_result));
#endif /* UNIV_DEBUG */
#ifdef UNIV_DEBUG
-# define rec_get_offsets(rec,index,offsets,n,heap) \
- rec_get_offsets_func(rec,index,offsets,n,__FILE__,__LINE__,heap)
+# define rec_get_offsets(rec, index, offsets, leaf, n, heap) \
+ rec_get_offsets_func(rec,index,offsets,leaf,n,__FILE__,__LINE__,heap)
#else /* UNIV_DEBUG */
-# define rec_get_offsets(rec, index, offsets, n, heap) \
+# define rec_get_offsets(rec, index, offsets, leaf, n, heap) \
rec_get_offsets_func(rec, index, offsets, n, heap)
#endif /* UNIV_DEBUG */
@@ -933,19 +936,31 @@ rec_get_converted_size(
const dtuple_t* dtuple, /*!< in: data tuple */
ulint n_ext) /*!< in: number of externally stored columns */
MY_ATTRIBUTE((warn_unused_result, nonnull));
-/**************************************************************//**
-Copies the first n fields of a physical record to a data tuple.
-The fields are copied to the memory heap. */
+/** Copy the first n fields of a (copy of a) physical record to a data tuple.
+The fields are copied into the memory heap.
+@param[out] tuple data tuple
+@param[in] rec index record, or a copy thereof
+@param[in] is_leaf whether rec is a leaf page record
+@param[in] n_fields number of fields to copy
+@param[in,out] heap memory heap */
void
-rec_copy_prefix_to_dtuple(
-/*======================*/
- dtuple_t* tuple, /*!< out: data tuple */
- const rec_t* rec, /*!< in: physical record */
- const dict_index_t* index, /*!< in: record descriptor */
- ulint n_fields, /*!< in: number of fields
- to copy */
- mem_heap_t* heap) /*!< in: memory heap */
+rec_copy_prefix_to_dtuple_func(
+ dtuple_t* tuple,
+ const rec_t* rec,
+ const dict_index_t* index,
+#ifdef UNIV_DEBUG
+ bool is_leaf,
+#endif /* UNIV_DEBUG */
+ ulint n_fields,
+ mem_heap_t* heap)
MY_ATTRIBUTE((nonnull));
+#ifdef UNIV_DEBUG
+# define rec_copy_prefix_to_dtuple(tuple,rec,index,leaf,n_fields,heap) \
+ rec_copy_prefix_to_dtuple_func(tuple,rec,index,leaf,n_fields,heap)
+#else /* UNIV_DEBUG */
+# define rec_copy_prefix_to_dtuple(tuple,rec,index,leaf,n_fields,heap) \
+ rec_copy_prefix_to_dtuple_func(tuple,rec,index,n_fields,heap)
+#endif /* UNIV_DEBUG */
/***************************************************************//**
Validates the consistency of a physical record.
@return TRUE if ok */
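
Both rem0rec.h hunks above apply the same debug-parameter pattern, and the reason the new leaf flag is worth passing around is that a single index describes two record shapes: a leaf-page record carries the full set of index fields, while a node-pointer record carries only the key fields needed in the tree plus the child page number, so the offset computation must know which shape it is decoding. A toy illustration of that field-count difference (the counts are invented; only the leaf/non-leaf split is the point):

#include <cstdio>

struct index_desc {
	unsigned n_fields;		/* fields in a leaf-page record */
	unsigned n_uniq_in_tree;	/* key fields kept in node-pointer records */
};

static unsigned fields_in_record(const index_desc& index, bool leaf)
{
	/* The +1 is the child page number appended to every node pointer. */
	return leaf ? index.n_fields : index.n_uniq_in_tree + 1;
}

int main()
{
	index_desc idx = {5, 2};
	std::printf("leaf=%u node_ptr=%u\n",
		    fields_in_record(idx, true), fields_in_record(idx, false));
	return 0;
}
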
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index 3166bb810a1..90755445d95 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -5595,13 +5595,14 @@ lock_rec_print(FILE* file, const lock_t* lock)
fprintf(file, "Record lock, heap no %lu", (ulong) i);
if (block) {
+ ut_ad(page_is_leaf(block->frame));
const rec_t* rec;
rec = page_find_rec_with_heap_no(
buf_block_get_frame(block), i);
offsets = rec_get_offsets(
- rec, lock->index, offsets,
+ rec, lock->index, offsets, true,
ULINT_UNDEFINED, &heap);
putc(' ', file);
@@ -6433,8 +6434,10 @@ loop:
rec = page_find_rec_with_heap_no(block->frame, i);
ut_a(rec);
+ ut_ad(page_rec_is_leaf(rec));
offsets = rec_get_offsets(rec, lock->index, offsets,
- ULINT_UNDEFINED, &heap);
+ true, ULINT_UNDEFINED,
+ &heap);
/* If this thread is holding the file space
latch (fil_space_t::latch), the following
@@ -6777,7 +6780,7 @@ lock_rec_insert_check_and_lock(
const ulint* offsets;
rec_offs_init(offsets_);
- offsets = rec_get_offsets(next_rec, index, offsets_,
+ offsets = rec_get_offsets(next_rec, index, offsets_, true,
ULINT_UNDEFINED, &heap);
ut_ad(lock_rec_queue_validate(
@@ -7004,7 +7007,7 @@ lock_sec_rec_modify_check_and_lock(
const ulint* offsets;
rec_offs_init(offsets_);
- offsets = rec_get_offsets(rec, index, offsets_,
+ offsets = rec_get_offsets(rec, index, offsets_, true,
ULINT_UNDEFINED, &heap);
ut_ad(lock_rec_queue_validate(
@@ -7214,7 +7217,8 @@ lock_clust_rec_read_check_and_lock_alt(
dberr_t err;
rec_offs_init(offsets_);
- offsets = rec_get_offsets(rec, index, offsets,
+ ut_ad(page_rec_is_leaf(rec));
+ offsets = rec_get_offsets(rec, index, offsets, true,
ULINT_UNDEFINED, &tmp_heap);
err = lock_clust_rec_read_check_and_lock(flags, block, rec, index,
offsets, mode, gap_mode, thr);
diff --git a/storage/innobase/mtr/mtr0log.cc b/storage/innobase/mtr/mtr0log.cc
index 7d97aaa3f42..8cfde15a3ba 100644
--- a/storage/innobase/mtr/mtr0log.cc
+++ b/storage/innobase/mtr/mtr0log.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -443,10 +444,11 @@ mlog_open_and_write_index(
alloc = mtr_buf_t::MAX_DATA_SIZE;
}
+ const bool is_leaf = page_is_leaf(page_align(rec));
+
/* For spatial index, on non-leaf page, we just keep
2 fields, MBR and page no. */
- if (dict_index_is_spatial(index)
- && !page_is_leaf(page_align(rec))) {
+ if (!is_leaf && dict_index_is_spatial(index)) {
n = DICT_INDEX_SPATIAL_NODEPTR_SIZE;
}
@@ -464,7 +466,7 @@ mlog_open_and_write_index(
mach_write_to_2(log_ptr, n);
log_ptr += 2;
- if (page_is_leaf(page_align(rec))) {
+ if (is_leaf) {
mach_write_to_2(
log_ptr, dict_index_get_n_unique_in_tree(index));
} else {
@@ -601,6 +603,7 @@ mlog_parse_index(
}
/* avoid ut_ad(index->cached) in dict_index_get_n_unique_in_tree */
ind->cached = TRUE;
+ ut_d(ind->is_dummy = true);
*index = ind;
return(ptr);
}
diff --git a/storage/innobase/page/page0cur.cc b/storage/innobase/page/page0cur.cc
index e5c903935f1..23a11fcfaa3 100644
--- a/storage/innobase/page/page0cur.cc
+++ b/storage/innobase/page/page0cur.cc
@@ -107,9 +107,10 @@ page_cur_try_search_shortcut(
rec_offs_init(offsets_);
ut_ad(dtuple_check_typed(tuple));
+ ut_ad(page_is_leaf(page));
rec = page_header_get_ptr(page, PAGE_LAST_INSERT);
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, true,
dtuple_get_n_fields(tuple), &heap);
ut_ad(rec);
@@ -124,7 +125,7 @@ page_cur_try_search_shortcut(
next_rec = page_rec_get_next_const(rec);
if (!page_rec_is_supremum(next_rec)) {
- offsets = rec_get_offsets(next_rec, index, offsets,
+ offsets = rec_get_offsets(next_rec, index, offsets, true,
dtuple_get_n_fields(tuple), &heap);
if (cmp_dtuple_rec_with_match(tuple, next_rec, offsets,
@@ -190,9 +191,10 @@ page_cur_try_search_shortcut_bytes(
rec_offs_init(offsets_);
ut_ad(dtuple_check_typed(tuple));
+ ut_ad(page_is_leaf(page));
rec = page_header_get_ptr(page, PAGE_LAST_INSERT);
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, true,
dtuple_get_n_fields(tuple), &heap);
ut_ad(rec);
@@ -213,7 +215,7 @@ page_cur_try_search_shortcut_bytes(
next_rec = page_rec_get_next_const(rec);
if (!page_rec_is_supremum(next_rec)) {
- offsets = rec_get_offsets(next_rec, index, offsets,
+ offsets = rec_get_offsets(next_rec, index, offsets, true,
dtuple_get_n_fields(tuple), &heap);
if (cmp_dtuple_rec_with_match_bytes(
@@ -354,9 +356,10 @@ page_cur_search_with_match(
#endif /* UNIV_ZIP_DEBUG */
ut_d(page_check_dir(page));
+ const bool is_leaf = page_is_leaf(page);
#ifdef BTR_CUR_HASH_ADAPT
- if (page_is_leaf(page)
+ if (is_leaf
&& (mode == PAGE_CUR_LE)
&& !dict_index_is_spatial(index)
&& (page_header_get_field(page, PAGE_N_DIRECTION) > 3)
@@ -383,7 +386,7 @@ page_cur_search_with_match(
if (dict_index_is_spatial(index) && mode > PAGE_CUR_LE) {
/* For leaf level insert, we still use the traditional
compare function for now */
- if (mode == PAGE_CUR_RTREE_INSERT && page_is_leaf(page)){
+ if (mode == PAGE_CUR_RTREE_INSERT && is_leaf) {
mode = PAGE_CUR_LE;
} else {
rtr_cur_search_with_match(
@@ -428,7 +431,7 @@ page_cur_search_with_match(
offsets = offsets_;
offsets = rec_get_offsets(
- mid_rec, index, offsets,
+ mid_rec, index, offsets, is_leaf,
dtuple_get_n_fields_cmp(tuple), &heap);
cmp = cmp_dtuple_rec_with_match(
@@ -482,7 +485,7 @@ up_slot_match:
offsets = offsets_;
offsets = rec_get_offsets(
- mid_rec, index, offsets,
+ mid_rec, index, offsets, is_leaf,
dtuple_get_n_fields_cmp(tuple), &heap);
cmp = cmp_dtuple_rec_with_match(
@@ -664,6 +667,7 @@ page_cur_search_with_match_bytes(
/* Perform binary search until the lower and upper limit directory
slots come to the distance 1 of each other */
+ ut_d(bool is_leaf = page_is_leaf(page));
while (up - low > 1) {
mid = (low + up) / 2;
@@ -675,7 +679,7 @@ page_cur_search_with_match_bytes(
up_matched_fields, up_matched_bytes);
offsets = rec_get_offsets(
- mid_rec, index, offsets_,
+ mid_rec, index, offsets_, is_leaf,
dtuple_get_n_fields_cmp(tuple), &heap);
cmp = cmp_dtuple_rec_with_match_bytes(
@@ -732,7 +736,7 @@ up_slot_match:
up_matched_fields, up_matched_bytes);
offsets = rec_get_offsets(
- mid_rec, index, offsets_,
+ mid_rec, index, offsets_, is_leaf,
dtuple_get_n_fields_cmp(tuple), &heap);
cmp = cmp_dtuple_rec_with_match_bytes(
@@ -850,15 +854,9 @@ page_cur_insert_rec_write_log(
const byte* log_end;
ulint i;
- /* Avoid REDO logging to save on costly IO because
- temporary tables are not recovered during crash recovery. */
if (dict_table_is_temporary(index->table)) {
- byte* log_ptr = mlog_open(mtr, 0);
- if (log_ptr == NULL) {
- return;
- }
- mlog_close(mtr, log_ptr);
- log_ptr = NULL;
+ ut_ad(!mlog_open(mtr, 0));
+ return;
}
ut_a(rec_size < UNIV_PAGE_SIZE);
@@ -867,6 +865,8 @@ page_cur_insert_rec_write_log(
ut_ad(!page_rec_is_comp(insert_rec)
== !dict_table_is_comp(index->table));
+ ut_d(const bool is_leaf = page_rec_is_leaf(cursor_rec));
+
{
mem_heap_t* heap = NULL;
ulint cur_offs_[REC_OFFS_NORMAL_SIZE];
@@ -879,9 +879,9 @@ page_cur_insert_rec_write_log(
rec_offs_init(ins_offs_);
cur_offs = rec_get_offsets(cursor_rec, index, cur_offs_,
- ULINT_UNDEFINED, &heap);
+ is_leaf, ULINT_UNDEFINED, &heap);
ins_offs = rec_get_offsets(insert_rec, index, ins_offs_,
- ULINT_UNDEFINED, &heap);
+ is_leaf, ULINT_UNDEFINED, &heap);
extra_size = rec_offs_extra_size(ins_offs);
cur_extra_size = rec_offs_extra_size(cur_offs);
@@ -1139,7 +1139,9 @@ page_cur_parse_insert_rec(
/* Read from the log the inserted index record end segment which
differs from the cursor record */
- offsets = rec_get_offsets(cursor_rec, index, offsets,
+ ut_d(bool is_leaf = page_is_leaf(page));
+
+ offsets = rec_get_offsets(cursor_rec, index, offsets, is_leaf,
ULINT_UNDEFINED, &heap);
if (!(end_seg_len & 0x1UL)) {
@@ -1184,7 +1186,7 @@ page_cur_parse_insert_rec(
page_cur_position(cursor_rec, block, &cursor);
offsets = rec_get_offsets(buf + origin_offset, index, offsets,
- ULINT_UNDEFINED, &heap);
+ is_leaf, ULINT_UNDEFINED, &heap);
if (UNIV_UNLIKELY(!page_cur_rec_insert(&cursor,
buf + origin_offset,
index, offsets, mtr))) {
@@ -1275,7 +1277,8 @@ page_cur_insert_rec_low(
rec_offs_init(foffsets_);
foffsets = rec_get_offsets(
- free_rec, index, foffsets, ULINT_UNDEFINED, &heap);
+ free_rec, index, foffsets, page_is_leaf(page),
+ ULINT_UNDEFINED, &heap);
if (rec_offs_size(foffsets) < rec_size) {
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
@@ -1705,6 +1708,7 @@ page_cur_insert_rec_zip(
rec_offs_init(foffsets_);
foffsets = rec_get_offsets(free_rec, index, foffsets,
+ page_rec_is_leaf(free_rec),
ULINT_UNDEFINED, &heap);
if (rec_offs_size(foffsets) < rec_size) {
too_small:
@@ -2052,7 +2056,6 @@ page_copy_rec_list_end_to_created_page(
page_header_set_ptr(new_page, NULL, PAGE_HEAP_TOP,
new_page + UNIV_PAGE_SIZE - 1);
#endif
-
log_ptr = page_copy_rec_list_to_created_page_write_log(new_page,
index, mtr);
@@ -2075,8 +2078,10 @@ page_copy_rec_list_end_to_created_page(
slot_index = 0;
n_recs = 0;
+ ut_d(const bool is_leaf = page_is_leaf(new_page));
+
do {
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, is_leaf,
ULINT_UNDEFINED, &heap);
insert_rec = rec_copy(heap_top, rec, offsets);
@@ -2251,6 +2256,7 @@ page_cur_parse_delete_rec(
page_cur_delete_rec(&cursor, index,
rec_get_offsets(rec, index, offsets_,
+ page_rec_is_leaf(rec),
ULINT_UNDEFINED, &heap),
mtr);
if (UNIV_LIKELY_NULL(heap)) {
diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc
index b2c27acfbaa..fcc77aeb591 100644
--- a/storage/innobase/page/page0page.cc
+++ b/storage/innobase/page/page0page.cc
@@ -598,6 +598,7 @@ page_copy_rec_list_end_no_locks(
ut_a(page_is_comp(new_page) == page_rec_is_comp(rec));
ut_a(mach_read_from_2(new_page + UNIV_PAGE_SIZE - 10) == (ulint)
(page_is_comp(new_page) ? PAGE_NEW_INFIMUM : PAGE_OLD_INFIMUM));
+ ut_d(const bool is_leaf = page_is_leaf(block->frame));
cur2 = page_get_infimum_rec(buf_block_get_frame(new_block));
@@ -606,7 +607,7 @@ page_copy_rec_list_end_no_locks(
while (!page_cur_is_after_last(&cur1)) {
rec_t* cur1_rec = page_cur_get_rec(&cur1);
rec_t* ins_rec;
- offsets = rec_get_offsets(cur1_rec, index, offsets,
+ offsets = rec_get_offsets(cur1_rec, index, offsets, is_leaf,
ULINT_UNDEFINED, &heap);
ins_rec = page_cur_insert_rec_low(cur2, index,
cur1_rec, offsets, mtr);
@@ -835,6 +836,8 @@ page_copy_rec_list_start(
cur2 = ret;
+ const bool is_leaf = page_rec_is_leaf(rec);
+
/* Copy records from the original page to the new page */
if (dict_index_is_spatial(index)) {
ulint max_to_move = page_get_n_recs(
@@ -856,6 +859,7 @@ page_copy_rec_list_start(
while (page_cur_get_rec(&cur1) != rec) {
rec_t* cur1_rec = page_cur_get_rec(&cur1);
offsets = rec_get_offsets(cur1_rec, index, offsets,
+ is_leaf,
ULINT_UNDEFINED, &heap);
cur2 = page_cur_insert_rec_low(cur2, index,
cur1_rec, offsets, mtr);
@@ -872,8 +876,7 @@ page_copy_rec_list_start(
same temp-table in parallel.
max_trx_id is ignored for temp tables because it is not required
for MVCC. */
- if (dict_index_is_sec_or_ibuf(index)
- && page_is_leaf(page_align(rec))
+ if (is_leaf && dict_index_is_sec_or_ibuf(index)
&& !dict_table_is_temporary(index->table)) {
page_update_max_trx_id(new_block, NULL,
page_get_max_trx_id(page_align(rec)),
@@ -1103,6 +1106,8 @@ delete_all:
? MLOG_COMP_LIST_END_DELETE
: MLOG_LIST_END_DELETE, mtr);
+ ut_d(const bool is_leaf = page_is_leaf(page));
+
if (page_zip) {
mtr_log_t log_mode;
@@ -1115,7 +1120,7 @@ delete_all:
page_cur_t cur;
page_cur_position(rec, block, &cur);
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, is_leaf,
ULINT_UNDEFINED, &heap);
rec = rec_get_next_ptr(rec, TRUE);
#ifdef UNIV_ZIP_DEBUG
@@ -1149,6 +1154,7 @@ delete_all:
do {
ulint s;
offsets = rec_get_offsets(rec2, index, offsets,
+ is_leaf,
ULINT_UNDEFINED, &heap);
s = rec_offs_size(offsets);
ut_ad(rec2 - page + s - rec_offs_extra_size(offsets)
@@ -1291,10 +1297,12 @@ page_delete_rec_list_start(
/* Individual deletes are not logged */
mtr_log_t log_mode = mtr_set_log_mode(mtr, MTR_LOG_NONE);
+ ut_d(const bool is_leaf = page_rec_is_leaf(rec));
while (page_cur_get_rec(&cur1) != rec) {
offsets = rec_get_offsets(page_cur_get_rec(&cur1), index,
- offsets, ULINT_UNDEFINED, &heap);
+ offsets, is_leaf,
+ ULINT_UNDEFINED, &heap);
page_cur_delete_rec(&cur1, index, offsets, mtr);
}
@@ -2466,6 +2474,7 @@ page_validate(
for (;;) {
offsets = rec_get_offsets(rec, index, offsets,
+ page_is_leaf(page),
ULINT_UNDEFINED, &heap);
if (page_is_comp(page) && page_rec_is_user_rec(rec)
@@ -2635,6 +2644,7 @@ n_owned_zero:
while (rec != NULL) {
offsets = rec_get_offsets(rec, index, offsets,
+ page_is_leaf(page),
ULINT_UNDEFINED, &heap);
if (UNIV_UNLIKELY(!page_rec_validate(rec, offsets))) {
diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc
index 277af52eaef..10d905e0c8b 100644
--- a/storage/innobase/page/page0zip.cc
+++ b/storage/innobase/page/page0zip.cc
@@ -885,7 +885,7 @@ page_zip_compress_node_ptrs(
do {
const rec_t* rec = *recs++;
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, false,
ULINT_UNDEFINED, &heap);
/* Only leaf nodes may contain externally stored columns. */
ut_ad(!rec_offs_any_extern(offsets));
@@ -1134,7 +1134,7 @@ page_zip_compress_clust(
do {
const rec_t* rec = *recs++;
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, true,
ULINT_UNDEFINED, &heap);
ut_ad(rec_offs_n_fields(offsets)
== dict_index_get_n_fields(index));
@@ -2078,6 +2078,7 @@ page_zip_apply_log(
sorted by address (indexed by
heap_no - PAGE_HEAP_NO_USER_LOW) */
ulint n_dense,/*!< in: size of recs[] */
+ bool is_leaf,/*!< in: whether this is a leaf page */
ulint trx_id_col,/*!< in: column number of trx_id in the index,
or ULINT_UNDEFINED if none */
ulint heap_status,
@@ -2153,7 +2154,7 @@ page_zip_apply_log(
/* Clear the data bytes of the record. */
mem_heap_t* heap = NULL;
ulint* offs;
- offs = rec_get_offsets(rec, index, offsets,
+ offs = rec_get_offsets(rec, index, offsets, is_leaf,
ULINT_UNDEFINED, &heap);
memset(rec, 0, rec_offs_data_size(offs));
@@ -2349,7 +2350,7 @@ page_zip_decompress_node_ptrs(
}
/* Read the offsets. The status bits are needed here. */
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, false,
ULINT_UNDEFINED, &heap);
/* Non-leaf nodes should not have any externally
@@ -2435,7 +2436,7 @@ zlib_done:
const byte* mod_log_ptr;
mod_log_ptr = page_zip_apply_log(d_stream->next_in,
d_stream->avail_in + 1,
- recs, n_dense,
+ recs, n_dense, false,
ULINT_UNDEFINED, heap_status,
index, offsets);
@@ -2466,7 +2467,7 @@ zlib_done:
for (slot = 0; slot < n_dense; slot++) {
rec_t* rec = recs[slot];
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, false,
ULINT_UNDEFINED, &heap);
/* Non-leaf nodes should not have any externally
stored columns. */
@@ -2587,7 +2588,7 @@ zlib_done:
const byte* mod_log_ptr;
mod_log_ptr = page_zip_apply_log(d_stream->next_in,
d_stream->avail_in + 1,
- recs, n_dense,
+ recs, n_dense, true,
ULINT_UNDEFINED, heap_status,
index, offsets);
@@ -2790,7 +2791,7 @@ page_zip_decompress_clust(
}
/* Read the offsets. The status bits are needed here. */
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, true,
ULINT_UNDEFINED, &heap);
/* This is a leaf page in a clustered index. */
@@ -2916,7 +2917,7 @@ zlib_done:
const byte* mod_log_ptr;
mod_log_ptr = page_zip_apply_log(d_stream->next_in,
d_stream->avail_in + 1,
- recs, n_dense,
+ recs, n_dense, true,
trx_id_col, heap_status,
index, offsets);
@@ -2952,7 +2953,7 @@ zlib_done:
rec_t* rec = recs[slot];
ibool exists = !page_zip_dir_find_free(
page_zip, page_offset(rec));
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, true,
ULINT_UNDEFINED, &heap);
dst = rec_get_nth_field(rec, offsets,
@@ -3463,6 +3464,7 @@ page_zip_validate_low(
page + PAGE_NEW_INFIMUM, TRUE);
trec = page_rec_get_next_low(
temp_page + PAGE_NEW_INFIMUM, TRUE);
+ ut_d(const bool is_leaf = page_is_leaf(page));
do {
if (page_offset(rec) != page_offset(trec)) {
@@ -3477,7 +3479,7 @@ page_zip_validate_low(
if (index) {
/* Compare the data. */
offsets = rec_get_offsets(
- rec, index, offsets,
+ rec, index, offsets, is_leaf,
ULINT_UNDEFINED, &heap);
if (memcmp(rec - rec_offs_extra_size(offsets),
diff --git a/storage/innobase/rem/rem0cmp.cc b/storage/innobase/rem/rem0cmp.cc
index 7c965f791be..0e2bc9b30de 100644
--- a/storage/innobase/rem/rem0cmp.cc
+++ b/storage/innobase/rem/rem0cmp.cc
@@ -797,7 +797,6 @@ cmp_dtuple_rec_with_match_bytes(
ut_ad(dtuple_check_typed(dtuple));
ut_ad(rec_offs_validate(rec, index, offsets));
- //ut_ad(page_is_leaf(page_align(rec)));
ut_ad(!(REC_INFO_MIN_REC_FLAG
& dtuple_get_info_bits(dtuple)));
ut_ad(!(REC_INFO_MIN_REC_FLAG
diff --git a/storage/innobase/rem/rem0rec.cc b/storage/innobase/rem/rem0rec.cc
index 66e8ccec178..d4d1a2bab14 100644
--- a/storage/innobase/rem/rem0rec.cc
+++ b/storage/innobase/rem/rem0rec.cc
@@ -520,22 +520,25 @@ resolved:
}
}
-/******************************************************//**
-The following function determines the offsets to each field
-in the record. It can reuse a previously returned array.
+/** Determine the offsets to each field in an index record.
+@param[in] rec physical record
+@param[in] index the index that the record belongs to
+@param[in,out] offsets array comprising offsets[0] allocated elements,
+ or an array from rec_get_offsets(), or NULL
+@param[in] leaf whether this is a leaf-page record
+@param[in] n_fields maximum number of offsets to compute
+ (ULINT_UNDEFINED to compute all offsets)
+@param[in,out] heap memory heap
@return the new offsets */
ulint*
rec_get_offsets_func(
-/*=================*/
- const rec_t* rec, /*!< in: physical record */
- const dict_index_t* index, /*!< in: record descriptor */
- ulint* offsets,/*!< in/out: array consisting of
- offsets[0] allocated elements,
- or an array from rec_get_offsets(),
- or NULL */
- ulint n_fields,/*!< in: maximum number of
- initialized fields
- (ULINT_UNDEFINED if all fields) */
+ const rec_t* rec,
+ const dict_index_t* index,
+ ulint* offsets,
+#ifdef UNIV_DEBUG
+ bool leaf,
+#endif /* UNIV_DEBUG */
+ ulint n_fields,
#ifdef UNIV_DEBUG
const char* file, /*!< in: file name where called */
unsigned line, /*!< in: line number where called */
@@ -553,17 +556,23 @@ rec_get_offsets_func(
switch (UNIV_EXPECT(rec_get_status(rec),
REC_STATUS_ORDINARY)) {
case REC_STATUS_ORDINARY:
+ ut_ad(leaf);
n = dict_index_get_n_fields(index);
break;
case REC_STATUS_NODE_PTR:
/* Node pointer records consist of the
uniquely identifying fields of the record
followed by a child page number field. */
+ ut_ad(!leaf);
n = dict_index_get_n_unique_in_tree_nonleaf(index) + 1;
break;
case REC_STATUS_INFIMUM:
case REC_STATUS_SUPREMUM:
/* infimum or supremum record */
+ ut_ad(rec_get_heap_no_new(rec)
+ == (rec_get_status(rec) == REC_STATUS_INFIMUM
+ ? PAGE_HEAP_NO_INFIMUM
+ : PAGE_HEAP_NO_SUPREMUM));
n = 1;
break;
default:
@@ -572,6 +581,28 @@ rec_get_offsets_func(
}
} else {
n = rec_get_n_fields_old(rec);
+ /* Here, rec can be allocated from the heap (copied
+ from an index page record), or it can be located in an
+ index page. If rec is not in an index page, then
+ page_rec_is_user_rec(rec) and similar predicates
+ cannot be evaluated. We can still distinguish the
+ infimum and supremum record based on the heap number. */
+ ut_d(const bool is_user_rec = rec_get_heap_no_old(rec)
+ >= PAGE_HEAP_NO_USER_LOW);
+ ut_ad(n <= ulint(index->n_fields + !leaf) || index->is_dummy
+ || dict_index_is_ibuf(index));
+ /* The infimum and supremum records carry 1 field. */
+ ut_ad(is_user_rec || n == 1);
+ ut_ad(!is_user_rec || leaf || index->is_dummy
+ || dict_index_is_ibuf(index)
+ || n
+ == dict_index_get_n_unique_in_tree_nonleaf(index) + 1);
+ ut_ad(!is_user_rec || !leaf || index->is_dummy
+ || dict_index_is_ibuf(index)
+ || n == n_fields /* btr_pcur_restore_position() */
+ || n == index->n_fields
+ || (index->id == DICT_INDEXES_ID
+ && (n == DICT_NUM_FIELDS__SYS_INDEXES - 1)));
}
if (UNIV_UNLIKELY(n_fields < n)) {
@@ -1468,30 +1499,6 @@ rec_convert_dtuple_to_rec(
rec = rec_convert_dtuple_to_rec_old(buf, dtuple, n_ext);
}
-#ifdef UNIV_DEBUG
- {
- mem_heap_t* heap = NULL;
- ulint offsets_[REC_OFFS_NORMAL_SIZE];
- const ulint* offsets;
- ulint i;
- rec_offs_init(offsets_);
-
- offsets = rec_get_offsets(rec, index,
- offsets_, ULINT_UNDEFINED, &heap);
- ut_ad(rec_validate(rec, offsets));
- ut_ad(dtuple_get_n_fields(dtuple)
- == rec_offs_n_fields(offsets));
-
- for (i = 0; i < rec_offs_n_fields(offsets); i++) {
- ut_ad(!dfield_is_ext(dtuple_get_nth_field(dtuple, i))
- == !rec_offs_nth_extern(offsets, i));
- }
-
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
- }
- }
-#endif /* UNIV_DEBUG */
return(rec);
}
@@ -1541,25 +1548,32 @@ rec_convert_dtuple_to_temp(
REC_STATUS_ORDINARY, true);
}
-/**************************************************************//**
-Copies the first n fields of a physical record to a data tuple. The fields
-are copied to the memory heap. */
+/** Copy the first n fields of a (copy of a) physical record to a data tuple.
+The fields are copied into the memory heap.
+@param[out] tuple data tuple
+@param[in] rec index record, or a copy thereof
+@param[in] is_leaf whether rec is a leaf page record
+@param[in] n_fields number of fields to copy
+@param[in,out] heap memory heap */
void
-rec_copy_prefix_to_dtuple(
-/*======================*/
- dtuple_t* tuple, /*!< out: data tuple */
- const rec_t* rec, /*!< in: physical record */
- const dict_index_t* index, /*!< in: record descriptor */
- ulint n_fields, /*!< in: number of fields
- to copy */
- mem_heap_t* heap) /*!< in: memory heap */
+rec_copy_prefix_to_dtuple_func(
+ dtuple_t* tuple,
+ const rec_t* rec,
+ const dict_index_t* index,
+#ifdef UNIV_DEBUG
+ bool is_leaf,
+#endif /* UNIV_DEBUG */
+ ulint n_fields,
+ mem_heap_t* heap)
{
- ulint i;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
rec_offs_init(offsets_);
- offsets = rec_get_offsets(rec, index, offsets, n_fields, &heap);
+ ut_ad(is_leaf || n_fields <= index->n_uniq + 1);
+
+ offsets = rec_get_offsets(rec, index, offsets, is_leaf,
+ n_fields, &heap);
ut_ad(rec_validate(rec, offsets));
ut_ad(dtuple_check_typed(tuple));
@@ -1567,7 +1581,7 @@ rec_copy_prefix_to_dtuple(
dtuple_set_info_bits(tuple, rec_get_info_bits(
rec, dict_table_is_comp(index->table)));
- for (i = 0; i < n_fields; i++) {
+ for (ulint i = 0; i < n_fields; i++) {
dfield_t* field;
const byte* data;
ulint len;
@@ -2155,6 +2169,7 @@ rec_print(
rec_print_new(file, rec,
rec_get_offsets(rec, index, offsets_,
+ page_rec_is_leaf(rec),
ULINT_UNDEFINED, &heap));
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
@@ -2225,7 +2240,8 @@ operator<<(std::ostream& o, const rec_index_print& r)
{
mem_heap_t* heap = NULL;
ulint* offsets = rec_get_offsets(
- r.m_rec, r.m_index, NULL, ULINT_UNDEFINED, &heap);
+ r.m_rec, r.m_index, NULL, page_rec_is_leaf(r.m_rec),
+ ULINT_UNDEFINED, &heap);
rec_print(o, r.m_rec,
rec_get_info_bits(r.m_rec, rec_offs_comp(offsets)),
offsets);
@@ -2272,10 +2288,12 @@ rec_get_trx_id(
ut_ad(mach_read_from_8(page + PAGE_HEADER + PAGE_INDEX_ID)
== index->id);
ut_ad(dict_index_is_clust(index));
+ ut_ad(page_rec_is_leaf(rec));
ut_ad(trx_id_col > 0);
ut_ad(trx_id_col != ULINT_UNDEFINED);
- offsets = rec_get_offsets(rec, index, offsets, trx_id_col + 1, &heap);
+ offsets = rec_get_offsets(rec, index, offsets, true,
+ trx_id_col + 1, &heap);
trx_id = rec_get_nth_field(rec, offsets, trx_id_col, &len);
@@ -2323,7 +2341,7 @@ wsrep_rec_get_foreign_key(
ut_ad(index_ref);
rec_offs_init(offsets_);
- offsets = rec_get_offsets(rec, index_for, offsets_,
+ offsets = rec_get_offsets(rec, index_for, offsets_, true,
ULINT_UNDEFINED, &heap);
ut_ad(rec_offs_validate(rec, NULL, offsets));
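A sketch of the updated rec_get_offsets() contract documented above (illustration only; the surrounding variables are assumed to exist as in the callers shown in this patch): the new leaf argument must match the record's place in the B-tree, and debug builds assert it against the record status bits.

	mem_heap_t*	heap	= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets	= offsets_;
	rec_offs_init(offsets_);

	/* Leaf-page record: pass true; checked against REC_STATUS_ORDINARY. */
	offsets = rec_get_offsets(rec, index, offsets, true,
				  ULINT_UNDEFINED, &heap);

	/* For a node-pointer record the flag would be false (REC_STATUS_NODE_PTR).
	Callers that cannot know this statically derive it from the page, e.g.
	page_rec_is_leaf(rec) or page_is_leaf(page_align(rec)). */

	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}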
diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc
index 0b538660c9b..c109afe2682 100644
--- a/storage/innobase/row/row0import.cc
+++ b/storage/innobase/row/row0import.cc
@@ -1735,7 +1735,7 @@ PageConverter::update_records(
if (deleted || clust_index) {
m_offsets = rec_get_offsets(
- rec, m_index->m_srv_index, m_offsets,
+ rec, m_index->m_srv_index, m_offsets, true,
ULINT_UNDEFINED, &m_heap);
}
@@ -2323,7 +2323,7 @@ row_import_set_sys_max_row_id(
rec_offs_init(offsets_);
offsets = rec_get_offsets(
- rec, index, offsets_, ULINT_UNDEFINED, &heap);
+ rec, index, offsets_, true, ULINT_UNDEFINED, &heap);
field = rec_get_nth_field(
rec, offsets,
diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc
index b7f28e633a3..a8258fa2c0f 100644
--- a/storage/innobase/row/row0ins.cc
+++ b/storage/innobase/row/row0ins.cc
@@ -952,7 +952,7 @@ row_ins_foreign_fill_virtual(
ulint offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs_init(offsets_);
const ulint* offsets =
- rec_get_offsets(rec, index, offsets_,
+ rec_get_offsets(rec, index, offsets_, true,
ULINT_UNDEFINED, &cascade->heap);
mem_heap_t* v_heap = NULL;
upd_t* update = cascade->update;
@@ -1732,8 +1732,8 @@ row_ins_check_foreign_constraint(
continue;
}
- offsets = rec_get_offsets(rec, check_index,
- offsets, ULINT_UNDEFINED, &heap);
+ offsets = rec_get_offsets(rec, check_index, offsets, true,
+ ULINT_UNDEFINED, &heap);
if (page_rec_is_supremum(rec)) {
@@ -2122,7 +2122,7 @@ row_ins_scan_sec_index_for_duplicate(
continue;
}
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, true,
ULINT_UNDEFINED, &offsets_heap);
if (flags & BTR_NO_LOCKING_FLAG) {
@@ -2251,7 +2251,7 @@ row_ins_duplicate_error_in_clust_online(
const rec_t* rec = btr_cur_get_rec(cursor);
if (cursor->low_match >= n_uniq && !page_rec_is_infimum(rec)) {
- *offsets = rec_get_offsets(rec, cursor->index, *offsets,
+ *offsets = rec_get_offsets(rec, cursor->index, *offsets, true,
ULINT_UNDEFINED, heap);
err = row_ins_duplicate_online(n_uniq, entry, rec, *offsets);
if (err != DB_SUCCESS) {
@@ -2262,7 +2262,7 @@ row_ins_duplicate_error_in_clust_online(
rec = page_rec_get_next_const(btr_cur_get_rec(cursor));
if (cursor->up_match >= n_uniq && !page_rec_is_supremum(rec)) {
- *offsets = rec_get_offsets(rec, cursor->index, *offsets,
+ *offsets = rec_get_offsets(rec, cursor->index, *offsets, true,
ULINT_UNDEFINED, heap);
err = row_ins_duplicate_online(n_uniq, entry, rec, *offsets);
}
@@ -2318,6 +2318,7 @@ row_ins_duplicate_error_in_clust(
if (!page_rec_is_infimum(rec)) {
offsets = rec_get_offsets(rec, cursor->index, offsets,
+ true,
ULINT_UNDEFINED, &heap);
ulint lock_type;
@@ -2377,6 +2378,7 @@ duplicate:
if (!page_rec_is_supremum(rec)) {
offsets = rec_get_offsets(rec, cursor->index, offsets,
+ true,
ULINT_UNDEFINED, &heap);
if (trx->duplicates) {
@@ -2492,7 +2494,7 @@ row_ins_index_entry_big_rec(
btr_pcur_open(index, entry, PAGE_CUR_LE, BTR_MODIFY_TREE,
&pcur, &mtr);
rec = btr_pcur_get_rec(&pcur);
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, true,
ULINT_UNDEFINED, heap);
DEBUG_SYNC_C_IF_THD(thd, "before_row_ins_extern");
@@ -3052,7 +3054,7 @@ row_ins_sec_index_entry_low(
ut_ad(!page_rec_is_infimum(rec));
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, true,
ULINT_UNDEFINED, &offsets_heap);
err = row_ins_set_exclusive_rec_lock(
@@ -3083,7 +3085,7 @@ row_ins_sec_index_entry_low(
prefix, we must convert the insert into a modify of an
existing record */
offsets = rec_get_offsets(
- btr_cur_get_rec(&cursor), index, offsets,
+ btr_cur_get_rec(&cursor), index, offsets, true,
ULINT_UNDEFINED, &offsets_heap);
err = row_ins_sec_index_entry_by_modify(
diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc
index 93ddb8ee45b..58c316c6327 100644
--- a/storage/innobase/row/row0log.cc
+++ b/storage/innobase/row/row0log.cc
@@ -998,7 +998,7 @@ row_log_table_low(
&index->lock,
RW_LOCK_FLAG_S | RW_LOCK_FLAG_X | RW_LOCK_FLAG_SX));
ut_ad(fil_page_get_type(page_align(rec)) == FIL_PAGE_INDEX);
- ut_ad(page_is_leaf(page_align(rec)));
+ ut_ad(page_rec_is_leaf(rec));
ut_ad(!page_is_comp(page_align(rec)) == !rec_offs_comp(offsets));
/* old_pk=row_log_table_get_pk() [not needed in INSERT] is a prefix
of the clustered index record (PRIMARY KEY,DB_TRX_ID,DB_ROLL_PTR),
@@ -1263,8 +1263,8 @@ row_log_table_get_pk(
if (!offsets) {
offsets = rec_get_offsets(
- rec, index, NULL, pos + 1,
- heap);
+ rec, index, NULL, true,
+ pos + 1, heap);
}
trx_id_offs = rec_get_nth_field_offs(
@@ -1313,7 +1313,7 @@ row_log_table_get_pk(
}
if (!offsets) {
- offsets = rec_get_offsets(rec, index, NULL,
+ offsets = rec_get_offsets(rec, index, NULL, true,
ULINT_UNDEFINED, heap);
}
@@ -1998,7 +1998,7 @@ all_done:
return(DB_SUCCESS);
}
- offsets = rec_get_offsets(btr_pcur_get_rec(&pcur), index, NULL,
+ offsets = rec_get_offsets(btr_pcur_get_rec(&pcur), index, NULL, true,
ULINT_UNDEFINED, &offsets_heap);
#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
ut_a(!rec_offs_any_null_extern(btr_pcur_get_rec(&pcur), offsets));
@@ -2204,8 +2204,8 @@ func_exit_committed:
/* Prepare to update (or delete) the record. */
ulint* cur_offsets = rec_get_offsets(
- btr_pcur_get_rec(&pcur),
- index, NULL, ULINT_UNDEFINED, &offsets_heap);
+ btr_pcur_get_rec(&pcur), index, NULL, true,
+ ULINT_UNDEFINED, &offsets_heap);
if (!log->same_pk) {
/* Only update the record if DB_TRX_ID,DB_ROLL_PTR match what
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index 68af56e5639..b2e4657476c 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -2060,7 +2060,7 @@ end_of_index:
rec = page_cur_get_rec(cur);
if (online) {
- offsets = rec_get_offsets(rec, clust_index, NULL,
+ offsets = rec_get_offsets(rec, clust_index, NULL, true,
ULINT_UNDEFINED, &row_heap);
rec_trx_id = row_get_rec_trx_id(rec, clust_index,
offsets);
@@ -2152,7 +2152,7 @@ end_of_index:
duplicate keys. */
continue;
} else {
- offsets = rec_get_offsets(rec, clust_index, NULL,
+ offsets = rec_get_offsets(rec, clust_index, NULL, true,
ULINT_UNDEFINED, &row_heap);
/* This is a locking ALTER TABLE.
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index 313a0d55a67..583acdc482b 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -2031,7 +2031,6 @@ run_again:
node->cascade_upd_nodes = cascade_upd_nodes;
cascade_upd_nodes->pop_front();
thr->fk_cascade_depth++;
- prebuilt->m_mysql_table = NULL;
goto run_again;
}
@@ -2245,7 +2244,7 @@ row_unlock_for_mysql(
ulint* offsets = offsets_;
rec_offs_init(offsets_);
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, true,
ULINT_UNDEFINED, &heap);
rec_trx_id = row_get_rec_trx_id(rec, index, offsets);
@@ -5069,7 +5068,7 @@ func_exit:
rec = buf + mach_read_from_4(buf);
- offsets = rec_get_offsets(rec, index, offsets_,
+ offsets = rec_get_offsets(rec, index, offsets_, true,
ULINT_UNDEFINED, &heap);
if (prev_entry != NULL) {
diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc
index 4bdb8b34a80..ba6c1282eaa 100644
--- a/storage/innobase/row/row0purge.cc
+++ b/storage/innobase/row/row0purge.cc
@@ -152,7 +152,7 @@ row_purge_remove_clust_if_poss_low(
rec = btr_pcur_get_rec(&node->pcur);
offsets = rec_get_offsets(
- rec, index, offsets_, ULINT_UNDEFINED, &heap);
+ rec, index, offsets_, true, ULINT_UNDEFINED, &heap);
if (node->roll_ptr != row_get_rec_roll_ptr(rec, index, offsets)) {
/* Someone else has modified the record later: do not remove */
@@ -686,7 +686,7 @@ row_purge_reset_trx_id(purge_node_t* node, mtr_t* mtr)
ulint offsets_[REC_OFFS_HEADER_SIZE + MAX_REF_PARTS + 2];
rec_offs_init(offsets_);
ulint* offsets = rec_get_offsets(
- rec, index, offsets_, trx_id_pos + 2, &heap);
+ rec, index, offsets_, true, trx_id_pos + 2, &heap);
ut_ad(heap == NULL);
ut_ad(dict_index_get_nth_field(index, trx_id_pos)
@@ -1215,7 +1215,8 @@ purge_node_t::validate_pcur()
dict_index_t* clust_index = pcur.btr_cur.index;
ulint* offsets = rec_get_offsets(
- pcur.old_rec, clust_index, NULL, pcur.old_n_fields, &heap);
+ pcur.old_rec, clust_index, NULL, true,
+ pcur.old_n_fields, &heap);
/* Here we are comparing the purge ref record and the stored initial
part in the persistent cursor. In both cases we store n_uniq fields of the
diff --git a/storage/innobase/row/row0row.cc b/storage/innobase/row/row0row.cc
index 69cecf2d79f..a61adb7cd15 100644
--- a/storage/innobase/row/row0row.cc
+++ b/storage/innobase/row/row0row.cc
@@ -400,7 +400,7 @@ row_build_low(
ut_ad(!col_map || col_table);
if (!offsets) {
- offsets = rec_get_offsets(rec, index, offsets_,
+ offsets = rec_get_offsets(rec, index, offsets_, true,
ULINT_UNDEFINED, &tmp_heap);
} else {
ut_ad(rec_offs_validate(rec, index, offsets));
@@ -792,7 +792,7 @@ row_build_row_ref(
ut_ad(heap != NULL);
ut_ad(!dict_index_is_clust(index));
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, true,
ULINT_UNDEFINED, &tmp_heap);
/* Secondary indexes must not contain externally stored columns. */
ut_ad(!rec_offs_any_extern(offsets));
@@ -904,7 +904,7 @@ row_build_row_ref_in_tuple(
ut_ad(clust_index);
if (!offsets) {
- offsets = rec_get_offsets(rec, index, offsets_,
+ offsets = rec_get_offsets(rec, index, offsets_, true,
ULINT_UNDEFINED, &heap);
} else {
ut_ad(rec_offs_validate(rec, index, offsets));
diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc
index 984890791bd..1b045a23fe9 100644
--- a/storage/innobase/row/row0sel.cc
+++ b/storage/innobase/row/row0sel.cc
@@ -197,9 +197,9 @@ row_sel_sec_rec_is_for_clust_rec(
heap = mem_heap_create(256);
clust_offs = rec_get_offsets(clust_rec, clust_index, clust_offs,
- ULINT_UNDEFINED, &heap);
+ true, ULINT_UNDEFINED, &heap);
sec_offs = rec_get_offsets(sec_rec, sec_index, sec_offs,
- ULINT_UNDEFINED, &heap);
+ true, ULINT_UNDEFINED, &heap);
n = dict_index_get_n_ordering_defined_by_user(sec_index);
@@ -904,7 +904,7 @@ row_sel_get_clust_rec(
offsets = rec_get_offsets(rec,
btr_pcur_get_btr_cur(&plan->pcur)->index,
- offsets, ULINT_UNDEFINED, &heap);
+ offsets, true, ULINT_UNDEFINED, &heap);
row_build_row_ref_fast(plan->clust_ref, plan->clust_map, rec, offsets);
@@ -939,7 +939,7 @@ row_sel_get_clust_rec(
goto func_exit;
}
- offsets = rec_get_offsets(clust_rec, index, offsets,
+ offsets = rec_get_offsets(clust_rec, index, offsets, true,
ULINT_UNDEFINED, &heap);
if (!node->read_view) {
@@ -1158,7 +1158,7 @@ re_scan:
rec = btr_pcur_get_rec(pcur);
my_offsets = offsets_;
- my_offsets = rec_get_offsets(rec, index, my_offsets,
+ my_offsets = rec_get_offsets(rec, index, my_offsets, true,
ULINT_UNDEFINED, &heap);
/* No match record */
@@ -1181,8 +1181,8 @@ re_scan:
rtr_rec_t* rtr_rec = &(*it);
my_offsets = rec_get_offsets(
- rtr_rec->r_rec, index, my_offsets,
- ULINT_UNDEFINED, &heap);
+ rtr_rec->r_rec, index, my_offsets, true,
+ ULINT_UNDEFINED, &heap);
err = lock_sec_rec_read_check_and_lock(
0, &match->block, rtr_rec->r_rec, index,
@@ -1196,12 +1196,10 @@ re_scan:
} else {
goto func_end;
}
-
}
match->locked = true;
-
func_end:
rw_lock_x_unlock(&(match->block.lock));
if (heap != NULL) {
@@ -1510,7 +1508,8 @@ row_sel_try_search_shortcut(
/* This is a non-locking consistent read: if necessary, fetch
a previous version of the record */
- offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap);
+ offsets = rec_get_offsets(rec, index, offsets, true,
+ ULINT_UNDEFINED, &heap);
if (dict_index_is_clust(index)) {
if (!lock_clust_rec_cons_read_sees(rec, index, offsets,
@@ -1768,6 +1767,7 @@ rec_loop:
trx = thr_get_trx(thr);
offsets = rec_get_offsets(next_rec, index, offsets,
+ true,
ULINT_UNDEFINED, &heap);
/* If innodb_locks_unsafe_for_binlog option is used
@@ -1826,7 +1826,7 @@ skip_lock:
ulint lock_type;
trx_t* trx;
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, true,
ULINT_UNDEFINED, &heap);
trx = thr_get_trx(thr);
@@ -1913,7 +1913,8 @@ skip_lock:
/* PHASE 3: Get previous version in a consistent read */
cons_read_requires_clust_rec = FALSE;
- offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap);
+ offsets = rec_get_offsets(rec, index, offsets, true,
+ ULINT_UNDEFINED, &heap);
if (consistent_read) {
/* This is a non-locking consistent read: if necessary, fetch
@@ -1943,7 +1944,7 @@ skip_lock:
exhausted. */
offsets = rec_get_offsets(
- rec, index, offsets,
+ rec, index, offsets, true,
ULINT_UNDEFINED, &heap);
/* Fetch the columns needed in
@@ -3392,23 +3393,12 @@ row_sel_get_clust_rec_for_mysql(
goto func_exit;
}
- ulint page_no = page_get_page_no(
- btr_pcur_get_page(
- prebuilt->pcur));
-
- page_id_t page_id(dict_index_get_space(sec_index),
- page_no);
-
- buf_block_t* block = buf_page_get_gen(
- page_id,
- dict_table_page_size(sec_index->table),
- RW_NO_LATCH, NULL, BUF_GET,
- __FILE__, __LINE__, mtr, &err);
-
+ buf_block_t* block = btr_pcur_get_block(
+ prebuilt->pcur);
mem_heap_t* heap = mem_heap_create(256);
dtuple_t* tuple = dict_index_build_data_tuple(
- sec_index, const_cast<rec_t*>(rec),
- dict_index_get_n_fields(sec_index), heap);;
+ rec, sec_index, true,
+ sec_index->n_fields, heap);
page_cur_t page_cursor;
ulint low_match = page_cur_search(
@@ -3457,7 +3447,7 @@ row_sel_get_clust_rec_for_mysql(
goto func_exit;
}
- *offsets = rec_get_offsets(clust_rec, clust_index, *offsets,
+ *offsets = rec_get_offsets(clust_rec, clust_index, *offsets, true,
ULINT_UNDEFINED, offset_heap);
if (prebuilt->select_lock_type != LOCK_NONE) {
@@ -3901,7 +3891,7 @@ row_sel_try_search_shortcut_for_mysql(
/* This is a non-locking consistent read: if necessary, fetch
a previous version of the record */
- *offsets = rec_get_offsets(rec, index, *offsets,
+ *offsets = rec_get_offsets(rec, index, *offsets, true,
ULINT_UNDEFINED, heap);
if (!lock_clust_rec_cons_read_sees(
@@ -4031,8 +4021,9 @@ row_sel_fill_vrow(
rec_offs_init(offsets_);
ut_ad(!(*vrow));
+ ut_ad(page_rec_is_leaf(rec));
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, true,
ULINT_UNDEFINED, &heap);
*vrow = dtuple_create_with_vcol(
@@ -4585,6 +4576,7 @@ wait_table_again:
pcur->trx_if_known = trx;
rec = btr_pcur_get_rec(pcur);
+ ut_ad(page_rec_is_leaf(rec));
if (!moves_up
&& !page_rec_is_supremum(rec)
@@ -4599,6 +4591,7 @@ wait_table_again:
const rec_t* next_rec = page_rec_get_next_const(rec);
offsets = rec_get_offsets(next_rec, index, offsets,
+ true,
ULINT_UNDEFINED, &heap);
err = sel_set_rec_lock(pcur,
next_rec, index, offsets,
@@ -4656,6 +4649,7 @@ rec_loop:
}
ut_ad(!!page_rec_is_comp(rec) == comp);
+ ut_ad(page_rec_is_leaf(rec));
if (page_rec_is_infimum(rec)) {
@@ -4681,7 +4675,7 @@ rec_loop:
level we do not lock gaps. Supremum record is really
a gap and therefore we do not set locks there. */
- offsets = rec_get_offsets(rec, index, offsets,
+ offsets = rec_get_offsets(rec, index, offsets, true,
ULINT_UNDEFINED, &heap);
err = sel_set_rec_lock(pcur,
rec, index, offsets,
@@ -4771,7 +4765,8 @@ wrong_offs:
ut_ad(fil_page_index_page_check(btr_pcur_get_page(pcur)));
ut_ad(btr_page_get_index_id(btr_pcur_get_page(pcur)) == index->id);
- offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap);
+ offsets = rec_get_offsets(rec, index, offsets, true,
+ ULINT_UNDEFINED, &heap);
if (UNIV_UNLIKELY(srv_force_recovery > 0)) {
if (!rec_validate(rec, offsets)
@@ -4992,8 +4987,8 @@ no_gap_lock:
Do a normal locking read. */
offsets = rec_get_offsets(
- rec, index, offsets, ULINT_UNDEFINED,
- &heap);
+ rec, index, offsets, true,
+ ULINT_UNDEFINED, &heap);
goto locks_ok;
case DB_DEADLOCK:
goto lock_wait_or_error;
@@ -5432,6 +5427,7 @@ requires_clust_rec:
/* We used 'offsets' for the clust
rec, recalculate them for 'rec' */
offsets = rec_get_offsets(rec, index, offsets,
+ true,
ULINT_UNDEFINED,
&heap);
result_rec = rec;
@@ -5929,8 +5925,10 @@ row_search_autoinc_read_column(
ulint* offsets = offsets_;
rec_offs_init(offsets_);
+ ut_ad(page_rec_is_leaf(rec));
- offsets = rec_get_offsets(rec, index, offsets, col_no + 1, &heap);
+ offsets = rec_get_offsets(rec, index, offsets, true,
+ col_no + 1, &heap);
if (rec_offs_nth_sql_null(offsets, col_no)) {
/* There is no non-NULL value in the auto-increment column. */
diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc
index 0fce0731307..60a9f49b576 100644
--- a/storage/innobase/row/row0uins.cc
+++ b/storage/innobase/row/row0uins.cc
@@ -112,7 +112,7 @@ row_undo_ins_remove_clust_rec(
const rec_t* rec = btr_cur_get_rec(btr_cur);
mem_heap_t* heap = NULL;
const ulint* offsets = rec_get_offsets(
- rec, index, NULL, ULINT_UNDEFINED, &heap);
+ rec, index, NULL, true, ULINT_UNDEFINED, &heap);
row_log_table_delete(rec, node->row, index, offsets, NULL);
mem_heap_free(heap);
}
diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc
index 5a0b41e2c26..1ed25e7076a 100644
--- a/storage/innobase/row/row0umod.cc
+++ b/storage/innobase/row/row0umod.cc
@@ -196,7 +196,7 @@ row_undo_mod_remove_clust_low(
offsets = rec_get_offsets(
btr_cur_get_rec(btr_cur), btr_cur_get_index(btr_cur),
- NULL, trx_id_col + 1, &heap);
+ NULL, true, trx_id_col + 1, &heap);
trx_id_offset = rec_get_nth_field_offs(
offsets, trx_id_col, &len);
@@ -751,7 +751,7 @@ try_again:
offsets_heap = NULL;
offsets = rec_get_offsets(
btr_cur_get_rec(btr_cur),
- index, NULL, ULINT_UNDEFINED, &offsets_heap);
+ index, NULL, true, ULINT_UNDEFINED, &offsets_heap);
update = row_upd_build_sec_rec_difference_binary(
btr_cur_get_rec(btr_cur), index, offsets, entry, heap);
if (upd_get_n_fields(update) == 0) {
diff --git a/storage/innobase/row/row0undo.cc b/storage/innobase/row/row0undo.cc
index 4742e17b5e8..0679b29b6ca 100644
--- a/storage/innobase/row/row0undo.cc
+++ b/storage/innobase/row/row0undo.cc
@@ -185,7 +185,7 @@ row_undo_search_clust_to_pcur(
rec = btr_pcur_get_rec(&node->pcur);
- offsets = rec_get_offsets(rec, clust_index, offsets,
+ offsets = rec_get_offsets(rec, clust_index, offsets, true,
ULINT_UNDEFINED, &heap);
found = row_get_rec_roll_ptr(rec, clust_index, offsets)
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index c6e05bd236a..1444f6f7c7d 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -1069,7 +1069,7 @@ row_upd_build_difference_binary(
== trx_id_pos + 1);
if (!offsets) {
- offsets = rec_get_offsets(rec, index, offsets_,
+ offsets = rec_get_offsets(rec, index, offsets_, true,
ULINT_UNDEFINED, &heap);
} else {
ut_ad(rec_offs_validate(rec, index, offsets));
@@ -2196,7 +2196,7 @@ row_upd_store_row(
rec = btr_pcur_get_rec(node->pcur);
- offsets = rec_get_offsets(rec, clust_index, offsets_,
+ offsets = rec_get_offsets(rec, clust_index, offsets_, true,
ULINT_UNDEFINED, &heap);
if (dict_table_has_atomic_blobs(node->table)) {
@@ -2444,8 +2444,8 @@ row_upd_sec_index_entry(
&& !wsrep_thd_is_BF(trx->mysql_thd, FALSE)) {
ulint* offsets = rec_get_offsets(
- rec, index, NULL, ULINT_UNDEFINED,
- &heap);
+ rec, index, NULL, true,
+ ULINT_UNDEFINED, &heap);
err = wsrep_row_upd_check_foreign_constraints(
node, &pcur, index->table,
@@ -2481,7 +2481,7 @@ row_upd_sec_index_entry(
ulint* offsets;
offsets = rec_get_offsets(
- rec, index, NULL, ULINT_UNDEFINED,
+ rec, index, NULL, true, ULINT_UNDEFINED,
&heap);
/* NOTE that the following call loses
@@ -2690,7 +2690,7 @@ row_upd_clust_rec_by_insert(
we update the primary key. Delete-mark the old record
in the clustered index and prepare to insert a new entry. */
rec = btr_cur_get_rec(btr_cur);
- offsets = rec_get_offsets(rec, index, NULL,
+ offsets = rec_get_offsets(rec, index, NULL, true,
ULINT_UNDEFINED, &heap);
ut_ad(page_rec_is_user_rec(rec));
@@ -2965,7 +2965,8 @@ row_upd_del_mark_clust_rec(
entries */
row_upd_store_row(node, trx->mysql_thd,
- thr->prebuilt ? thr->prebuilt->m_mysql_table : NULL);
+ thr->prebuilt && thr->prebuilt->table == node->table
+ ? thr->prebuilt->m_mysql_table : NULL);
/* Mark the clustered index record deleted; we do not have to check
locks, because we assume that we have an x-lock on the record */
@@ -3131,7 +3132,7 @@ row_upd_clust_step(
}
rec = btr_pcur_get_rec(pcur);
- offsets = rec_get_offsets(rec, index, offsets_,
+ offsets = rec_get_offsets(rec, index, offsets_, true,
ULINT_UNDEFINED, &heap);
if (!flags && !node->has_clust_rec_x_lock) {
diff --git a/storage/innobase/row/row0vers.cc b/storage/innobase/row/row0vers.cc
index 26f6739c3ae..a659042bb2f 100644
--- a/storage/innobase/row/row0vers.cc
+++ b/storage/innobase/row/row0vers.cc
@@ -108,7 +108,7 @@ row_vers_impl_x_locked_low(
heap = mem_heap_create(1024);
clust_offsets = rec_get_offsets(
- clust_rec, clust_index, NULL, ULINT_UNDEFINED, &heap);
+ clust_rec, clust_index, NULL, true, ULINT_UNDEFINED, &heap);
trx_id = row_get_rec_trx_id(clust_rec, clust_index, clust_offsets);
if (trx_id == 0) {
@@ -219,8 +219,8 @@ row_vers_impl_x_locked_low(
}
clust_offsets = rec_get_offsets(
- prev_version, clust_index, NULL, ULINT_UNDEFINED,
- &heap);
+ prev_version, clust_index, NULL, true,
+ ULINT_UNDEFINED, &heap);
vers_del = rec_get_deleted_flag(prev_version, comp);
@@ -581,7 +581,8 @@ row_vers_build_cur_vrow_low(
}
clust_offsets = rec_get_offsets(prev_version, clust_index,
- NULL, ULINT_UNDEFINED, &heap);
+ NULL,
+ true, ULINT_UNDEFINED, &heap);
ulint entry_len = dict_index_get_n_fields(index);
@@ -719,7 +720,8 @@ row_vers_vc_matches_cluster(
}
clust_offsets = rec_get_offsets(prev_version, clust_index,
- NULL, ULINT_UNDEFINED, &heap);
+ NULL,
+ true, ULINT_UNDEFINED, &heap);
ulint entry_len = dict_index_get_n_fields(index);
@@ -845,7 +847,7 @@ row_vers_build_cur_vrow(
index, roll_ptr, trx_id, v_heap, &cur_vrow, mtr);
}
- *clust_offsets = rec_get_offsets(rec, clust_index, NULL,
+ *clust_offsets = rec_get_offsets(rec, clust_index, NULL, true,
ULINT_UNDEFINED, &heap);
return(cur_vrow);
}
@@ -894,7 +896,7 @@ row_vers_old_has_index_entry(
comp = page_rec_is_comp(rec);
ut_ad(!dict_table_is_comp(index->table) == !comp);
heap = mem_heap_create(1024);
- clust_offsets = rec_get_offsets(rec, clust_index, NULL,
+ clust_offsets = rec_get_offsets(rec, clust_index, NULL, true,
ULINT_UNDEFINED, &heap);
if (dict_index_has_virtual(index)) {
@@ -968,6 +970,7 @@ row_vers_old_has_index_entry(
}
}
clust_offsets = rec_get_offsets(rec, clust_index, NULL,
+ true,
ULINT_UNDEFINED, &heap);
} else {
@@ -1042,7 +1045,8 @@ row_vers_old_has_index_entry(
}
clust_offsets = rec_get_offsets(prev_version, clust_index,
- NULL, ULINT_UNDEFINED, &heap);
+ NULL, true,
+ ULINT_UNDEFINED, &heap);
if (dict_index_has_virtual(index)) {
if (vrow) {
@@ -1187,8 +1191,8 @@ row_vers_build_for_consistent_read(
}
*offsets = rec_get_offsets(
- prev_version, index, *offsets, ULINT_UNDEFINED,
- offset_heap);
+ prev_version, index, *offsets,
+ true, ULINT_UNDEFINED, offset_heap);
#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
ut_a(!rec_offs_any_null_extern(prev_version, *offsets));
@@ -1323,6 +1327,7 @@ committed_version_trx:
version = rec;
*offsets = rec_get_offsets(version,
index, *offsets,
+ true,
ULINT_UNDEFINED,
offset_heap);
}
@@ -1367,7 +1372,7 @@ committed_version_trx:
}
version = prev_version;
- *offsets = rec_get_offsets(version, index, *offsets,
+ *offsets = rec_get_offsets(version, index, *offsets, true,
ULINT_UNDEFINED, offset_heap);
#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
ut_a(!rec_offs_any_null_extern(version, *offsets));
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index 4089a4f99ed..d87ffeea21f 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -2130,7 +2130,7 @@ files_checked:
compile_time_assert(IBUF_SPACE_ID == 0);
ulint ibuf_root = btr_create(
- DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF,
+ DICT_CLUSTERED | DICT_IBUF,
0, univ_page_size, DICT_IBUF_ID_MIN,
dict_ind_redundant, NULL, &mtr);
diff --git a/storage/innobase/trx/trx0i_s.cc b/storage/innobase/trx/trx0i_s.cc
index 7854ad2ab5a..327ebf79211 100644
--- a/storage/innobase/trx/trx0i_s.cc
+++ b/storage/innobase/trx/trx0i_s.cc
@@ -726,8 +726,7 @@ fill_lock_data(
ut_a(n_fields > 0);
heap = NULL;
- offsets = rec_get_offsets(rec, index, offsets, n_fields,
- &heap);
+ offsets = rec_get_offsets(rec, index, offsets, true, n_fields, &heap);
/* format and store the data */
diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc
index 02a9f82b3c6..5a8e1e23546 100644
--- a/storage/innobase/trx/trx0rec.cc
+++ b/storage/innobase/trx/trx0rec.cc
@@ -2305,8 +2305,8 @@ trx_undo_prev_version_build(
#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
ut_a(!rec_offs_any_null_extern(
- *old_vers, rec_get_offsets(
- *old_vers, index, NULL, ULINT_UNDEFINED, &heap)));
+ *old_vers, rec_get_offsets(*old_vers, index, NULL, true,
+ ULINT_UNDEFINED, &heap)));
#endif // defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
if (vrow && !(cmpl_info & UPD_NODE_NO_ORD_CHANGE)) {
diff --git a/storage/innobase/trx/trx0sys.cc b/storage/innobase/trx/trx0sys.cc
index 63e29eb7767..a332f6047d1 100644
--- a/storage/innobase/trx/trx0sys.cc
+++ b/storage/innobase/trx/trx0sys.cc
@@ -680,6 +680,7 @@ trx_sys_close(void)
/*********************************************************************
Check if there are any active (non-prepared) transactions.
+This is only used to check if it's safe to shut down.
@return total number of active transactions or 0 if none */
ulint
trx_sys_any_active_transactions(void)
@@ -689,8 +690,13 @@ trx_sys_any_active_transactions(void)
trx_sys_mutex_enter();
- total_trx = UT_LIST_GET_LEN(trx_sys->rw_trx_list)
- + UT_LIST_GET_LEN(trx_sys->mysql_trx_list);
+ total_trx = UT_LIST_GET_LEN(trx_sys->rw_trx_list);
+
+ for (trx_t* trx = UT_LIST_GET_FIRST(trx_sys->mysql_trx_list);
+ trx != NULL;
+ trx = UT_LIST_GET_NEXT(mysql_trx_list, trx)) {
+ total_trx += trx->state != TRX_STATE_NOT_STARTED;
+ }
ut_a(total_trx >= trx_sys->n_prepared_trx);
total_trx -= trx_sys->n_prepared_trx;
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index 7ee903200da..cd69f3cd8ee 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -2373,6 +2373,17 @@ ha_myisam::check_if_supported_inplace_alter(TABLE *new_table,
}
+static bool directories_differ(const char *d1, const char *d2)
+{
+ if (!d1 && !d2)
+ return false;
+ if (!d1 || !d2)
+ return true;
+ size_t l1= dirname_length(d1), l2= dirname_length(d2);
+ return l1 != l2 || strncmp(d1, d2, l1);
+}
+
+
bool ha_myisam::check_if_incompatible_data(HA_CREATE_INFO *create_info,
uint table_changes)
{
@@ -2380,8 +2391,8 @@ bool ha_myisam::check_if_incompatible_data(HA_CREATE_INFO *create_info,
if ((create_info->used_fields & HA_CREATE_USED_AUTO &&
create_info->auto_increment_value != stats.auto_increment_value) ||
- create_info->data_file_name != data_file_name ||
- create_info->index_file_name != index_file_name ||
+ directories_differ(create_info->data_file_name, data_file_name) ||
+ directories_differ(create_info->index_file_name, index_file_name) ||
table_changes == IS_EQUAL_NO ||
table_changes & IS_EQUAL_PACK_LENGTH) // Not implemented yet
return COMPATIBLE_DATA_NO;
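An illustration of the directories_differ() helper added above (the paths are invented examples, not from the patch): only the directory component, as measured by dirname_length(), participates in the comparison, so two settings that differ only in the file name are treated as equal.

	/* Same directory, different file names: returns false. */
	directories_differ("/var/lib/mysql/db1/t1.MYD",
			   "/var/lib/mysql/db1/t1_copy.MYD");

	/* Different directories: returns true. */
	directories_differ("/var/lib/mysql/db1/t1.MYD",
			   "/mnt/fast/db1/t1.MYD");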
diff --git a/storage/test_sql_discovery/mysql-test/sql_discovery/simple.result b/storage/test_sql_discovery/mysql-test/sql_discovery/simple.result
index 7ca36e07438..23b7804638f 100644
--- a/storage/test_sql_discovery/mysql-test/sql_discovery/simple.result
+++ b/storage/test_sql_discovery/mysql-test/sql_discovery/simple.result
@@ -4,7 +4,7 @@ test_sql_discovery_statement
test_sql_discovery_write_frm ON
set sql_quote_show_create=0;
create table t1 (a int) engine=test_sql_discovery;
-ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
select * from t1;
ERROR 42S02: Table 'test.t1' doesn't exist
set @@test_sql_discovery_statement='t1:foobar bwa-ha-ha';
diff --git a/storage/tokudb/CMakeLists.txt b/storage/tokudb/CMakeLists.txt
index ad96328b8db..73e4bf37a06 100644
--- a/storage/tokudb/CMakeLists.txt
+++ b/storage/tokudb/CMakeLists.txt
@@ -1,4 +1,4 @@
-SET(TOKUDB_VERSION 5.6.36-82.1)
+SET(TOKUDB_VERSION 5.6.37-82.2)
# PerconaFT only supports x86-64 and cmake-2.8.9+
IF(CMAKE_VERSION VERSION_LESS "2.8.9")
MESSAGE(STATUS "CMake 2.8.9 or higher is required by TokuDB")
@@ -24,7 +24,7 @@ SET(TOKUDB_SOURCES
tokudb_thread.cc
tokudb_dir_cmd.cc)
MYSQL_ADD_PLUGIN(tokudb ${TOKUDB_SOURCES} STORAGE_ENGINE MODULE_ONLY
- COMPONENT tokudb-engine)
+ COMPONENT tokudb-engine CONFIG tokudb.cnf)
IF(NOT TARGET tokudb)
RETURN()
@@ -111,8 +111,3 @@ TARGET_LINK_LIBRARIES(tokudb tokufractaltree_static tokuportability_static
SET(CMAKE_MODULE_LINKER_FLAGS_RELEASE "${CMAKE_MODULE_LINKER_FLAGS_RELEASE} -flto -fuse-linker-plugin")
SET(CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO} -flto -fuse-linker-plugin")
-
-IF (INSTALL_SYSCONF2DIR)
- INSTALL(FILES tokudb.cnf DESTINATION ${INSTALL_SYSCONF2DIR}
- COMPONENT tokudb-engine)
-ENDIF(INSTALL_SYSCONF2DIR)
diff --git a/storage/tokudb/PerconaFT/buildheader/make_tdb.cc b/storage/tokudb/PerconaFT/buildheader/make_tdb.cc
index 6f0b7c5f419..0fa876f2bd8 100644
--- a/storage/tokudb/PerconaFT/buildheader/make_tdb.cc
+++ b/storage/tokudb/PerconaFT/buildheader/make_tdb.cc
@@ -231,6 +231,8 @@ static void print_defines (void) {
printf("#define DB_SET_RANGE_REVERSE 252\n"); // private tokudb
//printf("#define DB_GET_BOTH_RANGE_REVERSE 251\n"); // private tokudb. No longer supported #2862.
dodefine(DB_RMW);
+
+ printf("#define DB_LOCKING_READ 0x80000000\n");
printf("#define DB_IS_RESETTING_OP 0x01000000\n"); // private tokudb
printf("#define DB_PRELOCKED 0x00800000\n"); // private tokudb
printf("#define DB_PRELOCKED_WRITE 0x00400000\n"); // private tokudb
diff --git a/storage/tokudb/PerconaFT/src/ydb-internal.h b/storage/tokudb/PerconaFT/src/ydb-internal.h
index a1eb43a67c5..db2041095f7 100644
--- a/storage/tokudb/PerconaFT/src/ydb-internal.h
+++ b/storage/tokudb/PerconaFT/src/ydb-internal.h
@@ -239,13 +239,16 @@ struct __toku_dbc_internal {
struct simple_dbt skey_s,sval_s;
struct simple_dbt *skey,*sval;
- // if the rmw flag is asserted, cursor operations (like set) grab write locks instead of read locks
+ // if the rmw flag is asserted, cursor operations (like set) grab write
+ // locks instead of read locks
// the rmw flag is set when the cursor is created with the DB_RMW flag set
bool rmw;
+ bool locking_read;
};
-static_assert(sizeof(__toku_dbc_internal) <= sizeof(((DBC *) nullptr)->_internal),
- "__toku_dbc_internal doesn't fit in the internal portion of a DBC");
+static_assert(
+ sizeof(__toku_dbc_internal) <= sizeof(((DBC *)nullptr)->_internal),
+ "__toku_dbc_internal doesn't fit in the internal portion of a DBC");
static inline __toku_dbc_internal *dbc_struct_i(DBC *c) {
union dbc_union {
diff --git a/storage/tokudb/PerconaFT/src/ydb_cursor.cc b/storage/tokudb/PerconaFT/src/ydb_cursor.cc
index 015e302f1c6..1f4f00b7423 100644
--- a/storage/tokudb/PerconaFT/src/ydb_cursor.cc
+++ b/storage/tokudb/PerconaFT/src/ydb_cursor.cc
@@ -110,12 +110,14 @@ c_get_wrapper_callback(DBT const *key, DBT const *val, void *extra) {
return r;
}
-static inline uint32_t
-get_cursor_prelocked_flags(uint32_t flags, DBC* dbc) {
+static inline uint32_t get_cursor_prelocked_flags(uint32_t flags, DBC *dbc) {
uint32_t lock_flags = flags & (DB_PRELOCKED | DB_PRELOCKED_WRITE);
- //DB_READ_UNCOMMITTED and DB_READ_COMMITTED transactions 'own' all read locks for user-data dictionaries.
- if (dbc_struct_i(dbc)->iso != TOKU_ISO_SERIALIZABLE) {
+ // DB_READ_UNCOMMITTED and DB_READ_COMMITTED transactions 'own' all read
+ // locks for user-data dictionaries.
+ if (dbc_struct_i(dbc)->iso != TOKU_ISO_SERIALIZABLE &&
+ !(dbc_struct_i(dbc)->iso == TOKU_ISO_SNAPSHOT &&
+ dbc_struct_i(dbc)->locking_read)) {
lock_flags |= DB_PRELOCKED;
}
return lock_flags;
@@ -671,37 +673,44 @@ int toku_c_close(DBC *c) {
return 0;
}
-static int
-c_set_bounds(DBC *dbc, const DBT *left_key, const DBT *right_key, bool pre_acquire, int out_of_range_error) {
+static int c_set_bounds(DBC *dbc,
+ const DBT *left_key,
+ const DBT *right_key,
+ bool pre_acquire,
+ int out_of_range_error) {
if (out_of_range_error != DB_NOTFOUND &&
- out_of_range_error != TOKUDB_OUT_OF_RANGE &&
- out_of_range_error != 0) {
- return toku_ydb_do_error(
- dbc->dbp->dbenv,
- EINVAL,
- "Invalid out_of_range_error [%d] for %s\n",
- out_of_range_error,
- __FUNCTION__
- );
- }
- if (left_key == toku_dbt_negative_infinity() && right_key == toku_dbt_positive_infinity()) {
+ out_of_range_error != TOKUDB_OUT_OF_RANGE && out_of_range_error != 0) {
+ return toku_ydb_do_error(dbc->dbp->dbenv,
+ EINVAL,
+ "Invalid out_of_range_error [%d] for %s\n",
+ out_of_range_error,
+ __FUNCTION__);
+ }
+ if (left_key == toku_dbt_negative_infinity() &&
+ right_key == toku_dbt_positive_infinity()) {
out_of_range_error = 0;
}
DB *db = dbc->dbp;
DB_TXN *txn = dbc_struct_i(dbc)->txn;
HANDLE_PANICKED_DB(db);
- toku_ft_cursor_set_range_lock(dbc_ftcursor(dbc), left_key, right_key,
- (left_key == toku_dbt_negative_infinity()),
- (right_key == toku_dbt_positive_infinity()),
- out_of_range_error);
+ toku_ft_cursor_set_range_lock(dbc_ftcursor(dbc),
+ left_key,
+ right_key,
+ (left_key == toku_dbt_negative_infinity()),
+ (right_key == toku_dbt_positive_infinity()),
+ out_of_range_error);
if (!db->i->lt || !txn || !pre_acquire)
return 0;
- //READ_UNCOMMITTED and READ_COMMITTED transactions do not need read locks.
- if (!dbc_struct_i(dbc)->rmw && dbc_struct_i(dbc)->iso != TOKU_ISO_SERIALIZABLE)
+ // READ_UNCOMMITTED and READ_COMMITTED transactions do not need read locks.
+ if (!dbc_struct_i(dbc)->rmw &&
+ dbc_struct_i(dbc)->iso != TOKU_ISO_SERIALIZABLE &&
+ !(dbc_struct_i(dbc)->iso == TOKU_ISO_SNAPSHOT &&
+ dbc_struct_i(dbc)->locking_read))
return 0;
- toku::lock_request::type lock_type = dbc_struct_i(dbc)->rmw ?
- toku::lock_request::type::WRITE : toku::lock_request::type::READ;
+ toku::lock_request::type lock_type = dbc_struct_i(dbc)->rmw
+ ? toku::lock_request::type::WRITE
+ : toku::lock_request::type::READ;
int r = toku_db_get_range_lock(db, txn, left_key, right_key, lock_type);
return r;
}
@@ -783,18 +792,20 @@ toku_c_get(DBC* c, DBT* key, DBT* val, uint32_t flag) {
return r;
}
-int
-toku_db_cursor_internal(DB * db, DB_TXN * txn, DBC *c, uint32_t flags, int is_temporary_cursor) {
+int toku_db_cursor_internal(DB *db,
+ DB_TXN *txn,
+ DBC *c,
+ uint32_t flags,
+ int is_temporary_cursor) {
HANDLE_PANICKED_DB(db);
HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
- DB_ENV* env = db->dbenv;
+ DB_ENV *env = db->dbenv;
- if (flags & ~(DB_SERIALIZABLE | DB_INHERIT_ISOLATION | DB_RMW | DBC_DISABLE_PREFETCHING)) {
+ if (flags &
+ ~(DB_SERIALIZABLE | DB_INHERIT_ISOLATION | DB_LOCKING_READ | DB_RMW |
+ DBC_DISABLE_PREFETCHING)) {
return toku_ydb_do_error(
- env,
- EINVAL,
- "Invalid flags set for toku_db_cursor\n"
- );
+ env, EINVAL, "Invalid flags set for toku_db_cursor\n");
}
#define SCRS(name) c->name = name
@@ -819,8 +830,8 @@ toku_db_cursor_internal(DB * db, DB_TXN * txn, DBC *c, uint32_t flags, int is_te
c->dbp = db;
dbc_struct_i(c)->txn = txn;
- dbc_struct_i(c)->skey_s = (struct simple_dbt){0,0};
- dbc_struct_i(c)->sval_s = (struct simple_dbt){0,0};
+ dbc_struct_i(c)->skey_s = (struct simple_dbt){0, 0};
+ dbc_struct_i(c)->sval_s = (struct simple_dbt){0, 0};
if (is_temporary_cursor) {
dbc_struct_i(c)->skey = &db->i->skey;
dbc_struct_i(c)->sval = &db->i->sval;
@@ -831,28 +842,27 @@ toku_db_cursor_internal(DB * db, DB_TXN * txn, DBC *c, uint32_t flags, int is_te
if (flags & DB_SERIALIZABLE) {
dbc_struct_i(c)->iso = TOKU_ISO_SERIALIZABLE;
} else {
- dbc_struct_i(c)->iso = txn ? db_txn_struct_i(txn)->iso : TOKU_ISO_SERIALIZABLE;
+ dbc_struct_i(c)->iso =
+ txn ? db_txn_struct_i(txn)->iso : TOKU_ISO_SERIALIZABLE;
}
dbc_struct_i(c)->rmw = (flags & DB_RMW) != 0;
- enum cursor_read_type read_type = C_READ_ANY; // default, used in serializable and read uncommitted
+ dbc_struct_i(c)->locking_read = (flags & DB_LOCKING_READ) != 0;
+ enum cursor_read_type read_type =
+ C_READ_ANY; // default, used in serializable and read uncommitted
if (txn) {
if (dbc_struct_i(c)->iso == TOKU_ISO_READ_COMMITTED ||
- dbc_struct_i(c)->iso == TOKU_ISO_SNAPSHOT)
- {
+ dbc_struct_i(c)->iso == TOKU_ISO_SNAPSHOT) {
read_type = C_READ_SNAPSHOT;
- }
- else if (dbc_struct_i(c)->iso == TOKU_ISO_READ_COMMITTED_ALWAYS) {
+ } else if (dbc_struct_i(c)->iso == TOKU_ISO_READ_COMMITTED_ALWAYS) {
read_type = C_READ_COMMITTED;
}
}
- int r = toku_ft_cursor_create(
- db->i->ft_handle,
- dbc_ftcursor(c),
- txn ? db_txn_struct_i(txn)->tokutxn : NULL,
- read_type,
- ((flags & DBC_DISABLE_PREFETCHING) != 0),
- is_temporary_cursor != 0
- );
+ int r = toku_ft_cursor_create(db->i->ft_handle,
+ dbc_ftcursor(c),
+ txn ? db_txn_struct_i(txn)->tokutxn : NULL,
+ read_type,
+ ((flags & DBC_DISABLE_PREFETCHING) != 0),
+ is_temporary_cursor != 0);
if (r != 0) {
invariant(r == TOKUDB_MVCC_DICTIONARY_TOO_NEW);
}
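A sketch of how the new DB_LOCKING_READ flag could be used (assumed caller, not part of the patch; it presumes the standard DB->cursor() entry point forwards its flags to toku_db_cursor_internal()): a TOKU_ISO_SNAPSHOT transaction can request a cursor whose reads still take read locks instead of relying on MVCC alone.

	DBC*	c = NULL;
	int	r = db->cursor(db, txn, &c, DB_LOCKING_READ);
	/* With locking_read set, get_cursor_prelocked_flags() and
	c_set_bounds() no longer skip read locks for this snapshot cursor. */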
diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc
index 6cc3ad915cf..4bd00722623 100644
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@ -7631,7 +7631,13 @@ static bool tokudb_check_db_dir_exist_from_table_name(const char *table_name) {
memcpy(db_name, db_name_begin, db_name_size);
db_name[db_name_size] = '\0';
- mysql_dir_exists = (check_db_dir_existence(db_name) == 0);
+
+ // At this point, db_name contains the MySQL formatted database name.
+ // This is exactly the same format that would come into us through a
+ // CREATE TABLE. Some characters (like ':' for example) might be expanded
+ // into hex (':' would appear as "@003a").
+ // We need to check that the MySQL destination database directory exists.
+ mysql_dir_exists = (my_access(db_name, F_OK) == 0);
return mysql_dir_exists;
}
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_row_log_tokudb.result b/storage/tokudb/mysql-test/rpl/r/rpl_row_log_tokudb.result
deleted file mode 100644
index 73c010c6eb7..00000000000
--- a/storage/tokudb/mysql-test/rpl/r/rpl_row_log_tokudb.result
+++ /dev/null
@@ -1,315 +0,0 @@
-include/master-slave.inc
-[connection master]
-connection slave;
-include/stop_slave.inc
-include/wait_for_slave_to_stop.inc
-reset master;
-reset slave;
-start slave;
-include/wait_for_slave_to_start.inc
-connection slave;
-set @save_slave_ddl_exec_mode=@@global.slave_ddl_exec_mode;
-set @@global.slave_ddl_exec_mode=STRICT;
-connection master;
-create table t1(n int not null auto_increment primary key)ENGINE=TokuDB;
-insert into t1 values (NULL);
-drop table t1;
-create table t1 (word char(20) not null)ENGINE=TokuDB;
-load data infile 'LOAD_FILE' into table t1 ignore 1 lines;
-select count(*) from t1;
-count(*)
-69
-include/show_binlog_events.inc
-Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Gtid # # GTID #-#-#
-master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
-master-bin.000001 # Gtid # # BEGIN GTID #-#-#
-master-bin.000001 # Annotate_rows # # insert into t1 values (NULL)
-master-bin.000001 # Table_map # # table_id: # (test.t1)
-master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
-master-bin.000001 # Xid # # COMMIT /* XID */
-master-bin.000001 # Gtid # # GTID #-#-#
-master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
-master-bin.000001 # Gtid # # GTID #-#-#
-master-bin.000001 # Query # # use `test`; create table t1 (word char(20) not null)ENGINE=TokuDB
-master-bin.000001 # Gtid # # BEGIN GTID #-#-#
-master-bin.000001 # Annotate_rows # # load data infile '../../std_data/words.dat' into table t1 ignore 1 lines
-master-bin.000001 # Table_map # # table_id: # (test.t1)
-master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
-master-bin.000001 # Xid # # COMMIT /* XID */
-include/show_binlog_events.inc
-Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Gtid # # GTID #-#-#
-master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
-include/show_binlog_events.inc
-Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Gtid # # GTID #-#-#
-master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
-master-bin.000001 # Gtid # # BEGIN GTID #-#-#
-include/show_binlog_events.inc
-Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Gtid # # BEGIN GTID #-#-#
-master-bin.000001 # Annotate_rows # # insert into t1 values (NULL)
-master-bin.000001 # Table_map # # table_id: # (test.t1)
-master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
-master-bin.000001 # Xid # # COMMIT /* XID */
-flush logs;
-create table t3 (a int)ENGINE=TokuDB;
-connection master;
-select * from t1 order by 1 asc;
-word
-Aarhus
-Aaron
-Aaron
-Ababa
-Ababa
-aback
-aback
-abaft
-abaft
-abandon
-abandon
-abandoned
-abandoned
-abandoning
-abandoning
-abandonment
-abandonment
-abandons
-abandons
-abase
-abased
-abasement
-abasements
-abases
-abash
-abashed
-abashes
-abashing
-abasing
-abate
-abated
-abatement
-abatements
-abater
-abates
-abating
-Abba
-abbe
-abbey
-abbeys
-abbot
-abbots
-Abbott
-abbreviate
-abbreviated
-abbreviates
-abbreviating
-abbreviation
-abbreviations
-Abby
-abdomen
-abdomens
-abdominal
-abduct
-abducted
-abduction
-abductions
-abductor
-abductors
-abducts
-Abe
-abed
-Abel
-Abelian
-Abelson
-Aberdeen
-Abernathy
-aberrant
-aberration
-connection slave;
-select * from t1 order by 1 asc;
-word
-Aarhus
-Aaron
-Aaron
-Ababa
-Ababa
-aback
-aback
-abaft
-abaft
-abandon
-abandon
-abandoned
-abandoned
-abandoning
-abandoning
-abandonment
-abandonment
-abandons
-abandons
-abase
-abased
-abasement
-abasements
-abases
-abash
-abashed
-abashes
-abashing
-abasing
-abate
-abated
-abatement
-abatements
-abater
-abates
-abating
-Abba
-abbe
-abbey
-abbeys
-abbot
-abbots
-Abbott
-abbreviate
-abbreviated
-abbreviates
-abbreviating
-abbreviation
-abbreviations
-Abby
-abdomen
-abdomens
-abdominal
-abduct
-abducted
-abduction
-abductions
-abductor
-abductors
-abducts
-Abe
-abed
-Abel
-Abelian
-Abelson
-Aberdeen
-Abernathy
-aberrant
-aberration
-flush logs;
-include/stop_slave.inc
-include/start_slave.inc
-connection master;
-create table t2 (n int)ENGINE=TokuDB;
-insert into t2 values (1);
-include/show_binlog_events.inc
-Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Gtid # # GTID #-#-#
-master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
-master-bin.000001 # Gtid # # BEGIN GTID #-#-#
-master-bin.000001 # Annotate_rows # # insert into t1 values (NULL)
-master-bin.000001 # Table_map # # table_id: # (test.t1)
-master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
-master-bin.000001 # Xid # # COMMIT /* XID */
-master-bin.000001 # Gtid # # GTID #-#-#
-master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
-master-bin.000001 # Gtid # # GTID #-#-#
-master-bin.000001 # Query # # use `test`; create table t1 (word char(20) not null)ENGINE=TokuDB
-master-bin.000001 # Gtid # # BEGIN GTID #-#-#
-master-bin.000001 # Annotate_rows # # load data infile '../../std_data/words.dat' into table t1 ignore 1 lines
-master-bin.000001 # Table_map # # table_id: # (test.t1)
-master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
-master-bin.000001 # Xid # # COMMIT /* XID */
-master-bin.000001 # Rotate # # master-bin.000002;pos=POS
-include/show_binlog_events.inc
-Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000002 # Binlog_checkpoint # # master-bin.000002
-master-bin.000002 # Gtid # # GTID #-#-#
-master-bin.000002 # Query # # use `test`; create table t3 (a int)ENGINE=TokuDB
-master-bin.000002 # Gtid # # GTID #-#-#
-master-bin.000002 # Query # # use `test`; create table t2 (n int)ENGINE=TokuDB
-master-bin.000002 # Gtid # # BEGIN GTID #-#-#
-master-bin.000002 # Annotate_rows # # insert into t2 values (1)
-master-bin.000002 # Table_map # # table_id: # (test.t2)
-master-bin.000002 # Write_rows_v1 # # table_id: # flags: STMT_END_F
-master-bin.000002 # Xid # # COMMIT /* XID */
-show binary logs;
-Log_name File_size
-master-bin.000001 #
-master-bin.000002 #
-connection slave;
-show binary logs;
-Log_name File_size
-slave-bin.000001 #
-slave-bin.000002 #
-include/show_binlog_events.inc
-Log_name Pos Event_type Server_id End_log_pos Info
-slave-bin.000001 # Gtid # # GTID #-#-#
-slave-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
-slave-bin.000001 # Gtid # # BEGIN GTID #-#-#
-slave-bin.000001 # Annotate_rows # # insert into t1 values (NULL)
-slave-bin.000001 # Table_map # # table_id: # (test.t1)
-slave-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
-slave-bin.000001 # Xid # # COMMIT /* XID */
-slave-bin.000001 # Gtid # # GTID #-#-#
-slave-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
-slave-bin.000001 # Gtid # # GTID #-#-#
-slave-bin.000001 # Query # # use `test`; create table t1 (word char(20) not null)ENGINE=TokuDB
-slave-bin.000001 # Gtid # # BEGIN GTID #-#-#
-slave-bin.000001 # Annotate_rows # # load data infile '../../std_data/words.dat' into table t1 ignore 1 lines
-slave-bin.000001 # Table_map # # table_id: # (test.t1)
-slave-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
-slave-bin.000001 # Xid # # COMMIT /* XID */
-slave-bin.000001 # Gtid # # GTID #-#-#
-slave-bin.000001 # Query # # use `test`; create table t3 (a int)ENGINE=TokuDB
-slave-bin.000001 # Rotate # # slave-bin.000002;pos=POS
-include/show_binlog_events.inc
-Log_name Pos Event_type Server_id End_log_pos Info
-slave-bin.000002 # Binlog_checkpoint # # slave-bin.000002
-slave-bin.000002 # Gtid # # GTID #-#-#
-slave-bin.000002 # Query # # use `test`; create table t2 (n int)ENGINE=TokuDB
-slave-bin.000002 # Gtid # # BEGIN GTID #-#-#
-slave-bin.000002 # Annotate_rows # # insert into t2 values (1)
-slave-bin.000002 # Table_map # # table_id: # (test.t2)
-slave-bin.000002 # Write_rows_v1 # # table_id: # flags: STMT_END_F
-slave-bin.000002 # Xid # # COMMIT /* XID */
-include/check_slave_is_running.inc
-show binlog events in 'slave-bin.000005' from 4;
-ERROR HY000: Error when executing command SHOW BINLOG EVENTS: Could not find target log
-connection master;
-DROP TABLE t1;
-DROP TABLE t2;
-DROP TABLE t3;
-include/rpl_reset.inc
-connection master;
-create table t1(a int auto_increment primary key, b int);
-insert into t1 values (NULL, 1);
-set insert_id=5;
-insert into t1 values (NULL, last_insert_id()), (NULL, last_insert_id());
-include/show_binlog_events.inc
-Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Gtid # # GTID #-#-#
-master-bin.000001 # Query # # use `test`; create table t1(a int auto_increment primary key, b int)
-master-bin.000001 # Gtid # # BEGIN GTID #-#-#
-master-bin.000001 # Annotate_rows # # insert into t1 values (NULL, 1)
-master-bin.000001 # Table_map # # table_id: # (test.t1)
-master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
-master-bin.000001 # Query # # COMMIT
-master-bin.000001 # Gtid # # BEGIN GTID #-#-#
-master-bin.000001 # Annotate_rows # # insert into t1 values (NULL, last_insert_id()), (NULL, last_insert_id())
-master-bin.000001 # Table_map # # table_id: # (test.t1)
-master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
-master-bin.000001 # Query # # COMMIT
-select * from t1;
-a b
-1 1
-5 1
-6 1
-drop table t1;
-connection slave;
-set @@global.slave_ddl_exec_mode=@save_slave_ddl_exec_mode;
-connection master;
-include/rpl_end.inc
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_row_log_tokudb-master.opt b/storage/tokudb/mysql-test/rpl/t/rpl_row_log_tokudb-master.opt
deleted file mode 100644
index 773ec62bef2..00000000000
--- a/storage/tokudb/mysql-test/rpl/t/rpl_row_log_tokudb-master.opt
+++ /dev/null
@@ -1,2 +0,0 @@
---skip-external-locking
---default-storage-engine=MyISAM
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_row_log_tokudb.test b/storage/tokudb/mysql-test/rpl/t/rpl_row_log_tokudb.test
deleted file mode 100644
index 826eb5eab86..00000000000
--- a/storage/tokudb/mysql-test/rpl/t/rpl_row_log_tokudb.test
+++ /dev/null
@@ -1,14 +0,0 @@
-###################################
-# Wrapper for rpl_row_log.test #
-# Added wrapper so that MyISAM & #
-# Innodb and NDB could all use the#
-# Same test. NDB produced a diff #
-# bin-log #
-###################################
--- source include/have_binlog_format_row.inc
--- source include/have_tokudb.inc
--- source include/master-slave.inc
-let $engine_type=TokuDB;
--- source extra/rpl_tests/rpl_log.test
-
---source include/rpl_end.inc
diff --git a/storage/tokudb/mysql-test/tokudb/r/dir_per_db.result b/storage/tokudb/mysql-test/tokudb/r/dir_per_db.result
index 371f97406c8..30d4e4244fd 100644
--- a/storage/tokudb/mysql-test/tokudb/r/dir_per_db.result
+++ b/storage/tokudb/mysql-test/tokudb/r/dir_per_db.result
@@ -1,3 +1,4 @@
+SET @orig_tokudb_dir_per_db=@@global.tokudb_dir_per_db;
########
# tokudb_dir_per_db = 1
########
@@ -177,4 +178,11 @@ t1_status_id.tokudb
DROP TABLE t2;
## Looking for *.tokudb files in data_dir
## Looking for *.tokudb files in data_dir/test
-SET GLOBAL tokudb_dir_per_db=default;
+CREATE DATABASE `a::a@@`;
+CREATE TABLE `a::a@@`.`t1` (a INT) ENGINE=TOKUDB;
+CREATE DATABASE `!@#$%^&*()`;
+ALTER TABLE `a::a@@`.`t1` RENAME `!@#$%^&*()`.`t1`;
+DROP TABLE `!@#$%^&*()`.`t1`;
+DROP DATABASE `!@#$%^&*()`;
+DROP DATABASE `a::a@@`;
+SET GLOBAL tokudb_dir_per_db=@orig_tokudb_dir_per_db;
diff --git a/storage/tokudb/mysql-test/tokudb/t/dir_per_db.test b/storage/tokudb/mysql-test/tokudb/t/dir_per_db.test
index b638b706d87..64745cb049c 100644
--- a/storage/tokudb/mysql-test/tokudb/t/dir_per_db.test
+++ b/storage/tokudb/mysql-test/tokudb/t/dir_per_db.test
@@ -1,5 +1,7 @@
source include/have_tokudb.inc;
+SET @orig_tokudb_dir_per_db=@@global.tokudb_dir_per_db;
+
--let $DB= test
--let $DATADIR= `select @@datadir`
--let $i= 2
@@ -73,4 +75,17 @@ while ($i) {
--source dir_per_db_show_table_files.inc
}
-SET GLOBAL tokudb_dir_per_db=default;
+
+# Test case for TDB-72: cannot rename a table in a database with
+# non-alphanumeric characters in its name (see the sketch after this hunk).
+CREATE DATABASE `a::a@@`;
+CREATE TABLE `a::a@@`.`t1` (a INT) ENGINE=TOKUDB;
+CREATE DATABASE `!@#$%^&*()`;
+ALTER TABLE `a::a@@`.`t1` RENAME `!@#$%^&*()`.`t1`;
+
+DROP TABLE `!@#$%^&*()`.`t1`;
+DROP DATABASE `!@#$%^&*()`;
+DROP DATABASE `a::a@@`;
+
+# cleanup
+SET GLOBAL tokudb_dir_per_db=@orig_tokudb_dir_per_db;
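For context, the hunk above is the whole of the new TDB-72 coverage: it saves the global tokudb_dir_per_db setting, renames a TokuDB table into a database whose name contains non-alphanumeric characters, and restores the setting afterwards. Below is a minimal standalone sketch of the same save/rename/restore pattern; it is not part of the patch, the database and table names are purely illustrative, and it assumes a server with the TokuDB engine loaded.

-- Save the current global value so the example leaves no side effects.
SET @saved_dir_per_db = @@global.tokudb_dir_per_db;
SET GLOBAL tokudb_dir_per_db = 1;

-- Illustrative names only: the point is the non-alphanumeric characters
-- in the database names involved in the rename.
CREATE DATABASE `src-db`;
CREATE DATABASE `dst db!`;
CREATE TABLE `src-db`.`t` (a INT) ENGINE=TokuDB;

-- The cross-database rename that the TDB-72 test case exercises.
ALTER TABLE `src-db`.`t` RENAME `dst db!`.`t`;

-- Clean up and restore the original setting.
DROP TABLE `dst db!`.`t`;
DROP DATABASE `dst db!`;
DROP DATABASE `src-db`;
SET GLOBAL tokudb_dir_per_db = @saved_dir_per_db;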
diff --git a/strings/CMakeLists.txt b/strings/CMakeLists.txt
index 96de24b4770..6d2190772c5 100644
--- a/strings/CMakeLists.txt
+++ b/strings/CMakeLists.txt
@@ -35,7 +35,3 @@ ADD_CONVENIENCE_LIBRARY(strings ${STRINGS_SOURCES})
ADD_EXECUTABLE(conf_to_src EXCLUDE_FROM_ALL conf_to_src.c)
TARGET_LINK_LIBRARIES(conf_to_src strings)
-
-IF(MSVC)
- INSTALL_DEBUG_TARGET(strings DESTINATION ${INSTALL_LIBDIR}/debug)
-ENDIF()
diff --git a/support-files/policy/apparmor/usr.sbin.mysqld b/support-files/policy/apparmor/usr.sbin.mysqld
index 307872c0fff..05ec77414d2 100644
--- a/support-files/policy/apparmor/usr.sbin.mysqld
+++ b/support-files/policy/apparmor/usr.sbin.mysqld
@@ -34,6 +34,8 @@
/etc/mysql/*.pem r,
/etc/mysql/conf.d/ r,
/etc/mysql/conf.d/* r,
+ /etc/mysql/mariadb.conf.d/ r,
+ /etc/mysql/mariadb.conf.d/* r,
/etc/nsswitch.conf r,
/etc/passwd r,
/etc/services r,
diff --git a/vio/CMakeLists.txt b/vio/CMakeLists.txt
index 2fb82ef9dd2..cdb28799ada 100644
--- a/vio/CMakeLists.txt
+++ b/vio/CMakeLists.txt
@@ -20,7 +20,3 @@ ADD_DEFINITIONS(${SSL_DEFINES})
SET(VIO_SOURCES vio.c viosocket.c viossl.c viopipe.c vioshm.c viosslfactories.c)
ADD_CONVENIENCE_LIBRARY(vio ${VIO_SOURCES})
TARGET_LINK_LIBRARIES(vio ${LIBSOCKET})
-
-IF(MSVC)
- INSTALL_DEBUG_TARGET(vio DESTINATION ${INSTALL_LIBDIR}/debug)
-ENDIF()
diff --git a/zlib/CMakeLists.txt b/zlib/CMakeLists.txt
index 7668ce723b8..dd1e45d4acf 100644
--- a/zlib/CMakeLists.txt
+++ b/zlib/CMakeLists.txt
@@ -22,8 +22,3 @@ SET(ZLIB_SOURCES adler32.c compress.c crc32.c crc32.h deflate.c deflate.h gzio.
zutil.c zutil.h)
ADD_CONVENIENCE_LIBRARY(zlib ${ZLIB_SOURCES})
RESTRICT_SYMBOL_EXPORTS(zlib)
-
-IF(MSVC)
- INSTALL_DEBUG_TARGET(zlib DESTINATION ${INSTALL_LIBDIR}/debug)
-ENDIF()
-