summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSergei Golubchik <serg@mariadb.org>2016-06-11 01:06:09 +0200
committerSergei Golubchik <serg@mariadb.org>2016-06-11 01:06:09 +0200
commitdde558f2621094027ef4b6ea756f119f934f6a5b (patch)
treea2eeda9b26b5b77fb3d9f99e99b4b6502c02231c
parentca95cc603b35c8afd5cb0a5b257f181b6da6a8db (diff)
parentf853a99a4fad0f390d6b1f6c46648302ca6cf310 (diff)
downloadmariadb-git-dde558f2621094027ef4b6ea756f119f934f6a5b.tar.gz
Merge branch 'merge-tokudb-5.6' into 10.0-tokudb-merge
-rw-r--r--storage/tokudb/CMakeLists.txt4
-rw-r--r--storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake28
-rw-r--r--storage/tokudb/PerconaFT/ft/logger/log_upgrade.cc4
-rw-r--r--storage/tokudb/PerconaFT/ft/logger/recover.h2
-rw-r--r--storage/tokudb/PerconaFT/ft/tests/CMakeLists.txt2
-rw-r--r--storage/tokudb/PerconaFT/ft/tests/make-tree.cc2
-rw-r--r--storage/tokudb/PerconaFT/ft/tests/msnfilter.cc2
-rw-r--r--storage/tokudb/PerconaFT/ft/tests/test-upgrade-recovery-logs.cc2
-rwxr-xr-xstorage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-clean/log000000000000.tokulog24bin0 -> 131 bytes
-rwxr-xr-xstorage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-dirty/log000000000000.tokulog24bin0 -> 94 bytes
-rwxr-xr-xstorage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-clean/log000000000000.tokulog25bin0 -> 131 bytes
-rwxr-xr-xstorage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-dirty/log000000000000.tokulog25bin0 -> 94 bytes
-rwxr-xr-xstorage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-clean/log000000000000.tokulog26bin0 -> 131 bytes
-rwxr-xr-xstorage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-dirty/log000000000000.tokulog26bin0 -> 94 bytes
-rwxr-xr-xstorage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-clean/log000000000000.tokulog27bin0 -> 131 bytes
-rwxr-xr-xstorage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-dirty/log000000000000.tokulog27bin0 -> 94 bytes
-rw-r--r--storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-clean/log000000000000.tokulog28bin0 -> 131 bytes
-rw-r--r--storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-dirty/log000000000000.tokulog28bin0 -> 94 bytes
-rw-r--r--storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-clean/log000000000000.tokulog29bin0 -> 131 bytes
-rw-r--r--storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-dirty/log000000000000.tokulog29bin0 -> 94 bytes
-rw-r--r--storage/tokudb/PerconaFT/ft/tests/verify-bad-msn.cc2
-rw-r--r--storage/tokudb/PerconaFT/ft/tests/verify-bad-pivots.cc2
-rw-r--r--storage/tokudb/PerconaFT/ft/tests/verify-dup-in-leaf.cc2
-rw-r--r--storage/tokudb/PerconaFT/ft/tests/verify-dup-pivots.cc2
-rw-r--r--storage/tokudb/PerconaFT/ft/tests/verify-misrouted-msgs.cc2
-rw-r--r--storage/tokudb/PerconaFT/ft/tests/verify-unsorted-leaf.cc2
-rw-r--r--storage/tokudb/PerconaFT/ft/tests/verify-unsorted-pivots.cc2
-rw-r--r--storage/tokudb/PerconaFT/ft/txn/txn_manager.cc4
-rw-r--r--storage/tokudb/PerconaFT/ft/txn/txn_manager.h2
-rw-r--r--storage/tokudb/PerconaFT/ftcxx/tests/CMakeLists.txt10
-rw-r--r--storage/tokudb/PerconaFT/scripts/run.stress-tests.py26
-rw-r--r--storage/tokudb/PerconaFT/src/indexer-undo-do.cc2
-rw-r--r--storage/tokudb/PerconaFT/src/tests/rollback-inconsistency.cc4
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_rowcount.cc55
-rw-r--r--storage/tokudb/PerconaFT/src/tests/txn_manager_handle_snapshot_atomicity.cc20
-rw-r--r--storage/tokudb/PerconaFT/src/ydb.cc2
-rw-r--r--storage/tokudb/PerconaFT/src/ydb.h2
-rw-r--r--storage/tokudb/ha_tokudb.cc580
-rw-r--r--storage/tokudb/ha_tokudb.h50
-rw-r--r--storage/tokudb/ha_tokudb_admin.cc21
-rw-r--r--storage/tokudb/ha_tokudb_alter_56.cc4
-rw-r--r--storage/tokudb/ha_tokudb_alter_common.cc17
-rw-r--r--storage/tokudb/ha_tokudb_update.cc4
-rw-r--r--storage/tokudb/hatoku_cmp.cc14
-rw-r--r--storage/tokudb/hatoku_defines.h6
-rw-r--r--storage/tokudb/hatoku_hton.cc34
-rw-r--r--storage/tokudb/hatoku_hton.h10
-rw-r--r--storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result2
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/background_job_manager.resultbin7382 -> 7363 bytes
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result4
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_timeout.result4
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks.result24
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks_released.result2
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_trx.result21
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/disabled.def22
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test4
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test11
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test9
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test2
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test17
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/5585.result6
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/db233.result37
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/db397_delete_trigger.result12
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/db397_insert_trigger.result8
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/db397_update_trigger.result12
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/db739_replace.result2
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1_pick.result2
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/db757_part_alter_analyze.result2
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/db917.result14
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/db938.result34
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/db945.result12
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/simple_icp.result4
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/t/5585.test6
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/t/db233.test70
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test6
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test6
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test4
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/t/db917.test22
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/t/db938.test76
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/t/db945.test24
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/t/xa-3.test6
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/t/xa-4.test6
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/t/xa-6.test6
-rw-r--r--storage/tokudb/mysql-test/tokudb_parts/t/disabled.def2
-rw-r--r--storage/tokudb/mysql-test/tokudb_rpl/r/rpl_rfr_disable_on_expl_pk_absence.result47
-rw-r--r--storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence-slave.opt1
-rw-r--r--storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence.test48
-rw-r--r--storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_pk_insert_mode_basic.result85
-rw-r--r--storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_pk_insert_mode_basic.test51
-rw-r--r--storage/tokudb/tokudb_card.h7
-rw-r--r--storage/tokudb/tokudb_debug.h58
-rw-r--r--storage/tokudb/tokudb_information_schema.cc4
-rw-r--r--storage/tokudb/tokudb_sysvars.cc38
-rw-r--r--storage/tokudb/tokudb_sysvars.h1
-rw-r--r--storage/tokudb/tokudb_thread.h49
95 files changed, 1355 insertions, 462 deletions
diff --git a/storage/tokudb/CMakeLists.txt b/storage/tokudb/CMakeLists.txt
index 9255a1ebdf5..6bb3394ac94 100644
--- a/storage/tokudb/CMakeLists.txt
+++ b/storage/tokudb/CMakeLists.txt
@@ -1,4 +1,4 @@
-SET(TOKUDB_VERSION 5.6.28-76.1)
+SET(TOKUDB_VERSION 5.6.30-76.3)
# PerconaFT only supports x86-64 and cmake-2.8.9+
IF(CMAKE_VERSION VERSION_LESS "2.8.9")
MESSAGE(STATUS "CMake 2.8.9 or higher is required by TokuDB")
@@ -52,7 +52,7 @@ include(CheckCXXCompilerFlag)
# pick language dialect
check_cxx_compiler_flag(-std=c++11 HAVE_STDCXX11)
if (HAVE_STDCXX11)
- set(CMAKE_CXX_FLAGS "-std=c++11 ${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "-std=c++11 -Wno-deprecated-declarations ${CMAKE_CXX_FLAGS}")
else ()
message(FATAL_ERROR "${CMAKE_CXX_COMPILER} doesn't support -std=c++11, you need one that does.")
endif ()
diff --git a/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake b/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
index 87947bb47d2..5f2c9ef2c2a 100644
--- a/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
+++ b/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
@@ -95,8 +95,10 @@ set_cflags_if_supported(
-Wno-error=missing-format-attribute
-Wno-error=address-of-array-temporary
-Wno-error=tautological-constant-out-of-range-compare
+ -Wno-error=maybe-uninitialized
-Wno-ignored-attributes
-Wno-error=extern-c-compat
+ -Wno-pointer-bool-conversion
-fno-rtti
-fno-exceptions
)
@@ -152,13 +154,18 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL Clang)
set(CMAKE_C_FLAGS_RELEASE "-g -O3 ${CMAKE_C_FLAGS_RELEASE} -UNDEBUG")
set(CMAKE_CXX_FLAGS_RELEASE "-g -O3 ${CMAKE_CXX_FLAGS_RELEASE} -UNDEBUG")
else ()
+ if (APPLE)
+ set(FLTO_OPTS "-fwhole-program")
+ else ()
+ set(FLTO_OPTS "-fuse-linker-plugin")
+ endif()
# we overwrite this because the default passes -DNDEBUG and we don't want that
- set(CMAKE_C_FLAGS_RELWITHDEBINFO "-flto -fuse-linker-plugin ${CMAKE_C_FLAGS_RELWITHDEBINFO} -g -O3 -UNDEBUG")
- set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-flto -fuse-linker-plugin ${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -g -O3 -UNDEBUG")
- set(CMAKE_C_FLAGS_RELEASE "-g -O3 -flto -fuse-linker-plugin ${CMAKE_C_FLAGS_RELEASE} -UNDEBUG")
- set(CMAKE_CXX_FLAGS_RELEASE "-g -O3 -flto -fuse-linker-plugin ${CMAKE_CXX_FLAGS_RELEASE} -UNDEBUG")
- set(CMAKE_EXE_LINKER_FLAGS "-g -fuse-linker-plugin ${CMAKE_EXE_LINKER_FLAGS}")
- set(CMAKE_SHARED_LINKER_FLAGS "-g -fuse-linker-plugin ${CMAKE_SHARED_LINKER_FLAGS}")
+ set(CMAKE_C_FLAGS_RELWITHDEBINFO "-flto ${FLTO_OPTS} ${CMAKE_C_FLAGS_RELWITHDEBINFO} -g -O3 -UNDEBUG")
+ set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-flto ${FLTO_OPTS} ${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -g -O3 -UNDEBUG")
+ set(CMAKE_C_FLAGS_RELEASE "-g -O3 -flto ${FLTO_OPTS} ${CMAKE_C_FLAGS_RELEASE} -UNDEBUG")
+ set(CMAKE_CXX_FLAGS_RELEASE "-g -O3 -flto ${FLTO_OPTS} ${CMAKE_CXX_FLAGS_RELEASE} -UNDEBUG")
+ set(CMAKE_EXE_LINKER_FLAGS "-g ${FLTO_OPTS} ${CMAKE_EXE_LINKER_FLAGS}")
+ set(CMAKE_SHARED_LINKER_FLAGS "-g ${FLTO_OPTS} ${CMAKE_SHARED_LINKER_FLAGS}")
endif ()
## set warnings
@@ -192,15 +199,6 @@ endif ()
set(CMAKE_C_FLAGS "-Wall -Werror ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-Wall -Werror ${CMAKE_CXX_FLAGS}")
-## need to set -stdlib=libc++ to get real c++11 support on darwin
-if (APPLE)
- if (CMAKE_GENERATOR STREQUAL Xcode)
- set(CMAKE_XCODE_ATTRIBUTE_CLANG_CXX_LIBRARY "libc++")
- else ()
- add_definitions(-stdlib=libc++)
- endif ()
-endif ()
-
# pick language dialect
set(CMAKE_C_FLAGS "-std=c99 ${CMAKE_C_FLAGS}")
check_cxx_compiler_flag(-std=c++11 HAVE_STDCXX11)
diff --git a/storage/tokudb/PerconaFT/ft/logger/log_upgrade.cc b/storage/tokudb/PerconaFT/ft/logger/log_upgrade.cc
index cbe3cc328b4..efaba49198d 100644
--- a/storage/tokudb/PerconaFT/ft/logger/log_upgrade.cc
+++ b/storage/tokudb/PerconaFT/ft/logger/log_upgrade.cc
@@ -265,8 +265,8 @@ toku_maybe_upgrade_log(const char *env_dir, const char *log_dir, LSN * lsn_of_cl
TXNID last_xid = TXNID_NONE;
r = verify_clean_shutdown_of_log_version(log_dir, version_of_logs_on_disk, &last_lsn, &last_xid);
if (r != 0) {
- if (TOKU_LOG_VERSION_25 <= version_of_logs_on_disk &&
- version_of_logs_on_disk <= TOKU_LOG_VERSION_27 &&
+ if (version_of_logs_on_disk >= TOKU_LOG_VERSION_25 &&
+ version_of_logs_on_disk <= TOKU_LOG_VERSION_29 &&
TOKU_LOG_VERSION_29 == TOKU_LOG_VERSION) {
r = 0; // can do recovery on dirty shutdown
} else {
diff --git a/storage/tokudb/PerconaFT/ft/logger/recover.h b/storage/tokudb/PerconaFT/ft/logger/recover.h
index 0d216c11a8b..bdd44d562cd 100644
--- a/storage/tokudb/PerconaFT/ft/logger/recover.h
+++ b/storage/tokudb/PerconaFT/ft/logger/recover.h
@@ -67,7 +67,7 @@ int tokuft_recover(DB_ENV *env,
// Effect: Check the tokuft logs to determine whether or not we need to run recovery.
// If the log is empty or if there is a clean shutdown at the end of the log, then we
-// dont need to run recovery.
+// don't need to run recovery.
// Returns: true if we need recovery, otherwise false.
int tokuft_needs_recovery(const char *logdir, bool ignore_empty_log);
diff --git a/storage/tokudb/PerconaFT/ft/tests/CMakeLists.txt b/storage/tokudb/PerconaFT/ft/tests/CMakeLists.txt
index 0098b6091be..270ec97660a 100644
--- a/storage/tokudb/PerconaFT/ft/tests/CMakeLists.txt
+++ b/storage/tokudb/PerconaFT/ft/tests/CMakeLists.txt
@@ -112,11 +112,13 @@ if(BUILD_TESTING OR BUILD_FT_TESTS)
declare_custom_tests(test-upgrade-recovery-logs)
file(GLOB upgrade_tests "${TOKUDB_DATA}/upgrade-recovery-logs-??-clean")
+ file(GLOB upgrade_tests "${CMAKE_CURRENT_SOURCE_DIR}/upgrade.data/upgrade-recovery-logs-??-clean")
foreach(test ${upgrade_tests})
get_filename_component(test_basename "${test}" NAME)
add_ft_test_aux(test-${test_basename} test-upgrade-recovery-logs ${test})
endforeach(test)
file(GLOB upgrade_tests "${TOKUDB_DATA}/upgrade-recovery-logs-??-dirty")
+ file(GLOB upgrade_tests "${CMAKE_CURRENT_SOURCE_DIR}/upgrade.data/upgrade-recovery-logs-??-dirty")
foreach(test ${upgrade_tests})
get_filename_component(test_basename "${test}" NAME)
add_ft_test_aux(test-${test_basename} test-upgrade-recovery-logs ${test})
diff --git a/storage/tokudb/PerconaFT/ft/tests/make-tree.cc b/storage/tokudb/PerconaFT/ft/tests/make-tree.cc
index c83517b5f64..761d672539b 100644
--- a/storage/tokudb/PerconaFT/ft/tests/make-tree.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/make-tree.cc
@@ -87,7 +87,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
leafnode->max_msn_applied_to_node_on_disk = msn;
- // dont forget to dirty the node
+ // don't forget to dirty the node
leafnode->dirty = 1;
}
diff --git a/storage/tokudb/PerconaFT/ft/tests/msnfilter.cc b/storage/tokudb/PerconaFT/ft/tests/msnfilter.cc
index d9608258054..c37dcd089f8 100644
--- a/storage/tokudb/PerconaFT/ft/tests/msnfilter.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/msnfilter.cc
@@ -160,7 +160,7 @@ append_leaf(FT_HANDLE ft, FTNODE leafnode, void *key, uint32_t keylen, void *val
assert(pair2.call_count==2);
}
- // dont forget to dirty the node
+ // don't forget to dirty the node
leafnode->dirty = 1;
}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-upgrade-recovery-logs.cc b/storage/tokudb/PerconaFT/ft/tests/test-upgrade-recovery-logs.cc
index 8e006498d77..7691ffaac2b 100644
--- a/storage/tokudb/PerconaFT/ft/tests/test-upgrade-recovery-logs.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/test-upgrade-recovery-logs.cc
@@ -81,7 +81,7 @@ static void run_recovery(const char *testdir) {
bool upgrade_in_progress;
r = toku_maybe_upgrade_log(testdir, testdir, &lsn_of_clean_shutdown, &upgrade_in_progress);
if (strcmp(shutdown, "dirty") == 0 && log_version <= 24) {
- CKERR2(r, TOKUDB_UPGRADE_FAILURE); // we dont support dirty upgrade from versions <= 24
+ CKERR2(r, TOKUDB_UPGRADE_FAILURE); // we don't support dirty upgrade from versions <= 24
return;
} else {
CKERR(r);
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-clean/log000000000000.tokulog24 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-clean/log000000000000.tokulog24
new file mode 100755
index 00000000000..9a56e83e627
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-clean/log000000000000.tokulog24
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-dirty/log000000000000.tokulog24 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-dirty/log000000000000.tokulog24
new file mode 100755
index 00000000000..c552cda6673
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-dirty/log000000000000.tokulog24
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-clean/log000000000000.tokulog25 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-clean/log000000000000.tokulog25
new file mode 100755
index 00000000000..26b8bcfbdcc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-clean/log000000000000.tokulog25
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-dirty/log000000000000.tokulog25 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-dirty/log000000000000.tokulog25
new file mode 100755
index 00000000000..04d3190c818
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-dirty/log000000000000.tokulog25
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-clean/log000000000000.tokulog26 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-clean/log000000000000.tokulog26
new file mode 100755
index 00000000000..02047325aa6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-clean/log000000000000.tokulog26
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-dirty/log000000000000.tokulog26 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-dirty/log000000000000.tokulog26
new file mode 100755
index 00000000000..ce826b5608b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-dirty/log000000000000.tokulog26
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-clean/log000000000000.tokulog27 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-clean/log000000000000.tokulog27
new file mode 100755
index 00000000000..9849b977d73
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-clean/log000000000000.tokulog27
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-dirty/log000000000000.tokulog27 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-dirty/log000000000000.tokulog27
new file mode 100755
index 00000000000..8b658ea4c0a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-dirty/log000000000000.tokulog27
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-clean/log000000000000.tokulog28 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-clean/log000000000000.tokulog28
new file mode 100644
index 00000000000..11fecfb94b2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-clean/log000000000000.tokulog28
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-dirty/log000000000000.tokulog28 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-dirty/log000000000000.tokulog28
new file mode 100644
index 00000000000..b7a9b03b583
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-dirty/log000000000000.tokulog28
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-clean/log000000000000.tokulog29 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-clean/log000000000000.tokulog29
new file mode 100644
index 00000000000..a1f306f4a96
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-clean/log000000000000.tokulog29
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-dirty/log000000000000.tokulog29 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-dirty/log000000000000.tokulog29
new file mode 100644
index 00000000000..b9e79eeb1c4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-dirty/log000000000000.tokulog29
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-bad-msn.cc b/storage/tokudb/PerconaFT/ft/tests/verify-bad-msn.cc
index 40af5dab7ad..b10885c2e62 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-bad-msn.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-bad-msn.cc
@@ -92,7 +92,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
// Create bad tree (don't do following):
// leafnode->max_msn_applied_to_node = msn;
- // dont forget to dirty the node
+ // don't forget to dirty the node
leafnode->dirty = 1;
}
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-bad-pivots.cc b/storage/tokudb/PerconaFT/ft/tests/verify-bad-pivots.cc
index 37054eb119a..c1d08ce41a6 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-bad-pivots.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-bad-pivots.cc
@@ -76,7 +76,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
NULL,
NULL);
- // dont forget to dirty the node
+ // don't forget to dirty the node
leafnode->dirty = 1;
}
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-dup-in-leaf.cc b/storage/tokudb/PerconaFT/ft/tests/verify-dup-in-leaf.cc
index 42e82884432..22a29c0ff69 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-dup-in-leaf.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-dup-in-leaf.cc
@@ -77,7 +77,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
NULL,
NULL);
- // dont forget to dirty the node
+ // don't forget to dirty the node
leafnode->dirty = 1;
}
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-dup-pivots.cc b/storage/tokudb/PerconaFT/ft/tests/verify-dup-pivots.cc
index b3e8663ed3b..80189dd9804 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-dup-pivots.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-dup-pivots.cc
@@ -76,7 +76,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
NULL,
NULL);
- // dont forget to dirty the node
+ // don't forget to dirty the node
leafnode->dirty = 1;
}
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-misrouted-msgs.cc b/storage/tokudb/PerconaFT/ft/tests/verify-misrouted-msgs.cc
index df5c21ca64e..a84aac1f063 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-misrouted-msgs.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-misrouted-msgs.cc
@@ -77,7 +77,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
NULL,
NULL);
- // dont forget to dirty the node
+ // don't forget to dirty the node
leafnode->dirty = 1;
}
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-leaf.cc b/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-leaf.cc
index 4eccb06c1f3..ca413f52567 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-leaf.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-leaf.cc
@@ -79,7 +79,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
NULL,
NULL);
- // dont forget to dirty the node
+ // don't forget to dirty the node
leafnode->dirty = 1;
}
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-pivots.cc b/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-pivots.cc
index 4492ea9364a..6efa06913c2 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-pivots.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-pivots.cc
@@ -76,7 +76,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
NULL,
NULL);
- // dont forget to dirty the node
+ // don't forget to dirty the node
leafnode->dirty = 1;
}
diff --git a/storage/tokudb/PerconaFT/ft/txn/txn_manager.cc b/storage/tokudb/PerconaFT/ft/txn/txn_manager.cc
index 805c60d30be..88eca36a261 100644
--- a/storage/tokudb/PerconaFT/ft/txn/txn_manager.cc
+++ b/storage/tokudb/PerconaFT/ft/txn/txn_manager.cc
@@ -47,10 +47,10 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "util/omt.h"
//this is only for testing
-static void (* test_txn_sync_callback) (uint64_t, void *) = NULL;
+static void (* test_txn_sync_callback) (pthread_t, void *) = NULL;
static void * test_txn_sync_callback_extra = NULL;
-void set_test_txn_sync_callback(void (*cb) (uint64_t, void *), void *extra) {
+void set_test_txn_sync_callback(void (*cb) (pthread_t, void *), void *extra) {
test_txn_sync_callback = cb;
test_txn_sync_callback_extra = extra;
}
diff --git a/storage/tokudb/PerconaFT/ft/txn/txn_manager.h b/storage/tokudb/PerconaFT/ft/txn/txn_manager.h
index 28fa1ac10b6..7cdc52c4f43 100644
--- a/storage/tokudb/PerconaFT/ft/txn/txn_manager.h
+++ b/storage/tokudb/PerconaFT/ft/txn/txn_manager.h
@@ -43,7 +43,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "ft/txn/txn.h"
-void set_test_txn_sync_callback(void (*) (uint64_t, void*), void*);
+void set_test_txn_sync_callback(void (*) (pthread_t, void*), void*);
#define toku_test_txn_sync_callback(a) ((test_txn_sync_callback)? test_txn_sync_callback( a,test_txn_sync_callback_extra) : (void) 0)
#if TOKU_DEBUG_TXN_SYNC
diff --git a/storage/tokudb/PerconaFT/ftcxx/tests/CMakeLists.txt b/storage/tokudb/PerconaFT/ftcxx/tests/CMakeLists.txt
index 8cea16c914d..6f9146ce5b2 100644
--- a/storage/tokudb/PerconaFT/ftcxx/tests/CMakeLists.txt
+++ b/storage/tokudb/PerconaFT/ftcxx/tests/CMakeLists.txt
@@ -2,6 +2,8 @@ include_directories(..)
include_directories(../../src)
include_directories(../../src/tests)
+find_library(JEMALLOC_STATIC_LIBRARY libjemalloc.a)
+
if (BUILD_TESTING)
## reference implementation with simple size-doubling buffer without
## jemalloc size tricks
@@ -24,15 +26,15 @@ if (BUILD_TESTING)
cursor_test
)
set(_testname ${impl}_${test})
- if (with_jemalloc)
+ if (with_jemalloc AND JEMALLOC_STATIC_LIBRARY)
set(_testname ${_testname}_j)
endif ()
add_executable(${_testname} ${test})
- if (with_jemalloc)
+ if (with_jemalloc AND JEMALLOC_STATIC_LIBRARY)
if (APPLE)
- target_link_libraries(${_testname} -Wl,-force_load jemalloc)
+ target_link_libraries(${_testname} -Wl,-force_load ${JEMALLOC_STATIC_LIBRARY})
else ()
- target_link_libraries(${_testname} -Wl,--whole-archive jemalloc -Wl,--no-whole-archive)
+ target_link_libraries(${_testname} -Wl,--whole-archive ${JEMALLOC_STATIC_LIBRARY} -Wl,--no-whole-archive)
endif ()
endif ()
target_link_libraries(${_testname} ${impl})
diff --git a/storage/tokudb/PerconaFT/scripts/run.stress-tests.py b/storage/tokudb/PerconaFT/scripts/run.stress-tests.py
index a8df83a3b55..e983fe8ccd9 100644
--- a/storage/tokudb/PerconaFT/scripts/run.stress-tests.py
+++ b/storage/tokudb/PerconaFT/scripts/run.stress-tests.py
@@ -521,14 +521,16 @@ Test output:
}))
def send_mail(toaddrs, subject, body):
- m = MIMEText(body)
- fromaddr = 'tim@tokutek.com'
- m['From'] = fromaddr
- m['To'] = ', '.join(toaddrs)
- m['Subject'] = subject
- s = SMTP('192.168.1.114')
- s.sendmail(fromaddr, toaddrs, str(m))
- s.quit()
+ # m = MIMEText(body)
+ # fromaddr = 'dev-private@percona.com'
+ # m['From'] = fromaddr
+ # m['To'] = ', '.join(toaddrs)
+ # m['Subject'] = subject
+ # s = SMTP('192.168.1.114')
+ # s.sendmail(fromaddr, toaddrs, str(m))
+ # s.quit()
+ info(subject);
+ info(body);
def update(tokudb):
info('Updating from git.')
@@ -554,12 +556,12 @@ def rebuild(tokudb, builddir, tokudb_data, cc, cxx, tests):
env=newenv,
cwd=builddir)
if r != 0:
- send_mail(['leif@tokutek.com'], 'Stress tests on %s failed to build.' % gethostname(), '')
+ send_mail(['dev-private@percona.com'], 'Stress tests on %s failed to build.' % gethostname(), '')
error('Building the tests failed.')
sys.exit(r)
r = call(['make', '-j8'], cwd=builddir)
if r != 0:
- send_mail(['leif@tokutek.com'], 'Stress tests on %s failed to build.' % gethostname(), '')
+ send_mail(['dev-private@percona.com'], 'Stress tests on %s failed to build.' % gethostname(), '')
error('Building the tests failed.')
sys.exit(r)
@@ -671,7 +673,7 @@ def main(opts):
sys.exit(0)
except Exception, e:
exception('Unhandled exception caught in main.')
- send_mail(['leif@tokutek.com'], 'Stress tests caught unhandled exception in main, on %s' % gethostname(), format_exc())
+ send_mail(['dev-private@percona.com'], 'Stress tests caught unhandled exception in main, on %s' % gethostname(), format_exc())
raise e
if __name__ == '__main__':
@@ -786,7 +788,7 @@ if __name__ == '__main__':
if not opts.send_emails:
opts.email = None
elif len(opts.email) == 0:
- opts.email.append('tokueng@tokutek.com')
+ opts.email.append('dev-private@percona.com')
if opts.debug:
logging.basicConfig(level=logging.DEBUG)
diff --git a/storage/tokudb/PerconaFT/src/indexer-undo-do.cc b/storage/tokudb/PerconaFT/src/indexer-undo-do.cc
index b93429407eb..8d0b080b9fe 100644
--- a/storage/tokudb/PerconaFT/src/indexer-undo-do.cc
+++ b/storage/tokudb/PerconaFT/src/indexer-undo-do.cc
@@ -313,7 +313,7 @@ indexer_undo_do_provisional(DB_INDEXER *indexer, DB *hotdb, struct ule_prov_info
break;
if (outermost_xid_state != TOKUTXN_LIVE && xrindex > num_committed) {
- // if the outermost is not live, then the inner state must be retired. thats the way that the txn API works.
+ // If the outermost is not live, then the inner state must be retired. That's the way that the txn API works.
assert(this_xid_state == TOKUTXN_RETIRED);
}
diff --git a/storage/tokudb/PerconaFT/src/tests/rollback-inconsistency.cc b/storage/tokudb/PerconaFT/src/tests/rollback-inconsistency.cc
index 2fc05b23f0d..f8099c7a639 100644
--- a/storage/tokudb/PerconaFT/src/tests/rollback-inconsistency.cc
+++ b/storage/tokudb/PerconaFT/src/tests/rollback-inconsistency.cc
@@ -54,9 +54,9 @@ populate_table(int start, int end, DB_TXN * parent, DB_ENV * env, DB * db) {
char str[220];
memset(kk, 0, sizeof kk);
memcpy(kk, &k, sizeof k);
- memset(str,'a', sizeof str-1);
+ memset(str,'a', sizeof str);
DBT key = { .data = kk, .size = sizeof kk };
- DBT val = { .data = str, .size = 220 };
+ DBT val = { .data = str, .size = sizeof str };
r = db->put(db, txn, &key, &val, 0);
assert_zero(r);
}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_rowcount.cc b/storage/tokudb/PerconaFT/src/tests/test_db_rowcount.cc
index c3ebbd811bb..c440bdc59e7 100644
--- a/storage/tokudb/PerconaFT/src/tests/test_db_rowcount.cc
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_rowcount.cc
@@ -83,7 +83,7 @@ static void add_records(DB* db, DB_TXN* txn, uint64_t start_id, uint64_t num) {
for (uint64_t i = 0, j=start_id; i < num; i++,j++) {
char key[100], val[256];
DBT k,v;
- snprintf(key, 100, "%08lu", j);
+ snprintf(key, 100, "%08" PRIu64, j);
snprintf(val, 256, "%*s", 200, key);
r =
db->put(
@@ -105,7 +105,7 @@ static void delete_records(
for (uint64_t i = 0, j=start_id; i < num; i++,j++) {
char key[100];
DBT k;
- snprintf(key, 100, "%08lu", j);
+ snprintf(key, 100, "%08" PRIu64, j);
r =
db->del(
db,
@@ -143,7 +143,7 @@ static void test_insert_commit(DB_ENV* env) {
CHECK_NUM_ROWS(num_records, stats);
if (verbose)
- printf("%s : before commit %lu rows\n", __FUNCTION__, stats.bt_ndata);
+ printf("%s : before commit %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
r = txn->commit(txn, 0);
assert(r == 0);
@@ -153,7 +153,7 @@ static void test_insert_commit(DB_ENV* env) {
CHECK_NUM_ROWS(num_records, stats);
if (verbose)
- printf("%s : after commit %lu rows\n", __FUNCTION__, stats.bt_ndata);
+ printf("%s : after commit %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
db->close(db, 0);
}
@@ -175,7 +175,7 @@ static void test_insert_delete_commit(DB_ENV* env) {
CHECK_NUM_ROWS(num_records, stats);
if (verbose)
- printf("%s : before delete %lu rows\n", __FUNCTION__, stats.bt_ndata);
+ printf("%s : before delete %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
delete_records(db, txn, 0, num_records);
@@ -184,7 +184,7 @@ static void test_insert_delete_commit(DB_ENV* env) {
CHECK_NUM_ROWS(0, stats);
if (verbose)
- printf("%s : after delete %lu rows\n", __FUNCTION__, stats.bt_ndata);
+ printf("%s : after delete %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
r = txn->commit(txn, 0);
assert(r == 0);
@@ -194,7 +194,7 @@ static void test_insert_delete_commit(DB_ENV* env) {
CHECK_NUM_ROWS(0, stats);
if (verbose)
- printf("%s : after commit %lu rows\n", __FUNCTION__, stats.bt_ndata);
+ printf("%s : after commit %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
db->close(db, 0);
}
@@ -217,7 +217,7 @@ static void test_insert_commit_delete_commit(DB_ENV* env) {
CHECK_NUM_ROWS(num_records, stats);
if (verbose)
printf(
- "%s : before insert commit %lu rows\n",
+ "%s : before insert commit %" PRIu64 " rows\n",
__FUNCTION__,
stats.bt_ndata);
@@ -230,7 +230,7 @@ static void test_insert_commit_delete_commit(DB_ENV* env) {
CHECK_NUM_ROWS(num_records, stats);
if (verbose)
printf(
- "%s : after insert commit %lu rows\n",
+ "%s : after insert commit %" PRIu64 " rows\n",
__FUNCTION__,
stats.bt_ndata);
@@ -244,7 +244,7 @@ static void test_insert_commit_delete_commit(DB_ENV* env) {
CHECK_NUM_ROWS(0, stats);
if (verbose)
- printf("%s : after delete %lu rows\n", __FUNCTION__, stats.bt_ndata);
+ printf("%s : after delete %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
r = txn->commit(txn, 0);
assert(r == 0);
@@ -255,7 +255,7 @@ static void test_insert_commit_delete_commit(DB_ENV* env) {
CHECK_NUM_ROWS(0, stats);
if (verbose)
printf(
- "%s : after delete commit %lu rows\n",
+ "%s : after delete commit %" PRIu64 " rows\n",
__FUNCTION__,
stats.bt_ndata);
@@ -279,7 +279,7 @@ static void test_insert_rollback(DB_ENV* env) {
CHECK_NUM_ROWS(num_records, stats);
if (verbose)
- printf("%s : before rollback %lu rows\n", __FUNCTION__, stats.bt_ndata);
+ printf("%s : before rollback %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
r = txn->abort(txn);
assert(r == 0);
@@ -292,7 +292,7 @@ static void test_insert_rollback(DB_ENV* env) {
// MESSAGES ARE "IN-FLIGHT" IN THE TREE AND MUST BE APPLIED IN ORDER TO
// CORRECT THE RUNNING LOGICAL COUNT
if (verbose)
- printf("%s : after rollback %lu rows\n", __FUNCTION__, stats.bt_ndata);
+ printf("%s : after rollback %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
full_optimize(db);
@@ -302,7 +302,7 @@ static void test_insert_rollback(DB_ENV* env) {
CHECK_NUM_ROWS(0, stats);
if (verbose)
printf(
- "%s : after rollback optimize %lu rows\n",
+ "%s : after rollback optimize %" PRIu64 " rows\n",
__FUNCTION__,
stats.bt_ndata);
@@ -326,7 +326,7 @@ static void test_insert_delete_rollback(DB_ENV* env) {
CHECK_NUM_ROWS(num_records, stats);
if (verbose)
- printf("%s : before delete %lu rows\n", __FUNCTION__, stats.bt_ndata);
+ printf("%s : before delete %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
delete_records(db, txn, 0, num_records);
@@ -335,7 +335,7 @@ static void test_insert_delete_rollback(DB_ENV* env) {
CHECK_NUM_ROWS(0, stats);
if (verbose)
- printf("%s : after delete %lu rows\n", __FUNCTION__, stats.bt_ndata);
+ printf("%s : after delete %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
r = txn->abort(txn);
assert(r == 0);
@@ -345,7 +345,7 @@ static void test_insert_delete_rollback(DB_ENV* env) {
CHECK_NUM_ROWS(0, stats);
if (verbose)
- printf("%s : after commit %lu rows\n", __FUNCTION__, stats.bt_ndata);
+ printf("%s : after commit %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
db->close(db, 0);
}
@@ -368,7 +368,7 @@ static void test_insert_commit_delete_rollback(DB_ENV* env) {
CHECK_NUM_ROWS(num_records, stats);
if (verbose)
printf(
- "%s : before insert commit %lu rows\n",
+ "%s : before insert commit %" PRIu64 " rows\n",
__FUNCTION__,
stats.bt_ndata);
@@ -381,7 +381,7 @@ static void test_insert_commit_delete_rollback(DB_ENV* env) {
CHECK_NUM_ROWS(num_records, stats);
if (verbose)
printf(
- "%s : after insert commit %lu rows\n",
+ "%s : after insert commit %" PRIu64 " rows\n",
__FUNCTION__,
stats.bt_ndata);
@@ -395,7 +395,7 @@ static void test_insert_commit_delete_rollback(DB_ENV* env) {
CHECK_NUM_ROWS(0, stats);
if (verbose)
- printf("%s : after delete %lu rows\n", __FUNCTION__, stats.bt_ndata);
+ printf("%s : after delete %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
r = txn->abort(txn);
assert(r == 0);
@@ -409,7 +409,7 @@ static void test_insert_commit_delete_rollback(DB_ENV* env) {
// CORRECT THE RUNNING LOGICAL COUNT
if (verbose)
printf(
- "%s : after delete rollback %lu rows\n",
+ "%s : after delete rollback %" PRIu64 " rows\n",
__FUNCTION__,
stats.bt_ndata);
@@ -421,17 +421,12 @@ static void test_insert_commit_delete_rollback(DB_ENV* env) {
CHECK_NUM_ROWS(num_records, stats);
if (verbose)
printf(
- "%s : after delete rollback optimize %lu rows\n",
+ "%s : after delete rollback optimize %" PRIu64 " rows\n",
__FUNCTION__,
stats.bt_ndata);
db->close(db, 0);
}
-static inline uint64_t time_in_microsec() {
- struct timeval t;
- gettimeofday(&t, NULL);
- return t.tv_sec * (1UL * 1000 * 1000) + t.tv_usec;
-}
static int test_recount_insert_commit_progress(
uint64_t count,
@@ -440,7 +435,7 @@ static int test_recount_insert_commit_progress(
if (verbose)
printf(
- "%s : count[%lu] deleted[%lu]\n",
+ "%s : count[%" PRIu64 "] deleted[%" PRIu64 "]\n",
__FUNCTION__,
count,
deleted);
@@ -469,7 +464,7 @@ static void test_recount_insert_commit(DB_ENV* env) {
CHECK_NUM_ROWS(num_records, stats);
if (verbose)
printf(
- "%s : before commit %lu rows\n",
+ "%s : before commit %" PRIu64 " rows\n",
__FUNCTION__,
stats.bt_ndata);
@@ -481,7 +476,7 @@ static void test_recount_insert_commit(DB_ENV* env) {
CHECK_NUM_ROWS(num_records, stats);
if (verbose)
- printf("%s : after commit %lu rows\n", __FUNCTION__, stats.bt_ndata);
+ printf("%s : after commit %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
// test that recount counted correct # of rows
r = db->recount_rows(db, test_recount_insert_commit_progress, NULL);
diff --git a/storage/tokudb/PerconaFT/src/tests/txn_manager_handle_snapshot_atomicity.cc b/storage/tokudb/PerconaFT/src/tests/txn_manager_handle_snapshot_atomicity.cc
index b55610757e2..30cc16d73a7 100644
--- a/storage/tokudb/PerconaFT/src/tests/txn_manager_handle_snapshot_atomicity.cc
+++ b/storage/tokudb/PerconaFT/src/tests/txn_manager_handle_snapshot_atomicity.cc
@@ -92,19 +92,19 @@ struct start_txn_arg {
static struct test_sync sync_s;
-static void test_callback(uint64_t self_tid, void * extra) {
+static void test_callback(pthread_t self_tid, void * extra) {
pthread_t **p = (pthread_t **) extra;
pthread_t tid_1 = *p[0];
pthread_t tid_2 = *p[1];
- assert(self_tid == tid_2);
- printf("%s: the thread[%" PRIu64 "] is going to wait...\n", __func__, tid_1);
+ assert(pthread_equal(self_tid, tid_2));
+ printf("%s: the thread[%" PRIu64 "] is going to wait...\n", __func__, reinterpret_cast<uint64_t>(tid_1));
test_sync_next_state(&sync_s);
sleep(3);
//test_sync_sleep(&sync_s,3);
//using test_sync_sleep/test_sync_next_state pair can sync threads better, however
//after the fix, this might cause a deadlock. just simply use sleep to do a proof-
//of-concept test.
- printf("%s: the thread[%" PRIu64 "] is resuming...\n", __func__, tid_1);
+ printf("%s: the thread[%" PRIu64 "] is resuming...\n", __func__, reinterpret_cast<uint64_t>(tid_1));
return;
}
@@ -114,7 +114,7 @@ static void * start_txn2(void * extra) {
DB * db = args->db;
DB_TXN * parent = args->parent;
test_sync_sleep(&sync_s, 1);
- printf("start %s [thread %" PRIu64 "]\n", __func__, pthread_self());
+ printf("start %s [thread %" PRIu64 "]\n", __func__, reinterpret_cast<uint64_t>(pthread_self()));
DB_TXN *txn;
int r = env->txn_begin(env, parent, &txn, DB_READ_COMMITTED);
assert(r == 0);
@@ -127,7 +127,7 @@ static void * start_txn2(void * extra) {
r = txn->commit(txn, 0);
assert(r == 0);
- printf("%s done[thread %" PRIu64 "]\n", __func__, pthread_self());
+ printf("%s done[thread %" PRIu64 "]\n", __func__, reinterpret_cast<uint64_t>(pthread_self()));
return extra;
}
@@ -135,14 +135,14 @@ static void * start_txn1(void * extra) {
struct start_txn_arg * args = (struct start_txn_arg *) extra;
DB_ENV * env = args -> env;
DB * db = args->db;
- printf("start %s: [thread %" PRIu64 "]\n", __func__, pthread_self());
+ printf("start %s: [thread %" PRIu64 "]\n", __func__, reinterpret_cast<uint64_t>(pthread_self()));
DB_TXN *txn;
int r = env->txn_begin(env, NULL, &txn, DB_READ_COMMITTED);
assert(r == 0);
- printf("%s: txn began by [thread %" PRIu64 "], will wait\n", __func__, pthread_self());
+ printf("%s: txn began by [thread %" PRIu64 "], will wait\n", __func__, reinterpret_cast<uint64_t>(pthread_self()));
test_sync_next_state(&sync_s);
test_sync_sleep(&sync_s,2);
- printf("%s: [thread %" PRIu64 "] resumed\n", __func__, pthread_self());
+ printf("%s: [thread %" PRIu64 "] resumed\n", __func__, reinterpret_cast<uint64_t>(pthread_self()));
//do some random things...
DBT key, data;
dbt_init(&key, "hello", 6);
@@ -151,7 +151,7 @@ static void * start_txn1(void * extra) {
db->get(db, txn, &key, &data, 0);
r = txn->commit(txn, 0);
assert(r == 0);
- printf("%s: done[thread %" PRIu64 "]\n", __func__, pthread_self());
+ printf("%s: done[thread %" PRIu64 "]\n", __func__, reinterpret_cast<uint64_t>(pthread_self()));
//test_sync_next_state(&sync_s);
return extra;
}
diff --git a/storage/tokudb/PerconaFT/src/ydb.cc b/storage/tokudb/PerconaFT/src/ydb.cc
index 61ce5a8476e..55da418a0de 100644
--- a/storage/tokudb/PerconaFT/src/ydb.cc
+++ b/storage/tokudb/PerconaFT/src/ydb.cc
@@ -3148,7 +3148,7 @@ toku_test_get_latest_lsn(DB_ENV *env) {
return rval.lsn;
}
-void toku_set_test_txn_sync_callback(void (* cb) (uint64_t, void *), void * extra) {
+void toku_set_test_txn_sync_callback(void (* cb) (pthread_t, void *), void * extra) {
set_test_txn_sync_callback(cb, extra);
}
diff --git a/storage/tokudb/PerconaFT/src/ydb.h b/storage/tokudb/PerconaFT/src/ydb.h
index bd2902e6c6e..facbfdc9252 100644
--- a/storage/tokudb/PerconaFT/src/ydb.h
+++ b/storage/tokudb/PerconaFT/src/ydb.h
@@ -60,4 +60,4 @@ extern "C" uint64_t toku_test_get_latest_lsn(DB_ENV *env) __attribute__((__visib
extern "C" int toku_test_get_checkpointing_user_data_status(void) __attribute__((__visibility__("default")));
// test-only function
-extern "C" void toku_set_test_txn_sync_callback(void (* ) (uint64_t, void *), void * extra) __attribute__((__visibility__("default")));
+extern "C" void toku_set_test_txn_sync_callback(void (* ) (pthread_t, void *), void * extra) __attribute__((__visibility__("default")));
diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc
index c97be11310a..afb2d875188 100644
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@ -31,6 +31,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "ha_tokudb.h"
+#if TOKU_INCLUDE_EXTENDED_KEYS
+static inline uint get_ext_key_parts(const KEY *key) {
+#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
+ (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
+ return key->actual_key_parts;
+#elif defined(MARIADB_BASE_VERSION)
+ return key->ext_key_parts;
+#else
+#error
+#endif
+}
+#endif
+
HASH TOKUDB_SHARE::_open_tables;
tokudb::thread::mutex_t TOKUDB_SHARE::_open_tables_mutex;
@@ -161,6 +174,15 @@ void TOKUDB_SHARE::static_init() {
void TOKUDB_SHARE::static_destroy() {
my_hash_free(&_open_tables);
}
+const char* TOKUDB_SHARE::get_state_string(share_state_t state) {
+ static const char* state_string[] = {
+ "CLOSED",
+ "OPENED",
+ "ERROR"
+ };
+ assert_always(state == CLOSED || state == OPENED || state == ERROR);
+ return state_string[state];
+}
void* TOKUDB_SHARE::operator new(size_t sz) {
return tokudb::memory::malloc(sz, MYF(MY_WME|MY_ZEROFILL|MY_FAE));
}
@@ -186,12 +208,24 @@ void TOKUDB_SHARE::init(const char* table_name) {
_database_name,
_table_name,
tmp_dictionary_name);
+
+ TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]",
+ _full_table_name.ptr(),
+ get_state_string(_state),
+ _use_count);
+ TOKUDB_SHARE_DBUG_VOID_RETURN();
}
void TOKUDB_SHARE::destroy() {
+ TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]",
+ _full_table_name.ptr(),
+ get_state_string(_state),
+ _use_count);
+
assert_always(_use_count == 0);
assert_always(
_state == TOKUDB_SHARE::CLOSED || _state == TOKUDB_SHARE::ERROR);
thr_lock_delete(&_thr_lock);
+ TOKUDB_SHARE_DBUG_VOID_RETURN();
}
TOKUDB_SHARE* TOKUDB_SHARE::get_share(
const char* table_name,
@@ -207,6 +241,14 @@ TOKUDB_SHARE* TOKUDB_SHARE::get_share(
&_open_tables,
(uchar*)table_name,
length);
+
+ TOKUDB_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_SHARE,
+ "existing share[%s] %s:share[%p]",
+ table_name,
+ share == NULL ? "not found" : "found",
+ share);
+
if (!share) {
if (create_new == false)
goto exit;
@@ -237,25 +279,41 @@ exit:
return share;
}
void TOKUDB_SHARE::drop_share(TOKUDB_SHARE* share) {
+ TOKUDB_TRACE_FOR_FLAGS(
+ TOKUDB_DEBUG_SHARE,
+ "share[%p]:file[%s]:state[%s]:use_count[%d]",
+ share,
+ share->_full_table_name.ptr(),
+ get_state_string(share->_state),
+ share->_use_count);
+
_open_tables_mutex.lock();
my_hash_delete(&_open_tables, (uchar*)share);
_open_tables_mutex.unlock();
}
TOKUDB_SHARE::share_state_t TOKUDB_SHARE::addref() {
+ TOKUDB_SHARE_TRACE_FOR_FLAGS((TOKUDB_DEBUG_ENTER & TOKUDB_DEBUG_SHARE),
+ "file[%s]:state[%s]:use_count[%d]",
+ _full_table_name.ptr(),
+ get_state_string(_state),
+ _use_count);
+
lock();
_use_count++;
- DBUG_PRINT("info", ("0x%p share->_use_count %u", this, _use_count));
-
return _state;
}
int TOKUDB_SHARE::release() {
+ TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]",
+ _full_table_name.ptr(),
+ get_state_string(_state),
+ _use_count);
+
int error, result = 0;
_mutex.lock();
assert_always(_use_count != 0);
_use_count--;
- DBUG_PRINT("info", ("0x%p share->_use_count %u", this, _use_count));
if (_use_count == 0 && _state == TOKUDB_SHARE::OPENED) {
// number of open DB's may not be equal to number of keys we have
// because add_index may have added some. So, we loop through entire
@@ -299,7 +357,7 @@ int TOKUDB_SHARE::release() {
}
_mutex.unlock();
- return result;
+ TOKUDB_SHARE_DBUG_RETURN(result);
}
void TOKUDB_SHARE::update_row_count(
THD* thd,
@@ -350,34 +408,32 @@ void TOKUDB_SHARE::update_row_count(
unlock();
}
void TOKUDB_SHARE::set_cardinality_counts_in_table(TABLE* table) {
- // if there is nothing new to report, just skip it.
- if (_card_changed) {
- lock();
- uint32_t next_key_part = 0;
- for (uint32_t i = 0; i < table->s->keys; i++) {
- bool is_unique_key =
- (i == table->s->primary_key) ||
- (table->key_info[i].flags & HA_NOSAME);
-
- uint32_t num_key_parts = get_key_parts(&table->key_info[i]);
- for (uint32_t j = 0; j < num_key_parts; j++) {
- assert_always(next_key_part < _rec_per_keys);
- ulong val = _rec_per_key[next_key_part++];
- if (is_unique_key && j == num_key_parts-1) {
- val = 1;
- } else {
- val =
- (val*tokudb::sysvars::cardinality_scale_percent)/100;
- if (val == 0)
- val = 1;
- }
+ lock();
+ uint32_t next_key_part = 0;
+ for (uint32_t i = 0; i < table->s->keys; i++) {
+ KEY* key = &table->key_info[i];
+ bool is_unique_key =
+ (i == table->s->primary_key) || (key->flags & HA_NOSAME);
+
+ for (uint32_t j = 0; j < get_ext_key_parts(key); j++) {
+ if (j >= key->user_defined_key_parts) {
+ // MySQL 'hidden' keys, really needs deeper investigation
+ // into MySQL hidden keys vs TokuDB hidden keys
+ key->rec_per_key[j] = 1;
+ continue;
+ }
- table->key_info[i].rec_per_key[j] = val;
+ assert_always(next_key_part < _rec_per_keys);
+ ulong val = _rec_per_key[next_key_part++];
+ val = (val * tokudb::sysvars::cardinality_scale_percent) / 100;
+ if (val == 0 || _rows == 0 ||
+ (is_unique_key && j == get_ext_key_parts(key) - 1)) {
+ val = 1;
}
+ key->rec_per_key[j] = val;
}
- _card_changed = false;
- unlock();
}
+ unlock();
}
#define HANDLE_INVALID_CURSOR() \
@@ -419,36 +475,17 @@ static inline bool do_ignore_flag_optimization(
bool opt_eligible) {
bool do_opt = false;
- if (opt_eligible) {
- if (is_replace_into(thd) || is_insert_ignore(thd)) {
- uint pk_insert_mode = tokudb::sysvars::pk_insert_mode(thd);
- if ((!table->triggers && pk_insert_mode < 2) ||
- pk_insert_mode == 0) {
- if (mysql_bin_log.is_open() &&
- thd->variables.binlog_format != BINLOG_FORMAT_STMT) {
- do_opt = false;
- } else {
- do_opt = true;
- }
- }
- }
+ if (opt_eligible &&
+ (is_replace_into(thd) || is_insert_ignore(thd)) &&
+ tokudb::sysvars::pk_insert_mode(thd) == 1 &&
+ !table->triggers &&
+ !(mysql_bin_log.is_open() &&
+ thd->variables.binlog_format != BINLOG_FORMAT_STMT)) {
+ do_opt = true;
}
return do_opt;
}
-#if TOKU_INCLUDE_EXTENDED_KEYS
-static inline uint get_ext_key_parts(const KEY *key) {
-#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
- (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
- return key->actual_key_parts;
-#elif defined(MARIADB_BASE_VERSION)
- return key->ext_key_parts;
-#else
-#error
-#endif
-}
-#endif
-
ulonglong ha_tokudb::table_flags() const {
return int_table_flags | HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE;
}
@@ -461,10 +498,7 @@ ulong ha_tokudb::index_flags(uint idx, uint part, bool all_parts) const {
TOKUDB_HANDLER_DBUG_ENTER("");
assert_always(table_share);
ulong flags = (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER |
- HA_KEYREAD_ONLY | HA_READ_RANGE);
-#if defined(MARIADB_BASE_VERSION) || (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699)
- flags |= HA_DO_INDEX_COND_PUSHDOWN;
-#endif
+ HA_KEYREAD_ONLY | HA_READ_RANGE | HA_DO_INDEX_COND_PUSHDOWN);
if (key_is_clustering(&table_share->key_info[idx])) {
flags |= HA_CLUSTERED_INDEX;
}
@@ -771,29 +805,36 @@ static int filter_key_part_compare (const void* left, const void* right) {
// if key, table have proper info set. I had to verify by checking
// in the debugger.
//
-void set_key_filter(MY_BITMAP* key_filter, KEY* key, TABLE* table, bool get_offset_from_keypart) {
+void set_key_filter(
+ MY_BITMAP* key_filter,
+ KEY* key,
+ TABLE* table,
+ bool get_offset_from_keypart) {
+
FILTER_KEY_PART_INFO parts[MAX_REF_PARTS];
uint curr_skip_index = 0;
- for (uint i = 0; i < get_key_parts(key); i++) {
+ for (uint i = 0; i < key->user_defined_key_parts; i++) {
//
// horrendous hack due to bugs in mysql, basically
// we cannot always reliably get the offset from the same source
//
- parts[i].offset = get_offset_from_keypart ? key->key_part[i].offset : field_offset(key->key_part[i].field, table);
+ parts[i].offset =
+ get_offset_from_keypart ?
+ key->key_part[i].offset :
+ field_offset(key->key_part[i].field, table);
parts[i].part_index = i;
}
qsort(
parts, // start of array
- get_key_parts(key), //num elements
+ key->user_defined_key_parts, //num elements
sizeof(*parts), //size of each element
- filter_key_part_compare
- );
+ filter_key_part_compare);
for (uint i = 0; i < table->s->fields; i++) {
Field* field = table->field[i];
uint curr_field_offset = field_offset(field, table);
- if (curr_skip_index < get_key_parts(key)) {
+ if (curr_skip_index < key->user_defined_key_parts) {
uint curr_skip_offset = 0;
curr_skip_offset = parts[curr_skip_index].offset;
if (curr_skip_offset == curr_field_offset) {
@@ -1595,7 +1636,11 @@ exit:
return error;
}
-bool ha_tokudb::can_replace_into_be_fast(TABLE_SHARE* table_share, KEY_AND_COL_INFO* kc_info, uint pk) {
+bool ha_tokudb::can_replace_into_be_fast(
+ TABLE_SHARE* table_share,
+ KEY_AND_COL_INFO* kc_info,
+ uint pk) {
+
uint curr_num_DBs = table_share->keys + tokudb_test(hidden_primary_key);
bool ret_val;
if (curr_num_DBs == 1) {
@@ -1606,7 +1651,7 @@ bool ha_tokudb::can_replace_into_be_fast(TABLE_SHARE* table_share, KEY_AND_COL_I
for (uint curr_index = 0; curr_index < table_share->keys; curr_index++) {
if (curr_index == pk) continue;
KEY* curr_key_info = &table_share->key_info[curr_index];
- for (uint i = 0; i < get_key_parts(curr_key_info); i++) {
+ for (uint i = 0; i < curr_key_info->user_defined_key_parts; i++) {
uint16 curr_field_index = curr_key_info->key_part[i].field->field_index;
if (!bitmap_is_set(&kc_info->key_filters[curr_index],curr_field_index)) {
ret_val = false;
@@ -1692,7 +1737,8 @@ int ha_tokudb::initialize_share(const char* name, int mode) {
/* Open other keys; These are part of the share structure */
for (uint i = 0; i < table_share->keys; i++) {
- share->_key_descriptors[i]._parts = get_key_parts(&table_share->key_info[i]);
+ share->_key_descriptors[i]._parts =
+ table_share->key_info[i].user_defined_key_parts;
if (i == primary_key) {
share->_key_descriptors[i]._is_unique = true;
share->_key_descriptors[i]._name = tokudb::memory::strdup("primary", 0);
@@ -1732,8 +1778,9 @@ int ha_tokudb::initialize_share(const char* name, int mode) {
// the "infinity byte" in keys, and for placing the DBT size in the first four bytes
//
ref_length = sizeof(uint32_t) + sizeof(uchar);
- KEY_PART_INFO *key_part = table->key_info[primary_key].key_part;
- KEY_PART_INFO *end = key_part + get_key_parts(&table->key_info[primary_key]);
+ KEY_PART_INFO* key_part = table->key_info[primary_key].key_part;
+ KEY_PART_INFO* end =
+ key_part + table->key_info[primary_key].user_defined_key_parts;
for (; key_part != end; key_part++) {
ref_length += key_part->field->max_packed_col_length(key_part->length);
TOKU_TYPE toku_type = mysql_to_toku_type(key_part->field);
@@ -1901,6 +1948,7 @@ int ha_tokudb::open(const char *name, int mode, uint test_if_locked) {
if (ret_val == 0) {
share->set_state(TOKUDB_SHARE::OPENED);
} else {
+ free_key_and_col_info(&share->kc_info);
share->set_state(TOKUDB_SHARE::ERROR);
}
share->unlock();
@@ -2616,13 +2664,13 @@ exit:
}
uint32_t ha_tokudb::place_key_into_mysql_buff(
- KEY* key_info,
- uchar * record,
- uchar* data
- )
-{
- KEY_PART_INFO *key_part = key_info->key_part, *end = key_part + get_key_parts(key_info);
- uchar *pos = data;
+ KEY* key_info,
+ uchar* record,
+ uchar* data) {
+
+ KEY_PART_INFO* key_part = key_info->key_part;
+ KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
+ uchar* pos = data;
for (; key_part != end; key_part++) {
if (key_part->field->null_bit) {
@@ -2682,15 +2730,14 @@ void ha_tokudb::unpack_key(uchar * record, DBT const *key, uint index) {
}
uint32_t ha_tokudb::place_key_into_dbt_buff(
- KEY* key_info,
- uchar * buff,
- const uchar * record,
- bool* has_null,
- int key_length
- )
-{
- KEY_PART_INFO *key_part = key_info->key_part;
- KEY_PART_INFO *end = key_part + get_key_parts(key_info);
+ KEY* key_info,
+ uchar* buff,
+ const uchar* record,
+ bool* has_null,
+ int key_length) {
+
+ KEY_PART_INFO* key_part = key_info->key_part;
+ KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
uchar* curr_buff = buff;
*has_null = false;
for (; key_part != end && key_length > 0; key_part++) {
@@ -2870,25 +2917,29 @@ DBT* ha_tokudb::create_dbt_key_for_lookup(
// Returns:
// the parameter key
//
-DBT *ha_tokudb::pack_key(
- DBT * key,
- uint keynr,
- uchar * buff,
- const uchar * key_ptr,
- uint key_length,
- int8_t inf_byte
- )
-{
- TOKUDB_HANDLER_DBUG_ENTER("key %p %u:%2.2x inf=%d", key_ptr, key_length, key_length > 0 ? key_ptr[0] : 0, inf_byte);
+DBT* ha_tokudb::pack_key(
+ DBT* key,
+ uint keynr,
+ uchar* buff,
+ const uchar* key_ptr,
+ uint key_length,
+ int8_t inf_byte) {
+
+ TOKUDB_HANDLER_DBUG_ENTER(
+ "key %p %u:%2.2x inf=%d",
+ key_ptr,
+ key_length,
+ key_length > 0 ? key_ptr[0] : 0,
+ inf_byte);
#if TOKU_INCLUDE_EXTENDED_KEYS
if (keynr != primary_key && !tokudb_test(hidden_primary_key)) {
DBUG_RETURN(pack_ext_key(key, keynr, buff, key_ptr, key_length, inf_byte));
}
#endif
- KEY *key_info = &table->key_info[keynr];
- KEY_PART_INFO *key_part = key_info->key_part;
- KEY_PART_INFO *end = key_part + get_key_parts(key_info);
- my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
+ KEY* key_info = &table->key_info[keynr];
+ KEY_PART_INFO* key_part = key_info->key_part;
+ KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
+ my_bitmap_map* old_map = dbug_tmp_use_all_columns(table, table->write_set);
memset((void *) key, 0, sizeof(*key));
key->data = buff;
@@ -2930,31 +2981,30 @@ DBT *ha_tokudb::pack_key(
}
#if TOKU_INCLUDE_EXTENDED_KEYS
-DBT *ha_tokudb::pack_ext_key(
- DBT * key,
- uint keynr,
- uchar * buff,
- const uchar * key_ptr,
- uint key_length,
- int8_t inf_byte
- )
-{
+DBT* ha_tokudb::pack_ext_key(
+ DBT* key,
+ uint keynr,
+ uchar* buff,
+ const uchar* key_ptr,
+ uint key_length,
+ int8_t inf_byte) {
+
TOKUDB_HANDLER_DBUG_ENTER("");
// build a list of PK parts that are in the SK. we will use this list to build the
// extended key if necessary.
- KEY *pk_key_info = &table->key_info[primary_key];
- uint pk_parts = get_key_parts(pk_key_info);
+ KEY* pk_key_info = &table->key_info[primary_key];
+ uint pk_parts = pk_key_info->user_defined_key_parts;
uint pk_next = 0;
struct {
const uchar *key_ptr;
KEY_PART_INFO *key_part;
} pk_info[pk_parts];
- KEY *key_info = &table->key_info[keynr];
- KEY_PART_INFO *key_part = key_info->key_part;
- KEY_PART_INFO *end = key_part + get_key_parts(key_info);
- my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
+ KEY* key_info = &table->key_info[keynr];
+ KEY_PART_INFO* key_part = key_info->key_part;
+ KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
+ my_bitmap_map* old_map = dbug_tmp_use_all_columns(table, table->write_set);
memset((void *) key, 0, sizeof(*key));
key->data = buff;
@@ -4440,11 +4490,16 @@ cleanup:
TOKUDB_HANDLER_DBUG_RETURN(error);
}
-static bool index_key_is_null(TABLE *table, uint keynr, const uchar *key, uint key_len) {
+static bool index_key_is_null(
+ TABLE* table,
+ uint keynr,
+ const uchar* key,
+ uint key_len) {
+
bool key_can_be_null = false;
- KEY *key_info = &table->key_info[keynr];
- KEY_PART_INFO *key_part = key_info->key_part;
- KEY_PART_INFO *end = key_part + get_key_parts(key_info);
+ KEY* key_info = &table->key_info[keynr];
+ KEY_PART_INFO* key_part = key_info->key_part;
+ KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
for (; key_part != end; key_part++) {
if (key_part->null_bit) {
key_can_be_null = true;
@@ -4827,7 +4882,7 @@ int ha_tokudb::read_full_row(uchar * buf) {
// HA_ERR_END_OF_FILE if not found
// error otherwise
//
-int ha_tokudb::index_next_same(uchar * buf, const uchar * key, uint keylen) {
+int ha_tokudb::index_next_same(uchar* buf, const uchar* key, uint keylen) {
TOKUDB_HANDLER_DBUG_ENTER("");
ha_statistic_increment(&SSV::ha_read_next_count);
@@ -4845,8 +4900,16 @@ int ha_tokudb::index_next_same(uchar * buf, const uchar * key, uint keylen) {
//
// now do the comparison
//
- create_dbt_key_from_table(&found_key,tokudb_active_index,key_buff3,buf,&has_null);
- cmp = tokudb_prefix_cmp_dbt_key(share->key_file[tokudb_active_index], &curr_key, &found_key);
+ create_dbt_key_from_table(
+ &found_key,
+ tokudb_active_index,
+ key_buff3,buf,
+ &has_null);
+ cmp =
+ tokudb_prefix_cmp_dbt_key(
+ share->key_file[tokudb_active_index],
+ &curr_key,
+ &found_key);
if (cmp) {
error = HA_ERR_END_OF_FILE;
}
@@ -5105,17 +5168,27 @@ exit:
return error;
}
-static int
-smart_dbt_bf_callback(DBT const *key, DBT const *row, void *context) {
+static int smart_dbt_bf_callback(
+ DBT const* key,
+ DBT const* row,
+ void* context) {
SMART_DBT_BF_INFO info = (SMART_DBT_BF_INFO)context;
- return info->ha->fill_range_query_buf(info->need_val, key, row, info->direction, info->thd, info->buf, info->key_to_compare);
+ return
+ info->ha->fill_range_query_buf(
+ info->need_val,
+ key,
+ row,
+ info->direction,
+ info->thd,
+ info->buf,
+ info->key_to_compare);
}
-#if defined(MARIADB_BASE_VERSION) || (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699)
-enum icp_result ha_tokudb::toku_handler_index_cond_check(Item* pushed_idx_cond)
-{
+enum icp_result ha_tokudb::toku_handler_index_cond_check(
+ Item* pushed_idx_cond) {
+
enum icp_result res;
- if (end_range ) {
+ if (end_range) {
int cmp;
#ifdef MARIADB_BASE_VERSION
cmp = compare_key2(end_range);
@@ -5125,27 +5198,27 @@ enum icp_result ha_tokudb::toku_handler_index_cond_check(Item* pushed_idx_cond)
if (cmp > 0) {
return ICP_OUT_OF_RANGE;
}
- }
+ }
res = pushed_idx_cond->val_int() ? ICP_MATCH : ICP_NO_MATCH;
return res;
}
-#endif
// fill in the range query buf for bulk fetch
int ha_tokudb::fill_range_query_buf(
bool need_val,
- DBT const *key,
- DBT const *row,
+ DBT const* key,
+ DBT const* row,
int direction,
THD* thd,
uchar* buf,
- DBT* key_to_compare
- ) {
+ DBT* key_to_compare) {
+
int error;
//
// first put the value into range_query_buf
//
- uint32_t size_remaining = size_range_query_buff - bytes_used_in_range_query_buff;
+ uint32_t size_remaining =
+ size_range_query_buff - bytes_used_in_range_query_buff;
uint32_t size_needed;
uint32_t user_defined_size = tokudb::sysvars::read_buf_size(thd);
uchar* curr_pos = NULL;
@@ -5154,8 +5227,7 @@ int ha_tokudb::fill_range_query_buf(
int cmp = tokudb_prefix_cmp_dbt_key(
share->key_file[tokudb_active_index],
key_to_compare,
- key
- );
+ key);
if (cmp) {
icp_went_out_of_range = true;
error = 0;
@@ -5163,26 +5235,38 @@ int ha_tokudb::fill_range_query_buf(
}
}
-#if defined(MARIADB_BASE_VERSION) || (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699)
// if we have an index condition pushed down, we check it
- if (toku_pushed_idx_cond && (tokudb_active_index == toku_pushed_idx_cond_keyno)) {
+ if (toku_pushed_idx_cond &&
+ (tokudb_active_index == toku_pushed_idx_cond_keyno)) {
unpack_key(buf, key, tokudb_active_index);
- enum icp_result result = toku_handler_index_cond_check(toku_pushed_idx_cond);
+ enum icp_result result =
+ toku_handler_index_cond_check(toku_pushed_idx_cond);
+
// If we have reason to stop, we set icp_went_out_of_range and get out
+ // otherwise, if we simply see that the current key is no match,
+ // we tell the cursor to continue and don't store
+ // the key locally
if (result == ICP_OUT_OF_RANGE || thd_killed(thd)) {
icp_went_out_of_range = true;
error = 0;
+ DEBUG_SYNC(ha_thd(), "tokudb_icp_asc_scan_out_of_range");
goto cleanup;
- }
- // otherwise, if we simply see that the current key is no match,
- // we tell the cursor to continue and don't store
- // the key locally
- else if (result == ICP_NO_MATCH) {
+ } else if (result == ICP_NO_MATCH) {
+ // if we are performing a DESC ICP scan and have no end_range
+ // to compare to stop using ICP filtering as there isn't much more
+ // that we can do without going through contortions with remembering
+ // and comparing key parts.
+ if (!end_range &&
+ direction < 0) {
+
+ cancel_pushed_idx_cond();
+ DEBUG_SYNC(ha_thd(), "tokudb_icp_desc_scan_invalidate");
+ }
+
error = TOKUDB_CURSOR_CONTINUE;
goto cleanup;
}
}
-#endif
// at this point, if ICP is on, we have verified that the key is one
// we are interested in, so we proceed with placing the data
@@ -5191,57 +5275,63 @@ int ha_tokudb::fill_range_query_buf(
if (need_val) {
if (unpack_entire_row) {
size_needed = 2*sizeof(uint32_t) + key->size + row->size;
- }
- else {
+ } else {
// this is an upper bound
- size_needed = sizeof(uint32_t) + // size of key length
- key->size + row->size + //key and row
- num_var_cols_for_query*(sizeof(uint32_t)) + //lengths of varchars stored
- sizeof(uint32_t); //length of blobs
+ size_needed =
+ // size of key length
+ sizeof(uint32_t) +
+ // key and row
+ key->size + row->size +
+ // lengths of varchars stored
+ num_var_cols_for_query * (sizeof(uint32_t)) +
+ // length of blobs
+ sizeof(uint32_t);
}
- }
- else {
+ } else {
size_needed = sizeof(uint32_t) + key->size;
}
if (size_remaining < size_needed) {
- range_query_buff = (uchar *)tokudb::memory::realloc(
- (void *)range_query_buff,
- bytes_used_in_range_query_buff+size_needed,
- MYF(MY_WME)
- );
+ range_query_buff =
+ static_cast<uchar*>(tokudb::memory::realloc(
+ static_cast<void*>(range_query_buff),
+ bytes_used_in_range_query_buff + size_needed,
+ MYF(MY_WME)));
if (range_query_buff == NULL) {
error = ENOMEM;
invalidate_bulk_fetch();
goto cleanup;
}
- size_range_query_buff = bytes_used_in_range_query_buff+size_needed;
+ size_range_query_buff = bytes_used_in_range_query_buff + size_needed;
}
//
// now we know we have the size, let's fill the buffer, starting with the key
//
curr_pos = range_query_buff + bytes_used_in_range_query_buff;
- *(uint32_t *)curr_pos = key->size;
+ *reinterpret_cast<uint32_t*>(curr_pos) = key->size;
curr_pos += sizeof(uint32_t);
memcpy(curr_pos, key->data, key->size);
curr_pos += key->size;
if (need_val) {
if (unpack_entire_row) {
- *(uint32_t *)curr_pos = row->size;
+ *reinterpret_cast<uint32_t*>(curr_pos) = row->size;
curr_pos += sizeof(uint32_t);
memcpy(curr_pos, row->data, row->size);
curr_pos += row->size;
- }
- else {
+ } else {
// need to unpack just the data we care about
- const uchar* fixed_field_ptr = (const uchar *) row->data;
+ const uchar* fixed_field_ptr = static_cast<const uchar*>(row->data);
fixed_field_ptr += table_share->null_bytes;
const uchar* var_field_offset_ptr = NULL;
const uchar* var_field_data_ptr = NULL;
- var_field_offset_ptr = fixed_field_ptr + share->kc_info.mcp_info[tokudb_active_index].fixed_field_size;
- var_field_data_ptr = var_field_offset_ptr + share->kc_info.mcp_info[tokudb_active_index].len_of_offsets;
+ var_field_offset_ptr =
+ fixed_field_ptr +
+ share->kc_info.mcp_info[tokudb_active_index].fixed_field_size;
+ var_field_data_ptr =
+ var_field_offset_ptr +
+ share->kc_info.mcp_info[tokudb_active_index].len_of_offsets;
// first the null bytes
memcpy(curr_pos, row->data, table_share->null_bytes);
@@ -5255,8 +5345,7 @@ int ha_tokudb::fill_range_query_buf(
memcpy(
curr_pos,
fixed_field_ptr + share->kc_info.cp_info[tokudb_active_index][field_index].col_pack_val,
- share->kc_info.field_lengths[field_index]
- );
+ share->kc_info.field_lengths[field_index]);
curr_pos += share->kc_info.field_lengths[field_index];
}
@@ -5265,7 +5354,8 @@ int ha_tokudb::fill_range_query_buf(
//
for (uint32_t i = 0; i < num_var_cols_for_query; i++) {
uint field_index = var_cols_for_query[i];
- uint32_t var_field_index = share->kc_info.cp_info[tokudb_active_index][field_index].col_pack_val;
+ uint32_t var_field_index =
+ share->kc_info.cp_info[tokudb_active_index][field_index].col_pack_val;
uint32_t data_start_offset;
uint32_t field_len;
@@ -5274,11 +5364,13 @@ int ha_tokudb::fill_range_query_buf(
&data_start_offset,
var_field_index,
var_field_offset_ptr,
- share->kc_info.num_offset_bytes
- );
+ share->kc_info.num_offset_bytes);
memcpy(curr_pos, &field_len, sizeof(field_len));
curr_pos += sizeof(field_len);
- memcpy(curr_pos, var_field_data_ptr + data_start_offset, field_len);
+ memcpy(
+ curr_pos,
+ var_field_data_ptr + data_start_offset,
+ field_len);
curr_pos += field_len;
}
@@ -5292,9 +5384,12 @@ int ha_tokudb::fill_range_query_buf(
&blob_offset,
share->kc_info.mcp_info[tokudb_active_index].len_of_offsets,
var_field_data_ptr,
- share->kc_info.num_offset_bytes
- );
- data_size = row->size - blob_offset - (uint32_t)(var_field_data_ptr - (const uchar *)row->data);
+ share->kc_info.num_offset_bytes);
+ data_size =
+ row->size -
+ blob_offset -
+ static_cast<uint32_t>((var_field_data_ptr -
+ static_cast<const uchar*>(row->data)));
memcpy(curr_pos, &data_size, sizeof(data_size));
curr_pos += sizeof(data_size);
memcpy(curr_pos, var_field_data_ptr + blob_offset, data_size);
@@ -5328,7 +5423,9 @@ int ha_tokudb::fill_range_query_buf(
}
}
- if (bytes_used_in_range_query_buff + table_share->rec_buff_length > user_defined_size) {
+ if (bytes_used_in_range_query_buff +
+ table_share->rec_buff_length >
+ user_defined_size) {
error = 0;
goto cleanup;
}
@@ -5346,11 +5443,9 @@ int ha_tokudb::fill_range_query_buf(
int cmp = tokudb_cmp_dbt_key(
share->key_file[tokudb_active_index],
key,
- &right_range
- );
+ &right_range);
error = (cmp > 0) ? 0 : TOKUDB_CURSOR_CONTINUE;
- }
- else {
+ } else {
// compare what we got to the left endpoint of prelocked range
// because we are searching keys in descending order
if (prelocked_left_range_size == 0) {
@@ -5364,15 +5459,19 @@ int ha_tokudb::fill_range_query_buf(
int cmp = tokudb_cmp_dbt_key(
share->key_file[tokudb_active_index],
key,
- &left_range
- );
+ &left_range);
error = (cmp < 0) ? 0 : TOKUDB_CURSOR_CONTINUE;
}
cleanup:
return error;
}
-int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare, bool do_key_read) {
+int ha_tokudb::get_next(
+ uchar* buf,
+ int direction,
+ DBT* key_to_compare,
+ bool do_key_read) {
+
int error = 0;
HANDLE_INVALID_CURSOR();
@@ -5389,17 +5488,18 @@ int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare, bool do_
// we need to read the val of what we retrieve if
// we do NOT have a covering index AND we are using a clustering secondary
// key
- bool need_val = (do_key_read == 0) &&
- (tokudb_active_index == primary_key || key_is_clustering(&table->key_info[tokudb_active_index]));
+ bool need_val =
+ (do_key_read == 0) &&
+ (tokudb_active_index == primary_key ||
+ key_is_clustering(&table->key_info[tokudb_active_index]));
- if ((bytes_used_in_range_query_buff - curr_range_query_buff_offset) > 0) {
+ if ((bytes_used_in_range_query_buff -
+ curr_range_query_buff_offset) > 0) {
error = read_data_from_range_query_buff(buf, need_val, do_key_read);
- }
- else if (icp_went_out_of_range) {
+ } else if (icp_went_out_of_range) {
icp_went_out_of_range = false;
error = HA_ERR_END_OF_FILE;
- }
- else {
+ } else {
invalidate_bulk_fetch();
if (doing_bulk_fetch) {
struct smart_dbt_bf_info bf_info;
@@ -5420,16 +5520,28 @@ int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare, bool do_
// this while loop. icp_out_of_range will be set if we hit a row that
// the index condition states is out of our range. When that hits,
// we know all the data in the buffer is the last data we will retrieve
- while (bytes_used_in_range_query_buff == 0 && !icp_went_out_of_range && error == 0) {
+ while (bytes_used_in_range_query_buff == 0 &&
+ !icp_went_out_of_range && error == 0) {
if (direction > 0) {
- error = cursor->c_getf_next(cursor, flags, smart_dbt_bf_callback, &bf_info);
+ error =
+ cursor->c_getf_next(
+ cursor,
+ flags,
+ smart_dbt_bf_callback,
+ &bf_info);
} else {
- error = cursor->c_getf_prev(cursor, flags, smart_dbt_bf_callback, &bf_info);
+ error =
+ cursor->c_getf_prev(
+ cursor,
+ flags,
+ smart_dbt_bf_callback,
+ &bf_info);
}
}
// if there is no data set and we went out of range,
// then there is nothing to return
- if (bytes_used_in_range_query_buff == 0 && icp_went_out_of_range) {
+ if (bytes_used_in_range_query_buff == 0 &&
+ icp_went_out_of_range) {
icp_went_out_of_range = false;
error = HA_ERR_END_OF_FILE;
}
@@ -5437,26 +5549,46 @@ int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare, bool do_
bulk_fetch_iteration++;
}
- error = handle_cursor_error(error, HA_ERR_END_OF_FILE,tokudb_active_index);
- if (error) { goto cleanup; }
+ error =
+ handle_cursor_error(
+ error,
+ HA_ERR_END_OF_FILE,
+ tokudb_active_index);
+ if (error) {
+ goto cleanup;
+ }
//
// now that range_query_buff is filled, read an element
//
- error = read_data_from_range_query_buff(buf, need_val, do_key_read);
- }
- else {
+ error =
+ read_data_from_range_query_buff(buf, need_val, do_key_read);
+ } else {
struct smart_dbt_info info;
info.ha = this;
info.buf = buf;
info.keynr = tokudb_active_index;
if (direction > 0) {
- error = cursor->c_getf_next(cursor, flags, SMART_DBT_CALLBACK(do_key_read), &info);
+ error =
+ cursor->c_getf_next(
+ cursor,
+ flags,
+ SMART_DBT_CALLBACK(do_key_read),
+ &info);
} else {
- error = cursor->c_getf_prev(cursor, flags, SMART_DBT_CALLBACK(do_key_read), &info);
+ error =
+ cursor->c_getf_prev(
+ cursor,
+ flags,
+ SMART_DBT_CALLBACK(do_key_read),
+ &info);
}
- error = handle_cursor_error(error, HA_ERR_END_OF_FILE, tokudb_active_index);
+ error =
+ handle_cursor_error(
+ error,
+ HA_ERR_END_OF_FILE,
+ tokudb_active_index);
}
}
}
@@ -5469,13 +5601,17 @@ int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare, bool do_
// read the full row by doing a point query into the
// main table.
//
- if (!error && !do_key_read && (tokudb_active_index != primary_key) && !key_is_clustering(&table->key_info[tokudb_active_index])) {
+ if (!error &&
+ !do_key_read &&
+ (tokudb_active_index != primary_key) &&
+ !key_is_clustering(&table->key_info[tokudb_active_index])) {
error = read_full_row(buf);
}
if (!error) {
THD *thd = ha_thd();
- tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
+ tokudb_trx_data* trx =
+ static_cast<tokudb_trx_data*>(thd_get_ha_data(thd, tokudb_hton));
trx->stmt_progress.queried++;
track_progress(thd);
if (thd_killed(thd))
@@ -5980,7 +6116,6 @@ int ha_tokudb::info(uint flag) {
#endif
DB_TXN* txn = NULL;
if (flag & HA_STATUS_VARIABLE) {
- // Just to get optimizations right
stats.records = share->row_count() + share->rows_from_locked_table;
stats.deleted = 0;
if (!(flag & HA_STATUS_NO_LOCK)) {
@@ -6080,6 +6215,22 @@ int ha_tokudb::info(uint flag) {
stats.delete_length += frag_info.unused_bytes;
}
}
+
+ /*
+ The following comment and logic has been taken from InnoDB and
+ an old hack was removed that forced to always set stats.records > 0
+ ---
+ The MySQL optimizer seems to assume in a left join that n_rows
+ is an accurate estimate if it is zero. Of course, it is not,
+ since we do not have any locks on the rows yet at this phase.
+ Since SHOW TABLE STATUS seems to call this function with the
+ HA_STATUS_TIME flag set, while the left join optimizer does not
+ set that flag, we add one to a zero value if the flag is not
+ set. That way SHOW TABLE STATUS will show the best estimate,
+ while the optimizer never sees the table empty. */
+ if (stats.records == 0 && !(flag & HA_STATUS_TIME)) {
+ stats.records++;
+ }
}
if ((flag & HA_STATUS_CONST)) {
stats.max_data_file_length = 9223372036854775807ULL;
@@ -6780,9 +6931,9 @@ void ha_tokudb::trace_create_table_info(const char *name, TABLE * form) {
"key:%d:%s:%d",
i,
key->name,
- get_key_parts(key));
+ key->user_defined_key_parts);
uint p;
- for (p = 0; p < get_key_parts(key); p++) {
+ for (p = 0; p < key->user_defined_key_parts; p++) {
KEY_PART_INFO* key_part = &key->key_part[p];
Field* field = key_part->field;
TOKUDB_HANDLER_TRACE(
@@ -8566,6 +8717,10 @@ int ha_tokudb::delete_all_rows_internal() {
uint curr_num_DBs = 0;
DB_TXN* txn = NULL;
+ // this should be enough to handle locking as the higher level MDL
+ // on this table should prevent any new analyze tasks.
+ share->cancel_background_jobs();
+
error = txn_begin(db_env, 0, &txn, 0, ha_thd());
if (error) {
goto cleanup;
@@ -8593,6 +8748,8 @@ int ha_tokudb::delete_all_rows_internal() {
}
}
+ DEBUG_SYNC(ha_thd(), "tokudb_after_truncate_all_dictionarys");
+
// zap the row count
if (error == 0) {
share->set_row_count(0, false);
@@ -8690,6 +8847,11 @@ Item* ha_tokudb::idx_cond_push(uint keyno_arg, Item* idx_cond_arg) {
return idx_cond_arg;
}
+void ha_tokudb::cancel_pushed_idx_cond() {
+ invalidate_icp();
+ handler::cancel_pushed_idx_cond();
+}
+
void ha_tokudb::cleanup_txn(DB_TXN *txn) {
if (transaction == txn && cursor) {
int r = cursor->c_close(cursor);
diff --git a/storage/tokudb/ha_tokudb.h b/storage/tokudb/ha_tokudb.h
index 1132b99741d..3d7a3a7fa05 100644
--- a/storage/tokudb/ha_tokudb.h
+++ b/storage/tokudb/ha_tokudb.h
@@ -61,9 +61,9 @@ typedef struct loader_context {
class TOKUDB_SHARE {
public:
enum share_state_t {
- CLOSED,
- OPENED,
- ERROR
+ CLOSED = 0,
+ OPENED = 1,
+ ERROR = 2
};
// one time, start up init
@@ -88,6 +88,9 @@ public:
// exactly 0 _use_count
static void drop_share(TOKUDB_SHARE* share);
+ // returns state string for logging/reporting
+ static const char* get_state_string(share_state_t state);
+
void* operator new(size_t sz);
void operator delete(void* p);
@@ -306,7 +309,6 @@ private:
// cardinality counts
uint32_t _rec_per_keys;
uint64_t* _rec_per_key;
- bool _card_changed;
void init(const char* table_name);
void destroy();
@@ -315,17 +317,34 @@ inline int TOKUDB_SHARE::use_count() const {
return _use_count;
}
inline void TOKUDB_SHARE::lock() const {
+ TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]",
+ _full_table_name.ptr(),
+ get_state_string(_state),
+ _use_count);
_mutex.lock();
+ TOKUDB_SHARE_DBUG_VOID_RETURN();
}
inline void TOKUDB_SHARE::unlock() const {
+ TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]",
+ _full_table_name.ptr(),
+ get_state_string(_state),
+ _use_count);
_mutex.unlock();
+ TOKUDB_SHARE_DBUG_VOID_RETURN();
}
inline TOKUDB_SHARE::share_state_t TOKUDB_SHARE::state() const {
return _state;
}
inline void TOKUDB_SHARE::set_state(TOKUDB_SHARE::share_state_t state) {
+ TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]:new_state[%s]",
+ _full_table_name.ptr(),
+ get_state_string(_state),
+ _use_count,
+ get_state_string(state));
+
assert_debug(_mutex.is_owned_by_me());
_state = state;
+ TOKUDB_SHARE_DBUG_VOID_RETURN();
}
inline const char* TOKUDB_SHARE::full_table_name() const {
return _full_table_name.ptr();
@@ -346,6 +365,13 @@ inline uint TOKUDB_SHARE::table_name_length() const {
return _table_name.length();
}
inline void TOKUDB_SHARE::set_row_count(uint64_t rows, bool locked) {
+ TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]:rows[%" PRIu64 "]:locked[%d]",
+ _full_table_name.ptr(),
+ get_state_string(_state),
+ _use_count,
+ rows,
+ locked);
+
if (!locked) {
lock();
} else {
@@ -358,6 +384,7 @@ inline void TOKUDB_SHARE::set_row_count(uint64_t rows, bool locked) {
if (!locked) {
unlock();
}
+ TOKUDB_SHARE_DBUG_VOID_RETURN();
}
inline ha_rows TOKUDB_SHARE::row_count() const {
return _rows;
@@ -371,7 +398,6 @@ inline void TOKUDB_SHARE::init_cardinality_counts(
assert_always(_rec_per_key == NULL && _rec_per_keys == 0);
_rec_per_keys = rec_per_keys;
_rec_per_key = rec_per_key;
- _card_changed = true;
}
inline void TOKUDB_SHARE::update_cardinality_counts(
uint32_t rec_per_keys,
@@ -382,7 +408,6 @@ inline void TOKUDB_SHARE::update_cardinality_counts(
assert_always(rec_per_keys == _rec_per_keys);
assert_always(rec_per_key != NULL);
memcpy(_rec_per_key, rec_per_key, _rec_per_keys * sizeof(uint64_t));
- _card_changed = true;
}
inline void TOKUDB_SHARE::disallow_auto_analysis() {
assert_debug(_mutex.is_owned_by_me());
@@ -878,9 +903,8 @@ public:
#endif
- // ICP introduced in MariaDB 5.5
Item* idx_cond_push(uint keyno, class Item* idx_cond);
-
+ void cancel_pushed_idx_cond();
#if TOKU_INCLUDE_ALTER_56
public:
@@ -966,13 +990,13 @@ public:
int fill_range_query_buf(
bool need_val,
- DBT const *key,
- DBT const *row,
+ DBT const* key,
+ DBT const* row,
int direction,
THD* thd,
uchar* buf,
- DBT* key_to_compare
- );
+ DBT* key_to_compare);
+
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
enum row_type get_row_type() const;
#endif
@@ -982,9 +1006,7 @@ private:
int get_next(uchar* buf, int direction, DBT* key_to_compare, bool do_key_read);
int read_data_from_range_query_buff(uchar* buf, bool need_val, bool do_key_read);
// for ICP, only in MariaDB and MySQL 5.6
-#if defined(MARIADB_BASE_VERSION) || (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699)
enum icp_result toku_handler_index_cond_check(Item* pushed_idx_cond);
-#endif
void invalidate_bulk_fetch();
void invalidate_icp();
int delete_all_rows_internal();
diff --git a/storage/tokudb/ha_tokudb_admin.cc b/storage/tokudb/ha_tokudb_admin.cc
index 7fa17945f15..db3d6c112d4 100644
--- a/storage/tokudb/ha_tokudb_admin.cc
+++ b/storage/tokudb/ha_tokudb_admin.cc
@@ -374,6 +374,7 @@ void standard_t::on_run() {
_local_txn = false;
}
+ assert_always(_share->key_file[0] != NULL);
_result = _share->key_file[0]->stat64(_share->key_file[0], _txn, &stat64);
if (_result != 0) {
_result = HA_ADMIN_FAILED;
@@ -575,6 +576,7 @@ int standard_t::analyze_key_progress(void) {
int standard_t::analyze_key(uint64_t* rec_per_key_part) {
int error = 0;
DB* db = _share->key_file[_current_key];
+ assert_always(db != NULL);
uint64_t num_key_parts = _share->_key_descriptors[_current_key]._parts;
uint64_t unique_rows[num_key_parts];
bool is_unique = _share->_key_descriptors[_current_key]._is_unique;
@@ -610,8 +612,8 @@ int standard_t::analyze_key(uint64_t* rec_per_key_part) {
analyze_standard_cursor_callback,
this);
- memset(&key, 0, sizeof(DBT)); key.flags = DB_DBT_REALLOC;
- memset(&prev_key, 0, sizeof(DBT)); prev_key.flags = DB_DBT_REALLOC;
+ memset(&key, 0, sizeof(DBT));
+ memset(&prev_key, 0, sizeof(DBT));
copy_key = true;
}
@@ -679,7 +681,6 @@ int standard_t::analyze_key(uint64_t* rec_per_key_part) {
_key_elapsed_time >= _half_time &&
_rows < _half_rows)) {
- tokudb::memory::free(key.data); key.data = NULL;
tokudb::memory::free(prev_key.data); prev_key.data = NULL;
close_error = cursor->c_close(cursor);
assert_always(close_error == 0);
@@ -688,7 +689,6 @@ int standard_t::analyze_key(uint64_t* rec_per_key_part) {
}
}
// cleanup
- if (key.data) tokudb::memory::free(key.data);
if (prev_key.data) tokudb::memory::free(prev_key.data);
if (cursor) close_error = cursor->c_close(cursor);
assert_always(close_error == 0);
@@ -770,10 +770,11 @@ int TOKUDB_SHARE::analyze_standard(THD* thd, DB_TXN* txn) {
int result = HA_ADMIN_OK;
// stub out analyze if optimize is remapped to alter recreate + analyze
- // when not auto analyze
- if (txn &&
- thd_sql_command(thd) != SQLCOM_ANALYZE &&
- thd_sql_command(thd) != SQLCOM_ALTER_TABLE) {
+ // when not auto analyze or if this is an alter
+ if ((txn &&
+ thd_sql_command(thd) != SQLCOM_ANALYZE &&
+ thd_sql_command(thd) != SQLCOM_ALTER_TABLE) ||
+ thd_sql_command(thd) == SQLCOM_ALTER_TABLE) {
TOKUDB_HANDLER_DBUG_RETURN(result);
}
@@ -897,6 +898,7 @@ int ha_tokudb::do_optimize(THD* thd) {
}
DB* db = share->key_file[i];
+ assert_always(db != NULL);
error = db->optimize(db);
if (error) {
goto cleanup;
@@ -1016,7 +1018,8 @@ int ha_tokudb::check(THD* thd, HA_CHECK_OPT* check_opt) {
write_status_msg);
}
for (uint i = 0; i < num_DBs; i++) {
- DB *db = share->key_file[i];
+ DB* db = share->key_file[i];
+ assert_always(db != NULL);
const char* kname =
i == primary_key ? "primary" : table_share->key_info[i].name;
snprintf(
diff --git a/storage/tokudb/ha_tokudb_alter_56.cc b/storage/tokudb/ha_tokudb_alter_56.cc
index d7b3bcb802a..e005d678953 100644
--- a/storage/tokudb/ha_tokudb_alter_56.cc
+++ b/storage/tokudb/ha_tokudb_alter_56.cc
@@ -680,7 +680,7 @@ int ha_tokudb::alter_table_add_index(
KEY *key = &key_info[i];
*key = ha_alter_info->key_info_buffer[ha_alter_info->index_add_buffer[i]];
for (KEY_PART_INFO* key_part = key->key_part;
- key_part < key->key_part + get_key_parts(key);
+ key_part < key->key_part + key->user_defined_key_parts;
key_part++) {
key_part->field = table->field[key_part->fieldnr];
}
@@ -1123,7 +1123,7 @@ int ha_tokudb::alter_table_expand_varchar_offsets(
// Return true if a field is part of a key
static bool field_in_key(KEY *key, Field *field) {
- for (uint i = 0; i < get_key_parts(key); i++) {
+ for (uint i = 0; i < key->user_defined_key_parts; i++) {
KEY_PART_INFO *key_part = &key->key_part[i];
if (strcmp(key_part->field->field_name, field->field_name) == 0)
return true;
diff --git a/storage/tokudb/ha_tokudb_alter_common.cc b/storage/tokudb/ha_tokudb_alter_common.cc
index c58f57b4da7..d41a676de1f 100644
--- a/storage/tokudb/ha_tokudb_alter_common.cc
+++ b/storage/tokudb/ha_tokudb_alter_common.cc
@@ -75,8 +75,8 @@ static bool tables_have_same_keys(
if (print_error) {
sql_print_error(
"keys disagree on if they are clustering, %d, %d",
- get_key_parts(curr_orig_key),
- get_key_parts(curr_altered_key));
+ curr_orig_key->user_defined_key_parts,
+ curr_altered_key->user_defined_key_parts);
}
retval = false;
goto cleanup;
@@ -86,18 +86,19 @@ static bool tables_have_same_keys(
if (print_error) {
sql_print_error(
"keys disagree on if they are unique, %d, %d",
- get_key_parts(curr_orig_key),
- get_key_parts(curr_altered_key));
+ curr_orig_key->user_defined_key_parts,
+ curr_altered_key->user_defined_key_parts);
}
retval = false;
goto cleanup;
}
- if (get_key_parts(curr_orig_key) != get_key_parts(curr_altered_key)) {
+ if (curr_orig_key->user_defined_key_parts !=
+ curr_altered_key->user_defined_key_parts) {
if (print_error) {
sql_print_error(
"keys have different number of parts, %d, %d",
- get_key_parts(curr_orig_key),
- get_key_parts(curr_altered_key));
+ curr_orig_key->user_defined_key_parts,
+ curr_altered_key->user_defined_key_parts);
}
retval = false;
goto cleanup;
@@ -105,7 +106,7 @@ static bool tables_have_same_keys(
//
// now verify that each field in the key is the same
//
- for (uint32_t j = 0; j < get_key_parts(curr_orig_key); j++) {
+ for (uint32_t j = 0; j < curr_orig_key->user_defined_key_parts; j++) {
KEY_PART_INFO* curr_orig_part = &curr_orig_key->key_part[j];
KEY_PART_INFO* curr_altered_part = &curr_altered_key->key_part[j];
Field* curr_orig_field = curr_orig_part->field;
diff --git a/storage/tokudb/ha_tokudb_update.cc b/storage/tokudb/ha_tokudb_update.cc
index fabd1a82d0c..23de81f3d8a 100644
--- a/storage/tokudb/ha_tokudb_update.cc
+++ b/storage/tokudb/ha_tokudb_update.cc
@@ -453,7 +453,7 @@ static bool check_all_update_expressions(
static bool full_field_in_key(TABLE* table, Field* field) {
assert_always(table->s->primary_key < table->s->keys);
KEY* key = &table->s->key_info[table->s->primary_key];
- for (uint i = 0; i < get_key_parts(key); i++) {
+ for (uint i = 0; i < key->user_defined_key_parts; i++) {
KEY_PART_INFO* key_part = &key->key_part[i];
if (strcmp(field->field_name, key_part->field->field_name) == 0) {
return key_part->length == field->field_length;
@@ -519,7 +519,7 @@ static bool check_point_update(Item* conds, TABLE* table) {
if (bitmap_init(&pk_fields, NULL, table->s->fields, FALSE)) // 1 -> failure
return false;
KEY *key = &table->s->key_info[table->s->primary_key];
- for (uint i = 0; i < get_key_parts(key); i++)
+ for (uint i = 0; i < key->user_defined_key_parts; i++)
bitmap_set_bit(&pk_fields, key->key_part[i].field->field_index);
switch (conds->type()) {
diff --git a/storage/tokudb/hatoku_cmp.cc b/storage/tokudb/hatoku_cmp.cc
index a5e0874505a..d400c3f7bd3 100644
--- a/storage/tokudb/hatoku_cmp.cc
+++ b/storage/tokudb/hatoku_cmp.cc
@@ -1010,7 +1010,7 @@ static int create_toku_key_descriptor_for_key(KEY* key, uchar* buf) {
uchar* pos = buf;
uint32_t num_bytes_in_field = 0;
uint32_t charset_num = 0;
- for (uint i = 0; i < get_key_parts(key); i++){
+ for (uint i = 0; i < key->user_defined_key_parts; i++) {
Field* field = key->key_part[i].field;
//
// The first byte states if there is a null byte
@@ -1881,7 +1881,7 @@ static uint32_t pack_desc_pk_offset_info(
bool is_constant_offset = true;
uint32_t offset = 0;
- for (uint i = 0; i < get_key_parts(prim_key); i++) {
+ for (uint i = 0; i < prim_key->user_defined_key_parts; i++) {
KEY_PART_INFO curr = prim_key->key_part[i];
uint16 curr_field_index = curr.field->field_index;
@@ -2503,8 +2503,8 @@ static uint32_t create_toku_secondary_key_pack_descriptor (
//
// store number of parts
//
- assert_always(get_key_parts(prim_key) < 128);
- pos[0] = 2 * get_key_parts(prim_key);
+ assert_always(prim_key->user_defined_key_parts < 128);
+ pos[0] = 2 * prim_key->user_defined_key_parts;
pos++;
//
// for each part, store if it is a fixed field or var field
@@ -2514,7 +2514,7 @@ static uint32_t create_toku_secondary_key_pack_descriptor (
//
pk_info = pos;
uchar* tmp = pos;
- for (uint i = 0; i < get_key_parts(prim_key); i++) {
+ for (uint i = 0; i < prim_key->user_defined_key_parts; i++) {
tmp += pack_desc_pk_info(
tmp,
kc_info,
@@ -2525,11 +2525,11 @@ static uint32_t create_toku_secondary_key_pack_descriptor (
//
// asserting that we moved forward as much as we think we have
//
- assert_always(tmp - pos == (2 * get_key_parts(prim_key)));
+ assert_always(tmp - pos == (2 * prim_key->user_defined_key_parts));
pos = tmp;
}
- for (uint i = 0; i < get_key_parts(key_info); i++) {
+ for (uint i = 0; i < key_info->user_defined_key_parts; i++) {
KEY_PART_INFO curr_kpi = key_info->key_part[i];
uint16 field_index = curr_kpi.field->field_index;
Field* field = table_share->field[field_index];
diff --git a/storage/tokudb/hatoku_defines.h b/storage/tokudb/hatoku_defines.h
index 273ec2d0cee..25991e4c438 100644
--- a/storage/tokudb/hatoku_defines.h
+++ b/storage/tokudb/hatoku_defines.h
@@ -36,10 +36,8 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "sql_class.h"
#include "sql_show.h"
#include "discover.h"
-
-#if (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
-#include <binlog.h>
-#endif
+//#include <binlog.h>
+#include "debug_sync.h"
#undef PACKAGE
#undef VERSION
diff --git a/storage/tokudb/hatoku_hton.cc b/storage/tokudb/hatoku_hton.cc
index 2fef0b81ee7..2a74cc47360 100644
--- a/storage/tokudb/hatoku_hton.cc
+++ b/storage/tokudb/hatoku_hton.cc
@@ -394,6 +394,16 @@ static int tokudb_init_func(void *p) {
db_env->set_errcall(db_env, tokudb_print_error);
db_env->set_errpfx(db_env, tokudb_hton_name);
+ // Handle deprecated options
+ if (tokudb::sysvars::pk_insert_mode(NULL) != 1) {
+ TOKUDB_TRACE("Using tokudb_pk_insert_mode is deprecated and the "
+ "parameter may be removed in future releases. "
+ "tokudb_pk_insert_mode=0 is now forbidden. "
+ "See documentation and release notes for details");
+ if (tokudb::sysvars::pk_insert_mode(NULL) < 1)
+ tokudb::sysvars::set_pk_insert_mode(NULL, 1);
+ }
+
//
// set default comparison functions
//
@@ -662,6 +672,7 @@ int tokudb_end(handlerton* hton, ha_panic_function type) {
// count the total number of prepared txn's that we discard
long total_prepared = 0;
#if TOKU_INCLUDE_XA
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "begin XA cleanup");
while (1) {
// get xid's
const long n_xid = 1;
@@ -686,6 +697,7 @@ int tokudb_end(handlerton* hton, ha_panic_function type) {
}
total_prepared += n_prepared;
}
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "end XA cleanup");
#endif
error = db_env->close(
db_env,
@@ -910,19 +922,25 @@ static int tokudb_rollback(handlerton * hton, THD * thd, bool all) {
#if TOKU_INCLUDE_XA
static bool tokudb_sync_on_prepare(void) {
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
// skip sync of log if fsync log period > 0
- if (tokudb::sysvars::fsync_log_period > 0)
+ if (tokudb::sysvars::fsync_log_period > 0) {
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit");
return false;
- else
+ } else {
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit");
return true;
+ }
}
static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
TOKUDB_DBUG_ENTER("");
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
int r = 0;
// if tokudb_support_xa is disable, just return
if (!tokudb::sysvars::support_xa(thd)) {
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
TOKUDB_DBUG_RETURN(r);
}
@@ -932,7 +950,7 @@ static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
if (txn) {
uint32_t syncflag = tokudb_sync_on_prepare() ? 0 : DB_TXN_NOSYNC;
TOKUDB_TRACE_FOR_FLAGS(
- TOKUDB_DEBUG_TXN,
+ TOKUDB_DEBUG_XA,
"doing txn prepare:%d:%p",
all,
txn);
@@ -945,15 +963,18 @@ static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
// test hook to induce a crash on a debug build
DBUG_EXECUTE_IF("tokudb_crash_prepare_after", DBUG_SUICIDE(););
} else {
- TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_TXN, "nothing to prepare %d", all);
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "nothing to prepare %d", all);
}
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
TOKUDB_DBUG_RETURN(r);
}
static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len) {
TOKUDB_DBUG_ENTER("");
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
int r = 0;
if (len == 0 || xid_list == NULL) {
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", 0);
TOKUDB_DBUG_RETURN(0);
}
long num_returned = 0;
@@ -964,11 +985,13 @@ static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len) {
&num_returned,
DB_NEXT);
assert_always(r == 0);
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %ld", num_returned);
TOKUDB_DBUG_RETURN((int)num_returned);
}
static int tokudb_commit_by_xid(handlerton* hton, XID* xid) {
TOKUDB_DBUG_ENTER("");
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
int r = 0;
DB_TXN* txn = NULL;
TOKU_XA_XID* toku_xid = (TOKU_XA_XID*)xid;
@@ -981,11 +1004,13 @@ static int tokudb_commit_by_xid(handlerton* hton, XID* xid) {
r = 0;
cleanup:
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
TOKUDB_DBUG_RETURN(r);
}
static int tokudb_rollback_by_xid(handlerton* hton, XID* xid) {
TOKUDB_DBUG_ENTER("");
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
int r = 0;
DB_TXN* txn = NULL;
TOKU_XA_XID* toku_xid = (TOKU_XA_XID*)xid;
@@ -998,6 +1023,7 @@ static int tokudb_rollback_by_xid(handlerton* hton, XID* xid) {
r = 0;
cleanup:
+ TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
TOKUDB_DBUG_RETURN(r);
}
diff --git a/storage/tokudb/hatoku_hton.h b/storage/tokudb/hatoku_hton.h
index 37c61be849d..ade7be128a5 100644
--- a/storage/tokudb/hatoku_hton.h
+++ b/storage/tokudb/hatoku_hton.h
@@ -199,14 +199,4 @@ void tokudb_pretty_left_key(const DB* db, const DBT* key, String* out);
void tokudb_pretty_right_key(const DB* db, const DBT* key, String* out);
const char *tokudb_get_index_name(DB* db);
-inline uint get_key_parts(const KEY *key) {
-#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
- (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799) || \
- (100009 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099)
- return key->user_defined_key_parts;
-#else
- return key->key_parts;
-#endif
-}
-
#endif //#ifdef _HATOKU_HTON
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result
index f4c0279d2fa..59b05ea4dfb 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result
@@ -1,4 +1,6 @@
SET SESSION tokudb_pk_insert_mode = 2;
+Warnings:
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
include/master-slave.inc
[connection master]
==========MASTER==========
diff --git a/storage/tokudb/mysql-test/tokudb/r/background_job_manager.result b/storage/tokudb/mysql-test/tokudb/r/background_job_manager.result
index 388cf47d241..5769ee74071 100644
--- a/storage/tokudb/mysql-test/tokudb/r/background_job_manager.result
+++ b/storage/tokudb/mysql-test/tokudb/r/background_job_manager.result
Binary files differ
diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result
index 190581eddae..6f9592ddc1f 100644
--- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result
+++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result
@@ -10,8 +10,10 @@ select * from information_schema.tokudb_lock_waits;
requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name
set autocommit=0;
set tokudb_prelock_empty=OFF;
+set tokudb_lock_timeout=600000;
insert into t values (1);
set autocommit=0;
+set tokudb_lock_timeout=600000;
insert into t values (1);
select * from information_schema.tokudb_locks;
locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
@@ -38,9 +40,11 @@ locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right lo
select * from information_schema.tokudb_lock_waits;
requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name
set autocommit=0;
+set tokudb_lock_timeout=600000;
set tokudb_prelock_empty=OFF;
replace into t values (1);
set autocommit=0;
+set tokudb_lock_timeout=600000;
replace into t values (1);
select * from information_schema.tokudb_locks;
locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_timeout.result b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_timeout.result
index 13cdad7a438..ce8f7d2d7ec 100644
--- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_timeout.result
+++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_timeout.result
@@ -12,7 +12,9 @@ set autocommit=0;
set tokudb_prelock_empty=OFF;
insert into t values (1);
set autocommit=0;
-insert into t values (1);
+set tokudb_prelock_empty=OFF;
+set tokudb_lock_timeout=60000;
+replace into t values (1);
select * from information_schema.tokudb_locks;
locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main
diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks.result b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks.result
index a07f7ba52fe..070f42b30de 100644
--- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks.result
+++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks.result
@@ -3,8 +3,8 @@ set tokudb_prelock_empty=false;
drop table if exists t;
create table t (id int primary key);
set autocommit=0;
-select * from information_schema.tokudb_locks;
-locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
+select locks_dname,locks_key_left,locks_key_right,locks_table_schema,locks_table_name,locks_table_dictionary_name from information_schema.tokudb_locks where locks_table_schema='test' and locks_table_name='t' and locks_table_dictionary_name='main' order by locks_key_left, locks_key_right;
+locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
insert into t values (1);
insert into t values (3);
insert into t values (5);
@@ -12,17 +12,17 @@ set autocommit=0;
insert into t values (2);
insert into t values (4);
insert into t values (6);
-select * from information_schema.tokudb_locks order by locks_trx_id,locks_key_left;
-locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
-TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main
-TRX_ID MYSQL_ID ./test/t-main 0003000000 0003000000 test t main
-TRX_ID MYSQL_ID ./test/t-main 0005000000 0005000000 test t main
-TRX_ID MYSQL_ID ./test/t-main 0002000000 0002000000 test t main
-TRX_ID MYSQL_ID ./test/t-main 0004000000 0004000000 test t main
-TRX_ID MYSQL_ID ./test/t-main 0006000000 0006000000 test t main
+select locks_dname,locks_key_left,locks_key_right,locks_table_schema,locks_table_name,locks_table_dictionary_name from information_schema.tokudb_locks where locks_table_schema='test' and locks_table_name='t' and locks_table_dictionary_name='main' order by locks_key_left, locks_key_right;
+locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
+./test/t-main 0001000000 0001000000 test t main
+./test/t-main 0002000000 0002000000 test t main
+./test/t-main 0003000000 0003000000 test t main
+./test/t-main 0004000000 0004000000 test t main
+./test/t-main 0005000000 0005000000 test t main
+./test/t-main 0006000000 0006000000 test t main
commit;
commit;
-select * from information_schema.tokudb_locks;
-locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
+select locks_dname,locks_key_left,locks_key_right,locks_table_schema,locks_table_name,locks_table_dictionary_name from information_schema.tokudb_locks where locks_table_schema='test' and locks_table_name='t' and locks_table_dictionary_name='main' order by locks_key_left, locks_key_right;
+locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
commit;
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks_released.result b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks_released.result
index 0a5862e9322..aa58437fc69 100644
--- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks_released.result
+++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks_released.result
@@ -9,6 +9,8 @@ set autocommit=0;
set tokudb_prelock_empty=OFF;
insert into t values (1);
set autocommit=0;
+set tokudb_prelock_empty=OFF;
+set tokudb_lock_timeout=600000;
insert into t values (1);
select * from information_schema.tokudb_locks;
locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name
diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_trx.result b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_trx.result
index 63e4816e16e..3a9a936a7a6 100644
--- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_trx.result
+++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_trx.result
@@ -1,23 +1,26 @@
set default_storage_engine='tokudb';
set tokudb_prelock_empty=false;
drop table if exists t;
-select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
+select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
trx_id trx_mysql_thread_id
set autocommit=0;
create table t (id int primary key);
insert into t values (1);
-select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
-trx_id trx_mysql_thread_id
-TXN_ID_DEFAULT CLIENT_ID_DEFAULT
+select count(trx_mysql_thread_id) from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
+count(trx_mysql_thread_id)
+1
commit;
-select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
+select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
trx_id trx_mysql_thread_id
set autocommit=0;
insert into t values (2);
-select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
-trx_id trx_mysql_thread_id
-TXN_ID_A CLIENT_ID_A
+select count(trx_mysql_thread_id) from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
+count(trx_mysql_thread_id)
+1
+select count(trx_mysql_thread_id) from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
+count(trx_mysql_thread_id)
+0
commit;
-select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
+select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
trx_id trx_mysql_thread_id
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb/t/disabled.def b/storage/tokudb/mysql-test/tokudb/t/disabled.def
index 36e63bddab0..f7413a0edc5 100644
--- a/storage/tokudb/mysql-test/tokudb/t/disabled.def
+++ b/storage/tokudb/mysql-test/tokudb/t/disabled.def
@@ -2,25 +2,27 @@ mvcc-19: tokutek
mvcc-20: tokutek
mvcc-27: tokutek
storage_engine_default: tokudb is not the default storage engine
-fast_update_blobs : https://tokutek.atlassian.net/browse/DB-871
+fast_update_binlog_mixed : https://tokutek.atlassian.net/browse/DB-871
+fast_update_binlog_row : https://tokutek.atlassian.net/browse/DB-871
+fast_update_binlog_statement : https://tokutek.atlassian.net/browse/DB-871
fast_update_blobs_fixed_varchar : https://tokutek.atlassian.net/browse/DB-871
+fast_update_blobs : https://tokutek.atlassian.net/browse/DB-871
fast_update_blobs_with_varchar : https://tokutek.atlassian.net/browse/DB-871
fast_update_char : https://tokutek.atlassian.net/browse/DB-871
-fast_update_decr_floor : https://tokutek.atlassian.net/browse/DB-871
-fast_update_int : https://tokutek.atlassian.net/browse/DB-871
-fast_update_int_bounds : https://tokutek.atlassian.net/browse/DB-871
-fast_update_uint_bounds : https://tokutek.atlassian.net/browse/DB-871
-fast_update_varchar : https://tokutek.atlassian.net/browse/DB-871
-fast_upsert_char : https://tokutek.atlassian.net/browse/DB-871
-fast_upsert_int : https://tokutek.atlassian.net/browse/DB-871
-fast_update_binlog_statement : https://tokutek.atlassian.net/browse/DB-871
fast_update_deadlock : https://tokutek.atlassian.net/browse/DB-871
-fast_update_error : https://tokutek.atlassian.net/browse/DB-871
+fast_update_decr_floor : https://tokutek.atlassian.net/browse/DB-871
fast_update_disable_slow_update : https://tokutek.atlassian.net/browse/DB-871
+fast_update_error : https://tokutek.atlassian.net/browse/DB-871
+fast_update_int_bounds : https://tokutek.atlassian.net/browse/DB-871
+fast_update_int : https://tokutek.atlassian.net/browse/DB-871
fast_update_key : https://tokutek.atlassian.net/browse/DB-871
fast_update_sqlmode : https://tokutek.atlassian.net/browse/DB-871
+fast_update_uint_bounds : https://tokutek.atlassian.net/browse/DB-871
+fast_update_varchar : https://tokutek.atlassian.net/browse/DB-871
fast_upsert_bin_pad : https://tokutek.atlassian.net/browse/DB-871
+fast_upsert_char : https://tokutek.atlassian.net/browse/DB-871
fast_upsert_deadlock : https://tokutek.atlassian.net/browse/DB-871
+fast_upsert_int : https://tokutek.atlassian.net/browse/DB-871
fast_upsert_key : https://tokutek.atlassian.net/browse/DB-871
fast_upsert_sqlmode : https://tokutek.atlassian.net/browse/DB-871
fast_upsert_values : https://tokutek.atlassian.net/browse/DB-871
diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test
index 3fd6ddb88fe..012c0af63b5 100644
--- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test
+++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test
@@ -25,10 +25,12 @@ select * from information_schema.tokudb_lock_waits;
connect (conn_a,localhost,root,,);
set autocommit=0;
set tokudb_prelock_empty=OFF; # disable the bulk loader
+set tokudb_lock_timeout=600000; # set lock wait timeout to 10 minutes
insert into t values (1);
connect (conn_b,localhost,root,,);
set autocommit=0;
+set tokudb_lock_timeout=600000; # set lock wait timeout to 10 minutes
send insert into t values (1);
# should find the presence of a lock on 1st transaction
@@ -74,11 +76,13 @@ select * from information_schema.tokudb_lock_waits;
connect (conn_a,localhost,root,,);
set autocommit=0;
+set tokudb_lock_timeout=600000; # set lock wait timeout to 10 minutes
set tokudb_prelock_empty=OFF; # disable the bulk loader
replace into t values (1);
connect (conn_b,localhost,root,,);
set autocommit=0;
+set tokudb_lock_timeout=600000; # set lock wait timeout to 10 minutes
send replace into t values (1);
# should find the presence of a lock on 1st transaction
diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test
index 06923d4ca58..42fb548814f 100644
--- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test
+++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test
@@ -21,11 +21,14 @@ insert into t values (1);
connect (conn_b,localhost,root,,);
set autocommit=0;
-send insert into t values (1);
+set tokudb_prelock_empty=OFF;
+set tokudb_lock_timeout=60000; # set lock wait timeout to 1 minute
+
+send replace into t values (1);
# should find the presence of a lock on 1st transaction
connection default;
-let $wait_condition= select count(*)=1 from information_schema.processlist where info='insert into t values (1)' and state='update';
+let $wait_condition= select count(*)=1 from information_schema.processlist where info='replace into t values (1)' and state='update';
source include/wait_condition.inc;
real_sleep 1; # delay a little to shorten the update -> write row -> lock wait race
@@ -41,7 +44,9 @@ replace_column 1 TRX_ID 2 MYSQL_ID;
select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
connection conn_a;
-sleep 5; # sleep longer than the lock timer to force a lock timeout on txn_b
+real_sleep 45; # sleep till we get close to timeout since wait_condition will timeout @ 30 seconds
+let $wait_condition= select count(*)=0 from information_schema.processlist where info='replace into t values (1)' and state='update';
+source include/wait_condition.inc;
commit;
# verify that the lock on the 1st transaction is released and replaced by the lock for the 2nd transaction
diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test
index e5a67559b1a..8f205ad7f45 100644
--- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test
+++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test
@@ -12,7 +12,7 @@ set autocommit=0;
let $default_id=`select connection_id()`;
# should be empty
-select * from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right,locks_table_schema,locks_table_name,locks_table_dictionary_name from information_schema.tokudb_locks where locks_table_schema='test' and locks_table_name='t' and locks_table_dictionary_name='main' order by locks_key_left, locks_key_right;
insert into t values (1);
insert into t values (3);
@@ -28,8 +28,7 @@ insert into t values (6);
# should find 3 locks for 2 transactions
connection default;
-replace_column 1 TRX_ID 2 MYSQL_ID;
-eval select * from information_schema.tokudb_locks order by locks_trx_id,locks_key_left;
+eval select locks_dname,locks_key_left,locks_key_right,locks_table_schema,locks_table_name,locks_table_dictionary_name from information_schema.tokudb_locks where locks_table_schema='test' and locks_table_name='t' and locks_table_dictionary_name='main' order by locks_key_left, locks_key_right;
connection conn_a;
commit;
@@ -37,9 +36,9 @@ connection default;
commit;
# should be empty
-select * from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right,locks_table_schema,locks_table_name,locks_table_dictionary_name from information_schema.tokudb_locks where locks_table_schema='test' and locks_table_name='t' and locks_table_dictionary_name='main' order by locks_key_left, locks_key_right;
commit;
disconnect conn_a;
-drop table t; \ No newline at end of file
+drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test
index 19f413e79f9..bbb0533e784 100644
--- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test
+++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test
@@ -27,6 +27,8 @@ insert into t values (1);
connect (conn_b,localhost,root,,);
set autocommit=0;
+set tokudb_prelock_empty=OFF;
+set tokudb_lock_timeout=600000; # set lock wait timeout to 10 minutes
send insert into t values (1);
diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test
index d3c2636ba54..517280391c4 100644
--- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test
+++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test
@@ -8,35 +8,32 @@ drop table if exists t;
enable_warnings;
# should be empty
-select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
+select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
# should have my txn
-let $default_id=`select connection_id()`;
set autocommit=0;
create table t (id int primary key);
insert into t values (1);
-replace_column 1 TXN_ID_DEFAULT 2 CLIENT_ID_DEFAULT;
-eval select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
+select count(trx_mysql_thread_id) from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
# should be empty
commit;
-select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
+select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
connect(conn_a,localhost,root,,);
-let a_id=`select connection_id()`;
set autocommit=0;
insert into t values (2);
+select count(trx_mysql_thread_id) from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
connection default;
-replace_column 1 TXN_ID_A 2 CLIENT_ID_A;
-eval select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
+select count(trx_mysql_thread_id) from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
connection conn_a;
commit;
connection default;
# should be empty
-select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx;
+select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx where trx_mysql_thread_id in(connection_id());
disconnect conn_a;
-drop table t; \ No newline at end of file
+drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/5585.result b/storage/tokudb/mysql-test/tokudb_bugs/r/5585.result
index 608afa00370..1008764148b 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/5585.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/5585.result
@@ -10,12 +10,6 @@ insert into t1 select t1.file_id+40, t1.file_number+40 from t1;
insert into t1 select t1.file_id+100, t1.file_number+100 from t1;
insert into t1 select t1.file_id+200, t1.file_number+200 from t1;
insert into t1 select t1.file_id+400, t1.file_number+400 from t1;
-insert into t1 select t1.file_id+1000, t1.file_number+1000 from t1;
-insert into t1 select t1.file_id+10000, t1.file_number+10000 from t1;
-insert into t1 select t1.file_id+100000, t1.file_number+100000 from t1;
-insert into t1 select t1.file_id+1000000, t1.file_number+1000000 from t1;
-insert into t1 select t1.file_id+10000000, t1.file_number+10000000 from t1;
-insert into t1 select t1.file_id+100000000, t1.file_number+100000000 from t1;
create table t2 (
file_id bigint unsigned not null,
country char(2) not null,
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db233.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db233.result
new file mode 100644
index 00000000000..e5808f52e69
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db233.result
@@ -0,0 +1,37 @@
+SET SESSION tokudb_auto_analyze = 0;
+SET SESSION tokudb_analyze_in_background = 0;
+CREATE TABLE t1(
+`id` int(10) unsigned NOT NULL,
+`k` int(10) unsigned NOT NULL DEFAULT '0',
+`c` char(120) NOT NULL DEFAULT '',
+`pad` char(60) NOT NULL DEFAULT '',
+KEY `xid` (`id`),
+KEY `k` (`k`)
+) ENGINE=TokuDB DEFAULT CHARSET=latin1;
+INSERT INTO t1 VALUES(1, 1, '1', '1'), (2, 2, '2', '2'), (3, 3, '3', '3'), (4, 4, '4', '4'),
+(5, 5, '5', '5'), (6, 6, '6', '6'), (6, 6, '6', '6'), (7, 7, '7', '7'),
+(8, 8, '8', '8'), (9, 9, '9', '9'), (10, 10, '10', '10'), (11, 11, '11', '11');
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+set DEBUG_SYNC = 'tokudb_icp_desc_scan_invalidate SIGNAL hit1 WAIT_FOR done1';
+SELECT c FROM t1 WHERE id BETWEEN 5 AND 8 ORDER BY id DESC;
+set DEBUG_SYNC = 'now WAIT_FOR hit1';
+set DEBUG_SYNC = 'now SIGNAL done1';
+c
+8
+7
+6
+6
+5
+set DEBUG_SYNC = 'tokudb_icp_asc_scan_out_of_range SIGNAL hit2 WAIT_FOR done2';
+SELECT c FROM t1 WHERE id BETWEEN 5 AND 8 ORDER BY id ASC;
+set DEBUG_SYNC = 'now WAIT_FOR hit2';
+set DEBUG_SYNC = 'now SIGNAL done2';
+c
+5
+6
+6
+7
+8
+drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db397_delete_trigger.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db397_delete_trigger.result
index da82fa445e8..c8565fb4b2b 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db397_delete_trigger.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db397_delete_trigger.result
@@ -25,11 +25,11 @@ select col1,action from t1_audit;
col1 action
0 DUMMY
1 BEFORE DEL
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
locks_dname locks_key_left locks_key_right
-./test/t1_audit-main 0200000000000000 0200000000000000
-./test/t1-main ff01000000 0101000000
./test/t1-main 0001000000 0001000000
+./test/t1-main ff01000000 0101000000
+./test/t1_audit-main 0200000000000000 0200000000000000
commit;
drop trigger t1_trigger;
create trigger t1_trigger after delete on t1
@@ -46,11 +46,11 @@ col1 action
0 DUMMY
1 BEFORE DEL
2 AFTER DELE
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
locks_dname locks_key_left locks_key_right
-./test/t1_audit-main 0300000000000000 0300000000000000
-./test/t1-main ff02000000 0102000000
./test/t1-main 0002000000 0002000000
+./test/t1-main ff02000000 0102000000
+./test/t1_audit-main 0300000000000000 0300000000000000
commit;
drop trigger t1_trigger;
drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db397_insert_trigger.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db397_insert_trigger.result
index 41765a6fcd3..aef99a9adcd 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db397_insert_trigger.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db397_insert_trigger.result
@@ -25,10 +25,10 @@ select col1,action from t1_audit;
col1 action
0 DUMMY
1 BEFORE INSERT
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
locks_dname locks_key_left locks_key_right
-./test/t1_audit-main 0200000000000000 0200000000000000
./test/t1-main 0001000000 0001000000
+./test/t1_audit-main 0200000000000000 0200000000000000
commit;
drop trigger t1_trigger;
create trigger t1_trigger after insert on t1
@@ -46,10 +46,10 @@ col1 action
0 DUMMY
1 BEFORE INSERT
2 AFTER INSERT
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
locks_dname locks_key_left locks_key_right
-./test/t1_audit-main 0300000000000000 0300000000000000
./test/t1-main 0002000000 0002000000
+./test/t1_audit-main 0300000000000000 0300000000000000
commit;
drop trigger t1_trigger;
drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db397_update_trigger.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db397_update_trigger.result
index c197430ad25..d9b944d3849 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db397_update_trigger.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db397_update_trigger.result
@@ -25,11 +25,11 @@ select col1,action from t1_audit;
col1 action
0 DUMMY
1 BEFORE UPDATE
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
locks_dname locks_key_left locks_key_right
-./test/t1_audit-main 0200000000000000 0200000000000000
-./test/t1-main ff01000000 0101000000
./test/t1-main 0001000000 0001000000
+./test/t1-main ff01000000 0101000000
+./test/t1_audit-main 0200000000000000 0200000000000000
commit;
drop trigger t1_trigger;
create trigger t1_trigger after update on t1
@@ -48,11 +48,11 @@ col1 action
0 DUMMY
1 BEFORE UPDATE
2 AFTER UPDATE
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
locks_dname locks_key_left locks_key_right
-./test/t1_audit-main 0300000000000000 0300000000000000
-./test/t1-main ff02000000 0102000000
./test/t1-main 0002000000 0002000000
+./test/t1-main ff02000000 0102000000
+./test/t1_audit-main 0300000000000000 0300000000000000
commit;
drop trigger t1_trigger;
drop table t1, t1_audit;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db739_replace.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db739_replace.result
index 2bf141add9a..58a4ed6708a 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db739_replace.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db739_replace.result
@@ -100010,5 +100010,7 @@ insert into t (id,a) values (999,98);
insert into t (id,a) values (999,99);
delete from t where id=404;
set tokudb_pk_insert_mode=2;
+Warnings:
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
replace into t values (404,0,0,0);
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1_pick.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1_pick.result
index 9f452c3f318..54e85a4254c 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1_pick.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1_pick.result
@@ -17,5 +17,5 @@ test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 7 NULL NULL BTREE
-t 1 x 1 x A 7 NULL NULL YES BTREE
+t 1 x 1 x A 3 NULL NULL YES BTREE
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db757_part_alter_analyze.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db757_part_alter_analyze.result
index a828561081e..de82556aba2 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db757_part_alter_analyze.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db757_part_alter_analyze.result
@@ -46,5 +46,5 @@ show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 9 NULL NULL BTREE
t 1 x 1 x A 9 NULL NULL YES BTREE
-t 1 y 1 y A 4 NULL NULL YES BTREE
+t 1 y 1 y A 9 NULL NULL YES BTREE
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db917.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db917.result
new file mode 100644
index 00000000000..9c29033429e
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db917.result
@@ -0,0 +1,14 @@
+drop table if exists t1;
+set @orig_table_open_cache = @@global.table_open_cache;
+create table t1(a int) engine = tokudb partition by key(a) partitions 2 (partition p0 engine = tokudb, partition p1 engine = tokudb);
+lock tables t1 read;
+set @@global.table_open_cache = 1;
+begin;
+insert into t1 values(1),(1);
+select * from t1 where c like _ucs2 0x039C0025 collate ucs2_unicode_ci;
+ERROR 42S22: Unknown column 'c' in 'where clause'
+create table t1(c1 binary (1), c2 varbinary(1));
+ERROR 42S01: Table 't1' already exists
+unlock tables;
+drop table t1;
+set @@global.table_open_cache = @orig_table_open_cache;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db938.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db938.result
new file mode 100644
index 00000000000..6ec3a2c8079
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db938.result
@@ -0,0 +1,34 @@
+set @orig_auto_analyze = @@session.tokudb_auto_analyze;
+set @orig_in_background = @@session.tokudb_analyze_in_background;
+set @orig_mode = @@session.tokudb_analyze_mode;
+set @orig_throttle = @@session.tokudb_analyze_throttle;
+set @orig_time = @@session.tokudb_analyze_time;
+set @orig_scale_percent = @@global.tokudb_cardinality_scale_percent;
+set @orig_default_storage_engine = @@session.default_storage_engine;
+set @orig_pause_background_job_manager = @@global.tokudb_debug_pause_background_job_manager;
+set session default_storage_engine = 'tokudb';
+set session tokudb_auto_analyze = 1;
+set session tokudb_analyze_in_background = 1;
+set session tokudb_analyze_mode = tokudb_analyze_standard;
+set session tokudb_analyze_throttle = 0;
+set session tokudb_analyze_time = 0;
+set global tokudb_cardinality_scale_percent = DEFAULT;
+set global tokudb_debug_pause_background_job_manager = TRUE;
+create table t1 (a int not null auto_increment, b int, c int, primary key(a), key kb(b), key kc(c), key kabc(a,b,c), key kab(a,b), key kbc(b,c));
+insert into t1(b,c) values(0,0), (1,1), (2,2), (3,3);
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+database_name table_name job_type job_params scheduler
+test t1 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; AUTO
+set DEBUG_SYNC = 'tokudb_after_truncate_all_dictionarys SIGNAL closed WAIT_FOR done';
+TRUNCATE TABLE t1;
+set global tokudb_debug_pause_background_job_manager = FALSE;
+set DEBUG_SYNC = 'now SIGNAL done';
+drop table t1;
+set session tokudb_auto_analyze = @orig_auto_analyze;
+set session tokudb_analyze_in_background = @orig_in_background;
+set session tokudb_analyze_mode = @orig_mode;
+set session tokudb_analyze_throttle = @orig_throttle;
+set session tokudb_analyze_time = @orig_time;
+set global tokudb_cardinality_scale_percent = @orig_scale_percent;
+set session default_storage_engine = @orig_default_storage_engine;
+set global tokudb_debug_pause_background_job_manager = @orig_pause_background_job_manager;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db945.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db945.result
new file mode 100644
index 00000000000..b576ce3150d
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db945.result
@@ -0,0 +1,12 @@
+set default_storage_engine='tokudb';
+drop table if exists t1;
+set session tokudb_auto_analyze = 1;
+set session tokudb_analyze_in_background = true;
+set session tokudb_analyze_mode = TOKUDB_ANALYZE_STANDARD;
+set session tokudb_analyze_throttle = 0;
+set session tokudb_analyze_time = 0;
+create table t1(a int, b text(1), c text(1), filler text(1), primary key(a, b(1)), unique key (a, c(1)));
+lock tables t1 write, t1 as a read, t1 as b read;
+insert into t1(a) values(1);
+alter table t1 drop key a;
+unlock tables;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/simple_icp.result b/storage/tokudb/mysql-test/tokudb_bugs/r/simple_icp.result
index 2975d7d3116..6cc499389bb 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/simple_icp.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/simple_icp.result
@@ -110,7 +110,7 @@ a b c d e
5 1 10 NULL NULL
show status like '%Handler_read_prev%';
Variable_name Value
-Handler_read_prev 41
+Handler_read_prev 799
flush status;
show status like '%Handler_read_prev%';
Variable_name Value
@@ -142,7 +142,7 @@ a b c d e
20 1 10 NULL NULL
show status like '%Handler_read_prev%';
Variable_name Value
-Handler_read_prev 21
+Handler_read_prev 399
flush status;
show status like '%Handler_read_next%';
Variable_name Value
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/5585.test b/storage/tokudb/mysql-test/tokudb_bugs/t/5585.test
index 6cc5fb223c0..2489748dfa1 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/5585.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/5585.test
@@ -19,12 +19,6 @@ insert into t1 select t1.file_id+40, t1.file_number+40 from t1;
insert into t1 select t1.file_id+100, t1.file_number+100 from t1;
insert into t1 select t1.file_id+200, t1.file_number+200 from t1;
insert into t1 select t1.file_id+400, t1.file_number+400 from t1;
-insert into t1 select t1.file_id+1000, t1.file_number+1000 from t1;
-insert into t1 select t1.file_id+10000, t1.file_number+10000 from t1;
-insert into t1 select t1.file_id+100000, t1.file_number+100000 from t1;
-insert into t1 select t1.file_id+1000000, t1.file_number+1000000 from t1;
-insert into t1 select t1.file_id+10000000, t1.file_number+10000000 from t1;
-insert into t1 select t1.file_id+100000000, t1.file_number+100000000 from t1;
create table t2 (
file_id bigint unsigned not null,
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db233.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db233.test
new file mode 100644
index 00000000000..8e4c3b73c09
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db233.test
@@ -0,0 +1,70 @@
+# This test for DB-233 tests that icp descending range scans stop properly once
+# it fails to find a key match instead of continuing to scan all the way to the
+# beginning of the index.
+
+-- source include/have_tokudb.inc
+-- source include/have_debug.inc
+-- source include/have_debug_sync.inc
+
+-- enable_query_log
+
+SET SESSION tokudb_auto_analyze = 0;
+SET SESSION tokudb_analyze_in_background = 0;
+
+CREATE TABLE t1(
+ `id` int(10) unsigned NOT NULL,
+ `k` int(10) unsigned NOT NULL DEFAULT '0',
+ `c` char(120) NOT NULL DEFAULT '',
+ `pad` char(60) NOT NULL DEFAULT '',
+ KEY `xid` (`id`),
+ KEY `k` (`k`)
+) ENGINE=TokuDB DEFAULT CHARSET=latin1;
+
+INSERT INTO t1 VALUES(1, 1, '1', '1'), (2, 2, '2', '2'), (3, 3, '3', '3'), (4, 4, '4', '4'),
+(5, 5, '5', '5'), (6, 6, '6', '6'), (6, 6, '6', '6'), (7, 7, '7', '7'),
+(8, 8, '8', '8'), (9, 9, '9', '9'), (10, 10, '10', '10'), (11, 11, '11', '11');
+
+ANALYZE TABLE t1;
+
+# lets flip to another connection
+connect(conn1, localhost, root);
+
+# set up the DEBUG_SYNC point
+set DEBUG_SYNC = 'tokudb_icp_desc_scan_invalidate SIGNAL hit1 WAIT_FOR done1';
+
+# send the query
+send SELECT c FROM t1 WHERE id BETWEEN 5 AND 8 ORDER BY id DESC;
+
+# back to default connection
+connection default;
+
+# wait for the ICP reverse scan to invalidate
+set DEBUG_SYNC = 'now WAIT_FOR hit1';
+
+# lets release and clean up
+set DEBUG_SYNC = 'now SIGNAL done1';
+
+connection conn1;
+reap;
+
+# set up the DEBUG_SYNC point again, but for the out of range
+set DEBUG_SYNC = 'tokudb_icp_asc_scan_out_of_range SIGNAL hit2 WAIT_FOR done2';
+
+# send the query
+send SELECT c FROM t1 WHERE id BETWEEN 5 AND 8 ORDER BY id ASC;
+
+# back to default connection
+connection default;
+
+# wait for the ICP ascending scan to go out of range
+set DEBUG_SYNC = 'now WAIT_FOR hit2';
+
+# lets release and clean up
+set DEBUG_SYNC = 'now SIGNAL done2';
+
+connection conn1;
+reap;
+
+connection default;
+disconnect conn1;
+drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test
index 00751ed2346..79043664607 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test
@@ -28,7 +28,7 @@ start transaction;
delete from t1 where col1 = 1;
select col1,col2 from t1;
select col1,action from t1_audit;
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
# note the locks on t1 and t1_audit
commit;
drop trigger t1_trigger;
@@ -41,10 +41,10 @@ start transaction;
delete from t1 where col1 = 2;
select col1,col2 from t1;
select col1,action from t1_audit;
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
# note the locks on t1 and t1_audit
commit;
drop trigger t1_trigger;
drop table t1;
-drop table t1_audit; \ No newline at end of file
+drop table t1_audit;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test
index f32496e524d..ffe2face9f2 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test
@@ -27,7 +27,7 @@ start transaction;
insert into t1 (col1, col2) values (1,1);
select col1,col2 from t1;
select col1,action from t1_audit;
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
# note the locks on t1 and t1_audit
commit;
drop trigger t1_trigger;
@@ -39,10 +39,10 @@ start transaction;
insert into t1 (col1, col2) values (2,2);
select col1,col2 from t1;
select col1,action from t1_audit;
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
# note the locks on t1 and t1_audit
commit;
drop trigger t1_trigger;
drop table t1;
-drop table t1_audit; \ No newline at end of file
+drop table t1_audit;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test
index f1407e6488a..063a88cb4ab 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test
@@ -29,7 +29,7 @@ start transaction;
update t1 set col2=1000 where col1 = 1;
select col1,col2 from t1;
select col1,action from t1_audit;
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
# check locks on t1 and t1_audit
commit;
drop trigger t1_trigger;
@@ -43,7 +43,7 @@ start transaction;
update t1 set col2=1001 where col1 = 2;
select col1,col2 from t1;
select col1,action from t1_audit;
-select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks;
+select locks_dname,locks_key_left,locks_key_right from information_schema.tokudb_locks order by locks_dname,locks_key_left,locks_key_right;
# check locks on t1 and t1_audit
commit;
drop trigger t1_trigger;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db917.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db917.test
new file mode 100644
index 00000000000..730c91ec838
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db917.test
@@ -0,0 +1,22 @@
+# test DB-917
+# test that table/share open lock timeout does not crash the server on subsequent access
+source include/have_tokudb.inc;
+disable_warnings;
+drop table if exists t1;
+enable_warnings;
+set @orig_table_open_cache = @@global.table_open_cache;
+create table t1(a int) engine = tokudb partition by key(a) partitions 2 (partition p0 engine = tokudb, partition p1 engine = tokudb);
+lock tables t1 read;
+set @@global.table_open_cache = 1;
+begin;
+insert into t1 values(1),(1);
+# when the bug is present, this results in a lock wait timeout
+--error ER_BAD_FIELD_ERROR
+select * from t1 where c like _ucs2 0x039C0025 collate ucs2_unicode_ci;
+# when the bug exists, this results in the assertion
+# kc_info->cp_info[keynr] == NULL in tokudb/ha_tokudb.cc initialize_col_pack_info
+--error ER_TABLE_EXISTS_ERROR
+create table t1(c1 binary (1), c2 varbinary(1));
+unlock tables;
+drop table t1;
+set @@global.table_open_cache = @orig_table_open_cache;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test
new file mode 100644
index 00000000000..f1912faad02
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test
@@ -0,0 +1,76 @@
+# This test for DB-938 tests a race condition where a scheduled background job
+# (analyze) ends up operating on a set of DB* key_file[] in TOKUDB_SHARE that
+# were set to NULL during a TRUNCATE TABLE operation.
+
+-- source include/have_tokudb.inc
+-- source include/have_debug.inc
+-- source include/have_debug_sync.inc
+
+-- enable_query_log
+
+set @orig_auto_analyze = @@session.tokudb_auto_analyze;
+set @orig_in_background = @@session.tokudb_analyze_in_background;
+set @orig_mode = @@session.tokudb_analyze_mode;
+set @orig_throttle = @@session.tokudb_analyze_throttle;
+set @orig_time = @@session.tokudb_analyze_time;
+set @orig_scale_percent = @@global.tokudb_cardinality_scale_percent;
+set @orig_default_storage_engine = @@session.default_storage_engine;
+set @orig_pause_background_job_manager = @@global.tokudb_debug_pause_background_job_manager;
+
+# first, let's set up auto analyze in the background so it triggers on almost any activity
+set session default_storage_engine = 'tokudb';
+set session tokudb_auto_analyze = 1;
+set session tokudb_analyze_in_background = 1;
+set session tokudb_analyze_mode = tokudb_analyze_standard;
+set session tokudb_analyze_throttle = 0;
+set session tokudb_analyze_time = 0;
+set global tokudb_cardinality_scale_percent = DEFAULT;
+
+# in debug build, we can prevent the background job manager from running,
+# let's do it to hold a job from running until we get the TRUNCATE TABLE
+# in action
+set global tokudb_debug_pause_background_job_manager = TRUE;
+
+create table t1 (a int not null auto_increment, b int, c int, primary key(a), key kb(b), key kc(c), key kabc(a,b,c), key kab(a,b), key kbc(b,c));
+
+insert into t1(b,c) values(0,0), (1,1), (2,2), (3,3);
+
+# insert above should have triggered an analyze, but since the bjm is paused,
+# we will see it sitting in the queue
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+
+# lets flip to another connection
+connect(conn1, localhost, root);
+
+# set up the DEBUG_SYNC point
+set DEBUG_SYNC = 'tokudb_after_truncate_all_dictionarys SIGNAL closed WAIT_FOR done';
+
+# send the truncate table
+send TRUNCATE TABLE t1;
+
+# back to default connection
+connection default;
+
+# release the bjm
+set global tokudb_debug_pause_background_job_manager = FALSE;
+
+# if the bug is present, the bjm should crash here within 1/4 of a second
+sleep 5;
+
+# lets release and clean up
+set DEBUG_SYNC = 'now SIGNAL done';
+
+connection conn1;
+reap;
+connection default;
+disconnect conn1;
+drop table t1;
+
+set session tokudb_auto_analyze = @orig_auto_analyze;
+set session tokudb_analyze_in_background = @orig_in_background;
+set session tokudb_analyze_mode = @orig_mode;
+set session tokudb_analyze_throttle = @orig_throttle;
+set session tokudb_analyze_time = @orig_time;
+set global tokudb_cardinality_scale_percent = @orig_scale_percent;
+set session default_storage_engine = @orig_default_storage_engine;
+set global tokudb_debug_pause_background_job_manager = @orig_pause_background_job_manager;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db945.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db945.test
new file mode 100644
index 00000000000..27b0d284484
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db945.test
@@ -0,0 +1,24 @@
+source include/have_tokudb.inc;
+set default_storage_engine='tokudb';
+disable_warnings;
+drop table if exists t1;
+enable_warnings;
+
+set session tokudb_auto_analyze = 1;
+set session tokudb_analyze_in_background = true;
+set session tokudb_analyze_mode = TOKUDB_ANALYZE_STANDARD;
+set session tokudb_analyze_throttle = 0;
+set session tokudb_analyze_time = 0;
+
+create table t1(a int, b text(1), c text(1), filler text(1), primary key(a, b(1)), unique key (a, c(1)));
+lock tables t1 write, t1 as a read, t1 as b read;
+insert into t1(a) values(1);
+alter table t1 drop key a;
+unlock tables;
+
+# wait for the bjm queue to empty
+-- disable_query_log
+let $wait_condition=select count(*)=0 from information_schema.tokudb_background_job_status;
+-- source include/wait_condition.inc
+
+drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/xa-3.test b/storage/tokudb/mysql-test/tokudb_bugs/t/xa-3.test
index 828cf03d2fd..e19291991b6 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/xa-3.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/xa-3.test
@@ -1,6 +1,12 @@
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- source include/have_debug.inc
+# Valgrind would report memory leaks on the intentional crashes
+-- source include/not_valgrind.inc
+# Embedded server does not support crashing
+-- source include/not_embedded.inc
+# Avoid CrashReporter popup on Mac
+-- source include/not_crashrep.inc
--disable_warnings
drop table if exists t1, t2;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/xa-4.test b/storage/tokudb/mysql-test/tokudb_bugs/t/xa-4.test
index 1642cb7ca1b..f1f96711b89 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/xa-4.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/xa-4.test
@@ -1,6 +1,12 @@
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- source include/have_debug.inc
+# Valgrind would report memory leaks on the intentional crashes
+-- source include/not_valgrind.inc
+# Embedded server does not support crashing
+-- source include/not_embedded.inc
+# Avoid CrashReporter popup on Mac
+-- source include/not_crashrep.inc
--disable_warnings
drop table if exists t1, t2;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/xa-6.test b/storage/tokudb/mysql-test/tokudb_bugs/t/xa-6.test
index 312c2d15cd2..c6c11f633e1 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/xa-6.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/xa-6.test
@@ -1,5 +1,11 @@
--source include/have_tokudb.inc
--source include/have_debug.inc
+# Valgrind would report memory leaks on the intentional crashes
+-- source include/not_valgrind.inc
+# Embedded server does not support crashing
+-- source include/not_embedded.inc
+# Avoid CrashReporter popup on Mac
+-- source include/not_crashrep.inc
--disable_warnings
drop table if exists t1;
diff --git a/storage/tokudb/mysql-test/tokudb_parts/t/disabled.def b/storage/tokudb/mysql-test/tokudb_parts/t/disabled.def
index 68d7693612f..90e599cd035 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/t/disabled.def
+++ b/storage/tokudb/mysql-test/tokudb_parts/t/disabled.def
@@ -1,4 +1,2 @@
partition_basic_symlink_tokudb : tokudb_file_per_table is not supported
partition_reorganize_tokudb : tokudb_file_per_table is not supported
-partition_mgm_lc0_tokudb : https://tokutek.atlassian.net/browse/DB-637
-partition_mgm_lc1_tokudb : https://tokutek.atlassian.net/browse/DB-637
diff --git a/storage/tokudb/mysql-test/tokudb_rpl/r/rpl_rfr_disable_on_expl_pk_absence.result b/storage/tokudb/mysql-test/tokudb_rpl/r/rpl_rfr_disable_on_expl_pk_absence.result
new file mode 100644
index 00000000000..981a833aea5
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_rpl/r/rpl_rfr_disable_on_expl_pk_absence.result
@@ -0,0 +1,47 @@
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+call mtr.add_suppression("read free replication is disabled for tokudb table");
+CREATE TABLE t (a int(11), b char(20)) ENGINE = TokuDB;
+INSERT INTO t (a, b) VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd'), (5, 'e');
+SELECT * FROM t;
+a b
+1 a
+2 b
+3 c
+4 d
+5 e
+UPDATE t SET a = a + 10 WHERE b = 'b';
+SELECT * FROM t;
+a b
+1 a
+12 b
+3 c
+4 d
+5 e
+SELECT * FROM t;
+a b
+1 a
+12 b
+3 c
+4 d
+5 e
+UPDATE t SET a = a + 10 WHERE b = 'b';
+SELECT * FROM t;
+a b
+1 a
+22 b
+3 c
+4 d
+5 e
+SELECT * FROM t;
+a b
+1 a
+22 b
+3 c
+4 d
+5 e
+DROP TABLE t;
+include/rpl_end.inc
diff --git a/storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence-slave.opt b/storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence-slave.opt
new file mode 100644
index 00000000000..fb12af6c5bd
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence-slave.opt
@@ -0,0 +1 @@
+--read-only=true --tokudb-rpl-unique-checks=false --tokudb-rpl-lookup-rows=false
diff --git a/storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence.test b/storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence.test
new file mode 100644
index 00000000000..67e77c1511c
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence.test
@@ -0,0 +1,48 @@
+# Test case for bug#1536663
+#
+# When read-free replication is enabled for tokudb and there is no explicit
+# pk for the replicated table, there can be duplicated records in the table
+# on an update operation.
+#
+# Consider this update operation:
+# UPDATE t SET a = a + 10 WHERE b = 'b';
+# The master does rows lookup and updates the rows which values correspond to
+# the condition. The update events are written to binary log with
+# rows values from the master. As rows lookup is forbidden for slave
+# the new rows are added instead of updating corresponding rows.
+#
+# Without the fix there will be several rows with b = 'b' in the table on slave
+# instead of one updated row.
+#
+
+--source include/have_tokudb.inc
+--source include/have_binlog_format_row.inc
+--source include/master-slave.inc
+
+call mtr.add_suppression("read free replication is disabled for tokudb table");
+
+--connection master
+CREATE TABLE t (a int(11), b char(20)) ENGINE = TokuDB;
+INSERT INTO t (a, b) VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd'), (5, 'e');
+
+--sync_slave_with_master
+--sorted_result
+SELECT * FROM t;
+
+--let $i = 2
+--while($i) {
+ --dec $i
+ --connection master
+ UPDATE t SET a = a + 10 WHERE b = 'b';
+ --sorted_result
+ SELECT * FROM t;
+ --sync_slave_with_master
+ --sorted_result
+ SELECT * FROM t;
+}
+
+--connection master
+DROP TABLE t;
+--sync_slave_with_master
+
+--source include/rpl_end.inc
diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_pk_insert_mode_basic.result b/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_pk_insert_mode_basic.result
new file mode 100644
index 00000000000..268c4032626
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_sys_vars/r/tokudb_pk_insert_mode_basic.result
@@ -0,0 +1,85 @@
+SET @orig_global = @@global.tokudb_pk_insert_mode;
+SELECT @orig_global;
+@orig_global
+1
+SET @orig_session = @@session.tokudb_pk_insert_mode;
+SELECT @orig_session;
+@orig_session
+1
+SET GLOBAL tokudb_pk_insert_mode = 10;
+Warnings:
+Warning 1292 Truncated incorrect tokudb_pk_insert_mode value: '10'
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
+SELECT @@global.tokudb_pk_insert_mode;
+@@global.tokudb_pk_insert_mode
+2
+SET GLOBAL tokudb_pk_insert_mode = 0;
+Warnings:
+Warning 131 Using tokudb_pk_insert_mode=0 is deprecated and the parameter may be removed in future releases. Only tokudb_pk_insert_mode=1|2 is allowed.Resettig the value to 1.
+SELECT @@global.tokudb_pk_insert_mode;
+@@global.tokudb_pk_insert_mode
+1
+SET GLOBAL tokudb_pk_insert_mode = DEFAULT;
+Warnings:
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
+SELECT @@global.tokudb_pk_insert_mode;
+@@global.tokudb_pk_insert_mode
+1
+SET GLOBAL tokudb_pk_insert_mode = 'foobar';
+ERROR 42000: Incorrect argument type to variable 'tokudb_pk_insert_mode'
+SELECT @@global.tokudb_pk_insert_mode;
+@@global.tokudb_pk_insert_mode
+1
+SET SESSION tokudb_pk_insert_mode = 10;
+Warnings:
+Warning 1292 Truncated incorrect tokudb_pk_insert_mode value: '10'
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
+SELECT @@session.tokudb_pk_insert_mode;
+@@session.tokudb_pk_insert_mode
+2
+SET SESSION tokudb_pk_insert_mode = 0;
+Warnings:
+Warning 131 Using tokudb_pk_insert_mode=0 is deprecated and the parameter may be removed in future releases. Only tokudb_pk_insert_mode=1|2 is allowed.Resettig the value to 1.
+SELECT @@session.tokudb_pk_insert_mode;
+@@session.tokudb_pk_insert_mode
+1
+SET SESSION tokudb_pk_insert_mode = DEFAULT;
+Warnings:
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
+SELECT @@session.tokudb_pk_insert_mode;
+@@session.tokudb_pk_insert_mode
+1
+SET SESSION tokudb_pk_insert_mode = 'foobar';
+ERROR 42000: Incorrect argument type to variable 'tokudb_pk_insert_mode'
+SELECT @@session.tokudb_pk_insert_mode;
+@@session.tokudb_pk_insert_mode
+1
+SET GLOBAL tokudb_pk_insert_mode = 12;
+Warnings:
+Warning 1292 Truncated incorrect tokudb_pk_insert_mode value: '12'
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
+SET SESSION tokudb_pk_insert_mode = 13;
+Warnings:
+Warning 1292 Truncated incorrect tokudb_pk_insert_mode value: '13'
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
+SELECT @@global.tokudb_pk_insert_mode;
+@@global.tokudb_pk_insert_mode
+2
+SELECT @@session.tokudb_pk_insert_mode;
+@@session.tokudb_pk_insert_mode
+2
+SHOW VARIABLES LIKE 'tokudb_pk_insert_mode';
+Variable_name Value
+tokudb_pk_insert_mode 2
+SET SESSION tokudb_pk_insert_mode = @orig_session;
+Warnings:
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
+SELECT @@session.tokudb_pk_insert_mode;
+@@session.tokudb_pk_insert_mode
+1
+SET GLOBAL tokudb_pk_insert_mode = @orig_global;
+Warnings:
+Warning 131 Using tokudb_pk_insert_mode is deprecated and the parameter may be removed in future releases.
+SELECT @@global.tokudb_pk_insert_mode;
+@@global.tokudb_pk_insert_mode
+1
diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_pk_insert_mode_basic.test b/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_pk_insert_mode_basic.test
new file mode 100644
index 00000000000..1669c7842a9
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_sys_vars/t/tokudb_pk_insert_mode_basic.test
@@ -0,0 +1,51 @@
+--source include/have_tokudb.inc
+--enable_warnings
+
+# Check the default value
+SET @orig_global = @@global.tokudb_pk_insert_mode;
+SELECT @orig_global;
+
+SET @orig_session = @@session.tokudb_pk_insert_mode;
+SELECT @orig_session;
+
+# Test global
+SET GLOBAL tokudb_pk_insert_mode = 10;
+SELECT @@global.tokudb_pk_insert_mode;
+
+SET GLOBAL tokudb_pk_insert_mode = 0;
+SELECT @@global.tokudb_pk_insert_mode;
+
+SET GLOBAL tokudb_pk_insert_mode = DEFAULT;
+SELECT @@global.tokudb_pk_insert_mode;
+
+-- error ER_WRONG_TYPE_FOR_VAR
+SET GLOBAL tokudb_pk_insert_mode = 'foobar';
+SELECT @@global.tokudb_pk_insert_mode;
+
+# Test session
+SET SESSION tokudb_pk_insert_mode = 10;
+SELECT @@session.tokudb_pk_insert_mode;
+
+SET SESSION tokudb_pk_insert_mode = 0;
+SELECT @@session.tokudb_pk_insert_mode;
+
+SET SESSION tokudb_pk_insert_mode = DEFAULT;
+SELECT @@session.tokudb_pk_insert_mode;
+
+-- error ER_WRONG_TYPE_FOR_VAR
+SET SESSION tokudb_pk_insert_mode = 'foobar';
+SELECT @@session.tokudb_pk_insert_mode;
+
+# both
+SET GLOBAL tokudb_pk_insert_mode = 12;
+SET SESSION tokudb_pk_insert_mode = 13;
+SELECT @@global.tokudb_pk_insert_mode;
+SELECT @@session.tokudb_pk_insert_mode;
+SHOW VARIABLES LIKE 'tokudb_pk_insert_mode';
+
+# Clean up
+SET SESSION tokudb_pk_insert_mode = @orig_session;
+SELECT @@session.tokudb_pk_insert_mode;
+
+SET GLOBAL tokudb_pk_insert_mode = @orig_global;
+SELECT @@global.tokudb_pk_insert_mode;
diff --git a/storage/tokudb/tokudb_card.h b/storage/tokudb/tokudb_card.h
index fdf18d4ab12..f649c2d887f 100644
--- a/storage/tokudb/tokudb_card.h
+++ b/storage/tokudb/tokudb_card.h
@@ -27,7 +27,7 @@ namespace tokudb {
uint compute_total_key_parts(TABLE_SHARE *table_share) {
uint total_key_parts = 0;
for (uint i = 0; i < table_share->keys; i++) {
- total_key_parts += get_key_parts(&table_share->key_info[i]);
+ total_key_parts += table_share->key_info[i].user_defined_key_parts;
}
return total_key_parts;
}
@@ -156,13 +156,14 @@ namespace tokudb {
uint orig_key_parts = 0;
for (uint i = 0; i < table_share->keys; i++) {
orig_key_offset[i] = orig_key_parts;
- orig_key_parts += get_key_parts(&table_share->key_info[i]);
+ orig_key_parts += table_share->key_info[i].user_defined_key_parts;
}
// if orig card data exists, then use it to compute new card data
if (error == 0) {
uint next_key_parts = 0;
for (uint i = 0; error == 0 && i < altered_table_share->keys; i++) {
- uint ith_key_parts = get_key_parts(&altered_table_share->key_info[i]);
+ uint ith_key_parts =
+ altered_table_share->key_info[i].user_defined_key_parts;
uint orig_key_index;
if (find_index_of_key(
altered_table_share->key_info[i].name,
diff --git a/storage/tokudb/tokudb_debug.h b/storage/tokudb/tokudb_debug.h
index db66cab050c..46bd65c605a 100644
--- a/storage/tokudb/tokudb_debug.h
+++ b/storage/tokudb/tokudb_debug.h
@@ -50,6 +50,8 @@ static void tokudb_backtrace(void);
#define TOKUDB_DEBUG_UPSERT (1<<12)
#define TOKUDB_DEBUG_CHECK (1<<13)
#define TOKUDB_DEBUG_ANALYZE (1<<14)
+#define TOKUDB_DEBUG_XA (1<<15)
+#define TOKUDB_DEBUG_SHARE (1<<16)
#define TOKUDB_TRACE(_fmt, ...) { \
fprintf(stderr, "%u %s:%u %s " _fmt "\n", tokudb::thread::my_tid(), \
@@ -124,7 +126,6 @@ static void tokudb_backtrace(void);
DBUG_RETURN(r); \
}
-
#define TOKUDB_HANDLER_DBUG_VOID_RETURN { \
if (TOKUDB_UNLIKELY(tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN)) { \
TOKUDB_HANDLER_TRACE("return"); \
@@ -132,6 +133,61 @@ static void tokudb_backtrace(void);
DBUG_VOID_RETURN; \
}
+#define TOKUDB_SHARE_TRACE(_fmt, ...) \
+ fprintf(stderr, "%u %p %s:%u TOUDB_SHARE::%s " _fmt "\n", \
+ tokudb::thread::my_tid(), this, __FILE__, __LINE__, \
+ __FUNCTION__, ##__VA_ARGS__);
+
+#define TOKUDB_SHARE_TRACE_FOR_FLAGS(_flags, _fmt, ...) { \
+ if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(_flags))) { \
+ TOKUDB_SHARE_TRACE(_fmt, ##__VA_ARGS__); \
+ } \
+}
+
+#define TOKUDB_SHARE_DBUG_ENTER(_fmt, ...) { \
+ if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_ENTER) || \
+ (tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
+ TOKUDB_SHARE_TRACE(_fmt, ##__VA_ARGS__); \
+ } \
+} \
+ DBUG_ENTER(__FUNCTION__);
+
+#define TOKUDB_SHARE_DBUG_RETURN(r) { \
+ int rr = (r); \
+ if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
+ (tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE) || \
+ (rr != 0 && (tokudb::sysvars::debug & TOKUDB_DEBUG_ERROR)))) { \
+ TOKUDB_SHARE_TRACE("return %d", rr); \
+ } \
+ DBUG_RETURN(rr); \
+}
+
+#define TOKUDB_SHARE_DBUG_RETURN_DOUBLE(r) { \
+ double rr = (r); \
+ if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
+ (tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
+ TOKUDB_SHARE_TRACE("return %f", rr); \
+ } \
+ DBUG_RETURN(rr); \
+}
+
+#define TOKUDB_SHARE_DBUG_RETURN_PTR(r) { \
+ if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
+ (tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
+ TOKUDB_SHARE_TRACE("return 0x%p", r); \
+ } \
+ DBUG_RETURN(r); \
+}
+
+#define TOKUDB_SHARE_DBUG_VOID_RETURN() { \
+ if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
+ (tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
+ TOKUDB_SHARE_TRACE("return"); \
+ } \
+ DBUG_VOID_RETURN; \
+}
+
+
#define TOKUDB_DBUG_DUMP(s, p, len) \
{ \
TOKUDB_TRACE("%s", s); \
diff --git a/storage/tokudb/tokudb_information_schema.cc b/storage/tokudb/tokudb_information_schema.cc
index 90755834eef..e69a7899b45 100644
--- a/storage/tokudb/tokudb_information_schema.cc
+++ b/storage/tokudb/tokudb_information_schema.cc
@@ -1121,9 +1121,9 @@ void background_job_status_callback(
table->field[3]->store(type, strlen(type), system_charset_info);
table->field[4]->store(params, strlen(params), system_charset_info);
if (user_scheduled)
- table->field[5]->store("USER", sizeof("USER"), system_charset_info);
+ table->field[5]->store("USER", strlen("USER"), system_charset_info);
else
- table->field[5]->store("AUTO", sizeof("AUTO"), system_charset_info);
+ table->field[5]->store("AUTO", strlen("AUTO"), system_charset_info);
field_store_time_t(table->field[6], scheduled_time);
field_store_time_t(table->field[7], started_time);
diff --git a/storage/tokudb/tokudb_sysvars.cc b/storage/tokudb/tokudb_sysvars.cc
index 728b9505219..3ec7a0e1f05 100644
--- a/storage/tokudb/tokudb_sysvars.cc
+++ b/storage/tokudb/tokudb_sysvars.cc
@@ -734,12 +734,45 @@ static MYSQL_THDVAR_ULONGLONG(
~0ULL,
1);
+static const char* deprecated_tokudb_pk_insert_mode =
+ "Using tokudb_pk_insert_mode is deprecated and the "
+ "parameter may be removed in future releases.";
+static const char* deprecated_tokudb_pk_insert_mode_zero =
+ "Using tokudb_pk_insert_mode=0 is deprecated and the "
+ "parameter may be removed in future releases. "
+ "Only tokudb_pk_insert_mode=1|2 is allowed."
+ "Resettig the value to 1.";
+
+static void pk_insert_mode_update(
+ THD* thd,
+ st_mysql_sys_var* var,
+ void* var_ptr,
+ const void* save) {
+ const uint* new_pk_insert_mode = static_cast<const uint*>(save);
+ uint* pk_insert_mode = static_cast<uint*>(var_ptr);
+ if (*new_pk_insert_mode == 0) {
+ push_warning(
+ thd,
+ Sql_condition::WARN_LEVEL_WARN,
+ HA_ERR_WRONG_COMMAND,
+ deprecated_tokudb_pk_insert_mode_zero);
+ *pk_insert_mode = 1;
+ } else {
+ push_warning(
+ thd,
+ Sql_condition::WARN_LEVEL_WARN,
+ HA_ERR_WRONG_COMMAND,
+ deprecated_tokudb_pk_insert_mode);
+ *pk_insert_mode = *new_pk_insert_mode;
+ }
+}
+
static MYSQL_THDVAR_UINT(
pk_insert_mode,
0,
"set the primary key insert mode",
NULL,
- NULL,
+ pk_insert_mode_update,
1,
0,
2,
@@ -1066,6 +1099,9 @@ ulonglong optimize_throttle(THD* thd) {
uint pk_insert_mode(THD* thd) {
return THDVAR(thd, pk_insert_mode);
}
+void set_pk_insert_mode(THD* thd, uint mode) {
+ THDVAR(thd, pk_insert_mode) = mode;
+}
my_bool prelock_empty(THD* thd) {
return (THDVAR(thd, prelock_empty) != 0);
}
diff --git a/storage/tokudb/tokudb_sysvars.h b/storage/tokudb/tokudb_sysvars.h
index 30aac151ac0..b67cf8aa0e2 100644
--- a/storage/tokudb/tokudb_sysvars.h
+++ b/storage/tokudb/tokudb_sysvars.h
@@ -148,6 +148,7 @@ double optimize_index_fraction(THD* thd);
const char* optimize_index_name(THD* thd);
ulonglong optimize_throttle(THD* thd);
uint pk_insert_mode(THD* thd);
+void set_pk_insert_mode(THD* thd, uint mode);
my_bool prelock_empty(THD* thd);
uint read_block_size(THD* thd);
uint read_buf_size(THD* thd);
diff --git a/storage/tokudb/tokudb_thread.h b/storage/tokudb/tokudb_thread.h
index ab1633a16ca..dcb1fd6ec63 100644
--- a/storage/tokudb/tokudb_thread.h
+++ b/storage/tokudb/tokudb_thread.h
@@ -34,6 +34,55 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
namespace tokudb {
namespace thread {
+#if (defined(__MACH__) || defined(__APPLE__)) && _POSIX_TIMERS <= 0
+
+#define _x_min(a, b) ((a) < (b) ? (a) : (b))
+
+#define timed_lock_define(timed_func_name, lock_type_name, lock_func_name) \
+inline int timed_func_name(lock_type_name *mutex, \
+ const struct timespec *abs_timeout) { \
+ int pthread_rc; \
+ struct timespec remaining, slept, ts; \
+ static const int sleep_step = 1000000; \
+ \
+ remaining = *abs_timeout; \
+ while ((pthread_rc = lock_func_name(mutex)) == EBUSY) { \
+ ts.tv_sec = 0; \
+ ts.tv_nsec = (remaining.tv_sec > 0 ? \
+ sleep_step : \
+ _x_min(remaining.tv_nsec,sleep_step)); \
+ nanosleep(&ts, &slept); \
+ ts.tv_nsec -= slept.tv_nsec; \
+ if (ts.tv_nsec <= remaining.tv_nsec) { \
+ remaining.tv_nsec -= ts.tv_nsec; \
+ } else { \
+ remaining.tv_sec--; \
+ remaining.tv_nsec = \
+ (sleep_step - (ts.tv_nsec - remaining.tv_nsec)); \
+ } \
+ if (remaining.tv_sec < 0 || \
+ (!remaining.tv_sec && remaining.tv_nsec <= 0)) { \
+ return ETIMEDOUT; \
+ } \
+ } \
+ \
+ return pthread_rc; \
+}
+
+timed_lock_define(pthread_mutex_timedlock,
+ pthread_mutex_t,
+ pthread_mutex_trylock);
+
+timed_lock_define(pthread_rwlock_timedrdlock,
+ pthread_rwlock_t,
+ pthread_rwlock_tryrdlock);
+
+timed_lock_define(pthread_rwlock_timedwrlock,
+ pthread_rwlock_t,
+ pthread_rwlock_trywrlock);
+
+#endif //(defined(__MACH__) || defined(__APPLE__)) && _POSIX_TIMERS <= 0
+
uint my_tid(void);
// Your basic mutex