summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLuke Chen <luke.chen@mongodb.com>2021-06-11 14:50:25 +1000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2021-06-11 05:24:33 +0000
commit5d0d19f42302f4da6c8e3abb708aefcfeae5ba96 (patch)
tree56e62e96f84fa090b490c8fc97da4dbc894d00e9
parentcfffcff7de7417c29a1d95d7a856c7874b9eebd1 (diff)
downloadmongo-5d0d19f42302f4da6c8e3abb708aefcfeae5ba96.tar.gz
Import wiredtiger: f34a5afbb1d807ac90627555c3452fdec750b592 from branch mongodb-4.4
ref: 7374df6c34..f34a5afbb1 for: 4.4.7 WT-6737 Add explicit checkpoints in test_hs14 to improve predictability WT-7228 Do not call next if we don't find the key in the history store WT-7319 Implement a checkpoint_manager class that creates checkpoints at configurable intervals WT-7320 Add the ability to check database size to the test framework WT-7437 Upgrade documentation to doxygen 1.8.17 WT-7480 Cleanup thread_contexts in the test framework WT-7514 Let tiered subsystem open files on behalf of block manager WT-7523 Test to verify multiple prepared updates either commit/rollback WT-7528 Fix WT_SESSION alter returns EBUSY WT-7531 Treat update restore eviction as a progress WT-7535 Complete CMake Windows support WT-7577 Add sync configuration to flush_tier WT-7579 Disable prefix testing in compatibility test due to snapshot isolation search mismatch WT-7588 Make tiered object id numbers 32 bits WT-7594 Use key_consistent mode on format TS runs WT-7625 Updating throttle configuration to be more user friendly in testing framework WT-7633 Switch doc-update Evergreen task to newer Ubuntu 20.04 distro WT-7634 Disable man page generation in Doxygen WT-7642 Fix insert search flag in history store cursor key order check WT-7643 Update checkpoint decode tool for tiered storage WT-7651 Add synchronization for flush_tier calls WT-7656 Destroy tiered condvar after thread is joined
-rw-r--r--src/third_party/wiredtiger/bench/wtperf/wtperf.c12
-rw-r--r--src/third_party/wiredtiger/build_cmake/README.md13
-rw-r--r--src/third_party/wiredtiger/build_cmake/configs/auto.cmake11
-rw-r--r--src/third_party/wiredtiger/build_cmake/configs/base.cmake36
-rw-r--r--src/third_party/wiredtiger/build_cmake/configs/modes.cmake147
-rw-r--r--src/third_party/wiredtiger/build_cmake/configs/x86/windows/config.cmake40
-rw-r--r--src/third_party/wiredtiger/build_cmake/helpers.cmake2
-rw-r--r--src/third_party/wiredtiger/build_cmake/strict/cl_strict.cmake21
-rw-r--r--src/third_party/wiredtiger/build_posix/Make.base6
-rw-r--r--src/third_party/wiredtiger/build_posix/configure.ac.in19
-rw-r--r--src/third_party/wiredtiger/dist/api_data.py18
-rw-r--r--src/third_party/wiredtiger/dist/filelist1
-rwxr-xr-xsrc/third_party/wiredtiger/dist/s_all19
-rwxr-xr-xsrc/third_party/wiredtiger/dist/s_docs24
-rwxr-xr-xsrc/third_party/wiredtiger/dist/s_docs_plantuml2
-rwxr-xr-xsrc/third_party/wiredtiger/dist/s_export7
-rwxr-xr-xsrc/third_party/wiredtiger/dist/s_string2
-rwxr-xr-xsrc/third_party/wiredtiger/dist/s_tags2
-rwxr-xr-xsrc/third_party/wiredtiger/dist/s_void1
-rw-r--r--src/third_party/wiredtiger/dist/stat_data.py2
-rw-r--r--src/third_party/wiredtiger/dist/test_data.py65
-rw-r--r--src/third_party/wiredtiger/import.data2
-rw-r--r--src/third_party/wiredtiger/src/block/block_addr.c7
-rw-r--r--src/third_party/wiredtiger/src/block/block_ckpt.c7
-rw-r--r--src/third_party/wiredtiger/src/block/block_mgr.c60
-rw-r--r--src/third_party/wiredtiger/src/block/block_open.c15
-rw-r--r--src/third_party/wiredtiger/src/block/block_read.c39
-rw-r--r--src/third_party/wiredtiger/src/block/block_tiered.c121
-rw-r--r--src/third_party/wiredtiger/src/btree/bt_handle.c48
-rw-r--r--src/third_party/wiredtiger/src/btree/bt_import.c4
-rw-r--r--src/third_party/wiredtiger/src/btree/bt_split.c6
-rw-r--r--src/third_party/wiredtiger/src/config/config_def.c109
-rw-r--r--src/third_party/wiredtiger/src/config/test_config.c127
-rw-r--r--src/third_party/wiredtiger/src/conn/conn_dhandle.c19
-rw-r--r--src/third_party/wiredtiger/src/conn/conn_handle.c2
-rw-r--r--src/third_party/wiredtiger/src/conn/conn_tiered.c147
-rw-r--r--src/third_party/wiredtiger/src/cursor/cur_hs.c51
-rw-r--r--src/third_party/wiredtiger/src/docs/Doxyfile2653
-rw-r--r--src/third_party/wiredtiger/src/docs/build-posix.dox2
-rw-r--r--src/third_party/wiredtiger/src/docs/introduction.dox50
-rw-r--r--src/third_party/wiredtiger/src/docs/spell.ok19
-rwxr-xr-xsrc/third_party/wiredtiger/src/docs/tools/doxfilter.py1
-rw-r--r--src/third_party/wiredtiger/src/include/block.h29
-rw-r--r--src/third_party/wiredtiger/src/include/connection.h3
-rw-r--r--src/third_party/wiredtiger/src/include/dhandle.h4
-rw-r--r--src/third_party/wiredtiger/src/include/extern.h32
-rw-r--r--src/third_party/wiredtiger/src/include/stat.h2
-rw-r--r--src/third_party/wiredtiger/src/include/tiered.h22
-rw-r--r--src/third_party/wiredtiger/src/include/wiredtiger.in428
-rw-r--r--src/third_party/wiredtiger/src/include/wt_internal.h2
-rw-r--r--src/third_party/wiredtiger/src/support/stat.c6
-rw-r--r--src/third_party/wiredtiger/src/tiered/tiered_handle.c51
-rw-r--r--src/third_party/wiredtiger/src/tiered/tiered_opener.c99
-rw-r--r--src/third_party/wiredtiger/src/tiered/tiered_work.c41
-rw-r--r--src/third_party/wiredtiger/src/txn/txn.c10
-rw-r--r--src/third_party/wiredtiger/src/txn/txn_recover.c8
-rw-r--r--src/third_party/wiredtiger/test/bloom/test_bloom.c12
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_default.txt5
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_stress.txt7
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/checkpoint_manager.h73
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/core/throttle.h45
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/runtime_monitor.h99
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/test.h16
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/thread_manager.h30
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/timestamp_manager.h4
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/util/api_const.h8
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_model.h63
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_operation.h69
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/workload/thread_context.h199
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_tracking.h4
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_validation.h39
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/workload_generator.h78
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/tests/example_test.cxx13
-rw-r--r--src/third_party/wiredtiger/test/ctest_helpers.cmake129
-rwxr-xr-xsrc/third_party/wiredtiger/test/evergreen.yml13
-rwxr-xr-xsrc/third_party/wiredtiger/test/evergreen/compatibility_test_for_releases.sh5
-rw-r--r--src/third_party/wiredtiger/test/format/wts.c2
-rw-r--r--src/third_party/wiredtiger/test/manydbs/manydbs.c3
-rwxr-xr-xsrc/third_party/wiredtiger/test/suite/run.py2
-rw-r--r--src/third_party/wiredtiger/test/suite/test_hs14.py6
-rw-r--r--src/third_party/wiredtiger/test/suite/test_prepare11.py16
-rw-r--r--src/third_party/wiredtiger/test/suite/test_prepare16.py122
-rwxr-xr-xsrc/third_party/wiredtiger/test/suite/test_tiered02.py67
-rwxr-xr-xsrc/third_party/wiredtiger/test/suite/test_tiered04.py35
-rwxr-xr-xsrc/third_party/wiredtiger/test/suite/test_tiered05.py1
-rwxr-xr-xsrc/third_party/wiredtiger/tools/wt_ckpt_decode.py61
86 files changed, 3877 insertions, 2025 deletions
diff --git a/src/third_party/wiredtiger/bench/wtperf/wtperf.c b/src/third_party/wiredtiger/bench/wtperf/wtperf.c
index b8a36408962..032acd855ac 100644
--- a/src/third_party/wiredtiger/bench/wtperf/wtperf.c
+++ b/src/third_party/wiredtiger/bench/wtperf/wtperf.c
@@ -2583,14 +2583,10 @@ stop_threads(u_int num, WTPERF_THREAD *threads)
static void
recreate_dir(const char *name)
{
- char *buf;
- size_t len;
-
- len = strlen(name) * 2 + 100;
- buf = dmalloc(len);
- testutil_check(__wt_snprintf(buf, len, "rm -rf %s && mkdir %s", name, name));
- testutil_checkfmt(system(buf), "system: %s", buf);
- free(buf);
+ /* Clean the directory if it already exists. */
+ testutil_clean_work_dir(name);
+ /* Recreate the directory. */
+ testutil_make_work_dir(name);
}
static int
diff --git a/src/third_party/wiredtiger/build_cmake/README.md b/src/third_party/wiredtiger/build_cmake/README.md
index 42842bbe524..827242f722d 100644
--- a/src/third_party/wiredtiger/build_cmake/README.md
+++ b/src/third_party/wiredtiger/build_cmake/README.md
@@ -1,5 +1,4 @@
# Building WiredTiger with CMake
-> *CMake support for building wiredtiger is an active work-in-progress. As of this time CMake can **only** build the WiredTiger library for POSIX platforms (Linux & Darwin). We suggest you continue using the SCons buildsystem when compiling for Windows.*
### Build Dependencies
@@ -38,6 +37,16 @@ brew install python
brew install swig
```
+###### Install commands for Windows (using Chocolatey)
+
+```bash
+choco install cmake
+choco install ninja
+choco install ccache --version=3.7.9
+choco install swig
+choco install python --pre
+```
+
### Building the WiredTiger Library
> *The below commands are written for Linux and Darwin hosts. Windows instructions coming soon!*
@@ -105,7 +114,7 @@ $ ccmake .
*The configuration options can also be viewed in `build_cmake/configs/base.cmake`*.
-###### Switching between GCC and Clang
+###### Switching between GCC and Clang (POSIX only)
By default CMake will use your default system compiler (`cc`). If you want to use a specific toolchain you can pass a toolchain file! We have provided a toolchain file for both GCC (`build_cmake/toolchains/gcc.cmake`) and Clang (`build_cmake/toolchains/clang.cmake`). To use either toolchain you can pass the `-DCMAKE_TOOLCHAIN_FILE=` to the CMake configuration step. For example:
diff --git a/src/third_party/wiredtiger/build_cmake/configs/auto.cmake b/src/third_party/wiredtiger/build_cmake/configs/auto.cmake
index 1df2578ed53..01029fd092f 100644
--- a/src/third_party/wiredtiger/build_cmake/configs/auto.cmake
+++ b/src/third_party/wiredtiger/build_cmake/configs/auto.cmake
@@ -27,10 +27,17 @@ if(${u_intmax_size} STREQUAL "")
endif()
endif()
+set(default_offt_def)
+if("${WT_OS}" STREQUAL "windows")
+ set(default_offt_def "typedef int64_t wt_off_t\\;")
+else()
+ set(default_offt_def "typedef off_t wt_off_t\\;")
+endif()
+
config_string(
off_t_decl
"off_t type declaration."
- DEFAULT "typedef off_t wt_off_t\\;"
+ DEFAULT "${default_offt_def}"
INTERNAL
)
@@ -297,7 +304,7 @@ set(wiredtiger_includes_decl)
if(HAVE_SYS_TYPES_H)
list(APPEND wiredtiger_includes_decl "#include <sys/types.h>")
endif()
-if(HAVE_INTTYPES_H)
+if(HAVE_INTTYPES_H AND (NOT "${WT_OS}" STREQUAL "windows"))
list(APPEND wiredtiger_includes_decl "#include <inttypes.h>")
endif()
if(HAVE_STDARG_H)
diff --git a/src/third_party/wiredtiger/build_cmake/configs/base.cmake b/src/third_party/wiredtiger/build_cmake/configs/base.cmake
index a572fcb901e..95919f70f5e 100644
--- a/src/third_party/wiredtiger/build_cmake/configs/base.cmake
+++ b/src/third_party/wiredtiger/build_cmake/configs/base.cmake
@@ -70,7 +70,6 @@ config_bool(
ENABLE_PYTHON
"Configure the python API"
DEFAULT OFF
- DEPENDS "NOT ENABLE_STATIC"
)
config_bool(
@@ -85,6 +84,13 @@ config_bool(
DEFAULT OFF
)
+config_bool(
+ DYNAMIC_CRT
+ "Link with the MSVCRT DLL version"
+ DEFAULT OFF
+ DEPENDS "WT_WIN"
+)
+
config_choice(
SPINLOCK_TYPE
"Set a spinlock type"
@@ -145,10 +151,16 @@ config_bool(
DEPENDS_ERROR ON "Failed to find tcmalloc library"
)
+set(default_optimize_level)
+if("${WT_OS}" STREQUAL "windows")
+ set(default_optimize_level "/O2")
+else()
+ set(default_optimize_level "-O3")
+endif()
config_string(
CC_OPTIMIZE_LEVEL
"CC optimization level"
- DEFAULT "-O3"
+ DEFAULT "${default_optimize_level}"
)
config_string(
@@ -180,7 +192,25 @@ config_string(
if(HAVE_DIAGNOSTIC AND (NOT "${CMAKE_BUILD_TYPE}" STREQUAL "Debug"))
# Avoid setting diagnostic flags if we are building with Debug mode.
# CMakes Debug config sets compilation with debug symbols by default.
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g")
+ if("${CMAKE_C_COMPILER_ID}" STREQUAL "MSVC")
+ # Produce full symbolic debugging information.
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /Z7")
+ # Ensure a PDB file can be generated for debugging symbols.
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /DEBUG")
+ else()
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g")
+ endif()
+endif()
+
+if(WT_WIN)
+ # Check if we a using the dynamic or static run-time library.
+ if(DYNAMIC_CRT)
+ # Use the multithread-specific and DLL-specific version of the run-time library (MSVCRT.lib).
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /MD")
+ else()
+ # Use the multithread, static version of the run-time library.
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /MT")
+ endif()
endif()
if(NOT "${CMAKE_BUILD_TYPE}" STREQUAL "Release")
diff --git a/src/third_party/wiredtiger/build_cmake/configs/modes.cmake b/src/third_party/wiredtiger/build_cmake/configs/modes.cmake
index 29b5672da6d..74bf5cf8188 100644
--- a/src/third_party/wiredtiger/build_cmake/configs/modes.cmake
+++ b/src/third_party/wiredtiger/build_cmake/configs/modes.cmake
@@ -8,72 +8,105 @@
# Establishes build configuration modes we can use when compiling.
-# Create an ASAN build variant
+include(CheckCCompilerFlag)
-# Clang and GCC have slightly different linker names for the ASAN library.
-set(libasan)
-if("${CMAKE_C_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_C_COMPILER_ID}" STREQUAL "AppleClang")
- set(libasan "-static-libsan")
+set(build_modes None Debug Release)
+if("${CMAKE_C_COMPILER_ID}" STREQUAL "MSVC")
+ set(no_omit_frame_flag "/Oy-")
else()
- set(libasan "-static-libasan")
+ set(no_omit_frame_flag "-fno-omit-frame-pointer")
endif()
-set(CMAKE_C_FLAGS_ASAN
- "${CMAKE_C_FLAGS_DEBUG} -fsanitize=address -fno-omit-frame-pointer" CACHE STRING
- "Flags used by the C compiler for ASan build type or configuration." FORCE)
-
-set(CMAKE_CXX_FLAGS_ASAN
- "${CMAKE_CXX_FLAGS_DEBUG} -fsanitize=address -fno-omit-frame-pointer" CACHE STRING
- "Flags used by the C++ compiler for ASan build type or configuration." FORCE)
-
-set(CMAKE_EXE_LINKER_FLAGS_ASAN
- "${CMAKE_SHARED_LINKER_FLAGS_DEBUG} -fsanitize=address ${libasan}" CACHE STRING
- "Linker flags to be used to create executables for ASan build type." FORCE)
-
-set(CMAKE_SHARED_LINKER_FLAGS_ASAN
- "${CMAKE_SHARED_LINKER_FLAGS_DEBUG} -fsanitize=address ${libasan}" CACHE STRING
- "Linker lags to be used to create shared libraries for ASan build type." FORCE)
+# Create an ASAN build variant
+if("${CMAKE_C_COMPILER_ID}" STREQUAL "MSVC")
+ set(asan_link_flags "/fsanitize=address")
+ set(asan_compiler_flag "/fsanitize=address")
+elseif("${CMAKE_C_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_C_COMPILER_ID}" STREQUAL "AppleClang")
+ set(asan_link_flags "-fsanitize=address -static-libsan")
+ set(asan_compiler_flag "-fsanitize=address")
+else()
+ set(asan_link_flags "-fsanitize=address -static-libasan")
+ set(asan_compiler_flag "-fsanitize=address")
+endif()
-mark_as_advanced(
- CMAKE_CXX_FLAGS_ASAN
- CMAKE_C_FLAGS_ASAN
- CMAKE_EXE_LINKER_FLAGS_ASAN
- CMAKE_SHARED_LINKER_FLAGS_ASAN
-)
+# Needs to validate linker flags for the test to also pass.
+set(CMAKE_REQUIRED_FLAGS "${asan_link_flags}")
+# Check if the ASAN compiler flag is available.
+check_c_compiler_flag("${asan_compiler_flag}" HAVE_ADDRESS_SANITIZER)
+unset(CMAKE_REQUIRED_FLAGS)
+
+if(HAVE_ADDRESS_SANITIZER)
+ set(CMAKE_C_FLAGS_ASAN
+ "${CMAKE_C_FLAGS_DEBUG} ${asan_compiler_flag} ${no_omit_frame_flag}" CACHE STRING
+ "Flags used by the C compiler for ASan build type or configuration." FORCE)
+
+ set(CMAKE_CXX_FLAGS_ASAN
+ "${CMAKE_CXX_FLAGS_DEBUG} ${asan_compiler_flag} ${no_omit_frame_flag}" CACHE STRING
+ "Flags used by the C++ compiler for ASan build type or configuration." FORCE)
+
+ set(CMAKE_EXE_LINKER_FLAGS_ASAN
+ "${CMAKE_SHARED_LINKER_FLAGS_DEBUG} ${asan_link_flags}" CACHE STRING
+ "Linker flags to be used to create executables for ASan build type." FORCE)
+
+ set(CMAKE_SHARED_LINKER_FLAGS_ASAN
+ "${CMAKE_SHARED_LINKER_FLAGS_DEBUG} ${asan_link_flags}" CACHE STRING
+ "Linker lags to be used to create shared libraries for ASan build type." FORCE)
+
+ mark_as_advanced(
+ CMAKE_CXX_FLAGS_ASAN
+ CMAKE_C_FLAGS_ASAN
+ CMAKE_EXE_LINKER_FLAGS_ASAN
+ CMAKE_SHARED_LINKER_FLAGS_ASAN
+ )
+ list(APPEND build_modes "ASan")
+endif()
# Create an UBSAN build variant
-
-# Clang doesn't need to link ubsan, this is only a GCC requirement.
-set(libubsan "")
-if("${CMAKE_C_COMPILER_ID}" STREQUAL "GNU")
- set(libubsan "-lubsan")
+if("${CMAKE_C_COMPILER_ID}" STREQUAL "MSVC")
+ set(ubsan_link_flags "/fsanitize=undefined")
+ set(ubsan_compiler_flag "/fsanitize=undefined")
+elseif("${CMAKE_C_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_C_COMPILER_ID}" STREQUAL "AppleClang")
+ set(ubsan_link_flags "-fsanitize=undefined")
+ set(ubsan_compiler_flag "-fsanitize=undefined")
+else()
+ set(ubsan_link_flags "-fsanitize=undefined -lubsan")
+ set(ubsan_compiler_flag "-fsanitize=undefined")
endif()
-set(CMAKE_C_FLAGS_UBSAN
- "${CMAKE_C_FLAGS_DEBUG} -fsanitize=undefined -fno-omit-frame-pointer" CACHE STRING
- "Flags used by the C compiler for UBSan build type or configuration." FORCE)
-
-set(CMAKE_CXX_FLAGS_UBSAN
- "${CMAKE_CXX_FLAGS_DEBUG} -fsanitize=undefined -fno-omit-frame-pointer" CACHE STRING
- "Flags used by the C++ compiler for UBSan build type or configuration." FORCE)
-
-set(CMAKE_EXE_LINKER_FLAGS_UBSAN
- "${CMAKE_SHARED_LINKER_FLAGS_DEBUG} -fsanitize=undefined ${libubsan}" CACHE STRING
- "Linker flags to be used to create executables for UBSan build type." FORCE)
-
-set(CMAKE_SHARED_LINKER_FLAGS_UBSAN
- "${CMAKE_SHARED_LINKER_FLAGS_DEBUG} -fsanitize=undefined ${libubsan}" CACHE STRING
- "Linker lags to be used to create shared libraries for UBSan build type." FORCE)
-
-mark_as_advanced(
- CMAKE_CXX_FLAGS_UBSAN
- CMAKE_C_FLAGS_UBSAN
- CMAKE_EXE_LINKER_FLAGS_UBSAN
- CMAKE_SHARED_LINKER_FLAGS_UBSAN
-)
-
+# Needs to validate linker flags for the test to also pass.
+set(CMAKE_REQUIRED_FLAGS "${ubsan_link_flags}")
+# Check if the UBSAN compiler flag is available.
+check_c_compiler_flag("${ubsan_compiler_flag}" HAVE_UB_SANITIZER)
+unset(CMAKE_REQUIRED_FLAGS)
+
+if(HAVE_UB_SANITIZER)
+ set(CMAKE_C_FLAGS_UBSAN
+ "${CMAKE_C_FLAGS_DEBUG} ${ubsan_compiler_flag} ${no_omit_frame_flag}" CACHE STRING
+ "Flags used by the C compiler for UBSan build type or configuration." FORCE)
+
+ set(CMAKE_CXX_FLAGS_UBSAN
+ "${CMAKE_CXX_FLAGS_DEBUG} ${ubsan_compiler_flag} ${no_omit_frame_flag}" CACHE STRING
+ "Flags used by the C++ compiler for UBSan build type or configuration." FORCE)
+
+ set(CMAKE_EXE_LINKER_FLAGS_UBSAN
+ "${CMAKE_SHARED_LINKER_FLAGS_DEBUG} ${ubsan_link_flags}" CACHE STRING
+ "Linker flags to be used to create executables for UBSan build type." FORCE)
+
+ set(CMAKE_SHARED_LINKER_FLAGS_UBSAN
+ "${CMAKE_SHARED_LINKER_FLAGS_DEBUG} ${ubsan_link_flags}" CACHE STRING
+ "Linker lags to be used to create shared libraries for UBSan build type." FORCE)
+
+ mark_as_advanced(
+ CMAKE_CXX_FLAGS_UBSAN
+ CMAKE_C_FLAGS_UBSAN
+ CMAKE_EXE_LINKER_FLAGS_UBSAN
+ CMAKE_SHARED_LINKER_FLAGS_UBSAN
+ )
+ list(APPEND build_modes "UBSan")
+endif()
if(NOT CMAKE_BUILD_TYPE)
- set(CMAKE_BUILD_TYPE "None" CACHE STRING "Choose the type of build, options are: None Debug Release ASan UBSan." FORCE)
+ string(REPLACE ";" " " build_modes_doc "${build_modes}")
+ set(CMAKE_BUILD_TYPE "None" CACHE STRING "Choose the type of build, options are: ${build_modes_doc}." FORCE)
endif()
-set(CMAKE_CONFIGURATION_TYPES None Debug Release ASan UBSan)
+set(CMAKE_CONFIGURATION_TYPES ${build_modes})
diff --git a/src/third_party/wiredtiger/build_cmake/configs/x86/windows/config.cmake b/src/third_party/wiredtiger/build_cmake/configs/x86/windows/config.cmake
new file mode 100644
index 00000000000..842708a1f9d
--- /dev/null
+++ b/src/third_party/wiredtiger/build_cmake/configs/x86/windows/config.cmake
@@ -0,0 +1,40 @@
+#
+# Public Domain 2014-present MongoDB, Inc.
+# Public Domain 2008-2014 WiredTiger, Inc.
+# All rights reserved.
+#
+# See the file LICENSE for redistribution information
+#
+
+set(WT_ARCH "x86" CACHE STRING "")
+set(WT_OS "windows" CACHE STRING "")
+set(WT_POSIX OFF CACHE BOOL "")
+set(SPINLOCK_TYPE "msvc" CACHE STRING "" FORCE)
+# We force a static compilation to generate a ".lib" file. We can then
+# additionally generate a dll file using a *DEF file.
+set(ENABLE_STATIC ON CACHE BOOL "" FORCE)
+
+# Compile as C code .
+add_compile_options(/TC)
+# Inline expansion.
+add_compile_options(/Ob1)
+# Enable string pooling.
+add_compile_options(/GF)
+# Extern "C" does not throw.
+add_compile_options(/EHsc)
+# Separate functions for linker.
+add_compile_options(/Gy)
+# Conformance: wchar_t is a native type, not a typedef.
+add_compile_options(/Zc:wchar_t)
+# Use the __cdecl calling convention for all functions.
+add_compile_options(/Gd)
+
+# Disable incremental linking.
+string(APPEND win_link_flags " /INCREMENTAL:NO")
+# Remove dead code.
+string(APPEND win_link_flags " /OPT:REF")
+# Allow executables to be randomly rebased at load time (enables virtual address allocation randomization).
+string(APPEND win_link_flags " /DYNAMICBASE")
+# Executable is compatible with the Windows Data Execution Prevention.
+string(APPEND win_link_flags " /NXCOMPAT")
+set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${win_link_flags}")
diff --git a/src/third_party/wiredtiger/build_cmake/helpers.cmake b/src/third_party/wiredtiger/build_cmake/helpers.cmake
index d353c04aa8a..2b65f537afa 100644
--- a/src/third_party/wiredtiger/build_cmake/helpers.cmake
+++ b/src/third_party/wiredtiger/build_cmake/helpers.cmake
@@ -251,7 +251,7 @@ function(config_bool config_name description)
endif()
endif()
# Config doesn't meet dependency requirements, set its default state and flag it as disabled.
- set(${config_name} ${CONFIG_BOOL_DEFAULT} CACHE STRING "${description}" FORCE)
+ set(${config_name} OFF CACHE STRING "${description}" FORCE)
set(${config_name}_DISABLED ON CACHE INTERNAL "" FORCE)
endif()
endfunction()
diff --git a/src/third_party/wiredtiger/build_cmake/strict/cl_strict.cmake b/src/third_party/wiredtiger/build_cmake/strict/cl_strict.cmake
new file mode 100644
index 00000000000..625bf9e88d5
--- /dev/null
+++ b/src/third_party/wiredtiger/build_cmake/strict/cl_strict.cmake
@@ -0,0 +1,21 @@
+#
+# Public Domain 2014-present MongoDB, Inc.
+# Public Domain 2008-2014 WiredTiger, Inc.
+# All rights reserved.
+#
+# See the file LICENSE for redistribution information.
+#
+
+# Warning level 3.
+list(APPEND win_c_flags "/WX")
+# Ignore warning about mismatched const qualifiers.
+list(APPEND win_c_flags "/wd4090")
+# Ignore deprecated functions.
+list(APPEND win_c_flags "/wd4996")
+# Complain about unreferenced format parameter.
+list(APPEND win_c_flags "/we4100")
+# Enable security check.
+list(APPEND win_c_flags "/GS")
+
+# Set our base compiler flags that can be used by the rest of our build.
+set(COMPILER_DIAGNOSTIC_FLAGS "${COMPILER_DIAGNOSTIC_FLAGS};${win_c_flags}" CACHE INTERNAL "" FORCE)
diff --git a/src/third_party/wiredtiger/build_posix/Make.base b/src/third_party/wiredtiger/build_posix/Make.base
index 5b74c2ce6e5..043120ba943 100644
--- a/src/third_party/wiredtiger/build_posix/Make.base
+++ b/src/third_party/wiredtiger/build_posix/Make.base
@@ -36,9 +36,6 @@ wt_SOURCES =\
src/utilities/util_verify.c \
src/utilities/util_write.c
-man1_MANS = $(MAN1_PAGES)
-man3_MANS = $(MAN3_PAGES)
-
include_HEADERS= wiredtiger.h src/include/wiredtiger_ext.h
AM_CPPFLAGS = -I$(srcdir)/src/include
@@ -54,9 +51,6 @@ libtool: $(LIBTOOL_DEPS)
$(srcdir)/docs/index.html:
@cd $(srcdir)/dist && sh s_docs
-$(srcdir)/docs/man/man1/wt.1: $(srcdir)/docs/index.html
-$(srcdir)/docs/man/man3/wiredtiger.3: $(srcdir)/docs/index.html
-
libwiredtiger_la_LIBADD =
if HAVE_BUILTIN_EXTENSION_LZ4
libwiredtiger_la_LIBADD += ext/compressors/lz4/libwiredtiger_lz4.la
diff --git a/src/third_party/wiredtiger/build_posix/configure.ac.in b/src/third_party/wiredtiger/build_posix/configure.ac.in
index 7bcbae594cc..6681e0e5650 100644
--- a/src/third_party/wiredtiger/build_posix/configure.ac.in
+++ b/src/third_party/wiredtiger/build_posix/configure.ac.in
@@ -245,25 +245,6 @@ if test "$wt_cv_enable_diagnostic" = "yes"; then
[DIAGNOSTIC BUILDS ARE NOT RECOMMENDED FOR PRODUCTION DEPLOYMENT.])
fi
-# If we are building in a tree without documentation, check if doxygen is
-# available.
-if test -f "$srcdir/docs/index.html" ; then
- wt_cv_docs_exist=yes
-else
- wt_cv_docs_exist=no
-fi
-
-if test "$wt_cv_docs_exist" = "no"; then
- AC_CHECK_PROG([DOXYGEN], [doxygen], [doxygen], [false])
-fi
-
-if test "$wt_cv_docs_exist" = "yes" -o "$DOXYGEN" = "doxygen" ; then
- MAN1_PAGES="$srcdir/docs/man/man1/wt.1"
- AC_SUBST(MAN1_PAGES)
- MAN3_PAGES="$srcdir/docs/man/man3/wiredtiger.3"
- AC_SUBST(MAN3_PAGES)
-fi
-
# Output files
AC_CONFIG_HEADERS([wiredtiger_config.h:build_posix/config.hin])
diff --git a/src/third_party/wiredtiger/dist/api_data.py b/src/third_party/wiredtiger/dist/api_data.py
index 377fa72013a..b23448940e1 100644
--- a/src/third_party/wiredtiger/dist/api_data.py
+++ b/src/third_party/wiredtiger/dist/api_data.py
@@ -272,6 +272,10 @@ file_runtime_config = common_runtime_config + [
the file is read-only. All methods that may modify a file are
disabled. See @ref readonly for more information''',
type='boolean'),
+ Config('tiered_object', 'false', r'''
+ this file is a tiered object. When opened on its own, it is marked as
+ readonly and may be restricted in other ways''',
+ type='boolean', undoc=True),
]
# Per-file configuration
@@ -451,7 +455,7 @@ lsm_meta = file_config + lsm_config + [
obsolete chunks in the LSM tree'''),
]
-tiered_meta = common_meta + tiered_config + [
+tiered_meta = file_config + tiered_config + [
Config('last', '0', r'''
the last allocated object ID'''),
Config('tiers', '', r'''
@@ -1557,6 +1561,18 @@ methods = {
Config('force', 'false', r'''
force sharing of all data''',
type='boolean'),
+ Config('lock_wait', 'true', r'''
+ wait for locks, if \c lock_wait=false, fail if any required locks are
+ not available immediately''',
+ type='boolean'),
+ Config('sync', 'on', r'''
+ wait for all objects to be flushed to the shared storage to the level
+ specified. The \c off setting does not wait for any
+ objects to be written to the tiered storage system but returns immediately after
+ generating the objects and work units for an internal thread. The
+ \c on setting causes the caller to wait until all work queued for this call to
+ be completely processed before returning''',
+ choices=['off', 'on']),
]),
'WT_SESSION.strerror' : Method([]),
diff --git a/src/third_party/wiredtiger/dist/filelist b/src/third_party/wiredtiger/dist/filelist
index 5bdbe137fa2..b1fe227cdfc 100644
--- a/src/third_party/wiredtiger/dist/filelist
+++ b/src/third_party/wiredtiger/dist/filelist
@@ -212,6 +212,7 @@ src/support/timestamp.c
src/support/update_vector.c
src/tiered/tiered_config.c
src/tiered/tiered_handle.c
+src/tiered/tiered_opener.c
src/tiered/tiered_work.c
src/txn/txn.c
src/txn/txn_ckpt.c
diff --git a/src/third_party/wiredtiger/dist/s_all b/src/third_party/wiredtiger/dist/s_all
index 8b36c09aa66..50d1909eea0 100755
--- a/src/third_party/wiredtiger/dist/s_all
+++ b/src/third_party/wiredtiger/dist/s_all
@@ -22,12 +22,12 @@ while :
-A) # Reconfigure the library build.
reconf=1
shift;;
- -f) # Force versions to be updated
- force="-f"
- shift;;
-E) # Return an error code on failure
errmode=1
shift;;
+ -f) # Force versions to be updated
+ force="-f"
+ shift;;
*)
break;;
esac
@@ -52,15 +52,12 @@ errchk()
sed -e 's/^/ /' $2
echo "#######################"
- rm -f $2
+ # If the test was skipped, ignore the failure.
+ if ! `grep "$1.*skipped" $2 > /dev/null`; then
+ errfound=1;
+ fi
- # Some tests shouldn't return an error, we exclude them here.
- case "$1" in
- *s_export|*s_tags)
- ;;
- *)
- errfound=1;;
- esac
+ rm -f $2
}
run()
diff --git a/src/third_party/wiredtiger/dist/s_docs b/src/third_party/wiredtiger/dist/s_docs
index a5c10127d34..1077ce5f332 100755
--- a/src/third_party/wiredtiger/dist/s_docs
+++ b/src/third_party/wiredtiger/dist/s_docs
@@ -1,4 +1,4 @@
-#! /bin/sh
+#! /bin/bash
t=__wt.$$
trap 'rm -f $t' 0 1 2 3 13 15
@@ -8,7 +8,12 @@ test -n "$WT_RELEASE_BUILD" && exit 0
# We require doxygen which may not be installed.
type doxygen > /dev/null 2>&1 || {
- echo 'skipped: doxygen not found'
+ echo "$0 skipped: doxygen not found"
+ exit 0
+}
+v=`(echo "1.8.17" && doxygen --version) | sort -V | head -1`
+test "$v" = "1.8.17" || {
+ echo "$0 skipped: unsupported version of doxygen"
exit 0
}
@@ -194,21 +199,6 @@ EOF
done
)
fi
-
- # Fixup the man pages generated by Doxygen. We want the command line
- # documentation to be the main man page, but also install a man page
- # for the WiredTiger header into the library section.
- [ "$additional_languages" -eq 1 ] &&
- (cd ../docs && mkdir -p man/man1 &&
- mv man/man3/command_line.3 man/man1/wt.1 &&
- sed -i~ -e 's/command_line/wt/g' man/man1/wt.1 &&
- sed -i~ -e 's/Version Version/Version/g' man/man1/wt.1 &&
- rm -f man/man1/wt.1~ &&
- mv man/man3/basic_api.3 man/ && rm -f man/man3/* &&
- mv man/basic_api.3 man/man3/wiredtiger.3 &&
- sed -i~ -e 's/basic_api/WiredTiger/g' man/man3/wiredtiger.3 &&
- sed -i~ -e 's/Version Version/Version/g' man/man3/wiredtiger.3 &&
- rm -f man/man3/wiredtiger.3~)
}
clean=0
diff --git a/src/third_party/wiredtiger/dist/s_docs_plantuml b/src/third_party/wiredtiger/dist/s_docs_plantuml
index 43c669591a7..479c79f994d 100755
--- a/src/third_party/wiredtiger/dist/s_docs_plantuml
+++ b/src/third_party/wiredtiger/dist/s_docs_plantuml
@@ -13,7 +13,7 @@ PLANTUML_URL="https://downloads.sourceforge.net/project/plantuml/plantuml.jar?r=
# We require java which may not be installed.
type java > /dev/null 2>&1 || {
- echo 'skipped: java not found'
+ echo "$0 skipped: java not found"
exit 0
}
diff --git a/src/third_party/wiredtiger/dist/s_export b/src/third_party/wiredtiger/dist/s_export
index c60d2ccd1d6..b3322ffe64e 100755
--- a/src/third_party/wiredtiger/dist/s_export
+++ b/src/third_party/wiredtiger/dist/s_export
@@ -12,7 +12,10 @@ Darwin)
*)
# We require GNU nm, which may not be installed.
type nm > /dev/null 2>&1 &&
- (nm --version | grep 'GNU nm') > /dev/null 2>&1 || exit 0
+ (nm --version | grep 'GNU nm') > /dev/null 2>&1 || {
+ echo "$0 skipped: GNU nm not found"
+ exit 0
+ }
NM='nm --extern-only --defined-only --print-file-name $f | egrep -v "__bss_start|_edata|_end|_fini|_init"'
;;
esac
@@ -51,5 +54,5 @@ for d in .. ../build_posix; do
done
done
-echo "skipped: libwiredtiger.[a|so|dylib] not found"
+echo "$0 skipped: libwiredtiger.[a|so|dylib] not found"
exit 0
diff --git a/src/third_party/wiredtiger/dist/s_string b/src/third_party/wiredtiger/dist/s_string
index 735490dafc2..4aab8a16f38 100755
--- a/src/third_party/wiredtiger/dist/s_string
+++ b/src/third_party/wiredtiger/dist/s_string
@@ -11,7 +11,7 @@ export LC_ALL
# If aspell has not been installed, quit
type aspell > /dev/null 2>&1 || {
- echo 'skipped: aspell not found'
+ echo "$0 skipped: aspell not found"
exit 0
}
diff --git a/src/third_party/wiredtiger/dist/s_tags b/src/third_party/wiredtiger/dist/s_tags
index 4785300f336..52449aca582 100755
--- a/src/third_party/wiredtiger/dist/s_tags
+++ b/src/third_party/wiredtiger/dist/s_tags
@@ -14,7 +14,7 @@ test -f s_tags || {
# We require ctags which may not be installed.
type ctags > /dev/null 2>&1 || {
- echo 'skipped: ctags not found'
+ echo "$0 skipped: ctags not found"
exit 0
}
diff --git a/src/third_party/wiredtiger/dist/s_void b/src/third_party/wiredtiger/dist/s_void
index ab46f05c593..c679f5d3b0a 100755
--- a/src/third_party/wiredtiger/dist/s_void
+++ b/src/third_party/wiredtiger/dist/s_void
@@ -58,6 +58,7 @@ func_ok()
-e '/int __wt_block_compact_end$/d' \
-e '/int __wt_block_compact_start$/d' \
-e '/int __wt_block_manager_size$/d' \
+ -e '/int __wt_block_tiered_load$/d' \
-e '/int __wt_block_write_size$/d' \
-e '/int __wt_buf_catfmt$/d' \
-e '/int __wt_buf_fmt$/d' \
diff --git a/src/third_party/wiredtiger/dist/stat_data.py b/src/third_party/wiredtiger/dist/stat_data.py
index e1bf0ebcc4e..de5cde1f494 100644
--- a/src/third_party/wiredtiger/dist/stat_data.py
+++ b/src/third_party/wiredtiger/dist/stat_data.py
@@ -501,7 +501,9 @@ connection_stats = [
##########################################
# Tiered storage statistics
##########################################
+ StorageStat('flush_state_races', 'flush state races'),
StorageStat('flush_tier', 'flush_tier operation calls'),
+ StorageStat('flush_tier_busy', 'flush_tier busy retries'),
##########################################
# Thread Count statistics
diff --git a/src/third_party/wiredtiger/dist/test_data.py b/src/third_party/wiredtiger/dist/test_data.py
index 23667a35751..48f51675514 100644
--- a/src/third_party/wiredtiger/dist/test_data.py
+++ b/src/third_party/wiredtiger/dist/test_data.py
@@ -45,20 +45,15 @@ class Config:
# A generic configuration used by some components to define their tick rate.
#
throttle_config = [
- Config('op_count', 1, r'''
- The number of operations to be performed within the defined interval, e.g.
- 20 op_count with an interval of a second is equal to 20 ops per second.''',
- min=1, max=10000),
- Config('interval', 's', r'''
- The interval to considered, either second, minute or hour.
- The default interval is seconds.''',
- choices=['s', 'm', 'h'])
+ Config('op_rate', '1s', r'''
+ The rate at which a given operation will happen. Can be either s, ms, or m, combined with an
+ integer. E.g. 20ms means do this operation every 20ms.''')
]
#
-# Record config specifies the format of the keys and values used in the database
+# Record config specifies the size of the keys and values to be generated by default.
#
-record_config = throttle_config + [
+record_config = [
Config('key_size', 5, r'''
The size of the keys created''', min=0, max=10000),
Config('value_size', 5, r'''
@@ -68,7 +63,7 @@ record_config = throttle_config + [
#
# The populate config defines how large the initially loaded database will be.
#
-populate_config = [
+populate_config = record_config + [
Config('collection_count', 1, r'''
The number of collections the workload generator operates over''', min=0, max=200000),
Config('key_count', 0, r'''
@@ -105,7 +100,7 @@ range_config = [
The maximum a value can be in a range''')
]
-component_config = enabled_config_true + throttle_config
+component_config = throttle_config
transaction_config = [
Config('ops_per_transaction', '', r'''
@@ -114,20 +109,36 @@ transaction_config = [
type='category',subconfig=range_config),
]
+thread_count = [
+ Config('thread_count', 1, r'''
+ Specifies the number of threads that will be used to perform a certain function.''')
+]
+
+read_thread_config = thread_count + throttle_config + transaction_config
+update_insert_thread_config = thread_count + transaction_config + throttle_config + record_config
+
+#
+# Configuration for the checkpoint_manager component.
+#
+checkpoint_manager = enabled_config_false + component_config
+
#
# Configuration that applies to the runtime monitor component, this should be a list of statistics
# that need to be checked by the component.
#
-runtime_monitor = component_config + [
+runtime_monitor = enabled_config_true + component_config + [
Config('stat_cache_size', '', '''
The maximum cache percentage that can be hit while running.''',
+ type='category', subconfig=limit_stat),
+ Config('stat_db_size', '', '''
+ The maximum on-disk database size in bytes that can be hit while running.''',
type='category', subconfig=limit_stat)
]
#
# Configuration that applies to the timestamp_manager component.
#
-timestamp_manager = component_config + [
+timestamp_manager = enabled_config_true + component_config + [
Config('oldest_lag', 1, r'''
The duration between the stable and oldest timestamps''', min=0, max=1000000),
Config('stable_lag', 1, r'''
@@ -137,30 +148,28 @@ timestamp_manager = component_config + [
#
# Configuration that applies to the workload tracking component.
#
-workload_tracking = component_config
+workload_tracking = enabled_config_true + component_config
#
# Configuration that applies to the workload_generator component.
#
-workload_generator = component_config + transaction_config + record_config + populate_config + [
- Config('read_threads', 0, r'''
- The number of threads performing read operations''', min=0, max=100),
- Config('insert_threads', 0, r'''
- The number of threads performing insert operations''', min=0, max=20),
+workload_generator = enabled_config_true + component_config + populate_config + [
+ Config('read_config', '', r'''
+ Config that specifies the number of read threads and their behaviour.''',
+ type='category', subconfig=read_thread_config),
Config('insert_config', '', r'''
- The definition of the record being inserted, if record config is empty the top level
- record_config will be used.''',
- type='category', subconfig=record_config),
- Config('update_threads', 0, r'''
- The number of threads performing update operations''', min=0, max=20),
+ Config that specifies the number of insert threads and their behaviour.''',
+ type='category', subconfig=update_insert_thread_config),
Config('update_config', '',r'''
- The definition of the record being updated, if record config is empty the top level
- record_config will be used.''',
- type='category', subconfig=record_config)
+ Config that specifies the number of update threads and their behaviour.''',
+ type='category', subconfig=update_insert_thread_config)
]
test_config = [
# Component configurations.
+ Config('checkpoint_manager', '', r'''
+ Configuration options for the checkpoint manager''',
+ type='category', subconfig=checkpoint_manager),
Config('runtime_monitor', '', r'''
Configuration options for the runtime_monitor''',
type='category', subconfig=runtime_monitor),
diff --git a/src/third_party/wiredtiger/import.data b/src/third_party/wiredtiger/import.data
index 69c05298149..a51dde2abd6 100644
--- a/src/third_party/wiredtiger/import.data
+++ b/src/third_party/wiredtiger/import.data
@@ -2,5 +2,5 @@
"vendor": "wiredtiger",
"github": "wiredtiger/wiredtiger.git",
"branch": "mongodb-4.4",
- "commit": "7374df6c344587d433853d01f0c6241428ab7a80"
+ "commit": "f34a5afbb1d807ac90627555c3452fdec750b592"
}
diff --git a/src/third_party/wiredtiger/src/block/block_addr.c b/src/third_party/wiredtiger/src/block/block_addr.c
index f594d30e83e..181ed8ad77f 100644
--- a/src/third_party/wiredtiger/src/block/block_addr.c
+++ b/src/third_party/wiredtiger/src/block/block_addr.c
@@ -40,6 +40,8 @@ __block_buffer_to_addr(WT_BLOCK *block, const uint8_t **pp, uint32_t *objectidp,
*offsetp = 0;
*objectidp = *sizep = *checksump = 0;
} else {
+ if (block->has_objects && l == 0 && o != WT_BLOCK_INVALID_OFFSET)
+ WT_RET_MSG(NULL, EINVAL, "address cookie decoding for Btree with objects has object 0");
*objectidp = (uint32_t)l;
*offsetp = (wt_off_t)(o + 1) * block->allocsize;
*sizep = (uint32_t)s * block->allocsize;
@@ -68,8 +70,11 @@ __wt_block_addr_to_buffer(WT_BLOCK *block, uint8_t **pp, uint32_t objectid, wt_o
s = size / block->allocsize;
c = checksum;
}
- if (block->has_objects)
+ if (block->has_objects) {
+ if (l == 0 && o != WT_BLOCK_INVALID_OFFSET)
+ WT_RET_MSG(NULL, EINVAL, "address cookie encoding for Btree with objects has object 0");
WT_RET(__wt_vpack_uint(pp, 0, l));
+ }
WT_RET(__wt_vpack_uint(pp, 0, o));
WT_RET(__wt_vpack_uint(pp, 0, s));
WT_RET(__wt_vpack_uint(pp, 0, c));
diff --git a/src/third_party/wiredtiger/src/block/block_ckpt.c b/src/third_party/wiredtiger/src/block/block_ckpt.c
index 8194b8212de..f8c06b84714 100644
--- a/src/third_party/wiredtiger/src/block/block_ckpt.c
+++ b/src/third_party/wiredtiger/src/block/block_ckpt.c
@@ -746,13 +746,6 @@ live_update:
ci->ckpt_discard = ci->discard;
WT_ERR(__wt_block_extlist_init(session, &ci->discard, "live", "discard", false));
- /*
- * TODO: tiered: for now we are switching files on a checkpoint, we'll want to do it only on
- * flush_tier.
- */
- if (block->has_objects)
- WT_ERR(__wt_block_tiered_newfile(session, block));
-
#ifdef HAVE_DIAGNOSTIC
/*
* The first checkpoint in the system should always have an empty discard list. If we've read
diff --git a/src/third_party/wiredtiger/src/block/block_mgr.c b/src/third_party/wiredtiger/src/block/block_mgr.c
index 4be319fe79c..433d7342295 100644
--- a/src/third_party/wiredtiger/src/block/block_mgr.c
+++ b/src/third_party/wiredtiger/src/block/block_mgr.c
@@ -289,30 +289,6 @@ __bm_compact_start_readonly(WT_BM *bm, WT_SESSION_IMPL *session)
}
/*
- * __bm_flush_tier --
- * Flush the underlying file to the shared tier.
- */
-static int
-__bm_flush_tier(WT_BM *bm, WT_SESSION_IMPL *session, uint8_t **flush_cookie, size_t *cookie_size)
-{
- return (__wt_block_tiered_flush(session, bm->block, flush_cookie, cookie_size));
-}
-
-/*
- * __bm_flush_tier_readonly --
- * Flush the underlying file to the shared tier; readonly version.
- */
-static int
-__bm_flush_tier_readonly(
- WT_BM *bm, WT_SESSION_IMPL *session, uint8_t **flush_cookie, size_t *cookie_size)
-{
- WT_UNUSED(flush_cookie);
- WT_UNUSED(cookie_size);
-
- return (__bm_readonly(bm, session));
-}
-
-/*
* __bm_free --
* Free a block of space to the underlying file.
*/
@@ -463,6 +439,29 @@ __bm_stat(WT_BM *bm, WT_SESSION_IMPL *session, WT_DSRC_STATS *stats)
}
/*
+ * __bm_switch_object --
+ * Modify the tiered object.
+ */
+static int
+__bm_switch_object(WT_BM *bm, WT_SESSION_IMPL *session, uint32_t object_id, uint32_t flags)
+{
+ return (__wt_block_switch_object(session, bm->block, object_id, flags));
+}
+
+/*
+ * __bm_switch_object_readonly --
+ * Modify the tiered object; readonly version.
+ */
+static int
+__bm_switch_object_readonly(WT_BM *bm, WT_SESSION_IMPL *session, uint32_t object_id, uint32_t flags)
+{
+ WT_UNUSED(object_id);
+ WT_UNUSED(flags);
+
+ return (__bm_readonly(bm, session));
+}
+
+/*
* __bm_sync --
* Flush a file to disk.
*/
@@ -589,7 +588,6 @@ __bm_method_set(WT_BM *bm, bool readonly)
bm->compact_skip = __bm_compact_skip;
bm->compact_start = __bm_compact_start;
bm->corrupt = __wt_bm_corrupt;
- bm->flush_tier = __bm_flush_tier;
bm->free = __bm_free;
bm->is_mapped = __bm_is_mapped;
bm->map_discard = __bm_map_discard;
@@ -601,6 +599,7 @@ __bm_method_set(WT_BM *bm, bool readonly)
bm->salvage_valid = __bm_salvage_valid;
bm->size = __wt_block_manager_size;
bm->stat = __bm_stat;
+ bm->switch_object = __bm_switch_object;
bm->sync = __bm_sync;
bm->verify_addr = __bm_verify_addr;
bm->verify_end = __bm_verify_end;
@@ -616,12 +615,12 @@ __bm_method_set(WT_BM *bm, bool readonly)
bm->compact_page_skip = __bm_compact_page_skip_readonly;
bm->compact_skip = __bm_compact_skip_readonly;
bm->compact_start = __bm_compact_start_readonly;
- bm->flush_tier = __bm_flush_tier_readonly;
bm->free = __bm_free_readonly;
bm->salvage_end = __bm_salvage_end_readonly;
bm->salvage_next = __bm_salvage_next_readonly;
bm->salvage_start = __bm_salvage_start_readonly;
bm->salvage_valid = __bm_salvage_valid_readonly;
+ bm->switch_object = __bm_switch_object_readonly;
bm->sync = __bm_sync_readonly;
bm->write = __bm_write_readonly;
bm->write_size = __bm_write_size_readonly;
@@ -633,8 +632,9 @@ __bm_method_set(WT_BM *bm, bool readonly)
* Open a file.
*/
int
-__wt_block_manager_open(WT_SESSION_IMPL *session, const char *filename, const char *cfg[],
- bool forced_salvage, bool readonly, uint32_t allocsize, WT_BM **bmp)
+__wt_block_manager_open(WT_SESSION_IMPL *session, const char *filename,
+ WT_BLOCK_FILE_OPENER *opener, const char *cfg[], bool forced_salvage, bool readonly,
+ uint32_t allocsize, WT_BM **bmp)
{
WT_BM *bm;
WT_DECL_RET;
@@ -644,8 +644,8 @@ __wt_block_manager_open(WT_SESSION_IMPL *session, const char *filename, const ch
WT_RET(__wt_calloc_one(session, &bm));
__bm_method_set(bm, false);
- WT_ERR(
- __wt_block_open(session, filename, cfg, forced_salvage, readonly, allocsize, &bm->block));
+ WT_ERR(__wt_block_open(
+ session, filename, opener, cfg, forced_salvage, readonly, allocsize, &bm->block));
*bmp = bm;
return (0);
diff --git a/src/third_party/wiredtiger/src/block/block_open.c b/src/third_party/wiredtiger/src/block/block_open.c
index c41de4aaaaa..355226789dc 100644
--- a/src/third_party/wiredtiger/src/block/block_open.c
+++ b/src/third_party/wiredtiger/src/block/block_open.c
@@ -138,8 +138,8 @@ __wt_block_configure_first_fit(WT_BLOCK *block, bool on)
* Open a block handle.
*/
int
-__wt_block_open(WT_SESSION_IMPL *session, const char *filename, const char *cfg[],
- bool forced_salvage, bool readonly, uint32_t allocsize, WT_BLOCK **blockp)
+__wt_block_open(WT_SESSION_IMPL *session, const char *filename, WT_BLOCK_FILE_OPENER *opener,
+ const char *cfg[], bool forced_salvage, bool readonly, uint32_t allocsize, WT_BLOCK **blockp)
{
WT_BLOCK *block;
WT_CONFIG_ITEM cval;
@@ -176,12 +176,16 @@ __wt_block_open(WT_SESSION_IMPL *session, const char *filename, const char *cfg[
block->ref = 1;
block->name_hash = hash;
block->allocsize = allocsize;
+ block->opener = opener;
WT_CONN_BLOCK_INSERT(conn, block, bucket);
WT_ERR(__wt_strdup(session, filename, &block->name));
WT_ERR(__wt_config_gets(session, cfg, "block_allocation", &cval));
block->allocfirst = WT_STRING_MATCH("first", cval.str, cval.len);
+ block->has_objects = (opener != NULL);
+ if (block->has_objects)
+ block->objectid = opener->current_object_id(opener);
/* Configuration: optional OS buffer cache maximum size. */
WT_ERR(__wt_config_gets(session, cfg, "os_cache_max", &cval));
@@ -211,7 +215,12 @@ __wt_block_open(WT_SESSION_IMPL *session, const char *filename, const char *cfg[
if (!readonly && FLD_ISSET(conn->direct_io, WT_DIRECT_IO_DATA))
LF_SET(WT_FS_OPEN_DIRECTIO);
block->file_flags = flags;
- WT_ERR(__wt_open(session, filename, WT_FS_OPEN_FILE_TYPE_DATA, block->file_flags, &block->fh));
+ if (block->has_objects)
+ WT_ERR(opener->open(opener, session, WT_TIERED_CURRENT_ID, WT_FS_OPEN_FILE_TYPE_DATA,
+ block->file_flags, &block->fh));
+ else
+ WT_ERR(
+ __wt_open(session, filename, WT_FS_OPEN_FILE_TYPE_DATA, block->file_flags, &block->fh));
/* Set the file's size. */
WT_ERR(__wt_filesize(session, block->fh, &block->size));
diff --git a/src/third_party/wiredtiger/src/block/block_read.c b/src/third_party/wiredtiger/src/block/block_read.c
index 08c1ce8067f..d5c1d90718a 100644
--- a/src/third_party/wiredtiger/src/block/block_read.c
+++ b/src/third_party/wiredtiger/src/block/block_read.c
@@ -213,43 +213,12 @@ err:
int
__wt_block_fh(WT_SESSION_IMPL *session, WT_BLOCK *block, uint32_t objectid, WT_FH **fhp)
{
- WT_DECL_ITEM(tmp);
- WT_DECL_RET;
- const char *filename;
-
- if (!block->has_objects || objectid == block->objectid) {
+ if (!block->has_objects)
*fhp = block->fh;
- return (0);
- }
-
- /* TODO: tiered: fh readlock; we may want a reference count on each file handle given out. */
- if (objectid * sizeof(WT_FILE_HANDLE *) < block->ofh_alloc &&
- (*fhp = block->ofh[objectid]) != NULL)
- return (0);
-
- /* TODO: tiered: fh writelock */
- /* Ensure the array goes far enough. */
- WT_RET(__wt_realloc_def(session, &block->ofh_alloc, objectid + 1, &block->ofh));
- if (objectid >= block->max_objectid)
- block->max_objectid = objectid + 1;
- if ((*fhp = block->ofh[objectid]) != NULL)
- return (0);
+ else
+ WT_RET(__wt_block_tiered_fh(session, block, objectid, fhp));
- WT_RET(__wt_scr_alloc(session, 0, &tmp));
- if (objectid == 0)
- filename = block->name;
- else {
- WT_ERR(__wt_buf_fmt(session, tmp, "%s.%08" PRIu32, block->name, objectid));
- filename = tmp->data;
- }
- WT_ERR(__wt_open(session, filename, WT_FS_OPEN_FILE_TYPE_DATA,
- WT_FS_OPEN_READONLY | block->file_flags, &block->ofh[objectid]));
- *fhp = block->ofh[objectid];
- WT_ASSERT(session, *fhp != NULL);
-
-err:
- __wt_scr_free(session, &tmp);
- return (ret);
+ return (0);
}
/*
diff --git a/src/third_party/wiredtiger/src/block/block_tiered.c b/src/third_party/wiredtiger/src/block/block_tiered.c
index b275ccd95a7..ffc3a35a147 100644
--- a/src/third_party/wiredtiger/src/block/block_tiered.c
+++ b/src/third_party/wiredtiger/src/block/block_tiered.c
@@ -9,77 +9,100 @@
#include "wt_internal.h"
/*
- * __wt_block_tiered_flush --
- * Flush this file, start another file.
+ * __block_switch_writeable --
+ * Switch a new writeable object.
*/
-int
-__wt_block_tiered_flush(
- WT_SESSION_IMPL *session, WT_BLOCK *block, uint8_t **flush_cookie, size_t *cookie_size)
+static int
+__block_switch_writeable(WT_SESSION_IMPL *session, WT_BLOCK *block, uint32_t object_id)
{
- /* TODO: tiered: fill in the cookie. */
- (void)flush_cookie;
- (void)cookie_size;
+ WT_DECL_RET;
- return (__wt_block_tiered_newfile(session, block));
-}
+ WT_ERR(__wt_close(session, &block->fh));
-/*
- * __wt_block_tiered_load --
- * Set up log-structured processing when loading a new root page.
- */
-int
-__wt_block_tiered_load(WT_SESSION_IMPL *session, WT_BLOCK *block, WT_BLOCK_CKPT *ci)
-{
/*
- * TODO: tiered: this call currently advances the object id, that's probably not appropriate for
- * readonly opens. Perhaps it's also not appropriate for opening at an older checkpoint?
+ * FIXME-WT-7470: write lock while opening a new write handle.
*/
- if (block->has_objects) {
- block->objectid = ci->root_objectid;
+ WT_ERR(block->opener->open(
+ block->opener, session, object_id, WT_FS_OPEN_FILE_TYPE_DATA, block->file_flags, &block->fh));
- /* Advance to the next file for future changes. */
- WT_RET(__wt_block_tiered_newfile(session, block));
- }
- return (0);
+err:
+ return (ret);
}
/*
- * __wt_block_tiered_newfile --
- * Switch a log-structured block object to a new file.
+ * __wt_block_tiered_fh --
+ * Open an object from the shared tier.
*/
int
-__wt_block_tiered_newfile(WT_SESSION_IMPL *session, WT_BLOCK *block)
+__wt_block_tiered_fh(WT_SESSION_IMPL *session, WT_BLOCK *block, uint32_t object_id, WT_FH **fhp)
{
WT_DECL_ITEM(tmp);
WT_DECL_RET;
- const char *filename;
-
- /* Get the old file name again. */
- WT_ERR(__wt_scr_alloc(session, 0, &tmp));
/*
- * TODO: tiered: We will get rid of the log id, and this name generation will be replaced by the
- * name generated by __tiered_switch.
+ * FIXME-WT-7470: take a read lock to get a handle, and a write lock to open a handle or extend
+ * the array.
+ *
+ * If the object id isn't larger than the array of file handles, see if it's already opened.
*/
- WT_ERR(__wt_close(session, &block->fh));
-
- /* Bump to a new file ID. */
- ++block->objectid;
- WT_ERR(__wt_buf_fmt(session, tmp, "%s.%08" PRIu32, block->name, block->objectid));
- filename = tmp->data;
+ if (object_id * sizeof(WT_FILE_HANDLE *) < block->ofh_alloc &&
+ (*fhp = block->ofh[object_id]) != NULL)
+ return (0);
- WT_WITH_BUCKET_STORAGE(session->bucket_storage, session, {
- ret = __wt_open(session, filename, WT_FS_OPEN_FILE_TYPE_DATA,
- WT_FS_OPEN_CREATE | block->file_flags, &block->fh);
- });
- WT_ERR(ret);
- WT_ERR(__wt_desc_write(session, block->fh, block->allocsize));
+ /* Ensure the array is big enough. */
+ WT_RET(__wt_realloc_def(session, &block->ofh_alloc, object_id + 1, &block->ofh));
+ if (object_id >= block->max_objectid)
+ block->max_objectid = object_id + 1;
+ if ((*fhp = block->ofh[object_id]) != NULL)
+ return (0);
- block->size = block->allocsize;
- __wt_block_ckpt_destroy(session, &block->live);
- WT_ERR(__wt_block_ckpt_init(session, &block->live, "live"));
+ WT_RET(__wt_scr_alloc(session, 0, &tmp));
+ WT_ERR(block->opener->open(block->opener, session, object_id, WT_FS_OPEN_FILE_TYPE_DATA,
+ WT_FS_OPEN_READONLY | block->file_flags, &block->ofh[object_id]));
+ *fhp = block->ofh[object_id];
+ WT_ASSERT(session, *fhp != NULL);
err:
__wt_scr_free(session, &tmp);
return (ret);
}
+
+/*
+ * __wt_block_switch_object --
+ * Modify an object.
+ */
+int
+__wt_block_switch_object(
+ WT_SESSION_IMPL *session, WT_BLOCK *block, uint32_t object_id, uint32_t flags)
+{
+ WT_UNUSED(flags);
+
+ /*
+ * FIXME-WT-7596 the flags argument will be used in the future to perform various tasks,
+ * to efficiently mark objects in transition (that is during a switch):
+ * - mark this file as the writeable file (what currently happens)
+ * - disallow writes to this object (reads still allowed, we're about to switch)
+ * - close this object (about to move it, don't allow reopens yet)
+ * - allow opens on this object again
+ */
+ return (__block_switch_writeable(session, block, object_id));
+}
+
+/*
+ * __wt_block_tiered_load --
+ * Set up object file processing when loading a new root page.
+ */
+int
+__wt_block_tiered_load(WT_SESSION_IMPL *session, WT_BLOCK *block, WT_BLOCK_CKPT *ci)
+{
+ WT_UNUSED(session);
+
+ if (block->has_objects)
+ block->objectid = ci->root_objectid;
+
+ /*
+ * FIXME-WT-7589: There is probably more work here, perhaps in switching the current file, and
+ * setting the live checkpoint to the argument checkpoint.
+ */
+ return (0);
+}
diff --git a/src/third_party/wiredtiger/src/btree/bt_handle.c b/src/third_party/wiredtiger/src/btree/bt_handle.c
index 5e6444dc202..81a580e9829 100644
--- a/src/third_party/wiredtiger/src/btree/bt_handle.c
+++ b/src/third_party/wiredtiger/src/btree/bt_handle.c
@@ -57,6 +57,7 @@ __btree_clear(WT_SESSION_IMPL *session)
int
__wt_btree_open(WT_SESSION_IMPL *session, const char *op_cfg[])
{
+ WT_BLOCK_FILE_OPENER *opener;
WT_BM *bm;
WT_BTREE *btree;
WT_CKPT ckpt;
@@ -110,15 +111,17 @@ __wt_btree_open(WT_SESSION_IMPL *session, const char *op_cfg[])
/* Initialize and configure the WT_BTREE structure. */
WT_ERR(__btree_conf(session, &ckpt));
- /* Connect to the underlying block manager. */
- filename = dhandle->name;
- if (!WT_PREFIX_SKIP(filename, "file:"))
- WT_ERR_MSG(session, EINVAL, "expected a 'file:' URI");
+ /*
+ * Get an opener abstraction that the block manager can use to open any of the files that
+ * represent a btree. In the case of a tiered Btree, that would allow opening different files
+ * according to an object id in a reference. For a non-tiered Btree, the opener will know to
+ * always open a single file (given by the filename).
+ */
+ WT_ERR(__wt_tiered_opener(session, dhandle, &opener, &filename));
- WT_WITH_BUCKET_STORAGE(btree->bstorage, session,
- ret = __wt_block_manager_open(session, filename, dhandle->cfg, forced_salvage,
- F_ISSET(btree, WT_BTREE_READONLY), btree->allocsize, &btree->bm));
- WT_ERR(ret);
+ /* Connect to the underlying block manager. */
+ WT_ERR(__wt_block_manager_open(session, filename, opener, dhandle->cfg, forced_salvage,
+ F_ISSET(btree, WT_BTREE_READONLY), btree->allocsize, &btree->bm));
bm = btree->bm;
@@ -401,6 +404,12 @@ __btree_conf(WT_SESSION_IMPL *session, WT_CKPT *ckpt)
else
F_SET(btree, WT_BTREE_NO_LOGGING);
+ WT_RET(__wt_config_gets(session, cfg, "tiered_object", &cval));
+ if (cval.val)
+ F_SET(btree, WT_BTREE_NO_CHECKPOINT);
+ else
+ F_CLR(btree, WT_BTREE_NO_CHECKPOINT);
+
/* Checksums */
WT_RET(__wt_config_gets(session, cfg, "checksum", &cval));
if (WT_STRING_MATCH("on", cval.str, cval.len))
@@ -1003,3 +1012,26 @@ __wt_btree_immediately_durable(WT_SESSION_IMPL *session)
(F_ISSET(S2C(session), WT_CONN_IN_MEMORY))) &&
!F_ISSET(btree, WT_BTREE_NO_LOGGING));
}
+
+/*
+ * __wt_btree_switch_object --
+ * Switch to a writeable object for a tiered btree.
+ */
+int
+__wt_btree_switch_object(WT_SESSION_IMPL *session, uint32_t object_id, uint32_t flags)
+{
+ WT_BM *bm;
+ WT_DECL_RET;
+
+ bm = S2BT(session)->bm;
+
+ /*
+ * When initially opening a tiered Btree, a tier switch is done internally without the btree
+ * being fully opened. That's okay, the btree will be told later about the current object
+ * number.
+ */
+ if (bm != NULL)
+ ret = bm->switch_object(bm, session, object_id, flags);
+
+ return (ret);
+}
diff --git a/src/third_party/wiredtiger/src/btree/bt_import.c b/src/third_party/wiredtiger/src/btree/bt_import.c
index 6a650cf0647..cb60e9aa14a 100644
--- a/src/third_party/wiredtiger/src/btree/bt_import.c
+++ b/src/third_party/wiredtiger/src/btree/bt_import.c
@@ -46,7 +46,7 @@ __wt_import_repair(WT_SESSION_IMPL *session, const char *uri, char **configp)
* size, but 512B allows us to read the descriptor block and that's all we care about.
*/
F_SET(session, WT_SESSION_IMPORT_REPAIR);
- WT_ERR(__wt_block_manager_open(session, filename, cfg, false, true, 512, &bm));
+ WT_ERR(__wt_block_manager_open(session, filename, NULL, cfg, false, true, 512, &bm));
ret = bm->checkpoint_last(bm, session, &metadata, &checkpoint_list, checkpoint);
WT_TRET(bm->close(bm, session));
F_CLR(session, WT_SESSION_IMPORT_REPAIR);
@@ -118,7 +118,7 @@ __wt_import_repair(WT_SESSION_IMPL *session, const char *uri, char **configp)
* size. When we did this earlier, we were able to read the descriptor block properly but the
* checkpoint's byte representation was wrong because it was using the wrong allocation size.
*/
- WT_ERR(__wt_block_manager_open(session, filename, cfg, false, true, allocsize, &bm));
+ WT_ERR(__wt_block_manager_open(session, filename, NULL, cfg, false, true, allocsize, &bm));
__wt_free(session, checkpoint_list);
__wt_free(session, metadata);
ret = bm->checkpoint_last(bm, session, &metadata, &checkpoint_list, checkpoint);
diff --git a/src/third_party/wiredtiger/src/btree/bt_split.c b/src/third_party/wiredtiger/src/btree/bt_split.c
index 6fc62f0a52b..b5b997054ef 100644
--- a/src/third_party/wiredtiger/src/btree/bt_split.c
+++ b/src/third_party/wiredtiger/src/btree/bt_split.c
@@ -2257,10 +2257,12 @@ __wt_split_rewrite(WT_SESSION_IMPL *session, WT_REF *ref, WT_MULTI *multi)
* Pages with unresolved changes are not marked clean during reconciliation, do it now.
*
* Don't count this as eviction making progress, we did a one-for-one rewrite of a page in
- * memory, typical in the case of cache pressure.
+ * memory, typical in the case of cache pressure unless the cache is configured for scrub and
+ * page doesn't have any skipped updates.
*/
__wt_page_modify_clear(session, page);
- F_SET_ATOMIC(page, WT_PAGE_EVICT_NO_PROGRESS);
+ if (!F_ISSET(S2C(session)->cache, WT_CACHE_EVICT_SCRUB) || multi->supd_restore)
+ F_SET_ATOMIC(page, WT_PAGE_EVICT_NO_PROGRESS);
__wt_ref_out(session, ref);
/* Swap the new page into place. */
diff --git a/src/third_party/wiredtiger/src/config/config_def.c b/src/third_party/wiredtiger/src/config/config_def.c
index 908e02e8688..b9914861015 100644
--- a/src/third_party/wiredtiger/src/config/config_def.c
+++ b/src/third_party/wiredtiger/src/config/config_def.c
@@ -192,6 +192,7 @@ static const WT_CONFIG_CHECK confchk_WT_SESSION_alter[] = {
{"log", "category", NULL, NULL, confchk_WT_SESSION_create_log_subconfigs, 1},
{"os_cache_dirty_max", "int", NULL, "min=0", NULL, 0},
{"os_cache_max", "int", NULL, "min=0", NULL, 0}, {"readonly", "boolean", NULL, NULL, NULL, 0},
+ {"tiered_object", "boolean", NULL, NULL, NULL, 0},
{"verbose", "list", NULL, "choices=[\"write_timestamp\"]", NULL, 0},
{"write_timestamp_usage", "string", NULL,
"choices=[\"always\",\"key_consistent\",\"mixed_mode\","
@@ -305,6 +306,7 @@ static const WT_CONFIG_CHECK confchk_WT_SESSION_create[] = {
{"split_deepen_min_child", "int", NULL, NULL, NULL, 0},
{"split_deepen_per_child", "int", NULL, NULL, NULL, 0},
{"split_pct", "int", NULL, "min=50,max=100", NULL, 0},
+ {"tiered_object", "boolean", NULL, NULL, NULL, 0},
{"tiered_storage", "category", NULL, NULL, confchk_WT_SESSION_create_tiered_storage_subconfigs,
6},
{"type", "string", NULL, NULL, NULL, 0},
@@ -323,7 +325,8 @@ static const WT_CONFIG_CHECK confchk_WT_SESSION_drop[] = {
static const WT_CONFIG_CHECK confchk_WT_SESSION_flush_tier[] = {
{"flush_timestamp", "string", NULL, NULL, NULL, 0}, {"force", "boolean", NULL, NULL, NULL, 0},
- {NULL, NULL, NULL, NULL, NULL, 0}};
+ {"lock_wait", "boolean", NULL, NULL, NULL, 0},
+ {"sync", "string", NULL, "choices=[\"off\",\"on\"]", NULL, 0}, {NULL, NULL, NULL, NULL, NULL, 0}};
static const WT_CONFIG_CHECK confchk_WT_SESSION_join[] = {
{"bloom_bit_count", "int", NULL, "min=2,max=1000", NULL, 0},
@@ -455,6 +458,7 @@ static const WT_CONFIG_CHECK confchk_file_config[] = {
{"split_deepen_min_child", "int", NULL, NULL, NULL, 0},
{"split_deepen_per_child", "int", NULL, NULL, NULL, 0},
{"split_pct", "int", NULL, "min=50,max=100", NULL, 0},
+ {"tiered_object", "boolean", NULL, NULL, NULL, 0},
{"tiered_storage", "category", NULL, NULL, confchk_WT_SESSION_create_tiered_storage_subconfigs,
6},
{"value_format", "format", __wt_struct_confchk, NULL, NULL, 0},
@@ -503,6 +507,7 @@ static const WT_CONFIG_CHECK confchk_file_meta[] = {
{"split_deepen_min_child", "int", NULL, NULL, NULL, 0},
{"split_deepen_per_child", "int", NULL, NULL, NULL, 0},
{"split_pct", "int", NULL, "min=50,max=100", NULL, 0},
+ {"tiered_object", "boolean", NULL, NULL, NULL, 0},
{"tiered_storage", "category", NULL, NULL, confchk_WT_SESSION_create_tiered_storage_subconfigs,
6},
{"value_format", "format", __wt_struct_confchk, NULL, NULL, 0},
@@ -567,6 +572,7 @@ static const WT_CONFIG_CHECK confchk_lsm_meta[] = {
{"split_deepen_min_child", "int", NULL, NULL, NULL, 0},
{"split_deepen_per_child", "int", NULL, NULL, NULL, 0},
{"split_pct", "int", NULL, "min=50,max=100", NULL, 0},
+ {"tiered_object", "boolean", NULL, NULL, NULL, 0},
{"tiered_storage", "category", NULL, NULL, confchk_WT_SESSION_create_tiered_storage_subconfigs,
6},
{"value_format", "format", __wt_struct_confchk, NULL, NULL, 0},
@@ -616,6 +622,7 @@ static const WT_CONFIG_CHECK confchk_object_meta[] = {
{"split_deepen_min_child", "int", NULL, NULL, NULL, 0},
{"split_deepen_per_child", "int", NULL, NULL, NULL, 0},
{"split_pct", "int", NULL, "min=50,max=100", NULL, 0},
+ {"tiered_object", "boolean", NULL, NULL, NULL, 0},
{"tiered_storage", "category", NULL, NULL, confchk_WT_SESSION_create_tiered_storage_subconfigs,
6},
{"value_format", "format", __wt_struct_confchk, NULL, NULL, 0},
@@ -680,6 +687,7 @@ static const WT_CONFIG_CHECK confchk_tier_meta[] = {
{"split_deepen_min_child", "int", NULL, NULL, NULL, 0},
{"split_deepen_per_child", "int", NULL, NULL, NULL, 0},
{"split_pct", "int", NULL, "min=50,max=100", NULL, 0},
+ {"tiered_object", "boolean", NULL, NULL, NULL, 0},
{"tiered_storage", "category", NULL, NULL, confchk_WT_SESSION_create_tiered_storage_subconfigs,
6},
{"value_format", "format", __wt_struct_confchk, NULL, NULL, 0},
@@ -692,13 +700,45 @@ static const WT_CONFIG_CHECK confchk_tier_meta[] = {
{NULL, NULL, NULL, NULL, NULL, 0}};
static const WT_CONFIG_CHECK confchk_tiered_meta[] = {
+ {"access_pattern_hint", "string", NULL, "choices=[\"none\",\"random\",\"sequential\"]", NULL, 0},
+ {"allocation_size", "int", NULL, "min=512B,max=128MB", NULL, 0},
{"app_metadata", "string", NULL, NULL, NULL, 0},
{"assert", "category", NULL, NULL, confchk_assert_subconfigs, 4},
+ {"block_allocation", "string", NULL, "choices=[\"best\",\"first\"]", NULL, 0},
+ {"block_compressor", "string", NULL, NULL, NULL, 0},
+ {"cache_resident", "boolean", NULL, NULL, NULL, 0},
+ {"checksum", "string", NULL, "choices=[\"on\",\"off\",\"uncompressed\"]", NULL, 0},
{"collator", "string", NULL, NULL, NULL, 0}, {"columns", "list", NULL, NULL, NULL, 0},
- {"last", "string", NULL, NULL, NULL, 0},
+ {"dictionary", "int", NULL, "min=0", NULL, 0},
+ {"encryption", "category", NULL, NULL, confchk_WT_SESSION_create_encryption_subconfigs, 2},
+ {"format", "string", NULL, "choices=[\"btree\"]", NULL, 0},
+ {"huffman_key", "string", NULL, NULL, NULL, 0}, {"huffman_value", "string", NULL, NULL, NULL, 0},
+ {"ignore_in_memory_cache_size", "boolean", NULL, NULL, NULL, 0},
+ {"internal_item_max", "int", NULL, "min=0", NULL, 0},
+ {"internal_key_max", "int", NULL, "min=0", NULL, 0},
+ {"internal_key_truncate", "boolean", NULL, NULL, NULL, 0},
+ {"internal_page_max", "int", NULL, "min=512B,max=512MB", NULL, 0},
+ {"key_format", "format", __wt_struct_confchk, NULL, NULL, 0},
+ {"key_gap", "int", NULL, "min=0", NULL, 0}, {"last", "string", NULL, NULL, NULL, 0},
+ {"leaf_item_max", "int", NULL, "min=0", NULL, 0}, {"leaf_key_max", "int", NULL, "min=0", NULL, 0},
+ {"leaf_page_max", "int", NULL, "min=512B,max=512MB", NULL, 0},
+ {"leaf_value_max", "int", NULL, "min=0", NULL, 0},
+ {"log", "category", NULL, NULL, confchk_WT_SESSION_create_log_subconfigs, 1},
+ {"memory_page_image_max", "int", NULL, "min=0", NULL, 0},
+ {"memory_page_max", "int", NULL, "min=512B,max=10TB", NULL, 0},
+ {"os_cache_dirty_max", "int", NULL, "min=0", NULL, 0},
+ {"os_cache_max", "int", NULL, "min=0", NULL, 0},
+ {"prefix_compression", "boolean", NULL, NULL, NULL, 0},
+ {"prefix_compression_min", "int", NULL, "min=0", NULL, 0},
+ {"readonly", "boolean", NULL, NULL, NULL, 0},
+ {"split_deepen_min_child", "int", NULL, NULL, NULL, 0},
+ {"split_deepen_per_child", "int", NULL, NULL, NULL, 0},
+ {"split_pct", "int", NULL, "min=50,max=100", NULL, 0},
+ {"tiered_object", "boolean", NULL, NULL, NULL, 0},
{"tiered_storage", "category", NULL, NULL, confchk_WT_SESSION_create_tiered_storage_subconfigs,
6},
{"tiers", "list", NULL, NULL, NULL, 0},
+ {"value_format", "format", __wt_struct_confchk, NULL, NULL, 0},
{"verbose", "list", NULL, "choices=[\"write_timestamp\"]", NULL, 0},
{"write_timestamp_usage", "string", NULL,
"choices=[\"always\",\"key_consistent\",\"mixed_mode\","
@@ -1101,9 +1141,9 @@ static const WT_CONFIG_ENTRY config_entries[] = {{"WT_CONNECTION.add_collator",
"assert=(commit_timestamp=none,durable_timestamp=none,"
"read_timestamp=none,write_timestamp=off),cache_resident=false,"
"checkpoint=,exclusive_refreshed=true,log=(enabled=true),"
- "os_cache_dirty_max=0,os_cache_max=0,readonly=false,verbose=[],"
- "write_timestamp_usage=none",
- confchk_WT_SESSION_alter, 12},
+ "os_cache_dirty_max=0,os_cache_max=0,readonly=false,"
+ "tiered_object=false,verbose=[],write_timestamp_usage=none",
+ confchk_WT_SESSION_alter, 13},
{"WT_SESSION.begin_transaction",
"ignore_prepare=false,isolation=,name=,operation_timeout_ms=0,"
"priority=0,read_before_oldest=false,read_timestamp=,"
@@ -1137,15 +1177,17 @@ static const WT_CONFIG_ENTRY config_entries[] = {{"WT_CONNECTION.add_collator",
"memory_page_max=5MB,os_cache_dirty_max=0,os_cache_max=0,"
"prefix_compression=false,prefix_compression_min=4,readonly=false"
",source=,split_deepen_min_child=0,split_deepen_per_child=0,"
- "split_pct=90,tiered_storage=(auth_token=,bucket=,bucket_prefix=,"
- "local_retention=300,name=,object_target_size=10M),type=file,"
- "value_format=u,verbose=[],write_timestamp_usage=none",
- confchk_WT_SESSION_create, 49},
+ "split_pct=90,tiered_object=false,tiered_storage=(auth_token=,"
+ "bucket=,bucket_prefix=,local_retention=300,name=,"
+ "object_target_size=10M),type=file,value_format=u,verbose=[],"
+ "write_timestamp_usage=none",
+ confchk_WT_SESSION_create, 50},
{"WT_SESSION.drop",
"checkpoint_wait=true,force=false,lock_wait=true,"
"remove_files=true",
confchk_WT_SESSION_drop, 4},
- {"WT_SESSION.flush_tier", "flush_timestamp=,force=false", confchk_WT_SESSION_flush_tier, 2},
+ {"WT_SESSION.flush_tier", "flush_timestamp=,force=false,lock_wait=true,sync=on",
+ confchk_WT_SESSION_flush_tier, 4},
{"WT_SESSION.join",
"bloom_bit_count=16,bloom_false_positives=false,"
"bloom_hash_count=8,compare=\"eq\",count=,operation=\"and\","
@@ -1203,11 +1245,11 @@ static const WT_CONFIG_ENTRY config_entries[] = {{"WT_CONNECTION.add_collator",
"log=(enabled=true),memory_page_image_max=0,memory_page_max=5MB,"
"os_cache_dirty_max=0,os_cache_max=0,prefix_compression=false,"
"prefix_compression_min=4,readonly=false,split_deepen_min_child=0"
- ",split_deepen_per_child=0,split_pct=90,"
+ ",split_deepen_per_child=0,split_pct=90,tiered_object=false,"
"tiered_storage=(auth_token=,bucket=,bucket_prefix=,"
"local_retention=300,name=,object_target_size=10M),value_format=u"
",verbose=[],write_timestamp_usage=none",
- confchk_file_config, 41},
+ confchk_file_config, 42},
{"file.meta",
"access_pattern_hint=none,allocation_size=4KB,app_metadata=,"
"assert=(commit_timestamp=none,durable_timestamp=none,"
@@ -1223,11 +1265,11 @@ static const WT_CONFIG_ENTRY config_entries[] = {{"WT_CONNECTION.add_collator",
"log=(enabled=true),memory_page_image_max=0,memory_page_max=5MB,"
"os_cache_dirty_max=0,os_cache_max=0,prefix_compression=false,"
"prefix_compression_min=4,readonly=false,split_deepen_min_child=0"
- ",split_deepen_per_child=0,split_pct=90,"
+ ",split_deepen_per_child=0,split_pct=90,tiered_object=false,"
"tiered_storage=(auth_token=,bucket=,bucket_prefix=,"
"local_retention=300,name=,object_target_size=10M),value_format=u"
",verbose=[],version=(major=0,minor=0),write_timestamp_usage=none",
- confchk_file_meta, 46},
+ confchk_file_meta, 47},
{"index.meta",
"app_metadata=,assert=(commit_timestamp=none,"
"durable_timestamp=none,read_timestamp=none,write_timestamp=off),"
@@ -1254,11 +1296,11 @@ static const WT_CONFIG_ENTRY config_entries[] = {{"WT_CONNECTION.add_collator",
"memory_page_max=5MB,old_chunks=,os_cache_dirty_max=0,"
"os_cache_max=0,prefix_compression=false,prefix_compression_min=4"
",readonly=false,split_deepen_min_child=0,"
- "split_deepen_per_child=0,split_pct=90,"
+ "split_deepen_per_child=0,split_pct=90,tiered_object=false,"
"tiered_storage=(auth_token=,bucket=,bucket_prefix=,"
"local_retention=300,name=,object_target_size=10M),value_format=u"
",verbose=[],write_timestamp_usage=none",
- confchk_lsm_meta, 45},
+ confchk_lsm_meta, 46},
{"object.meta",
"access_pattern_hint=none,allocation_size=4KB,app_metadata=,"
"assert=(commit_timestamp=none,durable_timestamp=none,"
@@ -1274,11 +1316,11 @@ static const WT_CONFIG_ENTRY config_entries[] = {{"WT_CONNECTION.add_collator",
"log=(enabled=true),memory_page_image_max=0,memory_page_max=5MB,"
"os_cache_dirty_max=0,os_cache_max=0,prefix_compression=false,"
"prefix_compression_min=4,readonly=false,split_deepen_min_child=0"
- ",split_deepen_per_child=0,split_pct=90,"
+ ",split_deepen_per_child=0,split_pct=90,tiered_object=false,"
"tiered_storage=(auth_token=,bucket=,bucket_prefix=,"
"local_retention=300,name=,object_target_size=10M),value_format=u"
",verbose=[],version=(major=0,minor=0),write_timestamp_usage=none",
- confchk_object_meta, 47},
+ confchk_object_meta, 48},
{"table.meta",
"app_metadata=,assert=(commit_timestamp=none,"
"durable_timestamp=none,read_timestamp=none,write_timestamp=off),"
@@ -1301,17 +1343,30 @@ static const WT_CONFIG_ENTRY config_entries[] = {{"WT_CONNECTION.add_collator",
"memory_page_max=5MB,os_cache_dirty_max=0,os_cache_max=0,"
"prefix_compression=false,prefix_compression_min=4,readonly=false"
",split_deepen_min_child=0,split_deepen_per_child=0,split_pct=90,"
- "tiered_storage=(auth_token=,bucket=,bucket_prefix=,"
- "local_retention=300,name=,object_target_size=10M),value_format=u"
- ",verbose=[],version=(major=0,minor=0),write_timestamp_usage=none",
- confchk_tier_meta, 48},
+ "tiered_object=false,tiered_storage=(auth_token=,bucket=,"
+ "bucket_prefix=,local_retention=300,name=,object_target_size=10M)"
+ ",value_format=u,verbose=[],version=(major=0,minor=0),"
+ "write_timestamp_usage=none",
+ confchk_tier_meta, 49},
{"tiered.meta",
- "app_metadata=,assert=(commit_timestamp=none,"
- "durable_timestamp=none,read_timestamp=none,write_timestamp=off),"
- "collator=,columns=,last=0,tiered_storage=(auth_token=,bucket=,"
+ "access_pattern_hint=none,allocation_size=4KB,app_metadata=,"
+ "assert=(commit_timestamp=none,durable_timestamp=none,"
+ "read_timestamp=none,write_timestamp=off),block_allocation=best,"
+ "block_compressor=,cache_resident=false,checksum=uncompressed,"
+ "collator=,columns=,dictionary=0,encryption=(keyid=,name=),"
+ "format=btree,huffman_key=,huffman_value=,"
+ "ignore_in_memory_cache_size=false,internal_item_max=0,"
+ "internal_key_max=0,internal_key_truncate=true,"
+ "internal_page_max=4KB,key_format=u,key_gap=10,last=0,"
+ "leaf_item_max=0,leaf_key_max=0,leaf_page_max=32KB,"
+ "leaf_value_max=0,log=(enabled=true),memory_page_image_max=0,"
+ "memory_page_max=5MB,os_cache_dirty_max=0,os_cache_max=0,"
+ "prefix_compression=false,prefix_compression_min=4,readonly=false"
+ ",split_deepen_min_child=0,split_deepen_per_child=0,split_pct=90,"
+ "tiered_object=false,tiered_storage=(auth_token=,bucket=,"
"bucket_prefix=,local_retention=300,name=,object_target_size=10M)"
- ",tiers=,verbose=[],write_timestamp_usage=none",
- confchk_tiered_meta, 9},
+ ",tiers=,value_format=u,verbose=[],write_timestamp_usage=none",
+ confchk_tiered_meta, 44},
{"wiredtiger_open",
"buffer_alignment=-1,builtin_extension_config=,cache_cursors=true"
",cache_max_wait_ms=0,cache_overhead=8,cache_size=100MB,"
diff --git a/src/third_party/wiredtiger/src/config/test_config.c b/src/third_party/wiredtiger/src/config/test_config.c
index c517ba96f5a..7108eccbc20 100644
--- a/src/third_party/wiredtiger/src/config/test_config.c
+++ b/src/third_party/wiredtiger/src/config/test_config.c
@@ -2,109 +2,120 @@
#include "wt_internal.h"
+static const WT_CONFIG_CHECK confchk_checkpoint_manager_subconfigs[] = {
+ {"enabled", "boolean", NULL, NULL, NULL, 0}, {"op_rate", "string", NULL, NULL, NULL, 0},
+ {NULL, NULL, NULL, NULL, NULL, 0}};
+
static const WT_CONFIG_CHECK confchk_stat_cache_size_subconfigs[] = {
{"enabled", "boolean", NULL, NULL, NULL, 0}, {"limit", "int", NULL, "min=0", NULL, 0},
{NULL, NULL, NULL, NULL, NULL, 0}};
+static const WT_CONFIG_CHECK confchk_stat_db_size_subconfigs[] = {
+ {"enabled", "boolean", NULL, NULL, NULL, 0}, {"limit", "int", NULL, "min=0", NULL, 0},
+ {NULL, NULL, NULL, NULL, NULL, 0}};
+
static const WT_CONFIG_CHECK confchk_runtime_monitor_subconfigs[] = {
- {"enabled", "boolean", NULL, NULL, NULL, 0},
- {"interval", "string", NULL, "choices=[\"s\",\"m\",\"h\"]", NULL, 0},
- {"op_count", "int", NULL, "min=1,max=10000", NULL, 0},
+ {"enabled", "boolean", NULL, NULL, NULL, 0}, {"op_rate", "string", NULL, NULL, NULL, 0},
{"stat_cache_size", "category", NULL, NULL, confchk_stat_cache_size_subconfigs, 2},
+ {"stat_db_size", "category", NULL, NULL, confchk_stat_db_size_subconfigs, 2},
{NULL, NULL, NULL, NULL, NULL, 0}};
static const WT_CONFIG_CHECK confchk_timestamp_manager_subconfigs[] = {
{"enabled", "boolean", NULL, NULL, NULL, 0},
- {"interval", "string", NULL, "choices=[\"s\",\"m\",\"h\"]", NULL, 0},
{"oldest_lag", "int", NULL, "min=0,max=1000000", NULL, 0},
- {"op_count", "int", NULL, "min=1,max=10000", NULL, 0},
+ {"op_rate", "string", NULL, NULL, NULL, 0},
{"stable_lag", "int", NULL, "min=0,max=1000000", NULL, 0}, {NULL, NULL, NULL, NULL, NULL, 0}};
-static const WT_CONFIG_CHECK confchk_insert_config_subconfigs[] = {
- {"interval", "string", NULL, "choices=[\"s\",\"m\",\"h\"]", NULL, 0},
- {"key_size", "int", NULL, "min=0,max=10000", NULL, 0},
- {"op_count", "int", NULL, "min=1,max=10000", NULL, 0},
- {"value_size", "int", NULL, "min=0,max=1000000000", NULL, 0}, {NULL, NULL, NULL, NULL, NULL, 0}};
-
static const WT_CONFIG_CHECK confchk_ops_per_transaction_subconfigs[] = {
{"max", "string", NULL, NULL, NULL, 0}, {"min", "int", NULL, "min=0", NULL, 0},
{NULL, NULL, NULL, NULL, NULL, 0}};
+static const WT_CONFIG_CHECK confchk_insert_config_subconfigs[] = {
+ {"key_size", "int", NULL, "min=0,max=10000", NULL, 0}, {"op_rate", "string", NULL, NULL, NULL, 0},
+ {"ops_per_transaction", "category", NULL, NULL, confchk_ops_per_transaction_subconfigs, 2},
+ {"thread_count", "string", NULL, NULL, NULL, 0},
+ {"value_size", "int", NULL, "min=0,max=1000000000", NULL, 0}, {NULL, NULL, NULL, NULL, NULL, 0}};
+
+static const WT_CONFIG_CHECK confchk_read_config_subconfigs[] = {
+ {"op_rate", "string", NULL, NULL, NULL, 0},
+ {"ops_per_transaction", "category", NULL, NULL, confchk_ops_per_transaction_subconfigs, 2},
+ {"thread_count", "string", NULL, NULL, NULL, 0}, {NULL, NULL, NULL, NULL, NULL, 0}};
+
static const WT_CONFIG_CHECK confchk_update_config_subconfigs[] = {
- {"interval", "string", NULL, "choices=[\"s\",\"m\",\"h\"]", NULL, 0},
- {"key_size", "int", NULL, "min=0,max=10000", NULL, 0},
- {"op_count", "int", NULL, "min=1,max=10000", NULL, 0},
+ {"key_size", "int", NULL, "min=0,max=10000", NULL, 0}, {"op_rate", "string", NULL, NULL, NULL, 0},
+ {"ops_per_transaction", "category", NULL, NULL, confchk_ops_per_transaction_subconfigs, 2},
+ {"thread_count", "string", NULL, NULL, NULL, 0},
{"value_size", "int", NULL, "min=0,max=1000000000", NULL, 0}, {NULL, NULL, NULL, NULL, NULL, 0}};
static const WT_CONFIG_CHECK confchk_workload_generator_subconfigs[] = {
{"collection_count", "int", NULL, "min=0,max=200000", NULL, 0},
{"enabled", "boolean", NULL, NULL, NULL, 0},
- {"insert_config", "category", NULL, NULL, confchk_insert_config_subconfigs, 4},
- {"insert_threads", "int", NULL, "min=0,max=20", NULL, 0},
- {"interval", "string", NULL, "choices=[\"s\",\"m\",\"h\"]", NULL, 0},
- {"interval", "string", NULL, "choices=[\"s\",\"m\",\"h\"]", NULL, 0},
+ {"insert_config", "category", NULL, NULL, confchk_insert_config_subconfigs, 5},
{"key_count", "int", NULL, "min=0,max=1000000", NULL, 0},
- {"key_size", "int", NULL, "min=0,max=10000", NULL, 0},
- {"op_count", "int", NULL, "min=1,max=10000", NULL, 0},
- {"op_count", "int", NULL, "min=1,max=10000", NULL, 0},
- {"ops_per_transaction", "category", NULL, NULL, confchk_ops_per_transaction_subconfigs, 2},
- {"read_threads", "int", NULL, "min=0,max=100", NULL, 0},
- {"update_config", "category", NULL, NULL, confchk_update_config_subconfigs, 4},
- {"update_threads", "int", NULL, "min=0,max=20", NULL, 0},
+ {"key_size", "int", NULL, "min=0,max=10000", NULL, 0}, {"op_rate", "string", NULL, NULL, NULL, 0},
+ {"read_config", "category", NULL, NULL, confchk_read_config_subconfigs, 3},
+ {"update_config", "category", NULL, NULL, confchk_update_config_subconfigs, 5},
{"value_size", "int", NULL, "min=0,max=1000000000", NULL, 0}, {NULL, NULL, NULL, NULL, NULL, 0}};
static const WT_CONFIG_CHECK confchk_workload_tracking_subconfigs[] = {
- {"enabled", "boolean", NULL, NULL, NULL, 0},
- {"interval", "string", NULL, "choices=[\"s\",\"m\",\"h\"]", NULL, 0},
- {"op_count", "int", NULL, "min=1,max=10000", NULL, 0}, {NULL, NULL, NULL, NULL, NULL, 0}};
+ {"enabled", "boolean", NULL, NULL, NULL, 0}, {"op_rate", "string", NULL, NULL, NULL, 0},
+ {NULL, NULL, NULL, NULL, NULL, 0}};
static const WT_CONFIG_CHECK confchk_example_test[] = {
{"cache_size_mb", "int", NULL, "min=0,max=100000000000", NULL, 0},
+ {"checkpoint_manager", "category", NULL, NULL, confchk_checkpoint_manager_subconfigs, 2},
{"duration_seconds", "int", NULL, "min=0,max=1000000", NULL, 0},
{"enable_logging", "boolean", NULL, NULL, NULL, 0},
{"runtime_monitor", "category", NULL, NULL, confchk_runtime_monitor_subconfigs, 4},
- {"timestamp_manager", "category", NULL, NULL, confchk_timestamp_manager_subconfigs, 5},
- {"workload_generator", "category", NULL, NULL, confchk_workload_generator_subconfigs, 15},
- {"workload_tracking", "category", NULL, NULL, confchk_workload_tracking_subconfigs, 3},
+ {"timestamp_manager", "category", NULL, NULL, confchk_timestamp_manager_subconfigs, 4},
+ {"workload_generator", "category", NULL, NULL, confchk_workload_generator_subconfigs, 9},
+ {"workload_tracking", "category", NULL, NULL, confchk_workload_tracking_subconfigs, 2},
{NULL, NULL, NULL, NULL, NULL, 0}};
static const WT_CONFIG_CHECK confchk_poc_test[] = {
{"cache_size_mb", "int", NULL, "min=0,max=100000000000", NULL, 0},
+ {"checkpoint_manager", "category", NULL, NULL, confchk_checkpoint_manager_subconfigs, 2},
{"duration_seconds", "int", NULL, "min=0,max=1000000", NULL, 0},
{"enable_logging", "boolean", NULL, NULL, NULL, 0},
{"runtime_monitor", "category", NULL, NULL, confchk_runtime_monitor_subconfigs, 4},
- {"timestamp_manager", "category", NULL, NULL, confchk_timestamp_manager_subconfigs, 5},
- {"workload_generator", "category", NULL, NULL, confchk_workload_generator_subconfigs, 15},
- {"workload_tracking", "category", NULL, NULL, confchk_workload_tracking_subconfigs, 3},
+ {"timestamp_manager", "category", NULL, NULL, confchk_timestamp_manager_subconfigs, 4},
+ {"workload_generator", "category", NULL, NULL, confchk_workload_generator_subconfigs, 9},
+ {"workload_tracking", "category", NULL, NULL, confchk_workload_tracking_subconfigs, 2},
{NULL, NULL, NULL, NULL, NULL, 0}};
static const WT_CONFIG_ENTRY config_entries[] = {
{"example_test",
- "cache_size_mb=0,duration_seconds=0,enable_logging=false,"
- "runtime_monitor=(enabled=true,interval=s,op_count=1,"
- "stat_cache_size=(enabled=false,limit=0)),"
- "timestamp_manager=(enabled=true,interval=s,oldest_lag=1,"
- "op_count=1,stable_lag=1),workload_generator=(collection_count=1,"
- "enabled=true,insert_config=(interval=s,key_size=5,op_count=1,"
- "value_size=5),insert_threads=0,interval=s,interval=s,key_count=0"
- ",key_size=5,op_count=1,op_count=1,ops_per_transaction=(max=1,"
- "min=0),read_threads=0,update_config=(interval=s,key_size=5,"
- "op_count=1,value_size=5),update_threads=0,value_size=5),"
- "workload_tracking=(enabled=true,interval=s,op_count=1)",
- confchk_example_test, 7},
+ "cache_size_mb=0,checkpoint_manager=(enabled=false,op_rate=1s),"
+ "duration_seconds=0,enable_logging=false,"
+ "runtime_monitor=(enabled=true,op_rate=1s,"
+ "stat_cache_size=(enabled=false,limit=0),"
+ "stat_db_size=(enabled=false,limit=0)),"
+ "timestamp_manager=(enabled=true,oldest_lag=1,op_rate=1s,"
+ "stable_lag=1),workload_generator=(collection_count=1,"
+ "enabled=true,insert_config=(key_size=5,op_rate=1s,"
+ "ops_per_transaction=(max=1,min=0),thread_count=1,value_size=5),"
+ "key_count=0,key_size=5,op_rate=1s,read_config=(op_rate=1s,"
+ "ops_per_transaction=(max=1,min=0),thread_count=1),"
+ "update_config=(key_size=5,op_rate=1s,ops_per_transaction=(max=1,"
+ "min=0),thread_count=1,value_size=5),value_size=5),"
+ "workload_tracking=(enabled=true,op_rate=1s)",
+ confchk_example_test, 8},
{"poc_test",
- "cache_size_mb=0,duration_seconds=0,enable_logging=false,"
- "runtime_monitor=(enabled=true,interval=s,op_count=1,"
- "stat_cache_size=(enabled=false,limit=0)),"
- "timestamp_manager=(enabled=true,interval=s,oldest_lag=1,"
- "op_count=1,stable_lag=1),workload_generator=(collection_count=1,"
- "enabled=true,insert_config=(interval=s,key_size=5,op_count=1,"
- "value_size=5),insert_threads=0,interval=s,interval=s,key_count=0"
- ",key_size=5,op_count=1,op_count=1,ops_per_transaction=(max=1,"
- "min=0),read_threads=0,update_config=(interval=s,key_size=5,"
- "op_count=1,value_size=5),update_threads=0,value_size=5),"
- "workload_tracking=(enabled=true,interval=s,op_count=1)",
- confchk_poc_test, 7},
+ "cache_size_mb=0,checkpoint_manager=(enabled=false,op_rate=1s),"
+ "duration_seconds=0,enable_logging=false,"
+ "runtime_monitor=(enabled=true,op_rate=1s,"
+ "stat_cache_size=(enabled=false,limit=0),"
+ "stat_db_size=(enabled=false,limit=0)),"
+ "timestamp_manager=(enabled=true,oldest_lag=1,op_rate=1s,"
+ "stable_lag=1),workload_generator=(collection_count=1,"
+ "enabled=true,insert_config=(key_size=5,op_rate=1s,"
+ "ops_per_transaction=(max=1,min=0),thread_count=1,value_size=5),"
+ "key_count=0,key_size=5,op_rate=1s,read_config=(op_rate=1s,"
+ "ops_per_transaction=(max=1,min=0),thread_count=1),"
+ "update_config=(key_size=5,op_rate=1s,ops_per_transaction=(max=1,"
+ "min=0),thread_count=1,value_size=5),value_size=5),"
+ "workload_tracking=(enabled=true,op_rate=1s)",
+ confchk_poc_test, 8},
{NULL, NULL, NULL, 0}};
/*
diff --git a/src/third_party/wiredtiger/src/conn/conn_dhandle.c b/src/third_party/wiredtiger/src/conn/conn_dhandle.c
index 5e56a841de4..41654780528 100644
--- a/src/third_party/wiredtiger/src/conn/conn_dhandle.c
+++ b/src/third_party/wiredtiger/src/conn/conn_dhandle.c
@@ -70,6 +70,7 @@ __conn_dhandle_config_set(WT_SESSION_IMPL *session)
WT_ERR(__wt_calloc_def(session, 3, &dhandle->cfg));
switch (dhandle->type) {
case WT_DHANDLE_TYPE_BTREE:
+ case WT_DHANDLE_TYPE_TIERED:
/*
* We are stripping out all checkpoint related information from the config string. We save
* the rest of the metadata string, that is essentially static and unchanging and then
@@ -105,9 +106,6 @@ __conn_dhandle_config_set(WT_SESSION_IMPL *session)
case WT_DHANDLE_TYPE_TABLE:
WT_ERR(__wt_strdup(session, WT_CONFIG_BASE(session, table_meta), &dhandle->cfg[0]));
break;
- case WT_DHANDLE_TYPE_TIERED:
- WT_ERR(__wt_strdup(session, WT_CONFIG_BASE(session, tiered_meta), &dhandle->cfg[0]));
- break;
case WT_DHANDLE_TYPE_TIERED_TREE:
WT_ERR(__wt_strdup(session, WT_CONFIG_BASE(session, tier_meta), &dhandle->cfg[0]));
break;
@@ -148,6 +146,7 @@ __conn_dhandle_destroy(WT_SESSION_IMPL *session, WT_DATA_HANDLE *dhandle)
ret = __wt_schema_close_table(session, (WT_TABLE *)dhandle);
break;
case WT_DHANDLE_TYPE_TIERED:
+ WT_WITH_DHANDLE(session, dhandle, ret = __wt_btree_discard(session));
ret = __wt_tiered_close(session, (WT_TIERED *)dhandle);
break;
case WT_DHANDLE_TYPE_TIERED_TREE:
@@ -401,6 +400,8 @@ __wt_conn_dhandle_close(WT_SESSION_IMPL *session, bool final, bool mark_dead)
WT_TRET(__wt_schema_close_table(session, (WT_TABLE *)dhandle));
break;
case WT_DHANDLE_TYPE_TIERED:
+ WT_TRET(__wt_btree_close(session));
+ F_CLR(btree, WT_BTREE_SPECIAL_FLAGS);
WT_TRET(__wt_tiered_close(session, (WT_TIERED *)dhandle));
break;
case WT_DHANDLE_TYPE_TIERED_TREE:
@@ -562,6 +563,18 @@ __wt_conn_dhandle_open(WT_SESSION_IMPL *session, const char *cfg[], uint32_t fla
WT_ERR(__wt_schema_open_table(session));
break;
case WT_DHANDLE_TYPE_TIERED:
+ /* Set any special flags on the btree handle. */
+ F_SET(btree, LF_MASK(WT_BTREE_SPECIAL_FLAGS));
+
+ /*
+ * Allocate data-source statistics memory. We don't allocate that memory when allocating the
+ * data handle because not all data handles need statistics (for example, handles used for
+ * checkpoint locking). If we are reopening the handle, then it may already have statistics
+ * memory, check to avoid the leak.
+ */
+ if (dhandle->stat_array == NULL)
+ WT_ERR(__wt_stat_dsrc_init(session, dhandle));
+
WT_ERR(__wt_tiered_open(session, cfg));
break;
case WT_DHANDLE_TYPE_TIERED_TREE:
diff --git a/src/third_party/wiredtiger/src/conn/conn_handle.c b/src/third_party/wiredtiger/src/conn/conn_handle.c
index 1e64aa61846..bb5b7e27b1e 100644
--- a/src/third_party/wiredtiger/src/conn/conn_handle.c
+++ b/src/third_party/wiredtiger/src/conn/conn_handle.c
@@ -51,6 +51,7 @@ __wt_connection_init(WT_CONNECTION_IMPL *conn)
WT_SPIN_INIT_TRACKED(session, &conn->checkpoint_lock, checkpoint);
WT_RET(__wt_spin_init(session, &conn->encryptor_lock, "encryptor"));
WT_RET(__wt_spin_init(session, &conn->fh_lock, "file list"));
+ WT_RET(__wt_spin_init(session, &conn->flush_tier_lock, "flush tier"));
WT_SPIN_INIT_TRACKED(session, &conn->metadata_lock, metadata);
WT_RET(__wt_spin_init(session, &conn->reconfig_lock, "reconfigure"));
WT_SPIN_INIT_SESSION_TRACKED(session, &conn->schema_lock, schema);
@@ -116,6 +117,7 @@ __wt_connection_destroy(WT_CONNECTION_IMPL *conn)
__wt_rwlock_destroy(session, &conn->dhandle_lock);
__wt_spin_destroy(session, &conn->encryptor_lock);
__wt_spin_destroy(session, &conn->fh_lock);
+ __wt_spin_destroy(session, &conn->flush_tier_lock);
__wt_rwlock_destroy(session, &conn->hot_backup_lock);
__wt_spin_destroy(session, &conn->metadata_lock);
__wt_spin_destroy(session, &conn->reconfig_lock);
diff --git a/src/third_party/wiredtiger/src/conn/conn_tiered.c b/src/third_party/wiredtiger/src/conn/conn_tiered.c
index 24a4a5a16f7..a443802197c 100644
--- a/src/third_party/wiredtiger/src/conn/conn_tiered.c
+++ b/src/third_party/wiredtiger/src/conn/conn_tiered.c
@@ -20,25 +20,68 @@
#endif
/*
+ * __flush_tier_wait --
+ * Wait for all previous work units queued to be processed.
+ */
+static void
+__flush_tier_wait(WT_SESSION_IMPL *session)
+{
+ WT_CONNECTION_IMPL *conn;
+ int yield_count;
+
+ conn = S2C(session);
+ yield_count = 0;
+ /*
+ * The internal thread needs the schema lock to perform its operations and flush tier also
+ * acquires the schema lock. We cannot be waiting in this function while holding that lock or no
+ * work will get done.
+ */
+ WT_ASSERT(session, !FLD_ISSET(session->lock_flags, WT_SESSION_LOCKED_SCHEMA));
+
+ /*
+ * It may be worthwhile looking at the add and decrement values and make choices of whether to
+ * yield or wait based on how much of the workload has been performed. Flushing operations could
+ * take a long time so yielding may not be effective.
+ *
+ * TODO: We should consider a maximum wait value as a configuration setting. If we add one, then
+ * this function returns an int and this loop would check how much time we've waited and break
+ * out with EBUSY.
+ */
+ while (!WT_FLUSH_STATE_DONE(conn->flush_state)) {
+ if (++yield_count < WT_THOUSAND)
+ __wt_yield();
+ else
+ __wt_cond_wait(session, conn->flush_cond, 200, NULL);
+ }
+}
+
+/*
* __flush_tier_once --
* Perform one iteration of tiered storage maintenance.
*/
static int
-__flush_tier_once(WT_SESSION_IMPL *session, bool force)
+__flush_tier_once(WT_SESSION_IMPL *session, uint32_t flags)
{
WT_CURSOR *cursor;
WT_DECL_RET;
const char *key, *value;
- WT_UNUSED(force);
+ WT_UNUSED(flags);
__wt_verbose(session, WT_VERB_TIERED, "%s", "FLUSH_TIER_ONCE: Called");
+
+ cursor = NULL;
/*
- * - See if there is any "merging" work to do to prepare and create an object that is
+ * For supporting splits and merge:
+ * - See if there is any merging work to do to prepare and create an object that is
* suitable for placing onto tiered storage.
* - Do the work to create said objects.
* - Move the objects.
*/
- cursor = NULL;
+ S2C(session)->flush_state = 0;
+
+ /*
+ * XXX: Is it sufficient to walk the metadata cursor? If it is, why doesn't checkpoint do that?
+ */
WT_RET(__wt_metadata_cursor(session, &cursor));
while (cursor->next(cursor) == 0) {
cursor->get_key(cursor, &key);
@@ -46,6 +89,7 @@ __flush_tier_once(WT_SESSION_IMPL *session, bool force)
/* For now just switch tiers which just does metadata manipulation. */
if (WT_PREFIX_MATCH(key, "tiered:")) {
__wt_verbose(session, WT_VERB_TIERED, "FLUSH_TIER_ONCE: %s %s", key, value);
+ /* Is this instantiating every handle even if it is not opened or in use? */
WT_ERR(__wt_session_get_dhandle(session, key, NULL, NULL, WT_DHANDLE_EXCLUSIVE));
/*
* When we call wt_tiered_switch the session->dhandle points to the tiered: entry and
@@ -134,9 +178,9 @@ __tier_flush_meta(
uint64_t now;
char *newconfig, *obj_value;
const char *cfg[3] = {NULL, NULL, NULL};
- bool tracking;
+ bool release, tracking;
- tracking = false;
+ release = tracking = false;
WT_RET(__wt_scr_alloc(session, 512, &buf));
dhandle = &tiered->iface;
@@ -145,6 +189,7 @@ __tier_flush_meta(
tracking = true;
WT_ERR(__wt_session_get_dhandle(session, dhandle->name, NULL, NULL, WT_DHANDLE_EXCLUSIVE));
+ release = true;
/*
* Once the flush call succeeds we want to first remove the file: entry from the metadata and
* then update the object: metadata to indicate the flush is complete.
@@ -162,7 +207,8 @@ __tier_flush_meta(
err:
__wt_free(session, newconfig);
- WT_TRET(__wt_session_release_dhandle(session));
+ if (release)
+ WT_TRET(__wt_session_release_dhandle(session));
__wt_scr_free(session, &buf);
if (tracking)
WT_TRET(__wt_meta_track_off(session, true, ret != 0));
@@ -180,6 +226,7 @@ __wt_tier_do_flush(
WT_DECL_RET;
WT_FILE_SYSTEM *bucket_fs;
WT_STORAGE_SOURCE *storage_source;
+ uint32_t msec, retry;
const char *local_name, *obj_name;
storage_source = tiered->bstorage->storage_source;
@@ -194,8 +241,21 @@ __wt_tier_do_flush(
WT_RET(storage_source->ss_flush(
storage_source, &session->iface, bucket_fs, local_name, obj_name, NULL));
- WT_WITH_CHECKPOINT_LOCK(session,
- WT_WITH_SCHEMA_LOCK(session, ret = __tier_flush_meta(session, tiered, local_uri, obj_uri)));
+ /*
+ * Flushing the metadata grabs the data handle with exclusive access, and the data handle may be
+ * held by the thread that queues the flush tier work item. As a result, the handle may be busy,
+ * so retry as needed, up to a few seconds.
+ */
+ for (msec = 10, retry = 0; msec < 3000; msec *= 2, retry++) {
+ if (retry != 0)
+ __wt_sleep(0, msec * WT_THOUSAND);
+ WT_WITH_CHECKPOINT_LOCK(session,
+ WT_WITH_SCHEMA_LOCK(
+ session, ret = __tier_flush_meta(session, tiered, local_uri, obj_uri)));
+ if (ret != EBUSY)
+ break;
+ WT_STAT_CONN_INCR(session, flush_tier_busy);
+ }
WT_RET(ret);
/*
@@ -212,7 +272,7 @@ __wt_tier_do_flush(
* Given an ID generate the URI names and call the flush code.
*/
int
-__wt_tier_flush(WT_SESSION_IMPL *session, WT_TIERED *tiered, uint64_t id)
+__wt_tier_flush(WT_SESSION_IMPL *session, WT_TIERED *tiered, uint32_t id)
{
WT_DECL_RET;
const char *local_uri, *obj_uri;
@@ -254,13 +314,13 @@ __tier_storage_copy(WT_SESSION_IMPL *session)
/*
* We are responsible for freeing the work unit when we're done with it.
*/
- __wt_free(session, entry);
+ __wt_tiered_work_free(session, entry);
entry = NULL;
}
err:
if (entry != NULL)
- __wt_free(session, entry);
+ __wt_tiered_work_free(session, entry);
return (ret);
}
@@ -290,22 +350,61 @@ int
__wt_flush_tier(WT_SESSION_IMPL *session, const char *config)
{
WT_CONFIG_ITEM cval;
+ WT_CONNECTION_IMPL *conn;
WT_DECL_RET;
+ uint32_t flags;
const char *cfg[3];
- bool force;
+ bool wait;
+ conn = S2C(session);
WT_STAT_CONN_INCR(session, flush_tier);
- if (FLD_ISSET(S2C(session)->server_flags, WT_CONN_SERVER_TIERED_MGR))
+ if (FLD_ISSET(conn->server_flags, WT_CONN_SERVER_TIERED_MGR))
WT_RET_MSG(
session, EINVAL, "Cannot call flush_tier when storage manager thread is configured");
+ flags = 0;
cfg[0] = WT_CONFIG_BASE(session, WT_SESSION_flush_tier);
cfg[1] = (char *)config;
cfg[2] = NULL;
WT_RET(__wt_config_gets(session, cfg, "force", &cval));
- force = cval.val != 0;
+ if (cval.val)
+ LF_SET(WT_FLUSH_TIER_FORCE);
+ WT_RET(__wt_config_gets_def(session, cfg, "sync", 0, &cval));
+ if (WT_STRING_MATCH("off", cval.str, cval.len))
+ LF_SET(WT_FLUSH_TIER_OFF);
+ else if (WT_STRING_MATCH("on", cval.str, cval.len))
+ LF_SET(WT_FLUSH_TIER_ON);
+
+ WT_RET(__wt_config_gets(session, cfg, "lock_wait", &cval));
+ if (cval.val)
+ wait = true;
+ else
+ wait = false;
+
+ /*
+ * We have to hold the lock around both the wait call for a previous flush tier and the
+ * execution of the current flush tier call.
+ */
+ if (wait)
+ __wt_spin_lock(session, &conn->flush_tier_lock);
+ else
+ WT_RET(__wt_spin_trylock(session, &conn->flush_tier_lock));
- WT_WITH_SCHEMA_LOCK(session, ret = __flush_tier_once(session, force));
+ /*
+ * We cannot perform another flush tier until any earlier ones are done. Often threads will wait
+ * after the flush tier based on the sync setting so this check will be fast. But if sync is
+ * turned off then any following call must wait and will do so here. We have to wait while not
+ * holding the schema lock.
+ */
+ __flush_tier_wait(session);
+ if (wait)
+ WT_WITH_SCHEMA_LOCK(session, ret = __flush_tier_once(session, flags));
+ else
+ WT_WITH_SCHEMA_LOCK_NOWAIT(session, ret, ret = __flush_tier_once(session, flags));
+ __wt_spin_unlock(session, &conn->flush_tier_lock);
+
+ if (ret == 0 && LF_ISSET(WT_FLUSH_TIER_ON))
+ __flush_tier_wait(session);
return (ret);
}
@@ -455,8 +554,10 @@ __tiered_mgr_server(void *arg)
/*
* Here is where we do work. Work we expect to do:
*/
- WT_WITH_SCHEMA_LOCK(session, ret = __flush_tier_once(session, false));
+ WT_WITH_SCHEMA_LOCK(session, ret = __flush_tier_once(session, 0));
WT_ERR(ret);
+ if (ret == 0)
+ __flush_tier_wait(session);
WT_ERR(__tier_storage_remove(session, false));
}
@@ -507,6 +608,7 @@ __wt_tiered_storage_create(WT_SESSION_IMPL *session, const char *cfg[])
WT_RET(__tiered_manager_config(session, cfg, &start));
/* Start the internal thread. */
+ WT_ERR(__wt_cond_alloc(session, "flush tier", &conn->flush_cond));
WT_ERR(__wt_cond_alloc(session, "storage server", &conn->tiered_cond));
FLD_SET(conn->server_flags, WT_CONN_SERVER_TIERED);
@@ -543,17 +645,19 @@ __wt_tiered_storage_destroy(WT_SESSION_IMPL *session)
conn = S2C(session);
/* Stop the internal server thread. */
+ if (conn->flush_cond != NULL)
+ __wt_cond_signal(session, conn->flush_cond);
FLD_CLR(conn->server_flags, WT_CONN_SERVER_TIERED | WT_CONN_SERVER_TIERED_MGR);
if (conn->tiered_tid_set) {
+ WT_ASSERT(session, conn->tiered_cond != NULL);
__wt_cond_signal(session, conn->tiered_cond);
WT_TRET(__wt_thread_join(session, &conn->tiered_tid));
conn->tiered_tid_set = false;
while ((entry = TAILQ_FIRST(&conn->tieredqh)) != NULL) {
TAILQ_REMOVE(&conn->tieredqh, entry, q);
- __wt_free(session, entry);
+ __wt_tiered_work_free(session, entry);
}
}
- __wt_cond_destroy(session, &conn->tiered_cond);
if (conn->tiered_session != NULL) {
WT_TRET(__wt_session_close_internal(conn->tiered_session));
conn->tiered_session = NULL;
@@ -561,11 +665,16 @@ __wt_tiered_storage_destroy(WT_SESSION_IMPL *session)
/* Stop the storage manager thread. */
if (conn->tiered_mgr_tid_set) {
+ WT_ASSERT(session, conn->tiered_mgr_cond != NULL);
__wt_cond_signal(session, conn->tiered_mgr_cond);
WT_TRET(__wt_thread_join(session, &conn->tiered_mgr_tid));
conn->tiered_mgr_tid_set = false;
}
+ /* Destroy all condition variables after threads have stopped. */
+ __wt_cond_destroy(session, &conn->tiered_cond);
__wt_cond_destroy(session, &conn->tiered_mgr_cond);
+ /* The flush condition variable must be last because any internal thread could be using it. */
+ __wt_cond_destroy(session, &conn->flush_cond);
if (conn->tiered_mgr_session != NULL) {
WT_TRET(__wt_session_close_internal(conn->tiered_mgr_session));
diff --git a/src/third_party/wiredtiger/src/cursor/cur_hs.c b/src/third_party/wiredtiger/src/cursor/cur_hs.c
index b494090af14..299c06a4fa7 100644
--- a/src/third_party/wiredtiger/src/cursor/cur_hs.c
+++ b/src/third_party/wiredtiger/src/cursor/cur_hs.c
@@ -887,7 +887,19 @@ __curhs_insert(WT_CURSOR *cursor)
*/
__wt_cursor_disable_bulk(session);
- /* Allocate a tombstone only when there is a valid stop time point. */
+ /*
+ * The actual record to be inserted into the history store. Set the current update start time
+ * point as the commit time point to the history store record.
+ */
+ WT_ERR(__wt_upd_alloc(session, &file_cursor->value, WT_UPDATE_STANDARD, &hs_upd, NULL));
+ hs_upd->start_ts = hs_cursor->time_window.start_ts;
+ hs_upd->durable_ts = hs_cursor->time_window.durable_start_ts;
+ hs_upd->txnid = hs_cursor->time_window.start_txn;
+
+ /*
+ * Allocate a tombstone only when there is a valid stop time point, and insert the standard
+ * update as the update after the tombstone.
+ */
if (WT_TIME_WINDOW_HAS_STOP(&hs_cursor->time_window)) {
/*
* Insert a delete record to represent stop time point for the actual record to be inserted.
@@ -897,32 +909,20 @@ __curhs_insert(WT_CURSOR *cursor)
hs_tombstone->start_ts = hs_cursor->time_window.stop_ts;
hs_tombstone->durable_ts = hs_cursor->time_window.durable_stop_ts;
hs_tombstone->txnid = hs_cursor->time_window.stop_txn;
- }
- /*
- * Append to the delete record, the actual record to be inserted into the history store. Set the
- * current update start time point as the commit time point to the history store record.
- */
- WT_ERR(__wt_upd_alloc(session, &file_cursor->value, WT_UPDATE_STANDARD, &hs_upd, NULL));
- hs_upd->start_ts = hs_cursor->time_window.start_ts;
- hs_upd->durable_ts = hs_cursor->time_window.durable_start_ts;
- hs_upd->txnid = hs_cursor->time_window.start_txn;
-
- /* Insert the standard update as next update if there is a tombstone. */
- if (hs_tombstone != NULL) {
hs_tombstone->next = hs_upd;
hs_upd = hs_tombstone;
}
-retry:
- /* Search the page and insert the updates. */
- WT_WITH_PAGE_INDEX(session, ret = __curhs_search(cbt, true));
- WT_ERR(ret);
- ret = __wt_hs_modify(cbt, hs_upd);
- if (ret == WT_RESTART)
- goto retry;
+ do {
+ WT_WITH_PAGE_INDEX(session, ret = __curhs_search(cbt, true));
+ WT_ERR(ret);
+ } while ((ret = __wt_hs_modify(cbt, hs_upd)) == WT_RESTART);
WT_ERR(ret);
+ /* We no longer own the update memory, the page does; don't free it under any circumstances. */
+ hs_tombstone = hs_upd = NULL;
+
/*
* Mark the insert as successful. Even if one of the calls below fails, some callers will still
* need to know whether the actual insert went through or not.
@@ -931,17 +931,16 @@ retry:
#ifdef HAVE_DIAGNOSTIC
/* Do a search again and call next to check the key order. */
- WT_WITH_PAGE_INDEX(session, ret = __curhs_search(cbt, true));
+ WT_WITH_PAGE_INDEX(session, ret = __curhs_search(cbt, false));
WT_ASSERT(session, ret == 0);
- WT_ERR_NOTFOUND_OK(__curhs_file_cursor_next(session, file_cursor), false);
+ if (cbt->compare == 0)
+ WT_ERR_NOTFOUND_OK(__curhs_file_cursor_next(session, file_cursor), false);
#endif
/* Insert doesn't maintain a position across calls, clear resources. */
- if (0) {
err:
- __wt_free(session, hs_tombstone);
- __wt_free(session, hs_upd);
- }
+ __wt_free(session, hs_tombstone);
+ __wt_free(session, hs_upd);
WT_TRET(cursor->reset(cursor));
API_END_RET(session, ret);
}
diff --git a/src/third_party/wiredtiger/src/docs/Doxyfile b/src/third_party/wiredtiger/src/docs/Doxyfile
index 80b1fc3b2f6..97487950586 100644
--- a/src/third_party/wiredtiger/src/docs/Doxyfile
+++ b/src/third_party/wiredtiger/src/docs/Doxyfile
@@ -1,104 +1,130 @@
-# Doxyfile 1.8.2
+# Doxyfile 1.8.17
# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project
+# doxygen (www.doxygen.org) for a project.
#
-# All text after a hash (#) is considered a comment and will be ignored
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
# The format is:
-# TAG = value [value, ...]
-# For lists items can also be appended using:
-# TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ")
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all
+# This tag specifies the encoding used for all characters in the configuration
+# file that follow. The default is UTF-8 which is also the encoding used for all
# text before the first occurrence of this tag. Doxygen uses libiconv (or the
# iconv built into libc) for the transcoding. See
-# http://www.gnu.org/software/libiconv for the list of possible encodings.
+# https://www.gnu.org/software/libiconv/ for the list of possible encodings.
+# The default value is: UTF-8.
DOXYFILE_ENCODING = UTF-8
-# The PROJECT_NAME tag is a single word (or sequence of words) that should
-# identify the project. Note that if you do not use Doxywizard you need
-# to put quotes around the project name if it contains spaces.
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
PROJECT_NAME = WiredTiger
-# The PROJECT_NUMBER tag can be used to enter a project or revision number.
-# This could be handy for archiving the generated documentation or
-# if some version control system is used.
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
PROJECT_NUMBER = "Version 1.0"
# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give viewer
-# a quick idea about the purpose of the project. Keep the description short.
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
PROJECT_BRIEF =
-# With the PROJECT_LOGO tag one can specify an logo or icon that is
-# included in the documentation. The maximum height of the logo should not
-# exceed 55 pixels and the maximum width should not exceed 200 pixels.
-# Doxygen will copy the logo to the output directory.
+# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
+# in the documentation. The maximum height of the logo should not exceed 55
+# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
+# the logo to the output directory.
PROJECT_LOGO = images/LogoFinal-header.png
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
-# base path where the generated documentation will be put.
-# If a relative path is entered, it will be relative to the location
-# where doxygen was started. If left blank the current directory will be used.
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
OUTPUT_DIRECTORY = ../../docs
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
-# 4096 sub-directories (in 2 levels) under the output directory of each output
-# format and will distribute the generated files over these directories.
-# Enabling this option can be useful when feeding doxygen a huge amount of
-# source files, where putting all generated files in the same directory would
-# otherwise cause performance problems for the file system.
+# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise causes
+# performance problems for the file system.
+# The default value is: NO.
CREATE_SUBDIRS = NO
+# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+# characters to appear in the names of generated files. If set to NO, non-ASCII
+# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+# U+3044.
+# The default value is: NO.
+
+ALLOW_UNICODE_NAMES = NO
+
# The OUTPUT_LANGUAGE tag is used to specify the language in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all constant output in the proper language.
-# The default language is English, other supported languages are:
-# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
-# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
-# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
-# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
-# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
-# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
OUTPUT_LANGUAGE = English
-# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
-# include brief member descriptions after the members that are listed in
-# the file and class documentation (similar to JavaDoc).
-# Set to NO to disable this.
+# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all generated output in the proper direction.
+# Possible values are: None, LTR, RTL and Context.
+# The default value is: None.
+
+OUTPUT_TEXT_DIRECTION = None
+
+# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
BRIEF_MEMBER_DESC = YES
-# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
-# the brief description of a member or function before the detailed description.
-# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
# brief descriptions will be completely suppressed.
+# The default value is: YES.
REPEAT_BRIEF = YES
-# This tag implements a quasi-intelligent brief description abbreviator
-# that is used to form the text in various listings. Each string
-# in this list, if found as the leading text of the brief description, will be
-# stripped from the text and the result after processing the whole list, is
-# used as the annotated text. Otherwise, the brief description is used as-is.
-# If left blank, the following values are used ("$name" is automatically
-# replaced with the name of the entity): "The $name class" "The $name widget"
-# "The $name file" "is" "provides" "specifies" "contains"
-# "represents" "a" "an" "the"
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
ABBREVIATE_BRIEF = "The $name class" \
"The $name widget" \
@@ -113,8 +139,9 @@ ABBREVIATE_BRIEF = "The $name class" \
the
# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# Doxygen will generate a detailed section even if there is only a brief
+# doxygen will generate a detailed section even if there is only a brief
# description.
+# The default value is: NO.
ALWAYS_DETAILED_SEC = NO
@@ -122,267 +149,345 @@ ALWAYS_DETAILED_SEC = NO
# inherited members of a class in the documentation of that class as if those
# members were ordinary class members. Constructors, destructors and assignment
# operators of the base classes will not be shown.
+# The default value is: NO.
INLINE_INHERITED_MEMB = NO
-# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
-# path before files name in the file list and in the header files. If set
-# to NO the shortest path that makes the file name unique will be used.
+# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
FULL_PATH_NAMES = NO
-# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
-# can be used to strip a user-defined part of the path. Stripping is
-# only done if one of the specified strings matches the left-hand part of
-# the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the
-# path to strip. Note that you specify absolute paths here, but also
-# relative paths, which will be relative from the directory where doxygen is
-# started.
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
STRIP_FROM_PATH =
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
-# the path mentioned in the documentation of a class, which tells
-# the reader which header file to include in order to use a class.
-# If left blank only the name of the header file containing the class
-# definition is used. Otherwise one should specify the include paths that
-# are normally passed to the compiler using the -I flag.
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
STRIP_FROM_INC_PATH = ../include/
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
-# (but less readable) file names. This can be useful if your file system
-# doesn't support long names like on DOS, Mac, or CD-ROM.
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful is your file systems doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
SHORT_NAMES = NO
-# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
-# will interpret the first line (until the first dot) of a JavaDoc-style
-# comment as the brief description. If set to NO, the JavaDoc
-# comments will behave just like regular Qt-style comments
-# (thus requiring an explicit @brief command for a brief description.)
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
JAVADOC_AUTOBRIEF = NO
-# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
-# interpret the first line (until the first dot) of a Qt-style
-# comment as the brief description. If set to NO, the comments
-# will behave just like regular Qt-style comments (thus requiring
-# an explicit \brief command for a brief description.)
+# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line
+# such as
+# /***************
+# as being the beginning of a Javadoc-style comment "banner". If set to NO, the
+# Javadoc-style will behave just like regular comments and it will not be
+# interpreted by doxygen.
+# The default value is: NO.
+
+JAVADOC_BANNER = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
QT_AUTOBRIEF = YES
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
-# treat a multi-line C++ special comment block (i.e. a block of //! or ///
-# comments) as a brief description. This used to be the default behavior.
-# The new default is to treat a multi-line C++ comment block as a detailed
-# description. Set this tag to YES if you prefer the old behavior instead.
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
MULTILINE_CPP_IS_BRIEF = NO
-# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
-# member inherits the documentation from any documented member that it
-# re-implements.
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
INHERIT_DOCS = YES
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
-# a new page for each member. If set to NO, the documentation of a member will
-# be part of the file/class/namespace that contains it.
+# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
+# page for each member. If set to NO, the documentation of a member will be part
+# of the file/class/namespace that contains it.
+# The default value is: NO.
SEPARATE_MEMBER_PAGES = NO
-# The TAB_SIZE tag can be used to set the number of spaces in a tab.
-# Doxygen uses this value to replace tabs by spaces in code fragments.
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
TAB_SIZE = 8
-# This tag can be used to specify a number of aliases that acts
-# as commands in the documentation. An alias has the form "name=value".
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to
-# put the command \sideeffect (or @sideeffect) in the documentation, which
-# will result in a user-defined paragraph with heading "Side Effects:".
-# You can put \n's in the value part of an alias to insert newlines.
-
-ALIASES = \
- arch_page_table{2}="<div class="arch_head"><table><tr><th rowspan=2 style=\"width:10%;\">@htmlonly <div><a href=\"arch-index.html\"><img class=\"arch_thumbnail\" src=\"wt_diagram.png\" style=\"background-image: url(wt_diagram.png)\"></a></div>@endhtmlonly</th><th style=\"width:44%\">Data Structures</th><th style=\"width:45%\">Source Location</th></tr><tr><td><code>\1</code></td><td><code>\2</code></td></tr></table></div>" \
- arch_page_top{2}="@page \1 \2 (Architecture Guide)" \
- config{3}=" @row{<tt>\1</tt>,\2,\3}" \
- configempty{2}="@param config configuration string, see @ref config_strings. No values currently permitted." \
- configend=" </table>" \
- configstart{2}="@param config configuration string, see @ref config_strings. Permitted values:\n <table>@hrow{Name,Effect,Values}" \
- ebusy_errors="@returns zero on success, EBUSY if the object is not available for exclusive access, and a non-zero error code on failure. See @ref error_handling \"Error handling\" for details." \
- errors="@returns zero on success and a non-zero error code on failure. See @ref error_handling \"Error handling\" for details." \
- exclusive="This method requires exclusive access to the specified data source(s). If any cursors are open with the specified name(s) or a data source is otherwise in use, the call will fail and return \c EBUSY.\n\n" \
- not_transactional="This method is not transactional, and will not guarantee ACID properties, see @ref transactions for more details." \
- ex_ref{1}="@ref \1 \"\1\"" \
- hrow{1}="<tr><th>\1</th></tr>" \
- hrow{2}="<tr><th>\1</th><th>\2</th></tr>" \
- hrow{3}="<tr><th>\1</th><th>\2</th><th>\3</th></tr>" \
- hrow{4}="<tr><th>\1</th><th>\2</th><th>\3</th><th>\4</th></tr>" \
- hrow{5}="<tr><th>\1</th><th>\2</th><th>\3</th><th>\4</th><th>\5</th></tr>" \
- hrow{6}="<tr><th>\1</th><th>\2</th><th>\3</th><th>\4</th><th>\5</th><th>\6</th></tr>" \
- hrow{7}="<tr><th>\1</th><th>\2</th><th>\3</th><th>\4</th><th>\5</th><th>\6</th><th>\7</th></tr>" \
- hrow{8}="<tr><th>\1</th><th>\2</th><th>\3</th><th>\4</th><th>\5</th><th>\6</th><th>\7</th><th>\8</th></tr>" \
- hrow{9}="<tr><th>\1</th><th>\2</th><th>\3</th><th>\4</th><th>\5</th><th>\6</th><th>\7</th><th>\8</th><th>\9</th></tr>" \
- notyet{1}="Note: <b>"\1"</b> not yet supported in WiredTiger.\n@todo fix when \1 supported\n\n" \
- plantuml_end="PlantUML template end -->" \
- plantuml_start{1}="\image html \1\n\image latex \1\n<!-- PlantUML template begins" \
- requires_notransaction="This method must not be called on a session with an active transaction.\n\n" \
- requires_transaction="This method must be called on a session with an active transaction.\n\n" \
- ref_single="@ref" \
- row{1}="<tr><td>\1</td></tr>" \
- row{2}="<tr><td>\1</td><td>\2</td></tr>" \
- row{3}="<tr><td>\1</td><td>\2</td><td>\3</td></tr>" \
- row{4}="<tr><td>\1</td><td>\2</td><td>\3</td><td>\4</td></tr>" \
- row{5}="<tr><td>\1</td><td>\2</td><td>\3</td><td>\4</td><td>\5</td></tr>" \
- row{6}="<tr><td>\1</td><td>\2</td><td>\3</td><td>\4</td><td>\5</td><td>\6</td></tr>" \
- row{7}="<tr><td>\1</td><td>\2</td><td>\3</td><td>\4</td><td>\5</td><td>\6</td><td>\7</td></tr>" \
- row{8}="<tr><td>\1</td><td>\2</td><td>\3</td><td>\4</td><td>\5</td><td>\6</td><td>\7</td><td>\8</td></tr>" \
- row{9}="<tr><td>\1</td><td>\2</td><td>\3</td><td>\4</td><td>\5</td><td>\6</td><td>\7</td><td>\8</td><td>\9</td></tr>" \
- subpage_single="@subpage" \
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines (in the resulting output). You can put ^^ in the value part of an
+# alias to insert a newline as if a physical newline was in the original file.
+# When you need a literal { or } or , in the value part of an alias you have to
+# escape them by means of a backslash (\), this can lead to conflicts with the
+# commands \{ and \} for these it is advised to use the version @{ and @} or use
+# a double escape (\\{ and \\})
+
+ALIASES = "arch_page_table{2}=<div class= arch_head><table><tr><th rowspan=2 style=\"width:10%;\">@htmlonly <div><a href=\"arch-index.html\"><img class=\"arch_thumbnail\" src=\"wt_diagram.png\" style=\"background-image: url(wt_diagram.png)\"></a></div>@endhtmlonly</th><th style=\"width:44%\">Data Structures</th><th style=\"width:45%\">Source Location</th></tr><tr><td><code>\1</code></td><td><code>\2</code></td></tr></table></div>" \
+ "arch_page_top{2}=@page \1 \2 (Architecture Guide)" \
+ "arch_page_caution=<b>Caution: the Architecture Guide is not updated in lockstep with the code base and is not necessarily correct or complete for any specific release.</b>" \
+ "config{3}= @row{<tt>\1</tt>,\2,\3}" \
+ "configempty{2}=@param config configuration string, see @ref config_strings. No values currently permitted." \
+ "configend= </table>" \
+ "configstart{2}=@param config configuration string, see @ref config_strings. Permitted values:\n <table>@hrow{Name,Effect,Values}" \
+ "ebusy_errors=@returns zero on success, EBUSY if the object is not available for exclusive access, and a non-zero error code on failure. See @ref error_handling \"Error handling\" for details." \
+ "errors=@returns zero on success and a non-zero error code on failure. See @ref error_handling \"Error handling\" for details." \
+ "exclusive=This method requires exclusive access to the specified data source(s). If any cursors are open with the specified name(s) or a data source is otherwise in use, the call will fail and return \c EBUSY.\n\n" \
+ "not_transactional=This method is not transactional, and will not guarantee ACID properties, see @ref transactions for more details." \
+ "ex_ref{1}=@ref \1 \"\1\"" \
+ "hrow{1}=<tr><th>\1</th></tr>" \
+ "hrow{2}=<tr><th>\1</th><th>\2</th></tr>" \
+ "hrow{3}=<tr><th>\1</th><th>\2</th><th>\3</th></tr>" \
+ "hrow{4}=<tr><th>\1</th><th>\2</th><th>\3</th><th>\4</th></tr>" \
+ "hrow{5}=<tr><th>\1</th><th>\2</th><th>\3</th><th>\4</th><th>\5</th></tr>" \
+ "hrow{6}=<tr><th>\1</th><th>\2</th><th>\3</th><th>\4</th><th>\5</th><th>\6</th></tr>" \
+ "hrow{7}=<tr><th>\1</th><th>\2</th><th>\3</th><th>\4</th><th>\5</th><th>\6</th><th>\7</th></tr>" \
+ "hrow{8}=<tr><th>\1</th><th>\2</th><th>\3</th><th>\4</th><th>\5</th><th>\6</th><th>\7</th><th>\8</th></tr>" \
+ "hrow{9}=<tr><th>\1</th><th>\2</th><th>\3</th><th>\4</th><th>\5</th><th>\6</th><th>\7</th><th>\8</th><th>\9</th></tr>" \
+ "notyet{1}=Note: <b> \1</b> not yet supported in WiredTiger.\n@todo fix when \1 supported\n\n" \
+ "plantuml_end=PlantUML template end -->" \
+ "plantuml_start{1}=\image html \1^^\image latex \1^^<!-- PlantUML template begins" \
+ "requires_notransaction=This method must not be called on a session with an active transaction.\n\n" \
+ "requires_transaction=This method must be called on a session with an active transaction.\n\n" \
+ "ref_single=@ref" \
+ "row{1}=<tr><td>\1</td></tr>" \
+ "row{2}=<tr><td>\1</td><td>\2</td></tr>" \
+ "row{3}=<tr><td>\1</td><td>\2</td><td>\3</td></tr>" \
+ "row{4}=<tr><td>\1</td><td>\2</td><td>\3</td><td>\4</td></tr>" \
+ "row{5}=<tr><td>\1</td><td>\2</td><td>\3</td><td>\4</td><td>\5</td></tr>" \
+ "row{6}=<tr><td>\1</td><td>\2</td><td>\3</td><td>\4</td><td>\5</td><td>\6</td></tr>" \
+ "row{7}=<tr><td>\1</td><td>\2</td><td>\3</td><td>\4</td><td>\5</td><td>\6</td><td>\7</td></tr>" \
+ "row{8}=<tr><td>\1</td><td>\2</td><td>\3</td><td>\4</td><td>\5</td><td>\6</td><td>\7</td><td>\8</td></tr>" \
+ "row{9}=<tr><td>\1</td><td>\2</td><td>\3</td><td>\4</td><td>\5</td><td>\6</td><td>\7</td><td>\8</td><td>\9</td></tr>" \
+ "subpage_single=@subpage"
# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding
-# "class=itcl::class" will allow you to use the command class in the
-# itcl::class meaning.
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
TCL_SUBST =
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
-# sources only. Doxygen will then generate output that is more tailored for C.
-# For instance, some of the names that are used will be different. The list
-# of all members will be omitted, etc.
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
OPTIMIZE_OUTPUT_FOR_C = NO
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
-# sources only. Doxygen will then generate output that is more tailored for
-# Java. For instance, namespaces will be presented as packages, qualified
-# scopes will look different, etc.
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
OPTIMIZE_OUTPUT_JAVA = YES
# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources only. Doxygen will then generate output that is more tailored for
-# Fortran.
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
OPTIMIZE_FOR_FORTRAN = NO
# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for
-# VHDL.
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
OPTIMIZE_OUTPUT_VHDL = NO
+# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice
+# sources only. Doxygen will then generate output that is more tailored for that
+# language. For instance, namespaces will be presented as modules, types will be
+# separated into more groups, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_SLICE = NO
+
# Doxygen selects the parser to use depending on the extension of the files it
# parses. With this tag you can assign which parser to use for a given
# extension. Doxygen has a built-in mapping, but you can override or extend it
-# using this tag. The format is ext=language, where ext is a file extension,
-# and language is one of the parsers supported by doxygen: IDL, Java,
-# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C,
-# C++. For instance to make doxygen treat .inc files as Fortran files (default
-# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note
-# that for custom extensions you also need to set FILE_PATTERNS otherwise the
-# files are not read by doxygen.
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
+# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice,
+# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
+# FortranFree, unknown formatted Fortran: Fortran. In the latter case the parser
+# tries to guess whether the code is fixed or free formatted code, this is the
+# default for Fortran type files), VHDL, tcl. For instance to make doxygen treat
+# .inc files as Fortran files (default is PHP), and .f files as C (default is
+# Fortran), use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
EXTENSION_MAPPING = in=C
-# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
-# comments according to the Markdown format, which allows for more readable
-# documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you
-# can mix doxygen, HTML, and XML commands with Markdown formatting.
-# Disable only in case of backward compatibilities issues.
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See https://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
MARKDOWN_SUPPORT = YES
-# When enabled doxygen tries to link words that correspond to documented classes,
-# or namespaces to their corresponding documentation. Such a link can be
-# prevented in individual cases by by putting a % sign in front of the word or
+# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
+# to that level are automatically included in the table of contents, even if
+# they do not have an id attribute.
+# Note: This feature currently applies only to Markdown headings.
+# Minimum value: 0, maximum value: 99, default value: 5.
+# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
+
+TOC_INCLUDE_HEADINGS = 5
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word or
# globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
AUTOLINK_SUPPORT = YES
# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should
-# set this tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
-# func(std::string) {}). This also makes the inheritance and collaboration
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
BUILTIN_STL_SUPPORT = NO
# If you use Microsoft's C++/CLI language, you should set this option to YES to
# enable parsing support.
+# The default value is: NO.
CPP_CLI_SUPPORT = NO
-# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
-# Doxygen will parse them like normal C++ but will assume all classes use public
-# instead of private inheritance when no explicit protection keyword is present.
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
SIP_SUPPORT = NO
# For Microsoft's IDL there are propget and propput attributes to indicate
-# getter and setter methods for a property. Setting this option to YES (the
-# default) will make doxygen replace the get and set methods by a property in
-# the documentation. This will only work if the methods are indeed getting or
-# setting a simple type. If this is not the case, or you want to show the
-# methods anyway, you should set this option to NO.
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
IDL_PROPERTY_SUPPORT = YES
# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
+# tag is set to YES then doxygen will reuse the documentation of the first
# member in the group (if any) for the other members of the group. By default
# all members of a group must be documented explicitly.
+# The default value is: NO.
DISTRIBUTE_GROUP_DOC = NO
-# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
-# the same type (for instance a group of public functions) to be put as a
-# subgroup of that type (e.g. under the Public Functions section). Set it to
-# NO to prevent subgrouping. Alternatively, this can be done per class using
-# the \nosubgrouping command.
+# If one adds a struct or class to a group and this option is enabled, then also
+# any nested class or struct is added to the same group. By default this option
+# is disabled and one has to add nested compounds explicitly via \ingroup.
+# The default value is: NO.
+
+GROUP_NESTED_COMPOUNDS = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
SUBGROUPING = YES
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
-# unions are shown inside the group in which they are included (e.g. using
-# @ingroup) instead of on a separate page (for HTML and Man pages) or
-# section (for LaTeX and RTF).
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
INLINE_GROUPED_CLASSES = NO
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
-# unions with only public data fields will be shown inline in the documentation
-# of the scope in which they are defined (i.e. file, namespace, or group
-# documentation), provided this scope is documented. If set to NO (the default),
-# structs, classes, and unions are shown on a separate page (for HTML and Man
-# pages) or section (for LaTeX and RTF).
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
INLINE_SIMPLE_STRUCTS = YES
-# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
-# is documented as struct, union, or enum with the name of the typedef. So
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically
-# be useful for C code in case the coding convention dictates that all compound
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
TYPEDEF_HIDES_STRUCT = YES
-# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be
-# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given
-# their name and scope. Since this can be an expensive process and often the
-# same symbol appear multiple times in the code, doxygen keeps a cache of
-# pre-resolved symbols. If the cache is too small doxygen will become slower.
-# If the cache is too large, memory is wasted. The cache size is given by this
-# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols.
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
LOOKUP_CACHE_SIZE = 0
@@ -390,330 +495,404 @@ LOOKUP_CACHE_SIZE = 0
# Build related configuration options
#---------------------------------------------------------------------------
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available.
-# Private class members and static file members will be hidden unless
-# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
EXTRACT_ALL = NO
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
-# will be included in the documentation.
+# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
EXTRACT_PRIVATE = NO
-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual
+# methods of a class will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIV_VIRTUAL = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
# scope will be included in the documentation.
+# The default value is: NO.
EXTRACT_PACKAGE = NO
-# If the EXTRACT_STATIC tag is set to YES all static members of a file
-# will be included in the documentation.
+# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
EXTRACT_STATIC = NO
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
-# defined locally in source files will be included in the documentation.
-# If set to NO only classes defined in header files are included.
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO,
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
EXTRACT_LOCAL_CLASSES = YES
-# This flag is only useful for Objective-C code. When set to YES local
-# methods, which are defined in the implementation section but not in
-# the interface are included in the documentation.
-# If set to NO (the default) only methods in the interface are included.
+# This flag is only useful for Objective-C code. If set to YES, local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO, only methods in the interface are
+# included.
+# The default value is: NO.
EXTRACT_LOCAL_METHODS = NO
# If this flag is set to YES, the members of anonymous namespaces will be
# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base
-# name of the file that contains the anonymous namespace. By default
-# anonymous namespaces are hidden.
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous
+# namespaces are hidden.
+# The default value is: NO.
EXTRACT_ANON_NSPACES = NO
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
-# undocumented members of documented classes, files or namespaces.
-# If set to NO (the default) these members will be included in the
-# various overviews, but no documentation section is generated.
-# This option has no effect if EXTRACT_ALL is enabled.
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
HIDE_UNDOC_MEMBERS = NO
-# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy.
-# If set to NO (the default) these classes will be included in the various
-# overviews. This option has no effect if EXTRACT_ALL is enabled.
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO, these classes will be included in the various overviews. This option
+# has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
HIDE_UNDOC_CLASSES = NO
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
-# friend (class|struct|union) declarations.
-# If set to NO (the default) these declarations will be included in the
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# declarations. If set to NO, these declarations will be included in the
# documentation.
+# The default value is: NO.
HIDE_FRIEND_COMPOUNDS = NO
-# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
-# documentation blocks found inside the body of a function.
-# If set to NO (the default) these blocks will be appended to the
-# function's detailed documentation block.
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO, these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
HIDE_IN_BODY_DOCS = NO
-# The INTERNAL_DOCS tag determines if documentation
-# that is typed after a \internal command is included. If the tag is set
-# to NO (the default) then the documentation will be excluded.
-# Set it to YES to include the internal documentation.
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
INTERNAL_DOCS = NO
-# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
-# file names in lower-case letters. If set to YES upper-case letters are also
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES, upper-case letters are also
# allowed. This is useful if you have classes or files whose names only differ
# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
+# (including Cygwin) and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
CASE_SENSE_NAMES = NO
-# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
-# will show members with their full class and namespace scopes in the
-# documentation. If set to YES the scope will be hidden.
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES, the
+# scope will be hidden.
+# The default value is: NO.
HIDE_SCOPE_NAMES = NO
-# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
-# will put a list of the files that are included by a file in the documentation
-# of that file.
+# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
+# append additional text to a page's title, such as Class Reference. If set to
+# YES the compound reference will be hidden.
+# The default value is: NO.
+
+HIDE_COMPOUND_REFERENCE= NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
SHOW_INCLUDE_FILES = NO
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
-# will list include files with double quotes in the documentation
-# rather than with sharp brackets.
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
FORCE_LOCAL_INCLUDES = NO
-# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
-# is inserted in the documentation for inline members.
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
INLINE_INFO = YES
-# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
-# will sort the (detailed) documentation of file and class members
-# alphabetically by member name. If set to NO the members will appear in
-# declaration order.
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order.
+# The default value is: YES.
SORT_MEMBER_DOCS = YES
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
-# brief documentation of file, namespace and class members alphabetically
-# by member name. If set to NO (the default) the members will appear in
-# declaration order.
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
SORT_BRIEF_DOCS = NO
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
-# will sort the (brief and detailed) documentation of class members so that
-# constructors and destructors are listed first. If set to NO (the default)
-# the constructors will appear in the respective orders defined by
-# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
-# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
-# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
SORT_MEMBERS_CTORS_1ST = NO
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
-# hierarchy of group names into alphabetical order. If set to NO (the default)
-# the group names will appear in their defined order.
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
SORT_GROUP_NAMES = YES
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
-# sorted by fully-qualified names, including namespaces. If set to
-# NO (the default), the class list will be sorted only by class name,
-# not including the namespace part.
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the
-# alphabetical list.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
SORT_BY_SCOPE_NAME = YES
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
-# do proper type resolution of all parameters of a function it will reject a
-# match between the prototype and the implementation of a member function even
-# if there is only one candidate or it is obvious which candidate to choose
-# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
-# will still accept a match between prototype and implementation in such cases.
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
STRICT_PROTO_MATCHING = NO
-# The GENERATE_TODOLIST tag can be used to enable (YES) or
-# disable (NO) the todo list. This list is created by putting \todo
-# commands in the documentation.
+# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
+# list. This list is created by putting \todo commands in the documentation.
+# The default value is: YES.
GENERATE_TODOLIST = NO
-# The GENERATE_TESTLIST tag can be used to enable (YES) or
-# disable (NO) the test list. This list is created by putting \test
-# commands in the documentation.
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
+# list. This list is created by putting \test commands in the documentation.
+# The default value is: YES.
GENERATE_TESTLIST = YES
-# The GENERATE_BUGLIST tag can be used to enable (YES) or
-# disable (NO) the bug list. This list is created by putting \bug
-# commands in the documentation.
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
GENERATE_BUGLIST = YES
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
-# disable (NO) the deprecated list. This list is created by putting
-# \deprecated commands in the documentation.
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
GENERATE_DEPRECATEDLIST= YES
-# The ENABLED_SECTIONS tag can be used to enable conditional
-# documentation sections, marked by \if sectionname ... \endif.
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
ENABLED_SECTIONS =
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
-# the initial value of a variable or macro consists of for it to appear in
-# the documentation. If the initializer consists of more lines than specified
-# here it will be hidden. Use a value of 0 to hide initializers completely.
-# The appearance of the initializer of individual variables and macros in the
-# documentation can be controlled using \showinitializer or \hideinitializer
-# command in the documentation regardless of this setting.
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
MAX_INITIALIZER_LINES = 0
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
-# at the bottom of the documentation of classes and structs. If set to YES the
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES, the
# list will mention the files that were used to generate the documentation.
+# The default value is: YES.
SHOW_USED_FILES = NO
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
-# This will remove the Files entry from the Quick Index and from the
-# Folder Tree View (if specified). The default is YES.
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
SHOW_FILES = NO
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
-# Namespaces page. This will remove the Namespaces entry from the Quick Index
-# and from the Folder Tree View (if specified). The default is YES.
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
SHOW_NAMESPACES = NO
# The FILE_VERSION_FILTER tag can be used to specify a program or script that
# doxygen should invoke to get the current version for each file (typically from
# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command <command> <input-file>, where <command> is the value of
-# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
-# provided by doxygen. Whatever the program writes to standard output
-# is used as the file version. See the manual for examples.
+# popen()) the command command input-file, where command is the value of the
+# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
FILE_VERSION_FILTER =
# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
# by doxygen. The layout file controls the global structure of the generated
# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option.
-# You can optionally specify a file name after the option, if omitted
-# DoxygenLayout.xml will be used as the name of the layout file.
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
LAYOUT_FILE = style/DoxygenLayout.xml
-# The CITE_BIB_FILES tag can be used to specify one or more bib files
-# containing the references data. This must be a list of .bib files. The
-# .bib extension is automatically appended if omitted. Using this command
-# requires the bibtex tool to be installed. See also
-# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
-# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
-# feature you need bibtex and perl available in the search path.
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. See also \cite for info how to create references.
CITE_BIB_FILES =
#---------------------------------------------------------------------------
-# configuration options related to warning and progress messages
+# Configuration options related to warning and progress messages
#---------------------------------------------------------------------------
-# The QUIET tag can be used to turn on/off the messages that are generated
-# by doxygen. Possible values are YES and NO. If left blank NO is used.
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
QUIET = NO
# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated by doxygen. Possible values are YES and NO. If left blank
-# NO is used.
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
WARNINGS = YES
-# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
-# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
-# automatically be disabled.
+# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
WARN_IF_UNDOCUMENTED = YES
-# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some
-# parameters in a documented function, or documenting parameters that
-# don't exist or using markup commands wrongly.
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
WARN_IF_DOC_ERROR = YES
-# The WARN_NO_PARAMDOC option can be enabled to get warnings for
-# functions that are documented, but have no documentation for their parameters
-# or return value. If set to NO (the default) doxygen will only warn about
-# wrong or incomplete parameter documentation, but not about the absence of
-# documentation.
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO, doxygen will only warn about wrong or incomplete
+# parameter documentation, but not about the absence of documentation. If
+# EXTRACT_ALL is set to YES then this flag will automatically be disabled.
+# The default value is: NO.
WARN_NO_PARAMDOC = YES
-# The WARN_FORMAT tag determines the format of the warning messages that
-# doxygen can produce. The string should contain the $file, $line, and $text
-# tags, which will be replaced by the file and line number from which the
-# warning originated and the warning text. Optionally the format may contain
-# $version, which will be replaced by the version of the file (if it could
-# be obtained via FILE_VERSION_FILTER)
+# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
+# a warning is encountered.
+# The default value is: NO.
+
+WARN_AS_ERROR = YES
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
WARN_FORMAT = "$file:$line: $text"
-# The WARN_LOGFILE tag can be used to specify a file to which warning
-# and error messages should be written. If left blank the output is written
-# to stderr.
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
WARN_LOGFILE = doxygen.log
#---------------------------------------------------------------------------
-# configuration options related to the input files
+# Configuration options related to the input files
#---------------------------------------------------------------------------
-# The INPUT tag can be used to specify the files and/or directories that contain
-# documented source files. You may enter file names like "myfile.cpp" or
-# directories like "/usr/src/myproject". Separate the files or directories
-# with spaces.
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
+# Note: If this tag is empty the current directory is searched.
INPUT = ../include/wiredtiger.in \
../include/wiredtiger_ext.h \
.
# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
-# also the default input encoding. Doxygen uses libiconv (or the iconv built
-# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
-# the list of possible encodings.
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: https://www.gnu.org/software/libiconv/) for the list of
+# possible encodings.
+# The default value is: UTF-8.
INPUT_ENCODING = UTF-8
# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank the following patterns are tested:
-# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
-# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
-# *.f90 *.f *.for *.vhd *.vhdl
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# read by doxygen.
+#
+# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp,
+# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
+# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
+# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment),
+# *.doc (to be provided as doxygen C comment), *.txt (to be provided as doxygen
+# C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f, *.for, *.tcl, *.vhd,
+# *.vhdl, *.ucf, *.qsf and *.ice.
FILE_PATTERNS = *.c \
*.cc \
@@ -747,15 +926,16 @@ FILE_PATTERNS = *.c \
*.vhd \
*.vhdl
-# The RECURSIVE tag can be used to turn specify whether or not subdirectories
-# should be searched for input files as well. Possible values are YES and NO.
-# If left blank NO is used.
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
RECURSIVE = YES
# The EXCLUDE tag can be used to specify files and/or directories that should be
# excluded from the INPUT source files. This way you can easily exclude a
# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
# Note that relative paths are relative to the directory from which doxygen is
# run.
@@ -769,14 +949,16 @@ EXCLUDE = bdb-map.dox \
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
# from the input.
+# The default value is: NO.
EXCLUDE_SYMLINKS = NO
# If the value of the INPUT tag contains directories, you can use the
# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories. Note that the wildcards are matched
-# against the file with absolute path, so to exclude all test directories
-# for example use the pattern */test/*
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
EXCLUDE_PATTERNS =
@@ -785,740 +967,1212 @@ EXCLUDE_PATTERNS =
# output. The symbol name can be a fully qualified name, a word, or if the
# wildcard * is used, a substring. Examples: ANamespace, AClass,
# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
EXCLUDE_SYMBOLS = __F \
- doc_*
+ doc_*
-# The EXAMPLE_PATH tag can be used to specify one or more files or
-# directories that contain example code fragments that are included (see
-# the \include command).
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
EXAMPLE_PATH = ../../examples/c \
- ../../ext/compressors/nop \
- ../../ext/encryptors/nop \
- ../../ext/encryptors/rotn \
- ../../examples/python \
- ../../test/fuzz
+ ../../ext/compressors/nop \
+ ../../ext/encryptors/nop \
+ ../../ext/encryptors/rotn \
+ ../../examples/python \
+ ../../test/fuzz
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank all files are included.
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
EXAMPLE_PATTERNS =
# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude
-# commands irrespective of the value of the RECURSIVE tag.
-# Possible values are YES and NO. If left blank NO is used.
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
EXAMPLE_RECURSIVE = NO
-# The IMAGE_PATH tag can be used to specify one or more files or
-# directories that contain image that are included in the documentation (see
-# the \image command).
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
IMAGE_PATH = images
# The INPUT_FILTER tag can be used to specify a program that doxygen should
# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command <filter> <input-file>, where <filter>
-# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
-# input file. Doxygen will then use the output that the filter program writes
-# to standard output. If FILTER_PATTERNS is specified, this tag will be
-# ignored.
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
INPUT_FILTER =
# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis. Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match. The filters are a list of the form:
-# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
-# info on how filters are used. If FILTER_PATTERNS is empty or if
-# non of the patterns match the file name, INPUT_FILTER is applied.
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
FILTER_PATTERNS = *.py=tools/pyfilter \
- *.dox=tools/doxfilter
+ *.dox=tools/doxfilter
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will be used to filter the input files when producing source
-# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
FILTER_SOURCE_FILES = NO
# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
-# and it is also possible to disable source filtering for a specific pattern
-# using *.ext= (so without naming a filter). This option only has effect when
-# FILTER_SOURCE_FILES is enabled.
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
FILTER_SOURCE_PATTERNS =
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
#---------------------------------------------------------------------------
-# configuration options related to source browsing
+# Configuration options related to source browsing
#---------------------------------------------------------------------------
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will
-# be generated. Documented entities will be cross-referenced with these sources.
-# Note: To get rid of all source code in the generated output, make sure also
-# VERBATIM_HEADERS is set to NO.
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
SOURCE_BROWSER = NO
-# Setting the INLINE_SOURCES tag to YES will include the body
-# of functions and classes directly in the documentation.
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
INLINE_SOURCES = NO
-# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
-# doxygen to hide any special comment blocks from generated source code
-# fragments. Normal C, C++ and Fortran comments will always remain visible.
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
STRIP_CODE_COMMENTS = YES
-# If the REFERENCED_BY_RELATION tag is set to YES
-# then for each documented function all documented
-# functions referencing it will be listed.
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# entity all documented functions referencing it will be listed.
+# The default value is: NO.
REFERENCED_BY_RELATION = NO
-# If the REFERENCES_RELATION tag is set to YES
-# then for each documented function all documented entities
-# called/used by that function will be listed.
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
REFERENCES_RELATION = NO
-# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
-# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
-# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
-# link to the source code. Otherwise they will link to the documentation.
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
REFERENCES_LINK_SOURCE = NO
-# If the USE_HTAGS tag is set to YES then the references to source code
-# will point to the HTML generated by the htags(1) tool instead of doxygen
-# built-in source browser. The htags tool is part of GNU's global source
-# tagging system (see http://www.gnu.org/software/global/global.html). You
-# will need version 4.8.6 or higher.
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see https://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
USE_HTAGS = NO
-# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
-# will generate a verbatim copy of the header file for each class for
-# which an include is specified. Set to NO to disable this.
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
VERBATIM_HEADERS = NO
+# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
+# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
+# cost of reduced performance. This can be particularly helpful with template
+# rich C++ code for which doxygen's built-in parser lacks the necessary type
+# information.
+# Note: The availability of this option depends on whether or not doxygen was
+# generated with the -Duse_libclang=ON option for CMake.
+# The default value is: NO.
+
+CLANG_ASSISTED_PARSING = NO
+
+# If clang assisted parsing is enabled you can provide the compiler with command
+# line options that you would normally use when invoking the compiler. Note that
+# the include paths will already be set by doxygen for the files and directories
+# specified with INPUT and INCLUDE_PATH.
+# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
+
+CLANG_OPTIONS =
+
+# If clang assisted parsing is enabled you can provide the clang parser with the
+# path to the compilation database (see:
+# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) used when the files
+# were built. This is equivalent to specifying the "-p" option to a clang tool,
+# such as clang-check. These options will then be passed to the parser.
+# Note: The availability of this option depends on whether or not doxygen was
+# generated with the -Duse_libclang=ON option for CMake.
+
+CLANG_DATABASE_PATH =
+
#---------------------------------------------------------------------------
-# configuration options related to the alphabetical class index
+# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
-# of all compounds will be generated. Enable this if the project
-# contains a lot of classes, structs, unions or interfaces.
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
ALPHABETICAL_INDEX = NO
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
-# in which this list will be split (can be a number in the range [1..20])
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
COLS_IN_ALPHA_INDEX = 5
-# In case all classes in a project start with a common prefix, all
-# classes will be put under the same header in the alphabetical index.
-# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
-# should be ignored while generating the index headers.
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
IGNORE_PREFIX = WT_
#---------------------------------------------------------------------------
-# configuration options related to the HTML output
+# Configuration options related to the HTML output
#---------------------------------------------------------------------------
-# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
-# generate HTML output.
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
+# The default value is: YES.
GENERATE_HTML = YES
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `html' will be used as the default path.
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_OUTPUT = .
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
-# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
-# doxygen will generate files with .html extension.
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FILE_EXTENSION = .html
-# The HTML_HEADER tag can be used to specify a personal HTML header for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard header. Note that when using a custom header you are responsible
-# for the proper inclusion of any scripts and style sheets that doxygen
-# needs, which is dependent on the configuration options used.
-# It is advised to generate a default header using "doxygen -w html
-# header.html footer.html stylesheet.css YourConfigFile" and then modify
-# that header. Note that the header is subject to change so you typically
-# have to redo this when upgrading to a newer version of doxygen or when
-# changing the value of configuration settings such as GENERATE_TREEVIEW!
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML, the header file must include any scripts and style sheets
+# that doxygen needs, which is dependent on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_HEADER = style/header.html
-# The HTML_FOOTER tag can be used to specify a personal HTML footer for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard footer.
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FOOTER = style/footer.html
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
-# style sheet that is used by each HTML page. It can be used to
-# fine-tune the look of the HTML output. If left blank doxygen will
-# generate a default style sheet. Note that it is recommended to use
-# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this
-# tag will in the future become obsolete.
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_STYLESHEET =
-# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional
-# user-defined cascading style sheet that is included after the standard
-# style sheets created by doxygen. Using this option one can overrule
-# certain style aspects. This is preferred over using HTML_STYLESHEET
-# since it does not replace the standard style sheet and is therefor more
-# robust against future updates. Doxygen will copy the style sheet file to
-# the output directory.
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_STYLESHEET = style/wiredtiger.css
# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the HTML output directory. Note
# that these files will be copied to the base HTML output directory. Use the
-# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that
-# the files will be copied as-is; there are no commands or markers available.
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_FILES =
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
-# Doxygen will adjust the colors in the style sheet and background images
-# according to this color. Hue is specified as an angle on a colorwheel,
-# see http://en.wikipedia.org/wiki/Hue for more information.
-# For instance the value 0 represents red, 60 is yellow, 120 is green,
-# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
-# The allowed range is 0 to 359.
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the style sheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# https://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_HUE = 34
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
-# the colors in the HTML output. For a value of 0 the output will use
-# grayscales only. A value of 255 will produce the most vivid colors.
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_SAT = 81
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
-# the luminance component of the colors in the HTML output. Values below
-# 100 gradually make the output lighter, whereas values above 100 make
-# the output darker. The value divided by 100 is the actual gamma applied,
-# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
-# and 100 does not change the gamma.
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_GAMMA = 96
# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting
-# this to NO can help when comparing the output of multiple runs.
+# page will contain the date and time when the page was generated. Setting this
+# to YES can help to show when doxygen was last run and thus if the
+# documentation is up to date.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_TIMESTAMP = YES
+# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
+# documentation will contain a main index with vertical navigation menus that
+# are dynamically created via JavaScript. If disabled, the navigation index will
+# consists of multiple levels of tabs that are statically embedded in every HTML
+# page. Disable this option to support browsers that do not have JavaScript,
+# like the Qt help browser.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_MENUS = YES
+
# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_DYNAMIC_SECTIONS = YES
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
-# entries shown in the various tree structured indices initially; the user
-# can expand and collapse entries dynamically later on. Doxygen will expand
-# the tree to such a level that at most the specified number of entries are
-# visible (unless a fully collapsed tree already exceeds this amount).
-# So setting the number of entries 1 will produce a full collapsed tree by
-# default. 0 is a special value representing an infinite number of entries
-# and will result in a full expanded tree by default.
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries 1 will produce a full collapsed tree by default. 0 is a special value
+# representing an infinite number of entries and will result in a full expanded
+# tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_INDEX_NUM_ENTRIES = 100
-# If the GENERATE_DOCSET tag is set to YES, additional index files
-# will be generated that can be used as input for Apple's Xcode 3
-# integrated development environment, introduced with OSX 10.5 (Leopard).
-# To create a documentation set, doxygen will generate a Makefile in the
-# HTML output directory. Running make will produce the docset in that
-# directory and running "make install" will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
-# it at startup.
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: https://developer.apple.com/xcode/), introduced with OSX
+# 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy
+# genXcode/_index.html for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_DOCSET = NO
-# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
-# feed. A documentation feed provides an umbrella under which multiple
-# documentation sets from a single provider (such as a company or product suite)
-# can be grouped.
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_FEEDNAME = "Doxygen generated docs"
-# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
-# should uniquely identify the documentation set bundle. This should be a
-# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
-# will append .docset to the name.
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_BUNDLE_ID = org.doxygen.Project
-# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely
-# identify the documentation publisher. This should be a reverse domain-name
-# style string, e.g. com.mycompany.MyDocSet.documentation.
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_PUBLISHER_ID = org.doxygen.Publisher
-# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher.
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_PUBLISHER_NAME = Publisher
-# If the GENERATE_HTMLHELP tag is set to YES, additional index files
-# will be generated that can be used as input for tools like the
-# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
-# of the generated HTML documentation.
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_HTMLHELP = NO
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
-# be used to specify the file name of the resulting .chm file. You
-# can add a path in front of the file if the result should not be
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_FILE =
-# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
-# be used to specify the location (absolute path including file name) of
-# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
-# the HTML help compiler on the generated index.hhp.
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
HHC_LOCATION =
-# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
-# controls if a separate .chi index file is generated (YES) or that
-# it should be included in the master .chm file (NO).
+# The GENERATE_CHI flag controls if a separate .chi index file is generated
+# (YES) or that it should be included in the master .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
GENERATE_CHI = NO
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
-# is used to encode HtmlHelp index (hhk), content (hhc) and project file
-# content.
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_INDEX_ENCODING =
-# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
-# controls whether a binary table of contents is generated (YES) or a
-# normal table of contents (NO) in the .chm file.
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
BINARY_TOC = NO
-# The TOC_EXPAND flag can be set to YES to add extra items for group members
-# to the contents of the HTML help documentation and to the tree view.
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
TOC_EXPAND = NO
# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
-# that can be used as input for Qt's qhelpgenerator to generate a
-# Qt Compressed Help (.qch) of the generated HTML documentation.
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_QHP = NO
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
-# be used to specify the file name of the resulting .qch file.
-# The path specified is relative to the HTML output folder.
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
QCH_FILE =
-# The QHP_NAMESPACE tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#namespace
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_NAMESPACE = org.doxygen.Project
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_VIRTUAL_FOLDER = doc
-# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
-# add. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#custom-filters
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_NAME =
-# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see
-# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
-# Qt Help Project / Custom Filters</a>.
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_ATTRS =
# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's
-# filter section matches.
-# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
-# Qt Help Project / Filter Attributes</a>.
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_SECT_FILTER_ATTRS =
-# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
-# be used to specify the location of Qt's qhelpgenerator.
-# If non-empty doxygen will try to run qhelpgenerator on the generated
-# .qhp file.
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
QHG_LOCATION =
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
-# will be generated, which together with the HTML files, form an Eclipse help
-# plugin. To install this plugin and make it available under the help contents
-# menu in Eclipse, the contents of the directory containing the HTML and XML
-# files needs to be copied into the plugins directory of eclipse. The name of
-# the directory within the plugins directory should be the same as
-# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
-# the help appears.
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated, together with the HTML files, they form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_ECLIPSEHELP = NO
-# A unique identifier for the eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have
-# this name.
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
ECLIPSE_DOC_ID = org.doxygen.Project
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
-# at top of each HTML page. The value NO (the default) enables the index and
-# the value YES disables it. Since the tabs have the same information as the
-# navigation tree you can set this option to NO if you already set
-# GENERATE_TREEVIEW to YES.
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
DISABLE_INDEX = NO
# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information.
-# If the tag value is set to YES, a side panel will be generated
-# containing a tree-like index structure (just like the one that
-# is generated for HTML Help). For this to work a browser that supports
-# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
-# Windows users are probably better off using the HTML help feature.
-# Since the tree basically has the same information as the tab index you
-# could consider to set DISABLE_INDEX to NO when enabling this option.
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_TREEVIEW = YES
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
-# (range [0,1..20]) that doxygen will group on one line in the generated HTML
-# documentation. Note that a value of 0 will completely suppress the enum
-# values from appearing in the overview section.
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
ENUM_VALUES_PER_LINE = 4
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
-# used to set the initial width (in pixels) of the frame in which the tree
-# is shown.
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
TREEVIEW_WIDTH = 200
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
-# links to external symbols imported via tag files in a separate window.
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
EXT_LINKS_IN_WINDOW = NO
-# Use this tag to change the font size of Latex formulas included
-# as images in the HTML documentation. The default is 10. Note that
-# when you change the font size after a successful doxygen run you need
-# to manually remove any form_*.png images from the HTML output directory
-# to force them to be regenerated.
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
FORMULA_FONTSIZE = 10
-# Use the FORMULA_TRANPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are
-# not supported properly for IE 6.0, but are supported on all modern browsers.
-# Note that when changing this option you need to delete any form_*.png files
-# in the HTML output before the changes have effect.
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
FORMULA_TRANSPARENT = YES
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
-# (see http://www.mathjax.org) which uses client side Javascript for the
-# rendering instead of using prerendered bitmaps. Use this if you do not
-# have LaTeX installed or if you want to formulas look prettier in the HTML
-# output. When enabled you may also need to install MathJax separately and
-# configure the path to it using the MATHJAX_RELPATH option.
+# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
+# to create new LaTeX commands to be used in formulas as building blocks. See
+# the section "Including formulas" for details.
+
+FORMULA_MACROFILE =
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# https://www.mathjax.org) which uses client side JavaScript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want to formulas look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
USE_MATHJAX = NO
-# When MathJax is enabled you need to specify the location relative to the
-# HTML output directory using the MATHJAX_RELPATH option. The destination
-# directory should contain the MathJax.js script. For instance, if the mathjax
-# directory is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to
-# the MathJax Content Delivery Network so you can quickly see the result without
-# installing MathJax. However, it is strongly recommended to install a local
-# copy of MathJax from http://www.mathjax.org before deployment.
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from https://www.mathjax.org before deployment.
+# The default value is: https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/.
+# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_RELPATH = http://www.mathjax.org/mathjax
-# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
-# names that should be enabled during MathJax rendering.
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_EXTENSIONS =
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box
-# for the HTML output. The underlying search engine uses javascript
-# and DHTML and should work on any modern browser. Note that when using
-# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
-# (GENERATE_DOCSET) there is already a search function so this one should
-# typically be disabled. For large projects the javascript based search engine
-# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
SEARCHENGINE = NO
# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a PHP enabled web server instead of at the web client
-# using Javascript. Doxygen will generate the search PHP script and index
-# file to put on the web server. The advantage of the server
-# based approach is that it scales better to large projects and allows
-# full text search. The disadvantages are that it is more difficult to setup
-# and does not have live searching capabilities.
+# implemented using a web server instead of a web client using JavaScript. There
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
SERVER_BASED_SEARCH = NO
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: https://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: https://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
+# to a relative location where the documentation can be found. The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS =
+
#---------------------------------------------------------------------------
-# configuration options related to the LaTeX output
+# Configuration options related to the LaTeX output
#---------------------------------------------------------------------------
-# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
-# generate Latex output.
+# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
+# The default value is: YES.
GENERATE_LATEX = NO
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `latex' will be used as the default path.
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_OUTPUT = latex
# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked. If left blank `latex' will be used as the default command name.
-# Note that when enabling USE_PDFLATEX this option is only used for
-# generating bitmaps for formulas in the HTML output, but not in the
-# Makefile that is written to the output directory.
+# invoked.
+#
+# Note that when not enabling USE_PDFLATEX the default is latex; when enabling
+# USE_PDFLATEX the default is pdflatex, and when in the latter case latex is
+# chosen this is overwritten by pdflatex. For specific output languages the
+# default can have been set differently, this depends on the implementation of
+# the output language.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_CMD_NAME = latex
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
-# generate index for LaTeX. If left blank `makeindex' will be used as the
-# default command name.
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# Note: This tag is used in the Makefile / make.bat.
+# See also: LATEX_MAKEINDEX_CMD for the part in the generated output file
+# (.tex).
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
MAKEINDEX_CMD_NAME = makeindex
-# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
-# LaTeX documents. This may be useful for small projects and may help to
-# save some trees in general.
+# The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to
+# generate index for LaTeX. In case there is no backslash (\) as first character
+# it will be automatically added in the LaTeX code.
+# Note: This tag is used in the generated output file (.tex).
+# See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat.
+# The default value is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_MAKEINDEX_CMD = makeindex
+
+# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
COMPACT_LATEX = NO
-# The PAPER_TYPE tag can be used to set the paper type that is used
-# by the printer. Possible values are: a4, letter, legal and
-# executive. If left blank a4wide will be used.
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
PAPER_TYPE = a4wide
-# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX
-# packages that should be included in the LaTeX output.
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. The package can be specified just
+# by its name or with the correct syntax as to be used with the LaTeX
+# \usepackage command. To get the times font for instance you can specify:
+# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
+# To use the option intlimits with the amsmath package you can specify:
+# EXTRA_PACKAGES=[intlimits]{amsmath}
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
EXTRA_PACKAGES =
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
-# the generated latex document. The header should contain everything until
-# the first chapter. If it is left blank doxygen will generate a
-# standard header. Notice: only use this tag if you know what you are doing!
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
+# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
+# string, for the replacement values of the other commands the user is referred
+# to HTML_HEADER.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HEADER =
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
-# the generated latex document. The footer should contain everything after
-# the last chapter. If it is left blank doxygen will generate a
-# standard footer. Notice: only use this tag if you know what you are doing!
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer. See
+# LATEX_HEADER for more information on how to generate a default footer and what
+# special commands can be used inside the footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_FOOTER =
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
-# is prepared for conversion to pdf (using ps2pdf). The pdf file will
-# contain links (just like the HTML output) instead of page references
-# This makes the output suitable for online browsing using a pdf viewer.
+# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# LaTeX style sheets that are included after the standard style sheets created
+# by doxygen. Using this option one can overrule certain style aspects. Doxygen
+# will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_STYLESHEET =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
PDF_HYPERLINKS = YES
-# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
-# plain latex in the generated Makefile. Set this option to YES to get a
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES, to get a
# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
USE_PDFLATEX = YES
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
-# command to the generated LaTeX files. This will instruct LaTeX to keep
-# running if errors occur, instead of asking the user for help.
-# This option is also used when generating formulas in HTML.
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_BATCHMODE = NO
-# If LATEX_HIDE_INDICES is set to YES then doxygen will not
-# include the index chapters (such as File Index, Compound Index, etc.)
-# in the output.
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HIDE_INDICES = YES
-# If LATEX_SOURCE_CODE is set to YES then doxygen will include
-# source code with syntax highlighting in the LaTeX output.
-# Note that which sources are shown also depends on other settings
-# such as SOURCE_BROWSER.
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_SOURCE_CODE = YES
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
-# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
-# http://en.wikipedia.org/wiki/BibTeX for more info.
+# bibliography, e.g. plainnat, or ieeetr. See
+# https://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_BIB_STYLE = plain
+# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_TIMESTAMP = NO
+
+# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute)
+# path from which the emoji images will be read. If a relative path is entered,
+# it will be relative to the LATEX_OUTPUT directory. If left blank the
+# LATEX_OUTPUT directory will be used.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EMOJI_DIRECTORY =
+
#---------------------------------------------------------------------------
-# configuration options related to the RTF output
+# Configuration options related to the RTF output
#---------------------------------------------------------------------------
-# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
-# The RTF output is optimized for Word 97 and may not look very pretty with
-# other RTF readers or editors.
+# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
GENERATE_RTF = NO
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `rtf' will be used as the default path.
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_OUTPUT = rtf
-# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
-# RTF documents. This may be useful for small projects and may help to
-# save some trees in general.
+# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
COMPACT_RTF = NO
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
-# will contain hyperlink fields. The RTF file will
-# contain links (just like the HTML output) instead of page references.
-# This makes the output suitable for online browsing using WORD or other
-# programs which support those fields.
-# Note: wordpad (write) and others do not support links.
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_HYPERLINKS = NO
-# Load style sheet definitions from file. Syntax is similar to doxygen's
-# config file, i.e. a series of assignments. You only have to provide
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# configuration file, i.e. a series of assignments. You only have to provide
# replacements, missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_STYLESHEET_FILE =
-# Set optional variables used in the generation of an rtf document.
-# Syntax is similar to doxygen's config file.
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's configuration file. A template extensions file can be
+# generated using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_EXTENSIONS_FILE =
+# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
+# with syntax highlighting in the RTF output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_SOURCE_CODE = NO
+
#---------------------------------------------------------------------------
-# configuration options related to the man page output
+# Configuration options related to the man page output
#---------------------------------------------------------------------------
-# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
-# generate man pages
+# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
-GENERATE_MAN = YES
+GENERATE_MAN = NO
-# The MAN_OUTPUT tag is used to specify where the man pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `man' will be used as the default path.
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_OUTPUT = man
-# The MAN_EXTENSION tag determines the extension that is added to
-# the generated man pages (default is the subroutine's section .3)
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_EXTENSION = .3
-# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
-# then it will generate one additional man file for each entity
-# documented in the real man page(s). These additional files
-# only source the real man page, but without them the man command
-# would be unable to find the correct page. The default is NO.
+# The MAN_SUBDIR tag determines the name of the directory created within
+# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
+# MAN_EXTENSION with the initial . removed.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_SUBDIR =
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_LINKS = NO
#---------------------------------------------------------------------------
-# configuration options related to the XML output
+# Configuration options related to the XML output
#---------------------------------------------------------------------------
-# If the GENERATE_XML tag is set to YES Doxygen will
-# generate an XML file that captures the structure of
-# the code including all documentation.
+# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
GENERATE_XML = NO
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT = xml
+
+# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING = YES
+
+# If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include
+# namespace members in file scope as well, matching the HTML output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_NS_MEMB_FILE_SCOPE = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT = docbook
+
+# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
+# program listings (including syntax highlighting and cross-referencing
+# information) to the DOCBOOK output. Note that enabling this will significantly
+# increase the size of the DOCBOOK output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_PROGRAMLISTING = NO
+
#---------------------------------------------------------------------------
-# configuration options for the AutoGen Definitions output
+# Configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
-# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
-# generate an AutoGen Definitions (see autogen.sf.net) file
-# that captures the structure of the code including all
-# documentation. Note that this feature is still experimental
-# and incomplete at the moment.
+# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
+# AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures
+# the structure of the code including all documentation. Note that this feature
+# is still experimental and incomplete at the moment.
+# The default value is: NO.
GENERATE_AUTOGEN_DEF = NO
#---------------------------------------------------------------------------
-# configuration options related to the Perl module output
+# Configuration options related to the Perl module output
#---------------------------------------------------------------------------
-# If the GENERATE_PERLMOD tag is set to YES Doxygen will
-# generate a Perl module file that captures the structure of
-# the code including all documentation. Note that this
-# feature is still experimental and incomplete at the
-# moment.
+# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
GENERATE_PERLMOD = NO
-# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
-# the necessary Makefile rules, Perl scripts and LaTeX code to be able
-# to generate PDF and DVI output from the Perl module output.
+# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_LATEX = NO
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
-# nicely formatted so it can be parsed by a human reader. This is useful
-# if you want to understand what is going on. On the other hand, if this
-# tag is set to NO the size of the Perl module output will be much smaller
-# and Perl will parse it just the same.
+# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO, the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_PRETTY = YES
-# The names of the make variables in the generated doxyrules.make file
-# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
-# This is useful so different doxyrules.make files included by the same
-# Makefile don't overwrite each other's variables.
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_MAKEVAR_PREFIX =
@@ -1526,50 +2180,58 @@ PERLMOD_MAKEVAR_PREFIX =
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
-# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
-# evaluate all C-preprocessor directives found in the sources and include
-# files.
+# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
ENABLE_PREPROCESSING = YES
-# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
-# names in the source code. If set to NO (the default) only conditional
-# compilation will be performed. Macro expansion can be done in a controlled
-# way by setting EXPAND_ONLY_PREDEF to YES.
+# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
+# in the source code. If set to NO, only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
MACRO_EXPANSION = YES
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
-# then the macro expansion is limited to the macros specified with the
-# PREDEFINED and EXPAND_AS_DEFINED tags.
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
EXPAND_ONLY_PREDEF = NO
-# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
-# pointed to by INCLUDE_PATH will be searched when a #include is found.
+# If the SEARCH_INCLUDES tag is set to YES, the include files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
SEARCH_INCLUDES = YES
# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by
-# the preprocessor.
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
INCLUDE_PATH =
# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will
-# be used.
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
INCLUDE_FILE_PATTERNS =
-# The PREDEFINED tag can be used to specify one or more macro names that
-# are defined before the preprocessor is started (similar to the -D option of
-# gcc). The argument of the tag is a list of macros of the form: name
-# or name=definition (no spaces). If the definition and the = are
-# omitted =1 is assumed. To prevent a macro definition from being
-# undefined via #undef or recursively expanded use the := operator
-# instead of the = operator.
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
PREDEFINED = DOXYGEN \
__wt_collator:=WT_COLLATOR \
@@ -1596,280 +2258,377 @@ PREDEFINED = DOXYGEN \
WT_HANDLE_CLOSED(x):=x \
WT_HANDLE_NULLABLE(x):=x
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
-# this tag can be used to specify a list of macro names that should be expanded.
-# The macro definition that is found in the sources will be used.
-# Use the PREDEFINED tag if you want to use a different macro definition that
-# overrules the definition found in the source code.
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
EXPAND_AS_DEFINED =
-# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
-# doxygen's preprocessor will remove all references to function-like macros
-# that are alone on a line, have an all uppercase name, and do not end with a
-# semicolon, because these will confuse the parser if not removed.
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have
+# an all uppercase name, and do not end with a semicolon. Such function macros
+# are typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
SKIP_FUNCTION_MACROS = YES
#---------------------------------------------------------------------------
-# Configuration::additions related to external references
+# Configuration options related to external references
#---------------------------------------------------------------------------
-# The TAGFILES option can be used to specify one or more tagfiles. For each
-# tag file the location of the external documentation should be added. The
-# format of a tag file without this location is as follows:
-# TAGFILES = file1 file2 ...
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
# Adding location for the tag files is done as follows:
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where "loc1" and "loc2" can be relative or absolute paths
-# or URLs. Note that each tag file must have a unique name (where the name does
-# NOT include the path). If a tag file is not located in the directory in which
-# doxygen is run, you must also specify the path to the tagfile here.
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have a unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
TAGFILES =
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create
-# a tag file that is based on the input files it reads.
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
GENERATE_TAGFILE =
-# If the ALLEXTERNALS tag is set to YES all external classes will be listed
-# in the class index. If set to NO only the inherited external classes
-# will be listed.
+# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
+# the class index. If set to NO, only the inherited external classes will be
+# listed.
+# The default value is: NO.
ALLEXTERNALS = NO
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will
-# be listed.
+# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
EXTERNAL_GROUPS = YES
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of `which perl').
+# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
-PERL_PATH = /usr/bin/perl
+EXTERNAL_PAGES = YES
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
-# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
-# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base
-# or super classes. Setting the tag to NO turns the diagrams off. Note that
-# this option also works with HAVE_DOT disabled, but it is recommended to
-# install and use dot, since it yields more powerful graphs.
+# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
CLASS_DIAGRAMS = NO
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see
-# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
-MSCGEN_PATH =
+DIA_PATH =
-# If set to YES, the inheritance and collaboration graphs will hide
-# inheritance and usage relations if the target is undocumented
-# or is not a class.
+# If set to YES the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
HIDE_UNDOC_RELATIONS = YES
# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz, a graph visualization
-# toolkit from AT&T and Lucent Bell Labs. The other options in this section
-# have no effect if this option is set to NO (the default)
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO
+# The default value is: NO.
HAVE_DOT = NO
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
-# allowed to run in parallel. When set to 0 (the default) doxygen will
-# base this on the number of processors available in the system. You can set it
-# explicitly to a value larger than 0 to get control over the balance
-# between CPU load and processing speed.
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
DOT_NUM_THREADS = 0
-# By default doxygen will use the Helvetica font for all dot files that
-# doxygen generates. When you want a differently looking font you can specify
-# the font name using DOT_FONTNAME. You need to make sure dot is able to find
-# the font, which can be done by putting it in a standard location or by setting
-# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
-# directory containing the font.
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTNAME =
-# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
-# The default size is 10pt.
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTSIZE = 10
-# By default doxygen will tell dot to use the Helvetica font.
-# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
-# set the path where dot can find it.
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTPATH =
-# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect inheritance relations. Setting this tag to YES will force the
-# CLASS_DIAGRAMS tag to NO.
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
CLASS_GRAPH = YES
-# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect implementation dependencies (inheritance, containment, and
-# class references variables) of the class with other documented classes.
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
COLLABORATION_GRAPH = YES
-# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for groups, showing the direct groups dependencies
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
GROUP_GRAPHS = YES
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
# collaboration diagrams in a style similar to the OMG's Unified Modeling
# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
UML_LOOK = NO
-# If the UML_LOOK tag is enabled, the fields and methods are shown inside
-# the class node. If there are many fields or methods and many nodes the
-# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
-# threshold limits the number of items for each type to make the size more
-# managable. Set this to 0 for no limit. Note that the threshold may be
-# exceeded by 50% before the limit is enforced.
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
UML_LIMIT_NUM_FIELDS = 10
-# If set to YES, the inheritance and collaboration graphs will show the
-# relations between templates and their instances.
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
TEMPLATE_RELATIONS = NO
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
-# tags are set to YES then doxygen will generate a graph for each documented
-# file showing the direct and indirect include dependencies of the file with
-# other documented files.
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
INCLUDE_GRAPH = YES
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
-# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
-# documented header file showing the documented files that directly or
-# indirectly include this file.
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
INCLUDED_BY_GRAPH = YES
-# If the CALL_GRAPH and HAVE_DOT options are set to YES then
-# doxygen will generate a call dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable call graphs
-# for selected functions only using the \callgraph command.
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command. Disabling a call graph can be
+# accomplished by means of the command \hidecallgraph.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
CALL_GRAPH = NO
-# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
-# doxygen will generate a caller dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable caller
-# graphs for selected functions only using the \callergraph command.
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command. Disabling a caller graph can be
+# accomplished by means of the command \hidecallergraph.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
CALLER_GRAPH = NO
-# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
-# will generate a graphical hierarchy of all classes instead of a textual one.
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will graphical
+# hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
GRAPHICAL_HIERARCHY = YES
-# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
-# then doxygen will show the dependencies a directory has on other directories
-# in a graphical way. The dependency relations are determined by the #include
-# relations between the files in the directories.
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
DIRECTORY_GRAPH = YES
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot. Possible values are svg, png, jpg, or gif.
-# If left blank png will be used. If you choose svg you need to set
-# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible in IE 9+ (other browsers do not have this requirement).
+# generated by dot. For an explanation of the image formats see the section
+# output formats in the documentation of the dot tool (Graphviz (see:
+# http://www.graphviz.org/)).
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo,
+# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
+# png:gdiplus:gdiplus.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
DOT_IMAGE_FORMAT = png
# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
# enable generation of interactive SVG images that allow zooming and panning.
-# Note that this requires a modern browser other than Internet Explorer.
-# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
-# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible. Older versions of IE do not have SVG support.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
INTERACTIVE_SVG = NO
-# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
DOT_PATH =
# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the
-# \dotfile command).
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
DOTFILE_DIRS =
# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the
-# \mscfile command).
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
MSCFILE_DIRS =
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
-# nodes that will be shown in the graph. If the number of nodes in a graph
-# becomes larger than this value, doxygen will truncate the graph, which is
-# visualized by representing a node as a red box. Note that doxygen if the
-# number of direct children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
-# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# The DIAFILE_DIRS tag can be used to specify one or more directories that
+# contain dia files that are included in the documentation (see the \diafile
+# command).
+
+DIAFILE_DIRS =
+
+# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
+# path where java can find the plantuml.jar file. If left blank, it is assumed
+# PlantUML is not used or called during a preprocessing step. Doxygen will
+# generate a warning when it encounters a \startuml command in this case and
+# will not generate output for the diagram.
+
+PLANTUML_JAR_PATH =
+
+# When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a
+# configuration file for plantuml.
+
+PLANTUML_CFG_FILE =
+
+# When using plantuml, the specified paths are searched for files specified by
+# the !include statement in a plantuml block.
+
+PLANTUML_INCLUDE_PATH =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that doxygen if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
DOT_GRAPH_MAX_NODES = 50
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
-# graphs generated by dot. A depth value of 3 means that only nodes reachable
-# from the root by following a path via at most 3 edges will be shown. Nodes
-# that lay further from the root node will be omitted. Note that setting this
-# option to 1 or 2 may greatly reduce the computation time needed for large
-# code bases. Also note that the size of a graph can be further restricted by
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lay
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
MAX_DOT_GRAPH_DEPTH = 0
# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not
-# seem to support this out of the box. Warning: Depending on the platform used,
-# enabling this option may lead to badly anti-aliased labels on the edges of
-# a graph (i.e. they become hard to read).
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
DOT_TRANSPARENT = NO
-# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
# files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10)
-# support this, this feature is disabled by default.
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
DOT_MULTI_TARGETS = NO
-# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
-# generate a legend page explaining the meaning of the various boxes and
-# arrows in the dot generated graphs.
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
GENERATE_LEGEND = YES
-# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
-# remove the intermediate dot files that are used to generate
-# the various graphs.
+# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
DOT_CLEANUP = YES
diff --git a/src/third_party/wiredtiger/src/docs/build-posix.dox b/src/third_party/wiredtiger/src/docs/build-posix.dox
index 7fd9cfbec14..4f04cc54dd3 100644
--- a/src/third_party/wiredtiger/src/docs/build-posix.dox
+++ b/src/third_party/wiredtiger/src/docs/build-posix.dox
@@ -1,4 +1,4 @@
-/*! @page build-posix Building and installing WiredTiger on POSIX
+/*! @page build-posix Building and installing WiredTiger on POSIX (Linux, *BSD, OS X):
To read instructions on using the legacy autoconf build system, see @subpage build-autoconf.
diff --git a/src/third_party/wiredtiger/src/docs/introduction.dox b/src/third_party/wiredtiger/src/docs/introduction.dox
index 5e9543cdad6..887814c57ff 100644
--- a/src/third_party/wiredtiger/src/docs/introduction.dox
+++ b/src/third_party/wiredtiger/src/docs/introduction.dox
@@ -1,25 +1,25 @@
/*! @mainpage Reference Guide
-WiredTiger is an high performance, scalable, production quality, NoSQL,
-Open Source extensible platform for data management.
+WiredTiger is a production quality, high performance, scalable, NoSQL, Open Source extensible
+platform for data management. WiredTiger is developed and maintained by MongoDB, Inc., where
+it is the principle database storage engine.
-WiredTiger supports row-oriented storage (where all columns of a row are
-stored together), column-oriented storage (where columns are stored in
-groups, allowing for more efficient access and storage of column
-subsets) and log-structured merge trees (LSM), for sustained throughput
+WiredTiger supports row-oriented storage (where all columns of a row are stored together),
+column-oriented storage (where columns are stored in groups, allowing for more efficient access
+and storage of column subsets) and log-structured merge trees (LSM), for sustained throughput
under random insert workloads.
-WiredTiger includes ACID transactions with standard isolation levels and
-durability at both checkpoint and fine-grained granularity.
+WiredTiger includes ACID transactions with standard isolation levels and durability at both
+checkpoint and fine-grained granularity.
-WiredTiger can be used as a simple key/value store, but also has a
-complete schema layer, including indices and projections.
+WiredTiger can be used as a simple key/value store, but also has a complete schema layer,
+including indices and projections.
-WiredTiger should be generally portable to any 64-bit system supporting
-the ANSI C99, POSIX 1003.1 and POSIX 1003.1c (threads extension) standards.
+WiredTiger should be generally portable to any 64-bit system supporting the ANSI C and POSIX
+1003.1 standards.
-For more information on the WiredTiger architecture and why it might be
-right for your project, see:
+For more information on the WiredTiger architecture and why it might be right for your project,
+see:
- @subpage overview
@@ -30,16 +30,9 @@ For more information about building and installing WiredTiger, see:
For more information about writing WiredTiger applications, see:
+- @ref wt "WiredTiger API reference manual"
- @subpage programming
-For more information about the architecture and internals of WiredTiger, see:
-
-- @subpage arch-index
-
-For more information on tools and techniques used by WiredTiger developers, see:
-
-- @subpage tool-index
-
For more information about administrating WiredTiger databases, see:
- @subpage command_line
@@ -50,15 +43,22 @@ For release change logs and upgrading information, see:
- @subpage md_changelog
- @subpage upgrading
-WiredTiger is Open Source; for more information, see:
+WiredTiger is distributed under Open Source licenses; for more information, see:
- @subpage license
-WiredTiger releases are tested on Linux, FreeBSD and OS X; for more
-information, see:
+WiredTiger releases are tested on a variety of systems; for more information on testing, see:
- @subpage testing
+For more information on tools and techniques used during WiredTiger development, see:
+
+- @subpage tool-index
+
+For more information about the architecture and internals of WiredTiger, see:
+
+- @subpage arch-index
+
To browse the WiredTiger source code repository or contact us, see:
- @subpage community
diff --git a/src/third_party/wiredtiger/src/docs/spell.ok b/src/third_party/wiredtiger/src/docs/spell.ok
index 381b0547ac9..69103d28d1c 100644
--- a/src/third_party/wiredtiger/src/docs/spell.ok
+++ b/src/third_party/wiredtiger/src/docs/spell.ok
@@ -1,9 +1,9 @@
personal_ws-1.1 en 200
ACM
APIs
+ASAN
ActiveState
Adler's
-ASAN
Atomicity
autoconf
BLOBs
@@ -14,18 +14,18 @@ cmake
CMake
COV
CPPFLAGS
+CPPSuite
CPUs
CRC
CSV
+CSuite
+CURSTD
CXX
Cheng
Christoph
Collet's
Coverity
Coverity's
-CPPSuite
-CSuite
-CURSTD
Cyclomatic
DB's
DBTs
@@ -59,6 +59,7 @@ FlexeLint
FreeBSD
FreeBSD's
GCC
+GPL
Gawlick
Gimpel
GitHub
@@ -75,7 +76,6 @@ JavaScript
KMS
LD
LDFLAGS
-LibFuzzer
LIBS
LINKFLAGS
LLC
@@ -91,14 +91,15 @@ LZ
Lameter
LevelDB
Levyx
+LibFuzzer
MERCHANTABILITY
+MSAN
MVCC's
Makefiles
Mewhort
MongoDB
MongoDB's
Multithreaded
-MSAN
NOTFOUND
NSEC
NUMA
@@ -294,8 +295,8 @@ fieldname
fileBackgroundColor
fileBorderColor
fileFontColor
-fileShadowing
fileID
+fileShadowing
fileformats
fileid
filename
@@ -386,6 +387,7 @@ llvm
lmin
ln
loadtext
+loc
logc
lookup
lookups
@@ -633,8 +635,8 @@ warmup
wget
whitespace
wiredtiger
-workgen
workQ
+workgen
writelock
writelocks
wrlock
@@ -654,4 +656,3 @@ yieldcpu
zlib
zseries
zstd
-loc
diff --git a/src/third_party/wiredtiger/src/docs/tools/doxfilter.py b/src/third_party/wiredtiger/src/docs/tools/doxfilter.py
index a84723be0ec..39c48d75011 100755
--- a/src/third_party/wiredtiger/src/docs/tools/doxfilter.py
+++ b/src/third_party/wiredtiger/src/docs/tools/doxfilter.py
@@ -79,6 +79,7 @@ def process_arch(source):
result += '@arch_page_table{' + \
data_structures_str + ',' + \
files_str + '}\n'
+ result += '@arch_page_caution\n'
else:
result += line + '\n'
return result
diff --git a/src/third_party/wiredtiger/src/include/block.h b/src/third_party/wiredtiger/src/include/block.h
index 2006be2f9f4..cd6cf3e662e 100644
--- a/src/third_party/wiredtiger/src/include/block.h
+++ b/src/third_party/wiredtiger/src/include/block.h
@@ -185,7 +185,6 @@ struct __wt_bm {
int (*compact_skip)(WT_BM *, WT_SESSION_IMPL *, bool *);
int (*compact_start)(WT_BM *, WT_SESSION_IMPL *);
int (*corrupt)(WT_BM *, WT_SESSION_IMPL *, const uint8_t *, size_t);
- int (*flush_tier)(WT_BM *, WT_SESSION_IMPL *, uint8_t **, size_t *);
int (*free)(WT_BM *, WT_SESSION_IMPL *, const uint8_t *, size_t);
bool (*is_mapped)(WT_BM *, WT_SESSION_IMPL *);
int (*map_discard)(WT_BM *, WT_SESSION_IMPL *, void *, size_t);
@@ -197,6 +196,7 @@ struct __wt_bm {
int (*salvage_valid)(WT_BM *, WT_SESSION_IMPL *, uint8_t *, size_t, bool);
int (*size)(WT_BM *, WT_SESSION_IMPL *, wt_off_t *);
int (*stat)(WT_BM *, WT_SESSION_IMPL *, WT_DSRC_STATS *stats);
+ int (*switch_object)(WT_BM *, WT_SESSION_IMPL *, uint32_t, uint32_t);
int (*sync)(WT_BM *, WT_SESSION_IMPL *, bool);
int (*verify_addr)(WT_BM *, WT_SESSION_IMPL *, const uint8_t *, size_t);
int (*verify_end)(WT_BM *, WT_SESSION_IMPL *);
@@ -221,8 +221,9 @@ struct __wt_bm {
* Block manager handle, references a single file.
*/
struct __wt_block {
- const char *name; /* Name */
- uint64_t name_hash; /* Hash of name */
+ const char *name; /* Name */
+ uint64_t name_hash; /* Hash of name */
+ WT_BLOCK_FILE_OPENER *opener; /* how to open files/objects */
/* A list of block manager handles, sharing a file descriptor. */
uint32_t ref; /* References */
@@ -239,7 +240,6 @@ struct __wt_block {
/* Configuration information, set when the file is opened. */
uint32_t allocfirst; /* Allocation is first-fit */
uint32_t allocsize; /* Allocation size */
- bool has_objects; /* Address cookies contain object id */
size_t os_cache; /* System buffer cache flush max */
size_t os_cache_max;
size_t os_cache_dirty_max;
@@ -247,8 +247,11 @@ struct __wt_block {
u_int block_header; /* Header length */
/* Object file tracking. */
- uint32_t file_flags, objectid, max_objectid;
- WT_FH **ofh;
+ bool has_objects; /* Address cookies contain object id */
+ uint32_t file_flags; /* Flags for opening objects */
+ uint32_t objectid; /* Current writeable object id */
+ uint32_t max_objectid; /* Size of object handle array */
+ WT_FH **ofh; /* Object file handles */
size_t ofh_alloc;
/*
@@ -316,6 +319,20 @@ struct __wt_block_desc {
#define WT_BLOCK_DESC_SIZE 16
/*
+ * WT_BLOCK_FILE_OPENER --
+ * An open callback for the block manager. This hides details about how to access the
+ * different objects that make up a tiered file.
+ */
+struct __wt_block_file_opener {
+ /* An id to be used with the open call to reference the current object. */
+#define WT_TIERED_CURRENT_ID 0xFFFFFFFFUL
+ int (*open)(
+ WT_BLOCK_FILE_OPENER *, WT_SESSION_IMPL *, uint32_t, WT_FS_OPEN_FILE_TYPE, u_int, WT_FH **);
+ uint32_t (*current_object_id)(WT_BLOCK_FILE_OPENER *);
+ void *cookie; /* Used in open call */
+};
+
+/*
* __wt_block_desc_byteswap --
* Handle big- and little-endian transformation of a description block.
*/
diff --git a/src/third_party/wiredtiger/src/include/connection.h b/src/third_party/wiredtiger/src/include/connection.h
index b13ab2b911e..a087086b200 100644
--- a/src/third_party/wiredtiger/src/include/connection.h
+++ b/src/third_party/wiredtiger/src/include/connection.h
@@ -225,6 +225,7 @@ struct __wt_connection_impl {
WT_SPINLOCK api_lock; /* Connection API spinlock */
WT_SPINLOCK checkpoint_lock; /* Checkpoint spinlock */
WT_SPINLOCK fh_lock; /* File handle queue spinlock */
+ WT_SPINLOCK flush_tier_lock; /* Flush tier spinlock */
WT_SPINLOCK metadata_lock; /* Metadata update spinlock */
WT_SPINLOCK reconfig_lock; /* Single thread reconfigure */
WT_SPINLOCK schema_lock; /* Schema operation spinlock */
@@ -420,8 +421,10 @@ struct __wt_connection_impl {
WT_SESSION_IMPL *tiered_session; /* Tiered thread session */
wt_thread_t tiered_tid; /* Tiered thread */
bool tiered_tid_set; /* Tiered thread set */
+ WT_CONDVAR *flush_cond; /* Flush wait mutex */
WT_CONDVAR *tiered_cond; /* Tiered wait mutex */
bool tiered_server_running; /* Internal tiered server operating */
+ uint32_t flush_state; /* State of last flush tier */
WT_TIERED_MANAGER tiered_mgr; /* Tiered manager thread information */
WT_SESSION_IMPL *tiered_mgr_session; /* Tiered manager thread session */
diff --git a/src/third_party/wiredtiger/src/include/dhandle.h b/src/third_party/wiredtiger/src/include/dhandle.h
index 01cfeffce6e..c17970f4760 100644
--- a/src/third_party/wiredtiger/src/include/dhandle.h
+++ b/src/third_party/wiredtiger/src/include/dhandle.h
@@ -94,8 +94,8 @@ struct __wt_data_handle {
WT_DHANDLE_TYPE_TIERED_TREE
} type;
- /* This will include the tiered type soon. */
-#define WT_DHANDLE_BTREE(dhandle) ((dhandle)->type == WT_DHANDLE_TYPE_BTREE)
+#define WT_DHANDLE_BTREE(dhandle) \
+ ((dhandle)->type == WT_DHANDLE_TYPE_BTREE || (dhandle)->type == WT_DHANDLE_TYPE_TIERED)
bool compact_skip; /* If the handle failed to compact */
diff --git a/src/third_party/wiredtiger/src/include/extern.h b/src/third_party/wiredtiger/src/include/extern.h
index e89dd2c943e..9d6499f31a6 100644
--- a/src/third_party/wiredtiger/src/include/extern.h
+++ b/src/third_party/wiredtiger/src/include/extern.h
@@ -169,8 +169,8 @@ extern int __wt_block_manager_drop(WT_SESSION_IMPL *session, const char *filenam
extern int __wt_block_manager_named_size(WT_SESSION_IMPL *session, const char *name,
wt_off_t *sizep) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern int __wt_block_manager_open(WT_SESSION_IMPL *session, const char *filename,
- const char *cfg[], bool forced_salvage, bool readonly, uint32_t allocsize, WT_BM **bmp)
- WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
+ WT_BLOCK_FILE_OPENER *opener, const char *cfg[], bool forced_salvage, bool readonly,
+ uint32_t allocsize, WT_BM **bmp) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern int __wt_block_manager_size(WT_BM *bm, WT_SESSION_IMPL *session, wt_off_t *sizep)
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern int __wt_block_map(WT_SESSION_IMPL *session, WT_BLOCK *block, void *mapped_regionp,
@@ -182,9 +182,9 @@ extern int __wt_block_off_free(WT_SESSION_IMPL *session, WT_BLOCK *block, uint32
wt_off_t offset, wt_off_t size) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern int __wt_block_off_remove_overlap(WT_SESSION_IMPL *session, WT_BLOCK *block, WT_EXTLIST *el,
wt_off_t off, wt_off_t size) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
-extern int __wt_block_open(WT_SESSION_IMPL *session, const char *filename, const char *cfg[],
- bool forced_salvage, bool readonly, uint32_t allocsize, WT_BLOCK **blockp)
- WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
+extern int __wt_block_open(WT_SESSION_IMPL *session, const char *filename,
+ WT_BLOCK_FILE_OPENER *opener, const char *cfg[], bool forced_salvage, bool readonly,
+ uint32_t allocsize, WT_BLOCK **blockp) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern int __wt_block_read_off(WT_SESSION_IMPL *session, WT_BLOCK *block, WT_ITEM *buf,
uint32_t objectid, wt_off_t offset, uint32_t size, uint32_t checksum)
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
@@ -200,12 +200,12 @@ extern int __wt_block_salvage_valid(WT_SESSION_IMPL *session, WT_BLOCK *block, u
size_t addr_size, bool valid) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern int __wt_block_size_alloc(WT_SESSION_IMPL *session, WT_SIZE **szp)
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
-extern int __wt_block_tiered_flush(WT_SESSION_IMPL *session, WT_BLOCK *block,
- uint8_t **flush_cookie, size_t *cookie_size) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
+extern int __wt_block_switch_object(WT_SESSION_IMPL *session, WT_BLOCK *block, uint32_t object_id,
+ uint32_t flags) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
+extern int __wt_block_tiered_fh(WT_SESSION_IMPL *session, WT_BLOCK *block, uint32_t object_id,
+ WT_FH **fhp) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern int __wt_block_tiered_load(WT_SESSION_IMPL *session, WT_BLOCK *block, WT_BLOCK_CKPT *ci)
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
-extern int __wt_block_tiered_newfile(WT_SESSION_IMPL *session, WT_BLOCK *block)
- WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern int __wt_block_truncate(WT_SESSION_IMPL *session, WT_BLOCK *block, wt_off_t len)
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern int __wt_block_unmap(WT_SESSION_IMPL *session, WT_BLOCK *block, void *mapped_region,
@@ -304,6 +304,8 @@ extern int __wt_btree_open(WT_SESSION_IMPL *session, const char *op_cfg[])
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern int __wt_btree_stat_init(WT_SESSION_IMPL *session, WT_CURSOR_STAT *cst)
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
+extern int __wt_btree_switch_object(WT_SESSION_IMPL *session, uint32_t object_id, uint32_t flags)
+ WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern int __wt_btree_tree_open(WT_SESSION_IMPL *session, const uint8_t *addr, size_t addr_size)
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern int __wt_buf_catfmt(WT_SESSION_IMPL *session, WT_ITEM *buf, const char *fmt, ...)
@@ -1454,7 +1456,7 @@ extern int __wt_thread_group_resize(WT_SESSION_IMPL *session, WT_THREAD_GROUP *g
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern int __wt_tier_do_flush(WT_SESSION_IMPL *session, WT_TIERED *tiered, const char *local_uri,
const char *obj_uri) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
-extern int __wt_tier_flush(WT_SESSION_IMPL *session, WT_TIERED *tiered, uint64_t id)
+extern int __wt_tier_flush(WT_SESSION_IMPL *session, WT_TIERED *tiered, uint32_t id)
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern int __wt_tiered_bucket_config(WT_SESSION_IMPL *session, const char *cfg[],
WT_BUCKET_STORAGE **bstoragep) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
@@ -1462,13 +1464,16 @@ extern int __wt_tiered_close(WT_SESSION_IMPL *session, WT_TIERED *tiered)
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern int __wt_tiered_conn_config(WT_SESSION_IMPL *session, const char **cfg, bool reconfig)
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
-extern int __wt_tiered_name(WT_SESSION_IMPL *session, WT_DATA_HANDLE *dhandle, uint64_t id,
+extern int __wt_tiered_name(WT_SESSION_IMPL *session, WT_DATA_HANDLE *dhandle, uint32_t id,
uint32_t flags, const char **retp) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern int __wt_tiered_open(WT_SESSION_IMPL *session, const char *cfg[])
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
-extern int __wt_tiered_put_drop_local(WT_SESSION_IMPL *session, WT_TIERED *tiered, uint64_t id)
+extern int __wt_tiered_opener(WT_SESSION_IMPL *session, WT_DATA_HANDLE *dhandle,
+ WT_BLOCK_FILE_OPENER **openerp, const char **filenamep)
+ WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
+extern int __wt_tiered_put_drop_local(WT_SESSION_IMPL *session, WT_TIERED *tiered, uint32_t id)
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
-extern int __wt_tiered_put_drop_shared(WT_SESSION_IMPL *session, WT_TIERED *tiered, uint64_t id)
+extern int __wt_tiered_put_drop_shared(WT_SESSION_IMPL *session, WT_TIERED *tiered, uint32_t id)
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern int __wt_tiered_put_flush(WT_SESSION_IMPL *session, WT_TIERED *tiered)
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
@@ -1825,6 +1830,7 @@ extern void __wt_tiered_get_flush(WT_SESSION_IMPL *session, WT_TIERED_WORK_UNIT
extern void __wt_tiered_pop_work(
WT_SESSION_IMPL *session, uint32_t type, uint64_t maxval, WT_TIERED_WORK_UNIT **entryp);
extern void __wt_tiered_push_work(WT_SESSION_IMPL *session, WT_TIERED_WORK_UNIT *entry);
+extern void __wt_tiered_work_free(WT_SESSION_IMPL *session, WT_TIERED_WORK_UNIT *entry);
extern void __wt_timestamp_to_hex_string(wt_timestamp_t ts, char *hex_timestamp);
extern void __wt_txn_bump_snapshot(WT_SESSION_IMPL *session);
extern void __wt_txn_clear_durable_timestamp(WT_SESSION_IMPL *session);
diff --git a/src/third_party/wiredtiger/src/include/stat.h b/src/third_party/wiredtiger/src/include/stat.h
index 3f4bd5150e3..5b14e4bc80e 100644
--- a/src/third_party/wiredtiger/src/include/stat.h
+++ b/src/third_party/wiredtiger/src/include/stat.h
@@ -586,6 +586,8 @@ struct __wt_connection_stats {
int64_t rec_time_window_prepared;
int64_t rec_split_stashed_bytes;
int64_t rec_split_stashed_objects;
+ int64_t flush_state_races;
+ int64_t flush_tier_busy;
int64_t flush_tier;
int64_t session_open;
int64_t session_query_ts;
diff --git a/src/third_party/wiredtiger/src/include/tiered.h b/src/third_party/wiredtiger/src/include/tiered.h
index 8fa2fec35a7..12ffc5f7b49 100644
--- a/src/third_party/wiredtiger/src/include/tiered.h
+++ b/src/third_party/wiredtiger/src/include/tiered.h
@@ -46,6 +46,18 @@ struct __wt_tiered_manager {
#define WT_TIERED_NAME_SHARED 0x8u
/* AUTOMATIC FLAG VALUE GENERATION STOP */
+/* Flush tier flags */
+/* AUTOMATIC FLAG VALUE GENERATION START */
+#define WT_FLUSH_TIER_FORCE 0x1u
+#define WT_FLUSH_TIER_OFF 0x2u
+#define WT_FLUSH_TIER_ON 0x4u
+/* AUTOMATIC FLAG VALUE GENERATION STOP */
+
+/*
+ * The flush state is a simple counter we manipulate atomically.
+ */
+#define WT_FLUSH_STATE_DONE(state) ((state) == 0)
+
/*
* Different types of work units for tiered trees.
*/
@@ -64,7 +76,7 @@ struct __wt_tiered_work_unit {
uint32_t type; /* Type of operation */
uint64_t op_val; /* A value for the operation */
WT_TIERED *tiered; /* Tiered tree */
- uint64_t id; /* Id of the object */
+ uint32_t id; /* Id of the object */
/* AUTOMATIC FLAG VALUE GENERATION START */
#define WT_TIERED_WORK_FORCE 0x1u /* Force operation */
#define WT_TIERED_WORK_FREE 0x2u /* Free data after operation */
@@ -111,8 +123,10 @@ struct __wt_tiered {
WT_TIERED_TIERS tiers[WT_TIERED_MAX_TIERS]; /* Tiers array */
- uint64_t current_id; /* Current object id number */
- uint64_t next_id; /* Next object number */
+ WT_BLOCK_FILE_OPENER opener;
+
+ uint32_t current_id; /* Current object id number */
+ uint32_t next_id; /* Next object number */
WT_COLLATOR *collator; /* TODO: handle custom collation */
/* TODO: What about compression, encryption, etc? Do we need to worry about that here? */
@@ -123,6 +137,7 @@ struct __wt_tiered {
uint32_t flags;
};
+/* FIXME: Currently the WT_TIERED_OBJECT data structure is not used. */
/*
* WT_TIERED_OBJECT --
* Definition of a tiered object. This is a single object in a tiered tree.
@@ -147,6 +162,7 @@ struct __wt_tiered_object {
uint32_t flags;
};
+/* FIXME: Currently the WT_TIERED_TREE data structure is not used. */
/*
* WT_TIERED_TREE --
* Definition of the shared tiered portion of a tree.
diff --git a/src/third_party/wiredtiger/src/include/wiredtiger.in b/src/third_party/wiredtiger/src/include/wiredtiger.in
index d0a4c0ad38e..75ac38e56d6 100644
--- a/src/third_party/wiredtiger/src/include/wiredtiger.in
+++ b/src/third_party/wiredtiger/src/include/wiredtiger.in
@@ -808,6 +808,14 @@ struct __wt_session {
* timestamp. The supplied value must not be older than the current oldest timestamp and it
* must not be newer than the stable timestamp., a string; default empty.}
* @config{force, force sharing of all data., a boolean flag; default \c false.}
+ * @config{lock_wait, wait for locks\, if \c lock_wait=false\, fail if any required locks
+ * are not available immediately., a boolean flag; default \c true.}
+ * @config{sync, wait for all objects to be flushed to the shared storage to the level
+ * specified. The \c off setting does not wait for any objects to be written to the tiered
+ * storage system but returns immediately after generating the objects and work units for an
+ * internal thread. The \c on setting causes the caller to wait until all work queued for
+ * this call to be completely processed before returning., a string\, chosen from the
+ * following options: \c "off"\, \c "on"; default \c on.}
* @configend
* @errors
*/
@@ -4821,11 +4829,11 @@ struct __wt_storage_source {
*/
int (*ss_flush)(WT_STORAGE_SOURCE *storage_source, WT_SESSION *session,
WT_FILE_SYSTEM *file_system, const char *source, const char *object,
- const char *config);
+ const char *config);
/*!
* After a flush, rename the source file from the default file system to be cached in
- * the shared object storage.
+ * the shared object storage.
*
* @errors
*
@@ -5565,541 +5573,545 @@ extern int wiredtiger_extension_terminate(WT_CONNECTION *connection);
#define WT_STAT_CONN_REC_SPLIT_STASHED_BYTES 1271
/*! reconciliation: split objects currently awaiting free */
#define WT_STAT_CONN_REC_SPLIT_STASHED_OBJECTS 1272
+/*! session: flush state races */
+#define WT_STAT_CONN_FLUSH_STATE_RACES 1273
+/*! session: flush_tier busy retries */
+#define WT_STAT_CONN_FLUSH_TIER_BUSY 1274
/*! session: flush_tier operation calls */
-#define WT_STAT_CONN_FLUSH_TIER 1273
+#define WT_STAT_CONN_FLUSH_TIER 1275
/*! session: open session count */
-#define WT_STAT_CONN_SESSION_OPEN 1274
+#define WT_STAT_CONN_SESSION_OPEN 1276
/*! session: session query timestamp calls */
-#define WT_STAT_CONN_SESSION_QUERY_TS 1275
+#define WT_STAT_CONN_SESSION_QUERY_TS 1277
/*! session: table alter failed calls */
-#define WT_STAT_CONN_SESSION_TABLE_ALTER_FAIL 1276
+#define WT_STAT_CONN_SESSION_TABLE_ALTER_FAIL 1278
/*! session: table alter successful calls */
-#define WT_STAT_CONN_SESSION_TABLE_ALTER_SUCCESS 1277
+#define WT_STAT_CONN_SESSION_TABLE_ALTER_SUCCESS 1279
/*! session: table alter unchanged and skipped */
-#define WT_STAT_CONN_SESSION_TABLE_ALTER_SKIP 1278
+#define WT_STAT_CONN_SESSION_TABLE_ALTER_SKIP 1280
/*! session: table compact failed calls */
-#define WT_STAT_CONN_SESSION_TABLE_COMPACT_FAIL 1279
+#define WT_STAT_CONN_SESSION_TABLE_COMPACT_FAIL 1281
/*! session: table compact successful calls */
-#define WT_STAT_CONN_SESSION_TABLE_COMPACT_SUCCESS 1280
+#define WT_STAT_CONN_SESSION_TABLE_COMPACT_SUCCESS 1282
/*! session: table create failed calls */
-#define WT_STAT_CONN_SESSION_TABLE_CREATE_FAIL 1281
+#define WT_STAT_CONN_SESSION_TABLE_CREATE_FAIL 1283
/*! session: table create successful calls */
-#define WT_STAT_CONN_SESSION_TABLE_CREATE_SUCCESS 1282
+#define WT_STAT_CONN_SESSION_TABLE_CREATE_SUCCESS 1284
/*! session: table drop failed calls */
-#define WT_STAT_CONN_SESSION_TABLE_DROP_FAIL 1283
+#define WT_STAT_CONN_SESSION_TABLE_DROP_FAIL 1285
/*! session: table drop successful calls */
-#define WT_STAT_CONN_SESSION_TABLE_DROP_SUCCESS 1284
+#define WT_STAT_CONN_SESSION_TABLE_DROP_SUCCESS 1286
/*! session: table rename failed calls */
-#define WT_STAT_CONN_SESSION_TABLE_RENAME_FAIL 1285
+#define WT_STAT_CONN_SESSION_TABLE_RENAME_FAIL 1287
/*! session: table rename successful calls */
-#define WT_STAT_CONN_SESSION_TABLE_RENAME_SUCCESS 1286
+#define WT_STAT_CONN_SESSION_TABLE_RENAME_SUCCESS 1288
/*! session: table salvage failed calls */
-#define WT_STAT_CONN_SESSION_TABLE_SALVAGE_FAIL 1287
+#define WT_STAT_CONN_SESSION_TABLE_SALVAGE_FAIL 1289
/*! session: table salvage successful calls */
-#define WT_STAT_CONN_SESSION_TABLE_SALVAGE_SUCCESS 1288
+#define WT_STAT_CONN_SESSION_TABLE_SALVAGE_SUCCESS 1290
/*! session: table truncate failed calls */
-#define WT_STAT_CONN_SESSION_TABLE_TRUNCATE_FAIL 1289
+#define WT_STAT_CONN_SESSION_TABLE_TRUNCATE_FAIL 1291
/*! session: table truncate successful calls */
-#define WT_STAT_CONN_SESSION_TABLE_TRUNCATE_SUCCESS 1290
+#define WT_STAT_CONN_SESSION_TABLE_TRUNCATE_SUCCESS 1292
/*! session: table verify failed calls */
-#define WT_STAT_CONN_SESSION_TABLE_VERIFY_FAIL 1291
+#define WT_STAT_CONN_SESSION_TABLE_VERIFY_FAIL 1293
/*! session: table verify successful calls */
-#define WT_STAT_CONN_SESSION_TABLE_VERIFY_SUCCESS 1292
+#define WT_STAT_CONN_SESSION_TABLE_VERIFY_SUCCESS 1294
/*! thread-state: active filesystem fsync calls */
-#define WT_STAT_CONN_THREAD_FSYNC_ACTIVE 1293
+#define WT_STAT_CONN_THREAD_FSYNC_ACTIVE 1295
/*! thread-state: active filesystem read calls */
-#define WT_STAT_CONN_THREAD_READ_ACTIVE 1294
+#define WT_STAT_CONN_THREAD_READ_ACTIVE 1296
/*! thread-state: active filesystem write calls */
-#define WT_STAT_CONN_THREAD_WRITE_ACTIVE 1295
+#define WT_STAT_CONN_THREAD_WRITE_ACTIVE 1297
/*! thread-yield: application thread time evicting (usecs) */
-#define WT_STAT_CONN_APPLICATION_EVICT_TIME 1296
+#define WT_STAT_CONN_APPLICATION_EVICT_TIME 1298
/*! thread-yield: application thread time waiting for cache (usecs) */
-#define WT_STAT_CONN_APPLICATION_CACHE_TIME 1297
+#define WT_STAT_CONN_APPLICATION_CACHE_TIME 1299
/*!
* thread-yield: connection close blocked waiting for transaction state
* stabilization
*/
-#define WT_STAT_CONN_TXN_RELEASE_BLOCKED 1298
+#define WT_STAT_CONN_TXN_RELEASE_BLOCKED 1300
/*! thread-yield: connection close yielded for lsm manager shutdown */
-#define WT_STAT_CONN_CONN_CLOSE_BLOCKED_LSM 1299
+#define WT_STAT_CONN_CONN_CLOSE_BLOCKED_LSM 1301
/*! thread-yield: data handle lock yielded */
-#define WT_STAT_CONN_DHANDLE_LOCK_BLOCKED 1300
+#define WT_STAT_CONN_DHANDLE_LOCK_BLOCKED 1302
/*!
* thread-yield: get reference for page index and slot time sleeping
* (usecs)
*/
-#define WT_STAT_CONN_PAGE_INDEX_SLOT_REF_BLOCKED 1301
+#define WT_STAT_CONN_PAGE_INDEX_SLOT_REF_BLOCKED 1303
/*! thread-yield: log server sync yielded for log write */
-#define WT_STAT_CONN_LOG_SERVER_SYNC_BLOCKED 1302
+#define WT_STAT_CONN_LOG_SERVER_SYNC_BLOCKED 1304
/*! thread-yield: page access yielded due to prepare state change */
-#define WT_STAT_CONN_PREPARED_TRANSITION_BLOCKED_PAGE 1303
+#define WT_STAT_CONN_PREPARED_TRANSITION_BLOCKED_PAGE 1305
/*! thread-yield: page acquire busy blocked */
-#define WT_STAT_CONN_PAGE_BUSY_BLOCKED 1304
+#define WT_STAT_CONN_PAGE_BUSY_BLOCKED 1306
/*! thread-yield: page acquire eviction blocked */
-#define WT_STAT_CONN_PAGE_FORCIBLE_EVICT_BLOCKED 1305
+#define WT_STAT_CONN_PAGE_FORCIBLE_EVICT_BLOCKED 1307
/*! thread-yield: page acquire locked blocked */
-#define WT_STAT_CONN_PAGE_LOCKED_BLOCKED 1306
+#define WT_STAT_CONN_PAGE_LOCKED_BLOCKED 1308
/*! thread-yield: page acquire read blocked */
-#define WT_STAT_CONN_PAGE_READ_BLOCKED 1307
+#define WT_STAT_CONN_PAGE_READ_BLOCKED 1309
/*! thread-yield: page acquire time sleeping (usecs) */
-#define WT_STAT_CONN_PAGE_SLEEP 1308
+#define WT_STAT_CONN_PAGE_SLEEP 1310
/*!
* thread-yield: page delete rollback time sleeping for state change
* (usecs)
*/
-#define WT_STAT_CONN_PAGE_DEL_ROLLBACK_BLOCKED 1309
+#define WT_STAT_CONN_PAGE_DEL_ROLLBACK_BLOCKED 1311
/*! thread-yield: page reconciliation yielded due to child modification */
-#define WT_STAT_CONN_CHILD_MODIFY_BLOCKED_PAGE 1310
+#define WT_STAT_CONN_CHILD_MODIFY_BLOCKED_PAGE 1312
/*! transaction: Number of prepared updates */
-#define WT_STAT_CONN_TXN_PREPARED_UPDATES 1311
+#define WT_STAT_CONN_TXN_PREPARED_UPDATES 1313
/*! transaction: Number of prepared updates committed */
-#define WT_STAT_CONN_TXN_PREPARED_UPDATES_COMMITTED 1312
+#define WT_STAT_CONN_TXN_PREPARED_UPDATES_COMMITTED 1314
/*! transaction: Number of prepared updates repeated on the same key */
-#define WT_STAT_CONN_TXN_PREPARED_UPDATES_KEY_REPEATED 1313
+#define WT_STAT_CONN_TXN_PREPARED_UPDATES_KEY_REPEATED 1315
/*! transaction: Number of prepared updates rolled back */
-#define WT_STAT_CONN_TXN_PREPARED_UPDATES_ROLLEDBACK 1314
+#define WT_STAT_CONN_TXN_PREPARED_UPDATES_ROLLEDBACK 1316
/*! transaction: prepared transactions */
-#define WT_STAT_CONN_TXN_PREPARE 1315
+#define WT_STAT_CONN_TXN_PREPARE 1317
/*! transaction: prepared transactions committed */
-#define WT_STAT_CONN_TXN_PREPARE_COMMIT 1316
+#define WT_STAT_CONN_TXN_PREPARE_COMMIT 1318
/*! transaction: prepared transactions currently active */
-#define WT_STAT_CONN_TXN_PREPARE_ACTIVE 1317
+#define WT_STAT_CONN_TXN_PREPARE_ACTIVE 1319
/*! transaction: prepared transactions rolled back */
-#define WT_STAT_CONN_TXN_PREPARE_ROLLBACK 1318
+#define WT_STAT_CONN_TXN_PREPARE_ROLLBACK 1320
/*! transaction: query timestamp calls */
-#define WT_STAT_CONN_TXN_QUERY_TS 1319
+#define WT_STAT_CONN_TXN_QUERY_TS 1321
/*! transaction: rollback to stable calls */
-#define WT_STAT_CONN_TXN_RTS 1320
+#define WT_STAT_CONN_TXN_RTS 1322
/*! transaction: rollback to stable pages visited */
-#define WT_STAT_CONN_TXN_RTS_PAGES_VISITED 1321
+#define WT_STAT_CONN_TXN_RTS_PAGES_VISITED 1323
/*! transaction: rollback to stable tree walk skipping pages */
-#define WT_STAT_CONN_TXN_RTS_TREE_WALK_SKIP_PAGES 1322
+#define WT_STAT_CONN_TXN_RTS_TREE_WALK_SKIP_PAGES 1324
/*! transaction: rollback to stable updates aborted */
-#define WT_STAT_CONN_TXN_RTS_UPD_ABORTED 1323
+#define WT_STAT_CONN_TXN_RTS_UPD_ABORTED 1325
/*! transaction: sessions scanned in each walk of concurrent sessions */
-#define WT_STAT_CONN_TXN_SESSIONS_WALKED 1324
+#define WT_STAT_CONN_TXN_SESSIONS_WALKED 1326
/*! transaction: set timestamp calls */
-#define WT_STAT_CONN_TXN_SET_TS 1325
+#define WT_STAT_CONN_TXN_SET_TS 1327
/*! transaction: set timestamp durable calls */
-#define WT_STAT_CONN_TXN_SET_TS_DURABLE 1326
+#define WT_STAT_CONN_TXN_SET_TS_DURABLE 1328
/*! transaction: set timestamp durable updates */
-#define WT_STAT_CONN_TXN_SET_TS_DURABLE_UPD 1327
+#define WT_STAT_CONN_TXN_SET_TS_DURABLE_UPD 1329
/*! transaction: set timestamp oldest calls */
-#define WT_STAT_CONN_TXN_SET_TS_OLDEST 1328
+#define WT_STAT_CONN_TXN_SET_TS_OLDEST 1330
/*! transaction: set timestamp oldest updates */
-#define WT_STAT_CONN_TXN_SET_TS_OLDEST_UPD 1329
+#define WT_STAT_CONN_TXN_SET_TS_OLDEST_UPD 1331
/*! transaction: set timestamp stable calls */
-#define WT_STAT_CONN_TXN_SET_TS_STABLE 1330
+#define WT_STAT_CONN_TXN_SET_TS_STABLE 1332
/*! transaction: set timestamp stable updates */
-#define WT_STAT_CONN_TXN_SET_TS_STABLE_UPD 1331
+#define WT_STAT_CONN_TXN_SET_TS_STABLE_UPD 1333
/*! transaction: transaction begins */
-#define WT_STAT_CONN_TXN_BEGIN 1332
+#define WT_STAT_CONN_TXN_BEGIN 1334
/*! transaction: transaction checkpoint currently running */
-#define WT_STAT_CONN_TXN_CHECKPOINT_RUNNING 1333
+#define WT_STAT_CONN_TXN_CHECKPOINT_RUNNING 1335
/*!
* transaction: transaction checkpoint currently running for history
* store file
*/
-#define WT_STAT_CONN_TXN_CHECKPOINT_RUNNING_HS 1334
+#define WT_STAT_CONN_TXN_CHECKPOINT_RUNNING_HS 1336
/*! transaction: transaction checkpoint generation */
-#define WT_STAT_CONN_TXN_CHECKPOINT_GENERATION 1335
+#define WT_STAT_CONN_TXN_CHECKPOINT_GENERATION 1337
/*!
* transaction: transaction checkpoint history store file duration
* (usecs)
*/
-#define WT_STAT_CONN_TXN_HS_CKPT_DURATION 1336
+#define WT_STAT_CONN_TXN_HS_CKPT_DURATION 1338
/*! transaction: transaction checkpoint max time (msecs) */
-#define WT_STAT_CONN_TXN_CHECKPOINT_TIME_MAX 1337
+#define WT_STAT_CONN_TXN_CHECKPOINT_TIME_MAX 1339
/*! transaction: transaction checkpoint min time (msecs) */
-#define WT_STAT_CONN_TXN_CHECKPOINT_TIME_MIN 1338
+#define WT_STAT_CONN_TXN_CHECKPOINT_TIME_MIN 1340
/*!
* transaction: transaction checkpoint most recent duration for gathering
* all handles (usecs)
*/
-#define WT_STAT_CONN_TXN_CHECKPOINT_HANDLE_DURATION 1339
+#define WT_STAT_CONN_TXN_CHECKPOINT_HANDLE_DURATION 1341
/*!
* transaction: transaction checkpoint most recent duration for gathering
* applied handles (usecs)
*/
-#define WT_STAT_CONN_TXN_CHECKPOINT_HANDLE_DURATION_APPLY 1340
+#define WT_STAT_CONN_TXN_CHECKPOINT_HANDLE_DURATION_APPLY 1342
/*!
* transaction: transaction checkpoint most recent duration for gathering
* skipped handles (usecs)
*/
-#define WT_STAT_CONN_TXN_CHECKPOINT_HANDLE_DURATION_SKIP 1341
+#define WT_STAT_CONN_TXN_CHECKPOINT_HANDLE_DURATION_SKIP 1343
/*! transaction: transaction checkpoint most recent handles applied */
-#define WT_STAT_CONN_TXN_CHECKPOINT_HANDLE_APPLIED 1342
+#define WT_STAT_CONN_TXN_CHECKPOINT_HANDLE_APPLIED 1344
/*! transaction: transaction checkpoint most recent handles skipped */
-#define WT_STAT_CONN_TXN_CHECKPOINT_HANDLE_SKIPPED 1343
+#define WT_STAT_CONN_TXN_CHECKPOINT_HANDLE_SKIPPED 1345
/*! transaction: transaction checkpoint most recent handles walked */
-#define WT_STAT_CONN_TXN_CHECKPOINT_HANDLE_WALKED 1344
+#define WT_STAT_CONN_TXN_CHECKPOINT_HANDLE_WALKED 1346
/*! transaction: transaction checkpoint most recent time (msecs) */
-#define WT_STAT_CONN_TXN_CHECKPOINT_TIME_RECENT 1345
+#define WT_STAT_CONN_TXN_CHECKPOINT_TIME_RECENT 1347
/*! transaction: transaction checkpoint prepare currently running */
-#define WT_STAT_CONN_TXN_CHECKPOINT_PREP_RUNNING 1346
+#define WT_STAT_CONN_TXN_CHECKPOINT_PREP_RUNNING 1348
/*! transaction: transaction checkpoint prepare max time (msecs) */
-#define WT_STAT_CONN_TXN_CHECKPOINT_PREP_MAX 1347
+#define WT_STAT_CONN_TXN_CHECKPOINT_PREP_MAX 1349
/*! transaction: transaction checkpoint prepare min time (msecs) */
-#define WT_STAT_CONN_TXN_CHECKPOINT_PREP_MIN 1348
+#define WT_STAT_CONN_TXN_CHECKPOINT_PREP_MIN 1350
/*! transaction: transaction checkpoint prepare most recent time (msecs) */
-#define WT_STAT_CONN_TXN_CHECKPOINT_PREP_RECENT 1349
+#define WT_STAT_CONN_TXN_CHECKPOINT_PREP_RECENT 1351
/*! transaction: transaction checkpoint prepare total time (msecs) */
-#define WT_STAT_CONN_TXN_CHECKPOINT_PREP_TOTAL 1350
+#define WT_STAT_CONN_TXN_CHECKPOINT_PREP_TOTAL 1352
/*! transaction: transaction checkpoint scrub dirty target */
-#define WT_STAT_CONN_TXN_CHECKPOINT_SCRUB_TARGET 1351
+#define WT_STAT_CONN_TXN_CHECKPOINT_SCRUB_TARGET 1353
/*! transaction: transaction checkpoint scrub time (msecs) */
-#define WT_STAT_CONN_TXN_CHECKPOINT_SCRUB_TIME 1352
+#define WT_STAT_CONN_TXN_CHECKPOINT_SCRUB_TIME 1354
/*! transaction: transaction checkpoint total time (msecs) */
-#define WT_STAT_CONN_TXN_CHECKPOINT_TIME_TOTAL 1353
+#define WT_STAT_CONN_TXN_CHECKPOINT_TIME_TOTAL 1355
/*! transaction: transaction checkpoints */
-#define WT_STAT_CONN_TXN_CHECKPOINT 1354
+#define WT_STAT_CONN_TXN_CHECKPOINT 1356
/*!
* transaction: transaction checkpoints skipped because database was
* clean
*/
-#define WT_STAT_CONN_TXN_CHECKPOINT_SKIPPED 1355
+#define WT_STAT_CONN_TXN_CHECKPOINT_SKIPPED 1357
/*! transaction: transaction failures due to history store */
-#define WT_STAT_CONN_TXN_FAIL_CACHE 1356
+#define WT_STAT_CONN_TXN_FAIL_CACHE 1358
/*!
* transaction: transaction fsync calls for checkpoint after allocating
* the transaction ID
*/
-#define WT_STAT_CONN_TXN_CHECKPOINT_FSYNC_POST 1357
+#define WT_STAT_CONN_TXN_CHECKPOINT_FSYNC_POST 1359
/*!
* transaction: transaction fsync duration for checkpoint after
* allocating the transaction ID (usecs)
*/
-#define WT_STAT_CONN_TXN_CHECKPOINT_FSYNC_POST_DURATION 1358
+#define WT_STAT_CONN_TXN_CHECKPOINT_FSYNC_POST_DURATION 1360
/*! transaction: transaction range of IDs currently pinned */
-#define WT_STAT_CONN_TXN_PINNED_RANGE 1359
+#define WT_STAT_CONN_TXN_PINNED_RANGE 1361
/*! transaction: transaction range of IDs currently pinned by a checkpoint */
-#define WT_STAT_CONN_TXN_PINNED_CHECKPOINT_RANGE 1360
+#define WT_STAT_CONN_TXN_PINNED_CHECKPOINT_RANGE 1362
/*! transaction: transaction range of timestamps currently pinned */
-#define WT_STAT_CONN_TXN_PINNED_TIMESTAMP 1361
+#define WT_STAT_CONN_TXN_PINNED_TIMESTAMP 1363
/*! transaction: transaction range of timestamps pinned by a checkpoint */
-#define WT_STAT_CONN_TXN_PINNED_TIMESTAMP_CHECKPOINT 1362
+#define WT_STAT_CONN_TXN_PINNED_TIMESTAMP_CHECKPOINT 1364
/*!
* transaction: transaction range of timestamps pinned by the oldest
* active read timestamp
*/
-#define WT_STAT_CONN_TXN_PINNED_TIMESTAMP_READER 1363
+#define WT_STAT_CONN_TXN_PINNED_TIMESTAMP_READER 1365
/*!
* transaction: transaction range of timestamps pinned by the oldest
* timestamp
*/
-#define WT_STAT_CONN_TXN_PINNED_TIMESTAMP_OLDEST 1364
+#define WT_STAT_CONN_TXN_PINNED_TIMESTAMP_OLDEST 1366
/*! transaction: transaction read timestamp of the oldest active reader */
-#define WT_STAT_CONN_TXN_TIMESTAMP_OLDEST_ACTIVE_READ 1365
+#define WT_STAT_CONN_TXN_TIMESTAMP_OLDEST_ACTIVE_READ 1367
/*! transaction: transaction rollback to stable currently running */
-#define WT_STAT_CONN_TXN_ROLLBACK_TO_STABLE_RUNNING 1366
+#define WT_STAT_CONN_TXN_ROLLBACK_TO_STABLE_RUNNING 1368
/*! transaction: transaction sync calls */
-#define WT_STAT_CONN_TXN_SYNC 1367
+#define WT_STAT_CONN_TXN_SYNC 1369
/*! transaction: transaction walk of concurrent sessions */
-#define WT_STAT_CONN_TXN_WALK_SESSIONS 1368
+#define WT_STAT_CONN_TXN_WALK_SESSIONS 1370
/*! transaction: transactions committed */
-#define WT_STAT_CONN_TXN_COMMIT 1369
+#define WT_STAT_CONN_TXN_COMMIT 1371
/*! transaction: transactions rolled back */
-#define WT_STAT_CONN_TXN_ROLLBACK 1370
+#define WT_STAT_CONN_TXN_ROLLBACK 1372
/*! LSM: sleep for LSM checkpoint throttle */
-#define WT_STAT_CONN_LSM_CHECKPOINT_THROTTLE 1371
+#define WT_STAT_CONN_LSM_CHECKPOINT_THROTTLE 1373
/*! LSM: sleep for LSM merge throttle */
-#define WT_STAT_CONN_LSM_MERGE_THROTTLE 1372
+#define WT_STAT_CONN_LSM_MERGE_THROTTLE 1374
/*! cache: bytes currently in the cache */
-#define WT_STAT_CONN_CACHE_BYTES_INUSE 1373
+#define WT_STAT_CONN_CACHE_BYTES_INUSE 1375
/*! cache: bytes dirty in the cache cumulative */
-#define WT_STAT_CONN_CACHE_BYTES_DIRTY_TOTAL 1374
+#define WT_STAT_CONN_CACHE_BYTES_DIRTY_TOTAL 1376
/*! cache: bytes read into cache */
-#define WT_STAT_CONN_CACHE_BYTES_READ 1375
+#define WT_STAT_CONN_CACHE_BYTES_READ 1377
/*! cache: bytes written from cache */
-#define WT_STAT_CONN_CACHE_BYTES_WRITE 1376
+#define WT_STAT_CONN_CACHE_BYTES_WRITE 1378
/*! cache: checkpoint blocked page eviction */
-#define WT_STAT_CONN_CACHE_EVICTION_CHECKPOINT 1377
+#define WT_STAT_CONN_CACHE_EVICTION_CHECKPOINT 1379
/*!
* cache: checkpoint of history store file blocked non-history store page
* eviction
*/
-#define WT_STAT_CONN_CACHE_EVICTION_BLOCKED_CHECKPOINT_HS 1378
+#define WT_STAT_CONN_CACHE_EVICTION_BLOCKED_CHECKPOINT_HS 1380
/*! cache: eviction walk target pages histogram - 0-9 */
-#define WT_STAT_CONN_CACHE_EVICTION_TARGET_PAGE_LT10 1379
+#define WT_STAT_CONN_CACHE_EVICTION_TARGET_PAGE_LT10 1381
/*! cache: eviction walk target pages histogram - 10-31 */
-#define WT_STAT_CONN_CACHE_EVICTION_TARGET_PAGE_LT32 1380
+#define WT_STAT_CONN_CACHE_EVICTION_TARGET_PAGE_LT32 1382
/*! cache: eviction walk target pages histogram - 128 and higher */
-#define WT_STAT_CONN_CACHE_EVICTION_TARGET_PAGE_GE128 1381
+#define WT_STAT_CONN_CACHE_EVICTION_TARGET_PAGE_GE128 1383
/*! cache: eviction walk target pages histogram - 32-63 */
-#define WT_STAT_CONN_CACHE_EVICTION_TARGET_PAGE_LT64 1382
+#define WT_STAT_CONN_CACHE_EVICTION_TARGET_PAGE_LT64 1384
/*! cache: eviction walk target pages histogram - 64-128 */
-#define WT_STAT_CONN_CACHE_EVICTION_TARGET_PAGE_LT128 1383
+#define WT_STAT_CONN_CACHE_EVICTION_TARGET_PAGE_LT128 1385
/*!
* cache: eviction walk target pages reduced due to history store cache
* pressure
*/
-#define WT_STAT_CONN_CACHE_EVICTION_TARGET_PAGE_REDUCED 1384
+#define WT_STAT_CONN_CACHE_EVICTION_TARGET_PAGE_REDUCED 1386
/*! cache: eviction walks abandoned */
-#define WT_STAT_CONN_CACHE_EVICTION_WALKS_ABANDONED 1385
+#define WT_STAT_CONN_CACHE_EVICTION_WALKS_ABANDONED 1387
/*! cache: eviction walks gave up because they restarted their walk twice */
-#define WT_STAT_CONN_CACHE_EVICTION_WALKS_STOPPED 1386
+#define WT_STAT_CONN_CACHE_EVICTION_WALKS_STOPPED 1388
/*!
* cache: eviction walks gave up because they saw too many pages and
* found no candidates
*/
-#define WT_STAT_CONN_CACHE_EVICTION_WALKS_GAVE_UP_NO_TARGETS 1387
+#define WT_STAT_CONN_CACHE_EVICTION_WALKS_GAVE_UP_NO_TARGETS 1389
/*!
* cache: eviction walks gave up because they saw too many pages and
* found too few candidates
*/
-#define WT_STAT_CONN_CACHE_EVICTION_WALKS_GAVE_UP_RATIO 1388
+#define WT_STAT_CONN_CACHE_EVICTION_WALKS_GAVE_UP_RATIO 1390
/*! cache: eviction walks reached end of tree */
-#define WT_STAT_CONN_CACHE_EVICTION_WALKS_ENDED 1389
+#define WT_STAT_CONN_CACHE_EVICTION_WALKS_ENDED 1391
/*! cache: eviction walks restarted */
-#define WT_STAT_CONN_CACHE_EVICTION_WALK_RESTART 1390
+#define WT_STAT_CONN_CACHE_EVICTION_WALK_RESTART 1392
/*! cache: eviction walks started from root of tree */
-#define WT_STAT_CONN_CACHE_EVICTION_WALK_FROM_ROOT 1391
+#define WT_STAT_CONN_CACHE_EVICTION_WALK_FROM_ROOT 1393
/*! cache: eviction walks started from saved location in tree */
-#define WT_STAT_CONN_CACHE_EVICTION_WALK_SAVED_POS 1392
+#define WT_STAT_CONN_CACHE_EVICTION_WALK_SAVED_POS 1394
/*! cache: hazard pointer blocked page eviction */
-#define WT_STAT_CONN_CACHE_EVICTION_HAZARD 1393
+#define WT_STAT_CONN_CACHE_EVICTION_HAZARD 1395
/*! cache: history store table insert calls */
-#define WT_STAT_CONN_CACHE_HS_INSERT 1394
+#define WT_STAT_CONN_CACHE_HS_INSERT 1396
/*! cache: history store table insert calls that returned restart */
-#define WT_STAT_CONN_CACHE_HS_INSERT_RESTART 1395
+#define WT_STAT_CONN_CACHE_HS_INSERT_RESTART 1397
/*!
* cache: history store table out-of-order resolved updates that lose
* their durable timestamp
*/
-#define WT_STAT_CONN_CACHE_HS_ORDER_LOSE_DURABLE_TIMESTAMP 1396
+#define WT_STAT_CONN_CACHE_HS_ORDER_LOSE_DURABLE_TIMESTAMP 1398
/*!
* cache: history store table out-of-order updates that were fixed up by
* reinserting with the fixed timestamp
*/
-#define WT_STAT_CONN_CACHE_HS_ORDER_REINSERT 1397
+#define WT_STAT_CONN_CACHE_HS_ORDER_REINSERT 1399
/*! cache: history store table reads */
-#define WT_STAT_CONN_CACHE_HS_READ 1398
+#define WT_STAT_CONN_CACHE_HS_READ 1400
/*! cache: history store table reads missed */
-#define WT_STAT_CONN_CACHE_HS_READ_MISS 1399
+#define WT_STAT_CONN_CACHE_HS_READ_MISS 1401
/*! cache: history store table reads requiring squashed modifies */
-#define WT_STAT_CONN_CACHE_HS_READ_SQUASH 1400
+#define WT_STAT_CONN_CACHE_HS_READ_SQUASH 1402
/*!
* cache: history store table truncation by rollback to stable to remove
* an unstable update
*/
-#define WT_STAT_CONN_CACHE_HS_KEY_TRUNCATE_RTS_UNSTABLE 1401
+#define WT_STAT_CONN_CACHE_HS_KEY_TRUNCATE_RTS_UNSTABLE 1403
/*!
* cache: history store table truncation by rollback to stable to remove
* an update
*/
-#define WT_STAT_CONN_CACHE_HS_KEY_TRUNCATE_RTS 1402
+#define WT_STAT_CONN_CACHE_HS_KEY_TRUNCATE_RTS 1404
/*! cache: history store table truncation to remove an update */
-#define WT_STAT_CONN_CACHE_HS_KEY_TRUNCATE 1403
+#define WT_STAT_CONN_CACHE_HS_KEY_TRUNCATE 1405
/*!
* cache: history store table truncation to remove range of updates due
* to key being removed from the data page during reconciliation
*/
-#define WT_STAT_CONN_CACHE_HS_KEY_TRUNCATE_ONPAGE_REMOVAL 1404
+#define WT_STAT_CONN_CACHE_HS_KEY_TRUNCATE_ONPAGE_REMOVAL 1406
/*!
* cache: history store table truncation to remove range of updates due
* to out-of-order timestamp update on data page
*/
-#define WT_STAT_CONN_CACHE_HS_ORDER_REMOVE 1405
+#define WT_STAT_CONN_CACHE_HS_ORDER_REMOVE 1407
/*! cache: history store table writes requiring squashed modifies */
-#define WT_STAT_CONN_CACHE_HS_WRITE_SQUASH 1406
+#define WT_STAT_CONN_CACHE_HS_WRITE_SQUASH 1408
/*! cache: in-memory page passed criteria to be split */
-#define WT_STAT_CONN_CACHE_INMEM_SPLITTABLE 1407
+#define WT_STAT_CONN_CACHE_INMEM_SPLITTABLE 1409
/*! cache: in-memory page splits */
-#define WT_STAT_CONN_CACHE_INMEM_SPLIT 1408
+#define WT_STAT_CONN_CACHE_INMEM_SPLIT 1410
/*! cache: internal pages evicted */
-#define WT_STAT_CONN_CACHE_EVICTION_INTERNAL 1409
+#define WT_STAT_CONN_CACHE_EVICTION_INTERNAL 1411
/*! cache: internal pages split during eviction */
-#define WT_STAT_CONN_CACHE_EVICTION_SPLIT_INTERNAL 1410
+#define WT_STAT_CONN_CACHE_EVICTION_SPLIT_INTERNAL 1412
/*! cache: leaf pages split during eviction */
-#define WT_STAT_CONN_CACHE_EVICTION_SPLIT_LEAF 1411
+#define WT_STAT_CONN_CACHE_EVICTION_SPLIT_LEAF 1413
/*! cache: modified pages evicted */
-#define WT_STAT_CONN_CACHE_EVICTION_DIRTY 1412
+#define WT_STAT_CONN_CACHE_EVICTION_DIRTY 1414
/*! cache: overflow pages read into cache */
-#define WT_STAT_CONN_CACHE_READ_OVERFLOW 1413
+#define WT_STAT_CONN_CACHE_READ_OVERFLOW 1415
/*! cache: page split during eviction deepened the tree */
-#define WT_STAT_CONN_CACHE_EVICTION_DEEPEN 1414
+#define WT_STAT_CONN_CACHE_EVICTION_DEEPEN 1416
/*! cache: page written requiring history store records */
-#define WT_STAT_CONN_CACHE_WRITE_HS 1415
+#define WT_STAT_CONN_CACHE_WRITE_HS 1417
/*! cache: pages read into cache */
-#define WT_STAT_CONN_CACHE_READ 1416
+#define WT_STAT_CONN_CACHE_READ 1418
/*! cache: pages read into cache after truncate */
-#define WT_STAT_CONN_CACHE_READ_DELETED 1417
+#define WT_STAT_CONN_CACHE_READ_DELETED 1419
/*! cache: pages read into cache after truncate in prepare state */
-#define WT_STAT_CONN_CACHE_READ_DELETED_PREPARED 1418
+#define WT_STAT_CONN_CACHE_READ_DELETED_PREPARED 1420
/*! cache: pages requested from the cache */
-#define WT_STAT_CONN_CACHE_PAGES_REQUESTED 1419
+#define WT_STAT_CONN_CACHE_PAGES_REQUESTED 1421
/*! cache: pages seen by eviction walk */
-#define WT_STAT_CONN_CACHE_EVICTION_PAGES_SEEN 1420
+#define WT_STAT_CONN_CACHE_EVICTION_PAGES_SEEN 1422
/*! cache: pages written from cache */
-#define WT_STAT_CONN_CACHE_WRITE 1421
+#define WT_STAT_CONN_CACHE_WRITE 1423
/*! cache: pages written requiring in-memory restoration */
-#define WT_STAT_CONN_CACHE_WRITE_RESTORE 1422
+#define WT_STAT_CONN_CACHE_WRITE_RESTORE 1424
/*! cache: tracked dirty bytes in the cache */
-#define WT_STAT_CONN_CACHE_BYTES_DIRTY 1423
+#define WT_STAT_CONN_CACHE_BYTES_DIRTY 1425
/*! cache: unmodified pages evicted */
-#define WT_STAT_CONN_CACHE_EVICTION_CLEAN 1424
+#define WT_STAT_CONN_CACHE_EVICTION_CLEAN 1426
/*! checkpoint-cleanup: pages added for eviction */
-#define WT_STAT_CONN_CC_PAGES_EVICT 1425
+#define WT_STAT_CONN_CC_PAGES_EVICT 1427
/*! checkpoint-cleanup: pages removed */
-#define WT_STAT_CONN_CC_PAGES_REMOVED 1426
+#define WT_STAT_CONN_CC_PAGES_REMOVED 1428
/*! checkpoint-cleanup: pages skipped during tree walk */
-#define WT_STAT_CONN_CC_PAGES_WALK_SKIPPED 1427
+#define WT_STAT_CONN_CC_PAGES_WALK_SKIPPED 1429
/*! checkpoint-cleanup: pages visited */
-#define WT_STAT_CONN_CC_PAGES_VISITED 1428
+#define WT_STAT_CONN_CC_PAGES_VISITED 1430
/*! cursor: Total number of entries skipped by cursor next calls */
-#define WT_STAT_CONN_CURSOR_NEXT_SKIP_TOTAL 1429
+#define WT_STAT_CONN_CURSOR_NEXT_SKIP_TOTAL 1431
/*! cursor: Total number of entries skipped by cursor prev calls */
-#define WT_STAT_CONN_CURSOR_PREV_SKIP_TOTAL 1430
+#define WT_STAT_CONN_CURSOR_PREV_SKIP_TOTAL 1432
/*!
* cursor: Total number of entries skipped to position the history store
* cursor
*/
-#define WT_STAT_CONN_CURSOR_SKIP_HS_CUR_POSITION 1431
+#define WT_STAT_CONN_CURSOR_SKIP_HS_CUR_POSITION 1433
/*!
* cursor: Total number of times a search near has exited due to prefix
* config
*/
-#define WT_STAT_CONN_CURSOR_SEARCH_NEAR_PREFIX_FAST_PATHS 1432
+#define WT_STAT_CONN_CURSOR_SEARCH_NEAR_PREFIX_FAST_PATHS 1434
/*!
* cursor: cursor next calls that skip due to a globally visible history
* store tombstone
*/
-#define WT_STAT_CONN_CURSOR_NEXT_HS_TOMBSTONE 1433
+#define WT_STAT_CONN_CURSOR_NEXT_HS_TOMBSTONE 1435
/*!
* cursor: cursor next calls that skip greater than or equal to 100
* entries
*/
-#define WT_STAT_CONN_CURSOR_NEXT_SKIP_GE_100 1434
+#define WT_STAT_CONN_CURSOR_NEXT_SKIP_GE_100 1436
/*! cursor: cursor next calls that skip less than 100 entries */
-#define WT_STAT_CONN_CURSOR_NEXT_SKIP_LT_100 1435
+#define WT_STAT_CONN_CURSOR_NEXT_SKIP_LT_100 1437
/*!
* cursor: cursor prev calls that skip due to a globally visible history
* store tombstone
*/
-#define WT_STAT_CONN_CURSOR_PREV_HS_TOMBSTONE 1436
+#define WT_STAT_CONN_CURSOR_PREV_HS_TOMBSTONE 1438
/*!
* cursor: cursor prev calls that skip greater than or equal to 100
* entries
*/
-#define WT_STAT_CONN_CURSOR_PREV_SKIP_GE_100 1437
+#define WT_STAT_CONN_CURSOR_PREV_SKIP_GE_100 1439
/*! cursor: cursor prev calls that skip less than 100 entries */
-#define WT_STAT_CONN_CURSOR_PREV_SKIP_LT_100 1438
+#define WT_STAT_CONN_CURSOR_PREV_SKIP_LT_100 1440
/*! cursor: open cursor count */
-#define WT_STAT_CONN_CURSOR_OPEN_COUNT 1439
+#define WT_STAT_CONN_CURSOR_OPEN_COUNT 1441
/*! reconciliation: approximate byte size of timestamps in pages written */
-#define WT_STAT_CONN_REC_TIME_WINDOW_BYTES_TS 1440
+#define WT_STAT_CONN_REC_TIME_WINDOW_BYTES_TS 1442
/*!
* reconciliation: approximate byte size of transaction IDs in pages
* written
*/
-#define WT_STAT_CONN_REC_TIME_WINDOW_BYTES_TXN 1441
+#define WT_STAT_CONN_REC_TIME_WINDOW_BYTES_TXN 1443
/*! reconciliation: fast-path pages deleted */
-#define WT_STAT_CONN_REC_PAGE_DELETE_FAST 1442
+#define WT_STAT_CONN_REC_PAGE_DELETE_FAST 1444
/*! reconciliation: page reconciliation calls */
-#define WT_STAT_CONN_REC_PAGES 1443
+#define WT_STAT_CONN_REC_PAGES 1445
/*! reconciliation: page reconciliation calls for eviction */
-#define WT_STAT_CONN_REC_PAGES_EVICTION 1444
+#define WT_STAT_CONN_REC_PAGES_EVICTION 1446
/*! reconciliation: pages deleted */
-#define WT_STAT_CONN_REC_PAGE_DELETE 1445
+#define WT_STAT_CONN_REC_PAGE_DELETE 1447
/*!
* reconciliation: pages written including an aggregated newest start
* durable timestamp
*/
-#define WT_STAT_CONN_REC_TIME_AGGR_NEWEST_START_DURABLE_TS 1446
+#define WT_STAT_CONN_REC_TIME_AGGR_NEWEST_START_DURABLE_TS 1448
/*!
* reconciliation: pages written including an aggregated newest stop
* durable timestamp
*/
-#define WT_STAT_CONN_REC_TIME_AGGR_NEWEST_STOP_DURABLE_TS 1447
+#define WT_STAT_CONN_REC_TIME_AGGR_NEWEST_STOP_DURABLE_TS 1449
/*!
* reconciliation: pages written including an aggregated newest stop
* timestamp
*/
-#define WT_STAT_CONN_REC_TIME_AGGR_NEWEST_STOP_TS 1448
+#define WT_STAT_CONN_REC_TIME_AGGR_NEWEST_STOP_TS 1450
/*!
* reconciliation: pages written including an aggregated newest stop
* transaction ID
*/
-#define WT_STAT_CONN_REC_TIME_AGGR_NEWEST_STOP_TXN 1449
+#define WT_STAT_CONN_REC_TIME_AGGR_NEWEST_STOP_TXN 1451
/*!
* reconciliation: pages written including an aggregated newest
* transaction ID
*/
-#define WT_STAT_CONN_REC_TIME_AGGR_NEWEST_TXN 1450
+#define WT_STAT_CONN_REC_TIME_AGGR_NEWEST_TXN 1452
/*!
* reconciliation: pages written including an aggregated oldest start
* timestamp
*/
-#define WT_STAT_CONN_REC_TIME_AGGR_OLDEST_START_TS 1451
+#define WT_STAT_CONN_REC_TIME_AGGR_OLDEST_START_TS 1453
/*! reconciliation: pages written including an aggregated prepare */
-#define WT_STAT_CONN_REC_TIME_AGGR_PREPARED 1452
+#define WT_STAT_CONN_REC_TIME_AGGR_PREPARED 1454
/*!
* reconciliation: pages written including at least one start durable
* timestamp
*/
-#define WT_STAT_CONN_REC_TIME_WINDOW_PAGES_DURABLE_START_TS 1453
+#define WT_STAT_CONN_REC_TIME_WINDOW_PAGES_DURABLE_START_TS 1455
/*!
* reconciliation: pages written including at least one start transaction
* ID
*/
-#define WT_STAT_CONN_REC_TIME_WINDOW_PAGES_START_TXN 1454
+#define WT_STAT_CONN_REC_TIME_WINDOW_PAGES_START_TXN 1456
/*!
* reconciliation: pages written including at least one stop durable
* timestamp
*/
-#define WT_STAT_CONN_REC_TIME_WINDOW_PAGES_DURABLE_STOP_TS 1455
+#define WT_STAT_CONN_REC_TIME_WINDOW_PAGES_DURABLE_STOP_TS 1457
/*! reconciliation: pages written including at least one stop timestamp */
-#define WT_STAT_CONN_REC_TIME_WINDOW_PAGES_STOP_TS 1456
+#define WT_STAT_CONN_REC_TIME_WINDOW_PAGES_STOP_TS 1458
/*!
* reconciliation: pages written including at least one stop transaction
* ID
*/
-#define WT_STAT_CONN_REC_TIME_WINDOW_PAGES_STOP_TXN 1457
+#define WT_STAT_CONN_REC_TIME_WINDOW_PAGES_STOP_TXN 1459
/*! reconciliation: records written including a start durable timestamp */
-#define WT_STAT_CONN_REC_TIME_WINDOW_DURABLE_START_TS 1458
+#define WT_STAT_CONN_REC_TIME_WINDOW_DURABLE_START_TS 1460
/*! reconciliation: records written including a start timestamp */
-#define WT_STAT_CONN_REC_TIME_WINDOW_START_TS 1459
+#define WT_STAT_CONN_REC_TIME_WINDOW_START_TS 1461
/*! reconciliation: records written including a start transaction ID */
-#define WT_STAT_CONN_REC_TIME_WINDOW_START_TXN 1460
+#define WT_STAT_CONN_REC_TIME_WINDOW_START_TXN 1462
/*! reconciliation: records written including a stop durable timestamp */
-#define WT_STAT_CONN_REC_TIME_WINDOW_DURABLE_STOP_TS 1461
+#define WT_STAT_CONN_REC_TIME_WINDOW_DURABLE_STOP_TS 1463
/*! reconciliation: records written including a stop timestamp */
-#define WT_STAT_CONN_REC_TIME_WINDOW_STOP_TS 1462
+#define WT_STAT_CONN_REC_TIME_WINDOW_STOP_TS 1464
/*! reconciliation: records written including a stop transaction ID */
-#define WT_STAT_CONN_REC_TIME_WINDOW_STOP_TXN 1463
+#define WT_STAT_CONN_REC_TIME_WINDOW_STOP_TXN 1465
/*! session: tiered operations dequeued and processed */
-#define WT_STAT_CONN_TIERED_WORK_UNITS_DEQUEUED 1464
+#define WT_STAT_CONN_TIERED_WORK_UNITS_DEQUEUED 1466
/*! session: tiered operations scheduled */
-#define WT_STAT_CONN_TIERED_WORK_UNITS_CREATED 1465
+#define WT_STAT_CONN_TIERED_WORK_UNITS_CREATED 1467
/*! session: tiered storage local retention time (secs) */
-#define WT_STAT_CONN_TIERED_RETENTION 1466
+#define WT_STAT_CONN_TIERED_RETENTION 1468
/*! session: tiered storage object size */
-#define WT_STAT_CONN_TIERED_OBJECT_SIZE 1467
+#define WT_STAT_CONN_TIERED_OBJECT_SIZE 1469
/*! transaction: race to read prepared update retry */
-#define WT_STAT_CONN_TXN_READ_RACE_PREPARE_UPDATE 1468
+#define WT_STAT_CONN_TXN_READ_RACE_PREPARE_UPDATE 1470
/*!
* transaction: rollback to stable history store records with stop
* timestamps older than newer records
*/
-#define WT_STAT_CONN_TXN_RTS_HS_STOP_OLDER_THAN_NEWER_START 1469
+#define WT_STAT_CONN_TXN_RTS_HS_STOP_OLDER_THAN_NEWER_START 1471
/*! transaction: rollback to stable inconsistent checkpoint */
-#define WT_STAT_CONN_TXN_RTS_INCONSISTENT_CKPT 1470
+#define WT_STAT_CONN_TXN_RTS_INCONSISTENT_CKPT 1472
/*! transaction: rollback to stable keys removed */
-#define WT_STAT_CONN_TXN_RTS_KEYS_REMOVED 1471
+#define WT_STAT_CONN_TXN_RTS_KEYS_REMOVED 1473
/*! transaction: rollback to stable keys restored */
-#define WT_STAT_CONN_TXN_RTS_KEYS_RESTORED 1472
+#define WT_STAT_CONN_TXN_RTS_KEYS_RESTORED 1474
/*! transaction: rollback to stable restored tombstones from history store */
-#define WT_STAT_CONN_TXN_RTS_HS_RESTORE_TOMBSTONES 1473
+#define WT_STAT_CONN_TXN_RTS_HS_RESTORE_TOMBSTONES 1475
/*! transaction: rollback to stable restored updates from history store */
-#define WT_STAT_CONN_TXN_RTS_HS_RESTORE_UPDATES 1474
+#define WT_STAT_CONN_TXN_RTS_HS_RESTORE_UPDATES 1476
/*! transaction: rollback to stable sweeping history store keys */
-#define WT_STAT_CONN_TXN_RTS_SWEEP_HS_KEYS 1475
+#define WT_STAT_CONN_TXN_RTS_SWEEP_HS_KEYS 1477
/*! transaction: rollback to stable updates removed from history store */
-#define WT_STAT_CONN_TXN_RTS_HS_REMOVED 1476
+#define WT_STAT_CONN_TXN_RTS_HS_REMOVED 1478
/*! transaction: transaction checkpoints due to obsolete pages */
-#define WT_STAT_CONN_TXN_CHECKPOINT_OBSOLETE_APPLIED 1477
+#define WT_STAT_CONN_TXN_CHECKPOINT_OBSOLETE_APPLIED 1479
/*! transaction: update conflicts */
-#define WT_STAT_CONN_TXN_UPDATE_CONFLICT 1478
+#define WT_STAT_CONN_TXN_UPDATE_CONFLICT 1480
/*!
* @}
diff --git a/src/third_party/wiredtiger/src/include/wt_internal.h b/src/third_party/wiredtiger/src/include/wt_internal.h
index 4f896a73525..4487554a7f3 100644
--- a/src/third_party/wiredtiger/src/include/wt_internal.h
+++ b/src/third_party/wiredtiger/src/include/wt_internal.h
@@ -77,6 +77,8 @@ struct __wt_block_ckpt;
typedef struct __wt_block_ckpt WT_BLOCK_CKPT;
struct __wt_block_desc;
typedef struct __wt_block_desc WT_BLOCK_DESC;
+struct __wt_block_file_opener;
+typedef struct __wt_block_file_opener WT_BLOCK_FILE_OPENER;
struct __wt_block_header;
typedef struct __wt_block_header WT_BLOCK_HEADER;
struct __wt_block_mods;
diff --git a/src/third_party/wiredtiger/src/support/stat.c b/src/third_party/wiredtiger/src/support/stat.c
index 647d87bf9fb..912e00945f5 100644
--- a/src/third_party/wiredtiger/src/support/stat.c
+++ b/src/third_party/wiredtiger/src/support/stat.c
@@ -1249,6 +1249,8 @@ static const char *const __stats_connection_desc[] = {
"reconciliation: records written including a prepare state",
"reconciliation: split bytes currently awaiting free",
"reconciliation: split objects currently awaiting free",
+ "session: flush state races",
+ "session: flush_tier busy retries",
"session: flush_tier operation calls",
"session: open session count",
"session: session query timestamp calls",
@@ -1773,6 +1775,8 @@ __wt_stat_connection_clear_single(WT_CONNECTION_STATS *stats)
stats->rec_time_window_prepared = 0;
/* not clearing rec_split_stashed_bytes */
/* not clearing rec_split_stashed_objects */
+ stats->flush_state_races = 0;
+ stats->flush_tier_busy = 0;
stats->flush_tier = 0;
/* not clearing session_open */
stats->session_query_ts = 0;
@@ -2289,6 +2293,8 @@ __wt_stat_connection_aggregate(WT_CONNECTION_STATS **from, WT_CONNECTION_STATS *
to->rec_time_window_prepared += WT_STAT_READ(from, rec_time_window_prepared);
to->rec_split_stashed_bytes += WT_STAT_READ(from, rec_split_stashed_bytes);
to->rec_split_stashed_objects += WT_STAT_READ(from, rec_split_stashed_objects);
+ to->flush_state_races += WT_STAT_READ(from, flush_state_races);
+ to->flush_tier_busy += WT_STAT_READ(from, flush_tier_busy);
to->flush_tier += WT_STAT_READ(from, flush_tier);
to->session_open += WT_STAT_READ(from, session_open);
to->session_query_ts += WT_STAT_READ(from, session_query_ts);
diff --git a/src/third_party/wiredtiger/src/tiered/tiered_handle.c b/src/third_party/wiredtiger/src/tiered/tiered_handle.c
index 11ecd0a4941..007f8f5de39 100644
--- a/src/third_party/wiredtiger/src/tiered/tiered_handle.c
+++ b/src/third_party/wiredtiger/src/tiered/tiered_handle.c
@@ -97,6 +97,7 @@ __tiered_create_local(WT_SESSION_IMPL *session, WT_TIERED *tiered)
__wt_verbose(session, WT_VERB_TIERED, "TIER_CREATE_LOCAL: LOCAL: %s", name);
cfg[0] = WT_CONFIG_BASE(session, object_meta);
cfg[1] = tiered->obj_config;
+ cfg[2] = "tiered_object=true,readonly=true";
__wt_verbose(session, WT_VERB_TIERED, "TIER_CREATE_LOCAL: obj_config: %s : %s", name, cfg[1]);
WT_ASSERT(session, tiered->obj_config != NULL);
WT_ERR(__wt_config_merge(session, cfg, NULL, (const char **)&config));
@@ -113,11 +114,15 @@ __tiered_create_local(WT_SESSION_IMPL *session, WT_TIERED *tiered)
this_tier->name = name;
F_SET(this_tier, WT_TIERS_OP_READ | WT_TIERS_OP_WRITE);
- if (0) {
+ WT_WITH_DHANDLE(
+ session, &tiered->iface, ret = __wt_btree_switch_object(session, tiered->current_id, 0));
+ WT_ERR(ret);
+
err:
+ if (ret != 0)
/* Only free name on error. */
__wt_free(session, name);
- }
+
__wt_free(session, config);
return (ret);
}
@@ -270,7 +275,7 @@ __tiered_update_metadata(WT_SESSION_IMPL *session, WT_TIERED *tiered, const char
newconfig = NULL;
WT_RET(__wt_scr_alloc(session, 0, &tmp));
- WT_RET(__wt_buf_fmt(session, tmp, "last=%" PRIu64 ",tiers=(\"", tiered->current_id));
+ WT_RET(__wt_buf_fmt(session, tmp, "last=%" PRIu32 ",tiers=(", tiered->current_id));
for (i = 0; i < WT_TIERED_MAX_TIERS; ++i) {
if (tiered->tiers[i].name == NULL) {
__wt_verbose(session, WT_VERB_TIERED, "TIER_UPDATE_META: names[%" PRIu32 "] NULL", i);
@@ -278,7 +283,7 @@ __tiered_update_metadata(WT_SESSION_IMPL *session, WT_TIERED *tiered, const char
}
__wt_verbose(session, WT_VERB_TIERED, "TIER_UPDATE_META: names[%" PRIu32 "]: %s", i,
tiered->tiers[i].name);
- WT_RET(__wt_buf_catfmt(session, tmp, "%s%s\"", i == 0 ? "" : ",", tiered->tiers[i].name));
+ WT_RET(__wt_buf_catfmt(session, tmp, "%s\"%s\"", i == 0 ? "" : ",", tiered->tiers[i].name));
}
WT_RET(__wt_buf_catfmt(session, tmp, ")"));
@@ -399,7 +404,7 @@ __wt_tiered_switch(WT_SESSION_IMPL *session, const char *config)
*/
int
__wt_tiered_name(
- WT_SESSION_IMPL *session, WT_DATA_HANDLE *dhandle, uint64_t id, uint32_t flags, const char **retp)
+ WT_SESSION_IMPL *session, WT_DATA_HANDLE *dhandle, uint32_t id, uint32_t flags, const char **retp)
{
WT_DECL_ITEM(tmp);
WT_DECL_RET;
@@ -424,12 +429,12 @@ __wt_tiered_name(
if (LF_ISSET(WT_TIERED_NAME_PREFIX))
WT_ERR(__wt_buf_fmt(session, tmp, "file:%s-", name));
else
- WT_ERR(__wt_buf_fmt(session, tmp, "file:%s-%010" PRIu64 ".wtobj", name, id));
+ WT_ERR(__wt_buf_fmt(session, tmp, "file:%s-%010" PRIu32 ".wtobj", name, id));
} else if (LF_ISSET(WT_TIERED_NAME_OBJECT)) {
if (LF_ISSET(WT_TIERED_NAME_PREFIX))
WT_ERR(__wt_buf_fmt(session, tmp, "object:%s-", name));
else
- WT_ERR(__wt_buf_fmt(session, tmp, "object:%s-%010" PRIu64 ".wtobj", name, id));
+ WT_ERR(__wt_buf_fmt(session, tmp, "object:%s-%010" PRIu32 ".wtobj", name, id));
} else {
WT_ASSERT(session, !LF_ISSET(WT_TIERED_NAME_PREFIX));
WT_ASSERT(session, LF_ISSET(WT_TIERED_NAME_SHARED));
@@ -450,7 +455,7 @@ static int
__tiered_open(WT_SESSION_IMPL *session, const char *cfg[])
{
WT_CONFIG_ITEM cval, tierconf;
- WT_DATA_HANDLE *dhandle;
+ WT_DATA_HANDLE *dhandle, *file_dhandle;
WT_DECL_ITEM(tmp);
WT_DECL_RET;
WT_TIERED *tiered;
@@ -459,13 +464,15 @@ __tiered_open(WT_SESSION_IMPL *session, const char *cfg[])
uint32_t unused;
#endif
char *metaconf;
+ const char *newconfig;
const char *obj_cfg[] = {WT_CONFIG_BASE(session, object_meta), NULL, NULL};
+ const char *new_tiered_cfg[] = {NULL, NULL, NULL, NULL};
const char **tiered_cfg, *config;
dhandle = session->dhandle;
tiered = (WT_TIERED *)dhandle;
tiered_cfg = dhandle->cfg;
- config = NULL;
+ config = newconfig = NULL;
metaconf = NULL;
WT_RET(__wt_scr_alloc(session, 0, &tmp));
@@ -497,7 +504,7 @@ __tiered_open(WT_SESSION_IMPL *session, const char *cfg[])
WT_ERR(__wt_strndup(session, cval.str, cval.len, &tiered->value_format));
WT_ERR(__wt_config_getones(session, config, "last", &cval));
- tiered->current_id = (uint64_t)cval.val;
+ tiered->current_id = (uint32_t)cval.val;
tiered->next_id = tiered->current_id + 1;
__wt_verbose(session, WT_VERB_TIERED, "TIERED_OPEN: current %d, next %d",
(int)tiered->current_id, (int)tiered->next_id);
@@ -512,15 +519,31 @@ __tiered_open(WT_SESSION_IMPL *session, const char *cfg[])
__wt_verbose(
session, WT_VERB_TIERED, "TIERED_OPEN: create %s config %s", dhandle->name, config);
WT_ERR(__wt_tiered_switch(session, config));
+ file_dhandle = tiered->tiers[WT_TIERED_INDEX_LOCAL].tier;
+ WT_ASSERT(session, file_dhandle != dhandle && file_dhandle->type == WT_DHANDLE_TYPE_BTREE);
- /* XXX brute force, need to figure out functions to use to do this properly. */
- /* We need to update the dhandle config entry to reflect the new tiers metadata. */
- WT_ERR(__wt_metadata_search(session, dhandle->name, &metaconf));
+ /*
+ * XXX brute force, need to figure out functions to use to do this properly.
+ *
+ * We are updating the tiered dhandle config entry to reflect the new tiers metadata. The
+ * tiered dhandle must look almost exactly like the local file dhandle. The difference is
+ * that the local file dhandle is marked as readonly and also tagged as a tiered object.
+ * We'll turn those off before putting it into tiered dhandle.
+ */
+ WT_ERR(__wt_metadata_search(session, file_dhandle->name, &metaconf));
__wt_verbose(session, WT_VERB_TIERED, "TIERED_OPEN: after switch meta conf %s %s",
dhandle->name, metaconf);
+ new_tiered_cfg[0] = metaconf;
+ new_tiered_cfg[1] = "tiered_object=false,readonly=false";
+ WT_ERR(__wt_config_merge(session, new_tiered_cfg, NULL, &newconfig));
__wt_free(session, dhandle->cfg[1]);
- dhandle->cfg[1] = metaconf;
+ dhandle->cfg[1] = newconfig;
+ WT_ERR(__wt_config_merge(session, dhandle->cfg, NULL, &newconfig));
+ WT_ERR(__wt_metadata_update(session, dhandle->name, newconfig));
}
+ WT_ERR(__wt_btree_open(session, tiered_cfg));
+ WT_ERR(__wt_btree_switch_object(session, tiered->current_id, 0));
+
#if 1
if (0) {
/* Temp code to keep s_all happy. */
diff --git a/src/third_party/wiredtiger/src/tiered/tiered_opener.c b/src/third_party/wiredtiger/src/tiered/tiered_opener.c
new file mode 100644
index 00000000000..0e28bcdcbcb
--- /dev/null
+++ b/src/third_party/wiredtiger/src/tiered/tiered_opener.c
@@ -0,0 +1,99 @@
+/*-
+ * Copyright (c) 2014-present MongoDB, Inc.
+ * Copyright (c) 2008-2014 WiredTiger, Inc.
+ * All rights reserved.
+ *
+ * See the file LICENSE for redistribution information.
+ */
+
+#include "wt_internal.h"
+
+/*
+ * __tiered_opener_open --
+ * Open an object by number.
+ */
+static int
+__tiered_opener_open(WT_BLOCK_FILE_OPENER *opener, WT_SESSION_IMPL *session, uint32_t object_id,
+ WT_FS_OPEN_FILE_TYPE type, u_int flags, WT_FH **fhp)
+{
+ WT_BUCKET_STORAGE *bstorage;
+ WT_DECL_RET;
+ WT_TIERED *tiered;
+ const char *object_name, *object_uri;
+
+ tiered = opener->cookie;
+ object_uri = NULL;
+
+ WT_ASSERT(session,
+ (object_id > 0 && object_id <= tiered->current_id) || object_id == WT_TIERED_CURRENT_ID);
+ /*
+ * FIXME-WT-7590 we will need some kind of locking while we're looking at the tiered structure.
+ * This can be called at any time, because we are opening the objects lazily.
+ */
+ if (object_id == tiered->current_id || object_id == WT_TIERED_CURRENT_ID) {
+ bstorage = NULL;
+ object_name = tiered->tiers[WT_TIERED_INDEX_LOCAL].name;
+ if (!WT_PREFIX_SKIP(object_name, "file:"))
+ WT_RET_MSG(session, EINVAL, "expected a 'file:' URI");
+ } else {
+ WT_ERR(
+ __wt_tiered_name(session, &tiered->iface, object_id, WT_TIERED_NAME_OBJECT, &object_uri));
+ object_name = object_uri;
+ WT_PREFIX_SKIP_REQUIRED(session, object_name, "object:");
+ bstorage = tiered->bstorage;
+ }
+ WT_WITH_BUCKET_STORAGE(
+ bstorage, session, { ret = __wt_open(session, object_name, type, flags, fhp); });
+err:
+ __wt_free(session, object_uri);
+ return (ret);
+}
+
+/*
+ * __tiered_opener_current_id --
+ * Get the current writeable object id.
+ */
+static uint32_t
+__tiered_opener_current_id(WT_BLOCK_FILE_OPENER *opener)
+{
+ WT_TIERED *tiered;
+
+ tiered = opener->cookie;
+
+ /*
+ * FIXME-WT-7590 we will need some kind of locking while we're looking at the tiered structure.
+ * This can be called at any time, because we are opening the objects lazily.
+ */
+ return (tiered->current_id);
+}
+
+/*
+ * __wt_tiered_opener --
+ * Set up an opener for a tiered handle.
+ */
+int
+__wt_tiered_opener(WT_SESSION_IMPL *session, WT_DATA_HANDLE *dhandle,
+ WT_BLOCK_FILE_OPENER **openerp, const char **filenamep)
+{
+ WT_TIERED *tiered;
+ const char *filename;
+
+ filename = dhandle->name;
+ *openerp = NULL;
+
+ if (dhandle->type == WT_DHANDLE_TYPE_BTREE) {
+ if (!WT_PREFIX_SKIP(filename, "file:"))
+ WT_RET_MSG(session, EINVAL, "expected a 'file:' URI");
+ *filenamep = filename;
+ } else if (dhandle->type == WT_DHANDLE_TYPE_TIERED) {
+ tiered = (WT_TIERED *)dhandle;
+ tiered->opener.open = __tiered_opener_open;
+ tiered->opener.current_object_id = __tiered_opener_current_id;
+ tiered->opener.cookie = tiered;
+ *openerp = &tiered->opener;
+ *filenamep = dhandle->name;
+ } else
+ WT_RET_MSG(session, EINVAL, "invalid URI: %s", dhandle->name);
+
+ return (0);
+}
diff --git a/src/third_party/wiredtiger/src/tiered/tiered_work.c b/src/third_party/wiredtiger/src/tiered/tiered_work.c
index 728a7a0b3b2..8fa2634fcd2 100644
--- a/src/third_party/wiredtiger/src/tiered/tiered_work.c
+++ b/src/third_party/wiredtiger/src/tiered/tiered_work.c
@@ -9,6 +9,32 @@
#include "wt_internal.h"
/*
+ * __wt_tiered_work_free --
+ * Free a work unit and account for it in the flush state.
+ */
+void
+__wt_tiered_work_free(WT_SESSION_IMPL *session, WT_TIERED_WORK_UNIT *entry)
+{
+ WT_CONNECTION_IMPL *conn;
+ uint32_t new_state, old_state;
+
+ conn = S2C(session);
+ for (;;) {
+ WT_BARRIER();
+ old_state = conn->flush_state;
+ new_state = old_state - 1;
+ if (__wt_atomic_casv32(&conn->flush_state, old_state, new_state))
+ break;
+ WT_STAT_CONN_INCR(session, flush_state_races);
+ __wt_yield();
+ }
+ /* If all work is done signal any waiting thread waiting for sync. */
+ if (WT_FLUSH_STATE_DONE(conn->flush_state))
+ __wt_cond_signal(session, conn->flush_cond);
+ __wt_free(session, entry);
+}
+
+/*
* __wt_tiered_push_work --
* Push a work unit to the queue. Assumes it is passed an already filled out structure.
*/
@@ -16,12 +42,23 @@ void
__wt_tiered_push_work(WT_SESSION_IMPL *session, WT_TIERED_WORK_UNIT *entry)
{
WT_CONNECTION_IMPL *conn;
+ uint32_t new_state, old_state;
conn = S2C(session);
+
__wt_spin_lock(session, &conn->tiered_lock);
TAILQ_INSERT_TAIL(&conn->tieredqh, entry, q);
WT_STAT_CONN_INCR(session, tiered_work_units_created);
__wt_spin_unlock(session, &conn->tiered_lock);
+ for (;;) {
+ WT_BARRIER();
+ old_state = conn->flush_state;
+ new_state = old_state + 1;
+ if (__wt_atomic_casv32(&conn->flush_state, old_state, new_state))
+ break;
+ WT_STAT_CONN_INCR(session, flush_state_races);
+ __wt_yield();
+ }
__wt_cond_signal(session, conn->tiered_cond);
return;
}
@@ -98,7 +135,7 @@ __wt_tiered_get_drop_shared(WT_SESSION_IMPL *session, WT_TIERED_WORK_UNIT **entr
* Add a drop local work unit for the given ID to the queue.
*/
int
-__wt_tiered_put_drop_local(WT_SESSION_IMPL *session, WT_TIERED *tiered, uint64_t id)
+__wt_tiered_put_drop_local(WT_SESSION_IMPL *session, WT_TIERED *tiered, uint32_t id)
{
WT_TIERED_WORK_UNIT *entry;
uint64_t now;
@@ -120,7 +157,7 @@ __wt_tiered_put_drop_local(WT_SESSION_IMPL *session, WT_TIERED *tiered, uint64_t
* Add a drop shared work unit for the given ID to the queue.
*/
int
-__wt_tiered_put_drop_shared(WT_SESSION_IMPL *session, WT_TIERED *tiered, uint64_t id)
+__wt_tiered_put_drop_shared(WT_SESSION_IMPL *session, WT_TIERED *tiered, uint32_t id)
{
WT_TIERED_WORK_UNIT *entry;
diff --git a/src/third_party/wiredtiger/src/txn/txn.c b/src/third_party/wiredtiger/src/txn/txn.c
index dabce04d12f..c73e8bcdb4c 100644
--- a/src/third_party/wiredtiger/src/txn/txn.c
+++ b/src/third_party/wiredtiger/src/txn/txn.c
@@ -1159,9 +1159,15 @@ __txn_resolve_prepared_op(WT_SESSION_IMPL *session, WT_TXN_OP *op, bool commit,
continue;
}
- /* Ignore the already resolved updates. */
- if (upd->prepare_state == WT_PREPARE_RESOLVED)
+ /*
+ * Performing an update on the same key where the truncate operation is performed can lead
+ * to updates that are already resolved in the updated list. Ignore the already resolved
+ * updates.
+ */
+ if (upd->prepare_state == WT_PREPARE_RESOLVED) {
+ WT_ASSERT(session, upd->type == WT_UPDATE_TOMBSTONE);
continue;
+ }
/*
* Newer updates are inserted at head of update chain, and transaction operations are added
diff --git a/src/third_party/wiredtiger/src/txn/txn_recover.c b/src/third_party/wiredtiger/src/txn/txn_recover.c
index a1cbbdc564a..debee377638 100644
--- a/src/third_party/wiredtiger/src/txn/txn_recover.c
+++ b/src/third_party/wiredtiger/src/txn/txn_recover.c
@@ -1016,6 +1016,14 @@ done:
WT_ERR(__wt_rollback_to_stable(session, NULL, true));
}
+ /*
+ * Sometimes eviction is triggered after doing a checkpoint. However, we don't want eviction to
+ * make the tree dirty after checkpoint as this will interfere with WT_SESSION alter which
+ * expects a clean tree.
+ */
+ if (eviction_started)
+ WT_TRET(__wt_evict_destroy(session));
+
if (do_checkpoint || rts_executed)
/*
* Forcibly log a checkpoint so the next open is fast and keep the metadata up to date with
diff --git a/src/third_party/wiredtiger/test/bloom/test_bloom.c b/src/third_party/wiredtiger/test/bloom/test_bloom.c
index 76b5bc0a50b..fb964c52a31 100644
--- a/src/third_party/wiredtiger/test/bloom/test_bloom.c
+++ b/src/third_party/wiredtiger/test/bloom/test_bloom.c
@@ -28,6 +28,8 @@
#include "test_util.h"
+#define HOME_SIZE 512
+
static struct {
WT_CONNECTION *wt_conn; /* WT_CONNECTION handle */
WT_SESSION *wt_session; /* WT_SESSION handle */
@@ -104,8 +106,14 @@ setup(void)
WT_CONNECTION *conn;
WT_SESSION *session;
char config[512];
+ static char home[HOME_SIZE]; /* Base home directory */
+
+ testutil_work_dir_from_path(home, HOME_SIZE, "WT_TEST");
- testutil_check(system("rm -f WiredTiger* *.bf"));
+ /* Clean the test directory if it already exists. */
+ testutil_clean_work_dir(home);
+ /* Create the home test directory for the test. */
+ testutil_make_work_dir(home);
/*
* This test doesn't test public Wired Tiger functionality, it still needs connection and
@@ -120,7 +128,7 @@ setup(void)
"create,error_prefix=\"%s\",cache_size=%" PRIu32 "MB,%s", progname, g.c_cache,
g.config_open == NULL ? "" : g.config_open));
- testutil_check(wiredtiger_open(NULL, NULL, config, &conn));
+ testutil_check(wiredtiger_open(home, NULL, config, &conn));
testutil_check(conn->open_session(conn, NULL, NULL, &session));
diff --git a/src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_default.txt b/src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_default.txt
index 6caaa4d4456..74d35e1cbcd 100644
--- a/src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_default.txt
+++ b/src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_default.txt
@@ -3,6 +3,11 @@
# Used as a basic test for the framework.
duration_seconds=10,
cache_size_mb=1000,
+checkpoint_manager=
+(
+ enabled=true,
+ op_rate=5s
+),
runtime_monitor=
(
stat_cache_size=
diff --git a/src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_stress.txt b/src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_stress.txt
index 6eeda0ab7c0..34b6f9b89fe 100644
--- a/src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_stress.txt
+++ b/src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_stress.txt
@@ -4,6 +4,11 @@
duration_seconds=10,
cache_size_mb=5000,
enable_logging=true,
+checkpoint_manager=
+(
+ enabled=true,
+ op_rate=5s
+),
runtime_monitor=
(
stat_cache_size=
@@ -25,4 +30,4 @@ workload_generator=
read_threads=1,
update_threads=1,
value_size=2000
-), \ No newline at end of file
+),
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/checkpoint_manager.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/checkpoint_manager.h
new file mode 100644
index 00000000000..d5396737c43
--- /dev/null
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/checkpoint_manager.h
@@ -0,0 +1,73 @@
+/*-
+ * Public Domain 2014-present MongoDB, Inc.
+ * Public Domain 2008-2014 WiredTiger, Inc.
+ *
+ * This is free and unencumbered software released into the public domain.
+ *
+ * Anyone is free to copy, modify, publish, use, compile, sell, or
+ * distribute this software, either in source code form or as a compiled
+ * binary, for any purpose, commercial or non-commercial, and by any
+ * means.
+ *
+ * In jurisdictions that recognize copyright laws, the author or authors
+ * of this software dedicate any and all copyright interest in the
+ * software to the public domain. We make this dedication for the benefit
+ * of the public at large and to the detriment of our heirs and
+ * successors. We intend this dedication to be an overt act of
+ * relinquishment in perpetuity of all present and future rights to this
+ * software under copyright law.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef CHECKPOINT_MANAGER_H
+#define CHECKPOINT_MANAGER_H
+
+#include "util/api_const.h"
+#include "connection_manager.h"
+
+namespace test_harness {
+
+class checkpoint_manager : public component {
+ public:
+ explicit checkpoint_manager(configuration *configuration)
+ : component(CHECKPOINT_MANAGER, configuration)
+ {
+ }
+ virtual ~checkpoint_manager() = default;
+
+ /* Delete the copy constructor and the assignment operator. */
+ checkpoint_manager(const checkpoint_manager &) = delete;
+ checkpoint_manager &operator=(const checkpoint_manager &) = delete;
+
+ void
+ load() override final
+ {
+ /* Load the general component things. */
+ component::load();
+
+ /* Create session that we'll use for checkpointing. */
+ if (_enabled)
+ _session = connection_manager::instance().create_session();
+ }
+
+ void
+ do_work() override final
+ {
+ debug_print("Running checkpoint", DEBUG_INFO);
+ testutil_check(_session->checkpoint(_session, nullptr));
+ }
+
+ private:
+ WT_SESSION *_session = nullptr;
+};
+
+} // namespace test_harness
+
+#endif
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/core/throttle.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/core/throttle.h
index bfe5816c70e..da5c54fb2d0 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/core/throttle.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/core/throttle.h
@@ -30,34 +30,45 @@
#define THROTTLE_H
#include <thread>
+#include <string>
#include "configuration.h"
namespace test_harness {
class throttle {
public:
- throttle(const int64_t op_count, const char interval)
+ explicit throttle(const std::string &throttle_rate)
{
- testutil_assert(op_count > 0);
- /* Lazily compute the ms for every type. */
- if (interval == 's')
- _ms = 1000 / op_count;
- else if (interval == 'm')
- _ms = (60 * 1000) / op_count;
- else if (interval == 'h')
- _ms = (60 * 60 * 1000) / op_count;
- else
- testutil_die(-1, "Specified throttle interval not supported.");
+ std::string magnitude;
+ uint64_t multiplier = 0;
+ /*
+ * Find the ms, s, or m in the string. Searching for "ms" first as the following two
+ * searches would match as well.
+ */
+ size_t pos = throttle_rate.find("ms");
+ if (pos != std::string::npos)
+ multiplier = 1;
+ else {
+ pos = throttle_rate.find("s");
+ if (pos != std::string::npos)
+ multiplier = 1000;
+ else {
+ pos = throttle_rate.find("m");
+ if (pos != std::string::npos)
+ multiplier = 60 * 1000;
+ else
+ testutil_die(-1, "no rate specifier given");
+ }
+ }
+ magnitude = throttle_rate.substr(0, pos);
+ /* This will throw if it can't cast, which is fine. */
+ _ms = std::stoi(magnitude) * multiplier;
}
- throttle(configuration *config)
- : throttle(
- config->get_optional_int(OP_COUNT, 1), config->get_optional_string(INTERVAL, "s")[0])
- {
- }
+ explicit throttle(configuration *config) : throttle(config->get_string(OP_RATE)) {}
/* Default to a second per operation. */
- throttle() : throttle(1, 's') {}
+ throttle() : throttle("1s") {}
void
sleep()
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/runtime_monitor.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/runtime_monitor.h
index bc559a03104..90aecc9aa59 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/runtime_monitor.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/runtime_monitor.h
@@ -38,6 +38,7 @@ extern "C" {
#include "core/component.h"
#include "core/throttle.h"
#include "connection_manager.h"
+#include "workload/database_operation.h"
namespace test_harness {
/* Static statistic get function. */
@@ -52,7 +53,7 @@ get_stat(WT_CURSOR *cursor, int stat_field, int64_t *valuep)
class statistic {
public:
- statistic(configuration *config)
+ explicit statistic(configuration *config)
{
_enabled = config->get_bool(ENABLED);
}
@@ -75,13 +76,13 @@ class statistic {
class cache_limit_statistic : public statistic {
public:
- cache_limit_statistic(configuration *config) : statistic(config)
+ explicit cache_limit_statistic(configuration *config) : statistic(config)
{
limit = config->get_int(LIMIT);
}
void
- check(WT_CURSOR *cursor)
+ check(WT_CURSOR *cursor) override final
{
testutil_assert(cursor != nullptr);
int64_t cache_bytes_image, cache_bytes_other, cache_bytes_max;
@@ -96,11 +97,10 @@ class cache_limit_statistic : public statistic {
*/
use_percent = ((cache_bytes_image + cache_bytes_other + 0.0) / cache_bytes_max) * 100;
if (use_percent > limit) {
- std::string error_string =
+ const std::string error_string =
"runtime_monitor: Cache usage exceeded during test! Limit: " + std::to_string(limit) +
" usage: " + std::to_string(use_percent);
- debug_print(error_string, DEBUG_ERROR);
- testutil_assert(use_percent < limit);
+ testutil_die(-1, error_string.c_str());
} else
debug_print("Cache usage: " + std::to_string(use_percent), DEBUG_TRACE);
}
@@ -109,13 +109,89 @@ class cache_limit_statistic : public statistic {
int64_t limit;
};
+static std::string
+collection_name_to_file_name(const std::string &collection_name)
+{
+ /* Strip out the URI prefix. */
+ const size_t colon_pos = collection_name.find(':');
+ testutil_assert(colon_pos != std::string::npos);
+ const auto stripped_name = collection_name.substr(colon_pos + 1);
+
+ /* Now add the directory and file extension. */
+ return std::string(DEFAULT_DIR) + "/" + stripped_name + ".wt";
+}
+
+class db_size_statistic : public statistic {
+ public:
+ db_size_statistic(configuration *config, database &database)
+ : statistic(config), _database(database)
+ {
+ _limit = config->get_int(LIMIT);
+#ifdef _WIN32
+ debug_print("Database size checking is not implemented on Windows", DEBUG_ERROR);
+#endif
+ }
+ virtual ~db_size_statistic() = default;
+
+ /* Don't need the stat cursor for this. */
+ void
+ check(WT_CURSOR *) override final
+ {
+ const auto file_names = get_file_names();
+#ifndef _WIN32
+ size_t db_size = 0;
+ for (const auto &name : file_names) {
+ struct stat sb;
+ if (stat(name.c_str(), &sb) == 0) {
+ db_size += sb.st_size;
+ debug_print(name + " was " + std::to_string(sb.st_size) + " bytes", DEBUG_TRACE);
+ } else
+ /* The only good reason for this to fail is if the file hasn't been created yet. */
+ testutil_assert(errno == ENOENT);
+ }
+ debug_print("Current database size is " + std::to_string(db_size) + " bytes", DEBUG_TRACE);
+ if (db_size > _limit) {
+ const std::string error_string =
+ "runtime_monitor: Database size limit exceeded during test! Limit: " +
+ std::to_string(_limit) + " db size: " + std::to_string(db_size);
+ testutil_die(-1, error_string.c_str());
+ }
+#else
+ static_cast<void>(file_names);
+ static_cast<void>(_database);
+ static_cast<void>(_limit);
+#endif
+ }
+
+ private:
+ std::vector<std::string>
+ get_file_names()
+ {
+ std::vector<std::string> file_names;
+ for (const auto &name : _database.get_collection_names())
+ file_names.push_back(collection_name_to_file_name(name));
+
+ /* Add WiredTiger internal tables. */
+ file_names.push_back(std::string(DEFAULT_DIR) + "/" + WT_HS_FILE);
+ file_names.push_back(std::string(DEFAULT_DIR) + "/" + WT_METAFILE);
+
+ return file_names;
+ }
+
+ database &_database;
+ int64_t _limit;
+};
+
/*
* The runtime monitor class is designed to track various statistics or other runtime signals
* relevant to the given workload.
*/
class runtime_monitor : public component {
public:
- runtime_monitor(configuration *config) : component("runtime_monitor", config) {}
+ runtime_monitor(configuration *config, database &database)
+ : component("runtime_monitor", config), _database(database)
+ {
+ }
~runtime_monitor()
{
@@ -129,7 +205,7 @@ class runtime_monitor : public component {
runtime_monitor &operator=(const runtime_monitor &) = delete;
void
- load()
+ load() override final
{
configuration *sub_config;
std::string statistic_list;
@@ -147,11 +223,15 @@ class runtime_monitor : public component {
sub_config = _config->get_subconfig(STAT_CACHE_SIZE);
_stats.push_back(new cache_limit_statistic(sub_config));
delete sub_config;
+
+ sub_config = _config->get_subconfig(STAT_DB_SIZE);
+ _stats.push_back(new db_size_statistic(sub_config, _database));
+ delete sub_config;
}
}
void
- do_work()
+ do_work() override final
{
for (const auto &it : _stats) {
if (it->enabled())
@@ -163,6 +243,7 @@ class runtime_monitor : public component {
WT_CURSOR *_cursor = nullptr;
WT_SESSION *_session = nullptr;
std::vector<statistic *> _stats;
+ database &_database;
};
} // namespace test_harness
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/test.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/test.h
index f5049df074d..2434704f6f9 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/test.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/test.h
@@ -41,6 +41,7 @@ extern "C" {
#include "util/api_const.h"
#include "core/component.h"
#include "core/configuration.h"
+#include "checkpoint_manager.h"
#include "connection_manager.h"
#include "runtime_monitor.h"
#include "timestamp_manager.h"
@@ -57,31 +58,34 @@ class test : public database_operation {
test(const std::string &config, const std::string &name)
{
_config = new configuration(name, config);
- _runtime_monitor = new runtime_monitor(_config->get_subconfig(RUNTIME_MONITOR));
+ _checkpoint_manager = new checkpoint_manager(_config->get_subconfig(CHECKPOINT_MANAGER));
+ _runtime_monitor = new runtime_monitor(_config->get_subconfig(RUNTIME_MONITOR), _database);
_timestamp_manager = new timestamp_manager(_config->get_subconfig(TIMESTAMP_MANAGER));
_workload_tracking = new workload_tracking(_config->get_subconfig(WORKLOAD_TRACKING),
OPERATION_TRACKING_TABLE_CONFIG, TABLE_OPERATION_TRACKING, SCHEMA_TRACKING_TABLE_CONFIG,
TABLE_SCHEMA_TRACKING);
- _workload_generator = new workload_generator(
- _config->get_subconfig(WORKLOAD_GENERATOR), this, _timestamp_manager, _workload_tracking);
+ _workload_generator = new workload_generator(_config->get_subconfig(WORKLOAD_GENERATOR),
+ this, _timestamp_manager, _workload_tracking, _database);
_thread_manager = new thread_manager();
/*
* Ordering is not important here, any dependencies between components should be resolved
* internally by the components.
*/
- _components = {
- _workload_tracking, _workload_generator, _timestamp_manager, _runtime_monitor};
+ _components = {_workload_tracking, _workload_generator, _timestamp_manager,
+ _runtime_monitor, _checkpoint_manager};
}
~test()
{
delete _config;
+ delete _checkpoint_manager;
delete _runtime_monitor;
delete _timestamp_manager;
delete _thread_manager;
delete _workload_generator;
delete _workload_tracking;
_config = nullptr;
+ _checkpoint_manager = nullptr;
_runtime_monitor = nullptr;
_timestamp_manager = nullptr;
_thread_manager = nullptr;
@@ -181,11 +185,13 @@ class test : public database_operation {
std::string _name;
std::vector<component *> _components;
configuration *_config;
+ checkpoint_manager *_checkpoint_manager = nullptr;
runtime_monitor *_runtime_monitor = nullptr;
thread_manager *_thread_manager = nullptr;
timestamp_manager *_timestamp_manager = nullptr;
workload_generator *_workload_generator = nullptr;
workload_tracking *_workload_tracking = nullptr;
+ database _database;
};
} // namespace test_harness
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/thread_manager.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/thread_manager.h
index b7f736c169d..d15d935584a 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/thread_manager.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/thread_manager.h
@@ -31,9 +31,6 @@
#include <thread>
-#include "workload/database_operation.h"
-#include "workload/thread_context.h"
-
namespace test_harness {
/* Class that handles threads, from their initialization to their deletion. */
class thread_manager {
@@ -52,27 +49,13 @@ class thread_manager {
}
/*
- * Generic function to create threads that take contexts, typically these will be static
- * functions.
- */
- template <typename Callable>
- void
- add_thread(thread_context *tc, database_operation *db_operation, Callable &&fct)
- {
- tc->set_running(true);
- std::thread *t = new std::thread(fct, std::ref(*tc), std::ref(*db_operation));
- _workers.push_back(t);
- }
-
- /*
- * Generic function to create threads that do not take thread contexts but take a single
- * argument, typically these threads are calling non static member function of classes.
+ * Generic function to create threads that call member function of classes.
*/
- template <typename Callable, typename Args>
+ template <typename Callable, typename... Args>
void
- add_thread(Callable &&fct, Args &&args)
+ add_thread(Callable &&fct, Args &&... args)
{
- std::thread *t = new std::thread(fct, args);
+ std::thread *t = new std::thread(fct, args...);
_workers.push_back(t);
}
@@ -83,8 +66,9 @@ class thread_manager {
join()
{
for (const auto &it : _workers) {
- if (it->joinable())
- it->join();
+ while (!it->joinable()) {
+ }
+ it->join();
}
}
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/timestamp_manager.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/timestamp_manager.h
index 96b5f6bc69c..adc5b734c05 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/timestamp_manager.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/timestamp_manager.h
@@ -47,7 +47,7 @@ class timestamp_manager : public component {
timestamp_manager(configuration *config) : component("timestamp_manager", config) {}
void
- load()
+ load() override final
{
component::load();
@@ -58,7 +58,7 @@ class timestamp_manager : public component {
}
void
- do_work()
+ do_work() override final
{
std::string config;
/* latest_ts_s represents the time component of the latest timestamp provided. */
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/util/api_const.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/util/api_const.h
index 2ea702b4848..146e98e38d6 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/util/api_const.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/util/api_const.h
@@ -33,6 +33,7 @@
namespace test_harness {
/* Component names. */
+static const char *CHECKPOINT_MANAGER = "checkpoint_manager";
static const char *RUNTIME_MONITOR = "runtime_monitor";
static const char *TIMESTAMP_MANAGER = "timestamp_manager";
static const char *WORKLOAD_GENERATOR = "workload_generator";
@@ -52,12 +53,13 @@ static const char *LIMIT = "limit";
static const char *MAX = "max";
static const char *MIN = "min";
static const char *OLDEST_LAG = "oldest_lag";
-static const char *OP_COUNT = "op_count";
+static const char *OP_RATE = "op_rate";
static const char *OPS_PER_TRANSACTION = "ops_per_transaction";
-static const char *READ_THREADS = "read_threads";
+static const char *READ_CONFIG = "read_config";
static const char *STABLE_LAG = "stable_lag";
static const char *STAT_CACHE_SIZE = "stat_cache_size";
-static const char *UPDATE_THREADS = "update_threads";
+static const char *STAT_DB_SIZE = "stat_db_size";
+static const char *THREAD_COUNT = "thread_count";
static const char *UPDATE_CONFIG = "update_config";
static const char *VALUE_SIZE = "value_size";
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_model.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_model.h
index c2a7ed9f6a6..c9562954bfd 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_model.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_model.h
@@ -50,24 +50,77 @@ struct value_t {
/* A collection is made of mapped Key objects. */
struct collection_t {
std::map<key_value_t, key_t> keys;
- std::map<key_value_t, value_t> *values = {nullptr};
+ std::map<key_value_t, value_t> values;
};
/* Representation of the collections in memory. */
class database {
public:
- const std::vector<std::string>
- get_collection_names() const
+ std::vector<std::string>
+ get_collection_names()
{
+ std::lock_guard<std::mutex> lg(_mtx);
std::vector<std::string> collection_names;
- for (auto const &it : collections)
+ for (auto const &it : _collections)
collection_names.push_back(it.first);
return (collection_names);
}
- std::map<std::string, collection_t> collections;
+ std::map<key_value_t, key_t>
+ get_keys(const std::string &collection_name)
+ {
+ std::lock_guard<std::mutex> lg(_mtx);
+ return (_collections.at(collection_name).keys);
+ }
+
+ void
+ add_collection(const std::string &collection_name)
+ {
+ std::lock_guard<std::mutex> lg(_mtx);
+ testutil_assert(_collections.find(collection_name) == _collections.end());
+ _collections[collection_name] = {};
+ }
+
+ value_t
+ get_record(const std::string &collection_name, const char *key)
+ {
+ std::lock_guard<std::mutex> lg(_mtx);
+ return (_collections.at(collection_name).values.at(key));
+ }
+
+ void
+ insert_record(const std::string &collection_name, const char *key, const char *value)
+ {
+ std::lock_guard<std::mutex> lg(_mtx);
+ auto &c = _collections.at(collection_name);
+ c.keys[key].exists = true;
+ value_t v;
+ v.value = key_value_t(value);
+ c.values.emplace(key_value_t(key), v);
+ }
+
+ void
+ update_record(const std::string &collection_name, const char *key, const char *value)
+ {
+ std::lock_guard<std::mutex> lg(_mtx);
+ auto &c = _collections.at(collection_name);
+ c.values.at(key).value = key_value_t(value);
+ }
+
+ void
+ delete_record(const std::string &collection_name, const char *key)
+ {
+ std::lock_guard<std::mutex> lg(_mtx);
+ auto &c = _collections.at(collection_name);
+ c.keys.at(key).exists = false;
+ c.values.erase(key);
+ }
+
+ private:
+ std::map<std::string, collection_t> _collections;
+ std::mutex _mtx;
};
} // namespace test_harness
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_operation.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_operation.h
index fc97c1e381c..007d39da345 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_operation.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_operation.h
@@ -69,7 +69,7 @@ class database_operation {
collection_count = config->get_int(COLLECTION_COUNT);
for (size_t i = 0; i < collection_count; ++i) {
collection_name = "table:collection" + std::to_string(i);
- database.collections[collection_name] = {};
+ database.add_collection(collection_name);
testutil_check(
session->create(session, collection_name.c_str(), DEFAULT_FRAMEWORK_SCHEMA));
ts = timestamp_manager->get_next_ts();
@@ -87,8 +87,7 @@ class database_operation {
/* Keys must be unique. */
testutil_assert(key_count <= pow(10, key_size));
- for (const auto &it_collections : database.collections) {
- collection_name = it_collections.first;
+ for (const auto &collection_name : database.get_collection_names()) {
key_cpt = 0;
/*
* WiredTiger lets you open a cursor on a collection using the same pointer. When a
@@ -122,19 +121,18 @@ class database_operation {
/* Basic read operation that walks a cursors across all collections. */
virtual void
- read_operation(thread_context &context, WT_SESSION *session)
+ read_operation(thread_context *tc)
{
WT_CURSOR *cursor;
std::vector<WT_CURSOR *> cursors;
- testutil_assert(session != nullptr);
/* Get a cursor for each collection in collection_names. */
- for (const auto &it : context.get_collection_names()) {
- testutil_check(session->open_cursor(session, it.c_str(), NULL, NULL, &cursor));
+ for (const auto &it : tc->database.get_collection_names()) {
+ testutil_check(tc->session->open_cursor(tc->session, it.c_str(), NULL, NULL, &cursor));
cursors.push_back(cursor);
}
- while (!cursors.empty() && context.is_running()) {
+ while (!cursors.empty() && tc->running()) {
/* Walk each cursor. */
for (const auto &it : cursors) {
if (it->next(it) != 0)
@@ -147,30 +145,28 @@ class database_operation {
* Basic update operation that updates all the keys to a random value in each collection.
*/
virtual void
- update_operation(thread_context &context, WT_SESSION *session)
+ update_operation(thread_context *tc)
{
- WT_DECL_RET;
WT_CURSOR *cursor;
+ WT_DECL_RET;
wt_timestamp_t ts;
std::vector<WT_CURSOR *> cursors;
- std::vector<std::string> collection_names = context.get_collection_names();
+ std::vector<std::string> collection_names = tc->database.get_collection_names();
key_value_t key, generated_value;
const char *key_tmp;
- int64_t value_size = context.get_value_size();
- uint64_t i;
+ uint64_t i = 0;
+ bool using_timestamps = tc->timestamp_manager->enabled();
- testutil_assert(session != nullptr);
/* Get a cursor for each collection in collection_names. */
for (const auto &it : collection_names) {
- testutil_check(session->open_cursor(session, it.c_str(), NULL, NULL, &cursor));
+ testutil_check(tc->session->open_cursor(tc->session, it.c_str(), NULL, NULL, &cursor));
cursors.push_back(cursor);
}
/*
* Update each collection while the test is running.
*/
- i = 0;
- while (context.is_running() && !collection_names.empty()) {
+ while (tc->running() && !collection_names.empty()) {
if (i >= collection_names.size())
i = 0;
ret = cursors[i]->next(cursors[i]);
@@ -190,35 +186,30 @@ class database_operation {
*/
key = key_value_t(key_tmp);
generated_value =
- random_generator::random_generator::instance().generate_string(value_size);
- ts = context.get_timestamp_manager()->get_next_ts();
+ random_generator::random_generator::instance().generate_string(tc->value_size);
- /* Start a transaction if possible. */
- if (!context.is_in_transaction()) {
- context.begin_transaction(session, "");
- context.set_commit_timestamp(session, ts);
- }
+ /* Start a transaction. */
+ if (!tc->transaction.active())
+ tc->transaction.begin(tc->session, "");
+
+ ts = tc->timestamp_manager->get_next_ts();
+ if (using_timestamps)
+ tc->transaction.set_commit_timestamp(
+ tc->session, timestamp_manager::decimal_to_hex(ts));
- update(context.get_tracking(), cursors[i], collection_names[i], key.c_str(),
+ update(tc->tracking, cursors[i], collection_names[i], key.c_str(),
generated_value.c_str(), ts);
- /* Commit the current transaction if possible. */
- context.increment_operation_count();
- if (context.can_commit_transaction())
- context.commit_transaction(session, "");
+ /* Commit the current transaction. */
+ tc->transaction.op_count++;
+ if (tc->transaction.can_commit())
+ tc->transaction.commit(tc->session, "");
}
+ tc->sleep();
}
-
- /*
- * The update operations will be later on inside a loop that will be managed through
- * throttle management.
- */
- while (context.is_running())
- context.sleep();
-
/* Make sure the last operation is committed now the work is finished. */
- if (context.is_in_transaction())
- context.commit_transaction(session, "");
+ if (tc->transaction.active())
+ tc->transaction.commit(tc->session, "");
}
private:
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/thread_context.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/thread_context.h
index 2cf20066504..63291ac9756 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/thread_context.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/thread_context.h
@@ -30,172 +30,137 @@
#define THREAD_CONTEXT_H
#include "../core/throttle.h"
+#include "../timestamp_manager.h"
#include "database_model.h"
#include "random_generator.h"
#include "workload_tracking.h"
namespace test_harness {
-/* Define the different thread operations. */
-enum class thread_operation {
- INSERT,
- UPDATE,
- READ,
- REMOVE,
- CHECKPOINT,
- TIMESTAMP,
- MONITOR,
- COMPONENT
-};
-
-/* Container class for a thread and any data types it may need to interact with the database. */
-class thread_context {
+class transaction_context {
public:
- thread_context(timestamp_manager *timestamp_manager, workload_tracking *tracking, database &db,
- thread_operation type, int64_t max_op, int64_t min_op, int64_t value_size, throttle throttle)
- : _database(db), _min_op(min_op), _max_op(max_op), _timestamp_manager(timestamp_manager),
- _type(type), _tracking(tracking), _value_size(value_size), _throttle(throttle)
+ explicit transaction_context(configuration *config)
{
- }
-
- void
- finish()
- {
- _running = false;
- }
-
- const std::vector<std::string>
- get_collection_names() const
- {
- return (_database.get_collection_names());
- }
-
- thread_operation
- get_thread_operation() const
- {
- return (_type);
- }
-
- timestamp_manager *
- get_timestamp_manager() const
- {
- return (_timestamp_manager);
- }
-
- workload_tracking *
- get_tracking() const
- {
- return (_tracking);
- }
-
- int64_t
- get_value_size() const
- {
- return (_value_size);
- }
-
- bool
- is_running() const
- {
- return (_running);
+ configuration *transaction_config = config->get_subconfig(OPS_PER_TRANSACTION);
+ _min_op_count = transaction_config->get_int(MIN);
+ _max_op_count = transaction_config->get_int(MAX);
+ delete transaction_config;
}
bool
- is_in_transaction() const
+ active() const
{
return (_in_txn);
}
- void
- sleep()
+ /*
+ * The current transaction can be committed if: A transaction has started and the number of
+ * operations executed in the current transaction has exceeded the threshold.
+ */
+ bool
+ can_commit() const
{
- _throttle.sleep();
+ return (_in_txn && op_count >= _target_op_count);
}
void
- set_running(bool running)
+ commit(WT_SESSION *session, const std::string &config)
{
- _running = running;
+ /* A transaction cannot be committed if not started. */
+ testutil_assert(_in_txn);
+ testutil_check(
+ session->commit_transaction(session, config.empty() ? nullptr : config.c_str()));
+ _in_txn = false;
}
void
- begin_transaction(WT_SESSION *session, const std::string &config)
+ begin(WT_SESSION *session, const std::string &config)
{
- if (!_in_txn && _timestamp_manager->enabled()) {
+ if (!_in_txn) {
testutil_check(
session->begin_transaction(session, config.empty() ? nullptr : config.c_str()));
/* This randomizes the number of operations to be executed in one transaction. */
- _max_op_count = random_generator::instance().generate_integer(_min_op, _max_op);
- _current_op_count = 0;
+ _target_op_count =
+ random_generator::instance().generate_integer(_min_op_count, _max_op_count);
+ op_count = 0;
_in_txn = true;
}
}
/*
- * The current transaction can be committed if:
- * - The timestamp manager is enabled and
- * - A transaction has started and
- * - The thread is done working. This is useful when the test is ended and the thread has
- * not reached the maximum number of operations per transaction or
- * - The number of operations executed in the current transaction has exceeded the
- * threshold.
+ * Set a commit timestamp.
*/
- bool
- can_commit_transaction() const
+ void
+ set_commit_timestamp(WT_SESSION *session, const std::string &ts)
{
- return (_timestamp_manager->enabled() && _in_txn &&
- (!_running || (_current_op_count > _max_op_count)));
+ std::string config = std::string(COMMIT_TS) + "=" + ts;
+ testutil_check(session->timestamp_transaction(session, config.c_str()));
}
- void
- commit_transaction(WT_SESSION *session, const std::string &config)
+ /*
+ * op_count is the current number of operations that have been executed in the current
+ * transaction.
+ */
+ int64_t op_count = 0;
+
+ private:
+ /*
+ * _min_op_count and _max_op_count are the minimum and maximum number of operations within one
+     * transaction. _target_op_count is the current maximum number of operations that can be
+     * executed in the current transaction.
+ */
+ int64_t _min_op_count = 0;
+ int64_t _max_op_count = INT64_MAX;
+ int64_t _target_op_count = 0;
+ bool _in_txn = false;
+};
+
+/* Container class for a thread and any data types it may need to interact with the database. */
+class thread_context {
+ public:
+ thread_context(configuration *config, timestamp_manager *timestamp_manager,
+ workload_tracking *tracking, database &db)
+ : database(db), timestamp_manager(timestamp_manager), tracking(tracking),
+ transaction(transaction_context(config))
{
- /* A transaction cannot be committed if not started. */
- testutil_assert(_in_txn);
- testutil_check(
- session->commit_transaction(session, config.empty() ? nullptr : config.c_str()));
- _in_txn = false;
+ session = connection_manager::instance().create_session();
+ _throttle = throttle(config);
+
+ /* These won't exist for read threads which is why we use optional here. */
+ key_size = config->get_optional_int(KEY_SIZE, 1);
+ value_size = config->get_optional_int(VALUE_SIZE, 1);
+
+ testutil_assert(key_size > 0 && value_size > 0);
}
void
- increment_operation_count(uint64_t inc = 1)
+ finish()
{
- _current_op_count += inc;
+ _running = false;
}
- /*
- * Set a commit timestamp if the timestamp manager is enabled.
- */
void
- set_commit_timestamp(WT_SESSION *session, wt_timestamp_t ts)
+ sleep()
{
- if (!_timestamp_manager->enabled())
- return;
+ _throttle.sleep();
+ }
- std::string config = std::string(COMMIT_TS) + "=" + _timestamp_manager->decimal_to_hex(ts);
- testutil_check(session->timestamp_transaction(session, config.c_str()));
+ bool
+ running() const
+ {
+ return (_running);
}
+ WT_SESSION *session;
+ transaction_context transaction;
+ test_harness::timestamp_manager *timestamp_manager;
+ test_harness::workload_tracking *tracking;
+ test_harness::database &database;
+ int64_t key_size = 0;
+ int64_t value_size = 0;
+
private:
- /* Representation of the collections and their key/value pairs in memory. */
- database _database;
- /*
- * _current_op_count is the current number of operations that have been executed in the current
- * transaction.
- */
- uint64_t _current_op_count = 0U;
- bool _in_txn = false, _running = false;
- /*
- * _min_op and _max_op are the minimum and maximum number of operations within one transaction.
- * _max_op_count is the current maximum number of operations that can be executed in the current
- * transaction. _max_op_count will always be <= _max_op.
- */
- int64_t _min_op, _max_op, _max_op_count = 0;
- timestamp_manager *_timestamp_manager;
- const thread_operation _type;
throttle _throttle;
- workload_tracking *_tracking;
- /* Temporary member that comes from the test configuration. */
- int64_t _value_size;
+ bool _running = true;
};
} // namespace test_harness
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_tracking.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_tracking.h
index 41efadb440b..d44ee79e563 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_tracking.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_tracking.h
@@ -76,7 +76,7 @@ class workload_tracking : public component {
}
void
- load()
+ load() override final
{
WT_SESSION *session;
@@ -102,7 +102,7 @@ class workload_tracking : public component {
}
void
- run()
+ run() override final
{
/* Does not do anything. */
}
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_validation.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_validation.h
index aaab9ad25a9..7f583a66888 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_validation.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_validation.h
@@ -120,11 +120,7 @@ class workload_validation {
* The data model is now fully updated for the last read collection. It can be
* checked.
*/
- check_reference(session, collection_name, database.collections.at(collection_name));
- /* Clear memory. */
- delete database.collections[collection_name].values;
- database.collections[collection_name].values = nullptr;
-
+ check_reference(session, collection_name, database);
collection_name = key_collection_name;
}
};
@@ -139,12 +135,8 @@ class workload_validation {
* empty if there is no collections to check after the end of the test (no collections
* created or all deleted).
*/
- if (!collection_name.empty()) {
- check_reference(session, collection_name, database.collections.at(collection_name));
- /* Clear memory. */
- delete database.collections[collection_name].values;
- database.collections[collection_name].values = nullptr;
- }
+ if (!collection_name.empty())
+ check_reference(session, collection_name, database);
}
private:
@@ -200,25 +192,17 @@ class workload_validation {
* the key has been inserted previously in an existing collection and can be safely
* deleted.
*/
- database.collections.at(collection_name).keys.at(key).exists = false;
- delete database.collections.at(collection_name).values;
- database.collections.at(collection_name).values = nullptr;
+ database.delete_record(collection_name, key);
break;
case tracking_operation::INSERT: {
/*
* Keys are unique, it is safe to assume the key has not been encountered before.
*/
- database.collections[collection_name].keys[key].exists = true;
- if (database.collections[collection_name].values == nullptr)
- database.collections[collection_name].values = new std::map<key_value_t, value_t>();
- value_t v;
- v.value = key_value_t(value);
- std::pair<key_value_t, value_t> pair(key_value_t(key), v);
- database.collections[collection_name].values->insert(pair);
+ database.insert_record(collection_name, key, value);
break;
}
case tracking_operation::UPDATE:
- database.collections[collection_name].values->at(key).value = key_value_t(value);
+ database.update_record(collection_name, key, value);
break;
default:
testutil_die(DEBUG_ERROR, "Unexpected operation in the tracking table: %d",
@@ -232,8 +216,7 @@ class workload_validation {
* representation in memory of the collection values and keys according to the tracking table.
*/
void
- check_reference(
- WT_SESSION *session, const std::string &collection_name, const collection_t &collection)
+ check_reference(WT_SESSION *session, const std::string &collection_name, database &database)
{
bool is_valid;
key_t key;
@@ -247,7 +230,7 @@ class workload_validation {
collection_name.c_str());
/* Walk through each key/value pair of the current collection. */
- for (const auto &keys : collection.keys) {
+ for (const auto &keys : database.get_keys(collection_name)) {
key_str = keys.first;
key = keys.second;
/* The key/value pair exists. */
@@ -263,12 +246,12 @@ class workload_validation {
/* Check the associated value is valid. */
if (key.exists) {
- testutil_assert(collection.values != nullptr);
if (!verify_value(session, collection_name, key_str.c_str(),
- collection.values->at(key_str).value))
+ database.get_record(collection_name, key_str.c_str()).value))
testutil_die(DEBUG_ERROR,
"check_reference: failed for key %s / value %s in collection %s.",
- key_str.c_str(), collection.values->at(key_str).value.c_str(),
+ key_str.c_str(),
+ database.get_record(collection_name, key_str.c_str()).value.c_str(),
collection_name.c_str());
}
}
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload_generator.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload_generator.h
index 5e084229123..1980cf6ac6c 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload_generator.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload_generator.h
@@ -46,9 +46,10 @@ namespace test_harness {
class workload_generator : public component {
public:
workload_generator(configuration *configuration, database_operation *db_operation,
- timestamp_manager *timestamp_manager, workload_tracking *tracking)
- : component("workload_generator", configuration), _database_operation(db_operation),
- _timestamp_manager(timestamp_manager), _tracking(tracking)
+ timestamp_manager *timestamp_manager, workload_tracking *tracking, database &database)
+ : component("workload_generator", configuration), _database(database),
+ _database_operation(db_operation), _timestamp_manager(timestamp_manager),
+ _tracking(tracking)
{
}
@@ -64,57 +65,46 @@ class workload_generator : public component {
/* Do the work of the main part of the workload. */
void
- run()
+ run() override final
{
- configuration *transaction_config, *update_config, *insert_config;
- int64_t min_operation_per_transaction, max_operation_per_transaction, read_threads,
- update_threads, value_size;
+ configuration *read_config, *update_config, *insert_config;
/* Populate the database. */
_database_operation->populate(_database, _timestamp_manager, _config, _tracking);
_db_populated = true;
/* Retrieve useful parameters from the test configuration. */
- transaction_config = _config->get_subconfig(OPS_PER_TRANSACTION);
update_config = _config->get_subconfig(UPDATE_CONFIG);
insert_config = _config->get_subconfig(INSERT_CONFIG);
- read_threads = _config->get_int(READ_THREADS);
- update_threads = _config->get_int(UPDATE_THREADS);
-
- min_operation_per_transaction = transaction_config->get_int(MIN);
- max_operation_per_transaction = transaction_config->get_int(MAX);
- testutil_assert(max_operation_per_transaction >= min_operation_per_transaction);
- value_size = _config->get_int(VALUE_SIZE);
- testutil_assert(value_size >= 0);
+ read_config = _config->get_subconfig(READ_CONFIG);
/* Generate threads to execute read operations on the collections. */
- for (size_t i = 0; i < read_threads && _running; ++i) {
- thread_context *tc = new thread_context(_timestamp_manager, _tracking, _database,
- thread_operation::READ, max_operation_per_transaction, min_operation_per_transaction,
- value_size, throttle());
+ for (size_t i = 0; i < read_config->get_int(THREAD_COUNT) && _running; ++i) {
+ thread_context *tc =
+ new thread_context(read_config, _timestamp_manager, _tracking, _database);
_workers.push_back(tc);
- _thread_manager.add_thread(tc, _database_operation, &execute_operation);
+ _thread_manager.add_thread(
+ &database_operation::read_operation, _database_operation, tc);
}
/* Generate threads to execute update operations on the collections. */
- for (size_t i = 0; i < update_threads && _running; ++i) {
- thread_context *tc = new thread_context(_timestamp_manager, _tracking, _database,
- thread_operation::UPDATE, max_operation_per_transaction,
- min_operation_per_transaction, value_size, throttle(update_config));
+ for (size_t i = 0; i < update_config->get_int(THREAD_COUNT) && _running; ++i) {
+ thread_context *tc =
+ new thread_context(update_config, _timestamp_manager, _tracking, _database);
_workers.push_back(tc);
- _thread_manager.add_thread(tc, _database_operation, &execute_operation);
+ _thread_manager.add_thread(
+ &database_operation::update_operation, _database_operation, tc);
}
- delete transaction_config;
+ delete read_config;
delete update_config;
delete insert_config;
}
void
- finish()
+ finish() override final
{
component::finish();
-
for (const auto &it : _workers)
it->finish();
_thread_manager.join();
@@ -133,36 +123,8 @@ class workload_generator : public component {
return (_db_populated);
}
- /* Workload threaded operations. */
- static void
- execute_operation(thread_context &context, database_operation &db_operation)
- {
- WT_SESSION *session;
-
- session = connection_manager::instance().create_session();
-
- switch (context.get_thread_operation()) {
- case thread_operation::READ:
- db_operation.read_operation(context, session);
- break;
- case thread_operation::REMOVE:
- case thread_operation::INSERT:
- /* Sleep until it is implemented. */
- while (context.is_running())
- std::this_thread::sleep_for(std::chrono::seconds(1));
- break;
- case thread_operation::UPDATE:
- db_operation.update_operation(context, session);
- break;
- default:
- testutil_die(DEBUG_ERROR, "system: thread_operation is unknown : %d",
- static_cast<int>(context.get_thread_operation()));
- break;
- }
- }
-
private:
- database _database;
+ database &_database;
database_operation *_database_operation;
thread_manager _thread_manager;
timestamp_manager *_timestamp_manager;
diff --git a/src/third_party/wiredtiger/test/cppsuite/tests/example_test.cxx b/src/third_party/wiredtiger/test/cppsuite/tests/example_test.cxx
index cc08d3d003a..ab8c38dec16 100644
--- a/src/third_party/wiredtiger/test/cppsuite/tests/example_test.cxx
+++ b/src/third_party/wiredtiger/test/cppsuite/tests/example_test.cxx
@@ -36,19 +36,22 @@ class example_test : public test_harness::test {
public:
example_test(const std::string &config, const std::string &name) : test(config, name) {}
- virtual void
+ void
populate(test_harness::database &database, test_harness::timestamp_manager *_timestamp_manager,
test_harness::configuration *_config, test_harness::workload_tracking *tracking)
+ override final
{
std::cout << "populate: nothing done." << std::endl;
}
- virtual void
- read_operation(test_harness::thread_context &context, WT_SESSION *session)
+
+ void
+ read_operation(test_harness::thread_context *context) override final
{
std::cout << "read_operation: nothing done." << std::endl;
}
- virtual void
- update_operation(test_harness::thread_context &context, WT_SESSION *session)
+
+ void
+ update_operation(test_harness::thread_context *context) override final
{
std::cout << "update_operation: nothing done." << std::endl;
}
diff --git a/src/third_party/wiredtiger/test/ctest_helpers.cmake b/src/third_party/wiredtiger/test/ctest_helpers.cmake
index 08584b719ca..d11f494c8ce 100644
--- a/src/third_party/wiredtiger/test/ctest_helpers.cmake
+++ b/src/third_party/wiredtiger/test/ctest_helpers.cmake
@@ -104,6 +104,15 @@ function(create_test_executable target)
target_link_libraries(${target} ${CREATE_TEST_LIBS})
endif()
+ # If compiling for windows, additionally link in the shim library.
+ if(WT_WIN)
+ target_include_directories(
+ ${target}
+ PUBLIC ${CMAKE_SOURCE_DIR}/test/windows
+ )
+ target_link_libraries(${target} windows_shim)
+ endif()
+
# Install any additional files, scripts, etc in the output test binary
# directory. Useful if we need to setup an additional wrappers needed to run the test
# executable.
@@ -133,3 +142,123 @@ function(create_test_executable target)
add_dependencies(${target} copy_dir_${target}_${dir_basename})
endforeach()
endfunction()
+
+function(define_test_variants target)
+ cmake_parse_arguments(
+ PARSE_ARGV
+ 1
+ "DEFINE_TEST"
+ ""
+ ""
+ "VARIANTS;LABELS"
+ )
+ if (NOT "${DEFINE_TEST_UNPARSED_ARGUMENTS}" STREQUAL "")
+        message(FATAL_ERROR "Unknown arguments to define_test_variants: ${DEFINE_TEST_UNPARSED_ARGUMENTS}")
+ endif()
+ if ("${DEFINE_TEST_VARIANTS}" STREQUAL "")
+ message(FATAL_ERROR "Need at least one variant for define_test_variants")
+ endif()
+
+ set(defined_tests)
+ foreach(variant ${DEFINE_TEST_VARIANTS})
+ list(LENGTH variant variant_length)
+ if (NOT variant_length EQUAL 2)
+ message(
+ FATAL_ERROR
+ "Invalid variant format: ${variant} - Expected format 'variant_name;variant args'"
+ )
+ endif()
+ list(GET variant 0 curr_variant_name)
+ list(GET variant 1 curr_variant_args)
+ set(variant_args)
+ if(WT_WIN)
+ separate_arguments(variant_args WINDOWS_COMMAND ${curr_variant_args})
+ else()
+ separate_arguments(variant_args UNIX_COMMAND ${curr_variant_args})
+ endif()
+ # Create a variant directory to run the test in.
+ add_custom_target(${curr_variant_name}_test_dir
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/${curr_variant_name})
+ # Ensure the variant target is created prior to building the test.
+ add_dependencies(${target} ${curr_variant_name}_test_dir)
+ add_test(
+ NAME ${curr_variant_name}
+ COMMAND $<TARGET_FILE:${target}> ${variant_args}
+ # Run each variant in its own subdirectory, allowing us to execute variants in
+ # parallel.
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${curr_variant_name}
+ )
+ list(APPEND defined_tests ${curr_variant_name})
+ endforeach()
+ if(DEFINE_TEST_LABELS)
+ set_tests_properties(${defined_tests} PROPERTIES LABELS "${DEFINE_TEST_LABELS}")
+ endif()
+endfunction()
+
+macro(define_c_test)
+ cmake_parse_arguments(
+ "C_TEST"
+ "SMOKE"
+ "TARGET;DIR_NAME;DEPENDS"
+ "SOURCES;FLAGS;ARGUMENTS"
+ ${ARGN}
+ )
+ if (NOT "${C_TEST_UNPARSED_ARGUMENTS}" STREQUAL "")
+ message(FATAL_ERROR "Unknown arguments to define_c_test: ${C_TEST_UNPARSED_ARGUMENTS}")
+ endif()
+ if ("${C_TEST_TARGET}" STREQUAL "")
+ message(FATAL_ERROR "No target name given to define_c_test")
+ endif()
+ if ("${C_TEST_SOURCES}" STREQUAL "")
+ message(FATAL_ERROR "No sources given to define_c_test")
+ endif()
+ if ("${C_TEST_DIR_NAME}" STREQUAL "")
+ message(FATAL_ERROR "No directory given to define_c_test")
+ endif()
+
+ # Check that the csuite dependencies are enabled before compiling and creating the test.
+ eval_dependency("${C_TEST_DEPENDS}" enabled)
+ if(enabled)
+ set(additional_executable_args)
+ if(NOT "${C_TEST_FLAGS}" STREQUAL "")
+ list(APPEND additional_executable_args FLAGS ${C_TEST_FLAGS})
+ endif()
+ if (C_TEST_SMOKE)
+ # csuite test comes with a smoke execution wrapper.
+ create_test_executable(${C_TEST_TARGET}
+ SOURCES ${C_TEST_SOURCES}
+ ADDITIONAL_FILES ${CMAKE_CURRENT_SOURCE_DIR}/${C_TEST_DIR_NAME}/smoke.sh
+ BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/${C_TEST_DIR_NAME}
+ ${additional_executable_args}
+ )
+ add_test(NAME ${C_TEST_TARGET}
+ COMMAND ${CMAKE_CURRENT_BINARY_DIR}/${C_TEST_DIR_NAME}/smoke.sh ${C_TEST_ARGUMENTS} $<TARGET_FILE:${C_TEST_TARGET}>
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${C_TEST_DIR_NAME}
+ )
+ else()
+ create_test_executable(${C_TEST_TARGET}
+ SOURCES ${C_TEST_SOURCES}
+ BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/${C_TEST_DIR_NAME}
+ ${additional_executable_args}
+ )
+            # Take a CMake-based path and convert it to a platform-specific path (/ for Unix, \ for Windows).
+ set(wt_test_home_dir ${CMAKE_CURRENT_BINARY_DIR}/${C_TEST_DIR_NAME}/WT_HOME_${C_TEST_TARGET})
+ file(TO_NATIVE_PATH "${wt_test_home_dir}" wt_test_home_dir)
+ # Ensure each DB home directory is run under the tests working directory.
+ set(command_args -h ${wt_test_home_dir})
+ list(APPEND command_args ${C_TEST_ARGUMENTS})
+ set(exec_wrapper)
+ if(WT_WIN)
+                # This is a workaround to run our csuite tests under Windows using CTest. When executing a test,
+                # CTest by-passes the shell and directly executes the test as a child process. In doing so CTest executes the binary with forward-slash paths,
+                # which, while technically valid, breaks assumptions in our testing utilities. Wrap the execution in powershell to avoid this.
+ set(exec_wrapper "powershell.exe")
+ endif()
+ add_test(NAME ${C_TEST_TARGET}
+ COMMAND ${exec_wrapper} $<TARGET_FILE:${C_TEST_TARGET}> ${command_args}
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${C_TEST_DIR_NAME}
+ )
+ endif()
+ list(APPEND c_tests ${C_TEST_TARGET})
+ endif()
+endmacro(define_c_test)
diff --git a/src/third_party/wiredtiger/test/evergreen.yml b/src/third_party/wiredtiger/test/evergreen.yml
index 27233ca996c..441b0dd3cc1 100755
--- a/src/third_party/wiredtiger/test/evergreen.yml
+++ b/src/third_party/wiredtiger/test/evergreen.yml
@@ -148,13 +148,13 @@ functions:
# Please note the "branch" variable used here is a normal shell variable to store the name of
# a WiredTiger Github branch, while the "branch_name" used above is an Evergreen built-in variable
# to store the name of the branch being tested by the Evergreen project.
- for branch in develop mongodb-4.4 mongodb-4.2 mongodb-4.0 mongodb-3.6 ; do
+ for branch in develop mongodb-5.0 mongodb-4.4 mongodb-4.2 mongodb-4.0 ; do
echo "[Debug] Checking out branch $branch ..."
git checkout $branch && git pull --ff-only && git reset --hard origin/$branch && git clean -fdqx
sh reconf
# Java API is removed in newer branches via WT-6675.
- if [ "$branch" = "mongodb-4.2" -o "$branch" = "mongodb-4.0" -o "$branch" = "mongodb-3.6" ]; then
+ if [ "$branch" = "mongodb-4.2" -o "$branch" = "mongodb-4.0" ]; then
../configure CFLAGS="-DMIGHT_NOT_RUN -Wno-error" --enable-java --enable-python --enable-strict
(cd lang/python && make ../../../lang/python/wiredtiger_wrap.c)
(cd lang/java && make ../../../lang/java/wiredtiger_wrap.c)
@@ -163,11 +163,6 @@ functions:
(cd lang/python && make ../../../lang/python/wiredtiger_wrap.c)
fi
- # Make sure the code fragment is in the documentation build.
- make all-am
- make -C test/utility
- make -C examples/c
-
(cd ../dist && sh s_docs)
(cd .. && rm -rf docs-$branch && mv docs docs-$branch)
done
@@ -192,7 +187,7 @@ functions:
# Please note the "branch" variable used here is a normal shell variable to store the name of
# a WiredTiger Github branch, while the "branch_name" used above is an Evergreen built-in variable
# to store the name of the branch being tested by the Evergreen project.
- for branch in mongodb-3.6 mongodb-4.0 mongodb-4.2 mongodb-4.4 develop ; do
+ for branch in mongodb-4.0 mongodb-4.2 mongodb-4.4 mongodb-5.0 develop ; do
echo "[Debug] Copying over doc directory for branch $branch ..."
rsync -avq ../docs-$branch/ $branch/
@@ -2635,7 +2630,7 @@ buildvariants:
display_name: "~ Documentation update"
batchtime: 1440 # 1 day
run_on:
- - ubuntu1804-test
+ - ubuntu2004-test
tasks:
- name: doc-update
expansions:
diff --git a/src/third_party/wiredtiger/test/evergreen/compatibility_test_for_releases.sh b/src/third_party/wiredtiger/test/evergreen/compatibility_test_for_releases.sh
index 78e5ee2e2e1..39c6c4c2564 100755
--- a/src/third_party/wiredtiger/test/evergreen/compatibility_test_for_releases.sh
+++ b/src/third_party/wiredtiger/test/evergreen/compatibility_test_for_releases.sh
@@ -69,6 +69,7 @@ run_format()
args=""
args+="runs.type=row " # Temporarily disable column store tests
+ args+="btree.prefix=0 " # Prefix testing isn't portable between releases
args+="cache=80 " # Medium cache so there's eviction
args+="checkpoints=1 " # Force periodic writes
args+="compression=snappy " # We only built with snappy, force the choice
@@ -81,8 +82,8 @@ run_format()
args+="rows=1000000 "
args+="salvage=0 " # Faster runs
args+="timer=4 "
- args+="transaction.isolation=snapshot "
- args+="transaction.timestamps=1 "
+ args+="transaction.isolation=snapshot " # Older releases can't do lower isolation levels
+ args+="transaction.timestamps=1 " # Older releases can't do non-timestamp transactions
args+="verify=0 " # Faster runs
for am in $2; do
diff --git a/src/third_party/wiredtiger/test/format/wts.c b/src/third_party/wiredtiger/test/format/wts.c
index f95d7903c94..48ea45b1022 100644
--- a/src/third_party/wiredtiger/test/format/wts.c
+++ b/src/third_party/wiredtiger/test/format/wts.c
@@ -358,7 +358,7 @@ create_object(WT_CONNECTION *conn)
CONFIG_APPEND(p, ",assert=(read_timestamp=%s)", g.c_txn_timestamps ? "always" : "never");
if (g.c_assert_write_timestamp)
CONFIG_APPEND(p, ",assert=(write_timestamp=on),write_timestamp_usage=%s",
- g.c_txn_timestamps ? "always" : "never");
+ g.c_txn_timestamps ? "key_consistent" : "never");
/* Configure LSM. */
if (DATASOURCE("lsm")) {
diff --git a/src/third_party/wiredtiger/test/manydbs/manydbs.c b/src/third_party/wiredtiger/test/manydbs/manydbs.c
index 9cb970e04bd..2dc0e5a1f8f 100644
--- a/src/third_party/wiredtiger/test/manydbs/manydbs.c
+++ b/src/third_party/wiredtiger/test/manydbs/manydbs.c
@@ -164,7 +164,8 @@ main(int argc, char *argv[])
testutil_make_work_dir(home);
__wt_random_init(&rnd);
for (i = 0; i < dbs; ++i) {
- testutil_check(__wt_snprintf(hometmp, HOME_SIZE, "%s/%s.%d", home, HOME_BASE, i));
+ testutil_check(
+ __wt_snprintf(hometmp, HOME_SIZE, "%s%c%s.%d", home, DIR_DELIM, HOME_BASE, i));
testutil_make_work_dir(hometmp);
/*
* Open each database. Rotate different configurations among them. Open a session and
diff --git a/src/third_party/wiredtiger/test/suite/run.py b/src/third_party/wiredtiger/test/suite/run.py
index d173fb7c10b..86aafdb261c 100755
--- a/src/third_party/wiredtiger/test/suite/run.py
+++ b/src/third_party/wiredtiger/test/suite/run.py
@@ -61,6 +61,8 @@ elif os.path.basename(curdir) == '.libs' and \
wt_builddir = os.path.join(curdir, os.pardir)
elif os.path.isfile(os.path.join(curdir, 'wt')):
wt_builddir = curdir
+elif os.path.isfile(os.path.join(curdir, 'wt.exe')):
+ wt_builddir = curdir
elif os.path.isfile(os.path.join(wt_disttop, 'wt')):
wt_builddir = wt_disttop
elif os.path.isfile(os.path.join(wt_disttop, 'build_posix', 'wt')):
diff --git a/src/third_party/wiredtiger/test/suite/test_hs14.py b/src/third_party/wiredtiger/test/suite/test_hs14.py
index caaaa371c61..1351dd23e37 100644
--- a/src/third_party/wiredtiger/test/suite/test_hs14.py
+++ b/src/third_party/wiredtiger/test/suite/test_hs14.py
@@ -75,6 +75,9 @@ class test_hs14(wttest.WiredTigerTestCase):
cursor[self.create_key(i)] = value4
self.session.commit_transaction('commit_timestamp=' + timestamp_str(4))
+ # A checkpoint will ensure that older values are written to the history store.
+ self.session.checkpoint()
+
start = time.time()
self.session.begin_transaction('read_timestamp=' + timestamp_str(3))
for i in range(1, 10000):
@@ -94,6 +97,9 @@ class test_hs14(wttest.WiredTigerTestCase):
cursor[self.create_key(i)] = value5
self.session.commit_transaction('commit_timestamp=' + timestamp_str(10))
+ # A checkpoint will ensure that older values are written to the history store.
+ self.session.checkpoint()
+
start = time.time()
self.session.begin_transaction('read_timestamp=' + timestamp_str(9))
for i in range(1, 10000):
diff --git a/src/third_party/wiredtiger/test/suite/test_prepare11.py b/src/third_party/wiredtiger/test/suite/test_prepare11.py
index f3ffa6d8bdc..4b35a173576 100644
--- a/src/third_party/wiredtiger/test/suite/test_prepare11.py
+++ b/src/third_party/wiredtiger/test/suite/test_prepare11.py
@@ -27,6 +27,8 @@
# OTHER DEALINGS IN THE SOFTWARE.
import wiredtiger, wttest
+from wtscenario import make_scenarios
+
def timestamp_str(t):
return '%x' % t
@@ -36,6 +38,13 @@ class test_prepare11(wttest.WiredTigerTestCase):
conn_config = 'cache_size=2MB,statistics=(all)'
session_config = 'isolation=snapshot'
+ commit_values = [
+ ('commit', dict(commit=True)),
+ ('rollback', dict(commit=False)),
+ ]
+
+ scenarios = make_scenarios(commit_values)
+
def test_prepare_update_rollback(self):
uri = "table:test_prepare11"
self.session.create(uri, 'key_format=S,value_format=S')
@@ -49,4 +58,9 @@ class test_prepare11(wttest.WiredTigerTestCase):
c.reserve()
c['key1'] = 'yyyy'
self.session.prepare_transaction('prepare_timestamp=10')
- self.session.rollback_transaction()
+ if self.commit:
+ self.session.timestamp_transaction('commit_timestamp=' + timestamp_str(20))
+ self.session.timestamp_transaction('durable_timestamp=' + timestamp_str(30))
+ self.session.commit_transaction()
+ else:
+ self.session.rollback_transaction()
diff --git a/src/third_party/wiredtiger/test/suite/test_prepare16.py b/src/third_party/wiredtiger/test/suite/test_prepare16.py
new file mode 100644
index 00000000000..23eb85bf444
--- /dev/null
+++ b/src/third_party/wiredtiger/test/suite/test_prepare16.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+#
+# Public Domain 2014-present MongoDB, Inc.
+# Public Domain 2008-2014 WiredTiger, Inc.
+#
+# This is free and unencumbered software released into the public domain.
+#
+# Anyone is free to copy, modify, publish, use, compile, sell, or
+# distribute this software, either in source code form or as a compiled
+# binary, for any purpose, commercial or non-commercial, and by any
+# means.
+#
+# In jurisdictions that recognize copyright laws, the author or authors
+# of this software dedicate any and all copyright interest in the
+# software to the public domain. We make this dedication for the benefit
+# of the public at large and to the detriment of our heirs and
+# successors. We intend this dedication to be an overt act of
+# relinquishment in perpetuity of all present and future rights to this
+# software under copyright law.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+import wttest
+from wiredtiger import WT_NOTFOUND
+from wtscenario import make_scenarios
+
+def timestamp_str(t):
+ return '%x' % t
+
+# test_prepare16.py
+# Test that a prepared transaction can commit or roll back multiple keys,
+# where each key can occupy its own leaf page.
+class test_prepare16(wttest.WiredTigerTestCase):
+ in_memory_values = [
+ ('no_inmem', dict(in_memory=False)),
+ ('inmem', dict(in_memory=True))
+ ]
+
+ key_format_values = [
+ ('column', dict(key_format='r')),
+ ('integer_row', dict(key_format='i')),
+ ]
+
+ txn_end_values = [
+ ('commit', dict(commit=True)),
+ ('rollback', dict(commit=False)),
+ ]
+
+ scenarios = make_scenarios(in_memory_values, key_format_values, txn_end_values)
+
+ def conn_config(self):
+ config = 'cache_size=250MB'
+ if self.in_memory:
+ config += ',in_memory=true'
+ else:
+ config += ',in_memory=false'
+ return config
+
+ def test_prepare(self):
+ nrows = 1000
+
+ # Prepare transactions are not yet supported for column-store tables.
+ if self.key_format == 'r':
+ self.skipTest('Prepare transactions for column store table is not yet supported')
+
+ # Create a table with small leaf pages so that each 400-byte value can occupy its own leaf page.
+ uri = "table:prepare16"
+ create_config = 'allocation_size=512,key_format=S,value_format=S,leaf_page_max=512,leaf_value_max=64MB'
+ self.session.create(uri, create_config)
+
+ # Pin oldest and stable timestamps to 10.
+ self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
+ ',stable_timestamp=' + timestamp_str(10))
+
+ valuea = 'a' * 400
+
+ cursor = self.session.open_cursor(uri)
+ self.session.begin_transaction()
+ for i in range(1, nrows + 1):
+ cursor[str(i)] = valuea
+
+ cursor.reset()
+ cursor.close()
+ self.session.prepare_transaction('prepare_timestamp=' + timestamp_str(10))
+
+ s = self.conn.open_session()
+ s.begin_transaction('ignore_prepare = true')
+ # Configure debug behavior on the cursor so that the page it is positioned on is evicted when the reset API is used.
+ evict_cursor = s.open_cursor(uri, None, "debug=(release_evict)")
+
+ for i in range(1, nrows + 1):
+ evict_cursor.set_key(str(i))
+ self.assertEquals(evict_cursor.search(), WT_NOTFOUND)
+ evict_cursor.reset()
+
+ if self.commit:
+ self.session.timestamp_transaction('commit_timestamp=' + timestamp_str(20))
+ self.session.timestamp_transaction('durable_timestamp=' + timestamp_str(30))
+ self.session.commit_transaction()
+ else:
+ self.session.rollback_transaction()
+
+ self.conn.set_timestamp('stable_timestamp=' + timestamp_str(30))
+ if not self.in_memory:
+ self.session.checkpoint()
+
+ self.session.begin_transaction('read_timestamp=' + timestamp_str(20))
+ cursor = self.session.open_cursor(uri)
+ for i in range(1, nrows + 1):
+ cursor.set_key(str(i))
+ if self.commit:
+ self.assertEquals(cursor.search(), 0)
+ self.assertEqual(cursor.get_value(), valuea)
+ else:
+ self.assertEquals(cursor.search(), WT_NOTFOUND)
+ self.session.commit_transaction()
diff --git a/src/third_party/wiredtiger/test/suite/test_tiered02.py b/src/third_party/wiredtiger/test/suite/test_tiered02.py
index 515e84388e5..3416e581acb 100755
--- a/src/third_party/wiredtiger/test/suite/test_tiered02.py
+++ b/src/third_party/wiredtiger/test/suite/test_tiered02.py
@@ -35,9 +35,7 @@ class test_tiered02(wttest.WiredTigerTestCase):
K = 1024
M = 1024 * K
G = 1024 * M
- # TODO: tiered: change this to a table: URI, otherwise we are
- # not using tiered files.
- uri = "file:test_tiered02"
+ uri = "table:test_tiered02"
auth_token = "test_token"
bucket = "mybucket"
@@ -46,25 +44,25 @@ class test_tiered02(wttest.WiredTigerTestCase):
prefix = "pfx-"
def conn_config(self):
- os.makedirs(self.bucket, exist_ok=True)
+ if not os.path.exists(self.bucket):
+ os.mkdir(self.bucket)
return \
'statistics=(all),' + \
'tiered_storage=(auth_token=%s,' % self.auth_token + \
'bucket=%s,' % self.bucket + \
'bucket_prefix=%s,' % self.prefix + \
- 'name=%s)' % self.extension_name
+ 'name=%s),tiered_manager=(wait=0)' % self.extension_name
# Load the local store extension, but skip the test if it is missing.
def conn_extensions(self, extlist):
extlist.skip_if_missing = True
extlist.extension('storage_sources', self.extension_name)
- def confirm_flush(self, increase=True):
- # TODO: tiered: flush tests disabled, as the interface
- # for flushing will be changed.
- return
+ def progress(self, s):
+ self.verbose(3, s)
+ self.pr(s)
- self.flushed_objects
+ def confirm_flush(self, increase=True):
got = sorted(list(os.listdir(self.bucket)))
self.pr('Flushed objects: ' + str(got))
if increase:
@@ -80,45 +78,86 @@ class test_tiered02(wttest.WiredTigerTestCase):
self.flushed_objects = 0
args = 'key_format=S'
+ intl_page = 'internal_page_max=16K'
+ base_create = 'key_format=S,value_format=S,' + intl_page
+ self.pr("create sys")
+ #self.session.create(self.uri + 'xxx', base_create)
+
+ self.progress('Create simple data set (10)')
ds = SimpleDataSet(self, self.uri, 10, config=args)
+ self.progress('populate')
ds.populate()
ds.check()
+ self.progress('checkpoint')
self.session.checkpoint()
- # For some reason, every checkpoint does not cause a flush.
- # As we're about to move to a new model of flushing, we're not going to chase this error.
- #self.confirm_flush()
+ self.progress('flush_tier')
+ self.session.flush_tier(None)
+ self.confirm_flush()
+ # FIXME-WT-7589 reopening a connection does not yet work.
+ if False:
+ self.close_conn()
+ self.progress('reopen_conn')
+ self.reopen_conn()
+ # Check what was there before
+ ds = SimpleDataSet(self, self.uri, 10, config=args)
+ ds.check()
+
+ self.progress('Create simple data set (50)')
ds = SimpleDataSet(self, self.uri, 50, config=args)
+ self.progress('populate')
ds.populate()
ds.check()
+ self.progress('checkpoint')
self.session.checkpoint()
+ self.progress('flush_tier')
+ self.session.flush_tier(None)
self.confirm_flush()
+ # FIXME-WT-7589 This test works up to this point, then runs into trouble.
+ if True:
+ return
+
+ self.progress('Create simple data set (100)')
ds = SimpleDataSet(self, self.uri, 100, config=args)
+ self.progress('populate')
ds.populate()
ds.check()
+ self.progress('checkpoint')
self.session.checkpoint()
+ self.progress('flush_tier')
+ self.session.flush_tier(None)
self.confirm_flush()
+ self.progress('Create simple data set (200)')
ds = SimpleDataSet(self, self.uri, 200, config=args)
+ self.progress('populate')
ds.populate()
ds.check()
+ self.progress('close_conn')
self.close_conn()
self.confirm_flush() # closing the connection does a checkpoint
+ self.progress('reopen_conn')
self.reopen_conn()
# Check what was there before
ds = SimpleDataSet(self, self.uri, 200, config=args)
ds.check()
# Now add some more.
+ self.progress('Create simple data set (300)')
ds = SimpleDataSet(self, self.uri, 300, config=args)
+ self.progress('populate')
ds.populate()
ds.check()
- # We haven't done a checkpoint/flush so there should be
+ # We haven't done a flush so there should be
# nothing extra on the shared tier.
self.confirm_flush(increase=False)
+ self.progress('checkpoint')
+ self.session.checkpoint()
+ self.confirm_flush(increase=False)
+ self.progress('END TEST')
if __name__ == '__main__':
wttest.run()
diff --git a/src/third_party/wiredtiger/test/suite/test_tiered04.py b/src/third_party/wiredtiger/test/suite/test_tiered04.py
index 26254f4d33d..78a7e274e53 100755
--- a/src/third_party/wiredtiger/test/suite/test_tiered04.py
+++ b/src/third_party/wiredtiger/test/suite/test_tiered04.py
@@ -88,13 +88,19 @@ class test_tiered04(wttest.WiredTigerTestCase):
stat_cursor.close()
return val
+ def check(self, tc, n):
+ for i in range(0, n):
+ self.assertEqual(tc[str(i)], str(i))
+ tc.set_key(str(n))
+ self.assertEquals(tc.search(), wiredtiger.WT_NOTFOUND)
+
# Test calling the flush_tier API.
def test_tiered(self):
# Create three tables. One using the system tiered storage, one
# specifying its own bucket and object size and one using no
# tiered storage. Use stats to verify correct setup.
intl_page = 'internal_page_max=16K'
- base_create = 'key_format=S,' + intl_page
+ base_create = 'key_format=S,value_format=S,' + intl_page
self.pr("create sys")
self.session.create(self.uri, base_create)
conf = \
@@ -110,13 +116,34 @@ class test_tiered04(wttest.WiredTigerTestCase):
self.pr("create non tiered/local")
self.session.create(self.uri_none, base_create + conf)
- #self.pr("open cursor")
- #c = self.session.open_cursor(self.uri)
self.pr("flush tier")
+ c = self.session.open_cursor(self.uri)
+ c["0"] = "0"
+ self.check(c, 1)
+ c.close()
self.session.flush_tier(None)
- self.pr("flush tier again")
+ c = self.session.open_cursor(self.uri)
+ c["1"] = "1"
+ self.check(c, 2)
+ c.close()
+
+ c = self.session.open_cursor(self.uri)
+ c["2"] = "2"
+ self.check(c, 3)
+
+ self.pr("flush tier again, holding open cursor")
+ # FIXME-WT-7591 Remove the extra cursor close and open surrounding the flush_tier call.
+ # Having a cursor open during a flush_tier does not yet work, so the test closes it,
+ # and reopens after the flush_tier.
+ c.close()
self.session.flush_tier(None)
+ c = self.session.open_cursor(self.uri)
+
+ c["3"] = "3"
+ self.check(c, 4)
+ c.close()
+
calls = self.get_stat(stat.conn.flush_tier, None)
flush = 2
self.assertEqual(calls, flush)
diff --git a/src/third_party/wiredtiger/test/suite/test_tiered05.py b/src/third_party/wiredtiger/test/suite/test_tiered05.py
index 097af289a2b..5ac7293cb46 100755
--- a/src/third_party/wiredtiger/test/suite/test_tiered05.py
+++ b/src/third_party/wiredtiger/test/suite/test_tiered05.py
@@ -59,6 +59,7 @@ class test_tiered05(wttest.WiredTigerTestCase):
# Test calling the flush_tier API with a tiered manager. Should get an error.
def test_tiered(self):
self.session.create(self.uri, 'key_format=S')
+ # Allow time for the thread to start up.
time.sleep(self.wait)
msg = "/storage manager thread is configured/"
self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
diff --git a/src/third_party/wiredtiger/tools/wt_ckpt_decode.py b/src/third_party/wiredtiger/tools/wt_ckpt_decode.py
index 6664a3445e8..940bbea37b6 100755
--- a/src/third_party/wiredtiger/tools/wt_ckpt_decode.py
+++ b/src/third_party/wiredtiger/tools/wt_ckpt_decode.py
@@ -63,10 +63,13 @@ def show_one(label, value):
l = l if l > 1 else 1
print(' {0}{1}{2:10d} (0x{2:x})'.format(label, (' ' * l), value, value))
-def show_triple(triple, name, allocsize):
- off = triple[0]
- size = triple[1]
- csum = triple[2]
+def show_ref(ref, name, allocsize):
+ if len(ref) == 4:
+ show_one(name + ' object', ref[0])
+ ref = ref[1:]
+ off = ref[0]
+ size = ref[1]
+ csum = ref[2]
if size == 0:
off = -1
csum = 0
@@ -82,19 +85,49 @@ def decode_arg(arg, allocsize):
if version != 1:
print('**** ERROR: unknown version ' + str(version))
addr = bytes(addr[1:])
- result = unpack('iiiiiiiiiiiiii',addr)
- if len(result) != 14:
- print('**** ERROR: result len unexpected: ' + str(len(result)))
- show_triple(result[0:3], 'root', allocsize)
- show_triple(result[3:6], 'alloc', allocsize)
- show_triple(result[6:9], 'avail', allocsize)
- show_triple(result[9:12], 'discard', allocsize)
- file_size = result[12]
- ckpt_size = result[13]
+
+ # The number of values in a checkpoint may be 14 or 18. In the latter case, the checkpoint is
+ # for a tiered Btree, and contains object ids for each of the four references in the checkpoint.
+ # In the former case, the checkpoint is for a regular (local, single file) Btree, and there are
+ # no objects. Based on what is present, we show them accordingly.
+
+ # First, we get the largest number of ints that can be decoded.
+ result = []
+ iformat = 'iiiiiiiiiiiiii'
+ result_len = 0
+ while True:
+ try:
+ result = unpack(iformat, addr)
+ result_len = len(result)
+ except:
+ break
+ iformat += 'i'
+
+ # Then we check the number of results against what we expect.
+ if result_len == 14:
+ ref_cnt = 3 # no object ids
+ elif result_len == 18:
+ ref_cnt = 4 # has object ids
+ else:
+ if result_len == 0:
+ result_len = 'unknown'
+ print('**** ERROR: number of integers to decode ({}) '.format(result_len) +
+ 'does not match expected checkpoint format')
+ return
+ pos = 0
+
+ # Now that we know whether the references have object ids, we can show them.
+ for refname in [ 'root', 'alloc', 'avail', 'discard' ]:
+ show_ref(result[pos : pos + ref_cnt], refname, allocsize)
+ pos += ref_cnt
+ file_size = result[pos]
+ ckpt_size = result[pos+1]
show_one('file size', file_size)
show_one('checkpoint size', ckpt_size)
-#decode_arg('018281e420f2fa4a8381e40c5855ca808080808080e22fc0e20fc0', 4096)
+#decode_arg('018281e420f2fa4a8381e40c5855ca808080808080e22fc0e20fc0', 4096) # regular Btree
+#decode_arg('01818181e412e4fd01818281e41546bd16818381e4f2dbec3980808080e22fc0cfc0', 4096) # tiered
+#decode_arg('01818181e412e4fd01818281e41546bd16818381e4f2dbec39808080e22fc0cfc0', 4096) # bad
allocsize = 4096
try: