summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/third_party/wiredtiger/build_posix/Make.subdirs1
-rw-r--r--src/third_party/wiredtiger/build_posix/aclocal/options.m417
-rw-r--r--src/third_party/wiredtiger/build_posix/configure.ac.in26
-rw-r--r--src/third_party/wiredtiger/build_win/wiredtiger.def1
-rwxr-xr-xsrc/third_party/wiredtiger/dist/api_config.py255
-rwxr-xr-xsrc/third_party/wiredtiger/dist/api_config_gen.py4
-rw-r--r--src/third_party/wiredtiger/dist/docs_data.py5
-rw-r--r--src/third_party/wiredtiger/dist/filelist1
-rw-r--r--src/third_party/wiredtiger/dist/log.py15
-rwxr-xr-xsrc/third_party/wiredtiger/dist/s_all3
-rw-r--r--src/third_party/wiredtiger/dist/s_export.list1
-rw-r--r--src/third_party/wiredtiger/dist/s_funcs.list1
-rw-r--r--src/third_party/wiredtiger/dist/s_string.ok11
-rwxr-xr-xsrc/third_party/wiredtiger/dist/s_void1
-rw-r--r--src/third_party/wiredtiger/dist/test_data.py56
-rw-r--r--src/third_party/wiredtiger/dist/test_tag.py203
-rw-r--r--src/third_party/wiredtiger/dist/test_tags.ok13
-rw-r--r--src/third_party/wiredtiger/import.data2
-rw-r--r--src/third_party/wiredtiger/src/btree/bt_delete.c6
-rw-r--r--src/third_party/wiredtiger/src/config/config_api.c35
-rw-r--r--src/third_party/wiredtiger/src/docs/Doxyfile1561
-rw-r--r--src/third_party/wiredtiger/src/docs/arch-checkpoint.dox88
-rw-r--r--src/third_party/wiredtiger/src/docs/arch-glossary.dox6
-rw-r--r--src/third_party/wiredtiger/src/docs/arch-index.dox4
-rw-r--r--src/third_party/wiredtiger/src/docs/arch-transaction.dox194
-rw-r--r--src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/schema_create.pngbin162127 -> 145802 bytes
-rw-r--r--src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/schema_generic.pngbin16233 -> 14680 bytes
-rw-r--r--src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/schema_rename.pngbin148026 -> 133892 bytes
-rw-r--r--src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/transaction_lifecycle.pngbin0 -> 44919 bytes
-rw-r--r--src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/wt_diagram.cmapx38
-rw-r--r--src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/wt_diagram.pngbin66162 -> 61723 bytes
-rw-r--r--src/third_party/wiredtiger/src/docs/spell.ok27
-rw-r--r--src/third_party/wiredtiger/src/docs/tool-index.dox7
-rw-r--r--src/third_party/wiredtiger/src/docs/tool-libfuzzer.dox206
-rw-r--r--src/third_party/wiredtiger/src/history/hs_rec.c21
-rw-r--r--src/third_party/wiredtiger/src/include/extern.h2
-rw-r--r--src/third_party/wiredtiger/src/include/log.h7
-rw-r--r--src/third_party/wiredtiger/src/include/wiredtiger.in7
-rw-r--r--src/third_party/wiredtiger/src/log/log_auto.c40
-rw-r--r--src/third_party/wiredtiger/src/txn/txn_rollback_to_stable.c21
-rw-r--r--src/third_party/wiredtiger/src/utilities/util_printlog.c15
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_config.c28
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/configuration_settings.h107
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/test_harness.h12
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/tests/poc.cxx15
-rwxr-xr-xsrc/third_party/wiredtiger/test/evergreen.yml22
-rwxr-xr-xsrc/third_party/wiredtiger/test/evergreen/compatibility_test_for_releases.sh4
-rw-r--r--src/third_party/wiredtiger/test/fuzz/Makefile.am20
-rw-r--r--src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-01
-rw-r--r--src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-11
-rw-r--r--src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-101
-rw-r--r--src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-21
-rw-r--r--src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-31
-rw-r--r--src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-41
-rw-r--r--src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-51
-rw-r--r--src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-61
-rw-r--r--src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-71
-rw-r--r--src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-81
-rw-r--r--src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-91
-rw-r--r--src/third_party/wiredtiger/test/fuzz/config/fuzz_config.c68
-rw-r--r--src/third_party/wiredtiger/test/fuzz/fuzz_coverage.sh63
-rw-r--r--src/third_party/wiredtiger/test/fuzz/fuzz_run.sh62
-rw-r--r--src/third_party/wiredtiger/test/fuzz/fuzz_util.c173
-rw-r--r--src/third_party/wiredtiger/test/fuzz/fuzz_util.h51
-rw-r--r--src/third_party/wiredtiger/test/fuzz/modify/fuzz_modify.c75
-rwxr-xr-xsrc/third_party/wiredtiger/test/suite/run.py4
-rw-r--r--src/third_party/wiredtiger/test/suite/test_backup01.py5
-rwxr-xr-xsrc/third_party/wiredtiger/test/suite/test_backup04.py44
-rw-r--r--src/third_party/wiredtiger/test/suite/test_backup07.py23
-rw-r--r--src/third_party/wiredtiger/test/suite/test_backup10.py32
-rw-r--r--src/third_party/wiredtiger/test/suite/test_backup11.py34
-rw-r--r--src/third_party/wiredtiger/test/suite/test_backup12.py87
-rw-r--r--src/third_party/wiredtiger/test/suite/test_backup13.py80
-rw-r--r--src/third_party/wiredtiger/test/suite/test_backup14.py165
-rw-r--r--src/third_party/wiredtiger/test/suite/test_backup15.py161
-rw-r--r--src/third_party/wiredtiger/test/suite/test_backup16.py11
-rw-r--r--src/third_party/wiredtiger/test/suite/test_backup17.py83
-rw-r--r--src/third_party/wiredtiger/test/suite/test_backup19.py158
-rwxr-xr-xsrc/third_party/wiredtiger/test/suite/test_checkpoint02.py2
-rw-r--r--src/third_party/wiredtiger/test/suite/test_checkpoint03.py2
-rw-r--r--src/third_party/wiredtiger/test/suite/test_hs15.py5
-rw-r--r--src/third_party/wiredtiger/test/suite/test_prepare13.py96
-rwxr-xr-xsrc/third_party/wiredtiger/test/suite/test_rollback_to_stable14.py99
-rw-r--r--src/third_party/wiredtiger/test/suite/test_rollback_to_stable15.py134
-rw-r--r--src/third_party/wiredtiger/test/suite/test_txn08.py4
-rw-r--r--src/third_party/wiredtiger/test/suite/test_txn24.py27
-rw-r--r--src/third_party/wiredtiger/test/suite/test_util18.py194
-rw-r--r--src/third_party/wiredtiger/test/suite/test_util19.py97
-rw-r--r--src/third_party/wiredtiger/test/suite/test_util20.py50
-rw-r--r--src/third_party/wiredtiger/test/suite/wtbackup.py250
-rwxr-xr-xsrc/third_party/wiredtiger/test/suite/wttest.py4
-rw-r--r--src/third_party/wiredtiger/test/test_coverage.md6
-rw-r--r--src/third_party/wiredtiger/tools/xray_to_optrack/xray_to_optrack.cxx2
93 files changed, 3688 insertions, 1783 deletions
diff --git a/src/third_party/wiredtiger/build_posix/Make.subdirs b/src/third_party/wiredtiger/build_posix/Make.subdirs
index 117e54bb613..7038cb8e4f7 100644
--- a/src/third_party/wiredtiger/build_posix/Make.subdirs
+++ b/src/third_party/wiredtiger/build_posix/Make.subdirs
@@ -34,6 +34,7 @@ test/csuite
test/cursor_order
test/fops
test/format
+test/fuzz POSIX_HOST LIBFUZZER
test/huge
test/import
test/manydbs
diff --git a/src/third_party/wiredtiger/build_posix/aclocal/options.m4 b/src/third_party/wiredtiger/build_posix/aclocal/options.m4
index 3148c986b50..34802b9634f 100644
--- a/src/third_party/wiredtiger/build_posix/aclocal/options.m4
+++ b/src/third_party/wiredtiger/build_posix/aclocal/options.m4
@@ -266,4 +266,21 @@ if test "$wt_cv_enable_llvm" = "yes"; then
fi
fi
AM_CONDITIONAL([LLVM], [test x$wt_cv_enable_llvm = xyes])
+
+AC_MSG_CHECKING(if --enable-libfuzzer option specified)
+AC_ARG_ENABLE(libfuzzer,
+ [AS_HELP_STRING([--enable-libfuzzer],
+ [Configure with LibFuzzer.])], r=$enableval, r=no)
+case "$r" in
+no) wt_cv_enable_libfuzzer=no;;
+*) wt_cv_enable_libfuzzer=yes;;
+esac
+AC_MSG_RESULT($wt_cv_enable_libfuzzer)
+if test "$wt_cv_enable_libfuzzer" = "yes"; then
+ AX_CHECK_COMPILE_FLAG([-fsanitize=fuzzer-no-link], [wt_cv_libfuzzer_works=yes])
+ if test "$wt_cv_libfuzzer_works" != "yes"; then
+ AC_MSG_ERROR([--enable-libfuzzer requires a Clang version that supports -fsanitize=fuzzer-no-link])
+ fi
+fi
+AM_CONDITIONAL([LIBFUZZER], [test x$wt_cv_enable_libfuzzer = xyes])
])
diff --git a/src/third_party/wiredtiger/build_posix/configure.ac.in b/src/third_party/wiredtiger/build_posix/configure.ac.in
index 3c427b45e14..7bcbae594cc 100644
--- a/src/third_party/wiredtiger/build_posix/configure.ac.in
+++ b/src/third_party/wiredtiger/build_posix/configure.ac.in
@@ -29,18 +29,20 @@ define([AC_LIBTOOL_LANG_F77_CONFIG], [:])dnl
# reason to believe "c++" can build compatible objects.
#
# Check whether the C++ compiler works by linking a trivial program.
-if test "$CC" = "cc"; then
- AC_CACHE_CHECK([whether the C++ compiler works],
- [wt_cv_prog_cxx_works],
- [AC_LANG_PUSH([C++])
- AC_LINK_IFELSE([AC_LANG_PROGRAM([], [])],
- [wt_cv_prog_cxx_works=yes],
- [wt_cv_prog_cxx_works=no])
- AC_LANG_POP([C++])])
-else
- AC_MSG_WARN([C++ compiler ignored unless compiler is named "cc"])
- wt_cv_prog_cxx_works=no
-fi
+AM_CONDITIONAL([IS_CXX_OK], [test "$CC" = "cc"])
+AM_COND_IF([IS_CXX_OK], [], AM_CONDITIONAL([IS_CXX_OK], [test $(expr `"$CC" --version | head -n 1 | grep -o -E "[[[[:digit:]]]].[[[[:digit:]]]].[[[[:digit:]]]]" | uniq`) = $(expr `"$CXX" --version | head -n 1 | grep -o -E "[[[[:digit:]]]].[[[[:digit:]]]].[[[[:digit:]]]]" | uniq`)]))
+
+AM_COND_IF([IS_CXX_OK],
+ [AC_CACHE_CHECK([whether the C++ compiler works],
+ [wt_cv_prog_cxx_works],
+ [AC_LANG_PUSH([C++])
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([], [])],
+ [wt_cv_prog_cxx_works=yes],
+ [wt_cv_prog_cxx_works=no])
+ AC_LANG_POP([C++])])],
+ [AC_MSG_WARN([C++ compiler ignored unless compiler is named "cc" or gcc and g++ versions match])
+ wt_cv_prog_cxx_works=no])
+
AM_CONDITIONAL([HAVE_CXX], [test "$wt_cv_prog_cxx_works" = "yes"])
LT_PREREQ(2.2.6)
diff --git a/src/third_party/wiredtiger/build_win/wiredtiger.def b/src/third_party/wiredtiger/build_win/wiredtiger.def
index 71c52bd81af..16a824acf1b 100644
--- a/src/third_party/wiredtiger/build_win/wiredtiger.def
+++ b/src/third_party/wiredtiger/build_win/wiredtiger.def
@@ -15,6 +15,7 @@ EXPORTS
wiredtiger_struct_pack
wiredtiger_struct_size
wiredtiger_struct_unpack
+ wiredtiger_test_config_validate
wiredtiger_unpack_int
wiredtiger_unpack_item
wiredtiger_unpack_start
diff --git a/src/third_party/wiredtiger/dist/api_config.py b/src/third_party/wiredtiger/dist/api_config.py
index 3ab0ec15504..8313b4bb445 100755
--- a/src/third_party/wiredtiger/dist/api_config.py
+++ b/src/third_party/wiredtiger/dist/api_config.py
@@ -2,9 +2,21 @@
from __future__ import print_function
import os, re, sys, textwrap
-import api_data
from dist import compare_srcfile, format_srcfile
+test_config = False
+
+# This file serves two purposes, it can generate configuration for the main wiredtiger library and,
+# it can generate configuration for the c and cpp suite tests. To avoid duplication we import the
+# differing apis here and then treat them as the same for the remainder of the script. However we
+# do have different logic depending on whether we intend to generate the test api or not, which is
+# managed with a boolean flag.
+if len(sys.argv) == 1 or sys.argv[1] != "-t":
+ import api_data as api_data_def
+else:
+ test_config = True
+ import test_data as api_data_def
+
# Temporary file.
tmp_file = '__tmp'
@@ -76,7 +88,7 @@ def parseconfig(c, method_name, name_indent=''):
if ctype == 'category':
for subc in sorted(c.subconfig):
output += parseconfig(subc, method_name, \
- name_indent + (' ' * 4))
+ name_indent + (' ' * 4))
output += '@config{ ),,}\n'
return output
@@ -97,58 +109,61 @@ def getconfcheck(c):
w.wrap(check + ' ' + cstr + ', ' + sstr + ' },'))
return check
-skip = False
-for line in open(f, 'r'):
- if skip:
- if '@configend' in line:
- skip = False
- continue
+if not test_config:
+ skip = False
+ for line in open(f, 'r'):
+ if skip:
+ if '@configend' in line:
+ skip = False
+ continue
- m = cbegin_re.match(line)
- if not m:
- tfile.write(line)
- continue
+ m = cbegin_re.match(line)
+ if not m:
+ tfile.write(line)
+ continue
- prefix, config_name = m.groups()
- if config_name not in api_data.methods:
- print("Missing configuration for " + config_name, file=sys.stderr)
- tfile.write(line)
- continue
+ prefix, config_name = m.groups()
+ if config_name not in api_data_def.methods:
+ print("Missing configuration for " + config_name, file=sys.stderr)
+ tfile.write(line)
+ continue
- skip = ('@configstart' in line)
+ skip = ('@configstart' in line)
- if not api_data.methods[config_name].config:
- tfile.write(prefix + '@configempty{' + config_name +
- ', see dist/api_data.py}\n')
- continue
-
- tfile.write(prefix + '@configstart{' + config_name +
- ', see dist/api_data.py}\n')
-
- w = textwrap.TextWrapper(width=100-len(prefix.expandtabs()),
- break_on_hyphens=False,
- break_long_words=False,
- replace_whitespace=False,
- fix_sentence_endings=True)
- # Separate at spaces, and after a set of non-breaking space indicators.
- w.wordsep_re = w.wordsep_simple_re = \
- re.compile(r'(\s+|(?<=&nbsp;)[\w_,.;:]+)')
- for c in api_data.methods[config_name].config:
- if 'undoc' in c.flags:
+ if not api_data_def.methods[config_name].config:
+ tfile.write(prefix + '@configempty{' + config_name +
+ ', see dist/api_data.py}\n')
continue
- output = parseconfig(c, config_name)
- for l in w.wrap(output):
- tfile.write(prefix + l.replace('\n', '\n' + prefix) + '\n')
- tfile.write(prefix + '@configend\n')
+ tfile.write(prefix + '@configstart{' + config_name +
+ ', see dist/api_data.py}\n')
-tfile.close()
-compare_srcfile(tmp_file, f)
+ w = textwrap.TextWrapper(width=100-len(prefix.expandtabs()),
+ break_on_hyphens=False,
+ break_long_words=False,
+ replace_whitespace=False,
+ fix_sentence_endings=True)
+ # Separate at spaces, and after a set of non-breaking space indicators.
+ w.wordsep_re = w.wordsep_simple_re = \
+ re.compile(r'(\s+|(?<=&nbsp;)[\w_,.;:]+)')
+ for c in api_data_def.methods[config_name].config:
+ if 'undoc' in c.flags:
+ continue
+ output = parseconfig(c, config_name)
+ for l in w.wrap(output):
+ tfile.write(prefix + l.replace('\n', '\n' + prefix) + '\n')
+
+ tfile.write(prefix + '@configend\n')
+
+ tfile.close()
+ compare_srcfile(tmp_file, f)
#####################################################################
# Create config_def.c with defaults for each config string
#####################################################################
f='../src/config/config_def.c'
+if test_config:
+ f = '../test/cppsuite/test_config.c'
tfile = open(tmp_file, 'w')
tfile.write('''/* DO NOT EDIT: automatically built by dist/api_config.py. */
@@ -239,8 +254,8 @@ def getsubconfigstr(c):
# Write structures of arrays of allowable configuration options, including a
# NULL as a terminator for iteration.
-for name in sorted(api_data.methods.keys()):
- config = api_data.methods[name].config
+for name in sorted(api_data_def.methods.keys()):
+ config = api_data_def.methods[name].config
if config:
tfile.write('''
static const WT_CONFIG_CHECK confchk_%(name)s[] = {
@@ -258,8 +273,8 @@ tfile.write('static const WT_CONFIG_ENTRY config_entries[] = {')
slot=-1
config_defines = ''
-for name in sorted(api_data.methods.keys()):
- config = api_data.methods[name].config
+for name in sorted(api_data_def.methods.keys()):
+ config = api_data_def.methods[name].config
slot += 1
# Build a list of #defines that reference specific slots in the list (the
@@ -295,72 +310,94 @@ tfile.write('\n};\n')
# Write the routine that connects the WT_CONNECTION_IMPL structure to the list
# of configuration entry structures.
-tfile.write('''
-int
-__wt_conn_config_init(WT_SESSION_IMPL *session)
-{
-\tWT_CONNECTION_IMPL *conn;
-\tconst WT_CONFIG_ENTRY *ep, **epp;
-
-\tconn = S2C(session);
-
-\t/* Build a list of pointers to the configuration information. */
-\tWT_RET(__wt_calloc_def(session, WT_ELEMENTS(config_entries), &epp));
-\tconn->config_entries = epp;
-
-\t/* Fill in the list to reference the default information. */
-\tfor (ep = config_entries;;) {
-\t\t*epp++ = ep++;
-\t\tif (ep->method == NULL)
-\t\t\tbreak;
-\t}
-\treturn (0);
-}
-
-void
-__wt_conn_config_discard(WT_SESSION_IMPL *session)
-{
-\tWT_CONNECTION_IMPL *conn;
-
-\tconn = S2C(session);
-
-\t__wt_free(session, conn->config_entries);
-}
-
-/*
- * __wt_conn_config_match --
- * Return the static configuration entry for a method.
- */
-const WT_CONFIG_ENTRY *
-__wt_conn_config_match(const char *method)
-{
-\tconst WT_CONFIG_ENTRY *ep;
-
-\tfor (ep = config_entries; ep->method != NULL; ++ep)
-\t\tif (strcmp(method, ep->method) == 0)
-\t\t\treturn (ep);
-\treturn (NULL);
-}
-''')
+if not test_config:
+ tfile.write('''
+ int
+ __wt_conn_config_init(WT_SESSION_IMPL *session)
+ {
+ \tWT_CONNECTION_IMPL *conn;
+ \tconst WT_CONFIG_ENTRY *ep, **epp;
+
+ \tconn = S2C(session);
+
+ \t/* Build a list of pointers to the configuration information. */
+ \tWT_RET(__wt_calloc_def(session, WT_ELEMENTS(config_entries), &epp));
+ \tconn->config_entries = epp;
+
+ \t/* Fill in the list to reference the default information. */
+ \tfor (ep = config_entries;;) {
+ \t\t*epp++ = ep++;
+ \t\tif (ep->method == NULL)
+ \t\t\tbreak;
+ \t}
+ \treturn (0);
+ }
+
+ void
+ __wt_conn_config_discard(WT_SESSION_IMPL *session)
+ {
+ \tWT_CONNECTION_IMPL *conn;
+
+ \tconn = S2C(session);
+
+ \t__wt_free(session, conn->config_entries);
+ }
+
+ /*
+ * __wt_conn_config_match --
+ * Return the static configuration entry for a method.
+ */
+ const WT_CONFIG_ENTRY *
+ __wt_conn_config_match(const char *method)
+ {
+ \tconst WT_CONFIG_ENTRY *ep;
+
+ \tfor (ep = config_entries; ep->method != NULL; ++ep)
+ \t\tif (strcmp(method, ep->method) == 0)
+ \t\t\treturn (ep);
+ \treturn (NULL);
+ }
+ ''')
+else:
+ tfile.write(
+ '''
+ /*
+ * __wt_test_config_match --
+ * Return the static configuration entry for a test.
+ */
+ const WT_CONFIG_ENTRY *
+ __wt_test_config_match(const char *test_name)
+ {
+ const WT_CONFIG_ENTRY *ep;
+
+ for (ep = config_entries; ep->method != NULL; ++ep)
+ if (strcmp(test_name, ep->method) == 0)
+ return (ep);
+ return (NULL);
+ }
+ '''
+ )
tfile.close()
format_srcfile(tmp_file)
compare_srcfile(tmp_file, f)
# Update the config.h file with the #defines for the configuration entries.
-tfile = open(tmp_file, 'w')
-skip = 0
-for line in open('../src/include/config.h', 'r'):
- if skip:
- if 'configuration section: END' in line:
- tfile.write('/*\n' + line)
- skip = 0
- else:
- tfile.write(line)
- if 'configuration section: BEGIN' in line:
- skip = 1
- tfile.write(' */\n')
- tfile.write(config_defines)
-tfile.close()
-format_srcfile(tmp_file)
-compare_srcfile(tmp_file, '../src/include/config.h')
+if not test_config:
+ tfile = open(tmp_file, 'w')
+ skip = 0
+ config_file = '../src/include/config.h'
+ for line in open(config_file, 'r'):
+ if skip:
+ if 'configuration section: END' in line:
+ tfile.write('/*\n' + line)
+ skip = 0
+ else:
+ tfile.write(line)
+ if 'configuration section: BEGIN' in line:
+ skip = 1
+ tfile.write(' */\n')
+ tfile.write(config_defines)
+ tfile.close()
+ format_srcfile(tmp_file)
+ compare_srcfile(tmp_file, config_file)
diff --git a/src/third_party/wiredtiger/dist/api_config_gen.py b/src/third_party/wiredtiger/dist/api_config_gen.py
new file mode 100755
index 00000000000..e8c088fbf42
--- /dev/null
+++ b/src/third_party/wiredtiger/dist/api_config_gen.py
@@ -0,0 +1,4 @@
+#!/usr/bin/env python
+import os
+os.system("./api_config.py")
+os.system("./api_config.py -t")
diff --git a/src/third_party/wiredtiger/dist/docs_data.py b/src/third_party/wiredtiger/dist/docs_data.py
index 9f2bb32486c..0aa93464bef 100644
--- a/src/third_party/wiredtiger/dist/docs_data.py
+++ b/src/third_party/wiredtiger/dist/docs_data.py
@@ -19,6 +19,11 @@ arch_doc_pages = [
ArchDocPage('arch-cache',
['WT_CACHE', 'WT_CACHE_POOL'],
['src/include/cache.h', 'src/include/cache_inline.h']),
+ ArchDocPage('arch-checkpoint',
+ ['WT_CONNECTION'],
+ ['src/block/block_ckpt.c', 'src/block/block_ckpt_scan.c',
+ 'src/conn/conn_ckpt.c', 'src/meta/meta_ckpt.c',
+ 'src/txn/txn_ckpt.c']),
ArchDocPage('arch-column',
['WT_BTREE'],
['src/include/btree.h']),
diff --git a/src/third_party/wiredtiger/dist/filelist b/src/third_party/wiredtiger/dist/filelist
index 7bbc5e5596a..39b85746b6c 100644
--- a/src/third_party/wiredtiger/dist/filelist
+++ b/src/third_party/wiredtiger/dist/filelist
@@ -215,3 +215,4 @@ src/txn/txn_log.c
src/txn/txn_recover.c
src/txn/txn_rollback_to_stable.c
src/txn/txn_timestamp.c
+test/cppsuite/test_config.c
diff --git a/src/third_party/wiredtiger/dist/log.py b/src/third_party/wiredtiger/dist/log.py
index b2c5b5d1af9..2d418b4d2fd 100644
--- a/src/third_party/wiredtiger/dist/log.py
+++ b/src/third_party/wiredtiger/dist/log.py
@@ -86,6 +86,19 @@ def printf_setup(f, i, nl_indent):
def n_setup(f):
return len(field_types[f[0]][4])
+# Check for an operation that has a file id type. Redact any user data
+# if the redact flag is set, but print operations for file id 0, known
+# to be the metadata.
+def check_redact(optype):
+ for f in optype.fields:
+ if f[0] == 'uint32_id':
+ redact_str = '\tif (!FLD_ISSET(args->flags, WT_TXN_PRINTLOG_UNREDACT) && '
+ redact_str += '%s != WT_METAFILE_ID) {\n' % (f[1])
+ redact_str += '\t\tWT_RET(__wt_fprintf(session, args->fs, " REDACTED"));\n'
+ redact_str += '\t\treturn (0);\n\t}\n'
+ return redact_str
+ return ''
+
# Create a printf line, with an optional setup function.
# ishex indicates that the the field name in the output is modified
# (to add "-hex"), and that the setup and printf are conditional
@@ -287,6 +300,7 @@ __wt_logop_%(name)s_print(WT_SESSION_IMPL *session,
\t%(arg_init)sWT_RET(__wt_logop_%(name)s_unpack(
\t session, pp, end%(arg_addrs)s));
+\t%(redact)s
\tWT_RET(__wt_fprintf(session, args->fs,
\t " \\"optype\\": \\"%(name)s\\",\\n"));
%(print_args)s
@@ -303,6 +317,7 @@ __wt_logop_%(name)s_print(WT_SESSION_IMPL *session,
'arg_fini' : ('\nerr:\t__wt_free(session, escaped);\n\treturn (ret);'
if has_escape(optype.fields) else '\treturn (0);'),
'arg_addrs' : ''.join(', &%s' % f[1] for f in optype.fields),
+ 'redact' : check_redact(optype),
'print_args' : ('\t' + '\n\t'.join(printf_line(f, optype, i, s)
for i,f in enumerate(optype.fields) for s in range(0, n_setup(f)))
if optype.fields else ''),
diff --git a/src/third_party/wiredtiger/dist/s_all b/src/third_party/wiredtiger/dist/s_all
index ac9c9699f5a..8b36c09aa66 100755
--- a/src/third_party/wiredtiger/dist/s_all
+++ b/src/third_party/wiredtiger/dist/s_all
@@ -73,7 +73,7 @@ run()
# already parallelize internally.
run "sh ./s_readme $force"
run "sh ./s_install $force"
-run "python api_config.py"
+run "python api_config_gen.py"
run "python api_err.py"
run "python flags.py"
run "python log.py"
@@ -83,6 +83,7 @@ run "sh ./s_style"
run "./s_clang-format"
run "python prototypes.py"
run "sh ./s_typedef -b"
+run "python test_tag.py"
COMMANDS="
2>&1 ./s_define > ${t_pfx}s_define
diff --git a/src/third_party/wiredtiger/dist/s_export.list b/src/third_party/wiredtiger/dist/s_export.list
index e85bf62517d..ed070963ab0 100644
--- a/src/third_party/wiredtiger/dist/s_export.list
+++ b/src/third_party/wiredtiger/dist/s_export.list
@@ -14,6 +14,7 @@ wiredtiger_strerror
wiredtiger_struct_pack
wiredtiger_struct_size
wiredtiger_struct_unpack
+wiredtiger_test_config_validate
wiredtiger_unpack_int
wiredtiger_unpack_item
wiredtiger_unpack_start
diff --git a/src/third_party/wiredtiger/dist/s_funcs.list b/src/third_party/wiredtiger/dist/s_funcs.list
index 0b7db52d26c..4f3d2a2ca87 100644
--- a/src/third_party/wiredtiger/dist/s_funcs.list
+++ b/src/third_party/wiredtiger/dist/s_funcs.list
@@ -45,6 +45,7 @@ wiredtiger_pack_uint
wiredtiger_struct_pack
wiredtiger_struct_size
wiredtiger_struct_unpack
+wiredtiger_test_config_validate
wiredtiger_unpack_int
wiredtiger_unpack_item
wiredtiger_unpack_start
diff --git a/src/third_party/wiredtiger/dist/s_string.ok b/src/third_party/wiredtiger/dist/s_string.ok
index a60c3165259..f9f2b67712f 100644
--- a/src/third_party/wiredtiger/dist/s_string.ok
+++ b/src/third_party/wiredtiger/dist/s_string.ok
@@ -216,6 +216,8 @@ LAS
LF
LLLLLL
LLLLLLL
+LLVM
+LLVMFuzzerTestOneInput
LOGREC
LOGSCAN
LOOKASIDE
@@ -231,6 +233,7 @@ LZO
LeafGreen
LevelDB
Levyx
+LibFuzzer
LmRrSVv
LmsT
LoadLoad
@@ -626,6 +629,7 @@ create's
createCStream
crypto
cryptobad
+cstring
csuite
csv
ctime
@@ -809,6 +813,9 @@ fsyncs
ftruncate
func
funcid
+fuzz
+fuzzer
+fuzzutil
fvisibility
fwrite
gc
@@ -1039,7 +1046,7 @@ multithreaded
munmap
mutex
mutexes
-mx
+mux
mytable
mytxn
namespace
@@ -1123,6 +1130,7 @@ pcpu
perf
pfx
pluggable
+poc
popen
poptable
popthreads
@@ -1457,6 +1465,7 @@ wtperf's
wts
wtstats
xF
+xdeadbeef
xff
xxxx
xxxxx
diff --git a/src/third_party/wiredtiger/dist/s_void b/src/third_party/wiredtiger/dist/s_void
index fca1ccc9810..0e9890acf78 100755
--- a/src/third_party/wiredtiger/dist/s_void
+++ b/src/third_party/wiredtiger/dist/s_void
@@ -81,6 +81,7 @@ func_ok()
-e '/int __wt_txn_read_upd_list$/d' \
-e '/int __wt_txn_rollback_required$/d' \
-e '/int __wt_win_directory_list_free$/d' \
+ -e '/int LLVMFuzzerTestOneInput$/d' \
-e '/int bdb_compare_reverse$/d' \
-e '/int copyout_val$/d' \
-e '/int csv_error$/d' \
diff --git a/src/third_party/wiredtiger/dist/test_data.py b/src/third_party/wiredtiger/dist/test_data.py
new file mode 100644
index 00000000000..ac46cf55bc2
--- /dev/null
+++ b/src/third_party/wiredtiger/dist/test_data.py
@@ -0,0 +1,56 @@
+# This file is a python script that describes the cpp test framework test configuration options.
+
+class Method:
+ def __init__(self, config):
+ # Deal with duplicates: with complex configurations (like
+ # WT_SESSION::create), it's simpler to deal with duplicates once than
+ # manually as configurations are defined
+ self.config = []
+ lastname = None
+ for c in sorted(config):
+ if '.' in c.name:
+ raise "Bad config key '%s'" % c.name
+ if c.name == lastname:
+ continue
+ lastname = c.name
+ self.config.append(c)
+
+class Config:
+ def __init__(self, name, default, desc, subconfig=None, **flags):
+ self.name = name
+ self.default = default
+ self.desc = desc
+ self.subconfig = subconfig
+ self.flags = flags
+
+ # Comparators for sorting.
+ def __eq__(self, other):
+ return self.name == other.name
+
+ def __ne__(self, other):
+ return self.name != other.name
+
+ def __lt__(self, other):
+ return self.name < other.name
+
+ def __le__(self, other):
+ return self.name <= other.name
+
+ def __gt__(self, other):
+ return self.name > other.name
+
+ def __ge__(self, other):
+ return self.name >= other.name
+methods = {
+'poc_test' : Method([
+ Config('collection_count', '1', r'''
+ the number of collections to create for testing''',
+ min='1', max='10'),
+ Config('key_size', '10', r'''
+ the size of the keys to be created in bytes''',
+ min='1', max='10000'),
+ Config('values', 'first', r'''
+ The value that each key will be populated with, used an example string configuration''',
+ choices=['first', 'second', 'third'])
+]),
+}
diff --git a/src/third_party/wiredtiger/dist/test_tag.py b/src/third_party/wiredtiger/dist/test_tag.py
new file mode 100644
index 00000000000..defa9906e1c
--- /dev/null
+++ b/src/third_party/wiredtiger/dist/test_tag.py
@@ -0,0 +1,203 @@
+import os
+import sys
+
+##### LOCAL VARIABLES AND CONSTANTS #####
+component = ""
+testing_area = ""
+test_type = ""
+
+is_end = False
+is_file_ignored = False
+is_file_tagged = False
+is_start = False
+show_info = False
+show_missing_files = False
+
+nb_ignored_files = 0
+nb_missing_files = 0
+nb_valid_files = 0
+
+sorted_tags = []
+test_files = []
+
+tagged_files = {}
+valid_tags = {}
+
+END_TAG = "[END_TAGS]"
+IGNORE_FILE = "ignored_file"
+NB_TAG_ARGS = 3
+START_TAG = "[TEST_TAGS]"
+#####
+
+##### PROCESS ARGS #####
+for arg in sys.argv:
+ if arg == "-h":
+ print("Usage: python test_tag.py [options]")
+ print("Options:")
+ print("\t-i\tShow info")
+ print("\t-p\tShow files with no test tags")
+ exit()
+ elif arg == "-i":
+ show_info = True
+ elif arg == "-p":
+ show_missing_files = True
+#####
+
+##### GET ALL TEST FILES #####
+for root, dirs, files in os.walk("../test/"):
+ path = root.split(os.sep)
+ for file in files:
+ filename = os.path.join('/'.join(path), file)
+ if filename.endswith("main.c") or filename.endswith(".py"):
+ test_files.append(filename)
+#####
+
+##### RETRIEVE VALID TAGS #####
+validation_file = open("test_tags.ok", "r")
+
+# The file has the following pattern
+# <COMPONENT>:<TESTING_TYPE>:<TESTING_AREA>:<DESCRIPTION>
+# A tag is made of the three first values: COMPONENT, TEST_TYPE and TESTING_AREA
+tags = validation_file.readlines()
+tags = [tag.replace('\n', '') for tag in tags]
+
+for tag in tags:
+ current_line = tag.split(':')
+ # Createa key value pair <TAG>:<DESCRIPTION>
+ valid_tags[':'.join(current_line[:NB_TAG_ARGS])] = ':'.join(current_line[NB_TAG_ARGS:])
+
+validation_file.close()
+#####
+
+##### PARSE TEST FILES #####
+for filename in test_files:
+ input_file = open(filename, "r")
+ lines = input_file.readlines()
+
+ is_start = False
+ is_end = False
+ is_file_ignored = False
+ is_file_tagged = False
+
+ # Read line by line
+ for line in lines:
+ # Format line
+ line = line.replace('\n', '').replace('\r', '') \
+ .replace(' ', '').replace('#', '') \
+ .replace('*', '')
+
+ # Check if line is valid
+ if not line:
+ # Check if invalid line after START_TAG
+ if is_start == True:
+ print("Error syntax in file " + filename)
+ exit()
+ else:
+ continue
+
+ # Check if end of test tag
+ if END_TAG in line:
+ # END_TAG should not be before START_TAG
+ if is_start == False:
+ print("Error syntax in file " + filename + ". Unexpected tag: " + END_TAG)
+ exit()
+ # END_TAG should not be met before a test tag
+ if is_file_ignored == False and is_file_tagged == False:
+ print("Error syntax in file " + filename + ". Missing test tag.")
+ exit()
+ is_end = True
+ nb_valid_files = nb_valid_files + 1
+ # Go to next file
+ break
+
+ # Check if start of test tag
+ if START_TAG in line:
+ # Only one START_TAG is allowed
+ if is_start == True:
+ print("Error syntax in file " + filename + ". Unexpected tag: " + START_TAG)
+ exit()
+ is_start = True
+ continue
+
+ if is_start == True:
+ # Check if file is ignored
+ if is_file_ignored == True:
+ print("Unexpected value in ignored file: " + filename)
+ exit()
+ if line == IGNORE_FILE:
+ nb_ignored_files = nb_ignored_files + 1
+ is_file_ignored = True
+ continue
+ # Check if current tag is valid
+ if not line in valid_tags:
+ print("Tag is not valid ! Add the new tag to test_tags.ok:\n" + line)
+ exit()
+ else:
+ is_file_tagged = True
+ # Check if current tag has already matched test files
+ if line in tagged_files:
+ tagged_files[line].append(filename)
+ else:
+ tagged_files[line] = [filename]
+
+ if is_file_ignored == False and is_file_tagged == False:
+ nb_missing_files = nb_missing_files + 1
+ if show_missing_files == True:
+ print("Missing test tag in file: " + filename)
+
+ input_file.close()
+#####
+
+##### GENERATE OUTPUT #####
+output_file = open("../test/test_coverage.md", "w")
+
+# Table headers
+output_file.write("|Component|Test Type|Testing Area|Description|Existing tests|" + '\n')
+output_file.write("|---|---|---|---|---|" + '\n')
+
+# Sort tags
+sorted_tags = list(tagged_files.keys())
+sorted_tags.sort()
+
+for tag in sorted_tags:
+ # Split line
+ current_line = tag.split(":")
+
+ # Parse tag
+ component = current_line[0]
+ test_type = current_line[1]
+ testing_area = current_line[2]
+
+ # Format output
+ component = component.replace("_", " ").title()
+ test_type = test_type.replace("_", " ").title()
+ testing_area = testing_area.replace("_", " ").title()
+
+ # Relative path to test file
+ link = ""
+ for name in tagged_files[tag]:
+ link += "[" + name + "](" + name + "), "
+ # Remove the extra ", " at the end
+ link = link[:-2]
+
+ # Write to output
+ output_file.write('|' + component + '|' + test_type + '|' + \
+ testing_area + '|' + valid_tags[tag] + '|' \
+ + link + '\n')
+
+output_file.close()
+#####
+
+##### STATS #####
+if show_info == True:
+ print("Tagged files:\t" + str(nb_valid_files - nb_ignored_files))
+ print("Missing files:\t" + str(nb_missing_files))
+ print("Ignored files:\t" + str(nb_ignored_files))
+ print("Total files:\t" + str(nb_valid_files + nb_missing_files))
+#####
+
+# Enforce tagging
+#if nb_missing_files > 0:
+# print("Files missing a tag: " + str(nb_missing_files))
+# if show_missing_files == False:
+# print("Call \'python test_tag.py -p\' to list all files with no tags")
diff --git a/src/third_party/wiredtiger/dist/test_tags.ok b/src/third_party/wiredtiger/dist/test_tags.ok
new file mode 100644
index 00000000000..017af50ee15
--- /dev/null
+++ b/src/third_party/wiredtiger/dist/test_tags.ok
@@ -0,0 +1,13 @@
+backup:correctness:full_backup:Full backup contains correct data
+checkpoints:correctness:checkpoint_data:On system with a complex, concurrent workload the correct versions of data appear in checkpoints
+checkpoints:correctness:creation_and_deletion:Deleting arbitrary checkpoints doesn’t corrupt data held in other checkpoints
+checkpoints:correctness:cross_table:Ensure that checkpoints across many tables are correct.
+checkpoints:fault_injection:data_correctness:After a fault-induced failure, all data for completed checkpoints is properly preserved
+checkpoints:liveness:liveness:Identify bugs and race conditions related to checkpoints that can cause deadlocks or livelocks
+checkpoints:scalability:many_checkpoints:Ensure that we can take as many checkpoints as there is storage space for
+caching_eviction:fault_injection:cache_corruption:If dirty or clean data is corrupted in cache, what happens?
+caching_eviction:correctness:written_data:Ensure that the data written out by eviction is correct after reading
+caching_eviction:liveness:cache_stuck:Identify bugs and bad workloads that can cause the WiredTiger cache to live lock (a.k.a., “cache stuck” errors).
+caching_eviction:scalability:large_caches:Test that WT behaves correctly with very large cache sizes.
+caching_eviction:scalability:large_dataset:Test workloads constructed to never/rarely hit in the cache. WT may perform slowly, but should be correct.
+caching_eviction:scalability:large_metadata:Test workloads constructed so that the metadata is larger than the cache (i.e., very many files and small cache).
diff --git a/src/third_party/wiredtiger/import.data b/src/third_party/wiredtiger/import.data
index 3bbabe4e470..5888a1fcf53 100644
--- a/src/third_party/wiredtiger/import.data
+++ b/src/third_party/wiredtiger/import.data
@@ -2,5 +2,5 @@
"vendor": "wiredtiger",
"github": "wiredtiger/wiredtiger.git",
"branch": "mongodb-4.4",
- "commit": "332dddfe0e48eb1c263455d3db9219ec5f7cdc30"
+ "commit": "33f1a6eedf856ada5dc598831863378769ce2670"
}
diff --git a/src/third_party/wiredtiger/src/btree/bt_delete.c b/src/third_party/wiredtiger/src/btree/bt_delete.c
index bdf8618edc7..fd4bc669f1e 100644
--- a/src/third_party/wiredtiger/src/btree/bt_delete.c
+++ b/src/third_party/wiredtiger/src/btree/bt_delete.c
@@ -109,13 +109,15 @@ __wt_delete_page(WT_SESSION_IMPL *session, WT_REF *ref, bool *skipp)
* discarded. The way we figure that out is to check the page's cell type, cells for leaf pages
* without overflow items are special.
*
- * Additionally, if the aggregated start time point on the page is not visible to us then we
- * cannot truncate the page.
+ * Additionally, if the page has prepared updates or the aggregated start time point on the page
+ * is not visible to us then we cannot truncate the page.
*/
if (!__wt_ref_addr_copy(session, ref, &addr))
goto err;
if (addr.type != WT_ADDR_LEAF_NO)
goto err;
+ if (addr.ta.prepare)
+ goto err;
if (!__wt_txn_visible(session, addr.ta.newest_txn, addr.ta.newest_start_durable_ts))
goto err;
diff --git a/src/third_party/wiredtiger/src/config/config_api.c b/src/third_party/wiredtiger/src/config/config_api.c
index f0c1d799089..d5c343e422c 100644
--- a/src/third_party/wiredtiger/src/config/config_api.c
+++ b/src/third_party/wiredtiger/src/config/config_api.c
@@ -85,12 +85,13 @@ wiredtiger_config_parser_open(
}
/*
- * wiredtiger_config_validate --
- * Validate a configuration string.
+ * __config_validate --
+ * Validate a configuration string. Taking a function pointer to the matching function for the
+ * given configuration set.
*/
-int
-wiredtiger_config_validate(
- WT_SESSION *wt_session, WT_EVENT_HANDLER *event_handler, const char *name, const char *config)
+static int
+__config_validate(WT_SESSION *wt_session, WT_EVENT_HANDLER *event_handler, const char *name,
+ const char *config, const WT_CONFIG_ENTRY *config_matcher(const char *))
{
const WT_CONFIG_ENTRY *ep, **epp;
WT_CONNECTION_IMPL *conn, dummy_conn;
@@ -132,7 +133,7 @@ wiredtiger_config_validate(
* added).
*/
if (session == NULL || conn == NULL || conn->config_entries == NULL)
- ep = __wt_conn_config_match(name);
+ ep = config_matcher(name);
else {
ep = NULL;
for (epp = conn->config_entries; *epp != NULL && (*epp)->method != NULL; ++epp)
@@ -148,6 +149,28 @@ wiredtiger_config_validate(
}
/*
+ * wiredtiger_config_validate --
+ * Validate a configuration string.
+ */
+int
+wiredtiger_config_validate(
+ WT_SESSION *wt_session, WT_EVENT_HANDLER *event_handler, const char *name, const char *config)
+{
+ return (__config_validate(wt_session, event_handler, name, config, __wt_conn_config_match));
+}
+
+/*
+ * wiredtiger_test_config_validate --
+ * Validate a test configuration string.
+ */
+int
+wiredtiger_test_config_validate(
+ WT_SESSION *wt_session, WT_EVENT_HANDLER *event_handler, const char *name, const char *config)
+{
+ return (__config_validate(wt_session, event_handler, name, config, __wt_test_config_match));
+}
+
+/*
* __conn_foc_add --
* Add a new entry into the connection's free-on-close list.
*/
diff --git a/src/third_party/wiredtiger/src/docs/Doxyfile b/src/third_party/wiredtiger/src/docs/Doxyfile
index 7a855b2f94d..1a66e35fa74 100644
--- a/src/third_party/wiredtiger/src/docs/Doxyfile
+++ b/src/third_party/wiredtiger/src/docs/Doxyfile
@@ -14,90 +14,90 @@
# Project related configuration options
#---------------------------------------------------------------------------
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all
-# text before the first occurrence of this tag. Doxygen uses libiconv (or the
-# iconv built into libc) for the transcoding. See
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
# http://www.gnu.org/software/libiconv for the list of possible encodings.
DOXYFILE_ENCODING = UTF-8
-# The PROJECT_NAME tag is a single word (or sequence of words) that should
-# identify the project. Note that if you do not use Doxywizard you need
+# The PROJECT_NAME tag is a single word (or sequence of words) that should
+# identify the project. Note that if you do not use Doxywizard you need
# to put quotes around the project name if it contains spaces.
PROJECT_NAME = WiredTiger
-# The PROJECT_NUMBER tag can be used to enter a project or revision number.
-# This could be handy for archiving the generated documentation or
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
# if some version control system is used.
PROJECT_NUMBER = "Version 1.0"
-# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give viewer
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer
# a quick idea about the purpose of the project. Keep the description short.
-PROJECT_BRIEF =
+PROJECT_BRIEF =
-# With the PROJECT_LOGO tag one can specify an logo or icon that is
-# included in the documentation. The maximum height of the logo should not
-# exceed 55 pixels and the maximum width should not exceed 200 pixels.
+# With the PROJECT_LOGO tag one can specify an logo or icon that is
+# included in the documentation. The maximum height of the logo should not
+# exceed 55 pixels and the maximum width should not exceed 200 pixels.
# Doxygen will copy the logo to the output directory.
PROJECT_LOGO = images/LogoFinal-header.png
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
-# base path where the generated documentation will be put.
-# If a relative path is entered, it will be relative to the location
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
# where doxygen was started. If left blank the current directory will be used.
OUTPUT_DIRECTORY = ../../docs
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
-# 4096 sub-directories (in 2 levels) under the output directory of each output
-# format and will distribute the generated files over these directories.
-# Enabling this option can be useful when feeding doxygen a huge amount of
-# source files, where putting all generated files in the same directory would
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
# otherwise cause performance problems for the file system.
CREATE_SUBDIRS = NO
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# The default language is English, other supported languages are:
-# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
-# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
-# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
-# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
-# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
OUTPUT_LANGUAGE = English
-# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
-# include brief member descriptions after the members that are listed in
-# the file and class documentation (similar to JavaDoc).
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
# Set to NO to disable this.
BRIEF_MEMBER_DESC = YES
-# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
-# the brief description of a member or function before the detailed description.
-# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
# brief descriptions will be completely suppressed.
REPEAT_BRIEF = YES
-# This tag implements a quasi-intelligent brief description abbreviator
-# that is used to form the text in various listings. Each string
-# in this list, if found as the leading text of the brief description, will be
-# stripped from the text and the result after processing the whole list, is
-# used as the annotated text. Otherwise, the brief description is used as-is.
-# If left blank, the following values are used ("$name" is automatically
-# replaced with the name of the entity): "The $name class" "The $name widget"
-# "The $name file" "is" "provides" "specifies" "contains"
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
# "represents" "a" "an" "the"
ABBREVIATE_BRIEF = "The $name class" \
@@ -112,97 +112,97 @@ ABBREVIATE_BRIEF = "The $name class" \
an \
the
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# Doxygen will generate a detailed section even if there is only a brief
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
# description.
ALWAYS_DETAILED_SEC = NO
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
# operators of the base classes will not be shown.
INLINE_INHERITED_MEMB = NO
-# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
-# path before files name in the file list and in the header files. If set
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
# to NO the shortest path that makes the file name unique will be used.
FULL_PATH_NAMES = NO
-# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
-# can be used to strip a user-defined part of the path. Stripping is
-# only done if one of the specified strings matches the left-hand part of
-# the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the
-# path to strip. Note that you specify absolute paths here, but also
-# relative paths, which will be relative from the directory where doxygen is
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip. Note that you specify absolute paths here, but also
+# relative paths, which will be relative from the directory where doxygen is
# started.
-STRIP_FROM_PATH =
+STRIP_FROM_PATH =
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
-# the path mentioned in the documentation of a class, which tells
-# the reader which header file to include in order to use a class.
-# If left blank only the name of the header file containing the class
-# definition is used. Otherwise one should specify the include paths that
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
# are normally passed to the compiler using the -I flag.
STRIP_FROM_INC_PATH = ../include/
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
-# (but less readable) file names. This can be useful if your file system
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
# doesn't support long names like on DOS, Mac, or CD-ROM.
SHORT_NAMES = NO
-# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
-# will interpret the first line (until the first dot) of a JavaDoc-style
-# comment as the brief description. If set to NO, the JavaDoc
-# comments will behave just like regular Qt-style comments
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
# (thus requiring an explicit @brief command for a brief description.)
JAVADOC_AUTOBRIEF = NO
-# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
-# interpret the first line (until the first dot) of a Qt-style
-# comment as the brief description. If set to NO, the comments
-# will behave just like regular Qt-style comments (thus requiring
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
# an explicit \brief command for a brief description.)
QT_AUTOBRIEF = YES
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
-# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
# comments) as a brief description. This used to be the default behavior.
-# The new default is to treat a multi-line C++ comment block as a detailed
+# The new default is to treat a multi-line C++ comment block as a detailed
# description. Set this tag to YES if you prefer the old behavior instead.
MULTILINE_CPP_IS_BRIEF = NO
-# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
-# member inherits the documentation from any documented member that it
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
# re-implements.
INHERIT_DOCS = YES
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
-# a new page for each member. If set to NO, the documentation of a member will
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
# be part of the file/class/namespace that contains it.
SEPARATE_MEMBER_PAGES = NO
-# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
# Doxygen uses this value to replace tabs by spaces in code fragments.
TAB_SIZE = 8
-# This tag can be used to specify a number of aliases that acts
-# as commands in the documentation. An alias has the form "name=value".
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to
-# put the command \sideeffect (or @sideeffect) in the documentation, which
-# will result in a user-defined paragraph with heading "Side Effects:".
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
# You can put \n's in the value part of an alias to insert newlines.
ALIASES = \
@@ -243,84 +243,84 @@ ALIASES = \
row{9}="<tr><td>\1</td><td>\2</td><td>\3</td><td>\4</td><td>\5</td><td>\6</td><td>\7</td><td>\8</td><td>\9</td></tr>" \
subpage_single="@subpage" \
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding
-# "class=itcl::class" will allow you to use the command class in the
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding
+# "class=itcl::class" will allow you to use the command class in the
# itcl::class meaning.
-TCL_SUBST =
+TCL_SUBST =
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
-# sources only. Doxygen will then generate output that is more tailored for C.
-# For instance, some of the names that are used will be different. The list
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
# of all members will be omitted, etc.
OPTIMIZE_OUTPUT_FOR_C = NO
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
-# sources only. Doxygen will then generate output that is more tailored for
-# Java. For instance, namespaces will be presented as packages, qualified
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
# scopes will look different, etc.
OPTIMIZE_OUTPUT_JAVA = YES
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources only. Doxygen will then generate output that is more tailored for
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
# Fortran.
OPTIMIZE_FOR_FORTRAN = NO
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
# VHDL.
OPTIMIZE_OUTPUT_VHDL = NO
-# Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given
-# extension. Doxygen has a built-in mapping, but you can override or extend it
-# using this tag. The format is ext=language, where ext is a file extension,
-# and language is one of the parsers supported by doxygen: IDL, Java,
-# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C,
-# C++. For instance to make doxygen treat .inc files as Fortran files (default
-# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note
-# that for custom extensions you also need to set FILE_PATTERNS otherwise the
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension,
+# and language is one of the parsers supported by doxygen: IDL, Java,
+# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C,
+# C++. For instance to make doxygen treat .inc files as Fortran files (default
+# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note
+# that for custom extensions you also need to set FILE_PATTERNS otherwise the
# files are not read by doxygen.
EXTENSION_MAPPING = in=C
-# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
-# comments according to the Markdown format, which allows for more readable
-# documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you
-# can mix doxygen, HTML, and XML commands with Markdown formatting.
+# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
+# comments according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you
+# can mix doxygen, HTML, and XML commands with Markdown formatting.
# Disable only in case of backward compatibilities issues.
MARKDOWN_SUPPORT = YES
-# When enabled doxygen tries to link words that correspond to documented classes,
-# or namespaces to their corresponding documentation. Such a link can be
-# prevented in individual cases by by putting a % sign in front of the word or
+# When enabled doxygen tries to link words that correspond to documented classes,
+# or namespaces to their corresponding documentation. Such a link can be
+# prevented in individual cases by by putting a % sign in front of the word or
# globally by setting AUTOLINK_SUPPORT to NO.
AUTOLINK_SUPPORT = YES
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should
-# set this tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
-# func(std::string) {}). This also makes the inheritance and collaboration
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
+# func(std::string) {}). This also makes the inheritance and collaboration
# diagrams that involve STL classes more complete and accurate.
BUILTIN_STL_SUPPORT = NO
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
# enable parsing support.
CPP_CLI_SUPPORT = NO
-# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
-# Doxygen will parse them like normal C++ but will assume all classes use public
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
# instead of private inheritance when no explicit protection keyword is present.
SIP_SUPPORT = NO
@@ -334,54 +334,54 @@ SIP_SUPPORT = NO
IDL_PROPERTY_SUPPORT = YES
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
# all members of a group must be documented explicitly.
DISTRIBUTE_GROUP_DOC = NO
-# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
-# the same type (for instance a group of public functions) to be put as a
-# subgroup of that type (e.g. under the Public Functions section). Set it to
-# NO to prevent subgrouping. Alternatively, this can be done per class using
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
# the \nosubgrouping command.
SUBGROUPING = YES
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
-# unions are shown inside the group in which they are included (e.g. using
-# @ingroup) instead of on a separate page (for HTML and Man pages) or
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
+# unions are shown inside the group in which they are included (e.g. using
+# @ingroup) instead of on a separate page (for HTML and Man pages) or
# section (for LaTeX and RTF).
INLINE_GROUPED_CLASSES = NO
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
-# unions with only public data fields will be shown inline in the documentation
-# of the scope in which they are defined (i.e. file, namespace, or group
-# documentation), provided this scope is documented. If set to NO (the default),
-# structs, classes, and unions are shown on a separate page (for HTML and Man
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
+# unions with only public data fields will be shown inline in the documentation
+# of the scope in which they are defined (i.e. file, namespace, or group
+# documentation), provided this scope is documented. If set to NO (the default),
+# structs, classes, and unions are shown on a separate page (for HTML and Man
# pages) or section (for LaTeX and RTF).
INLINE_SIMPLE_STRUCTS = YES
-# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
-# is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically
-# be useful for C code in case the coding convention dictates that all compound
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
# types are typedef'ed and only the typedef is referenced, never the tag name.
TYPEDEF_HIDES_STRUCT = YES
-# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be
-# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given
-# their name and scope. Since this can be an expensive process and often the
-# same symbol appear multiple times in the code, doxygen keeps a cache of
-# pre-resolved symbols. If the cache is too small doxygen will become slower.
-# If the cache is too large, memory is wasted. The cache size is given by this
-# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0,
+# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be
+# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given
+# their name and scope. Since this can be an expensive process and often the
+# same symbol appear multiple times in the code, doxygen keeps a cache of
+# pre-resolved symbols. If the cache is too small doxygen will become slower.
+# If the cache is too large, memory is wasted. The cache size is given by this
+# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0,
# corresponding to a cache size of 2^16 = 65536 symbols.
LOOKUP_CACHE_SIZE = 0
@@ -390,298 +390,298 @@ LOOKUP_CACHE_SIZE = 0
# Build related configuration options
#---------------------------------------------------------------------------
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available.
-# Private class members and static file members will be hidden unless
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
EXTRACT_ALL = NO
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
# will be included in the documentation.
EXTRACT_PRIVATE = NO
-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
# scope will be included in the documentation.
EXTRACT_PACKAGE = NO
-# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
# will be included in the documentation.
EXTRACT_STATIC = NO
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
-# defined locally in source files will be included in the documentation.
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
# If set to NO only classes defined in header files are included.
EXTRACT_LOCAL_CLASSES = YES
-# This flag is only useful for Objective-C code. When set to YES local
-# methods, which are defined in the implementation section but not in
-# the interface are included in the documentation.
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
# If set to NO (the default) only methods in the interface are included.
EXTRACT_LOCAL_METHODS = NO
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base
-# name of the file that contains the anonymous namespace. By default
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
# anonymous namespaces are hidden.
EXTRACT_ANON_NSPACES = NO
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
-# undocumented members of documented classes, files or namespaces.
-# If set to NO (the default) these members will be included in the
-# various overviews, but no documentation section is generated.
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
# This option has no effect if EXTRACT_ALL is enabled.
HIDE_UNDOC_MEMBERS = NO
-# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy.
-# If set to NO (the default) these classes will be included in the various
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
# overviews. This option has no effect if EXTRACT_ALL is enabled.
HIDE_UNDOC_CLASSES = NO
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
-# friend (class|struct|union) declarations.
-# If set to NO (the default) these declarations will be included in the
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
# documentation.
HIDE_FRIEND_COMPOUNDS = NO
-# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
-# documentation blocks found inside the body of a function.
-# If set to NO (the default) these blocks will be appended to the
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
# function's detailed documentation block.
HIDE_IN_BODY_DOCS = NO
-# The INTERNAL_DOCS tag determines if documentation
-# that is typed after a \internal command is included. If the tag is set
-# to NO (the default) then the documentation will be excluded.
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
# Set it to YES to include the internal documentation.
INTERNAL_DOCS = NO
-# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
-# file names in lower-case letters. If set to YES upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
# and Mac users are advised to set this option to NO.
CASE_SENSE_NAMES = NO
-# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
-# will show members with their full class and namespace scopes in the
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
# documentation. If set to YES the scope will be hidden.
HIDE_SCOPE_NAMES = NO
-# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
-# will put a list of the files that are included by a file in the documentation
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
# of that file.
SHOW_INCLUDE_FILES = NO
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
-# will list include files with double quotes in the documentation
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
+# will list include files with double quotes in the documentation
# rather than with sharp brackets.
FORCE_LOCAL_INCLUDES = NO
-# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
# is inserted in the documentation for inline members.
INLINE_INFO = YES
-# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
-# will sort the (detailed) documentation of file and class members
-# alphabetically by member name. If set to NO the members will appear in
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
# declaration order.
SORT_MEMBER_DOCS = YES
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
-# brief documentation of file, namespace and class members alphabetically
-# by member name. If set to NO (the default) the members will appear in
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
# declaration order.
SORT_BRIEF_DOCS = NO
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
-# will sort the (brief and detailed) documentation of class members so that
-# constructors and destructors are listed first. If set to NO (the default)
-# the constructors will appear in the respective orders defined by
-# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
-# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
+# will sort the (brief and detailed) documentation of class members so that
+# constructors and destructors are listed first. If set to NO (the default)
+# the constructors will appear in the respective orders defined by
+# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
+# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
SORT_MEMBERS_CTORS_1ST = NO
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
-# hierarchy of group names into alphabetical order. If set to NO (the default)
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
# the group names will appear in their defined order.
SORT_GROUP_NAMES = YES
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
-# sorted by fully-qualified names, including namespaces. If set to
-# NO (the default), the class list will be sorted only by class name,
-# not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
# alphabetical list.
SORT_BY_SCOPE_NAME = YES
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
-# do proper type resolution of all parameters of a function it will reject a
-# match between the prototype and the implementation of a member function even
-# if there is only one candidate or it is obvious which candidate to choose
-# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
+# do proper type resolution of all parameters of a function it will reject a
+# match between the prototype and the implementation of a member function even
+# if there is only one candidate or it is obvious which candidate to choose
+# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
# will still accept a match between prototype and implementation in such cases.
STRICT_PROTO_MATCHING = NO
-# The GENERATE_TODOLIST tag can be used to enable (YES) or
-# disable (NO) the todo list. This list is created by putting \todo
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
# commands in the documentation.
GENERATE_TODOLIST = NO
-# The GENERATE_TESTLIST tag can be used to enable (YES) or
-# disable (NO) the test list. This list is created by putting \test
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
# commands in the documentation.
GENERATE_TESTLIST = YES
-# The GENERATE_BUGLIST tag can be used to enable (YES) or
-# disable (NO) the bug list. This list is created by putting \bug
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
# commands in the documentation.
GENERATE_BUGLIST = YES
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
-# disable (NO) the deprecated list. This list is created by putting
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
# \deprecated commands in the documentation.
GENERATE_DEPRECATEDLIST= YES
-# The ENABLED_SECTIONS tag can be used to enable conditional
+# The ENABLED_SECTIONS tag can be used to enable conditional
# documentation sections, marked by \if sectionname ... \endif.
-ENABLED_SECTIONS =
+ENABLED_SECTIONS =
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
-# the initial value of a variable or macro consists of for it to appear in
-# the documentation. If the initializer consists of more lines than specified
-# here it will be hidden. Use a value of 0 to hide initializers completely.
-# The appearance of the initializer of individual variables and macros in the
-# documentation can be controlled using \showinitializer or \hideinitializer
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or macro consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and macros in the
+# documentation can be controlled using \showinitializer or \hideinitializer
# command in the documentation regardless of this setting.
MAX_INITIALIZER_LINES = 0
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
-# at the bottom of the documentation of classes and structs. If set to YES the
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
# list will mention the files that were used to generate the documentation.
SHOW_USED_FILES = NO
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
-# This will remove the Files entry from the Quick Index and from the
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
# Folder Tree View (if specified). The default is YES.
SHOW_FILES = NO
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
-# Namespaces page. This will remove the Namespaces entry from the Quick Index
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page. This will remove the Namespaces entry from the Quick Index
# and from the Folder Tree View (if specified). The default is YES.
SHOW_NAMESPACES = NO
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command <command> <input-file>, where <command> is the value of
-# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
-# provided by doxygen. Whatever the program writes to standard output
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
# is used as the file version. See the manual for examples.
-FILE_VERSION_FILTER =
+FILE_VERSION_FILTER =
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option.
-# You can optionally specify a file name after the option, if omitted
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option.
+# You can optionally specify a file name after the option, if omitted
# DoxygenLayout.xml will be used as the name of the layout file.
LAYOUT_FILE = style/DoxygenLayout.xml
-# The CITE_BIB_FILES tag can be used to specify one or more bib files
-# containing the references data. This must be a list of .bib files. The
-# .bib extension is automatically appended if omitted. Using this command
-# requires the bibtex tool to be installed. See also
-# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
-# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
+# The CITE_BIB_FILES tag can be used to specify one or more bib files
+# containing the references data. This must be a list of .bib files. The
+# .bib extension is automatically appended if omitted. Using this command
+# requires the bibtex tool to be installed. See also
+# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
+# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
# feature you need bibtex and perl available in the search path.
-CITE_BIB_FILES =
+CITE_BIB_FILES =
#---------------------------------------------------------------------------
# configuration options related to warning and progress messages
#---------------------------------------------------------------------------
-# The QUIET tag can be used to turn on/off the messages that are generated
+# The QUIET tag can be used to turn on/off the messages that are generated
# by doxygen. Possible values are YES and NO. If left blank NO is used.
QUIET = NO
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated by doxygen. Possible values are YES and NO. If left blank
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
# NO is used.
WARNINGS = YES
-# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
-# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
# automatically be disabled.
WARN_IF_UNDOCUMENTED = YES
-# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some
-# parameters in a documented function, or documenting parameters that
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
# don't exist or using markup commands wrongly.
WARN_IF_DOC_ERROR = YES
-# The WARN_NO_PARAMDOC option can be enabled to get warnings for
-# functions that are documented, but have no documentation for their parameters
-# or return value. If set to NO (the default) doxygen will only warn about
-# wrong or incomplete parameter documentation, but not about the absence of
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
# documentation.
WARN_NO_PARAMDOC = YES
-# The WARN_FORMAT tag determines the format of the warning messages that
-# doxygen can produce. The string should contain the $file, $line, and $text
-# tags, which will be replaced by the file and line number from which the
-# warning originated and the warning text. Optionally the format may contain
-# $version, which will be replaced by the version of the file (if it could
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
# be obtained via FILE_VERSION_FILTER)
WARN_FORMAT = "$file:$line: $text"
-# The WARN_LOGFILE tag can be used to specify a file to which warning
-# and error messages should be written. If left blank the output is written
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
# to stderr.
WARN_LOGFILE = doxygen.log
@@ -690,29 +690,29 @@ WARN_LOGFILE = doxygen.log
# configuration options related to the input files
#---------------------------------------------------------------------------
-# The INPUT tag can be used to specify the files and/or directories that contain
-# documented source files. You may enter file names like "myfile.cpp" or
-# directories like "/usr/src/myproject". Separate the files or directories
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
# with spaces.
INPUT = ../include/wiredtiger.in \
../include/wiredtiger_ext.h \
.
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
-# also the default input encoding. Doxygen uses libiconv (or the iconv built
-# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
# the list of possible encodings.
INPUT_ENCODING = UTF-8
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank the following patterns are tested:
-# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
-# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
+# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
# *.f90 *.f *.for *.vhd *.vhdl
FILE_PATTERNS = *.c \
@@ -747,16 +747,16 @@ FILE_PATTERNS = *.c \
*.vhd \
*.vhdl
-# The RECURSIVE tag can be used to turn specify whether or not subdirectories
-# should be searched for input files as well. Possible values are YES and NO.
+# The RECURSIVE tag can be used to turn specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
# If left blank NO is used.
RECURSIVE = YES
-# The EXCLUDE tag can be used to specify files and/or directories that should be
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-# Note that relative paths are relative to the directory from which doxygen is
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+# Note that relative paths are relative to the directory from which doxygen is
# run.
EXCLUDE = bdb-map.dox \
@@ -766,144 +766,145 @@ EXCLUDE = bdb-map.dox \
tools \
top
-# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
-# directories that are symbolic links (a Unix file system feature) are excluded
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
# from the input.
EXCLUDE_SYMLINKS = NO
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories. Note that the wildcards are matched
-# against the file with absolute path, so to exclude all test directories
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
# for example use the pattern */test/*
-EXCLUDE_PATTERNS =
+EXCLUDE_PATTERNS =
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
# AClass::ANamespace, ANamespace::*Test
EXCLUDE_SYMBOLS = __F \
doc_*
-# The EXAMPLE_PATH tag can be used to specify one or more files or
-# directories that contain example code fragments that are included (see
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
# the \include command).
EXAMPLE_PATH = ../../examples/c \
../../ext/compressors/nop \
../../ext/encryptors/nop \
../../ext/encryptors/rotn \
- ../../examples/python
+ ../../examples/python \
+ ../../test/fuzz
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
# blank all files are included.
-EXAMPLE_PATTERNS =
+EXAMPLE_PATTERNS =
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude
-# commands irrespective of the value of the RECURSIVE tag.
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
# Possible values are YES and NO. If left blank NO is used.
EXAMPLE_RECURSIVE = NO
-# The IMAGE_PATH tag can be used to specify one or more files or
-# directories that contain image that are included in the documentation (see
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain image that are included in the documentation (see
# the \image command).
IMAGE_PATH = images
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command <filter> <input-file>, where <filter>
-# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
-# input file. Doxygen will then use the output that the filter program writes
-# to standard output. If FILTER_PATTERNS is specified, this tag will be
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output. If FILTER_PATTERNS is specified, this tag will be
# ignored.
-INPUT_FILTER =
+INPUT_FILTER =
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis. Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match. The filters are a list of the form:
-# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
-# info on how filters are used. If FILTER_PATTERNS is empty or if
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty or if
# non of the patterns match the file name, INPUT_FILTER is applied.
FILTER_PATTERNS = *.py=tools/pyfilter \
*.dox=tools/doxfilter
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will be used to filter the input files when producing source
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
# files to browse (i.e. when SOURCE_BROWSER is set to YES).
FILTER_SOURCE_FILES = NO
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
-# and it is also possible to disable source filtering for a specific pattern
-# using *.ext= (so without naming a filter). This option only has effect when
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
+# and it is also possible to disable source filtering for a specific pattern
+# using *.ext= (so without naming a filter). This option only has effect when
# FILTER_SOURCE_FILES is enabled.
-FILTER_SOURCE_PATTERNS =
+FILTER_SOURCE_PATTERNS =
#---------------------------------------------------------------------------
# configuration options related to source browsing
#---------------------------------------------------------------------------
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will
-# be generated. Documented entities will be cross-referenced with these sources.
-# Note: To get rid of all source code in the generated output, make sure also
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
# VERBATIM_HEADERS is set to NO.
SOURCE_BROWSER = NO
-# Setting the INLINE_SOURCES tag to YES will include the body
+# Setting the INLINE_SOURCES tag to YES will include the body
# of functions and classes directly in the documentation.
INLINE_SOURCES = NO
-# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
-# doxygen to hide any special comment blocks from generated source code
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
# fragments. Normal C, C++ and Fortran comments will always remain visible.
STRIP_CODE_COMMENTS = YES
-# If the REFERENCED_BY_RELATION tag is set to YES
-# then for each documented function all documented
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
# functions referencing it will be listed.
REFERENCED_BY_RELATION = NO
-# If the REFERENCES_RELATION tag is set to YES
-# then for each documented function all documented entities
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
# called/used by that function will be listed.
REFERENCES_RELATION = NO
-# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
-# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
-# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
# link to the source code. Otherwise they will link to the documentation.
REFERENCES_LINK_SOURCE = NO
-# If the USE_HTAGS tag is set to YES then the references to source code
-# will point to the HTML generated by the htags(1) tool instead of doxygen
-# built-in source browser. The htags tool is part of GNU's global source
-# tagging system (see http://www.gnu.org/software/global/global.html). You
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
# will need version 4.8.6 or higher.
USE_HTAGS = NO
-# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
-# will generate a verbatim copy of the header file for each class for
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
# which an include is specified. Set to NO to disable this.
VERBATIM_HEADERS = NO
@@ -912,21 +913,21 @@ VERBATIM_HEADERS = NO
# configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
-# of all compounds will be generated. Enable this if the project
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
# contains a lot of classes, structs, unions or interfaces.
ALPHABETICAL_INDEX = NO
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
# in which this list will be split (can be a number in the range [1..20])
COLS_IN_ALPHA_INDEX = 5
-# In case all classes in a project start with a common prefix, all
-# classes will be put under the same header in the alphabetical index.
-# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
# should be ignored while generating the index headers.
IGNORE_PREFIX = WT_
@@ -935,147 +936,147 @@ IGNORE_PREFIX = WT_
# configuration options related to the HTML output
#---------------------------------------------------------------------------
-# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
# generate HTML output.
GENERATE_HTML = YES
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `html' will be used as the default path.
HTML_OUTPUT = .
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
-# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
# doxygen will generate files with .html extension.
HTML_FILE_EXTENSION = .html
-# The HTML_HEADER tag can be used to specify a personal HTML header for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard header. Note that when using a custom header you are responsible
-# for the proper inclusion of any scripts and style sheets that doxygen
-# needs, which is dependent on the configuration options used.
-# It is advised to generate a default header using "doxygen -w html
-# header.html footer.html stylesheet.css YourConfigFile" and then modify
-# that header. Note that the header is subject to change so you typically
-# have to redo this when upgrading to a newer version of doxygen or when
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header. Note that when using a custom header you are responsible
+# for the proper inclusion of any scripts and style sheets that doxygen
+# needs, which is dependent on the configuration options used.
+# It is advised to generate a default header using "doxygen -w html
+# header.html footer.html stylesheet.css YourConfigFile" and then modify
+# that header. Note that the header is subject to change so you typically
+# have to redo this when upgrading to a newer version of doxygen or when
# changing the value of configuration settings such as GENERATE_TREEVIEW!
HTML_HEADER = style/header.html
-# The HTML_FOOTER tag can be used to specify a personal HTML footer for
-# each generated HTML page. If it is left blank doxygen will generate a
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
# standard footer.
HTML_FOOTER = style/footer.html
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
-# style sheet that is used by each HTML page. It can be used to
-# fine-tune the look of the HTML output. If left blank doxygen will
-# generate a default style sheet. Note that it is recommended to use
-# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If left blank doxygen will
+# generate a default style sheet. Note that it is recommended to use
+# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this
# tag will in the future become obsolete.
-HTML_STYLESHEET =
+HTML_STYLESHEET =
-# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional
-# user-defined cascading style sheet that is included after the standard
-# style sheets created by doxygen. Using this option one can overrule
-# certain style aspects. This is preferred over using HTML_STYLESHEET
-# since it does not replace the standard style sheet and is therefor more
-# robust against future updates. Doxygen will copy the style sheet file to
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional
+# user-defined cascading style sheet that is included after the standard
+# style sheets created by doxygen. Using this option one can overrule
+# certain style aspects. This is preferred over using HTML_STYLESHEET
+# since it does not replace the standard style sheet and is therefor more
+# robust against future updates. Doxygen will copy the style sheet file to
# the output directory.
HTML_EXTRA_STYLESHEET = style/wiredtiger.css
-# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the HTML output directory. Note
-# that these files will be copied to the base HTML output directory. Use the
-# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that
# the files will be copied as-is; there are no commands or markers available.
-HTML_EXTRA_FILES =
+HTML_EXTRA_FILES =
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
-# Doxygen will adjust the colors in the style sheet and background images
-# according to this color. Hue is specified as an angle on a colorwheel,
-# see http://en.wikipedia.org/wiki/Hue for more information.
-# For instance the value 0 represents red, 60 is yellow, 120 is green,
-# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
+# Doxygen will adjust the colors in the style sheet and background images
+# according to this color. Hue is specified as an angle on a colorwheel,
+# see http://en.wikipedia.org/wiki/Hue for more information.
+# For instance the value 0 represents red, 60 is yellow, 120 is green,
+# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
# The allowed range is 0 to 359.
HTML_COLORSTYLE_HUE = 34
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
-# the colors in the HTML output. For a value of 0 the output will use
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
+# the colors in the HTML output. For a value of 0 the output will use
# grayscales only. A value of 255 will produce the most vivid colors.
HTML_COLORSTYLE_SAT = 81
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
-# the luminance component of the colors in the HTML output. Values below
-# 100 gradually make the output lighter, whereas values above 100 make
-# the output darker. The value divided by 100 is the actual gamma applied,
-# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
+# the luminance component of the colors in the HTML output. Values below
+# 100 gradually make the output lighter, whereas values above 100 make
+# the output darker. The value divided by 100 is the actual gamma applied,
+# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
# and 100 does not change the gamma.
HTML_COLORSTYLE_GAMMA = 96
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting
# this to NO can help when comparing the output of multiple runs.
HTML_TIMESTAMP = YES
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
# page has loaded.
HTML_DYNAMIC_SECTIONS = YES
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
-# entries shown in the various tree structured indices initially; the user
-# can expand and collapse entries dynamically later on. Doxygen will expand
-# the tree to such a level that at most the specified number of entries are
-# visible (unless a fully collapsed tree already exceeds this amount).
-# So setting the number of entries 1 will produce a full collapsed tree by
-# default. 0 is a special value representing an infinite number of entries
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
+# entries shown in the various tree structured indices initially; the user
+# can expand and collapse entries dynamically later on. Doxygen will expand
+# the tree to such a level that at most the specified number of entries are
+# visible (unless a fully collapsed tree already exceeds this amount).
+# So setting the number of entries 1 will produce a full collapsed tree by
+# default. 0 is a special value representing an infinite number of entries
# and will result in a full expanded tree by default.
HTML_INDEX_NUM_ENTRIES = 100
-# If the GENERATE_DOCSET tag is set to YES, additional index files
-# will be generated that can be used as input for Apple's Xcode 3
-# integrated development environment, introduced with OSX 10.5 (Leopard).
-# To create a documentation set, doxygen will generate a Makefile in the
-# HTML output directory. Running make will produce the docset in that
-# directory and running "make install" will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
-# it at startup.
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
# for more information.
GENERATE_DOCSET = NO
-# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
-# feed. A documentation feed provides an umbrella under which multiple
-# documentation sets from a single provider (such as a company or product suite)
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
# can be grouped.
DOCSET_FEEDNAME = "Doxygen generated docs"
-# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
-# should uniquely identify the documentation set bundle. This should be a
-# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
# will append .docset to the name.
DOCSET_BUNDLE_ID = org.doxygen.Project
-# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely
-# identify the documentation publisher. This should be a reverse domain-name
+# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely
+# identify the documentation publisher. This should be a reverse domain-name
# style string, e.g. com.mycompany.MyDocSet.documentation.
DOCSET_PUBLISHER_ID = org.doxygen.Publisher
@@ -1084,215 +1085,215 @@ DOCSET_PUBLISHER_ID = org.doxygen.Publisher
DOCSET_PUBLISHER_NAME = Publisher
-# If the GENERATE_HTMLHELP tag is set to YES, additional index files
-# will be generated that can be used as input for tools like the
-# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
# of the generated HTML documentation.
GENERATE_HTMLHELP = NO
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
-# be used to specify the file name of the resulting .chm file. You
-# can add a path in front of the file if the result should not be
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
# written to the html output directory.
-CHM_FILE =
+CHM_FILE =
-# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
-# be used to specify the location (absolute path including file name) of
-# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
# the HTML help compiler on the generated index.hhp.
-HHC_LOCATION =
+HHC_LOCATION =
-# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
-# controls if a separate .chi index file is generated (YES) or that
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
# it should be included in the master .chm file (NO).
GENERATE_CHI = NO
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
-# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
# content.
-CHM_INDEX_ENCODING =
+CHM_INDEX_ENCODING =
-# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
-# controls whether a binary table of contents is generated (YES) or a
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
# normal table of contents (NO) in the .chm file.
BINARY_TOC = NO
-# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
# to the contents of the HTML help documentation and to the tree view.
TOC_EXPAND = NO
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
-# that can be used as input for Qt's qhelpgenerator to generate a
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
+# that can be used as input for Qt's qhelpgenerator to generate a
# Qt Compressed Help (.qch) of the generated HTML documentation.
GENERATE_QHP = NO
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
-# be used to specify the file name of the resulting .qch file.
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
# The path specified is relative to the HTML output folder.
-QCH_FILE =
+QCH_FILE =
-# The QHP_NAMESPACE tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
# http://doc.trolltech.com/qthelpproject.html#namespace
QHP_NAMESPACE = org.doxygen.Project
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
# http://doc.trolltech.com/qthelpproject.html#virtual-folders
QHP_VIRTUAL_FOLDER = doc
-# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
-# add. For more information please see
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
+# add. For more information please see
# http://doc.trolltech.com/qthelpproject.html#custom-filters
-QHP_CUST_FILTER_NAME =
+QHP_CUST_FILTER_NAME =
-# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see
-# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
+# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
# Qt Help Project / Custom Filters</a>.
-QHP_CUST_FILTER_ATTRS =
+QHP_CUST_FILTER_ATTRS =
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's
-# filter section matches.
-# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's
+# filter section matches.
+# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
# Qt Help Project / Filter Attributes</a>.
-QHP_SECT_FILTER_ATTRS =
+QHP_SECT_FILTER_ATTRS =
-# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
-# be used to specify the location of Qt's qhelpgenerator.
-# If non-empty doxygen will try to run qhelpgenerator on the generated
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
# .qhp file.
-QHG_LOCATION =
+QHG_LOCATION =
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
-# will be generated, which together with the HTML files, form an Eclipse help
-# plugin. To install this plugin and make it available under the help contents
-# menu in Eclipse, the contents of the directory containing the HTML and XML
-# files needs to be copied into the plugins directory of eclipse. The name of
-# the directory within the plugins directory should be the same as
-# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
+# will be generated, which together with the HTML files, form an Eclipse help
+# plugin. To install this plugin and make it available under the help contents
+# menu in Eclipse, the contents of the directory containing the HTML and XML
+# files needs to be copied into the plugins directory of eclipse. The name of
+# the directory within the plugins directory should be the same as
+# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
# the help appears.
GENERATE_ECLIPSEHELP = NO
-# A unique identifier for the eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have
+# A unique identifier for the eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have
# this name.
ECLIPSE_DOC_ID = org.doxygen.Project
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
-# at top of each HTML page. The value NO (the default) enables the index and
-# the value YES disables it. Since the tabs have the same information as the
-# navigation tree you can set this option to NO if you already set
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
+# at top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it. Since the tabs have the same information as the
+# navigation tree you can set this option to NO if you already set
# GENERATE_TREEVIEW to YES.
DISABLE_INDEX = NO
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information.
-# If the tag value is set to YES, a side panel will be generated
-# containing a tree-like index structure (just like the one that
-# is generated for HTML Help). For this to work a browser that supports
-# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
-# Windows users are probably better off using the HTML help feature.
-# Since the tree basically has the same information as the tab index you
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+# Since the tree basically has the same information as the tab index you
# could consider to set DISABLE_INDEX to NO when enabling this option.
GENERATE_TREEVIEW = YES
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
-# (range [0,1..20]) that doxygen will group on one line in the generated HTML
-# documentation. Note that a value of 0 will completely suppress the enum
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
+# (range [0,1..20]) that doxygen will group on one line in the generated HTML
+# documentation. Note that a value of 0 will completely suppress the enum
# values from appearing in the overview section.
ENUM_VALUES_PER_LINE = 4
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
-# used to set the initial width (in pixels) of the frame in which the tree
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
# is shown.
TREEVIEW_WIDTH = 200
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
# links to external symbols imported via tag files in a separate window.
EXT_LINKS_IN_WINDOW = NO
-# Use this tag to change the font size of Latex formulas included
-# as images in the HTML documentation. The default is 10. Note that
-# when you change the font size after a successful doxygen run you need
-# to manually remove any form_*.png images from the HTML output directory
+# Use this tag to change the font size of Latex formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
# to force them to be regenerated.
FORMULA_FONTSIZE = 10
-# Use the FORMULA_TRANPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are
-# not supported properly for IE 6.0, but are supported on all modern browsers.
-# Note that when changing this option you need to delete any form_*.png files
+# Use the FORMULA_TRANPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are
+# not supported properly for IE 6.0, but are supported on all modern browsers.
+# Note that when changing this option you need to delete any form_*.png files
# in the HTML output before the changes have effect.
FORMULA_TRANSPARENT = YES
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
-# (see http://www.mathjax.org) which uses client side Javascript for the
-# rendering instead of using prerendered bitmaps. Use this if you do not
-# have LaTeX installed or if you want to formulas look prettier in the HTML
-# output. When enabled you may also need to install MathJax separately and
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
+# (see http://www.mathjax.org) which uses client side Javascript for the
+# rendering instead of using prerendered bitmaps. Use this if you do not
+# have LaTeX installed or if you want to formulas look prettier in the HTML
+# output. When enabled you may also need to install MathJax separately and
# configure the path to it using the MATHJAX_RELPATH option.
USE_MATHJAX = NO
-# When MathJax is enabled you need to specify the location relative to the
-# HTML output directory using the MATHJAX_RELPATH option. The destination
-# directory should contain the MathJax.js script. For instance, if the mathjax
-# directory is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to
-# the MathJax Content Delivery Network so you can quickly see the result without
-# installing MathJax. However, it is strongly recommended to install a local
+# When MathJax is enabled you need to specify the location relative to the
+# HTML output directory using the MATHJAX_RELPATH option. The destination
+# directory should contain the MathJax.js script. For instance, if the mathjax
+# directory is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to
+# the MathJax Content Delivery Network so you can quickly see the result without
+# installing MathJax. However, it is strongly recommended to install a local
# copy of MathJax from http://www.mathjax.org before deployment.
MATHJAX_RELPATH = http://www.mathjax.org/mathjax
-# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
+# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
# names that should be enabled during MathJax rendering.
-MATHJAX_EXTENSIONS =
+MATHJAX_EXTENSIONS =
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box
-# for the HTML output. The underlying search engine uses javascript
-# and DHTML and should work on any modern browser. Note that when using
-# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
-# (GENERATE_DOCSET) there is already a search function so this one should
-# typically be disabled. For large projects the javascript based search engine
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box
+# for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using
+# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
+# (GENERATE_DOCSET) there is already a search function so this one should
+# typically be disabled. For large projects the javascript based search engine
# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
SEARCHENGINE = NO
-# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a PHP enabled web server instead of at the web client
-# using Javascript. Doxygen will generate the search PHP script and index
-# file to put on the web server. The advantage of the server
-# based approach is that it scales better to large projects and allows
-# full text search. The disadvantages are that it is more difficult to setup
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a PHP enabled web server instead of at the web client
+# using Javascript. Doxygen will generate the search PHP script and index
+# file to put on the web server. The advantage of the server
+# based approach is that it scales better to large projects and allows
+# full text search. The disadvantages are that it is more difficult to setup
# and does not have live searching capabilities.
SERVER_BASED_SEARCH = NO
@@ -1301,97 +1302,97 @@ SERVER_BASED_SEARCH = NO
# configuration options related to the LaTeX output
#---------------------------------------------------------------------------
-# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
# generate Latex output.
GENERATE_LATEX = NO
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `latex' will be used as the default path.
LATEX_OUTPUT = latex
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked. If left blank `latex' will be used as the default command name.
-# Note that when enabling USE_PDFLATEX this option is only used for
-# generating bitmaps for formulas in the HTML output, but not in the
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+# Note that when enabling USE_PDFLATEX this option is only used for
+# generating bitmaps for formulas in the HTML output, but not in the
# Makefile that is written to the output directory.
LATEX_CMD_NAME = latex
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
-# generate index for LaTeX. If left blank `makeindex' will be used as the
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
# default command name.
MAKEINDEX_CMD_NAME = makeindex
-# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
-# LaTeX documents. This may be useful for small projects and may help to
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
# save some trees in general.
COMPACT_LATEX = NO
-# The PAPER_TYPE tag can be used to set the paper type that is used
-# by the printer. Possible values are: a4, letter, legal and
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, letter, legal and
# executive. If left blank a4wide will be used.
PAPER_TYPE = a4wide
-# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX
+# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX
# packages that should be included in the LaTeX output.
-EXTRA_PACKAGES =
+EXTRA_PACKAGES =
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
-# the generated latex document. The header should contain everything until
-# the first chapter. If it is left blank doxygen will generate a
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
# standard header. Notice: only use this tag if you know what you are doing!
-LATEX_HEADER =
+LATEX_HEADER =
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
-# the generated latex document. The footer should contain everything after
-# the last chapter. If it is left blank doxygen will generate a
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
+# the generated latex document. The footer should contain everything after
+# the last chapter. If it is left blank doxygen will generate a
# standard footer. Notice: only use this tag if you know what you are doing!
-LATEX_FOOTER =
+LATEX_FOOTER =
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
-# is prepared for conversion to pdf (using ps2pdf). The pdf file will
-# contain links (just like the HTML output) instead of page references
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
# This makes the output suitable for online browsing using a pdf viewer.
PDF_HYPERLINKS = YES
-# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
-# plain latex in the generated Makefile. Set this option to YES to get a
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
# higher quality PDF documentation.
USE_PDFLATEX = YES
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
-# command to the generated LaTeX files. This will instruct LaTeX to keep
-# running if errors occur, instead of asking the user for help.
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
# This option is also used when generating formulas in HTML.
LATEX_BATCHMODE = NO
-# If LATEX_HIDE_INDICES is set to YES then doxygen will not
-# include the index chapters (such as File Index, Compound Index, etc.)
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
# in the output.
LATEX_HIDE_INDICES = YES
-# If LATEX_SOURCE_CODE is set to YES then doxygen will include
-# source code with syntax highlighting in the LaTeX output.
-# Note that which sources are shown also depends on other settings
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include
+# source code with syntax highlighting in the LaTeX output.
+# Note that which sources are shown also depends on other settings
# such as SOURCE_BROWSER.
LATEX_SOURCE_CODE = YES
-# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
-# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
# http://en.wikipedia.org/wiki/BibTeX for more info.
LATEX_BIB_STYLE = plain
@@ -1400,68 +1401,68 @@ LATEX_BIB_STYLE = plain
# configuration options related to the RTF output
#---------------------------------------------------------------------------
-# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
-# The RTF output is optimized for Word 97 and may not look very pretty with
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
# other RTF readers or editors.
GENERATE_RTF = NO
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `rtf' will be used as the default path.
RTF_OUTPUT = rtf
-# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
-# RTF documents. This may be useful for small projects and may help to
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
# save some trees in general.
COMPACT_RTF = NO
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
-# will contain hyperlink fields. The RTF file will
-# contain links (just like the HTML output) instead of page references.
-# This makes the output suitable for online browsing using WORD or other
-# programs which support those fields.
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
# Note: wordpad (write) and others do not support links.
RTF_HYPERLINKS = NO
-# Load style sheet definitions from file. Syntax is similar to doxygen's
-# config file, i.e. a series of assignments. You only have to provide
+# Load style sheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
# replacements, missing definitions are set to their default value.
-RTF_STYLESHEET_FILE =
+RTF_STYLESHEET_FILE =
-# Set optional variables used in the generation of an rtf document.
+# Set optional variables used in the generation of an rtf document.
# Syntax is similar to doxygen's config file.
-RTF_EXTENSIONS_FILE =
+RTF_EXTENSIONS_FILE =
#---------------------------------------------------------------------------
# configuration options related to the man page output
#---------------------------------------------------------------------------
-# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
# generate man pages
GENERATE_MAN = YES
-# The MAN_OUTPUT tag is used to specify where the man pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `man' will be used as the default path.
MAN_OUTPUT = man
-# The MAN_EXTENSION tag determines the extension that is added to
+# The MAN_EXTENSION tag determines the extension that is added to
# the generated man pages (default is the subroutine's section .3)
MAN_EXTENSION = .3
-# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
-# then it will generate one additional man file for each entity
-# documented in the real man page(s). These additional files
-# only source the real man page, but without them the man command
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
# would be unable to find the correct page. The default is NO.
MAN_LINKS = NO
@@ -1470,8 +1471,8 @@ MAN_LINKS = NO
# configuration options related to the XML output
#---------------------------------------------------------------------------
-# If the GENERATE_XML tag is set to YES Doxygen will
-# generate an XML file that captures the structure of
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
# the code including all documentation.
GENERATE_XML = NO
@@ -1480,10 +1481,10 @@ GENERATE_XML = NO
# configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
-# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
-# generate an AutoGen Definitions (see autogen.sf.net) file
-# that captures the structure of the code including all
-# documentation. Note that this feature is still experimental
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
# and incomplete at the moment.
GENERATE_AUTOGEN_DEF = NO
@@ -1492,82 +1493,82 @@ GENERATE_AUTOGEN_DEF = NO
# configuration options related to the Perl module output
#---------------------------------------------------------------------------
-# If the GENERATE_PERLMOD tag is set to YES Doxygen will
-# generate a Perl module file that captures the structure of
-# the code including all documentation. Note that this
-# feature is still experimental and incomplete at the
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
# moment.
GENERATE_PERLMOD = NO
-# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
-# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
# to generate PDF and DVI output from the Perl module output.
PERLMOD_LATEX = NO
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
-# nicely formatted so it can be parsed by a human reader. This is useful
-# if you want to understand what is going on. On the other hand, if this
-# tag is set to NO the size of the Perl module output will be much smaller
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful
+# if you want to understand what is going on. On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
# and Perl will parse it just the same.
PERLMOD_PRETTY = YES
-# The names of the make variables in the generated doxyrules.make file
-# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
-# This is useful so different doxyrules.make files included by the same
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
# Makefile don't overwrite each other's variables.
-PERLMOD_MAKEVAR_PREFIX =
+PERLMOD_MAKEVAR_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
-# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
-# evaluate all C-preprocessor directives found in the sources and include
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
# files.
ENABLE_PREPROCESSING = YES
-# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
-# names in the source code. If set to NO (the default) only conditional
-# compilation will be performed. Macro expansion can be done in a controlled
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
# way by setting EXPAND_ONLY_PREDEF to YES.
MACRO_EXPANSION = YES
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
-# then the macro expansion is limited to the macros specified with the
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
# PREDEFINED and EXPAND_AS_DEFINED tags.
EXPAND_ONLY_PREDEF = NO
-# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
# pointed to by INCLUDE_PATH will be searched when a #include is found.
SEARCH_INCLUDES = YES
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
# the preprocessor.
-INCLUDE_PATH =
+INCLUDE_PATH =
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
-# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
# be used.
-INCLUDE_FILE_PATTERNS =
+INCLUDE_FILE_PATTERNS =
-# The PREDEFINED tag can be used to specify one or more macro names that
-# are defined before the preprocessor is started (similar to the -D option of
-# gcc). The argument of the tag is a list of macros of the form: name
-# or name=definition (no spaces). If the definition and the = are
-# omitted =1 is assumed. To prevent a macro definition from being
-# undefined via #undef or recursively expanded use the := operator
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
# instead of the = operator.
PREDEFINED = DOXYGEN \
@@ -1594,17 +1595,17 @@ PREDEFINED = DOXYGEN \
WT_HANDLE_CLOSED(x):=x \
WT_HANDLE_NULLABLE(x):=x
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
-# this tag can be used to specify a list of macro names that should be expanded.
-# The macro definition that is found in the sources will be used.
-# Use the PREDEFINED tag if you want to use a different macro definition that
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition that
# overrules the definition found in the source code.
-EXPAND_AS_DEFINED =
+EXPAND_AS_DEFINED =
-# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
-# doxygen's preprocessor will remove all references to function-like macros
-# that are alone on a line, have an all uppercase name, and do not end with a
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all references to function-like macros
+# that are alone on a line, have an all uppercase name, and do not end with a
# semicolon, because these will confuse the parser if not removed.
SKIP_FUNCTION_MACROS = YES
@@ -1613,37 +1614,37 @@ SKIP_FUNCTION_MACROS = YES
# Configuration::additions related to external references
#---------------------------------------------------------------------------
-# The TAGFILES option can be used to specify one or more tagfiles. For each
-# tag file the location of the external documentation should be added. The
-# format of a tag file without this location is as follows:
-# TAGFILES = file1 file2 ...
-# Adding location for the tag files is done as follows:
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where "loc1" and "loc2" can be relative or absolute paths
-# or URLs. Note that each tag file must have a unique name (where the name does
-# NOT include the path). If a tag file is not located in the directory in which
+# The TAGFILES option can be used to specify one or more tagfiles. For each
+# tag file the location of the external documentation should be added. The
+# format of a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths
+# or URLs. Note that each tag file must have a unique name (where the name does
+# NOT include the path). If a tag file is not located in the directory in which
# doxygen is run, you must also specify the path to the tagfile here.
-TAGFILES =
+TAGFILES =
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
# a tag file that is based on the input files it reads.
-GENERATE_TAGFILE =
+GENERATE_TAGFILE =
-# If the ALLEXTERNALS tag is set to YES all external classes will be listed
-# in the class index. If set to NO only the inherited external classes
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
# will be listed.
ALLEXTERNALS = NO
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
# be listed.
EXTERNAL_GROUPS = YES
-# The PERL_PATH should be the absolute path and name of the perl script
+# The PERL_PATH should be the absolute path and name of the perl script
# interpreter (i.e. the result of `which perl').
PERL_PATH = /usr/bin/perl
@@ -1652,222 +1653,222 @@ PERL_PATH = /usr/bin/perl
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
-# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
-# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base
-# or super classes. Setting the tag to NO turns the diagrams off. Note that
-# this option also works with HAVE_DOT disabled, but it is recommended to
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option also works with HAVE_DOT disabled, but it is recommended to
# install and use dot, since it yields more powerful graphs.
CLASS_DIAGRAMS = NO
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see
-# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
# default search path.
-MSCGEN_PATH =
+MSCGEN_PATH =
-# If set to YES, the inheritance and collaboration graphs will hide
-# inheritance and usage relations if the target is undocumented
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
# or is not a class.
HIDE_UNDOC_RELATIONS = YES
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz, a graph visualization
-# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
# have no effect if this option is set to NO (the default)
HAVE_DOT = NO
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
-# allowed to run in parallel. When set to 0 (the default) doxygen will
-# base this on the number of processors available in the system. You can set it
-# explicitly to a value larger than 0 to get control over the balance
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
+# allowed to run in parallel. When set to 0 (the default) doxygen will
+# base this on the number of processors available in the system. You can set it
+# explicitly to a value larger than 0 to get control over the balance
# between CPU load and processing speed.
DOT_NUM_THREADS = 0
-# By default doxygen will use the Helvetica font for all dot files that
-# doxygen generates. When you want a differently looking font you can specify
-# the font name using DOT_FONTNAME. You need to make sure dot is able to find
-# the font, which can be done by putting it in a standard location or by setting
-# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
+# By default doxygen will use the Helvetica font for all dot files that
+# doxygen generates. When you want a differently looking font you can specify
+# the font name using DOT_FONTNAME. You need to make sure dot is able to find
+# the font, which can be done by putting it in a standard location or by setting
+# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
# directory containing the font.
-DOT_FONTNAME =
+DOT_FONTNAME =
-# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
# The default size is 10pt.
DOT_FONTSIZE = 10
-# By default doxygen will tell dot to use the Helvetica font.
-# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
+# By default doxygen will tell dot to use the Helvetica font.
+# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
# set the path where dot can find it.
-DOT_FONTPATH =
+DOT_FONTPATH =
-# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect inheritance relations. Setting this tag to YES will force the
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force the
# CLASS_DIAGRAMS tag to NO.
CLASS_GRAPH = YES
-# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect implementation dependencies (inheritance, containment, and
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
# class references variables) of the class with other documented classes.
COLLABORATION_GRAPH = YES
-# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
# will generate a graph for groups, showing the direct groups dependencies
GROUP_GRAPHS = YES
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
-# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
# Language.
UML_LOOK = NO
-# If the UML_LOOK tag is enabled, the fields and methods are shown inside
-# the class node. If there are many fields or methods and many nodes the
-# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
-# threshold limits the number of items for each type to make the size more
-# managable. Set this to 0 for no limit. Note that the threshold may be
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside
+# the class node. If there are many fields or methods and many nodes the
+# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
+# threshold limits the number of items for each type to make the size more
+# managable. Set this to 0 for no limit. Note that the threshold may be
# exceeded by 50% before the limit is enforced.
UML_LIMIT_NUM_FIELDS = 10
-# If set to YES, the inheritance and collaboration graphs will show the
+# If set to YES, the inheritance and collaboration graphs will show the
# relations between templates and their instances.
TEMPLATE_RELATIONS = NO
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
-# tags are set to YES then doxygen will generate a graph for each documented
-# file showing the direct and indirect include dependencies of the file with
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
# other documented files.
INCLUDE_GRAPH = YES
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
-# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
-# documented header file showing the documented files that directly or
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
# indirectly include this file.
INCLUDED_BY_GRAPH = YES
-# If the CALL_GRAPH and HAVE_DOT options are set to YES then
-# doxygen will generate a call dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable call graphs
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
# for selected functions only using the \callgraph command.
CALL_GRAPH = NO
-# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
-# doxygen will generate a caller dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable caller
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
# graphs for selected functions only using the \callergraph command.
CALLER_GRAPH = NO
-# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
# will generate a graphical hierarchy of all classes instead of a textual one.
GRAPHICAL_HIERARCHY = YES
-# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
-# then doxygen will show the dependencies a directory has on other directories
-# in a graphical way. The dependency relations are determined by the #include
+# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
# relations between the files in the directories.
DIRECTORY_GRAPH = YES
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot. Possible values are svg, png, jpg, or gif.
-# If left blank png will be used. If you choose svg you need to set
-# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are svg, png, jpg, or gif.
+# If left blank png will be used. If you choose svg you need to set
+# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
# visible in IE 9+ (other browsers do not have this requirement).
DOT_IMAGE_FORMAT = png
-# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
-# enable generation of interactive SVG images that allow zooming and panning.
-# Note that this requires a modern browser other than Internet Explorer.
-# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
-# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+# Note that this requires a modern browser other than Internet Explorer.
+# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
+# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
# visible. Older versions of IE do not have SVG support.
INTERACTIVE_SVG = NO
-# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
# found. If left blank, it is assumed the dot tool can be found in the path.
-DOT_PATH =
+DOT_PATH =
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
# \dotfile command).
-DOTFILE_DIRS =
+DOTFILE_DIRS =
-# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the
# \mscfile command).
-MSCFILE_DIRS =
+MSCFILE_DIRS =
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
-# nodes that will be shown in the graph. If the number of nodes in a graph
-# becomes larger than this value, doxygen will truncate the graph, which is
-# visualized by representing a node as a red box. Note that doxygen if the
-# number of direct children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that doxygen if the
+# number of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
DOT_GRAPH_MAX_NODES = 50
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
-# graphs generated by dot. A depth value of 3 means that only nodes reachable
-# from the root by following a path via at most 3 edges will be shown. Nodes
-# that lay further from the root node will be omitted. Note that setting this
-# option to 1 or 2 may greatly reduce the computation time needed for large
-# code bases. Also note that the size of a graph can be further restricted by
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lay further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
MAX_DOT_GRAPH_DEPTH = 0
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not
-# seem to support this out of the box. Warning: Depending on the platform used,
-# enabling this option may lead to badly anti-aliased labels on the edges of
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
# a graph (i.e. they become hard to read).
DOT_TRANSPARENT = NO
-# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
-# files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
# support this, this feature is disabled by default.
DOT_MULTI_TARGETS = NO
-# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
-# generate a legend page explaining the meaning of the various boxes and
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
# arrows in the dot generated graphs.
GENERATE_LEGEND = YES
-# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
-# remove the intermediate dot files that are used to generate
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
# the various graphs.
DOT_CLEANUP = YES
diff --git a/src/third_party/wiredtiger/src/docs/arch-checkpoint.dox b/src/third_party/wiredtiger/src/docs/arch-checkpoint.dox
new file mode 100644
index 00000000000..aff4f72bb89
--- /dev/null
+++ b/src/third_party/wiredtiger/src/docs/arch-checkpoint.dox
@@ -0,0 +1,88 @@
+/*! @arch_page arch-checkpoint Checkpoint
+
+# Overview #
+
+A checkpoint is a known point in time from which WiredTiger can recover in the event of a
+crash or unexpected shutdown. WiredTiger checkpoints are created either via the API
+WT_SESSION::checkpoint, or internally. Internally checkpoints are created on startup, shutdown
+and during compaction.
+
+A checkpoint is performed within the context of a snapshot isolation transaction; as such, the
+checkpoint has a consistent view of the database from beginning to end. Typically when running a
+checkpoint the configuration \c "use_timestamp=true" is specified. This instructs WiredTiger to set
+the \c checkpoint_timestamp to be the current \c stable_timestamp. As of the latest version of
+WiredTiger the \c checkpoint_timestamp timestamp is not used as a \c read_timestamp for the
+checkpoint transaction. The \c checkpoint_timestamp is written out with the metadata information for
+the checkpoint. On startup WiredTiger will set the \c stable_timestamp internally to the timestamp
+contained in the metadata, and roll back updates which are newer than the \c stable_timestamp, see:
+WT_CONNECTION::rollback_to_stable.
+
+# The checkpoint algorithm #
+
+A checkpoint can be broken up into 5 main stages:
+
+_The prepare stage:_
+
+Checkpoint prepare sets up the checkpoint, it begins the checkpoint transaction, updates the global
+checkpoint state and gathers a list of handles to be checkpointed. A global schema lock wraps
+checkpoint prepare to avoid any tables being created or dropped during this phase, additionally the
+global transaction lock is taken during this process as it must modify the global transaction state,
+and to ensure the \c stable_timestamp doesn't move ahead of the snapshot taken by the checkpoint
+transaction. Each handle gathered refers to a specific b-tree. The set of b-trees gathered by the
+checkpoint varies based off configuration. Additionally clean b-trees, i.e. b-trees without any
+modifications are excluded from the list, with an exception for specific checkpoint configuration
+scenarios.
+
+_The data files checkpoint:_
+
+Data files in this instance refer to all the user created files. The main work of checkpoint is done
+here, the array of b-trees collected in the prepare stage is iterated over. For each b-tree, the
+tree is walked and all the dirty pages are reconciled. Clean pages are skipped to avoid unnecessary
+work. Pages made clean ahead of the checkpoint by eviction are still skipped regardless of whether
+the update written by eviction is visible to the checkpoint transaction. The checkpoint guarantees
+that a clean version of every page in the tree exists and can be written to disk.
+
+_The history store checkpoint:_
+
+The history store is intentionally checkpointed after the data files, as during the reconciliation
+of the data files additional writes may be created in the history store and it's important to
+include them in the checkpoint.
+
+_Flushing the files to disk:_
+
+All the checkpointed b-trees and the history store are flushed to disk at this stage; WiredTiger will wait
+until that process has completed to continue with the checkpoint.
+
+_The metadata checkpoint:_
+
+A new entry into the metadata file is created for every data file checkpointed, including the
+history store. As such the metadata file is the last file to be checkpointed. As WiredTiger
+maintains two checkpoints, the location of the most recent checkpoint is written to the turtle file.
+
+# Skipping checkpoints #
+
+It is possible that a checkpoint will be skipped. If no modifications to the database have been
+made since the last checkpoint, and the last checkpoint timestamp is equal to the current stable
+timestamp then a checkpoint will not be taken. This logic can be overridden by forcing a checkpoint
+via configuration.
+
+# Checkpoint generations #
+
+The checkpoint generation indicates which iteration of checkpoint a file has undergone. At the start
+of a checkpoint the generation is incremented. Then after processing any b-tree its
+\c checkpoint_gen is set to the latest checkpoint generation. Checkpoint generations impact
+visibility checks within WiredTiger, essentially if a b-tree is behind a checkpoint, i.e. its
+checkpoint generation is less than the current checkpoint generation, then the checkpoint
+transaction id and checkpoint timestamp are included in certain visibility checks.
+This prevents eviction from evicting updates from a given b-tree ahead of the checkpoint.
+
+# Garbage collection #
+
+While processing a b-tree, checkpoint can mark pages as obsolete. Any page that has an aggregated
+stop time pair which is globally visible will no longer be required by any reader and can be marked
+as deleted. This occurs prior to the page being reconciled, allowing for the page to be removed
+during the reconciliation. However this does not mean that the deleted page is available for re-use
+as it may be referenced by older checkpoints; once the older checkpoint is deleted the page is free
+to be used. If the freed pages exist at the end of the file, the file can be truncated. Otherwise
+compaction will need to be initiated to shrink the file, see: WT_SESSION::compact.
+*/
diff --git a/src/third_party/wiredtiger/src/docs/arch-glossary.dox b/src/third_party/wiredtiger/src/docs/arch-glossary.dox
index 3ae6a90eb81..ea71fe5893b 100644
--- a/src/third_party/wiredtiger/src/docs/arch-glossary.dox
+++ b/src/third_party/wiredtiger/src/docs/arch-glossary.dox
@@ -24,7 +24,11 @@ tree - it describes terms internal to the storage engine.
<table>
<caption id="transaction_terms">Transaction Term Table</caption>
<tr><th>Term <th>Definition
-<tr><td>Hello<td>The typical next word is World
+<tr><td>transaction<td>A unit of work performed in WiredTiger that is atomic and consistent with the specified isolation levels, and durability levels.
+<tr><td>session<td>The container that manages transactions and performs the transactional operations on behalf of the users in a single thread.
+<tr><td>atomicity<td>Atomicity is a guarantee provided within the context of a transaction that all operations made within that transaction either succeed or fail.
+<tr><td>isolation<td>Isolation determines which versions of data are visible to a running transaction, and whether it can see changes made by concurrently running transactions.
+<tr><td>durability<td>Durability is the property that guarantees that transactions that have been committed will survive permanently.
</table>
*/
diff --git a/src/third_party/wiredtiger/src/docs/arch-index.dox b/src/third_party/wiredtiger/src/docs/arch-index.dox
index 52ebc0120d4..c91a53129f2 100644
--- a/src/third_party/wiredtiger/src/docs/arch-index.dox
+++ b/src/third_party/wiredtiger/src/docs/arch-index.dox
@@ -132,6 +132,10 @@ The Block Manager manages the reading and writing of disk blocks.
Cache is represented by the various shared data structures that
make up in-memory Btrees and subordinate data structures.
+@subpage arch-checkpoint
+
+A checkpoint is created by WiredTiger to serve as a point from which it can recover.
+
@subpage arch-column
Column Stores are Btrees that have as their key a record id.
diff --git a/src/third_party/wiredtiger/src/docs/arch-transaction.dox b/src/third_party/wiredtiger/src/docs/arch-transaction.dox
index bc3c4e59722..d15a3cbb8d5 100644
--- a/src/third_party/wiredtiger/src/docs/arch-transaction.dox
+++ b/src/third_party/wiredtiger/src/docs/arch-transaction.dox
@@ -5,4 +5,198 @@ A caller of WiredTiger uses @ref transactions within the API to start and stop t
a session (thread of control).
Internally, the current transaction state is represented by the WT_TXN structure.
+
+Except for schema operations, WiredTiger performs all the read and write operations within a
+transaction. If the user doesn't explicitly begin a transaction, WiredTiger will automatically
+create a transaction for the user's operation.
+
+@section Lifecycle
+
+A WiredTiger session creates and manages the transactions' lifecycle. One transaction can be
+run at a time per session, and that transaction must complete before another transaction can be
+started. Since every session is singly-threaded, all the operations in the transaction are executed
+on the same thread.
+
+@plantuml_start{transaction_lifecycle.png}
+@startuml{transaction_lifecycle.png}
+:Transaction Lifecycle;
+
+split
+ :perform a read operation
+ (Create an auto transaction);
+split again
+ :perform a write operation
+ (Create an auto transaction with a transaction id);
+split again
+ :declare the beginning of a transaction;
+ :perform read operations;
+ :perform a write operation
+ (Assign a transaction id);
+ :perform read write operations;
+split again
+ :declare the beginning of a transaction;
+ :perform read operations;
+ :perform a write operation
+ (Assign a transaction id);
+ :perform read write operations;
+ :prepare the transaction;
+end split
+
+split
+ :rollback;
+split again
+ :commit;
+end split
+
+Stop
+@enduml
+@plantuml_end
+
+A transaction starts in two scenarios, when the user calls begin via
+WT_SESSION::begin_transaction or internally when the user performs either a read or write
+operation. Internally they are only started if they are not already within the context of a running
+transaction. If declared explicitly the transaction will be active until it is committed or rolled
+back. If it is created internally, it will cease to be active after the user operation either
+successfully completes or fails.
+
+If the transaction is committed successfully, any write operation it performs is accepted by the
+database and will be durable to some extent based on the durability setting. Otherwise, all the
+write operations it has done will be reverted and will not be available any more.
+
+@section ACID Properties
+
+Like other databases, transactions in WiredTiger enforce the ACID properties (atomicity,
+consistency, isolation, and durability).
+
+@subsection Atomicity
+
+All write operations initially happen in memory in WiredTiger and will not be written to disk until
+the entire transaction is committed. Therefore, the size of the transaction must fit in memory.
+
+To rollback the transaction, WiredTiger only needs to mark all the write operations of that
+transaction as aborted in memory. To ensure no partial transaction is persisted to disk, the
+eviction threads and the checkpoint threads will do proper visibility checks to make sure each
+persisted operations are actually visible in regards to their snapshot.
+
+There is one case where atomicity of transactions is not honored when using timestamps in WiredTiger. If
+the operations in the same transaction are conducted at different timestamps and the checkpoint
+happens in between the timestamps, only the operations that happen before or at the checkpoint timestamp
+will be persisted in the checkpoint and the operations that happen after the checkpoint timestamp in the
+transaction will be discarded.
+
+There is another case where atomicity may be violated if a transaction operates both on tables with
+logging enabled and disabled after restart. The operations on the tables with logging enabled will
+survive the restart, while the operations on the non-logged tables may be lost if it is not
+included in the latest checkpoint.
+
+@subsection Isolation
+
+Isolation is one of the important features of a database, which is used to determine whether one
+transaction can read updates done by the other concurrent transactions. WiredTiger supports three
+isolation levels, read uncommitted, read committed, and snapshot. However, only snapshot is
+supported for write operations. By default, WiredTiger runs in snapshot isolation.
+
+1. Under snapshot isolation, a transaction is able to see updates done by other transactions
+that are committed before it starts.
+
+2. Under read committed isolation, a transaction is able to see updates done by other
+transactions that have been committed when the reading happens.
+
+3. Under read uncommitted isolation, a transaction is able to see updates done by all the
+existing transactions, including the concurrent ones.
+
+Each transaction in WiredTiger is given a globally unique transaction id before doing the first
+write operation and this id is written to each operation done by the same transaction. If the
+transaction is running under snapshot isolation or read committed isolation, it will obtain a
+transaction snapshot which includes a list of uncommitted concurrent transactions' ids at the
+appropriate time to check the visibility of updates. For snapshot transaction, it is at the
+beginning of the transaction and it will use the same snapshot across its whole life cycle. For
+read committed transaction, it will obtain a new snapshot every time it does a search before
+reading. Due to the overhead of obtaining a snapshot, it uses the same snapshot for all the reads
+before calling another search. Read uncommitted transactions don't have a snapshot.
+
+If the transaction has a snapshot, each read will check whether the update's transaction id is in
+its snapshot. The updates with transaction ids in the snapshot or larger than the largest
+transaction id in the snapshot are not visible to the reading transaction.
+
+When operating in read committed or read uncommitted isolation levels, it is possible to read
+different values of the same key, seeing records not seen before, or finding records disappear in
+the same transaction. This is called a phantom read. Under snapshot isolation, WiredTiger guarantees
+repeated reads returning the same result except in one scenario using timestamps.
+
+@subsection Timestamps
+
+WiredTiger provides a mechanism to control when operations should be visible, called timestamps.
+Timestamps are user specified sequence numbers that are associated with each operation. In
+addition, users can assign an immutable read timestamp to a transaction at the beginning. A
+transaction can only see updates with timestamps smaller or equal to its read timestamp. Note that
+read timestamp 0 means no read timestamp and the transaction can see the updates regardless of
+timestamps. Also note that timestamps don't have to be derived from physical times. Users can use
+any 64 bit unsigned integer as logical timestamps. For a single operation, the timestamps
+associated with the operations in the same transaction don't have to be the same as long as they
+are monotonically increasing.
+
+Apart from the operation level timestamps, the users are also responsible for managing the global
+level timestamps, i.e., the oldest timestamp, and the stable timestamp. The oldest timestamp is the
+timestamp that should be visible by all concurrent transactions. The stable timestamp is the
+minimum timestamp that a new operation can commit at.
+
+Only transactions running in snapshot isolation can run with timestamps.
+
+@subsection Visibility
+
+The visibility of the transactions in WiredTiger considers both the operations' transaction ids and
+timestamps. The operation is visible only when both its transaction id and its timestamp are
+visible to the reading transaction.
+
+To read a key, WiredTiger first traverses all the updates of that key still in memory until a
+visible update is found. The in-memory updates in WiredTiger are organized as a singly linked list
+with the newest update at the head, called the update chain. If no value is visible on the update
+chain, it checks the version on the disk image, which is the version that was chosen to be written
+to disk in the last reconciliation. If it is still invisible, WiredTiger will search the history
+store to check if there is a version visible to the reader there.
+
+The repeated read guarantee under snapshot isolation may break in one case if the timestamps
+committed to the updates are out of order, e.g,
+
+`U@20 -> U@30 -> U@15`
+
+In the above example, reading with timestamp 15 doesn't guarantee to return the third update. In
+some cases, users may read the second update U@30 if it is moved to the history store.
+
+@subsection Durability
+
+WiredTiger transactions support commit level durability and checkpoint level durability. An
+operation is commit level durable if logging is enabled on the table (@ref arch-logging). After it
+has been successfully committed, the operation is guaranteed to survive restart. An operation will
+only survive across restart under checkpoint durability if it is included in the last successful
+checkpoint.
+
+@section Prepared Transactions
+
+WiredTiger introduces prepared transactions to meet the needs of implementing distributed
+transactions through two-phase commit. Prepared transactions only work under snapshot isolation.
+
+Instead of just having the beginning, operating, and rollback or commit phase, it has a prepared
+phase before the rollback or commit phase. After prepare is called, WiredTiger releases the
+transaction's snapshot and prohibits any more read or write operations on the transaction.
+
+By introducing the prepared stage, a two-phase distributed transaction algorithm can rely on the
+prepared state to reach consensus among all the nodes for committing.
+
+Along with the prepared phase, WiredTiger introduces the prepared timestamp and durable timestamp.
+They exist to prevent slow prepared transactions from blocking the movement of the global stable
+timestamp, which may cause excessive amounts of data to be pinned in memory. The stable timestamp
+is allowed to move beyond the prepared timestamp and at the commit time, the prepared transaction
+can then be committed after the current stable timestamp with a larger durable timestamp. The
+durable timestamp also marks the time the update is to be stable. If the stable timestamp is moved
+to or beyond the durable timestamp of an update, it will not be removed by rollback to stable from
+a checkpoint. See @ref arch-rts for more details.
+
+The visibility of the prepared transaction is also special when in the prepared state. Since in the
+prepared state, the transaction has released its snapshot, it should be visible to the transactions
+starting after that based on the normal visibility rule. However, the prepared transaction has not
+been committed and cannot be visible yet. In this situation, WiredTiger will return a
+WT_PREPARE_CONFLICT to indicate to the caller to retry later, or if configured WiredTiger will
+ignore the prepared update and read older updates.
*/
diff --git a/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/schema_create.png b/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/schema_create.png
index d2fc00448f0..b87a2cd1a98 100644
--- a/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/schema_create.png
+++ b/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/schema_create.png
Binary files differ
diff --git a/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/schema_generic.png b/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/schema_generic.png
index f046734ce52..79c42fa6938 100644
--- a/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/schema_generic.png
+++ b/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/schema_generic.png
Binary files differ
diff --git a/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/schema_rename.png b/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/schema_rename.png
index 65ac52d6fa1..807aff3f791 100644
--- a/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/schema_rename.png
+++ b/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/schema_rename.png
Binary files differ
diff --git a/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/transaction_lifecycle.png b/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/transaction_lifecycle.png
new file mode 100644
index 00000000000..5ccf760edf7
--- /dev/null
+++ b/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/transaction_lifecycle.png
Binary files differ
diff --git a/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/wt_diagram.cmapx b/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/wt_diagram.cmapx
index 0400be53354..2fdeef6a039 100644
--- a/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/wt_diagram.cmapx
+++ b/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/wt_diagram.cmapx
@@ -1,21 +1,21 @@
<map id="wt_diagram_map" name="wt_diagram_map">
-<area shape="rect" id="id1" href="modules.html" title="modules.html" alt="" coords="248,128,284,145"/>
-<area shape="rect" id="id2" href="arch-cache.html" title="arch-cache.html" alt="" coords="198,546,240,563"/>
-<area shape="rect" id="id3" href="arch-cursor.html" title="arch-cursor.html" alt="" coords="206,224,253,241"/>
-<area shape="rect" id="id4" href="arch-eviction.html" title="arch-eviction.html" alt="" coords="295,546,348,563"/>
-<area shape="rect" id="id5" href="arch-schema.html" title="arch-schema.html" alt="" coords="98,224,151,241"/>
-<area shape="rect" id="id6" href="arch-logging.html" title="arch-logging.html" alt="" coords="388,651,444,667"/>
-<area shape="rect" id="id7" href="command_line.html" title="command_line.html" alt="" coords="374,24,433,40"/>
-<area shape="rect" id="id8" href="arch-log-file.html" title="arch-log-file.html" alt="" coords="307,865,338,898"/>
-<area shape="rect" id="id9" href="arch-metadata.html" title="arch-metadata.html" alt="" coords="25,329,88,345"/>
-<area shape="rect" id="id10" href="arch-snapshot.html" title="arch-snapshot.html" alt="" coords="381,442,452,458"/>
-<area shape="rect" id="id11" href="arch-python.html" title="arch-python.html" alt="" coords="84,24,157,40"/>
-<area shape="rect" id="id12" href="arch-transaction.html" title="arch-transaction.html" alt="" coords="340,329,428,345"/>
-<area shape="rect" id="id13" href="arch-hs.html" title="arch-hs.html" alt="" coords="93,642,141,675"/>
-<area shape="rect" id="id14" href="arch-row.html" title="arch-row.html" alt="" coords="163,433,216,466"/>
-<area shape="rect" id="id15" href="arch-column.html" title="arch-column.html" alt="" coords="272,433,326,466"/>
-<area shape="rect" id="id16" href="arch-block.html" title="arch-block.html" alt="" coords="196,642,256,675"/>
-<area shape="rect" id="id17" href="arch-dhandle.html" title="arch-dhandle.html" alt="" coords="181,320,244,353"/>
-<area shape="rect" id="id18" href="arch-data-file.html" title="arch-data-file.html" alt="" coords="181,865,244,898"/>
-<area shape="rect" id="id19" href="arch-fs-os.html" title="arch-fs-os.html" alt="" coords="177,755,354,772"/>
+<area shape="rect" id="id1" href="modules.html" title="modules.html" alt="" coords="248,128,283,144"/>
+<area shape="rect" id="id2" href="arch-cache.html" title="arch-cache.html" alt="" coords="196,546,240,562"/>
+<area shape="rect" id="id3" href="arch-cursor.html" title="arch-cursor.html" alt="" coords="206,224,252,240"/>
+<area shape="rect" id="id4" href="arch-eviction.html" title="arch-eviction.html" alt="" coords="295,546,348,562"/>
+<area shape="rect" id="id5" href="arch-logging.html" title="arch-logging.html" alt="" coords="388,650,443,666"/>
+<area shape="rect" id="id6" href="arch-schema.html" title="arch-schema.html" alt="" coords="95,224,151,240"/>
+<area shape="rect" id="id7" href="command_line.html" title="command_line.html" alt="" coords="374,23,430,39"/>
+<area shape="rect" id="id8" href="arch-log-file.html" title="arch-log-file.html" alt="" coords="308,865,339,897"/>
+<area shape="rect" id="id9" href="arch-metadata.html" title="arch-metadata.html" alt="" coords="25,328,89,344"/>
+<area shape="rect" id="id10" href="arch-python.html" title="arch-python.html" alt="" coords="84,23,157,39"/>
+<area shape="rect" id="id11" href="arch-snapshot.html" title="arch-snapshot.html" alt="" coords="381,441,455,457"/>
+<area shape="rect" id="id12" href="arch-transaction.html" title="arch-transaction.html" alt="" coords="339,328,428,344"/>
+<area shape="rect" id="id13" href="arch-hs.html" title="arch-hs.html" alt="" coords="93,642,140,674"/>
+<area shape="rect" id="id14" href="arch-row.html" title="arch-row.html" alt="" coords="163,433,217,465"/>
+<area shape="rect" id="id15" href="arch-column.html" title="arch-column.html" alt="" coords="272,433,326,465"/>
+<area shape="rect" id="id16" href="arch-block.html" title="arch-block.html" alt="" coords="196,642,256,674"/>
+<area shape="rect" id="id17" href="arch-dhandle.html" title="arch-dhandle.html" alt="" coords="181,320,242,352"/>
+<area shape="rect" id="id18" href="arch-data-file.html" title="arch-data-file.html" alt="" coords="179,865,245,897"/>
+<area shape="rect" id="id19" href="arch-fs-os.html" title="arch-fs-os.html" alt="" coords="175,755,357,771"/>
</map>
diff --git a/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/wt_diagram.png b/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/wt_diagram.png
index 99ae107fa95..92bf5dc1f2e 100644
--- a/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/wt_diagram.png
+++ b/src/third_party/wiredtiger/src/docs/images/plantuml_gen_img/wt_diagram.png
Binary files differ
diff --git a/src/third_party/wiredtiger/src/docs/spell.ok b/src/third_party/wiredtiger/src/docs/spell.ok
index 5a37d260c1e..ed4609e2bfe 100644
--- a/src/third_party/wiredtiger/src/docs/spell.ok
+++ b/src/third_party/wiredtiger/src/docs/spell.ok
@@ -7,6 +7,7 @@ Atomicity
BLOBs
BLRrVv
CFLAGS
+COV
CPPFLAGS
CPUs
CRC
@@ -60,9 +61,12 @@ JavaScript
KMS
LD
LDFLAGS
+LibFuzzer
LIBS
+LINKFLAGS
LLC
LLVM
+LLVMFuzzerTestOneInput
LOGREC
LRU
LRrVv
@@ -87,6 +91,7 @@ OPTYPE
PMU
PPC
PRELOAD
+PROFDATA
README
RedHat
RepMgr
@@ -96,6 +101,7 @@ Roelofs
RrVv
Rrx
SCons
+Sanitizer
Seward's
SiH
TSC
@@ -131,6 +137,7 @@ apiflags
ar
archiver
arg
+args
atomicity
autoconf
autogen
@@ -161,6 +168,7 @@ cd
cdb
cds
changelog
+checkpointed
checksum
checksums
ckp
@@ -178,6 +186,7 @@ conn
const
control's
copydoc
+cov
cp
cpp
cpu
@@ -250,6 +259,7 @@ extensibility
fadvise
failchk
fallocate
+fcoverage
fd's
fdatasync
fextend
@@ -271,11 +281,16 @@ flamegraph
flamegraphs
fnv
forw
+fprofile
fput
freelist
fs
+fsanitize
fsync
ftruncate
+fuzzer
+fuzzers
+fuzzutil
fvisibility
fxray
gcc
@@ -334,6 +349,7 @@ len
lgnd
li
libdir
+libfuzzer
libhe
libkvs
libtool
@@ -447,6 +463,8 @@ printlog
printvalue
priv
proc
+profdata
+profraw
pthread
pthreads
putKey
@@ -471,6 +489,7 @@ recoverability
recs
rectype
relinking
+req
rerequests
ret
rf
@@ -493,6 +512,7 @@ scons
screenshots
secretkey
selectable
+sep
seqname
seqno
serializable
@@ -589,6 +609,13 @@ wrlock
wtperf
wtstats
xa
+xaa
+xad
+xbe
+xde
+xdeadbeef
+xef
+xff
xray
yieldcpu
zlib
diff --git a/src/third_party/wiredtiger/src/docs/tool-index.dox b/src/third_party/wiredtiger/src/docs/tool-index.dox
index afcad075022..a4b506d5422 100644
--- a/src/third_party/wiredtiger/src/docs/tool-index.dox
+++ b/src/third_party/wiredtiger/src/docs/tool-index.dox
@@ -35,6 +35,13 @@ Why is my CPU busy? FlameGraphs help visually summarize on-CPU call stacks and
allow for the quick identification of hot code paths. Here we explain how to
generate FlameGraphs from WiredTiger `perf` data.
+@subpage tool-libfuzzer
+
+LLVM LibFuzzer is an in-process, coverage-guided, evolutionary fuzzing engine. It feeds a series of
+fuzzed inputs via a "target" function and attempts to trigger crashes, memory bugs and undefined
+behavior. This article explains how to build and run existing fuzzers, implement new ones and
+visualize coverage provided by a fuzzer.
+
@section tool-other Other Resources
The WiredTiger @ref command_line has facilities for examining tables
diff --git a/src/third_party/wiredtiger/src/docs/tool-libfuzzer.dox b/src/third_party/wiredtiger/src/docs/tool-libfuzzer.dox
new file mode 100644
index 00000000000..43525faff39
--- /dev/null
+++ b/src/third_party/wiredtiger/src/docs/tool-libfuzzer.dox
@@ -0,0 +1,206 @@
+/*! @page tool-libfuzzer Testing with LLVM LibFuzzer
+
+# Building and running an LLVM LibFuzzer fuzzer
+
+LLVM LibFuzzer is an in-process, coverage-guided, evolutionary fuzzing engine. It feeds a series of
+fuzzed inputs via a user-provided "target" function and attempts to trigger crashes, memory bugs and
+undefined behavior.
+
+A fuzzer is a program that consists of the aforementioned target function linked against the
+LibFuzzer runtime. The LibFuzzer runtime provides the entry-point to the program and repeatedly
+calls the target function with generated inputs.
+
+## Step 1: Configure with Clang as your C compiler and enable LibFuzzer
+
+Support for LibFuzzer is implemented as a compiler flag in Clang. WiredTiger's build configuration
+checks whether the compiler in use supports \c -fsanitize=fuzzer-no-link and if so, elects to build
+the tests.
+
+Compiling with Clang's Address Sanitizer isn't mandatory but it is recommended since fuzzing often
+exposes memory bugs.
+
+@code
+$ cd build_posix/
+$ ../configure --enable-libfuzzer CC="clang-8" CFLAGS="-fsanitize=address"
+@endcode
+
+## Step 2: Build as usual
+
+@code
+$ cd build_posix
+$ make
+@endcode
+
+## Step 3: Run a fuzzer
+
+Each fuzzer is a program under \c build_posix/test/fuzz/. WiredTiger provides the
+\c test/fuzz/fuzz_run.sh script to quickly get started using a fuzzer. It performs a limited
+number of runs, automatically cleans up after itself in between runs and provides a sensible set of
+parameters which users can add to. For example:
+
+@code
+$ cd build_posix/test/fuzz/
+$ bash ../../../test/fuzz/fuzz_run.sh ./fuzz_modify
+@endcode
+
+In general the usage is:
+
+@code
+fuzz_run.sh <fuzz-test-binary> [fuzz-test-args]
+@endcode
+
+Each fuzzer will produce a few outputs:
+
+- \c crash-<input-hash>: If an error occurs, a file will be produced containing the input that
+crashed the target.
+
+- \c fuzz-N.log: The LibFuzzer log for worker N. This is just an ID that LibFuzzer assigns to each
+worker ranging from 0 to the number of workers - 1.
+
+- \c WT_TEST_<pid>: The home directory for a given worker process.
+
+- \c WT_TEST_<pid>.profraw: If the fuzzer is running with Clang coverage (more on this later), files
+containing profiling data for a given worker will be produced. These will be used by
+\c fuzz_coverage.
+
+### Corpus
+
+LibFuzzer is a coverage based fuzzer meaning that it notices when a particular input hits a new code
+path and adds it to a corpus of interesting data inputs. It then uses existing data in the corpus to
+mutate and come up with new inputs.
+
+While LibFuzzer will automatically add to its corpus when it finds an interesting input, some fuzz
+targets (especially those that expect data in a certain format) require a corpus to start things off
+in order to be effective. The fuzzer \c fuzz_config is one example of this as it expects its data
+sliced with a separator so the fuzzing engine needs some examples to guide it. The corpus is
+supplied as the first positional argument to both \c fuzz_run.sh and the underlying fuzzer itself.
+For example:
+
+@code
+$ cd build_posix/test/fuzz/
+$ bash ../../../test/fuzz/fuzz_run.sh ./fuzz_config ../../../test/fuzz/config/corpus/
+@endcode
+
+# Implementing an LLVM LibFuzzer fuzzer
+
+## Overview
+
+Creating a fuzzer with LLVM LibFuzzer requires an implementation of a single function called
+\c LLVMFuzzerTestOneInput. It has the following prototype:
+
+@code
+int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size);
+@endcode
+
+When supplied with the \c -fsanitize=fuzzer flag, Clang will use its own \c main function and
+repeatedly call the provided \c LLVMFuzzerTestOneInput with various inputs. There is a lot of
+information online about best practices when writing fuzzing targets but to summarize, the
+requirements are much like those for writing unit tests: they should be fast, deterministic and
+stateless (as possible). The [LLVM LibFuzzer reference page](https://llvm.org/docs/LibFuzzer.html)
+is a good place to start to learn more.
+
+## Fuzz Utilities
+
+WiredTiger has a small fuzz utilities library containing common functionality required for writing
+fuzz targets. Most of the important functions here are about manipulating fuzz input into something
+that targets can use with the WiredTiger API.
+
+### Slicing inputs
+
+If the function that a target is testing can accept a binary blob of data, then the target will be
+straightforward as it'll more or less just pass the input to the function under test. But for
+functions that require more structured input, this can pose a challenge. As an example, the
+\c fuzz_config target requires two inputs that can be variable length: a configuration string and a
+key to search for. In order to do this, the target can use an arbitrary separator sequence to split
+the input into multiple parts with the following API:
+
+@snippet fuzz_util.h fuzzutil sliced input api
+
+Using this API, the target can supply a data buffer, a separator sequence and the number of inputs
+it needs. If it doesn't find the right number of separators in the provided input,
+\c fuzzutil_sliced_input_init will return \c false and the target should return out of the
+\c LLVMFuzzerTestOneInput function. While this may seem like the target will reject a lot of input,
+the fuzzing engine is able to figure out (especially if an initial corpus is supplied), that inputs
+with the right number of separators tend to yield better code coverage and will bias its generated
+inputs towards this format.
+
+In \c fuzz_config, we use the \c | character as a separator since this cannot appear in a
+configuration string. So an input separated correctly will look like this:
+
+@code
+allocation_size|key_format=u,value_format=u,allocation_size=512,log=(enabled)
+@endcode
+
+But for data where there is no distinct character that can be used as a sentinel, we can provide
+a byte sequence such as \c 0xdeadbeef. So in that case, a valid input may look like this:
+
+@code
+0xaa 0xaa 0xde 0xad 0xbe 0xef 0xff 0xff
+@endcode
+
+# Viewing code coverage for an LLVM LibFuzzer fuzzer
+
+After implementing a new fuzzing target, developers typically want to validate that it's doing
+something useful. If the fuzzer is not producing failures, it's either because the code under test
+is robust or the fuzzing target isn't doing a good job of exercising different code paths.
+
+## Step 1: Configure your build to compile with Clang coverage
+
+In order to view code coverage information, the build will need to be configured with the
+\c -fprofile-instr-generate and \c -fcoverage-mapping flags to tell Clang to instrument WiredTiger
+with profiling information. It's important that these are added to both the \c CFLAGS and
+\c LINKFLAGS variables.
+
+@code
+$ cd build_posix/
+$ ../configure CC="clang-8" CFLAGS="-fprofile-instr-generate -fcoverage-mapping" LINKFLAGS="-fprofile-instr-generate -fcoverage-mapping"
+@endcode
+
+## Step 2: Build and run fuzzer
+
+Build and invoke \c fuzz_run.sh for the desired fuzzer as described in the section above.
+
+## Step 3: Generate code coverage information
+
+After running the fuzzer with Clang coverage switched on, there should be a number of \c profraw
+files in the working directory.
+
+Those files contain the raw profiling data; however, some post-processing is required to get it into
+a readable form. WiredTiger provides a script called \c fuzz_coverage.sh that handles this. It
+expects to be called from the same directory that the fuzzer was executed in.
+
+@code
+$ cd build_posix/test/fuzz
+$ bash ../../../test/fuzz/fuzz_coverage.sh ./fuzz_modify
+@endcode
+
+In general the usage is:
+
+@code
+fuzz_coverage.sh <fuzz-test-binary>
+@endcode
+
+The \c fuzz_coverage.sh script produces a few outputs:
+
+- \c <fuzz-test-binary>_cov.txt: A coverage report in text format. It can be inspected with the
+\c less command and searched for functions of interest. The numbers on the left of each line of code
+indicate how many times they were hit in the fuzzer.
+- \c <fuzz-test-binary>_cov.html: A coverage report in html format. If a web browser is available,
+this might be a nicer way to visualize the coverage.
+
+The \c fuzz_coverage.sh script uses a few optional environment variables to modify its behavior.
+
+- \c PROFDATA_BINARY: The binary used to merge the profiling data. The script defaults to using
+\c llvm-profdata.
+- \c COV_BINARY: The binary used to generate coverage information. The script defaults to using
+\c llvm-cov.
+
+For consistency, the script should use the \c llvm-profdata and \c llvm-cov binaries from the same
+LLVM release as the \c clang compiler used to build with. In the example above, \c clang-8 was used
+in the configuration, so the corresponding \c fuzz_coverage.sh invocation should look like this:
+
+@code
+$ PROFDATA_BINARY=llvm-profdata-8 COV_BINARY=llvm-cov-8 bash ../../../test/fuzz/fuzz_coverage.sh ./fuzz_modify
+@endcode
+
+*/
diff --git a/src/third_party/wiredtiger/src/history/hs_rec.c b/src/third_party/wiredtiger/src/history/hs_rec.c
index e8a7c737d3f..3640b4d0adb 100644
--- a/src/third_party/wiredtiger/src/history/hs_rec.c
+++ b/src/third_party/wiredtiger/src/history/hs_rec.c
@@ -225,9 +225,16 @@ __hs_insert_record_with_btree(WT_SESSION_IMPL *session, WT_CURSOR *cursor, WT_BT
WT_ERR(cursor->get_value(cursor, &hs_stop_durable_ts_diag, &durable_timestamp_diag,
&upd_type_full_diag, existing_val));
WT_ERR(__wt_compare(session, NULL, existing_val, hs_value, &cmp));
+ /*
+             * Check if the existing HS value is the same as the new value we are about to insert.
+ * We can skip this check if the existing value has a globally visible stop time,
+ * i.e., the value has been deleted from the HS.
+ */
if (cmp == 0)
WT_ASSERT(session,
- tw->start_txn == WT_TXN_NONE ||
+ (WT_TIME_WINDOW_HAS_STOP(&hs_cbt->upd_value->tw) &&
+ __wt_txn_tw_stop_visible_all(session, &hs_cbt->upd_value->tw)) ||
+ tw->start_txn == WT_TXN_NONE ||
tw->start_txn != hs_cbt->upd_value->tw.start_txn ||
tw->start_ts != hs_cbt->upd_value->tw.start_ts);
counter = hs_counter + 1;
@@ -683,10 +690,18 @@ __wt_hs_insert_updates(WT_SESSION_IMPL *session, WT_PAGE *page, WT_MULTI *multi)
/*
* Calculate reverse modify and clear the history store records with timestamps when
- * inserting the first update.
+ * inserting the first update. Always write on-disk data store updates to the history
+ * store as a full update because the on-disk update will be the base update for all the
+ * updates that are older than the on-disk update.
+ *
+             * Due to the concurrent operation of checkpoint and eviction, it is possible that the
+             * history store may have more recent versions of a key than the on-disk version. Without
+             * a proper base value in the history store, it can lead to the wrong value being
+             * restored by the RTS.
*/
nentries = MAX_REVERSE_MODIFY_NUM;
- if (upd->type == WT_UPDATE_MODIFY && enable_reverse_modify &&
+ if (!F_ISSET(upd, WT_UPDATE_DS) && upd->type == WT_UPDATE_MODIFY &&
+ enable_reverse_modify &&
__wt_calc_modify(session, prev_full_value, full_value, prev_full_value->size / 10,
entries, &nentries) == 0) {
WT_ERR(__wt_modify_pack(cursor, entries, nentries, &modify_value));
diff --git a/src/third_party/wiredtiger/src/include/extern.h b/src/third_party/wiredtiger/src/include/extern.h
index 405ae73401b..9941c9ba34a 100644
--- a/src/third_party/wiredtiger/src/include/extern.h
+++ b/src/third_party/wiredtiger/src/include/extern.h
@@ -47,6 +47,8 @@ extern char *__wt_timestamp_to_string(wt_timestamp_t ts, char *ts_string)
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern const WT_CONFIG_ENTRY *__wt_conn_config_match(const char *method)
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
+extern const WT_CONFIG_ENTRY *__wt_test_config_match(const char *test_name)
+ WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern const char *__wt_addr_string(WT_SESSION_IMPL *session, const uint8_t *addr, size_t addr_size,
WT_ITEM *buf) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern const char *__wt_buf_set_printable(WT_SESSION_IMPL *session, const void *p, size_t size,
diff --git a/src/third_party/wiredtiger/src/include/log.h b/src/third_party/wiredtiger/src/include/log.h
index 63921e21194..955fcf67be4 100644
--- a/src/third_party/wiredtiger/src/include/log.h
+++ b/src/third_party/wiredtiger/src/include/log.h
@@ -400,9 +400,10 @@ struct __wt_txn_printlog_args {
WT_FSTREAM *fs;
/* AUTOMATIC FLAG VALUE GENERATION START */
-#define WT_TXN_PRINTLOG_HEX 0x1u /* Add hex output */
-#define WT_TXN_PRINTLOG_MSG 0x2u /* Messages only */
- /* AUTOMATIC FLAG VALUE GENERATION STOP */
+#define WT_TXN_PRINTLOG_HEX 0x1u /* Add hex output */
+#define WT_TXN_PRINTLOG_MSG 0x2u /* Messages only */
+#define WT_TXN_PRINTLOG_UNREDACT 0x4u /* Don't redact user data from output */
+ /* AUTOMATIC FLAG VALUE GENERATION STOP */
uint32_t flags;
};
diff --git a/src/third_party/wiredtiger/src/include/wiredtiger.in b/src/third_party/wiredtiger/src/include/wiredtiger.in
index 8b760c6df42..5b0f2e57279 100644
--- a/src/third_party/wiredtiger/src/include/wiredtiger.in
+++ b/src/third_party/wiredtiger/src/include/wiredtiger.in
@@ -3329,6 +3329,13 @@ struct __wt_config_item {
int wiredtiger_config_validate(WT_SESSION *session,
WT_EVENT_HANDLER *event_handler, const char *name, const char *config)
WT_ATTRIBUTE_LIBRARY_VISIBLE;
+
+/*
+ * Validate a configuration string for a WiredTiger test program.
+ */
+int wiredtiger_test_config_validate(WT_SESSION *session,
+ WT_EVENT_HANDLER *event_handler, const char *name, const char *config)
+ WT_ATTRIBUTE_LIBRARY_VISIBLE;
#endif
/*!
diff --git a/src/third_party/wiredtiger/src/log/log_auto.c b/src/third_party/wiredtiger/src/log/log_auto.c
index 75506ef3666..43752585932 100644
--- a/src/third_party/wiredtiger/src/log/log_auto.c
+++ b/src/third_party/wiredtiger/src/log/log_auto.c
@@ -136,6 +136,11 @@ __wt_logop_col_modify_print(
escaped = NULL;
WT_RET(__wt_logop_col_modify_unpack(session, pp, end, &fileid, &recno, &value));
+ if (!FLD_ISSET(args->flags, WT_TXN_PRINTLOG_UNREDACT) && fileid != WT_METAFILE_ID) {
+ WT_RET(__wt_fprintf(session, args->fs, " REDACTED"));
+ return (0);
+ }
+
WT_RET(__wt_fprintf(session, args->fs, " \"optype\": \"col_modify\",\n"));
WT_ERR(__wt_fprintf(
session, args->fs, " \"fileid\": %" PRIu32 " 0x%" PRIx32 ",\n", fileid, fileid));
@@ -203,6 +208,11 @@ __wt_logop_col_put_print(
escaped = NULL;
WT_RET(__wt_logop_col_put_unpack(session, pp, end, &fileid, &recno, &value));
+ if (!FLD_ISSET(args->flags, WT_TXN_PRINTLOG_UNREDACT) && fileid != WT_METAFILE_ID) {
+ WT_RET(__wt_fprintf(session, args->fs, " REDACTED"));
+ return (0);
+ }
+
WT_RET(__wt_fprintf(session, args->fs, " \"optype\": \"col_put\",\n"));
WT_ERR(__wt_fprintf(
session, args->fs, " \"fileid\": %" PRIu32 " 0x%" PRIx32 ",\n", fileid, fileid));
@@ -266,6 +276,11 @@ __wt_logop_col_remove_print(
WT_RET(__wt_logop_col_remove_unpack(session, pp, end, &fileid, &recno));
+ if (!FLD_ISSET(args->flags, WT_TXN_PRINTLOG_UNREDACT) && fileid != WT_METAFILE_ID) {
+ WT_RET(__wt_fprintf(session, args->fs, " REDACTED"));
+ return (0);
+ }
+
WT_RET(__wt_fprintf(session, args->fs, " \"optype\": \"col_remove\",\n"));
WT_RET(__wt_fprintf(
session, args->fs, " \"fileid\": %" PRIu32 " 0x%" PRIx32 ",\n", fileid, fileid));
@@ -321,6 +336,11 @@ __wt_logop_col_truncate_print(
WT_RET(__wt_logop_col_truncate_unpack(session, pp, end, &fileid, &start, &stop));
+ if (!FLD_ISSET(args->flags, WT_TXN_PRINTLOG_UNREDACT) && fileid != WT_METAFILE_ID) {
+ WT_RET(__wt_fprintf(session, args->fs, " REDACTED"));
+ return (0);
+ }
+
WT_RET(__wt_fprintf(session, args->fs, " \"optype\": \"col_truncate\",\n"));
WT_RET(__wt_fprintf(
session, args->fs, " \"fileid\": %" PRIu32 " 0x%" PRIx32 ",\n", fileid, fileid));
@@ -380,6 +400,11 @@ __wt_logop_row_modify_print(
escaped = NULL;
WT_RET(__wt_logop_row_modify_unpack(session, pp, end, &fileid, &key, &value));
+ if (!FLD_ISSET(args->flags, WT_TXN_PRINTLOG_UNREDACT) && fileid != WT_METAFILE_ID) {
+ WT_RET(__wt_fprintf(session, args->fs, " REDACTED"));
+ return (0);
+ }
+
WT_RET(__wt_fprintf(session, args->fs, " \"optype\": \"row_modify\",\n"));
WT_ERR(__wt_fprintf(
session, args->fs, " \"fileid\": %" PRIu32 " 0x%" PRIx32 ",\n", fileid, fileid));
@@ -452,6 +477,11 @@ __wt_logop_row_put_print(
escaped = NULL;
WT_RET(__wt_logop_row_put_unpack(session, pp, end, &fileid, &key, &value));
+ if (!FLD_ISSET(args->flags, WT_TXN_PRINTLOG_UNREDACT) && fileid != WT_METAFILE_ID) {
+ WT_RET(__wt_fprintf(session, args->fs, " REDACTED"));
+ return (0);
+ }
+
WT_RET(__wt_fprintf(session, args->fs, " \"optype\": \"row_put\",\n"));
WT_ERR(__wt_fprintf(
session, args->fs, " \"fileid\": %" PRIu32 " 0x%" PRIx32 ",\n", fileid, fileid));
@@ -522,6 +552,11 @@ __wt_logop_row_remove_print(
escaped = NULL;
WT_RET(__wt_logop_row_remove_unpack(session, pp, end, &fileid, &key));
+ if (!FLD_ISSET(args->flags, WT_TXN_PRINTLOG_UNREDACT) && fileid != WT_METAFILE_ID) {
+ WT_RET(__wt_fprintf(session, args->fs, " REDACTED"));
+ return (0);
+ }
+
WT_RET(__wt_fprintf(session, args->fs, " \"optype\": \"row_remove\",\n"));
WT_ERR(__wt_fprintf(
session, args->fs, " \"fileid\": %" PRIu32 " 0x%" PRIx32 ",\n", fileid, fileid));
@@ -589,6 +624,11 @@ __wt_logop_row_truncate_print(
escaped = NULL;
WT_RET(__wt_logop_row_truncate_unpack(session, pp, end, &fileid, &start, &stop, &mode));
+ if (!FLD_ISSET(args->flags, WT_TXN_PRINTLOG_UNREDACT) && fileid != WT_METAFILE_ID) {
+ WT_RET(__wt_fprintf(session, args->fs, " REDACTED"));
+ return (0);
+ }
+
WT_RET(__wt_fprintf(session, args->fs, " \"optype\": \"row_truncate\",\n"));
WT_ERR(__wt_fprintf(
session, args->fs, " \"fileid\": %" PRIu32 " 0x%" PRIx32 ",\n", fileid, fileid));
diff --git a/src/third_party/wiredtiger/src/txn/txn_rollback_to_stable.c b/src/third_party/wiredtiger/src/txn/txn_rollback_to_stable.c
index 9ad6b7abd6d..0001d09302b 100644
--- a/src/third_party/wiredtiger/src/txn/txn_rollback_to_stable.c
+++ b/src/third_party/wiredtiger/src/txn/txn_rollback_to_stable.c
@@ -254,10 +254,15 @@ __rollback_row_ondisk_fixup_key(WT_SESSION_IMPL *session, WT_PAGE *page, WT_ROW
/*
* Do not include history store updates greater than on-disk data store version to construct
- * a full update to restore. Comparing with timestamps here has no problem unlike in search
- * flow where the timestamps may be reset during reconciliation. RTS detects an on-disk
- * update is unstable based on the written proper timestamp, so comparing against it with
- * history store shouldn't have any problem.
+     * a full update to restore. Including updates more recent than the on-disk version
+     * shouldn't be a problem, as the on-disk version in the history store is always a full
+     * update. It is better not to include those updates, as they unnecessarily increase the
+     * rollback to stable time.
+ *
+ * Comparing with timestamps here has no problem unlike in search flow where the timestamps
+ * may be reset during reconciliation. RTS detects an on-disk update is unstable based on
+ * the written proper timestamp, so comparing against it with history store shouldn't have
+ * any problem.
*/
if (hs_start_ts <= unpack->tw.start_ts) {
if (type == WT_UPDATE_MODIFY)
@@ -267,7 +272,13 @@ __rollback_row_ondisk_fixup_key(WT_SESSION_IMPL *session, WT_PAGE *page, WT_ROW
WT_ASSERT(session, type == WT_UPDATE_STANDARD);
WT_ERR(__wt_buf_set(session, &full_value, hs_value->data, hs_value->size));
}
- }
+ } else
+ __wt_verbose(session, WT_VERB_RECOVERY_RTS(session),
+ "history store update more recent than on-disk update with start timestamp: %s,"
+ " durable timestamp: %s, stop timestamp: %s and type: %" PRIu8,
+ __wt_timestamp_to_string(hs_start_ts, ts_string[0]),
+ __wt_timestamp_to_string(hs_durable_ts, ts_string[1]),
+ __wt_timestamp_to_string(hs_stop_durable_ts, ts_string[2]), type);
/*
* Verify the history store timestamps are in order. The start timestamp may be equal to the
diff --git a/src/third_party/wiredtiger/src/utilities/util_printlog.c b/src/third_party/wiredtiger/src/utilities/util_printlog.c
index 615f11768ab..8581e896313 100644
--- a/src/third_party/wiredtiger/src/utilities/util_printlog.c
+++ b/src/third_party/wiredtiger/src/utilities/util_printlog.c
@@ -15,10 +15,10 @@ usage(void)
"display key and value items in hexadecimal format", "-l",
"the start LSN from which the log will be printed, optionally the end LSN can also be "
"specified",
- NULL, NULL};
+ "-m", "output log message records only", "-u", "print user data, don't redact", NULL, NULL};
util_usage(
- "printlog [-x] [-f output-file] [-l start-file,start-offset]|[-l "
+ "printlog [-mux] [-f output-file] [-l start-file,start-offset]|[-l "
"start-file,start-offset,end-file,end-offset]",
"options:", options);
return (1);
@@ -39,8 +39,12 @@ util_printlog(WT_SESSION *session, int argc, char *argv[])
end_set = start_set = false;
flags = 0;
ofile = NULL;
-
- while ((ch = __wt_getopt(progname, argc, argv, "f:l:mx")) != EOF)
+ /*
+ * By default redact user data. This way if any support people are using this on customer data,
+ * it is redacted unless they make the effort to keep it in. It lessens the risk of doing the
+ * wrong command.
+ */
+ while ((ch = __wt_getopt(progname, argc, argv, "f:l:mux")) != EOF)
switch (ch) {
case 'f': /* output file */
ofile = __wt_optarg;
@@ -62,6 +66,9 @@ util_printlog(WT_SESSION *session, int argc, char *argv[])
case 'm': /* messages only */
LF_SET(WT_TXN_PRINTLOG_MSG);
break;
+ case 'u': /* print user data, don't redact */
+ LF_SET(WT_TXN_PRINTLOG_UNREDACT);
+ break;
case 'x': /* hex output */
LF_SET(WT_TXN_PRINTLOG_HEX);
break;
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_config.c b/src/third_party/wiredtiger/test/cppsuite/test_config.c
new file mode 100644
index 00000000000..6d1585172c8
--- /dev/null
+++ b/src/third_party/wiredtiger/test/cppsuite/test_config.c
@@ -0,0 +1,28 @@
+/* DO NOT EDIT: automatically built by dist/api_config.py. */
+
+#include "wt_internal.h"
+
+static const WT_CONFIG_CHECK confchk_poc_test[] = {
+ {"collection_count", "int", NULL, "min=1,max=10", NULL, 0},
+ {"key_size", "int", NULL, "min=1,max=10000", NULL, 0},
+ {"values", "string", NULL, "choices=[\"first\",\"second\",\"third\"]", NULL, 0},
+ {NULL, NULL, NULL, NULL, NULL, 0}};
+
+static const WT_CONFIG_ENTRY config_entries[] = {
+ {"poc_test", "collection_count=1,key_size=10,values=first", confchk_poc_test, 3},
+ {NULL, NULL, NULL, 0}};
+
+/*
+ * __wt_test_config_match --
+ * Return the static configuration entry for a test.
+ */
+const WT_CONFIG_ENTRY *
+__wt_test_config_match(const char *test_name)
+{
+ const WT_CONFIG_ENTRY *ep;
+
+ for (ep = config_entries; ep->method != NULL; ++ep)
+ if (strcmp(test_name, ep->method) == 0)
+ return (ep);
+ return (NULL);
+}
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/configuration_settings.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/configuration_settings.h
new file mode 100644
index 00000000000..9f1d43773c9
--- /dev/null
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/configuration_settings.h
@@ -0,0 +1,107 @@
+/* Include guard. */
+#ifndef CONFIGURATION_SETTINGS_H
+#define CONFIGURATION_SETTINGS_H
+
+#include "wt_internal.h"
+#include <string>
+#include <stdexcept>
+
+namespace test_harness {
+class configuration {
+ private:
+ std::string _config;
+ WT_CONFIG_PARSER *_config_parser;
+
+ public:
+ configuration(const char *test_config_name, const char *config) : _config(config)
+ {
+ int ret = wiredtiger_config_parser_open(nullptr, config, strlen(config), &_config_parser);
+ if (ret != 0)
+ throw std::invalid_argument(
+ "failed to create configuration parser for provided config");
+ if (wiredtiger_test_config_validate(nullptr, nullptr, test_config_name, config) != 0)
+ throw std::invalid_argument(
+ "failed to validate given config, ensure test config exists");
+ }
+
+ configuration(const char *test_config_name, const WT_CONFIG_ITEM &nested)
+ {
+ if (nested.type != WT_CONFIG_ITEM::WT_CONFIG_ITEM_STRUCT)
+ throw std::invalid_argument("provided config item isn't a structure");
+ int ret = wiredtiger_config_parser_open(nullptr, nested.str, nested.len, &_config_parser);
+ if (ret != 0)
+ throw std::invalid_argument(
+ "failed to create configuration parser for provided sub config");
+ }
+
+ ~configuration()
+ {
+ if (_config_parser != nullptr) {
+ _config_parser->close(_config_parser);
+ _config_parser = nullptr;
+ }
+ }
+
+ std::string
+ get_config()
+ {
+ return _config;
+ }
+
+ /*
+ * Wrapper functions for retrieving basic configuration values. Ideally the tests can avoid
+ * using the config item struct provided by wiredtiger. However if they still wish to use it the
+ * get and next functions can be used.
+ */
+ int
+ get_string(const char *key, std::string &value)
+ {
+ WT_CONFIG_ITEM temp_value;
+ WT_RET(_config_parser->get(_config_parser, key, &temp_value));
+ if (temp_value.type != WT_CONFIG_ITEM::WT_CONFIG_ITEM_STRING ||
+ temp_value.type != WT_CONFIG_ITEM::WT_CONFIG_ITEM_ID)
+ return (-1);
+ value = std::string(temp_value.str, temp_value.len);
+ return (0);
+ }
+
+ int
+ get_bool(const char *key, bool &value)
+ {
+ WT_CONFIG_ITEM temp_value;
+ WT_RET(_config_parser->get(_config_parser, key, &temp_value));
+ if (temp_value.type != WT_CONFIG_ITEM::WT_CONFIG_ITEM_BOOL)
+ return (-1);
+ value = temp_value.val != 0;
+ return (0);
+ }
+
+ int
+ get_int(const char *key, int64_t &value)
+ {
+ WT_CONFIG_ITEM temp_value;
+ WT_RET(_config_parser->get(_config_parser, key, &temp_value));
+ if (temp_value.type != WT_CONFIG_ITEM::WT_CONFIG_ITEM_NUM)
+ return (-1);
+ value = temp_value.val;
+ return (0);
+ }
+
+ /*
+ * Basic configuration parsing helper functions.
+ */
+ int
+ next(WT_CONFIG_ITEM *key, WT_CONFIG_ITEM *value)
+ {
+ return _config_parser->next(_config_parser, key, value);
+ }
+
+ int
+ get(const char *key, WT_CONFIG_ITEM *value)
+ {
+ return _config_parser->get(_config_parser, key, value);
+ }
+};
+} // namespace test_harness
+
+#endif
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/test_harness.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/test_harness.h
index 5660944ae67..b489c84b8ec 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/test_harness.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/test_harness.h
@@ -5,19 +5,27 @@
/* Required to build using older versions of g++. */
#include <cinttypes>
-extern "C" {
+/* Include various wiredtiger libs. */
#include "wiredtiger.h"
#include "wt_internal.h"
-}
+
+#include "configuration_settings.h"
namespace test_harness {
class test {
public:
+ configuration *_configuration;
+ static const std::string _name;
/*
* All tests will implement this initially, the return value from it will indicate whether the
* test was successful or not.
*/
virtual int run() = 0;
+
+ test(std::string config)
+ {
+ _configuration = new configuration(_name.c_str(), config.c_str());
+ }
};
} // namespace test_harness
diff --git a/src/third_party/wiredtiger/test/cppsuite/tests/poc.cxx b/src/third_party/wiredtiger/test/cppsuite/tests/poc.cxx
index 0bf50387344..d41ff8dfa6b 100644
--- a/src/third_party/wiredtiger/test/cppsuite/tests/poc.cxx
+++ b/src/third_party/wiredtiger/test/cppsuite/tests/poc.cxx
@@ -8,23 +8,28 @@ class poc_test : public test_harness::test {
WT_CONNECTION *conn;
int ret = 0;
/* Setup basic test directory. */
- const std::string default_dir = "WT_TEST";
+ const char *default_dir = "WT_TEST";
/*
* Csuite tests utilise a test_util.h command to make their directory, currently that doesn't
* compile under c++ and some extra work will be needed to make it work. Its unclear if the
* test framework will use test_util.h yet.
*/
- const std::string mkdir_cmd = "mkdir " + default_dir;
- ret = system(mkdir_cmd.c_str());
+ const char *mkdir_cmd = "mkdir WT_TEST";
+ ret = system(mkdir_cmd);
if (ret != 0)
return (ret);
- ret = wiredtiger_open(default_dir.c_str(), NULL, "create,cache_size=1G", &conn);
+ ret = wiredtiger_open(default_dir, NULL, "create,cache_size=1G", &conn);
return (ret);
}
+
+ poc_test(std::string config) : test(config) {}
};
+const std::string poc_test::test::_name = "poc_test";
+
int main(int argc, char *argv[]) {
- return poc_test().run();
+ const char *cfg = "collection_count=1,key_size=5";
+ return poc_test(cfg).run();
}
diff --git a/src/third_party/wiredtiger/test/evergreen.yml b/src/third_party/wiredtiger/test/evergreen.yml
index c4c79f5e39d..127c99ea040 100755
--- a/src/third_party/wiredtiger/test/evergreen.yml
+++ b/src/third_party/wiredtiger/test/evergreen.yml
@@ -110,7 +110,7 @@ functions:
set -o verbose
sh reconf
if [ "$OS" != "Windows_NT" ]; then
- CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-ggdb -fPIC -fno-omit-frame-pointer -fsanitize=address" \
+ CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-ggdb -fPIC -fno-omit-frame-pointer -fsanitize=address" CXXFLAGS="-fsanitize=address -fno-omit-frame-pointer -ggdb -fPIC" \
../configure ${configure_python_setting|} \
--enable-diagnostic --with-builtins=lz4,snappy,zlib
fi
@@ -429,7 +429,7 @@ variables:
vars:
configure_env_vars:
CC="/opt/mongodbtoolchain/v3/bin/clang -fsanitize=address"
- PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-ggdb -fPIC -fno-omit-frame-pointer"
+ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-ggdb -fPIC -fno-omit-frame-pointer" CXXFLAGS="-fsanitize=address -fno-omit-frame-pointer -ggdb -fPIC"
posix_configure_flags: --enable-strict --enable-diagnostic --with-builtins=lz4,snappy,zlib
- func: "format test script"
vars:
@@ -477,7 +477,7 @@ tasks:
- func: "get project"
- func: "compile wiredtiger"
vars:
- configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=address -fno-omit-frame-pointer -ggdb"
+ configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=address -fno-omit-frame-pointer -ggdb" CXXFLAGS="-fsanitize=address -fno-omit-frame-pointer -ggdb"
posix_configure_flags: --enable-silent-rules --enable-strict --enable-diagnostic --disable-static
- func: "upload artifact"
- func: "cleanup"
@@ -487,7 +487,7 @@ tasks:
- func: "get project"
- func: "compile wiredtiger"
vars:
- configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=memory -ggdb"
+ configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=memory -ggdb" CXXFLAGS="-fsanitize=memory -ggdb"
posix_configure_flags: --enable-silent-rules --enable-strict --enable-diagnostic --disable-static
- func: "upload artifact"
- func: "cleanup"
@@ -497,7 +497,7 @@ tasks:
- func: "get project"
- func: "compile wiredtiger"
vars:
- configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/gcc CXX=/opt/mongodbtoolchain/v3/bin/g++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=undefined -ggdb"
+ configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/gcc CXX=/opt/mongodbtoolchain/v3/bin/g++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=undefined -ggdb" CXXFLAGS="-fsanitize=undefined -ggdb"
posix_configure_flags: --enable-silent-rules --enable-strict --enable-diagnostic
- func: "upload artifact"
- func: "cleanup"
@@ -608,7 +608,7 @@ tasks:
dependent_task: compile-msan
- func: "compile wiredtiger"
vars:
- configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=memory -ggdb"
+ configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=memory -ggdb" CXXFLAGS="-fsanitize=memory -ggdb"
posix_configure_flags: --enable-silent-rules --enable-strict --enable-diagnostic --disable-static
- func: "make check all"
vars:
@@ -624,7 +624,7 @@ tasks:
dependent_task: compile-asan
- func: "compile wiredtiger"
vars:
- configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=address -fno-omit-frame-pointer -ggdb"
+ configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=address -fno-omit-frame-pointer -ggdb" CXXFLAGS="-fsanitize=address -fno-omit-frame-pointer -ggdb"
posix_configure_flags: --enable-silent-rules --enable-strict --enable-diagnostic --disable-static
- func: "make check all"
vars:
@@ -677,7 +677,7 @@ tasks:
dependent_task: compile-asan
- func: "compile wiredtiger"
vars:
- configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=address -ggdb"
+ configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=address -ggdb" CXXFLAGS="-fsanitize=address -ggdb"
posix_configure_flags: --enable-silent-rules --enable-strict --enable-diagnostic --disable-static
- func: "make check directory"
vars:
@@ -857,7 +857,7 @@ tasks:
dependent_task: compile-ubsan
- func: "compile wiredtiger"
vars:
- configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/gcc CXX=/opt/mongodbtoolchain/v3/bin/g++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=undefined -ggdb"
+ configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/gcc CXX=/opt/mongodbtoolchain/v3/bin/g++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=undefined -ggdb" CXXFLAGS="-fsanitize=undefined -ggdb"
posix_configure_flags: --enable-silent-rules --enable-strict --enable-diagnostic
- command: shell.exec
params:
@@ -2322,6 +2322,8 @@ tasks:
CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH
CFLAGS="-ggdb -fPIC -fsanitize=address -fno-omit-frame-pointer
-I/opt/mongodbtoolchain/v3/lib/gcc/ppc64le-mongodb-linux/8.2.0/include"
+ CXXFLAGS="-ggdb -fPIC -fsanitize=address -fno-omit-frame-pointer
+ -I/opt/mongodbtoolchain/v3/lib/gcc/ppc64le-mongodb-linux/8.2.0/include"
posix_configure_flags: --enable-diagnostic --with-builtins=lz4,snappy,zlib
- func: "format test script"
vars:
@@ -2347,6 +2349,8 @@ tasks:
CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH
CFLAGS="-ggdb -fPIC -fsanitize=address -fno-omit-frame-pointer
-I/opt/mongodbtoolchain/v3/lib/gcc/ppc64le-mongodb-linux/8.2.0/include"
+ CXXFLAGS="-ggdb -fPIC -fsanitize=address -fno-omit-frame-pointer
+ -I/opt/mongodbtoolchain/v3/lib/gcc/ppc64le-mongodb-linux/8.2.0/include"
posix_configure_flags: --enable-diagnostic --with-builtins=lz4,snappy,zlib
- func: "format test script"
# To emulate the original Jenkins job's test coverage, we are running the smoke test 16 times
diff --git a/src/third_party/wiredtiger/test/evergreen/compatibility_test_for_releases.sh b/src/third_party/wiredtiger/test/evergreen/compatibility_test_for_releases.sh
index 754270490a8..d9d313e1057 100755
--- a/src/third_party/wiredtiger/test/evergreen/compatibility_test_for_releases.sh
+++ b/src/third_party/wiredtiger/test/evergreen/compatibility_test_for_releases.sh
@@ -116,6 +116,10 @@ verify_branches()
echo "$1/wt verifying $2 access method $am..."
dir="$2/test/format/RUNDIR.$am"
WIREDTIGER_CONFIG="$EXT" ./wt $(bflag $1) -h "../$dir" verify table:wt
+
+ echo "$1/wt dump and load $2 access method $am..."
+ WIREDTIGER_CONFIG="$EXT" ./wt $(bflag $1) -h "../$dir" dump table:wt > dump_wt.txt
+ WIREDTIGER_CONFIG="$EXT" ./wt $(bflag $1) -h "../$dir" load -f dump_wt.txt
done
}
diff --git a/src/third_party/wiredtiger/test/fuzz/Makefile.am b/src/third_party/wiredtiger/test/fuzz/Makefile.am
new file mode 100644
index 00000000000..0d768e17db2
--- /dev/null
+++ b/src/third_party/wiredtiger/test/fuzz/Makefile.am
@@ -0,0 +1,20 @@
+AM_CPPFLAGS = -fsanitize=fuzzer-no-link
+AM_CPPFLAGS += -I$(top_builddir)
+AM_CPPFLAGS += -I$(top_srcdir)/src/include
+AM_CPPFLAGS += -I$(top_srcdir)/test/fuzz
+AM_CPPFLAGS += -I$(top_srcdir)/test/utility
+
+AM_LDFLAGS = -fsanitize=fuzzer -static
+
+noinst_LTLIBRARIES = libfuzz_util.la
+noinst_PROGRAMS = fuzz_config fuzz_modify
+
+libfuzz_util_la_SOURCES = fuzz_util.c
+libfuzz_util_la_LIBADD = $(top_builddir)/libwiredtiger.la
+libfuzz_util_la_LIBADD += $(top_builddir)/test/utility/libtest_util.la
+
+fuzz_config_SOURCES = config/fuzz_config.c
+fuzz_config_LDADD = libfuzz_util.la
+
+fuzz_modify_SOURCES = modify/fuzz_modify.c
+fuzz_modify_LDADD = libfuzz_util.la
diff --git a/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-0 b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-0
new file mode 100644
index 00000000000..a8521935429
--- /dev/null
+++ b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-0
@@ -0,0 +1 @@
+allocation_size|key_format=u,value_format=u,allocation_size=512,log=(enabled)
diff --git a/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-1 b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-1
new file mode 100644
index 00000000000..e1881a7353c
--- /dev/null
+++ b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-1
@@ -0,0 +1 @@
+log.enabled|key_format=u,allocation_size=1024,value_format=u,log=(enabled)
diff --git a/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-10 b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-10
new file mode 100644
index 00000000000..9bd88341b83
--- /dev/null
+++ b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-10
@@ -0,0 +1 @@
+value_format|lsm=(chunk_size=1MB,merge_min=2)
diff --git a/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-2 b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-2
new file mode 100644
index 00000000000..0112526c4a2
--- /dev/null
+++ b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-2
@@ -0,0 +1 @@
+log|value_format=S,allocation_size=1024,key_format=u,log=(enabled)
diff --git a/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-3 b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-3
new file mode 100644
index 00000000000..8c32ed06720
--- /dev/null
+++ b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-3
@@ -0,0 +1 @@
+value_format|value_format=S,allocation_size=1024,key_format=u,log=(enabled)
diff --git a/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-4 b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-4
new file mode 100644
index 00000000000..36fe2a7c94a
--- /dev/null
+++ b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-4
@@ -0,0 +1 @@
+key_format|columns=(country,year,population),colgroups=(year,population),key_format=S
diff --git a/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-5 b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-5
new file mode 100644
index 00000000000..a02262c08ab
--- /dev/null
+++ b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-5
@@ -0,0 +1 @@
+key_format|columns=(name,age,city),key_format=S,colgroups=(name,age)
diff --git a/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-6 b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-6
new file mode 100644
index 00000000000..05c440dfae5
--- /dev/null
+++ b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-6
@@ -0,0 +1 @@
+log.file_max|log=(enabled,file_max=512,compressor=snappy),create,statistics=(fast)
diff --git a/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-7 b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-7
new file mode 100644
index 00000000000..97d92f9bb56
--- /dev/null
+++ b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-7
@@ -0,0 +1 @@
+compatibility.release|create,compatibility=(release="2.9")
diff --git a/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-8 b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-8
new file mode 100644
index 00000000000..da364e8d2ae
--- /dev/null
+++ b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-8
@@ -0,0 +1 @@
+log.compressor|value_format=u,create,log=(enabled,compressor=zlib)
diff --git a/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-9 b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-9
new file mode 100644
index 00000000000..671a666ba16
--- /dev/null
+++ b/src/third_party/wiredtiger/test/fuzz/config/corpus/corpus-9
@@ -0,0 +1 @@
+statistics_log.on_close|split_pct=90,statistics=(fast),statistics_log=(on_close=true)
diff --git a/src/third_party/wiredtiger/test/fuzz/config/fuzz_config.c b/src/third_party/wiredtiger/test/fuzz/config/fuzz_config.c
new file mode 100644
index 00000000000..10513dec432
--- /dev/null
+++ b/src/third_party/wiredtiger/test/fuzz/config/fuzz_config.c
@@ -0,0 +1,68 @@
+/*-
+ * Public Domain 2014-2020 MongoDB, Inc.
+ * Public Domain 2008-2014 WiredTiger, Inc.
+ *
+ * This is free and unencumbered software released into the public domain.
+ *
+ * Anyone is free to copy, modify, publish, use, compile, sell, or
+ * distribute this software, either in source code form or as a compiled
+ * binary, for any purpose, commercial or non-commercial, and by any
+ * means.
+ *
+ * In jurisdictions that recognize copyright laws, the author or authors
+ * of this software dedicate any and all copyright interest in the
+ * software to the public domain. We make this dedication for the benefit
+ * of the public at large and to the detriment of our heirs and
+ * successors. We intend this dedication to be an overt act of
+ * relinquishment in perpetuity of all present and future rights to this
+ * software under copyright law.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "fuzz_util.h"
+
+#include <assert.h>
+
+int LLVMFuzzerTestOneInput(const uint8_t *, size_t);
+
+/*
+ * LLVMFuzzerTestOneInput --
+ * A fuzzing target for configuration parsing.
+ */
+int
+LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
+{
+ FUZZ_SLICED_INPUT input;
+ WT_CONFIG_ITEM cval;
+ char *config, *key;
+ static const uint8_t separator[] = {'|'};
+
+ WT_CLEAR(input);
+ config = key = NULL;
+
+ fuzzutil_setup();
+ if (!fuzzutil_sliced_input_init(&input, data, size, separator, sizeof(separator), 2))
+ return (0);
+
+ assert(input.num_slices == 2);
+ key = fuzzutil_slice_to_cstring(input.slices[0], input.sizes[0]);
+ if (key == NULL)
+ testutil_die(ENOMEM, "Failed to allocate key");
+ config = fuzzutil_slice_to_cstring(input.slices[1], input.sizes[1]);
+ if (config == NULL)
+ testutil_die(ENOMEM, "Failed to allocate config");
+
+ (void)__wt_config_getones((WT_SESSION_IMPL *)fuzz_state.session, config, key, &cval);
+ (void)cval;
+
+ fuzzutil_sliced_input_free(&input);
+ free(config);
+ free(key);
+ return (0);
+}
diff --git a/src/third_party/wiredtiger/test/fuzz/fuzz_coverage.sh b/src/third_party/wiredtiger/test/fuzz/fuzz_coverage.sh
new file mode 100644
index 00000000000..abe40bea1cc
--- /dev/null
+++ b/src/third_party/wiredtiger/test/fuzz/fuzz_coverage.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+#
+# fuzz_coverage.sh - generate coverage information after running a fuzz test.
+#
+# This script assumes it is running in the directory that the fuzz test was executed in and requires
+# that WiredTiger was compiled with "-fprofile-instr-generate" and "-fcoverage-mapping".
+#
+# Usage
+# fuzz_coverage.sh <fuzz-test-binary>
+#
+# Environment variables
+# PROFDATA_BINARY --
+# The binary to use to merge the profiling data. (default: llvm-profdata)
+# COV_BINARY --
+# The binary to use to generate coverage information. (default: llvm-cov)
+#
+# Output
+# <fuzz-test-binary>_cov.txt --
+# A coverage report in text format. It can be inspected with the "less" command and searched for
+# functions of interest. The numbers on the left of each line of code indicate how many times
+# they were hit in the fuzz test.
+# <fuzz-test-binary>_cov.html --
+# A coverage report in html format. If a web browser is available, this might be a nicer way to
+# visualize the coverage.
+
+if test "$#" -lt "1"; then
+ echo "$0: must specify fuzz test to generate coverage for"
+ exit 1
+fi
+
+fuzz_test_bin="$1"
+
+if test -z "$PROFDATA_BINARY"; then
+ profdata_bin="llvm-profdata"
+ echo "$0: PROFDATA_BINARY is unset, defaulting to $profdata_bin"
+else
+ profdata_bin="$PROFDATA_BINARY"
+fi
+
+if test -z "$COV_BINARY"; then
+ cov_bin="llvm-cov"
+ echo "$0: COV_BINARY is unset, defaulting to $cov_bin"
+else
+ cov_bin="$COV_BINARY"
+fi
+
+# Remove anything from previous runs.
+rm *_cov.profdata *_cov.txt *_cov.html &> /dev/null
+
+fuzz_cov_name="${fuzz_test_bin}_cov"
+combined_profdata_name="${fuzz_cov_name}.profdata"
+
+# Check that there is coverage data.
+ls *.profraw &> /dev/null
+if test $? -ne "0"; then
+ echo "$0: could not find any .profraw files in the current directory"
+ echo "$0: ensure that -fprofile-instr-generate and -fcoverage-mapping are added to your CFLAGS and LINKFLAGS when configuring"
+ exit 1
+fi
+
+$profdata_bin merge -sparse *.profraw -o $combined_profdata_name || exit 1
+$cov_bin show $fuzz_test_bin -instr-profile=$combined_profdata_name > "${fuzz_cov_name}.txt"
+$cov_bin show $fuzz_test_bin -instr-profile=$combined_profdata_name -format=html > "${fuzz_cov_name}.html"
diff --git a/src/third_party/wiredtiger/test/fuzz/fuzz_run.sh b/src/third_party/wiredtiger/test/fuzz/fuzz_run.sh
new file mode 100644
index 00000000000..b0958ba1679
--- /dev/null
+++ b/src/third_party/wiredtiger/test/fuzz/fuzz_run.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+#
+# fuzz_run.sh - run a fuzz test.
+#
+# This script will emit all data in the current working directory including: fuzzing logs, home
+# directories and profiling data (if we've compiled with Clang coverage).
+#
+# Running fuzzers compiled with ASan (-fsanitize=address) is recommended. If you want to also
+# calculate coverage, you should also add "-fprofile-instr-generate" and "-fcoverage-mapping" to
+# your CFLAGS and LINKFLAGS when configuring.
+#
+# Usage
+# fuzz_run.sh <fuzz-test-binary> [fuzz-test-args]
+#
+# If the fuzzer you're running has an existing corpus directory, you may want to run with the corpus
+# supplied:
+# e.g. fuzz_run.sh ../../build_posix/test/fuzz/fuzz_config corpus/
+#
+# Output
+# crash-<input-hash> --
+# If an error occurs, a file will be produced containing the input that crashed the target.
+# fuzz-N.log --
+# The LibFuzzer log for worker N. This is just an ID that LibFuzzer assigns to each worker
+# ranging from 0 => the number of workers - 1.
+# WT_TEST_<pid> --
+# The home directory for a given worker process.
+# WT_TEST_<pid>.profraw --
+# If a fuzzer is running with Clang coverage, files containing profiling data for a given
+# worker will be produced. These will be used by fuzz_coverage.
+
+if test "$#" -lt "1"; then
+ echo "$0: must specify fuzz test to run"
+ exit 1
+fi
+
+# Take the binary name and shift.
+# We don't want to forward this as an argument.
+fuzz_test_bin="$1"
+shift
+
+# Remove anything from previous runs.
+rm -rf WT_TEST_* &> /dev/null
+rm *.profraw fuzz-*.log &> /dev/null
+
+# If we've compiled to emit coverage information, each worker process should write their own
+# performance data.
+export LLVM_PROFILE_FILE="WT_TEST_%p.profraw"
+
+# The rationale for each flag is below:
+# - jobs=8
+# Choosing 8 workers is a reasonable starting point. Depending on their machine, they can bump
+# this number up but most machines will be able to handle this and it finishes jobs much faster
+# than without this flag (equivalent to jobs=1).
+# - runs=100000000
+# Do 100 million runs to make sure that we're stressing the system and hitting lots of
+# branches. Ideally, we'd just let the fuzzer run until the process is killed by the user but
+# unfortunately, coverage data won't get written out in that case.
+# - close_fd_mask=3
+# Suppress stdout and stderr. This isn't ideal but any fuzzing target that prints an error
+# will quickly fill up your disk. Better to just replay the input without this flag if you
+# uncover a bug.
+$fuzz_test_bin -jobs=8 -runs=100000000 -close_fd_mask=3 "$@"
diff --git a/src/third_party/wiredtiger/test/fuzz/fuzz_util.c b/src/third_party/wiredtiger/test/fuzz/fuzz_util.c
new file mode 100644
index 00000000000..b6db660b0d8
--- /dev/null
+++ b/src/third_party/wiredtiger/test/fuzz/fuzz_util.c
@@ -0,0 +1,173 @@
+/*-
+ * Public Domain 2014-2020 MongoDB, Inc.
+ * Public Domain 2008-2014 WiredTiger, Inc.
+ *
+ * This is free and unencumbered software released into the public domain.
+ *
+ * Anyone is free to copy, modify, publish, use, compile, sell, or
+ * distribute this software, either in source code form or as a compiled
+ * binary, for any purpose, commercial or non-commercial, and by any
+ * means.
+ *
+ * In jurisdictions that recognize copyright laws, the author or authors
+ * of this software dedicate any and all copyright interest in the
+ * software to the public domain. We make this dedication for the benefit
+ * of the public at large and to the detriment of our heirs and
+ * successors. We intend this dedication to be an overt act of
+ * relinquishment in perpetuity of all present and future rights to this
+ * software under copyright law.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "fuzz_util.h"
+
+#include <assert.h>
+#include <stdint.h>
+#include <stddef.h>
+
+FUZZ_GLOBAL_STATE fuzz_state = {.conn = NULL, .session = NULL};
+
+/*
+ * fuzzutil_generate_home_name --
+ * Create a unique home directory per worker that LibFuzzer creates.
+ */
+static void
+fuzzutil_generate_home_name(char *buf)
+{
+ pid_t pid;
+
+ /*
+ * Once we're exercising the fuzz target, we don't know which worker we are. An easy way to make
+ * sure that workers don't clobber each other's home directories is to tack the process ID on
+ * the end of the name.
+ */
+ pid = getpid();
+ sprintf(buf, "WT_TEST_%d", pid);
+}
+
+/*
+ * fuzzutil_setup --
+ * Initialize the connection and session the first time LibFuzzer executes the target.
+ */
+void
+fuzzutil_setup(void)
+{
+ char home[100];
+
+ if (fuzz_state.conn != NULL) {
+ assert(fuzz_state.session != NULL);
+ return;
+ }
+
+ WT_CLEAR(home);
+ fuzzutil_generate_home_name(home);
+ testutil_make_work_dir(home);
+ testutil_check(wiredtiger_open(home, NULL, "create,cache_size=5MB", &fuzz_state.conn));
+ testutil_check(fuzz_state.conn->open_session(fuzz_state.conn, NULL, NULL, &fuzz_state.session));
+}
+
+/*
+ * fuzzutil_sliced_input_init --
+ * Often, our fuzz target requires multiple inputs. For example, for configuration parsing we'd
+ * need a configuration string and a key to search for. We can do this by requiring the fuzzer
+ * to provide data with a number of arbitrary multi-byte separators. If you have a sentinel
+ * character, you can use that, otherwise patterns like 0xdeadbeef work fine too but make the
+ * corpus less readable. If the fuzzer doesn't supply data in that format, we can return out of
+ * the fuzz target. While our fuzz target will reject lots of input to begin with, the fuzzer
+ * will figure out that inputs with these separators yield better coverage and will craft more
+ * sensible inputs over time. This is what the sliced input component is designed for. It takes
+ * the data input, a separator and the number of slices that it should expect and populates a
+ * heap allocated array of data pointers to each separate input and their respective size.
+ */
+bool
+fuzzutil_sliced_input_init(FUZZ_SLICED_INPUT *input, const uint8_t *data, size_t size,
+ const uint8_t *sep, size_t sep_size, size_t req_slices)
+{
+ size_t *sizes;
+ const uint8_t *begin, *end, *pos, **slices;
+ u_int i;
+
+ pos = NULL;
+ i = 0;
+ begin = data;
+ end = data + size;
+
+ /*
+ * It might be better to do an initial pass to check that we have the right number of separators
+ * before actually storing them. Currently, we're dynamically allocating even in the case of
+ * invalid input.
+ */
+ slices = malloc(sizeof(uint8_t *) * req_slices);
+ sizes = malloc(sizeof(size_t) * req_slices);
+ if (slices == NULL || sizes == NULL)
+ goto err;
+
+ /*
+ * Store pointers and sizes for each slice of the input. This code is implementing the idea
+ * described at:
+ * https://github.com/google/fuzzing/blob/master/docs/split-inputs.md#magic-separator.
+ */
+ while ((pos = memmem(begin, (size_t)(end - begin), sep, sep_size)) != NULL) {
+ if (i >= req_slices)
+ goto err;
+ slices[i] = begin;
+ sizes[i] = (size_t)(pos - begin);
+ begin = pos + sep_size;
+ ++i;
+ }
+ if (begin < end) {
+ if (i >= req_slices)
+ goto err;
+ slices[i] = begin;
+ sizes[i] = (size_t)(end - begin);
+ ++i;
+ }
+ if (i != req_slices)
+ goto err;
+ input->slices = slices;
+ input->sizes = sizes;
+ input->num_slices = req_slices;
+ return (true);
+
+err:
+ free(slices);
+ free(sizes);
+ return (false);
+}
+
+/*
+ * fuzzutil_sliced_input_free --
+ * Free any resources on the sliced input.
+ */
+void
+fuzzutil_sliced_input_free(FUZZ_SLICED_INPUT *input)
+{
+ free(input->slices);
+ free(input->sizes);
+ input->slices = NULL;
+ input->sizes = NULL;
+}
+
+/*
+ * fuzzutil_slice_to_cstring --
+ * A conversion function to help convert from a data, size pair to a cstring.
+ */
+char *
+fuzzutil_slice_to_cstring(const uint8_t *data, size_t size)
+{
+ char *str;
+
+ str = malloc(size + 1);
+ if (str == NULL)
+ return NULL;
+ memcpy(str, data, size);
+ str[size] = '\0';
+
+ return (str);
+}
diff --git a/src/third_party/wiredtiger/test/fuzz/fuzz_util.h b/src/third_party/wiredtiger/test/fuzz/fuzz_util.h
new file mode 100644
index 00000000000..6e8caa8cccf
--- /dev/null
+++ b/src/third_party/wiredtiger/test/fuzz/fuzz_util.h
@@ -0,0 +1,51 @@
+/*-
+ * Public Domain 2014-2020 MongoDB, Inc.
+ * Public Domain 2008-2014 WiredTiger, Inc.
+ *
+ * This is free and unencumbered software released into the public domain.
+ *
+ * Anyone is free to copy, modify, publish, use, compile, sell, or
+ * distribute this software, either in source code form or as a compiled
+ * binary, for any purpose, commercial or non-commercial, and by any
+ * means.
+ *
+ * In jurisdictions that recognize copyright laws, the author or authors
+ * of this software dedicate any and all copyright interest in the
+ * software to the public domain. We make this dedication for the benefit
+ * of the public at large and to the detriment of our heirs and
+ * successors. We intend this dedication to be an overt act of
+ * relinquishment in perpetuity of all present and future rights to this
+ * software under copyright law.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "test_util.h"
+
+typedef struct {
+ WT_CONNECTION *conn;
+ WT_SESSION *session;
+} FUZZ_GLOBAL_STATE;
+
+extern FUZZ_GLOBAL_STATE fuzz_state;
+
+void fuzzutil_setup(void);
+
+/* ![fuzzutil sliced input api] */
+typedef struct {
+ const uint8_t **slices;
+ size_t *sizes;
+ size_t num_slices;
+} FUZZ_SLICED_INPUT;
+
+bool fuzzutil_sliced_input_init(FUZZ_SLICED_INPUT *input, const uint8_t *data, size_t size,
+ const uint8_t *sep, size_t sep_size, size_t req_slices);
+void fuzzutil_sliced_input_free(FUZZ_SLICED_INPUT *input);
+/* ![fuzzutil sliced input api] */
+
+char *fuzzutil_slice_to_cstring(const uint8_t *data, size_t size);
diff --git a/src/third_party/wiredtiger/test/fuzz/modify/fuzz_modify.c b/src/third_party/wiredtiger/test/fuzz/modify/fuzz_modify.c
new file mode 100644
index 00000000000..14307a15dc2
--- /dev/null
+++ b/src/third_party/wiredtiger/test/fuzz/modify/fuzz_modify.c
@@ -0,0 +1,75 @@
+/*-
+ * Public Domain 2014-2020 MongoDB, Inc.
+ * Public Domain 2008-2014 WiredTiger, Inc.
+ *
+ * This is free and unencumbered software released into the public domain.
+ *
+ * Anyone is free to copy, modify, publish, use, compile, sell, or
+ * distribute this software, either in source code form or as a compiled
+ * binary, for any purpose, commercial or non-commercial, and by any
+ * means.
+ *
+ * In jurisdictions that recognize copyright laws, the author or authors
+ * of this software dedicate any and all copyright interest in the
+ * software to the public domain. We make this dedication for the benefit
+ * of the public at large and to the detriment of our heirs and
+ * successors. We intend this dedication to be an overt act of
+ * relinquishment in perpetuity of all present and future rights to this
+ * software under copyright law.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "fuzz_util.h"
+
+int LLVMFuzzerTestOneInput(const uint8_t *, size_t);
+
+/*
+ * LLVMFuzzerTestOneInput --
+ * A fuzzing target for modifies.
+ */
+int
+LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
+{
+ WT_CURSOR *cursor;
+ WT_DECL_ITEM(packed_modify);
+ WT_ITEM buf;
+ WT_MODIFY modify;
+ WT_SESSION_IMPL *session_impl;
+
+ /* We can't do anything sensible with small inputs. */
+ if (size < 10)
+ return (0);
+
+ WT_CLEAR(cursor);
+ WT_CLEAR(buf);
+ WT_CLEAR(modify);
+
+ fuzzutil_setup();
+ session_impl = (WT_SESSION_IMPL *)fuzz_state.session;
+
+ /* Choose some portion of the buffer for the underlying value. */
+ buf.data = &data[0];
+ buf.size = data[0] % size;
+
+ /* The modify data takes the rest. */
+ modify.data.data = &data[buf.size];
+ modify.data.size = modify.size = size - buf.size;
+ modify.offset = data[buf.size] % size;
+
+ /* We're doing this in order to get a cursor since we need one to call the modify helper. */
+ testutil_check(
+ fuzz_state.session->open_cursor(fuzz_state.session, "metadata:", NULL, NULL, &cursor));
+ testutil_check(__wt_modify_pack(cursor, &modify, 1, &packed_modify));
+ testutil_check(__wt_modify_apply_item(session_impl, "u", &buf, packed_modify->data));
+
+ testutil_check(cursor->close(cursor));
+ __wt_scr_free(session_impl, &packed_modify);
+ __wt_buf_free(session_impl, &buf);
+ return (0);
+}
diff --git a/src/third_party/wiredtiger/test/suite/run.py b/src/third_party/wiredtiger/test/suite/run.py
index 7d342a7a471..14642731be0 100755
--- a/src/third_party/wiredtiger/test/suite/run.py
+++ b/src/third_party/wiredtiger/test/suite/run.py
@@ -26,6 +26,10 @@
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
+# [TEST_TAGS]
+# ignored_file
+# [END_TAGS]
+#
# run.py
# Command line test runner
#
diff --git a/src/third_party/wiredtiger/test/suite/test_backup01.py b/src/third_party/wiredtiger/test/suite/test_backup01.py
index 65b6b3735f3..f556ff3ac8a 100644
--- a/src/third_party/wiredtiger/test/suite/test_backup01.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup01.py
@@ -25,6 +25,11 @@
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
+#
+# [TEST_TAGS]
+# backup:correctness:full_backup
+# [END_TAGS]
+#
import glob
import os
diff --git a/src/third_party/wiredtiger/test/suite/test_backup04.py b/src/third_party/wiredtiger/test/suite/test_backup04.py
index a378e5dbd63..80053f5016c 100755
--- a/src/third_party/wiredtiger/test/suite/test_backup04.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup04.py
@@ -76,41 +76,17 @@ class test_backup_target(backup_base):
cursor[simple_key(cursor, i)] = str(i) + ':' + upd * dsize
cursor.close()
- def take_full_backup(self, dir):
- # Open up the backup cursor, and copy the files. Do a full backup.
- cursor = self.session.open_cursor('backup:', None, None)
- self.pr('Full backup to ' + dir + ': ')
- os.mkdir(dir)
- while True:
- ret = cursor.next()
- if ret != 0:
- break
- newfile = cursor.get_key()
- sz = os.path.getsize(newfile)
- self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + dir)
- shutil.copy(newfile, dir)
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
- cursor.close()
-
# Take an incremental backup and then truncate/archive the logs.
- def take_incr_backup(self, dir):
- config = 'target=("log:")'
- cursor = self.session.open_cursor('backup:', None, config)
- while True:
- ret = cursor.next()
- if ret != 0:
- break
- newfile = cursor.get_key()
- sz = os.path.getsize(newfile)
- self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + dir)
- shutil.copy(newfile, dir)
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
- self.session.truncate('log:', cursor, None, None)
- cursor.close()
+ def take_log_incr_backup(self, dir):
+ config = 'target=("log:")'
+ cursor = self.session.open_cursor('backup:', None, config)
+ self.take_full_backup(dir, cursor)
+ self.session.truncate('log:', cursor, None, None)
+ cursor.close()
# Run background inserts while running checkpoints and incremental backups
# repeatedly.
- def test_incremental_backup(self):
+ def test_log_incremental_backup(self):
import sys
# Create the backup directory.
self.session.create(self.uri, "key_format=S,value_format=S")
@@ -118,8 +94,9 @@ class test_backup_target(backup_base):
self.populate_with_string(self.uri, self.dsize, self.nops)
# We need to start the directory for the incremental backup with
- # a full backup. The full backup function creates the directory.
+ # a full backup.
dir = self.dir
+ os.mkdir(dir)
self.take_full_backup(dir)
self.session.checkpoint(None)
@@ -137,11 +114,12 @@ class test_backup_target(backup_base):
self.session.checkpoint(None)
self.pr('Iteration: ' + str(increment))
- self.take_incr_backup(self.dir)
+ self.take_log_incr_backup(self.dir)
# After running, take a full backup. Compare the incremental
# backup to the original database and the full backup database.
full_dir = self.dir + ".full"
+ os.mkdir(full_dir)
self.take_full_backup(full_dir)
self.compare_backups(self.uri, self.dir, full_dir)
self.compare_backups(self.uri, self.dir, './')
diff --git a/src/third_party/wiredtiger/test/suite/test_backup07.py b/src/third_party/wiredtiger/test/suite/test_backup07.py
index 360432690eb..9f51a05e199 100644
--- a/src/third_party/wiredtiger/test/suite/test_backup07.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup07.py
@@ -34,8 +34,7 @@ from wtdataset import simple_key
from wtscenario import make_scenarios
# test_backup07.py
-# Test cursor backup with target URIs, logging and create during backup
-
+# Test cursor backup with target URIs, logging and create during backup.
class test_backup07(backup_base):
dir='backup.dir' # Backup directory name
logmax="100K"
@@ -67,10 +66,8 @@ class test_backup07(backup_base):
# when the backup metadata is created on cursor open and the newly
# created file is not in the cursor list.
- # Open up the backup cursor, create and add data to a new table
- # and then copy the files.
+ # Create and add data to a new table and then copy the files with a full backup.
os.mkdir(self.dir)
- bkup_c = self.session.open_cursor('backup:', None, None)
# Now create and populate the new table. Make sure the log records
# are on disk and will be copied to the backup.
@@ -78,19 +75,9 @@ class test_backup07(backup_base):
self.add_data(self.newuri, 'key', 'value')
self.session.log_flush('sync=on')
- # Now copy the files returned by the backup cursor. This should not
- # include the newly created table.
- while True:
- ret = bkup_c.next()
- if ret != 0:
- break
- newfile = bkup_c.get_key()
- self.assertNotEqual(newfile, self.newuri)
- sz = os.path.getsize(newfile)
- self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir)
- shutil.copy(newfile, self.dir)
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
- bkup_c.close()
+ # Now copy the files using full backup. This should not include the newly
+ # created table.
+ self.take_full_backup(self.dir)
# After the full backup, open and recover the backup database.
# Make sure we properly recover even though the log file will have
diff --git a/src/third_party/wiredtiger/test/suite/test_backup10.py b/src/third_party/wiredtiger/test/suite/test_backup10.py
index 9a74e190b3f..68a14307407 100644
--- a/src/third_party/wiredtiger/test/suite/test_backup10.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup10.py
@@ -73,37 +73,14 @@ class test_backup10(backup_base):
self.add_data(self.uri, 'key', 'value')
self.session.log_flush('sync=on')
- # Now copy the files returned by the backup cursor.
- orig_logs = []
- while True:
- ret = bkup_c.next()
- if ret != 0:
- break
- newfile = bkup_c.get_key()
- sz = os.path.getsize(newfile)
- self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir)
- shutil.copy(newfile, self.dir)
- if "WiredTigerLog" in newfile:
- orig_logs.append(newfile)
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
+ # Now make a full backup and track the log files.
+ all_files = self.take_full_backup(self.dir, bkup_c)
+ orig_logs = [file for file in all_files if "WiredTigerLog" in file]
# Now open a duplicate backup cursor.
config = 'target=("log:")'
dupc = self.session.open_cursor(None, bkup_c, config)
- dup_logs = []
- while True:
- ret = dupc.next()
- if ret != 0:
- break
- newfile = dupc.get_key()
- self.assertTrue("WiredTigerLog" in newfile)
- sz = os.path.getsize(newfile)
- if (newfile not in orig_logs):
- self.pr('DUP: Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir)
- shutil.copy(newfile, self.dir)
- # Record all log files returned for later verification.
- dup_logs.append(newfile)
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
+ dup_logs = self.take_log_backup(bkup_c, self.dir, orig_logs, dupc)
# We expect that the duplicate logs are a superset of the
# original logs. And we expect the difference to be the
@@ -129,7 +106,6 @@ class test_backup10(backup_base):
self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
lambda:self.assertEquals(self.session.open_cursor(None,
dupc, config), 0), msg)
-
dupc.close()
# Test we must use the log target.
diff --git a/src/third_party/wiredtiger/test/suite/test_backup11.py b/src/third_party/wiredtiger/test/suite/test_backup11.py
index a974d505654..2b313a2dc9b 100644
--- a/src/third_party/wiredtiger/test/suite/test_backup11.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup11.py
@@ -54,41 +54,15 @@ class test_backup11(backup_base):
# Add data while the backup cursor is open.
self.add_data(self.uri, 'key', 'value', True)
- # Now copy the files returned by the backup cursor.
- orig_logs = []
- while True:
- ret = bkup_c.next()
- if ret != 0:
- break
- newfile = bkup_c.get_key()
- sz = os.path.getsize(newfile)
- self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir)
- shutil.copy(newfile, self.dir)
- if "WiredTigerLog" in newfile:
- orig_logs.append(newfile)
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
+ # Now make a full backup and track the log files.
+ all_files = self.take_full_backup(self.dir, bkup_c)
+ orig_logs = [file for file in all_files if "WiredTigerLog" in file]
# Now open a duplicate backup cursor.
# We *can* use a log target duplicate on an incremental primary backup so that
# a backup process can get all the log files that occur while that primary cursor
# is open.
- config = 'target=("log:")'
- dupc = self.session.open_cursor(None, bkup_c, config)
- dup_logs = []
- while True:
- ret = dupc.next()
- if ret != 0:
- break
- newfile = dupc.get_key()
- self.assertTrue("WiredTigerLog" in newfile)
- sz = os.path.getsize(newfile)
- if (newfile not in orig_logs):
- self.pr('DUP: Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir)
- shutil.copy(newfile, self.dir)
- # Record all log files returned for later verification.
- dup_logs.append(newfile)
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
- dupc.close()
+ dup_logs = self.take_log_backup(bkup_c, self.dir, orig_logs)
bkup_c.close()
# Add more data
diff --git a/src/third_party/wiredtiger/test/suite/test_backup12.py b/src/third_party/wiredtiger/test/suite/test_backup12.py
index 53ad7845634..7fe72dd43fe 100644
--- a/src/third_party/wiredtiger/test/suite/test_backup12.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup12.py
@@ -58,12 +58,12 @@ class test_backup12(backup_base):
self.add_data(self.uri2, self.bigkey, self.bigval, True)
self.add_data(self.uri_rem, self.bigkey, self.bigval, True)
+ os.mkdir(self.dir)
+ #
# Open up the backup cursor. This causes a new log file to be created.
# That log file is not part of the list returned. This is a full backup
# primary cursor with incremental configured.
- os.mkdir(self.dir)
- #
- # Note, this first backup is actually done before a checkpoint is taken.
+ # Note: this first backup is actually done before a checkpoint is taken.
#
config = 'incremental=(enabled,granularity=1M,this_id="ID1")'
bkup_c = self.session.open_cursor('backup:', None, config)
@@ -71,41 +71,15 @@ class test_backup12(backup_base):
# Add more data while the backup cursor is open.
self.add_data(self.uri, self.bigkey, self.bigval, True)
- # Now copy the files returned by the backup cursor.
- all_files = []
- while True:
- ret = bkup_c.next()
- if ret != 0:
- break
- newfile = bkup_c.get_key()
- sz = os.path.getsize(newfile)
- self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir)
- shutil.copy(newfile, self.dir)
- all_files.append(newfile)
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
+ # Now make a full backup.
+ all_files = self.take_full_backup(self.dir, bkup_c)
# Now open a duplicate backup cursor.
# We *can* use a log target duplicate on an incremental primary backup so that
# a backup process can get all the log files that occur while that primary cursor
# is open.
- config = 'target=("log:")'
- dupc = self.session.open_cursor(None, bkup_c, config)
- dup_logs = []
- while True:
- ret = dupc.next()
- if ret != 0:
- break
- newfile = dupc.get_key()
- self.assertTrue("WiredTigerLog" in newfile)
- sz = os.path.getsize(newfile)
- if (newfile not in all_files):
- self.pr('DUP: Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir)
- shutil.copy(newfile, self.dir)
- # Record all log files returned for later verification.
- dup_logs.append(newfile)
- all_files.append(newfile)
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
- dupc.close()
+ dup_logs = self.take_log_backup(bkup_c, self.dir, all_files)
+ all_files += dup_logs
bkup_c.close()
# Add more data.
@@ -115,50 +89,9 @@ class test_backup12(backup_base):
# Drop a table.
self.session.drop(self.uri_rem)
- # Now do an incremental backup.
- config = 'incremental=(src_id="ID1",this_id="ID2")'
- bkup_c = self.session.open_cursor('backup:', None, config)
- self.pr('Open backup cursor ID1')
- bkup_files = []
- while True:
- ret = bkup_c.next()
- if ret != 0:
- break
- newfile = bkup_c.get_key()
- config = 'incremental=(file=' + newfile + ')'
- self.pr('Open incremental cursor with ' + config)
- dup_cnt = 0
- dupc = self.session.open_cursor(None, bkup_c, config)
- bkup_files.append(newfile)
- all_files.append(newfile)
- while True:
- ret = dupc.next()
- if ret != 0:
- break
- incrlist = dupc.get_keys()
- offset = incrlist[0]
- size = incrlist[1]
- curtype = incrlist[2]
- # 1 is WT_BACKUP_FILE
- # 2 is WT_BACKUP_RANGE
- self.assertTrue(curtype == 1 or curtype == 2)
- if curtype == 1:
- self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir)
- shutil.copy(newfile, self.dir)
- else:
- self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size))
- rfp = open(newfile, "r+b")
- wfp = open(self.dir + '/' + newfile, "w+b")
- rfp.seek(offset, 0)
- wfp.seek(offset, 0)
- buf = rfp.read(size)
- wfp.write(buf)
- rfp.close()
- wfp.close()
- dup_cnt += 1
- dupc.close()
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
- bkup_c.close()
+ # Now do an incremental backup with id 2.
+ (bkup_files, _) = self.take_incr_backup(self.dir, 2)
+ all_files += bkup_files
# We need to remove files in the backup directory that are not in the current backup.
all_set = set(all_files)
diff --git a/src/third_party/wiredtiger/test/suite/test_backup13.py b/src/third_party/wiredtiger/test/suite/test_backup13.py
index 8992440c038..73bc4aad9a5 100644
--- a/src/third_party/wiredtiger/test/suite/test_backup13.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup13.py
@@ -57,18 +57,18 @@ class test_backup13(backup_base):
def simulate_crash_restart(self, olddir, newdir):
''' Simulate a crash from olddir and restart in newdir. '''
- # with the connection still open, copy files to new directory
+ # with the connection still open, copy files to new directory.
shutil.rmtree(newdir, ignore_errors=True)
os.mkdir(newdir)
for fname in os.listdir(olddir):
fullname = os.path.join(olddir, fname)
- # Skip lock file on Windows since it is locked
+ # Skip lock file on Windows since it is locked.
if os.path.isfile(fullname) and \
"WiredTiger.lock" not in fullname and \
"Tmplog" not in fullname and \
"Preplog" not in fullname:
shutil.copy(fullname, newdir)
- # close the original connection and open to new directory
+ # close the original connection and open to new directory.
self.close_conn()
self.conn = self.setUpConnectionOpen(newdir)
self.session = self.setUpSessionOpen(self.conn)
@@ -87,79 +87,26 @@ class test_backup13(backup_base):
def test_backup13(self):
self.session.create(self.uri, "key_format=S,value_format=S")
self.add_data_and_check()
+
+ os.mkdir(self.dir)
+
+ # Add more data while the backup cursor is open.
+ self.add_data_and_check()
+
# Open up the backup cursor. This causes a new log file to be created.
# That log file is not part of the list returned. This is a full backup
# primary cursor with incremental configured.
- os.mkdir(self.dir)
config = 'incremental=(enabled,granularity=1M,this_id="ID1")'
bkup_c = self.session.open_cursor('backup:', None, config)
- # Add more data while the backup cursor is open.
- self.add_data_and_check()
-
- # Now copy the files returned by the backup cursor.
- all_files = []
-
- # We cannot use 'for newfile in bkup_c:' usage because backup cursors don't have
- # values and adding in get_values returns ENOTSUP and causes the usage to fail.
- # If that changes then this, and the use of the duplicate below can change.
- while True:
- ret = bkup_c.next()
- if ret != 0:
- break
- newfile = bkup_c.get_key()
- sz = os.path.getsize(newfile)
- self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir)
- shutil.copy(newfile, self.dir)
- all_files.append(newfile)
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
+ # Now make a full backup and track the files.
+ all_files = self.take_full_backup(self.dir, bkup_c)
bkup_c.close()
-
# Add more data.
self.add_data_and_check()
- # Now do an incremental backup.
- config = 'incremental=(src_id="ID1",this_id="ID2")'
- bkup_c = self.session.open_cursor('backup:', None, config)
- self.pr('Open backup cursor ID1')
- bkup_files = []
- while True:
- ret = bkup_c.next()
- if ret != 0:
- break
- newfile = bkup_c.get_key()
- config = 'incremental=(file=' + newfile + ')'
- self.pr('Open incremental cursor with ' + config)
- dup_cnt = 0
- dupc = self.session.open_cursor(None, bkup_c, config)
- bkup_files.append(newfile)
- all_files.append(newfile)
- while True:
- ret = dupc.next()
- if ret != 0:
- break
- incrlist = dupc.get_keys()
- offset = incrlist[0]
- size = incrlist[1]
- curtype = incrlist[2]
- self.assertTrue(curtype == wiredtiger.WT_BACKUP_FILE or curtype == wiredtiger.WT_BACKUP_RANGE)
- if curtype == wiredtiger.WT_BACKUP_FILE:
- self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir)
- shutil.copy(newfile, self.dir)
- else:
- self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size))
- rfp = open(newfile, "r+b")
- wfp = open(self.dir + '/' + newfile, "w+b")
- rfp.seek(offset, 0)
- wfp.seek(offset, 0)
- buf = rfp.read(size)
- wfp.write(buf)
- rfp.close()
- wfp.close()
- dup_cnt += 1
- dupc.close()
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
- bkup_c.close()
+ # Now do an incremental backup with id 2.
+ (bkup_files, _) = self.take_incr_backup(self.dir, 2)
all_set = set(all_files)
bkup_set = set(bkup_files)
@@ -178,7 +125,6 @@ class test_backup13(backup_base):
# Make sure after a force stop we cannot access old backup info.
config = 'incremental=(src_id="ID1",this_id="ID3")'
-
self.assertRaises(wiredtiger.WiredTigerError,
lambda: self.session.open_cursor('backup:', None, config))
diff --git a/src/third_party/wiredtiger/test/suite/test_backup14.py b/src/third_party/wiredtiger/test/suite/test_backup14.py
index a4933140833..803dffd2562 100644
--- a/src/third_party/wiredtiger/test/suite/test_backup14.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup14.py
@@ -57,125 +57,8 @@ class test_backup14(backup_base):
bigkey = 'Key' * 100
bigval = 'Value' * 100
- def take_full_backup(self):
- if self.counter != 0:
- hdir = self.home_full + '.' + str(self.counter)
- else:
- hdir = self.home_incr
-
- #
- # First time through we take a full backup into the incremental directories. Otherwise only
- # into the appropriate full directory.
- #
- buf = None
- if self.initial_backup == True:
- buf = 'incremental=(granularity=1M,enabled=true,this_id=ID0)'
-
- cursor = self.session.open_cursor('backup:', None, buf)
- while True:
- ret = cursor.next()
- if ret != 0:
- break
- newfile = cursor.get_key()
-
- if self.counter == 0:
- # Take a full backup into each incremental directory
- for i in range(0, self.max_iteration):
- copy_from = newfile
- # If it is a log file, prepend the path.
- if ("WiredTigerLog" in newfile):
- copy_to = self.home_incr + '.' + str(i) + '/' + self.logpath
- else:
- copy_to = self.home_incr + '.' + str(i)
- shutil.copy(copy_from, copy_to)
- else:
- copy_from = newfile
- # If it is log file, prepend the path.
- if ("WiredTigerLog" in newfile):
- copy_to = hdir + '/' + self.logpath
- else:
- copy_to = hdir
-
- shutil.copy(copy_from, copy_to)
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
- cursor.close()
-
- def take_incr_backup(self):
- # Open the backup data source for incremental backup.
- buf = 'incremental=(src_id="ID' + str(self.counter - 1) + '",this_id="ID' + str(self.counter) + '")'
- bkup_c = self.session.open_cursor('backup:', None, buf)
- while True:
- ret = bkup_c.next()
- if ret != 0:
- break
- newfile = bkup_c.get_key()
- h = self.home_incr + '.0'
- copy_from = newfile
- # If it is log file, prepend the path.
- if ("WiredTigerLog" in newfile):
- copy_to = h + '/' + self.logpath
- else:
- copy_to = h
-
- shutil.copy(copy_from, copy_to)
- first = True
- config = 'incremental=(file=' + newfile + ')'
- dup_cnt = 0
- incr_c = self.session.open_cursor(None, bkup_c, config)
-
- # For each file listed, open a duplicate backup cursor and copy the blocks.
- while True:
- ret = incr_c.next()
- if ret != 0:
- break
- incrlist = incr_c.get_keys()
- offset = incrlist[0]
- size = incrlist[1]
- curtype = incrlist[2]
- # 1 is WT_BACKUP_FILE
- # 2 is WT_BACKUP_RANGE
- self.assertTrue(curtype == 1 or curtype == 2)
- if curtype == 1:
- if first == True:
- h = self.home_incr + '.' + str(self.counter)
- first = False
-
- copy_from = newfile
- if ("WiredTigerLog" in newfile):
- copy_to = h + '/' + self.logpath
- else:
- copy_to = h
- shutil.copy(copy_from, copy_to)
- else:
- self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size))
- read_from = newfile
- write_to = self.home_incr + '.' + str(self.counter) + '/' + newfile
- rfp = open(read_from, "r+b")
- wfp = open(write_to, "w+b")
- rfp.seek(offset, 0)
- wfp.seek(offset, 0)
- buf = rfp.read(size)
- wfp.write(buf)
- rfp.close()
- wfp.close()
- dup_cnt += 1
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
- incr_c.close()
-
- # For each file, we want to copy the file into each of the later incremental directories
- for i in range(self.counter, self.max_iteration):
- h = self.home_incr + '.' + str(i)
- copy_from = newfile
- if ("WiredTigerLog" in newfile):
- copy_to = h + '/' + self.logpath
- else:
- copy_to = h
- shutil.copy(copy_from, copy_to)
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
- bkup_c.close()
-
#
- # Remove data from uri (table:main)
+ # Remove data from uri (table:main).
#
def remove_data(self):
c = self.session.open_cursor(self.uri)
@@ -191,7 +74,7 @@ class test_backup14(backup_base):
self.assertEquals(c.remove(), 0)
c.close()
# Increase the counter so that later backups have unique ids.
- self.counter += 1
+ self.bkup_id += 1
#
# This function will add records to the table (table:main), take incremental/full backups and
@@ -202,15 +85,16 @@ class test_backup14(backup_base):
self.initial_backup = True
self.add_data(self.uri, self.bigkey, self.bigval)
- self.take_full_backup()
+ self.take_full_backup(self.home_incr)
self.initial_backup = False
self.session.checkpoint()
self.add_data(self.uri, self.bigkey, self.bigval)
- self.take_full_backup()
- self.take_incr_backup()
- self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.counter))
+ self.take_full_backup(self.home_full)
+ self.take_incr_backup(self.home_incr)
+ self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.bkup_id))
+ self.setup_directories(self.home_incr, self.home_full)
#
# This function will remove all the records from table (table:main), take backup and validate the
@@ -218,9 +102,10 @@ class test_backup14(backup_base):
#
def remove_all_records_validate(self):
self.remove_data()
- self.take_full_backup()
- self.take_incr_backup()
- self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.counter))
+ self.take_full_backup(self.home_full)
+ self.take_incr_backup(self.home_incr)
+ self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.bkup_id))
+ self.setup_directories(self.home_incr, self.home_full)
#
# This function will drop the existing table uri (table:main) that is part of the backups and
@@ -231,18 +116,19 @@ class test_backup14(backup_base):
# Drop main table.
self.session.drop(self.uri)
- # Create uri2 (table:extra)
+ # Create uri2 (table:extra).
self.session.create(self.uri2, "key_format=S,value_format=S")
self.new_table = True
self.add_data(self.uri2, self.bigkey, self.bigval)
- self.take_incr_backup()
+ self.take_incr_backup(self.home_incr)
table_list = 'tablelist.txt'
# Assert if the dropped table (table:main) exists in the incremental folder.
self.runWt(['-R', '-h', self.home, 'list'], outfilename=table_list)
ret = os.system("grep " + self.uri + " " + table_list)
self.assertNotEqual(ret, 0, self.uri + " dropped, but table exists in " + self.home)
+ self.setup_directories(self.home_incr, self.home_full)
#
# This function will create previously dropped table uri (table:main) and add different content to
@@ -251,9 +137,10 @@ class test_backup14(backup_base):
def create_dropped_table_add_new_content(self):
self.session.create(self.uri, "key_format=S,value_format=S")
self.add_data(self.uri, self.bigkey, self.bigval)
- self.take_full_backup()
- self.take_incr_backup()
- self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.counter))
+ self.take_full_backup(self.home_full)
+ self.take_incr_backup(self.home_incr)
+ self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.bkup_id))
+ self.setup_directories(self.home_incr, self.home_full)
#
# This function will insert bulk data in logged and not-logged table, take backups and validate the
@@ -266,25 +153,27 @@ class test_backup14(backup_base):
self.session.create(self.uri_logged, "key_format=S,value_format=S")
self.add_data(self.uri_logged, self.bigkey, self.bigval)
- self.take_full_backup()
- self.take_incr_backup()
- self.compare_backups(self.uri_logged, self.home_full, self.home_incr, str(self.counter))
+ self.take_full_backup(self.home_full)
+ self.take_incr_backup(self.home_incr)
+ self.compare_backups(self.uri_logged, self.home_full, self.home_incr, str(self.bkup_id))
+ self.setup_directories(self.home_incr, self.home_full)
#
# Insert bulk data into uri4 (table:not_logged_table).
#
self.session.create(self.uri_not_logged, "key_format=S,value_format=S,log=(enabled=false)")
self.add_data(self.uri_not_logged, self.bigkey, self.bigval)
- self.take_full_backup()
- self.take_incr_backup()
- self.compare_backups(self.uri_not_logged, self.home_full, self.home_incr, str(self.counter))
+ self.take_full_backup(self.home_full)
+ self.take_incr_backup(self.home_incr)
+ self.compare_backups(self.uri_not_logged, self.home_full, self.home_incr, str(self.bkup_id))
+ self.setup_directories(self.home_incr, self.home_full)
def test_backup14(self):
os.mkdir(self.bkp_home)
self.home = self.bkp_home
self.session.create(self.uri, "key_format=S,value_format=S")
- self.setup_directories(self.max_iteration, self.home_incr, self.home_full, self.logpath)
+ self.setup_directories(self.home_incr, self.home_full)
self.pr('*** Add data, checkpoint, take backups and validate ***')
self.add_data_validate_backups()
diff --git a/src/third_party/wiredtiger/test/suite/test_backup15.py b/src/third_party/wiredtiger/test/suite/test_backup15.py
index 669618c151c..2cbb36d3a1b 100644
--- a/src/third_party/wiredtiger/test/suite/test_backup15.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup15.py
@@ -37,7 +37,7 @@ import glob
# Test cursor backup with a block-based incremental cursor.
class test_backup15(backup_base):
bkp_home = "WT_BLOCK"
- counter=0
+ bkup_id=0
conn_config='cache_size=1G,log=(enabled,file_max=100K)'
logmax="100K"
max_iteration=5
@@ -53,153 +53,12 @@ class test_backup15(backup_base):
logpath = "logpath"
new_table=False
- initial_backup=False
pfx = 'test_backup'
# Set the key and value big enough that we modify a few blocks.
bigkey = 'Key' * 100
bigval = 'Value' * 100
- def range_copy(self, filename, offset, size):
- read_from = filename
- old_to = self.home_incr + '.' + str(self.counter - 1) + '/' + filename
- write_to = self.home_incr + '.' + str(self.counter) + '/' + filename
- rfp = open(read_from, "r+b")
- self.pr('RANGE CHECK file ' + old_to + ' offset ' + str(offset) + ' len ' + str(size))
- rfp2 = open(old_to, "r+b")
- rfp.seek(offset, 0)
- rfp2.seek(offset, 0)
- buf = rfp.read(size)
- buf2 = rfp2.read(size)
- # This assertion tests that the offset range we're given actually changed
- # from the previous backup.
- self.assertNotEqual(buf, buf2)
- wfp = open(write_to, "w+b")
- wfp.seek(offset, 0)
- wfp.write(buf)
- rfp.close()
- rfp2.close()
- wfp.close()
-
- def take_full_backup(self):
- if self.counter != 0:
- hdir = self.home_full + '.' + str(self.counter)
- else:
- hdir = self.home_incr
-
- #
- # First time through we take a full backup into the incremental directories. Otherwise only
- # into the appropriate full directory.
- #
- buf = None
- if self.initial_backup == True:
- buf = 'incremental=(granularity=1M,enabled=true,this_id=ID0)'
-
- bkup_c = self.session.open_cursor('backup:', None, buf)
- # We cannot use 'for newfile in bkup_c:' usage because backup cursors don't have
- # values and adding in get_values returns ENOTSUP and causes the usage to fail.
- # If that changes then this, and the use of the duplicate below can change.
- while True:
- ret = bkup_c.next()
- if ret != 0:
- break
- newfile = bkup_c.get_key()
-
- if self.counter == 0:
- # Take a full backup into each incremental directory
- for i in range(0, self.max_iteration):
- copy_from = newfile
- # If it is a log file, prepend the path.
- if ("WiredTigerLog" in newfile):
- copy_to = self.home_incr + '.' + str(i) + '/' + self.logpath
- else:
- copy_to = self.home_incr + '.' + str(i)
- shutil.copy(copy_from, copy_to)
- else:
- copy_from = newfile
- # If it is log file, prepend the path.
- if ("WiredTigerLog" in newfile):
- copy_to = hdir + '/' + self.logpath
- else:
- copy_to = hdir
-
- shutil.copy(copy_from, copy_to)
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
- bkup_c.close()
-
- def take_incr_backup(self):
- self.assertTrue(self.counter > 0)
- # Open the backup data source for incremental backup.
- buf = 'incremental=(src_id="ID' + str(self.counter - 1) + '",this_id="ID' + str(self.counter) + '")'
- self.pr(buf)
- bkup_c = self.session.open_cursor('backup:', None, buf)
-
- # We cannot use 'for newfile in bkup_c:' usage because backup cursors don't have
- # values and adding in get_values returns ENOTSUP and causes the usage to fail.
- # If that changes then this, and the use of the duplicate below can change.
- while True:
- ret = bkup_c.next()
- if ret != 0:
- break
- newfile = bkup_c.get_key()
- h = self.home_incr + '.0'
- copy_from = newfile
- # If it is log file, prepend the path.
- if ("WiredTigerLog" in newfile):
- copy_to = h + '/' + self.logpath
- else:
- copy_to = h
-
- shutil.copy(copy_from, copy_to)
- first = True
- config = 'incremental=(file=' + newfile + ')'
- dup_cnt = 0
- # For each file listed, open a duplicate backup cursor and copy the blocks.
- incr_c = self.session.open_cursor(None, bkup_c, config)
-
- # We cannot use 'for newfile in incr_c:' usage because backup cursors don't have
- # values and adding in get_values returns ENOTSUP and causes the usage to fail.
- # If that changes then this, and the use of the duplicate below can change.
- while True:
- ret = incr_c.next()
- if ret != 0:
- break
- incrlist = incr_c.get_keys()
- offset = incrlist[0]
- size = incrlist[1]
- curtype = incrlist[2]
- self.assertTrue(curtype == wiredtiger.WT_BACKUP_FILE or curtype == wiredtiger.WT_BACKUP_RANGE)
- if curtype == wiredtiger.WT_BACKUP_FILE:
- # Copy the whole file.
- if first == True:
- h = self.home_incr + '.' + str(self.counter)
- first = False
-
- copy_from = newfile
- if ("WiredTigerLog" in newfile):
- copy_to = h + '/' + self.logpath
- else:
- copy_to = h
- shutil.copy(copy_from, copy_to)
- else:
- # Copy the block range.
- self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size))
- self.range_copy(newfile, offset, size)
- dup_cnt += 1
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
- incr_c.close()
-
- # For each file, we want to copy it into each of the later incremental directories.
- for i in range(self.counter, self.max_iteration):
- h = self.home_incr + '.' + str(i)
- copy_from = newfile
- if ("WiredTigerLog" in newfile):
- copy_to = h + '/' + self.logpath
- else:
- copy_to = h
- shutil.copy(copy_from, copy_to)
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
- bkup_c.close()
#
# Add data to the given uri.
#
@@ -228,23 +87,22 @@ class test_backup15(backup_base):
self.mult += 1
# Increase the counter so that later backups have unique ids.
if self.initial_backup == False:
- self.counter += 1
+ self.bkup_id += 1
def test_backup15(self):
os.mkdir(self.bkp_home)
self.home = self.bkp_home
self.session.create(self.uri, "key_format=S,value_format=S")
- self.setup_directories(self.max_iteration, self.home_incr, self.home_full, self.logpath)
+ self.setup_directories(self.home_incr, self.home_full)
self.pr('*** Add data, checkpoint, take backups and validate ***')
self.pr('Adding initial data')
self.initial_backup = True
self.add_complex_data(self.uri)
- self.take_full_backup()
+ self.take_full_backup(self.home_incr)
self.initial_backup = False
self.session.checkpoint()
-
# Each call now to take a full backup will make a copy into a full directory. Then
# each incremental will take an incremental backup and we can compare them.
for i in range(1, self.max_iteration):
@@ -253,12 +111,13 @@ class test_backup15(backup_base):
# Swap the order of the full and incremental backups. It should not matter. They
# should not interfere with each other.
if i % 2 == 0:
- self.take_full_backup()
- self.take_incr_backup()
+ self.take_full_backup(self.home_full)
+ self.take_incr_backup(self.home_incr)
else:
- self.take_incr_backup()
- self.take_full_backup()
- self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.counter))
+ self.take_incr_backup(self.home_incr)
+ self.take_full_backup(self.home_full)
+ self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.bkup_id))
+ self.setup_directories(self.home_incr, self.home_full)
if __name__ == '__main__':
wttest.run()
diff --git a/src/third_party/wiredtiger/test/suite/test_backup16.py b/src/third_party/wiredtiger/test/suite/test_backup16.py
index a589cbd9691..1d3395e91fc 100644
--- a/src/third_party/wiredtiger/test/suite/test_backup16.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup16.py
@@ -38,7 +38,6 @@ from wtscenario import make_scenarios
class test_backup16(backup_base):
conn_config='cache_size=1G,log=(enabled,file_max=100K)'
- counter=1
logmax='100K'
# Define the table name and its on-disk file name together.
@@ -61,14 +60,14 @@ class test_backup16(backup_base):
bigval = 'Value' * 10
mult = 1
- counter = 1
+ bkup_id = 1
nops = 10
-
+ initial_backup = True
def verify_incr_backup(self, expected_file_list):
- bkup_config = ('incremental=(src_id="ID' + str(self.counter - 1) +
- '",this_id="ID' + str(self.counter) + '")')
+ bkup_config = ('incremental=(src_id="ID' + str(self.bkup_id - 1) +
+ '",this_id="ID' + str(self.bkup_id) + '")')
bkup_cur = self.session.open_cursor('backup:', None, bkup_config)
- self.counter += 1
+ self.bkup_id += 1
num_files = 0
# Verify the files included in the incremental backup are the ones we expect.
diff --git a/src/third_party/wiredtiger/test/suite/test_backup17.py b/src/third_party/wiredtiger/test/suite/test_backup17.py
index 04bff3ea9d8..3639bbef58e 100644
--- a/src/third_party/wiredtiger/test/suite/test_backup17.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup17.py
@@ -52,60 +52,21 @@ class test_backup17(backup_base):
nops = 1000
- def take_incr_backup(self, id, consolidate):
- # Open the backup data source for incremental backup.
- buf = 'incremental=(src_id="ID' + str(id - 1) + '",this_id="ID' + str(id) + '"'
- if consolidate:
- buf += ',consolidate=true'
- buf += ')'
- bkup_c = self.session.open_cursor('backup:', None, buf)
- lens = []
+ #
+ # Given a list of file lengths: when the consolidate option is used, we expect the
+ # incremental backup to collapse adjacent blocks, returning lengths that exceed the
+ # granularity setting, and we verify that we see multiple blocks. If consolidate is
+ # not used, no block length should ever be greater than the granularity setting.
+ #
+ def check_consolidate_sizes(self, file_lens, consolidate):
saw_multiple = False
- while True:
- ret = bkup_c.next()
- if ret != 0:
- break
- newfile = bkup_c.get_key()
- config = 'incremental=(file=' + newfile + ')'
- self.pr('Open incremental cursor with ' + config)
- dup_cnt = 0
- dupc = self.session.open_cursor(None, bkup_c, config)
- while True:
- ret = dupc.next()
- if ret != 0:
- break
- incrlist = dupc.get_keys()
- offset = incrlist[0]
- size = incrlist[1]
- curtype = incrlist[2]
- # 1 is WT_BACKUP_FILE
- # 2 is WT_BACKUP_RANGE
- self.assertTrue(curtype == 1 or curtype == 2)
- if curtype == 1:
- self.pr('Copy from: ' + newfile + ' (' + str(size) + ') to ' + self.dir)
- shutil.copy(newfile, self.dir)
- else:
- self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size))
- lens.append(size)
- rfp = open(newfile, "r+b")
- wfp = open(self.dir + '/' + newfile, "w+b")
- rfp.seek(offset, 0)
- wfp.seek(offset, 0)
- if size > self.granval:
- saw_multiple = True
- buf = rfp.read(size)
- wfp.write(buf)
- rfp.close()
- wfp.close()
- dup_cnt += 1
- dupc.close()
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
- bkup_c.close()
+ for size in file_lens:
+ if size > self.granval:
+ saw_multiple = True
if consolidate:
self.assertTrue(saw_multiple)
else:
self.assertFalse(saw_multiple)
- return lens
def test_backup17(self):
@@ -115,25 +76,15 @@ class test_backup17(backup_base):
self.mult = 0
self.add_data(self.uri2, self.bigkey, self.bigval, True)
+ os.mkdir(self.dir)
# Open up the backup cursor. This causes a new log file to be created.
# That log file is not part of the list returned. This is a full backup
# primary cursor with incremental configured.
- os.mkdir(self.dir)
config = 'incremental=(enabled,granularity=%s,this_id="ID1")' % self.gran
bkup_c = self.session.open_cursor('backup:', None, config)
- # Now copy the files returned by the backup cursor.
- all_files = []
- while True:
- ret = bkup_c.next()
- if ret != 0:
- break
- newfile = bkup_c.get_key()
- sz = os.path.getsize(newfile)
- self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir)
- shutil.copy(newfile, self.dir)
- all_files.append(newfile)
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
+ # Now make a full backup and track the log files.
+ self.take_full_backup(self.dir, bkup_c)
bkup_c.close()
# This is the main part of the test for consolidate. Add data to the first table.
@@ -143,12 +94,16 @@ class test_backup17(backup_base):
self.mult = 1
self.add_data(self.uri, self.bigkey, self.bigval, True)
- uri1_lens = self.take_incr_backup(2, False)
+ # Do an incremental backup with id 2.
+ (_, uri1_lens) = self.take_incr_backup(self.dir, 2, False)
+ self.check_consolidate_sizes(uri1_lens, False)
self.mult = 1
self.add_data(self.uri2, self.bigkey, self.bigval, True)
- uri2_lens = self.take_incr_backup(3, True)
+ # Now do an incremental backup with id 3.
+ (_, uri2_lens) = self.take_incr_backup(self.dir, 3, True)
+ self.check_consolidate_sizes(uri2_lens, True)
# Assert that we recorded fewer lengths on the consolidated backup.
self.assertLess(len(uri2_lens), len(uri1_lens))
diff --git a/src/third_party/wiredtiger/test/suite/test_backup19.py b/src/third_party/wiredtiger/test/suite/test_backup19.py
index c94bf381790..20b43f39ea7 100644
--- a/src/third_party/wiredtiger/test/suite/test_backup19.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup19.py
@@ -37,7 +37,7 @@ import glob
# Test cursor backup with a block-based incremental cursor source id only.
class test_backup19(backup_base):
bkp_home = "WT_BLOCK"
- counter=0
+ bkup_id=0
conn_config='cache_size=1G,log=(enabled,file_max=100K)'
logmax="100K"
mult=0
@@ -45,6 +45,7 @@ class test_backup19(backup_base):
savefirst=0
savekey='NOTSET'
uri="table:main"
+ max_iteration=2
dir='backup.dir' # Backup directory name
home_full = "WT_BLOCK_LOG_FULL"
@@ -52,154 +53,12 @@ class test_backup19(backup_base):
logpath = "logpath"
new_table=False
- initial_backup=False
pfx = 'test_backup'
# Set the key and value big enough that we modify a few blocks.
bigkey = 'Key' * 100
bigval = 'Value' * 100
- def range_copy(self, filename, offset, size):
- read_from = filename
- old_to = self.home_incr + '.' + str(self.counter - 1) + '/' + filename
- write_to = self.home_incr + '.' + str(self.counter) + '/' + filename
- rfp = open(read_from, "r+b")
- self.pr('RANGE CHECK file ' + old_to + ' offset ' + str(offset) + ' len ' + str(size))
- rfp2 = open(old_to, "r+b")
- rfp.seek(offset, 0)
- rfp2.seek(offset, 0)
- buf = rfp.read(size)
- buf2 = rfp2.read(size)
- # This assertion tests that the offset range we're given actually changed
- # from the previous backup.
- self.assertNotEqual(buf, buf2)
- wfp = open(write_to, "w+b")
- wfp.seek(offset, 0)
- wfp.write(buf)
- rfp.close()
- rfp2.close()
- wfp.close()
-
- def take_full_backup(self):
- if self.counter != 0:
- hdir = self.home_full + '.' + str(self.counter)
- else:
- hdir = self.home_incr
-
- #
- # First time through we take a full backup into the incremental directories. Otherwise only
- # into the appropriate full directory.
- #
- buf = None
- if self.initial_backup == True:
- buf = 'incremental=(granularity=1M,enabled=true,this_id=ID0)'
-
- bkup_c = self.session.open_cursor('backup:', None, buf)
- # We cannot use 'for newfile in bkup_c:' usage because backup cursors don't have
- # values and adding in get_values returns ENOTSUP and causes the usage to fail.
- # If that changes then this, and the use of the duplicate below can change.
- while True:
- ret = bkup_c.next()
- if ret != 0:
- break
- newfile = bkup_c.get_key()
-
- if self.counter == 0:
- # Take a full backup into each incremental directory
- for i in range(0, 2):
- copy_from = newfile
- # If it is a log file, prepend the path.
- if ("WiredTigerLog" in newfile):
- copy_to = self.home_incr + '.' + str(i) + '/' + self.logpath
- else:
- copy_to = self.home_incr + '.' + str(i)
- shutil.copy(copy_from, copy_to)
- else:
- copy_from = newfile
- # If it is log file, prepend the path.
- if ("WiredTigerLog" in newfile):
- copy_to = hdir + '/' + self.logpath
- else:
- copy_to = hdir
-
- shutil.copy(copy_from, copy_to)
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
- bkup_c.close()
-
- def take_incr_backup(self):
- self.assertTrue(self.counter > 0)
- # Open the backup data source for incremental backup.
- buf = 'incremental=(src_id="ID' + str(self.counter - 1) + '")'
- self.pr(buf)
- bkup_c = self.session.open_cursor('backup:', None, buf)
-
- # We cannot use 'for newfile in bkup_c:' usage because backup cursors don't have
- # values and adding in get_values returns ENOTSUP and causes the usage to fail.
- # If that changes then this, and the use of the duplicate below can change.
- while True:
- ret = bkup_c.next()
- if ret != 0:
- break
- newfile = bkup_c.get_key()
- h = self.home_incr + '.0'
- copy_from = newfile
- # If it is log file, prepend the path.
- if ("WiredTigerLog" in newfile):
- copy_to = h + '/' + self.logpath
- else:
- copy_to = h
-
- shutil.copy(copy_from, copy_to)
- first = True
- config = 'incremental=(file=' + newfile + ')'
- dup_cnt = 0
- # For each file listed, open a duplicate backup cursor and copy the blocks.
- incr_c = self.session.open_cursor(None, bkup_c, config)
-
- # We cannot use 'for newfile in incr_c:' usage because backup cursors don't have
- # values and adding in get_values returns ENOTSUP and causes the usage to fail.
- # If that changes then this, and the use of the duplicate below can change.
- while True:
- ret = incr_c.next()
- if ret != 0:
- break
- incrlist = incr_c.get_keys()
- offset = incrlist[0]
- size = incrlist[1]
- curtype = incrlist[2]
- self.assertTrue(curtype == wiredtiger.WT_BACKUP_FILE or curtype == wiredtiger.WT_BACKUP_RANGE)
- if curtype == wiredtiger.WT_BACKUP_FILE:
- # Copy the whole file.
- if first == True:
- h = self.home_incr + '.' + str(self.counter)
- first = False
-
- copy_from = newfile
- if ("WiredTigerLog" in newfile):
- copy_to = h + '/' + self.logpath
- else:
- copy_to = h
- shutil.copy(copy_from, copy_to)
- else:
- # Copy the block range.
- self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size))
- self.range_copy(newfile, offset, size)
- dup_cnt += 1
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
- incr_c.close()
-
- # For each file, we want to copy it into each of the later incremental directories.
- for i in range(self.counter, 2):
- h = self.home_incr + '.' + str(i)
- copy_from = newfile
- if ("WiredTigerLog" in newfile):
- copy_to = h + '/' + self.logpath
- else:
- copy_to = h
- shutil.copy(copy_from, copy_to)
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
- bkup_c.close()
-
#
# Add data to the given uri.
#
@@ -228,27 +87,28 @@ class test_backup19(backup_base):
self.mult += 1
# Increase the counter so that later backups have unique ids.
if self.initial_backup == False:
- self.counter += 1
+ self.bkup_id += 1
def test_backup19(self):
os.mkdir(self.bkp_home)
self.home = self.bkp_home
self.session.create(self.uri, "key_format=S,value_format=S")
- self.setup_directories(2, self.home_incr, self.home_full, self.logpath)
+ self.setup_directories(self.home_incr, self.home_full)
self.pr('*** Add data, checkpoint, take backups and validate ***')
self.pr('Adding initial data')
self.initial_backup = True
self.add_complex_data(self.uri)
- self.take_full_backup()
+ self.take_full_backup(self.home_incr)
self.initial_backup = False
self.session.checkpoint()
self.add_complex_data(self.uri)
self.session.checkpoint()
- self.take_full_backup()
- self.take_incr_backup()
- self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.counter))
+
+ self.take_full_backup(self.home_full)
+ self.take_incr_backup(self.home_incr)
+ self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.bkup_id))
if __name__ == '__main__':
wttest.run()
diff --git a/src/third_party/wiredtiger/test/suite/test_checkpoint02.py b/src/third_party/wiredtiger/test/suite/test_checkpoint02.py
index c64e1c32596..7777d9b0c80 100755
--- a/src/third_party/wiredtiger/test/suite/test_checkpoint02.py
+++ b/src/third_party/wiredtiger/test/suite/test_checkpoint02.py
@@ -27,7 +27,7 @@
# OTHER DEALINGS IN THE SOFTWARE.
#
# [TEST_TAGS]
-# checkpoint:correctness:checkpoint_data
+# checkpoints:correctness:checkpoint_data
# [END_TAGS]
#
diff --git a/src/third_party/wiredtiger/test/suite/test_checkpoint03.py b/src/third_party/wiredtiger/test/suite/test_checkpoint03.py
index 2f4abdb532e..3cc694fd7a5 100644
--- a/src/third_party/wiredtiger/test/suite/test_checkpoint03.py
+++ b/src/third_party/wiredtiger/test/suite/test_checkpoint03.py
@@ -27,7 +27,7 @@
# OTHER DEALINGS IN THE SOFTWARE.
#
# [TEST_TAGS]
-# checkpoint:correctness:checkpoint_data
+# checkpoints:correctness:checkpoint_data
# [END_TAGS]
#
# test_checkpoint03.py
diff --git a/src/third_party/wiredtiger/test/suite/test_hs15.py b/src/third_party/wiredtiger/test/suite/test_hs15.py
index ffebad7afa9..7690a11a444 100644
--- a/src/third_party/wiredtiger/test/suite/test_hs15.py
+++ b/src/third_party/wiredtiger/test/suite/test_hs15.py
@@ -25,6 +25,11 @@
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
+#
+# [TEST_TAGS]
+# caching_eviction:correctness:written_data
+# [END_TAGS]
+#
import time, wiredtiger, wttest
diff --git a/src/third_party/wiredtiger/test/suite/test_prepare13.py b/src/third_party/wiredtiger/test/suite/test_prepare13.py
new file mode 100644
index 00000000000..e99c3090120
--- /dev/null
+++ b/src/third_party/wiredtiger/test/suite/test_prepare13.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+#
+# Public Domain 2014-2020 MongoDB, Inc.
+# Public Domain 2008-2014 WiredTiger, Inc.
+#
+# This is free and unencumbered software released into the public domain.
+#
+# Anyone is free to copy, modify, publish, use, compile, sell, or
+# distribute this software, either in source code form or as a compiled
+# binary, for any purpose, commercial or non-commercial, and by any
+# means.
+#
+# In jurisdictions that recognize copyright laws, the author or authors
+# of this software dedicate any and all copyright interest in the
+# software to the public domain. We make this dedication for the benefit
+# of the public at large and to the detriment of our heirs and
+# successors. We intend this dedication to be an overt act of
+# relinquishment in perpetuity of all present and future rights to this
+# software under copyright law.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# test_prepare13.py
+# Fast-truncate fails when a page contains prepared updates.
+import wiredtiger, wttest
+from wtdataset import simple_key, simple_value
+
+def timestamp_str(t):
+ return '%x' % t
+class test_prepare13(wttest.WiredTigerTestCase):
+ # Force a small cache.
+ conn_config = 'cache_size=10MB,statistics=(all),statistics_log=(json,on_close,wait=1)'
+
+ def test_prepare(self):
+ nrows = 20000
+ # Pin oldest and stable to timestamp 1.
+ self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(1) +
+ ',stable_timestamp=' + timestamp_str(1))
+
+ # Create a large table with lots of pages.
+ uri = "table:test_prepare13"
+ config = 'allocation_size=512,leaf_page_max=512,key_format=S,value_format=S'
+ self.session.create(uri, config)
+ cursor = self.session.open_cursor(uri)
+ for i in range(1, nrows):
+ cursor[simple_key(cursor, i)] = simple_value(cursor, i)
+ cursor.close()
+
+ # Prepare a record.
+ self.session.begin_transaction()
+ cursor = self.session.open_cursor(uri)
+ cursor[simple_key(cursor, 1000)] = "replacement_value"
+ cursor.close()
+ self.session.prepare_transaction('prepare_timestamp=' + timestamp_str(10))
+
+ try:
+ # Pin oldest and stable to timestamp 10.
+ self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
+ ',stable_timestamp=' + timestamp_str(10))
+
+ # Open a separate session and cursor and perform updates to let prepared update to evict.
+ s = self.conn.open_session()
+ cursor = s.open_cursor(uri, None)
+ for i in range(2000, nrows):
+ s.begin_transaction()
+ cursor[simple_key(cursor, i)] = simple_value(cursor, i)
+ s.commit_transaction('commit_timestamp=' + timestamp_str(20))
+ cursor.close()
+
+ # Truncate the middle chunk and expect a conflict.
+ preparemsg = '/conflict with a prepared update/'
+ s.begin_transaction()
+ c1 = s.open_cursor(uri, None)
+ c1.set_key(simple_key(c1, 100))
+ c2 = s.open_cursor(uri, None)
+ c2.set_key(simple_key(c1, nrows))
+ self.assertRaisesException(wiredtiger.WiredTigerError, lambda:s.truncate(None, c1, c2, None), preparemsg)
+ c1.close()
+ c2.close()
+ s.rollback_transaction()
+
+ finally:
+ self.session.timestamp_transaction('commit_timestamp=' + timestamp_str(50))
+ self.session.timestamp_transaction('durable_timestamp=' + timestamp_str(50))
+ self.session.commit_transaction()
+
+ s.close()
+
+if __name__ == '__main__':
+ wttest.run()
diff --git a/src/third_party/wiredtiger/test/suite/test_rollback_to_stable14.py b/src/third_party/wiredtiger/test/suite/test_rollback_to_stable14.py
index ea88a33a066..948ed1b2a8f 100755
--- a/src/third_party/wiredtiger/test/suite/test_rollback_to_stable14.py
+++ b/src/third_party/wiredtiger/test/suite/test_rollback_to_stable14.py
@@ -41,6 +41,9 @@ def timestamp_str(t):
def mod_val(value, char, location, nbytes=1):
return value[0:location] + char + value[location+nbytes:]
+def append_val(value, char):
+ return value + char
+
def retry_rollback(self, name, txn_session, code):
retry_limit = 100
retries = 0
@@ -300,5 +303,101 @@ class test_rollback_to_stable14(test_rollback_to_stable_base):
# The test may output the following message in eviction under cache pressure. Ignore that.
self.ignoreStdoutPatternIfExists("oldest pinned transaction ID rolled back for eviction")
+ def test_rollback_to_stable_same_ts_append(self):
+ nrows = 1500
+
+ # Create a table without logging.
+ self.pr("create/populate table")
+ uri = "table:rollback_to_stable14"
+ ds = SimpleDataSet(
+ self, uri, 0, key_format="i", value_format="S", config='log=(enabled=false)')
+ ds.populate()
+
+ # Pin oldest and stable to timestamp 10.
+ self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
+ ',stable_timestamp=' + timestamp_str(10))
+
+ value_a = "aaaaa" * 100
+
+ value_modQ = append_val(value_a, 'Q')
+ value_modR = append_val(value_modQ, 'R')
+ value_modS = append_val(value_modR, 'S')
+ value_modT = append_val(value_modS, 'T')
+
+ # Perform a combination of modifies and updates.
+ self.pr("large updates and modifies")
+ self.large_updates(uri, value_a, ds, nrows, 20)
+ self.large_modifies(uri, 'Q', ds, len(value_a), 1, nrows, 30)
+ # Prepare cannot always use the same timestamp, so use different timestamps that are aborted.
+ if self.prepare:
+ self.large_modifies(uri, 'R', ds, len(value_modQ), 1, nrows, 51)
+ self.large_modifies(uri, 'S', ds, len(value_modR), 1, nrows, 55)
+ self.large_modifies(uri, 'T', ds, len(value_modS), 1, nrows, 60)
+ else:
+ self.large_modifies(uri, 'R', ds, len(value_modQ), 1, nrows, 60)
+ self.large_modifies(uri, 'S', ds, len(value_modR), 1, nrows, 60)
+ self.large_modifies(uri, 'T', ds, len(value_modS), 1, nrows, 60)
+
+ # Verify data is visible and correct.
+ self.check(value_a, uri, nrows, 20)
+ self.check(value_modQ, uri, nrows, 30)
+ self.check(value_modT, uri, nrows, 60)
+
+ self.conn.set_timestamp('stable_timestamp=' + timestamp_str(50))
+
+ # Create a checkpoint thread
+ done = threading.Event()
+ ckpt = checkpoint_thread(self.conn, done)
+ try:
+ self.pr("start checkpoint")
+ ckpt.start()
+
+ # Perform several modifies in parallel with checkpoint.
+ # Rollbacks may occur when checkpoint is running, so retry as needed.
+ self.pr("modifies")
+ retry_rollback(self, 'modify ds1, W', None,
+ lambda: self.large_modifies(uri, 'W', ds, len(value_modT), 1, nrows, 70))
+ retry_rollback(self, 'modify ds1, X', None,
+ lambda: self.large_modifies(uri, 'X', ds, len(value_modT) + 1, 1, nrows, 80))
+ retry_rollback(self, 'modify ds1, Y', None,
+ lambda: self.large_modifies(uri, 'Y', ds, len(value_modT) + 2, 1, nrows, 90))
+ retry_rollback(self, 'modify ds1, Z', None,
+ lambda: self.large_modifies(uri, 'Z', ds, len(value_modT) + 3, 1, nrows, 100))
+ finally:
+ done.set()
+ ckpt.join()
+
+ # Simulate a server crash and restart.
+ self.pr("restart")
+ self.simulate_crash_restart(".", "RESTART")
+ self.pr("restart complete")
+
+ stat_cursor = self.session.open_cursor('statistics:', None, None)
+ calls = stat_cursor[stat.conn.txn_rts][2]
+ hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
+ hs_restore_updates = stat_cursor[stat.conn.txn_rts_hs_restore_updates][2]
+ hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
+ keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
+ keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
+ pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
+ upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
+ stat_cursor.close()
+
+ self.assertEqual(calls, 0)
+ self.assertEqual(keys_removed, 0)
+ self.assertEqual(hs_restore_updates, nrows)
+ self.assertEqual(keys_restored, 0)
+ self.assertEqual(upd_aborted, 0)
+ self.assertGreater(pages_visited, 0)
+ self.assertGreaterEqual(hs_removed, nrows * 3)
+ self.assertGreaterEqual(hs_sweep, 0)
+
+ # Check that the correct data is seen at and after the stable timestamp.
+ self.check(value_a, uri, nrows, 20)
+ self.check(value_modQ, uri, nrows, 30)
+
+ # The test may output the following message in eviction under cache pressure. Ignore that.
+ self.ignoreStdoutPatternIfExists("oldest pinned transaction ID rolled back for eviction")
+
if __name__ == '__main__':
wttest.run()
diff --git a/src/third_party/wiredtiger/test/suite/test_rollback_to_stable15.py b/src/third_party/wiredtiger/test/suite/test_rollback_to_stable15.py
new file mode 100644
index 00000000000..9fbb9bf552a
--- /dev/null
+++ b/src/third_party/wiredtiger/test/suite/test_rollback_to_stable15.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+#
+# Public Domain 2014-2020 MongoDB, Inc.
+# Public Domain 2008-2014 WiredTiger, Inc.
+#
+# This is free and unencumbered software released into the public domain.
+#
+# Anyone is free to copy, modify, publish, use, compile, sell, or
+# distribute this software, either in source code form or as a compiled
+# binary, for any purpose, commercial or non-commercial, and by any
+# means.
+#
+# In jurisdictions that recognize copyright laws, the author or authors
+# of this software dedicate any and all copyright interest in the
+# software to the public domain. We make this dedication for the benefit
+# of the public at large and to the detriment of our heirs and
+# successors. We intend this dedication to be an overt act of
+# relinquishment in perpetuity of all present and future rights to this
+# software under copyright law.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+from helper import copy_wiredtiger_home
+import wiredtiger, wttest, unittest
+from wiredtiger import stat
+from wtdataset import SimpleDataSet
+from wtscenario import make_scenarios
+
+def timestamp_str(t):
+ return '%x' % t
+
+# test_rollback_to_stable15.py
+# Test that roll back to stable handles updates present in the
+# update-list for both fixed length and variable length column store.
+# Eviction is set to false, so that everything persists in memory.
+class test_rollback_to_stable15(wttest.WiredTigerTestCase):
+ conn_config = 'cache_size=200MB,statistics=(all),debug_mode=(eviction=false)'
+ session_config = 'isolation=snapshot'
+ key_format_values = [
+ ('column', dict(key_format='r')),
+ ('integer', dict(key_format='i')),
+ ]
+ value_format_values = [
+ # Fixed length
+ ('fixed', dict(value_format='8t')),
+ # Variable length
+ ('variable', dict(value_format='i')),
+ ]
+ scenarios = make_scenarios(key_format_values, value_format_values)
+
+ def check(self, check_value, uri, nrows, read_ts):
+ session = self.session
+ if read_ts == 0:
+ session.begin_transaction()
+ else:
+ session.begin_transaction('read_timestamp=' + timestamp_str(read_ts))
+ cursor = session.open_cursor(uri)
+ count = 0
+ for k, v in cursor:
+ self.assertEqual(v, check_value)
+ count += 1
+ session.commit_transaction()
+ self.assertEqual(count, nrows)
+
+ def test_rollback_to_stable(self):
+ # Create a table.
+ uri = "table:rollback_to_stable15"
+ nrows = 2000
+ create_params = 'key_format={},value_format={}'.format(self.key_format, self.value_format)
+ self.session.create(uri, create_params)
+ cursor = self.session.open_cursor(uri)
+
+ # Pin oldest and stable to timestamp 1.
+ self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(1) +
+ ',stable_timestamp=' + timestamp_str(1))
+
+ value20 = 0x20
+ value30 = 0x30
+ value40 = 0x40
+ value50 = 0x50
+
+ #Insert value20 at timestamp 2
+ for i in range(1, nrows):
+ self.session.begin_transaction()
+ cursor[i] = value20
+ self.session.commit_transaction('commit_timestamp=' + timestamp_str(2))
+
+ #First Update to value 30 at timestamp 5
+ for i in range(1, nrows):
+ self.session.begin_transaction()
+ cursor[i] = value30
+ self.session.commit_transaction('commit_timestamp=' + timestamp_str(5))
+
+ #Set stable timestamp to 2
+ self.conn.set_timestamp('stable_timestamp=' + timestamp_str(2))
+ self.conn.rollback_to_stable()
+ # Check that only value20 is available
+ self.check(value20, uri, nrows - 1, 2)
+
+ #Second Update to value30 at timestamp 7
+ for i in range(1, nrows):
+ self.session.begin_transaction()
+ cursor[i] = value30
+ self.session.commit_transaction('commit_timestamp=' + timestamp_str(7))
+
+ #Third Update to value40 at timestamp 9
+ for i in range(1, nrows):
+ self.session.begin_transaction()
+ cursor[i] = value40
+ self.session.commit_transaction('commit_timestamp=' + timestamp_str(9))
+
+ #Set stable timestamp to 7
+ self.conn.set_timestamp('stable_timestamp=' + timestamp_str(7))
+ self.conn.rollback_to_stable()
+ #Check that only value30 is available
+ self.check(value30, uri, nrows - 1, 7)
+
+ stat_cursor = self.session.open_cursor('statistics:', None, None)
+ calls = stat_cursor[stat.conn.txn_rts][2]
+ upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
+ stat_cursor.close()
+ self.assertEqual(upd_aborted, (nrows*2) - 2)
+ self.assertEqual(calls, 2)
+
+ self.session.close()
+
+if __name__ == '__main__':
+ wttest.run()
diff --git a/src/third_party/wiredtiger/test/suite/test_txn08.py b/src/third_party/wiredtiger/test/suite/test_txn08.py
index 0f82f079867..f750f0d8dae 100644
--- a/src/third_party/wiredtiger/test/suite/test_txn08.py
+++ b/src/third_party/wiredtiger/test/suite/test_txn08.py
@@ -64,10 +64,10 @@ class test_txn08(wttest.WiredTigerTestCase, suite_subprocess):
#
# Run printlog and make sure it exits with zero status.
#
- self.runWt(['printlog'], outfilename='printlog.out')
+ self.runWt(['printlog', '-u'], outfilename='printlog.out')
self.check_file_contains('printlog.out',
'\\u0001\\u0002abcd\\u0003\\u0004')
- self.runWt(['printlog', '-x'], outfilename='printlog-hex.out')
+ self.runWt(['printlog', '-u','-x'], outfilename='printlog-hex.out')
self.check_file_contains('printlog-hex.out',
'\\u0001\\u0002abcd\\u0003\\u0004')
self.check_file_contains('printlog-hex.out',
diff --git a/src/third_party/wiredtiger/test/suite/test_txn24.py b/src/third_party/wiredtiger/test/suite/test_txn24.py
index 411fd5b2867..3acc8934994 100644
--- a/src/third_party/wiredtiger/test/suite/test_txn24.py
+++ b/src/third_party/wiredtiger/test/suite/test_txn24.py
@@ -72,22 +72,16 @@ class test_txn24(wttest.WiredTigerTestCase):
session2 = self.setUpSessionOpen(self.conn)
cursor2 = session2.open_cursor(uri)
start_row = int(n_rows/4)
- for i in range(0, 120):
- session2.begin_transaction('isolation=snapshot')
- for j in range(0,1000):
- cursor2[start_row] = new_val
- start_row += 1
- session2.commit_transaction()
+ for i in range(0, 120000):
+ cursor2[start_row] = new_val
+ start_row += 1
session3 = self.setUpSessionOpen(self.conn)
cursor3 = session3.open_cursor(uri)
start_row = int(n_rows/2)
- for i in range(0, 120):
- session3.begin_transaction('isolation=snapshot')
- for j in range(0,1000):
- cursor3[start_row] = new_val
- start_row += 1
- session3.commit_transaction()
+ for i in range(0, 120000):
+ cursor3[start_row] = new_val
+ start_row += 1
# At this point in time, we have made roughly 90% cache dirty. If we are not using
# snaphsots for eviction threads, the cache state will remain like this forever and we may
@@ -101,12 +95,9 @@ class test_txn24(wttest.WiredTigerTestCase):
session4 = self.setUpSessionOpen(self.conn)
cursor4 = session4.open_cursor(uri)
start_row = 1
- for i in range(0, 120):
- session4.begin_transaction('isolation=snapshot')
- for j in range(0,1000):
- cursor4[start_row] = new_val
- start_row += 1
- session4.commit_transaction()
+ for i in range(0, 120000):
+ cursor4[start_row] = new_val
+ start_row += 1
# If we have done all operations error free so far, eviction threads have been successful.
diff --git a/src/third_party/wiredtiger/test/suite/test_util18.py b/src/third_party/wiredtiger/test/suite/test_util18.py
new file mode 100644
index 00000000000..c66bfca13b0
--- /dev/null
+++ b/src/third_party/wiredtiger/test/suite/test_util18.py
@@ -0,0 +1,194 @@
+#!/usr/bin/env python
+#
+# Public Domain 2014-2021 MongoDB, Inc.
+# Public Domain 2008-2014 WiredTiger, Inc.
+#
+# This is free and unencumbered software released into the public domain.
+#
+# Anyone is free to copy, modify, publish, use, compile, sell, or
+# distribute this software, either in source code form or as a compiled
+# binary, for any purpose, commercial or non-commercial, and by any
+# means.
+#
+# In jurisdictions that recognize copyright laws, the author or authors
+# of this software dedicate any and all copyright interest in the
+# software to the public domain. We make this dedication for the benefit
+# of the public at large and to the detriment of our heirs and
+# successors. We intend this dedication to be an overt act of
+# relinquishment in perpetuity of all present and future rights to this
+# software under copyright law.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+import codecs, filecmp
+from suite_subprocess import suite_subprocess
+import wiredtiger, wttest
+from wtscenario import make_scenarios
+
+# test_util18.py
+# Utilities: wt printlog
+class test_util18(wttest.WiredTigerTestCase, suite_subprocess):
+ tablename = 'test_util18.a'
+ uri = 'table:' + tablename
+ logmax = 100
+ nentries = 5
+ create_params = 'key_format=S,value_format=S'
+ key_prefix = 'KEY'
+ val_prefix = 'VAL'
+
+ # Whether user data is redacted or printed.
+ print_user_data = [
+ ('show_user_data', dict(print_user_data=True)),
+ ('no_user_data', dict(print_user_data=False)),
+ ]
+
+ scenarios = make_scenarios(print_user_data)
+
+ def conn_config(self):
+ return 'log=(archive=false,enabled,file_max=%dK)' % self.logmax
+
+ # Populate our test table with data we can check against in the printlog output.
+ def populate(self):
+ cursor = self.session.open_cursor(self.uri, None)
+ for i in range(0, self.nentries):
+ key = self.key_prefix + str(i)
+ val = self.val_prefix + str(i)
+ cursor[key] = val
+ cursor.close()
+
+ # Check the given printlog file reflects the data written by 'populate'.
+ def check_populated_printlog(self, log_file, expect_keyval, expect_keyval_hex):
+ for i in range(0, self.nentries):
+ key = self.key_prefix + str(i)
+ val = self.val_prefix + str(i)
+ # Check if the KEY/VAL commits exist in the log file.
+ if expect_keyval:
+ self.check_file_contains(log_file, '"key": "%s\\u0000"' % key)
+ self.check_file_contains(log_file, '"value": "%s\\u0000"' % val)
+ else:
+ self.check_file_not_contains(log_file, '"key": "%s\\u0000"' % key)
+ self.check_file_not_contains(log_file, '"value": "%s\\u0000"' % val)
+
+ # Convert our KEY/VAL strings to their expected hex value.
+ hex_key = codecs.encode(key.encode(), 'hex')
+ val_key = codecs.encode(val.encode(), 'hex')
+ # Check if the KEY/VAL commits exist in the log file (in hex form).
+ if expect_keyval_hex:
+ self.check_file_contains(log_file, '"key-hex": "%s00"' % str(hex_key, 'ascii'))
+ self.check_file_contains(log_file, '"value-hex": "%s00"' % str(val_key, 'ascii'))
+ else:
+ self.check_file_not_contains(log_file, '"key-hex": "%s00"' % str(hex_key, 'ascii'))
+ self.check_file_not_contains(log_file, '"value-hex": "%s00"' % str(val_key, 'ascii'))
+
+ def test_printlog_file(self):
+ """
+ Run printlog on a populated table.
+ """
+ self.session.create('table:' + self.tablename, self.create_params)
+ self.populate()
+ wt_args = ["printlog"]
+ # Append "-u" if we expect printlog to print user data.
+ if self.print_user_data:
+ wt_args.append("-u")
+ self.runWt(wt_args, outfilename='printlog.out')
+ self.check_non_empty_file('printlog.out')
+ self.check_populated_printlog('printlog.out', self.print_user_data, False)
+
+ def test_printlog_hex_file(self):
+ """
+ Run printlog with hexadecimal formatting on a populated table.
+ """
+ self.session.create('table:' + self.tablename, self.create_params)
+ self.populate()
+ wt_args = ["printlog", "-x"]
+ # Append "-u" if we expect printlog to print user data.
+ if self.print_user_data:
+ wt_args.append("-u")
+ self.runWt(wt_args, outfilename='printlog-hex.out')
+ self.check_non_empty_file('printlog-hex.out')
+ self.check_populated_printlog('printlog-hex.out', self.print_user_data, self.print_user_data)
+
+ def test_printlog_message(self):
+ """
+ Run printlog with messages-only formatting on a populated table.
+ """
+ self.session.create('table:' + self.tablename, self.create_params)
+ self.populate()
+ # Write a log message that we can specifically test the presence of.
+ log_message = "Test Message: %s" % self.tablename
+ self.session.log_printf(log_message)
+ wt_args = ["printlog", "-m"]
+ # Append "-u" if we expect printlog to print user data.
+ if self.print_user_data:
+ wt_args.append("-u")
+ self.runWt(wt_args, outfilename='printlog-message.out')
+ self.check_non_empty_file('printlog-message.out')
+ self.check_file_contains('printlog-message.out', log_message)
+ self.check_populated_printlog('printlog-message.out', False, False)
+
+ def test_printlog_lsn_offset(self):
+ """
+ Run printlog with an LSN offset provided.
+ """
+ self.session.create('table:' + self.tablename, self.create_params)
+ self.populate()
+
+ # Open a log cursor to accurately extract the first, second and last LSN from our
+ # log.
+ c = self.session.open_cursor("log:", None, None)
+ # Moving the cursor to the beginning of the file, extract our first LSN.
+ c.next()
+ first_lsn_keys = c.get_key()
+ # Moving the cursor, extract our second LSN.
+ c.next()
+ second_lsn_keys = c.get_key()
+ last_lsn_keys = []
+ # Moving the cursor to the last available key, extract the last LSN value.
+ while c.next() == 0:
+ last_lsn_keys = c.get_key()
+ c.next()
+ c.close()
+
+ # Construct the first, second and last LSN values, assuming the
+ # key elements follow the following sequence: [lsn.file, lsn.offset, opcount].
+ first_lsn = '%s,%s' % (first_lsn_keys[0], first_lsn_keys[1])
+ second_lsn = '%s,%s' % (second_lsn_keys[0], second_lsn_keys[1])
+ last_lsn = '%s,%s' % (last_lsn_keys[0], last_lsn_keys[1])
+
+ # Test printlog on a bounded range that starts and ends on our first LSN record. In doing so we want
+ # to assert that other log records won't be printed e.g. the second LSN record.
+ wt_args = ["printlog", '-l %s,%s' % (first_lsn, first_lsn)]
+ self.runWt(wt_args, outfilename='printlog-lsn-offset.out')
+ self.check_file_contains('printlog-lsn-offset.out', '"lsn" : [%s]' % first_lsn)
+ self.check_file_not_contains('printlog-lsn-offset.out', '"lsn" : [%s]' % second_lsn)
+ self.check_populated_printlog('printlog-lsn-offset.out', False, False)
+
+ # Test printlog from the starting LSN value to the end of the log. We expect to find the logs relating
+ # to the population of our table.
+ wt_args = ["printlog", '-l %s' % first_lsn]
+ # Append "-u" if we expect printlog to print user data.
+ if self.print_user_data:
+ wt_args.append("-u")
+ self.runWt(wt_args, outfilename='printlog-lsn-offset.out')
+ self.check_populated_printlog('printlog-lsn-offset.out', self.print_user_data, False)
+
+ # Test that using LSN '1,0' and our first LSN value produce the same output when passed to printlog.
+    # We expect printing from LSN '1,0' (which should denote the beginning of the first log file)
+ # is equivalent to printing from our first extracted LSN value to the last LSN value.
+ wt_args_beginning = ["printlog", '-l 1,0,%s' % last_lsn]
+ wt_args_first = ["printlog", '-l %s,%s' % (first_lsn, last_lsn)]
+ if self.print_user_data:
+ wt_args_beginning.append("-u")
+ wt_args_first.append("-u")
+ self.runWt(wt_args_beginning, outfilename='printlog-lsn-offset-beginning.out')
+ self.runWt(wt_args_first, outfilename='printlog-lsn-offset-first.out')
+ self.assertTrue(filecmp.cmp('printlog-lsn-offset-beginning.out', 'printlog-lsn-offset-first.out'))
+
+if __name__ == '__main__':
+ wttest.run()
diff --git a/src/third_party/wiredtiger/test/suite/test_util19.py b/src/third_party/wiredtiger/test/suite/test_util19.py
new file mode 100644
index 00000000000..412e0eaeda4
--- /dev/null
+++ b/src/third_party/wiredtiger/test/suite/test_util19.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+#
+# Public Domain 2014-2021 MongoDB, Inc.
+# Public Domain 2008-2014 WiredTiger, Inc.
+#
+# This is free and unencumbered software released into the public domain.
+#
+# Anyone is free to copy, modify, publish, use, compile, sell, or
+# distribute this software, either in source code form or as a compiled
+# binary, for any purpose, commercial or non-commercial, and by any
+# means.
+#
+# In jurisdictions that recognize copyright laws, the author or authors
+# of this software dedicate any and all copyright interest in the
+# software to the public domain. We make this dedication for the benefit
+# of the public at large and to the detriment of our heirs and
+# successors. We intend this dedication to be an overt act of
+# relinquishment in perpetuity of all present and future rights to this
+# software under copyright law.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+from suite_subprocess import suite_subprocess
+import wiredtiger, wttest
+from wtscenario import make_scenarios
+
+# test_util19.py
+# Utilities: wt downgrade
+class test_util19(wttest.WiredTigerTestCase, suite_subprocess):
+ tablename = 'test_util19.a'
+ uri = 'table:' + tablename
+ entries = 100
+ log_max = "100K"
+ log_latest_compat = 5
+
+ create_release = [
+ ('def', dict(create_rel='none')),
+ ('100', dict(create_rel="10.0")),
+ ('33', dict(create_rel="3.3")),
+ ('32', dict(create_rel="3.2")),
+ ('31', dict(create_rel="3.1")),
+ ('30', dict(create_rel="3.0")),
+ ('26', dict(create_rel="2.6")),
+ ]
+
+ downgrade_release = [
+ ('100_rel', dict(downgrade_rel="10.0", log_downgrade_compat=5)),
+ ('33_rel', dict(downgrade_rel="3.3", log_downgrade_compat=4)),
+ ('32_rel', dict(downgrade_rel="3.2", log_downgrade_compat=3)),
+ ('31_rel', dict(downgrade_rel="3.1", log_downgrade_compat=3)),
+ ('30_rel', dict(downgrade_rel="3.0", log_downgrade_compat=2)),
+ ('26_rel', dict(downgrade_rel="2.6", log_downgrade_compat=1)),
+ ]
+
+ scenarios = make_scenarios(create_release, downgrade_release)
+
+ def conn_config(self):
+ conf_str = 'log=(archive=false,enabled,file_max=%s),' % self.log_max
+ if (self.create_rel != 'none'):
+ conf_str += 'compatibility=(release="%s"),' % (self.create_rel)
+ return conf_str
+
+ def test_downgrade(self):
+ """
+ Run wt downgrade on our created database and test its new compatibility version.
+ """
+ # Create the initial database at the compatibility level established by
+ # the connection config ('create_rel').
+ self.session.create(self.uri, 'key_format=S,value_format=S')
+ c = self.session.open_cursor(self.uri, None)
+ # Populate the table to generate some log files.
+ for i in range(self.entries):
+ key = 'KEY' + str(i)
+ val = 'VAL' + str(i)
+ c[key] = val
+ c.close()
+
+ # Call the downgrade utility to reconfigure our database with the specified compatibility version.
+ wt_config = 'log=(archive=false,enabled,file_max=%s),verbose=[log]' % self.log_max
+ downgrade_opt = '-V %s' % self.downgrade_rel
+ self.runWt(['-C', wt_config , 'downgrade', downgrade_opt], reopensession=False, outfilename='downgrade.out')
+ # Based on the downgrade version we can test if the corresponding log compatibility version
+ # has been set.
+ compat_str = '/WT_CONNECTION\.reconfigure: .*: COMPATIBILITY: Version now %d/' % self.log_downgrade_compat
+ if self.log_downgrade_compat != self.log_latest_compat:
+ self.check_file_contains('downgrade.out', compat_str)
+ else:
+ self.check_file_not_contains('downgrade.out', compat_str)
+
+if __name__ == '__main__':
+ wttest.run()
diff --git a/src/third_party/wiredtiger/test/suite/test_util20.py b/src/third_party/wiredtiger/test/suite/test_util20.py
new file mode 100644
index 00000000000..851ea504b70
--- /dev/null
+++ b/src/third_party/wiredtiger/test/suite/test_util20.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+#
+# Public Domain 2014-2021 MongoDB, Inc.
+# Public Domain 2008-2014 WiredTiger, Inc.
+#
+# This is free and unencumbered software released into the public domain.
+#
+# Anyone is free to copy, modify, publish, use, compile, sell, or
+# distribute this software, either in source code form or as a compiled
+# binary, for any purpose, commercial or non-commercial, and by any
+# means.
+#
+# In jurisdictions that recognize copyright laws, the author or authors
+# of this software dedicate any and all copyright interest in the
+# software to the public domain. We make this dedication for the benefit
+# of the public at large and to the detriment of our heirs and
+# successors. We intend this dedication to be an overt act of
+# relinquishment in perpetuity of all present and future rights to this
+# software under copyright law.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+from suite_subprocess import suite_subprocess
+import wiredtiger, wttest
+from wtdataset import SimpleDataSet, ComplexDataSet
+
+# test_util20.py
+# Utilities: wt upgrade
+class test_util20(wttest.WiredTigerTestCase, suite_subprocess):
+ name = 'test_util20.a'
+ create_params = 'key_format=S,value_format=S'
+ num_rows = 10
+
+ def test_upgrade_table_complex_data(self):
+ # Run wt upgrade on a complex dataset and test for successful completion.
+ uri = 'table:' + self.name
+ ComplexDataSet(self, uri, self.num_rows).populate()
+ self.runWt(['upgrade', uri])
+
+ def test_upgrade_table_simple_data(self):
+ # Run wt upgrade on a simple dataset and test for successful completion.
+ uri = 'table:' + self.name
+ SimpleDataSet(self, uri, self.num_rows).populate()
+ self.runWt(['upgrade', uri])
diff --git a/src/third_party/wiredtiger/test/suite/wtbackup.py b/src/third_party/wiredtiger/test/suite/wtbackup.py
index f418d11ddff..8ac4dee2ce8 100644
--- a/src/third_party/wiredtiger/test/suite/wtbackup.py
+++ b/src/third_party/wiredtiger/test/suite/wtbackup.py
@@ -25,33 +25,38 @@
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
-import os, glob
+import os, glob, shutil
import wttest, wiredtiger
from suite_subprocess import suite_subprocess
from helper import compare_files
# Shared base class used by backup tests.
class backup_base(wttest.WiredTigerTestCase, suite_subprocess):
- cursor_config = None # a config string for cursors
- mult = 0 # counter to have variance in data
- nops = 100 # number of operations added to uri
-
- # We use counter to produce unique backup names for multiple iterations
- # of incremental backup tests.
- counter = 0
- # To determine whether to increase/decrease counter, which determines
- initial_backup = True
- # Used for populate function
+ data_cursor_config = None # a config string for cursors.
+ mult = 0 # counter to have variance in data.
+ nops = 100 # number of operations added to uri.
+
+ # We use counter to produce unique backup ids for multiple iterations
+ # of incremental backup.
+ bkup_id = 0
+ # Setup some of the backup tests, and increments the backup id.
+ initial_backup = False
+ # Used for populate function.
rows = 100
populate_big = None
+ # Specify a logpath directory to be used to place wiredtiger log files.
+ logpath=''
+ # Temporary directory used to verify consistent data between multiple incremental backups.
+ home_tmp = "WT_TEST_TMP"
+
#
# Add data to the given uri.
# Allows the option for doing a session checkpoint after adding data.
#
def add_data(self, uri, key, val, do_checkpoint=False):
assert(self.nops != 0)
- c = self.session.open_cursor(uri, None, self.cursor_config)
+ c = self.session.open_cursor(uri, None, self.data_cursor_config)
for i in range(0, self.nops):
num = i + (self.mult * self.nops)
k = key + str(num)
@@ -62,7 +67,7 @@ class backup_base(wttest.WiredTigerTestCase, suite_subprocess):
self.session.checkpoint()
# Increase the counter so that later backups have unique ids.
if not self.initial_backup:
- self.counter += 1
+ self.bkup_id += 1
# Increase the multiplier so that later calls insert unique items.
self.mult += 1
@@ -83,31 +88,36 @@ class backup_base(wttest.WiredTigerTestCase, suite_subprocess):
cg_config = i[3]
i[1](self, i[0], self.rows, cgconfig = cg_config).populate()
- # Backup needs a checkpoint
+ # Backup needs a checkpoint.
if do_checkpoint:
self.session.checkpoint()
#
- # Set up all the directories needed for the test. We have a full backup directory for each
- # iteration and an incremental backup for each iteration. That way we can compare the full and
- # incremental each time through.
+ # Set up all the directories needed for the test. We have a full backup directory, an incremental backup and
+ # temporary directory. The temp directory is used to hold updated data for incremental backups, and will overwrite
+ # the contents of the incremental directory when this function is called, to setup future backup calls.
+ # That way we can compare the full and incremental backup each time through.
#
- def setup_directories(self, max_iteration, home_incr, home_full, logpath):
- for i in range(0, max_iteration):
- # The log directory is a subdirectory of the home directory,
- # creating that will make the home directory also.
+ # Note: The log directory is a subdirectory of the home directory, creating that will make the home directory also.
+ # The incremental backup function, copies the latest data into the temporary directory.
+ def setup_directories(self, home_incr, home_full):
+        # Create the temp directory if the path doesn't exist, as we only
+        # want to create this directory once, at the start of the test.
+ if not os.path.exists(self.home_tmp):
+ os.makedirs(self.home_tmp + '/' + self.logpath)
- home_incr_dir = home_incr + '.' + str(i)
- if os.path.exists(home_incr_dir):
- os.remove(home_incr_dir)
- os.makedirs(home_incr_dir + '/' + logpath)
+ if os.path.exists(home_full):
+ shutil.rmtree(home_full)
+ os.makedirs(home_full + '/' + self.logpath)
- if i == 0:
- continue
- home_full_dir = home_full + '.' + str(i)
- if os.path.exists(home_full_dir):
- os.remove(home_full_dir)
- os.makedirs(home_full_dir + '/' + logpath)
+ # If the incremental directory exists, then remove the contents of the directory
+ # and place all the contents of temporary directory into the incremental directory
+ # such that the test can now perform further incremental backups on the directory.
+ if os.path.exists(home_incr):
+ shutil.rmtree(home_incr)
+            shutil.copytree(self.home_tmp, home_incr)
+ else:
+ os.makedirs(home_incr + '/' + self.logpath)
#
# Check that a URI doesn't exist, both the meta-data and the file names.
@@ -125,16 +135,57 @@ class backup_base(wttest.WiredTigerTestCase, suite_subprocess):
uri.split(":")[1] + '\" found')
#
+ # Copy a file into given directory.
+ #
+ def copy_file(self, file, dir):
+ copy_from = file
+ # If it is log file, prepend the path.
+ if self.logpath and "WiredTigerLog" in file:
+ copy_to = dir + '/' + self.logpath
+ else:
+ copy_to = dir
+ shutil.copy(copy_from, copy_to)
+
+ #
+ # Uses a backup cursor to perform a full backup, by iterating through the cursor
+ # grabbing files to copy over into a given directory. When dealing with a test
+ # that performs multiple incremental backups, we initially perform a full backup
+ # on each incremental directory as a starting base.
+ # Optional arguments:
+ # backup_cur: A backup cursor that can be given into the function, but function caller
+    #             holds responsibility of closing the cursor.
+ #
+ def take_full_backup(self, backup_dir, backup_cur=None):
+ self.pr('Full backup to ' + backup_dir + ': ')
+ bkup_c = backup_cur
+ if backup_cur == None:
+ config = None
+ if self.initial_backup:
+ config = 'incremental=(granularity=1M,enabled=true,this_id=ID0)'
+ bkup_c = self.session.open_cursor('backup:', None, config)
+ all_files = []
+ # We cannot use 'for newfile in bkup_c:' usage because backup cursors don't have
+ # values and adding in get_values returns ENOTSUP and causes the usage to fail.
+ # If that changes then this, and the use of the duplicate below can change.
+ while bkup_c.next() == 0:
+ newfile = bkup_c.get_key()
+ sz = os.path.getsize(newfile)
+            self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + backup_dir)
+ self.copy_file(newfile, backup_dir)
+ all_files.append(newfile)
+ if backup_cur == None:
+ bkup_c.close()
+ return all_files
+
+ #
# Compare against two directory paths using the wt dump command.
- # The suffix allows the option to add distinctive tests adding suffix to both the output files and directories
+ # The suffix allows the option to add distinctive tests adding suffix to the output files.
#
- def compare_backups(self, uri, base_dir_home, other_dir_home, suffix = None):
+ def compare_backups(self, uri, base_dir, other_dir, suffix = None):
sfx = ""
if suffix != None:
sfx = "." + suffix
base_out = "./backup_base" + sfx
- base_dir = base_dir_home + sfx
-
if os.path.exists(base_out):
os.remove(base_out)
@@ -144,6 +195,133 @@ class backup_base(wttest.WiredTigerTestCase, suite_subprocess):
if os.path.exists(other_out):
os.remove(other_out)
# Run wt dump on incremental backup
- other_dir = other_dir_home + sfx
self.runWt(['-R', '-h', other_dir, 'dump', uri], outfilename=other_out)
+ self.pr("compare_files: " + base_out + ", " + other_out)
self.assertEqual(True, compare_files(self, base_out, other_out))
+
+ #
+ # Perform a block range copy for a given offset and file.
+ #
+ def range_copy(self, filename, offset, size, backup_incr_dir):
+ read_from = filename
+ write_to = backup_incr_dir + '/' + filename
+ rfp = open(read_from, "rb")
+ rfp.seek(offset, 0)
+ buf = rfp.read(size)
+        # Compare against the copy in the previous incremental directory, to
+        # check that the old file and the new file are different.
+ old_to = self.home_tmp + '/' + filename
+ if os.path.exists(old_to):
+ self.pr('RANGE CHECK file ' + old_to + ' offset ' + str(offset) + ' len ' + str(size))
+ old_rfp = open(old_to, "rb")
+ old_rfp.seek(offset, 0)
+ old_buf = old_rfp.read(size)
+ old_rfp.close()
+ # This assertion tests that the offset range we're given actually changed
+ # from the previous backup.
+ self.assertNotEqual(buf, old_buf)
+ wfp = None
+ # Create file if the file doesn't exist.
+ if not os.path.exists(write_to):
+ wfp = open(write_to, "w+b")
+ else:
+ wfp = open(write_to, "r+b")
+ wfp.seek(offset, 0)
+ wfp.write(buf)
+ rfp.close()
+ wfp.close()
+
+ #
+ # With a given backup cursor, open an incremental block cursor to copy the blocks of a
+ # given file. If the type of file is WT_BACKUP_FILE, perform full copy into given directory,
+ # otherwise if type of file is WT_BACKUP_RANGE, perform partial copy of the file using range copy.
+ #
+ # Note: we return the sizes of WT_BACKUP_RANGE type files for tests that check for consolidate config.
+ #
+ def take_incr_backup_block(self, bkup_c, newfile, backup_incr_dir):
+ config = 'incremental=(file=' + newfile + ')'
+ self.pr('Open incremental cursor with ' + config)
+ # For each file listed, open a duplicate backup cursor and copy the blocks.
+ incr_c = self.session.open_cursor(None, bkup_c, config)
+ # For consolidate
+ lens = []
+ # We cannot use 'for newfile in incr_c:' usage because backup cursors don't have
+ # values and adding in get_values returns ENOTSUP and causes the usage to fail.
+ # If that changes then this, and the use of the duplicate below can change.
+ while incr_c.next() == 0:
+ incrlist = incr_c.get_keys()
+ offset = incrlist[0]
+ size = incrlist[1]
+ curtype = incrlist[2]
+ self.assertTrue(curtype == wiredtiger.WT_BACKUP_FILE or curtype == wiredtiger.WT_BACKUP_RANGE)
+ if curtype == wiredtiger.WT_BACKUP_FILE:
+ sz = os.path.getsize(newfile)
+ self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + backup_incr_dir)
+ # Copy the whole file.
+ self.copy_file(newfile, backup_incr_dir)
+ else:
+ # Copy the block range.
+ self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size))
+ self.range_copy(newfile, offset, size, backup_incr_dir)
+ lens.append(size)
+ incr_c.close()
+ return lens
+
+ #
+ # Given a backup cursor, open a log cursor, and copy all log files that are not
+ # in the given log list. Return all the log files.
+ #
+ def take_log_backup(self, bkup_c, backup_dir, orig_logs, log_cursor=None):
+ # Now open a duplicate backup cursor.
+ dupc = log_cursor
+ if log_cursor == None:
+ config = 'target=("log:")'
+ dupc = self.session.open_cursor(None, bkup_c, config)
+ dup_logs = []
+ while dupc.next() == 0:
+ newfile = dupc.get_key()
+ self.assertTrue("WiredTigerLog" in newfile)
+ sz = os.path.getsize(newfile)
+ if (newfile not in orig_logs):
+ self.pr('DUP: Copy from: ' + newfile + ' (' + str(sz) + ') to ' + backup_dir)
+ shutil.copy(newfile, backup_dir)
+ # Record all log files returned for later verification.
+ dup_logs.append(newfile)
+ if log_cursor == None:
+ dupc.close()
+ return dup_logs
+
+ #
+ # Open incremental backup cursor, with an id and iterate through all the files
+ # and perform incremental block copy for each of them. Returns the information about
+ # the backup files.
+ #
+ # Optional arguments:
+ # consolidate: Add consolidate option to the cursor.
+ #
+ def take_incr_backup(self, backup_incr_dir, id=0, consolidate=False):
+ self.assertTrue(id > 0 or self.bkup_id > 0)
+ if id == 0:
+ id = self.bkup_id
+ # Open the backup data source for incremental backup.
+ config = 'incremental=(src_id="ID' + str(id - 1) + '",this_id="ID' + str(id) + '"'
+ if consolidate:
+ config += ',consolidate=true'
+ config += ')'
+ self.pr("Incremental backup cursor with config " + config)
+ bkup_c = self.session.open_cursor('backup:', None, config)
+
+ file_sizes = []
+ file_names = []
+
+ # We cannot use 'for newfile in bkup_c:' usage because backup cursors don't have
+ # values and adding in get_values returns ENOTSUP and causes the usage to fail.
+ # If that changes then this, and the use of the duplicate below can change.
+ while bkup_c.next() == 0:
+ newfile = bkup_c.get_key()
+ file_sizes += self.take_incr_backup_block(bkup_c, newfile, backup_incr_dir)
+ file_names.append(newfile)
+ # Copy into temp directory for tests that require further iterations of incremental backups.
+ self.copy_file(newfile, self.home_tmp)
+ bkup_c.close()
+ return (file_names, file_sizes)
diff --git a/src/third_party/wiredtiger/test/suite/wttest.py b/src/third_party/wiredtiger/test/suite/wttest.py
index 15b328ad1bb..85206494188 100755
--- a/src/third_party/wiredtiger/test/suite/wttest.py
+++ b/src/third_party/wiredtiger/test/suite/wttest.py
@@ -26,6 +26,10 @@
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
+# [TEST_TAGS]
+# ignored_file
+# [END_TAGS]
+#
# WiredTigerTestCase
# parent class for all test cases
#
diff --git a/src/third_party/wiredtiger/test/test_coverage.md b/src/third_party/wiredtiger/test/test_coverage.md
new file mode 100644
index 00000000000..1b29241ff32
--- /dev/null
+++ b/src/third_party/wiredtiger/test/test_coverage.md
@@ -0,0 +1,6 @@
+|Component|Test Type|Testing Area|Description|Existing tests|
+|---|---|---|---|---|
+|Backup|Correctness|Full Backup|Full backup contains correct data|[../test/suite/test_backup01.py](../test/suite/test_backup01.py)
+|Caching Eviction|Correctness|Written Data|Ensure that the data written out by eviction is correct after reading|[../test/suite/test_hs15.py](../test/suite/test_hs15.py)
+|Checkpoints|Correctness|Checkpoint Data|On system with a complex, concurrent workload the correct versions of data appear in checkpoints|[../test/suite/test_checkpoint02.py](../test/suite/test_checkpoint02.py), [../test/suite/test_checkpoint03.py](../test/suite/test_checkpoint03.py)
+|Checkpoints|Liveness|Liveness|Identify bugs and race conditions related to checkpoints that can cause deadlocks or livelocks|[../test/csuite/wt3363_checkpoint_op_races/main.c](../test/csuite/wt3363_checkpoint_op_races/main.c)
diff --git a/src/third_party/wiredtiger/tools/xray_to_optrack/xray_to_optrack.cxx b/src/third_party/wiredtiger/tools/xray_to_optrack/xray_to_optrack.cxx
index 856c72a01c6..39d868837a1 100644
--- a/src/third_party/wiredtiger/tools/xray_to_optrack/xray_to_optrack.cxx
+++ b/src/third_party/wiredtiger/tools/xray_to_optrack/xray_to_optrack.cxx
@@ -79,7 +79,7 @@ write_optrack_record(std::ofstream &os, int record_type, const std::string &func
/*
* symbolize_func_id --
- * Symbolize the full function name for a given XRay function id.
+ * Symbolize the full function name for a given XRay function id.
*/
static llvm::Expected<std::string>
symbolize_func_id(uint32_t func_id, const std::string &instr_map,