field | value | date
---|---|---
author | Luke Chen <luke.chen@mongodb.com> | 2021-02-08 15:39:00 +1100
committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2021-02-08 05:10:08 +0000
commit | d99d2428027fcc95478300c2da84a4ca554fdcbd (patch) |
tree | 753aad1519520562cc464f63c2b4d19df5fc8a93 |
parent | a642fd5193e5a18fb476eb684e1f2bf19bfed36e (diff) |
download | mongo-d99d2428027fcc95478300c2da84a4ca554fdcbd.tar.gz |
Import wiredtiger: cde57ba4b7aac0a955e6c35f503229e3809a1560 from branch mongodb-5.0
ref: 772edf829f..cde57ba4b7
for: 4.9.0
WT-4649 Updating build system to allow for different C++ compilers
WT-6354 Increase wt utility coverage with printlog, downgrade, upgrade tests
WT-7039 Creating test configuration framework using the WiredTiger configuration API
WT-7102 Migrate full and incremental backup test functionalities into wtbackup class
WT-7159 Always write on-disk update as a full update to history store
37 files changed, 1230 insertions, 949 deletions
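Of the tickets above, WT-7039 introduces the test configuration framework: tests declare their options in the new dist/test_data.py, `api_config.py -t` generates test/cppsuite/test_config.c from it, and configuration strings are validated through the newly exported `wiredtiger_test_config_validate()`. The following is a minimal illustrative sketch, not part of this change set: the `poc_test` entry, its keys, and its bounds are taken from the test_data.py hunk below, and the NULL session/event-handler fallback to the static entry table follows `__config_validate()` in config_api.c.

```cpp
/*
 * Illustrative sketch only (not part of this change set): validate test
 * configuration strings against the 'poc_test' entry that dist/test_data.py
 * defines in this diff.
 */
#include <cstdlib>
#include <iostream>
#include "wiredtiger.h"

int
main()
{
    /* Keys and their min/max bounds come from test_data.py ('poc_test'). */
    const char *ok_cfg = "collection_count=5,key_size=100,values=second";
    const char *bad_cfg = "collection_count=50"; /* Exceeds max=10. */

    /*
     * Passing NULL for the session and event handler makes the validation
     * fall back to the static config_entries table in test_config.c.
     */
    if (wiredtiger_test_config_validate(NULL, NULL, "poc_test", ok_cfg) != 0)
        std::cerr << "valid configuration unexpectedly rejected" << std::endl;
    if (wiredtiger_test_config_validate(NULL, NULL, "poc_test", bad_cfg) == 0)
        std::cerr << "out-of-range configuration unexpectedly accepted" << std::endl;

    return (EXIT_SUCCESS);
}
```

The cppsuite's `configuration` class (test_harness/configuration_settings.h below) wraps this same call, pairing it with `wiredtiger_config_parser_open()` for per-key lookup.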
diff --git a/src/third_party/wiredtiger/build_posix/configure.ac.in b/src/third_party/wiredtiger/build_posix/configure.ac.in index 3c427b45e14..7bcbae594cc 100644 --- a/src/third_party/wiredtiger/build_posix/configure.ac.in +++ b/src/third_party/wiredtiger/build_posix/configure.ac.in @@ -29,18 +29,20 @@ define([AC_LIBTOOL_LANG_F77_CONFIG], [:])dnl # reason to believe "c++" can build compatible objects. # # Check whether the C++ compiler works by linking a trivial program. -if test "$CC" = "cc"; then - AC_CACHE_CHECK([whether the C++ compiler works], - [wt_cv_prog_cxx_works], - [AC_LANG_PUSH([C++]) - AC_LINK_IFELSE([AC_LANG_PROGRAM([], [])], - [wt_cv_prog_cxx_works=yes], - [wt_cv_prog_cxx_works=no]) - AC_LANG_POP([C++])]) -else - AC_MSG_WARN([C++ compiler ignored unless compiler is named "cc"]) - wt_cv_prog_cxx_works=no -fi +AM_CONDITIONAL([IS_CXX_OK], [test "$CC" = "cc"]) +AM_COND_IF([IS_CXX_OK], [], AM_CONDITIONAL([IS_CXX_OK], [test $(expr `"$CC" --version | head -n 1 | grep -o -E "[[[[:digit:]]]].[[[[:digit:]]]].[[[[:digit:]]]]" | uniq`) = $(expr `"$CXX" --version | head -n 1 | grep -o -E "[[[[:digit:]]]].[[[[:digit:]]]].[[[[:digit:]]]]" | uniq`)])) + +AM_COND_IF([IS_CXX_OK], + [AC_CACHE_CHECK([whether the C++ compiler works], + [wt_cv_prog_cxx_works], + [AC_LANG_PUSH([C++]) + AC_LINK_IFELSE([AC_LANG_PROGRAM([], [])], + [wt_cv_prog_cxx_works=yes], + [wt_cv_prog_cxx_works=no]) + AC_LANG_POP([C++])])], + [AC_MSG_WARN([C++ compiler ignored unless compiler is named "cc" or gcc and g++ versions match]) + wt_cv_prog_cxx_works=no]) + AM_CONDITIONAL([HAVE_CXX], [test "$wt_cv_prog_cxx_works" = "yes"]) LT_PREREQ(2.2.6) diff --git a/src/third_party/wiredtiger/build_win/wiredtiger.def b/src/third_party/wiredtiger/build_win/wiredtiger.def index 71c52bd81af..16a824acf1b 100644 --- a/src/third_party/wiredtiger/build_win/wiredtiger.def +++ b/src/third_party/wiredtiger/build_win/wiredtiger.def @@ -15,6 +15,7 @@ EXPORTS wiredtiger_struct_pack wiredtiger_struct_size wiredtiger_struct_unpack + wiredtiger_test_config_validate wiredtiger_unpack_int wiredtiger_unpack_item wiredtiger_unpack_start diff --git a/src/third_party/wiredtiger/dist/api_config.py b/src/third_party/wiredtiger/dist/api_config.py index 3ab0ec15504..8313b4bb445 100755 --- a/src/third_party/wiredtiger/dist/api_config.py +++ b/src/third_party/wiredtiger/dist/api_config.py @@ -2,9 +2,21 @@ from __future__ import print_function import os, re, sys, textwrap -import api_data from dist import compare_srcfile, format_srcfile +test_config = False + +# This file serves two purposes, it can generate configuration for the main wiredtiger library and, +# it can generate configuration for the c and cpp suite tests. To avoid duplication we import the +# differing apis here and then treat them as the same for the remainder of the script. However we +# do have different logic depending on whether we intend to generate the test api or not, which is +# managed with a boolean flag. +if len(sys.argv) == 1 or sys.argv[1] != "-t": + import api_data as api_data_def +else: + test_config = True + import test_data as api_data_def + # Temporary file. 
tmp_file = '__tmp' @@ -76,7 +88,7 @@ def parseconfig(c, method_name, name_indent=''): if ctype == 'category': for subc in sorted(c.subconfig): output += parseconfig(subc, method_name, \ - name_indent + (' ' * 4)) + name_indent + (' ' * 4)) output += '@config{ ),,}\n' return output @@ -97,58 +109,61 @@ def getconfcheck(c): w.wrap(check + ' ' + cstr + ', ' + sstr + ' },')) return check -skip = False -for line in open(f, 'r'): - if skip: - if '@configend' in line: - skip = False - continue +if not test_config: + skip = False + for line in open(f, 'r'): + if skip: + if '@configend' in line: + skip = False + continue - m = cbegin_re.match(line) - if not m: - tfile.write(line) - continue + m = cbegin_re.match(line) + if not m: + tfile.write(line) + continue - prefix, config_name = m.groups() - if config_name not in api_data.methods: - print("Missing configuration for " + config_name, file=sys.stderr) - tfile.write(line) - continue + prefix, config_name = m.groups() + if config_name not in api_data_def.methods: + print("Missing configuration for " + config_name, file=sys.stderr) + tfile.write(line) + continue - skip = ('@configstart' in line) + skip = ('@configstart' in line) - if not api_data.methods[config_name].config: - tfile.write(prefix + '@configempty{' + config_name + - ', see dist/api_data.py}\n') - continue - - tfile.write(prefix + '@configstart{' + config_name + - ', see dist/api_data.py}\n') - - w = textwrap.TextWrapper(width=100-len(prefix.expandtabs()), - break_on_hyphens=False, - break_long_words=False, - replace_whitespace=False, - fix_sentence_endings=True) - # Separate at spaces, and after a set of non-breaking space indicators. - w.wordsep_re = w.wordsep_simple_re = \ - re.compile(r'(\s+|(?<= )[\w_,.;:]+)') - for c in api_data.methods[config_name].config: - if 'undoc' in c.flags: + if not api_data_def.methods[config_name].config: + tfile.write(prefix + '@configempty{' + config_name + + ', see dist/api_data.py}\n') continue - output = parseconfig(c, config_name) - for l in w.wrap(output): - tfile.write(prefix + l.replace('\n', '\n' + prefix) + '\n') - tfile.write(prefix + '@configend\n') + tfile.write(prefix + '@configstart{' + config_name + + ', see dist/api_data.py}\n') -tfile.close() -compare_srcfile(tmp_file, f) + w = textwrap.TextWrapper(width=100-len(prefix.expandtabs()), + break_on_hyphens=False, + break_long_words=False, + replace_whitespace=False, + fix_sentence_endings=True) + # Separate at spaces, and after a set of non-breaking space indicators. + w.wordsep_re = w.wordsep_simple_re = \ + re.compile(r'(\s+|(?<= )[\w_,.;:]+)') + for c in api_data_def.methods[config_name].config: + if 'undoc' in c.flags: + continue + output = parseconfig(c, config_name) + for l in w.wrap(output): + tfile.write(prefix + l.replace('\n', '\n' + prefix) + '\n') + + tfile.write(prefix + '@configend\n') + + tfile.close() + compare_srcfile(tmp_file, f) ##################################################################### # Create config_def.c with defaults for each config string ##################################################################### f='../src/config/config_def.c' +if test_config: + f = '../test/cppsuite/test_config.c' tfile = open(tmp_file, 'w') tfile.write('''/* DO NOT EDIT: automatically built by dist/api_config.py. */ @@ -239,8 +254,8 @@ def getsubconfigstr(c): # Write structures of arrays of allowable configuration options, including a # NULL as a terminator for iteration. 
-for name in sorted(api_data.methods.keys()): - config = api_data.methods[name].config +for name in sorted(api_data_def.methods.keys()): + config = api_data_def.methods[name].config if config: tfile.write(''' static const WT_CONFIG_CHECK confchk_%(name)s[] = { @@ -258,8 +273,8 @@ tfile.write('static const WT_CONFIG_ENTRY config_entries[] = {') slot=-1 config_defines = '' -for name in sorted(api_data.methods.keys()): - config = api_data.methods[name].config +for name in sorted(api_data_def.methods.keys()): + config = api_data_def.methods[name].config slot += 1 # Build a list of #defines that reference specific slots in the list (the @@ -295,72 +310,94 @@ tfile.write('\n};\n') # Write the routine that connects the WT_CONNECTION_IMPL structure to the list # of configuration entry structures. -tfile.write(''' -int -__wt_conn_config_init(WT_SESSION_IMPL *session) -{ -\tWT_CONNECTION_IMPL *conn; -\tconst WT_CONFIG_ENTRY *ep, **epp; - -\tconn = S2C(session); - -\t/* Build a list of pointers to the configuration information. */ -\tWT_RET(__wt_calloc_def(session, WT_ELEMENTS(config_entries), &epp)); -\tconn->config_entries = epp; - -\t/* Fill in the list to reference the default information. */ -\tfor (ep = config_entries;;) { -\t\t*epp++ = ep++; -\t\tif (ep->method == NULL) -\t\t\tbreak; -\t} -\treturn (0); -} - -void -__wt_conn_config_discard(WT_SESSION_IMPL *session) -{ -\tWT_CONNECTION_IMPL *conn; - -\tconn = S2C(session); - -\t__wt_free(session, conn->config_entries); -} - -/* - * __wt_conn_config_match -- - * Return the static configuration entry for a method. - */ -const WT_CONFIG_ENTRY * -__wt_conn_config_match(const char *method) -{ -\tconst WT_CONFIG_ENTRY *ep; - -\tfor (ep = config_entries; ep->method != NULL; ++ep) -\t\tif (strcmp(method, ep->method) == 0) -\t\t\treturn (ep); -\treturn (NULL); -} -''') +if not test_config: + tfile.write(''' + int + __wt_conn_config_init(WT_SESSION_IMPL *session) + { + \tWT_CONNECTION_IMPL *conn; + \tconst WT_CONFIG_ENTRY *ep, **epp; + + \tconn = S2C(session); + + \t/* Build a list of pointers to the configuration information. */ + \tWT_RET(__wt_calloc_def(session, WT_ELEMENTS(config_entries), &epp)); + \tconn->config_entries = epp; + + \t/* Fill in the list to reference the default information. */ + \tfor (ep = config_entries;;) { + \t\t*epp++ = ep++; + \t\tif (ep->method == NULL) + \t\t\tbreak; + \t} + \treturn (0); + } + + void + __wt_conn_config_discard(WT_SESSION_IMPL *session) + { + \tWT_CONNECTION_IMPL *conn; + + \tconn = S2C(session); + + \t__wt_free(session, conn->config_entries); + } + + /* + * __wt_conn_config_match -- + * Return the static configuration entry for a method. + */ + const WT_CONFIG_ENTRY * + __wt_conn_config_match(const char *method) + { + \tconst WT_CONFIG_ENTRY *ep; + + \tfor (ep = config_entries; ep->method != NULL; ++ep) + \t\tif (strcmp(method, ep->method) == 0) + \t\t\treturn (ep); + \treturn (NULL); + } + ''') +else: + tfile.write( + ''' + /* + * __wt_test_config_match -- + * Return the static configuration entry for a test. + */ + const WT_CONFIG_ENTRY * + __wt_test_config_match(const char *test_name) + { + const WT_CONFIG_ENTRY *ep; + + for (ep = config_entries; ep->method != NULL; ++ep) + if (strcmp(test_name, ep->method) == 0) + return (ep); + return (NULL); + } + ''' + ) tfile.close() format_srcfile(tmp_file) compare_srcfile(tmp_file, f) # Update the config.h file with the #defines for the configuration entries. 
-tfile = open(tmp_file, 'w') -skip = 0 -for line in open('../src/include/config.h', 'r'): - if skip: - if 'configuration section: END' in line: - tfile.write('/*\n' + line) - skip = 0 - else: - tfile.write(line) - if 'configuration section: BEGIN' in line: - skip = 1 - tfile.write(' */\n') - tfile.write(config_defines) -tfile.close() -format_srcfile(tmp_file) -compare_srcfile(tmp_file, '../src/include/config.h') +if not test_config: + tfile = open(tmp_file, 'w') + skip = 0 + config_file = '../src/include/config.h' + for line in open(config_file, 'r'): + if skip: + if 'configuration section: END' in line: + tfile.write('/*\n' + line) + skip = 0 + else: + tfile.write(line) + if 'configuration section: BEGIN' in line: + skip = 1 + tfile.write(' */\n') + tfile.write(config_defines) + tfile.close() + format_srcfile(tmp_file) + compare_srcfile(tmp_file, config_file) diff --git a/src/third_party/wiredtiger/dist/api_config_gen.py b/src/third_party/wiredtiger/dist/api_config_gen.py new file mode 100755 index 00000000000..e8c088fbf42 --- /dev/null +++ b/src/third_party/wiredtiger/dist/api_config_gen.py @@ -0,0 +1,4 @@ +#!/usr/bin/env python +import os +os.system("./api_config.py") +os.system("./api_config.py -t") diff --git a/src/third_party/wiredtiger/dist/filelist b/src/third_party/wiredtiger/dist/filelist index 7bbc5e5596a..39b85746b6c 100644 --- a/src/third_party/wiredtiger/dist/filelist +++ b/src/third_party/wiredtiger/dist/filelist @@ -215,3 +215,4 @@ src/txn/txn_log.c src/txn/txn_recover.c src/txn/txn_rollback_to_stable.c src/txn/txn_timestamp.c +test/cppsuite/test_config.c diff --git a/src/third_party/wiredtiger/dist/s_all b/src/third_party/wiredtiger/dist/s_all index c02423571dc..8b36c09aa66 100755 --- a/src/third_party/wiredtiger/dist/s_all +++ b/src/third_party/wiredtiger/dist/s_all @@ -73,7 +73,7 @@ run() # already parallelize internally. 
run "sh ./s_readme $force" run "sh ./s_install $force" -run "python api_config.py" +run "python api_config_gen.py" run "python api_err.py" run "python flags.py" run "python log.py" diff --git a/src/third_party/wiredtiger/dist/s_export.list b/src/third_party/wiredtiger/dist/s_export.list index e85bf62517d..ed070963ab0 100644 --- a/src/third_party/wiredtiger/dist/s_export.list +++ b/src/third_party/wiredtiger/dist/s_export.list @@ -14,6 +14,7 @@ wiredtiger_strerror wiredtiger_struct_pack wiredtiger_struct_size wiredtiger_struct_unpack +wiredtiger_test_config_validate wiredtiger_unpack_int wiredtiger_unpack_item wiredtiger_unpack_start diff --git a/src/third_party/wiredtiger/dist/s_funcs.list b/src/third_party/wiredtiger/dist/s_funcs.list index 0b7db52d26c..4f3d2a2ca87 100644 --- a/src/third_party/wiredtiger/dist/s_funcs.list +++ b/src/third_party/wiredtiger/dist/s_funcs.list @@ -45,6 +45,7 @@ wiredtiger_pack_uint wiredtiger_struct_pack wiredtiger_struct_size wiredtiger_struct_unpack +wiredtiger_test_config_validate wiredtiger_unpack_int wiredtiger_unpack_item wiredtiger_unpack_start diff --git a/src/third_party/wiredtiger/dist/s_string.ok b/src/third_party/wiredtiger/dist/s_string.ok index 6c3cfa3403f..f9f2b67712f 100644 --- a/src/third_party/wiredtiger/dist/s_string.ok +++ b/src/third_party/wiredtiger/dist/s_string.ok @@ -1130,6 +1130,7 @@ pcpu perf pfx pluggable +poc popen poptable popthreads diff --git a/src/third_party/wiredtiger/dist/test_data.py b/src/third_party/wiredtiger/dist/test_data.py new file mode 100644 index 00000000000..ac46cf55bc2 --- /dev/null +++ b/src/third_party/wiredtiger/dist/test_data.py @@ -0,0 +1,56 @@ +# This file is a python script that describes the cpp test framework test configuration options. + +class Method: + def __init__(self, config): + # Deal with duplicates: with complex configurations (like + # WT_SESSION::create), it's simpler to deal with duplicates once than + # manually as configurations are defined + self.config = [] + lastname = None + for c in sorted(config): + if '.' in c.name: + raise "Bad config key '%s'" % c.name + if c.name == lastname: + continue + lastname = c.name + self.config.append(c) + +class Config: + def __init__(self, name, default, desc, subconfig=None, **flags): + self.name = name + self.default = default + self.desc = desc + self.subconfig = subconfig + self.flags = flags + + # Comparators for sorting. 
+ def __eq__(self, other): + return self.name == other.name + + def __ne__(self, other): + return self.name != other.name + + def __lt__(self, other): + return self.name < other.name + + def __le__(self, other): + return self.name <= other.name + + def __gt__(self, other): + return self.name > other.name + + def __ge__(self, other): + return self.name >= other.name +methods = { +'poc_test' : Method([ + Config('collection_count', '1', r''' + the number of collections to create for testing''', + min='1', max='10'), + Config('key_size', '10', r''' + the size of the keys to be created in bytes''', + min='1', max='10000'), + Config('values', 'first', r''' + The value that each key will be populated with, used an example string configuration''', + choices=['first', 'second', 'third']) +]), +} diff --git a/src/third_party/wiredtiger/import.data b/src/third_party/wiredtiger/import.data index e3e584e6bb4..4d495dfce0f 100644 --- a/src/third_party/wiredtiger/import.data +++ b/src/third_party/wiredtiger/import.data @@ -2,5 +2,5 @@ "vendor": "wiredtiger", "github": "wiredtiger/wiredtiger.git", "branch": "mongodb-5.0", - "commit": "772edf829f31c87fad4cdb8f73bf7e50b993e067" + "commit": "cde57ba4b7aac0a955e6c35f503229e3809a1560" } diff --git a/src/third_party/wiredtiger/src/config/config_api.c b/src/third_party/wiredtiger/src/config/config_api.c index f0c1d799089..d5c343e422c 100644 --- a/src/third_party/wiredtiger/src/config/config_api.c +++ b/src/third_party/wiredtiger/src/config/config_api.c @@ -85,12 +85,13 @@ wiredtiger_config_parser_open( } /* - * wiredtiger_config_validate -- - * Validate a configuration string. + * __config_validate -- + * Validate a configuration string. Taking a function pointer to the matching function for the + * given configuration set. */ -int -wiredtiger_config_validate( - WT_SESSION *wt_session, WT_EVENT_HANDLER *event_handler, const char *name, const char *config) +static int +__config_validate(WT_SESSION *wt_session, WT_EVENT_HANDLER *event_handler, const char *name, + const char *config, const WT_CONFIG_ENTRY *config_matcher(const char *)) { const WT_CONFIG_ENTRY *ep, **epp; WT_CONNECTION_IMPL *conn, dummy_conn; @@ -132,7 +133,7 @@ wiredtiger_config_validate( * added). */ if (session == NULL || conn == NULL || conn->config_entries == NULL) - ep = __wt_conn_config_match(name); + ep = config_matcher(name); else { ep = NULL; for (epp = conn->config_entries; *epp != NULL && (*epp)->method != NULL; ++epp) @@ -148,6 +149,28 @@ wiredtiger_config_validate( } /* + * wiredtiger_config_validate -- + * Validate a configuration string. + */ +int +wiredtiger_config_validate( + WT_SESSION *wt_session, WT_EVENT_HANDLER *event_handler, const char *name, const char *config) +{ + return (__config_validate(wt_session, event_handler, name, config, __wt_conn_config_match)); +} + +/* + * wiredtiger_test_config_validate -- + * Validate a test configuration string. + */ +int +wiredtiger_test_config_validate( + WT_SESSION *wt_session, WT_EVENT_HANDLER *event_handler, const char *name, const char *config) +{ + return (__config_validate(wt_session, event_handler, name, config, __wt_test_config_match)); +} + +/* * __conn_foc_add -- * Add a new entry into the connection's free-on-close list. 
*/ diff --git a/src/third_party/wiredtiger/src/history/hs_rec.c b/src/third_party/wiredtiger/src/history/hs_rec.c index 0021187281f..3640b4d0adb 100644 --- a/src/third_party/wiredtiger/src/history/hs_rec.c +++ b/src/third_party/wiredtiger/src/history/hs_rec.c @@ -690,10 +690,18 @@ __wt_hs_insert_updates(WT_SESSION_IMPL *session, WT_PAGE *page, WT_MULTI *multi) /* * Calculate reverse modify and clear the history store records with timestamps when - * inserting the first update. + * inserting the first update. Always write on-disk data store updates to the history + * store as a full update because the on-disk update will be the base update for all the + * updates that are older than the on-disk update. + * + * Due to concurrent operation of checkpoint and eviction, it is possible that history + * store may have more recent versions of a key than the on-disk version. Without a + * proper base value in the history store, it can lead to wrong value being restored by + * the RTS. */ nentries = MAX_REVERSE_MODIFY_NUM; - if (upd->type == WT_UPDATE_MODIFY && enable_reverse_modify && + if (!F_ISSET(upd, WT_UPDATE_DS) && upd->type == WT_UPDATE_MODIFY && + enable_reverse_modify && __wt_calc_modify(session, prev_full_value, full_value, prev_full_value->size / 10, entries, &nentries) == 0) { WT_ERR(__wt_modify_pack(cursor, entries, nentries, &modify_value)); diff --git a/src/third_party/wiredtiger/src/include/extern.h b/src/third_party/wiredtiger/src/include/extern.h index 405ae73401b..9941c9ba34a 100644 --- a/src/third_party/wiredtiger/src/include/extern.h +++ b/src/third_party/wiredtiger/src/include/extern.h @@ -47,6 +47,8 @@ extern char *__wt_timestamp_to_string(wt_timestamp_t ts, char *ts_string) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result)); extern const WT_CONFIG_ENTRY *__wt_conn_config_match(const char *method) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result)); +extern const WT_CONFIG_ENTRY *__wt_test_config_match(const char *test_name) + WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result)); extern const char *__wt_addr_string(WT_SESSION_IMPL *session, const uint8_t *addr, size_t addr_size, WT_ITEM *buf) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result)); extern const char *__wt_buf_set_printable(WT_SESSION_IMPL *session, const void *p, size_t size, diff --git a/src/third_party/wiredtiger/src/include/wiredtiger.in b/src/third_party/wiredtiger/src/include/wiredtiger.in index 8b760c6df42..5b0f2e57279 100644 --- a/src/third_party/wiredtiger/src/include/wiredtiger.in +++ b/src/third_party/wiredtiger/src/include/wiredtiger.in @@ -3329,6 +3329,13 @@ struct __wt_config_item { int wiredtiger_config_validate(WT_SESSION *session, WT_EVENT_HANDLER *event_handler, const char *name, const char *config) WT_ATTRIBUTE_LIBRARY_VISIBLE; + +/* + * Validate a configuration string for a WiredTiger test program. + */ +int wiredtiger_test_config_validate(WT_SESSION *session, + WT_EVENT_HANDLER *event_handler, const char *name, const char *config) + WT_ATTRIBUTE_LIBRARY_VISIBLE; #endif /*! diff --git a/src/third_party/wiredtiger/src/txn/txn_rollback_to_stable.c b/src/third_party/wiredtiger/src/txn/txn_rollback_to_stable.c index 9ad6b7abd6d..0001d09302b 100644 --- a/src/third_party/wiredtiger/src/txn/txn_rollback_to_stable.c +++ b/src/third_party/wiredtiger/src/txn/txn_rollback_to_stable.c @@ -254,10 +254,15 @@ __rollback_row_ondisk_fixup_key(WT_SESSION_IMPL *session, WT_PAGE *page, WT_ROW /* * Do not include history store updates greater than on-disk data store version to construct - * a full update to restore. 
Comparing with timestamps here has no problem unlike in search - * flow where the timestamps may be reset during reconciliation. RTS detects an on-disk - * update is unstable based on the written proper timestamp, so comparing against it with - * history store shouldn't have any problem. + * a full update to restore. Include the most recent updates than the on-disk version + * shouldn't be problem as the on-disk version in history store is always a full update. It + * is better to not to include those updates as it unnecessarily increases the rollback to + * stable time. + * + * Comparing with timestamps here has no problem unlike in search flow where the timestamps + * may be reset during reconciliation. RTS detects an on-disk update is unstable based on + * the written proper timestamp, so comparing against it with history store shouldn't have + * any problem. */ if (hs_start_ts <= unpack->tw.start_ts) { if (type == WT_UPDATE_MODIFY) @@ -267,7 +272,13 @@ __rollback_row_ondisk_fixup_key(WT_SESSION_IMPL *session, WT_PAGE *page, WT_ROW WT_ASSERT(session, type == WT_UPDATE_STANDARD); WT_ERR(__wt_buf_set(session, &full_value, hs_value->data, hs_value->size)); } - } + } else + __wt_verbose(session, WT_VERB_RECOVERY_RTS(session), + "history store update more recent than on-disk update with start timestamp: %s," + " durable timestamp: %s, stop timestamp: %s and type: %" PRIu8, + __wt_timestamp_to_string(hs_start_ts, ts_string[0]), + __wt_timestamp_to_string(hs_durable_ts, ts_string[1]), + __wt_timestamp_to_string(hs_stop_durable_ts, ts_string[2]), type); /* * Verify the history store timestamps are in order. The start timestamp may be equal to the diff --git a/src/third_party/wiredtiger/test/cppsuite/test_config.c b/src/third_party/wiredtiger/test/cppsuite/test_config.c new file mode 100644 index 00000000000..6d1585172c8 --- /dev/null +++ b/src/third_party/wiredtiger/test/cppsuite/test_config.c @@ -0,0 +1,28 @@ +/* DO NOT EDIT: automatically built by dist/api_config.py. */ + +#include "wt_internal.h" + +static const WT_CONFIG_CHECK confchk_poc_test[] = { + {"collection_count", "int", NULL, "min=1,max=10", NULL, 0}, + {"key_size", "int", NULL, "min=1,max=10000", NULL, 0}, + {"values", "string", NULL, "choices=[\"first\",\"second\",\"third\"]", NULL, 0}, + {NULL, NULL, NULL, NULL, NULL, 0}}; + +static const WT_CONFIG_ENTRY config_entries[] = { + {"poc_test", "collection_count=1,key_size=10,values=first", confchk_poc_test, 3}, + {NULL, NULL, NULL, 0}}; + +/* + * __wt_test_config_match -- + * Return the static configuration entry for a test. + */ +const WT_CONFIG_ENTRY * +__wt_test_config_match(const char *test_name) +{ + const WT_CONFIG_ENTRY *ep; + + for (ep = config_entries; ep->method != NULL; ++ep) + if (strcmp(test_name, ep->method) == 0) + return (ep); + return (NULL); +} diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/configuration_settings.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/configuration_settings.h new file mode 100644 index 00000000000..9f1d43773c9 --- /dev/null +++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/configuration_settings.h @@ -0,0 +1,107 @@ +/* Include guard. 
*/ +#ifndef CONFIGURATION_SETTINGS_H +#define CONFIGURATION_SETTINGS_H + +#include "wt_internal.h" +#include <string> +#include <stdexcept> + +namespace test_harness { +class configuration { + private: + std::string _config; + WT_CONFIG_PARSER *_config_parser; + + public: + configuration(const char *test_config_name, const char *config) : _config(config) + { + int ret = wiredtiger_config_parser_open(nullptr, config, strlen(config), &_config_parser); + if (ret != 0) + throw std::invalid_argument( + "failed to create configuration parser for provided config"); + if (wiredtiger_test_config_validate(nullptr, nullptr, test_config_name, config) != 0) + throw std::invalid_argument( + "failed to validate given config, ensure test config exists"); + } + + configuration(const char *test_config_name, const WT_CONFIG_ITEM &nested) + { + if (nested.type != WT_CONFIG_ITEM::WT_CONFIG_ITEM_STRUCT) + throw std::invalid_argument("provided config item isn't a structure"); + int ret = wiredtiger_config_parser_open(nullptr, nested.str, nested.len, &_config_parser); + if (ret != 0) + throw std::invalid_argument( + "failed to create configuration parser for provided sub config"); + } + + ~configuration() + { + if (_config_parser != nullptr) { + _config_parser->close(_config_parser); + _config_parser = nullptr; + } + } + + std::string + get_config() + { + return _config; + } + + /* + * Wrapper functions for retrieving basic configuration values. Ideally the tests can avoid + * using the config item struct provided by wiredtiger. However if they still wish to use it the + * get and next functions can be used. + */ + int + get_string(const char *key, std::string &value) + { + WT_CONFIG_ITEM temp_value; + WT_RET(_config_parser->get(_config_parser, key, &temp_value)); + if (temp_value.type != WT_CONFIG_ITEM::WT_CONFIG_ITEM_STRING || + temp_value.type != WT_CONFIG_ITEM::WT_CONFIG_ITEM_ID) + return (-1); + value = std::string(temp_value.str, temp_value.len); + return (0); + } + + int + get_bool(const char *key, bool &value) + { + WT_CONFIG_ITEM temp_value; + WT_RET(_config_parser->get(_config_parser, key, &temp_value)); + if (temp_value.type != WT_CONFIG_ITEM::WT_CONFIG_ITEM_BOOL) + return (-1); + value = temp_value.val != 0; + return (0); + } + + int + get_int(const char *key, int64_t &value) + { + WT_CONFIG_ITEM temp_value; + WT_RET(_config_parser->get(_config_parser, key, &temp_value)); + if (temp_value.type != WT_CONFIG_ITEM::WT_CONFIG_ITEM_NUM) + return (-1); + value = temp_value.val; + return (0); + } + + /* + * Basic configuration parsing helper functions. + */ + int + next(WT_CONFIG_ITEM *key, WT_CONFIG_ITEM *value) + { + return _config_parser->next(_config_parser, key, value); + } + + int + get(const char *key, WT_CONFIG_ITEM *value) + { + return _config_parser->get(_config_parser, key, value); + } +}; +} // namespace test_harness + +#endif diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/test_harness.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/test_harness.h index 5660944ae67..b489c84b8ec 100644 --- a/src/third_party/wiredtiger/test/cppsuite/test_harness/test_harness.h +++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/test_harness.h @@ -5,19 +5,27 @@ /* Required to build using older versions of g++. */ #include <cinttypes> -extern "C" { +/* Include various wiredtiger libs. 
*/ #include "wiredtiger.h" #include "wt_internal.h" -} + +#include "configuration_settings.h" namespace test_harness { class test { public: + configuration *_configuration; + static const std::string _name; /* * All tests will implement this initially, the return value from it will indicate whether the * test was successful or not. */ virtual int run() = 0; + + test(std::string config) + { + _configuration = new configuration(_name.c_str(), config.c_str()); + } }; } // namespace test_harness diff --git a/src/third_party/wiredtiger/test/cppsuite/tests/poc.cxx b/src/third_party/wiredtiger/test/cppsuite/tests/poc.cxx index 0bf50387344..d41ff8dfa6b 100644 --- a/src/third_party/wiredtiger/test/cppsuite/tests/poc.cxx +++ b/src/third_party/wiredtiger/test/cppsuite/tests/poc.cxx @@ -8,23 +8,28 @@ class poc_test : public test_harness::test { WT_CONNECTION *conn; int ret = 0; /* Setup basic test directory. */ - const std::string default_dir = "WT_TEST"; + const char *default_dir = "WT_TEST"; /* * Csuite tests utilise a test_util.h command to make their directory, currently that doesn't * compile under c++ and some extra work will be needed to make it work. Its unclear if the * test framework will use test_util.h yet. */ - const std::string mkdir_cmd = "mkdir " + default_dir; - ret = system(mkdir_cmd.c_str()); + const char *mkdir_cmd = "mkdir WT_TEST"; + ret = system(mkdir_cmd); if (ret != 0) return (ret); - ret = wiredtiger_open(default_dir.c_str(), NULL, "create,cache_size=1G", &conn); + ret = wiredtiger_open(default_dir, NULL, "create,cache_size=1G", &conn); return (ret); } + + poc_test(std::string config) : test(config) {} }; +const std::string poc_test::test::_name = "poc_test"; + int main(int argc, char *argv[]) { - return poc_test().run(); + const char *cfg = "collection_count=1,key_size=5"; + return poc_test(cfg).run(); } diff --git a/src/third_party/wiredtiger/test/evergreen.yml b/src/third_party/wiredtiger/test/evergreen.yml index c4c79f5e39d..127c99ea040 100755 --- a/src/third_party/wiredtiger/test/evergreen.yml +++ b/src/third_party/wiredtiger/test/evergreen.yml @@ -110,7 +110,7 @@ functions: set -o verbose sh reconf if [ "$OS" != "Windows_NT" ]; then - CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-ggdb -fPIC -fno-omit-frame-pointer -fsanitize=address" \ + CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-ggdb -fPIC -fno-omit-frame-pointer -fsanitize=address" CXXFLAGS="-fsanitize=address -fno-omit-frame-pointer -ggdb -fPIC" \ ../configure ${configure_python_setting|} \ --enable-diagnostic --with-builtins=lz4,snappy,zlib fi @@ -429,7 +429,7 @@ variables: vars: configure_env_vars: CC="/opt/mongodbtoolchain/v3/bin/clang -fsanitize=address" - PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-ggdb -fPIC -fno-omit-frame-pointer" + PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-ggdb -fPIC -fno-omit-frame-pointer" CXXFLAGS="-fsanitize=address -fno-omit-frame-pointer -ggdb -fPIC" posix_configure_flags: --enable-strict --enable-diagnostic --with-builtins=lz4,snappy,zlib - func: "format test script" vars: @@ -477,7 +477,7 @@ tasks: - func: "get project" - func: "compile wiredtiger" vars: - configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=address -fno-omit-frame-pointer -ggdb" + configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang 
CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=address -fno-omit-frame-pointer -ggdb" CXXFLAGS="-fsanitize=address -fno-omit-frame-pointer -ggdb" posix_configure_flags: --enable-silent-rules --enable-strict --enable-diagnostic --disable-static - func: "upload artifact" - func: "cleanup" @@ -487,7 +487,7 @@ tasks: - func: "get project" - func: "compile wiredtiger" vars: - configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=memory -ggdb" + configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=memory -ggdb" CXXFLAGS="-fsanitize=memory -ggdb" posix_configure_flags: --enable-silent-rules --enable-strict --enable-diagnostic --disable-static - func: "upload artifact" - func: "cleanup" @@ -497,7 +497,7 @@ tasks: - func: "get project" - func: "compile wiredtiger" vars: - configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/gcc CXX=/opt/mongodbtoolchain/v3/bin/g++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=undefined -ggdb" + configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/gcc CXX=/opt/mongodbtoolchain/v3/bin/g++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=undefined -ggdb" CXXFLAGS="-fsanitize=undefined -ggdb" posix_configure_flags: --enable-silent-rules --enable-strict --enable-diagnostic - func: "upload artifact" - func: "cleanup" @@ -608,7 +608,7 @@ tasks: dependent_task: compile-msan - func: "compile wiredtiger" vars: - configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=memory -ggdb" + configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=memory -ggdb" CXXFLAGS="-fsanitize=memory -ggdb" posix_configure_flags: --enable-silent-rules --enable-strict --enable-diagnostic --disable-static - func: "make check all" vars: @@ -624,7 +624,7 @@ tasks: dependent_task: compile-asan - func: "compile wiredtiger" vars: - configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=address -fno-omit-frame-pointer -ggdb" + configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=address -fno-omit-frame-pointer -ggdb" CXXFLAGS="-fsanitize=address -fno-omit-frame-pointer -ggdb" posix_configure_flags: --enable-silent-rules --enable-strict --enable-diagnostic --disable-static - func: "make check all" vars: @@ -677,7 +677,7 @@ tasks: dependent_task: compile-asan - func: "compile wiredtiger" vars: - configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=address -ggdb" + configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/clang CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=address -ggdb" CXXFLAGS="-fsanitize=address -ggdb" posix_configure_flags: --enable-silent-rules --enable-strict --enable-diagnostic --disable-static - func: "make check directory" vars: @@ -857,7 +857,7 @@ tasks: dependent_task: compile-ubsan - func: "compile wiredtiger" vars: - configure_env_vars: 
CC=/opt/mongodbtoolchain/v3/bin/gcc CXX=/opt/mongodbtoolchain/v3/bin/g++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=undefined -ggdb" + configure_env_vars: CC=/opt/mongodbtoolchain/v3/bin/gcc CXX=/opt/mongodbtoolchain/v3/bin/g++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-fsanitize=undefined -ggdb" CXXFLAGS="-fsanitize=undefined -ggdb" posix_configure_flags: --enable-silent-rules --enable-strict --enable-diagnostic - command: shell.exec params: @@ -2322,6 +2322,8 @@ tasks: CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-ggdb -fPIC -fsanitize=address -fno-omit-frame-pointer -I/opt/mongodbtoolchain/v3/lib/gcc/ppc64le-mongodb-linux/8.2.0/include" + CXXFLAGS="-ggdb -fPIC -fsanitize=address -fno-omit-frame-pointer + -I/opt/mongodbtoolchain/v3/lib/gcc/ppc64le-mongodb-linux/8.2.0/include" posix_configure_flags: --enable-diagnostic --with-builtins=lz4,snappy,zlib - func: "format test script" vars: @@ -2347,6 +2349,8 @@ tasks: CXX=/opt/mongodbtoolchain/v3/bin/clang++ PATH=/opt/mongodbtoolchain/v3/bin:$PATH CFLAGS="-ggdb -fPIC -fsanitize=address -fno-omit-frame-pointer -I/opt/mongodbtoolchain/v3/lib/gcc/ppc64le-mongodb-linux/8.2.0/include" + CXXFLAGS="-ggdb -fPIC -fsanitize=address -fno-omit-frame-pointer + -I/opt/mongodbtoolchain/v3/lib/gcc/ppc64le-mongodb-linux/8.2.0/include" posix_configure_flags: --enable-diagnostic --with-builtins=lz4,snappy,zlib - func: "format test script" # To emulate the original Jenkins job's test coverage, we are running the smoke test 16 times diff --git a/src/third_party/wiredtiger/test/suite/test_backup04.py b/src/third_party/wiredtiger/test/suite/test_backup04.py index a378e5dbd63..80053f5016c 100755 --- a/src/third_party/wiredtiger/test/suite/test_backup04.py +++ b/src/third_party/wiredtiger/test/suite/test_backup04.py @@ -76,41 +76,17 @@ class test_backup_target(backup_base): cursor[simple_key(cursor, i)] = str(i) + ':' + upd * dsize cursor.close() - def take_full_backup(self, dir): - # Open up the backup cursor, and copy the files. Do a full backup. - cursor = self.session.open_cursor('backup:', None, None) - self.pr('Full backup to ' + dir + ': ') - os.mkdir(dir) - while True: - ret = cursor.next() - if ret != 0: - break - newfile = cursor.get_key() - sz = os.path.getsize(newfile) - self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + dir) - shutil.copy(newfile, dir) - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) - cursor.close() - # Take an incremental backup and then truncate/archive the logs. - def take_incr_backup(self, dir): - config = 'target=("log:")' - cursor = self.session.open_cursor('backup:', None, config) - while True: - ret = cursor.next() - if ret != 0: - break - newfile = cursor.get_key() - sz = os.path.getsize(newfile) - self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + dir) - shutil.copy(newfile, dir) - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) - self.session.truncate('log:', cursor, None, None) - cursor.close() + def take_log_incr_backup(self, dir): + config = 'target=("log:")' + cursor = self.session.open_cursor('backup:', None, config) + self.take_full_backup(dir, cursor) + self.session.truncate('log:', cursor, None, None) + cursor.close() # Run background inserts while running checkpoints and incremental backups # repeatedly. - def test_incremental_backup(self): + def test_log_incremental_backup(self): import sys # Create the backup directory. 
self.session.create(self.uri, "key_format=S,value_format=S") @@ -118,8 +94,9 @@ class test_backup_target(backup_base): self.populate_with_string(self.uri, self.dsize, self.nops) # We need to start the directory for the incremental backup with - # a full backup. The full backup function creates the directory. + # a full backup. dir = self.dir + os.mkdir(dir) self.take_full_backup(dir) self.session.checkpoint(None) @@ -137,11 +114,12 @@ class test_backup_target(backup_base): self.session.checkpoint(None) self.pr('Iteration: ' + str(increment)) - self.take_incr_backup(self.dir) + self.take_log_incr_backup(self.dir) # After running, take a full backup. Compare the incremental # backup to the original database and the full backup database. full_dir = self.dir + ".full" + os.mkdir(full_dir) self.take_full_backup(full_dir) self.compare_backups(self.uri, self.dir, full_dir) self.compare_backups(self.uri, self.dir, './') diff --git a/src/third_party/wiredtiger/test/suite/test_backup07.py b/src/third_party/wiredtiger/test/suite/test_backup07.py index 360432690eb..9f51a05e199 100644 --- a/src/third_party/wiredtiger/test/suite/test_backup07.py +++ b/src/third_party/wiredtiger/test/suite/test_backup07.py @@ -34,8 +34,7 @@ from wtdataset import simple_key from wtscenario import make_scenarios # test_backup07.py -# Test cursor backup with target URIs, logging and create during backup - +# Test cursor backup with target URIs, logging and create during backup. class test_backup07(backup_base): dir='backup.dir' # Backup directory name logmax="100K" @@ -67,10 +66,8 @@ class test_backup07(backup_base): # when the backup metadata is created on cursor open and the newly # created file is not in the cursor list. - # Open up the backup cursor, create and add data to a new table - # and then copy the files. + # Create and add data to a new table and then copy the files with a full backup. os.mkdir(self.dir) - bkup_c = self.session.open_cursor('backup:', None, None) # Now create and populate the new table. Make sure the log records # are on disk and will be copied to the backup. @@ -78,19 +75,9 @@ class test_backup07(backup_base): self.add_data(self.newuri, 'key', 'value') self.session.log_flush('sync=on') - # Now copy the files returned by the backup cursor. This should not - # include the newly created table. - while True: - ret = bkup_c.next() - if ret != 0: - break - newfile = bkup_c.get_key() - self.assertNotEqual(newfile, self.newuri) - sz = os.path.getsize(newfile) - self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir) - shutil.copy(newfile, self.dir) - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) - bkup_c.close() + # Now copy the files using full backup. This should not include the newly + # created table. + self.take_full_backup(self.dir) # After the full backup, open and recover the backup database. # Make sure we properly recover even though the log file will have diff --git a/src/third_party/wiredtiger/test/suite/test_backup10.py b/src/third_party/wiredtiger/test/suite/test_backup10.py index 9a74e190b3f..68a14307407 100644 --- a/src/third_party/wiredtiger/test/suite/test_backup10.py +++ b/src/third_party/wiredtiger/test/suite/test_backup10.py @@ -73,37 +73,14 @@ class test_backup10(backup_base): self.add_data(self.uri, 'key', 'value') self.session.log_flush('sync=on') - # Now copy the files returned by the backup cursor. 
- orig_logs = [] - while True: - ret = bkup_c.next() - if ret != 0: - break - newfile = bkup_c.get_key() - sz = os.path.getsize(newfile) - self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir) - shutil.copy(newfile, self.dir) - if "WiredTigerLog" in newfile: - orig_logs.append(newfile) - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) + # Now make a full backup and track the log files. + all_files = self.take_full_backup(self.dir, bkup_c) + orig_logs = [file for file in all_files if "WiredTigerLog" in file] # Now open a duplicate backup cursor. config = 'target=("log:")' dupc = self.session.open_cursor(None, bkup_c, config) - dup_logs = [] - while True: - ret = dupc.next() - if ret != 0: - break - newfile = dupc.get_key() - self.assertTrue("WiredTigerLog" in newfile) - sz = os.path.getsize(newfile) - if (newfile not in orig_logs): - self.pr('DUP: Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir) - shutil.copy(newfile, self.dir) - # Record all log files returned for later verification. - dup_logs.append(newfile) - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) + dup_logs = self.take_log_backup(bkup_c, self.dir, orig_logs, dupc) # We expect that the duplicate logs are a superset of the # original logs. And we expect the difference to be the @@ -129,7 +106,6 @@ class test_backup10(backup_base): self.assertRaisesWithMessage(wiredtiger.WiredTigerError, lambda:self.assertEquals(self.session.open_cursor(None, dupc, config), 0), msg) - dupc.close() # Test we must use the log target. diff --git a/src/third_party/wiredtiger/test/suite/test_backup11.py b/src/third_party/wiredtiger/test/suite/test_backup11.py index a974d505654..2b313a2dc9b 100644 --- a/src/third_party/wiredtiger/test/suite/test_backup11.py +++ b/src/third_party/wiredtiger/test/suite/test_backup11.py @@ -54,41 +54,15 @@ class test_backup11(backup_base): # Add data while the backup cursor is open. self.add_data(self.uri, 'key', 'value', True) - # Now copy the files returned by the backup cursor. - orig_logs = [] - while True: - ret = bkup_c.next() - if ret != 0: - break - newfile = bkup_c.get_key() - sz = os.path.getsize(newfile) - self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir) - shutil.copy(newfile, self.dir) - if "WiredTigerLog" in newfile: - orig_logs.append(newfile) - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) + # Now make a full backup and track the log files. + all_files = self.take_full_backup(self.dir, bkup_c) + orig_logs = [file for file in all_files if "WiredTigerLog" in file] # Now open a duplicate backup cursor. # We *can* use a log target duplicate on an incremental primary backup so that # a backup process can get all the log files that occur while that primary cursor # is open. - config = 'target=("log:")' - dupc = self.session.open_cursor(None, bkup_c, config) - dup_logs = [] - while True: - ret = dupc.next() - if ret != 0: - break - newfile = dupc.get_key() - self.assertTrue("WiredTigerLog" in newfile) - sz = os.path.getsize(newfile) - if (newfile not in orig_logs): - self.pr('DUP: Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir) - shutil.copy(newfile, self.dir) - # Record all log files returned for later verification. 
- dup_logs.append(newfile) - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) - dupc.close() + dup_logs = self.take_log_backup(bkup_c, self.dir, orig_logs) bkup_c.close() # Add more data diff --git a/src/third_party/wiredtiger/test/suite/test_backup12.py b/src/third_party/wiredtiger/test/suite/test_backup12.py index 53ad7845634..7fe72dd43fe 100644 --- a/src/third_party/wiredtiger/test/suite/test_backup12.py +++ b/src/third_party/wiredtiger/test/suite/test_backup12.py @@ -58,12 +58,12 @@ class test_backup12(backup_base): self.add_data(self.uri2, self.bigkey, self.bigval, True) self.add_data(self.uri_rem, self.bigkey, self.bigval, True) + os.mkdir(self.dir) + # # Open up the backup cursor. This causes a new log file to be created. # That log file is not part of the list returned. This is a full backup # primary cursor with incremental configured. - os.mkdir(self.dir) - # - # Note, this first backup is actually done before a checkpoint is taken. + # Note: this first backup is actually done before a checkpoint is taken. # config = 'incremental=(enabled,granularity=1M,this_id="ID1")' bkup_c = self.session.open_cursor('backup:', None, config) @@ -71,41 +71,15 @@ class test_backup12(backup_base): # Add more data while the backup cursor is open. self.add_data(self.uri, self.bigkey, self.bigval, True) - # Now copy the files returned by the backup cursor. - all_files = [] - while True: - ret = bkup_c.next() - if ret != 0: - break - newfile = bkup_c.get_key() - sz = os.path.getsize(newfile) - self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir) - shutil.copy(newfile, self.dir) - all_files.append(newfile) - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) + # Now make a full backup. + all_files = self.take_full_backup(self.dir, bkup_c) # Now open a duplicate backup cursor. # We *can* use a log target duplicate on an incremental primary backup so that # a backup process can get all the log files that occur while that primary cursor # is open. - config = 'target=("log:")' - dupc = self.session.open_cursor(None, bkup_c, config) - dup_logs = [] - while True: - ret = dupc.next() - if ret != 0: - break - newfile = dupc.get_key() - self.assertTrue("WiredTigerLog" in newfile) - sz = os.path.getsize(newfile) - if (newfile not in all_files): - self.pr('DUP: Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir) - shutil.copy(newfile, self.dir) - # Record all log files returned for later verification. - dup_logs.append(newfile) - all_files.append(newfile) - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) - dupc.close() + dup_logs = self.take_log_backup(bkup_c, self.dir, all_files) + all_files += dup_logs bkup_c.close() # Add more data. @@ -115,50 +89,9 @@ class test_backup12(backup_base): # Drop a table. self.session.drop(self.uri_rem) - # Now do an incremental backup. 
- config = 'incremental=(src_id="ID1",this_id="ID2")' - bkup_c = self.session.open_cursor('backup:', None, config) - self.pr('Open backup cursor ID1') - bkup_files = [] - while True: - ret = bkup_c.next() - if ret != 0: - break - newfile = bkup_c.get_key() - config = 'incremental=(file=' + newfile + ')' - self.pr('Open incremental cursor with ' + config) - dup_cnt = 0 - dupc = self.session.open_cursor(None, bkup_c, config) - bkup_files.append(newfile) - all_files.append(newfile) - while True: - ret = dupc.next() - if ret != 0: - break - incrlist = dupc.get_keys() - offset = incrlist[0] - size = incrlist[1] - curtype = incrlist[2] - # 1 is WT_BACKUP_FILE - # 2 is WT_BACKUP_RANGE - self.assertTrue(curtype == 1 or curtype == 2) - if curtype == 1: - self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir) - shutil.copy(newfile, self.dir) - else: - self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size)) - rfp = open(newfile, "r+b") - wfp = open(self.dir + '/' + newfile, "w+b") - rfp.seek(offset, 0) - wfp.seek(offset, 0) - buf = rfp.read(size) - wfp.write(buf) - rfp.close() - wfp.close() - dup_cnt += 1 - dupc.close() - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) - bkup_c.close() + # Now do an incremental backup with id 2. + (bkup_files, _) = self.take_incr_backup(self.dir, 2) + all_files += bkup_files # We need to remove files in the backup directory that are not in the current backup. all_set = set(all_files) diff --git a/src/third_party/wiredtiger/test/suite/test_backup13.py b/src/third_party/wiredtiger/test/suite/test_backup13.py index 8992440c038..73bc4aad9a5 100644 --- a/src/third_party/wiredtiger/test/suite/test_backup13.py +++ b/src/third_party/wiredtiger/test/suite/test_backup13.py @@ -57,18 +57,18 @@ class test_backup13(backup_base): def simulate_crash_restart(self, olddir, newdir): ''' Simulate a crash from olddir and restart in newdir. ''' - # with the connection still open, copy files to new directory + # with the connection still open, copy files to new directory. shutil.rmtree(newdir, ignore_errors=True) os.mkdir(newdir) for fname in os.listdir(olddir): fullname = os.path.join(olddir, fname) - # Skip lock file on Windows since it is locked + # Skip lock file on Windows since it is locked. if os.path.isfile(fullname) and \ "WiredTiger.lock" not in fullname and \ "Tmplog" not in fullname and \ "Preplog" not in fullname: shutil.copy(fullname, newdir) - # close the original connection and open to new directory + # close the original connection and open to new directory. self.close_conn() self.conn = self.setUpConnectionOpen(newdir) self.session = self.setUpSessionOpen(self.conn) @@ -87,79 +87,26 @@ class test_backup13(backup_base): def test_backup13(self): self.session.create(self.uri, "key_format=S,value_format=S") self.add_data_and_check() + + os.mkdir(self.dir) + + # Add more data while the backup cursor is open. + self.add_data_and_check() + # Open up the backup cursor. This causes a new log file to be created. # That log file is not part of the list returned. This is a full backup # primary cursor with incremental configured. - os.mkdir(self.dir) config = 'incremental=(enabled,granularity=1M,this_id="ID1")' bkup_c = self.session.open_cursor('backup:', None, config) - # Add more data while the backup cursor is open. - self.add_data_and_check() - - # Now copy the files returned by the backup cursor. 
- all_files = [] - - # We cannot use 'for newfile in bkup_c:' usage because backup cursors don't have - # values and adding in get_values returns ENOTSUP and causes the usage to fail. - # If that changes then this, and the use of the duplicate below can change. - while True: - ret = bkup_c.next() - if ret != 0: - break - newfile = bkup_c.get_key() - sz = os.path.getsize(newfile) - self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir) - shutil.copy(newfile, self.dir) - all_files.append(newfile) - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) + # Now make a full backup and track the files. + all_files = self.take_full_backup(self.dir, bkup_c) bkup_c.close() - # Add more data. self.add_data_and_check() - # Now do an incremental backup. - config = 'incremental=(src_id="ID1",this_id="ID2")' - bkup_c = self.session.open_cursor('backup:', None, config) - self.pr('Open backup cursor ID1') - bkup_files = [] - while True: - ret = bkup_c.next() - if ret != 0: - break - newfile = bkup_c.get_key() - config = 'incremental=(file=' + newfile + ')' - self.pr('Open incremental cursor with ' + config) - dup_cnt = 0 - dupc = self.session.open_cursor(None, bkup_c, config) - bkup_files.append(newfile) - all_files.append(newfile) - while True: - ret = dupc.next() - if ret != 0: - break - incrlist = dupc.get_keys() - offset = incrlist[0] - size = incrlist[1] - curtype = incrlist[2] - self.assertTrue(curtype == wiredtiger.WT_BACKUP_FILE or curtype == wiredtiger.WT_BACKUP_RANGE) - if curtype == wiredtiger.WT_BACKUP_FILE: - self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir) - shutil.copy(newfile, self.dir) - else: - self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size)) - rfp = open(newfile, "r+b") - wfp = open(self.dir + '/' + newfile, "w+b") - rfp.seek(offset, 0) - wfp.seek(offset, 0) - buf = rfp.read(size) - wfp.write(buf) - rfp.close() - wfp.close() - dup_cnt += 1 - dupc.close() - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) - bkup_c.close() + # Now do an incremental backup with id 2. + (bkup_files, _) = self.take_incr_backup(self.dir, 2) all_set = set(all_files) bkup_set = set(bkup_files) @@ -178,7 +125,6 @@ class test_backup13(backup_base): # Make sure after a force stop we cannot access old backup info. config = 'incremental=(src_id="ID1",this_id="ID3")' - self.assertRaises(wiredtiger.WiredTigerError, lambda: self.session.open_cursor('backup:', None, config)) diff --git a/src/third_party/wiredtiger/test/suite/test_backup14.py b/src/third_party/wiredtiger/test/suite/test_backup14.py index a4933140833..803dffd2562 100644 --- a/src/third_party/wiredtiger/test/suite/test_backup14.py +++ b/src/third_party/wiredtiger/test/suite/test_backup14.py @@ -57,125 +57,8 @@ class test_backup14(backup_base): bigkey = 'Key' * 100 bigval = 'Value' * 100 - def take_full_backup(self): - if self.counter != 0: - hdir = self.home_full + '.' + str(self.counter) - else: - hdir = self.home_incr - - # - # First time through we take a full backup into the incremental directories. Otherwise only - # into the appropriate full directory. 
- # - buf = None - if self.initial_backup == True: - buf = 'incremental=(granularity=1M,enabled=true,this_id=ID0)' - - cursor = self.session.open_cursor('backup:', None, buf) - while True: - ret = cursor.next() - if ret != 0: - break - newfile = cursor.get_key() - - if self.counter == 0: - # Take a full backup into each incremental directory - for i in range(0, self.max_iteration): - copy_from = newfile - # If it is a log file, prepend the path. - if ("WiredTigerLog" in newfile): - copy_to = self.home_incr + '.' + str(i) + '/' + self.logpath - else: - copy_to = self.home_incr + '.' + str(i) - shutil.copy(copy_from, copy_to) - else: - copy_from = newfile - # If it is log file, prepend the path. - if ("WiredTigerLog" in newfile): - copy_to = hdir + '/' + self.logpath - else: - copy_to = hdir - - shutil.copy(copy_from, copy_to) - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) - cursor.close() - - def take_incr_backup(self): - # Open the backup data source for incremental backup. - buf = 'incremental=(src_id="ID' + str(self.counter - 1) + '",this_id="ID' + str(self.counter) + '")' - bkup_c = self.session.open_cursor('backup:', None, buf) - while True: - ret = bkup_c.next() - if ret != 0: - break - newfile = bkup_c.get_key() - h = self.home_incr + '.0' - copy_from = newfile - # If it is log file, prepend the path. - if ("WiredTigerLog" in newfile): - copy_to = h + '/' + self.logpath - else: - copy_to = h - - shutil.copy(copy_from, copy_to) - first = True - config = 'incremental=(file=' + newfile + ')' - dup_cnt = 0 - incr_c = self.session.open_cursor(None, bkup_c, config) - - # For each file listed, open a duplicate backup cursor and copy the blocks. - while True: - ret = incr_c.next() - if ret != 0: - break - incrlist = incr_c.get_keys() - offset = incrlist[0] - size = incrlist[1] - curtype = incrlist[2] - # 1 is WT_BACKUP_FILE - # 2 is WT_BACKUP_RANGE - self.assertTrue(curtype == 1 or curtype == 2) - if curtype == 1: - if first == True: - h = self.home_incr + '.' + str(self.counter) - first = False - - copy_from = newfile - if ("WiredTigerLog" in newfile): - copy_to = h + '/' + self.logpath - else: - copy_to = h - shutil.copy(copy_from, copy_to) - else: - self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size)) - read_from = newfile - write_to = self.home_incr + '.' + str(self.counter) + '/' + newfile - rfp = open(read_from, "r+b") - wfp = open(write_to, "w+b") - rfp.seek(offset, 0) - wfp.seek(offset, 0) - buf = rfp.read(size) - wfp.write(buf) - rfp.close() - wfp.close() - dup_cnt += 1 - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) - incr_c.close() - - # For each file, we want to copy the file into each of the later incremental directories - for i in range(self.counter, self.max_iteration): - h = self.home_incr + '.' + str(i) - copy_from = newfile - if ("WiredTigerLog" in newfile): - copy_to = h + '/' + self.logpath - else: - copy_to = h - shutil.copy(copy_from, copy_to) - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) - bkup_c.close() - # - # Remove data from uri (table:main) + # Remove data from uri (table:main). # def remove_data(self): c = self.session.open_cursor(self.uri) @@ -191,7 +74,7 @@ class test_backup14(backup_base): self.assertEquals(c.remove(), 0) c.close() # Increase the counter so that later backups have unique ids. 
- self.counter += 1 + self.bkup_id += 1 # # This function will add records to the table (table:main), take incremental/full backups and @@ -202,15 +85,16 @@ class test_backup14(backup_base): self.initial_backup = True self.add_data(self.uri, self.bigkey, self.bigval) - self.take_full_backup() + self.take_full_backup(self.home_incr) self.initial_backup = False self.session.checkpoint() self.add_data(self.uri, self.bigkey, self.bigval) - self.take_full_backup() - self.take_incr_backup() - self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.counter)) + self.take_full_backup(self.home_full) + self.take_incr_backup(self.home_incr) + self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.bkup_id)) + self.setup_directories(self.home_incr, self.home_full) # # This function will remove all the records from table (table:main), take backup and validate the @@ -218,9 +102,10 @@ class test_backup14(backup_base): # def remove_all_records_validate(self): self.remove_data() - self.take_full_backup() - self.take_incr_backup() - self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.counter)) + self.take_full_backup(self.home_full) + self.take_incr_backup(self.home_incr) + self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.bkup_id)) + self.setup_directories(self.home_incr, self.home_full) # # This function will drop the existing table uri (table:main) that is part of the backups and @@ -231,18 +116,19 @@ class test_backup14(backup_base): # Drop main table. self.session.drop(self.uri) - # Create uri2 (table:extra) + # Create uri2 (table:extra). self.session.create(self.uri2, "key_format=S,value_format=S") self.new_table = True self.add_data(self.uri2, self.bigkey, self.bigval) - self.take_incr_backup() + self.take_incr_backup(self.home_incr) table_list = 'tablelist.txt' # Assert if the dropped table (table:main) exists in the incremental folder. 
self.runWt(['-R', '-h', self.home, 'list'], outfilename=table_list) ret = os.system("grep " + self.uri + " " + table_list) self.assertNotEqual(ret, 0, self.uri + " dropped, but table exists in " + self.home) + self.setup_directories(self.home_incr, self.home_full) # # This function will create previously dropped table uri (table:main) and add different content to @@ -251,9 +137,10 @@ class test_backup14(backup_base): def create_dropped_table_add_new_content(self): self.session.create(self.uri, "key_format=S,value_format=S") self.add_data(self.uri, self.bigkey, self.bigval) - self.take_full_backup() - self.take_incr_backup() - self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.counter)) + self.take_full_backup(self.home_full) + self.take_incr_backup(self.home_incr) + self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.bkup_id)) + self.setup_directories(self.home_incr, self.home_full) # # This function will insert bulk data in logged and not-logged table, take backups and validate the @@ -266,25 +153,27 @@ class test_backup14(backup_base): self.session.create(self.uri_logged, "key_format=S,value_format=S") self.add_data(self.uri_logged, self.bigkey, self.bigval) - self.take_full_backup() - self.take_incr_backup() - self.compare_backups(self.uri_logged, self.home_full, self.home_incr, str(self.counter)) + self.take_full_backup(self.home_full) + self.take_incr_backup(self.home_incr) + self.compare_backups(self.uri_logged, self.home_full, self.home_incr, str(self.bkup_id)) + self.setup_directories(self.home_incr, self.home_full) # # Insert bulk data into uri4 (table:not_logged_table). # self.session.create(self.uri_not_logged, "key_format=S,value_format=S,log=(enabled=false)") self.add_data(self.uri_not_logged, self.bigkey, self.bigval) - self.take_full_backup() - self.take_incr_backup() - self.compare_backups(self.uri_not_logged, self.home_full, self.home_incr, str(self.counter)) + self.take_full_backup(self.home_full) + self.take_incr_backup(self.home_incr) + self.compare_backups(self.uri_not_logged, self.home_full, self.home_incr, str(self.bkup_id)) + self.setup_directories(self.home_incr, self.home_full) def test_backup14(self): os.mkdir(self.bkp_home) self.home = self.bkp_home self.session.create(self.uri, "key_format=S,value_format=S") - self.setup_directories(self.max_iteration, self.home_incr, self.home_full, self.logpath) + self.setup_directories(self.home_incr, self.home_full) self.pr('*** Add data, checkpoint, take backups and validate ***') self.add_data_validate_backups() diff --git a/src/third_party/wiredtiger/test/suite/test_backup15.py b/src/third_party/wiredtiger/test/suite/test_backup15.py index 669618c151c..2cbb36d3a1b 100644 --- a/src/third_party/wiredtiger/test/suite/test_backup15.py +++ b/src/third_party/wiredtiger/test/suite/test_backup15.py @@ -37,7 +37,7 @@ import glob # Test cursor backup with a block-based incremental cursor. class test_backup15(backup_base): bkp_home = "WT_BLOCK" - counter=0 + bkup_id=0 conn_config='cache_size=1G,log=(enabled,file_max=100K)' logmax="100K" max_iteration=5 @@ -53,153 +53,12 @@ class test_backup15(backup_base): logpath = "logpath" new_table=False - initial_backup=False pfx = 'test_backup' # Set the key and value big enough that we modify a few blocks. bigkey = 'Key' * 100 bigval = 'Value' * 100 - def range_copy(self, filename, offset, size): - read_from = filename - old_to = self.home_incr + '.' + str(self.counter - 1) + '/' + filename - write_to = self.home_incr + '.' 
+ str(self.counter) + '/' + filename - rfp = open(read_from, "r+b") - self.pr('RANGE CHECK file ' + old_to + ' offset ' + str(offset) + ' len ' + str(size)) - rfp2 = open(old_to, "r+b") - rfp.seek(offset, 0) - rfp2.seek(offset, 0) - buf = rfp.read(size) - buf2 = rfp2.read(size) - # This assertion tests that the offset range we're given actually changed - # from the previous backup. - self.assertNotEqual(buf, buf2) - wfp = open(write_to, "w+b") - wfp.seek(offset, 0) - wfp.write(buf) - rfp.close() - rfp2.close() - wfp.close() - - def take_full_backup(self): - if self.counter != 0: - hdir = self.home_full + '.' + str(self.counter) - else: - hdir = self.home_incr - - # - # First time through we take a full backup into the incremental directories. Otherwise only - # into the appropriate full directory. - # - buf = None - if self.initial_backup == True: - buf = 'incremental=(granularity=1M,enabled=true,this_id=ID0)' - - bkup_c = self.session.open_cursor('backup:', None, buf) - # We cannot use 'for newfile in bkup_c:' usage because backup cursors don't have - # values and adding in get_values returns ENOTSUP and causes the usage to fail. - # If that changes then this, and the use of the duplicate below can change. - while True: - ret = bkup_c.next() - if ret != 0: - break - newfile = bkup_c.get_key() - - if self.counter == 0: - # Take a full backup into each incremental directory - for i in range(0, self.max_iteration): - copy_from = newfile - # If it is a log file, prepend the path. - if ("WiredTigerLog" in newfile): - copy_to = self.home_incr + '.' + str(i) + '/' + self.logpath - else: - copy_to = self.home_incr + '.' + str(i) - shutil.copy(copy_from, copy_to) - else: - copy_from = newfile - # If it is log file, prepend the path. - if ("WiredTigerLog" in newfile): - copy_to = hdir + '/' + self.logpath - else: - copy_to = hdir - - shutil.copy(copy_from, copy_to) - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) - bkup_c.close() - - def take_incr_backup(self): - self.assertTrue(self.counter > 0) - # Open the backup data source for incremental backup. - buf = 'incremental=(src_id="ID' + str(self.counter - 1) + '",this_id="ID' + str(self.counter) + '")' - self.pr(buf) - bkup_c = self.session.open_cursor('backup:', None, buf) - - # We cannot use 'for newfile in bkup_c:' usage because backup cursors don't have - # values and adding in get_values returns ENOTSUP and causes the usage to fail. - # If that changes then this, and the use of the duplicate below can change. - while True: - ret = bkup_c.next() - if ret != 0: - break - newfile = bkup_c.get_key() - h = self.home_incr + '.0' - copy_from = newfile - # If it is log file, prepend the path. - if ("WiredTigerLog" in newfile): - copy_to = h + '/' + self.logpath - else: - copy_to = h - - shutil.copy(copy_from, copy_to) - first = True - config = 'incremental=(file=' + newfile + ')' - dup_cnt = 0 - # For each file listed, open a duplicate backup cursor and copy the blocks. - incr_c = self.session.open_cursor(None, bkup_c, config) - - # We cannot use 'for newfile in incr_c:' usage because backup cursors don't have - # values and adding in get_values returns ENOTSUP and causes the usage to fail. - # If that changes then this, and the use of the duplicate below can change. 
- while True: - ret = incr_c.next() - if ret != 0: - break - incrlist = incr_c.get_keys() - offset = incrlist[0] - size = incrlist[1] - curtype = incrlist[2] - self.assertTrue(curtype == wiredtiger.WT_BACKUP_FILE or curtype == wiredtiger.WT_BACKUP_RANGE) - if curtype == wiredtiger.WT_BACKUP_FILE: - # Copy the whole file. - if first == True: - h = self.home_incr + '.' + str(self.counter) - first = False - - copy_from = newfile - if ("WiredTigerLog" in newfile): - copy_to = h + '/' + self.logpath - else: - copy_to = h - shutil.copy(copy_from, copy_to) - else: - # Copy the block range. - self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size)) - self.range_copy(newfile, offset, size) - dup_cnt += 1 - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) - incr_c.close() - - # For each file, we want to copy it into each of the later incremental directories. - for i in range(self.counter, self.max_iteration): - h = self.home_incr + '.' + str(i) - copy_from = newfile - if ("WiredTigerLog" in newfile): - copy_to = h + '/' + self.logpath - else: - copy_to = h - shutil.copy(copy_from, copy_to) - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) - bkup_c.close() # # Add data to the given uri. # @@ -228,23 +87,22 @@ class test_backup15(backup_base): self.mult += 1 # Increase the counter so that later backups have unique ids. if self.initial_backup == False: - self.counter += 1 + self.bkup_id += 1 def test_backup15(self): os.mkdir(self.bkp_home) self.home = self.bkp_home self.session.create(self.uri, "key_format=S,value_format=S") - self.setup_directories(self.max_iteration, self.home_incr, self.home_full, self.logpath) + self.setup_directories(self.home_incr, self.home_full) self.pr('*** Add data, checkpoint, take backups and validate ***') self.pr('Adding initial data') self.initial_backup = True self.add_complex_data(self.uri) - self.take_full_backup() + self.take_full_backup(self.home_incr) self.initial_backup = False self.session.checkpoint() - # Each call now to take a full backup will make a copy into a full directory. Then # each incremental will take an incremental backup and we can compare them. for i in range(1, self.max_iteration): @@ -253,12 +111,13 @@ class test_backup15(backup_base): # Swap the order of the full and incremental backups. It should not matter. They # should not interfere with each other. if i % 2 == 0: - self.take_full_backup() - self.take_incr_backup() + self.take_full_backup(self.home_full) + self.take_incr_backup(self.home_incr) else: - self.take_incr_backup() - self.take_full_backup() - self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.counter)) + self.take_incr_backup(self.home_incr) + self.take_full_backup(self.home_full) + self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.bkup_id)) + self.setup_directories(self.home_incr, self.home_full) if __name__ == '__main__': wttest.run() diff --git a/src/third_party/wiredtiger/test/suite/test_backup16.py b/src/third_party/wiredtiger/test/suite/test_backup16.py index a589cbd9691..1d3395e91fc 100644 --- a/src/third_party/wiredtiger/test/suite/test_backup16.py +++ b/src/third_party/wiredtiger/test/suite/test_backup16.py @@ -38,7 +38,6 @@ from wtscenario import make_scenarios class test_backup16(backup_base): conn_config='cache_size=1G,log=(enabled,file_max=100K)' - counter=1 logmax='100K' # Define the table name and its on-disk file name together. 
@@ -61,14 +60,14 @@ class test_backup16(backup_base):
 bigval = 'Value' * 10
 mult = 1
- counter = 1
+ bkup_id = 1
 nops = 10
-
+ initial_backup = True
 def verify_incr_backup(self, expected_file_list):
- bkup_config = ('incremental=(src_id="ID' + str(self.counter - 1) +
- '",this_id="ID' + str(self.counter) + '")')
+ bkup_config = ('incremental=(src_id="ID' + str(self.bkup_id - 1) +
+ '",this_id="ID' + str(self.bkup_id) + '")')
 bkup_cur = self.session.open_cursor('backup:', None, bkup_config)
- self.counter += 1
+ self.bkup_id += 1
 num_files = 0
 # Verify the files included in the incremental backup are the ones we expect.
diff --git a/src/third_party/wiredtiger/test/suite/test_backup17.py b/src/third_party/wiredtiger/test/suite/test_backup17.py
index 04bff3ea9d8..3639bbef58e 100644
--- a/src/third_party/wiredtiger/test/suite/test_backup17.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup17.py
@@ -52,60 +52,21 @@ class test_backup17(backup_base):
 nops = 1000
- def take_incr_backup(self, id, consolidate):
- # Open the backup data source for incremental backup.
- buf = 'incremental=(src_id="ID' + str(id - 1) + '",this_id="ID' + str(id) + '"'
- if consolidate:
- buf += ',consolidate=true'
- buf += ')'
- bkup_c = self.session.open_cursor('backup:', None, buf)
- lens = []
+ #
+ # Given a list of file lengths, when the consolidate option is used we expect the incremental
+ # backup to collapse adjacent blocks and return lengths that exceed the granularity setting,
+ # so we verify that we see multiple blocks. If consolidate is not used, no block length should
+ # ever be greater than the granularity setting.
+ #
+ def check_consolidate_sizes(self, file_lens, consolidate):
 saw_multiple = False
- while True:
- ret = bkup_c.next()
- if ret != 0:
- break
- newfile = bkup_c.get_key()
- config = 'incremental=(file=' + newfile + ')'
- self.pr('Open incremental cursor with ' + config)
- dup_cnt = 0
- dupc = self.session.open_cursor(None, bkup_c, config)
- while True:
- ret = dupc.next()
- if ret != 0:
- break
- incrlist = dupc.get_keys()
- offset = incrlist[0]
- size = incrlist[1]
- curtype = incrlist[2]
- # 1 is WT_BACKUP_FILE
- # 2 is WT_BACKUP_RANGE
- self.assertTrue(curtype == 1 or curtype == 2)
- if curtype == 1:
- self.pr('Copy from: ' + newfile + ' (' + str(size) + ') to ' + self.dir)
- shutil.copy(newfile, self.dir)
- else:
- self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size))
- lens.append(size)
- rfp = open(newfile, "r+b")
- wfp = open(self.dir + '/' + newfile, "w+b")
- rfp.seek(offset, 0)
- wfp.seek(offset, 0)
- if size > self.granval:
- saw_multiple = True
- buf = rfp.read(size)
- wfp.write(buf)
- rfp.close()
- wfp.close()
- dup_cnt += 1
- dupc.close()
- self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
- bkup_c.close()
+ for size in file_lens:
+ if size > self.granval:
+ saw_multiple = True
 if consolidate:
 self.assertTrue(saw_multiple)
 else:
 self.assertFalse(saw_multiple)
- return lens
 def test_backup17(self):
@@ -115,25 +76,15 @@
 self.mult = 0
 self.add_data(self.uri2, self.bigkey, self.bigval, True)
+ os.mkdir(self.dir)
 # Open up the backup cursor. This causes a new log file to be created.
 # That log file is not part of the list returned. This is a full backup
 # primary cursor with incremental configured.
- os.mkdir(self.dir)
 config = 'incremental=(enabled,granularity=%s,this_id="ID1")' % self.gran
 bkup_c = self.session.open_cursor('backup:', None, config)
- # Now copy the files returned by the backup cursor.
- all_files = [] - while True: - ret = bkup_c.next() - if ret != 0: - break - newfile = bkup_c.get_key() - sz = os.path.getsize(newfile) - self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir) - shutil.copy(newfile, self.dir) - all_files.append(newfile) - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) + # Now make a full backup and track the log files. + self.take_full_backup(self.dir, bkup_c) bkup_c.close() # This is the main part of the test for consolidate. Add data to the first table. @@ -143,12 +94,16 @@ class test_backup17(backup_base): self.mult = 1 self.add_data(self.uri, self.bigkey, self.bigval, True) - uri1_lens = self.take_incr_backup(2, False) + # Do an incremental backup with id 2. + (_, uri1_lens) = self.take_incr_backup(self.dir, 2, False) + self.check_consolidate_sizes(uri1_lens, False) self.mult = 1 self.add_data(self.uri2, self.bigkey, self.bigval, True) - uri2_lens = self.take_incr_backup(3, True) + # Now do an incremental backup with id 3. + (_, uri2_lens) = self.take_incr_backup(self.dir, 3, True) + self.check_consolidate_sizes(uri2_lens, True) # Assert that we recorded fewer lengths on the consolidated backup. self.assertLess(len(uri2_lens), len(uri1_lens)) diff --git a/src/third_party/wiredtiger/test/suite/test_backup19.py b/src/third_party/wiredtiger/test/suite/test_backup19.py index c94bf381790..20b43f39ea7 100644 --- a/src/third_party/wiredtiger/test/suite/test_backup19.py +++ b/src/third_party/wiredtiger/test/suite/test_backup19.py @@ -37,7 +37,7 @@ import glob # Test cursor backup with a block-based incremental cursor source id only. class test_backup19(backup_base): bkp_home = "WT_BLOCK" - counter=0 + bkup_id=0 conn_config='cache_size=1G,log=(enabled,file_max=100K)' logmax="100K" mult=0 @@ -45,6 +45,7 @@ class test_backup19(backup_base): savefirst=0 savekey='NOTSET' uri="table:main" + max_iteration=2 dir='backup.dir' # Backup directory name home_full = "WT_BLOCK_LOG_FULL" @@ -52,154 +53,12 @@ class test_backup19(backup_base): logpath = "logpath" new_table=False - initial_backup=False pfx = 'test_backup' # Set the key and value big enough that we modify a few blocks. bigkey = 'Key' * 100 bigval = 'Value' * 100 - def range_copy(self, filename, offset, size): - read_from = filename - old_to = self.home_incr + '.' + str(self.counter - 1) + '/' + filename - write_to = self.home_incr + '.' + str(self.counter) + '/' + filename - rfp = open(read_from, "r+b") - self.pr('RANGE CHECK file ' + old_to + ' offset ' + str(offset) + ' len ' + str(size)) - rfp2 = open(old_to, "r+b") - rfp.seek(offset, 0) - rfp2.seek(offset, 0) - buf = rfp.read(size) - buf2 = rfp2.read(size) - # This assertion tests that the offset range we're given actually changed - # from the previous backup. - self.assertNotEqual(buf, buf2) - wfp = open(write_to, "w+b") - wfp.seek(offset, 0) - wfp.write(buf) - rfp.close() - rfp2.close() - wfp.close() - - def take_full_backup(self): - if self.counter != 0: - hdir = self.home_full + '.' + str(self.counter) - else: - hdir = self.home_incr - - # - # First time through we take a full backup into the incremental directories. Otherwise only - # into the appropriate full directory. - # - buf = None - if self.initial_backup == True: - buf = 'incremental=(granularity=1M,enabled=true,this_id=ID0)' - - bkup_c = self.session.open_cursor('backup:', None, buf) - # We cannot use 'for newfile in bkup_c:' usage because backup cursors don't have - # values and adding in get_values returns ENOTSUP and causes the usage to fail. 
- # If that changes then this, and the use of the duplicate below can change. - while True: - ret = bkup_c.next() - if ret != 0: - break - newfile = bkup_c.get_key() - - if self.counter == 0: - # Take a full backup into each incremental directory - for i in range(0, 2): - copy_from = newfile - # If it is a log file, prepend the path. - if ("WiredTigerLog" in newfile): - copy_to = self.home_incr + '.' + str(i) + '/' + self.logpath - else: - copy_to = self.home_incr + '.' + str(i) - shutil.copy(copy_from, copy_to) - else: - copy_from = newfile - # If it is log file, prepend the path. - if ("WiredTigerLog" in newfile): - copy_to = hdir + '/' + self.logpath - else: - copy_to = hdir - - shutil.copy(copy_from, copy_to) - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) - bkup_c.close() - - def take_incr_backup(self): - self.assertTrue(self.counter > 0) - # Open the backup data source for incremental backup. - buf = 'incremental=(src_id="ID' + str(self.counter - 1) + '")' - self.pr(buf) - bkup_c = self.session.open_cursor('backup:', None, buf) - - # We cannot use 'for newfile in bkup_c:' usage because backup cursors don't have - # values and adding in get_values returns ENOTSUP and causes the usage to fail. - # If that changes then this, and the use of the duplicate below can change. - while True: - ret = bkup_c.next() - if ret != 0: - break - newfile = bkup_c.get_key() - h = self.home_incr + '.0' - copy_from = newfile - # If it is log file, prepend the path. - if ("WiredTigerLog" in newfile): - copy_to = h + '/' + self.logpath - else: - copy_to = h - - shutil.copy(copy_from, copy_to) - first = True - config = 'incremental=(file=' + newfile + ')' - dup_cnt = 0 - # For each file listed, open a duplicate backup cursor and copy the blocks. - incr_c = self.session.open_cursor(None, bkup_c, config) - - # We cannot use 'for newfile in incr_c:' usage because backup cursors don't have - # values and adding in get_values returns ENOTSUP and causes the usage to fail. - # If that changes then this, and the use of the duplicate below can change. - while True: - ret = incr_c.next() - if ret != 0: - break - incrlist = incr_c.get_keys() - offset = incrlist[0] - size = incrlist[1] - curtype = incrlist[2] - self.assertTrue(curtype == wiredtiger.WT_BACKUP_FILE or curtype == wiredtiger.WT_BACKUP_RANGE) - if curtype == wiredtiger.WT_BACKUP_FILE: - # Copy the whole file. - if first == True: - h = self.home_incr + '.' + str(self.counter) - first = False - - copy_from = newfile - if ("WiredTigerLog" in newfile): - copy_to = h + '/' + self.logpath - else: - copy_to = h - shutil.copy(copy_from, copy_to) - else: - # Copy the block range. - self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size)) - self.range_copy(newfile, offset, size) - dup_cnt += 1 - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) - incr_c.close() - - # For each file, we want to copy it into each of the later incremental directories. - for i in range(self.counter, 2): - h = self.home_incr + '.' + str(i) - copy_from = newfile - if ("WiredTigerLog" in newfile): - copy_to = h + '/' + self.logpath - else: - copy_to = h - shutil.copy(copy_from, copy_to) - self.assertEqual(ret, wiredtiger.WT_NOTFOUND) - bkup_c.close() - # # Add data to the given uri. # @@ -228,27 +87,28 @@ class test_backup19(backup_base): self.mult += 1 # Increase the counter so that later backups have unique ids. 
if self.initial_backup == False:
- self.counter += 1
+ self.bkup_id += 1
 def test_backup19(self):
 os.mkdir(self.bkp_home)
 self.home = self.bkp_home
 self.session.create(self.uri, "key_format=S,value_format=S")
- self.setup_directories(2, self.home_incr, self.home_full, self.logpath)
+ self.setup_directories(self.home_incr, self.home_full)
 self.pr('*** Add data, checkpoint, take backups and validate ***')
 self.pr('Adding initial data')
 self.initial_backup = True
 self.add_complex_data(self.uri)
- self.take_full_backup()
+ self.take_full_backup(self.home_incr)
 self.initial_backup = False
 self.session.checkpoint()
 self.add_complex_data(self.uri)
 self.session.checkpoint()
- self.take_full_backup()
- self.take_incr_backup()
- self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.counter))
+
+ self.take_full_backup(self.home_full)
+ self.take_incr_backup(self.home_incr)
+ self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.bkup_id))
 if __name__ == '__main__':
 wttest.run()
diff --git a/src/third_party/wiredtiger/test/suite/test_rollback_to_stable14.py b/src/third_party/wiredtiger/test/suite/test_rollback_to_stable14.py
index ea88a33a066..948ed1b2a8f 100755
--- a/src/third_party/wiredtiger/test/suite/test_rollback_to_stable14.py
+++ b/src/third_party/wiredtiger/test/suite/test_rollback_to_stable14.py
@@ -41,6 +41,9 @@ def timestamp_str(t):
 def mod_val(value, char, location, nbytes=1):
 return value[0:location] + char + value[location+nbytes:]
+def append_val(value, char):
+ return value + char
+
 def retry_rollback(self, name, txn_session, code):
 retry_limit = 100
 retries = 0
@@ -300,5 +303,101 @@ class test_rollback_to_stable14(test_rollback_to_stable_base):
 # The test may output the following message in eviction under cache pressure. Ignore that.
 self.ignoreStdoutPatternIfExists("oldest pinned transaction ID rolled back for eviction")
+ def test_rollback_to_stable_same_ts_append(self):
+ nrows = 1500
+
+ # Create a table without logging.
+ self.pr("create/populate table")
+ uri = "table:rollback_to_stable14"
+ ds = SimpleDataSet(
+ self, uri, 0, key_format="i", value_format="S", config='log=(enabled=false)')
+ ds.populate()
+
+ # Pin oldest and stable to timestamp 10.
+ self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
+ ',stable_timestamp=' + timestamp_str(10))
+
+ value_a = "aaaaa" * 100
+
+ value_modQ = append_val(value_a, 'Q')
+ value_modR = append_val(value_modQ, 'R')
+ value_modS = append_val(value_modR, 'S')
+ value_modT = append_val(value_modS, 'T')
+
+ # Perform a combination of modifies and updates.
+ self.pr("large updates and modifies")
+ self.large_updates(uri, value_a, ds, nrows, 20)
+ self.large_modifies(uri, 'Q', ds, len(value_a), 1, nrows, 30)
+ # Prepare cannot always use the same timestamp, so use different timestamps that are aborted.
+ if self.prepare:
+ self.large_modifies(uri, 'R', ds, len(value_modQ), 1, nrows, 51)
+ self.large_modifies(uri, 'S', ds, len(value_modR), 1, nrows, 55)
+ self.large_modifies(uri, 'T', ds, len(value_modS), 1, nrows, 60)
+ else:
+ self.large_modifies(uri, 'R', ds, len(value_modQ), 1, nrows, 60)
+ self.large_modifies(uri, 'S', ds, len(value_modR), 1, nrows, 60)
+ self.large_modifies(uri, 'T', ds, len(value_modS), 1, nrows, 60)
+
+ # Verify data is visible and correct.
+ self.check(value_a, uri, nrows, 20) + self.check(value_modQ, uri, nrows, 30) + self.check(value_modT, uri, nrows, 60) + + self.conn.set_timestamp('stable_timestamp=' + timestamp_str(50)) + + # Create a checkpoint thread + done = threading.Event() + ckpt = checkpoint_thread(self.conn, done) + try: + self.pr("start checkpoint") + ckpt.start() + + # Perform several modifies in parallel with checkpoint. + # Rollbacks may occur when checkpoint is running, so retry as needed. + self.pr("modifies") + retry_rollback(self, 'modify ds1, W', None, + lambda: self.large_modifies(uri, 'W', ds, len(value_modT), 1, nrows, 70)) + retry_rollback(self, 'modify ds1, X', None, + lambda: self.large_modifies(uri, 'X', ds, len(value_modT) + 1, 1, nrows, 80)) + retry_rollback(self, 'modify ds1, Y', None, + lambda: self.large_modifies(uri, 'Y', ds, len(value_modT) + 2, 1, nrows, 90)) + retry_rollback(self, 'modify ds1, Z', None, + lambda: self.large_modifies(uri, 'Z', ds, len(value_modT) + 3, 1, nrows, 100)) + finally: + done.set() + ckpt.join() + + # Simulate a server crash and restart. + self.pr("restart") + self.simulate_crash_restart(".", "RESTART") + self.pr("restart complete") + + stat_cursor = self.session.open_cursor('statistics:', None, None) + calls = stat_cursor[stat.conn.txn_rts][2] + hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2] + hs_restore_updates = stat_cursor[stat.conn.txn_rts_hs_restore_updates][2] + hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2] + keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2] + keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2] + pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2] + upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2] + stat_cursor.close() + + self.assertEqual(calls, 0) + self.assertEqual(keys_removed, 0) + self.assertEqual(hs_restore_updates, nrows) + self.assertEqual(keys_restored, 0) + self.assertEqual(upd_aborted, 0) + self.assertGreater(pages_visited, 0) + self.assertGreaterEqual(hs_removed, nrows * 3) + self.assertGreaterEqual(hs_sweep, 0) + + # Check that the correct data is seen at and after the stable timestamp. + self.check(value_a, uri, nrows, 20) + self.check(value_modQ, uri, nrows, 30) + + # The test may output the following message in eviction under cache pressure. Ignore that. + self.ignoreStdoutPatternIfExists("oldest pinned transaction ID rolled back for eviction") + if __name__ == '__main__': wttest.run() diff --git a/src/third_party/wiredtiger/test/suite/test_util18.py b/src/third_party/wiredtiger/test/suite/test_util18.py new file mode 100644 index 00000000000..c66bfca13b0 --- /dev/null +++ b/src/third_party/wiredtiger/test/suite/test_util18.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python +# +# Public Domain 2014-2021 MongoDB, Inc. +# Public Domain 2008-2014 WiredTiger, Inc. +# +# This is free and unencumbered software released into the public domain. +# +# Anyone is free to copy, modify, publish, use, compile, sell, or +# distribute this software, either in source code form or as a compiled +# binary, for any purpose, commercial or non-commercial, and by any +# means. +# +# In jurisdictions that recognize copyright laws, the author or authors +# of this software dedicate any and all copyright interest in the +# software to the public domain. We make this dedication for the benefit +# of the public at large and to the detriment of our heirs and +# successors. 
We intend this dedication to be an overt act of +# relinquishment in perpetuity of all present and future rights to this +# software under copyright law. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. + +import codecs, filecmp +from suite_subprocess import suite_subprocess +import wiredtiger, wttest +from wtscenario import make_scenarios + +# test_util18.py +# Utilities: wt printlog +class test_util18(wttest.WiredTigerTestCase, suite_subprocess): + tablename = 'test_util18.a' + uri = 'table:' + tablename + logmax = 100 + nentries = 5 + create_params = 'key_format=S,value_format=S' + key_prefix = 'KEY' + val_prefix = 'VAL' + + # Whether user data is redacted or printed. + print_user_data = [ + ('show_user_data', dict(print_user_data=True)), + ('no_user_data', dict(print_user_data=False)), + ] + + scenarios = make_scenarios(print_user_data) + + def conn_config(self): + return 'log=(archive=false,enabled,file_max=%dK)' % self.logmax + + # Populate our test table with data we can check against in the printlog output. + def populate(self): + cursor = self.session.open_cursor(self.uri, None) + for i in range(0, self.nentries): + key = self.key_prefix + str(i) + val = self.val_prefix + str(i) + cursor[key] = val + cursor.close() + + # Check the given printlog file reflects the data written by 'populate'. + def check_populated_printlog(self, log_file, expect_keyval, expect_keyval_hex): + for i in range(0, self.nentries): + key = self.key_prefix + str(i) + val = self.val_prefix + str(i) + # Check if the KEY/VAL commits exist in the log file. + if expect_keyval: + self.check_file_contains(log_file, '"key": "%s\\u0000"' % key) + self.check_file_contains(log_file, '"value": "%s\\u0000"' % val) + else: + self.check_file_not_contains(log_file, '"key": "%s\\u0000"' % key) + self.check_file_not_contains(log_file, '"value": "%s\\u0000"' % val) + + # Convert our KEY/VAL strings to their expected hex value. + hex_key = codecs.encode(key.encode(), 'hex') + val_key = codecs.encode(val.encode(), 'hex') + # Check if the KEY/VAL commits exist in the log file (in hex form). + if expect_keyval_hex: + self.check_file_contains(log_file, '"key-hex": "%s00"' % str(hex_key, 'ascii')) + self.check_file_contains(log_file, '"value-hex": "%s00"' % str(val_key, 'ascii')) + else: + self.check_file_not_contains(log_file, '"key-hex": "%s00"' % str(hex_key, 'ascii')) + self.check_file_not_contains(log_file, '"value-hex": "%s00"' % str(val_key, 'ascii')) + + def test_printlog_file(self): + """ + Run printlog on a populated table. + """ + self.session.create('table:' + self.tablename, self.create_params) + self.populate() + wt_args = ["printlog"] + # Append "-u" if we expect printlog to print user data. + if self.print_user_data: + wt_args.append("-u") + self.runWt(wt_args, outfilename='printlog.out') + self.check_non_empty_file('printlog.out') + self.check_populated_printlog('printlog.out', self.print_user_data, False) + + def test_printlog_hex_file(self): + """ + Run printlog with hexadecimal formatting on a populated table. 
+ """ + self.session.create('table:' + self.tablename, self.create_params) + self.populate() + wt_args = ["printlog", "-x"] + # Append "-u" if we expect printlog to print user data. + if self.print_user_data: + wt_args.append("-u") + self.runWt(wt_args, outfilename='printlog-hex.out') + self.check_non_empty_file('printlog-hex.out') + self.check_populated_printlog('printlog-hex.out', self.print_user_data, self.print_user_data) + + def test_printlog_message(self): + """ + Run printlog with messages-only formatting on a populated table. + """ + self.session.create('table:' + self.tablename, self.create_params) + self.populate() + # Write a log message that we can specifically test the presence of. + log_message = "Test Message: %s" % self.tablename + self.session.log_printf(log_message) + wt_args = ["printlog", "-m"] + # Append "-u" if we expect printlog to print user data. + if self.print_user_data: + wt_args.append("-u") + self.runWt(wt_args, outfilename='printlog-message.out') + self.check_non_empty_file('printlog-message.out') + self.check_file_contains('printlog-message.out', log_message) + self.check_populated_printlog('printlog-message.out', False, False) + + def test_printlog_lsn_offset(self): + """ + Run printlog with an LSN offset provided. + """ + self.session.create('table:' + self.tablename, self.create_params) + self.populate() + + # Open a log cursor to accurately extract the first, second and last LSN from our + # log. + c = self.session.open_cursor("log:", None, None) + # Moving the cursor to the beginning of the file, extract our first LSN. + c.next() + first_lsn_keys = c.get_key() + # Moving the cursor, extract our second LSN. + c.next() + second_lsn_keys = c.get_key() + last_lsn_keys = [] + # Moving the cursor to the last available key, extract the last LSN value. + while c.next() == 0: + last_lsn_keys = c.get_key() + c.next() + c.close() + + # Construct the first, second and last LSN values, assuming the + # key elements follow the following sequence: [lsn.file, lsn.offset, opcount]. + first_lsn = '%s,%s' % (first_lsn_keys[0], first_lsn_keys[1]) + second_lsn = '%s,%s' % (second_lsn_keys[0], second_lsn_keys[1]) + last_lsn = '%s,%s' % (last_lsn_keys[0], last_lsn_keys[1]) + + # Test printlog on a bounded range that starts and ends on our first LSN record. In doing so we want + # to assert that other log records won't be printed e.g. the second LSN record. + wt_args = ["printlog", '-l %s,%s' % (first_lsn, first_lsn)] + self.runWt(wt_args, outfilename='printlog-lsn-offset.out') + self.check_file_contains('printlog-lsn-offset.out', '"lsn" : [%s]' % first_lsn) + self.check_file_not_contains('printlog-lsn-offset.out', '"lsn" : [%s]' % second_lsn) + self.check_populated_printlog('printlog-lsn-offset.out', False, False) + + # Test printlog from the starting LSN value to the end of the log. We expect to find the logs relating + # to the population of our table. + wt_args = ["printlog", '-l %s' % first_lsn] + # Append "-u" if we expect printlog to print user data. + if self.print_user_data: + wt_args.append("-u") + self.runWt(wt_args, outfilename='printlog-lsn-offset.out') + self.check_populated_printlog('printlog-lsn-offset.out', self.print_user_data, False) + + # Test that using LSN '1,0' and our first LSN value produce the same output when passed to printlog. + # We expect printing from LSN '1,0' (which should denote to the beginning of the first log file) + # is equivalent to printing from our first extracted LSN value to the last LSN value. 
+ wt_args_beginning = ["printlog", '-l 1,0,%s' % last_lsn] + wt_args_first = ["printlog", '-l %s,%s' % (first_lsn, last_lsn)] + if self.print_user_data: + wt_args_beginning.append("-u") + wt_args_first.append("-u") + self.runWt(wt_args_beginning, outfilename='printlog-lsn-offset-beginning.out') + self.runWt(wt_args_first, outfilename='printlog-lsn-offset-first.out') + self.assertTrue(filecmp.cmp('printlog-lsn-offset-beginning.out', 'printlog-lsn-offset-first.out')) + +if __name__ == '__main__': + wttest.run() diff --git a/src/third_party/wiredtiger/test/suite/test_util19.py b/src/third_party/wiredtiger/test/suite/test_util19.py new file mode 100644 index 00000000000..412e0eaeda4 --- /dev/null +++ b/src/third_party/wiredtiger/test/suite/test_util19.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python +# +# Public Domain 2014-2021 MongoDB, Inc. +# Public Domain 2008-2014 WiredTiger, Inc. +# +# This is free and unencumbered software released into the public domain. +# +# Anyone is free to copy, modify, publish, use, compile, sell, or +# distribute this software, either in source code form or as a compiled +# binary, for any purpose, commercial or non-commercial, and by any +# means. +# +# In jurisdictions that recognize copyright laws, the author or authors +# of this software dedicate any and all copyright interest in the +# software to the public domain. We make this dedication for the benefit +# of the public at large and to the detriment of our heirs and +# successors. We intend this dedication to be an overt act of +# relinquishment in perpetuity of all present and future rights to this +# software under copyright law. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. + +from suite_subprocess import suite_subprocess +import wiredtiger, wttest +from wtscenario import make_scenarios + +# test_util19.py +# Utilities: wt downgrade +class test_util19(wttest.WiredTigerTestCase, suite_subprocess): + tablename = 'test_util19.a' + uri = 'table:' + tablename + entries = 100 + log_max = "100K" + log_latest_compat = 5 + + create_release = [ + ('def', dict(create_rel='none')), + ('100', dict(create_rel="10.0")), + ('33', dict(create_rel="3.3")), + ('32', dict(create_rel="3.2")), + ('31', dict(create_rel="3.1")), + ('30', dict(create_rel="3.0")), + ('26', dict(create_rel="2.6")), + ] + + downgrade_release = [ + ('100_rel', dict(downgrade_rel="10.0", log_downgrade_compat=5)), + ('33_rel', dict(downgrade_rel="3.3", log_downgrade_compat=4)), + ('32_rel', dict(downgrade_rel="3.2", log_downgrade_compat=3)), + ('31_rel', dict(downgrade_rel="3.1", log_downgrade_compat=3)), + ('30_rel', dict(downgrade_rel="3.0", log_downgrade_compat=2)), + ('26_rel', dict(downgrade_rel="2.6", log_downgrade_compat=1)), + ] + + scenarios = make_scenarios(create_release, downgrade_release) + + def conn_config(self): + conf_str = 'log=(archive=false,enabled,file_max=%s),' % self.log_max + if (self.create_rel != 'none'): + conf_str += 'compatibility=(release="%s"),' % (self.create_rel) + return conf_str + + def test_downgrade(self): + """ + Run wt downgrade on our created database and test its new compatibility version. 
+ """ + # Create the initial database at the compatibility level established by + # the connection config ('create_rel'). + self.session.create(self.uri, 'key_format=S,value_format=S') + c = self.session.open_cursor(self.uri, None) + # Populate the table to generate some log files. + for i in range(self.entries): + key = 'KEY' + str(i) + val = 'VAL' + str(i) + c[key] = val + c.close() + + # Call the downgrade utility to reconfigure our database with the specified compatibility version. + wt_config = 'log=(archive=false,enabled,file_max=%s),verbose=[log]' % self.log_max + downgrade_opt = '-V %s' % self.downgrade_rel + self.runWt(['-C', wt_config , 'downgrade', downgrade_opt], reopensession=False, outfilename='downgrade.out') + # Based on the downgrade version we can test if the corresponding log compatibility version + # has been set. + compat_str = '/WT_CONNECTION\.reconfigure: .*: COMPATIBILITY: Version now %d/' % self.log_downgrade_compat + if self.log_downgrade_compat != self.log_latest_compat: + self.check_file_contains('downgrade.out', compat_str) + else: + self.check_file_not_contains('downgrade.out', compat_str) + +if __name__ == '__main__': + wttest.run() diff --git a/src/third_party/wiredtiger/test/suite/test_util20.py b/src/third_party/wiredtiger/test/suite/test_util20.py new file mode 100644 index 00000000000..851ea504b70 --- /dev/null +++ b/src/third_party/wiredtiger/test/suite/test_util20.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python +# +# Public Domain 2014-2021 MongoDB, Inc. +# Public Domain 2008-2014 WiredTiger, Inc. +# +# This is free and unencumbered software released into the public domain. +# +# Anyone is free to copy, modify, publish, use, compile, sell, or +# distribute this software, either in source code form or as a compiled +# binary, for any purpose, commercial or non-commercial, and by any +# means. +# +# In jurisdictions that recognize copyright laws, the author or authors +# of this software dedicate any and all copyright interest in the +# software to the public domain. We make this dedication for the benefit +# of the public at large and to the detriment of our heirs and +# successors. We intend this dedication to be an overt act of +# relinquishment in perpetuity of all present and future rights to this +# software under copyright law. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. + +from suite_subprocess import suite_subprocess +import wiredtiger, wttest +from wtdataset import SimpleDataSet, ComplexDataSet + +# test_util20.py +# Utilities: wt upgrade +class test_util20(wttest.WiredTigerTestCase, suite_subprocess): + name = 'test_util20.a' + create_params = 'key_format=S,value_format=S' + num_rows = 10 + + def test_upgrade_table_complex_data(self): + # Run wt upgrade on a complex dataset and test for successful completion. + uri = 'table:' + self.name + ComplexDataSet(self, uri, self.num_rows).populate() + self.runWt(['upgrade', uri]) + + def test_upgrade_table_simple_data(self): + # Run wt upgrade on a simple dataset and test for successful completion. 
+ uri = 'table:' + self.name
+ SimpleDataSet(self, uri, self.num_rows).populate()
+ self.runWt(['upgrade', uri])
diff --git a/src/third_party/wiredtiger/test/suite/wtbackup.py b/src/third_party/wiredtiger/test/suite/wtbackup.py
index f418d11ddff..8ac4dee2ce8 100644
--- a/src/third_party/wiredtiger/test/suite/wtbackup.py
+++ b/src/third_party/wiredtiger/test/suite/wtbackup.py
@@ -25,33 +25,38 @@
 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 # OTHER DEALINGS IN THE SOFTWARE.
-import os, glob
+import os, glob, shutil
 import wttest, wiredtiger
 from suite_subprocess import suite_subprocess
 from helper import compare_files
 # Shared base class used by backup tests.
 class backup_base(wttest.WiredTigerTestCase, suite_subprocess):
- cursor_config = None # a config string for cursors
- mult = 0 # counter to have variance in data
- nops = 100 # number of operations added to uri
-
- # We use counter to produce unique backup names for multiple iterations
- # of incremental backup tests.
- counter = 0
- # To determine whether to increase/decrease counter, which determines
- initial_backup = True
- # Used for populate function
+ data_cursor_config = None # a config string for cursors.
+ mult = 0 # counter to have variance in data.
+ nops = 100 # number of operations added to uri.
+
+ # We use counter to produce unique backup ids for multiple iterations
+ # of incremental backup.
+ bkup_id = 0
+ # When true, we are doing the initial setup backup and do not increment the backup id.
+ initial_backup = False
+ # Used for populate function.
 rows = 100
 populate_big = None
+ # A logpath subdirectory in which to place WiredTiger log files.
+ logpath=''
+ # Temporary directory used to verify consistent data between multiple incremental backups.
+ home_tmp = "WT_TEST_TMP"
+
 #
 # Add data to the given uri.
 # Allows the option for doing a session checkpoint after adding data.
 #
 def add_data(self, uri, key, val, do_checkpoint=False):
 assert(self.nops != 0)
- c = self.session.open_cursor(uri, None, self.cursor_config)
+ c = self.session.open_cursor(uri, None, self.data_cursor_config)
 for i in range(0, self.nops):
 num = i + (self.mult * self.nops)
 k = key + str(num)
@@ -62,7 +67,7 @@ class backup_base(wttest.WiredTigerTestCase, suite_subprocess):
 self.session.checkpoint()
 # Increase the counter so that later backups have unique ids.
 if not self.initial_backup:
- self.counter += 1
+ self.bkup_id += 1
 # Increase the multiplier so that later calls insert unique items.
 self.mult += 1
@@ -83,31 +88,36 @@ class backup_base(wttest.WiredTigerTestCase, suite_subprocess):
 cg_config = i[3]
 i[1](self, i[0], self.rows, cgconfig = cg_config).populate()
- # Backup needs a checkpoint
+ # Backup needs a checkpoint.
 if do_checkpoint:
 self.session.checkpoint()
 #
- # Set up all the directories needed for the test. We have a full backup directory for each
- # iteration and an incremental backup for each iteration. That way we can compare the full and
- # incremental each time through.
+ # Set up all the directories needed for the test. We have a full backup directory, an incremental
+ # backup directory and a temporary directory. The temporary directory holds the updated data for
+ # incremental backups; each call to this function overwrites the contents of the incremental
+ # directory with it, to set up future backup calls.
+ # That way we can compare the full and incremental backup each time through.
#
- def setup_directories(self, max_iteration, home_incr, home_full, logpath):
- for i in range(0, max_iteration):
- # The log directory is a subdirectory of the home directory,
- # creating that will make the home directory also.
+ # Note: The log directory is a subdirectory of the home directory; creating it also creates the home directory.
+ # The incremental backup function copies the latest data into the temporary directory.
+ def setup_directories(self, home_incr, home_full):
+ # Create the temp directory if the path doesn't exist,
+ # as we only want to create this directory once at the start.
+ if not os.path.exists(self.home_tmp):
+ os.makedirs(self.home_tmp + '/' + self.logpath)
- home_incr_dir = home_incr + '.' + str(i)
- if os.path.exists(home_incr_dir):
- os.remove(home_incr_dir)
- os.makedirs(home_incr_dir + '/' + logpath)
+ if os.path.exists(home_full):
+ shutil.rmtree(home_full)
+ os.makedirs(home_full + '/' + self.logpath)
- if i == 0:
- continue
- home_full_dir = home_full + '.' + str(i)
- if os.path.exists(home_full_dir):
- os.remove(home_full_dir)
- os.makedirs(home_full_dir + '/' + logpath)
+ # If the incremental directory exists, remove its contents and replace them with the
+ # contents of the temporary directory, so that the test can perform further incremental
+ # backups on the directory.
+ if os.path.exists(home_incr):
+ shutil.rmtree(home_incr)
+ shutil.copytree(self.home_tmp, self.home_incr)
+ else:
+ os.makedirs(home_incr + '/' + self.logpath)
 #
 # Check that a URI doesn't exist, both the meta-data and the file names.
@@ -125,16 +135,57 @@ class backup_base(wttest.WiredTigerTestCase, suite_subprocess):
 uri.split(":")[1] + '\" found')
 #
+ # Copy a file into a given directory.
+ #
+ def copy_file(self, file, dir):
+ copy_from = file
+ # If it is a log file, prepend the log path.
+ if self.logpath and "WiredTigerLog" in file:
+ copy_to = dir + '/' + self.logpath
+ else:
+ copy_to = dir
+ shutil.copy(copy_from, copy_to)
+
+ #
+ # Use a backup cursor to perform a full backup, iterating through the cursor and
+ # copying the files it returns into a given directory. When a test performs multiple
+ # incremental backups, we initially perform a full backup into each incremental
+ # directory as a starting base.
+ # Optional arguments:
+ # backup_cur: A backup cursor may be passed into the function, but the caller then
+ # holds responsibility for closing the cursor.
+ #
+ def take_full_backup(self, backup_dir, backup_cur=None):
+ self.pr('Full backup to ' + backup_dir + ': ')
+ bkup_c = backup_cur
+ if backup_cur == None:
+ config = None
+ if self.initial_backup:
+ config = 'incremental=(granularity=1M,enabled=true,this_id=ID0)'
+ bkup_c = self.session.open_cursor('backup:', None, config)
+ all_files = []
+ # We cannot use 'for newfile in bkup_c:' usage because backup cursors don't have
+ # values and adding in get_values returns ENOTSUP and causes the usage to fail.
+ # If that changes then this, and the use of the duplicate below can change.
+ while bkup_c.next() == 0:
+ newfile = bkup_c.get_key()
+ sz = os.path.getsize(newfile)
+ self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + backup_dir)
+ self.copy_file(newfile, backup_dir)
+ all_files.append(newfile)
+ if backup_cur == None:
+ bkup_c.close()
+ return all_files
+
+ #
 # Compare against two directory paths using the wt dump command.
- # The suffix allows the option to add distinctive tests adding suffix to both the output files and directories
+ # The optional suffix allows tests to add a distinguishing suffix to the output files.
 #
- def compare_backups(self, uri, base_dir_home, other_dir_home, suffix = None):
+ def compare_backups(self, uri, base_dir, other_dir, suffix = None):
 sfx = ""
 if suffix != None:
 sfx = "." + suffix
 base_out = "./backup_base" + sfx
- base_dir = base_dir_home + sfx
-
 if os.path.exists(base_out):
 os.remove(base_out)
@@ -144,6 +195,133 @@ class backup_base(wttest.WiredTigerTestCase, suite_subprocess):
 if os.path.exists(other_out):
 os.remove(other_out)
 # Run wt dump on incremental backup
- other_dir = other_dir_home + sfx
 self.runWt(['-R', '-h', other_dir, 'dump', uri], outfilename=other_out)
+ self.pr("compare_files: " + base_out + ", " + other_out)
 self.assertEqual(True, compare_files(self, base_out, other_out))
+
+ #
+ # Perform a block range copy for a given offset and file.
+ #
+ def range_copy(self, filename, offset, size, backup_incr_dir):
+ read_from = filename
+ write_to = backup_incr_dir + '/' + filename
+ rfp = open(read_from, "rb")
+ rfp.seek(offset, 0)
+ buf = rfp.read(size)
+ # Compare against the previous incremental directory to check that
+ # the old file and the new file actually differ.
+ old_to = self.home_tmp + '/' + filename
+ if os.path.exists(old_to):
+ self.pr('RANGE CHECK file ' + old_to + ' offset ' + str(offset) + ' len ' + str(size))
+ old_rfp = open(old_to, "rb")
+ old_rfp.seek(offset, 0)
+ old_buf = old_rfp.read(size)
+ old_rfp.close()
+ # This assertion tests that the offset range we're given actually changed
+ # from the previous backup.
+ self.assertNotEqual(buf, old_buf)
+ wfp = None
+ # Create file if the file doesn't exist.
+ if not os.path.exists(write_to):
+ wfp = open(write_to, "w+b")
+ else:
+ wfp = open(write_to, "r+b")
+ wfp.seek(offset, 0)
+ wfp.write(buf)
+ rfp.close()
+ wfp.close()
+
+ #
+ # With a given backup cursor, open an incremental block cursor to copy the blocks of a
+ # given file. If the block type is WT_BACKUP_FILE, perform a full copy into the given
+ # directory; if the type is WT_BACKUP_RANGE, perform a partial copy of the file using range copy.
+ #
+ # Note: we return the sizes of WT_BACKUP_RANGE blocks for tests that check the consolidate config.
+ #
+ def take_incr_backup_block(self, bkup_c, newfile, backup_incr_dir):
+ config = 'incremental=(file=' + newfile + ')'
+ self.pr('Open incremental cursor with ' + config)
+ # For each file listed, open a duplicate backup cursor and copy the blocks.
+ incr_c = self.session.open_cursor(None, bkup_c, config)
+ # Sizes of the copied block ranges, returned for the consolidate tests.
+ lens = []
+ # We cannot use 'for newfile in incr_c:' usage because backup cursors don't have
+ # values and adding in get_values returns ENOTSUP and causes the usage to fail.
+ # If that changes then this, and the use of the duplicate below can change.
+ while incr_c.next() == 0:
+ incrlist = incr_c.get_keys()
+ offset = incrlist[0]
+ size = incrlist[1]
+ curtype = incrlist[2]
+ self.assertTrue(curtype == wiredtiger.WT_BACKUP_FILE or curtype == wiredtiger.WT_BACKUP_RANGE)
+ if curtype == wiredtiger.WT_BACKUP_FILE:
+ sz = os.path.getsize(newfile)
+ self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + backup_incr_dir)
+ # Copy the whole file.
+ self.copy_file(newfile, backup_incr_dir)
+ else:
+ # Copy the block range.
+ self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size))
+ self.range_copy(newfile, offset, size, backup_incr_dir)
+ lens.append(size)
+ incr_c.close()
+ return lens
+
+ #
+ # Given a backup cursor, open a duplicate log cursor and copy all log files that are
+ # not in the given log list. Return all the log files seen.
+ #
+ def take_log_backup(self, bkup_c, backup_dir, orig_logs, log_cursor=None):
+ # Now open a duplicate backup cursor.
+ dupc = log_cursor
+ if log_cursor == None:
+ config = 'target=("log:")'
+ dupc = self.session.open_cursor(None, bkup_c, config)
+ dup_logs = []
+ while dupc.next() == 0:
+ newfile = dupc.get_key()
+ self.assertTrue("WiredTigerLog" in newfile)
+ sz = os.path.getsize(newfile)
+ if (newfile not in orig_logs):
+ self.pr('DUP: Copy from: ' + newfile + ' (' + str(sz) + ') to ' + backup_dir)
+ shutil.copy(newfile, backup_dir)
+ # Record all log files returned for later verification.
+ dup_logs.append(newfile)
+ if log_cursor == None:
+ dupc.close()
+ return dup_logs
+
+ #
+ # Open an incremental backup cursor with the given id, iterate through all the files,
+ # and perform an incremental block copy for each of them. Return the names and block
+ # sizes of the backup files.
+ #
+ # Optional arguments:
+ # consolidate: Add the consolidate option to the cursor.
+ #
+ def take_incr_backup(self, backup_incr_dir, id=0, consolidate=False):
+ self.assertTrue(id > 0 or self.bkup_id > 0)
+ if id == 0:
+ id = self.bkup_id
+ # Open the backup data source for incremental backup.
+ config = 'incremental=(src_id="ID' + str(id - 1) + '",this_id="ID' + str(id) + '"'
+ if consolidate:
+ config += ',consolidate=true'
+ config += ')'
+ self.pr("Incremental backup cursor with config " + config)
+ bkup_c = self.session.open_cursor('backup:', None, config)
+
+ file_sizes = []
+ file_names = []
+
+ # We cannot use 'for newfile in bkup_c:' usage because backup cursors don't have
+ # values and adding in get_values returns ENOTSUP and causes the usage to fail.
+ # If that changes then this, and the use of the duplicate below can change.
+ while bkup_c.next() == 0:
+ newfile = bkup_c.get_key()
+ file_sizes += self.take_incr_backup_block(bkup_c, newfile, backup_incr_dir)
+ file_names.append(newfile)
+ # Copy into temp directory for tests that require further iterations of incremental backups.
+ self.copy_file(newfile, self.home_tmp)
+ bkup_c.close()
+ return (file_names, file_sizes)
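The helpers above reduce each migrated test to a short, uniform sequence: set up the directories, seed the incremental directory with an initial full backup under ID0, then alternate full and incremental backups and compare the results. The following is a minimal sketch of that flow, assuming only the backup_base API shown in this diff; the class name, table URI and backup directory names are illustrative, not part of the commit:

import wttest
from wtbackup import backup_base

# Hypothetical test driving the shared backup_base helpers.
class backup_sketch(backup_base):
    conn_config = 'cache_size=1G,log=(enabled,file_max=100K)'
    uri = 'table:sketch'        # illustrative table name
    home_full = 'SKETCH_FULL'   # illustrative backup directories
    home_incr = 'SKETCH_INCR'

    def test_sketch(self):
        self.session.create(self.uri, 'key_format=S,value_format=S')
        self.setup_directories(self.home_incr, self.home_full)

        # The initial full backup seeds the incremental directory with id ID0;
        # while initial_backup is true, add_data does not bump bkup_id.
        self.initial_backup = True
        self.add_data(self.uri, 'Key', 'Value')
        self.take_full_backup(self.home_incr)
        self.initial_backup = False
        self.session.checkpoint()

        # Add more data (bkup_id becomes 1), then take a full backup and an
        # incremental backup and verify they dump identical table contents.
        self.add_data(self.uri, 'Key', 'Value')
        self.take_full_backup(self.home_full)
        self.take_incr_backup(self.home_incr)
        self.compare_backups(self.uri, self.home_full, self.home_incr, str(self.bkup_id))

if __name__ == '__main__':
    wttest.run()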