| author | Chenhao Qu <chenhao.qu@mongodb.com> | 2020-11-04 21:59:17 +0000 |
|---|---|---|
| committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2020-11-04 22:42:45 +0000 |
| commit | 359ebf7e91cacf9c80b7cd5f13d126fa85494ac1 (patch) | |
| tree | 4d5fc8def5fc4f77d3fdbd7e5ced1641610f4ba1 | |
| parent | bff7491483d50c11ef9f76382affa5eaed8d1b18 (diff) | |
| download | mongo-359ebf7e91cacf9c80b7cd5f13d126fa85494ac1.tar.gz | |
Import wiredtiger: b01325d9e9ae56e00c790c6ac53ef06a9493e079 from branch mongodb-5.0
ref: 5fb9ea60da..b01325d9e9
for: 4.9.0
WT-6722 Review function names in history store module
WT-6828 Fix doc link in README
WT-6830 Encode bytes before concatenating with string
WT-6835 Add API to consolidate incremental backup information
WT-6842 Add example showing using src_id only
WT-6844 Make force stop durable
22 files changed, 639 insertions, 83 deletions
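The headline change in this import (WT-6835, exercised by WT-6842 and the updated `ex_backup_block.c` below) adds an `incremental.consolidate` boolean to `WT_SESSION::open_cursor`. The sketch below is not part of the commit; it is a minimal illustration of driving a consolidated incremental backup through the C API, assuming an already-open `WT_SESSION`, illustrative identifiers `ID1`/`ID2`, and with the actual file copying replaced by `printf` for brevity.

```c
#include <inttypes.h>
#include <stdio.h>
#include <wiredtiger.h>

/* Illustrative only: walk one consolidated incremental backup. */
static int
take_consolidated_incr_backup(WT_SESSION *session)
{
    WT_CURSOR *backup_cur, *incr_cur;
    uint64_t offset, size, type;
    int ret;
    char cfg[512];
    const char *filename;

    /*
     * consolidate must be set on the primary backup cursor; it then applies to every
     * per-file duplicate cursor opened from it.
     */
    if ((ret = session->open_cursor(session, "backup:", NULL,
           "incremental=(src_id=\"ID1\",this_id=\"ID2\",consolidate=true)", &backup_cur)) != 0)
        return (ret);

    while ((ret = backup_cur->next(backup_cur)) == 0) {
        (void)backup_cur->get_key(backup_cur, &filename);

        /* Open a duplicate cursor returning the modified ranges of this file. */
        (void)snprintf(cfg, sizeof(cfg), "incremental=(file=%s)", filename);
        if ((ret = session->open_cursor(session, NULL, backup_cur, cfg, &incr_cur)) != 0)
            break;
        while ((ret = incr_cur->next(incr_cur)) == 0) {
            (void)incr_cur->get_key(incr_cur, &offset, &size, &type);
            if (type == WT_BACKUP_RANGE)
                /* With consolidate=true a range may span several adjacent granularity blocks. */
                printf("copy %s offset %" PRIu64 " length %" PRIu64 "\n", filename, offset, size);
            else /* WT_BACKUP_FILE: copy the whole file. */
                printf("copy %s in full\n", filename);
        }
        (void)incr_cur->close(incr_cur);
        if (ret != WT_NOTFOUND)
            break;
    }
    (void)backup_cur->close(backup_cur);
    return (ret == WT_NOTFOUND ? 0 : ret);
}
```

The only points this sketch is meant to show are that the `consolidate` flag is carried by the primary backup cursor (a duplicate cursor rejects it, per the diff) and that the per-file duplicate cursor may now return ranges larger than a single granularity unit.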
diff --git a/src/third_party/wiredtiger/README b/src/third_party/wiredtiger/README index 6ddef162bd4..b70dd32fcf4 100644 --- a/src/third_party/wiredtiger/README +++ b/src/third_party/wiredtiger/README @@ -8,7 +8,7 @@ WiredTiger release packages and documentation can be found at: The documentation for this specific release can be found at: - https://source.wiredtiger.com/develop/index.html + https://source.wiredtiger.com/10.0.0/index.html The WiredTiger source code can be found at: diff --git a/src/third_party/wiredtiger/dist/api_data.py b/src/third_party/wiredtiger/dist/api_data.py index 0df17bcfb1f..367fe924a0d 100644 --- a/src/third_party/wiredtiger/dist/api_data.py +++ b/src/third_party/wiredtiger/dist/api_data.py @@ -1281,6 +1281,12 @@ methods = { configure the cursor for block incremental backup usage. These formats are only compatible with the backup data source; see @ref backup''', type='category', subconfig=[ + Config('consolidate', 'false', r''' + causes block incremental backup information to be consolidated if adjacent + granularity blocks are modified. If false, information will be returned in + granularity sized blocks only. This must be set on the primary backup cursor and it + applies to all files for this backup''', + type='boolean'), Config('enabled', 'false', r''' whether to configure this backup as the starting point for a subsequent incremental backup''', diff --git a/src/third_party/wiredtiger/examples/c/ex_backup_block.c b/src/third_party/wiredtiger/examples/c/ex_backup_block.c index 96207444dd7..01d0431fd10 100644 --- a/src/third_party/wiredtiger/examples/c/ex_backup_block.c +++ b/src/third_party/wiredtiger/examples/c/ex_backup_block.c @@ -342,7 +342,8 @@ take_incr_backup(WT_SESSION *session, int i) /*! [Query existing IDs] */ /* Open the backup data source for incremental backup. */ - (void)snprintf(buf, sizeof(buf), "incremental=(src_id=\"ID%d\",this_id=\"ID%d\")", i - 1, i); + (void)snprintf(buf, sizeof(buf), "incremental=(src_id=\"ID%d\",this_id=\"ID%d\"%s)", i - 1, i, + i % 2 == 0 ? 
"" : ",consolidate=true"); error_check(session->open_cursor(session, "backup:", NULL, buf, &backup_cur)); rfd = wfd = -1; count = 0; diff --git a/src/third_party/wiredtiger/import.data b/src/third_party/wiredtiger/import.data index f2830e4e5a7..9f744faf941 100644 --- a/src/third_party/wiredtiger/import.data +++ b/src/third_party/wiredtiger/import.data @@ -2,5 +2,5 @@ "vendor": "wiredtiger", "github": "wiredtiger/wiredtiger.git", "branch": "mongodb-5.0", - "commit": "5fb9ea60da1923d97a353a160f703da73c6f72fc" + "commit": "b01325d9e9ae56e00c790c6ac53ef06a9493e079" } diff --git a/src/third_party/wiredtiger/lang/python/wiredtiger/packing.py b/src/third_party/wiredtiger/lang/python/wiredtiger/packing.py index 83821f2657f..543326e022f 100755 --- a/src/third_party/wiredtiger/lang/python/wiredtiger/packing.py +++ b/src/third_party/wiredtiger/lang/python/wiredtiger/packing.py @@ -174,7 +174,10 @@ def pack(fmt, *values): if _is_string(val) and f in 'Ss': result += str(val[:l]).encode() else: - result += val[:l] + if type(val) is bytes: + result += val[:l] + else: + result += val[:l].encode() if f == 'S' and not havesize: result += x00 elif size > l and havesize: diff --git a/src/third_party/wiredtiger/src/btree/bt_vrfy.c b/src/third_party/wiredtiger/src/btree/bt_vrfy.c index 660652a8ab2..13abe891dd8 100644 --- a/src/third_party/wiredtiger/src/btree/bt_vrfy.c +++ b/src/third_party/wiredtiger/src/btree/bt_vrfy.c @@ -283,7 +283,7 @@ __wt_verify(WT_SESSION_IMPL *session, const char *cfg[]) if (ret == 0 && (ckpt + 1)->name == NULL && !skip_hs) { /* Open a history store cursor. */ WT_ERR(__wt_hs_cursor_open(session)); - WT_TRET(__wt_history_store_verify_one(session)); + WT_TRET(__wt_hs_verify_one(session)); WT_TRET(__wt_hs_cursor_close(session)); /* * We cannot error out here. 
If we got an error verifying the history store, we need diff --git a/src/third_party/wiredtiger/src/config/config_def.c b/src/third_party/wiredtiger/src/config/config_def.c index 69a33d7ac6a..edb45b25472 100644 --- a/src/third_party/wiredtiger/src/config/config_def.c +++ b/src/third_party/wiredtiger/src/config/config_def.c @@ -298,8 +298,8 @@ static const WT_CONFIG_CHECK confchk_WT_SESSION_log_flush[] = { {NULL, NULL, NULL, NULL, NULL, 0}}; static const WT_CONFIG_CHECK confchk_WT_SESSION_open_cursor_incremental_subconfigs[] = { - {"enabled", "boolean", NULL, NULL, NULL, 0}, {"file", "string", NULL, NULL, NULL, 0}, - {"force_stop", "boolean", NULL, NULL, NULL, 0}, + {"consolidate", "boolean", NULL, NULL, NULL, 0}, {"enabled", "boolean", NULL, NULL, NULL, 0}, + {"file", "string", NULL, NULL, NULL, 0}, {"force_stop", "boolean", NULL, NULL, NULL, 0}, {"granularity", "int", NULL, "min=4KB,max=2GB", NULL, 0}, {"src_id", "string", NULL, NULL, NULL, 0}, {"this_id", "string", NULL, NULL, NULL, 0}, {NULL, NULL, NULL, NULL, NULL, 0}}; @@ -309,7 +309,7 @@ static const WT_CONFIG_CHECK confchk_WT_SESSION_open_cursor[] = { {"checkpoint", "string", NULL, NULL, NULL, 0}, {"checkpoint_wait", "boolean", NULL, NULL, NULL, 0}, {"dump", "string", NULL, "choices=[\"hex\",\"json\",\"pretty\",\"print\"]", NULL, 0}, - {"incremental", "category", NULL, NULL, confchk_WT_SESSION_open_cursor_incremental_subconfigs, 6}, + {"incremental", "category", NULL, NULL, confchk_WT_SESSION_open_cursor_incremental_subconfigs, 7}, {"next_random", "boolean", NULL, NULL, NULL, 0}, {"next_random_sample_size", "string", NULL, NULL, NULL, 0}, {"overwrite", "boolean", NULL, NULL, NULL, 0}, {"raw", "boolean", NULL, NULL, NULL, 0}, @@ -926,11 +926,11 @@ static const WT_CONFIG_ENTRY config_entries[] = {{"WT_CONNECTION.add_collator", {"WT_SESSION.log_printf", "", NULL, 0}, {"WT_SESSION.open_cursor", "append=false,bulk=false,checkpoint=,checkpoint_wait=true,dump=," - "incremental=(enabled=false,file=,force_stop=false," - "granularity=16MB,src_id=,this_id=),next_random=false," - "next_random_sample_size=0,overwrite=true,raw=false," - "read_once=false,readonly=false,skip_sort_check=false,statistics=" - ",target=", + "incremental=(consolidate=false,enabled=false,file=," + "force_stop=false,granularity=16MB,src_id=,this_id=)," + "next_random=false,next_random_sample_size=0,overwrite=true," + "raw=false,read_once=false,readonly=false,skip_sort_check=false," + "statistics=,target=", confchk_WT_SESSION_open_cursor, 15}, {"WT_SESSION.prepare_transaction", "prepare_timestamp=", confchk_WT_SESSION_prepare_transaction, 1}, diff --git a/src/third_party/wiredtiger/src/conn/conn_api.c b/src/third_party/wiredtiger/src/conn/conn_api.c index bc2ad006ab6..9b5e2394b61 100644 --- a/src/third_party/wiredtiger/src/conn/conn_api.c +++ b/src/third_party/wiredtiger/src/conn/conn_api.c @@ -2762,7 +2762,7 @@ wiredtiger_open(const char *home, WT_EVENT_HANDLER *event_handler, const char *c */ if (verify_meta) { WT_ERR(__wt_open_internal_session(conn, "verify hs", false, 0, &verify_session)); - ret = __wt_history_store_verify(verify_session); + ret = __wt_hs_verify(verify_session); WT_TRET(__wt_session_close_internal(verify_session)); WT_ERR(ret); } diff --git a/src/third_party/wiredtiger/src/cursor/cur_backup.c b/src/third_party/wiredtiger/src/cursor/cur_backup.c index c9ba9d8d5fd..a32b5ddf974 100644 --- a/src/third_party/wiredtiger/src/cursor/cur_backup.c +++ b/src/third_party/wiredtiger/src/cursor/cur_backup.c @@ -195,6 +195,7 @@ __curbackup_close(WT_CURSOR *cursor) 
WT_CURSOR_BACKUP *cb; WT_DECL_RET; WT_SESSION_IMPL *session; + const char *cfg[3] = {NULL, NULL, NULL}; cb = (WT_CURSOR_BACKUP *)cursor; CURSOR_API_CALL_PREPARE_ALLOWED(cursor, session, close, NULL); @@ -204,6 +205,14 @@ err: __wt_verbose( session, WT_VERB_BACKUP, "%s", "Releasing resources from forced stop incremental"); __wt_backup_destroy(session); + /* + * We need to force a checkpoint to the metadata to make the force stop durable. Without it, + * the backup information could reappear if we crash and restart. + */ + cfg[0] = WT_CONFIG_BASE(session, WT_SESSION_checkpoint); + cfg[1] = "force=true"; + WT_WITH_DHANDLE(session, WT_SESSION_META_DHANDLE(session), + WT_WITH_METADATA_LOCK(session, ret = __wt_checkpoint(session, cfg))); } /* @@ -440,7 +449,7 @@ __backup_config(WT_SESSION_IMPL *session, WT_CURSOR_BACKUP *cb, const char *cfg[ WT_DECL_ITEM(tmp); WT_DECL_RET; const char *uri; - bool incremental_config, is_dup, log_config, target_list; + bool consolidate, incremental_config, is_dup, log_config, target_list; *foundp = *incr_only = *log_only = false; @@ -466,6 +475,19 @@ __backup_config(WT_SESSION_IMPL *session, WT_CURSOR_BACKUP *cb, const char *cfg[ } /* + * Consolidation can be on a per incremental basis or a per-file duplicate cursor basis. + */ + WT_RET(__wt_config_gets(session, cfg, "incremental.consolidate", &cval)); + consolidate = F_MASK(cb, WT_CURBACKUP_CONSOLIDATE); + if (cval.val) { + if (is_dup) + WT_RET_MSG(session, EINVAL, + "Incremental consolidation can only be specified on a primary backup cursor"); + F_SET(cb, WT_CURBACKUP_CONSOLIDATE); + incremental_config = true; + } + + /* * Specifying an incremental file means we're opening a duplicate backup cursor. */ WT_RET(__wt_config_gets(session, cfg, "incremental.file", &cval)); @@ -584,8 +606,11 @@ __backup_config(WT_SESSION_IMPL *session, WT_CURSOR_BACKUP *cb, const char *cfg[ F_SET(cb, WT_CURBACKUP_INCR); } err: - if (ret != 0 && cb->incr_src != NULL) + if (ret != 0 && cb->incr_src != NULL) { F_CLR(cb->incr_src, WT_BLKINCR_INUSE); + F_CLR(cb, WT_CURBACKUP_CONSOLIDATE); + F_SET(cb, consolidate); + } __wt_scr_free(session, &tmp); return (ret); } diff --git a/src/third_party/wiredtiger/src/cursor/cur_backup_incr.c b/src/third_party/wiredtiger/src/cursor/cur_backup_incr.c index c71676b2082..63ee8c0eb2c 100644 --- a/src/third_party/wiredtiger/src/cursor/cur_backup_incr.c +++ b/src/third_party/wiredtiger/src/cursor/cur_backup_incr.c @@ -111,8 +111,10 @@ __curbackup_incr_next(WT_CURSOR *cursor) WT_DECL_RET; WT_SESSION_IMPL *session; wt_off_t size; + uint64_t start_bitoff, total_len; uint32_t raw; const char *file; + bool found; cb = (WT_CURSOR_BACKUP *)cursor; btree = cb->incr_cursor == NULL ? NULL : CUR2BT(cb->incr_cursor); @@ -144,18 +146,7 @@ __curbackup_incr_next(WT_CURSOR *cursor) F_SET(cb, WT_CURBACKUP_INCR_INIT); __wt_cursor_set_key(cursor, 0, size, WT_BACKUP_FILE); } else { - if (F_ISSET(cb, WT_CURBACKUP_INCR_INIT)) { - /* Look for the next chunk that had modifications. */ - while (cb->bit_offset < cb->nbits) - if (__bit_test(cb->bitstring.mem, cb->bit_offset)) - break; - else - ++cb->bit_offset; - - /* We either have this object's incremental information or we're done. */ - if (cb->bit_offset >= cb->nbits) - WT_ERR(WT_NOTFOUND); - } else { + if (!F_ISSET(cb, WT_CURBACKUP_INCR_INIT)) { /* * We don't have this object's incremental information, and it's not a full file copy. * Get a list of the block modifications for the file. 
The block modifications are from @@ -186,8 +177,37 @@ __curbackup_incr_next(WT_CURSOR *cursor) WT_ERR(WT_NOTFOUND); } } - __wt_cursor_set_key(cursor, cb->offset + cb->granularity * cb->bit_offset++, - cb->granularity, WT_BACKUP_RANGE); + /* We have initialized incremental information. */ + start_bitoff = cb->bit_offset; + total_len = cb->granularity; + found = false; + /* The bit offset can be less than or equal to but never greater than the number of bits. */ + WT_ASSERT(session, cb->bit_offset <= cb->nbits); + /* Look for the next chunk that had modifications. */ + while (cb->bit_offset < cb->nbits) + if (__bit_test(cb->bitstring.mem, cb->bit_offset)) { + found = true; + /* + * Care must be taken to leave the bit_offset field set to the next offset bit so + * that the next call is set to the correct offset. + */ + start_bitoff = cb->bit_offset++; + if (F_ISSET(cb, WT_CURBACKUP_CONSOLIDATE)) { + while ( + cb->bit_offset < cb->nbits && __bit_test(cb->bitstring.mem, cb->bit_offset++)) + total_len += cb->granularity; + } + break; + } else + ++cb->bit_offset; + + /* We either have this object's incremental information or we're done. */ + if (!found) + WT_ERR(WT_NOTFOUND); + WT_ASSERT(session, cb->granularity != 0); + WT_ASSERT(session, total_len != 0); + __wt_cursor_set_key( + cursor, cb->offset + cb->granularity * start_bitoff, total_len, WT_BACKUP_RANGE); } done: @@ -249,6 +269,11 @@ __wt_curbackup_open_incr(WT_SESSION_IMPL *session, const char *uri, WT_CURSOR *o cb->incr_file, other_cb->incr_src->id_str); F_SET(cb, WT_CURBACKUP_FORCE_FULL); } + if (F_ISSET(other_cb, WT_CURBACKUP_CONSOLIDATE)) + F_SET(cb, WT_CURBACKUP_CONSOLIDATE); + else + F_CLR(cb, WT_CURBACKUP_CONSOLIDATE); + /* * Set up the incremental backup information, if we are not forcing a full file copy. We need an * open cursor on the file. Open the backup checkpoint, confirming it exists. diff --git a/src/third_party/wiredtiger/src/history/hs_rec.c b/src/third_party/wiredtiger/src/history/hs_rec.c index 9e4a5e9f2b6..bc15dc7a8c3 100644 --- a/src/third_party/wiredtiger/src/history/hs_rec.c +++ b/src/third_party/wiredtiger/src/history/hs_rec.c @@ -25,12 +25,12 @@ static int __hs_fixup_out_of_order_from_pos(WT_SESSION_IMPL *session, WT_CURSOR const WT_ITEM *srch_key); /* - * __hs_insert_updates_verbose -- + * __hs_verbose_cache_stats -- * Display a verbose message once per checkpoint with details about the cache state when * performing a history store table write. */ static void -__hs_insert_updates_verbose(WT_SESSION_IMPL *session, WT_BTREE *btree) +__hs_verbose_cache_stats(WT_SESSION_IMPL *session, WT_BTREE *btree) { WT_CACHE *cache; WT_CONNECTION_IMPL *conn; @@ -699,7 +699,7 @@ __wt_hs_insert_updates(WT_SESSION_IMPL *session, WT_PAGE *page, WT_MULTI *multi) err: if (ret == 0 && insert_cnt > 0) - __hs_insert_updates_verbose(session, btree); + __hs_verbose_cache_stats(session, btree); __wt_scr_free(session, &key); /* modify_value is allocated in __wt_modify_pack. Free it if it is allocated. */ diff --git a/src/third_party/wiredtiger/src/history/hs_verify.c b/src/third_party/wiredtiger/src/history/hs_verify.c index d31c0c51afd..d5b0100dd95 100644 --- a/src/third_party/wiredtiger/src/history/hs_verify.c +++ b/src/third_party/wiredtiger/src/history/hs_verify.c @@ -9,13 +9,13 @@ #include "wt_internal.h" /* - * __verify_history_store_id -- + * __hs_verify_id -- * Verify the history store for a single btree. Given a cursor to the tree, walk all history * store keys. 
This function assumes any caller has already opened a cursor to the history * store. */ static int -__verify_history_store_id(WT_SESSION_IMPL *session, WT_CURSOR_BTREE *ds_cbt, uint32_t this_btree_id) +__hs_verify_id(WT_SESSION_IMPL *session, WT_CURSOR_BTREE *ds_cbt, uint32_t this_btree_id) { WT_CURSOR *hs_cursor; WT_CURSOR_BTREE *hs_cbt; @@ -104,12 +104,12 @@ err: } /* - * __wt_history_store_verify_one -- + * __wt_hs_verify_one -- * Verify the history store for the btree that is set up in this session. This must be called * when we are known to have exclusive access to the btree. */ int -__wt_history_store_verify_one(WT_SESSION_IMPL *session) +__wt_hs_verify_one(WT_SESSION_IMPL *session) { WT_CURSOR *hs_cursor; WT_CURSOR_BTREE ds_cbt; @@ -141,19 +141,19 @@ __wt_history_store_verify_one(WT_SESSION_IMPL *session) if (ret == 0) { __wt_btcur_init(session, &ds_cbt); __wt_btcur_open(&ds_cbt); - ret = __verify_history_store_id(session, &ds_cbt, btree_id); + ret = __hs_verify_id(session, &ds_cbt, btree_id); WT_TRET(__wt_btcur_close(&ds_cbt, false)); } return (ret == WT_NOTFOUND ? 0 : ret); } /* - * __wt_history_store_verify -- + * __wt_hs_verify -- * Verify the history store. There can't be an entry in the history store without having the * latest value for the respective key in the data store. */ int -__wt_history_store_verify(WT_SESSION_IMPL *session) +__wt_hs_verify(WT_SESSION_IMPL *session) { WT_CURSOR *ds_cursor, *hs_cursor; WT_DECL_ITEM(buf); @@ -198,7 +198,7 @@ __wt_history_store_verify(WT_SESSION_IMPL *session) } WT_ERR(__wt_open_cursor(session, uri_data, NULL, NULL, &ds_cursor)); F_SET(ds_cursor, WT_CURSOR_RAW_OK); - ret = __verify_history_store_id(session, (WT_CURSOR_BTREE *)ds_cursor, btree_id); + ret = __hs_verify_id(session, (WT_CURSOR_BTREE *)ds_cursor, btree_id); if (ret == WT_NOTFOUND) stop = true; WT_TRET(ds_cursor->close(ds_cursor)); diff --git a/src/third_party/wiredtiger/src/include/cursor.h b/src/third_party/wiredtiger/src/include/cursor.h index a5f0ca58fa8..8cd3e211f13 100644 --- a/src/third_party/wiredtiger/src/include/cursor.h +++ b/src/third_party/wiredtiger/src/include/cursor.h @@ -62,15 +62,16 @@ struct __wt_cursor_backup { /* AUTOMATIC FLAG VALUE GENERATION START */ #define WT_CURBACKUP_CKPT_FAKE 0x001u /* Object has fake checkpoint */ -#define WT_CURBACKUP_DUP 0x002u /* Duplicated backup cursor */ -#define WT_CURBACKUP_FORCE_FULL 0x004u /* Force full file copy for this cursor */ -#define WT_CURBACKUP_FORCE_STOP 0x008u /* Force stop incremental backup */ -#define WT_CURBACKUP_HAS_CB_INFO 0x010u /* Object has checkpoint backup info */ -#define WT_CURBACKUP_INCR 0x020u /* Incremental backup cursor */ -#define WT_CURBACKUP_INCR_INIT 0x040u /* Cursor traversal initialized */ -#define WT_CURBACKUP_LOCKER 0x080u /* Hot-backup started */ -#define WT_CURBACKUP_QUERYID 0x100u /* Backup cursor for incremental ids */ -#define WT_CURBACKUP_RENAME 0x200u /* Object had a rename */ +#define WT_CURBACKUP_CONSOLIDATE 0x002u /* Consolidate returned info on this object */ +#define WT_CURBACKUP_DUP 0x004u /* Duplicated backup cursor */ +#define WT_CURBACKUP_FORCE_FULL 0x008u /* Force full file copy for this cursor */ +#define WT_CURBACKUP_FORCE_STOP 0x010u /* Force stop incremental backup */ +#define WT_CURBACKUP_HAS_CB_INFO 0x020u /* Object has checkpoint backup info */ +#define WT_CURBACKUP_INCR 0x040u /* Incremental backup cursor */ +#define WT_CURBACKUP_INCR_INIT 0x080u /* Cursor traversal initialized */ +#define WT_CURBACKUP_LOCKER 0x100u /* Hot-backup started */ +#define 
WT_CURBACKUP_QUERYID 0x200u /* Backup cursor for incremental ids */ +#define WT_CURBACKUP_RENAME 0x400u /* Object had a rename */ /* AUTOMATIC FLAG VALUE GENERATION STOP */ uint32_t flags; }; diff --git a/src/third_party/wiredtiger/src/include/extern.h b/src/third_party/wiredtiger/src/include/extern.h index 724d59514df..10d5a737718 100644 --- a/src/third_party/wiredtiger/src/include/extern.h +++ b/src/third_party/wiredtiger/src/include/extern.h @@ -732,10 +732,6 @@ extern int __wt_hex2byte(const u_char *from, u_char *to) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result)); extern int __wt_hex_to_raw(WT_SESSION_IMPL *session, const char *from, WT_ITEM *to) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result)); -extern int __wt_history_store_verify(WT_SESSION_IMPL *session) - WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result)); -extern int __wt_history_store_verify_one(WT_SESSION_IMPL *session) - WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result)); extern int __wt_hs_config(WT_SESSION_IMPL *session, const char **cfg) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result)); extern int __wt_hs_cursor_cache(WT_SESSION_IMPL *session) @@ -769,6 +765,10 @@ extern int __wt_hs_open(WT_SESSION_IMPL *session, const char **cfg) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result)); extern int __wt_hs_row_search(WT_CURSOR_BTREE *hs_cbt, WT_ITEM *srch_key, bool insert) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result)); +extern int __wt_hs_verify(WT_SESSION_IMPL *session) + WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result)); +extern int __wt_hs_verify_one(WT_SESSION_IMPL *session) + WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result)); extern int __wt_huffman_decode(WT_SESSION_IMPL *session, void *huffman_arg, const uint8_t *from_arg, size_t from_len, WT_ITEM *to_buf) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result)); extern int __wt_huffman_encode(WT_SESSION_IMPL *session, void *huffman_arg, const uint8_t *from_arg, diff --git a/src/third_party/wiredtiger/src/include/wiredtiger.in b/src/third_party/wiredtiger/src/include/wiredtiger.in index 7d4aed9df66..008f194e815 100644 --- a/src/third_party/wiredtiger/src/include/wiredtiger.in +++ b/src/third_party/wiredtiger/src/include/wiredtiger.in @@ -877,29 +877,33 @@ struct __wt_session { * @config{incremental = (, configure the cursor for block incremental backup usage. These * formats are only compatible with the backup data source; see @ref backup., a set of * related configuration options defined below.} - * @config{ enabled, - * whether to configure this backup as the starting point for a subsequent incremental - * backup., a boolean flag; default \c false.} - * @config{ file, the - * file name when opening a duplicate incremental backup cursor. That duplicate cursor will - * return the block modifications relevant to the given file name., a string; default - * empty.} - * @config{ force_stop, causes all block incremental backup - * information to be released. This is on an open_cursor call and the resources will be - * released when this cursor is closed. No other operations should be done on this open - * cursor., a boolean flag; default \c false.} - * @config{ granularity, - * this setting manages the granularity of how WiredTiger maintains modification maps - * internally. The larger the granularity\, the smaller amount of information WiredTiger - * need to maintain., an integer between 4KB and 2GB; default \c 16MB.} - * @config{ src_id, a string that identifies a previous checkpoint - * backup source as the source of this incremental backup. 
This identifier must have - * already been created by use of the 'this_id' configuration in an earlier backup. A - * source id is required to begin an incremental backup., a string; default empty.} - * @config{ this_id, a string that identifies the current system - * state as a future backup source for an incremental backup via 'src_id'. This identifier - * is required when opening an incremental backup cursor and an error will be returned if - * one is not provided., a string; default empty.} + * @config{ + * consolidate, causes block incremental backup information to be consolidated if adjacent + * granularity blocks are modified. If false\, information will be returned in granularity + * sized blocks only. This must be set on the primary backup cursor and it applies to all + * files for this backup., a boolean flag; default \c false.} + * @config{ enabled, whether to configure this backup as the starting + * point for a subsequent incremental backup., a boolean flag; default \c false.} + * @config{ file, the file name when opening a duplicate incremental + * backup cursor. That duplicate cursor will return the block modifications relevant to the + * given file name., a string; default empty.} + * @config{ force_stop, + * causes all block incremental backup information to be released. This is on an + * open_cursor call and the resources will be released when this cursor is closed. No other + * operations should be done on this open cursor., a boolean flag; default \c false.} + * @config{ granularity, this setting manages the granularity of how + * WiredTiger maintains modification maps internally. The larger the granularity\, the + * smaller amount of information WiredTiger need to maintain., an integer between 4KB and + * 2GB; default \c 16MB.} + * @config{ src_id, a string that identifies a + * previous checkpoint backup source as the source of this incremental backup. This + * identifier must have already been created by use of the 'this_id' configuration in an + * earlier backup. A source id is required to begin an incremental backup., a string; + * default empty.} + * @config{ this_id, a string that identifies the + * current system state as a future backup source for an incremental backup via 'src_id'. + * This identifier is required when opening an incremental backup cursor and an error will + * be returned if one is not provided., a string; default empty.} * @config{ ),,} * @config{next_random, configure the cursor to return a pseudo-random record from the * object when the WT_CURSOR::next method is called; valid only for row-store cursors. See diff --git a/src/third_party/wiredtiger/test/format/backup.c b/src/third_party/wiredtiger/test/format/backup.c index 8a358343aeb..12b657ca576 100644 --- a/src/third_party/wiredtiger/test/format/backup.c +++ b/src/third_party/wiredtiger/test/format/backup.c @@ -574,9 +574,10 @@ backup(void *arg) else active_now = &active[0]; src_id = g.backup_id - 1; + /* Use consolidation too. */ testutil_check(__wt_snprintf(cfg, sizeof(cfg), - "incremental=(enabled,src_id=%" PRIu64 ",this_id=%" PRIu64 ")", src_id, - g.backup_id)); + "incremental=(enabled,consolidate=true,src_id=%" PRIu64 ",this_id=%" PRIu64 ")", + src_id, g.backup_id)); /* Restart a full incremental every once in a while. 
*/ full = false; incr_full = mmrand(NULL, 1, 8) == 1; diff --git a/src/third_party/wiredtiger/test/suite/test_backup11.py b/src/third_party/wiredtiger/test/suite/test_backup11.py index d9a8b657620..3e6bd347b03 100644 --- a/src/third_party/wiredtiger/test/suite/test_backup11.py +++ b/src/third_party/wiredtiger/test/suite/test_backup11.py @@ -135,6 +135,16 @@ class test_backup11(wttest.WiredTigerTestCase, suite_subprocess): self.pr("Opened backup for error testing") # Now test all the error cases with an incremental primary open. + # - We cannot specify consolidate on the duplicate cursor. + config = 'incremental=(consolidate=true,file=test.wt)' + msg = "/consolidation can only be specified on a primary/" + self.pr("Test consolidation on a dup") + self.pr("=========") + # Test multiple duplicate backup cursors. + self.assertRaisesWithMessage(wiredtiger.WiredTigerError, + lambda:self.assertEquals(self.session.open_cursor(None, + bkup_c, config), 0), msg) + # - We cannot make multiple incremental duplcate backup cursors. # - We cannot duplicate the duplicate backup cursor. config = 'incremental=(file=test.wt)' diff --git a/src/third_party/wiredtiger/test/suite/test_backup13.py b/src/third_party/wiredtiger/test/suite/test_backup13.py index 46d360c09bb..10faed087ca 100644 --- a/src/third_party/wiredtiger/test/suite/test_backup13.py +++ b/src/third_party/wiredtiger/test/suite/test_backup13.py @@ -48,6 +48,24 @@ class test_backup13(wttest.WiredTigerTestCase, suite_subprocess): bigkey = 'Key' * 100 bigval = 'Value' * 100 + def simulate_crash_restart(self, olddir, newdir): + ''' Simulate a crash from olddir and restart in newdir. ''' + # with the connection still open, copy files to new directory + shutil.rmtree(newdir, ignore_errors=True) + os.mkdir(newdir) + for fname in os.listdir(olddir): + fullname = os.path.join(olddir, fname) + # Skip lock file on Windows since it is locked + if os.path.isfile(fullname) and \ + "WiredTiger.lock" not in fullname and \ + "Tmplog" not in fullname and \ + "Preplog" not in fullname: + shutil.copy(fullname, newdir) + # close the original connection and open to new directory + self.close_conn() + self.conn = self.setUpConnectionOpen(newdir) + self.session = self.setUpSessionOpen(self.conn) + def add_data(self, uri): c = self.session.open_cursor(uri) for i in range(0, self.nops): @@ -156,8 +174,15 @@ class test_backup13(wttest.WiredTigerTestCase, suite_subprocess): # Make sure after a force stop we cannot access old backup info. config = 'incremental=(src_id="ID1",this_id="ID3")' + + self.assertRaises(wiredtiger.WiredTigerError, + lambda: self.session.open_cursor('backup:', None, config)) + + # Make sure after a crash we cannot access old backup info. + self.simulate_crash_restart(".", "RESTART") self.assertRaises(wiredtiger.WiredTigerError, lambda: self.session.open_cursor('backup:', None, config)) + self.reopen_conn() # Make sure after a restart we cannot access old backup info. 
self.assertRaises(wiredtiger.WiredTigerError, diff --git a/src/third_party/wiredtiger/test/suite/test_backup14.py b/src/third_party/wiredtiger/test/suite/test_backup14.py index c312020bcef..67ebe68a8af 100644 --- a/src/third_party/wiredtiger/test/suite/test_backup14.py +++ b/src/third_party/wiredtiger/test/suite/test_backup14.py @@ -106,10 +106,10 @@ class test_backup14(wttest.WiredTigerTestCase, suite_subprocess): newfile = cursor.get_key() if self.counter == 0: - # Take a full bakcup into each incremental directory + # Take a full backup into each incremental directory for i in range(0, self.max_iteration): copy_from = newfile - # If it is log file, prepend the path. + # If it is a log file, prepend the path. if ("WiredTigerLog" in newfile): copy_to = self.home_incr + '.' + str(i) + '/' + self.logpath else: diff --git a/src/third_party/wiredtiger/test/suite/test_backup15.py b/src/third_party/wiredtiger/test/suite/test_backup15.py index 0327abe9838..509efcacb4f 100644 --- a/src/third_party/wiredtiger/test/suite/test_backup15.py +++ b/src/third_party/wiredtiger/test/suite/test_backup15.py @@ -124,10 +124,10 @@ class test_backup15(wttest.WiredTigerTestCase, suite_subprocess): newfile = bkup_c.get_key() if self.counter == 0: - # Take a full bakcup into each incremental directory + # Take a full backup into each incremental directory for i in range(0, self.max_iteration): copy_from = newfile - # If it is log file, prepend the path. + # If it is a log file, prepend the path. if ("WiredTigerLog" in newfile): copy_to = self.home_incr + '.' + str(i) + '/' + self.logpath else: diff --git a/src/third_party/wiredtiger/test/suite/test_backup17.py b/src/third_party/wiredtiger/test/suite/test_backup17.py new file mode 100644 index 00000000000..5fa250fd485 --- /dev/null +++ b/src/third_party/wiredtiger/test/suite/test_backup17.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python +# +# Public Domain 2014-2020 MongoDB, Inc. +# Public Domain 2008-2014 WiredTiger, Inc. +# +# This is free and unencumbered software released into the public domain. +# +# Anyone is free to copy, modify, publish, use, compile, sell, or +# distribute this software, either in source code form or as a compiled +# binary, for any purpose, commercial or non-commercial, and by any +# means. +# +# In jurisdictions that recognize copyright laws, the author or authors +# of this software dedicate any and all copyright interest in the +# software to the public domain. We make this dedication for the benefit +# of the public at large and to the detriment of our heirs and +# successors. We intend this dedication to be an overt act of +# relinquishment in perpetuity of all present and future rights to this +# software under copyright law. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. + +import wiredtiger, wttest +import os, shutil +from helper import compare_files +from suite_subprocess import suite_subprocess +from wtdataset import simple_key +from wtscenario import make_scenarios + +# test_backup17.py +# Test cursor backup with a block-based incremental cursor and consolidate. 
+class test_backup17(wttest.WiredTigerTestCase, suite_subprocess): + dir='backup.dir' # Backup directory name + gran="100K" + granval=100*1024 + logmax="100K" + uri="table:test" + uri2="table:test2" + nops=1000 + mult=0 + + conn_config='cache_size=1G,log=(enabled,file_max=%s)' % logmax + + pfx = 'test_backup' + # Set the key and value big enough that we modify a few blocks. + bigkey = 'Key' * 100 + bigval = 'Value' * 100 + + def add_data(self, uri): + c = self.session.open_cursor(uri) + for i in range(0, self.nops): + num = i + (self.mult * self.nops) + key = self.bigkey + str(num) + val = self.bigval + str(num) + c[key] = val + self.session.checkpoint() + c.close() + + def take_incr_backup(self, id, consolidate): + # Open the backup data source for incremental backup. + buf = 'incremental=(src_id="ID' + str(id - 1) + '",this_id="ID' + str(id) + '"' + if consolidate: + buf += ',consolidate=true' + buf += ')' + bkup_c = self.session.open_cursor('backup:', None, buf) + lens = [] + saw_multiple = False + while True: + ret = bkup_c.next() + if ret != 0: + break + newfile = bkup_c.get_key() + config = 'incremental=(file=' + newfile + ')' + self.pr('Open incremental cursor with ' + config) + dup_cnt = 0 + dupc = self.session.open_cursor(None, bkup_c, config) + while True: + ret = dupc.next() + if ret != 0: + break + incrlist = dupc.get_keys() + offset = incrlist[0] + size = incrlist[1] + curtype = incrlist[2] + # 1 is WT_BACKUP_FILE + # 2 is WT_BACKUP_RANGE + self.assertTrue(curtype == 1 or curtype == 2) + if curtype == 1: + self.pr('Copy from: ' + newfile + ' (' + str(size) + ') to ' + self.dir) + shutil.copy(newfile, self.dir) + else: + self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size)) + lens.append(size) + rfp = open(newfile, "r+b") + wfp = open(self.dir + '/' + newfile, "w+b") + rfp.seek(offset, 0) + wfp.seek(offset, 0) + if size > self.granval: + saw_multiple = True + buf = rfp.read(size) + wfp.write(buf) + rfp.close() + wfp.close() + dup_cnt += 1 + dupc.close() + self.assertEqual(ret, wiredtiger.WT_NOTFOUND) + bkup_c.close() + if consolidate: + self.assertTrue(saw_multiple) + else: + self.assertFalse(saw_multiple) + return lens + + def test_backup17(self): + + self.session.create(self.uri, "key_format=S,value_format=S") + self.session.create(self.uri2, "key_format=S,value_format=S") + self.add_data(self.uri) + self.add_data(self.uri2) + self.mult += 1 + + # Open up the backup cursor. This causes a new log file to be created. + # That log file is not part of the list returned. This is a full backup + # primary cursor with incremental configured. + os.mkdir(self.dir) + config = 'incremental=(enabled,granularity=%s,this_id="ID1")' % self.gran + bkup_c = self.session.open_cursor('backup:', None, config) + + # Now copy the files returned by the backup cursor. + all_files = [] + while True: + ret = bkup_c.next() + if ret != 0: + break + newfile = bkup_c.get_key() + sz = os.path.getsize(newfile) + self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir) + shutil.copy(newfile, self.dir) + all_files.append(newfile) + self.assertEqual(ret, wiredtiger.WT_NOTFOUND) + bkup_c.close() + + # This is the main part of the test for consolidate. Add data to the first table. + # Then perform the incremental backup with consolidate off (the default). Then add the + # same data to the second table. Perform an incremental backup with consolidate on and + # verify we get fewer, consolidated values. 
+ self.add_data(self.uri) + uri1_lens = self.take_incr_backup(2, False) + + self.add_data(self.uri2) + uri2_lens = self.take_incr_backup(3, True) + + # Assert that we recorded fewer lengths on the consolidated backup. + self.assertLess(len(uri2_lens), len(uri1_lens)) + # Assert that we recorded the same total data length for both. + self.assertEqual(sum(uri2_lens), sum(uri1_lens)) + +if __name__ == '__main__': + wttest.run() diff --git a/src/third_party/wiredtiger/test/suite/test_backup19.py b/src/third_party/wiredtiger/test/suite/test_backup19.py new file mode 100644 index 00000000000..547445cafbe --- /dev/null +++ b/src/third_party/wiredtiger/test/suite/test_backup19.py @@ -0,0 +1,290 @@ +#!/usr/bin/env python +# +# Public Domain 2014-2020 MongoDB, Inc. +# Public Domain 2008-2014 WiredTiger, Inc. +# +# This is free and unencumbered software released into the public domain. +# +# Anyone is free to copy, modify, publish, use, compile, sell, or +# distribute this software, either in source code form or as a compiled +# binary, for any purpose, commercial or non-commercial, and by any +# means. +# +# In jurisdictions that recognize copyright laws, the author or authors +# of this software dedicate any and all copyright interest in the +# software to the public domain. We make this dedication for the benefit +# of the public at large and to the detriment of our heirs and +# successors. We intend this dedication to be an overt act of +# relinquishment in perpetuity of all present and future rights to this +# software under copyright law. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. + +import wiredtiger, wttest +import os, shutil +from helper import compare_files +from suite_subprocess import suite_subprocess +from wtdataset import simple_key +from wtscenario import make_scenarios +import glob + +# test_backup19.py +# Test cursor backup with a block-based incremental cursor source id only. +class test_backup19(wttest.WiredTigerTestCase, suite_subprocess): + bkp_home = "WT_BLOCK" + counter=0 + conn_config='cache_size=1G,log=(enabled,file_max=100K)' + logmax="100K" + mult=0 + nops=10000 + savefirst=0 + savekey='NOTSET' + uri="table:main" + + dir='backup.dir' # Backup directory name + home_full = "WT_BLOCK_LOG_FULL" + home_incr = "WT_BLOCK_LOG_INCR" + + full_out = "./backup_block_full" + incr_out = "./backup_block_incr" + logpath = "logpath" + new_table=False + initial_backup=False + + pfx = 'test_backup' + # Set the key and value big enough that we modify a few blocks. + bigkey = 'Key' * 100 + bigval = 'Value' * 100 + + # + # Set up all the directories needed for the test. We have a full backup directory for each + # iteration and an incremental backup for each iteration. That way we can compare the full and + # incremental each time through. + # + def setup_directories(self): + # We're only coming through once so just set up the 0 and 1 directories. + for i in range(0, 2): + # The log directory is a subdirectory of the home directory, + # creating that will make the home directory also. + log_dir = self.home_incr + '.' 
+ str(i) + '/' + self.logpath + os.makedirs(log_dir) + if i != 0: + log_dir = self.home_full + '.' + str(i) + '/' + self.logpath + os.makedirs(log_dir) + + def range_copy(self, filename, offset, size): + read_from = filename + old_to = self.home_incr + '.' + str(self.counter - 1) + '/' + filename + write_to = self.home_incr + '.' + str(self.counter) + '/' + filename + rfp = open(read_from, "r+b") + self.pr('RANGE CHECK file ' + old_to + ' offset ' + str(offset) + ' len ' + str(size)) + rfp2 = open(old_to, "r+b") + rfp.seek(offset, 0) + rfp2.seek(offset, 0) + buf = rfp.read(size) + buf2 = rfp2.read(size) + # This assertion tests that the offset range we're given actually changed + # from the previous backup. + self.assertNotEqual(buf, buf2) + wfp = open(write_to, "w+b") + wfp.seek(offset, 0) + wfp.write(buf) + rfp.close() + rfp2.close() + wfp.close() + + def take_full_backup(self): + if self.counter != 0: + hdir = self.home_full + '.' + str(self.counter) + else: + hdir = self.home_incr + + # + # First time through we take a full backup into the incremental directories. Otherwise only + # into the appropriate full directory. + # + buf = None + if self.initial_backup == True: + buf = 'incremental=(granularity=1M,enabled=true,this_id=ID0)' + + bkup_c = self.session.open_cursor('backup:', None, buf) + # We cannot use 'for newfile in bkup_c:' usage because backup cursors don't have + # values and adding in get_values returns ENOTSUP and causes the usage to fail. + # If that changes then this, and the use of the duplicate below can change. + while True: + ret = bkup_c.next() + if ret != 0: + break + newfile = bkup_c.get_key() + + if self.counter == 0: + # Take a full backup into each incremental directory + for i in range(0, 2): + copy_from = newfile + # If it is a log file, prepend the path. + if ("WiredTigerLog" in newfile): + copy_to = self.home_incr + '.' + str(i) + '/' + self.logpath + else: + copy_to = self.home_incr + '.' + str(i) + shutil.copy(copy_from, copy_to) + else: + copy_from = newfile + # If it is log file, prepend the path. + if ("WiredTigerLog" in newfile): + copy_to = hdir + '/' + self.logpath + else: + copy_to = hdir + + shutil.copy(copy_from, copy_to) + self.assertEqual(ret, wiredtiger.WT_NOTFOUND) + bkup_c.close() + + def take_incr_backup(self): + self.assertTrue(self.counter > 0) + # Open the backup data source for incremental backup. + buf = 'incremental=(src_id="ID' + str(self.counter - 1) + '")' + self.pr(buf) + bkup_c = self.session.open_cursor('backup:', None, buf) + + # We cannot use 'for newfile in bkup_c:' usage because backup cursors don't have + # values and adding in get_values returns ENOTSUP and causes the usage to fail. + # If that changes then this, and the use of the duplicate below can change. + while True: + ret = bkup_c.next() + if ret != 0: + break + newfile = bkup_c.get_key() + h = self.home_incr + '.0' + copy_from = newfile + # If it is log file, prepend the path. + if ("WiredTigerLog" in newfile): + copy_to = h + '/' + self.logpath + else: + copy_to = h + + shutil.copy(copy_from, copy_to) + first = True + config = 'incremental=(file=' + newfile + ')' + dup_cnt = 0 + # For each file listed, open a duplicate backup cursor and copy the blocks. + incr_c = self.session.open_cursor(None, bkup_c, config) + + # We cannot use 'for newfile in incr_c:' usage because backup cursors don't have + # values and adding in get_values returns ENOTSUP and causes the usage to fail. + # If that changes then this, and the use of the duplicate below can change. 
+ while True: + ret = incr_c.next() + if ret != 0: + break + incrlist = incr_c.get_keys() + offset = incrlist[0] + size = incrlist[1] + curtype = incrlist[2] + self.assertTrue(curtype == wiredtiger.WT_BACKUP_FILE or curtype == wiredtiger.WT_BACKUP_RANGE) + if curtype == wiredtiger.WT_BACKUP_FILE: + # Copy the whole file. + if first == True: + h = self.home_incr + '.' + str(self.counter) + first = False + + copy_from = newfile + if ("WiredTigerLog" in newfile): + copy_to = h + '/' + self.logpath + else: + copy_to = h + shutil.copy(copy_from, copy_to) + else: + # Copy the block range. + self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size)) + self.range_copy(newfile, offset, size) + dup_cnt += 1 + self.assertEqual(ret, wiredtiger.WT_NOTFOUND) + incr_c.close() + + # For each file, we want to copy it into each of the later incremental directories. + for i in range(self.counter, 2): + h = self.home_incr + '.' + str(i) + copy_from = newfile + if ("WiredTigerLog" in newfile): + copy_to = h + '/' + self.logpath + else: + copy_to = h + shutil.copy(copy_from, copy_to) + self.assertEqual(ret, wiredtiger.WT_NOTFOUND) + bkup_c.close() + + def compare_backups(self, t_uri): + # Run wt dump on full backup directory. + full_backup_out = self.full_out + '.' + str(self.counter) + home_dir = self.home_full + '.' + str(self.counter) + if self.counter == 0: + home_dir = self.home + self.runWt(['-R', '-h', home_dir, 'dump', t_uri], outfilename=full_backup_out) + + # Run wt dump on incremental backup directory. + incr_backup_out = self.incr_out + '.' + str(self.counter) + home_dir = self.home_incr + '.' + str(self.counter) + self.runWt(['-R', '-h', home_dir, 'dump', t_uri], outfilename=incr_backup_out) + + self.assertEqual(True, + compare_files(self, full_backup_out, incr_backup_out)) + + # + # Add data to the given uri. + # + def add_data(self, uri): + c = self.session.open_cursor(uri, None, None) + # The first time we want to add in a lot of data. Then after that we want to + # rapidly change a single key to create a hotspot in one block. + if self.savefirst < 2: + nops = self.nops + else: + nops = self.nops // 10 + for i in range(0, nops): + num = i + (self.mult * nops) + if self.savefirst >= 2: + key = self.savekey + else: + key = str(num) + self.bigkey + str(num) + val = str(num) + self.bigval + str(num) + c[key] = val + if self.savefirst == 0: + self.savekey = key + self.savefirst += 1 + c.close() + + # Increase the multiplier so that later calls insert unique items. + self.mult += 1 + # Increase the counter so that later backups have unique ids. + if self.initial_backup == False: + self.counter += 1 + + def test_backup19(self): + os.mkdir(self.bkp_home) + self.home = self.bkp_home + self.session.create(self.uri, "key_format=S,value_format=S") + + self.setup_directories() + + self.pr('*** Add data, checkpoint, take backups and validate ***') + self.pr('Adding initial data') + self.initial_backup = True + self.add_data(self.uri) + self.take_full_backup() + self.initial_backup = False + self.session.checkpoint() + + self.add_data(self.uri) + self.session.checkpoint() + self.take_full_backup() + self.take_incr_backup() + self.compare_backups(self.uri) + +if __name__ == '__main__': + wttest.run() |
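Relatedly, WT-6844 (the `cur_backup.c` hunk above) makes a forced stop of incremental backups durable by checkpointing the metadata when the force-stop cursor is closed. A minimal, hypothetical caller-side sketch, again assuming an open `WT_SESSION` and omitting error handling beyond return codes:

```c
#include <wiredtiger.h>

/*
 * Illustrative only: permanently release all block incremental backup information.
 * After this change, closing the force-stop cursor also forces a checkpoint of the
 * metadata, so the released state survives a crash and restart.
 */
static int
force_stop_incremental(WT_SESSION *session)
{
    WT_CURSOR *backup_cur;
    int ret;

    if ((ret = session->open_cursor(
           session, "backup:", NULL, "incremental=(force_stop=true)", &backup_cur)) != 0)
        return (ret);
    /* No other operations should be done on this open cursor; just close it. */
    return (backup_cur->close(backup_cur));
}
```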