author     Luke Chen <luke.chen@mongodb.com>                 2021-01-06 08:48:42 +1100
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-01-05 22:19:51 +0000
commit     e0bf15d44afb336a58a189d092437f966152e04e (patch)
tree       a22b988f14dc78ef2a86dd71f343094549653feb
parent     52b11e90efa467dbe6b55977e5d2239aba3f6ec4 (diff)
Import wiredtiger: 7e416a7dd98de324bba158b087197124df70c5a9 from branch mongodb-4.2
ref: eb1b72848c..7e416a7dd9 for: 4.2.12

WT-6835 Add API to allow consolidating incremental backup info
WT-6839 Add API to query existing incremental backup IDs
WT-6882 Files created during incremental backup should be copied in full
WT-6922 Add random consolidate testing to incr_backup
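The tickets above add a "consolidate" option for block incremental backup and a "backup:query_id" cursor. As a rough illustration (not part of this commit), the sketch below shows how an application might take a consolidated incremental backup against the new API; the ID strings, the CHECK macro and the printf-based "copy" are placeholders, and the shipped example is examples/c/ex_backup_block.c.

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <wiredtiger.h>

/* Placeholder error handling for the sketch. */
#define CHECK(call)                                                   \
    do {                                                              \
        int __ret = (call);                                           \
        if (__ret != 0) {                                             \
            fprintf(stderr, "%s\n", wiredtiger_strerror(__ret));      \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

/*
 * Take an incremental backup from "ID1" to "ID2" with consolidation enabled.
 * Assumes a previous backup already established this_id="ID1".
 */
static void
take_consolidated_incremental(WT_SESSION *session)
{
    WT_CURSOR *backup_cur, *file_cur;
    uint64_t offset, size, type;
    int ret;
    char cfg[256];
    const char *filename;

    /* consolidate=true must be set on the primary backup cursor. */
    CHECK(session->open_cursor(session, "backup:", NULL,
      "incremental=(src_id=\"ID1\",this_id=\"ID2\",consolidate=true)", &backup_cur));

    while ((ret = backup_cur->next(backup_cur)) == 0) {
        CHECK(backup_cur->get_key(backup_cur, &filename));

        /* A duplicate cursor on each file returns the modified ranges. */
        (void)snprintf(cfg, sizeof(cfg), "incremental=(file=%s)", filename);
        CHECK(session->open_cursor(session, NULL, backup_cur, cfg, &file_cur));
        while ((ret = file_cur->next(file_cur)) == 0) {
            CHECK(file_cur->get_key(file_cur, &offset, &size, &type));
            if (type == WT_BACKUP_RANGE)
                /* With consolidation, size may span several granularity units. */
                printf("copy %s offset %" PRIu64 " length %" PRIu64 "\n",
                  filename, offset, size);
            else /* WT_BACKUP_FILE: copy the whole file. */
                printf("copy %s in full\n", filename);
        }
        /* ret is WT_NOTFOUND when the range scan completes. */
        CHECK(file_cur->close(file_cur));
    }
    /* ret is WT_NOTFOUND when the file list is exhausted. */
    CHECK(backup_cur->close(backup_cur));
}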
-rw-r--r--  src/third_party/wiredtiger/dist/api_data.py               |   6
-rw-r--r--  src/third_party/wiredtiger/examples/c/ex_backup_block.c   |  25
-rw-r--r--  src/third_party/wiredtiger/import.data                    |   2
-rw-r--r--  src/third_party/wiredtiger/src/block/block_ckpt.c         |   8
-rw-r--r--  src/third_party/wiredtiger/src/block/block_open.c         |   7
-rw-r--r--  src/third_party/wiredtiger/src/config/config_def.c        |  16
-rw-r--r--  src/third_party/wiredtiger/src/cursor/cur_backup.c        |  66
-rw-r--r--  src/third_party/wiredtiger/src/cursor/cur_backup_incr.c   |  53
-rw-r--r--  src/third_party/wiredtiger/src/docs/backup.dox            |   9
-rw-r--r--  src/third_party/wiredtiger/src/docs/data-sources.dox      |   7
-rw-r--r--  src/third_party/wiredtiger/src/include/block.h            |   2
-rw-r--r--  src/third_party/wiredtiger/src/include/cursor.h           |  18
-rw-r--r--  src/third_party/wiredtiger/src/include/wiredtiger.in      |  50
-rw-r--r--  src/third_party/wiredtiger/test/csuite/incr_backup/main.c |  50
-rw-r--r--  src/third_party/wiredtiger/test/format/backup.c           |   5
-rw-r--r--  src/third_party/wiredtiger/test/suite/test_backup11.py    |  11
-rw-r--r--  src/third_party/wiredtiger/test/suite/test_backup12.py    |   1
-rw-r--r--  src/third_party/wiredtiger/test/suite/test_backup13.py    |   1
-rw-r--r--  src/third_party/wiredtiger/test/suite/test_backup16.py    |   1
-rw-r--r--  src/third_party/wiredtiger/test/suite/test_backup17.py    | 165
-rw-r--r--  src/third_party/wiredtiger/test/suite/test_backup18.py    | 136

21 files changed, 557 insertions, 82 deletions
diff --git a/src/third_party/wiredtiger/dist/api_data.py b/src/third_party/wiredtiger/dist/api_data.py
index 93fc5cdb002..591ed2e38e5 100644
--- a/src/third_party/wiredtiger/dist/api_data.py
+++ b/src/third_party/wiredtiger/dist/api_data.py
@@ -1240,6 +1240,12 @@ methods = {
configure the cursor for block incremental backup usage. These formats
are only compatible with the backup data source; see @ref backup''',
type='category', subconfig=[
+ Config('consolidate', 'false', r'''
+ causes block incremental backup information to be consolidated if adjacent
+ granularity blocks are modified. If false, information will be returned in
+ granularity sized blocks only. This must be set on the primary backup cursor and it
+ applies to all files for this backup''',
+ type='boolean'),
Config('enabled', 'false', r'''
whether to configure this backup as the starting point for a subsequent
incremental backup''',
diff --git a/src/third_party/wiredtiger/examples/c/ex_backup_block.c b/src/third_party/wiredtiger/examples/c/ex_backup_block.c
index 24ec718af53..dddfaedaa98 100644
--- a/src/third_party/wiredtiger/examples/c/ex_backup_block.c
+++ b/src/third_party/wiredtiger/examples/c/ex_backup_block.c
@@ -327,13 +327,23 @@ take_incr_backup(WT_SESSION *session, int i)
size_t alloc, count, rdsize, tmp_sz;
int j, ret, rfd, wfd;
char buf[1024], h[256], *tmp;
- const char *filename;
+ const char *filename, *idstr;
bool first;
tmp = NULL;
tmp_sz = 0;
+ /*! [Query existing IDs] */
+ error_check(session->open_cursor(session, "backup:query_id", NULL, NULL, &backup_cur));
+ while ((ret = backup_cur->next(backup_cur)) == 0) {
+ error_check(backup_cur->get_key(backup_cur, &idstr));
+ printf("Existing incremental ID string: %s\n", idstr);
+ }
+ error_check(backup_cur->close(backup_cur));
+ /*! [Query existing IDs] */
+
/* Open the backup data source for incremental backup. */
- (void)snprintf(buf, sizeof(buf), "incremental=(src_id=\"ID%d\",this_id=\"ID%d\")", i - 1, i);
+ (void)snprintf(buf, sizeof(buf), "incremental=(src_id=\"ID%d\",this_id=\"ID%d\"%s)", i - 1, i,
+ i % 2 == 0 ? "" : ",consolidate=true");
error_check(session->open_cursor(session, "backup:", NULL, buf, &backup_cur));
rfd = wfd = -1;
count = 0;
@@ -447,7 +457,7 @@ main(int argc, char *argv[])
WT_CURSOR *backup_cur;
WT_SESSION *session;
int i, j, ret;
- char cmd_buf[256];
+ char cmd_buf[256], *idstr;
(void)argc; /* Unused variable */
(void)testutil_set_progname(argv);
@@ -500,6 +510,15 @@ main(int argc, char *argv[])
error_check(wt_conn->close(wt_conn, NULL));
error_check(wiredtiger_open(home, NULL, CONN_CONFIG, &wt_conn));
error_check(wt_conn->open_session(wt_conn, NULL, NULL, &session));
+
+ printf("Verify query after reopen\n");
+ error_check(session->open_cursor(session, "backup:query_id", NULL, NULL, &backup_cur));
+ while ((ret = backup_cur->next(backup_cur)) == 0) {
+ error_check(backup_cur->get_key(backup_cur, &idstr));
+ printf("Existing incremental ID string: %s\n", idstr);
+ }
+ error_check(backup_cur->close(backup_cur));
+
/*
* We should have an entry for i-1 and i-2. Use the older one.
*/
diff --git a/src/third_party/wiredtiger/import.data b/src/third_party/wiredtiger/import.data
index d44307b3485..b9c0cb846c3 100644
--- a/src/third_party/wiredtiger/import.data
+++ b/src/third_party/wiredtiger/import.data
@@ -2,5 +2,5 @@
"vendor": "wiredtiger",
"github": "wiredtiger/wiredtiger.git",
"branch": "mongodb-4.2",
- "commit": "eb1b72848c33e5c19a31a316ba6831c76c3a0241"
+ "commit": "7e416a7dd98de324bba158b087197124df70c5a9"
}
diff --git a/src/third_party/wiredtiger/src/block/block_ckpt.c b/src/third_party/wiredtiger/src/block/block_ckpt.c
index 786a66c9c67..f0651c16d28 100644
--- a/src/third_party/wiredtiger/src/block/block_ckpt.c
+++ b/src/third_party/wiredtiger/src/block/block_ckpt.c
@@ -397,7 +397,8 @@ __ckpt_add_blkmod_entry(
* Add the checkpoint's allocated blocks to all valid incremental backup source identifiers.
*/
static int
-__ckpt_add_blk_mods_alloc(WT_SESSION_IMPL *session, WT_CKPT *ckptbase, WT_BLOCK_CKPT *ci)
+__ckpt_add_blk_mods_alloc(
+ WT_SESSION_IMPL *session, WT_CKPT *ckptbase, WT_BLOCK_CKPT *ci, WT_BLOCK *block)
{
WT_BLOCK_MODS *blk_mod;
WT_CKPT *ckpt;
@@ -417,10 +418,13 @@ __ckpt_add_blk_mods_alloc(WT_SESSION_IMPL *session, WT_CKPT *ckptbase, WT_BLOCK_
if (!F_ISSET(blk_mod, WT_BLOCK_MODS_VALID))
continue;
+ if (block->created_during_backup)
+ WT_RET(__ckpt_add_blkmod_entry(session, blk_mod, 0, block->allocsize));
WT_EXT_FOREACH (ext, ci->alloc.off) {
WT_RET(__ckpt_add_blkmod_entry(session, blk_mod, ext->off, ext->size));
}
}
+ block->created_during_backup = false;
return (0);
}
@@ -595,7 +599,7 @@ __ckpt_process(WT_SESSION_IMPL *session, WT_BLOCK *block, WT_CKPT *ckptbase)
* Record the checkpoint's allocated blocks. Do so before skipping any processing and before
* possibly merging in blocks from any previous checkpoint.
*/
- WT_ERR(__ckpt_add_blk_mods_alloc(session, ckptbase, ci));
+ WT_ERR(__ckpt_add_blk_mods_alloc(session, ckptbase, ci, block));
/* Skip the additional processing if we aren't deleting checkpoints. */
if (!deleting)
diff --git a/src/third_party/wiredtiger/src/block/block_open.c b/src/third_party/wiredtiger/src/block/block_open.c
index bc4b8cd5a03..2459020478f 100644
--- a/src/third_party/wiredtiger/src/block/block_open.c
+++ b/src/third_party/wiredtiger/src/block/block_open.c
@@ -207,6 +207,13 @@ __wt_block_open(WT_SESSION_IMPL *session, const char *filename, const char *cfg[
/* Set the file's size. */
WT_ERR(__wt_filesize(session, block->fh, &block->size));
+ /*
+ * If we're opening a file and it only contains a header and we're doing incremental backup
+ * indicate this so that the first checkpoint is sure to set all the bits as dirty to cover the
+ * header so that the header gets copied.
+ */
+ if (block->size == allocsize && F_ISSET(conn, WT_CONN_INCR_BACKUP))
+ block->created_during_backup = true;
/* Initialize the live checkpoint's lock. */
WT_ERR(__wt_spin_init(session, &block->live_lock, "block manager"));
diff --git a/src/third_party/wiredtiger/src/config/config_def.c b/src/third_party/wiredtiger/src/config/config_def.c
index d6cae821ce1..fd9e3495f95 100644
--- a/src/third_party/wiredtiger/src/config/config_def.c
+++ b/src/third_party/wiredtiger/src/config/config_def.c
@@ -297,8 +297,8 @@ static const WT_CONFIG_CHECK confchk_WT_SESSION_log_flush[] = {
{NULL, NULL, NULL, NULL, NULL, 0}};
static const WT_CONFIG_CHECK confchk_WT_SESSION_open_cursor_incremental_subconfigs[] = {
- {"enabled", "boolean", NULL, NULL, NULL, 0}, {"file", "string", NULL, NULL, NULL, 0},
- {"force_stop", "boolean", NULL, NULL, NULL, 0},
+ {"consolidate", "boolean", NULL, NULL, NULL, 0}, {"enabled", "boolean", NULL, NULL, NULL, 0},
+ {"file", "string", NULL, NULL, NULL, 0}, {"force_stop", "boolean", NULL, NULL, NULL, 0},
{"granularity", "int", NULL, "min=4KB,max=2GB", NULL, 0},
{"src_id", "string", NULL, NULL, NULL, 0}, {"this_id", "string", NULL, NULL, NULL, 0},
{NULL, NULL, NULL, NULL, NULL, 0}};
@@ -308,7 +308,7 @@ static const WT_CONFIG_CHECK confchk_WT_SESSION_open_cursor[] = {
{"checkpoint", "string", NULL, NULL, NULL, 0},
{"checkpoint_wait", "boolean", NULL, NULL, NULL, 0},
{"dump", "string", NULL, "choices=[\"hex\",\"json\",\"print\"]", NULL, 0},
- {"incremental", "category", NULL, NULL, confchk_WT_SESSION_open_cursor_incremental_subconfigs, 6},
+ {"incremental", "category", NULL, NULL, confchk_WT_SESSION_open_cursor_incremental_subconfigs, 7},
{"next_random", "boolean", NULL, NULL, NULL, 0},
{"next_random_sample_size", "string", NULL, NULL, NULL, 0},
{"overwrite", "boolean", NULL, NULL, NULL, 0}, {"raw", "boolean", NULL, NULL, NULL, 0},
@@ -912,11 +912,11 @@ static const WT_CONFIG_ENTRY config_entries[] = {{"WT_CONNECTION.add_collator",
{"WT_SESSION.log_printf", "", NULL, 0},
{"WT_SESSION.open_cursor",
"append=false,bulk=false,checkpoint=,checkpoint_wait=true,dump=,"
- "incremental=(enabled=false,file=,force_stop=false,"
- "granularity=16MB,src_id=,this_id=),next_random=false,"
- "next_random_sample_size=0,overwrite=true,raw=false,"
- "read_once=false,readonly=false,skip_sort_check=false,statistics="
- ",target=",
+ "incremental=(consolidate=false,enabled=false,file=,"
+ "force_stop=false,granularity=16MB,src_id=,this_id=),"
+ "next_random=false,next_random_sample_size=0,overwrite=true,"
+ "raw=false,read_once=false,readonly=false,skip_sort_check=false,"
+ "statistics=,target=",
confchk_WT_SESSION_open_cursor, 15},
{"WT_SESSION.prepare_transaction", "prepare_timestamp=", confchk_WT_SESSION_prepare_transaction,
1},
diff --git a/src/third_party/wiredtiger/src/cursor/cur_backup.c b/src/third_party/wiredtiger/src/cursor/cur_backup.c
index 1340dfafb95..2fe3b7dfc4a 100644
--- a/src/third_party/wiredtiger/src/cursor/cur_backup.c
+++ b/src/third_party/wiredtiger/src/cursor/cur_backup.c
@@ -273,6 +273,15 @@ __wt_curbackup_open(WT_SESSION_IMPL *session, const char *uri, WT_CURSOR *other,
if (othercb != NULL)
WT_CURSOR_BACKUP_CHECK_STOP(othercb);
+ /* Special backup cursor to query incremental IDs. */
+ if (strcmp(uri, "backup:query_id") == 0) {
+ /* Top level cursor code does not allow a URI and cursor. We don't need to check here. */
+ WT_ASSERT(session, othercb == NULL);
+ if (!F_ISSET(S2C(session), WT_CONN_INCR_BACKUP))
+ WT_RET_MSG(session, EINVAL, "Incremental backup is not configured");
+ F_SET(cb, WT_CURBACKUP_QUERYID);
+ }
+
/*
* Start the backup and fill in the cursor's list. Acquire the schema lock, we need a consistent
* view when creating a copy.
@@ -432,7 +441,7 @@ __backup_config(WT_SESSION_IMPL *session, WT_CURSOR_BACKUP *cb, const char *cfg[
WT_DECL_ITEM(tmp);
WT_DECL_RET;
const char *uri;
- bool incremental_config, is_dup, log_config, target_list;
+ bool consolidate, incremental_config, is_dup, log_config, target_list;
*foundp = *incr_only = *log_only = false;
@@ -458,6 +467,19 @@ __backup_config(WT_SESSION_IMPL *session, WT_CURSOR_BACKUP *cb, const char *cfg[
}
/*
+ * Consolidation can be on a per incremental basis or a per-file duplicate cursor basis.
+ */
+ WT_RET(__wt_config_gets(session, cfg, "incremental.consolidate", &cval));
+ consolidate = F_MASK(cb, WT_CURBACKUP_CONSOLIDATE);
+ if (cval.val) {
+ if (is_dup)
+ WT_RET_MSG(session, EINVAL,
+ "Incremental consolidation can only be specified on a primary backup cursor");
+ F_SET(cb, WT_CURBACKUP_CONSOLIDATE);
+ incremental_config = true;
+ }
+
+ /*
* Specifying an incremental file means we're opening a duplicate backup cursor.
*/
WT_RET(__wt_config_gets(session, cfg, "incremental.file", &cval));
@@ -578,13 +600,36 @@ __backup_config(WT_SESSION_IMPL *session, WT_CURSOR_BACKUP *cb, const char *cfg[
F_SET(cb, WT_CURBACKUP_INCR);
}
err:
- if (ret != 0 && cb->incr_src != NULL)
+ if (ret != 0 && cb->incr_src != NULL) {
F_CLR(cb->incr_src, WT_BLKINCR_INUSE);
+ F_CLR(cb, WT_CURBACKUP_CONSOLIDATE);
+ F_SET(cb, consolidate);
+ }
__wt_scr_free(session, &tmp);
return (ret);
}
/*
+ * __backup_query_setup --
+ * Setup the names to return with a backup query cursor.
+ */
+static int
+__backup_query_setup(WT_SESSION_IMPL *session, WT_CURSOR_BACKUP *cb)
+{
+ WT_BLKINCR *blkincr;
+ u_int i;
+
+ for (i = 0; i < WT_BLKINCR_MAX; ++i) {
+ blkincr = &S2C(session)->incr_backups[i];
+ /* If it isn't valid, skip it. */
+ if (!F_ISSET(blkincr, WT_BLKINCR_VALID))
+ continue;
+ WT_RET(__backup_list_append(session, cb, blkincr->id_str));
+ }
+ return (0);
+}
+
+/*
* __backup_start --
* Start a backup.
*/
@@ -625,7 +670,7 @@ __backup_start(
* set a flag and we're done. Actions will be performed on cursor close.
*/
WT_RET_NOTFOUND_OK(__wt_config_gets(session, cfg, "incremental.force_stop", &cval));
- if (cval.val) {
+ if (!F_ISSET(cb, WT_CURBACKUP_QUERYID) && cval.val) {
/*
* If we're force stopping incremental backup, set the flag. The resources involved in
* incremental backup will be released on cursor close and that is the only expected usage
@@ -655,7 +700,16 @@ __backup_start(
/* We're the lock holder, we own cleanup. */
F_SET(cb, WT_CURBACKUP_LOCKER);
-
+ /*
+ * If we are a query backup cursor there are no configuration settings and it will set up
+ * its own list of strings to return. We don't have to do any of the other processing. A
+ * query creates a list to return but does not create the backup file. After appending the
+ * list of IDs we are done.
+ */
+ if (F_ISSET(cb, WT_CURBACKUP_QUERYID)) {
+ ret = __backup_query_setup(session, cb);
+ goto query_done;
+ }
/*
* Create a temporary backup file. This must be opened before generating the list of targets
* in backup_config. This file will later be renamed to the correct name depending on
@@ -727,6 +781,7 @@ __backup_start(
WT_ERR(__backup_list_append(session, cb, WT_WIREDTIGER));
}
+query_done:
err:
/* Close the hot backup file. */
if (srcfs != NULL)
@@ -734,7 +789,8 @@ err:
/*
* Sync and rename the temp file into place.
*/
- if (ret == 0)
+ WT_TRET(__wt_fs_exist(session, WT_BACKUP_TMP, &exist));
+ if (ret == 0 && exist)
ret = __wt_sync_and_rename(session, &cb->bfs, WT_BACKUP_TMP, dest);
if (ret == 0) {
WT_WITH_HOTBACKUP_WRITE_LOCK(session, conn->hot_backup_list = cb->list);
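The query path added above returns only keys (the incremental ID strings) and never creates the backup file. A minimal sketch of driving it from application code, assuming incremental backup has been configured earlier (otherwise the open fails with EINVAL, as coded above); the helper name is hypothetical:

#include <stdio.h>
#include <wiredtiger.h>

/* Hypothetical helper: print the incremental backup IDs currently known. */
static int
print_incremental_ids(WT_SESSION *session)
{
    WT_CURSOR *cursor;
    int ret, tret;
    const char *idstr;

    /* Fails with EINVAL if incremental backup has never been configured. */
    if ((ret = session->open_cursor(session, "backup:query_id", NULL, NULL, &cursor)) != 0)
        return (ret);

    /* Keys only: this cursor has no values. */
    while ((ret = cursor->next(cursor)) == 0) {
        if ((ret = cursor->get_key(cursor, &idstr)) != 0)
            break;
        printf("incremental backup ID: %s\n", idstr);
    }
    if (ret == WT_NOTFOUND) /* Normal end of the list. */
        ret = 0;

    /* Closing releases the backup lock so a regular backup cursor can be opened. */
    tret = cursor->close(cursor);
    return (ret == 0 ? tret : ret);
}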
diff --git a/src/third_party/wiredtiger/src/cursor/cur_backup_incr.c b/src/third_party/wiredtiger/src/cursor/cur_backup_incr.c
index 96d0d362ff9..ea22bb40343 100644
--- a/src/third_party/wiredtiger/src/cursor/cur_backup_incr.c
+++ b/src/third_party/wiredtiger/src/cursor/cur_backup_incr.c
@@ -111,8 +111,10 @@ __curbackup_incr_next(WT_CURSOR *cursor)
WT_DECL_RET;
WT_SESSION_IMPL *session;
wt_off_t size;
+ uint64_t start_bitoff, total_len;
uint32_t raw;
const char *file;
+ bool found;
cb = (WT_CURSOR_BACKUP *)cursor;
btree = cb->incr_cursor == NULL ? NULL : ((WT_CURSOR_BTREE *)cb->incr_cursor)->btree;
@@ -144,18 +146,7 @@ __curbackup_incr_next(WT_CURSOR *cursor)
F_SET(cb, WT_CURBACKUP_INCR_INIT);
__wt_cursor_set_key(cursor, 0, size, WT_BACKUP_FILE);
} else {
- if (F_ISSET(cb, WT_CURBACKUP_INCR_INIT)) {
- /* Look for the next chunk that had modifications. */
- while (cb->bit_offset < cb->nbits)
- if (__bit_test(cb->bitstring.mem, cb->bit_offset))
- break;
- else
- ++cb->bit_offset;
-
- /* We either have this object's incremental information or we're done. */
- if (cb->bit_offset >= cb->nbits)
- WT_ERR(WT_NOTFOUND);
- } else {
+ if (!F_ISSET(cb, WT_CURBACKUP_INCR_INIT)) {
/*
* We don't have this object's incremental information, and it's not a full file copy.
* Get a list of the block modifications for the file. The block modifications are from
@@ -186,8 +177,37 @@ __curbackup_incr_next(WT_CURSOR *cursor)
WT_ERR(WT_NOTFOUND);
}
}
- __wt_cursor_set_key(cursor, cb->offset + cb->granularity * cb->bit_offset++,
- cb->granularity, WT_BACKUP_RANGE);
+ /* We have initialized incremental information. */
+ start_bitoff = cb->bit_offset;
+ total_len = cb->granularity;
+ found = false;
+ /* The bit offset can be less than or equal to but never greater than the number of bits. */
+ WT_ASSERT(session, cb->bit_offset <= cb->nbits);
+ /* Look for the next chunk that had modifications. */
+ while (cb->bit_offset < cb->nbits)
+ if (__bit_test(cb->bitstring.mem, cb->bit_offset)) {
+ found = true;
+ /*
+ * Care must be taken to leave the bit_offset field set to the next offset bit so
+ * that the next call is set to the correct offset.
+ */
+ start_bitoff = cb->bit_offset++;
+ if (F_ISSET(cb, WT_CURBACKUP_CONSOLIDATE)) {
+ while (
+ cb->bit_offset < cb->nbits && __bit_test(cb->bitstring.mem, cb->bit_offset++))
+ total_len += cb->granularity;
+ }
+ break;
+ } else
+ ++cb->bit_offset;
+
+ /* We either have this object's incremental information or we're done. */
+ if (!found)
+ WT_ERR(WT_NOTFOUND);
+ WT_ASSERT(session, cb->granularity != 0);
+ WT_ASSERT(session, total_len != 0);
+ __wt_cursor_set_key(
+ cursor, cb->offset + cb->granularity * start_bitoff, total_len, WT_BACKUP_RANGE);
}
done:
@@ -245,6 +265,11 @@ __wt_curbackup_open_incr(WT_SESSION_IMPL *session, const char *uri, WT_CURSOR *o
cb->incr_file, other_cb->incr_src->id_str);
F_SET(cb, WT_CURBACKUP_FORCE_FULL);
}
+ if (F_ISSET(other_cb, WT_CURBACKUP_CONSOLIDATE))
+ F_SET(cb, WT_CURBACKUP_CONSOLIDATE);
+ else
+ F_CLR(cb, WT_CURBACKUP_CONSOLIDATE);
+
/*
* Set up the incremental backup information, if we are not forcing a full file copy. We need an
* open cursor on the file. Open the backup checkpoint, confirming it exists.
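To make the consolidation loop above easier to follow, here is a simplified standalone model of the same scan (illustrative only: it sits outside the cursor machinery and uses its own bit test rather than __bit_test). Adjacent set bits in the modification bitmap are merged into a single offset/length range when consolidation is enabled; otherwise each set bit yields one granularity-sized range.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Return the next modified range from a bitmap of granularity-sized chunks,
 * resuming from *bit_offsetp. Returns false when no set bits remain
 * (the equivalent of WT_NOTFOUND in the cursor code).
 */
static bool
next_range(const uint8_t *bits, uint64_t nbits, uint64_t granularity, bool consolidate,
  uint64_t *bit_offsetp, uint64_t *offsetp, uint64_t *lenp)
{
    uint64_t bit, start;

    /* Skip clear bits to find the next modified chunk. */
    bit = *bit_offsetp;
    while (bit < nbits && !(bits[bit >> 3] & (1u << (bit & 7))))
        ++bit;
    if (bit >= nbits) {
        *bit_offsetp = bit;
        return (false);
    }

    start = bit++;
    if (consolidate)
        /* Extend the range while the following bits are also set. */
        while (bit < nbits && (bits[bit >> 3] & (1u << (bit & 7))))
            ++bit;

    *bit_offsetp = bit; /* Resume here on the next call. */
    *offsetp = start * granularity;
    *lenp = (consolidate ? bit - start : 1) * granularity;
    return (true);
}

int
main(void)
{
    /* Bits 0, 1, 2 and 5 modified: consolidation yields two ranges, not four. */
    uint8_t bits[] = {0x27};
    uint64_t bit = 0, off, len;

    while (next_range(bits, 8, (uint64_t)16 << 20, true, &bit, &off, &len))
        printf("offset %llu length %llu\n", (unsigned long long)off, (unsigned long long)len);
    return (0);
}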
diff --git a/src/third_party/wiredtiger/src/docs/backup.dox b/src/third_party/wiredtiger/src/docs/backup.dox
index ac18263eff0..d4b42d08e84 100644
--- a/src/third_party/wiredtiger/src/docs/backup.dox
+++ b/src/third_party/wiredtiger/src/docs/backup.dox
@@ -175,6 +175,15 @@ An example of opening the backup data source for block-based incremental backup:
@snippet ex_all.c incremental block backup
+The URI \c backup:query_id can be used to return existing block incremental identifier
+strings. It operates like a backup cursor but will return the identifier strings
+as the keys of the cursor. There are no values. As with all backup cursors, there
+can only be one backup cursor of any type open at a time.
+
+@m_if{c}
+An example of opening the backup data source to query incremental identifiers:
+@snippet ex_backup_block.c Query existing IDs
+@m_endif
@section backup_incremental Log-based Incremental backup
Once a backup has been done, it can be rolled forward incrementally by
diff --git a/src/third_party/wiredtiger/src/docs/data-sources.dox b/src/third_party/wiredtiger/src/docs/data-sources.dox
index aaee4adc04a..fe3e49db477 100644
--- a/src/third_party/wiredtiger/src/docs/data-sources.dox
+++ b/src/third_party/wiredtiger/src/docs/data-sources.dox
@@ -22,8 +22,9 @@ Some administrative tasks can be accomplished using the following special
cursor types that give access to data managed by WiredTiger:
<table>
@hrow{URI, Type, Notes}
-@row{<tt>backup:</tt>,
- backup cursor,
+@row{<tt>backup:[query_id]</tt>,
+ backup cursor (optionally only returning block incremental ids if
+ <code>query_id</code> is appended),
key=<code>string</code>\, see @ref backup for details}
@row{<code>log:</code>,
log cursor,
@@ -34,7 +35,7 @@ cursor types that give access to data managed by WiredTiger:
see @ref cursor_log for details}
@row{<tt>metadata:[create]</tt>,
metadata cursor (optionally only returning configuration strings for
- WT_SESSION::create if <code>create</code> is appended,
+ WT_SESSION::create if <code>create</code> is appended),
key=<code>string</code>\, value=<code>string</code>\,<br>
see @ref metadata for details}
@row{<tt>statistics:[\<data source URI\>]</tt>,
diff --git a/src/third_party/wiredtiger/src/include/block.h b/src/third_party/wiredtiger/src/include/block.h
index cd34baec52c..f35d09f05f3 100644
--- a/src/third_party/wiredtiger/src/include/block.h
+++ b/src/third_party/wiredtiger/src/include/block.h
@@ -231,6 +231,8 @@ struct __wt_block {
wt_off_t extend_size; /* File extended size */
wt_off_t extend_len; /* File extend chunk size */
+ bool created_during_backup; /* Created during incremental backup */
+
/* Configuration information, set when the file is opened. */
uint32_t allocfirst; /* Allocation is first-fit */
uint32_t allocsize; /* Allocation size */
diff --git a/src/third_party/wiredtiger/src/include/cursor.h b/src/third_party/wiredtiger/src/include/cursor.h
index 696da29a05c..8bc6ea43dbd 100644
--- a/src/third_party/wiredtiger/src/include/cursor.h
+++ b/src/third_party/wiredtiger/src/include/cursor.h
@@ -59,14 +59,16 @@ struct __wt_cursor_backup {
/* AUTOMATIC FLAG VALUE GENERATION START */
#define WT_CURBACKUP_CKPT_FAKE 0x001u /* Object has fake checkpoint */
-#define WT_CURBACKUP_DUP 0x002u /* Duplicated backup cursor */
-#define WT_CURBACKUP_FORCE_FULL 0x004u /* Force full file copy for this cursor */
-#define WT_CURBACKUP_FORCE_STOP 0x008u /* Force stop incremental backup */
-#define WT_CURBACKUP_HAS_CB_INFO 0x010u /* Object has checkpoint backup info */
-#define WT_CURBACKUP_INCR 0x020u /* Incremental backup cursor */
-#define WT_CURBACKUP_INCR_INIT 0x040u /* Cursor traversal initialized */
-#define WT_CURBACKUP_LOCKER 0x080u /* Hot-backup started */
-#define WT_CURBACKUP_RENAME 0x100u /* Object had a rename */
+#define WT_CURBACKUP_CONSOLIDATE 0x002u /* Consolidate returned info on this object */
+#define WT_CURBACKUP_DUP 0x004u /* Duplicated backup cursor */
+#define WT_CURBACKUP_FORCE_FULL 0x008u /* Force full file copy for this cursor */
+#define WT_CURBACKUP_FORCE_STOP 0x010u /* Force stop incremental backup */
+#define WT_CURBACKUP_HAS_CB_INFO 0x020u /* Object has checkpoint backup info */
+#define WT_CURBACKUP_INCR 0x040u /* Incremental backup cursor */
+#define WT_CURBACKUP_INCR_INIT 0x080u /* Cursor traversal initialized */
+#define WT_CURBACKUP_LOCKER 0x100u /* Hot-backup started */
+#define WT_CURBACKUP_QUERYID 0x200u /* Backup cursor for incremental ids */
+#define WT_CURBACKUP_RENAME 0x400u /* Object had a rename */
/* AUTOMATIC FLAG VALUE GENERATION STOP */
uint32_t flags;
};
diff --git a/src/third_party/wiredtiger/src/include/wiredtiger.in b/src/third_party/wiredtiger/src/include/wiredtiger.in
index 24633fe7ab0..61cbdf9e9e9 100644
--- a/src/third_party/wiredtiger/src/include/wiredtiger.in
+++ b/src/third_party/wiredtiger/src/include/wiredtiger.in
@@ -1067,29 +1067,33 @@ struct __wt_session {
* @config{incremental = (, configure the cursor for block incremental backup usage. These
* formats are only compatible with the backup data source; see @ref backup., a set of
* related configuration options defined below.}
- * @config{&nbsp;&nbsp;&nbsp;&nbsp;enabled,
- * whether to configure this backup as the starting point for a subsequent incremental
- * backup., a boolean flag; default \c false.}
- * @config{&nbsp;&nbsp;&nbsp;&nbsp;file, the
- * file name when opening a duplicate incremental backup cursor. That duplicate cursor will
- * return the block modifications relevant to the given file name., a string; default
- * empty.}
- * @config{&nbsp;&nbsp;&nbsp;&nbsp;force_stop, causes all block incremental backup
- * information to be released. This is on an open_cursor call and the resources will be
- * released when this cursor is closed. No other operations should be done on this open
- * cursor., a boolean flag; default \c false.}
- * @config{&nbsp;&nbsp;&nbsp;&nbsp;granularity,
- * this setting manages the granularity of how WiredTiger maintains modification maps
- * internally. The larger the granularity\, the smaller amount of information WiredTiger
- * need to maintain., an integer between 4KB and 2GB; default \c 16MB.}
- * @config{&nbsp;&nbsp;&nbsp;&nbsp;src_id, a string that identifies a previous checkpoint
- * backup source as the source of this incremental backup. This identifier must have
- * already been created by use of the 'this_id' configuration in an earlier backup. A
- * source id is required to begin an incremental backup., a string; default empty.}
- * @config{&nbsp;&nbsp;&nbsp;&nbsp;this_id, a string that identifies the current system
- * state as a future backup source for an incremental backup via 'src_id'. This identifier
- * is required when opening an incremental backup cursor and an error will be returned if
- * one is not provided., a string; default empty.}
+ * @config{&nbsp;&nbsp;&nbsp;&nbsp;
+ * consolidate, causes block incremental backup information to be consolidated if adjacent
+ * granularity blocks are modified. If false\, information will be returned in granularity
+ * sized blocks only. This must be set on the primary backup cursor and it applies to all
+ * files for this backup., a boolean flag; default \c false.}
+ * @config{&nbsp;&nbsp;&nbsp;&nbsp;enabled, whether to configure this backup as the starting
+ * point for a subsequent incremental backup., a boolean flag; default \c false.}
+ * @config{&nbsp;&nbsp;&nbsp;&nbsp;file, the file name when opening a duplicate incremental
+ * backup cursor. That duplicate cursor will return the block modifications relevant to the
+ * given file name., a string; default empty.}
+ * @config{&nbsp;&nbsp;&nbsp;&nbsp;force_stop,
+ * causes all block incremental backup information to be released. This is on an
+ * open_cursor call and the resources will be released when this cursor is closed. No other
+ * operations should be done on this open cursor., a boolean flag; default \c false.}
+ * @config{&nbsp;&nbsp;&nbsp;&nbsp;granularity, this setting manages the granularity of how
+ * WiredTiger maintains modification maps internally. The larger the granularity\, the
+ * smaller amount of information WiredTiger need to maintain., an integer between 4KB and
+ * 2GB; default \c 16MB.}
+ * @config{&nbsp;&nbsp;&nbsp;&nbsp;src_id, a string that identifies a
+ * previous checkpoint backup source as the source of this incremental backup. This
+ * identifier must have already been created by use of the 'this_id' configuration in an
+ * earlier backup. A source id is required to begin an incremental backup., a string;
+ * default empty.}
+ * @config{&nbsp;&nbsp;&nbsp;&nbsp;this_id, a string that identifies the
+ * current system state as a future backup source for an incremental backup via 'src_id'.
+ * This identifier is required when opening an incremental backup cursor and an error will
+ * be returned if one is not provided., a string; default empty.}
* @config{ ),,}
* @config{next_random, configure the cursor to return a pseudo-random record from the
* object when the WT_CURSOR::next method is called; valid only for row-store cursors. See
diff --git a/src/third_party/wiredtiger/test/csuite/incr_backup/main.c b/src/third_party/wiredtiger/test/csuite/incr_backup/main.c
index cfaa35bae47..cc7b99cce08 100644
--- a/src/third_party/wiredtiger/test/csuite/incr_backup/main.c
+++ b/src/third_party/wiredtiger/test/csuite/incr_backup/main.c
@@ -47,9 +47,13 @@
#define URI_MAX_LEN 32
#define URI_FORMAT "table:t%d-%d"
#define KEY_FORMAT "key-%d-%d"
+#define TABLE_FORMAT "key_format=S,value_format=u"
#define CONN_CONFIG_COMMON "timing_stress_for_test=[backup_rename]"
+#define NUM_ALLOC 5
+static const char *alloc_sizes[] = {"512B", "8K", "64K", "1M", "16M"};
+
static int verbose_level = 0;
static uint64_t seed = 0;
@@ -404,17 +408,31 @@ table_changes(WT_SESSION *session, TABLE *table)
* Create a table for the given slot.
*/
static void
-create_table(WT_SESSION *session, TABLE_INFO *tinfo, uint32_t slot)
+create_table(WT_SESSION *session, WT_RAND_STATE *rand, TABLE_INFO *tinfo, uint32_t slot)
{
- char *uri;
+ uint32_t alloc;
+ char buf[4096], *uri;
+ const char *allocstr;
testutil_assert(!TABLE_VALID(&tinfo->table[slot]));
uri = dcalloc(1, URI_MAX_LEN);
testutil_check(
__wt_snprintf(uri, URI_MAX_LEN, URI_FORMAT, (int)slot, (int)tinfo->table[slot].name_index++));
- VERBOSE(3, "create %s\n", uri);
- testutil_check(session->create(session, uri, "key_format=S,value_format=u"));
+ /*
+ * A quarter of the time use a non-default allocation size on the table. This is set
+ * independently of the granularity to stress mismatched values.
+ */
+ if (__wt_random(rand) % 4 == 0) {
+ alloc = __wt_random(rand) % NUM_ALLOC;
+ allocstr = alloc_sizes[alloc];
+ testutil_check(__wt_snprintf(buf, sizeof(buf),
+ "%s,allocation_size=%s,internal_page_max=%s,leaf_page_max=%s", TABLE_FORMAT, allocstr,
+ allocstr, allocstr));
+ } else
+ testutil_check(__wt_snprintf(buf, sizeof(buf), "%s", TABLE_FORMAT));
+ VERBOSE(3, "create %s: %s\n", uri, buf);
+ testutil_check(session->create(session, uri, buf));
tinfo->table[slot].name = uri;
tinfo->tables_in_use++;
}
@@ -463,6 +481,7 @@ base_backup(WT_CONNECTION *conn, WT_RAND_STATE *rand, const char *home, const ch
char buf[4096];
char *filename;
char granularity_unit;
+ const char *cons;
nfiles = 0;
@@ -486,9 +505,13 @@ base_backup(WT_CONNECTION *conn, WT_RAND_STATE *rand, const char *home, const ch
granularity_unit = 'M';
granularity += 1;
}
+ if (__wt_random(rand) % 2 == 0)
+ cons = ",consolidate=true";
+ else
+ cons = ",consolidate=false";
testutil_check(__wt_snprintf(buf, sizeof(buf),
- "incremental=(granularity=%" PRIu32 "%c,enabled=true,this_id=ID%" PRIu32 ")", granularity,
- granularity_unit, tinfo->full_backup_number));
+ "incremental=(granularity=%" PRIu32 "%c,enabled=true,%s,this_id=ID%" PRIu32 ")", granularity,
+ granularity_unit, cons, tinfo->full_backup_number));
VERBOSE(3, "open_cursor(session, \"backup:\", NULL, \"%s\", &cursor)\n", buf);
testutil_check(session->open_cursor(session, "backup:", NULL, buf, &cursor));
@@ -734,11 +757,11 @@ main(int argc, char *argv[])
WT_RAND_STATE rnd;
WT_SESSION *session;
uint32_t file_max, iter, max_value_size, next_checkpoint, rough_size, slot;
- int ch, ncheckpoints, status;
+ int ch, ncheckpoints, nreopens, status;
const char *backup_verbose, *working_dir;
char conf[1024], home[1024], backup_check[1024], backup_dir[1024], command[4096];
- ncheckpoints = 0;
+ ncheckpoints = nreopens = 0;
(void)testutil_set_progname(argv);
custom_die = die; /* Set our own abort handler */
WT_CLEAR(tinfo);
@@ -840,7 +863,7 @@ main(int argc, char *argv[])
*/
slot = __wt_random(&rnd) % tinfo.table_count;
if (!TABLE_VALID(&tinfo.table[slot]))
- create_table(session, &tinfo, slot);
+ create_table(session, &rnd, &tinfo, slot);
else if (__wt_random(&rnd) % 3 == 0 && do_rename)
rename_table(session, &tinfo, slot);
else if (do_drop)
@@ -858,6 +881,15 @@ main(int argc, char *argv[])
}
}
+ /* Close and reopen the connection once in a while. */
+ if (__wt_random(&rnd) % 10 == 0) {
+ VERBOSE(2, "Close and reopen the connection %d\n", nreopens);
+ testutil_check(conn->close(conn, NULL));
+ testutil_check(wiredtiger_open(home, NULL, conf, &conn));
+ testutil_check(conn->open_session(conn, NULL, NULL, &session));
+ nreopens++;
+ }
+
if (iter == 0) {
base_backup(conn, &rnd, home, backup_dir, &tinfo, &active);
check_backup(backup_dir, backup_check, &tinfo);
diff --git a/src/third_party/wiredtiger/test/format/backup.c b/src/third_party/wiredtiger/test/format/backup.c
index 9454032160c..b9b1f2a0a70 100644
--- a/src/third_party/wiredtiger/test/format/backup.c
+++ b/src/third_party/wiredtiger/test/format/backup.c
@@ -567,9 +567,10 @@ backup(void *arg)
else
active_now = &active[0];
src_id = g.backup_id - 1;
+ /* Use consolidation too. */
testutil_check(__wt_snprintf(cfg, sizeof(cfg),
- "incremental=(enabled,src_id=%" PRIu64 ",this_id=%" PRIu64 ")", src_id,
- g.backup_id));
+ "incremental=(enabled,consolidate=true,src_id=%" PRIu64 ",this_id=%" PRIu64 ")",
+ src_id, g.backup_id));
/* Restart a full incremental every once in a while. */
full = false;
incr_full = mmrand(NULL, 1, 8) == 1;
diff --git a/src/third_party/wiredtiger/test/suite/test_backup11.py b/src/third_party/wiredtiger/test/suite/test_backup11.py
index 76fa70c4b2b..3e6bd347b03 100644
--- a/src/third_party/wiredtiger/test/suite/test_backup11.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup11.py
@@ -44,7 +44,6 @@ class test_backup11(wttest.WiredTigerTestCase, suite_subprocess):
uri="table:test"
def add_data(self):
-
c = self.session.open_cursor(self.uri)
for i in range(0, self.nops):
num = i + (self.mult * self.nops)
@@ -136,6 +135,16 @@ class test_backup11(wttest.WiredTigerTestCase, suite_subprocess):
self.pr("Opened backup for error testing")
# Now test all the error cases with an incremental primary open.
+ # - We cannot specify consolidate on the duplicate cursor.
+ config = 'incremental=(consolidate=true,file=test.wt)'
+ msg = "/consolidation can only be specified on a primary/"
+ self.pr("Test consolidation on a dup")
+ self.pr("=========")
+ # Test multiple duplicate backup cursors.
+ self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
+ lambda:self.assertEquals(self.session.open_cursor(None,
+ bkup_c, config), 0), msg)
+
# - We cannot make multiple incremental duplcate backup cursors.
# - We cannot duplicate the duplicate backup cursor.
config = 'incremental=(file=test.wt)'
diff --git a/src/third_party/wiredtiger/test/suite/test_backup12.py b/src/third_party/wiredtiger/test/suite/test_backup12.py
index f5fadcee393..90ca31aa76e 100644
--- a/src/third_party/wiredtiger/test/suite/test_backup12.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup12.py
@@ -51,7 +51,6 @@ class test_backup12(wttest.WiredTigerTestCase, suite_subprocess):
bigval = 'Value' * 100
def add_data(self, uri):
-
c = self.session.open_cursor(uri)
for i in range(0, self.nops):
num = i + (self.mult * self.nops)
diff --git a/src/third_party/wiredtiger/test/suite/test_backup13.py b/src/third_party/wiredtiger/test/suite/test_backup13.py
index 445cbaa6dc1..46d360c09bb 100644
--- a/src/third_party/wiredtiger/test/suite/test_backup13.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup13.py
@@ -49,7 +49,6 @@ class test_backup13(wttest.WiredTigerTestCase, suite_subprocess):
bigval = 'Value' * 100
def add_data(self, uri):
-
c = self.session.open_cursor(uri)
for i in range(0, self.nops):
num = i + (self.mult * self.nops)
diff --git a/src/third_party/wiredtiger/test/suite/test_backup16.py b/src/third_party/wiredtiger/test/suite/test_backup16.py
index 0265c39b73f..82df7c39307 100644
--- a/src/third_party/wiredtiger/test/suite/test_backup16.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup16.py
@@ -63,7 +63,6 @@ class test_backup16(wttest.WiredTigerTestCase, suite_subprocess):
bigval = 'Value' * 10
def add_data(self, uri):
-
c = self.session.open_cursor(uri)
for i in range(0, self.nops):
num = i + (self.mult * self.nops)
diff --git a/src/third_party/wiredtiger/test/suite/test_backup17.py b/src/third_party/wiredtiger/test/suite/test_backup17.py
new file mode 100644
index 00000000000..5fa250fd485
--- /dev/null
+++ b/src/third_party/wiredtiger/test/suite/test_backup17.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+#
+# Public Domain 2014-2020 MongoDB, Inc.
+# Public Domain 2008-2014 WiredTiger, Inc.
+#
+# This is free and unencumbered software released into the public domain.
+#
+# Anyone is free to copy, modify, publish, use, compile, sell, or
+# distribute this software, either in source code form or as a compiled
+# binary, for any purpose, commercial or non-commercial, and by any
+# means.
+#
+# In jurisdictions that recognize copyright laws, the author or authors
+# of this software dedicate any and all copyright interest in the
+# software to the public domain. We make this dedication for the benefit
+# of the public at large and to the detriment of our heirs and
+# successors. We intend this dedication to be an overt act of
+# relinquishment in perpetuity of all present and future rights to this
+# software under copyright law.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+import wiredtiger, wttest
+import os, shutil
+from helper import compare_files
+from suite_subprocess import suite_subprocess
+from wtdataset import simple_key
+from wtscenario import make_scenarios
+
+# test_backup17.py
+# Test cursor backup with a block-based incremental cursor and consolidate.
+class test_backup17(wttest.WiredTigerTestCase, suite_subprocess):
+ dir='backup.dir' # Backup directory name
+ gran="100K"
+ granval=100*1024
+ logmax="100K"
+ uri="table:test"
+ uri2="table:test2"
+ nops=1000
+ mult=0
+
+ conn_config='cache_size=1G,log=(enabled,file_max=%s)' % logmax
+
+ pfx = 'test_backup'
+ # Set the key and value big enough that we modify a few blocks.
+ bigkey = 'Key' * 100
+ bigval = 'Value' * 100
+
+ def add_data(self, uri):
+ c = self.session.open_cursor(uri)
+ for i in range(0, self.nops):
+ num = i + (self.mult * self.nops)
+ key = self.bigkey + str(num)
+ val = self.bigval + str(num)
+ c[key] = val
+ self.session.checkpoint()
+ c.close()
+
+ def take_incr_backup(self, id, consolidate):
+ # Open the backup data source for incremental backup.
+ buf = 'incremental=(src_id="ID' + str(id - 1) + '",this_id="ID' + str(id) + '"'
+ if consolidate:
+ buf += ',consolidate=true'
+ buf += ')'
+ bkup_c = self.session.open_cursor('backup:', None, buf)
+ lens = []
+ saw_multiple = False
+ while True:
+ ret = bkup_c.next()
+ if ret != 0:
+ break
+ newfile = bkup_c.get_key()
+ config = 'incremental=(file=' + newfile + ')'
+ self.pr('Open incremental cursor with ' + config)
+ dup_cnt = 0
+ dupc = self.session.open_cursor(None, bkup_c, config)
+ while True:
+ ret = dupc.next()
+ if ret != 0:
+ break
+ incrlist = dupc.get_keys()
+ offset = incrlist[0]
+ size = incrlist[1]
+ curtype = incrlist[2]
+ # 1 is WT_BACKUP_FILE
+ # 2 is WT_BACKUP_RANGE
+ self.assertTrue(curtype == 1 or curtype == 2)
+ if curtype == 1:
+ self.pr('Copy from: ' + newfile + ' (' + str(size) + ') to ' + self.dir)
+ shutil.copy(newfile, self.dir)
+ else:
+ self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size))
+ lens.append(size)
+ rfp = open(newfile, "r+b")
+ wfp = open(self.dir + '/' + newfile, "w+b")
+ rfp.seek(offset, 0)
+ wfp.seek(offset, 0)
+ if size > self.granval:
+ saw_multiple = True
+ buf = rfp.read(size)
+ wfp.write(buf)
+ rfp.close()
+ wfp.close()
+ dup_cnt += 1
+ dupc.close()
+ self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
+ bkup_c.close()
+ if consolidate:
+ self.assertTrue(saw_multiple)
+ else:
+ self.assertFalse(saw_multiple)
+ return lens
+
+ def test_backup17(self):
+
+ self.session.create(self.uri, "key_format=S,value_format=S")
+ self.session.create(self.uri2, "key_format=S,value_format=S")
+ self.add_data(self.uri)
+ self.add_data(self.uri2)
+ self.mult += 1
+
+ # Open up the backup cursor. This causes a new log file to be created.
+ # That log file is not part of the list returned. This is a full backup
+ # primary cursor with incremental configured.
+ os.mkdir(self.dir)
+ config = 'incremental=(enabled,granularity=%s,this_id="ID1")' % self.gran
+ bkup_c = self.session.open_cursor('backup:', None, config)
+
+ # Now copy the files returned by the backup cursor.
+ all_files = []
+ while True:
+ ret = bkup_c.next()
+ if ret != 0:
+ break
+ newfile = bkup_c.get_key()
+ sz = os.path.getsize(newfile)
+ self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir)
+ shutil.copy(newfile, self.dir)
+ all_files.append(newfile)
+ self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
+ bkup_c.close()
+
+ # This is the main part of the test for consolidate. Add data to the first table.
+ # Then perform the incremental backup with consolidate off (the default). Then add the
+ # same data to the second table. Perform an incremental backup with consolidate on and
+ # verify we get fewer, consolidated values.
+ self.add_data(self.uri)
+ uri1_lens = self.take_incr_backup(2, False)
+
+ self.add_data(self.uri2)
+ uri2_lens = self.take_incr_backup(3, True)
+
+ # Assert that we recorded fewer lengths on the consolidated backup.
+ self.assertLess(len(uri2_lens), len(uri1_lens))
+ # Assert that we recorded the same total data length for both.
+ self.assertEqual(sum(uri2_lens), sum(uri1_lens))
+
+if __name__ == '__main__':
+ wttest.run()
diff --git a/src/third_party/wiredtiger/test/suite/test_backup18.py b/src/third_party/wiredtiger/test/suite/test_backup18.py
new file mode 100644
index 00000000000..2034c8ac6d1
--- /dev/null
+++ b/src/third_party/wiredtiger/test/suite/test_backup18.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python
+#
+# Public Domain 2014-2020 MongoDB, Inc.
+# Public Domain 2008-2014 WiredTiger, Inc.
+#
+# This is free and unencumbered software released into the public domain.
+#
+# Anyone is free to copy, modify, publish, use, compile, sell, or
+# distribute this software, either in source code form or as a compiled
+# binary, for any purpose, commercial or non-commercial, and by any
+# means.
+#
+# In jurisdictions that recognize copyright laws, the author or authors
+# of this software dedicate any and all copyright interest in the
+# software to the public domain. We make this dedication for the benefit
+# of the public at large and to the detriment of our heirs and
+# successors. We intend this dedication to be an overt act of
+# relinquishment in perpetuity of all present and future rights to this
+# software under copyright law.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+import wiredtiger, wttest
+import os, shutil
+from helper import compare_files
+from suite_subprocess import suite_subprocess
+from wtdataset import simple_key
+from wtscenario import make_scenarios
+
+# test_backup18.py
+# Test backup:query_id API.
+class test_backup18(wttest.WiredTigerTestCase, suite_subprocess):
+ conn_config= 'cache_size=1G,log=(enabled,file_max=100K)'
+ mult=0
+ nops=100
+ pfx = 'test_backup'
+ uri="table:test"
+
+ def id_check(self, expect):
+ got = []
+ bkup_c = self.session.open_cursor('backup:query_id', None, None)
+ # We cannot use 'for idstr in bkup_c:' usage because backup cursors don't have
+ # values and adding in get_values returns ENOTSUP and causes the usage to fail.
+ while True:
+ ret = bkup_c.next()
+ if ret != 0:
+ break
+ idstr = bkup_c.get_key()
+ got.append(idstr)
+ bkup_c.close()
+ got.sort()
+ expect.sort()
+ self.assertEqual(got, expect)
+
+ def add_data(self):
+ c = self.session.open_cursor(self.uri)
+ for i in range(0, self.nops):
+ num = i + (self.mult * self.nops)
+ key = 'key' + str(num)
+ val = 'value' + str(num)
+ c[key] = val
+ self.mult += 1
+ self.session.checkpoint()
+ c.close()
+
+ def test_backup18(self):
+ # We're not taking actual backups in this test, but we do want a table to
+ # exist for the backup cursor to generate something.
+ self.session.create(self.uri, "key_format=S,value_format=S")
+ self.add_data()
+
+ msg = "/is not configured/"
+ self.pr("Query IDs before any backup")
+ self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
+ lambda:self.assertEquals(self.session.open_cursor('backup:query_id',
+ None, None), 0), msg)
+
+ # Open up the backup cursor.
+ config = 'incremental=(enabled,this_id="ID1")'
+ bkup_c = self.session.open_cursor('backup:', None, config)
+
+ # Try to open the query cursor as a duplicate on the backup.
+ msg = "/should be passed either/"
+ self.pr("Query IDs as duplicate cursor")
+ self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
+ lambda:self.assertEquals(self.session.open_cursor('backup:query_id',
+ bkup_c, None), 0), msg)
+
+ # Try to open the query cursor while backup cursor is open.
+ msg = "/there is already a backup/"
+ self.pr("Query IDs while backup cursor open")
+ self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
+ lambda:self.assertEquals(self.session.open_cursor('backup:query_id',
+ None, None), 0), msg)
+ bkup_c.close()
+
+ # Check a few basic cases.
+ self.pr("Query IDs basic cases")
+ expect = ["ID1"]
+ self.id_check(expect)
+
+ config = 'incremental=(enabled,src_id="ID1",this_id="ID2")'
+ bkup_c = self.session.open_cursor('backup:', None, config)
+ bkup_c.close()
+ expect = ["ID1", "ID2"]
+ self.id_check(expect)
+
+ config = 'incremental=(enabled,src_id="ID2",this_id="ID3")'
+ bkup_c = self.session.open_cursor('backup:', None, config)
+ bkup_c.close()
+ expect = ["ID2", "ID3"]
+ self.id_check(expect)
+
+ self.reopen_conn()
+ self.pr("Query after reopen")
+ expect = ["ID2", "ID3"]
+ self.id_check(expect)
+
+ # Force stop and then recheck. Incremental is no longer configured.
+ msg = "/is not configured/"
+ self.pr("Query after force stop")
+ config = 'incremental=(force_stop=true)'
+ bkup_c = self.session.open_cursor('backup:', None, config)
+ bkup_c.close()
+ self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
+ lambda:self.assertEquals(self.session.open_cursor('backup:query_id',
+ None, None), 0), msg)
+
+if __name__ == '__main__':
+ wttest.run()