summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLuke Chen <luke.chen@mongodb.com>2020-05-07 18:54:04 +1000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2020-05-07 09:36:48 +0000
commit76a0765f97040553d0bf4e20c106e94714180520 (patch)
treee75210e268bd4b38c5dee6dee7a9992fa13356dc
parentbf3227e11dd689044ff4555c823c682899f41cf9 (diff)
downloadmongo-76a0765f97040553d0bf4e20c106e94714180520.tar.gz
Import wiredtiger: 5de95caf8b4514906b38e10e2063592907a5b3e6 from branch mongodb-4.2
ref: 2a9cabc310..5de95caf8b for: 4.2.7 WT-4954 Document duplicate backup cursors WT-5212 Backup data validation tests WT-5214 Verify potential incremental failures WT-5215 Stress testing of incremental backup WT-5246 Update WiredTiger backup documentation WT-5589 force_stop on duplicate cursor open not returning error WT-5624 Incremental unit test should use offset/length ranges WT-5695 Fixed incremental backup example to use O_CREAT in the backup range case WT-5697 Dropping or renaming tables returns EBUSY in incremental backup test WT-5699 Refactor incremental backup RANGE code WT-5719 Incremental backup metadata should quote the ID string WT-5722 Incremental backup should do a name check on identifiers WT-5834 Incremental backup returning too large offset WT-5914 Only configure log-incremental backup if archiving is off in test/format WT-5989 Support arguments in workgen WT-5999 Update format so it's possible for it to restart on an existing database
-rw-r--r--src/third_party/wiredtiger/SConstruct16
-rwxr-xr-xsrc/third_party/wiredtiger/bench/workgen/runner/compress_ratio.py2
-rwxr-xr-xsrc/third_party/wiredtiger/bench/workgen/runner/evict-btree-lookaside.py3
-rwxr-xr-xsrc/third_party/wiredtiger/bench/workgen/runner/example_simple.py19
-rw-r--r--src/third_party/wiredtiger/bench/workgen/runner/example_txn.py4
-rw-r--r--src/third_party/wiredtiger/bench/workgen/runner/insert_stress.py2
-rwxr-xr-xsrc/third_party/wiredtiger/bench/workgen/runner/insert_test.py2
-rwxr-xr-xsrc/third_party/wiredtiger/bench/workgen/runner/maintain_low_dirty_cache.py2
-rwxr-xr-xsrc/third_party/wiredtiger/bench/workgen/runner/multi_btree_heavy_stress.py2
-rwxr-xr-xsrc/third_party/wiredtiger/bench/workgen/runner/read_write_storms.py4
-rwxr-xr-xsrc/third_party/wiredtiger/bench/workgen/runner/read_write_sync_long.py4
-rwxr-xr-xsrc/third_party/wiredtiger/bench/workgen/runner/read_write_sync_short.py4
-rwxr-xr-xsrc/third_party/wiredtiger/bench/workgen/runner/runner/__init__.py6
-rw-r--r--src/third_party/wiredtiger/bench/workgen/runner/small_btree.py2
-rw-r--r--src/third_party/wiredtiger/bench/workgen/runner/small_btree_reopen.py2
-rwxr-xr-xsrc/third_party/wiredtiger/bench/workgen/runner/split_stress.py4
-rw-r--r--src/third_party/wiredtiger/bench/workgen/workgen.swig56
-rwxr-xr-xsrc/third_party/wiredtiger/bench/workgen/wtperf.py8
-rw-r--r--src/third_party/wiredtiger/dist/s_string.ok8
-rw-r--r--src/third_party/wiredtiger/examples/c/ex_all.c17
-rw-r--r--src/third_party/wiredtiger/examples/c/ex_backup_block.c15
-rw-r--r--src/third_party/wiredtiger/examples/java/com/wiredtiger/examples/ex_all.java29
-rw-r--r--src/third_party/wiredtiger/import.data2
-rw-r--r--src/third_party/wiredtiger/src/block/block_ckpt.c27
-rw-r--r--src/third_party/wiredtiger/src/cursor/cur_backup.c25
-rw-r--r--src/third_party/wiredtiger/src/cursor/cur_backup_incr.c96
-rw-r--r--src/third_party/wiredtiger/src/docs/backup.dox95
-rw-r--r--src/third_party/wiredtiger/src/docs/spell.ok2
-rw-r--r--src/third_party/wiredtiger/src/include/extern.h2
-rw-r--r--src/third_party/wiredtiger/src/meta/meta_ckpt.c2
-rw-r--r--src/third_party/wiredtiger/src/schema/schema_util.c59
-rw-r--r--src/third_party/wiredtiger/src/txn/txn_ckpt.c2
-rw-r--r--src/third_party/wiredtiger/src/txn/txn_nsnap.c2
-rw-r--r--src/third_party/wiredtiger/test/csuite/Makefile.am4
-rw-r--r--src/third_party/wiredtiger/test/csuite/incr_backup/main.c870
-rwxr-xr-xsrc/third_party/wiredtiger/test/csuite/incr_backup/smoke.sh12
-rwxr-xr-xsrc/third_party/wiredtiger/test/evergreen.yml15
-rw-r--r--src/third_party/wiredtiger/test/format/CONFIG.endian10
-rw-r--r--src/third_party/wiredtiger/test/format/CONFIG.stress12
-rw-r--r--src/third_party/wiredtiger/test/format/Makefile.am7
-rw-r--r--src/third_party/wiredtiger/test/format/backup.c364
-rw-r--r--src/third_party/wiredtiger/test/format/bulk.c55
-rw-r--r--src/third_party/wiredtiger/test/format/checkpoint.c120
-rw-r--r--src/third_party/wiredtiger/test/format/config.c655
-rw-r--r--src/third_party/wiredtiger/test/format/config.h346
-rw-r--r--src/third_party/wiredtiger/test/format/config_compat.c92
-rw-r--r--src/third_party/wiredtiger/test/format/config_compat.sed89
-rw-r--r--src/third_party/wiredtiger/test/format/format.h94
-rw-r--r--src/third_party/wiredtiger/test/format/format.i73
-rwxr-xr-xsrc/third_party/wiredtiger/test/format/format.sh17
-rw-r--r--src/third_party/wiredtiger/test/format/kv.c282
-rw-r--r--src/third_party/wiredtiger/test/format/ops.c176
-rw-r--r--src/third_party/wiredtiger/test/format/rebalance.c28
-rw-r--r--src/third_party/wiredtiger/test/format/recover.sh2
-rw-r--r--src/third_party/wiredtiger/test/format/salvage.c41
-rwxr-xr-xsrc/third_party/wiredtiger/test/format/smoke.sh18
-rw-r--r--src/third_party/wiredtiger/test/format/snap.c14
-rw-r--r--src/third_party/wiredtiger/test/format/t.c218
-rw-r--r--src/third_party/wiredtiger/test/format/util.c558
-rw-r--r--src/third_party/wiredtiger/test/format/vt21
-rw-r--r--src/third_party/wiredtiger/test/format/vt.suppress29
-rw-r--r--src/third_party/wiredtiger/test/format/wts.c81
-rw-r--r--src/third_party/wiredtiger/test/suite/test_backup11.py147
-rw-r--r--src/third_party/wiredtiger/test/suite/test_backup12.py118
-rw-r--r--src/third_party/wiredtiger/test/suite/test_backup13.py168
-rw-r--r--src/third_party/wiredtiger/test/suite/test_backup14.py367
66 files changed, 4138 insertions, 1490 deletions
diff --git a/src/third_party/wiredtiger/SConstruct b/src/third_party/wiredtiger/SConstruct
index ab5f3ab49cc..ff3fa55a5a4 100644
--- a/src/third_party/wiredtiger/SConstruct
+++ b/src/third_party/wiredtiger/SConstruct
@@ -488,22 +488,6 @@ t = env.Program("t_fops",
LIBS=[wtlib, shim, testutil] + wtlibs)
Default(t)
-t = env.Program("t_format",
- ["test/format/backup.c",
- "test/format/bulk.c",
- "test/format/compact.c",
- "test/format/config.c",
- "test/format/ops.c",
- "test/format/rebalance.c",
- "test/format/random.c",
- "test/format/salvage.c",
- "test/format/snap.c",
- "test/format/t.c",
- "test/format/util.c",
- "test/format/wts.c"],
- LIBS=[wtlib, shim, testutil] + wtlibs)
-Default(t)
-
t = env.Program("t_huge",
"test/huge/huge.c",
LIBS=[wtlib, shim, testutil] + wtlibs)
diff --git a/src/third_party/wiredtiger/bench/workgen/runner/compress_ratio.py b/src/third_party/wiredtiger/bench/workgen/runner/compress_ratio.py
index 8fa92170d72..7e1bb9af166 100755
--- a/src/third_party/wiredtiger/bench/workgen/runner/compress_ratio.py
+++ b/src/third_party/wiredtiger/bench/workgen/runner/compress_ratio.py
@@ -97,7 +97,7 @@ compression_opts = {
#
#conn_config += extensions_config(['compressors/snappy'])
-conn = wiredtiger_open("WT_TEST", conn_config)
+conn = context.wiredtiger_open(conn_config)
s = conn.open_session()
tables = []
diff --git a/src/third_party/wiredtiger/bench/workgen/runner/evict-btree-lookaside.py b/src/third_party/wiredtiger/bench/workgen/runner/evict-btree-lookaside.py
index fd9cfd51fb6..4753a45fa59 100755
--- a/src/third_party/wiredtiger/bench/workgen/runner/evict-btree-lookaside.py
+++ b/src/third_party/wiredtiger/bench/workgen/runner/evict-btree-lookaside.py
@@ -79,11 +79,10 @@ from wiredtiger import *
from workgen import *
context = Context()
-homedir = "WT_TEST"
conn_config = "cache_size=1G,checkpoint=(wait=60,log_size=2GB),\
eviction=(threads_min=12,threads_max=12),log=(enabled=true),session_max=800,\
eviction_target=60,statistics=(fast),statistics_log=(wait=1,json)"# explicitly added
-conn = wiredtiger_open(homedir, "create," + conn_config)
+conn = context.wiredtiger_open("create," + conn_config)
s = conn.open_session("")
wtperf_table_config = "key_format=S,value_format=S," +\
diff --git a/src/third_party/wiredtiger/bench/workgen/runner/example_simple.py b/src/third_party/wiredtiger/bench/workgen/runner/example_simple.py
index eb21d6c3d52..2575c872ccb 100755
--- a/src/third_party/wiredtiger/bench/workgen/runner/example_simple.py
+++ b/src/third_party/wiredtiger/bench/workgen/runner/example_simple.py
@@ -31,7 +31,9 @@ from runner import *
from wiredtiger import *
from workgen import *
-def show(tname):
+def show(tname, s, args):
+ if not args.verbose:
+ return
print('')
print('<><><><> ' + tname + ' <><><><>')
c = s.open_cursor(tname, None)
@@ -42,7 +44,16 @@ def show(tname):
c.close()
context = Context()
-conn = wiredtiger_open("WT_TEST", "create,cache_size=1G")
+
+# Using the context's wiredtiger_open() method has benefits:
+# * there is a default home directory (WT_TEST), which is automatically cleared before the open.
+# * the args on the python command line are parsed, allowing for:
+# --home homedir
+# --keep (don't remove homedir before starting)
+# --verbose
+# and the ability to add additional command line arguments.
+
+conn = context.wiredtiger_open("create,cache_size=1G")
s = conn.open_session()
tname = 'table:simple'
s.create(tname, 'key_format=S,value_format=S')
@@ -51,9 +62,9 @@ ops = Operation(Operation.OP_INSERT, Table(tname), Key(Key.KEYGEN_APPEND, 10), V
thread = Thread(ops)
workload = Workload(context, thread)
workload.run(conn)
-show(tname)
+show(tname, s, context.args)
thread = Thread(ops * 5)
workload = Workload(context, thread)
workload.run(conn)
-show(tname)
+show(tname, s, context.args)
diff --git a/src/third_party/wiredtiger/bench/workgen/runner/example_txn.py b/src/third_party/wiredtiger/bench/workgen/runner/example_txn.py
index 14fd6b9ac77..9f0d2b10079 100644
--- a/src/third_party/wiredtiger/bench/workgen/runner/example_txn.py
+++ b/src/third_party/wiredtiger/bench/workgen/runner/example_txn.py
@@ -31,7 +31,8 @@ from runner import *
from wiredtiger import *
from workgen import *
-conn = wiredtiger_open("WT_TEST", "create,cache_size=500MB")
+context = Context()
+conn = context.wiredtiger_open("create,cache_size=500MB")
s = conn.open_session()
tname = "table:test"
s.create(tname, 'key_format=S,value_format=S')
@@ -39,7 +40,6 @@ table = Table(tname)
table.options.key_size = 20
table.options.value_size = 100
-context = Context()
op = Operation(Operation.OP_INSERT, table)
thread = Thread(op * 500000)
pop_workload = Workload(context, thread)
diff --git a/src/third_party/wiredtiger/bench/workgen/runner/insert_stress.py b/src/third_party/wiredtiger/bench/workgen/runner/insert_stress.py
index 6e373c0e8b5..e43d8045967 100644
--- a/src/third_party/wiredtiger/bench/workgen/runner/insert_stress.py
+++ b/src/third_party/wiredtiger/bench/workgen/runner/insert_stress.py
@@ -34,7 +34,7 @@ from workgen import *
context = Context()
conn_config="create,cache_size=4GB,session_max=1000,eviction=(threads_min=4,threads_max=8),log=(enabled=false),transaction_sync=(enabled=false),checkpoint_sync=true,checkpoint=(wait=10),statistics=(fast),statistics_log=(json,wait=1)"
table_config="allocation_size=4k,memory_page_max=10MB,prefix_compression=false,split_pct=90,leaf_page_max=32k,internal_page_max=16k,type=file,block_compressor=snappy"
-conn = wiredtiger_open("WT_TEST", conn_config)
+conn = context.wiredtiger_open(conn_config)
s = conn.open_session()
tname = "file:test.wt"
table_config="key_format=S,value_format=S,allocation_size=4k,memory_page_max=10MB,prefix_compression=false,split_pct=90,leaf_page_max=32k,leaf_value_max=64MB,internal_page_max=16k,type=file,block_compressor=snappy"
diff --git a/src/third_party/wiredtiger/bench/workgen/runner/insert_test.py b/src/third_party/wiredtiger/bench/workgen/runner/insert_test.py
index 4d9ce0f8142..0962f17d3ec 100755
--- a/src/third_party/wiredtiger/bench/workgen/runner/insert_test.py
+++ b/src/third_party/wiredtiger/bench/workgen/runner/insert_test.py
@@ -55,7 +55,7 @@ def expectException(expr):
raise Exception("missing expected exception")
context = Context()
-conn = wiredtiger_open("WT_TEST", "create,cache_size=1G")
+conn = context.wiredtiger_open("create,cache_size=1G")
s = conn.open_session()
tname0 = tablename(0)
tname1 = tablename(1)
diff --git a/src/third_party/wiredtiger/bench/workgen/runner/maintain_low_dirty_cache.py b/src/third_party/wiredtiger/bench/workgen/runner/maintain_low_dirty_cache.py
index 8e029b4c8c9..114b5e474e7 100755
--- a/src/third_party/wiredtiger/bench/workgen/runner/maintain_low_dirty_cache.py
+++ b/src/third_party/wiredtiger/bench/workgen/runner/maintain_low_dirty_cache.py
@@ -81,7 +81,7 @@ context = Context()
conn_config="create,cache_size=2GB,session_max=1000,eviction=(threads_min=4,threads_max=8),log=(enabled=false),transaction_sync=(enabled=false),checkpoint_sync=true,checkpoint=(wait=8),statistics=(fast),statistics_log=(json,wait=1)"
table_config="allocation_size=4k,memory_page_max=10MB,prefix_compression=false,split_pct=90,leaf_page_max=32k,internal_page_max=16k,type=file,block_compressor=snappy"
conn_config += extensions_config(['compressors/snappy'])
-conn = wiredtiger_open("WT_TEST", conn_config)
+conn = context.wiredtiger_open(conn_config)
s = conn.open_session()
tables = []
diff --git a/src/third_party/wiredtiger/bench/workgen/runner/multi_btree_heavy_stress.py b/src/third_party/wiredtiger/bench/workgen/runner/multi_btree_heavy_stress.py
index 8160e362d7c..ecd99bb9502 100755
--- a/src/third_party/wiredtiger/bench/workgen/runner/multi_btree_heavy_stress.py
+++ b/src/third_party/wiredtiger/bench/workgen/runner/multi_btree_heavy_stress.py
@@ -80,7 +80,7 @@ context = Context()
conn_config="create,cache_size=1GB,session_max=1000,eviction=(threads_min=4,threads_max=8),log=(enabled=false),transaction_sync=(enabled=false),checkpoint_sync=true,checkpoint=(wait=60),statistics=(fast),statistics_log=(json,wait=1)"
table_config="allocation_size=4k,memory_page_max=10MB,prefix_compression=false,split_pct=90,leaf_page_max=32k,internal_page_max=16k,type=file,block_compressor=snappy"
conn_config += extensions_config(['compressors/snappy'])
-conn = wiredtiger_open("WT_TEST", conn_config)
+conn = context.wiredtiger_open(conn_config)
s = conn.open_session()
tables = []
diff --git a/src/third_party/wiredtiger/bench/workgen/runner/read_write_storms.py b/src/third_party/wiredtiger/bench/workgen/runner/read_write_storms.py
index 7f850f51b82..612c4eada2f 100755
--- a/src/third_party/wiredtiger/bench/workgen/runner/read_write_storms.py
+++ b/src/third_party/wiredtiger/bench/workgen/runner/read_write_storms.py
@@ -37,7 +37,7 @@ from workgen import *
context = Context()
conn_config = ""
conn_config += ",cache_size=2GB,eviction=(threads_max=8),log=(enabled=true),session_max=250,statistics=(fast),statistics_log=(wait=1,json),io_capacity=(total=30M)" # explicitly added
-conn = wiredtiger_open("WT_TEST", "create," + conn_config)
+conn = context.wiredtiger_open("create," + conn_config)
s = conn.open_session("")
wtperf_table_config = "key_format=S,value_format=S," +\
@@ -140,5 +140,5 @@ workload.options.warmup=0
workload.options.sample_interval_ms = 1000
workload.run(conn)
-latency_filename = "WT_TEST/latency.out"
+latency_filename = context.args.home + "/latency.out"
latency.workload_latency(workload, latency_filename)
diff --git a/src/third_party/wiredtiger/bench/workgen/runner/read_write_sync_long.py b/src/third_party/wiredtiger/bench/workgen/runner/read_write_sync_long.py
index 54c03363a8f..4a78c786a14 100755
--- a/src/third_party/wiredtiger/bench/workgen/runner/read_write_sync_long.py
+++ b/src/third_party/wiredtiger/bench/workgen/runner/read_write_sync_long.py
@@ -38,7 +38,7 @@ from workgen import *
context = Context()
conn_config = ""
conn_config += ",cache_size=2GB,eviction=(threads_max=8),log=(enabled=true),session_max=250,statistics=(fast),statistics_log=(wait=1,json)" # explicitly added
-conn = wiredtiger_open("WT_TEST", "create," + conn_config)
+conn = context.wiredtiger_open("create," + conn_config)
s = conn.open_session("")
wtperf_table_config = "key_format=S,value_format=S," +\
@@ -132,5 +132,5 @@ workload.options.warmup=0
workload.options.sample_interval_ms = 1000
workload.run(conn)
-latency_filename = "WT_TEST/latency.out"
+latency_filename = context.args.home + "/latency.out"
latency.workload_latency(workload, latency_filename)
diff --git a/src/third_party/wiredtiger/bench/workgen/runner/read_write_sync_short.py b/src/third_party/wiredtiger/bench/workgen/runner/read_write_sync_short.py
index 659deac3fe7..b851ab42811 100755
--- a/src/third_party/wiredtiger/bench/workgen/runner/read_write_sync_short.py
+++ b/src/third_party/wiredtiger/bench/workgen/runner/read_write_sync_short.py
@@ -38,7 +38,7 @@ from workgen import *
context = Context()
conn_config = ""
conn_config += ",cache_size=2GB,eviction=(threads_max=8),log=(enabled=true),session_max=250,statistics=(fast),statistics_log=(wait=1,json)" # explicitly added
-conn = wiredtiger_open("WT_TEST", "create," + conn_config)
+conn = context.wiredtiger_open("create," + conn_config)
s = conn.open_session("")
wtperf_table_config = "key_format=S,value_format=S," +\
@@ -147,5 +147,5 @@ workload.options.warmup=0
workload.options.sample_interval_ms = 1000
workload.run(conn)
-latency_filename = "WT_TEST/latency.out"
+latency_filename = context.args.home + "/latency.out"
latency.workload_latency(workload, latency_filename)
diff --git a/src/third_party/wiredtiger/bench/workgen/runner/runner/__init__.py b/src/third_party/wiredtiger/bench/workgen/runner/runner/__init__.py
index f3c0ad3243f..0af409099b6 100755
--- a/src/third_party/wiredtiger/bench/workgen/runner/runner/__init__.py
+++ b/src/third_party/wiredtiger/bench/workgen/runner/runner/__init__.py
@@ -30,7 +30,7 @@
# Used as a first import by runners, does any common initialization.
from __future__ import print_function
-import os, shutil, sys
+import os, sys
thisdir = os.path.dirname(os.path.abspath(__file__))
workgen_src = os.path.dirname(os.path.dirname(thisdir))
wt_dir = os.path.dirname(os.path.dirname(workgen_src))
@@ -84,9 +84,5 @@ except:
sys.path.insert(0, os.path.join(wt_builddir, 'bench', 'workgen'))
import workgen
-# Clear out the WT_TEST directory.
-shutil.rmtree('WT_TEST', True)
-os.mkdir('WT_TEST')
-
from .core import txn, extensions_config, op_append, op_group_transaction, op_log_like, op_multi_table, op_populate_with_range, sleep, timed
from .latency import workload_latency
diff --git a/src/third_party/wiredtiger/bench/workgen/runner/small_btree.py b/src/third_party/wiredtiger/bench/workgen/runner/small_btree.py
index 735b80e9780..4ea36b238f8 100644
--- a/src/third_party/wiredtiger/bench/workgen/runner/small_btree.py
+++ b/src/third_party/wiredtiger/bench/workgen/runner/small_btree.py
@@ -32,7 +32,7 @@ from wiredtiger import *
from workgen import *
context = Context()
-conn = wiredtiger_open("WT_TEST", "create,cache_size=500MB")
+conn = context.wiredtiger_open("create,cache_size=500MB")
s = conn.open_session()
tname = "file:test.wt"
s.create(tname, 'key_format=S,value_format=S')
diff --git a/src/third_party/wiredtiger/bench/workgen/runner/small_btree_reopen.py b/src/third_party/wiredtiger/bench/workgen/runner/small_btree_reopen.py
index 0a2363a38d6..3fdd7cedada 100644
--- a/src/third_party/wiredtiger/bench/workgen/runner/small_btree_reopen.py
+++ b/src/third_party/wiredtiger/bench/workgen/runner/small_btree_reopen.py
@@ -32,7 +32,7 @@ from wiredtiger import *
from workgen import *
context = Context()
-conn = wiredtiger_open("WT_TEST", "create,cache_size=500MB")
+conn = context.wiredtiger_open("create,cache_size=500MB")
s = conn.open_session()
tname = "file:test.wt"
s.create(tname, 'key_format=S,value_format=S')
diff --git a/src/third_party/wiredtiger/bench/workgen/runner/split_stress.py b/src/third_party/wiredtiger/bench/workgen/runner/split_stress.py
index 74a02c02f00..761cc7aedc7 100755
--- a/src/third_party/wiredtiger/bench/workgen/runner/split_stress.py
+++ b/src/third_party/wiredtiger/bench/workgen/runner/split_stress.py
@@ -38,7 +38,7 @@ from workgen import *
context = Context()
# Connection configuration.
conn_config = "cache_size=100MB,log=(enabled=false),statistics=[fast],statistics_log=(wait=1,json=false)"
-conn = wiredtiger_open("WT_TEST", "create," + conn_config)
+conn = context.wiredtiger_open("create," + conn_config)
s = conn.open_session("")
# Table configuration.
@@ -78,6 +78,6 @@ workload.options.run_time=300
print('Split stress workload running...')
workload.run(conn)
-latency_filename = "WT_TEST/latency.out"
+latency_filename = context.args.home + "/latency.out"
latency.workload_latency(workload, latency_filename)
conn.close()
diff --git a/src/third_party/wiredtiger/bench/workgen/workgen.swig b/src/third_party/wiredtiger/bench/workgen/workgen.swig
index 8d038f673ff..ecd090ae94d 100644
--- a/src/third_party/wiredtiger/bench/workgen/workgen.swig
+++ b/src/third_party/wiredtiger/bench/workgen/workgen.swig
@@ -51,7 +51,7 @@
%}
%pythoncode %{
-import numbers
+ import argparse,numbers,os,shutil,wiredtiger
%}
%exception {
@@ -142,6 +142,60 @@ WorkgenFrozenClass(TableOptions)
WorkgenFrozenClass(ThreadOptions)
WorkgenFrozenClass(WorkloadOptions)
+%extend workgen::Context {
+%pythoncode %{
+ # This will be the actual __init__ function after we shuffle names below!
+ def Xinit(self, parser = None):
+ self.__original_init__()
+ self._internal_init(parser)
+
+ def _internal_init(self, parser):
+ self.default_home = "WT_TEST"
+ self.default_config = "create"
+ if not parser:
+ parser = argparse.ArgumentParser("Execute workgen.")
+ parser.add_argument("--home", dest="home", type=str,
+ help="home directory for the run (default=%s)" % self.default_home)
+ parser.add_argument("--keep", dest="keep", action="store_true",
+ help="Run the workload on an existing home directory")
+ parser.add_argument("--verbose", dest="verbose", action="store_true",
+ help="Run the workload verbosely")
+ self.parser = parser
+ self._initialized = False
+
+ def parse_arguments(self, parser):
+ self.args = parser.parse_args()
+
+ def wiredtiger_open_config(self, config):
+ return config
+
+ def wiredtiger_open(self, config = None):
+ if config == None:
+ config = self.default_config
+ self.initialize()
+ return wiredtiger.wiredtiger_open(self.args.home, self.wiredtiger_open_config(config))
+
+ def initialize(self):
+ if not self._initialized:
+ self.parse_arguments(self.parser)
+ if self.args.home == None:
+ self.args.home = self.default_home
+ self._initialized = True
+ if not self.args.keep:
+ shutil.rmtree(self.args.home, True)
+ os.mkdir(self.args.home)
+ return self
+%}
+};
+
+%pythoncode %{
+# Shuffle the names of the __init__ function, we want ours (Xinit above), called first.
+# This seems to be the most natural way to intercept a C++ constructor, and do
+# Python-specific actions as part of the regular constructor.
+Context.__original_init__ = Context.__init__
+Context.__init__ = Context.Xinit
+%}
+
%extend workgen::Operation {
%pythoncode %{
def __mul__(self, other):
diff --git a/src/third_party/wiredtiger/bench/workgen/wtperf.py b/src/third_party/wiredtiger/bench/workgen/wtperf.py
index e0927b0b19a..66c009ef96e 100755
--- a/src/third_party/wiredtiger/bench/workgen/wtperf.py
+++ b/src/third_party/wiredtiger/bench/workgen/wtperf.py
@@ -601,7 +601,6 @@ class Translator:
s += ' return op_ret\n'
s += '\n'
s += 'context = Context()\n'
- s += 'homedir = "' + self.homedir + '"\n'
extra_config = ''
s += 'conn_config = ""\n'
@@ -616,7 +615,7 @@ class Translator:
s += 'conn_config += extensions_config(["compressors/' + \
compression + '"])\n'
compression = 'block_compressor=' + compression + ','
- s += 'conn = wiredtiger_open(homedir, "create," + conn_config)\n'
+ s += 'conn = context.wiredtiger_open("create," + conn_config)\n'
s += 's = conn.open_session("' + sess_config + '")\n'
s += '\n'
s += self.translate_table_create()
@@ -634,8 +633,7 @@ class Translator:
s += 'conn.close()\n'
if readonly:
'conn_config += ",readonly=true"\n'
- s += 'conn = wiredtiger_open(homedir, ' + \
- '"create," + conn_config)\n'
+ s += 'conn = context.wiredtiger_open("create," + conn_config)\n'
s += '\n'
s += 'workload = Workload(context, ' + t_var + ')\n'
s += workloadopts
@@ -643,7 +641,7 @@ class Translator:
if self.verbose > 0:
s += 'print("workload:")\n'
s += 'workload.run(conn)\n\n'
- s += 'latency_filename = homedir + "/latency.out"\n'
+ s += 'latency_filename = context.args.home + "/latency.out"\n'
s += 'latency.workload_latency(workload, latency_filename)\n'
if close_conn:
diff --git a/src/third_party/wiredtiger/dist/s_string.ok b/src/third_party/wiredtiger/dist/s_string.ok
index 2e5fcd94a2e..d12c125a6eb 100644
--- a/src/third_party/wiredtiger/dist/s_string.ok
+++ b/src/third_party/wiredtiger/dist/s_string.ok
@@ -43,6 +43,7 @@ Barack
BerkeleyDB
Bitfield
Bitwise
+BlqRr
Brueckner
Bsearch
Btree
@@ -174,6 +175,7 @@ HHHHLL
HHHLL
HILQr
HOTBACKUP
+HSdump
Hendrik
HyperLevelDB
ID's
@@ -267,6 +269,7 @@ Mutex
MySecret
NEEDKEY
NEEDVALUE
+NNN
NOLINT
NOLINTNEXTLINE
NOLL
@@ -318,6 +321,7 @@ RCS
RDNOLOCK
RDONLY
READONLY
+REBALANCE
RECNO
REF's
REFs
@@ -826,6 +830,7 @@ hhh
highjack
hilq
hotbackup
+hs
hselasky
html
huffman
@@ -905,6 +910,7 @@ kb
kbits
keycmp
keyid
+keylen
keyv
kv
kvraw
@@ -950,6 +956,7 @@ lookaside
lookup
lookups
lossy
+lqRrt
lqr
lqrt
lr
@@ -1150,6 +1157,7 @@ rduppo
readlock
readonly
readunlock
+readv
realloc
rebalance
rebalanced
diff --git a/src/third_party/wiredtiger/examples/c/ex_all.c b/src/third_party/wiredtiger/examples/c/ex_all.c
index 5cd014493dd..cfb0099ad9c 100644
--- a/src/third_party/wiredtiger/examples/c/ex_all.c
+++ b/src/third_party/wiredtiger/examples/c/ex_all.c
@@ -1102,6 +1102,7 @@ backup(WT_SESSION *session)
{
char buf[1024];
+ WT_CURSOR *dup_cursor;
/*! [backup]*/
WT_CURSOR *cursor;
const char *filename;
@@ -1125,12 +1126,26 @@ backup(WT_SESSION *session)
error_check(cursor->close(cursor));
/*! [backup]*/
+ /*! [backup log duplicate]*/
+ /* Open the backup data source. */
+ error_check(session->open_cursor(session, "backup:", NULL, NULL, &cursor));
+ /* Open a duplicate cursor for additional log files. */
+ error_check(session->open_cursor(session, NULL, cursor, "target=(\"log:\")", &dup_cursor));
+ /*! [backup log duplicate]*/
+
/*! [incremental backup]*/
- /* Open the backup data source for incremental backup. */
+ /* Open the backup data source for log-based incremental backup. */
error_check(session->open_cursor(session, "backup:", NULL, "target=(\"log:\")", &cursor));
/*! [incremental backup]*/
error_check(cursor->close(cursor));
+ /*! [incremental block backup]*/
+ /* Open the backup data source for block-based incremental backup. */
+ error_check(session->open_cursor(
+ session, "backup:", NULL, "incremental=(enabled,src_id=ID0,this_id=ID1)", &cursor));
+ /*! [incremental block backup]*/
+ error_check(cursor->close(cursor));
+
/*! [backup of a checkpoint]*/
error_check(session->checkpoint(session, "drop=(from=June01),name=June01"));
/*! [backup of a checkpoint]*/
diff --git a/src/third_party/wiredtiger/examples/c/ex_backup_block.c b/src/third_party/wiredtiger/examples/c/ex_backup_block.c
index f374424d442..24ec718af53 100644
--- a/src/third_party/wiredtiger/examples/c/ex_backup_block.c
+++ b/src/third_party/wiredtiger/examples/c/ex_backup_block.c
@@ -269,7 +269,7 @@ take_full_backup(WT_SESSION *session, int i)
hdir = home_incr;
if (i == 0) {
(void)snprintf(
- buf, sizeof(buf), "incremental=(granularity=1M,enabled=true,this_id=ID%d)", i);
+ buf, sizeof(buf), "incremental=(granularity=1M,enabled=true,this_id=\"ID%d\")", i);
error_check(session->open_cursor(session, "backup:", NULL, buf, &cursor));
} else
error_check(session->open_cursor(session, "backup:", NULL, NULL, &cursor));
@@ -330,12 +330,10 @@ take_incr_backup(WT_SESSION *session, int i)
const char *filename;
bool first;
- /*! [incremental backup using block transfer]*/
-
tmp = NULL;
tmp_sz = 0;
/* Open the backup data source for incremental backup. */
- (void)snprintf(buf, sizeof(buf), "incremental=(src_id=ID%d,this_id=ID%d)", i - 1, i);
+ (void)snprintf(buf, sizeof(buf), "incremental=(src_id=\"ID%d\",this_id=\"ID%d\")", i - 1, i);
error_check(session->open_cursor(session, "backup:", NULL, buf, &backup_cur));
rfd = wfd = -1;
count = 0;
@@ -385,7 +383,7 @@ take_incr_backup(WT_SESSION *session, int i)
error_sys_check(rfd = open(buf, O_RDONLY, 0));
(void)snprintf(h, sizeof(h), "%s.%d", home_incr, i);
(void)snprintf(buf, sizeof(buf), "%s/%s", h, filename);
- error_sys_check(wfd = open(buf, O_WRONLY, 0));
+ error_sys_check(wfd = open(buf, O_WRONLY | O_CREAT, 0));
first = false;
}
@@ -439,7 +437,6 @@ take_incr_backup(WT_SESSION *session, int i)
error_check(backup_cur->close(backup_cur));
error_check(finalize_files(flist, count));
free(tmp);
- /*! [incremental backup using block transfer]*/
}
int
@@ -506,7 +503,8 @@ main(int argc, char *argv[])
/*
* We should have an entry for i-1 and i-2. Use the older one.
*/
- (void)snprintf(cmd_buf, sizeof(cmd_buf), "incremental=(src_id=ID%d,this_id=ID%d)", i - 2, i);
+ (void)snprintf(
+ cmd_buf, sizeof(cmd_buf), "incremental=(src_id=\"ID%d\",this_id=\"ID%d\")", i - 2, i);
error_check(session->open_cursor(session, "backup:", NULL, cmd_buf, &backup_cur));
error_check(backup_cur->close(backup_cur));
@@ -540,7 +538,8 @@ main(int argc, char *argv[])
/*
* We should not have any information.
*/
- (void)snprintf(cmd_buf, sizeof(cmd_buf), "incremental=(src_id=ID%d,this_id=ID%d)", i - 2, i);
+ (void)snprintf(
+ cmd_buf, sizeof(cmd_buf), "incremental=(src_id=\"ID%d\",this_id=\"ID%d\")", i - 2, i);
testutil_assert(session->open_cursor(session, "backup:", NULL, cmd_buf, &backup_cur) == ENOENT);
error_check(wt_conn->close(wt_conn, NULL));
diff --git a/src/third_party/wiredtiger/examples/java/com/wiredtiger/examples/ex_all.java b/src/third_party/wiredtiger/examples/java/com/wiredtiger/examples/ex_all.java
index 1258e195929..50130663462 100644
--- a/src/third_party/wiredtiger/examples/java/com/wiredtiger/examples/ex_all.java
+++ b/src/third_party/wiredtiger/examples/java/com/wiredtiger/examples/ex_all.java
@@ -845,6 +845,7 @@ backup(Session session)
{
char buf[] = new char[1024];
+ Cursor dup_cursor;
/*! [backup]*/
Cursor cursor;
String filename;
@@ -890,6 +891,21 @@ backup(Session session)
}
/*! [backup]*/
try {
+ /*! [backup log duplicate]*/
+ /* Open the backup data source. */
+ cursor = session.open_cursor("backup:", null, null);
+ /* Open a duplicate cursor for additional log files. */
+ dup_cursor = session.open_cursor(null, cursor, "target=(\"log:\")");
+ /*! [backup log duplicate]*/
+
+ ret = dup_cursor.close();
+ ret = cursor.close();
+ }
+ catch (Exception ex) {
+ System.err.println(progname +
+ ": duplicate log backup failed: " + ex.toString());
+ }
+ try {
/*! [incremental backup]*/
/* Open the backup data source for incremental backup. */
cursor = session.open_cursor("backup:", null, "target=(\"log:\")");
@@ -902,6 +918,19 @@ backup(Session session)
": incremental backup failed: " + ex.toString());
}
+ try {
+ /*! [incremental block backup]*/
+ /* Open the backup data source for incremental backup. */
+ cursor = session.open_cursor("backup:", null, "incremental=(enabled,src_id=ID0,this_id=ID1)");
+ /*! [incremental block backup]*/
+
+ ret = cursor.close();
+ }
+ catch (Exception ex) {
+ System.err.println(progname +
+ ": incremental backup failed: " + ex.toString());
+ }
+
/*! [backup of a checkpoint]*/
ret = session.checkpoint("drop=(from=June01),name=June01");
/*! [backup of a checkpoint]*/
diff --git a/src/third_party/wiredtiger/import.data b/src/third_party/wiredtiger/import.data
index eedce57dc92..4afe42eccce 100644
--- a/src/third_party/wiredtiger/import.data
+++ b/src/third_party/wiredtiger/import.data
@@ -2,5 +2,5 @@
"vendor": "wiredtiger",
"github": "wiredtiger/wiredtiger.git",
"branch": "mongodb-4.2",
- "commit": "2a9cabc3100088666ae5b1e9f430bcfd1c6172f4"
+ "commit": "5de95caf8b4514906b38e10e2063592907a5b3e6"
}
diff --git a/src/third_party/wiredtiger/src/block/block_ckpt.c b/src/third_party/wiredtiger/src/block/block_ckpt.c
index 12c0db4e43d..6cbe80a9317 100644
--- a/src/third_party/wiredtiger/src/block/block_ckpt.c
+++ b/src/third_party/wiredtiger/src/block/block_ckpt.c
@@ -662,21 +662,32 @@ __ckpt_add_blkmod_entry(
WT_SESSION_IMPL *session, WT_BLOCK_MODS *blk_mod, wt_off_t offset, wt_off_t len)
{
uint64_t end, start;
- uint32_t end_rdup;
+ uint32_t end_buf_bytes, end_rdup_bytes;
WT_ASSERT(session, blk_mod->granularity != 0);
start = (uint64_t)offset / blk_mod->granularity;
end = (uint64_t)(offset + len) / blk_mod->granularity;
WT_ASSERT(session, end < UINT32_MAX);
- end_rdup = WT_MAX(__wt_rduppo2((uint32_t)end, 8), WT_BLOCK_MODS_LIST_MIN);
- if ((end_rdup << 3) > blk_mod->nbits) {
+ end_rdup_bytes = WT_MAX(__wt_rduppo2((uint32_t)end, 8), WT_BLOCK_MODS_LIST_MIN);
+ end_buf_bytes = (uint32_t)blk_mod->nbits >> 3;
+ /*
+ * We are doing a lot of shifting. Make sure that the number of bytes we end up with is a
+ * multiple of eight. We guarantee that in the rounding up call, but also make sure that the
+ * constant stays a multiple of eight.
+ */
+ WT_ASSERT(session, end_rdup_bytes % 8 == 0);
+ if (end_rdup_bytes > end_buf_bytes) {
/* If we don't have enough, extend the buffer. */
if (blk_mod->nbits == 0) {
- WT_RET(__wt_buf_initsize(session, &blk_mod->bitstring, end_rdup));
- memset(blk_mod->bitstring.mem, 0, end_rdup);
- } else
- WT_RET(__wt_buf_set(session, &blk_mod->bitstring, blk_mod->bitstring.data, end_rdup));
- blk_mod->nbits = end_rdup << 3;
+ WT_RET(__wt_buf_initsize(session, &blk_mod->bitstring, end_rdup_bytes));
+ memset(blk_mod->bitstring.mem, 0, end_rdup_bytes);
+ } else {
+ WT_RET(
+ __wt_buf_set(session, &blk_mod->bitstring, blk_mod->bitstring.data, end_rdup_bytes));
+ memset(
+ (uint8_t *)blk_mod->bitstring.mem + end_buf_bytes, 0, end_rdup_bytes - end_buf_bytes);
+ }
+ blk_mod->nbits = end_rdup_bytes << 3;
}
/* Set all the bits needed to record this offset/length pair. */
diff --git a/src/third_party/wiredtiger/src/cursor/cur_backup.c b/src/third_party/wiredtiger/src/cursor/cur_backup.c
index af6cd9d0dc4..a16b7183a9d 100644
--- a/src/third_party/wiredtiger/src/cursor/cur_backup.c
+++ b/src/third_party/wiredtiger/src/cursor/cur_backup.c
@@ -368,6 +368,7 @@ __backup_find_id(WT_SESSION_IMPL *session, WT_CONFIG_ITEM *cval, WT_BLKINCR **in
u_int i;
conn = S2C(session);
+ WT_RET(__wt_name_check(session, cval->str, cval->len, false));
for (i = 0; i < WT_BLKINCR_MAX; ++i) {
blk = &conn->incr_backups[i];
/* If it isn't valid, skip it. */
@@ -416,6 +417,10 @@ err:
/*
* __backup_config --
* Backup configuration.
+ *
+ * NOTE: this function handles all of the backup configuration except for the incremental use of
+ * force_stop. That is handled at the beginning of __backup_start because we want to deal with
+ * that setting without any of the other cursor setup.
*/
static int
__backup_config(WT_SESSION_IMPL *session, WT_CURSOR_BACKUP *cb, const char *cfg[],
@@ -439,19 +444,6 @@ __backup_config(WT_SESSION_IMPL *session, WT_CURSOR_BACKUP *cb, const char *cfg[
* Per-file offset incremental hot backup configurations take a starting checkpoint and optional
* maximum transfer size, and the subsequent duplicate cursors take a file object.
*/
- WT_RET_NOTFOUND_OK(__wt_config_gets(session, cfg, "incremental.force_stop", &cval));
- if (cval.val) {
- /*
- * If we're force stopping incremental backup, set the flag. The resources involved in
- * incremental backup will be released on cursor close and that is the only expected usage
- * for this cursor.
- */
- if (is_dup)
- WT_RET_MSG(session, EINVAL,
- "Incremental force stop can only be specified on a primary backup cursor");
- F_SET(cb, WT_CURBACKUP_FORCE_STOP);
- return (0);
- }
WT_RET_NOTFOUND_OK(__wt_config_gets(session, cfg, "incremental.enabled", &cval));
if (cval.val) {
if (!F_ISSET(conn, WT_CONN_INCR_BACKUP)) {
@@ -504,8 +496,10 @@ __backup_config(WT_SESSION_IMPL *session, WT_CURSOR_BACKUP *cb, const char *cfg[
WT_ERR_MSG(session, EINVAL,
"Incremental identifier can only be specified on a primary backup cursor");
ret = __backup_find_id(session, &cval, NULL);
- if (ret != WT_NOTFOUND)
+ if (ret == 0)
WT_ERR_MSG(session, EINVAL, "Incremental identifier already exists");
+ if (ret != WT_NOTFOUND)
+ WT_ERR(ret);
WT_ERR(__backup_add_id(session, &cval));
incremental_config = true;
@@ -637,6 +631,9 @@ __backup_start(
* incremental backup will be released on cursor close and that is the only expected usage
* for this cursor.
*/
+ if (is_dup)
+ WT_RET_MSG(session, EINVAL,
+ "Incremental force stop can only be specified on a primary backup cursor");
F_SET(cb, WT_CURBACKUP_FORCE_STOP);
return (0);
}
diff --git a/src/third_party/wiredtiger/src/cursor/cur_backup_incr.c b/src/third_party/wiredtiger/src/cursor/cur_backup_incr.c
index d44070a2160..8a9416eaaa0 100644
--- a/src/third_party/wiredtiger/src/cursor/cur_backup_incr.c
+++ b/src/third_party/wiredtiger/src/cursor/cur_backup_incr.c
@@ -101,22 +101,11 @@ __curbackup_incr_next(WT_CURSOR *cursor)
CURSOR_API_CALL(cursor, session, get_value, btree);
F_CLR(cursor, WT_CURSTD_RAW);
- if (cb->incr_init) {
- /* Look for the next chunk that had modifications. */
- while (cb->bit_offset < cb->nbits)
- if (__bit_test(cb->bitstring.mem, cb->bit_offset))
- break;
- else
- ++cb->bit_offset;
-
- /* We either have this object's incremental information or we're done. */
- if (cb->bit_offset >= cb->nbits)
- WT_ERR(WT_NOTFOUND);
- __wt_cursor_set_key(cursor, cb->offset + cb->granularity * cb->bit_offset++,
- cb->granularity, WT_BACKUP_RANGE);
- } else if (btree == NULL || F_ISSET(cb, WT_CURBACKUP_FORCE_FULL)) {
- /* We don't have this object's incremental information, and it's a full file copy. */
- /* If this is a log file, use the full pathname that may include the log path. */
+ if (!cb->incr_init && (btree == NULL || F_ISSET(cb, WT_CURBACKUP_FORCE_FULL))) {
+ /*
+ * We don't have this object's incremental information or it's a forced file copy. If this
+ * is a log file, use the full pathname that may include the log path.
+ */
file = cb->incr_file;
if (WT_PREFIX_MATCH(file, WT_LOG_FILENAME)) {
WT_ERR(__wt_scr_alloc(session, 0, &buf));
@@ -128,26 +117,48 @@ __curbackup_incr_next(WT_CURSOR *cursor)
cb->nbits = 0;
cb->offset = 0;
cb->bit_offset = 0;
+ /*
+ * By setting this to true, the next call will detect we're done in the code for the
+ * incremental cursor below and return WT_NOTFOUND.
+ */
cb->incr_init = true;
__wt_cursor_set_key(cursor, 0, size, WT_BACKUP_FILE);
} else {
- /*
- * We don't have this object's incremental information, and it's not a full file copy. Get a
- * list of the block modifications for the file. The block modifications are from the
- * incremental identifier starting point. Walk the list looking for one with a source of our
- * id.
- */
- WT_ERR(__curbackup_incr_blkmod(session, btree, cb));
- /*
- * If there is no block modification information for this file, there is no information to
- * return to the user.
- */
- if (cb->bitstring.mem == NULL)
- WT_ERR(WT_NOTFOUND);
+ if (cb->incr_init) {
+ /* Look for the next chunk that had modifications. */
+ while (cb->bit_offset < cb->nbits)
+ if (__bit_test(cb->bitstring.mem, cb->bit_offset))
+ break;
+ else
+ ++cb->bit_offset;
+
+ /* We either have this object's incremental information or we're done. */
+ if (cb->bit_offset >= cb->nbits)
+ WT_ERR(WT_NOTFOUND);
+ } else {
+ /*
+ * We don't have this object's incremental information, and it's not a full file copy.
+ * Get a list of the block modifications for the file. The block modifications are from
+ * the incremental identifier starting point. Walk the list looking for one with a
+ * source of our id.
+ */
+ WT_ERR(__curbackup_incr_blkmod(session, btree, cb));
+ /*
+ * If there is no block modification information for this file, this is a newly created
+ * file without any checkpoint information. Return the whole file information.
+ */
+ if (cb->bitstring.mem == NULL) {
+ WT_ERR(__wt_fs_size(session, cb->incr_file, &size));
+ cb->incr_init = true;
+ __wt_cursor_set_key(cursor, 0, size, WT_BACKUP_FILE);
+ goto done;
+ }
+ }
__wt_cursor_set_key(cursor, cb->offset + cb->granularity * cb->bit_offset++,
cb->granularity, WT_BACKUP_RANGE);
}
+done:
err:
F_SET(cursor, raw);
__wt_scr_free(session, &buf);
@@ -163,7 +174,7 @@ __wt_curbackup_free_incr(WT_SESSION_IMPL *session, WT_CURSOR_BACKUP *cb)
{
__wt_free(session, cb->incr_file);
if (cb->incr_cursor != NULL)
- __wt_cursor_close(cb->incr_cursor);
+ cb->incr_cursor->close(cb->incr_cursor);
__wt_buf_free(session, &cb->bitstring);
}
@@ -178,6 +189,7 @@ __wt_curbackup_open_incr(WT_SESSION_IMPL *session, const char *uri, WT_CURSOR *o
WT_CURSOR_BACKUP *cb, *other_cb;
WT_DECL_ITEM(open_uri);
WT_DECL_RET;
+ uint64_t session_cache_flags;
cb = (WT_CURSOR_BACKUP *)cursor;
other_cb = (WT_CURSOR_BACKUP *)other;
@@ -197,8 +209,8 @@ __wt_curbackup_open_incr(WT_SESSION_IMPL *session, const char *uri, WT_CURSOR *o
/* All WiredTiger owned files are full file copies. */
if (F_ISSET(other_cb->incr_src, WT_BLKINCR_FULL) ||
WT_PREFIX_MATCH(cb->incr_file, "WiredTiger")) {
- __wt_verbose(session, WT_VERB_BACKUP, "Forcing full file copies for id %s",
- other_cb->incr_src->id_str);
+ __wt_verbose(session, WT_VERB_BACKUP, "Forcing full file copies for %s for id %s",
+ cb->incr_file, other_cb->incr_src->id_str);
F_SET(cb, WT_CURBACKUP_FORCE_FULL);
}
/*
@@ -208,16 +220,20 @@ __wt_curbackup_open_incr(WT_SESSION_IMPL *session, const char *uri, WT_CURSOR *o
if (!F_ISSET(cb, WT_CURBACKUP_FORCE_FULL)) {
WT_ERR(__wt_scr_alloc(session, 0, &open_uri));
WT_ERR(__wt_buf_fmt(session, open_uri, "file:%s", cb->incr_file));
- __wt_free(session, cb->incr_file);
- WT_ERR(__wt_strdup(session, open_uri->data, &cb->incr_file));
-
- WT_ERR(__wt_curfile_open(session, cb->incr_file, NULL, cfg, &cb->incr_cursor));
- WT_ERR(__wt_cursor_init(cursor, uri, NULL, cfg, cursorp));
- WT_ERR(__wt_strdup(session, cb->incr_cursor->internal_uri, &cb->incr_cursor->internal_uri));
- } else
- WT_ERR(__wt_cursor_init(cursor, uri, NULL, cfg, cursorp));
+ /*
+ * Incremental cursors use file cursors, but in a non-standard way. Turn off cursor caching
+ * as we open the cursor.
+ */
+ session_cache_flags = F_ISSET(session, WT_SESSION_CACHE_CURSORS);
+ F_CLR(session, WT_SESSION_CACHE_CURSORS);
+ WT_ERR(__wt_curfile_open(session, open_uri->data, NULL, cfg, &cb->incr_cursor));
+ F_SET(session, session_cache_flags);
+ }
+ WT_ERR(__wt_cursor_init(cursor, uri, NULL, cfg, cursorp));
err:
+ if (ret != 0)
+ __wt_curbackup_free_incr(session, cb);
__wt_scr_free(session, &open_uri);
return (ret);
}
diff --git a/src/third_party/wiredtiger/src/docs/backup.dox b/src/third_party/wiredtiger/src/docs/backup.dox
index b59d099175f..610033d05cf 100644
--- a/src/third_party/wiredtiger/src/docs/backup.dox
+++ b/src/third_party/wiredtiger/src/docs/backup.dox
@@ -35,7 +35,7 @@ continue to read and write the databases while a snapshot is taken.
files have been copied.
The directory into which the files are copied may subsequently be
-specified as an directory to the ::wiredtiger_open function and
+specified as a directory to the ::wiredtiger_open function and
accessed as a WiredTiger database home.
Copying the database files for a backup does not require any special
@@ -57,7 +57,7 @@ arguments to a file archiver such as the system tar utility.
During the period the backup cursor is open, database checkpoints can
be created, but no checkpoints can be deleted. This may result in
-significant file growth. Additionally while the backup cursor is open
+significant file growth. Additionally while the backup cursor is open
automatic log file archiving, even if enabled, will not reclaim any
log files.
@@ -72,6 +72,24 @@ The following is a programmatic example of creating a backup:
@snippet ex_all.c backup
+When logging is enabled, opening the backup cursor forces a log file switch.
+The reason is so that only data that was committed and visible at the time of
+the backup is available in the backup when that log file is included in the
+list of files. WiredTiger offers a mechanism to gather additional log files that
+may be created during the backup.
+
+Since backups can take a long time, it may be desirable to catch up at the
+end of a backup with the log files so that operations that occurred during
+backup can be recovered. WiredTiger provides the ability to open a duplicate
+backup cursor with the configuration \c target=log:. This secondary backup
+cursor will return the file names of all log files via \c dup_cursor->get_key().
+There will be overlap with log file names returned in the original cursor. The user
+only needs to copy file names that are new but there is no error copying all
+log file names returned. This secondary cursor must be closed explicitly prior
+to closing the parent backup cursor.
+
+@snippet ex_all.c backup log duplicate
+
In cases where the backup is desired for a checkpoint other than the
most recent, applications can discard all checkpoints subsequent to the
checkpoint they want using the WT_SESSION::checkpoint method. For
@@ -89,7 +107,76 @@ rm -rf /path/database.backup &&
wt -h /path/database.source backup /path/database.backup
@endcode
-@section backup_incremental Incremental backup
+@section backup_incremental-block Block-based Incremental backup
+
+Once a full backup has been done, it can be rolled forward incrementally by
+copying only modified blocks and new files to the backup copy directory.
+The application is responsible for removing files that
+are no longer part of the backup when later incremental backups no longer
+return their name. This is especially important for WiredTiger log files
+that are no longer needed and must be removed before recovery is run.
+
+@copydoc doc_bulk_durability
+
+The following is the procedure for incrementally backing up a database
+using block modifications:
+
+1. Perform a full backup of the database (as described above), with the
+additional configuration \c incremental=(enabled=true,this_id="ID1").
+The identifier specified in \c this_id starts block tracking and that
+identifier can be used in the future as the source of an incremental
+backup.
+
+2. Begin the incremental backup by opening a backup cursor with the
+\c backup: URI and config string of \c incremental=(src_id="ID1",this_id="ID2").
+Call this \c backup_cursor. Like a normal full backup cursor,
+this cursor will return the filename as the key. There is no associated
+value. The information returned will be based on blocks tracked since the time of
+the previous backup designated with "ID1". New block tracking will be started as
+"ID2" as well. WiredTiger will maintain modifications from two IDs, the current
+and the most recent completed one. Note that all backup identifiers are subject to
+the same naming restrictions as other configuration naming. See @ref config_intro
+for details.
+
+3. For each file returned by \c backup_cursor->next(), open a duplicate
+backup cursor to do the incremental backup on that file. The list
+returned will also include log files (prefixed by \c WiredTigerLog) that need to
+be copied. Configure that duplicate cursor with \c incremental=(file=name).
+The \c name comes from the string returned from \c backup_cursor->get_key().
+Call this \c incr_cursor.
+
+4. The key format for the duplicate backup cursor, \c incr_cursor, is
+\c qqq, representing a file offset and size pair plus a type indicator
+for the range given. There is no associated value. The type indicator
+will be one of \c WT_BACKUP_FILE or \c WT_BACKUP_RANGE. For \c WT_BACKUP_RANGE,
+read the block from the source database file indicated by the file offset and
+size pair and write the block to the same offset in the
+backup database file, replacing the portion of the file represented by
+the offset/size pair. It is not an error for an offset/size pair to extend past
+the current end of the source file, and any missing file data should be ignored.
+For \c WT_BACKUP_FILE, the user can choose to copy the entire file in
+any way they choose, or to use the offset/size pair which will
+indicate the expected size WiredTiger knew at the time of the call.
+
+5. Close the duplicate backup cursor, \c incr_cursor.
+
+6. Repeat steps 3-5 as many times as necessary while \c backup_cursor->next()
+returns files to copy.
+
+7. Close the backup cursor, \c backup_cursor.
+
+8. Repeat steps 2-7 as often as desired.
+
+Full and incremental backups may be repeated as long as the backup
+database directory has not been opened and recovery run. Once recovery
+has run in a backup directory, you can no longer back up to that
+database directory.
+
+An example of opening the backup data source for block-based incremental backup:
+
+@snippet ex_all.c incremental block backup
+
+@section backup_incremental Log-based Incremental backup
Once a backup has been done, it can be rolled forward incrementally by
adding log files to the backup copy. Adding log files to the copy
@@ -139,7 +226,7 @@ database directory has not been opened and recovery run. Once recovery
has run in a backup directory, you can no longer back up to that
database directory.
-An example of opening the backup data source for an incremental backup:
+An example of opening the backup data source for log-based incremental backup:
@snippet ex_all.c incremental backup
diff --git a/src/third_party/wiredtiger/src/docs/spell.ok b/src/third_party/wiredtiger/src/docs/spell.ok
index 1f93969ca69..95fb2756f7c 100644
--- a/src/third_party/wiredtiger/src/docs/spell.ok
+++ b/src/third_party/wiredtiger/src/docs/spell.ok
@@ -283,6 +283,7 @@ hugepage
icount
ie
iflag
+incr
indices
init
insn
@@ -428,6 +429,7 @@ putValue
putValueString
py
qnx
+qqq
rVv
rdbms
rdlock
diff --git a/src/third_party/wiredtiger/src/include/extern.h b/src/third_party/wiredtiger/src/include/extern.h
index 6f686251363..c52ad821daf 100644
--- a/src/third_party/wiredtiger/src/include/extern.h
+++ b/src/third_party/wiredtiger/src/include/extern.h
@@ -1082,7 +1082,7 @@ extern int __wt_msg(WT_SESSION_IMPL *session, const char *fmt, ...)
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern int __wt_multi_to_ref(WT_SESSION_IMPL *session, WT_PAGE *page, WT_MULTI *multi,
WT_REF **refp, size_t *incrp, bool closing) WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
-extern int __wt_name_check(WT_SESSION_IMPL *session, const char *str, size_t len)
+extern int __wt_name_check(WT_SESSION_IMPL *session, const char *str, size_t len, bool check_uri)
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
extern int __wt_nfilename(WT_SESSION_IMPL *session, const char *name, size_t namelen, char **path)
WT_GCC_FUNC_DECL_ATTRIBUTE((warn_unused_result));
diff --git a/src/third_party/wiredtiger/src/meta/meta_ckpt.c b/src/third_party/wiredtiger/src/meta/meta_ckpt.c
index ce693b606e7..341c1b8b5f7 100644
--- a/src/third_party/wiredtiger/src/meta/meta_ckpt.c
+++ b/src/third_party/wiredtiger/src/meta/meta_ckpt.c
@@ -763,7 +763,7 @@ __ckpt_blkmod_to_meta(WT_SESSION_IMPL *session, WT_ITEM *buf, WT_CKPT *ckpt)
if (!F_ISSET(blk, WT_BLOCK_MODS_VALID))
continue;
WT_RET(__wt_raw_to_hex(session, blk->bitstring.data, blk->bitstring.size, &bitstring));
- WT_RET(__wt_buf_catfmt(session, buf, "%s%s=(id=%" PRIu32 ",granularity=%" PRIu64
+ WT_RET(__wt_buf_catfmt(session, buf, "%s\"%s\"=(id=%" PRIu32 ",granularity=%" PRIu64
",nbits=%" PRIu64 ",offset=%" PRIu64 ",blocks=%.*s)",
i == 0 ? "" : ",", blk->id_str, i, blk->granularity, blk->nbits, blk->offset,
(int)bitstring.size, (char *)bitstring.data));
diff --git a/src/third_party/wiredtiger/src/schema/schema_util.c b/src/third_party/wiredtiger/src/schema/schema_util.c
index 2c65e4297db..25ef013648c 100644
--- a/src/third_party/wiredtiger/src/schema/schema_util.c
+++ b/src/third_party/wiredtiger/src/schema/schema_util.c
@@ -111,6 +111,33 @@ __wt_schema_session_release(WT_SESSION_IMPL *session, WT_SESSION_IMPL *int_sessi
}
/*
+ * __str_name_check --
+ * Internal function to disallow any use of the WiredTiger name space. Can be called directly or
+ * after skipping the URI prefix.
+ */
+static int
+__str_name_check(WT_SESSION_IMPL *session, const char *name, bool skip_wt)
+{
+
+ if (!skip_wt && WT_PREFIX_MATCH(name, "WiredTiger"))
+ WT_RET_MSG(session, EINVAL,
+ "%s: the \"WiredTiger\" name space may not be "
+ "used by applications",
+ name);
+
+ /*
+ * Disallow JSON quoting characters -- the config string parsing code supports quoted strings,
+ * but there's no good reason to use them in names and we're not going to do the testing.
+ */
+ if (strpbrk(name, "{},:[]\\\"'") != NULL)
+ WT_RET_MSG(session, EINVAL,
+ "%s: WiredTiger objects should not include grouping "
+ "characters in their names",
+ name);
+ return (0);
+}
+
+/*
* __wt_str_name_check --
* Disallow any use of the WiredTiger name space.
*/
@@ -119,36 +146,24 @@ __wt_str_name_check(WT_SESSION_IMPL *session, const char *str)
{
int skipped;
const char *name, *sep;
+ bool skip;
/*
* Check if name is somewhere in the WiredTiger name space: it would be
* "bad" if the application truncated the metadata file. Skip any
- * leading URI prefix, check and then skip over a table name.
+ * leading URI prefix if needed, check and then skip over a table name.
*/
name = str;
+ skip = false;
for (skipped = 0; skipped < 2; skipped++) {
- if ((sep = strchr(name, ':')) == NULL)
+ if ((sep = strchr(name, ':')) == NULL) {
+ skip = true;
break;
+ }
name = sep + 1;
- if (WT_PREFIX_MATCH(name, "WiredTiger"))
- WT_RET_MSG(session, EINVAL,
- "%s: the \"WiredTiger\" name space may not be "
- "used by applications",
- name);
}
-
- /*
- * Disallow JSON quoting characters -- the config string parsing code supports quoted strings,
- * but there's no good reason to use them in names and we're not going to do the testing.
- */
- if (strpbrk(name, "{},:[]\\\"'") != NULL)
- WT_RET_MSG(session, EINVAL,
- "%s: WiredTiger objects should not include grouping "
- "characters in their names",
- name);
-
- return (0);
+ return (__str_name_check(session, name, skip));
}
/*
@@ -156,7 +171,7 @@ __wt_str_name_check(WT_SESSION_IMPL *session, const char *str)
* Disallow any use of the WiredTiger name space.
*/
int
-__wt_name_check(WT_SESSION_IMPL *session, const char *str, size_t len)
+__wt_name_check(WT_SESSION_IMPL *session, const char *str, size_t len, bool check_uri)
{
WT_DECL_ITEM(tmp);
WT_DECL_RET;
@@ -165,7 +180,9 @@ __wt_name_check(WT_SESSION_IMPL *session, const char *str, size_t len)
WT_ERR(__wt_buf_fmt(session, tmp, "%.*s", (int)len, str));
- ret = __wt_str_name_check(session, tmp->data);
+ /* If we want to skip the URI check call the internal function directly. */
+ ret = check_uri ? __wt_str_name_check(session, tmp->data) :
+ __str_name_check(session, tmp->data, false);
err:
__wt_scr_free(session, &tmp);
diff --git a/src/third_party/wiredtiger/src/txn/txn_ckpt.c b/src/third_party/wiredtiger/src/txn/txn_ckpt.c
index 5f538f7fcda..5c61faee070 100644
--- a/src/third_party/wiredtiger/src/txn/txn_ckpt.c
+++ b/src/third_party/wiredtiger/src/txn/txn_ckpt.c
@@ -22,7 +22,7 @@ static int
__checkpoint_name_ok(WT_SESSION_IMPL *session, const char *name, size_t len)
{
/* Check for characters we don't want to see in a metadata file. */
- WT_RET(__wt_name_check(session, name, len));
+ WT_RET(__wt_name_check(session, name, len, true));
/*
* The internal checkpoint name is special, applications aren't allowed to use it. Be aggressive
diff --git a/src/third_party/wiredtiger/src/txn/txn_nsnap.c b/src/third_party/wiredtiger/src/txn/txn_nsnap.c
index 5ac6e3b62b4..ac842f32c44 100644
--- a/src/third_party/wiredtiger/src/txn/txn_nsnap.c
+++ b/src/third_party/wiredtiger/src/txn/txn_nsnap.c
@@ -344,7 +344,7 @@ __wt_txn_named_snapshot_config(
if (WT_STRING_MATCH("all", cval.str, cval.len))
WT_RET_MSG(session, EINVAL, "Can't create snapshot with reserved \"all\" name");
- WT_RET(__wt_name_check(session, cval.str, cval.len));
+ WT_RET(__wt_name_check(session, cval.str, cval.len, false));
if (F_ISSET(txn, WT_TXN_RUNNING) && txn->isolation != WT_ISO_SNAPSHOT)
WT_RET_MSG(session, EINVAL,
diff --git a/src/third_party/wiredtiger/test/csuite/Makefile.am b/src/third_party/wiredtiger/test/csuite/Makefile.am
index e2b7233f45b..266eb343f63 100644
--- a/src/third_party/wiredtiger/test/csuite/Makefile.am
+++ b/src/third_party/wiredtiger/test/csuite/Makefile.am
@@ -10,6 +10,10 @@ noinst_PROGRAMS=
# The import test is only a shell script
all_TESTS += import/smoke.sh
+test_incr_backup_SOURCES = incr_backup/main.c
+noinst_PROGRAMS += test_incr_backup
+all_TESTS += incr_backup/smoke.sh
+
test_random_abort_SOURCES = random_abort/main.c
noinst_PROGRAMS += test_random_abort
all_TESTS += random_abort/smoke.sh
diff --git a/src/third_party/wiredtiger/test/csuite/incr_backup/main.c b/src/third_party/wiredtiger/test/csuite/incr_backup/main.c
new file mode 100644
index 00000000000..2c8893780eb
--- /dev/null
+++ b/src/third_party/wiredtiger/test/csuite/incr_backup/main.c
@@ -0,0 +1,870 @@
+/*-
+ * Public Domain 2014-2020 MongoDB, Inc.
+ * Public Domain 2008-2014 WiredTiger, Inc.
+ *
+ * This is free and unencumbered software released into the public domain.
+ *
+ * Anyone is free to copy, modify, publish, use, compile, sell, or
+ * distribute this software, either in source code form or as a compiled
+ * binary, for any purpose, commercial or non-commercial, and by any
+ * means.
+ *
+ * In jurisdictions that recognize copyright laws, the author or authors
+ * of this software dedicate any and all copyright interest in the
+ * software to the public domain. We make this dedication for the benefit
+ * of the public at large and to the detriment of our heirs and
+ * successors. We intend this dedication to be an overt act of
+ * relinquishment in perpetuity of all present and future rights to this
+ * software under copyright law.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This program tests incremental backup in a randomized way. The random seed used is reported and
+ * can be used in another run.
+ */
+
+#include "test_util.h"
+
+#include <sys/wait.h>
+#include <signal.h>
+
+#define ITERATIONS 10
+#define MAX_NTABLES 100
+
+#define MAX_KEY_SIZE 100
+#define MAX_VALUE_SIZE 10000
+#define MAX_MODIFY_ENTRIES 10
+#define MAX_MODIFY_DIFF 500
+
+#define URI_MAX_LEN 32
+#define URI_FORMAT "table:t%d-%d"
+#define KEY_FORMAT "key-%d-%d"
+
+static int verbose_level = 0;
+static uint64_t seed = 0;
+
+static void usage(void) WT_GCC_FUNC_DECL_ATTRIBUTE((noreturn));
+
+/*
+ * Note: set this to true to copy incremental files completely.
+ */
+static bool slow_incremental = false;
+
+static bool do_drop = true;
+static bool do_rename = true;
+
+#define VERBOSE(level, fmt, ...) \
+ do { \
+ if (level <= verbose_level) \
+ printf(fmt, __VA_ARGS__); \
+ } while (0)
+
+/*
+ * We keep an array of tables, each one may or may not be in use.
+ * "In use" means it has been created, and will be updated from time to time.
+ */
+typedef struct {
+ char *name; /* non-null entries represent tables in use */
+ uint32_t name_index; /* bumped when we rename or drop, so we get unique names. */
+ uint64_t change_count; /* number of changes so far to the table */
+ WT_RAND_STATE rand;
+ uint32_t max_value_size;
+} TABLE;
+#define TABLE_VALID(tablep) ((tablep)->name != NULL)
+
+/*
+ * The set of all tables in play, and other information used for this run.
+ */
+typedef struct {
+ TABLE *table; /* set of potential tables */
+ uint32_t table_count; /* size of table array */
+ uint32_t tables_in_use; /* count of tables that exist */
+ uint32_t full_backup_number;
+ uint32_t incr_backup_number;
+} TABLE_INFO;
+
+/*
+ * The set of active files in a backup. This is our "memory" of files that are used in each backup,
+ * so we can remove any that are not mentioned in the next backup.
+ */
+typedef struct {
+ char **names;
+ uint32_t count;
+} ACTIVE_FILES;
+
+extern int __wt_optind;
+extern char *__wt_optarg;
+
+/*
+ * The choices of operations we do to each table.
+ */
+typedef enum { INSERT, MODIFY, REMOVE, UPDATE, _OPERATION_TYPE_COUNT } OPERATION_TYPE;
+
+/*
+ * Cycle of changes to a table.
+ *
+ * When making changes to a table, the first KEYS_PER_TABLE changes are all inserts, the next
+ * KEYS_PER_TABLE are updates of the same records. The next KEYS_PER_TABLE are modifications of
+ * existing records, and the last KEYS_PER_TABLE will be removes. This defines one "cycle", and
+ * CHANGES_PER_CYCLE is the number of changes in a complete cycle. Thus at the end/beginning of each
+ * cycle, there are zero keys in the table.
+ *
+ * Having a predictable cycle makes it easy on the checking side (knowing how many total changes
+ * have been made) to check the state of the table.
+ */
+#define KEYS_PER_TABLE 10000
+#define CHANGES_PER_CYCLE (KEYS_PER_TABLE * _OPERATION_TYPE_COUNT)
+
+/*
+ * usage --
+ * Print usage message and exit.
+ */
+static void
+usage(void)
+{
+ fprintf(stderr, "usage: %s [-h dir] [-S seed] [-v verbose_level]\n", progname);
+ exit(EXIT_FAILURE);
+}
+
+/*
+ * die --
+ * Called when testutil_assert or testutil_check fails.
+ */
+static void
+die(void)
+{
+ fprintf(stderr,
+ "**** FAILURE\n"
+ "To reproduce, please rerun with: %s -S %" PRIu64 "\n",
+ progname, seed);
+}
+
+/*
+ * key_value --
+ * Return the key, value and operation type for a given change to a table. See "Cycle of changes
+ * to a table" above.
+ *
+ * The keys generated are unique among the 10000, but we purposely don't make them sequential, so
+ * that insertions tend to be scattered among the pages in the B-tree.
+ *
+ * "key-0-0", "key-1-0", "key-2-0", ... "key-99-0", "key-0-1", "key-1-1", ...
+ */
+static void
+key_value(uint64_t change_count, char *key, size_t key_size, WT_ITEM *item, OPERATION_TYPE *typep)
+{
+ uint32_t key_num;
+ OPERATION_TYPE op_type;
+ size_t pos, value_size;
+ char *cp;
+ char ch;
+
+ key_num = change_count % KEYS_PER_TABLE;
+ *typep = op_type = (OPERATION_TYPE)((change_count % CHANGES_PER_CYCLE) / KEYS_PER_TABLE);
+
+ testutil_check(
+ __wt_snprintf(key, key_size, KEY_FORMAT, (int)(key_num % 100), (int)(key_num / 100)));
+ if (op_type == REMOVE)
+ return; /* remove needs no key */
+
+ /* The value sizes vary "predictably" up to the max value size for this table. */
+ value_size = (change_count * 103) % (item->size + 1);
+ testutil_assert(value_size <= item->size);
+
+ /*
+ * For a given key, a value is first inserted, then later updated, then modified. When a value
+ * is inserted, it is all the letter 'a'. When the value is updated, it is mostly 'b', with some
+ * 'c' mixed in. When the value is to be modified, we'll end up with a value with mostly 'b' and
+ * 'M' mixed in, in different spots. Thus the modify operation will have both additions ('M')
+ * and subtractions ('c') from the previous version.
+ */
+ if (op_type == INSERT)
+ ch = 'a';
+ else
+ ch = 'b';
+
+ cp = (char *)item->data;
+ for (pos = 0; pos < value_size; pos++) {
+ cp[pos] = ch;
+ if (op_type == UPDATE && ((50 < pos && pos < 60) || (150 < pos && pos < 160)))
+ cp[pos] = 'c';
+ else if (op_type == MODIFY && ((20 < pos && pos < 30) || (120 < pos && pos < 130)))
+ cp[pos] = 'M';
+ }
+ item->size = value_size;
+}
+
+/*
+ * active_files_init --
+ * Initialize (clear) the active file struct.
+ */
+static void
+active_files_init(ACTIVE_FILES *active)
+{
+ WT_CLEAR(*active);
+}
+
+/*
+ * active_files_print --
+ * Print the set of active files for debugging.
+ */
+static void
+active_files_print(ACTIVE_FILES *active, const char *msg)
+{
+ uint32_t i;
+
+ VERBOSE(6, "Active files: %s, %d entries\n", msg, (int)active->count);
+ for (i = 0; i < active->count; i++)
+ VERBOSE(6, " %s\n", active->names[i]);
+}
+
+/*
+ * active_files_add --
+ *     Add a new name to the active file list. The name is copied; the caller keeps ownership of
+ *     its argument.
+ */
+static void
+active_files_add(ACTIVE_FILES *active, const char *name)
+{
+    uint32_t pos;
+
+    pos = active->count++;
+    active->names = drealloc(active->names, sizeof(char *) * active->count);
+    /* NOTE(review): strdup's return is not checked for NULL, unlike the checked drealloc. */
+    active->names[pos] = strdup(name);
+}
+
+/*
+ * active_files_sort_function --
+ *     Sort function for qsort: compare two file names lexicographically.
+ */
+static int
+active_files_sort_function(const void *left, const void *right)
+{
+    return (strcmp(*(const char **)left, *(const char **)right));
+}
+
+/*
+ * active_files_sort --
+ *     Sort the list of names in the active file list. Both lists must be sorted before
+ *     active_files_remove_missing can merge-walk them.
+ */
+static void
+active_files_sort(ACTIVE_FILES *active)
+{
+    __wt_qsort(active->names, active->count, sizeof(char *), active_files_sort_function);
+}
+
+/*
+ * active_files_remove_missing --
+ *     Files in the previous list that are missing from the current list are removed. Both lists
+ *     must be sorted (see active_files_sort) for the merge-style walk below to be correct.
+ */
+static void
+active_files_remove_missing(ACTIVE_FILES *prev, ACTIVE_FILES *cur, const char *dirname)
+{
+    uint32_t curpos, prevpos;
+    int cmp;
+    char filename[1024];
+
+    active_files_print(prev, "computing removals: previous list of active files");
+    active_files_print(cur, "computing removals: current list of active files");
+    curpos = 0;
+    /*
+     * Walk through the two lists looking for non-matches.
+     */
+    for (prevpos = 0; prevpos < prev->count; prevpos++) {
+again:
+        if (curpos >= cur->count)
+            cmp = -1; /* There are extra entries at the end of the prev list */
+        else
+            cmp = strcmp(prev->names[prevpos], cur->names[curpos]);
+
+        if (cmp == 0)
+            curpos++;
+        else if (cmp < 0) {
+            /*
+             * There is something in the prev list not in the current list. Remove it, and continue
+             * - don't advance the current list.
+             */
+            testutil_check(
+              __wt_snprintf(filename, sizeof(filename), "%s/%s", dirname, prev->names[prevpos]));
+            VERBOSE(3, "Removing file from backup: %s\n", filename);
+            /* NOTE(review): remove's return value is ignored — confirm best-effort is intended. */
+            remove(filename);
+        } else {
+            /*
+             * There is something in the current list not in the prev list. Walk past it in the
+             * current list and try again.
+             */
+            curpos++;
+            goto again;
+        }
+    }
+}
+
+/*
+ * active_files_free --
+ *     Free the list of active files and reinitialize the struct so it can be reused.
+ */
+static void
+active_files_free(ACTIVE_FILES *active)
+{
+    uint32_t i;
+
+    for (i = 0; i < active->count; i++)
+        free(active->names[i]);
+    free(active->names);
+    active_files_init(active);
+}
+
+/*
+ * active_files_move --
+ *     Move an active file list to the destination list. Any names previously held by the
+ *     destination are freed; the source is left empty (cleared) but still valid.
+ */
+static void
+active_files_move(ACTIVE_FILES *dest, ACTIVE_FILES *src)
+{
+    active_files_free(dest);
+    *dest = *src;
+    WT_CLEAR(*src);
+}
+
+/*
+ * table_changes --
+ *     Potentially make changes to a single table. About half the calls are no-ops; otherwise up
+ *     to 1000 insert/modify/remove/update operations are applied, driven by the table's
+ *     monotonically increasing change_count.
+ */
+static void
+table_changes(WT_SESSION *session, TABLE *table)
+{
+    WT_CURSOR *cur;
+    WT_ITEM item, item2;
+    WT_MODIFY modify_entries[MAX_MODIFY_ENTRIES];
+    OPERATION_TYPE op_type;
+    uint64_t change_count;
+    uint32_t i, nrecords;
+    int modify_count;
+    u_char *value, *value2;
+    char key[MAX_KEY_SIZE];
+
+    /*
+     * We change each table in use about half the time.
+     */
+    if (__wt_random(&table->rand) % 2 == 0) {
+        value = dcalloc(1, table->max_value_size);
+        value2 = dcalloc(1, table->max_value_size);
+        nrecords = __wt_random(&table->rand) % 1000;
+        VERBOSE(4, "changing %d records in %s\n", (int)nrecords, table->name);
+        testutil_check(session->open_cursor(session, table->name, NULL, NULL, &cur));
+        for (i = 0; i < nrecords; i++) {
+            change_count = table->change_count++;
+            item.data = value;
+            item.size = table->max_value_size;
+            /* key_value derives key, value contents and the operation from the change count. */
+            key_value(change_count, key, sizeof(key), &item, &op_type);
+            cur->set_key(cur, key);
+            switch (op_type) {
+            case INSERT:
+                cur->set_value(cur, &item);
+                testutil_check(cur->insert(cur));
+                break;
+            case MODIFY:
+                /*
+                 * Regenerate the previous value for this key (one full cycle ago) so
+                 * wiredtiger_calc_modify can diff it against the new value.
+                 */
+                item2.data = value2;
+                item2.size = table->max_value_size;
+                key_value(change_count - KEYS_PER_TABLE, NULL, 0, &item2, &op_type);
+                modify_count = MAX_MODIFY_ENTRIES;
+                testutil_check(wiredtiger_calc_modify(
+                  session, &item2, &item, MAX_MODIFY_DIFF, modify_entries, &modify_count));
+                testutil_check(cur->modify(cur, modify_entries, modify_count));
+                break;
+            case REMOVE:
+                testutil_check(cur->remove(cur));
+                break;
+            case UPDATE:
+                cur->set_value(cur, &item);
+                testutil_check(cur->update(cur));
+                break;
+            case _OPERATION_TYPE_COUNT:
+                testutil_assert(false);
+                break;
+            }
+        }
+        free(value);
+        free(value2);
+        testutil_check(cur->close(cur));
+    }
+}
+
+/*
+ * create_table --
+ *     Create a table for the given slot.
+ */
+static void
+create_table(WT_SESSION *session, TABLE_INFO *tinfo, uint32_t slot)
+{
+    char *uri;
+
+    testutil_assert(!TABLE_VALID(&tinfo->table[slot]));
+    uri = dcalloc(1, URI_MAX_LEN);
+    /* The slot's name_index is bumped on every create/rename, so URIs are never reused. */
+    testutil_check(
+      __wt_snprintf(uri, URI_MAX_LEN, URI_FORMAT, (int)slot, (int)tinfo->table[slot].name_index++));
+
+    VERBOSE(3, "create %s\n", uri);
+    testutil_check(session->create(session, uri, "key_format=S,value_format=u"));
+    tinfo->table[slot].name = uri;
+    tinfo->tables_in_use++;
+}
+
+/*
+ * rename_table --
+ *     Rename the table in the given slot to a fresh name, bumping the slot's name index.
+ */
+static void
+rename_table(WT_SESSION *session, TABLE_INFO *tinfo, uint32_t slot)
+{
+    char *olduri, *uri;
+
+    testutil_assert(TABLE_VALID(&tinfo->table[slot]));
+    uri = dcalloc(1, URI_MAX_LEN);
+    testutil_check(
+      __wt_snprintf(uri, URI_MAX_LEN, URI_FORMAT, (int)slot, (int)tinfo->table[slot].name_index++));
+
+    olduri = tinfo->table[slot].name;
+    VERBOSE(3, "rename %s %s\n", olduri, uri);
+    testutil_check(session->rename(session, olduri, uri, NULL));
+    free(olduri);
+    tinfo->table[slot].name = uri;
+}
+
+/*
+ * drop_table --
+ *     Drop the table in the given slot, reset its change count and mark the slot empty.
+ */
+static void
+drop_table(WT_SESSION *session, TABLE_INFO *tinfo, uint32_t slot)
+{
+    char *uri;
+
+    testutil_assert(TABLE_VALID(&tinfo->table[slot]));
+    uri = tinfo->table[slot].name;
+
+    VERBOSE(3, "drop %s\n", uri);
+    testutil_check(session->drop(session, uri, NULL));
+    free(uri);
+    tinfo->table[slot].name = NULL;
+    tinfo->table[slot].change_count = 0;
+    tinfo->tables_in_use--;
+}
+
+/*
+ * base_backup --
+ *     Perform a full backup into a freshly re-created backup directory, enabling block-based
+ *     incremental backup under a new ID and recording the set of files copied in "active".
+ */
+static void
+base_backup(WT_CONNECTION *conn, WT_RAND_STATE *rand, const char *home, const char *backup_home,
+  TABLE_INFO *tinfo, ACTIVE_FILES *active)
+{
+    WT_CURSOR *cursor;
+    WT_SESSION *session;
+    uint32_t granularity;
+    int nfiles, ret;
+    char buf[4096];
+    char *filename;
+
+    nfiles = 0;
+
+    VERBOSE(2, "BASE BACKUP: %s\n", backup_home);
+    active_files_free(active);
+    active_files_init(active);
+    /* Start from an empty backup directory. */
+    testutil_check(
+      __wt_snprintf(buf, sizeof(buf), "rm -rf %s && mkdir %s", backup_home, backup_home));
+    VERBOSE(3, " => %s\n", buf);
+    testutil_check(system(buf));
+
+    testutil_check(conn->open_session(conn, NULL, NULL, &session));
+    /* Later incremental backups will use this number as their src_id. */
+    tinfo->full_backup_number = tinfo->incr_backup_number++;
+
+    /* Half of the runs with a low granularity: 1M */
+    if (__wt_random(rand) % 2 == 0)
+        granularity = 1;
+    else
+        granularity = 1 + __wt_random(rand) % 20;
+    testutil_check(__wt_snprintf(buf, sizeof(buf),
+      "incremental=(granularity=%" PRIu32 "M,enabled=true,this_id=ID%d)", granularity,
+      (int)tinfo->full_backup_number));
+    VERBOSE(3, "open_cursor(session, \"backup:\", NULL, \"%s\", &cursor)\n", buf);
+    testutil_check(session->open_cursor(session, "backup:", NULL, buf, &cursor));
+
+    /* Copy every file the backup cursor names and remember it in the active list. */
+    while ((ret = cursor->next(cursor)) == 0) {
+        nfiles++;
+        testutil_check(cursor->get_key(cursor, &filename));
+        active_files_add(active, filename);
+        testutil_check(
+          __wt_snprintf(buf, sizeof(buf), "cp %s/%s %s/%s", home, filename, backup_home, filename));
+        VERBOSE(3, " => %s\n", buf);
+        testutil_check(system(buf));
+    }
+    testutil_assert(ret == WT_NOTFOUND);
+    testutil_check(cursor->close(cursor));
+    testutil_check(session->close(session, NULL));
+    active_files_sort(active);
+    VERBOSE(2, " finished base backup: %d files\n", nfiles);
+}
+
+/*
+ * reopen_file --
+ *     Open a file if it isn't already open. The "memory" of the open file name is kept in the
+ *     buffer passed in.
+ */
+static void
+reopen_file(int *fdp, char *buf, size_t buflen, const char *filename, int oflag)
+{
+    /* Do we already have this file open? */
+    if (strcmp(buf, filename) == 0 && *fdp != -1)
+        return;
+    if (*fdp != -1)
+        close(*fdp);
+    *fdp = open(filename, oflag, 0666);
+    /* NOTE(review): strncpy does not NUL-terminate on truncation; assumes filename < buflen. */
+    strncpy(buf, filename, buflen);
+    testutil_assert(*fdp >= 0);
+}
+
+/*
+ * incr_backup --
+ *     Perform an incremental backup into an existing backup directory: copy only the changed
+ *     blocks (or whole files) reported by the backup cursor, then prune files no longer in the
+ *     backup set and update the caller's master active-file list.
+ */
+static void
+incr_backup(WT_CONNECTION *conn, const char *home, const char *backup_home, TABLE_INFO *tinfo,
+  ACTIVE_FILES *master_active)
+{
+    ACTIVE_FILES active;
+    WT_CURSOR *cursor, *file_cursor;
+    WT_SESSION *session;
+    void *tmp;
+    ssize_t rdsize;
+    uint64_t offset, size, type;
+    int rfd, ret, wfd, nfiles, nrange, ncopy;
+    char buf[4096], rbuf[4096], wbuf[4096];
+    char *filename;
+
+    VERBOSE(2, "INCREMENTAL BACKUP: %s\n", backup_home);
+    active_files_print(master_active, "master list before incremental backup");
+    WT_CLEAR(rbuf);
+    WT_CLEAR(wbuf);
+    rfd = wfd = -1;
+    nfiles = nrange = ncopy = 0;
+
+    active_files_init(&active);
+    testutil_check(conn->open_session(conn, NULL, NULL, &session));
+    /* Ask for changes since the last full backup, under a new backup ID. */
+    testutil_check(__wt_snprintf(buf, sizeof(buf), "incremental=(src_id=ID%d,this_id=ID%d)",
+      (int)tinfo->full_backup_number, (int)tinfo->incr_backup_number++));
+    VERBOSE(3, "open_cursor(session, \"backup:\", NULL, \"%s\", &cursor)\n", buf);
+    testutil_check(session->open_cursor(session, "backup:", NULL, buf, &cursor));
+
+    while ((ret = cursor->next(cursor)) == 0) {
+        nfiles++;
+        testutil_check(cursor->get_key(cursor, &filename));
+        active_files_add(&active, filename);
+        if (slow_incremental) {
+            /*
+             * The "slow" version of an incremental backup is to copy the entire file that was
+             * indicated to be changed. This may be useful for debugging problems that occur in
+             * backup. This path is typically disabled for the test program.
+             */
+            testutil_check(__wt_snprintf(
+              buf, sizeof(buf), "cp %s/%s %s/%s", home, filename, backup_home, filename));
+            VERBOSE(3, " => %s\n", buf);
+            testutil_check(system(buf));
+        } else {
+            /*
+             * Here is the normal incremental backup. Now that we know what file has changed, we get
+             * the specific changes
+             */
+            testutil_check(__wt_snprintf(buf, sizeof(buf), "incremental=(file=%s)", filename));
+            testutil_check(session->open_cursor(session, NULL, cursor, buf, &file_cursor));
+            VERBOSE(3, "open_cursor(session, NULL, cursor, \"%s\", &file_cursor)\n", buf);
+            while ((ret = file_cursor->next(file_cursor)) == 0) {
+                error_check(file_cursor->get_key(file_cursor, &offset, &size, &type));
+                testutil_assert(type == WT_BACKUP_FILE || type == WT_BACKUP_RANGE);
+                if (type == WT_BACKUP_RANGE) {
+                    /* A fresh buffer is allocated per range; sizes vary with each key. */
+                    nrange++;
+                    tmp = dcalloc(1, size);
+
+                    testutil_check(__wt_snprintf(buf, sizeof(buf), "%s/%s", home, filename));
+                    VERBOSE(5, "Reopen read file: %s\n", buf);
+                    reopen_file(&rfd, rbuf, sizeof(rbuf), buf, O_RDONLY);
+                    rdsize = pread(rfd, tmp, (size_t)size, (wt_off_t)offset);
+                    testutil_assert(rdsize >= 0);
+
+                    testutil_check(__wt_snprintf(buf, sizeof(buf), "%s/%s", backup_home, filename));
+                    VERBOSE(5, "Reopen write file: %s\n", buf);
+                    reopen_file(&wfd, wbuf, sizeof(wbuf), buf, O_WRONLY | O_CREAT);
+                    /* Use the read size since we may have read less than the granularity. */
+                    testutil_assert(pwrite(wfd, tmp, (size_t)rdsize, (wt_off_t)offset) == rdsize);
+                    free(tmp);
+                } else {
+                    /* WT_BACKUP_FILE: the whole file must be copied. */
+                    ncopy++;
+                    testutil_check(__wt_snprintf(
+                      buf, sizeof(buf), "cp %s/%s %s/%s", home, filename, backup_home, filename));
+                    VERBOSE(3, " => %s\n", buf);
+                    testutil_check(system(buf));
+                }
+            }
+            testutil_assert(ret == WT_NOTFOUND);
+            testutil_check(file_cursor->close(file_cursor));
+        }
+    }
+    testutil_assert(ret == WT_NOTFOUND);
+    if (rfd != -1)
+        testutil_check(close(rfd));
+    if (wfd != -1)
+        testutil_check(close(wfd));
+    testutil_check(cursor->close(cursor));
+    testutil_check(session->close(session, NULL));
+    VERBOSE(2, " finished incremental backup: %d files, %d range copy, %d file copy\n", nfiles,
+      nrange, ncopy);
+    active_files_sort(&active);
+    /* Drop files that were in the last backup but not this one. */
+    active_files_remove_missing(master_active, &active, backup_home);
+
+    /* Move the current active list to the master list */
+    active_files_move(master_active, &active);
+}
+
+/*
+ * check_table --
+ *     Verify a table's contents against the state implied by its change count: the expected
+ *     record count, and for each record the exact key and value regenerated via key_value().
+ */
+static void
+check_table(WT_SESSION *session, TABLE *table)
+{
+    WT_CURSOR *cursor;
+    WT_ITEM item, got_value;
+    OPERATION_TYPE op_type;
+    uint64_t boundary, change_count, expect_records, got_records, total_changes;
+    int keylow, keyhigh, ret;
+    u_char *value;
+    char *got_key;
+    char key[MAX_KEY_SIZE];
+
+    expect_records = 0;
+    total_changes = table->change_count;
+    /* Where we are within the current insert/update/modify/remove phase. */
+    boundary = total_changes % KEYS_PER_TABLE;
+    op_type = (OPERATION_TYPE)(total_changes % CHANGES_PER_CYCLE) / KEYS_PER_TABLE;
+    value = dcalloc(1, table->max_value_size);
+
+    VERBOSE(3, "Checking: %s\n", table->name);
+    switch (op_type) {
+    case INSERT:
+        expect_records = total_changes % KEYS_PER_TABLE;
+        break;
+    case MODIFY:
+    case UPDATE:
+        expect_records = KEYS_PER_TABLE;
+        break;
+    case REMOVE:
+        expect_records = KEYS_PER_TABLE - (total_changes % KEYS_PER_TABLE);
+        break;
+    case _OPERATION_TYPE_COUNT:
+        testutil_assert(false);
+        break;
+    }
+
+    testutil_check(session->open_cursor(session, table->name, NULL, NULL, &cursor));
+    got_records = 0;
+    while ((ret = cursor->next(cursor)) == 0) {
+        got_records++;
+        testutil_check(cursor->get_key(cursor, &got_key));
+        testutil_check(cursor->get_value(cursor, &got_value));
+
+        /*
+         * Reconstruct the change number from the key. See key_value() for details on how the key is
+         * constructed.
+         */
+        testutil_assert(sscanf(got_key, KEY_FORMAT, &keylow, &keyhigh) == 2);
+        change_count = (u_int)keyhigh * 100 + (u_int)keylow;
+        item.data = value;
+        item.size = table->max_value_size;
+        /* Advance the change count to the last change applied to this key in the current phase. */
+        if (op_type == INSERT || (op_type == UPDATE && change_count < boundary))
+            change_count += 0;
+        else if (op_type == UPDATE || (op_type == MODIFY && change_count < boundary))
+            change_count += KEYS_PER_TABLE;
+        else if (op_type == MODIFY || (op_type == REMOVE && change_count < boundary))
+            change_count += 20000; /* presumably 2 * KEYS_PER_TABLE — confirm */
+        else
+            testutil_assert(false);
+        key_value(change_count, key, sizeof(key), &item, &op_type);
+        testutil_assert(strcmp(key, got_key) == 0);
+        testutil_assert(got_value.size == item.size);
+        testutil_assert(memcmp(got_value.data, item.data, item.size) == 0);
+    }
+    testutil_assert(got_records == expect_records);
+    testutil_assert(ret == WT_NOTFOUND);
+    testutil_check(cursor->close(cursor));
+    free(value);
+}
+
+/*
+ * check_backup --
+ *     Verify the backup to make sure the proper tables exist and have the correct content. The
+ *     backup directory itself is left untouched: it is copied aside and the copy is opened, since
+ *     opening a database modifies it.
+ */
+static void
+check_backup(const char *backup_home, const char *backup_check, TABLE_INFO *tinfo)
+{
+    WT_CONNECTION *conn;
+    WT_SESSION *session;
+    uint32_t slot;
+    char buf[4096];
+
+    VERBOSE(
+      2, "CHECK BACKUP: copy %s to %s, then check %s\n", backup_home, backup_check, backup_check);
+
+    testutil_check(__wt_snprintf(
+      buf, sizeof(buf), "rm -rf %s && cp -r %s %s", backup_check, backup_home, backup_check));
+    testutil_check(system(buf));
+
+    testutil_check(wiredtiger_open(backup_check, NULL, NULL, &conn));
+    testutil_check(conn->open_session(conn, NULL, NULL, &session));
+
+    for (slot = 0; slot < tinfo->table_count; slot++) {
+        if (TABLE_VALID(&tinfo->table[slot]))
+            check_table(session, &tinfo->table[slot]);
+    }
+
+    testutil_check(session->close(session, NULL));
+    testutil_check(conn->close(conn, NULL));
+}
+
+/*
+ * main --
+ *     Drive the incremental backup stress test: create a database with a random set of tables,
+ *     then repeatedly make random schema and data changes, checkpoint, take full or incremental
+ *     backups, and verify each backup's contents.
+ */
+int
+main(int argc, char *argv[])
+{
+    ACTIVE_FILES active;
+    TABLE_INFO tinfo;
+    WT_CONNECTION *conn;
+    WT_RAND_STATE rnd;
+    WT_SESSION *session;
+    uint32_t file_max, iter, max_value_size, next_checkpoint, rough_size, slot;
+    int ch, ncheckpoints, status;
+    const char *backup_verbose, *working_dir;
+    char conf[1024], home[1024], backup_check[1024], backup_dir[1024], command[4096];
+
+    ncheckpoints = 0;
+    (void)testutil_set_progname(argv);
+    custom_die = die; /* Set our own abort handler */
+    WT_CLEAR(tinfo);
+    active_files_init(&active);
+
+    working_dir = "WT_TEST.incr_backup";
+
+    /* -h: working directory, -S: random seed, -v: verbosity level. */
+    while ((ch = __wt_getopt(progname, argc, argv, "h:S:v:")) != EOF)
+        switch (ch) {
+        case 'h':
+            working_dir = __wt_optarg;
+            break;
+        case 'S':
+            seed = (uint64_t)atoll(__wt_optarg);
+            break;
+        case 'v':
+            verbose_level = atoi(__wt_optarg);
+            break;
+        default:
+            usage();
+        }
+    argc -= __wt_optind;
+    if (argc != 0)
+        usage();
+
+    if (seed == 0) {
+        __wt_random_init_seed(NULL, &rnd);
+        seed = rnd.v;
+    } else
+        rnd.v = seed;
+
+    testutil_work_dir_from_path(home, sizeof(home), working_dir);
+    testutil_check(__wt_snprintf(backup_dir, sizeof(backup_dir), "%s.BACKUP", home));
+    testutil_check(__wt_snprintf(backup_check, sizeof(backup_check), "%s.CHECK", home));
+    /* Print the seed so a failing run can be reproduced with -S. */
+    fprintf(stderr, "Seed: %" PRIu64 "\n", seed);
+
+    testutil_check(
+      __wt_snprintf(command, sizeof(command), "rm -rf %s %s; mkdir %s", home, backup_dir, home));
+    /* NOTE(review): only a negative return is fatal; a non-zero exit status is ignored. */
+    if ((status = system(command)) < 0)
+        testutil_die(status, "system: %s", command);
+
+    backup_verbose = (verbose_level >= 4) ? "verbose=(backup)" : "";
+
+    /*
+     * We create an overall max_value_size. From that, we'll set a random max_value_size per table.
+     * In addition, individual values put into each table vary randomly in size, up to the
+     * max_value_size of the table.
+     * This tends to make sure that 1) each table has a "personality" of size ranges within it
+     * 2) there are some runs that tend to have a lot more data than other runs. If we made every
+     * insert choose a uniform random size between 1 and MAX_VALUE_SIZE, once we did a bunch
+     * of inserts, each run would look very much the same with respect to value size.
+     */
+    max_value_size = __wt_random(&rnd) % MAX_VALUE_SIZE;
+
+    /* Compute a random value of file_max. */
+    rough_size = __wt_random(&rnd) % 3;
+    if (rough_size == 0)
+        file_max = 100 + __wt_random(&rnd) % 100; /* small log files, min 100K */
+    else if (rough_size == 1)
+        file_max = 200 + __wt_random(&rnd) % 1000; /* 200K to ~1M */
+    else
+        file_max = 1000 + __wt_random(&rnd) % 20000; /* 1M to ~20M */
+    testutil_check(__wt_snprintf(conf, sizeof(conf),
+      "create,%s,log=(enabled=true,file_max=%" PRIu32 "K)", backup_verbose, file_max));
+    VERBOSE(2, "wiredtiger config: %s\n", conf);
+    testutil_check(wiredtiger_open(home, NULL, conf, &conn));
+    testutil_check(conn->open_session(conn, NULL, NULL, &session));
+
+    tinfo.table_count = __wt_random(&rnd) % MAX_NTABLES + 1;
+    tinfo.table = dcalloc(tinfo.table_count, sizeof(tinfo.table[0]));
+
+    /*
+     * Give each table its own random generator. This makes it easier to simplify a failing test to
+     * use fewer tables, but have those just tables behave the same.
+     */
+    for (slot = 0; slot < tinfo.table_count; slot++) {
+        tinfo.table[slot].rand.v = seed + slot;
+        testutil_assert(!TABLE_VALID(&tinfo.table[slot]));
+        tinfo.table[slot].max_value_size = __wt_random(&rnd) % (max_value_size + 1);
+    }
+
+    /* How many files should we update until next checkpoint. */
+    next_checkpoint = __wt_random(&rnd) % tinfo.table_count;
+
+    for (iter = 0; iter < ITERATIONS; iter++) {
+        VERBOSE(1, "**** iteration %d ****\n", (int)iter);
+
+        /*
+         * We have schema changes during about half the iterations. The number of schema changes
+         * varies, averaging 10.
+         */
+        if (tinfo.tables_in_use == 0 || __wt_random(&rnd) % 2 != 0) {
+            while (__wt_random(&rnd) % 10 != 0) {
+                /*
+                 * For schema events, we choose to create, rename or drop tables. We pick a random
+                 * slot, and if it is empty, create a table there. Otherwise, we rename or drop.
+                 * That should give us a steady state with slots mostly filled.
+                 */
+                slot = __wt_random(&rnd) % tinfo.table_count;
+                if (!TABLE_VALID(&tinfo.table[slot]))
+                    create_table(session, &tinfo, slot);
+                else if (__wt_random(&rnd) % 3 == 0 && do_rename)
+                    rename_table(session, &tinfo, slot);
+                else if (do_drop)
+                    drop_table(session, &tinfo, slot);
+            }
+        }
+        for (slot = 0; slot < tinfo.table_count; slot++) {
+            if (TABLE_VALID(&tinfo.table[slot]))
+                table_changes(session, &tinfo.table[slot]);
+            if (next_checkpoint-- == 0) {
+                VERBOSE(2, "Checkpoint %d\n", ncheckpoints);
+                testutil_check(session->checkpoint(session, NULL));
+                next_checkpoint = __wt_random(&rnd) % tinfo.table_count;
+                ncheckpoints++;
+            }
+        }
+
+        /* First iteration starts with a full backup; later ones are mostly incremental. */
+        if (iter == 0) {
+            base_backup(conn, &rnd, home, backup_dir, &tinfo, &active);
+            check_backup(backup_dir, backup_check, &tinfo);
+        } else {
+            incr_backup(conn, home, backup_dir, &tinfo, &active);
+            check_backup(backup_dir, backup_check, &tinfo);
+            if (__wt_random(&rnd) % 10 == 0) {
+                base_backup(conn, &rnd, home, backup_dir, &tinfo, &active);
+                check_backup(backup_dir, backup_check, &tinfo);
+            }
+        }
+    }
+    testutil_check(session->close(session, NULL));
+    testutil_check(conn->close(conn, NULL));
+    active_files_free(&active);
+
+    printf("Success.\n");
+    return (0);
+}
diff --git a/src/third_party/wiredtiger/test/csuite/incr_backup/smoke.sh b/src/third_party/wiredtiger/test/csuite/incr_backup/smoke.sh
new file mode 100755
index 00000000000..65727df015e
--- /dev/null
+++ b/src/third_party/wiredtiger/test/csuite/incr_backup/smoke.sh
@@ -0,0 +1,12 @@
+#! /bin/sh
+
+set -e
+
+# Smoke-test incr-backup as part of running "make check".
+
+# If $top_builddir/$top_srcdir aren't set, default to building in build_posix
+# and running in test/csuite.
+top_builddir=${top_builddir:-../../build_posix}
+top_srcdir=${top_srcdir:-../..}
+
+# Run at verbose level 3 so a failure leaves useful context in the test log.
+$TEST_WRAPPER $top_builddir/test/csuite/test_incr_backup -v 3
diff --git a/src/third_party/wiredtiger/test/evergreen.yml b/src/third_party/wiredtiger/test/evergreen.yml
index 628dc815785..2ff8e54db61 100755
--- a/src/third_party/wiredtiger/test/evergreen.yml
+++ b/src/third_party/wiredtiger/test/evergreen.yml
@@ -710,7 +710,7 @@ tasks:
${test_env_vars|} $(pwd)/../test/csuite/import/smoke.sh 2>&1
- - name: csuite-random-abort-test
+ - name: csuite-incr-backup-test
tags: ["pull_request"]
depends_on:
- name: compile
@@ -722,7 +722,20 @@ tasks:
script: |
set -o errexit
set -o verbose
+ ${test_env_vars|} $(pwd)/test/csuite/test_incr_backup 2>&1
+ - name: csuite-random-abort-test
+ tags: ["pull_request"]
+ depends_on:
+ - name: compile
+ commands:
+ - func: "fetch artifacts"
+ - command: shell.exec
+ params:
+ working_dir: "wiredtiger/build_posix"
+ script: |
+ set -o errexit
+ set -o verbose
${test_env_vars|} $(pwd)/../test/csuite/random_abort/smoke.sh 2>&1
- name: csuite-random-directio-test
diff --git a/src/third_party/wiredtiger/test/format/CONFIG.endian b/src/third_party/wiredtiger/test/format/CONFIG.endian
index 618a6ceb380..1ac81778d19 100644
--- a/src/third_party/wiredtiger/test/format/CONFIG.endian
+++ b/src/third_party/wiredtiger/test/format/CONFIG.endian
@@ -1,6 +1,6 @@
-abort=0
-cache_minimum=20
+cache.minimum=20
+format.abort=0
+logging.archive=0
logging=1
-logging_archive=0
-rows=1000000
-timer=4
+runs.timer=4
+runs.rows=1000000
diff --git a/src/third_party/wiredtiger/test/format/CONFIG.stress b/src/third_party/wiredtiger/test/format/CONFIG.stress
index 0b5251d7952..65a93e05821 100644
--- a/src/third_party/wiredtiger/test/format/CONFIG.stress
+++ b/src/third_party/wiredtiger/test/format/CONFIG.stress
@@ -1,7 +1,9 @@
# A reasonable configuration for stress testing.
-cache_minimum=20
-huffman_key=0
-huffman_value=0
-rows=1000000
+btree.huffman_key=0
+btree.huffman_value=0
+cache.minimum=20
+runs.rows=1000000:5000000
+runs.threads=4:32
+runs.timer=6:30
+runs.type=row-store
runs=100
-timer=4
diff --git a/src/third_party/wiredtiger/test/format/Makefile.am b/src/third_party/wiredtiger/test/format/Makefile.am
index bff2986f25e..0f265fa5a56 100644
--- a/src/third_party/wiredtiger/test/format/Makefile.am
+++ b/src/third_party/wiredtiger/test/format/Makefile.am
@@ -4,8 +4,8 @@ AM_CPPFLAGS +=-I$(top_srcdir)/test/utility
noinst_PROGRAMS = t
t_SOURCES =\
- backup.c bulk.c compact.c config.c ops.c random.c rebalance.c \
- salvage.c snap.c t.c util.c wts.c
+ backup.c bulk.c checkpoint.c compact.c config.c config_compat.c kv.c ops.c random.c \
+ rebalance.c salvage.c snap.c t.c util.c wts.c
t_LDADD = $(top_builddir)/test/utility/libtest_util.la
t_LDADD +=$(top_builddir)/libwiredtiger.la
@@ -25,7 +25,8 @@ backup:
refresh:
rm -rf RUNDIR && cp -p -r BACKUP RUNDIR
-TESTS = smoke.sh
+# Temporarily disabled
+# TESTS = smoke.sh
clean-local:
rm -rf RUNDIR s_dumpcmp core.* *.core
diff --git a/src/third_party/wiredtiger/test/format/backup.c b/src/third_party/wiredtiger/test/format/backup.c
index 074a254c481..5ad1cfe65dc 100644
--- a/src/third_party/wiredtiger/test/format/backup.c
+++ b/src/third_party/wiredtiger/test/format/backup.c
@@ -38,21 +38,264 @@ check_copy(void)
WT_CONNECTION *conn;
WT_DECL_RET;
WT_SESSION *session;
+ size_t len;
+ char *path;
+
+ len = strlen(g.home) + strlen("BACKUP") + 2;
+ path = dmalloc(len);
+ testutil_check(__wt_snprintf(path, len, "%s/BACKUP", g.home));
- wts_open(g.home_backup, false, &conn);
+ wts_open(path, false, &conn);
- testutil_checkfmt(conn->open_session(conn, NULL, NULL, &session), "%s", g.home_backup);
+ testutil_checkfmt(conn->open_session(conn, NULL, NULL, &session), "%s", path);
/*
* Verify can return EBUSY if the handle isn't available. Don't yield and retry, in the case of
* LSM, the handle may not be available for a long time.
*/
ret = session->verify(session, g.uri, NULL);
- testutil_assertfmt(ret == 0 || ret == EBUSY, "WT_SESSION.verify: %s: %s", g.home_backup, g.uri);
+ testutil_assertfmt(ret == 0 || ret == EBUSY, "WT_SESSION.verify: %s: %s", path, g.uri);
+
+ testutil_checkfmt(conn->close(conn, NULL), "%s", path);
+
+ free(path);
+}
+
+/*
+ * The set of active files in a backup. This is our "memory" of files that are used in each backup,
+ * so we can remove any that are not mentioned in the next backup.
+ */
+typedef struct {
+    char **names;   /* File names in the backup */
+    uint32_t count; /* Number of entries in names */
+} ACTIVE_FILES;
- testutil_checkfmt(conn->close(conn, NULL), "%s", g.home_backup);
+/*
+ * active_files_init --
+ *     Initialize (clear) the active file struct. A zeroed structure is an empty list.
+ */
+static void
+active_files_init(ACTIVE_FILES *active)
+{
+    WT_CLEAR(*active);
}
+#if 0
+/*
+ * active_files_print --
+ *     Print the set of active files for debugging. Compiled out by default; flip the #if to
+ *     enable when debugging backup failures.
+ */
+static void
+active_files_print(ACTIVE_FILES *active, const char *msg)
+{
+    uint32_t i;
+
+    if (active == NULL)
+        return;
+    fprintf(stderr, "Active files: %s, %d entries\n", msg, (int)active->count);
+    for (i = 0; i < active->count; i++)
+        fprintf(stderr, "  %s\n", active->names[i]);
+}
+#endif
+
+/*
+ * active_files_add --
+ *     Add a new name to the active file list. A NULL list is a no-op, so callers not tracking
+ *     files can pass NULL.
+ */
+static void
+active_files_add(ACTIVE_FILES *active, const char *name)
+{
+    uint32_t pos;
+
+    if (active == NULL)
+        return;
+    pos = active->count++;
+    active->names = drealloc(active->names, sizeof(char *) * active->count);
+    /* NOTE(review): strdup's return is not checked for NULL, unlike the checked drealloc. */
+    active->names[pos] = strdup(name);
+}
+
+/*
+ * active_files_sort_function --
+ *     Sort function for qsort: compare two file names lexicographically.
+ */
+static int
+active_files_sort_function(const void *left, const void *right)
+{
+    return (strcmp(*(const char **)left, *(const char **)right));
+}
+
+/*
+ * active_files_sort --
+ *     Sort the list of names in the active file list. A NULL list is a no-op.
+ */
+static void
+active_files_sort(ACTIVE_FILES *active)
+{
+    if (active == NULL)
+        return;
+    __wt_qsort(active->names, active->count, sizeof(char *), active_files_sort_function);
+}
+
+/*
+ * active_files_remove_missing --
+ *     Files in the previous list that are missing from the current list are removed, from both
+ *     the BACKUP and BACKUP.copy directories. Both lists must be sorted. A NULL previous list is
+ *     a no-op.
+ */
+static void
+active_files_remove_missing(ACTIVE_FILES *prev, ACTIVE_FILES *cur)
+{
+    uint32_t curpos, prevpos;
+    int cmp;
+    char filename[1024];
+
+    if (prev == NULL)
+        return;
+#if 0
+    active_files_print(prev, "computing removals: previous list of active files");
+    active_files_print(cur, "computing removals: current list of active files");
+#endif
+    curpos = 0;
+
+    /*
+     * Walk through the two lists looking for non-matches.
+     */
+    for (prevpos = 0; prevpos < prev->count; prevpos++) {
+again:
+        if (curpos >= cur->count)
+            cmp = -1; /* There are extra entries at the end of the prev list */
+        else
+            cmp = strcmp(prev->names[prevpos], cur->names[curpos]);
+
+        if (cmp == 0)
+            curpos++;
+        else if (cmp < 0) {
+            /*
+             * There is something in the prev list not in the current list. Remove it, and continue
+             * - don't advance the current list.
+             */
+            testutil_check(__wt_snprintf(
+              filename, sizeof(filename), "%s/BACKUP/%s", g.home, prev->names[prevpos]));
+#if 0
+            fprintf(stderr, "Removing file from backup: %s\n", filename);
+#endif
+            error_sys_check(unlink(filename));
+            testutil_check(__wt_snprintf(
+              filename, sizeof(filename), "%s/BACKUP.copy/%s", g.home, prev->names[prevpos]));
+            error_sys_check(unlink(filename));
+        } else {
+            /*
+             * There is something in the current list not in the prev list. Walk past it in the
+             * current list and try again.
+             */
+            curpos++;
+            goto again;
+        }
+    }
+}
+
+/*
+ * active_files_free --
+ *     Free the list of active files and reinitialize the struct so it can be reused. A NULL list
+ *     is a no-op.
+ */
+static void
+active_files_free(ACTIVE_FILES *active)
+{
+    uint32_t i;
+
+    if (active == NULL)
+        return;
+    for (i = 0; i < active->count; i++)
+        free(active->names[i]);
+    free(active->names);
+    active_files_init(active);
+}
+
+/*
+ * copy_blocks --
+ *     Perform a single block-based incremental backup of the given file, writing each changed
+ *     range (or the whole file) into both BACKUP and BACKUP.copy.
+ */
+static void
+copy_blocks(WT_SESSION *session, WT_CURSOR *bkup_c, const char *name)
+{
+    WT_CURSOR *incr_cur;
+    size_t len, tmp_sz;
+    ssize_t rdsize;
+    uint64_t offset, type;
+    u_int size;
+    int ret, rfd, wfd1, wfd2;
+    char buf[512], config[512], *first, *second, *tmp;
+    bool first_pass;
+
+    /*
+     * We need to prepend the home directory name here because we are not using the WiredTiger
+     * internal functions that would prepend it for us.
+     */
+    len = strlen(g.home) + strlen("BACKUP") + strlen(name) + 10;
+    first = dmalloc(len);
+
+    /*
+     * Save another copy of the original file to make debugging recovery errors easier.
+     */
+    /* NOTE(review): len is reused below for "first" too, but "first" was allocated smaller. */
+    len = strlen(g.home) + strlen("BACKUP.copy") + strlen(name) + 10;
+    second = dmalloc(len);
+    testutil_check(__wt_snprintf(config, sizeof(config), "incremental=(file=%s)", name));
+
+    /* Open the duplicate incremental backup cursor with the file name given. */
+    tmp_sz = 0;
+    tmp = NULL;
+    first_pass = true;
+    rfd = wfd1 = wfd2 = -1;
+    testutil_check(session->open_cursor(session, NULL, bkup_c, config, &incr_cur));
+    while ((ret = incr_cur->next(incr_cur)) == 0) {
+        /*
+         * NOTE(review): "size" is declared u_int but its address is cast to uint64_t * here; on
+         * LP64 platforms get_key writes 8 bytes into a 4-byte object — confirm, consider a
+         * uint64_t temporary.
+         */
+        testutil_check(incr_cur->get_key(incr_cur, &offset, (uint64_t *)&size, &type));
+        if (type == WT_BACKUP_RANGE) {
+            /*
+             * Since we are using system calls below instead of a WiredTiger function, we have to
+             * prepend the home directory to the file names ourselves.
+             */
+            testutil_check(__wt_snprintf(first, len, "%s/BACKUP/%s", g.home, name));
+            testutil_check(__wt_snprintf(second, len, "%s/BACKUP.copy/%s", g.home, name));
+            if (tmp_sz < size) {
+                tmp = drealloc(tmp, size);
+                tmp_sz = size;
+            }
+            if (first_pass) {
+                /* Open source and both destination files once, on the first range. */
+                testutil_check(__wt_snprintf(buf, sizeof(buf), "%s/%s", g.home, name));
+                error_sys_check(rfd = open(buf, O_RDONLY, 0));
+                error_sys_check(wfd1 = open(first, O_WRONLY | O_CREAT, 0));
+                error_sys_check(wfd2 = open(second, O_WRONLY | O_CREAT, 0));
+                first_pass = false;
+            }
+            error_sys_check(lseek(rfd, (wt_off_t)offset, SEEK_SET));
+            error_sys_check(rdsize = read(rfd, tmp, size));
+            error_sys_check(lseek(wfd1, (wt_off_t)offset, SEEK_SET));
+            error_sys_check(lseek(wfd2, (wt_off_t)offset, SEEK_SET));
+            /* Use the read size since we may have read less than the granularity. */
+            error_sys_check(write(wfd1, tmp, (size_t)rdsize));
+            error_sys_check(write(wfd2, tmp, (size_t)rdsize));
+        } else {
+            /*
+             * These operations are using a WiredTiger function so it will prepend the home
+             * directory to the name for us.
+             */
+            testutil_check(__wt_snprintf(first, len, "BACKUP/%s", name));
+            testutil_check(__wt_snprintf(second, len, "BACKUP.copy/%s", name));
+            testutil_assert(type == WT_BACKUP_FILE);
+            testutil_assert(rfd == -1);
+            testutil_assert(first_pass == true);
+            testutil_check(__wt_copy_and_sync(session, name, first));
+            testutil_check(__wt_copy_and_sync(session, first, second));
+        }
+    }
+    /* NOTE(review): unlike the other backup cursor loops, ret is not checked for WT_NOTFOUND. */
+    testutil_check(incr_cur->close(incr_cur));
+    if (rfd != -1) {
+        error_sys_check(close(rfd));
+        error_sys_check(close(wfd1));
+        error_sys_check(close(wfd2));
+    }
+    free(first);
+    free(second);
+    free(tmp);
+}
/*
* copy_file --
* Copy a single file into the backup directories.
@@ -71,9 +314,9 @@ copy_file(WT_SESSION *session, const char *name)
/*
* Save another copy of the original file to make debugging recovery errors easier.
*/
- len = strlen("BACKUP_COPY") + strlen(name) + 10;
+ len = strlen("BACKUP.copy") + strlen(name) + 10;
second = dmalloc(len);
- testutil_check(__wt_snprintf(second, len, "BACKUP_COPY/%s", name));
+ testutil_check(__wt_snprintf(second, len, "BACKUP.copy/%s", name));
testutil_check(__wt_copy_and_sync(session, first, second));
free(first);
@@ -81,24 +324,37 @@ copy_file(WT_SESSION *session, const char *name)
}
/*
+ * Backup directory initialize command, remove and re-create the primary backup directory, plus a
+ * copy we maintain for recovery testing.
+ */
+#define HOME_BACKUP_INIT_CMD "rm -rf %s/BACKUP %s/BACKUP.copy && mkdir %s/BACKUP %s/BACKUP.copy"
+
+/*
* backup --
* Periodically do a backup and verify it.
*/
WT_THREAD_RET
backup(void *arg)
{
+ ACTIVE_FILES active[2], *active_now, *active_prev;
WT_CONNECTION *conn;
WT_CURSOR *backup_cursor;
WT_DECL_RET;
WT_SESSION *session;
+ size_t len;
u_int incremental, period;
+ uint64_t src_id;
const char *config, *key;
- bool full;
+ char cfg[512], *cmd;
+ bool full, incr_full;
(void)(arg);
conn = g.wts_conn;
+ /* Guarantee backup ID uniqueness, we might be reopening an existing database. */
+ __wt_seconds(NULL, &g.backup_id);
+
/* Open a session. */
testutil_check(conn->open_session(conn, NULL, NULL, &session));
@@ -106,7 +362,11 @@ backup(void *arg)
* Perform a full backup at somewhere under 10 seconds (that way there's at least one), then at
* larger intervals, optionally do incremental backups between full backups.
*/
+ incr_full = true;
incremental = 0;
+ active_files_init(&active[0]);
+ active_files_init(&active[1]);
+ active_now = active_prev = NULL;
for (period = mmrand(NULL, 1, 10);; period = mmrand(NULL, 20, 45)) {
/* Sleep for short periods so we don't make the run wait. */
while (period > 0 && !g.workers_finished) {
@@ -119,23 +379,69 @@ backup(void *arg)
* with named checkpoints. Wait for the checkpoint to complete, otherwise backups might be
* starved out.
*/
- testutil_check(pthread_rwlock_wrlock(&g.backup_lock));
+ lock_writelock(session, &g.backup_lock);
if (g.workers_finished) {
- testutil_check(pthread_rwlock_unlock(&g.backup_lock));
+ lock_writeunlock(session, &g.backup_lock);
break;
}
- if (incremental) {
- config = "target=(\"log:\")";
- full = false;
+ if (g.c_backup_incr_flag == INCREMENTAL_BLOCK) {
+ /*
+ * If we're doing a full backup as the start of the incremental backup, only send in an
+ * identifier for this one.
+ */
+ if (incr_full) {
+ active_files_free(&active[0]);
+ active_files_free(&active[1]);
+ active_now = &active[g.backup_id % 2];
+ active_prev = NULL;
+ testutil_check(__wt_snprintf(
+ cfg, sizeof(cfg), "incremental=(enabled,this_id=ID%" PRIu64 ")", g.backup_id++));
+ full = true;
+ incr_full = false;
+ } else {
+ if (active_prev == &active[0])
+ active_now = &active[1];
+ else
+ active_now = &active[0];
+ src_id = g.backup_id - 1;
+ testutil_check(__wt_snprintf(cfg, sizeof(cfg),
+ "incremental=(enabled,src_id=ID%" PRIu64 ",this_id=ID%" PRIu64 ")", src_id,
+ g.backup_id++));
+ /* Restart a full incremental every once in a while. */
+ full = false;
+ incr_full = mmrand(NULL, 1, 8) == 1;
+ }
+ config = cfg;
+ /* Free up the old active file list we're going to overwrite. */
+ active_files_free(active_now);
+ } else if (g.c_logging && g.c_backup_incr_flag == INCREMENTAL_LOG) {
+ if (incr_full) {
+ config = NULL;
+ full = true;
+ incr_full = false;
+ } else {
+ testutil_check(__wt_snprintf(cfg, sizeof(cfg), "target=(\"log:\")"));
+ config = cfg;
+ full = false;
+ /* Restart a full incremental every once in a while. */
+ incr_full = mmrand(NULL, 1, 8) == 1;
+ }
} else {
- /* Re-create the backup directory. */
- testutil_checkfmt(system(g.home_backup_init), "%s", "backup directory creation failed");
-
config = NULL;
full = true;
}
+ /* If we're taking a full backup, create the backup directories. */
+ if (full || incremental == 0) {
+ len = strlen(g.home) * 4 + strlen(HOME_BACKUP_INIT_CMD) + 1;
+ cmd = dmalloc(len);
+ testutil_check(
+ __wt_snprintf(cmd, len, HOME_BACKUP_INIT_CMD, g.home, g.home, g.home, g.home));
+ testutil_checkfmt(system(cmd), "%s", "backup directory creation failed");
+ free(cmd);
+ }
+
/*
* open_cursor can return EBUSY if concurrent with a metadata operation, retry in that case.
*/
@@ -147,17 +453,28 @@ backup(void *arg)
while ((ret = backup_cursor->next(backup_cursor)) == 0) {
testutil_check(backup_cursor->get_key(backup_cursor, &key));
- copy_file(session, key);
+ if (g.c_backup_incr_flag == INCREMENTAL_BLOCK) {
+ if (full)
+ copy_file(session, key);
+ else
+ copy_blocks(session, backup_cursor, key);
+
+ } else
+ copy_file(session, key);
+ active_files_add(active_now, key);
}
if (ret != WT_NOTFOUND)
testutil_die(ret, "backup-cursor");
- /* After an incremental backup, truncate the log files. */
- if (incremental)
+ /* After a log-based incremental backup, truncate the log files. */
+ if (g.c_backup_incr_flag == INCREMENTAL_LOG)
testutil_check(session->truncate(session, "log:", backup_cursor, NULL, NULL));
testutil_check(backup_cursor->close(backup_cursor));
- testutil_check(pthread_rwlock_unlock(&g.backup_lock));
+ lock_writeunlock(session, &g.backup_lock);
+ active_files_sort(active_now);
+ active_files_remove_missing(active_prev, active_now);
+ active_prev = active_now;
/*
* If automatic log archival isn't configured, optionally do incremental backups after each
@@ -166,14 +483,19 @@ backup(void *arg)
* more incremental backups).
*/
if (full)
- incremental = g.c_logging_archive ? 1 : mmrand(NULL, 1, 5);
- if (--incremental == 0)
+ incremental = g.c_logging_archive ? 1 : mmrand(NULL, 1, 8);
+ if (--incremental == 0) {
check_copy();
+ /* We ran recovery in the backup directory, so next time it must be a full backup. */
+ incr_full = full = true;
+ }
}
if (incremental != 0)
check_copy();
+ active_files_free(&active[0]);
+ active_files_free(&active[1]);
testutil_check(session->close(session, NULL));
return (WT_THREAD_RET_VALUE);
diff --git a/src/third_party/wiredtiger/test/format/bulk.c b/src/third_party/wiredtiger/test/format/bulk.c
index ff32ad1f6a0..69f986aa79a 100644
--- a/src/third_party/wiredtiger/test/format/bulk.c
+++ b/src/third_party/wiredtiger/test/format/bulk.c
@@ -59,7 +59,7 @@ bulk_commit_transaction(WT_SESSION *session)
testutil_check(session->commit_transaction(session, buf));
/* Update the oldest timestamp, otherwise updates are pinned in memory. */
- timestamp_once();
+ timestamp_once(session);
}
/*
@@ -80,6 +80,7 @@ wts_load(void)
WT_DECL_RET;
WT_ITEM key, value;
WT_SESSION *session;
+ uint32_t keyno;
bool is_bulk;
conn = g.wts_conn;
@@ -111,11 +112,11 @@ wts_load(void)
if (g.c_txn_timestamps)
bulk_begin_transaction(session);
- while (++g.key_cnt <= g.c_rows) {
+ for (keyno = 0; ++keyno <= g.c_rows;) {
/* Do some checking every 10K operations. */
- if (g.key_cnt % 10000 == 0) {
+ if (keyno % 10000 == 0) {
/* Report on progress. */
- track("bulk load", g.key_cnt, NULL);
+ track("bulk load", keyno, NULL);
/* Restart the enclosing transaction so we don't overflow the cache. */
if (g.c_txn_timestamps) {
@@ -124,28 +125,28 @@ wts_load(void)
}
}
- key_gen(&key, g.key_cnt);
- val_gen(NULL, &value, g.key_cnt);
+ key_gen(&key, keyno);
+ val_gen(NULL, &value, keyno);
switch (g.type) {
case FIX:
if (!is_bulk)
- cursor->set_key(cursor, g.key_cnt);
+ cursor->set_key(cursor, keyno);
cursor->set_value(cursor, *(uint8_t *)value.data);
- logop(session, "%-10s %" PRIu64 " {0x%02" PRIx8 "}", "bulk", g.key_cnt,
+ logop(session, "%-10s %" PRIu32 " {0x%02" PRIx8 "}", "bulk", keyno,
((uint8_t *)value.data)[0]);
break;
case VAR:
if (!is_bulk)
- cursor->set_key(cursor, g.key_cnt);
+ cursor->set_key(cursor, keyno);
cursor->set_value(cursor, &value);
- logop(session, "%-10s %" PRIu64 " {%.*s}", "bulk", g.key_cnt, (int)value.size,
+ logop(session, "%-10s %" PRIu32 " {%.*s}", "bulk", keyno, (int)value.size,
(char *)value.data);
break;
case ROW:
cursor->set_key(cursor, &key);
cursor->set_value(cursor, &value);
- logop(session, "%-10s %" PRIu64 " {%.*s}, {%.*s}", "bulk", g.key_cnt, (int)key.size,
+ logop(session, "%-10s %" PRIu32 " {%.*s}, {%.*s}", "bulk", keyno, (int)key.size,
(char *)key.data, (int)value.size, (char *)value.data);
break;
}
@@ -155,9 +156,6 @@ wts_load(void)
* case, guaranteeing the load succeeds probably means future updates are also guaranteed to
* succeed, which isn't what we want. If we run out of space in the initial load, reset the
* row counter and continue.
- *
- * Decrease inserts, they can't be successful if we're at the cache limit, and increase the
- * delete percentage to get some extra space once the run starts.
*/
if ((ret = cursor->insert(cursor)) != 0) {
testutil_assert(ret == WT_CACHE_FULL || ret == WT_ROLLBACK);
@@ -167,18 +165,31 @@ wts_load(void)
bulk_begin_transaction(session);
}
- if (g.c_insert_pct > 5)
+ /*
+ * Decrease inserts since they won't be successful if we're hitting cache limits, and
+ * increase the delete percentage to get some extra space once the run starts. We can't
+ * simply modify the values because they have to equal 100 when the database is reopened
+ * (we are going to rewrite the CONFIG file, too).
+ */
+ if (g.c_insert_pct > 5) {
+ g.c_delete_pct += g.c_insert_pct - 5;
g.c_insert_pct = 5;
- if (g.c_delete_pct < 20)
- g.c_delete_pct += 20;
- break;
+ }
+ g.c_delete_pct += g.c_write_pct / 2;
+ g.c_write_pct = g.c_write_pct / 2;
}
}
- /* We may have exited the loop early, reset all of our counters to match our insert count. */
- --g.key_cnt;
- g.rows = g.key_cnt;
- g.c_rows = (uint32_t)g.key_cnt;
+ /*
+ * We may have exited the loop early, reset our counters to match our insert count. If the count
+ * changed, rewrite the CONFIG file so reopens aren't surprised.
+ */
+ --keyno;
+ if (g.rows != keyno) {
+ g.rows = keyno;
+ g.c_rows = (uint32_t)keyno;
+ config_print(false);
+ }
if (g.c_txn_timestamps)
bulk_commit_transaction(session);
diff --git a/src/third_party/wiredtiger/test/format/checkpoint.c b/src/third_party/wiredtiger/test/format/checkpoint.c
new file mode 100644
index 00000000000..36e70ae3125
--- /dev/null
+++ b/src/third_party/wiredtiger/test/format/checkpoint.c
@@ -0,0 +1,120 @@
+/*-
+ * Public Domain 2014-2020 MongoDB, Inc.
+ * Public Domain 2008-2014 WiredTiger, Inc.
+ *
+ * This is free and unencumbered software released into the public domain.
+ *
+ * Anyone is free to copy, modify, publish, use, compile, sell, or
+ * distribute this software, either in source code form or as a compiled
+ * binary, for any purpose, commercial or non-commercial, and by any
+ * means.
+ *
+ * In jurisdictions that recognize copyright laws, the author or authors
+ * of this software dedicate any and all copyright interest in the
+ * software to the public domain. We make this dedication for the benefit
+ * of the public at large and to the detriment of our heirs and
+ * successors. We intend this dedication to be an overt act of
+ * relinquishment in perpetuity of all present and future rights to this
+ * software under copyright law.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "format.h"
+
+/*
+ * wts_checkpoints --
+ * Configure WiredTiger library checkpoints.
+ */
+void
+wts_checkpoints(void)
+{
+ char config[1024];
+
+ testutil_check(
+ __wt_snprintf(config, sizeof(config), ",checkpoint=(wait=%" PRIu32 ",log_size=%" PRIu32 ")",
+ g.c_checkpoint_wait, MEGABYTE(g.c_checkpoint_log_size)));
+ testutil_check(g.wts_conn->reconfigure(g.wts_conn, config));
+}
+
+/*
+ * checkpoint --
+ * Periodically take a checkpoint in a format thread.
+ */
+WT_THREAD_RET
+checkpoint(void *arg)
+{
+ WT_CONNECTION *conn;
+ WT_DECL_RET;
+ WT_SESSION *session;
+ u_int secs;
+ char config_buf[64];
+ const char *ckpt_config;
+ bool backup_locked;
+
+ (void)arg;
+ conn = g.wts_conn;
+ testutil_check(conn->open_session(conn, NULL, NULL, &session));
+
+ for (secs = mmrand(NULL, 1, 10); !g.workers_finished;) {
+ if (secs > 0) {
+ __wt_sleep(1, 0);
+ --secs;
+ continue;
+ }
+
+ /*
+ * LSM and data-sources don't support named checkpoints. Also, don't attempt named
+ * checkpoints during a hot backup. It's OK to create named checkpoints during a hot backup,
+ * but we can't delete them, so repeating an already existing named checkpoint will fail
+ * when we can't drop the previous one.
+ */
+ ckpt_config = NULL;
+ backup_locked = false;
+ if (!DATASOURCE("lsm"))
+ switch (mmrand(NULL, 1, 20)) {
+ case 1:
+ /*
+ * 5% create a named snapshot. Rotate between a
+ * few names to test multiple named snapshots in
+ * the system.
+ */
+ ret = lock_try_writelock(session, &g.backup_lock);
+ if (ret == 0) {
+ backup_locked = true;
+ testutil_check(__wt_snprintf(
+ config_buf, sizeof(config_buf), "name=mine.%" PRIu32, mmrand(NULL, 1, 4)));
+ ckpt_config = config_buf;
+ } else if (ret != EBUSY)
+ testutil_check(ret);
+ break;
+ case 2:
+ /*
+ * 5% drop all named snapshots.
+ */
+ ret = lock_try_writelock(session, &g.backup_lock);
+ if (ret == 0) {
+ backup_locked = true;
+ ckpt_config = "drop=(all)";
+ } else if (ret != EBUSY)
+ testutil_check(ret);
+ break;
+ }
+
+ testutil_check(session->checkpoint(session, ckpt_config));
+
+ if (backup_locked)
+ lock_writeunlock(session, &g.backup_lock);
+
+ secs = mmrand(NULL, 5, 40);
+ }
+
+ testutil_check(session->close(session, NULL));
+ return (WT_THREAD_RET_VALUE);
+}
diff --git a/src/third_party/wiredtiger/test/format/config.c b/src/third_party/wiredtiger/test/format/config.c
index 1bfe87473c4..54a09229ce4 100644
--- a/src/third_party/wiredtiger/test/format/config.c
+++ b/src/third_party/wiredtiger/test/format/config.c
@@ -29,6 +29,9 @@
#include "format.h"
#include "config.h"
+static void config(void);
+static void config_backup_incr(void);
+static void config_backward_compatible(void);
static void config_cache(void);
static void config_checkpoint(void);
static void config_checksum(void);
@@ -41,6 +44,7 @@ static void config_in_memory(void);
static void config_in_memory_reset(void);
static int config_is_perm(const char *);
static void config_lsm_reset(void);
+static void config_map_backup_incr(const char *, u_int *);
static void config_map_checkpoint(const char *, u_int *);
static void config_map_checksum(const char *, u_int *);
static void config_map_compression(const char *, u_int *);
@@ -58,11 +62,28 @@ static void config_transaction(void);
#define DISABLE_RANDOM_LSM_TESTING 1
/*
- * config_setup --
- * Initialize configuration for a run.
+ * config_final --
+ * Final run initialization.
*/
void
-config_setup(void)
+config_final(void)
+{
+ config(); /* Finish up configuration and review it. */
+
+ config_print(false);
+
+ g.rows = g.c_rows; /* Set the key count. */
+
+ key_init(); /* Initialize key/value information. */
+ val_init();
+}
+
+/*
+ * config --
+ * Initialize the configuration itself.
+ */
+static void
+config(void)
{
CONFIG *cp;
char buf[128];
@@ -77,19 +98,19 @@ config_setup(void)
* Choose a file format and a data source: they're interrelated (LSM is only compatible with
* row-store) and other items depend on them.
*/
- if (!config_is_perm("file_type")) {
- if (config_is_perm("data_source") && DATASOURCE("lsm"))
- config_single("file_type=row", false);
+ if (!config_is_perm("runs.type")) {
+ if (config_is_perm("runs.source") && DATASOURCE("lsm"))
+ config_single("runs.type=row", false);
else
switch (mmrand(NULL, 1, 10)) {
case 1:
case 2:
case 3: /* 30% */
- config_single("file_type=var", false);
+ config_single("runs.type=var", false);
break;
case 4: /* 10% */
if (config_fix()) {
- config_single("file_type=fix", false);
+ config_single("runs.type=fix", false);
break;
}
/* FALLTHROUGH */ /* 60% */
@@ -99,17 +120,17 @@ config_setup(void)
case 8:
case 9:
case 10:
- config_single("file_type=row", false);
+ config_single("runs.type=row", false);
break;
}
}
config_map_file_type(g.c_file_type, &g.type);
- if (!config_is_perm("data_source")) {
- config_single("data_source=table", false);
+ if (!config_is_perm("runs.source")) {
+ config_single("runs.source=table", false);
switch (mmrand(NULL, 1, 5)) {
case 1: /* 20% */
- config_single("data_source=file", false);
+ config_single("runs.source=file", false);
break;
case 2: /* 20% */
#if !defined(DISABLE_RANDOM_LSM_TESTING)
@@ -123,11 +144,11 @@ config_setup(void)
*/
if (g.type != ROW || g.c_in_memory)
break;
- if (config_is_perm("transaction_timestamps") && g.c_txn_timestamps)
+ if (config_is_perm("transaction.timestamps") && g.c_txn_timestamps)
break;
- if (config_is_perm("truncate") && g.c_truncate)
+ if (config_is_perm("ops.truncate") && g.c_truncate)
break;
- config_single("data_source=lsm", false);
+ config_single("runs.source=lsm", false);
#endif
break;
case 3:
@@ -137,13 +158,10 @@ config_setup(void)
}
}
- /*
- * If data_source and file_type were both "permanent", we may still have a mismatch.
- */
- if (DATASOURCE("lsm") && g.type != ROW) {
- fprintf(stderr, "%s: lsm data_source is only compatible with row file_type\n", progname);
- exit(EXIT_FAILURE);
- }
+ /* If data_source and file_type were both "permanent", we may still have a mismatch. */
+ if (DATASOURCE("lsm") && g.type != ROW)
+ testutil_die(
+ EINVAL, "%s: lsm data_source is only compatible with row file_type\n", progname);
/*
* Build the top-level object name: we're overloading data_source in our configuration, LSM
@@ -173,16 +191,17 @@ config_setup(void)
/* Only row-store tables support collation order. */
if (g.type != ROW)
- config_single("reverse=off", false);
+ config_single("btree.reverse=off", false);
/* First, transaction configuration, it configures other features. */
config_transaction();
/* Simple selection. */
+ config_backup_incr();
config_checkpoint();
config_checksum();
- config_compression("compression");
- config_compression("logging_compression");
+ config_compression("btree.compression");
+ config_compression("logging.compression");
config_encryption();
/* Configuration based on the configuration already chosen. */
@@ -190,25 +209,26 @@ config_setup(void)
config_pct();
config_cache();
- /* Give in-memory and LSM configurations a final review. */
+ /* Give in-memory, LSM and backward compatible configurations a final review. */
if (g.c_in_memory != 0)
config_in_memory_reset();
if (DATASOURCE("lsm"))
config_lsm_reset();
+ config_backward_compatible();
/*
* Key/value minimum/maximum are related, correct unless specified by the configuration.
*/
- if (!config_is_perm("key_min") && g.c_key_min > g.c_key_max)
+ if (!config_is_perm("btree.key_min") && g.c_key_min > g.c_key_max)
g.c_key_min = g.c_key_max;
- if (!config_is_perm("key_max") && g.c_key_max < g.c_key_min)
+ if (!config_is_perm("btree.key_max") && g.c_key_max < g.c_key_min)
g.c_key_max = g.c_key_min;
if (g.c_key_min > g.c_key_max)
testutil_die(EINVAL, "key_min may not be larger than key_max");
- if (!config_is_perm("value_min") && g.c_value_min > g.c_value_max)
+ if (!config_is_perm("btree.value_min") && g.c_value_min > g.c_value_max)
g.c_value_min = g.c_value_max;
- if (!config_is_perm("value_max") && g.c_value_max < g.c_value_min)
+ if (!config_is_perm("btree.value_max") && g.c_value_max < g.c_value_min)
g.c_value_max = g.c_value_min;
if (g.c_value_min > g.c_value_max)
testutil_die(EINVAL, "value_min may not be larger than value_max");
@@ -225,18 +245,107 @@ config_setup(void)
* operations but the rest of the configuration means operations take a long time to complete
* (for example, a small cache and many worker threads), don't let it run forever.
*/
- if (config_is_perm("timer")) {
- if (!config_is_perm("ops"))
- config_single("ops=0", false);
+ if (config_is_perm("runs.timer")) {
+ if (!config_is_perm("runs.ops"))
+ config_single("runs.ops=0", false);
} else {
- if (!config_is_perm("ops"))
- config_single("timer=30", false);
+ if (!config_is_perm("runs.ops"))
+ config_single("runs.timer=30", false);
else
- config_single("timer=360", false);
+ config_single("runs.timer=360", false);
+ }
+}
+
+/*
+ * config_backup_incr --
+ * Incremental backup configuration.
+ */
+static void
+config_backup_incr(void)
+{
+ /* Incremental backup requires backup. */
+ if (g.c_backups == 0)
+ return;
+
+ /*
+ * Incremental backup using log files is incompatible with logging archival. Testing log file
+ * archival doesn't seem as useful as testing backup, let the backup configuration override.
+ */
+ if (config_is_perm("backup.incremental")) {
+ if (g.c_backup_incr_flag == INCREMENTAL_LOG) {
+ if (g.c_logging_archive && config_is_perm("logging.archive"))
+ testutil_die(EINVAL, "backup.incremental=log is incompatible with logging.archive");
+ if (g.c_logging_archive)
+ config_single("logging.archive=0", false);
+ }
+ return;
}
- /* Reset the key count. */
- g.key_cnt = 0;
+ /*
+ * Choose a type of incremental backup, where the log archival setting can eliminate incremental
+ * backup based on log files.
+ */
+ switch (mmrand(NULL, 1, 10)) {
+ case 1: /* 30% full backup only */
+ case 2:
+ case 3:
+ config_single("backup.incremental=off", false);
+ break;
+ case 4: /* 30% log based incremental */
+ case 5:
+ case 6:
+ if (!g.c_logging_archive || !config_is_perm("logging.archive")) {
+ if (g.c_logging_archive)
+ config_single("logging.archive=0", false);
+ config_single("backup.incremental=log", false);
+ }
+ /* FALLTHROUGH */
+ case 7: /* 40% block based incremental */
+ case 8:
+ case 9:
+ case 10:
+ config_single("backup.incremental=block", false);
+ break;
+ }
+}
+
+/*
+ * config_backward_compatible --
+ * Backward compatibility configuration.
+ */
+static void
+config_backward_compatible(void)
+{
+ bool backward_compatible;
+
+ /*
+ * If built in a branch that doesn't support all current options, or creating a database for
+ * such an environment, strip out configurations that won't work.
+ */
+ backward_compatible = g.backward_compatible;
+#if WIREDTIGER_VERSION_MAJOR < 10
+ backward_compatible = true;
+#endif
+ if (!backward_compatible)
+ return;
+
+ if (g.c_backup_incr_flag != INCREMENTAL_OFF) {
+ if (config_is_perm("backup.incremental"))
+ testutil_die(EINVAL, "incremental backup not supported in backward compatibility mode");
+ config_single("backup.incremental=off", false);
+ }
+
+ if (g.c_mmap_all) {
+ if (config_is_perm("disk.mmap_all"))
+ testutil_die(EINVAL, "disk.mmap_all not supported in backward compatibility mode");
+ config_single("disk.mmap_all=off", false);
+ }
+
+ if (g.c_timing_stress_hs_sweep) {
+ if (config_is_perm("stress.hs_sweep"))
+ testutil_die(EINVAL, "stress.hs_sweep not supported in backward compatibility mode");
+ config_single("stress.hs_sweep=off", false);
+ }
}
/*
@@ -254,7 +363,7 @@ config_cache(void)
/* Check if a minimum cache size has been specified. */
if (config_is_perm("cache")) {
- if (config_is_perm("cache_minimum") && g.c_cache_minimum != 0 &&
+ if (config_is_perm("cache.minimum") && g.c_cache_minimum != 0 &&
g.c_cache < g.c_cache_minimum)
testutil_die(EINVAL, "minimum cache set larger than cache (%" PRIu32 " > %" PRIu32 ")",
g.c_cache_minimum, g.c_cache);
@@ -303,19 +412,19 @@ static void
config_checkpoint(void)
{
/* Choose a checkpoint mode if nothing was specified. */
- if (!config_is_perm("checkpoints"))
+ if (!config_is_perm("checkpoint"))
switch (mmrand(NULL, 1, 20)) {
case 1:
case 2:
case 3:
case 4: /* 20% */
- config_single("checkpoints=wiredtiger", false);
+ config_single("checkpoint=wiredtiger", false);
break;
case 5: /* 5 % */
- config_single("checkpoints=off", false);
+ config_single("checkpoint=off", false);
break;
default: /* 75% */
- config_single("checkpoints=on", false);
+ config_single("checkpoint=on", false);
break;
}
}
@@ -328,16 +437,16 @@ static void
config_checksum(void)
{
/* Choose a checksum mode if nothing was specified. */
- if (!config_is_perm("checksum"))
+ if (!config_is_perm("disk.checksum"))
switch (mmrand(NULL, 1, 10)) {
case 1: /* 10% */
- config_single("checksum=on", false);
+ config_single("disk.checksum=on", false);
break;
case 2: /* 10% */
- config_single("checksum=off", false);
+ config_single("disk.checksum=off", false);
break;
default: /* 80% */
- config_single("checksum=uncompressed", false);
+ config_single("disk.checksum=uncompressed", false);
break;
}
}
@@ -361,7 +470,7 @@ config_compression(const char *conf_name)
* but it's confusing).
*/
cstr = "none";
- if (strcmp(conf_name, "logging_compression") == 0 && g.c_logging == 0) {
+ if (strcmp(conf_name, "logging.compression") == 0 && g.c_logging == 0) {
testutil_check(__wt_snprintf(confbuf, sizeof(confbuf), "%s=%s", conf_name, cstr));
config_single(confbuf, false);
return;
@@ -436,9 +545,9 @@ config_directio(void)
* direct I/O in Linux won't work. If direct I/O is configured, turn off backups.
*/
if (g.c_backups) {
- if (config_is_perm("backups"))
- testutil_die(EINVAL, "backups are incompatible with direct I/O");
- config_single("backups=off", false);
+ if (config_is_perm("backup"))
+ testutil_die(EINVAL, "backup are incompatible with direct I/O");
+ config_single("backup=off", false);
}
/*
@@ -448,14 +557,14 @@ config_directio(void)
* child process termination, but it's not worth the effort.
*/
if (g.c_rebalance) {
- if (config_is_perm("rebalance"))
+ if (config_is_perm("ops.rebalance"))
testutil_die(EINVAL, "rebalance is incompatible with direct I/O");
- config_single("rebalance=off", false);
+ config_single("ops.rebalance=off", false);
}
if (g.c_salvage) {
- if (config_is_perm("salvage"))
+ if (config_is_perm("ops.salvage"))
testutil_die(EINVAL, "salvage is incompatible with direct I/O");
- config_single("salvage=off", false);
+ config_single("ops.salvage=off", false);
}
}
@@ -471,8 +580,8 @@ config_encryption(void)
/*
* Encryption: choose something if encryption wasn't specified.
*/
- if (!config_is_perm("encryption")) {
- cstr = "encryption=none";
+ if (!config_is_perm("disk.encryption")) {
+ cstr = "disk.encryption=none";
switch (mmrand(NULL, 1, 10)) {
case 1:
case 2:
@@ -485,7 +594,7 @@ config_encryption(void)
case 8:
case 9:
case 10: /* 30% rotn */
- cstr = "encryption=rotn-7";
+ cstr = "disk.encryption=rotn-7";
break;
}
@@ -500,8 +609,8 @@ config_encryption(void)
static bool
config_fix(void)
{
- /* Fixed-length column stores don't support the lookaside table, so no modify operations. */
- if (config_is_perm("modify_pct"))
+ /* Fixed-length column stores don't support the history store table, so no modify operations. */
+ if (config_is_perm("ops.pct.modify"))
return (false);
return (true);
}
@@ -519,24 +628,24 @@ config_in_memory(void)
* don't have to configure in-memory every time we configure something like LSM, that's too
* painful.
*/
- if (config_is_perm("backups"))
+ if (config_is_perm("backup"))
return;
- if (config_is_perm("checkpoints"))
+ if (config_is_perm("checkpoint"))
return;
- if (config_is_perm("compression"))
+ if (config_is_perm("btree.compression"))
return;
- if (config_is_perm("data_source") && DATASOURCE("lsm"))
+ if (config_is_perm("runs.source") && DATASOURCE("lsm"))
return;
if (config_is_perm("logging"))
return;
- if (config_is_perm("rebalance"))
+ if (config_is_perm("ops.rebalance"))
return;
- if (config_is_perm("salvage"))
+ if (config_is_perm("ops.salvage"))
return;
- if (config_is_perm("verify"))
+ if (config_is_perm("ops.verify"))
return;
- if (!config_is_perm("in_memory") && mmrand(NULL, 1, 20) == 1)
+ if (!config_is_perm("runs.in_memory") && mmrand(NULL, 1, 20) == 1)
g.c_in_memory = 1;
}
@@ -550,31 +659,31 @@ config_in_memory_reset(void)
uint32_t cache;
/* Turn off a lot of stuff. */
- if (!config_is_perm("alter"))
- config_single("alter=off", false);
- if (!config_is_perm("backups"))
- config_single("backups=off", false);
- if (!config_is_perm("checkpoints"))
- config_single("checkpoints=off", false);
- if (!config_is_perm("compression"))
- config_single("compression=none", false);
+ if (!config_is_perm("ops.alter"))
+ config_single("ops.alter=off", false);
+ if (!config_is_perm("backup"))
+ config_single("backup=off", false);
+ if (!config_is_perm("checkpoint"))
+ config_single("checkpoint=off", false);
+ if (!config_is_perm("btree.compression"))
+ config_single("btree.compression=none", false);
if (!config_is_perm("logging"))
config_single("logging=off", false);
- if (!config_is_perm("rebalance"))
- config_single("rebalance=off", false);
- if (!config_is_perm("salvage"))
- config_single("salvage=off", false);
- if (!config_is_perm("verify"))
- config_single("verify=off", false);
+ if (!config_is_perm("ops.rebalance"))
+ config_single("ops.rebalance=off", false);
+ if (!config_is_perm("ops.salvage"))
+ config_single("ops.salvage=off", false);
+ if (!config_is_perm("ops.verify"))
+ config_single("ops.verify=off", false);
/*
* Keep keys/values small, overflow items aren't an issue for in-memory configurations and it
* keeps us from overflowing the cache.
*/
- if (!config_is_perm("key_max"))
- config_single("key_max=32", false);
- if (!config_is_perm("value_max"))
- config_single("value_max=80", false);
+ if (!config_is_perm("btree.key_max"))
+ config_single("btree.key_max=32", false);
+ if (!config_is_perm("btree.value_max"))
+ config_single("btree.value_max=80", false);
/*
* Size the cache relative to the initial data set, use 2x the base size as a minimum.
@@ -602,17 +711,17 @@ config_lsm_reset(void)
* Turn off truncate for LSM runs (some configurations with truncate always result in a
* timeout).
*/
- if (!config_is_perm("truncate"))
- config_single("truncate=off", false);
+ if (!config_is_perm("ops.truncate"))
+ config_single("ops.truncate=off", false);
/*
* LSM doesn't currently play nicely with timestamps, don't choose the pair unless forced to. If
* we turn off timestamps, make sure we turn off prepare as well, it requires timestamps. Remove
* this code with WT-4162.
*/
- if (!config_is_perm("prepare") && !config_is_perm("transaction_timestamps")) {
- config_single("prepare=off", false);
- config_single("transaction_timestamps=off", false);
+ if (!config_is_perm("ops.prepare") && !config_is_perm("transaction.timestamps")) {
+ config_single("ops.prepare=off", false);
+ config_single("transaction.timestamps=off", false);
}
}
@@ -628,10 +737,10 @@ config_pct(void)
uint32_t *vp; /* Value store */
u_int order; /* Order of assignment */
} list[] = {
- {"delete_pct", &g.c_delete_pct, 0}, {"insert_pct", &g.c_insert_pct, 0},
+ {"ops.pct.delete", &g.c_delete_pct, 0}, {"ops.pct.insert", &g.c_insert_pct, 0},
#define CONFIG_MODIFY_ENTRY 2
- {"modify_pct", &g.c_modify_pct, 0}, {"read_pct", &g.c_read_pct, 0},
- {"write_pct", &g.c_write_pct, 0},
+ {"ops.pct.modify", &g.c_modify_pct, 0}, {"ops.pct.read", &g.c_read_pct, 0},
+ {"ops.pct.write", &g.c_write_pct, 0},
};
u_int i, max_order, max_slot, n, pct;
@@ -650,7 +759,7 @@ config_pct(void)
/* Cursor modify isn't possible for fixed-length column store. */
if (g.type == FIX) {
- if (config_is_perm("modify_pct") && g.c_modify_pct != 0)
+ if (config_is_perm("ops.pct.modify") && g.c_modify_pct != 0)
testutil_die(EINVAL, "WT_CURSOR.modify not supported by fixed-length column store");
list[CONFIG_MODIFY_ENTRY].order = 0;
*list[CONFIG_MODIFY_ENTRY].vp = 0;
@@ -664,7 +773,8 @@ config_pct(void)
*/
if (g.c_isolation_flag == ISOLATION_READ_COMMITTED ||
g.c_isolation_flag == ISOLATION_READ_UNCOMMITTED) {
- if (config_is_perm("isolation") && config_is_perm("modify_pct") && g.c_modify_pct != 0)
+ if (config_is_perm("transaction.isolation") && config_is_perm("ops.pct.modify") &&
+ g.c_modify_pct != 0)
testutil_die(
EINVAL, "WT_CURSOR.modify only supported with snapshot isolation transactions");
@@ -710,66 +820,94 @@ config_pct(void)
static void
config_transaction(void)
{
- bool prepare_requires_ts;
+ /*
+ * WiredTiger cannot support relaxed isolation levels. Turn off everything but timestamps with
+ * snapshot isolation.
+ */
+ if ((!g.c_txn_timestamps && config_is_perm("transaction.timestamps")) ||
+ (g.c_isolation_flag != ISOLATION_SNAPSHOT && config_is_perm("transaction.isolation")))
+ testutil_die(EINVAL, "format limited to timestamp and snapshot-isolation testing");
+ if (!g.c_txn_timestamps)
+ config_single("transaction.timestamps=on", false);
+ if (g.c_isolation_flag != ISOLATION_SNAPSHOT)
+ config_single("transaction.isolation=snapshot", false);
/*
- * We can't prepare a transaction if logging is configured or timestamps aren't configured.
- * Further, for repeatable reads to work in timestamp testing, all updates must be within a
- * snapshot-isolation transaction. Check for incompatible configurations, then let prepare and
- * timestamp drive the remaining configuration.
+ * Check the permanent configuration. We can't prepare a transaction if logging is configured or
+ * timestamps aren't configured. For repeatable reads to work in timestamp testing, all updates
+ * must be done in a snapshot isolation transaction.
*/
- prepare_requires_ts = false;
- if (g.c_prepare) {
- if (config_is_perm("prepare")) {
- if (g.c_logging && config_is_perm("logging"))
- testutil_die(EINVAL, "prepare is incompatible with logging");
- if (!g.c_txn_timestamps && config_is_perm("transaction_timestamps"))
- testutil_die(EINVAL, "prepare requires transaction timestamps");
- } else if ((g.c_logging && config_is_perm("logging")) ||
- (!g.c_txn_timestamps && config_is_perm("transaction_timestamps")))
- config_single("prepare=off", false);
- if (g.c_prepare) {
- prepare_requires_ts = true;
- if (g.c_logging)
- config_single("logging=off", false);
- if (!g.c_txn_timestamps)
- config_single("transaction_timestamps=on", false);
- }
+ if (g.c_prepare && config_is_perm("ops.prepare")) {
+ if (g.c_logging && config_is_perm("logging"))
+ testutil_die(EINVAL, "prepare is incompatible with logging");
+ if (!g.c_txn_timestamps && config_is_perm("transaction.timestamps"))
+ testutil_die(EINVAL, "prepare requires transaction timestamps");
+ if (g.c_isolation_flag != ISOLATION_SNAPSHOT && config_is_perm("transaction.isolation"))
+ testutil_die(EINVAL, "prepare requires snapshot isolation");
+ if (g.c_txn_freq != 100 && config_is_perm("transaction.frequency"))
+ testutil_die(EINVAL, "prepare requires transaction frequency set to 100");
+ }
+ if (g.c_txn_timestamps && config_is_perm("transaction.timestamps")) {
+ if (g.c_isolation_flag != ISOLATION_SNAPSHOT && config_is_perm("transaction.isolation"))
+ testutil_die(EINVAL, "timestamps require snapshot isolation");
+ if (g.c_txn_freq != 100 && config_is_perm("transaction.frequency"))
+ testutil_die(EINVAL, "timestamps require transaction frequency set to 100");
+ }
+ if (g.c_isolation_flag == ISOLATION_SNAPSHOT && config_is_perm("transaction.isolation")) {
+ if (!g.c_txn_timestamps && config_is_perm("transaction.timestamps"))
+ testutil_die(EINVAL, "snapshot isolation requires timestamps");
+ if (g.c_txn_freq != 100 && config_is_perm("transaction.frequency"))
+ testutil_die(EINVAL, "snapshot isolation requires transaction frequency set to 100");
}
- if (g.c_txn_timestamps) {
- if (prepare_requires_ts || config_is_perm("transaction_timestamps")) {
- if (g.c_isolation_flag != ISOLATION_SNAPSHOT && config_is_perm("isolation"))
- testutil_die(
- EINVAL, "transaction_timestamps or prepare require isolation=snapshot");
- if (g.c_txn_freq != 100 && config_is_perm("transaction-frequency"))
- testutil_die(
- EINVAL, "transaction_timestamps or prepare require transaction-frequency=100");
- } else if ((g.c_isolation_flag != ISOLATION_SNAPSHOT && config_is_perm("isolation")) ||
- (g.c_txn_freq != 100 && config_is_perm("transaction-frequency")))
- config_single("transaction_timestamps=off", false);
+ /*
+ * The permanent configuration has no incompatible settings, adjust the temporary configuration
+ * as necessary. Prepare overrides timestamps, overrides isolation, for no reason other than
+ * prepare is the least configured and timestamps are the option we want to test the most.
+ */
+ if (g.c_prepare) {
+ if (g.c_logging)
+ config_single("logging=off", false);
+ if (!g.c_txn_timestamps)
+ config_single("transaction.timestamps=on", false);
+ if (g.c_isolation_flag != ISOLATION_SNAPSHOT)
+ config_single("transaction.isolation=snapshot", false);
+ if (g.c_txn_freq != 100)
+ config_single("transaction.frequency=100", false);
}
if (g.c_txn_timestamps) {
if (g.c_isolation_flag != ISOLATION_SNAPSHOT)
- config_single("isolation=snapshot", false);
+ config_single("transaction.isolation=snapshot", false);
if (g.c_txn_freq != 100)
- config_single("transaction-frequency=100", false);
- } else if (!config_is_perm("isolation"))
- switch (mmrand(NULL, 1, 4)) {
- case 1:
- config_single("isolation=random", false);
+ config_single("transaction.frequency=100", false);
+ }
+ if (g.c_isolation_flag == ISOLATION_NOT_SET) {
+ switch (mmrand(NULL, 1, 20)) {
+ case 1: /* 5% */
+ config_single("transaction.isolation=random", false);
break;
- case 2:
- config_single("isolation=read-uncommitted", false);
+ case 2: /* 5% */
+ config_single("transaction.isolation=read-uncommitted", false);
break;
- case 3:
- config_single("isolation=read-committed", false);
+ case 3: /* 5% */
+ config_single("transaction.isolation=read-committed", false);
break;
- case 4:
- default:
- config_single("isolation=snapshot", false);
+ default: /* 85% */
+ config_single("transaction.isolation=snapshot", false);
break;
}
+ if (g.c_isolation_flag == ISOLATION_SNAPSHOT) {
+ if (!g.c_txn_timestamps)
+ config_single("transaction.timestamps=on", false);
+ if (g.c_txn_freq != 100)
+ config_single("transaction.frequency=100", false);
+ } else {
+ if (g.c_prepare)
+ config_single("ops.prepare=off", false);
+ if (g.c_txn_timestamps)
+ config_single("transaction.timestamps=off", false);
+ }
+ }
}
/*
@@ -780,15 +918,27 @@ void
config_error(void)
{
CONFIG *cp;
+ size_t max_name;
/* Display configuration names. */
fprintf(stderr, "\n");
+ fprintf(stderr, "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n");
+ fprintf(stderr, "Configuration values:\n");
+ fprintf(stderr, "%10s: %s\n", "off", "boolean off");
+ fprintf(stderr, "%10s: %s\n", "on", "boolean on");
+ fprintf(stderr, "%10s: %s\n", "0", "boolean off");
+ fprintf(stderr, "%10s: %s\n", "1", "boolean on");
+ fprintf(stderr, "%10s: %s\n", "NNN", "unsigned number");
+ fprintf(stderr, "%10s: %s\n", "NNN-NNN", "number range, each number equally likely");
+ fprintf(stderr, "%10s: %s\n", "NNN:NNN", "number range, lower numbers more likely");
+ fprintf(stderr, "%10s: %s\n", "string", "configuration value");
+ fprintf(stderr, "\n");
+ fprintf(stderr, "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n");
fprintf(stderr, "Configuration names:\n");
+ for (max_name = 0, cp = c; cp->name != NULL; ++cp)
+ max_name = WT_MAX(max_name, strlen(cp->name));
for (cp = c; cp->name != NULL; ++cp)
- if (strlen(cp->name) > 17)
- fprintf(stderr, "%s\n%17s: %s\n", cp->name, " ", cp->desc);
- else
- fprintf(stderr, "%17s: %s\n", cp->name, cp->desc);
+ fprintf(stderr, "%*s: %s\n", (int)max_name, cp->name, cp->desc);
}
/*
@@ -801,13 +951,17 @@ config_print(bool error_display)
CONFIG *cp;
FILE *fp;
+ /* Reopening or replaying an existing database should leave the existing CONFIG file. */
+ if (g.reopen || g.replay)
+ return;
+
if (error_display)
fp = stdout;
else if ((fp = fopen(g.home_config, "w")) == NULL)
testutil_die(errno, "fopen: %s", g.home_config);
fprintf(fp, "############################################\n");
- fprintf(fp, "# RUN PARAMETERS\n");
+ fprintf(fp, "# RUN PARAMETERS: V2\n");
fprintf(fp, "############################################\n");
/* Display configuration values. */
@@ -895,6 +1049,9 @@ config_reset(void)
{
CONFIG *cp;
+ if (!config_is_perm("transaction.isolation"))
+ g.c_isolation_flag = ISOLATION_NOT_SET;
+
/* Clear temporary allocated configuration data. */
for (cp = c; cp->name != NULL; ++cp) {
F_CLR(cp, C_TEMP);
@@ -920,40 +1077,59 @@ config_find(const char *s, size_t len, bool fatal)
if (strncmp(s, cp->name, len) == 0 && cp->name[len] == '\0')
return (cp);
- /*
- * Optionally ignore unknown keywords, it makes it easier to run old CONFIG files.
- */
- if (fatal) {
- fprintf(stderr, "%s: %s: unknown required configuration keyword\n", progname, s);
- exit(EXIT_FAILURE);
- }
+ /* Optionally ignore unknown keywords, it makes it easier to run old CONFIG files. */
+ if (fatal)
+ testutil_die(EINVAL, "%s: %s: unknown required configuration keyword\n", progname, s);
+
fprintf(stderr, "%s: %s: WARNING, ignoring unknown configuration keyword\n", progname, s);
return (NULL);
}
/*
+ * config_value --
+ * String to long helper function.
+ */
+static uint32_t
+config_value(const char *config, const char *p, int match)
+{
+ long v;
+ char *endptr;
+
+ errno = 0;
+ v = strtol(p, &endptr, 10);
+ if ((errno == ERANGE && (v == LONG_MAX || v == LONG_MIN)) || (errno != 0 && v == 0) ||
+ *endptr != match || v < 0 || v > UINT32_MAX)
+ testutil_die(
+ EINVAL, "%s: %s: illegal numeric value or value out of range", progname, config);
+ return ((uint32_t)v);
+}
+
+/*
* config_single --
* Set a single configuration structure value.
*/
void
config_single(const char *s, bool perm)
{
+ enum { RANGE_FIXED, RANGE_NONE, RANGE_WEIGHTED } range;
CONFIG *cp;
- long vlong;
- uint32_t v;
- char *p;
- const char *ep;
-
- if ((ep = strchr(s, '=')) == NULL) {
- fprintf(stderr, "%s: %s: illegal configuration value\n", progname, s);
- exit(EXIT_FAILURE);
- }
+ uint32_t steps, v1, v2;
+ u_int i;
+ const char *equalp, *vp1, *vp2;
+
+ while (__wt_isspace((u_char)*s))
+ ++s;
- if ((cp = config_find(s, (size_t)(ep - s), false)) == NULL)
+ config_compat(&s);
+
+ if ((equalp = strchr(s, '=')) == NULL)
+ testutil_die(EINVAL, "%s: %s: illegal configuration value\n", progname, s);
+
+ if ((cp = config_find(s, (size_t)(equalp - s), false)) == NULL)
return;
F_SET(cp, perm ? C_PERM : C_TEMP);
- ++ep;
+ ++equalp;
if (F_ISSET(cp, C_STRING)) {
/*
@@ -964,65 +1140,103 @@ config_single(const char *s, bool perm)
*cp->vstr = NULL;
}
- if (strncmp(s, "checkpoints", strlen("checkpoints")) == 0) {
- config_map_checkpoint(ep, &g.c_checkpoint_flag);
- *cp->vstr = dstrdup(ep);
- } else if (strncmp(s, "checksum", strlen("checksum")) == 0) {
- config_map_checksum(ep, &g.c_checksum_flag);
- *cp->vstr = dstrdup(ep);
- } else if (strncmp(s, "compression", strlen("compression")) == 0) {
- config_map_compression(ep, &g.c_compression_flag);
- *cp->vstr = dstrdup(ep);
- } else if (strncmp(s, "data_source", strlen("data_source")) == 0 &&
- strncmp("file", ep, strlen("file")) != 0 && strncmp("lsm", ep, strlen("lsm")) != 0 &&
- strncmp("table", ep, strlen("table")) != 0) {
- fprintf(stderr, "Invalid data source option: %s\n", ep);
- exit(EXIT_FAILURE);
- } else if (strncmp(s, "encryption", strlen("encryption")) == 0) {
- config_map_encryption(ep, &g.c_encryption_flag);
- *cp->vstr = dstrdup(ep);
- } else if (strncmp(s, "file_type", strlen("file_type")) == 0) {
- config_map_file_type(ep, &g.type);
+ if (strncmp(s, "backup.incremental", strlen("backup.incremental")) == 0) {
+ config_map_backup_incr(equalp, &g.c_backup_incr_flag);
+ *cp->vstr = dstrdup(equalp);
+ } else if (strncmp(s, "checkpoint", strlen("checkpoint")) == 0) {
+ config_map_checkpoint(equalp, &g.c_checkpoint_flag);
+ *cp->vstr = dstrdup(equalp);
+ } else if (strncmp(s, "disk.checksum", strlen("disk.checksum")) == 0) {
+ config_map_checksum(equalp, &g.c_checksum_flag);
+ *cp->vstr = dstrdup(equalp);
+ } else if (strncmp(s, "btree.compression", strlen("btree.compression")) == 0) {
+ config_map_compression(equalp, &g.c_compression_flag);
+ *cp->vstr = dstrdup(equalp);
+ } else if (strncmp(s, "runs.source", strlen("runs.source")) == 0 &&
+ strncmp("file", equalp, strlen("file")) != 0 &&
+ strncmp("lsm", equalp, strlen("lsm")) != 0 &&
+ strncmp("table", equalp, strlen("table")) != 0) {
+ testutil_die(EINVAL, "Invalid data source option: %s\n", equalp);
+ } else if (strncmp(s, "disk.encryption", strlen("disk.encryption")) == 0) {
+ config_map_encryption(equalp, &g.c_encryption_flag);
+ *cp->vstr = dstrdup(equalp);
+ } else if (strncmp(s, "runs.type", strlen("runs.type")) == 0) {
+ config_map_file_type(equalp, &g.type);
*cp->vstr = dstrdup(config_file_type(g.type));
- } else if (strncmp(s, "isolation", strlen("isolation")) == 0) {
- config_map_isolation(ep, &g.c_isolation_flag);
- *cp->vstr = dstrdup(ep);
- } else if (strncmp(s, "logging_compression", strlen("logging_compression")) == 0) {
- config_map_compression(ep, &g.c_logging_compression_flag);
- *cp->vstr = dstrdup(ep);
+ } else if (strncmp(s, "transaction.isolation", strlen("transaction.isolation")) == 0) {
+ config_map_isolation(equalp, &g.c_isolation_flag);
+ *cp->vstr = dstrdup(equalp);
+ } else if (strncmp(s, "logging.compression", strlen("logging.compression")) == 0) {
+ config_map_compression(equalp, &g.c_logging_compression_flag);
+ *cp->vstr = dstrdup(equalp);
} else
- *cp->vstr = dstrdup(ep);
+ *cp->vstr = dstrdup(equalp);
return;
}
- vlong = -1;
if (F_ISSET(cp, C_BOOL)) {
- if (strncmp(ep, "off", strlen("off")) == 0)
- vlong = 0;
- else if (strncmp(ep, "on", strlen("on")) == 0)
- vlong = 1;
- }
- if (vlong == -1) {
- vlong = strtol(ep, &p, 10);
- if (*p != '\0') {
- fprintf(stderr, "%s: %s: illegal numeric value\n", progname, s);
- exit(EXIT_FAILURE);
+ if (strncmp(equalp, "off", strlen("off")) == 0)
+ v1 = 0;
+ else if (strncmp(equalp, "on", strlen("on")) == 0)
+ v1 = 1;
+ else {
+ v1 = config_value(s, equalp, '\0');
+ if (v1 != 0 && v1 != 1)
+ testutil_die(EINVAL, "%s: %s: value of boolean not 0 or 1", progname, s);
}
+
+ *cp->v = v1;
+ return;
}
- v = (uint32_t)vlong;
- if (F_ISSET(cp, C_BOOL)) {
- if (v != 0 && v != 1) {
- fprintf(stderr, "%s: %s: value of boolean not 0 or 1\n", progname, s);
- exit(EXIT_FAILURE);
- }
- } else if (v < cp->min || v > cp->maxset) {
- fprintf(stderr, "%s: %s: value outside min/max values of %" PRIu32 "-%" PRIu32 "\n",
+
+ /*
+ * Three possible syntax elements: a number, two numbers separated by a dash, two numbers
+ * separated by an colon. The first is a fixed value, the second is a range where all values are
+ * equally possible, the third is a weighted range where lower values are more likely.
+ */
+ vp1 = equalp;
+ range = RANGE_NONE;
+ if ((vp2 = strchr(vp1, '-')) != NULL) {
+ ++vp2;
+ range = RANGE_FIXED;
+ } else if ((vp2 = strchr(vp1, ':')) != NULL) {
+ ++vp2;
+ range = RANGE_WEIGHTED;
+ }
+
+ v1 = config_value(s, vp1, range == RANGE_NONE ? '\0' : (range == RANGE_FIXED ? '-' : ':'));
+ if (v1 < cp->min || v1 > cp->maxset)
+ testutil_die(EINVAL, "%s: %s: value outside min/max values of %" PRIu32 "-%" PRIu32 "\n",
progname, s, cp->min, cp->maxset);
- exit(EXIT_FAILURE);
+
+ if (range != RANGE_NONE) {
+ v2 = config_value(s, vp2, '\0');
+ if (v2 < cp->min || v2 > cp->maxset)
+ testutil_die(EINVAL,
+ "%s: %s: value outside min/max values of %" PRIu32 "-%" PRIu32 "\n", progname, s,
+ cp->min, cp->maxset);
+ if (v1 > v2)
+ testutil_die(EINVAL, "%s: %s: illegal numeric range\n", progname, s);
+
+ if (range == RANGE_FIXED)
+ v1 = mmrand(NULL, (u_int)v1, (u_int)v2);
+ else {
+ /*
+ * Roll dice, 50% chance of proceeding to the next larger value, and 5 steps to the
+ * maximum value.
+ */
+ steps = ((v2 - v1) + 4) / 5;
+ if (steps == 0)
+ steps = 1;
+ for (i = 0; i < 5; ++i, v1 += steps)
+ if (mmrand(NULL, 0, 1) == 0)
+ break;
+ v1 = WT_MIN(v1, v2);
+ }
}
- *cp->v = v;
+ *cp->v = v1;
}
/*
@@ -1043,6 +1257,23 @@ config_map_file_type(const char *s, u_int *vp)
}
/*
+ * config_map_backup_incr --
+ * Map a incremental backup configuration to a flag.
+ */
+static void
+config_map_backup_incr(const char *s, u_int *vp)
+{
+ if (strcmp(s, "block") == 0)
+ *vp = INCREMENTAL_BLOCK;
+ else if (strcmp(s, "log") == 0)
+ *vp = INCREMENTAL_LOG;
+ else if (strcmp(s, "off") == 0)
+ *vp = INCREMENTAL_OFF;
+ else
+ testutil_die(EINVAL, "illegal incremental backup configuration: %s", s);
+}
+
+/*
* config_map_checkpoint --
* Map a checkpoint configuration to a flag.
*/
diff --git a/src/third_party/wiredtiger/test/format/config.h b/src/third_party/wiredtiger/test/format/config.h
index 832c977df29..04da641386b 100644
--- a/src/third_party/wiredtiger/test/format/config.h
+++ b/src/third_party/wiredtiger/test/format/config.h
@@ -59,254 +59,288 @@ typedef struct {
#define COMPRESSION_LIST "(none | lz4 | snappy | zlib | zstd)"
-static CONFIG c[] = {{"abort", "if timed run should drop core", /* 0% */
- C_BOOL, 0, 0, 0, &g.c_abort, NULL},
+static CONFIG c[] = {
+ /* 5% */
+ {"assert.commit_timestamp", "if assert commit_timestamp", C_BOOL, 5, 0, 0,
+ &g.c_assert_commit_timestamp, NULL},
- {"alter", "if altering the table is enabled", /* 10% */
- C_BOOL, 10, 0, 0, &g.c_alter, NULL},
+ /* 5% */
+ {"assert.read_timestamp", "if assert read_timestamp", C_BOOL, 5, 0, 0, &g.c_assert_read_timestamp,
+ NULL},
- {"assert_commit_timestamp", "if assert commit_timestamp", /* 5% */
- C_BOOL, 5, 0, 0, &g.c_assert_commit_timestamp, NULL},
+ /* 20% */
+ {"backup", "if backups are enabled", C_BOOL, 20, 0, 0, &g.c_backups, NULL},
- {"assert_read_timestamp", "if assert read_timestamp", /* 5% */
- C_BOOL, 5, 0, 0, &g.c_assert_read_timestamp, NULL},
+ {"backup.incremental", "type of backup (block | log | off)", C_IGNORE | C_STRING, 0, 0, 0, NULL,
+ &g.c_backup_incremental},
- {"auto_throttle", "if LSM inserts are throttled", /* 90% */
- C_BOOL, 90, 0, 0, &g.c_auto_throttle, NULL},
+ {"btree.bitcnt", "number of bits for fixed-length column-store files", 0x0, 1, 8, 8, &g.c_bitcnt,
+ NULL},
- {"backups", "if backups are enabled", /* 20% */
- C_BOOL, 20, 0, 0, &g.c_backups, NULL},
+ {"btree.compression", "type of compression " COMPRESSION_LIST, C_IGNORE | C_STRING, 0, 0, 0, NULL,
+ &g.c_compression},
- {"bitcnt", "number of bits for fixed-length column-store files", 0x0, 1, 8, 8, &g.c_bitcnt, NULL},
+ /* 20% */
+ {"btree.dictionary", "if values are dictionary compressed", C_BOOL, 20, 0, 0, &g.c_dictionary,
+ NULL},
- {"bloom", "if bloom filters are configured", /* 95% */
- C_BOOL, 95, 0, 0, &g.c_bloom, NULL},
+ /* 20% */
+ {"btree.huffman_key", "if keys are huffman encoded", C_BOOL, 20, 0, 0, &g.c_huffman_key, NULL},
- {"bloom_bit_count", "number of bits per item for LSM bloom filters", 0x0, 4, 64, 1000,
- &g.c_bloom_bit_count, NULL},
+ /* 20% */
+ {"btree.huffman_value", "if values are huffman encoded", C_BOOL, 20, 0, 0, &g.c_huffman_value,
+ NULL},
- {"bloom_hash_count", "number of hash values per item for LSM bloom filters", 0x0, 4, 32, 100,
- &g.c_bloom_hash_count, NULL},
+ /* 95% */
+ {"btree.internal_key_truncation", "if internal keys are truncated", C_BOOL, 95, 0, 0,
+ &g.c_internal_key_truncation, NULL},
- {"bloom_oldest", "if bloom_oldest=true", /* 10% */
- C_BOOL, 10, 0, 0, &g.c_bloom_oldest, NULL},
+ {"btree.internal_page_max", "maximum size of Btree internal nodes", 0x0, 9, 17, 27,
+ &g.c_intl_page_max, NULL},
- {"cache", "size of the cache in MB", 0x0, 1, 100, 100 * 1024, &g.c_cache, NULL},
+ {"btree.key_gap", "gap between instantiated keys on a Btree page", 0x0, 0, 20, 20, &g.c_key_gap,
+ NULL},
- {"cache_minimum", "minimum size of the cache in MB", C_IGNORE, 0, 0, 100 * 1024,
- &g.c_cache_minimum, NULL},
+ {"btree.key_max", "maximum size of keys", 0x0, 20, 128, MEGABYTE(10), &g.c_key_max, NULL},
- {"checkpoints", "type of checkpoints (on | off | wiredtiger)", C_IGNORE | C_STRING, 0, 0, 0, NULL,
- &g.c_checkpoint},
+ {"btree.key_min", "minimum size of keys", 0x0, 10, 32, 256, &g.c_key_min, NULL},
- {"checkpoint_log_size", "MB of log to wait if wiredtiger checkpoints configured", 0x0, 20, 200,
- 1024, &g.c_checkpoint_log_size, NULL},
+ {"btree.leaf_page_max", "maximum size of Btree leaf nodes", 0x0, 9, 17, 27, &g.c_leaf_page_max,
+ NULL},
- {"checkpoint_wait", "seconds to wait if wiredtiger checkpoints configured", 0x0, 5, 100, 3600,
- &g.c_checkpoint_wait, NULL},
+ {"btree.memory_page_max", "maximum size of in-memory pages", 0x0, 1, 10, 128,
+ &g.c_memory_page_max, NULL},
- {"checksum", "type of checksums (on | off | uncompressed)", C_IGNORE | C_STRING, 0, 0, 0, NULL,
- &g.c_checksum},
+ /* 80% */
+ {"btree.prefix_compression", "if keys are prefix compressed", C_BOOL, 80, 0, 0,
+ &g.c_prefix_compression, NULL},
- {"chunk_size", "LSM chunk size in MB", 0x0, 1, 10, 100, &g.c_chunk_size, NULL},
+ {"btree.prefix_compression_min", "minimum gain before prefix compression is used", 0x0, 0, 8, 256,
+ &g.c_prefix_compression_min, NULL},
- {"compaction", "if compaction is running", /* 10% */
- C_BOOL, 10, 0, 0, &g.c_compact, NULL},
+ {"btree.repeat_data_pct", "percent duplicate values in row- or var-length column-stores", 0x0, 0,
+ 90, 90, &g.c_repeat_data_pct, NULL},
- {"compression", "type of compression " COMPRESSION_LIST, C_IGNORE | C_STRING, 0, 0, 0, NULL,
- &g.c_compression},
+ /* 10% */
+ {"btree.reverse", "collate in reverse order", C_BOOL, 10, 0, 0, &g.c_reverse, NULL},
- {"data_extend", "if data files are extended", /* 5% */
- C_BOOL, 5, 0, 0, &g.c_data_extend, NULL},
+ {"btree.split_pct", "page split size as a percentage of the maximum page size", 0x0, 50, 100, 100,
+ &g.c_split_pct, NULL},
- {"data_source", "data source (file | lsm | table)", C_IGNORE | C_STRING, 0, 0, 0, NULL,
- &g.c_data_source},
+ {"btree.value_max", "maximum size of values", 0x0, 32, 4096, MEGABYTE(10), &g.c_value_max, NULL},
- {"delete_pct", "percent operations that are deletes", C_IGNORE, 0, 0, 100, &g.c_delete_pct, NULL},
+ {"btree.value_min", "minimum size of values", 0x0, 0, 20, 4096, &g.c_value_min, NULL},
- {"dictionary", "if values are dictionary compressed", /* 20% */
- C_BOOL, 20, 0, 0, &g.c_dictionary, NULL},
+ {"cache", "size of the cache in MB", 0x0, 1, 100, 100 * 1024, &g.c_cache, NULL},
- {"direct_io", "if direct I/O is configured for data objects", /* 0% */
- C_IGNORE | C_BOOL, 0, 0, 1, &g.c_direct_io, NULL},
+ {"cache.evict_max", "the maximum number of eviction workers", 0x0, 0, 5, 100, &g.c_evict_max,
+ NULL},
- {"encryption", "type of encryption (none | rotn-7)", C_IGNORE | C_STRING, 0, 0, 0, NULL,
- &g.c_encryption},
+ {"cache.minimum", "minimum size of the cache in MB", C_IGNORE, 0, 0, 100 * 1024,
+ &g.c_cache_minimum, NULL},
- {"evict_max", "the maximum number of eviction workers", 0x0, 0, 5, 100, &g.c_evict_max, NULL},
+ {"checkpoint", "type of checkpoints (on | off | wiredtiger)", C_IGNORE | C_STRING, 0, 0, 0, NULL,
+ &g.c_checkpoint},
- {"file_type", "type of store to create (fix | var | row)", C_IGNORE | C_STRING, 0, 0, 0, NULL,
- &g.c_file_type},
+ {"checkpoint.log_size", "MB of log to wait if wiredtiger checkpoints configured", 0x0, 20, 200,
+ 1024, &g.c_checkpoint_log_size, NULL},
- {"firstfit", "if allocation is firstfit", /* 10% */
- C_BOOL, 10, 0, 0, &g.c_firstfit, NULL},
+ {"checkpoint.wait", "seconds to wait if wiredtiger checkpoints configured", 0x0, 5, 100, 3600,
+ &g.c_checkpoint_wait, NULL},
- {"huffman_key", "if keys are huffman encoded", /* 20% */
- C_BOOL, 20, 0, 0, &g.c_huffman_key, NULL},
+ {"disk.checksum", "type of checksums (on | off | uncompressed)", C_IGNORE | C_STRING, 0, 0, 0,
+ NULL, &g.c_checksum},
- {"huffman_value", "if values are huffman encoded", /* 20% */
- C_BOOL, 20, 0, 0, &g.c_huffman_value, NULL},
+ /* 5% */
+ {"disk.data_extend", "if data files are extended", C_BOOL, 5, 0, 0, &g.c_data_extend, NULL},
- {"independent_thread_rng", "if thread RNG space is independent", /* 75% */
- C_BOOL, 75, 0, 0, &g.c_independent_thread_rng, NULL},
+ /* 0% */
+ {"disk.direct_io", "if direct I/O is configured for data objects", C_IGNORE | C_BOOL, 0, 0, 1,
+ &g.c_direct_io, NULL},
- {"in_memory", "if in-memory configured", C_IGNORE | C_BOOL, 0, 0, 1, &g.c_in_memory, NULL},
+ {"disk.encryption", "type of encryption (none | rotn-7)", C_IGNORE | C_STRING, 0, 0, 0, NULL,
+ &g.c_encryption},
- {"insert_pct", "percent operations that are inserts", C_IGNORE, 0, 0, 100, &g.c_insert_pct, NULL},
+ /* 10% */
+ {"disk.firstfit", "if allocation is firstfit", C_BOOL, 10, 0, 0, &g.c_firstfit, NULL},
- {"internal_key_truncation", "if internal keys are truncated", /* 95% */
- C_BOOL, 95, 0, 0, &g.c_internal_key_truncation, NULL},
+ /* 90% */
+ {"disk.mmap", "configure for mmap operations (readonly)", C_BOOL, 90, 0, 0, &g.c_mmap, NULL},
- {"internal_page_max", "maximum size of Btree internal nodes", 0x0, 9, 17, 27, &g.c_intl_page_max,
- NULL},
+ /* 5% */
+ {"disk.mmap_all", "configure for mmap operations (read and write)", C_BOOL, 5, 0, 0,
+ &g.c_mmap_all, NULL},
+
+ /* 0% */
+ {"format.abort", "if timed run should drop core", C_BOOL, 0, 0, 0, &g.c_abort, NULL},
- {"isolation", "isolation level (random | read-uncommitted | read-committed | snapshot)",
- C_IGNORE | C_STRING, 0, 0, 0, NULL, &g.c_isolation},
+ /* 75% */
+ {"format.independent_thread_rng", "if thread RNG space is independent", C_BOOL, 75, 0, 0,
+ &g.c_independent_thread_rng, NULL},
- {"key_gap", "gap between instantiated keys on a Btree page", 0x0, 0, 20, 20, &g.c_key_gap, NULL},
+ {"format.major_timeout", "timeout for long-running operations (minutes)", C_IGNORE, 0, 0, 1000,
+ &g.c_major_timeout, NULL},
- {"key_max", "maximum size of keys", 0x0, 20, 128, MEGABYTE(10), &g.c_key_max, NULL},
+ /* 50% */
+ {"logging", "if logging configured", C_BOOL, 50, 0, 0, &g.c_logging, NULL},
- {"key_min", "minimum size of keys", 0x0, 10, 32, 256, &g.c_key_min, NULL},
+ /* 50% */
+ {"logging.archive", "if log file archival configured", C_BOOL, 50, 0, 0, &g.c_logging_archive,
+ NULL},
- {"leaf_page_max", "maximum size of Btree leaf nodes", 0x0, 9, 17, 27, &g.c_leaf_page_max, NULL},
+ {"logging.compression", "type of logging compression " COMPRESSION_LIST, C_IGNORE | C_STRING, 0,
+ 0, 0, NULL, &g.c_logging_compression},
- {"leak_memory", "if memory should be leaked on close", C_BOOL, 0, 0, 0, &g.c_leak_memory, NULL},
+ {"logging.file_max", "maximum log file size in KB", 0x0, 100, 512000, 2097152,
+ &g.c_logging_file_max, NULL},
- {"logging", "if logging configured", /* 50% */
- C_BOOL, 50, 0, 0, &g.c_logging, NULL},
+ /* 50% */
+ {"logging.prealloc", "if log file pre-allocation configured", C_BOOL, 50, 0, 0,
+ &g.c_logging_prealloc, NULL},
- {"logging_archive", "if log file archival configured", /* 50% */
- C_BOOL, 50, 0, 0, &g.c_logging_archive, NULL},
+ /* 90% */
+ {"lsm.auto_throttle", "if LSM inserts are throttled", C_BOOL, 90, 0, 0, &g.c_auto_throttle, NULL},
- {"logging_compression", "type of logging compression " COMPRESSION_LIST, C_IGNORE | C_STRING, 0,
- 0, 0, NULL, &g.c_logging_compression},
+ /* 95% */
+ {"lsm.bloom", "if bloom filters are configured", C_BOOL, 95, 0, 0, &g.c_bloom, NULL},
- {"logging_file_max", "maximum log file size in KB", 0x0, 100, 512000, 2097152,
- &g.c_logging_file_max, NULL},
+ {"lsm.bloom_bit_count", "number of bits per item for LSM bloom filters", 0x0, 4, 64, 1000,
+ &g.c_bloom_bit_count, NULL},
- {"logging_prealloc", "if log file pre-allocation configured", /* 50% */
- C_BOOL, 50, 0, 0, &g.c_logging_prealloc, NULL},
+ {"lsm.bloom_hash_count", "number of hash values per item for LSM bloom filters", 0x0, 4, 32, 100,
+ &g.c_bloom_hash_count, NULL},
- {"lsm_worker_threads", "the number of LSM worker threads", 0x0, 3, 4, 20, &g.c_lsm_worker_threads,
- NULL},
+ /* 10% */
+ {"lsm.bloom_oldest", "if bloom_oldest=true", C_BOOL, 10, 0, 0, &g.c_bloom_oldest, NULL},
- {"major_timeout", "timeout for long-running operations (minutes)", C_IGNORE, 0, 0, 1000,
- &g.c_major_timeout, NULL},
+ {"lsm.chunk_size", "LSM chunk size in MB", 0x0, 1, 10, 100, &g.c_chunk_size, NULL},
+
+ {"lsm.merge_max", "the maximum number of chunks to include in a merge operation", 0x0, 4, 20, 100,
+ &g.c_merge_max, NULL},
- {"memory_page_max", "maximum size of in-memory pages", 0x0, 1, 10, 128, &g.c_memory_page_max,
+ {"lsm.worker_threads", "the number of LSM worker threads", 0x0, 3, 4, 20, &g.c_lsm_worker_threads,
NULL},
- {"merge_max", "the maximum number of chunks to include in a merge operation", 0x0, 4, 20, 100,
- &g.c_merge_max, NULL},
+ /* 10% */
+ {"ops.alter", "if altering the table is enabled", C_BOOL, 10, 0, 0, &g.c_alter, NULL},
- {"mmap", "configure for mmap operations", /* 90% */
- C_BOOL, 90, 0, 0, &g.c_mmap, NULL},
+ /* 10% */
+ {"ops.compaction", "if compaction is running", C_BOOL, 10, 0, 0, &g.c_compact, NULL},
- {"modify_pct", "percent operations that are value modifications", C_IGNORE, 0, 0, 100,
- &g.c_modify_pct, NULL},
+ {"ops.pct.delete", "percent operations that are deletes", C_IGNORE, 0, 0, 100, &g.c_delete_pct,
+ NULL},
- {"ops", "the number of modification operations done per run", 0x0, 0, M(2), M(100), &g.c_ops,
+ {"ops.pct.insert", "percent operations that are inserts", C_IGNORE, 0, 0, 100, &g.c_insert_pct,
NULL},
- {"prefix_compression", "if keys are prefix compressed", /* 80% */
- C_BOOL, 80, 0, 0, &g.c_prefix_compression, NULL},
+ {"ops.pct.modify", "percent operations that are value modifications", C_IGNORE, 0, 0, 100,
+ &g.c_modify_pct, NULL},
- {"prefix_compression_min", "minimum gain before prefix compression is used", 0x0, 0, 8, 256,
- &g.c_prefix_compression_min, NULL},
+ {"ops.pct.read", "percent operations that are reads", C_IGNORE, 0, 0, 100, &g.c_read_pct, NULL},
- {"prepare", "configure transaction prepare", /* 5% */
- C_BOOL, 5, 0, 0, &g.c_prepare, NULL},
+ {"ops.pct.write", "percent operations that are value updates", C_IGNORE, 0, 0, 100,
+ &g.c_write_pct, NULL},
- {"quiet", "quiet run (same as -q)", C_IGNORE | C_BOOL, 0, 0, 1, &g.c_quiet, NULL},
+ /* 5% */
+ {"ops.prepare", "configure transaction prepare", C_BOOL, 5, 0, 0, &g.c_prepare, NULL},
- {"random_cursor", "if random cursor reads configured", /* 10% */
- C_BOOL, 10, 0, 0, &g.c_random_cursor, NULL},
+ /* 10% */
+ {"ops.random_cursor", "if random cursor reads configured", C_BOOL, 10, 0, 0, &g.c_random_cursor,
+ NULL},
- {"read_pct", "percent operations that are reads", C_IGNORE, 0, 0, 100, &g.c_read_pct, NULL},
+ /* 100% */
+ {"ops.rebalance", "rebalance testing", C_BOOL, 100, 1, 0, &g.c_rebalance, NULL},
- {"rebalance", "rebalance testing", /* 100% */
- C_BOOL, 100, 1, 0, &g.c_rebalance, NULL},
+ /* 100% */
+ {"ops.salvage", "salvage testing", C_BOOL, 100, 1, 0, &g.c_salvage, NULL},
- {"repeat_data_pct", "percent duplicate values in row- or var-length column-stores", 0x0, 0, 90,
- 90, &g.c_repeat_data_pct, NULL},
+ /* 100% */
+ {"ops.truncate", "enable truncation", C_BOOL, 100, 0, 0, &g.c_truncate, NULL},
- {"reverse", "collate in reverse order", /* 10% */
- C_BOOL, 10, 0, 0, &g.c_reverse, NULL},
+ /* 100% */
+ {"ops.verify", "to regularly verify during a run", C_BOOL, 100, 1, 0, &g.c_verify, NULL},
- {"rows", "the number of rows to create", 0x0, 10, M(1), M(100), &g.c_rows, NULL},
+ {"quiet", "quiet run (same as -q)", C_IGNORE | C_BOOL, 0, 0, 1, &g.c_quiet, NULL},
{"runs", "the number of runs", C_IGNORE, 0, 0, UINT_MAX, &g.c_runs, NULL},
- {"salvage", "salvage testing", /* 100% */
- C_BOOL, 100, 1, 0, &g.c_salvage, NULL},
+ {"runs.in_memory", "if in-memory configured", C_IGNORE | C_BOOL, 0, 0, 1, &g.c_in_memory, NULL},
- {"split_pct", "page split size as a percentage of the maximum page size", 0x0, 50, 100, 100,
- &g.c_split_pct, NULL},
+ {"runs.ops", "the number of operations done per run", 0x0, 0, M(2), M(100), &g.c_ops, NULL},
- {"statistics", "maintain statistics", /* 20% */
- C_BOOL, 20, 0, 0, &g.c_statistics, NULL},
+ {"runs.rows", "the number of rows to create", 0x0, 10, M(1), M(100), &g.c_rows, NULL},
- {"statistics_server", "run the statistics server thread", /* 5% */
- C_BOOL, 5, 0, 0, &g.c_statistics_server, NULL},
+ {"runs.source", "data source (file | lsm | table)", C_IGNORE | C_STRING, 0, 0, 0, NULL,
+ &g.c_data_source},
- {"threads", "the number of worker threads", 0x0, 1, 32, 128, &g.c_threads, NULL},
+ {"runs.threads", "the number of worker threads", 0x0, 1, 32, 128, &g.c_threads, NULL},
- {"timer", "maximum time to run in minutes", C_IGNORE, 0, 0, UINT_MAX, &g.c_timer, NULL},
+ {"runs.timer", "maximum time to run in minutes", C_IGNORE, 0, 0, UINT_MAX, &g.c_timer, NULL},
- {"timing_stress_aggressive_sweep", "stress aggressive sweep", /* 2% */
- C_BOOL, 2, 0, 0, &g.c_timing_stress_aggressive_sweep, NULL},
+ {"runs.type", "type of store to create (fix | var | row)", C_IGNORE | C_STRING, 0, 0, 0, NULL,
+ &g.c_file_type},
- {"timing_stress_checkpoint", "stress checkpoints", /* 2% */
- C_BOOL, 2, 0, 0, &g.c_timing_stress_checkpoint, NULL},
+ /* 20% */
+ {"statistics", "maintain statistics", C_BOOL, 20, 0, 0, &g.c_statistics, NULL},
- {"timing_stress_lookaside_sweep", "stress lookaside sweep", /* 2% */
- C_BOOL, 2, 0, 0, &g.c_timing_stress_lookaside_sweep, NULL},
+ /* 5% */
+ {"statistics.server", "run the statistics server thread", C_BOOL, 5, 0, 0, &g.c_statistics_server,
+ NULL},
- {"timing_stress_split_1", "stress splits (#1)", /* 2% */
- C_BOOL, 2, 0, 0, &g.c_timing_stress_split_1, NULL},
+ /* 2% */
+ {"stress.aggressive_sweep", "stress aggressive sweep", C_BOOL, 2, 0, 0,
+ &g.c_timing_stress_aggressive_sweep, NULL},
- {"timing_stress_split_2", "stress splits (#2)", /* 2% */
- C_BOOL, 2, 0, 0, &g.c_timing_stress_split_2, NULL},
+ /* 2% */
+ {"stress.checkpoint", "stress checkpoints", C_BOOL, 2, 0, 0, &g.c_timing_stress_checkpoint, NULL},
- {"timing_stress_split_3", "stress splits (#3)", /* 2% */
- C_BOOL, 2, 0, 0, &g.c_timing_stress_split_3, NULL},
+ /* 2% */
+ {"stress.hs_sweep", "stress history store sweep", C_BOOL, 2, 0, 0, &g.c_timing_stress_hs_sweep,
+ NULL},
- {"timing_stress_split_4", "stress splits (#4)", /* 2% */
- C_BOOL, 2, 0, 0, &g.c_timing_stress_split_4, NULL},
+ /* 2% */
+ {"stress.split_1", "stress splits (#1)", C_BOOL, 2, 0, 0, &g.c_timing_stress_split_1, NULL},
- {"timing_stress_split_5", "stress splits (#5)", /* 2% */
- C_BOOL, 2, 0, 0, &g.c_timing_stress_split_5, NULL},
+ /* 2% */
+ {"stress.split_2", "stress splits (#2)", C_BOOL, 2, 0, 0, &g.c_timing_stress_split_2, NULL},
- {"timing_stress_split_6", "stress splits (#6)", /* 2% */
- C_BOOL, 2, 0, 0, &g.c_timing_stress_split_6, NULL},
+ /* 2% */
+ {"stress.split_3", "stress splits (#3)", C_BOOL, 2, 0, 0, &g.c_timing_stress_split_3, NULL},
- {"timing_stress_split_7", "stress splits (#7)", /* 2% */
- C_BOOL, 2, 0, 0, &g.c_timing_stress_split_7, NULL},
+ /* 2% */
+ {"stress.split_4", "stress splits (#4)", C_BOOL, 2, 0, 0, &g.c_timing_stress_split_4, NULL},
- {"timing_stress_split_8", "stress splits (#8)", /* 2% */
- C_BOOL, 2, 0, 0, &g.c_timing_stress_split_8, NULL},
+ /* 2% */
+ {"stress.split_5", "stress splits (#5)", C_BOOL, 2, 0, 0, &g.c_timing_stress_split_5, NULL},
- {"transaction_timestamps", /* 70% */
- "enable transaction timestamp support", C_BOOL, 70, 0, 0, &g.c_txn_timestamps, NULL},
+ /* 2% */
+ {"stress.split_6", "stress splits (#6)", C_BOOL, 2, 0, 0, &g.c_timing_stress_split_6, NULL},
- {"transaction-frequency", "percent operations done inside an explicit transaction", 0x0, 1, 100,
- 100, &g.c_txn_freq, NULL},
+ /* 2% */
+ {"stress.split_7", "stress splits (#7)", C_BOOL, 2, 0, 0, &g.c_timing_stress_split_7, NULL},
- {"truncate", /* 100% */
- "enable truncation", C_BOOL, 100, 0, 0, &g.c_truncate, NULL},
+ /* 2% */
+ {"stress.split_8", "stress splits (#8)", C_BOOL, 2, 0, 0, &g.c_timing_stress_split_8, NULL},
- {"value_max", "maximum size of values", 0x0, 32, 4096, MEGABYTE(10), &g.c_value_max, NULL},
+ {"transaction.frequency", "percent operations done inside an explicit transaction", 0x0, 1, 100,
+ 100, &g.c_txn_freq, NULL},
- {"value_min", "minimum size of values", 0x0, 0, 20, 4096, &g.c_value_min, NULL},
+ {"transaction.isolation",
+ "isolation level (random | read-uncommitted | read-committed | snapshot)", C_IGNORE | C_STRING,
+ 0, 0, 0, NULL, &g.c_isolation},
- {"verify", "to regularly verify during a run", /* 100% */
- C_BOOL, 100, 1, 0, &g.c_verify, NULL},
+ /* 70% */
+ {"transaction.timestamps", "enable transaction timestamp support", C_BOOL, 70, 0, 0,
+ &g.c_txn_timestamps, NULL},
- {"wiredtiger_config", "configuration string used to wiredtiger_open", C_IGNORE | C_STRING, 0, 0,
+ {"wiredtiger.config", "configuration string used to wiredtiger_open", C_IGNORE | C_STRING, 0, 0,
0, NULL, &g.c_config_open},
- {"write_pct", "percent operations that are value updates", C_IGNORE, 0, 0, 100, &g.c_write_pct,
- NULL},
+ /* 80% */
+ {"wiredtiger.rwlock", "if wiredtiger read/write mutexes should be used", C_BOOL, 80, 0, 0,
+ &g.c_wt_mutex, NULL},
+
+ {"wiredtiger.leak_memory", "if memory should be leaked on close", C_BOOL, 0, 0, 0,
+ &g.c_leak_memory, NULL},
{NULL, NULL, 0x0, 0, 0, 0, NULL, NULL}};
diff --git a/src/third_party/wiredtiger/test/format/config_compat.c b/src/third_party/wiredtiger/test/format/config_compat.c
new file mode 100644
index 00000000000..0a5fe4424f7
--- /dev/null
+++ b/src/third_party/wiredtiger/test/format/config_compat.c
@@ -0,0 +1,92 @@
+/*-
+ * Public Domain 2014-2020 MongoDB, Inc.
+ * Public Domain 2008-2014 WiredTiger, Inc.
+ *
+ * This is free and unencumbered software released into the public domain.
+ *
+ * Anyone is free to copy, modify, publish, use, compile, sell, or
+ * distribute this software, either in source code form or as a compiled
+ * binary, for any purpose, commercial or non-commercial, and by any
+ * means.
+ *
+ * In jurisdictions that recognize copyright laws, the author or authors
+ * of this software dedicate any and all copyright interest in the
+ * software to the public domain. We make this dedication for the benefit
+ * of the public at large and to the detriment of our heirs and
+ * successors. We intend this dedication to be an overt act of
+ * relinquishment in perpetuity of all present and future rights to this
+ * software under copyright law.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "format.h"
+
+static const char *list[] = {
+ "abort=", "format.abort", "alter=", "ops.alter", "assert_commit_timestamp=",
+ "assert.commit_timestamp", "assert_read_timestamp=", "assert.read_timestamp", "auto_throttle=",
+ "lsm.auto_throttle", "backup_incremental=", "backup.incremental", "backups=", "backup", "bitcnt=",
+ "btree.bitcnt", "bloom=", "lsm.bloom", "bloom_bit_count=", "lsm.bloom_bit_count",
+ "bloom_hash_count=", "lsm.bloom_hash_count", "bloom_oldest=", "lsm.bloom_oldest", "cache=",
+ "cache", "cache_minimum=", "cache.minimum", "checkpoint_log_size=", "checkpoint.log_size",
+ "checkpoint_wait=", "checkpoint.wait", "checkpoints=", "checkpoint", "checksum=", "disk.checksum",
+ "chunk_size=", "lsm.chunk_size", "compaction=", "ops.compaction", "compression=",
+ "btree.compression", "data_extend=", "disk.data_extend", "data_source=", "runs.source",
+ "delete_pct=", "ops.pct.delete", "dictionary=", "btree.dictionary", "direct_io=",
+ "disk.direct_io", "encryption=", "disk.encryption", "evict_max=", "cache.evict_max", "file_type=",
+ "runs.type", "firstfit=", "disk.firstfit", "huffman_key=", "btree.huffman_key", "huffman_value=",
+ "btree.huffman_value", "in_memory=", "runs.in_memory", "independent_thread_rng=",
+ "format.independent_thread_rng", "insert_pct=", "ops.pct.insert", "internal_key_truncation=",
+ "btree.internal_key_truncation", "internal_page_max=", "btree.internal_page_max", "isolation=",
+ "transaction.isolation", "key_gap=", "btree.key_gap", "key_max=", "btree.key_max", "key_min=",
+ "btree.key_min", "leaf_page_max=", "btree.leaf_page_max", "leak_memory=",
+ "wiredtiger.leak_memory", "logging_archive=", "logging.archive", "logging_compression=",
+ "logging.compression", "logging_file_max=", "logging.file_max", "logging_prealloc=",
+ "logging.prealloc", "lsm_worker_threads=", "lsm.worker_threads", "major_timeout=",
+ "format.major_timeout", "memory_page_max=", "btree.memory_page_max", "merge_max=",
+ "lsm.merge_max", "mmap=", "disk.mmap", "mmap_all=", "disk.mmap_all", "modify_pct=",
+ "ops.pct.modify", "ops=", "runs.ops", "prefix_compression=", "btree.prefix_compression",
+ "prefix_compression_min=", "btree.prefix_compression_min", "prepare=", "ops.prepare",
+ "random_cursor=", "ops.random_cursor", "read_pct=", "ops.pct.read", "rebalance=", "ops.rebalance",
+ "repeat_data_pct=", "btree.repeat_data_pct", "reverse=", "btree.reverse", "rows=", "runs.rows",
+ "salvage=", "ops.salvage", "split_pct=", "btree.split_pct", "statistics=", "statistics",
+ "statistics_server=", "statistics.server", "threads=", "runs.threads", "timer=", "runs.timer",
+ "timing_stress_aggressive_sweep=", "stress.aggressive_sweep", "timing_stress_checkpoint=",
+ "stress.checkpoint", "timing_stress_hs_sweep=", "stress.hs_sweep", "timing_stress_split_1=",
+ "stress.split_1", "timing_stress_split_2=", "stress.split_2", "timing_stress_split_3=",
+ "stress.split_3", "timing_stress_split_4=", "stress.split_4", "timing_stress_split_5=",
+ "stress.split_5", "timing_stress_split_6=", "stress.split_6", "timing_stress_split_7=",
+ "stress.split_7", "timing_stress_split_8=", "stress.split_8", "transaction-frequency=",
+ "transaction.frequency", "transaction_timestamps=", "transaction.timestamps", "truncate=",
+ "ops.truncate", "value_max=", "btree.value_max", "value_min=", "btree.value_min", "verify=",
+ "ops.verify", "wiredtiger_config=", "wiredtiger.config", "write_pct=", "ops.pct.write", NULL,
+ NULL,
+};
+
+/*
+ * config_compat --
+ * Convert old names to new ones.
+ */
+void
+config_compat(const char **origp)
+{
+ static char conv[100];
+ const char *equalp, *orig, **p;
+
+ orig = *origp;
+ if ((equalp = strchr(orig, '=')) == NULL)
+ return;
+
+ for (p = list; *p != NULL; p += 2)
+ if (strncmp(orig, *p, (size_t)((equalp - orig) + 1)) == 0) {
+ testutil_check(__wt_snprintf(conv, sizeof(conv), "%s%s", *++p, equalp));
+ *origp = conv;
+ break;
+ }
+}
diff --git a/src/third_party/wiredtiger/test/format/config_compat.sed b/src/third_party/wiredtiger/test/format/config_compat.sed
new file mode 100644
index 00000000000..73d867a0d55
--- /dev/null
+++ b/src/third_party/wiredtiger/test/format/config_compat.sed
@@ -0,0 +1,89 @@
+s/^assert.commit_timestamp=/assert_commit_timestamp=/
+s/^assert.read_timestamp=/assert_read_timestamp=/
+s/^backup.incremental=/backup_incremental=/
+s/^backup=/backups=/
+s/^btree.bitcnt=/bitcnt=/
+s/^btree.compression=/compression=/
+s/^btree.dictionary=/dictionary=/
+s/^btree.huffman_key=/huffman_key=/
+s/^btree.huffman_value=/huffman_value=/
+s/^btree.internal_key_truncation=/internal_key_truncation=/
+s/^btree.internal_page_max=/internal_page_max=/
+s/^btree.key_gap=/key_gap=/
+s/^btree.key_max=/key_max=/
+s/^btree.key_min=/key_min=/
+s/^btree.leaf_page_max=/leaf_page_max=/
+s/^btree.memory_page_max=/memory_page_max=/
+s/^btree.prefix_compression=/prefix_compression=/
+s/^btree.prefix_compression_min=/prefix_compression_min=/
+s/^btree.repeat_data_pct=/repeat_data_pct=/
+s/^btree.reverse=/reverse=/
+s/^btree.split_pct=/split_pct=/
+s/^btree.value_max=/value_max=/
+s/^btree.value_min=/value_min=/
+s/^cache.evict_max=/evict_max=/
+s/^cache.minimum=/cache_minimum=/
+s/^cache=/cache=/
+s/^checkpoint.log_size=/checkpoint_log_size=/
+s/^checkpoint.wait=/checkpoint_wait=/
+s/^checkpoint=/checkpoints=/
+s/^disk.checksum=/checksum=/
+s/^disk.data_extend=/data_extend=/
+s/^disk.direct_io=/direct_io=/
+s/^disk.encryption=/encryption=/
+s/^disk.firstfit=/firstfit=/
+s/^disk.mmap=/mmap=/
+s/^format.abort=/abort=/
+s/^format.independent_thread_rng=/independent_thread_rng=/
+s/^format.major_timeout=/major_timeout=/
+s/^logging.archive=/logging_archive=/
+s/^logging.compression=/logging_compression=/
+s/^logging.file_max=/logging_file_max=/
+s/^logging.prealloc=/logging_prealloc=/
+s/^lsm.auto_throttle=/auto_throttle=/
+s/^lsm.bloom=/bloom=/
+s/^lsm.bloom_bit_count=/bloom_bit_count=/
+s/^lsm.bloom_hash_count=/bloom_hash_count=/
+s/^lsm.bloom_oldest=/bloom_oldest=/
+s/^lsm.chunk_size=/chunk_size=/
+s/^lsm.merge_max=/merge_max=/
+s/^lsm.worker_threads=/lsm_worker_threads=/
+s/^ops.alter=/alter=/
+s/^ops.compaction=/compaction=/
+s/^ops.pct.delete=/delete_pct=/
+s/^ops.pct.insert=/insert_pct=/
+s/^ops.pct.modify=/modify_pct=/
+s/^ops.pct.read=/read_pct=/
+s/^ops.pct.write=/write_pct=/
+s/^ops.prepare=/prepare=/
+s/^ops.random_cursor=/random_cursor=/
+s/^ops.rebalance=/rebalance=/
+s/^ops.salvage=/salvage=/
+s/^ops.truncate=/truncate=/
+s/^ops.verify=/verify=/
+s/^runs.in_memory=/in_memory=/
+s/^runs.mmap_all=/mmap_all=/
+s/^runs.ops=/ops=/
+s/^runs.rows=/rows=/
+s/^runs.source=/data_source=/
+s/^runs.threads=/threads=/
+s/^runs.timer=/timer=/
+s/^runs.type=/file_type=/
+s/^statistics.server=/statistics_server=/
+s/^statistics=/statistics=/
+s/^stress.aggressive_sweep=/timing_stress_aggressive_sweep=/
+s/^stress.checkpoint=/timing_stress_checkpoint=/
+s/^stress.hs_sweep=/timing_stress_hs_sweep=/
+s/^stress.split_1=/timing_stress_split_1=/
+s/^stress.split_2=/timing_stress_split_2=/
+s/^stress.split_3=/timing_stress_split_3=/
+s/^stress.split_4=/timing_stress_split_4=/
+s/^stress.split_5=/timing_stress_split_5=/
+s/^stress.split_6=/timing_stress_split_6=/
+s/^stress.split_7=/timing_stress_split_7=/
+s/^stress.split_8=/timing_stress_split_8=/
+s/^transaction.frequency=/transaction-frequency=/
+s/^transaction.isolation=/isolation=/
+s/^transaction.timestamps=/transaction_timestamps=/
+s/^wiredtiger.config=/wiredtiger_config=/
+s/^wiredtiger.leak_memory=/leak_memory=/
diff --git a/src/third_party/wiredtiger/test/format/format.h b/src/third_party/wiredtiger/test/format/format.h
index 6407ee652d0..6bc213a65ef 100644
--- a/src/third_party/wiredtiger/test/format/format.h
+++ b/src/third_party/wiredtiger/test/format/format.h
@@ -60,24 +60,44 @@
#define MAX_MODIFY_ENTRIES 5 /* maximum change vectors */
+/*
+ * Abstract lock that lets us use either pthread reader-writer locks or WiredTiger's own (likely
+ * faster) implementation.
+ */
typedef struct {
- char *home; /* Home directory */
- char *home_backup; /* Hot-backup directory */
- char *home_backup_init; /* Initialize backup command */
- char *home_config; /* Run CONFIG file path */
- char *home_init; /* Initialize home command */
- char *home_lasdump; /* LAS dump filename */
- char *home_log; /* Operation log file path */
- char *home_pagedump; /* Page dump filename */
- char *home_rand; /* RNG log file path */
- char *home_salvage_copy; /* Salvage copy command */
- char *home_stats; /* Statistics file path */
+ union {
+ WT_RWLOCK wt;
+ pthread_rwlock_t pthread;
+ } l;
+ enum { LOCK_NONE = 0, LOCK_WT, LOCK_PTHREAD } lock_type;
+} RWLOCK;
- char wiredtiger_open_config[8 * 1024]; /* Database open config */
+#define LOCK_INITIALIZED(lock) ((lock)->lock_type != LOCK_NONE)
+typedef struct {
WT_CONNECTION *wts_conn;
WT_EXTENSION_API *wt_api;
+ char *uri; /* Object name */
+
+ bool backward_compatible; /* Backward compatibility testing */
+ bool reopen; /* Reopen an existing database */
+ bool replay; /* Replaying a run. */
+ bool workers_finished; /* Operations completed */
+
+ char *home; /* Home directory */
+ char *home_config; /* Run CONFIG file path */
+ char *home_hsdump; /* HS dump filename */
+ char *home_init; /* Initialize home command */
+ char *home_key; /* Key file filename */
+ char *home_log; /* Operation log file path */
+ char *home_pagedump; /* Page dump filename */
+ char *home_rand; /* RNG log file path */
+ char *home_stats; /* Statistics file path */
+
+ char *config_open; /* Command-line configuration */
+ char wiredtiger_open_config[8 * 1024]; /* Database open config */
+
bool rand_log_stop; /* Logging turned off */
FILE *randfp; /* Random number log */
@@ -86,10 +106,8 @@ typedef struct {
bool logging; /* log operations */
FILE *logfp; /* log file */
- bool replay; /* Replaying a run. */
- bool workers_finished; /* Operations completed */
-
- pthread_rwlock_t backup_lock; /* Backup running */
+ RWLOCK backup_lock; /* Backup running */
+ uint64_t backup_id; /* Block incremental id */
WT_RAND_STATE rnd; /* Global RNG state */
@@ -100,17 +118,17 @@ typedef struct {
* We get the last committed timestamp periodically in order to update the oldest timestamp,
* that requires locking out transactional ops that set a timestamp.
*/
- pthread_rwlock_t ts_lock;
+ RWLOCK ts_lock;
uint64_t timestamp; /* Counter for timestamps */
uint64_t truncate_cnt; /* Counter for truncation */
- pthread_rwlock_t death_lock; /* Single-thread failure */
-
- char *uri; /* Object name */
-
- char *config_open; /* Command-line configuration */
+ /*
+ * Single-thread failure. Always use pthread lock rather than WT lock in case WT library is
+ * misbehaving.
+ */
+ pthread_rwlock_t death_lock;
uint32_t c_abort; /* Config values */
uint32_t c_alter;
@@ -118,6 +136,7 @@ typedef struct {
uint32_t c_assert_read_timestamp;
uint32_t c_auto_throttle;
uint32_t c_backups;
+ char *c_backup_incremental;
uint32_t c_bitcnt;
uint32_t c_bloom;
uint32_t c_bloom_bit_count;
@@ -165,6 +184,7 @@ typedef struct {
uint32_t c_memory_page_max;
uint32_t c_merge_max;
uint32_t c_mmap;
+ uint32_t c_mmap_all;
uint32_t c_modify_pct;
uint32_t c_ops;
uint32_t c_prefix_compression;
@@ -186,7 +206,7 @@ typedef struct {
uint32_t c_timer;
uint32_t c_timing_stress_aggressive_sweep;
uint32_t c_timing_stress_checkpoint;
- uint32_t c_timing_stress_lookaside_sweep;
+ uint32_t c_timing_stress_hs_sweep;
uint32_t c_timing_stress_split_1;
uint32_t c_timing_stress_split_2;
uint32_t c_timing_stress_split_3;
@@ -202,12 +222,18 @@ typedef struct {
uint32_t c_value_min;
uint32_t c_verify;
uint32_t c_write_pct;
+ uint32_t c_wt_mutex;
#define FIX 1
#define ROW 2
#define VAR 3
u_int type; /* File type's flag value */
+#define INCREMENTAL_BLOCK 1
+#define INCREMENTAL_LOG 2
+#define INCREMENTAL_OFF 3
+ u_int c_backup_incr_flag; /* Incremental backup flag value */
+
#define CHECKPOINT_OFF 1
#define CHECKPOINT_ON 2
#define CHECKPOINT_WIREDTIGER 3
@@ -230,6 +256,7 @@ typedef struct {
#define ENCRYPT_ROTN_7 2
u_int c_encryption_flag; /* Encryption flag value */
+#define ISOLATION_NOT_SET 0
#define ISOLATION_RANDOM 1
#define ISOLATION_READ_UNCOMMITTED 2
#define ISOLATION_READ_COMMITTED 3
@@ -239,15 +266,14 @@ typedef struct {
uint32_t intl_page_max; /* Maximum page sizes */
uint32_t leaf_page_max;
- uint64_t key_cnt; /* Keys loaded so far */
- uint64_t rows; /* Total rows */
+ uint64_t rows; /* Total rows */
uint32_t key_rand_len[1031]; /* Key lengths */
} GLOBAL;
extern GLOBAL g;
/* Worker thread operations. */
-typedef enum { INSERT, MODIFY, READ, REMOVE, TRUNCATE, UPDATE } thread_op;
+typedef enum { INSERT = 1, MODIFY, READ, REMOVE, TRUNCATE, UPDATE } thread_op;
/* Worker read operations. */
typedef enum { NEXT, PREV, SEARCH, SEARCH_NEAR } read_operation;
@@ -330,17 +356,23 @@ WT_THREAD_RET backup(void *);
WT_THREAD_RET checkpoint(void *);
WT_THREAD_RET compact(void *);
void config_clear(void);
+void config_compat(const char **);
void config_error(void);
void config_file(const char *);
+void config_final(void);
void config_print(bool);
-void config_setup(void);
void config_single(const char *, bool);
void fclose_and_clear(FILE **);
-void key_gen(WT_ITEM *, uint64_t);
+bool fp_readv(FILE *, char *, bool, uint32_t *);
+void handle_init(void);
+void handle_teardown(void);
+void key_gen_common(WT_ITEM *, uint64_t, const char *);
void key_gen_init(WT_ITEM *);
-void key_gen_insert(WT_RAND_STATE *, WT_ITEM *, uint64_t);
void key_gen_teardown(WT_ITEM *);
void key_init(void);
+void lock_destroy(WT_SESSION *, RWLOCK *);
+void lock_init(WT_SESSION *, RWLOCK *);
+void operations(u_int, bool);
WT_THREAD_RET random_kv(void *);
void path_setup(const char *);
int read_row_worker(WT_CURSOR *, uint64_t, WT_ITEM *, WT_ITEM *, bool);
@@ -353,7 +385,7 @@ int snap_repeat_txn(WT_CURSOR *, TINFO *);
void snap_repeat_update(TINFO *, bool);
void snap_track(TINFO *, thread_op);
WT_THREAD_RET timestamp(void *);
-void timestamp_once(void);
+void timestamp_once(WT_SESSION *);
void track(const char *, uint64_t, TINFO *);
void val_gen(WT_RAND_STATE *, WT_ITEM *, uint64_t);
void val_gen_init(WT_ITEM *);
@@ -361,11 +393,11 @@ void val_gen_teardown(WT_ITEM *);
void val_init(void);
void wts_checkpoints(void);
void wts_close(void);
+void wts_create(void);
void wts_dump(const char *, bool);
void wts_init(void);
void wts_load(void);
void wts_open(const char *, bool, WT_CONNECTION **);
-void wts_ops(u_int, bool);
void wts_read_scan(void);
void wts_rebalance(void);
void wts_reopen(void);
diff --git a/src/third_party/wiredtiger/test/format/format.i b/src/third_party/wiredtiger/test/format/format.i
index 2b22eab069a..661dd096ae9 100644
--- a/src/third_party/wiredtiger/test/format/format.i
+++ b/src/third_party/wiredtiger/test/format/format.i
@@ -76,7 +76,7 @@ rng(WT_RAND_STATE *rnd)
* and replay because threaded operation order can't be replayed. Do that check inline so it's a
* cheap call once thread performance starts to matter.
*/
- return (g.rand_log_stop ? __wt_random(rnd) : rng_slow(rnd));
+ return (g.randfp == NULL || g.rand_log_stop ? __wt_random(rnd) : rng_slow(rnd));
}
/*
@@ -139,3 +139,74 @@ wiredtiger_begin_transaction(WT_SESSION *session, const char *config)
__wt_yield();
testutil_check(ret);
}
+
+/*
+ * key_gen --
+ * Generate a key for lookup.
+ */
+static inline void
+key_gen(WT_ITEM *key, uint64_t keyno)
+{
+ key_gen_common(key, keyno, "00");
+}
+
+/*
+ * key_gen_insert --
+ * Generate a key for insertion.
+ */
+static inline void
+key_gen_insert(WT_RAND_STATE *rnd, WT_ITEM *key, uint64_t keyno)
+{
+ static const char *const suffix[15] = {
+ "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12", "13", "14", "15"};
+
+ key_gen_common(key, keyno, suffix[mmrand(rnd, 0, 14)]);
+}
+
+/*
+ * lock_try_writelock
+ * Try to get exclusive lock. Fail immediately if not available.
+ */
+static inline int
+lock_try_writelock(WT_SESSION *session, RWLOCK *lock)
+{
+ testutil_assert(LOCK_INITIALIZED(lock));
+
+ if (lock->lock_type == LOCK_WT) {
+ return (__wt_try_writelock((WT_SESSION_IMPL *)session, &lock->l.wt));
+ } else {
+ return (pthread_rwlock_trywrlock(&lock->l.pthread));
+ }
+}
+
+/*
+ * lock_writelock --
+ * Wait to get exclusive lock.
+ */
+static inline void
+lock_writelock(WT_SESSION *session, RWLOCK *lock)
+{
+ testutil_assert(LOCK_INITIALIZED(lock));
+
+ if (lock->lock_type == LOCK_WT) {
+ __wt_writelock((WT_SESSION_IMPL *)session, &lock->l.wt);
+ } else {
+ testutil_check(pthread_rwlock_wrlock(&lock->l.pthread));
+ }
+}
+
+/*
+ * lock_writeunlock --
+ * Release an exclusive lock.
+ */
+static inline void
+lock_writeunlock(WT_SESSION *session, RWLOCK *lock)
+{
+ testutil_assert(LOCK_INITIALIZED(lock));
+
+ if (lock->lock_type == LOCK_WT) {
+ __wt_writeunlock((WT_SESSION_IMPL *)session, &lock->l.wt);
+ } else {
+ testutil_check(pthread_rwlock_unlock(&lock->l.pthread));
+ }
+}
diff --git a/src/third_party/wiredtiger/test/format/format.sh b/src/third_party/wiredtiger/test/format/format.sh
index 200aa5466c5..19f5df8ede4 100755
--- a/src/third_party/wiredtiger/test/format/format.sh
+++ b/src/third_party/wiredtiger/test/format/format.sh
@@ -248,8 +248,10 @@ report_failure()
dir=$1
log="$dir.log"
- skip_known_errors $log
- skip_ret=$?
+ # DO NOT CURRENTLY SKIP ANY ERRORS.
+ skip_ret=0
+ #skip_known_errors $log
+ #skip_ret=$?
echo "$name: failure status reported" > $dir/$status
[[ $skip_ret -ne 0 ]] && failure=$(($failure + 1))
@@ -318,6 +320,14 @@ resolve()
continue
}
+ # Check for Evergreen running out of disk space, and forcibly quit.
+ grep -E -i 'no space left on device' $log > /dev/null && {
+ rm -rf $dir $log
+ force_quit=1
+ echo "$name: job in $dir ran out of disk space"
+ continue
+ }
+
# Test recovery on jobs configured for random abort. */
grep 'aborting to test recovery' $log > /dev/null && {
cp -pr $dir $dir.RECOVER
@@ -424,7 +434,8 @@ format()
args=$format_args
# If abort/recovery testing is configured, do it 5% of the time.
- [[ $abort_test -ne 0 ]] && [[ $(($count_jobs % 20)) -eq 0 ]] && args="$args abort=1"
+ [[ $abort_test -ne 0 ]] &&
+ [[ $(($count_jobs % 20)) -eq 0 ]] && args="$args format.abort=1"
echo "$name: starting job in $dir ($(date))"
fi
diff --git a/src/third_party/wiredtiger/test/format/kv.c b/src/third_party/wiredtiger/test/format/kv.c
new file mode 100644
index 00000000000..6305376eaa8
--- /dev/null
+++ b/src/third_party/wiredtiger/test/format/kv.c
@@ -0,0 +1,282 @@
+/*-
+ * Public Domain 2014-2020 MongoDB, Inc.
+ * Public Domain 2008-2014 WiredTiger, Inc.
+ *
+ * This is free and unencumbered software released into the public domain.
+ *
+ * Anyone is free to copy, modify, publish, use, compile, sell, or
+ * distribute this software, either in source code form or as a compiled
+ * binary, for any purpose, commercial or non-commercial, and by any
+ * means.
+ *
+ * In jurisdictions that recognize copyright laws, the author or authors
+ * of this software dedicate any and all copyright interest in the
+ * software to the public domain. We make this dedication for the benefit
+ * of the public at large and to the detriment of our heirs and
+ * successors. We intend this dedication to be an overt act of
+ * relinquishment in perpetuity of all present and future rights to this
+ * software under copyright law.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "format.h"
+
+/*
+ * key_init --
+ * Initialize the keys for a run.
+ */
+void
+key_init(void)
+{
+ FILE *fp;
+ size_t i;
+ uint32_t max;
+
+ /*
+ * The key is a variable length item with a leading 10-digit value. Since we have to be able
+ * re-construct it from the record number (when doing row lookups), we pre-load a set of random
+ * lengths in a lookup table, and then use the record number to choose one of the pre-loaded
+ * lengths.
+ *
+ * Read in the values during reopen.
+ */
+ if (g.reopen) {
+ if ((fp = fopen(g.home_key, "r")) == NULL)
+ testutil_die(errno, "%s", g.home_key);
+ for (i = 0; i < WT_ELEMENTS(g.key_rand_len); ++i)
+ fp_readv(fp, g.home_key, false, &g.key_rand_len[i]);
+ fclose_and_clear(&fp);
+ return;
+ }
+
+ /*
+ * Fill in the random key lengths.
+ *
+ * Focus on relatively small items, admitting the possibility of larger items. Pick a size close
+ * to the minimum most of the time, only create a larger item 1 in 20 times.
+ */
+ for (i = 0; i < WT_ELEMENTS(g.key_rand_len); ++i) {
+ max = g.c_key_max;
+ if (i % 20 != 0 && max > g.c_key_min + 20)
+ max = g.c_key_min + 20;
+ g.key_rand_len[i] = mmrand(NULL, g.c_key_min, max);
+ }
+
+ /* Write out the values for a subsequent reopen. */
+ if ((fp = fopen(g.home_key, "w")) == NULL)
+ testutil_die(errno, "%s", g.home_key);
+ for (i = 0; i < WT_ELEMENTS(g.key_rand_len); ++i)
+ fprintf(fp, "%" PRIu32 "\n", g.key_rand_len[i]);
+ fclose_and_clear(&fp);
+}
+
+/*
+ * key_gen_init --
+ * Initialize the key structures for a run.
+ */
+void
+key_gen_init(WT_ITEM *key)
+{
+ size_t i, len;
+ char *p;
+
+ len = WT_MAX(KILOBYTE(100), g.c_key_max);
+ p = dmalloc(len);
+ for (i = 0; i < len; ++i)
+ p[i] = "abcdefghijklmnopqrstuvwxyz"[i % 26];
+
+ key->mem = p;
+ key->memsize = len;
+ key->data = key->mem;
+ key->size = 0;
+}
+
+/*
+ * key_gen_teardown --
+ * Tear down the key structures.
+ */
+void
+key_gen_teardown(WT_ITEM *key)
+{
+ free(key->mem);
+ memset(key, 0, sizeof(*key));
+}
+
+/*
+ * key_gen_common --
+ * Key generation code shared between normal and insert key generation.
+ */
+void
+key_gen_common(WT_ITEM *key, uint64_t keyno, const char *const suffix)
+{
+ int len;
+ char *p;
+
+ p = key->mem;
+
+ /*
+ * The key always starts with a 10-digit string (the specified row) followed by two digits, a
+ * random number between 1 and 15 if it's an insert, otherwise 00.
+ */
+ u64_to_string_zf(keyno, key->mem, 11);
+ p[10] = '.';
+ p[11] = suffix[0];
+ p[12] = suffix[1];
+ len = 13;
+
+ /*
+ * In a column-store, the key isn't used, it doesn't need a random length.
+ */
+ if (g.type == ROW) {
+ p[len] = '/';
+
+ /*
+ * Because we're doing table lookup for key sizes, we weren't able to set really big keys
+ * sizes in the table, the table isn't big enough to keep our hash from selecting too many
+ * big keys and blowing out the cache. Handle that here, use a really big key 1 in 2500
+ * times.
+ */
+ len = keyno % 2500 == 0 && g.c_key_max < KILOBYTE(80) ?
+ KILOBYTE(80) :
+ (int)g.key_rand_len[keyno % WT_ELEMENTS(g.key_rand_len)];
+ }
+
+ key->data = key->mem;
+ key->size = (size_t)len;
+}
+
+static char *val_base; /* Base/original value */
+static uint32_t val_dup_data_len; /* Length of duplicate data items */
+static uint32_t val_len; /* Length of data items */
+
+static inline uint32_t
+value_len(WT_RAND_STATE *rnd, uint64_t keyno, uint32_t min, uint32_t max)
+{
+ /*
+ * Focus on relatively small items, admitting the possibility of larger items. Pick a size close
+ * to the minimum most of the time, only create a larger item 1 in 20 times, and a really big
+ * item 1 in somewhere around 2500 items.
+ */
+ if (keyno % 2500 == 0 && max < KILOBYTE(80)) {
+ min = KILOBYTE(80);
+ max = KILOBYTE(100);
+ } else if (keyno % 20 != 0 && max > min + 20)
+ max = min + 20;
+ return (mmrand(rnd, min, max));
+}
+
+void
+val_init(void)
+{
+ size_t i;
+
+ /* Discard any previous value initialization. */
+ free(val_base);
+ val_base = NULL;
+ val_dup_data_len = val_len = 0;
+
+ /*
+ * Set initial buffer contents to recognizable text.
+ *
+ * Add a few extra bytes in order to guarantee we can always offset into the buffer by a few
+ * extra bytes, used to generate different data for column-store run-length encoded files.
+ */
+ val_len = WT_MAX(KILOBYTE(100), g.c_value_max) + 20;
+ val_base = dmalloc(val_len);
+ for (i = 0; i < val_len; ++i)
+ val_base[i] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"[i % 26];
+
+ val_dup_data_len = value_len(NULL, (uint64_t)mmrand(NULL, 1, 20), g.c_value_min, g.c_value_max);
+}
+
+void
+val_gen_init(WT_ITEM *value)
+{
+ value->mem = dmalloc(val_len);
+ value->memsize = val_len;
+ value->data = value->mem;
+ value->size = 0;
+}
+
+void
+val_gen_teardown(WT_ITEM *value)
+{
+ free(value->mem);
+ memset(value, 0, sizeof(*value));
+}
+
+void
+val_gen(WT_RAND_STATE *rnd, WT_ITEM *value, uint64_t keyno)
+{
+ char *p;
+
+ p = value->mem;
+ value->data = value->mem;
+
+ /*
+ * Fixed-length records: take the low N bits from the last digit of the record number.
+ */
+ if (g.type == FIX) {
+ switch (g.c_bitcnt) {
+ case 8:
+ p[0] = (char)mmrand(rnd, 1, 0xff);
+ break;
+ case 7:
+ p[0] = (char)mmrand(rnd, 1, 0x7f);
+ break;
+ case 6:
+ p[0] = (char)mmrand(rnd, 1, 0x3f);
+ break;
+ case 5:
+ p[0] = (char)mmrand(rnd, 1, 0x1f);
+ break;
+ case 4:
+ p[0] = (char)mmrand(rnd, 1, 0x0f);
+ break;
+ case 3:
+ p[0] = (char)mmrand(rnd, 1, 0x07);
+ break;
+ case 2:
+ p[0] = (char)mmrand(rnd, 1, 0x03);
+ break;
+ case 1:
+ p[0] = 1;
+ break;
+ }
+ value->size = 1;
+ return;
+ }
+
+ /*
+ * WiredTiger doesn't store zero-length data items in row-store files, test that by inserting a
+ * zero-length data item every so often.
+ */
+ if (keyno % 63 == 0) {
+ p[0] = '\0';
+ value->size = 0;
+ return;
+ }
+
+ /*
+ * Data items have unique leading numbers by default and random lengths; variable-length
+ * column-stores use a duplicate data value to test RLE.
+ */
+ if (g.type == VAR && mmrand(rnd, 1, 100) < g.c_repeat_data_pct) {
+ value->size = val_dup_data_len;
+ memcpy(p, val_base, value->size);
+ (void)strcpy(p, "DUPLICATEV");
+ p[10] = '/';
+ } else {
+ value->size = value_len(rnd, keyno, g.c_value_min, g.c_value_max);
+ memcpy(p, val_base, value->size);
+ u64_to_string_zf(keyno, p, 11);
+ p[10] = '/';
+ }
+}
diff --git a/src/third_party/wiredtiger/test/format/ops.c b/src/third_party/wiredtiger/test/format/ops.c
index 93d5fc204ac..78e8d1cf4b8 100644
--- a/src/third_party/wiredtiger/test/format/ops.c
+++ b/src/third_party/wiredtiger/test/format/ops.c
@@ -101,11 +101,11 @@ random_failure(void)
TINFO **tinfo_list;
/*
- * wts_ops --
+ * operations --
* Perform a number of operations in a set of threads.
*/
void
-wts_ops(u_int ops_seconds, bool lastrun)
+operations(u_int ops_seconds, bool lastrun)
{
TINFO *tinfo, total;
WT_CONNECTION *conn;
@@ -158,11 +158,13 @@ wts_ops(u_int ops_seconds, bool lastrun)
if (!SINGLETHREADED)
g.rand_log_stop = true;
- /* Logging requires a session. */
- if (g.logging)
- testutil_check(conn->open_session(conn, NULL, NULL, &session));
+ testutil_check(conn->open_session(conn, NULL, NULL, &session));
logop(session, "%s", "=============== thread ops start");
+ /* Initialize locks to single-thread backups, failures, and timestamp updates. */
+ lock_init(session, &g.backup_lock);
+ lock_init(session, &g.ts_lock);
+
/*
* Create the per-thread structures and start the worker threads. Allocate the thread structures
* separately to minimize false sharing.
@@ -195,8 +197,6 @@ wts_ops(u_int ops_seconds, bool lastrun)
testutil_check(__wt_thread_create(NULL, &alter_tid, alter, NULL));
if (g.c_backups)
testutil_check(__wt_thread_create(NULL, &backup_tid, backup, NULL));
- if (g.c_checkpoint_flag == CHECKPOINT_ON)
- testutil_check(__wt_thread_create(NULL, &checkpoint_tid, checkpoint, NULL));
if (g.c_compact)
testutil_check(__wt_thread_create(NULL, &compact_tid, compact, NULL));
if (g.c_random_cursor)
@@ -204,6 +204,19 @@ wts_ops(u_int ops_seconds, bool lastrun)
if (g.c_txn_timestamps)
testutil_check(__wt_thread_create(NULL, &timestamp_tid, timestamp, tinfo_list));
+ /*
+ * Configuring WiredTiger library checkpoints is done separately, rather than as part of the
+ * original database open because format tests small caches and you can get into cache stuck
+ * trouble during the initial load (where bulk load isn't configured). There's a single thread
+ * doing lots of inserts and creating huge leaf pages. Those pages can't be evicted if there's a
+ * checkpoint running in the tree, and the cache can get stuck. That workload is unlikely enough
+ * we're not going to fix it in the library, so configure it away by delaying checkpoint start.
+ */
+ if (g.c_checkpoint_flag == CHECKPOINT_WIREDTIGER)
+ wts_checkpoints();
+ if (g.c_checkpoint_flag == CHECKPOINT_ON)
+ testutil_check(__wt_thread_create(NULL, &checkpoint_tid, checkpoint, NULL));
+
/* Spin on the threads, calculating the totals. */
for (;;) {
/* Clear out the totals each pass. */
@@ -284,12 +297,22 @@ wts_ops(u_int ops_seconds, bool lastrun)
testutil_check(__wt_thread_join(NULL, &timestamp_tid));
g.workers_finished = false;
+ lock_destroy(session, &g.backup_lock);
+ lock_destroy(session, &g.ts_lock);
+
logop(session, "%s", "=============== thread ops stop");
- if (g.logging)
- testutil_check(session->close(session, NULL));
+ testutil_check(session->close(session, NULL));
- for (i = 0; i < g.c_threads; ++i)
- free(tinfo_list[i]);
+ for (i = 0; i < g.c_threads; ++i) {
+ tinfo = tinfo_list[i];
+
+ /*
+ * Assert records were not removed unless configured to do so, otherwise subsequent runs can
+ * incorrectly report scan errors.
+ */
+ testutil_assert(g.c_delete_pct != 0 || tinfo->remove == 0);
+ free(tinfo);
+ }
free(tinfo_list);
}
@@ -353,13 +376,13 @@ begin_transaction_ts(TINFO *tinfo, u_int *iso_configp)
*
* Lock out the oldest timestamp update.
*/
- testutil_check(pthread_rwlock_wrlock(&g.ts_lock));
+ lock_writelock(session, &g.ts_lock);
ts = __wt_atomic_addv64(&g.timestamp, 1);
testutil_check(__wt_snprintf(buf, sizeof(buf), "read_timestamp=%" PRIx64, ts));
testutil_check(session->timestamp_transaction(session, buf));
- testutil_check(pthread_rwlock_unlock(&g.ts_lock));
+ lock_writeunlock(session, &g.ts_lock);
snap_init(tinfo, ts, false);
logop(session, "begin snapshot read-ts=%" PRIu64 " (not repeatable)", ts);
@@ -424,7 +447,7 @@ commit_transaction(TINFO *tinfo, bool prepared)
ts = 0; /* -Wconditional-uninitialized */
if (g.c_txn_timestamps) {
/* Lock out the oldest timestamp update. */
- testutil_check(pthread_rwlock_wrlock(&g.ts_lock));
+ lock_writelock(session, &g.ts_lock);
ts = __wt_atomic_addv64(&g.timestamp, 1);
testutil_check(__wt_snprintf(buf, sizeof(buf), "commit_timestamp=%" PRIx64, ts));
@@ -435,7 +458,7 @@ commit_transaction(TINFO *tinfo, bool prepared)
testutil_check(session->timestamp_transaction(session, buf));
}
- testutil_check(pthread_rwlock_unlock(&g.ts_lock));
+ lock_writeunlock(session, &g.ts_lock);
}
testutil_check(session->commit_transaction(session, NULL));
@@ -473,7 +496,7 @@ prepare_transaction(TINFO *tinfo)
{
WT_DECL_RET;
WT_SESSION *session;
- uint64_t ts;
+ uint64_t longwait, pause_ms, ts;
char buf[64];
session = tinfo->session;
@@ -490,7 +513,7 @@ prepare_transaction(TINFO *tinfo)
*
* Lock out the oldest timestamp update.
*/
- testutil_check(pthread_rwlock_wrlock(&g.ts_lock));
+ lock_writelock(session, &g.ts_lock);
ts = __wt_atomic_addv64(&g.timestamp, 1);
testutil_check(__wt_snprintf(buf, sizeof(buf), "prepare_timestamp=%" PRIx64, ts));
@@ -498,8 +521,21 @@ prepare_transaction(TINFO *tinfo)
logop(session, "prepare ts=%" PRIu64, ts);
- testutil_check(pthread_rwlock_unlock(&g.ts_lock));
+ lock_writeunlock(session, &g.ts_lock);
+ /*
+ * Sometimes add a delay after prepare to induce extra memory stress. For 80% of the threads,
+ * there is never a delay, so there is always a dedicated set of threads trying to do work. For
+ * the other 20%, we'll sometimes delay. For these threads, 99% of the time, proceed without
+ * delay. The rest of the time, pause up to 5 seconds, weighted toward the smaller delays.
+ */
+ if (tinfo->id % 5 == 0) {
+ longwait = mmrand(&tinfo->rnd, 0, 999);
+ if (longwait < 10) {
+ pause_ms = mmrand(&tinfo->rnd, 1, 10) << longwait;
+ __wt_sleep(0, pause_ms * WT_THOUSAND);
+ }
+ }
return (ret);
}
@@ -532,10 +568,10 @@ prepare_transaction(TINFO *tinfo)
* When in a transaction on the live table with snapshot isolation, track operations for later
* repetition.
*/
-#define SNAP_TRACK(tinfo, op) \
- do { \
- if (intxn && !ckpt_handle && iso_config == ISOLATION_SNAPSHOT) \
- snap_track(tinfo, op); \
+#define SNAP_TRACK(tinfo, op) \
+ do { \
+ if (intxn && iso_config == ISOLATION_SNAPSHOT) \
+ snap_track(tinfo, op); \
} while (0)
/*
@@ -543,7 +579,7 @@ prepare_transaction(TINFO *tinfo)
* Create a new session/cursor pair for the thread.
*/
static void
-ops_open_session(TINFO *tinfo, bool *ckpt_handlep)
+ops_open_session(TINFO *tinfo)
{
WT_CONNECTION *conn;
WT_CURSOR *cursor;
@@ -559,38 +595,13 @@ ops_open_session(TINFO *tinfo, bool *ckpt_handlep)
testutil_check(conn->open_session(conn, NULL, NULL, &session));
/*
- * 10% of the time, perform some read-only operations from a checkpoint.
- * Skip if we are using data-sources or LSM, they don't support reading
- * from checkpoints.
+ * Configure "append", in the case of column stores, we append when inserting new rows.
+ *
+ * WT_SESSION.open_cursor can return EBUSY if concurrent with a metadata operation, retry.
*/
- cursor = NULL;
- if (!DATASOURCE("lsm") && mmrand(&tinfo->rnd, 1, 10) == 1) {
- /*
- * WT_SESSION.open_cursor can return EBUSY if concurrent with a metadata operation, retry.
- */
- while ((ret = session->open_cursor(
- session, g.uri, NULL, "checkpoint=WiredTigerCheckpoint", &cursor)) == EBUSY)
- __wt_yield();
-
- /*
- * If the checkpoint hasn't been created yet, ignore the error.
- */
- if (ret != ENOENT) {
- testutil_check(ret);
- *ckpt_handlep = true;
- }
- }
- if (cursor == NULL) {
- /*
- * Configure "append", in the case of column stores, we append when inserting new rows.
- *
- * WT_SESSION.open_cursor can return EBUSY if concurrent with a metadata operation, retry.
- */
- while ((ret = session->open_cursor(session, g.uri, NULL, "append", &cursor)) == EBUSY)
- __wt_yield();
- testutil_checkfmt(ret, "%s", g.uri);
- *ckpt_handlep = false;
- }
+ while ((ret = session->open_cursor(session, g.uri, NULL, "append", &cursor)) == EBUSY)
+ __wt_yield();
+ testutil_checkfmt(ret, "%s", g.uri);
tinfo->session = session;
tinfo->cursor = cursor;
@@ -611,12 +622,11 @@ ops(void *arg)
uint64_t reset_op, session_op, truncate_op;
uint32_t range, rnd;
u_int i, j, iso_config;
- bool ckpt_handle, greater_than, intxn, next, positioned, prepared;
+ bool greater_than, intxn, next, positioned, prepared;
tinfo = arg;
iso_config = ISOLATION_RANDOM; /* -Wconditional-uninitialized */
- ckpt_handle = false; /* -Wconditional-uninitialized */
/* Tracking of transactional snapshot isolation operations. */
tinfo->snap = tinfo->snap_first = tinfo->snap_list;
@@ -651,7 +661,7 @@ ops(void *arg)
intxn = false;
}
- ops_open_session(tinfo, &ckpt_handle);
+ ops_open_session(tinfo);
/* Pick the next session/cursor close/open. */
session_op += mmrand(&tinfo->rnd, 100, 5000);
@@ -676,7 +686,7 @@ ops(void *arg)
* If not in a transaction, have a live handle and running in a timestamp world,
* occasionally repeat a timestamped operation.
*/
- if (!intxn && !ckpt_handle && g.c_txn_timestamps && mmrand(&tinfo->rnd, 1, 15) == 1) {
+ if (!intxn && g.c_txn_timestamps && mmrand(&tinfo->rnd, 1, 15) == 1) {
++tinfo->search;
snap_repeat_single(cursor, tinfo);
}
@@ -695,22 +705,20 @@ ops(void *arg)
/* Select an operation. */
op = READ;
- if (!ckpt_handle) {
- i = mmrand(&tinfo->rnd, 1, 100);
- if (i < g.c_delete_pct && tinfo->ops > truncate_op) {
- op = TRUNCATE;
-
- /* Pick the next truncate operation. */
- truncate_op += mmrand(&tinfo->rnd, 20000, 100000);
- } else if (i < g.c_delete_pct)
- op = REMOVE;
- else if (i < g.c_delete_pct + g.c_insert_pct)
- op = INSERT;
- else if (i < g.c_delete_pct + g.c_insert_pct + g.c_modify_pct)
- op = MODIFY;
- else if (i < g.c_delete_pct + g.c_insert_pct + g.c_modify_pct + g.c_write_pct)
- op = UPDATE;
- }
+ i = mmrand(&tinfo->rnd, 1, 100);
+ if (i < g.c_delete_pct && tinfo->ops > truncate_op) {
+ op = TRUNCATE;
+
+ /* Pick the next truncate operation. */
+ truncate_op += mmrand(&tinfo->rnd, 20000, 100000);
+ } else if (i < g.c_delete_pct)
+ op = REMOVE;
+ else if (i < g.c_delete_pct + g.c_insert_pct)
+ op = INSERT;
+ else if (i < g.c_delete_pct + g.c_insert_pct + g.c_modify_pct)
+ op = MODIFY;
+ else if (i < g.c_delete_pct + g.c_insert_pct + g.c_modify_pct + g.c_write_pct)
+ op = UPDATE;
/* Select a row. */
tinfo->keyno = mmrand(&tinfo->rnd, 1, (u_int)g.rows);
@@ -735,7 +743,7 @@ ops(void *arg)
* Optionally reserve a row. Reserving a row before a read isn't all that sensible, but not
* unexpected, either.
*/
- if (intxn && !ckpt_handle && mmrand(&tinfo->rnd, 0, 20) == 1) {
+ if (intxn && mmrand(&tinfo->rnd, 0, 20) == 1) {
switch (g.type) {
case ROW:
ret = row_reserve(tinfo, cursor, positioned);
@@ -814,7 +822,6 @@ ops(void *arg)
READ_OP_FAILED(true);
break;
case REMOVE:
-remove_instead_of_truncate:
switch (g.type) {
case ROW:
ret = row_remove(tinfo, cursor, positioned);
@@ -841,7 +848,7 @@ remove_instead_of_truncate:
*/
if (__wt_atomic_addv64(&g.truncate_cnt, 1) > 2) {
(void)__wt_atomic_subv64(&g.truncate_cnt, 1);
- goto remove_instead_of_truncate;
+ goto update_instead_of_chosen_op;
}
if (!positioned)
@@ -955,7 +962,7 @@ update_instead_of_chosen_op:
* Ending a transaction. If on a live handle and the transaction was configured for snapshot
* isolation, repeat the operations and confirm the results are unchanged.
*/
- if (intxn && !ckpt_handle && iso_config == ISOLATION_SNAPSHOT) {
+ if (intxn && iso_config == ISOLATION_SNAPSHOT) {
__wt_yield(); /* Encourage races */
ret = snap_repeat_txn(cursor, tinfo);
@@ -1051,7 +1058,7 @@ wts_read_scan(void)
testutil_check(ret);
/* Check a random subset of the records using the key. */
- for (keyno = 0; keyno < g.key_cnt;) {
+ for (keyno = 0; keyno < g.rows;) {
keyno += mmrand(NULL, 1, 1000);
if (keyno > g.rows)
keyno = g.rows;
@@ -1208,9 +1215,12 @@ nextprev(TINFO *tinfo, WT_CURSOR *cursor, bool next)
/*
* Compare the returned key with the previously returned key, and assert the order is
* correct. If not deleting keys, and the rows aren't in the column-store insert name space,
- * also assert we don't skip groups of records (that's a page-split bug symptom).
+ * also assert we don't skip groups of records (that's a page-split bug symptom). Note a
+ * previous run that performed salvage might have corrupted a chunk of space such that
+ * records were removed. If this is a reopen of an existing database, assume salvage might
+ * have happened.
*/
- record_gaps = g.c_delete_pct != 0;
+ record_gaps = g.c_delete_pct != 0 || g.reopen;
switch (g.type) {
case FIX:
case VAR:
@@ -1240,8 +1250,8 @@ order_error_col:
if (!record_gaps) {
/*
* Convert the keys to record numbers and then compare less-than-or-equal. (Not
- * less-than, row-store inserts new rows in-between rows by append a new suffix to
- * the row's key.)
+ * less-than, row-store inserts new rows in-between rows by appending a new suffix
+ * to the row's key.)
*/
testutil_check(__wt_buf_fmt((WT_SESSION_IMPL *)cursor->session, tinfo->tbuf, "%.*s",
(int)tinfo->key->size, (char *)tinfo->key->data));
diff --git a/src/third_party/wiredtiger/test/format/rebalance.c b/src/third_party/wiredtiger/test/format/rebalance.c
index c5d804c77c9..ea5d38a6d16 100644
--- a/src/third_party/wiredtiger/test/format/rebalance.c
+++ b/src/third_party/wiredtiger/test/format/rebalance.c
@@ -28,12 +28,16 @@
#include "format.h"
+#define REBALANCE_COPY_CMD "../../wt -h %s dump -f %s/REBALANCE.%s %s"
+#define REBALANCE_CMP_CMD "cmp %s/REBALANCE.orig %s/REBALANCE.new > /dev/null"
+
void
wts_rebalance(void)
{
WT_CONNECTION *conn;
WT_SESSION *session;
- char cmd[1024];
+ size_t len;
+ char *cmd;
if (g.c_rebalance == 0)
return;
@@ -41,9 +45,9 @@ wts_rebalance(void)
track("rebalance", 0ULL, NULL);
/* Dump the current object */
- testutil_check(__wt_snprintf(cmd, sizeof(cmd), ".." DIR_DELIM_STR ".." DIR_DELIM_STR "wt"
- " -h %s dump -f %s/rebalance.orig %s",
- g.home, g.home, g.uri));
+ len = strlen(g.home) * 2 + strlen(g.uri) + strlen(REBALANCE_COPY_CMD) + 100;
+ cmd = dmalloc(len);
+ testutil_check(__wt_snprintf(cmd, len, REBALANCE_COPY_CMD, g.home, g.home, "orig", g.uri));
testutil_checkfmt(system(cmd), "command failed: %s", cmd);
/* Rebalance, then verify the object. */
@@ -59,18 +63,12 @@ wts_rebalance(void)
wts_verify("post-rebalance verify");
wts_close();
- testutil_check(__wt_snprintf(cmd, sizeof(cmd), ".." DIR_DELIM_STR ".." DIR_DELIM_STR "wt"
- " -h %s dump -f %s/rebalance.new %s",
- g.home, g.home, g.uri));
+ testutil_check(__wt_snprintf(cmd, len, REBALANCE_COPY_CMD, g.home, g.home, "new", g.uri));
testutil_checkfmt(system(cmd), "command failed: %s", cmd);
-/* Compare the old/new versions of the object. */
-#ifdef _WIN32
- testutil_check(__wt_snprintf(
- cmd, sizeof(cmd), "fc /b %s\\rebalance.orig %s\\rebalance.new > NUL", g.home, g.home));
-#else
- testutil_check(__wt_snprintf(
- cmd, sizeof(cmd), "cmp %s/rebalance.orig %s/rebalance.new > /dev/null", g.home, g.home));
-#endif
+ /* Compare the old/new versions of the object. */
+ testutil_check(__wt_snprintf(cmd, len, REBALANCE_CMP_CMD, g.home, g.home));
testutil_checkfmt(system(cmd), "command failed: %s", cmd);
+
+ free(cmd);
}
diff --git a/src/third_party/wiredtiger/test/format/recover.sh b/src/third_party/wiredtiger/test/format/recover.sh
index 4177e26a278..1fb94b6d27f 100644
--- a/src/third_party/wiredtiger/test/format/recover.sh
+++ b/src/third_party/wiredtiger/test/format/recover.sh
@@ -32,7 +32,7 @@ while true; do
fi
rm -rf $rundir2
- $tcmd $config -q abort=1 logging=1 timer=$timer
+ $tcmd $config -q format.abort=1 logging=1 runs.timer=$timer
# Save a copy of the database directory exactly as it was at the crash.
cp -rp RUNDIR $rundir2
diff --git a/src/third_party/wiredtiger/test/format/salvage.c b/src/third_party/wiredtiger/test/format/salvage.c
index cf7955e495f..b9aca837215 100644
--- a/src/third_party/wiredtiger/test/format/salvage.c
+++ b/src/third_party/wiredtiger/test/format/salvage.c
@@ -69,24 +69,14 @@ corrupt(void)
*/
testutil_check(__wt_snprintf(buf, sizeof(buf), "%s/%s", g.home, WT_NAME));
if ((fd = open(buf, O_RDWR)) != -1) {
-#ifdef _WIN32
testutil_check(__wt_snprintf(copycmd, sizeof(copycmd),
- "copy %s\\%s %s\\slvg.copy\\%s.corrupted", g.home, WT_NAME, g.home, WT_NAME));
-#else
- testutil_check(__wt_snprintf(copycmd, sizeof(copycmd), "cp %s/%s %s/slvg.copy/%s.corrupted",
- g.home, WT_NAME, g.home, WT_NAME));
-#endif
+ "cp %s/%s %s/SALVAGE.copy/%s.corrupted", g.home, WT_NAME, g.home, WT_NAME));
goto found;
}
testutil_check(__wt_snprintf(buf, sizeof(buf), "%s/%s.wt", g.home, WT_NAME));
if ((fd = open(buf, O_RDWR)) != -1) {
-#ifdef _WIN32
testutil_check(__wt_snprintf(copycmd, sizeof(copycmd),
- "copy %s\\%s.wt %s\\slvg.copy\\%s.wt.corrupted", g.home, WT_NAME, g.home, WT_NAME));
-#else
- testutil_check(__wt_snprintf(copycmd, sizeof(copycmd),
- "cp %s/%s.wt %s/slvg.copy/%s.wt.corrupted", g.home, WT_NAME, g.home, WT_NAME));
-#endif
+ "cp %s/%s.wt %s/SALVAGE.copy/%s.wt.corrupted", g.home, WT_NAME, g.home, WT_NAME));
goto found;
}
return (0);
@@ -97,7 +87,7 @@ found:
offset = mmrand(NULL, 0, (u_int)sb.st_size);
len = (size_t)(20 + (sb.st_size / 100) * 2);
- testutil_check(__wt_snprintf(buf, sizeof(buf), "%s/slvg.corrupt", g.home));
+ testutil_check(__wt_snprintf(buf, sizeof(buf), "%s/SALVAGE.corrupt", g.home));
if ((fp = fopen(buf, "w")) == NULL)
testutil_die(errno, "salvage-corrupt: open: %s", buf);
(void)fprintf(fp, "salvage-corrupt: offset %" PRIuMAX ", length %" WT_SIZET_FMT "\n",
@@ -127,6 +117,17 @@ found:
}
/*
+ * Salvage command, save the interesting files so we can replay the salvage command as necessary.
+ *
+ * Redirect the "cd" command to /dev/null so chatty cd implementations don't add the new working
+ * directory to our output.
+ */
+#define SALVAGE_COPY_CMD \
+ "cd %s > /dev/null && " \
+ "rm -rf SALVAGE.copy && mkdir SALVAGE.copy && " \
+ "cp WiredTiger* wt* SALVAGE.copy/"
+
+/*
* wts_salvage --
* Salvage testing.
*/
@@ -134,15 +135,19 @@ void
wts_salvage(void)
{
WT_DECL_RET;
+ size_t len;
+ char *cmd;
if (g.c_salvage == 0)
return;
- /*
- * Save a copy of the interesting files so we can replay the salvage step as necessary.
- */
- if ((ret = system(g.home_salvage_copy)) != 0)
- testutil_die(ret, "salvage copy step failed");
+ /* Save a copy of the interesting files so we can replay the salvage step as necessary. */
+ len = strlen(g.home) + strlen(SALVAGE_COPY_CMD) + 1;
+ cmd = dmalloc(len);
+ testutil_check(__wt_snprintf(cmd, len, SALVAGE_COPY_CMD, g.home));
+ if ((ret = system(cmd)) != 0)
+ testutil_die(ret, "salvage copy (\"%s\"), failed", cmd);
+ free(cmd);
/* Salvage, then verify. */
wts_open(g.home, true, &g.wts_conn);
diff --git a/src/third_party/wiredtiger/test/format/smoke.sh b/src/third_party/wiredtiger/test/format/smoke.sh
index 309cedbc5ac..06bf108dbd2 100755
--- a/src/third_party/wiredtiger/test/format/smoke.sh
+++ b/src/third_party/wiredtiger/test/format/smoke.sh
@@ -3,11 +3,17 @@
set -e
# Smoke-test format as part of running "make check".
-args="-1 -c "." data_source=table ops=50000 rows=10000 threads=4 compression=none logging_compression=none"
+args="-1 -c . "
+args="$args btree.compression=none "
+args="$args logging_compression=none"
+args="$args runs.ops=50000 "
+args="$args runs.rows=10000 "
+args="$args runs.source=table "
+args="$args runs.threads=4 "
-$TEST_WRAPPER ./t $args file_type=fix
-$TEST_WRAPPER ./t $args file_type=row
-$TEST_WRAPPER ./t $args file_type=row data_source=lsm
-$TEST_WRAPPER ./t $args file_type=var
+$TEST_WRAPPER ./t $args runs.type=fix
+$TEST_WRAPPER ./t $args runs.type=row
+$TEST_WRAPPER ./t $args runs.type=row runs.source=lsm
+$TEST_WRAPPER ./t $args runs.type=var
# Force a rebalance to occur with statistics logging to test the utility
-$TEST_WRAPPER ./t $args file_type=row statistics_server=1 rebalance=1
+$TEST_WRAPPER ./t $args runs.type=row statistics.server=1 ops.rebalance=1
diff --git a/src/third_party/wiredtiger/test/format/snap.c b/src/third_party/wiredtiger/test/format/snap.c
index 96c23fd4afd..35fcf20791f 100644
--- a/src/third_party/wiredtiger/test/format/snap.c
+++ b/src/third_party/wiredtiger/test/format/snap.c
@@ -231,18 +231,20 @@ snap_verify(WT_CURSOR *cursor, TINFO *tinfo, SNAP_OPS *snap)
* We have a mismatch. Try to print out as much information as we can. In doing so, we are
* calling into the debug code directly and that does not take locks, so it's possible we will
* simply drop core. The most important information is the key/value mismatch information. Then
- * try to dump out the other information. Right now we dump the entire lookaside table including
- * what is on disk. That can potentially be very large. If it becomes a problem, this can be
- * modified to just dump out the page this key is on. Write a failure message into the log file
- * first so format.sh knows we failed, and turn off core dumps.
+ * try to dump out the other information. Right now we dump the entire history store table
+ * including what is on disk. That can potentially be very large. If it becomes a problem, this
+ * can be modified to just dump out the page this key is on. Write a failure message into the
+ * log file first so format.sh knows we failed, and turn off core dumps.
*/
fprintf(stderr, "\n%s: run FAILED\n", progname);
set_core_off();
fprintf(stderr, "snapshot-isolation error: Dumping page to %s\n", g.home_pagedump);
testutil_check(__wt_debug_cursor_page(cursor, g.home_pagedump));
- fprintf(stderr, "snapshot-isolation error: Dumping LAS to %s\n", g.home_lasdump);
- testutil_check(__wt_debug_cursor_las(cursor, g.home_lasdump));
+ fprintf(stderr, "snapshot-isolation error: Dumping HS to %s\n", g.home_hsdump);
+#if WIREDTIGER_VERSION_MAJOR >= 10
+ testutil_check(__wt_debug_cursor_tree_hs(cursor, g.home_hsdump));
+#endif
if (g.logging)
testutil_check(cursor->session->log_flush(cursor->session, "sync=off"));
#endif
diff --git a/src/third_party/wiredtiger/test/format/t.c b/src/third_party/wiredtiger/test/format/t.c
index 8b7ba07fe9e..b596124087b 100644
--- a/src/third_party/wiredtiger/test/format/t.c
+++ b/src/third_party/wiredtiger/test/format/t.c
@@ -31,7 +31,6 @@
GLOBAL g;
static void format_die(void);
-static void startup(void);
static void usage(void) WT_GCC_FUNC_DECL_ATTRIBUTE((noreturn));
extern int __wt_optind;
@@ -99,6 +98,40 @@ set_alarm(u_int seconds)
}
/*
+ * format_process_env --
+ * Set up the format process environment.
+ */
+static void
+format_process_env(void)
+{
+/*
+ * Windows and Linux support different sets of signals, be conservative about installing handlers.
+ * If we time out unexpectedly, we want a core dump, otherwise, just exit.
+ */
+#ifdef SIGALRM
+ (void)signal(SIGALRM, signal_timer);
+#endif
+#ifdef SIGHUP
+ (void)signal(SIGHUP, signal_handler);
+#endif
+#ifdef SIGTERM
+ (void)signal(SIGTERM, signal_handler);
+#endif
+
+ /* Initialize lock to ensure single threading during failure handling */
+ testutil_check(pthread_rwlock_init(&g.death_lock, NULL));
+
+#if 0
+ /* Configure the GNU malloc for debugging. */
+ (void)setenv("MALLOC_CHECK_", "2", 1);
+#endif
+#if 0
+ /* Configure the FreeBSD malloc for debugging. */
+ (void)setenv("MALLOC_OPTIONS", "AJ", 1);
+#endif
+}
+
+/*
* TIMED_MAJOR_OP --
* Set a timer and perform a major operation (for example, verify or salvage).
*/
@@ -116,8 +149,9 @@ main(int argc, char *argv[])
{
uint64_t now, start;
u_int ops_seconds;
- int ch, onerun, reps;
+ int ch, reps;
const char *config, *home;
+ bool one_flag, quiet_flag;
custom_die = format_die; /* Local death handler. */
@@ -125,44 +159,23 @@ main(int argc, char *argv[])
(void)testutil_set_progname(argv);
-/*
- * Windows and Linux support different sets of signals, be conservative about installing handlers.
- * If we time out unexpectedly, we want a core dump, otherwise, just exit.
- */
-#ifdef SIGALRM
- (void)signal(SIGALRM, signal_timer);
-#endif
-#ifdef SIGHUP
- (void)signal(SIGHUP, signal_handler);
-#endif
-#ifdef SIGTERM
- (void)signal(SIGTERM, signal_handler);
-#endif
-
-#if 0
- /* Configure the GNU malloc for debugging. */
- (void)setenv("MALLOC_CHECK_", "2", 1);
-#endif
-#if 0
- /* Configure the FreeBSD malloc for debugging. */
- (void)setenv("MALLOC_OPTIONS", "AJ", 1);
-#endif
-
- /* Track progress unless we're re-directing output to a file. */
- g.c_quiet = isatty(1) ? 0 : 1;
+ format_process_env();
/* Set values from the command line. */
home = NULL;
- onerun = 0;
- while ((ch = __wt_getopt(progname, argc, argv, "1C:c:h:lqrt:")) != EOF)
+ one_flag = quiet_flag = false;
+ while ((ch = __wt_getopt(progname, argc, argv, "1BC:c:h:lqRrt:")) != EOF)
switch (ch) {
- case '1': /* One run */
- onerun = 1;
+ case '1': /* One run and quit */
+ one_flag = true;
+ break;
+ case 'B': /* Backward compatibility */
+ g.backward_compatible = true;
break;
case 'C': /* wiredtiger_open config */
g.config_open = __wt_optarg;
break;
- case 'c': /* Configuration from a file */
+ case 'c': /* Read configuration from a file */
config = __wt_optarg;
break;
case 'h':
@@ -172,9 +185,12 @@ main(int argc, char *argv[])
g.logging = true;
break;
case 'q': /* Quiet */
- g.c_quiet = 1;
+ quiet_flag = true;
+ break;
+ case 'R': /* Reopen (start running on an existing database) */
+ g.reopen = true;
break;
- case 'r': /* Replay a run */
+ case 'r': /* Replay a run (use the configuration and random numbers from a previous run) */
g.replay = true;
break;
default:
@@ -182,67 +198,58 @@ main(int argc, char *argv[])
}
argv += __wt_optind;
- /* Initialize the global RNG. */
- __wt_random_init_seed(NULL, &g.rnd);
-
/* Set up paths. */
path_setup(home);
- /* If it's a replay, use the home directory's CONFIG file. */
- if (g.replay) {
+ /*
+ * If it's a replay or a reopen, use the already existing home directory's CONFIG file.
+ *
+ * If we weren't given a configuration file, set values from "CONFIG", if it exists. Small hack
+ * to ignore any CONFIG file named ".", that just makes it possible to ignore any local CONFIG
+ * file, used when running checks.
+ */
+ if (g.reopen || g.replay) {
if (config != NULL)
- testutil_die(EINVAL, "-c incompatible with -r");
+ testutil_die(EINVAL, "-c incompatible with -R or -r");
if (access(g.home_config, R_OK) != 0)
testutil_die(ENOENT, "%s", g.home_config);
config = g.home_config;
}
-
- /*
- * If we weren't given a configuration file, set values from "CONFIG", if it exists.
- *
- * Small hack to ignore any CONFIG file named ".", that just makes it possible to ignore any
- * local CONFIG file, used when running checks.
- */
if (config == NULL && access("CONFIG", R_OK) == 0)
config = "CONFIG";
if (config != NULL && strcmp(config, ".") != 0)
config_file(config);
/*
- * The rest of the arguments are individual configurations that modify the base configuration.
+ * Remaining arguments are individual configurations that modify the base configuration. Note
+ * there's no restriction on command-line arguments when re-playing or re-opening a database,
+ * which can lead to a lot of hurt if you're not careful.
*/
for (; *argv != NULL; ++argv)
config_single(*argv, true);
/*
- * Multithreaded runs can be replayed: it's useful and we'll get the configuration correct.
- * Obviously the order of operations changes, warn the user.
+ * Let the command line -1 and -q flags override values configured from other sources.
+ * Regardless, don't go all verbose if we're not talking to a terminal.
*/
- if (g.replay && !SINGLETHREADED)
- printf("Warning: replaying a threaded run\n");
+ if (one_flag)
+ g.c_runs = 1;
+ if (quiet_flag || !isatty(1))
+ g.c_quiet = 1;
/*
+ * Multithreaded runs can be replayed: it's useful and we'll get the configuration correct.
+ * Obviously the order of operations changes, warn the user.
+ *
* Single-threaded runs historically exited after a single replay, which makes sense when you're
* debugging, leave that semantic in place.
*/
+ if (g.replay && !SINGLETHREADED)
+ printf("Warning: replaying a multi-threaded run\n");
if (g.replay && SINGLETHREADED)
g.c_runs = 1;
/*
- * Let the command line -1 flag override runs configured from other sources.
- */
- if (onerun)
- g.c_runs = 1;
-
- /*
- * Initialize locks to single-thread named checkpoints and backups, last last-record updates,
- * and failures.
- */
- testutil_check(pthread_rwlock_init(&g.backup_lock, NULL));
- testutil_check(pthread_rwlock_init(&g.death_lock, NULL));
- testutil_check(pthread_rwlock_init(&g.ts_lock, NULL));
-
- /*
* Calculate how long each operations loop should run. Take any timer value and convert it to
* seconds, then allocate 15 seconds to do initialization, verification, rebalance and/or
* salvage tasks after the operations loop finishes. This is not intended to be exact in any
@@ -254,30 +261,35 @@ main(int argc, char *argv[])
*/
ops_seconds = g.c_timer == 0 ? 0 : ((g.c_timer * 60) - 15) / FORMAT_OPERATION_REPS;
+ __wt_random_init_seed(NULL, &g.rnd); /* Initialize the RNG. */
+
printf("%s: process %" PRIdMAX " running\n", progname, (intmax_t)getpid());
fflush(stdout);
while (++g.run_cnt <= g.c_runs || g.c_runs == 0) {
__wt_seconds(NULL, &start);
+ track("starting up", 0ULL, NULL);
- startup(); /* Start a run */
- config_setup(); /* Run configuration */
- config_print(false); /* Dump run configuration */
- key_init(); /* Setup keys/values */
- val_init();
+ if (!g.reopen)
+ wts_create(); /* Create and initialize the database and an object. */
- track("starting up", 0ULL, NULL);
+ config_final(); /* Remaining configuration and validation */
+
+ handle_init();
+
+ if (g.reopen)
+ wts_reopen(); /* Reopen existing database. */
+ else {
+ wts_open(g.home, true, &g.wts_conn);
+ wts_init();
+ TIMED_MAJOR_OP(wts_load()); /* Load and verify initial records */
+ TIMED_MAJOR_OP(wts_verify("post-bulk verify"));
+ }
- /* Load and verify initial records */
- wts_open(g.home, true, &g.wts_conn);
- wts_init();
- TIMED_MAJOR_OP(wts_load());
- TIMED_MAJOR_OP(wts_verify("post-bulk verify"));
TIMED_MAJOR_OP(wts_read_scan());
/* Operations. */
- wts_checkpoints();
for (reps = 1; reps <= FORMAT_OPERATION_REPS; ++reps)
- wts_ops(ops_seconds, reps == FORMAT_OPERATION_REPS);
+ operations(ops_seconds, reps == FORMAT_OPERATION_REPS);
/* Copy out the run's statistics. */
TIMED_MAJOR_OP(wts_stats());
@@ -301,6 +313,8 @@ main(int argc, char *argv[])
*/
TIMED_MAJOR_OP(wts_salvage());
+ handle_teardown();
+
/* Overwrite the progress line with a completion line. */
if (!g.c_quiet)
printf("\r%78s\r", " ");
@@ -310,16 +324,8 @@ main(int argc, char *argv[])
fflush(stdout);
}
- /* Flush/close any logging information. */
- fclose_and_clear(&g.logfp);
- fclose_and_clear(&g.randfp);
-
config_print(false);
- testutil_check(pthread_rwlock_destroy(&g.backup_lock));
- testutil_check(pthread_rwlock_destroy(&g.death_lock));
- testutil_check(pthread_rwlock_destroy(&g.ts_lock));
-
config_clear();
printf("%s: successful run completed\n", progname);
@@ -328,32 +334,6 @@ main(int argc, char *argv[])
}
/*
- * startup --
- * Initialize for a run.
- */
-static void
-startup(void)
-{
- WT_DECL_RET;
-
- /* Flush/close any logging information. */
- fclose_and_clear(&g.logfp);
- fclose_and_clear(&g.randfp);
-
- /* Create or initialize the home and data-source directories. */
- if ((ret = system(g.home_init)) != 0)
- testutil_die(ret, "home directory initialization failed");
-
- /* Open/truncate the logging file. */
- if (g.logging && (g.logfp = fopen(g.home_log, "w")) == NULL)
- testutil_die(errno, "fopen: %s", g.home_log);
-
- /* Open/truncate the random number logging file. */
- if ((g.randfp = fopen(g.home_rand, g.replay ? "r" : "w")) == NULL)
- testutil_die(errno, "%s", g.home_rand);
-}
-
-/*
* die --
* Report an error, dumping the configuration.
*/
@@ -393,17 +373,19 @@ static void
usage(void)
{
fprintf(stderr,
- "usage: %s [-1lqr] [-C wiredtiger-config]\n "
+ "usage: %s [-1BlqRr] [-C wiredtiger-config]\n "
"[-c config-file] [-h home] [name=value ...]\n",
progname);
fprintf(stderr, "%s",
- "\t-1 run once\n"
+ "\t-1 run once then quit\n"
+ "\t-B maintain 3.3 release log and configuration option compatibility\n"
"\t-C specify wiredtiger_open configuration arguments\n"
- "\t-c read test program configuration from a file\n"
- "\t-h home (default 'RUNDIR')\n"
+ "\t-c read test program configuration from a file (default 'CONFIG')\n"
+ "\t-h home directory (default 'RUNDIR')\n"
"\t-l log operations to a file\n"
"\t-q run quietly\n"
- "\t-r replay the last run\n");
+ "\t-R run on an existing database\n"
+ "\t-r replay the last run from the home directory configuration\n");
config_error();
exit(EXIT_FAILURE);
diff --git a/src/third_party/wiredtiger/test/format/util.c b/src/third_party/wiredtiger/test/format/util.c
index 8a3bdb94693..7aba99c20de 100644
--- a/src/third_party/wiredtiger/test/format/util.c
+++ b/src/third_party/wiredtiger/test/format/util.c
@@ -28,244 +28,6 @@
#include "format.h"
-#ifndef MAX
-#define MAX(a, b) (((a) > (b)) ? (a) : (b))
-#endif
-
-void
-key_init(void)
-{
- size_t i;
- uint32_t max;
-
- /*
- * The key is a variable length item with a leading 10-digit value.
- * Since we have to be able re-construct it from the record number
- * (when doing row lookups), we pre-load a set of random lengths in
- * a lookup table, and then use the record number to choose one of
- * the pre-loaded lengths.
- *
- * Fill in the random key lengths.
- *
- * Focus on relatively small items, admitting the possibility of larger
- * items. Pick a size close to the minimum most of the time, only create
- * a larger item 1 in 20 times.
- */
- for (i = 0; i < sizeof(g.key_rand_len) / sizeof(g.key_rand_len[0]); ++i) {
- max = g.c_key_max;
- if (i % 20 != 0 && max > g.c_key_min + 20)
- max = g.c_key_min + 20;
- g.key_rand_len[i] = mmrand(NULL, g.c_key_min, max);
- }
-}
-
-void
-key_gen_init(WT_ITEM *key)
-{
- size_t i, len;
- char *p;
-
- len = MAX(KILOBYTE(100), g.c_key_max);
- p = dmalloc(len);
- for (i = 0; i < len; ++i)
- p[i] = "abcdefghijklmnopqrstuvwxyz"[i % 26];
-
- key->mem = p;
- key->memsize = len;
- key->data = key->mem;
- key->size = 0;
-}
-
-void
-key_gen_teardown(WT_ITEM *key)
-{
- free(key->mem);
- memset(key, 0, sizeof(*key));
-}
-
-static void
-key_gen_common(WT_ITEM *key, uint64_t keyno, const char *const suffix)
-{
- int len;
- char *p;
-
- p = key->mem;
-
- /*
- * The key always starts with a 10-digit string (the specified row) followed by two digits, a
- * random number between 1 and 15 if it's an insert, otherwise 00.
- */
- u64_to_string_zf(keyno, key->mem, 11);
- p[10] = '.';
- p[11] = suffix[0];
- p[12] = suffix[1];
- len = 13;
-
- /*
- * In a column-store, the key isn't used, it doesn't need a random length.
- */
- if (g.type == ROW) {
- p[len] = '/';
-
- /*
- * Because we're doing table lookup for key sizes, we weren't able to set really big keys
- * sizes in the table, the table isn't big enough to keep our hash from selecting too many
- * big keys and blowing out the cache. Handle that here, use a really big key 1 in 2500
- * times.
- */
- len = keyno % 2500 == 0 && g.c_key_max < KILOBYTE(80) ?
- KILOBYTE(80) :
- (int)g.key_rand_len[keyno % WT_ELEMENTS(g.key_rand_len)];
- }
-
- key->data = key->mem;
- key->size = (size_t)len;
-}
-
-void
-key_gen(WT_ITEM *key, uint64_t keyno)
-{
- key_gen_common(key, keyno, "00");
-}
-
-void
-key_gen_insert(WT_RAND_STATE *rnd, WT_ITEM *key, uint64_t keyno)
-{
- static const char *const suffix[15] = {
- "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12", "13", "14", "15"};
-
- key_gen_common(key, keyno, suffix[mmrand(rnd, 0, 14)]);
-}
-
-static char *val_base; /* Base/original value */
-static uint32_t val_dup_data_len; /* Length of duplicate data items */
-static uint32_t val_len; /* Length of data items */
-
-static inline uint32_t
-value_len(WT_RAND_STATE *rnd, uint64_t keyno, uint32_t min, uint32_t max)
-{
- /*
- * Focus on relatively small items, admitting the possibility of larger items. Pick a size close
- * to the minimum most of the time, only create a larger item 1 in 20 times, and a really big
- * item 1 in somewhere around 2500 items.
- */
- if (keyno % 2500 == 0 && max < KILOBYTE(80)) {
- min = KILOBYTE(80);
- max = KILOBYTE(100);
- } else if (keyno % 20 != 0 && max > min + 20)
- max = min + 20;
- return (mmrand(rnd, min, max));
-}
-
-void
-val_init(void)
-{
- size_t i;
-
- /* Discard any previous value initialization. */
- free(val_base);
- val_base = NULL;
- val_dup_data_len = val_len = 0;
-
- /*
- * Set initial buffer contents to recognizable text.
- *
- * Add a few extra bytes in order to guarantee we can always offset into the buffer by a few
- * extra bytes, used to generate different data for column-store run-length encoded files.
- */
- val_len = MAX(KILOBYTE(100), g.c_value_max) + 20;
- val_base = dmalloc(val_len);
- for (i = 0; i < val_len; ++i)
- val_base[i] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"[i % 26];
-
- val_dup_data_len = value_len(NULL, (uint64_t)mmrand(NULL, 1, 20), g.c_value_min, g.c_value_max);
-}
-
-void
-val_gen_init(WT_ITEM *value)
-{
- value->mem = dmalloc(val_len);
- value->memsize = val_len;
- value->data = value->mem;
- value->size = 0;
-}
-
-void
-val_gen_teardown(WT_ITEM *value)
-{
- free(value->mem);
- memset(value, 0, sizeof(*value));
-}
-
-void
-val_gen(WT_RAND_STATE *rnd, WT_ITEM *value, uint64_t keyno)
-{
- char *p;
-
- p = value->mem;
- value->data = value->mem;
-
- /*
- * Fixed-length records: take the low N bits from the last digit of the record number.
- */
- if (g.type == FIX) {
- switch (g.c_bitcnt) {
- case 8:
- p[0] = (char)mmrand(rnd, 1, 0xff);
- break;
- case 7:
- p[0] = (char)mmrand(rnd, 1, 0x7f);
- break;
- case 6:
- p[0] = (char)mmrand(rnd, 1, 0x3f);
- break;
- case 5:
- p[0] = (char)mmrand(rnd, 1, 0x1f);
- break;
- case 4:
- p[0] = (char)mmrand(rnd, 1, 0x0f);
- break;
- case 3:
- p[0] = (char)mmrand(rnd, 1, 0x07);
- break;
- case 2:
- p[0] = (char)mmrand(rnd, 1, 0x03);
- break;
- case 1:
- p[0] = 1;
- break;
- }
- value->size = 1;
- return;
- }
-
- /*
- * WiredTiger doesn't store zero-length data items in row-store files, test that by inserting a
- * zero-length data item every so often.
- */
- if (keyno % 63 == 0) {
- p[0] = '\0';
- value->size = 0;
- return;
- }
-
- /*
- * Data items have unique leading numbers by default and random lengths; variable-length
- * column-stores use a duplicate data value to test RLE.
- */
- if (g.type == VAR && mmrand(rnd, 1, 100) < g.c_repeat_data_pct) {
- value->size = val_dup_data_len;
- memcpy(p, val_base, value->size);
- (void)strcpy(p, "DUPLICATEV");
- p[10] = '/';
- } else {
- value->size = value_len(rnd, keyno, g.c_value_min, g.c_value_max);
- memcpy(p, val_base, value->size);
- u64_to_string_zf(keyno, p, 11);
- p[10] = '/';
- }
-}
-
void
track(const char *tag, uint64_t cnt, TINFO *tinfo)
{
@@ -317,108 +79,79 @@ void
path_setup(const char *home)
{
size_t len;
+ const char *name;
/* Home directory. */
g.home = dstrdup(home == NULL ? "RUNDIR" : home);
+ /* Configuration file. */
+ name = "CONFIG";
+ len = strlen(g.home) + strlen(name) + 2;
+ g.home_config = dmalloc(len);
+ testutil_check(__wt_snprintf(g.home_config, len, "%s/%s", g.home, name));
+
+ /* Key length configuration file. */
+ name = "CONFIG.keylen";
+ len = strlen(g.home) + strlen(name) + 2;
+ g.home_key = dmalloc(len);
+ testutil_check(__wt_snprintf(g.home_key, len, "%s/%s", g.home, name));
+
+ /* RNG log file. */
+ name = "CONFIG.rand";
+ len = strlen(g.home) + strlen(name) + 2;
+ g.home_rand = dmalloc(len);
+ testutil_check(__wt_snprintf(g.home_rand, len, "%s/%s", g.home, name));
+
/* Log file. */
- len = strlen(g.home) + strlen("log") + 2;
+ name = "OPERATIONS.log";
+ len = strlen(g.home) + strlen(name) + 2;
g.home_log = dmalloc(len);
- testutil_check(__wt_snprintf(g.home_log, len, "%s/%s", g.home, "log"));
+ testutil_check(__wt_snprintf(g.home_log, len, "%s/%s", g.home, name));
- /* LAS dump file. */
- len = strlen(g.home) + strlen("LASdump") + 2;
- g.home_lasdump = dmalloc(len);
- testutil_check(__wt_snprintf(g.home_lasdump, len, "%s/%s", g.home, "LASdump"));
+ /* History store dump file. */
+ name = "FAIL.HSdump";
+ len = strlen(g.home) + strlen(name) + 2;
+ g.home_hsdump = dmalloc(len);
+ testutil_check(__wt_snprintf(g.home_hsdump, len, "%s/%s", g.home, name));
/* Page dump file. */
- len = strlen(g.home) + strlen("pagedump") + 2;
+ name = "FAIL.pagedump";
+ len = strlen(g.home) + strlen(name) + 2;
g.home_pagedump = dmalloc(len);
- testutil_check(__wt_snprintf(g.home_pagedump, len, "%s/%s", g.home, "pagedump"));
-
- /* RNG log file. */
- len = strlen(g.home) + strlen("rand") + 2;
- g.home_rand = dmalloc(len);
- testutil_check(__wt_snprintf(g.home_rand, len, "%s/%s", g.home, "rand"));
-
- /* Run file. */
- len = strlen(g.home) + strlen("CONFIG") + 2;
- g.home_config = dmalloc(len);
- testutil_check(__wt_snprintf(g.home_config, len, "%s/%s", g.home, "CONFIG"));
+ testutil_check(__wt_snprintf(g.home_pagedump, len, "%s/%s", g.home, name));
/* Statistics file. */
- len = strlen(g.home) + strlen("stats") + 2;
+ name = "OPERATIONS.stats";
+ len = strlen(g.home) + strlen(name) + 2;
g.home_stats = dmalloc(len);
- testutil_check(__wt_snprintf(g.home_stats, len, "%s/%s", g.home, "stats"));
+ testutil_check(__wt_snprintf(g.home_stats, len, "%s/%s", g.home, name));
+}
/*
- * Home directory initialize command: create the directory if it doesn't exist, else remove
- * everything except the RNG log file.
- *
- * Redirect the "cd" command to /dev/null so chatty cd implementations don't add the new working
- * directory to our output.
+ * fp_readv --
+ * Read and return a value from a file.
*/
-#undef CMD
-#ifdef _WIN32
-#define CMD \
- "del /q rand.copy & " \
- "(IF EXIST %s\\rand copy /y %s\\rand rand.copy) & " \
- "(IF EXIST %s rd /s /q %s) & mkdir %s & " \
- "(IF EXIST rand.copy copy rand.copy %s\\rand)"
- len = strlen(g.home) * 7 + strlen(CMD) + 1;
- g.home_init = dmalloc(len);
- testutil_check(
- __wt_snprintf(g.home_init, len, CMD, g.home, g.home, g.home, g.home, g.home, g.home, g.home));
-#else
-#define CMD \
- "test -e %s || mkdir %s; " \
- "cd %s > /dev/null && rm -rf `ls | sed /rand/d`"
- len = strlen(g.home) * 3 + strlen(CMD) + 1;
- g.home_init = dmalloc(len);
- testutil_check(__wt_snprintf(g.home_init, len, CMD, g.home, g.home, g.home));
-#endif
-
- /* Primary backup directory. */
- len = strlen(g.home) + strlen("BACKUP") + 2;
- g.home_backup = dmalloc(len);
- testutil_check(__wt_snprintf(g.home_backup, len, "%s/%s", g.home, "BACKUP"));
+bool
+fp_readv(FILE *fp, char *name, bool eof_ok, uint32_t *vp)
+{
+ u_long ulv;
+ char *endptr, buf[100];
-/*
- * Backup directory initialize command, remove and re-create the primary backup directory, plus a
- * copy we maintain for recovery testing.
- */
-#undef CMD
-#ifdef _WIN32
-#define CMD "rd /s /q %s\\%s %s\\%s & mkdir %s\\%s %s\\%s"
-#else
-#define CMD "rm -rf %s/%s %s/%s && mkdir %s/%s %s/%s"
-#endif
- len = strlen(g.home) * 4 + strlen("BACKUP") * 2 + strlen("BACKUP_COPY") * 2 + strlen(CMD) + 1;
- g.home_backup_init = dmalloc(len);
- testutil_check(__wt_snprintf(g.home_backup_init, len, CMD, g.home, "BACKUP", g.home,
- "BACKUP_COPY", g.home, "BACKUP", g.home, "BACKUP_COPY"));
+ if (fgets(buf, sizeof(buf), fp) == NULL) {
+ if (feof(g.randfp)) {
+ if (eof_ok)
+ return (true);
+ testutil_die(errno, "%s: read-value EOF", name);
+ }
+ testutil_die(errno, "%s: read-value error", name);
+ }
-/*
- * Salvage command, save the interesting files so we can replay the salvage command as necessary.
- *
- * Redirect the "cd" command to /dev/null so chatty cd implementations don't add the new working
- * directory to our output.
- */
-#undef CMD
-#ifdef _WIN32
-#define CMD \
- "cd %s && " \
- "rd /q /s slvg.copy & mkdir slvg.copy && " \
- "copy WiredTiger* slvg.copy\\ >:nul && copy wt* slvg.copy\\ >:nul"
-#else
-#define CMD \
- "cd %s > /dev/null && " \
- "rm -rf slvg.copy && mkdir slvg.copy && " \
- "cp WiredTiger* wt* slvg.copy/"
-#endif
- len = strlen(g.home) + strlen(CMD) + 1;
- g.home_salvage_copy = dmalloc(len);
- testutil_check(__wt_snprintf(g.home_salvage_copy, len, CMD, g.home));
+ errno = 0;
+ ulv = strtoul(buf, &endptr, 10);
+ testutil_assert(errno == 0 && endptr[0] == '\n');
+ testutil_assert(ulv <= UINT32_MAX);
+ *vp = (uint32_t)ulv;
+ return (false);
}
/*
@@ -428,30 +161,20 @@ path_setup(const char *home)
uint32_t
rng_slow(WT_RAND_STATE *rnd)
{
- u_long ulv;
uint32_t v;
- char *endptr, buf[64];
/*
* We can reproduce a single-threaded run based on the random numbers used in the initial run,
* plus the configuration files.
*/
if (g.replay) {
- if (fgets(buf, sizeof(buf), g.randfp) == NULL) {
- if (feof(g.randfp)) {
- fprintf(stderr,
- "\n"
- "end of random number log reached\n");
- exit(EXIT_SUCCESS);
- }
- testutil_die(errno, "random number log");
+ if (fp_readv(g.randfp, g.home_rand, true, &v)) {
+ fprintf(stderr,
+ "\n"
+ "end of random number log reached\n");
+ exit(EXIT_SUCCESS);
}
-
- errno = 0;
- ulv = strtoul(buf, &endptr, 10);
- testutil_assert(errno == 0 && endptr[0] == '\n');
- testutil_assert(ulv <= UINT32_MAX);
- return ((uint32_t)ulv);
+ return (v);
}
v = __wt_random(rnd);
@@ -464,6 +187,32 @@ rng_slow(WT_RAND_STATE *rnd)
}
/*
+ * handle_init --
+ * Initialize logging/random number handles for a run.
+ */
+void
+handle_init(void)
+{
+ /* Open/truncate logging/random number handles. */
+ if (g.logging && (g.logfp = fopen(g.home_log, "w")) == NULL)
+ testutil_die(errno, "fopen: %s", g.home_log);
+ if ((g.randfp = fopen(g.home_rand, g.replay ? "r" : "w")) == NULL)
+ testutil_die(errno, "%s", g.home_rand);
+}
+
+/*
+ * handle_teardown --
+ * Shutdown logging/random number handles for a run.
+ */
+void
+handle_teardown(void)
+{
+ /* Flush/close logging/random number handles. */
+ fclose_and_clear(&g.logfp);
+ fclose_and_clear(&g.randfp);
+}
+
+/*
* fclose_and_clear --
* Close a file and clear the handle so we don't close twice.
*/
@@ -477,83 +226,6 @@ fclose_and_clear(FILE **fpp)
*fpp = NULL;
if (fclose(fp) != 0)
testutil_die(errno, "fclose");
- return;
-}
-
-/*
- * checkpoint --
- * Periodically take a checkpoint
- */
-WT_THREAD_RET
-checkpoint(void *arg)
-{
- WT_CONNECTION *conn;
- WT_DECL_RET;
- WT_SESSION *session;
- u_int secs;
- char config_buf[64];
- const char *ckpt_config;
- bool backup_locked;
-
- (void)arg;
- conn = g.wts_conn;
- testutil_check(conn->open_session(conn, NULL, NULL, &session));
-
- for (secs = mmrand(NULL, 1, 10); !g.workers_finished;) {
- if (secs > 0) {
- __wt_sleep(1, 0);
- --secs;
- continue;
- }
-
- /*
- * LSM and data-sources don't support named checkpoints. Also, don't attempt named
- * checkpoints during a hot backup. It's OK to create named checkpoints during a hot backup,
- * but we can't delete them, so repeating an already existing named checkpoint will fail
- * when we can't drop the previous one.
- */
- ckpt_config = NULL;
- backup_locked = false;
- if (!DATASOURCE("lsm"))
- switch (mmrand(NULL, 1, 20)) {
- case 1:
- /*
- * 5% create a named snapshot. Rotate between a
- * few names to test multiple named snapshots in
- * the system.
- */
- ret = pthread_rwlock_trywrlock(&g.backup_lock);
- if (ret == 0) {
- backup_locked = true;
- testutil_check(__wt_snprintf(
- config_buf, sizeof(config_buf), "name=mine.%" PRIu32, mmrand(NULL, 1, 4)));
- ckpt_config = config_buf;
- } else if (ret != EBUSY)
- testutil_check(ret);
- break;
- case 2:
- /*
- * 5% drop all named snapshots.
- */
- ret = pthread_rwlock_trywrlock(&g.backup_lock);
- if (ret == 0) {
- backup_locked = true;
- ckpt_config = "drop=(all)";
- } else if (ret != EBUSY)
- testutil_check(ret);
- break;
- }
-
- testutil_check(session->checkpoint(session, ckpt_config));
-
- if (backup_locked)
- testutil_check(pthread_rwlock_unlock(&g.backup_lock));
-
- secs = mmrand(NULL, 5, 40);
- }
-
- testutil_check(session->close(session, NULL));
- return (WT_THREAD_RET_VALUE);
}
/*
@@ -561,7 +233,7 @@ checkpoint(void *arg)
* Update the timestamp once.
*/
void
-timestamp_once(void)
+timestamp_once(WT_SESSION *session)
{
static const char *oldest_timestamp_str = "oldest_timestamp=";
WT_CONNECTION *conn;
@@ -574,16 +246,20 @@ timestamp_once(void)
/*
* Lock out transaction timestamp operations. The lock acts as a barrier ensuring we've checked
- * if the workers have finished, we don't want that line reordered.
+ * if the workers have finished, we don't want that line reordered. We can also be called from
+ * places, such as bulk load, where we are single-threaded and the locks haven't been
+ * initialized.
*/
- testutil_check(pthread_rwlock_wrlock(&g.ts_lock));
+ if (LOCK_INITIALIZED(&g.ts_lock))
+ lock_writelock(session, &g.ts_lock);
ret = conn->query_timestamp(conn, buf + strlen(oldest_timestamp_str), "get=all_durable");
testutil_assert(ret == 0 || ret == WT_NOTFOUND);
if (ret == 0)
testutil_check(conn->set_timestamp(conn, buf));
- testutil_check(pthread_rwlock_unlock(&g.ts_lock));
+ if (LOCK_INITIALIZED(&g.ts_lock))
+ lock_writeunlock(session, &g.ts_lock);
}
/*
@@ -593,9 +269,15 @@ timestamp_once(void)
WT_THREAD_RET
timestamp(void *arg)
{
+ WT_CONNECTION *conn;
+ WT_SESSION *session;
bool done;
(void)(arg);
+ conn = g.wts_conn;
+
+ /* Locks need session */
+ testutil_check(conn->open_session(conn, NULL, NULL, &session));
/* Update the oldest timestamp at least once every 15 seconds. */
done = false;
@@ -609,10 +291,11 @@ timestamp(void *arg)
else
random_sleep(&g.rnd, 15);
- timestamp_once();
+ timestamp_once(session);
} while (!done);
+ testutil_check(session->close(session, NULL));
return (WT_THREAD_RET_VALUE);
}
@@ -662,3 +345,38 @@ alter(void *arg)
testutil_check(session->close(session, NULL));
return (WT_THREAD_RET_VALUE);
}
+
+/*
+ * lock_init --
+ * Initialize abstract lock that can use either pthread of wt reader-writer locks.
+ */
+void
+lock_init(WT_SESSION *session, RWLOCK *lock)
+{
+ testutil_assert(lock->lock_type == LOCK_NONE);
+
+ if (g.c_wt_mutex) {
+ testutil_check(__wt_rwlock_init((WT_SESSION_IMPL *)session, &lock->l.wt));
+ lock->lock_type = LOCK_WT;
+ } else {
+ testutil_check(pthread_rwlock_init(&lock->l.pthread, NULL));
+ lock->lock_type = LOCK_PTHREAD;
+ }
+}
+
+/*
+ * lock_destroy --
+ * Destroy abstract lock.
+ */
+void
+lock_destroy(WT_SESSION *session, RWLOCK *lock)
+{
+ testutil_assert(LOCK_INITIALIZED(lock));
+
+ if (lock->lock_type == LOCK_WT) {
+ __wt_rwlock_destroy((WT_SESSION_IMPL *)session, &lock->l.wt);
+ } else {
+ testutil_check(pthread_rwlock_destroy(&lock->l.pthread));
+ }
+ lock->lock_type = LOCK_NONE;
+}
diff --git a/src/third_party/wiredtiger/test/format/vt b/src/third_party/wiredtiger/test/format/vt
deleted file mode 100644
index 00ec92df1ec..00000000000
--- a/src/third_party/wiredtiger/test/format/vt
+++ /dev/null
@@ -1,21 +0,0 @@
-#! /bin/sh
-
-rm -f vgout.*
-
-# Command line argument is the number of iterations.
-r=1
-test $# -eq 0 || r=$1
-
-# Add
-# --db-attach=yes
-# to wait on a debugger attach.
-while test $r -gt 0; do
- r=`expr $r - 1`
- valgrind \
- --leak-check=yes \
- --log-file=vgout.%p \
- --read-var-info=yes \
- --suppressions=vt.suppress \
- --track-fds=yes \
- ./t -1
-done
diff --git a/src/third_party/wiredtiger/test/format/vt.suppress b/src/third_party/wiredtiger/test/format/vt.suppress
deleted file mode 100644
index 135d32e56c7..00000000000
--- a/src/third_party/wiredtiger/test/format/vt.suppress
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- <FreeBSD 8.2 dlopen #1>
- Memcheck:Addr8
- obj:/libexec/ld-elf.so.1
- obj:/libexec/ld-elf.so.1
- obj:/libexec/ld-elf.so.1
- fun:dlopen
- fun:__wt_dlopen
- fun:__conn_load_extension
- fun:wiredtiger_open
- fun:wts_open
- fun:wts_startup
- fun:main
-}
-{
- <FreeBSD 8.2 dlopen #2>
- Memcheck:Addr8
- obj:/libexec/ld-elf.so.1
- obj:/libexec/ld-elf.so.1
- obj:/libexec/ld-elf.so.1
- obj:/libexec/ld-elf.so.1
- fun:dlopen
- fun:__wt_dlopen
- fun:__conn_load_extension
- fun:wiredtiger_open
- fun:wts_open
- fun:wts_startup
- fun:main
-}
diff --git a/src/third_party/wiredtiger/test/format/wts.c b/src/third_party/wiredtiger/test/format/wts.c
index b05893598ea..66a319a9982 100644
--- a/src/third_party/wiredtiger/test/format/wts.c
+++ b/src/third_party/wiredtiger/test/format/wts.c
@@ -29,6 +29,36 @@
#include "format.h"
/*
+ * Home directory initialize command: create the directory if it doesn't exist, else remove
+ * everything except the RNG log file.
+ *
+ * Redirect the "cd" command to /dev/null so chatty cd implementations don't add the new working
+ * directory to our output.
+ */
+#define FORMAT_HOME_INIT_CMD \
+ "test -e %s || mkdir %s; " \
+ "cd %s > /dev/null && rm -rf `ls | sed /CONFIG.rand/d`"
+
+/*
+ * wts_create --
+ * Create the database home.
+ */
+void
+wts_create(void)
+{
+ WT_DECL_RET;
+ size_t len;
+ char *cmd;
+
+ len = strlen(g.home) * 3 + strlen(FORMAT_HOME_INIT_CMD) + 1;
+ cmd = dmalloc(len);
+ testutil_check(__wt_snprintf(cmd, len, FORMAT_HOME_INIT_CMD, g.home, g.home, g.home));
+ if ((ret = system(cmd)) != 0)
+ testutil_die(ret, "home initialization (\"%s\") failed", cmd);
+ free(cmd);
+}
+
+/*
* compressor --
* Configure compression.
*/
@@ -198,7 +228,10 @@ wts_open(const char *home, bool set_api, WT_CONNECTION **connp)
CONFIG_APPEND(p, ",buffer_alignment=512");
#endif
- CONFIG_APPEND(p, ",mmap=%d", g.c_mmap ? 1 : 0);
+ if (g.c_mmap)
+ CONFIG_APPEND(p, ",mmap=1");
+ if (g.c_mmap_all)
+ CONFIG_APPEND(p, ",mmap_all=1");
if (g.c_direct_io)
CONFIG_APPEND(p, ",direct_io=(data)");
@@ -228,8 +261,8 @@ wts_open(const char *home, bool set_api, WT_CONNECTION **connp)
CONFIG_APPEND(p, ",aggressive_sweep");
if (g.c_timing_stress_checkpoint)
CONFIG_APPEND(p, ",checkpoint_slow");
- if (g.c_timing_stress_lookaside_sweep)
- CONFIG_APPEND(p, ",lookaside_sweep_race");
+ if (g.c_timing_stress_hs_sweep)
+ CONFIG_APPEND(p, ",history_store_sweep_race");
if (g.c_timing_stress_split_1)
CONFIG_APPEND(p, ",split_1");
if (g.c_timing_stress_split_2)
@@ -248,6 +281,11 @@ wts_open(const char *home, bool set_api, WT_CONNECTION **connp)
CONFIG_APPEND(p, ",split_8");
CONFIG_APPEND(p, "]");
+#if WIREDTIGER_VERSION_MAJOR >= 10
+ if (g.c_verify)
+ CONFIG_APPEND(p, ",verify_metadata=true");
+#endif
+
/* Extensions. */
CONFIG_APPEND(p,
",extensions=["
@@ -294,34 +332,8 @@ wts_reopen(void)
}
/*
- * wts_checkpoints --
- * Configure WiredTiger library checkpoints.
- */
-void
-wts_checkpoints(void)
-{
- char config[1024];
-
- /*
- * Configuring WiredTiger library checkpoints is done separately, rather than as part of the
- * original database open because format tests small caches and you can get into cache stuck
- * trouble during the initial load (where bulk load isn't configured). There's a single thread
- * doing lots of inserts and creating huge leaf pages. Those pages can't be evicted if there's a
- * checkpoint running in the tree, and the cache can get stuck. That workload is unlikely enough
- * we're not going to fix it in the library, so configure it away here.
- */
- if (g.c_checkpoint_flag != CHECKPOINT_WIREDTIGER)
- return;
-
- testutil_check(
- __wt_snprintf(config, sizeof(config), ",checkpoint=(wait=%" PRIu32 ",log_size=%" PRIu32 ")",
- g.c_checkpoint_wait, MEGABYTE(g.c_checkpoint_log_size)));
- testutil_check(g.wts_conn->reconfigure(g.wts_conn, config));
-}
-
-/*
- * wts_create --
- * Create the underlying store.
+ * wts_init --
+ * Create the database object.
*/
void
wts_init(void)
@@ -448,13 +460,14 @@ void
wts_close(void)
{
WT_CONNECTION *conn;
- const char *config;
conn = g.wts_conn;
- config = g.c_leak_memory ? "leak_memory" : NULL;
+ if (g.backward_compatible)
+ testutil_check(conn->reconfigure(conn, "compatibility=(release=3.3)"));
+
+ testutil_check(conn->close(conn, g.c_leak_memory ? "leak_memory" : NULL));
- testutil_check(conn->close(conn, config));
g.wts_conn = NULL;
g.wt_api = NULL;
}
diff --git a/src/third_party/wiredtiger/test/suite/test_backup11.py b/src/third_party/wiredtiger/test/suite/test_backup11.py
index c5de361eb04..76fa70c4b2b 100644
--- a/src/third_party/wiredtiger/test/suite/test_backup11.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup11.py
@@ -36,48 +36,28 @@ from wtscenario import make_scenarios
# test_backup11.py
# Test cursor backup with a duplicate backup cursor.
class test_backup11(wttest.WiredTigerTestCase, suite_subprocess):
+ conn_config= 'cache_size=1G,log=(enabled,file_max=100K)'
dir='backup.dir' # Backup directory name
- logmax="100K"
- uri="table:test"
+ mult=0
nops=100
-
pfx = 'test_backup'
-
- # ('archiving', dict(archive='true')),
- # ('not-archiving', dict(archive='false')),
- scenarios = make_scenarios([
- ('archiving', dict(archive='true')),
- ])
-
- # Create a large cache, otherwise this test runs quite slowly.
- def conn_config(self):
- return 'cache_size=1G,log=(archive=%s,' % self.archive + \
- 'enabled,file_max=%s)' % self.logmax
+ uri="table:test"
def add_data(self):
- log2 = "WiredTigerLog.0000000002"
- log3 = "WiredTigerLog.0000000003"
-
- self.session.create(self.uri, "key_format=S,value_format=S")
- # Insert small amounts of data at a time stopping after we
- # cross into log file 2.
- loop = 0
c = self.session.open_cursor(self.uri)
- while not os.path.exists(log2):
- for i in range(0, self.nops):
- num = i + (loop * self.nops)
- key = 'key' + str(num)
- val = 'value' + str(num)
- c[key] = val
- loop += 1
+ for i in range(0, self.nops):
+ num = i + (self.mult * self.nops)
+ key = 'key' + str(num)
+ val = 'value' + str(num)
+ c[key] = val
+ self.mult += 1
self.session.checkpoint()
c.close()
- return loop
def test_backup11(self):
-
- loop = self.add_data()
+ self.session.create(self.uri, "key_format=S,value_format=S")
+ self.add_data()
# Open up the backup cursor. This causes a new log file to be created.
# That log file is not part of the list returned. This is a full backup
@@ -86,17 +66,8 @@ class test_backup11(wttest.WiredTigerTestCase, suite_subprocess):
config = 'incremental=(enabled,this_id="ID1")'
bkup_c = self.session.open_cursor('backup:', None, config)
- # Add some data that will appear in log file 3.
- c = self.session.open_cursor(self.uri)
- for i in range(0, self.nops):
- num = i + (loop * self.nops)
- key = 'key' + str(num)
- val = 'value' + str(num)
- c[key] = val
- loop += 1
- c.close()
- self.session.log_flush('sync=on')
- self.session.checkpoint()
+ # Add data while the backup cursor is open.
+ self.add_data()
# Now copy the files returned by the backup cursor.
orig_logs = []
@@ -136,37 +107,22 @@ class test_backup11(wttest.WiredTigerTestCase, suite_subprocess):
bkup_c.close()
# Add more data
- c = self.session.open_cursor(self.uri)
- for i in range(0, self.nops):
- num = i + (loop * self.nops)
- key = 'key' + str(num)
- val = 'value' + str(num)
- c[key] = val
- loop += 1
- c.close()
- self.session.log_flush('sync=on')
- self.session.checkpoint()
+ self.add_data()
- # Test a few error cases now.
- # - Incremental filename must be on duplicate, not primary.
- # - An incremental duplicate must have an incremental primary.
- # - We cannot make multiple incremental duplcate backup cursors.
- # - We cannot duplicate the duplicate backup cursor.
- # - We cannot mix block incremental with a log target on the same duplicate.
- # - Incremental ids must be on primary, not duplicate.
- # - Incremental must be opened on a primary with a source identifier.
- # - Force stop must be on primary, not duplicate.
+ # Test error cases now.
# - Incremental filename must be on duplicate, not primary.
# Test this first because we currently do not have a primary open.
config = 'incremental=(file=test.wt)'
msg = "/file name can only be specified on a duplicate/"
+ self.pr("Specify file on primary")
self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
lambda:self.assertEquals(self.session.open_cursor('backup:',
None, config), 0), msg)
# Open a non-incremental full backup cursor.
# - An incremental duplicate must have an incremental primary.
+ self.pr("Try to open an incremental on a non-incremental primary")
bkup_c = self.session.open_cursor('backup:', None, None)
msg = "/must have an incremental primary/"
self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
@@ -229,13 +185,12 @@ class test_backup11(wttest.WiredTigerTestCase, suite_subprocess):
bkup_c, config), 0), msg)
# - Force stop must be on primary, not duplicate.
- #self.pr("Test force stop")
- #self.pr("=========")
- #config = 'incremental=(force_stop=true)'
- #print "config is " + config
- #self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
- # lambda:self.assertEquals(self.session.open_cursor(None,
- # bkup_c, config), 0), msg)
+ self.pr("Test force stop")
+ self.pr("=========")
+ config = 'incremental=(force_stop=true)'
+ self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
+ lambda:self.assertEquals(self.session.open_cursor(None,
+ bkup_c, config), 0), msg)
bkup_c.close()
@@ -255,9 +210,61 @@ class test_backup11(wttest.WiredTigerTestCase, suite_subprocess):
lambda: self.session.open_cursor(None, bkup_c, config), msg)
bkup_c.close()
+ # - Test opening a primary backup with an unknown source id.
+ self.pr("Test incremental with unknown source identifier on primary")
+ self.pr("=========")
+ config = 'incremental=(enabled,src_id="ID_BAD",this_id="ID4")'
+ self.assertRaises(wiredtiger.WiredTigerError,
+ lambda: self.session.open_cursor('backup:', None, config))
+
+ # - Test opening a primary backup with an id in WiredTiger namespace.
+ self.pr("Test incremental with illegal src identifier using WiredTiger namespace")
+ self.pr("=========")
+ msg = '/name space may not/'
+ config = 'incremental=(enabled,src_id="WiredTiger.0")'
+ self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
+ lambda: self.session.open_cursor('backup:', None, config), msg)
+
+ # - Test opening a primary backup with an id in WiredTiger namespace.
+ self.pr("Test incremental with illegal this identifier using WiredTiger namespace")
+ self.pr("=========")
+ config = 'incremental=(enabled,this_id="WiredTiger.ID")'
+ self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
+ lambda: self.session.open_cursor('backup:', None, config), msg)
+
+ # - Test opening a primary backup with an id using illegal characters.
+ self.pr("Test incremental with illegal source identifier using illegal colon character")
+ self.pr("=========")
+ msg = '/grouping characters/'
+ config = 'incremental=(enabled,src_id="ID4:4.0")'
+ self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
+ lambda: self.session.open_cursor('backup:', None, config), msg)
+
+ # - Test opening a primary backup with an id using illegal characters.
+ self.pr("Test incremental with illegal this identifier using illegal colon character")
+ self.pr("=========")
+ config = 'incremental=(enabled,this_id="ID4:4.0")'
+ self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
+ lambda: self.session.open_cursor('backup:', None, config), msg)
+
+ # - Test opening a primary backup with the same source id and this id (new id).
+ self.pr("Test incremental with the same new source and this identifiers")
+ self.pr("=========")
+ config = 'incremental=(enabled,src_id="IDSAME",this_id="IDSAME")'
+ self.assertRaises(wiredtiger.WiredTigerError,
+ lambda: self.session.open_cursor('backup:', None, config))
+
+ # - Test opening a primary backup with the same source id and this id (reusing id).
+ self.pr("Test incremental with the same re-used source and this identifiers")
+ self.pr("=========")
+ msg = '/already in use/'
+ config = 'incremental=(enabled,src_id="ID2",this_id="ID2")'
+ self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
+ lambda: self.session.open_cursor('backup:', None, config), msg)
+
# After the full backup, open and recover the backup database.
- #backup_conn = self.wiredtiger_open(self.dir)
- #backup_conn.close()
+ backup_conn = self.wiredtiger_open(self.dir)
+ backup_conn.close()
if __name__ == '__main__':
wttest.run()
diff --git a/src/third_party/wiredtiger/test/suite/test_backup12.py b/src/third_party/wiredtiger/test/suite/test_backup12.py
index 35948da5c44..f5fadcee393 100644
--- a/src/third_party/wiredtiger/test/suite/test_backup12.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup12.py
@@ -36,48 +36,41 @@ from wtscenario import make_scenarios
# test_backup12.py
# Test cursor backup with a block-based incremental cursor.
class test_backup12(wttest.WiredTigerTestCase, suite_subprocess):
+ conn_config='cache_size=1G,log=(enabled,file_max=100K)'
dir='backup.dir' # Backup directory name
logmax="100K"
uri="table:test"
- nops=100
+ uri2="table:test2"
+ uri_rem="table:test_rem"
+ nops=1000
+ mult=0
pfx = 'test_backup'
+ # Set the key and value big enough that we modify a few blocks.
+ bigkey = 'Key' * 100
+ bigval = 'Value' * 100
- # ('archiving', dict(archive='true')),
- # ('not-archiving', dict(archive='false')),
- scenarios = make_scenarios([
- ('archiving', dict(archive='true')),
- ])
+ def add_data(self, uri):
- # Create a large cache, otherwise this test runs quite slowly.
- def conn_config(self):
- return 'cache_size=1G,log=(archive=%s,' % self.archive + \
- 'enabled,file_max=%s)' % self.logmax
-
- def add_data(self):
- log2 = "WiredTigerLog.0000000002"
- log3 = "WiredTigerLog.0000000003"
-
- self.session.create(self.uri, "key_format=S,value_format=S")
-
- # Insert small amounts of data at a time stopping after we
- # cross into log file 2.
- loop = 0
- c = self.session.open_cursor(self.uri)
- while not os.path.exists(log2):
- for i in range(0, self.nops):
- num = i + (loop * self.nops)
- key = 'key' + str(num)
- val = 'value' + str(num)
- c[key] = val
- loop += 1
+ c = self.session.open_cursor(uri)
+ for i in range(0, self.nops):
+ num = i + (self.mult * self.nops)
+ key = self.bigkey + str(num)
+ val = self.bigval + str(num)
+ c[key] = val
self.session.checkpoint()
c.close()
- return loop
+ # Increase the multiplier so that later calls insert unique items.
+ self.mult += 1
def test_backup12(self):
- loop = self.add_data()
+ self.session.create(self.uri, "key_format=S,value_format=S")
+ self.session.create(self.uri2, "key_format=S,value_format=S")
+ self.session.create(self.uri_rem, "key_format=S,value_format=S")
+ self.add_data(self.uri)
+ self.add_data(self.uri2)
+ self.add_data(self.uri_rem)
# Open up the backup cursor. This causes a new log file to be created.
# That log file is not part of the list returned. This is a full backup
@@ -86,23 +79,14 @@ class test_backup12(wttest.WiredTigerTestCase, suite_subprocess):
#
# Note, this first backup is actually done before a checkpoint is taken.
#
- config = 'incremental=(enabled,this_id="ID1")'
+ config = 'incremental=(enabled,granularity=1M,this_id="ID1")'
bkup_c = self.session.open_cursor('backup:', None, config)
- # Add some data that will appear in log file 3.
- c = self.session.open_cursor(self.uri)
- for i in range(0, self.nops):
- num = i + (loop * self.nops)
- key = 'key' + str(num)
- val = 'value' + str(num)
- c[key] = val
- loop += 1
- c.close()
- self.session.log_flush('sync=on')
- self.session.checkpoint()
+ # Add more data while the backup cursor is open.
+ self.add_data(self.uri)
# Now copy the files returned by the backup cursor.
- orig_logs = []
+ all_files = []
while True:
ret = bkup_c.next()
if ret != 0:
@@ -111,8 +95,7 @@ class test_backup12(wttest.WiredTigerTestCase, suite_subprocess):
sz = os.path.getsize(newfile)
self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir)
shutil.copy(newfile, self.dir)
- if "WiredTigerLog" in newfile:
- orig_logs.append(newfile)
+ all_files.append(newfile)
self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
# Now open a duplicate backup cursor.
@@ -129,31 +112,28 @@ class test_backup12(wttest.WiredTigerTestCase, suite_subprocess):
newfile = dupc.get_key()
self.assertTrue("WiredTigerLog" in newfile)
sz = os.path.getsize(newfile)
- if (newfile not in orig_logs):
+ if (newfile not in all_files):
self.pr('DUP: Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir)
shutil.copy(newfile, self.dir)
# Record all log files returned for later verification.
dup_logs.append(newfile)
+ all_files.append(newfile)
self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
dupc.close()
bkup_c.close()
# Add more data.
- c = self.session.open_cursor(self.uri)
- for i in range(0, self.nops):
- num = i + (loop * self.nops)
- key = 'key' + str(num)
- val = 'value' + str(num)
- c[key] = val
- loop += 1
- c.close()
- self.session.log_flush('sync=on')
- self.session.checkpoint()
+ self.add_data(self.uri)
+ self.add_data(self.uri2)
+
+ # Drop a table.
+ self.session.drop(self.uri_rem)
# Now do an incremental backup.
config = 'incremental=(src_id="ID1",this_id="ID2")'
bkup_c = self.session.open_cursor('backup:', None, config)
self.pr('Open backup cursor ID1')
+ bkup_files = []
while True:
ret = bkup_c.next()
if ret != 0:
@@ -163,6 +143,8 @@ class test_backup12(wttest.WiredTigerTestCase, suite_subprocess):
self.pr('Open incremental cursor with ' + config)
dup_cnt = 0
dupc = self.session.open_cursor(None, bkup_c, config)
+ bkup_files.append(newfile)
+ all_files.append(newfile)
while True:
ret = dupc.next()
if ret != 0:
@@ -171,14 +153,34 @@ class test_backup12(wttest.WiredTigerTestCase, suite_subprocess):
offset = incrlist[0]
size = incrlist[1]
curtype = incrlist[2]
+ # 1 is WT_BACKUP_FILE
+ # 2 is WT_BACKUP_RANGE
self.assertTrue(curtype == 1 or curtype == 2)
+ if curtype == 1:
+ self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir)
+ shutil.copy(newfile, self.dir)
+ else:
+ self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size))
+ rfp = open(newfile, "r+b")
+ wfp = open(self.dir + '/' + newfile, "w+b")
+ rfp.seek(offset, 0)
+ wfp.seek(offset, 0)
+ buf = rfp.read(size)
+ wfp.write(buf)
+ rfp.close()
+ wfp.close()
dup_cnt += 1
dupc.close()
- self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir)
- shutil.copy(newfile, self.dir)
self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
bkup_c.close()
+ # We need to remove files in the backup directory that are not in the current backup.
+ all_set = set(all_files)
+ bkup_set = set(bkup_files)
+ rem_files = list(all_set - bkup_set)
+ for l in rem_files:
+ self.pr('Remove file: ' + self.dir + '/' + l)
+ os.remove(self.dir + '/' + l)
# After the full backup, open and recover the backup database.
backup_conn = self.wiredtiger_open(self.dir)
backup_conn.close()
diff --git a/src/third_party/wiredtiger/test/suite/test_backup13.py b/src/third_party/wiredtiger/test/suite/test_backup13.py
new file mode 100644
index 00000000000..445cbaa6dc1
--- /dev/null
+++ b/src/third_party/wiredtiger/test/suite/test_backup13.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python
+#
+# Public Domain 2014-2020 MongoDB, Inc.
+# Public Domain 2008-2014 WiredTiger, Inc.
+#
+# This is free and unencumbered software released into the public domain.
+#
+# Anyone is free to copy, modify, publish, use, compile, sell, or
+# distribute this software, either in source code form or as a compiled
+# binary, for any purpose, commercial or non-commercial, and by any
+# means.
+#
+# In jurisdictions that recognize copyright laws, the author or authors
+# of this software dedicate any and all copyright interest in the
+# software to the public domain. We make this dedication for the benefit
+# of the public at large and to the detriment of our heirs and
+# successors. We intend this dedication to be an overt act of
+# relinquishment in perpetuity of all present and future rights to this
+# software under copyright law.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+import wiredtiger, wttest
+import os, shutil
+from helper import compare_files
+from suite_subprocess import suite_subprocess
+from wtdataset import simple_key
+from wtscenario import make_scenarios
+
+# test_backup13.py
+# Test cursor backup with a block-based incremental cursor and force_stop.
+class test_backup13(wttest.WiredTigerTestCase, suite_subprocess):
+ conn_config='cache_size=1G,log=(enabled,file_max=100K)'
+ dir='backup.dir' # Backup directory name
+ logmax="100K"
+ uri="table:test"
+ nops=1000
+ mult=0
+
+ pfx = 'test_backup'
+ # Set the key and value big enough that we modify a few blocks.
+ bigkey = 'Key' * 100
+ bigval = 'Value' * 100
+
+ def add_data(self, uri):
+
+ c = self.session.open_cursor(uri)
+ for i in range(0, self.nops):
+ num = i + (self.mult * self.nops)
+ key = self.bigkey + str(num)
+ val = self.bigval + str(num)
+ c[key] = val
+ self.session.checkpoint()
+ c.close()
+ # Increase the multiplier so that later calls insert unique items.
+ self.mult += 1
+
+ def test_backup13(self):
+
+ self.session.create(self.uri, "key_format=S,value_format=S")
+ self.add_data(self.uri)
+
+ # Open up the backup cursor. This causes a new log file to be created.
+ # That log file is not part of the list returned. This is a full backup
+ # primary cursor with incremental configured.
+ os.mkdir(self.dir)
+ config = 'incremental=(enabled,granularity=1M,this_id="ID1")'
+ bkup_c = self.session.open_cursor('backup:', None, config)
+
+ # Add more data while the backup cursor is open.
+ self.add_data(self.uri)
+
+ # Now copy the files returned by the backup cursor.
+ all_files = []
+
+ # We cannot use 'for newfile in bkup_c:' usage because backup cursors don't have
+ # values and adding in get_values returns ENOTSUP and causes the usage to fail.
+ # If that changes then this, and the use of the duplicate below can change.
+ while True:
+ ret = bkup_c.next()
+ if ret != 0:
+ break
+ newfile = bkup_c.get_key()
+ sz = os.path.getsize(newfile)
+ self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir)
+ shutil.copy(newfile, self.dir)
+ all_files.append(newfile)
+ self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
+ bkup_c.close()
+
+ # Add more data.
+ self.add_data(self.uri)
+
+ # Now do an incremental backup.
+ config = 'incremental=(src_id="ID1",this_id="ID2")'
+ bkup_c = self.session.open_cursor('backup:', None, config)
+ self.pr('Open backup cursor ID1')
+ bkup_files = []
+ while True:
+ ret = bkup_c.next()
+ if ret != 0:
+ break
+ newfile = bkup_c.get_key()
+ config = 'incremental=(file=' + newfile + ')'
+ self.pr('Open incremental cursor with ' + config)
+ dup_cnt = 0
+ dupc = self.session.open_cursor(None, bkup_c, config)
+ bkup_files.append(newfile)
+ all_files.append(newfile)
+ while True:
+ ret = dupc.next()
+ if ret != 0:
+ break
+ incrlist = dupc.get_keys()
+ offset = incrlist[0]
+ size = incrlist[1]
+ curtype = incrlist[2]
+ self.assertTrue(curtype == wiredtiger.WT_BACKUP_FILE or curtype == wiredtiger.WT_BACKUP_RANGE)
+ if curtype == wiredtiger.WT_BACKUP_FILE:
+ self.pr('Copy from: ' + newfile + ' (' + str(sz) + ') to ' + self.dir)
+ shutil.copy(newfile, self.dir)
+ else:
+ self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size))
+ rfp = open(newfile, "r+b")
+ wfp = open(self.dir + '/' + newfile, "w+b")
+ rfp.seek(offset, 0)
+ wfp.seek(offset, 0)
+ buf = rfp.read(size)
+ wfp.write(buf)
+ rfp.close()
+ wfp.close()
+ dup_cnt += 1
+ dupc.close()
+ self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
+ bkup_c.close()
+
+ all_set = set(all_files)
+ bkup_set = set(bkup_files)
+ rem_files = list(all_set - bkup_set)
+ for l in rem_files:
+ self.pr('Remove file: ' + self.dir + '/' + l)
+ os.remove(self.dir + '/' + l)
+ # After the full backup, open and recover the backup database.
+ backup_conn = self.wiredtiger_open(self.dir)
+ backup_conn.close()
+
+ # Do a force stop to release resources and reset the system.
+ config = 'incremental=(force_stop=true)'
+ bkup_c = self.session.open_cursor('backup:', None, config)
+ bkup_c.close()
+
+ # Make sure after a force stop we cannot access old backup info.
+ config = 'incremental=(src_id="ID1",this_id="ID3")'
+ self.assertRaises(wiredtiger.WiredTigerError,
+ lambda: self.session.open_cursor('backup:', None, config))
+ self.reopen_conn()
+ # Make sure after a restart we cannot access old backup info.
+ self.assertRaises(wiredtiger.WiredTigerError,
+ lambda: self.session.open_cursor('backup:', None, config))
+
+if __name__ == '__main__':
+ wttest.run()
diff --git a/src/third_party/wiredtiger/test/suite/test_backup14.py b/src/third_party/wiredtiger/test/suite/test_backup14.py
new file mode 100644
index 00000000000..7a2ec4f427f
--- /dev/null
+++ b/src/third_party/wiredtiger/test/suite/test_backup14.py
@@ -0,0 +1,367 @@
+#!/usr/bin/env python
+#
+# Public Domain 2014-2020 MongoDB, Inc.
+# Public Domain 2008-2014 WiredTiger, Inc.
+#
+# This is free and unencumbered software released into the public domain.
+#
+# Anyone is free to copy, modify, publish, use, compile, sell, or
+# distribute this software, either in source code form or as a compiled
+# binary, for any purpose, commercial or non-commercial, and by any
+# means.
+#
+# In jurisdictions that recognize copyright laws, the author or authors
+# of this software dedicate any and all copyright interest in the
+# software to the public domain. We make this dedication for the benefit
+# of the public at large and to the detriment of our heirs and
+# successors. We intend this dedication to be an overt act of
+# relinquishment in perpetuity of all present and future rights to this
+# software under copyright law.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+import wiredtiger, wttest
+import os, shutil
+from helper import compare_files
+from suite_subprocess import suite_subprocess
+from wtdataset import simple_key
+from wtscenario import make_scenarios
+import glob
+
+# test_backup14.py
+# Test cursor backup with a block-based incremental cursor.
+class test_backup14(wttest.WiredTigerTestCase, suite_subprocess):
+ conn_config='cache_size=1G,log=(enabled,file_max=100K)'
+ dir='backup.dir' # Backup directory name
+ logmax="100K"
+ uri="table:main"
+ uri2="table:extra"
+ uri_logged="table:logged_table"
+ uri_not_logged="table:not_logged_table"
+ full_out = "./backup_block_full"
+ incr_out = "./backup_block_incr"
+ bkp_home = "WT_BLOCK"
+ home_full = "WT_BLOCK_LOG_FULL"
+ home_incr = "WT_BLOCK_LOG_INCR"
+ logpath = "logpath"
+ nops=1000
+ mult=0
+ max_iteration=7
+ counter=0
+ new_table=False
+ initial_backup=False
+
+ pfx = 'test_backup'
+ # Set the key and value big enough that we modify a few blocks.
+ bigkey = 'Key' * 100
+ bigval = 'Value' * 100
+
+ #
+ # Set up all the directories needed for the test. We have a full backup directory for each
+ # iteration and an incremental backup for each iteration. That way we can compare the full and
+ # incremental each time through.
+ #
+ def setup_directories(self):
+ for i in range(0, self.max_iteration):
+ remove_dir = self.home_incr + '.' + str(i)
+
+ create_dir = self.home_incr + '.' + str(i) + '/' + self.logpath
+ if os.path.exists(remove_dir):
+ os.remove(remove_dir)
+ os.makedirs(create_dir)
+
+ if i == 0:
+ continue
+ remove_dir = self.home_full + '.' + str(i)
+ create_dir = self.home_full + '.' + str(i) + '/' + self.logpath
+ if os.path.exists(remove_dir):
+ os.remove(remove_dir)
+ os.makedirs(create_dir)
+
+ def take_full_backup(self):
+ if self.counter != 0:
+ hdir = self.home_full + '.' + str(self.counter)
+ else:
+ hdir = self.home_incr
+
+ #
+ # First time through we take a full backup into the incremental directories. Otherwise only
+ # into the appropriate full directory.
+ #
+ buf = None
+ if self.initial_backup == True:
+ buf = 'incremental=(granularity=1M,enabled=true,this_id=ID0)'
+
+ cursor = self.session.open_cursor('backup:', None, buf)
+ while True:
+ ret = cursor.next()
+ if ret != 0:
+ break
+ newfile = cursor.get_key()
+
+ if self.counter == 0:
+                # Take a full backup into each incremental directory
+ for i in range(0, self.max_iteration):
+ copy_from = newfile
+ # If it is log file, prepend the path.
+ if ("WiredTigerLog" in newfile):
+ copy_to = self.home_incr + '.' + str(i) + '/' + self.logpath
+ else:
+ copy_to = self.home_incr + '.' + str(i)
+ shutil.copy(copy_from, copy_to)
+ else:
+ copy_from = newfile
+ # If it is log file, prepend the path.
+ if ("WiredTigerLog" in newfile):
+ copy_to = hdir + '/' + self.logpath
+ else:
+ copy_to = hdir
+
+ shutil.copy(copy_from, copy_to)
+ self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
+ cursor.close()
+
+ def take_incr_backup(self):
+ # Open the backup data source for incremental backup.
+ buf = 'incremental=(src_id="ID' + str(self.counter-1) + '",this_id="ID' + str(self.counter) + '")'
+ bkup_c = self.session.open_cursor('backup:', None, buf)
+ while True:
+ ret = bkup_c.next()
+ if ret != 0:
+ break
+ newfile = bkup_c.get_key()
+ h = self.home_incr + '.0'
+ copy_from = newfile
+ # If it is log file, prepend the path.
+ if ("WiredTigerLog" in newfile):
+ copy_to = h + '/' + self.logpath
+ else:
+ copy_to = h
+
+ shutil.copy(copy_from, copy_to)
+ first = True
+ config = 'incremental=(file=' + newfile + ')'
+ dup_cnt = 0
+ incr_c = self.session.open_cursor(None, bkup_c, config)
+
+ # For each file listed, open a duplicate backup cursor and copy the blocks.
+ while True:
+ ret = incr_c.next()
+ if ret != 0:
+ break
+ incrlist = incr_c.get_keys()
+ offset = incrlist[0]
+ size = incrlist[1]
+ curtype = incrlist[2]
+ # 1 is WT_BACKUP_FILE
+ # 2 is WT_BACKUP_RANGE
+ self.assertTrue(curtype == 1 or curtype == 2)
+ if curtype == 1:
+ if first == True:
+ h = self.home_incr + '.' + str(self.counter)
+ first = False
+
+ copy_from = newfile
+ if ("WiredTigerLog" in newfile):
+ copy_to = h + '/' + self.logpath
+ else:
+ copy_to = h
+ shutil.copy(copy_from, copy_to)
+ else:
+ self.pr('Range copy file ' + newfile + ' offset ' + str(offset) + ' len ' + str(size))
+ write_from = newfile
+ write_to = self.home_incr + '.' + str(self.counter) + '/' + newfile
+ rfp = open(write_from, "r+b")
+ wfp = open(write_to, "w+b")
+ rfp.seek(offset, 0)
+ wfp.seek(offset, 0)
+ buf = rfp.read(size)
+ wfp.write(buf)
+ rfp.close()
+ wfp.close()
+ dup_cnt += 1
+ self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
+ incr_c.close()
+
+ # For each file, we want to copy the file into each of the later incremental directories
+ for i in range(self.counter, self.max_iteration):
+ h = self.home_incr + '.' + str(i)
+ copy_from = newfile
+ if ("WiredTigerLog" in newfile):
+ copy_to = h + '/' + self.logpath
+ else:
+ copy_to = h
+ shutil.copy(copy_from, copy_to)
+ self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
+ bkup_c.close()
+
+ def compare_backups(self, t_uri):
+ #
+ # Run wt dump on full backup directory
+ #
+ full_backup_out = self.full_out + '.' + str(self.counter)
+ home_dir = self.home_full + '.' + str(self.counter)
+ if self.counter == 0:
+ home_dir = self.home
+
+ self.runWt(['-R', '-h', home_dir, 'dump', t_uri], outfilename=full_backup_out)
+ #
+ # Run wt dump on incremental backup directory
+ #
+ incr_backup_out = self.incr_out + '.' + str(self.counter)
+ home_dir = self.home_incr + '.' + str(self.counter)
+ self.runWt(['-R', '-h', home_dir, 'dump', t_uri], outfilename=incr_backup_out)
+
+ self.assertEqual(True,
+ compare_files(self, full_backup_out, incr_backup_out))
+
+ #
+ # Add data to the given uri.
+ #
+ def add_data(self, uri, bulk_option):
+ c = self.session.open_cursor(uri, None, bulk_option)
+ for i in range(0, self.nops):
+ num = i + (self.mult * self.nops)
+ key = self.bigkey + str(num)
+ val = self.bigval + str(num)
+ c[key] = val
+ c.close()
+
+ # Increase the multiplier so that later calls insert unique items.
+ self.mult += 1
+ # Increase the counter so that later backups have unique ids.
+ if self.initial_backup == False:
+ self.counter += 1
+
+ #
+ # Remove data from uri (table:main)
+ #
+ def remove_data(self):
+ c = self.session.open_cursor(self.uri)
+ #
+        # We run the outer loop up to the current mult value to make sure we remove
+        # all the inserted records from the main table.
+ #
+ for i in range(0, self.mult):
+ for j in range(i, self.nops):
+ num = j + (i * self.nops)
+ key = self.bigkey + str(num)
+ c.set_key(key)
+ self.assertEquals(c.remove(), 0)
+ c.close()
+ # Increase the counter so that later backups have unique ids.
+ self.counter += 1
+
+ #
+ # This function will add records to the table (table:main), take incremental/full backups and
+ # validate the backups.
+ #
+ def add_data_validate_backups(self):
+ self.pr('Adding initial data')
+ self.initial_backup = True
+ self.add_data(self.uri, None)
+ self.take_full_backup()
+ self.initial_backup = False
+ self.session.checkpoint()
+
+ self.add_data(self.uri, None)
+ self.take_full_backup()
+ self.take_incr_backup()
+ self.compare_backups(self.uri)
+
+ #
+ # This function will remove all the records from table (table:main), take backup and validate the
+ # backup.
+ #
+ def remove_all_records_validate(self):
+ self.remove_data()
+ self.take_full_backup()
+ self.take_incr_backup()
+ self.compare_backups(self.uri)
+
+ #
+ # This function will drop the existing table uri (table:main) that is part of the backups and
+ # create new table uri2 (table:extra), take incremental backup and validate.
+ #
+ def drop_old_add_new_table(self):
+
+ # Drop main table.
+ self.session.drop(self.uri)
+
+ # Create uri2 (table:extra)
+ self.session.create(self.uri2, "key_format=S,value_format=S")
+
+ self.new_table = True
+ self.add_data(self.uri2, None)
+ self.take_incr_backup()
+
+ table_list = 'tablelist.txt'
+        # Fail the test if the dropped table (table:main) still exists in the incremental folder.
+ self.runWt(['-R', '-h', self.home, 'list'], outfilename=table_list)
+ ret = os.system("grep " + self.uri + " " + table_list)
+ self.assertNotEqual(ret, 0, self.uri + " dropped, but table exists in " + self.home)
+
+ #
+ # This function will create previously dropped table uri (table:main) and add different content to
+ # it, take backups and validate the backups.
+ #
+ def create_dropped_table_add_new_content(self):
+ self.session.create(self.uri, "key_format=S,value_format=S")
+ self.add_data(self.uri, None)
+ self.take_full_backup()
+ self.take_incr_backup()
+ self.compare_backups(self.uri)
+
+ #
+ # This function will insert bulk data in logged and not-logged table, take backups and validate the
+ # backups.
+ #
+ def insert_bulk_data(self):
+ #
+ # Insert bulk data into uri3 (table:logged_table).
+ #
+ self.session.create(self.uri_logged, "key_format=S,value_format=S")
+ self.add_data(self.uri_logged, 'bulk')
+ self.take_full_backup()
+ self.take_incr_backup()
+ self.compare_backups(self.uri_logged)
+
+ #
+ # Insert bulk data into uri4 (table:not_logged_table).
+ #
+ self.session.create(self.uri_not_logged, "key_format=S,value_format=S,log=(enabled=false)")
+ self.add_data(self.uri_not_logged, 'bulk')
+ self.take_full_backup()
+ self.take_incr_backup()
+ self.compare_backups(self.uri_not_logged)
+
+ def test_backup14(self):
+ os.mkdir(self.bkp_home)
+ self.home = self.bkp_home
+ self.session.create(self.uri, "key_format=S,value_format=S")
+
+ self.setup_directories()
+
+ self.pr('*** Add data, checkpoint, take backups and validate ***')
+ self.add_data_validate_backups()
+
+ self.pr('*** Remove old records and validate ***')
+ self.remove_all_records_validate()
+
+ self.pr('*** Drop old and add new table ***')
+ self.drop_old_add_new_table()
+
+ self.pr('*** Create previously dropped table and add new content ***')
+ self.create_dropped_table_add_new_content()
+
+ self.pr('*** Insert data into Logged and Not-Logged tables ***')
+ self.insert_bulk_data()
+
+if __name__ == '__main__':
+ wttest.run()