author     Luke Chen <luke.chen@mongodb.com>  2023-05-17 13:47:10 +1000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2023-05-17 04:24:30 +0000
commit     73eed5057e61272d6c6be53541fb874d6b76c18c (patch)
tree       54e5a830538c85cde638d61922fa28a20892bda7
parent     0e1191bd32303bf5b973f9ddc4e2893ce5a95c92 (diff)
download   mongo-73eed5057e61272d6c6be53541fb874d6b76c18c.tar.gz
Import wiredtiger: 60edc6281b4e150f2788af83f0c565e7d590d260 from branch mongodb-7.0
ref: 14dbb66989..60edc6281b for: 7.0.0-rc1 WT-11036 Add test of incremental backup after files are closed (v7.0 backport)
-rw-r--r--  src/third_party/wiredtiger/import.data                  |   2
-rw-r--r--  src/third_party/wiredtiger/test/suite/test_backup29.py  | 159
2 files changed, 112 insertions(+), 49 deletions(-)
diff --git a/src/third_party/wiredtiger/import.data b/src/third_party/wiredtiger/import.data
index ac1949362a3..5307a004812 100644
--- a/src/third_party/wiredtiger/import.data
+++ b/src/third_party/wiredtiger/import.data
@@ -2,5 +2,5 @@
"vendor": "wiredtiger",
"github": "wiredtiger/wiredtiger.git",
"branch": "mongodb-7.0",
- "commit": "14dbb669892bcc40fb5cb6acc95583b8d01a1016"
+ "commit": "60edc6281b4e150f2788af83f0c565e7d590d260"
}
diff --git a/src/third_party/wiredtiger/test/suite/test_backup29.py b/src/third_party/wiredtiger/test/suite/test_backup29.py
index df73d821a07..3b670dd4d80 100644
--- a/src/third_party/wiredtiger/test/suite/test_backup29.py
+++ b/src/third_party/wiredtiger/test/suite/test_backup29.py
@@ -26,26 +26,42 @@
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
-import os, re
+import os, re, time
from wtscenario import make_scenarios
from wtbackup import backup_base
+from wiredtiger import stat
# test_backup29.py
-# Test interaction between restart, checkpoint and incremental backup. There was a bug in
-# maintaining the incremental backup bitmaps correctly across restarts in specific conditions
-# that this test can reproduce.
-#
+# Test interaction between checkpoint and incremental backup. There was a bug in
+# maintaining the incremental backup bitmaps correctly after opening an uncached dhandle.
+# This test reconstructs the failure scenario and verifies correct behavior when either
+# a restart or a dhandle sweep leads to opening an uncached dhandle.
class test_backup29(backup_base):
+ conn_config = 'file_manager=(close_handle_minimum=0,' + \
+ 'close_idle_time=3,close_scan_interval=1),' + \
+ 'statistics=(fast)'
create_config = 'allocation_size=512,key_format=i,value_format=S'
- # Backup directory name
- dir='backup.dir'
- uri = 'test_backup29'
- uri2 = 'test_other'
+ # Backup directory name. Uncomment if actually taking a backup.
+ # dir='backup.dir'
+ uri1 = 'test_first'
+ uri2 = 'test_second'
+ file1_uri = 'file:' + uri1 + '.wt'
+ file2_uri = 'file:' + uri2 + '.wt'
+ table1_uri = 'table:' + uri1
+ table2_uri = 'table:' + uri2
+ active_uri = 'table:active.wt'
+
value_base = '-abcdefghijkl'
few = 100
nentries = 5000
+ def get_open_file_count(self):
+ stat_cursor = self.session.open_cursor('statistics:', None, None)
+ n = stat_cursor[stat.conn.file_open][2]
+ stat_cursor.close()
+ return n
+
def parse_blkmods(self, uri):
meta_cursor = self.session.open_cursor('metadata:')
config = meta_cursor[uri]
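[Note: the get_open_file_count helper added above reads the connection-wide "files currently open" statistic through a statistics cursor. As a minimal standalone sketch of the same read outside the test harness (assuming the WiredTiger Python bindings are on the path; the 'WT_HOME' directory name is illustrative, not from the patch):

import os
import wiredtiger
from wiredtiger import stat

# Open a connection with statistics enabled, matching the test's
# 'statistics=(fast)' setting.
os.makedirs('WT_HOME', exist_ok=True)
conn = wiredtiger.wiredtiger_open('WT_HOME', 'create,statistics=(fast)')
session = conn.open_session()
cursor = session.open_cursor('statistics:', None, None)
# Each statistics entry is a (description, string value, numeric value) tuple.
nfiles = cursor[stat.conn.file_open][2]
cursor.close()
conn.close()
]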
@@ -59,30 +75,41 @@ class test_backup29(backup_base):
self.pr("block bitmap: " + blocks)
return blocks
- def test_backup29(self):
- os.mkdir(self.dir)
-
- # Create and populate the table.
- file_uri = 'file:' + self.uri + '.wt'
- file2_uri = 'file:' + self.uri2 + '.wt'
- table_uri = 'table:' + self.uri
- table2_uri = 'table:' + self.uri2
- self.session.create(table_uri, self.create_config)
- self.session.create(table2_uri, self.create_config)
- c = self.session.open_cursor(table_uri)
- c2 = self.session.open_cursor(table2_uri)
+ def compare_bitmap(self, orig, new):
+ # Compare the bitmaps from the metadata. Once a bit is set, it should never
+ # be cleared. But new bits could be set. So the check is only: if the original
+ # bitmap has a bit set then the current bitmap must be set for that bit also.
+ #
+ # First convert both bitmaps to a binary string, accounting for any possible leading
+ # zeroes (that would be truncated off). Then compare bit by bit.
+ orig_bits = bin(int('1'+orig, 16))[3:]
+ new_bits = bin(int('1'+new, 16))[3:]
+ self.pr("Original bitmap in binary: " + orig_bits)
+ self.pr("Reopened bitmap in binary: " + new_bits)
+ for o_bit, n_bit in zip(orig_bits, new_bits):
+ if o_bit != '0':
+ self.assertTrue(n_bit != '0')
+
+ def setup_test(self):
+ # Create and populate the tables.
+ self.session.create(self.table1_uri, self.create_config)
+ self.session.create(self.table2_uri, self.create_config)
+ c1 = self.session.open_cursor(self.table1_uri)
+ c2 = self.session.open_cursor(self.table2_uri)
# Only add a few entries.
self.pr("Write: " + str(self.few) + " initial data items")
for i in range(1, self.few):
val = str(i) + self.value_base
- c[i] = val
+ c1[i] = val
c2[i] = val
self.session.checkpoint()
# Take the initial full backup for incremental.
config = 'incremental=(enabled,granularity=4k,this_id="ID1")'
bkup_c = self.session.open_cursor('backup:', None, config)
- self.take_full_backup(self.dir, bkup_c)
+ # Uncomment these lines if actually taking the full backup is helpful for debugging.
+ # os.mkdir(self.dir)
+ # self.take_full_backup(self.dir, bkup_c)
bkup_c.close()
# Add a lot more data to both tables to generate a filled in block mod bitmap.
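[Note: the compare_bitmap helper above depends on one subtle step: prepending a sentinel '1' to the hex string before conversion, because Python's bin() drops leading zero bits and the bit-by-bit zip comparison needs both strings aligned from the first block. A worked standalone example of the trick:

# '0f' has four leading zero bits; the naive conversion loses them.
orig = '0f'
naive = bin(int(orig, 16))[2:]        # -> '1111' (4 bits, zeros gone)
bits = bin(int('1' + orig, 16))[3:]   # strip '0b' and the sentinel '1'
assert bits == '00001111'             # all 8 bits preserved
]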
@@ -90,52 +117,88 @@ class test_backup29(backup_base):
self.pr("Write: " + str(self.nentries) + " additional data items")
for i in range(self.few, self.nentries):
val = str(i) + self.value_base
- c[i] = val
+ c1[i] = val
c2[i] = val
- last_i = self.nentries
- c.close()
+ c1.close()
c2.close()
self.session.checkpoint()
# Get the block mod bitmap from the file URI.
- orig_bitmap = self.parse_blkmods(file2_uri)
- self.pr("CLOSE and REOPEN conn")
- self.reopen_conn()
- self.pr("Reopened conn")
+ self.orig1_bitmap = self.parse_blkmods(self.file1_uri)
+ self.orig2_bitmap = self.parse_blkmods(self.file2_uri)
+
+ def incr_backup_and_validate(self):
# After reopening we want to open both tables, but only modify one of them for
# the first checkpoint. Then modify the other table, checkpoint, and then check
# that the block mod bitmap remains correct for the other table.
- c = self.session.open_cursor(table_uri)
- c2 = self.session.open_cursor(table2_uri)
+ c1 = self.session.open_cursor(self.table1_uri)
+ c2 = self.session.open_cursor(self.table2_uri)
+ last_i = self.nentries
# Change one table and checkpoint. Keep the other table clean.
self.pr("Update only table 1: " + str(last_i))
val = str(last_i) + self.value_base
- c[last_i] = val
+ c1[last_i] = val
self.session.checkpoint()
+ new1_bitmap = self.parse_blkmods(self.file1_uri)
# Now change the other table and checkpoint again.
self.pr("Update second table: " + str(last_i))
c2[last_i] = val
self.session.checkpoint()
- new_bitmap = self.parse_blkmods(file2_uri)
+ new2_bitmap = self.parse_blkmods(self.file2_uri)
- c.close()
+ c1.close()
c2.close()
- # Compare the bitmaps from the metadata. Once a bit is set, it should never
- # be cleared. But new bits could be set. So the check is only: if the original
- # bitmap has a bit set then the current bitmap must be set for that bit also.
- #
- # First convert both bitmaps to a binary string, accounting for any possible leading
- # zeroes (that would be truncated off). Then compare bit by bit.
- orig_bits = bin(int('1'+orig_bitmap, 16))[3:]
- new_bits = bin(int('1'+new_bitmap, 16))[3:]
- self.pr("Original bitmap in binary: " + orig_bits)
- self.pr("Reopened bitmap in binary: " + new_bits)
- for orig, new in zip(orig_bits, new_bits):
- if orig != '0':
- self.assertTrue(new != '0')
+ self.compare_bitmap(self.orig1_bitmap, new1_bitmap)
+ self.compare_bitmap(self.orig2_bitmap, new2_bitmap)
+
+ def test_backup29_reopen(self):
+ self.setup_test()
+
+ self.pr("CLOSE and REOPEN conn")
+ self.reopen_conn()
+ self.pr("Reopened conn")
+
+ self.incr_backup_and_validate()
+
+ def test_backup29_sweep(self):
+ self.setup_test()
+
+ self.pr("Waiting to sweep handles")
+ # Create another table and populate it, and checkpoint.
+ self.session.create(self.active_uri, self.create_config)
+ c = self.session.open_cursor(self.active_uri)
+ for i in range(1, self.few):
+ c[i] = str(i) + self.value_base
+ self.session.checkpoint()
+
+ sleep = 0
+ max_sleep = 20
+ # The only files sweep won't close should be the metadata, the history store, the
+ # lock file, the statistics file, and our active file.
+ final_nfile = 5
+
+ # Keep updating and checkpointing this table until all other handles have been swept.
+ # The checkpoints have the side effect of sweeping the session cache, which will allow
+ # dhandles to be closed.
+ while sleep < max_sleep:
+ i = i + 1
+ c[i] = str(i) + self.value_base
+ self.session.checkpoint()
+ sleep += 0.5
+ time.sleep(0.5)
+ nfile = self.get_open_file_count()
+ if nfile == final_nfile:
+ break
+ c.close()
+
+ # Make sure we swept everything before we ran out of time.
+ self.assertEqual(nfile, final_nfile)
+ self.pr("Sweep done")
+
+ self.incr_backup_and_validate()
if __name__ == '__main__':
wttest.run()
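[Note: the sweep test's wait loop is a bounded polling pattern: nudge the system forward with an update and a checkpoint, sleep briefly, and re-check the open file count until it reaches final_nfile or the time budget runs out. A minimal sketch of the pattern in isolation (wait_for, condition, and nudge are illustrative names, not part of the patch):

import time

def wait_for(condition, nudge, timeout=20, interval=0.5):
    # Poll until condition() holds, calling nudge() each round to push
    # the system forward; returns False if the timeout expires first.
    waited = 0
    while waited < timeout:
        nudge()
        time.sleep(interval)
        waited += interval
        if condition():
            return True
    return False

In the test the loop is inlined: nudge corresponds to the cursor update plus session.checkpoint() (which also sweeps the session's cursor cache so idle dhandles can close), and condition corresponds to get_open_file_count() reaching final_nfile.]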