author     Luke Chen <luke.chen@mongodb.com>  2022-05-16 00:12:52 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-05-16 00:39:52 +0000
commit     136ee14d03553882ee2c763105df831c59e09e5b (patch)
tree       4a3ef42653ba68722ac524d63e76e26386b2d1f8 /src
parent     3e37098fa76e4882b502bdae02f5341b6d373b68 (diff)
download   mongo-136ee14d03553882ee2c763105df831c59e09e5b.tar.gz
Import wiredtiger: beb7a695734861b78c3540eba0a055c1c2c7d41a from branch mongodb-master
ref: 91d67b193c..beb7a69573
for: 6.1.0-rc0

WT-9307 Recreating a dropped tiered table that was never flushed panics
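The failure mode named in WT-9307 is: drop a tiered table before any flush_tier has copied an object to the cloud bucket, then create a table with the same URI. A minimal sketch of that sequence against the WiredTiger C API follows; the tiered-storage configuration string is illustrative (a real run also needs a storage-source extension loaded) and error checking is elided.

#include <wiredtiger.h>

int
main(void)
{
    WT_CONNECTION *conn;
    WT_SESSION *session;

    /* Illustrative config: a real setup must also load a storage source such as dir_store. */
    wiredtiger_open(".", NULL,
        "create,tiered_storage=(name=dir_store,bucket=bucket1,bucket_prefix=pfx-)", &conn);
    conn->open_session(conn, NULL, NULL, &session);

    session->create(session, "table:abc", "key_format=S,value_format=S");

    /* No checkpoint has run, so this flush has nothing to copy into the bucket. */
    session->flush_tier(session, NULL);

    session->drop(session, "table:abc", NULL);

    /* Recreating the never-flushed, dropped table panicked before this fix. */
    session->create(session, "table:abc", "key_format=S,value_format=S");

    return conn->close(conn, NULL);
}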
Diffstat (limited to 'src')
-rw-r--r--  src/third_party/wiredtiger/import.data                 |  2
-rw-r--r--  src/third_party/wiredtiger/src/schema/schema_drop.c    |  2
-rw-r--r--  src/third_party/wiredtiger/test/suite/test_tiered07.py | 26
3 files changed, 23 insertions, 7 deletions
diff --git a/src/third_party/wiredtiger/import.data b/src/third_party/wiredtiger/import.data
index 1dc607c9967..16deeefa558 100644
--- a/src/third_party/wiredtiger/import.data
+++ b/src/third_party/wiredtiger/import.data
@@ -2,5 +2,5 @@
"vendor": "wiredtiger",
"github": "wiredtiger/wiredtiger.git",
"branch": "mongodb-master",
- "commit": "91d67b193cb2981edfa5fc9e3bba1442d340c022"
+ "commit": "beb7a695734861b78c3540eba0a055c1c2c7d41a"
}
diff --git a/src/third_party/wiredtiger/src/schema/schema_drop.c b/src/third_party/wiredtiger/src/schema/schema_drop.c
index 1ce21a7916f..5411f39fe95 100644
--- a/src/third_party/wiredtiger/src/schema/schema_drop.c
+++ b/src/third_party/wiredtiger/src/schema/schema_drop.c
@@ -217,6 +217,7 @@ __drop_tiered(WT_SESSION_IMPL *session, const char *uri, bool force, const char
WT_PREFIX_SKIP_REQUIRED(session, filename, "file:");
WT_ERR(__wt_meta_track_drop(session, filename));
}
+ tiered->tiers[WT_TIERED_INDEX_LOCAL].tier = NULL;
}
/* Close any dhandle and remove any tier: entry from metadata. */
@@ -228,6 +229,7 @@ __drop_tiered(WT_SESSION_IMPL *session, const char *uri, bool force, const char
session, ret = __wt_conn_dhandle_close_all(session, tier->name, true, force)));
WT_ERR(ret);
WT_ERR(__wt_metadata_remove(session, tier->name));
+ tiered->tiers[WT_TIERED_INDEX_SHARED].tier = NULL;
}
/*
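Both hunks in schema_drop.c apply the same pattern: once a tier's backing file (the local tier) or its tier: metadata entry (the shared tier) has been dropped, the cached pointer in the tiered handle is cleared, so a later operation that reuses the URI cannot dereference a stale tier. A generic sketch of that clear-after-drop pattern, using invented stand-in types rather than WiredTiger's internal ones:

#include <stdlib.h>

/* Invented stand-ins for the internal layout; not WiredTiger's real types. */
struct tier {
    char *name;
};

struct tiered {
    struct tier *tiers[2]; /* one slot per tier, e.g. local and shared */
};

static void
drop_tier_slot(struct tiered *t, int slot)
{
    struct tier *tier = t->tiers[slot];

    if (tier == NULL)
        return;
    free(tier->name);
    free(tier);
    /* Clear the cached pointer so a later create of the same URI sees an empty slot. */
    t->tiers[slot] = NULL;
}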
diff --git a/src/third_party/wiredtiger/test/suite/test_tiered07.py b/src/third_party/wiredtiger/test/suite/test_tiered07.py
index a7636121c57..07425a0a2be 100644
--- a/src/third_party/wiredtiger/test/suite/test_tiered07.py
+++ b/src/third_party/wiredtiger/test/suite/test_tiered07.py
@@ -40,8 +40,11 @@ class test_tiered07(wttest.WiredTigerTestCase, TieredConfigMixin):
# is interpreting a directory to end in a '/', whereas the code in the tiered storage doesn't
# expect that. Enable when fixed.
# Make scenarios for different cloud service providers
+ flush_obj = [('ckpt', dict(first_ckpt=True)),
+ ('no_ckpt', dict(first_ckpt=False)),
+ ]
tiered_storage_dirstore_source = storage_sources[:1]
- scenarios = make_scenarios(tiered_storage_dirstore_source)
+ scenarios = make_scenarios(flush_obj, tiered_storage_dirstore_source)
uri = "table:abc"
uri2 = "table:ab"
@@ -94,9 +97,12 @@ class test_tiered07(wttest.WiredTigerTestCase, TieredConfigMixin):
c = self.session.open_cursor(self.localuri)
c["0"] = "0"
c.close()
- self.session.checkpoint()
+ if (self.first_ckpt):
+ self.session.checkpoint()
self.pr('After data, call flush_tier')
self.session.flush_tier(None)
+ if (not self.first_ckpt):
+ self.session.checkpoint()
# Drop table.
self.pr('call drop')
@@ -110,6 +116,7 @@ class test_tiered07(wttest.WiredTigerTestCase, TieredConfigMixin):
self.assertFalse(os.path.isfile("abc-0000000002.wtobj"))
# Dropping a table using the force setting should succeed even if the table does not exist.
+ self.pr('drop with force')
self.session.drop(self.localuri, 'force=true')
self.session.drop(self.uri, 'force=true')
@@ -122,10 +129,17 @@ class test_tiered07(wttest.WiredTigerTestCase, TieredConfigMixin):
lambda: self.session.drop("table:random_non_existent", None))
# Create new table with same name. This should error.
- msg = "/already exists/"
- self.pr('check cannot create with same name')
- self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
- lambda:self.assertEquals(self.session.create(self.uri, 'key_format=S'), 0), msg)
+ self.session.create(self.newuri, 'key_format=S')
+
+ # If we didn't do a checkpoint before the flush_tier then creating with the same name
+ # will succeed because no bucket objects were created.
+ if (self.first_ckpt):
+ msg = "/already exists/"
+ self.pr('check cannot create with same name')
+ self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
+ lambda:self.assertEquals(self.session.create(self.uri, 'key_format=S'), 0), msg)
+ else:
+ self.session.create(self.uri, 'key_format=S')
# Make sure there was no problem with overlapping table names.
self.pr('check original similarly named tables')
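Taken together, the new first_ckpt axis exercises both orderings of checkpoint and flush_tier: only a flush_tier that runs after a checkpoint creates bucket objects, so only in that case does recreating the dropped URI fail with "already exists". A sketch of the two orderings via the C API, assuming session is open on a tiered-storage-enabled connection (names and configuration are illustrative, error handling elided):

#include <stdbool.h>
#include <wiredtiger.h>

static int
run_scenario(WT_SESSION *session, bool first_ckpt)
{
    WT_CURSOR *c;

    session->create(session, "table:abc", "key_format=S,value_format=S");

    session->open_cursor(session, "table:abc", NULL, NULL, &c);
    c->set_key(c, "0");
    c->set_value(c, "0");
    c->insert(c);
    c->close(c);

    if (first_ckpt)
        session->checkpoint(session, NULL); /* flush_tier now has durable data: objects reach the bucket */
    session->flush_tier(session, NULL);
    if (!first_ckpt)
        session->checkpoint(session, NULL); /* too late for the flush above: no bucket objects exist */

    session->drop(session, "table:abc", NULL);

    /*
     * With first_ckpt, bucket objects outlive the drop and this create fails
     * with "already exists"; without it, the create succeeds.
     */
    return session->create(session, "table:abc", "key_format=S,value_format=S");
}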