author     Sergi Mateo Bellido <sergi.mateo-bellido@mongodb.com>  2022-12-09 13:02:51 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>       2022-12-21 08:40:43 +0000
commit     fed81c016c212a7a644266f478734a1f9450adb8 (patch)
tree       80e22c18e0858b97c77cfb0cfd47ef89e9745af5
parent     70bbc40c52b10395d0c54d63e4f7c35abadd8756 (diff)
SERVER-71689 Refresh the catalogCache after dropping a collection
(cherry picked from commit a92c4effc843cde56ddb0903c36ba10e2fbfb283)
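In short: DropCollectionCoordinator::dropCollectionLocally() now swallows
NamespaceNotFound and then unconditionally refreshes the catalog cache, so
stale routing information is purged even when the collection was already gone.
A minimal sketch of the resulting flow, condensed from the diff below (not a
drop-in implementation):

    try {
        dropCollectionHonouringFromMigrateFlag(opCtx, nss, fromMigrate);
    } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
        // The collection may already be gone; the refresh below must still run.
    }
    // Refresh the catalog cache to purge outdated information; this also
    // indirectly tells secondary nodes to clear their filtering information.
    const auto catalog = Grid::get(opCtx)->catalogCache();
    uassertStatusOK(catalog->getCollectionRoutingInfoWithRefresh(opCtx, nss));
    CatalogCacheLoader::get(opCtx).waitForCollectionFlush(opCtx, nss);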
-rw-r--r--  etc/backports_required_for_multiversion_tests.yml               |  4
-rw-r--r--  jstests/sharding/drop_collection.js                             | 70
-rw-r--r--  src/mongo/db/s/drop_collection_coordinator.cpp                  | 39
-rw-r--r--  src/mongo/db/s/rename_collection_participant_service.cpp        | 24
-rw-r--r--  src/mongo/db/s/shardsvr_drop_collection_participant_command.cpp | 13
5 files changed, 82 insertions(+), 68 deletions(-)
diff --git a/etc/backports_required_for_multiversion_tests.yml b/etc/backports_required_for_multiversion_tests.yml
index 9b9a1a149c2..b09846c6984 100644
--- a/etc/backports_required_for_multiversion_tests.yml
+++ b/etc/backports_required_for_multiversion_tests.yml
@@ -232,6 +232,8 @@ last-continuous:
ticket: SERVER-69861
- test_file: jstests/sharding/query/shard_refuses_cursor_ownership.js
ticket: SERVER-65259
+ - test_file: jstests/sharding/drop_collection.js
+ ticket: SERVER-71689
suites: null
last-lts:
all:
@@ -541,4 +543,6 @@ last-lts:
ticket: SERVER-69861
- test_file: jstests/sharding/query/shard_refuses_cursor_ownership.js
ticket: SERVER-65259
+ - test_file: jstests/sharding/drop_collection.js
+ ticket: SERVER-71689
suites: null
diff --git a/jstests/sharding/drop_collection.js b/jstests/sharding/drop_collection.js
index f07ced08ae5..0947b56124a 100644
--- a/jstests/sharding/drop_collection.js
+++ b/jstests/sharding/drop_collection.js
@@ -32,17 +32,29 @@ function assertCollectionDropped(ns, uuid = null) {
configDB.tags.countDocuments({ns: ns}),
"Found unexpected tag for a collection after drop.");
+ // Sharding metadata checks
+
+ // No more coll entry
+ assert.eq(null, st.s.getCollection(ns).exists());
+ assert.eq(0,
+ configDB.collections.countDocuments({_id: ns}),
+ "Found collection entry in 'config.collection' after drop.");
+
// No more chunks
- const errMsg = "Found collection entry in 'config.collection' after drop.";
- // Before 5.0 chunks were indexed by ns, now by uuid
- assert.eq(0, configDB.chunks.countDocuments({ns: ns}), errMsg);
if (uuid != null) {
- assert.eq(0, configDB.chunks.countDocuments({uuid: uuid}), errMsg);
+ assert.eq(0,
+ configDB.chunks.countDocuments({uuid: uuid}),
+ "Found references to collection uuid in 'config.chunks' after drop.");
}
- // No more coll entry
- assert.eq(null, st.s.getCollection(ns).exists());
- assert.eq(0, configDB.collections.countDocuments({_id: ns}));
+ // Verify that persisted cached metadata was removed as part of the dropCollection
+ const chunksCollName = 'cache.chunks.' + ns;
+ for (let configDb of [st.shard0.getDB('config'), st.shard1.getDB('config')]) {
+ assert.eq(configDb['cache.collections'].countDocuments({_id: ns}),
+ 0,
+ "Found collection entry in 'config.cache.collections' after drop.");
+ assert(!configDb[chunksCollName].exists());
+ }
}
jsTest.log("Drop unsharded collection.");
@@ -74,10 +86,11 @@ jsTest.log("Drop unsharded collection also remove tags.");
assert.commandWorked(db.runCommand({drop: coll.getName()}));
assertCollectionDropped(coll.getFullName());
}
+
jsTest.log("Drop sharded collection repeated.");
{
const db = getNewDb();
- const coll = db['unshardedColl0'];
+ const coll = db['shardedColl0'];
// Create the database
assert.commandWorked(st.s.adminCommand({enableSharding: db.getName()}));
for (var i = 0; i < 3; i++) {
@@ -85,9 +98,11 @@ jsTest.log("Drop sharded collection repeated.");
assert.commandWorked(st.s.adminCommand({shardCollection: coll.getFullName(), key: {x: 1}}));
assert.commandWorked(coll.insert({x: 123}));
assert.eq(1, coll.countDocuments({x: 123}));
+
// Drop the collection
+ var uuid = getCollectionUUID(coll.getFullName());
assert.commandWorked(db.runCommand({drop: coll.getName()}));
- assertCollectionDropped(coll.getFullName());
+ assertCollectionDropped(coll.getFullName(), uuid);
}
}
@@ -245,7 +260,9 @@ jsTest.log("Move primary with drop and recreate - new primary own chunks.");
configDB, coll.getFullName(), {shard: st.shard1.shardName}));
jsTest.log("Drop sharded collection");
+ var uuid = getCollectionUUID(coll.getFullName());
coll.drop();
+ assertCollectionDropped(coll.getFullName(), uuid);
jsTest.log("Re-Create sharded collection with one chunk on shard 0");
st.shardColl(coll, {skey: 1}, false, false);
@@ -257,11 +274,12 @@ jsTest.log("Move primary with drop and recreate - new primary own chunks.");
st.ensurePrimaryShard(db.getName(), st.shard1.shardName);
jsTest.log("Drop sharded collection");
+ uuid = getCollectionUUID(coll.getFullName());
coll.drop();
+ assertCollectionDropped(coll.getFullName(), uuid);
}
-jsTest.log(
- "Test that dropping a non-sharded collection, relevant events are properly logged on CSRS");
+jsTest.log("Test that dropping an unsharded collection, relevant events are logged on CSRS.");
{
// Create a non-sharded collection
const db = getNewDb();
@@ -270,6 +288,7 @@ jsTest.log(
// Drop the collection
assert.commandWorked(db.runCommand({drop: coll.getName()}));
+ assertCollectionDropped(coll.getFullName());
// Verify that the drop collection start event has been logged
const startLogCount =
@@ -282,7 +301,7 @@ jsTest.log(
assert.gte(1, endLogCount, "dropCollection end event not found in changelog");
}
-jsTest.log("Test that dropping a sharded collection, relevant events are properly logged on CSRS");
+jsTest.log("Test that dropping a sharded collection, relevant events are logged on CSRS.");
{
// Create a sharded collection
const db = getNewDb();
@@ -299,7 +318,9 @@ jsTest.log("Test that dropping a sharded collection, relevant events are properl
assert.commandWorked(coll.insert({_id: -10}));
// Drop the collection
+ const uuid = getCollectionUUID(coll.getFullName());
assert.commandWorked(db.runCommand({drop: coll.getName()}));
+ assertCollectionDropped(coll.getFullName(), uuid);
// Verify that the drop collection start event has been logged
const startLogCount =
@@ -312,7 +333,7 @@ jsTest.log("Test that dropping a sharded collection, relevant events are properl
assert.gte(1, endLogCount, "dropCollection end event not found in changelog");
}
-jsTest.log("Test that dropping a sharded collection, the cached metadata on shards is cleaned up");
+jsTest.log("Test that dropping a sharded collection, the cached metadata on shards is cleaned up.");
{
// Create a sharded collection
const db = getNewDb();
@@ -320,24 +341,21 @@ jsTest.log("Test that dropping a sharded collection, the cached metadata on shar
assert.commandWorked(
st.s.adminCommand({enableSharding: db.getName(), primaryShard: st.shard0.shardName}));
assert.commandWorked(st.s.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
-
- // Distribute the chunks among the shards
- assert.commandWorked(st.s.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
assert.commandWorked(coll.insert({_id: 10}));
- assert.commandWorked(coll.insert({_id: -10}));
- // Get the chunks cache collection name
- const configCollDoc = st.s0.getDB('config').collections.findOne({_id: coll.getFullName()});
- const chunksCollName = 'cache.chunks.' + coll.getFullName();
+ // At this point only one shard (the one that owns the data) has valid filtering
+ // information. Below we force all shards to install valid filtering information.
+ // For the shard that doesn't own any data, that means filtering information
+ // stating that it owns no chunks.
+ assert.commandWorked(
+ st.shard0.adminCommand({_flushRoutingTableCacheUpdates: coll.getFullName()}));
+ assert.commandWorked(
+ st.shard1.adminCommand({_flushRoutingTableCacheUpdates: coll.getFullName()}));
// Drop the collection
+ const uuid = getCollectionUUID(coll.getFullName());
assert.commandWorked(db.runCommand({drop: coll.getName()}));
-
- // Verify that the cached metadata on shards is cleaned up
- for (let configDb of [st.shard0.getDB('config'), st.shard1.getDB('config')]) {
- assert.eq(configDb['cache.collections'].countDocuments({_id: coll.getFullName()}), 0);
- assert(!configDb[chunksCollName].exists());
- }
+ assertCollectionDropped(coll.getFullName(), uuid);
}
st.stop();
diff --git a/src/mongo/db/s/drop_collection_coordinator.cpp b/src/mongo/db/s/drop_collection_coordinator.cpp
index 76b86dfeec8..11c6fe8d601 100644
--- a/src/mongo/db/s/drop_collection_coordinator.cpp
+++ b/src/mongo/db/s/drop_collection_coordinator.cpp
@@ -44,6 +44,24 @@
#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
namespace mongo {
+namespace {
+
+void dropCollectionHonouringFromMigrateFlag(OperationContext* opCtx,
+ const NamespaceString& nss,
+ bool fromMigrate) {
+ if (fromMigrate) {
+ mongo::sharding_ddl_util::ensureCollectionDroppedNoChangeEvent(opCtx, nss);
+ } else {
+ DropReply unused;
+ uassertStatusOK(
+ dropCollection(opCtx,
+ nss,
+ &unused,
+ DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops));
+ }
+}
+
+} // namespace
DropCollectionCoordinator::DropCollectionCoordinator(ShardingDDLCoordinatorService* service,
const BSONObj& initialState)
@@ -87,17 +105,18 @@ void DropCollectionCoordinator::dropCollectionLocally(OperationContext* opCtx,
csr->clearFilteringMetadataForDroppedCollection(opCtx);
}
- DropReply unused;
- if (fromMigrate)
- mongo::sharding_ddl_util::ensureCollectionDroppedNoChangeEvent(opCtx, nss);
- else
- uassertStatusOK(
- dropCollection(opCtx,
- nss,
- &unused,
- DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops));
+ try {
+ dropCollectionHonouringFromMigrateFlag(opCtx, nss, fromMigrate);
+ } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
+ // Note that even if the namespace was not found we have to execute the code below!
+ LOGV2_DEBUG(5280920,
+ 1,
+ "Namespace not found while trying to delete local collection",
+ "namespace"_attr = nss);
+ }
- // Force the refresh of the catalog cache to purge outdated information
+ // Force the refresh of the catalog cache to purge outdated information. Note also
+ // that this code indirectly notifies secondary nodes to clear their filtering information.
const auto catalog = Grid::get(opCtx)->catalogCache();
uassertStatusOK(catalog->getCollectionRoutingInfoWithRefresh(opCtx, nss));
CatalogCacheLoader::get(opCtx).waitForCollectionFlush(opCtx, nss);
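Because dropCollectionLocally() now absorbs NamespaceNotFound itself, the two
callers changed below no longer need their own guards. A before/after sketch of
the calling contract (illustrative only):

    // Before: every caller had to swallow NamespaceNotFound on its own.
    try {
        DropCollectionCoordinator::dropCollectionLocally(opCtx, nss, fromMigrate);
    } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
        // Collection already dropped elsewhere.
    }

    // After: the helper handles the error, so callers invoke it unconditionally.
    DropCollectionCoordinator::dropCollectionLocally(opCtx, nss, fromMigrate);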
diff --git a/src/mongo/db/s/rename_collection_participant_service.cpp b/src/mongo/db/s/rename_collection_participant_service.cpp
index a7f27d01bb9..fd456ccf9fd 100644
--- a/src/mongo/db/s/rename_collection_participant_service.cpp
+++ b/src/mongo/db/s/rename_collection_participant_service.cpp
@@ -59,29 +59,11 @@ const Backoff kExponentialBackoff(Seconds(1), Milliseconds::max());
* Drop the collection locally and clear stale metadata from cache collections.
*/
void dropCollectionLocally(OperationContext* opCtx, const NamespaceString& nss) {
- bool knownNss = [&]() {
- try {
- DropCollectionCoordinator::dropCollectionLocally(opCtx, nss, false /* fromMigrate */);
- return true;
- } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
- return false;
- }
- }();
-
+ DropCollectionCoordinator::dropCollectionLocally(opCtx, nss, false /* fromMigrate */);
LOGV2_DEBUG(5515100,
1,
- "Dropped target collection locally on renameCollection participant",
- "namespace"_attr = nss,
- "collectionExisted"_attr = knownNss);
-}
-
-/* Clear the CollectionShardingRuntime entry for the specified namespace */
-void clearFilteringMetadata(OperationContext* opCtx, const NamespaceString& nss) {
- UninterruptibleLockGuard noInterrupt(opCtx->lockState());
- Lock::DBLock dbLock(opCtx, nss.db(), MODE_IX);
- Lock::CollectionLock collLock(opCtx, nss, MODE_IX);
- auto* csr = CollectionShardingRuntime::get(opCtx, nss);
- csr->clearFilteringMetadata(opCtx);
+ "Dropped target collection locally on renameCollection participant.",
+ "namespace"_attr = nss);
}
/*
diff --git a/src/mongo/db/s/shardsvr_drop_collection_participant_command.cpp b/src/mongo/db/s/shardsvr_drop_collection_participant_command.cpp
index 950f2d296d7..dbfd5d1a3ef 100644
--- a/src/mongo/db/s/shardsvr_drop_collection_participant_command.cpp
+++ b/src/mongo/db/s/shardsvr_drop_collection_participant_command.cpp
@@ -75,17 +75,8 @@ public:
opCtx->setAlwaysInterruptAtStepDownOrUp();
- try {
- bool fromMigrate =
- request().getFromMigrate() ? request().getFromMigrate().value() : false;
-
- DropCollectionCoordinator::dropCollectionLocally(opCtx, ns(), fromMigrate);
- } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
- LOGV2_DEBUG(5280920,
- 1,
- "Namespace not found while trying to delete local collection",
- "namespace"_attr = ns());
- }
+ bool fromMigrate = request().getFromMigrate().value_or(false);
+ DropCollectionCoordinator::dropCollectionLocally(opCtx, ns(), fromMigrate);
// The txnParticipant will only be missing when the command was sent from a coordinator
// running an old 5.0.0 binary that didn't attach a sessionId & txnNumber.
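The participant command above also replaces a hand-rolled ternary with
boost::optional's value_or(). A minimal standalone illustration (assuming, as
in MongoDB's IDL-generated request types, that getFromMigrate() returns a
boost::optional<bool>; resolveFromMigrate is a hypothetical helper):

    #include <boost/optional.hpp>

    bool resolveFromMigrate(const boost::optional<bool>& fromMigrateField) {
        // Equivalent to: fromMigrateField ? fromMigrateField.value() : false
        return fromMigrateField.value_or(false);
    }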