summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--jstests/sharding/change_stream_no_drop.js62
-rw-r--r--src/mongo/db/s/create_collection_coordinator.cpp11
-rw-r--r--src/mongo/db/s/drop_collection_coordinator.cpp34
-rw-r--r--src/mongo/db/s/drop_collection_coordinator.h6
-rw-r--r--src/mongo/db/s/drop_database_coordinator.cpp4
-rw-r--r--src/mongo/db/s/rename_collection_participant_service.cpp2
-rw-r--r--src/mongo/db/s/resharding/resharding_data_copy_util.cpp27
-rw-r--r--src/mongo/db/s/resharding/resharding_data_copy_util.h10
-rw-r--r--src/mongo/db/s/resharding/resharding_donor_service.cpp3
-rw-r--r--src/mongo/db/s/resharding/resharding_donor_service_test.cpp7
-rw-r--r--src/mongo/db/s/resharding/resharding_recipient_service.cpp3
-rw-r--r--src/mongo/db/s/resharding/resharding_recipient_service_test.cpp7
-rw-r--r--src/mongo/db/s/sharding_ddl_coordinator.h2
-rw-r--r--src/mongo/db/s/sharding_ddl_util.cpp32
-rw-r--r--src/mongo/db/s/sharding_ddl_util.h14
-rw-r--r--src/mongo/db/s/shardsvr_drop_collection_participant_command.cpp5
-rw-r--r--src/mongo/s/request_types/sharded_ddl_commands.idl5
17 files changed, 174 insertions, 60 deletions
diff --git a/jstests/sharding/change_stream_no_drop.js b/jstests/sharding/change_stream_no_drop.js
new file mode 100644
index 00000000000..d378421cd18
--- /dev/null
+++ b/jstests/sharding/change_stream_no_drop.js
@@ -0,0 +1,62 @@
+/**
+ * DDL coordinators are responsible for dropping temporary collections, especially after failures.
+ * However, the change stream should not be aware of those events.
+ * @tags: [
+ * # Requires all nodes to be running the latest binary.
+ * multiversion_incompatible,
+ * ]
+ */
+function assertNoDrop(changeStream) {
+ while (changeStream.hasNext()) {
+ assert.neq(changeStream.next().operationType, 'drop');
+ }
+}
+
+function emptyChangeStream(changeStream) {
+ while (changeStream.hasNext()) {
+ changeStream.next();
+ }
+}
+
+(function() {
+
+const dbName = 'db';
+
+load('jstests/libs/fail_point_util.js'); // For configureFailPoint
+
+// Explicitly enable the periodic no-op writer to allow the router to process change stream events
+// coming from all shards. This is enabled for production clusters by default.
+const st = new ShardingTest({
+ mongos: 1,
+ config: 1,
+ shards: 2,
+ rs: {nodes: 1, setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}},
+ other: {enableBalancer: true}
+});
+
+// create a database and a change stream on it
+jsTest.log('Creating a change stream on ' + dbName);
+assert.commandWorked(
+ st.s.adminCommand({enableSharding: dbName, primaryShard: st.shard0.shardName}));
+let changeStream = st.s.getDB('db').watch();
+
+// setFeatureCompatibilityVersion might cause dropping of deprecated collections
+emptyChangeStream(changeStream);
+
+jsTest.log(
+ 'The shard_collection_coordinator at second attempt (after failure) should not report drop events for orphaned');
+{
+ configureFailPoint(st.shard0,
+ 'failAtCommitCreateCollectionCoordinator',
+ data = {},
+ failPointMode = {times: 1});
+
+ collectionName = dbName + '.coll';
+ assert.commandWorked(st.s.adminCommand(
+ {shardCollection: collectionName, key: {_id: "hashed"}, numInitialChunks: 10}));
+
+ assertNoDrop(changeStream);
+}
+
+st.stop();
+}());
diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp
index 7eb8f9cff2e..73ea88abc94 100644
--- a/src/mongo/db/s/create_collection_coordinator.cpp
+++ b/src/mongo/db/s/create_collection_coordinator.cpp
@@ -66,6 +66,7 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
+MONGO_FAIL_POINT_DEFINE(failAtCommitCreateCollectionCoordinator);
namespace mongo {
@@ -415,8 +416,8 @@ void broadcastDropCollection(OperationContext* opCtx,
const NamespaceString& nss,
const std::shared_ptr<executor::TaskExecutor>& executor,
const OperationSessionInfo& osi) {
- const auto primaryShardId = ShardingState::get(opCtx)->shardId();
const ShardsvrDropCollectionParticipant dropCollectionParticipant(nss);
+ const auto primaryShardId = ShardingState::get(opCtx)->shardId();
auto participants = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx);
// Remove primary shard from participants
@@ -424,7 +425,7 @@ void broadcastDropCollection(OperationContext* opCtx,
participants.end());
sharding_ddl_util::sendDropCollectionParticipantCommandToShards(
- opCtx, nss, participants, executor, osi);
+ opCtx, nss, participants, executor, osi, true /* fromMigrate */);
}
} // namespace
@@ -1208,6 +1209,12 @@ void CreateCollectionCoordinator::_commit(OperationContext* opCtx,
const std::shared_ptr<executor::TaskExecutor>& executor) {
LOGV2_DEBUG(5277906, 2, "Create collection _commit", "namespace"_attr = nss());
+ if (MONGO_unlikely(failAtCommitCreateCollectionCoordinator.shouldFail())) {
+ LOGV2_DEBUG(6960301, 2, "About to hit failAtCommitCreateCollectionCoordinator fail point");
+ uasserted(ErrorCodes::InterruptedAtShutdown,
+ "failAtCommitCreateCollectionCoordinator fail point");
+ }
+
// Upsert Chunks.
_updateSession(opCtx);
insertChunks(opCtx, _initialChunks->chunks, getCurrentSession());
diff --git a/src/mongo/db/s/drop_collection_coordinator.cpp b/src/mongo/db/s/drop_collection_coordinator.cpp
index b306ff6fc8d..66334299cff 100644
--- a/src/mongo/db/s/drop_collection_coordinator.cpp
+++ b/src/mongo/db/s/drop_collection_coordinator.cpp
@@ -30,6 +30,7 @@
#include "mongo/db/s/drop_collection_coordinator.h"
#include "mongo/db/catalog/collection_uuid_mismatch.h"
+#include "mongo/db/concurrency/exception_util.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/range_deletion_util.h"
@@ -46,8 +47,9 @@
namespace mongo {
-DropReply DropCollectionCoordinator::dropCollectionLocally(OperationContext* opCtx,
- const NamespaceString& nss) {
+void DropCollectionCoordinator::dropCollectionLocally(OperationContext* opCtx,
+ const NamespaceString& nss,
+ bool fromMigrate) {
boost::optional<UUID> collectionUUID;
{
@@ -97,9 +99,15 @@ DropReply DropCollectionCoordinator::dropCollectionLocally(OperationContext* opC
}
}
- DropReply result;
- uassertStatusOK(dropCollection(
- opCtx, nss, &result, DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops));
+ DropReply unused;
+ if (fromMigrate)
+ mongo::sharding_ddl_util::ensureCollectionDroppedNoChangeEvent(opCtx, nss, collectionUUID);
+ else
+ uassertStatusOK(
+ dropCollection(opCtx,
+ nss,
+ &unused,
+ DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops));
// Force the refresh of the catalog cache to purge outdated information
@@ -110,8 +118,6 @@ DropReply DropCollectionCoordinator::dropCollectionLocally(OperationContext* opC
// Ensures the remove of range deletions and the refresh of the catalog cache will be waited for
// majority at the end of the command
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
-
- return result;
}
ExecutorFuture<void> DropCollectionCoordinator::_runImpl(
@@ -210,13 +216,23 @@ ExecutorFuture<void> DropCollectionCoordinator::_runImpl(
participants.end());
sharding_ddl_util::sendDropCollectionParticipantCommandToShards(
- opCtx, nss(), participants, **executor, getCurrentSession());
+ opCtx,
+ nss(),
+ participants,
+ **executor,
+ getCurrentSession(),
+ false /*fromMigrate*/);
// The sharded collection must be dropped on the primary shard after it has been
// dropped on all of the other shards to ensure it can only be re-created as
// unsharded with a higher optime than all of the drops.
sharding_ddl_util::sendDropCollectionParticipantCommandToShards(
- opCtx, nss(), {primaryShardId}, **executor, getCurrentSession());
+ opCtx,
+ nss(),
+ {primaryShardId},
+ **executor,
+ getCurrentSession(),
+ false /*fromMigrate*/);
sharding_ddl_util::removeQueryAnalyzerMetadataFromConfig(opCtx, nss(), boost::none);
diff --git a/src/mongo/db/s/drop_collection_coordinator.h b/src/mongo/db/s/drop_collection_coordinator.h
index 765f11286f1..7a8288c4e9c 100644
--- a/src/mongo/db/s/drop_collection_coordinator.h
+++ b/src/mongo/db/s/drop_collection_coordinator.h
@@ -52,8 +52,12 @@ public:
/**
* Locally drops a collection, cleans its CollectionShardingRuntime metadata and refreshes the
* catalog cache.
+ * The oplog entry associated with the drop collection will be generated with the fromMigrate
+ * flag only when the `fromMigrate` parameter is set to true.
*/
- static DropReply dropCollectionLocally(OperationContext* opCtx, const NamespaceString& nss);
+ static void dropCollectionLocally(OperationContext* opCtx,
+ const NamespaceString& nss,
+ bool fromMigrate);
private:
StringData serializePhase(const Phase& phase) const override {
diff --git a/src/mongo/db/s/drop_database_coordinator.cpp b/src/mongo/db/s/drop_database_coordinator.cpp
index fc7fd244719..fcd78681055 100644
--- a/src/mongo/db/s/drop_database_coordinator.cpp
+++ b/src/mongo/db/s/drop_database_coordinator.cpp
@@ -143,13 +143,13 @@ void DropDatabaseCoordinator::_dropShardedCollection(
participants.erase(std::remove(participants.begin(), participants.end(), primaryShardId),
participants.end());
sharding_ddl_util::sendDropCollectionParticipantCommandToShards(
- opCtx, nss, participants, **executor, getCurrentSession());
+ opCtx, nss, participants, **executor, getCurrentSession(), false /* fromMigrate */);
// The sharded collection must be dropped on the primary shard after it has been dropped on all
// of the other shards to ensure it can only be re-created as unsharded with a higher optime
// than all of the drops.
sharding_ddl_util::sendDropCollectionParticipantCommandToShards(
- opCtx, nss, {primaryShardId}, **executor, getCurrentSession());
+ opCtx, nss, {primaryShardId}, **executor, getCurrentSession(), false /* fromMigrate */);
// Remove collection's query analyzer configuration document, if it exists.
sharding_ddl_util::removeQueryAnalyzerMetadataFromConfig(opCtx, nss, coll.getUuid());
diff --git a/src/mongo/db/s/rename_collection_participant_service.cpp b/src/mongo/db/s/rename_collection_participant_service.cpp
index b907a3e7f26..a9d1f29254e 100644
--- a/src/mongo/db/s/rename_collection_participant_service.cpp
+++ b/src/mongo/db/s/rename_collection_participant_service.cpp
@@ -58,7 +58,7 @@ const Backoff kExponentialBackoff(Seconds(1), Milliseconds::max());
void dropCollectionLocally(OperationContext* opCtx, const NamespaceString& nss) {
bool knownNss = [&]() {
try {
- DropCollectionCoordinator::dropCollectionLocally(opCtx, nss);
+ DropCollectionCoordinator::dropCollectionLocally(opCtx, nss, false /* fromMigrate */);
return true;
} catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
return false;
diff --git a/src/mongo/db/s/resharding/resharding_data_copy_util.cpp b/src/mongo/db/s/resharding/resharding_data_copy_util.cpp
index 105bec7d644..6b994aa4ed5 100644
--- a/src/mongo/db/s/resharding/resharding_data_copy_util.cpp
+++ b/src/mongo/db/s/resharding/resharding_data_copy_util.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/s/resharding/resharding_txn_cloner_progress_gen.h"
#include "mongo/db/s/resharding/resharding_util.h"
#include "mongo/db/s/session_catalog_migration.h"
+#include "mongo/db/s/sharding_ddl_util.h"
#include "mongo/db/session/session_catalog_mongod.h"
#include "mongo/db/session/session_txn_record_gen.h"
#include "mongo/db/storage/write_unit_of_work.h"
@@ -70,28 +71,6 @@ void ensureCollectionExists(OperationContext* opCtx,
});
}
-void ensureCollectionDropped(OperationContext* opCtx,
- const NamespaceString& nss,
- const boost::optional<UUID>& uuid) {
- invariant(!opCtx->lockState()->isLocked());
- invariant(!opCtx->lockState()->inAWriteUnitOfWork());
-
- writeConflictRetry(
- opCtx, "resharding::data_copy::ensureCollectionDropped", nss.toString(), [&] {
- AutoGetCollection coll(opCtx, nss, MODE_X);
- if (!coll || (uuid && coll->uuid() != uuid)) {
- // If the collection doesn't exist or exists with a different UUID, then the
- // requested collection has been dropped already.
- return;
- }
-
- WriteUnitOfWork wuow(opCtx);
- uassertStatusOK(coll.getDb()->dropCollectionEvenIfSystem(
- opCtx, nss, {} /* dropOpTime */, true /* markFromMigrate */));
- wuow.commit();
- });
-}
-
void ensureOplogCollectionsDropped(OperationContext* opCtx,
const UUID& reshardingUUID,
const UUID& sourceUUID,
@@ -118,11 +97,11 @@ void ensureOplogCollectionsDropped(OperationContext* opCtx,
// Drop the conflict stash collection for this donor.
auto stashNss = getLocalConflictStashNamespace(sourceUUID, donor.getShardId());
- ensureCollectionDropped(opCtx, stashNss);
+ mongo::sharding_ddl_util::ensureCollectionDroppedNoChangeEvent(opCtx, stashNss);
// Drop the oplog buffer collection for this donor.
auto oplogBufferNss = getLocalOplogBufferNamespace(sourceUUID, donor.getShardId());
- ensureCollectionDropped(opCtx, oplogBufferNss);
+ mongo::sharding_ddl_util::ensureCollectionDroppedNoChangeEvent(opCtx, oplogBufferNss);
}
}
diff --git a/src/mongo/db/s/resharding/resharding_data_copy_util.h b/src/mongo/db/s/resharding/resharding_data_copy_util.h
index 835f6db1cb6..bf3f21a7118 100644
--- a/src/mongo/db/s/resharding/resharding_data_copy_util.h
+++ b/src/mongo/db/s/resharding/resharding_data_copy_util.h
@@ -60,16 +60,6 @@ void ensureCollectionExists(OperationContext* opCtx,
const CollectionOptions& options);
/**
- * Drops the specified collection or returns without error if the collection has already been
- * dropped. A particular incarnation of the collection can be dropped by specifying its UUID.
- *
- * This functions assumes the collection being dropped doesn't have any two-phase index builds
- * active on it.
- */
-void ensureCollectionDropped(OperationContext* opCtx,
- const NamespaceString& nss,
- const boost::optional<UUID>& uuid = boost::none);
-/**
* Removes documents from the oplog applier progress and transaction applier progress collections
* that are associated with an in-progress resharding operation. Also drops all oplog buffer
* collections and conflict stash collections that are associated with the in-progress resharding
diff --git a/src/mongo/db/s/resharding/resharding_donor_service.cpp b/src/mongo/db/s/resharding/resharding_donor_service.cpp
index 3ec02bf34fe..646b2b1f743 100644
--- a/src/mongo/db/s/resharding/resharding_donor_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_service.cpp
@@ -52,6 +52,7 @@
#include "mongo/db/s/resharding/resharding_future_util.h"
#include "mongo/db/s/resharding/resharding_server_parameters_gen.h"
#include "mongo/db/s/resharding/resharding_util.h"
+#include "mongo/db/s/sharding_ddl_util.h"
#include "mongo/db/s/sharding_recovery_service.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/write_block_bypass.h"
@@ -823,7 +824,7 @@ void ReshardingDonorService::DonorStateMachine::_dropOriginalCollectionThenTrans
// Allow bypassing user write blocking. The check has already been performed on the
// db-primary shard's ReshardCollectionCoordinator.
WriteBlockBypass::get(opCtx.get()).set(true);
- resharding::data_copy::ensureCollectionDropped(
+ mongo::sharding_ddl_util::ensureCollectionDroppedNoChangeEvent(
opCtx.get(), _metadata.getSourceNss(), _metadata.getSourceUUID());
}
diff --git a/src/mongo/db/s/resharding/resharding_donor_service_test.cpp b/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
index 53c0044ae75..f2ea3c60e4b 100644
--- a/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
@@ -49,6 +49,7 @@
#include "mongo/db/s/resharding/resharding_donor_service.h"
#include "mongo/db/s/resharding/resharding_service_test_helpers.h"
#include "mongo/db/s/resharding/resharding_util.h"
+#include "mongo/db/s/sharding_ddl_util.h"
#include "mongo/db/s/sharding_recovery_service.h"
#include "mongo/db/service_context.h"
#include "mongo/logv2/log.h"
@@ -165,7 +166,8 @@ public:
void createSourceCollection(OperationContext* opCtx, const ReshardingDonorDocument& donorDoc) {
CollectionOptions options;
options.uuid = donorDoc.getSourceUUID();
- resharding::data_copy::ensureCollectionDropped(opCtx, donorDoc.getSourceNss());
+ mongo::sharding_ddl_util::ensureCollectionDroppedNoChangeEvent(opCtx,
+ donorDoc.getSourceNss());
resharding::data_copy::ensureCollectionExists(opCtx, donorDoc.getSourceNss(), options);
}
@@ -173,7 +175,8 @@ public:
const ReshardingDonorDocument& donorDoc) {
CollectionOptions options;
options.uuid = donorDoc.getReshardingUUID();
- resharding::data_copy::ensureCollectionDropped(opCtx, donorDoc.getTempReshardingNss());
+ mongo::sharding_ddl_util::ensureCollectionDroppedNoChangeEvent(
+ opCtx, donorDoc.getTempReshardingNss());
resharding::data_copy::ensureCollectionExists(
opCtx, donorDoc.getTempReshardingNss(), options);
}
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service.cpp b/src/mongo/db/s/resharding/resharding_recipient_service.cpp
index 00bc2433322..0216f048155 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service.cpp
@@ -52,6 +52,7 @@
#include "mongo/db/s/resharding/resharding_recipient_service_external_state.h"
#include "mongo/db/s/resharding/resharding_server_parameters_gen.h"
#include "mongo/db/s/shard_key_util.h"
+#include "mongo/db/s/sharding_ddl_util.h"
#include "mongo/db/s/sharding_recovery_service.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/write_block_bypass.h"
@@ -829,7 +830,7 @@ void ReshardingRecipientService::RecipientStateMachine::_cleanupReshardingCollec
opCtx.get(), _metadata.getReshardingUUID(), _metadata.getSourceUUID(), _donorShards);
if (aborted) {
- resharding::data_copy::ensureCollectionDropped(
+ mongo::sharding_ddl_util::ensureCollectionDroppedNoChangeEvent(
opCtx.get(), _metadata.getTempReshardingNss(), _metadata.getReshardingUUID());
}
}
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
index 1f21ea9342b..ee2325bf6ba 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
@@ -47,6 +47,7 @@
#include "mongo/db/s/resharding/resharding_recipient_service.h"
#include "mongo/db/s/resharding/resharding_recipient_service_external_state.h"
#include "mongo/db/s/resharding/resharding_service_test_helpers.h"
+#include "mongo/db/s/sharding_ddl_util.h"
#include "mongo/db/service_context.h"
#include "mongo/idl/server_parameter_test_util.h"
#include "mongo/logv2/log.h"
@@ -267,7 +268,8 @@ public:
const ReshardingRecipientDocument& recipientDoc) {
CollectionOptions options;
options.uuid = recipientDoc.getSourceUUID();
- resharding::data_copy::ensureCollectionDropped(opCtx, recipientDoc.getSourceNss());
+ mongo::sharding_ddl_util::ensureCollectionDroppedNoChangeEvent(opCtx,
+ recipientDoc.getSourceNss());
resharding::data_copy::ensureCollectionExists(opCtx, recipientDoc.getSourceNss(), options);
}
@@ -275,7 +277,8 @@ public:
const ReshardingRecipientDocument& recipientDoc) {
CollectionOptions options;
options.uuid = recipientDoc.getReshardingUUID();
- resharding::data_copy::ensureCollectionDropped(opCtx, recipientDoc.getTempReshardingNss());
+ mongo::sharding_ddl_util::ensureCollectionDroppedNoChangeEvent(
+ opCtx, recipientDoc.getTempReshardingNss());
resharding::data_copy::ensureCollectionExists(
opCtx, recipientDoc.getTempReshardingNss(), options);
}
diff --git a/src/mongo/db/s/sharding_ddl_coordinator.h b/src/mongo/db/s/sharding_ddl_coordinator.h
index c404df4445b..a3c44f8ae4c 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator.h
+++ b/src/mongo/db/s/sharding_ddl_coordinator.h
@@ -352,7 +352,7 @@ protected:
}
}
- // lazily acqiure Logical Session ID and a txn number
+ // lazily acquire Logical Session ID and a txn number
void _updateSession(OperationContext* opCtx) {
auto newDoc = [&] {
stdx::lock_guard lk{_docMutex};
diff --git a/src/mongo/db/s/sharding_ddl_util.cpp b/src/mongo/db/s/sharding_ddl_util.cpp
index 75b43813718..c53e01c1c2f 100644
--- a/src/mongo/db/s/sharding_ddl_util.cpp
+++ b/src/mongo/db/s/sharding_ddl_util.cpp
@@ -32,6 +32,7 @@
#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/commands/feature_compatibility_version.h"
+#include "mongo/db/concurrency/exception_util.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/namespace_string.h"
@@ -625,8 +626,11 @@ void sendDropCollectionParticipantCommandToShards(OperationContext* opCtx,
const NamespaceString& nss,
const std::vector<ShardId>& shardIds,
std::shared_ptr<executor::TaskExecutor> executor,
- const OperationSessionInfo& osi) {
- const ShardsvrDropCollectionParticipant dropCollectionParticipant(nss);
+ const OperationSessionInfo& osi,
+ bool fromMigrate) {
+ ShardsvrDropCollectionParticipant dropCollectionParticipant(nss);
+ dropCollectionParticipant.setFromMigrate(fromMigrate);
+
const auto cmdObj =
CommandHelpers::appendMajorityWriteConcern(dropCollectionParticipant.toBSON({}));
@@ -639,5 +643,29 @@ BSONObj getCriticalSectionReasonForRename(const NamespaceString& from, const Nam
<< "rename"
<< "from" << from.toString() << "to" << to.toString());
}
+
+void ensureCollectionDroppedNoChangeEvent(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const boost::optional<UUID>& uuid) {
+ invariant(!opCtx->lockState()->isLocked());
+ invariant(!opCtx->lockState()->inAWriteUnitOfWork());
+
+ writeConflictRetry(opCtx,
+ "mongo::sharding_ddl_util::ensureCollectionDroppedNoChangeEvent",
+ nss.toString(),
+ [&] {
+ AutoGetCollection coll(opCtx, nss, MODE_X);
+ if (!coll || (uuid && coll->uuid() != uuid)) {
+ // If the collection doesn't exist or exists with a different UUID,
+ // then the requested collection has been dropped already.
+ return;
+ }
+
+ WriteUnitOfWork wuow(opCtx);
+ uassertStatusOK(coll.getDb()->dropCollectionEvenIfSystem(
+ opCtx, nss, {} /* dropOpTime */, true /* markFromMigrate */));
+ wuow.commit();
+ });
+}
} // namespace sharding_ddl_util
} // namespace mongo
diff --git a/src/mongo/db/s/sharding_ddl_util.h b/src/mongo/db/s/sharding_ddl_util.h
index d65998494a7..6b907f0e08f 100644
--- a/src/mongo/db/s/sharding_ddl_util.h
+++ b/src/mongo/db/s/sharding_ddl_util.h
@@ -203,9 +203,21 @@ void sendDropCollectionParticipantCommandToShards(OperationContext* opCtx,
const NamespaceString& nss,
const std::vector<ShardId>& shardIds,
std::shared_ptr<executor::TaskExecutor> executor,
- const OperationSessionInfo& osi);
+ const OperationSessionInfo& osi,
+ bool fromMigrate);
BSONObj getCriticalSectionReasonForRename(const NamespaceString& from, const NamespaceString& to);
+/**
+ * Drops the specified collection or returns without error if the collection has already been
+ * dropped. A particular incarnation of the collection can be dropped by specifying its UUID.
+ *
+ * This function assumes the collection being dropped doesn't have any two-phase index builds
+ * active on it.
+ */
+void ensureCollectionDroppedNoChangeEvent(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const boost::optional<UUID>& uuid = boost::none);
+
} // namespace sharding_ddl_util
} // namespace mongo
diff --git a/src/mongo/db/s/shardsvr_drop_collection_participant_command.cpp b/src/mongo/db/s/shardsvr_drop_collection_participant_command.cpp
index ab9849b55ac..a30231f3fd0 100644
--- a/src/mongo/db/s/shardsvr_drop_collection_participant_command.cpp
+++ b/src/mongo/db/s/shardsvr_drop_collection_participant_command.cpp
@@ -87,7 +87,10 @@ public:
opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
try {
- DropCollectionCoordinator::dropCollectionLocally(opCtx, ns());
+ bool fromMigrate =
+ request().getFromMigrate() ? request().getFromMigrate().value() : false;
+
+ DropCollectionCoordinator::dropCollectionLocally(opCtx, ns(), fromMigrate);
} catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
LOGV2_DEBUG(5280920,
1,
diff --git a/src/mongo/s/request_types/sharded_ddl_commands.idl b/src/mongo/s/request_types/sharded_ddl_commands.idl
index c8ec71bc979..95d62f2113c 100644
--- a/src/mongo/s/request_types/sharded_ddl_commands.idl
+++ b/src/mongo/s/request_types/sharded_ddl_commands.idl
@@ -335,6 +335,11 @@ commands:
api_version: ""
cpp_name: ShardsvrDropCollectionParticipant
strict: false
+ fields:
+ fromMigrate:
+ type: bool
+ description: "Whether the drop comes as a result of an interrupted migration process."
+ optional: true
_shardsvrRenameCollection:
command_name: _shardsvrRenameCollection