author     Pierlauro Sciarelli <pierlauro.sciarelli@mongodb.com>  2021-04-28 18:04:24 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>       2021-04-28 18:30:32 +0000
commit     fa47fcd4c4aeb16c644918886ed0337a88ad6007 (patch)
tree       a5727bc0ded966944159e45ea68954a02654e988 /src
parent     75a84d831d04c13ee6d0578f2b22b01e234c9737 (diff)
download   mongo-fa47fcd4c4aeb16c644918886ed0337a88ad6007.tar.gz
SERVER-55151 Implement rename participant primary only service
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/mongod_main.cpp                                          2
-rw-r--r--  src/mongo/db/namespace_string.cpp                                     3
-rw-r--r--  src/mongo/db/namespace_string.h                                       3
-rw-r--r--  src/mongo/db/s/SConscript                                             3
-rw-r--r--  src/mongo/db/s/rename_collection_coordinator.cpp                     78
-rw-r--r--  src/mongo/db/s/rename_collection_coordinator.h                        2
-rw-r--r--  src/mongo/db/s/rename_collection_participant_service.cpp            367
-rw-r--r--  src/mongo/db/s/rename_collection_participant_service.h              174
-rw-r--r--  src/mongo/db/s/sharded_rename_collection.idl (renamed from src/mongo/db/s/rename_collection_coordinator_document.idl)  64
-rw-r--r--  src/mongo/db/s/sharding_ddl_util.cpp                                  28
-rw-r--r--  src/mongo/db/s/shardsvr_rename_collection_command.cpp                 12
-rw-r--r--  src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp   120
-rw-r--r--  src/mongo/s/request_types/sharded_ddl_commands.idl                    22
13 files changed, 724 insertions, 154 deletions
diff --git a/src/mongo/db/mongod_main.cpp b/src/mongo/db/mongod_main.cpp
index 87ef0c7a245..cd4ae044047 100644
--- a/src/mongo/db/mongod_main.cpp
+++ b/src/mongo/db/mongod_main.cpp
@@ -139,6 +139,7 @@
#include "mongo/db/s/migration_util.h"
#include "mongo/db/s/op_observer_sharding_impl.h"
#include "mongo/db/s/periodic_sharded_index_consistency_checker.h"
+#include "mongo/db/s/rename_collection_participant_service.h"
#include "mongo/db/s/resharding/resharding_coordinator_service.h"
#include "mongo/db/s/resharding/resharding_donor_service.h"
#include "mongo/db/s/resharding/resharding_op_observer.h"
@@ -317,6 +318,7 @@ void registerPrimaryOnlyServices(ServiceContext* serviceContext) {
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
services.push_back(std::make_unique<ReshardingCoordinatorService>(serviceContext));
} else if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+ services.push_back(std::make_unique<RenameCollectionParticipantService>(serviceContext));
services.push_back(std::make_unique<ShardingDDLCoordinatorService>(serviceContext));
services.push_back(std::make_unique<ReshardingDonorService>(serviceContext));
services.push_back(std::make_unique<ReshardingRecipientService>(serviceContext));
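
Note: the hunk above registers the new participant service as a PrimaryOnlyService on shard servers only. A rough sketch of how that registration is consumed elsewhere in this patch (names come from the files added further down; `participantDoc` is assumed to be a RenameCollectionParticipantDocument built by the caller):

    // Resolve the registered service, then create or join the instance for this rename.
    auto* registry = repl::PrimaryOnlyServiceRegistry::get(opCtx->getServiceContext());
    auto* service = checked_cast<RenameCollectionParticipantService*>(
        registry->lookupServiceByName(RenameCollectionParticipantService::kServiceName));
    auto instance =
        RenameParticipantInstance::getOrCreate(opCtx, service, participantDoc.toBSON());
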
diff --git a/src/mongo/db/namespace_string.cpp b/src/mongo/db/namespace_string.cpp
index 3e4ebaca039..d8e0ad191f4 100644
--- a/src/mongo/db/namespace_string.cpp
+++ b/src/mongo/db/namespace_string.cpp
@@ -107,6 +107,9 @@ const NamespaceString NamespaceString::kRecipientReshardingOperationsNamespace(
const NamespaceString NamespaceString::kShardingDDLCoordinatorsNamespace(
NamespaceString::kConfigDb, "system.sharding_ddl_coordinators");
+const NamespaceString NamespaceString::kShardingRenameParticipantsNamespace(
+ NamespaceString::kConfigDb, "localRenameParticipants");
+
const NamespaceString NamespaceString::kConfigSettingsNamespace(NamespaceString::kConfigDb,
"settings");
const NamespaceString NamespaceString::kVectorClockNamespace(NamespaceString::kConfigDb,
diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h
index 5545129a4ff..7b3a78f1120 100644
--- a/src/mongo/db/namespace_string.h
+++ b/src/mongo/db/namespace_string.h
@@ -157,6 +157,9 @@ public:
// Namespace for persisting sharding DDL coordinators state documents
static const NamespaceString kShardingDDLCoordinatorsNamespace;
+ // Namespace for persisting sharding DDL rename participant state documents
+ static const NamespaceString kShardingRenameParticipantsNamespace;
+
// Namespace for balancer settings and default read and write concerns.
static const NamespaceString kConfigSettingsNamespace;
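
Note: the new config.localRenameParticipants collection holds one state document per in-progress rename on the shard. A minimal sketch of how such a document is produced, assuming `fromNss`, `sourceUUID`, `renameRequest` and `opCtx` are in scope (the layout itself is defined by the RenameCollectionParticipantDocument IDL added later in this patch):

    // Build the participant state document; its _id is the source namespace.
    RenameCollectionParticipantDocument doc(
        fromNss, ForwardableOperationMetadata(opCtx), sourceUUID);
    doc.setRenameCollectionRequest(renameRequest);  // chained: to, dropTarget, stayTemp, ...
    // The POS instance persists doc.toBSON() into config.localRenameParticipants and keeps
    // the `phase` field up to date as its state machine advances.
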
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 36f54afd5ba..30667281232 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -337,7 +337,8 @@ env.Library(
'move_chunk_command.cpp',
'move_primary_coordinator.cpp',
'rename_collection_coordinator.cpp',
- 'rename_collection_coordinator_document.idl',
+ 'sharded_rename_collection.idl',
+ 'rename_collection_participant_service.cpp',
'refine_collection_shard_key_coordinator.cpp',
'reshard_collection_coordinator.cpp',
'resharding_test_commands.cpp',
diff --git a/src/mongo/db/s/rename_collection_coordinator.cpp b/src/mongo/db/s/rename_collection_coordinator.cpp
index aab8f99b31b..c80d696e8a0 100644
--- a/src/mongo/db/s/rename_collection_coordinator.cpp
+++ b/src/mongo/db/s/rename_collection_coordinator.cpp
@@ -38,6 +38,7 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/ops/insert.h"
#include "mongo/db/persistent_task_store.h"
+#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/sharding_ddl_util.h"
#include "mongo/db/s/sharding_logging.h"
#include "mongo/db/s/sharding_state.h"
@@ -79,7 +80,7 @@ void RenameCollectionCoordinator::checkIfOptionsConflict(const BSONObj& doc) con
uassert(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Another rename collection for namespace " << nss()
- << "is being executed with different parameters: " << selfReq,
+ << " is being executed with different parameters: " << selfReq,
SimpleBSONObjComparator::kInstance.evaluate(selfReq == otherReq));
}
@@ -174,12 +175,15 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
// Make sure the source collection exists
const auto optSourceCollType = getShardedCollection(opCtx, nss());
const bool sourceIsSharded = (bool)optSourceCollType;
+
if (sourceIsSharded) {
uassert(ErrorCodes::CommandFailed,
str::stream() << "Source and destination collections must be on the "
"same database because "
<< fromNss << " is sharded.",
fromNss.db() == toNss.db());
+ _doc.setOptShardedCollInfo(optSourceCollType);
+ _doc.setSourceUUID(optSourceCollType->getUuid());
} else {
Lock::DBLock dbLock(opCtx, fromNss.db(), MODE_IS);
Lock::CollectionLock collLock(opCtx, fromNss, MODE_IS);
@@ -193,9 +197,9 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
if (fromNss.db() != toNss.db()) {
sharding_ddl_util::checkDbPrimariesOnTheSameShard(opCtx, fromNss, toNss);
}
- }
- _doc.setOptShardedCollInfo(optSourceCollType);
+ _doc.setSourceUUID(sourceCollPtr->uuid());
+ }
// Make sure the target namespace is not a view
{
@@ -231,36 +235,38 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
BSON("source" << fromNss.toString() << "destination" << toNss.toString()),
ShardingCatalogClient::kMajorityWriteConcern);
}))
- .then(_executePhase(
- Phase::kBlockCRUDAndRename,
- [this, executor = executor, anchor = shared_from_this()] {
- auto opCtxHolder = cc().makeOperationContext();
- auto* opCtx = opCtxHolder.get();
- getForwardableOpMetadata().setOn(opCtx);
-
- const auto& fromNss = nss();
-
- // On participant shards:
- // - Block CRUD on source and target collection in case at least one
- // of such collections is currently sharded.
- // - Locally drop the target collection
- // - Locally rename source to target
- ShardsvrRenameCollectionParticipant renameCollParticipantRequest(fromNss);
- renameCollParticipantRequest.setDbName(fromNss.db());
- renameCollParticipantRequest.setRenameCollectionRequest(
- _doc.getRenameCollectionRequest());
-
- auto participants = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx);
- // We need to send the command to all the shards because both movePrimary and
- // moveChunk leave garbage behind for sharded collections.
- sharding_ddl_util::sendAuthenticatedCommandToShards(
- opCtx,
- fromNss.db(),
- CommandHelpers::appendMajorityWriteConcern(
- renameCollParticipantRequest.toBSON({})),
- participants,
- **executor);
- }))
+ .then(_executePhase(Phase::kBlockCRUDAndRename,
+ [this, executor = executor, anchor = shared_from_this()] {
+ auto opCtxHolder = cc().makeOperationContext();
+ auto* opCtx = opCtxHolder.get();
+ getForwardableOpMetadata().setOn(opCtx);
+
+ const auto& fromNss = nss();
+
+ // On participant shards:
+ // - Block CRUD on source and target collection in case at least one
+ // of such collections is currently sharded.
+ // - Locally drop the target collection
+ // - Locally rename source to target
+ ShardsvrRenameCollectionParticipant renameCollParticipantRequest(
+ fromNss, _doc.getSourceUUID().get());
+ renameCollParticipantRequest.setDbName(fromNss.db());
+ renameCollParticipantRequest.setRenameCollectionRequest(
+ _doc.getRenameCollectionRequest());
+
+ auto participants =
+ Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx);
+ // We need to send the command to all the shards because both
+ // movePrimary and moveChunk leave garbage behind for sharded
+ // collections.
+ sharding_ddl_util::sendAuthenticatedCommandToShards(
+ opCtx,
+ fromNss.db(),
+ CommandHelpers::appendMajorityWriteConcern(
+ renameCollParticipantRequest.toBSON({})),
+ participants,
+ **executor);
+ }))
.then(_executePhase(
Phase::kRenameMetadata,
[this, anchor = shared_from_this()] {
@@ -289,9 +295,11 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
// On participant shards:
// - Unblock CRUD on participants for both source and destination collections
- ShardsvrRenameCollectionUnblockParticipant unblockParticipantRequest(fromNss);
+ ShardsvrRenameCollectionUnblockParticipant unblockParticipantRequest(
+ fromNss, _doc.getSourceUUID().get());
unblockParticipantRequest.setDbName(fromNss.db());
- unblockParticipantRequest.setTo(_doc.getTo());
+ unblockParticipantRequest.setRenameCollectionRequest(
+ _doc.getRenameCollectionRequest());
auto participants = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx);
sharding_ddl_util::sendAuthenticatedCommandToShards(
diff --git a/src/mongo/db/s/rename_collection_coordinator.h b/src/mongo/db/s/rename_collection_coordinator.h
index ca1ef068f07..e753b946693 100644
--- a/src/mongo/db/s/rename_collection_coordinator.h
+++ b/src/mongo/db/s/rename_collection_coordinator.h
@@ -29,7 +29,7 @@
#pragma once
-#include "mongo/db/s/rename_collection_coordinator_document_gen.h"
+#include "mongo/db/s/sharded_rename_collection_gen.h"
#include "mongo/db/s/sharding_ddl_coordinator.h"
#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
diff --git a/src/mongo/db/s/rename_collection_participant_service.cpp b/src/mongo/db/s/rename_collection_participant_service.cpp
new file mode 100644
index 00000000000..a15c8ed4e07
--- /dev/null
+++ b/src/mongo/db/s/rename_collection_participant_service.cpp
@@ -0,0 +1,367 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/base/checked_cast.h"
+#include "mongo/db/catalog/collection_catalog.h"
+#include "mongo/db/catalog/rename_collection.h"
+#include "mongo/db/persistent_task_store.h"
+#include "mongo/db/s/collection_sharding_runtime.h"
+#include "mongo/db/s/database_sharding_state.h"
+#include "mongo/db/s/operation_sharding_state.h"
+#include "mongo/db/s/range_deletion_util.h"
+#include "mongo/db/s/rename_collection_participant_service.h"
+#include "mongo/db/s/shard_metadata_util.h"
+#include "mongo/db/s/sharding_ddl_util.h"
+#include "mongo/logv2/log.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
+#include "mongo/s/grid.h"
+
+namespace mongo {
+
+namespace {
+
+/*
+ * Drop the collection locally and clear stale metadata from cache collections.
+ */
+void dropCollectionLocally(OperationContext* opCtx, const NamespaceString& nss) {
+ bool knownNss = [&]() {
+ try {
+ sharding_ddl_util::dropCollectionLocally(opCtx, nss);
+ return true;
+ } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
+ return false;
+ }
+ }();
+
+ if (knownNss) {
+ uassertStatusOK(shardmetadatautil::dropChunksAndDeleteCollectionsEntry(opCtx, nss));
+ }
+
+ LOGV2_DEBUG(5515100,
+ 1,
+ "Dropped target collection locally on renameCollection participant",
+ "namespace"_attr = nss,
+ "collectionExisted"_attr = knownNss);
+}
+
+/*
+ * Rename the collection if it exists locally; otherwise simply drop the target collection.
+ */
+void renameOrDropTarget(OperationContext* opCtx,
+ const NamespaceString& fromNss,
+ const NamespaceString& toNss,
+ const RenameCollectionOptions& options,
+ const UUID& sourceUUID) {
+ {
+ Lock::DBLock dbLock(opCtx, toNss.db(), MODE_IS);
+ Lock::CollectionLock collLock(opCtx, toNss, MODE_IS);
+ const auto targetCollPtr =
+ CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, toNss);
+ if (targetCollPtr && targetCollPtr->uuid() == sourceUUID) {
+ // Early return if the rename previously succeeded
+ return;
+ }
+ }
+
+ try {
+ validateAndRunRenameCollection(opCtx, fromNss, toNss, options);
+ } catch (ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
+ // It's ok for a participant shard to have no knowledge about a collection
+ LOGV2_DEBUG(5515101,
+ 1,
+ "Source namespace not found while trying to rename collection on participant",
+ "namespace"_attr = fromNss);
+ dropCollectionLocally(opCtx, toNss);
+ deleteRangeDeletionTasksForRename(opCtx, fromNss, toNss);
+ }
+}
+} // namespace
+
+RenameCollectionParticipantService* RenameCollectionParticipantService::getService(
+ OperationContext* opCtx) {
+ auto registry = repl::PrimaryOnlyServiceRegistry::get(opCtx->getServiceContext());
+ auto service = registry->lookupServiceByName(kServiceName);
+ return checked_cast<RenameCollectionParticipantService*>(std::move(service));
+}
+
+std::shared_ptr<RenameCollectionParticipantService::Instance>
+RenameCollectionParticipantService::constructInstance(BSONObj initialState) {
+ LOGV2_DEBUG(5515102,
+ 2,
+ "Constructing new rename participant",
+ "renameParticipantDoc"_attr = initialState);
+ return std::make_shared<RenameParticipantInstance>(std::move(initialState));
+}
+
+RenameParticipantInstance::~RenameParticipantInstance() {
+ invariant(_unblockCRUDPromise.getFuture().isReady());
+}
+
+bool RenameParticipantInstance::hasSameOptions(const BSONObj& participantDoc) {
+ const auto otherDoc = RenameCollectionParticipantDocument::parse(
+ IDLParserErrorContext("RenameCollectionParticipantDocument"), participantDoc);
+
+ const auto& selfReq = _doc.getRenameCollectionRequest().toBSON();
+ const auto& otherReq = otherDoc.getRenameCollectionRequest().toBSON();
+
+ return SimpleBSONObjComparator::kInstance.evaluate(selfReq == otherReq);
+}
+
+boost::optional<BSONObj> RenameParticipantInstance::reportForCurrentOp(
+ MongoProcessInterface::CurrentOpConnectionsMode connMode,
+ MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
+
+ BSONObjBuilder cmdBob;
+ if (const auto& optComment = _doc.getForwardableOpMetadata().getComment()) {
+ cmdBob.append(optComment.get().firstElement());
+ }
+ BSONObjBuilder bob;
+ bob.append("type", "op");
+ bob.append("desc", "RenameParticipantInstance");
+ bob.append("op", "command");
+ bob.append("ns", fromNss().toString());
+ bob.append("to", toNss().toString());
+ bob.append("command", cmdBob.obj());
+ bob.append("currentPhase", _doc.getPhase());
+ bob.append("active", true);
+ return bob.obj();
+}
+
+void RenameParticipantInstance::_enterPhase(Phase newPhase) {
+ StateDoc newDoc(_doc);
+ newDoc.setPhase(newPhase);
+
+ LOGV2_DEBUG(5515104,
+ 2,
+ "Rename participant phase transition",
+ "fromNs"_attr = fromNss(),
+ "toNs"_attr = toNss(),
+ "newPhase"_attr = RenameCollectionParticipantPhase_serializer(newDoc.getPhase()),
+ "oldPhase"_attr = RenameCollectionParticipantPhase_serializer(_doc.getPhase()));
+
+ auto opCtx = cc().makeOperationContext();
+ PersistentTaskStore<StateDoc> store(NamespaceString::kShardingRenameParticipantsNamespace);
+
+ if (_doc.getPhase() == Phase::kUnset) {
+ store.add(opCtx.get(), newDoc, WriteConcerns::kMajorityWriteConcern);
+ } else {
+ store.update(opCtx.get(),
+ BSON(StateDoc::kFromNssFieldName << fromNss().ns()),
+ newDoc.toBSON(),
+ WriteConcerns::kMajorityWriteConcern);
+ }
+
+ _doc = std::move(newDoc);
+}
+
+void RenameParticipantInstance::_removeStateDocument(OperationContext* opCtx) {
+ LOGV2_DEBUG(5515105,
+ 2,
+ "Removing state document for rename collection participant",
+ "fromNs"_attr = fromNss(),
+ "toNs"_attr = toNss());
+
+ PersistentTaskStore<StateDoc> store(NamespaceString::kShardingRenameParticipantsNamespace);
+ store.remove(opCtx,
+ BSON(StateDoc::kFromNssFieldName << fromNss().ns()),
+ WriteConcerns::kMajorityWriteConcern);
+
+ _doc = {};
+}
+
+
+void RenameParticipantInstance::_invalidateFutures(const Status& errStatus) {
+ stdx::lock_guard<Latch> lg(_mutex);
+ if (!_blockCRUDAndRenameCompletionPromise.getFuture().isReady()) {
+ _blockCRUDAndRenameCompletionPromise.setError(errStatus);
+ }
+
+ if (!_canUnblockCRUDPromise.getFuture().isReady()) {
+ _canUnblockCRUDPromise.setError(errStatus);
+ }
+
+ if (!_unblockCRUDPromise.getFuture().isReady()) {
+ _unblockCRUDPromise.setError(errStatus);
+ }
+}
+
+SemiFuture<void> RenameParticipantInstance::run(
+ std::shared_ptr<executor::ScopedTaskExecutor> executor,
+ const CancellationToken& token) noexcept {
+ return ExecutorFuture<void>(**executor)
+ .then(_executePhase(
+ Phase::kBlockCRUDAndSnapshotRangeDeletions,
+ [this, anchor = shared_from_this()] {
+ auto opCtxHolder = cc().makeOperationContext();
+ auto* opCtx = opCtxHolder.get();
+
+ // Acquire source/target critical sections
+ const auto reason =
+ BSON("command"
+ << "rename"
+ << "from" << fromNss().toString() << "to" << toNss().toString());
+ sharding_ddl_util::acquireRecoverableCriticalSectionBlockWrites(
+ opCtx, fromNss(), reason, ShardingCatalogClient::kLocalWriteConcern);
+ sharding_ddl_util::acquireRecoverableCriticalSectionBlockReads(
+ opCtx, fromNss(), reason, ShardingCatalogClient::kLocalWriteConcern);
+ sharding_ddl_util::acquireRecoverableCriticalSectionBlockWrites(
+ opCtx, toNss(), reason, ShardingCatalogClient::kLocalWriteConcern);
+ sharding_ddl_util::acquireRecoverableCriticalSectionBlockReads(
+ opCtx, toNss(), reason, ShardingCatalogClient::kLocalWriteConcern);
+
+ snapshotRangeDeletionsForRename(opCtx, fromNss(), toNss());
+ }))
+ .then(_executePhase(
+ Phase::kRenameLocalAndRestoreRangeDeletions,
+ [this, anchor = shared_from_this()] {
+ auto opCtxHolder = cc().makeOperationContext();
+ auto* opCtx = opCtxHolder.get();
+ _doc.getForwardableOpMetadata().setOn(opCtx);
+
+ const RenameCollectionOptions options{_doc.getDropTarget(), _doc.getStayTemp()};
+ renameOrDropTarget(opCtx, fromNss(), toNss(), options, _doc.getSourceUUID());
+
+ restoreRangeDeletionTasksForRename(opCtx, toNss());
+ }))
+ .then(
+ _executePhase(Phase::kDeleteFromRangeDeletions,
+ [this, anchor = shared_from_this()] {
+ auto opCtxHolder = cc().makeOperationContext();
+ auto* opCtx = opCtxHolder.get();
+ deleteRangeDeletionTasksForRename(opCtx, fromNss(), toNss());
+
+ {
+ stdx::lock_guard<Latch> lg(_mutex);
+ if (!_blockCRUDAndRenameCompletionPromise.getFuture().isReady()) {
+ _blockCRUDAndRenameCompletionPromise.setFrom(Status::OK());
+ }
+ }
+
+ LOGV2(5515106,
+ "Collection locally renamed, waiting for CRUD to be unblocked",
+ "fromNs"_attr = fromNss(),
+ "toNs"_attr = toNss());
+
+ // TODO SERVER-56380 Wait asynchronously
+ _canUnblockCRUDPromise.getFuture().get(opCtx);
+ }))
+ .then(_executePhase(
+ Phase::kUnblockCRUD,
+ [this, anchor = shared_from_this()] {
+ auto opCtxHolder = cc().makeOperationContext();
+ auto* opCtx = opCtxHolder.get();
+
+ // Release source/target critical sections
+ const auto reason =
+ BSON("command"
+ << "rename"
+ << "from" << fromNss().toString() << "to" << toNss().toString());
+ sharding_ddl_util::releaseRecoverableCriticalSection(
+ opCtx, fromNss(), reason, ShardingCatalogClient::kLocalWriteConcern);
+ sharding_ddl_util::releaseRecoverableCriticalSection(
+ opCtx, toNss(), reason, ShardingCatalogClient::kMajorityWriteConcern);
+
+ Grid::get(opCtx)->catalogCache()->invalidateCollectionEntry_LINEARIZABLE(fromNss());
+ Grid::get(opCtx)->catalogCache()->invalidateCollectionEntry_LINEARIZABLE(toNss());
+
+ LOGV2(5515107, "CRUD unblocked", "fromNs"_attr = fromNss(), "toNs"_attr = toNss());
+ }))
+ .onCompletion([this, anchor = shared_from_this()](const Status& status) {
+ if (!status.isOK()) {
+ if (!status.isA<ErrorCategory::NotPrimaryError>() &&
+ !status.isA<ErrorCategory::ShutdownError>()) {
+ LOGV2_ERROR(5515109,
+ "Error executing rename collection participant",
+ "fromNs"_attr = fromNss(),
+ "toNs"_attr = toNss(),
+ "error"_attr = redact(status));
+ }
+
+ _invalidateFutures(status);
+ return;
+ }
+
+ try {
+ auto opCtxHolder = cc().makeOperationContext();
+ auto* opCtx = opCtxHolder.get();
+ _removeStateDocument(opCtx);
+ } catch (DBException& ex) {
+ LOGV2_WARNING(5515108,
+ "Failed to remove rename participant state document",
+ "error"_attr = redact(ex));
+ ex.addContext("Failed to remove rename participant state document"_sd);
+ stdx::lock_guard<Latch> lg(_mutex);
+ if (!_unblockCRUDPromise.getFuture().isReady()) {
+ _unblockCRUDPromise.setError(ex.toStatus());
+ }
+ throw;
+ }
+
+ stdx::lock_guard<Latch> lg(_mutex);
+ if (!_unblockCRUDPromise.getFuture().isReady()) {
+ _unblockCRUDPromise.emplaceValue();
+ }
+ })
+ .semi();
+}
+
+void RenameParticipantInstance::interrupt(Status status) noexcept {
+ LOGV2_DEBUG(5515110,
+ 2,
+ "Interrupt while running rename collection on participant",
+ "fromNs"_attr = fromNss(),
+ "toNs"_attr = toNss(),
+ "error"_attr = redact(status));
+
+ auto releaseInMemoryCritSec = [](const NamespaceString& nss) {
+ auto client = cc().getServiceContext()->makeClient("RenameParticipantCleanupClient");
+ AlternativeClientRegion acr(client);
+ auto opCtxHolder = cc().makeOperationContext();
+ auto* opCtx = opCtxHolder.get();
+
+ UninterruptibleLockGuard noInterrupt(opCtx->lockState());
+ auto* const csr = CollectionShardingRuntime::get_UNSAFE(opCtx->getServiceContext(), nss);
+ auto csrLock = CollectionShardingRuntime::CSRLock::lockExclusive(opCtx, csr);
+ csr->exitCriticalSection(csrLock);
+ csr->clearFilteringMetadata(opCtx);
+ };
+
+ invariant(status.isA<ErrorCategory::NotPrimaryError>() ||
+ status.isA<ErrorCategory::ShutdownError>());
+
+ releaseInMemoryCritSec(fromNss());
+ releaseInMemoryCritSec(toNss());
+ _invalidateFutures(status);
+}
+
+} // namespace mongo
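
Note: from the command handlers' point of view, the instance above is driven through two futures. A minimal usage sketch, assuming `instance` is the RenameParticipantInstance obtained via getOrCreate/lookup:

    // _shardsvrRenameCollectionParticipant: returns once the target is dropped and the
    // source is locally renamed (CRUD is still blocked by the critical sections).
    instance->getBlockCRUDAndRenameCompletionFuture().get(opCtx);

    // _shardsvrRenameCollectionParticipantUnblock: fulfills _canUnblockCRUDPromise so the
    // kUnblockCRUD phase can release the critical sections, then waits for it to finish.
    instance->getUnblockCrudFuture().get(opCtx);
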
diff --git a/src/mongo/db/s/rename_collection_participant_service.h b/src/mongo/db/s/rename_collection_participant_service.h
new file mode 100644
index 00000000000..9a2c885916d
--- /dev/null
+++ b/src/mongo/db/s/rename_collection_participant_service.h
@@ -0,0 +1,174 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/repl/primary_only_service.h"
+#include "mongo/db/s/sharded_rename_collection_gen.h"
+
+namespace mongo {
+
+class RenameCollectionParticipantService final : public repl::PrimaryOnlyService {
+public:
+ static constexpr StringData kServiceName = "RenameCollectionParticipantService"_sd;
+
+ explicit RenameCollectionParticipantService(ServiceContext* serviceContext)
+ : PrimaryOnlyService(serviceContext) {}
+
+ ~RenameCollectionParticipantService() = default;
+
+ static RenameCollectionParticipantService* getService(OperationContext* opCtx);
+
+ StringData getServiceName() const override {
+ return kServiceName;
+ }
+
+ NamespaceString getStateDocumentsNS() const override {
+ return NamespaceString::kShardingRenameParticipantsNamespace;
+ }
+
+ ThreadPool::Limits getThreadPoolLimits() const override {
+ return ThreadPool::Limits();
+ }
+
+ std::shared_ptr<Instance> constructInstance(BSONObj initialState) override;
+};
+
+/*
+ * POS instance managing a rename operation on a single node.
+ *
+ * At a higher level, a rename operation corresponds to 2 steps.
+ * - STEP 1 (upon receiving `ShardsvrRenameCollectionParticipantCommand`):
+ * -- Block CRUD operations, drop target and rename source collection.
+ *
+ * - STEP 2 (Upon receiving `ShardsvrRenameCollectionUnblockParticipantCommand`):
+ * -- Unblock CRUD operations.
+ *
+ */
+class RenameParticipantInstance
+ : public repl::PrimaryOnlyService::TypedInstance<RenameParticipantInstance> {
+public:
+ using StateDoc = RenameCollectionParticipantDocument;
+ using Phase = RenameCollectionParticipantPhaseEnum;
+
+ explicit RenameParticipantInstance(const BSONObj& participantDoc)
+ : _doc(RenameCollectionParticipantDocument::parse(
+ IDLParserErrorContext("RenameCollectionParticipantDocument"), participantDoc)) {}
+
+ ~RenameParticipantInstance();
+
+ /*
+ * Check if the given participant document has the same options as the current instance.
+ * If it does, the participant can be joined.
+ */
+ bool hasSameOptions(const BSONObj& participantDoc);
+
+ const BSONObj doc() {
+ return _doc.toBSON();
+ }
+
+ const NamespaceString& fromNss() {
+ return _doc.getFromNss();
+ }
+
+ const NamespaceString& toNss() {
+ return _doc.getTo();
+ }
+
+ /*
+ * Returns a future that will be ready when the local rename is completed.
+ */
+ SharedSemiFuture<void> getBlockCRUDAndRenameCompletionFuture() {
+ return _blockCRUDAndRenameCompletionPromise.getFuture();
+ }
+
+ /*
+ * Flags CRUD operations as ready to be served and returns a future that will be ready right
+ * after releasing the critical section on source and target collection.
+ */
+ SharedSemiFuture<void> getUnblockCrudFuture() {
+ stdx::lock_guard<Latch> lg(_mutex);
+ if (!_canUnblockCRUDPromise.getFuture().isReady()) {
+ _canUnblockCRUDPromise.setFrom(Status::OK());
+ }
+
+ return _unblockCRUDPromise.getFuture();
+ }
+
+ boost::optional<BSONObj> reportForCurrentOp(
+ MongoProcessInterface::CurrentOpConnectionsMode connMode,
+ MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
+
+private:
+ RenameCollectionParticipantDocument _doc;
+
+ SemiFuture<void> run(std::shared_ptr<executor::ScopedTaskExecutor> executor,
+ const CancellationToken& token) noexcept override final;
+
+ void interrupt(Status status) noexcept override final;
+
+ template <typename Func>
+ auto _executePhase(const Phase& newPhase, Func&& func) {
+ return [=] {
+ const auto& currPhase = _doc.getPhase();
+
+ if (currPhase > newPhase) {
+ // Do not execute this phase if we already reached a subsequent one.
+ return;
+ }
+ if (currPhase < newPhase) {
+ // Persist the new phase if this is the first time we are executing it.
+ _enterPhase(newPhase);
+ }
+ return func();
+ };
+ }
+
+ void _removeStateDocument(OperationContext* opCtx);
+ void _enterPhase(Phase newPhase);
+ void _invalidateFutures(const Status& errStatus);
+
+ Mutex _mutex = MONGO_MAKE_LATCH("RenameParticipantInstance::_mutex");
+
+ // Ready when step 1 (drop target && rename source) has been completed: once set, a successful
+ // response to `ShardsvrRenameCollectionParticipantCommand` can be returned to the coordinator.
+ SharedPromise<void> _blockCRUDAndRenameCompletionPromise;
+
+ // Ready when the "unblock CRUD" command has been received: once set, the participant POS can
+ // proceed to unblock CRUD operations.
+ SharedPromise<void> _canUnblockCRUDPromise;
+
+ // Ready when step 2 (unblock CRUD operations) has been completed: once set, a successful
+ // response to `ShardsvrRenameCollectionUnblockParticipantCommand` can be returned to the
+ // coordinator.
+ SharedPromise<void> _unblockCRUDPromise;
+};
+
+} // namespace mongo
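
Note: the _executePhase helper above is what makes the instance resumable after a failover: phases already persisted in the state document are skipped, and the current one is re-run. A self-contained toy illustration of that gating idiom (a sketch only; the production code persists the phase through PersistentTaskStore):

    #include <functional>
    #include <iostream>

    enum class Phase { kUnset, kBlockCRUD, kRenameLocal, kDeleteRangeDeletions, kUnblockCRUD };

    int main() {
        // Pretend the node stepped down after persisting kRenameLocal and is now resuming.
        Phase persisted = Phase::kRenameLocal;

        auto executePhase = [&](Phase newPhase, const std::function<void()>& body) {
            if (persisted > newPhase) {
                return;  // Already past this phase: skip it on resume.
            }
            if (persisted < newPhase) {
                persisted = newPhase;  // First execution: record (persist) the transition.
            }
            body();  // May run more than once across failovers, so it must be idempotent.
        };

        executePhase(Phase::kBlockCRUD, [] { std::cout << "blockCRUD\n"; });          // skipped
        executePhase(Phase::kRenameLocal, [] { std::cout << "renameLocal\n"; });      // re-run
        executePhase(Phase::kDeleteRangeDeletions,
                     [] { std::cout << "deleteRangeDeletions\n"; });                  // first run
        executePhase(Phase::kUnblockCRUD, [] { std::cout << "unblockCRUD\n"; });      // first run
    }
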
diff --git a/src/mongo/db/s/rename_collection_coordinator_document.idl b/src/mongo/db/s/sharded_rename_collection.idl
index a8151d3130f..eeec450fbc3 100644
--- a/src/mongo/db/s/rename_collection_coordinator_document.idl
+++ b/src/mongo/db/s/sharded_rename_collection.idl
@@ -41,7 +41,7 @@ imports:
enums:
RenameCollectionCoordinatorPhase:
- description: "The current state of a drop collection operation on the coordinator."
+ description: "The current state of a rename collection operation on the coordinator."
type: string
values:
kUnset: "unset"
@@ -51,6 +51,16 @@ enums:
kUnblockCRUD: "unblockCRUD"
kSetResponse: "setResponse"
+ RenameCollectionParticipantPhase:
+ description: "The current state of a rename collection operation on a participant."
+ type: string
+ values:
+ kUnset: "unset"
+ kBlockCRUDAndSnapshotRangeDeletions: "blockCRUDAndSnapshotRangeDeletions"
+ kRenameLocalAndRestoreRangeDeletions: "renameLocalAndRestoreRangeDeletions"
+ kDeleteFromRangeDeletions: "deleteFromRangeDeletions"
+ kUnblockCRUD: "unblockCRUD"
+
types:
CollectionInfo:
description: "Information of the sharded collection to rename."
@@ -59,6 +69,35 @@ types:
serializer: "mongo::CollectionType::toBSON"
deserializer: "mongo::CollectionType"
+commands:
+ _shardsvrRenameCollectionParticipant:
+ command_name: _shardsvrRenameCollectionParticipant
+ cpp_name: ShardsvrRenameCollectionParticipant
+ description: "Internal command sent to shard participating in a rename collection procedure"
+ strict: false
+ api_version: ""
+ namespace: concatenate_with_db
+ chained_structs:
+ RenameCollectionRequest: RenameCollectionRequest
+ fields:
+ sourceUUID:
+ type: uuid
+ description: "ID of the collection getting renamed."
+
+ _shardsvrRenameCollectionParticipantUnblock:
+ command_name: _shardsvrRenameCollectionParticipantUnblock
+ cpp_name: ShardsvrRenameCollectionUnblockParticipant
+ description: "Parser for the _shardsvrRenameCollectionParticipantUnblock command"
+ strict: false
+ api_version: ""
+ namespace: concatenate_with_db
+ chained_structs:
+ RenameCollectionRequest: RenameCollectionRequest
+ fields:
+ sourceUUID:
+ type: uuid
+ description: "ID of the collection getting renamed."
+
structs:
RenameCollectionCoordinatorDocument:
description: "Represents a rename collection operation on the coordinator shard."
@@ -79,3 +118,26 @@ structs:
description: "CollectionType of the collection currently being renamed (if sharded)."
type: CollectionInfo
optional: true
+ sourceUUID:
+ type: uuid
+ description: "ID of the collection getting renamed."
+ optional: true
+
+ RenameCollectionParticipantDocument:
+ description: "Represents a rename collection operation on a participant shard."
+ generate_comparison_operators: false
+ strict: false
+ chained_structs:
+ RenameCollectionRequest: RenameCollectionRequest
+ fields:
+ _id:
+ type: namespacestring
+ cpp_name: fromNss
+ phase:
+ type: RenameCollectionParticipantPhase
+ default: kUnset
+ forwardableOpMetadata:
+ type: ForwardableOperationMetadata
+ sourceUUID:
+ type: uuid
+ description: "ID of the collection getting renamed."
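
Note: with the two commands now defined next to the participant document and chained to RenameCollectionRequest, the coordinator builds them as shown in rename_collection_coordinator.cpp above. Roughly, assuming `fromNss`, `sourceUUID` and `renameRequest` are in scope (the exact BSON layout sketched in the comment is approximate):

    ShardsvrRenameCollectionParticipant request(fromNss, sourceUUID);
    request.setDbName(fromNss.db());
    request.setRenameCollectionRequest(renameRequest);
    const BSONObj cmd = CommandHelpers::appendMajorityWriteConcern(request.toBSON({}));
    // Roughly: { _shardsvrRenameCollectionParticipant: "<coll>", sourceUUID: UUID("..."),
    //            to: "<db.target>", dropTarget: <bool>, ..., writeConcern: { w: "majority" } }
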
diff --git a/src/mongo/db/s/sharding_ddl_util.cpp b/src/mongo/db/s/sharding_ddl_util.cpp
index b6df1079544..c7918c12838 100644
--- a/src/mongo/db/s/sharding_ddl_util.cpp
+++ b/src/mongo/db/s/sharding_ddl_util.cpp
@@ -189,9 +189,24 @@ void shardedRenameMetadata(OperationContext* opCtx,
auto fromNss = fromCollType.getNss();
// Delete eventual TO chunk/collection entries referring a dropped collection
- removeCollMetadataFromConfig(opCtx, toNss);
+ try {
+ auto coll = catalogClient->getCollection(opCtx, toNss);
+
+ if (coll.getUuid() == fromCollType.getUuid()) {
+ // Metadata rename already happened
+ return;
+ }
- // Clone FROM tags to TO
+ // Delete TO chunk/collection entries referring to a dropped collection
+ removeCollMetadataFromConfig(opCtx, toNss);
+ } catch (ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
+ // The TO collection is not sharded or doesn't exist
+ }
+
+ // Delete FROM collection entry
+ deleteCollection(opCtx, fromNss);
+
+ // Update FROM tags to TO
updateTags(opCtx, fromNss, toNss);
// Insert the TO collection entry
@@ -201,18 +216,16 @@ void shardedRenameMetadata(OperationContext* opCtx,
CollectionType::ConfigNS,
fromCollType.toBSON(),
ShardingCatalogClient::kMajorityWriteConcern));
-
- deleteCollection(opCtx, fromNss);
}
void checkShardedRenamePreconditions(OperationContext* opCtx,
const NamespaceString& toNss,
const bool dropTarget) {
+ auto catalogClient = Grid::get(opCtx)->catalogClient();
if (!dropTarget) {
// Check that the sharded target collection doesn't exist
- auto catalogCache = Grid::get(opCtx)->catalogCache();
try {
- catalogCache->getShardedCollectionRoutingInfo(opCtx, toNss);
+ catalogClient->getCollection(opCtx, toNss);
// If no exception is thrown, the collection exists and is sharded
uasserted(ErrorCodes::CommandFailed,
str::stream() << "Sharded target collection " << toNss.ns()
@@ -234,7 +247,6 @@ void checkShardedRenamePreconditions(OperationContext* opCtx,
}
// Check that there are no tags associated to the target collection
- auto catalogClient = Grid::get(opCtx)->catalogClient();
auto tags = uassertStatusOK(catalogClient->getTagsForCollection(opCtx, toNss));
uassert(ErrorCodes::CommandFailed,
str::stream() << "Can't rename to target collection " << toNss.ns()
@@ -511,6 +523,7 @@ void retakeInMemoryRecoverableCriticalSections(OperationContext* opCtx) {
AutoGetCollection cCollLock(opCtx, nss, MODE_S);
auto* const csr = CollectionShardingRuntime::get(opCtx, nss);
auto csrLock = CollectionShardingRuntime ::CSRLock::lockExclusive(opCtx, csr);
+
// It may happen that the ReplWriterWorker enters the critical section before drain mode
// upon committing a recoverable critical section oplog entry (SERVER-56104)
if (!csr->getCriticalSectionSignal(
@@ -524,6 +537,7 @@ void retakeInMemoryRecoverableCriticalSections(OperationContext* opCtx) {
AutoGetCollection cCollLock(opCtx, nss, MODE_X);
auto* const csr = CollectionShardingRuntime::get(opCtx, nss);
auto csrLock = CollectionShardingRuntime ::CSRLock::lockExclusive(opCtx, csr);
+
// It may happen that the ReplWriterWorker enters the critical section before drain mode
// upon committing a recoverable critical section oplog entry (SERVER-56104)
if (!csr->getCriticalSectionSignal(
diff --git a/src/mongo/db/s/shardsvr_rename_collection_command.cpp b/src/mongo/db/s/shardsvr_rename_collection_command.cpp
index f8d2c5f6471..63c315b23d4 100644
--- a/src/mongo/db/s/shardsvr_rename_collection_command.cpp
+++ b/src/mongo/db/s/shardsvr_rename_collection_command.cpp
@@ -38,7 +38,7 @@
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/db/s/rename_collection_coordinator.h"
-#include "mongo/db/s/rename_collection_coordinator_document_gen.h"
+#include "mongo/db/s/sharded_rename_collection_gen.h"
#include "mongo/db/s/sharding_ddl_50_upgrade_downgrade.h"
#include "mongo/db/s/sharding_ddl_coordinator_service.h"
#include "mongo/db/s/sharding_ddl_util.h"
@@ -141,6 +141,16 @@ public:
<< opCtx->getWriteConcern().wMode,
opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
+ // TODO SERVER-56296 Uniform `fromNss == toNss` behavior between RS and sharded cluster
+ if (fromNss == toNss) {
+ // Simply return the current collection version
+ const auto catalog = Grid::get(opCtx)->catalogCache();
+ const auto cm =
+ uassertStatusOK(catalog->getCollectionRoutingInfoWithRefresh(opCtx, fromNss));
+ return RenameCollectionResponse(cm.isSharded() ? cm.getVersion()
+ : ChunkVersion::UNSHARDED());
+ }
+
validateNamespacesForRenameCollection(opCtx, fromNss, toNss);
auto coordinatorDoc = RenameCollectionCoordinatorDocument();
diff --git a/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp b/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp
index e230cc6d4cd..a07cdf79707 100644
--- a/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp
+++ b/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp
@@ -32,62 +32,16 @@
#include "mongo/platform/basic.h"
#include "mongo/db/auth/authorization_session.h"
-#include "mongo/db/catalog/rename_collection.h"
#include "mongo/db/commands.h"
-#include "mongo/db/s/range_deletion_util.h"
-#include "mongo/db/s/shard_metadata_util.h"
-#include "mongo/db/s/sharding_ddl_util.h"
+#include "mongo/db/s/rename_collection_participant_service.h"
+#include "mongo/db/s/sharded_rename_collection_gen.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/write_concern.h"
#include "mongo/logv2/log.h"
-#include "mongo/s/grid.h"
-#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
namespace mongo {
namespace {
-void dropCollectionLocally(OperationContext* opCtx, const NamespaceString& nss) {
- bool knownNss = [&]() {
- try {
- sharding_ddl_util::dropCollectionLocally(opCtx, nss);
- return true;
- } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
- return false;
- }
- }();
-
- if (knownNss) {
- uassertStatusOK(shardmetadatautil::dropChunksAndDeleteCollectionsEntry(opCtx, nss));
- }
-
- LOGV2_DEBUG(5448800,
- 1,
- "Dropped target collection locally on renameCollection participant",
- "namespace"_attr = nss,
- "collectionExisted"_attr = knownNss);
-}
-
-/*
- * Rename the collection if exists locally, otherwise simply drop the source collection.
- * Returns true if the source collection is known, false otherwise.
- */
-void renameOrDropTarget(OperationContext* opCtx,
- const NamespaceString& fromNss,
- const NamespaceString& toNss,
- const RenameCollectionOptions& options) {
- try {
- validateAndRunRenameCollection(opCtx, fromNss, toNss, options);
- } catch (ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
- // It's ok for a participant shard to have no knowledge about a collection
- LOGV2_DEBUG(5448801,
- 1,
- "Source namespace not found while trying to rename collection on participant",
- "namespace"_attr = fromNss);
- dropCollectionLocally(opCtx, toNss);
- deleteRangeDeletionTasksForRename(opCtx, fromNss, toNss);
- }
-}
-
class ShardsvrRenameCollectionParticipantCommand final
: public TypedCommand<ShardsvrRenameCollectionParticipantCommand> {
public:
@@ -118,31 +72,25 @@ public:
auto const shardingState = ShardingState::get(opCtx);
uassertStatusOK(shardingState->canAcceptShardedCommands());
-
- const auto& req = request();
- const auto& fromNss = ns();
- const auto& toNss = req.getTo();
- const RenameCollectionOptions options{req.getDropTarget(), req.getStayTemp()};
-
- // Acquire source/target critical sections
- const auto reason = BSON("command"
- << "rename"
- << "from" << fromNss.toString() << "to" << toNss.toString());
- sharding_ddl_util::acquireRecoverableCriticalSectionBlockWrites(
- opCtx, fromNss, reason, ShardingCatalogClient::kLocalWriteConcern);
- sharding_ddl_util::acquireRecoverableCriticalSectionBlockReads(
- opCtx, fromNss, reason, ShardingCatalogClient::kLocalWriteConcern);
- sharding_ddl_util::acquireRecoverableCriticalSectionBlockWrites(
- opCtx, toNss, reason, ShardingCatalogClient::kLocalWriteConcern);
- sharding_ddl_util::acquireRecoverableCriticalSectionBlockReads(
- opCtx, toNss, reason, ShardingCatalogClient::kMajorityWriteConcern);
-
- snapshotRangeDeletionsForRename(opCtx, fromNss, toNss);
-
- renameOrDropTarget(opCtx, fromNss, toNss, options);
-
- restoreRangeDeletionTasksForRename(opCtx, toNss);
- deleteRangeDeletionTasksForRename(opCtx, fromNss, toNss);
+ auto const& req = request();
+
+ const NamespaceString& fromNss = ns();
+ RenameCollectionParticipantDocument participantDoc(
+ fromNss, ForwardableOperationMetadata(opCtx), req.getSourceUUID());
+ participantDoc.setRenameCollectionRequest(req.getRenameCollectionRequest());
+
+ const auto service = RenameCollectionParticipantService::getService(opCtx);
+ const auto participantDocBSON = participantDoc.toBSON();
+ const auto renameCollectionParticipant =
+ RenameParticipantInstance::getOrCreate(opCtx, service, participantDocBSON);
+ bool hasSameOptions = renameCollectionParticipant->hasSameOptions(participantDocBSON);
+ invariant(hasSameOptions,
+ str::stream() << "Another rename participant for namespace " << fromNss
+ << "is instantiated with different parameters: `"
+ << renameCollectionParticipant->doc() << "` vs `"
+ << participantDocBSON << "`");
+
+ renameCollectionParticipant->getBlockCRUDAndRenameCompletionFuture().get(opCtx);
}
private:
@@ -197,20 +145,20 @@ public:
auto const shardingState = ShardingState::get(opCtx);
uassertStatusOK(shardingState->canAcceptShardedCommands());
- const auto& fromNss = ns();
- const auto& toNss = request().getTo();
-
- // Release source/target critical sections
- const auto reason = BSON("command"
- << "rename"
- << "from" << fromNss.toString() << "to" << toNss.toString());
- sharding_ddl_util::releaseRecoverableCriticalSection(
- opCtx, fromNss, reason, ShardingCatalogClient::kLocalWriteConcern);
- sharding_ddl_util::releaseRecoverableCriticalSection(
- opCtx, toNss, reason, ShardingCatalogClient::kMajorityWriteConcern);
+ const NamespaceString& fromNss = ns();
+ const auto& req = request();
- auto catalog = Grid::get(opCtx)->catalogCache();
- uassertStatusOK(catalog->getCollectionRoutingInfoWithRefresh(opCtx, toNss));
+ RenameCollectionParticipantDocument participantDoc(
+ fromNss, ForwardableOperationMetadata(opCtx), req.getSourceUUID());
+ participantDoc.setRenameCollectionRequest(req.getRenameCollectionRequest());
+
+ const auto service = RenameCollectionParticipantService::getService(opCtx);
+ const auto id = BSON("_id" << fromNss.ns());
+ const auto optRenameCollectionParticipant =
+ RenameParticipantInstance::lookup(opCtx, service, id);
+ if (optRenameCollectionParticipant) {
+ optRenameCollectionParticipant.get()->getUnblockCrudFuture().get(opCtx);
+ }
}
private:
diff --git a/src/mongo/s/request_types/sharded_ddl_commands.idl b/src/mongo/s/request_types/sharded_ddl_commands.idl
index 9621ca22626..d14e12e5516 100644
--- a/src/mongo/s/request_types/sharded_ddl_commands.idl
+++ b/src/mongo/s/request_types/sharded_ddl_commands.idl
@@ -199,28 +199,6 @@ commands:
chained_structs:
RenameCollectionRequest: RenameCollectionRequest
- _shardsvrRenameCollectionParticipant:
- command_name: _shardsvrRenameCollectionParticipant
- cpp_name: ShardsvrRenameCollectionParticipant
- description: "Internal command sent to shard participating in a rename collection procedure"
- strict: false
- api_version: ""
- namespace: concatenate_with_db
- chained_structs:
- RenameCollectionRequest: RenameCollectionRequest
-
- _shardsvrRenameCollectionUnblockParticipant:
- command_name: _shardsvrRenameCollectionUnblockParticipant
- cpp_name: ShardsvrRenameCollectionUnblockParticipant
- description: "Parser for the _shardsvrRenameCollectionUnblockParticipant command"
- strict: false
- api_version: ""
- namespace: concatenate_with_db
- fields:
- to:
- type: namespacestring
- description: "The new namespace for the collection being renamed."
-
_shardsvrReshardCollection:
command_name: _shardsvrReshardCollection
cpp_name: ShardsvrReshardCollection