author     Tommaso Tocci <tommaso.tocci@mongodb.com>           2021-01-27 16:56:13 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2021-01-29 10:46:28 +0000
commit     a1c14013a1bb88b32105f997c5035bf28597ac75 (patch)
tree       158c5e730696bd32652bc0eb9aac0650da87fe00 /src/mongo/db
parent     d8fb94ae2b7b4dbd21c56c06a54ba908ac081694 (diff)
SERVER-52811 Implement the new drop database path in _shardsvrDropDatabase
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/catalog/drop_database.h                              2
-rw-r--r--  src/mongo/db/s/SConscript                                         2
-rw-r--r--  src/mongo/db/s/drop_collection_coordinator.cpp                   34
-rw-r--r--  src/mongo/db/s/drop_collection_coordinator.h                      1
-rw-r--r--  src/mongo/db/s/drop_database_coordinator.cpp                    164
-rw-r--r--  src/mongo/db/s/drop_database_coordinator.h                       51
-rw-r--r--  src/mongo/db/s/sharding_ddl_util.cpp                             28
-rw-r--r--  src/mongo/db/s/sharding_ddl_util.h                                6
-rw-r--r--  src/mongo/db/s/shardsvr_drop_database_command.cpp                32
-rw-r--r--  src/mongo/db/s/shardsvr_drop_database_participant_command.cpp  117
10 files changed, 404 insertions(+), 33 deletions(-)
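For orientation before reading the per-file diffs: the new _shardsvrDropDatabase path is driven by a DropDatabaseCoordinator on the database's primary shard, which broadcasts participant commands to every shard. The outline below is a simplified plain-C++ sketch of that sequence; the types, helper names, and command-name strings are stand-ins inferred from the patch, not the actual server code (see drop_database_coordinator.cpp further down for the real implementation).

    #include <string>
    #include <vector>

    // Stand-ins for the real server types (OperationContext, NamespaceString, ...).
    struct OperationContext {};

    // Hypothetical helpers mirroring the functions added in this patch (stub bodies).
    void takeDatabaseDistLock(OperationContext*, const std::string& /*dbName*/) {}
    void removeCollMetadataFromConfig(OperationContext*, const std::string& /*nss*/) {}
    void removeDatabaseMetadataFromConfig(OperationContext*, const std::string& /*dbName*/) {}
    void sendToAllShards(OperationContext*, const std::string& /*cmdName*/, const std::string& /*target*/) {}

    // Simplified outline of DropDatabaseCoordinator::runImpl (see the diff below).
    void dropDatabaseSketch(OperationContext* opCtx,
                            const std::string& dbName,
                            const std::vector<std::string>& shardedCollections) {
        // 1. Take the distributed lock on the database so it cannot be re-created
        //    while the shards are still dropping it.
        takeDatabaseDistLock(opCtx, dbName);

        // 2. For each sharded collection: remove its chunks/tags/collection metadata
        //    from the config server, then have every shard drop it locally.
        for (const auto& nss : shardedCollections) {
            removeCollMetadataFromConfig(opCtx, nss);
            sendToAllShards(opCtx, "_shardsvrDropCollectionParticipant", nss);
        }

        // 3. Remove the database entry from the sharding catalog and purge the
        //    cached routing information.
        removeDatabaseMetadataFromConfig(opCtx, dbName);

        // 4. Have every shard drop the database locally and clear its
        //    DatabaseShardingState.
        sendToAllShards(opCtx, "_shardsvrDropDatabaseParticipant", dbName);
    }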
diff --git a/src/mongo/db/catalog/drop_database.h b/src/mongo/db/catalog/drop_database.h
index 0e3b6e59125..365a7a61e0a 100644
--- a/src/mongo/db/catalog/drop_database.h
+++ b/src/mongo/db/catalog/drop_database.h
@@ -27,6 +27,8 @@
* it in the license file.
*/
+#pragma once
+
#include "mongo/base/status.h"
namespace mongo {
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 1163deccec7..fa69d7b1cc3 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -327,6 +327,7 @@ env.Library(
'config/configsvr_split_chunk_command.cpp',
'config/configsvr_update_zone_key_range_command.cpp',
'drop_collection_coordinator.cpp',
+ 'drop_database_coordinator.cpp',
'flush_database_cache_updates_command.cpp',
'flush_routing_table_cache_updates_command.cpp',
'get_database_version_command.cpp',
@@ -344,6 +345,7 @@ env.Library(
'shardsvr_drop_collection_command.cpp',
'shardsvr_drop_collection_participant_command.cpp',
'shardsvr_drop_database_command.cpp',
+ 'shardsvr_drop_database_participant_command.cpp',
'shardsvr_refine_collection_shard_key_command.cpp',
'shardsvr_rename_collection.cpp',
'shardsvr_shard_collection_command.cpp',
diff --git a/src/mongo/db/s/drop_collection_coordinator.cpp b/src/mongo/db/s/drop_collection_coordinator.cpp
index 18220e4e0d7..99883326ab2 100644
--- a/src/mongo/db/s/drop_collection_coordinator.cpp
+++ b/src/mongo/db/s/drop_collection_coordinator.cpp
@@ -37,6 +37,8 @@
#include "mongo/db/concurrency/lock_manager_defs.h"
#include "mongo/db/s/database_sharding_state.h"
#include "mongo/db/s/dist_lock_manager.h"
+#include "mongo/db/s/drop_collection_coordinator.h"
+#include "mongo/db/s/sharding_ddl_util.h"
#include "mongo/logv2/log.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
@@ -49,8 +51,6 @@
namespace mongo {
-static constexpr int kMaxNumStaleShardVersionRetries = 10;
-
DropCollectionCoordinator::DropCollectionCoordinator(OperationContext* opCtx,
const NamespaceString& nss)
: ShardingDDLCoordinator(nss), _serviceContext(opCtx->getServiceContext()) {
@@ -83,34 +83,6 @@ void DropCollectionCoordinator::_sendDropCollToParticipants(OperationContext* op
}
}
-void DropCollectionCoordinator::_removeCollMetadataFromConfig(OperationContext* opCtx) {
- IgnoreAPIParametersBlock ignoreApiParametersBlock(opCtx);
- const auto catalogClient = Grid::get(opCtx)->catalogClient();
-
- ON_BLOCK_EXIT([this, opCtx] {
- Grid::get(opCtx)->catalogCache()->invalidateCollectionEntry_LINEARIZABLE(_nss);
- });
-
- // Remove chunk data
- uassertStatusOK(
- catalogClient->removeConfigDocuments(opCtx,
- ChunkType::ConfigNS,
- BSON(ChunkType::ns(_nss.ns())),
- ShardingCatalogClient::kMajorityWriteConcern));
- // Remove tag data
- uassertStatusOK(
- catalogClient->removeConfigDocuments(opCtx,
- TagsType::ConfigNS,
- BSON(TagsType::ns(_nss.ns())),
- ShardingCatalogClient::kMajorityWriteConcern));
- // Remove coll metadata
- uassertStatusOK(
- catalogClient->removeConfigDocuments(opCtx,
- CollectionType::ConfigNS,
- BSON(CollectionType::kNssFieldName << _nss.ns()),
- ShardingCatalogClient::kMajorityWriteConcern));
-}
-
void DropCollectionCoordinator::_stopMigrations(OperationContext* opCtx) {
// TODO SERVER-53861 this will not stop current ongoing migrations
uassertStatusOK(Grid::get(opCtx)->catalogClient()->updateConfigDocument(
@@ -144,7 +116,7 @@ SemiFuture<void> DropCollectionCoordinator::runImpl(
const auto routingInfo = uassertStatusOK(
Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfoWithRefresh(opCtx, _nss));
- _removeCollMetadataFromConfig(opCtx);
+ sharding_ddl_util::removeCollMetadataFromConfig(opCtx, _nss);
if (routingInfo.isSharded()) {
_participants = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx);
diff --git a/src/mongo/db/s/drop_collection_coordinator.h b/src/mongo/db/s/drop_collection_coordinator.h
index 02347408d5b..933e1e1c4f7 100644
--- a/src/mongo/db/s/drop_collection_coordinator.h
+++ b/src/mongo/db/s/drop_collection_coordinator.h
@@ -45,7 +45,6 @@ private:
SemiFuture<void> runImpl(std::shared_ptr<executor::TaskExecutor> executor) override;
void _stopMigrations(OperationContext* opCtx);
- void _removeCollMetadataFromConfig(OperationContext* opCtx);
void _sendDropCollToParticipants(OperationContext* opCtx);
ServiceContext* _serviceContext;
diff --git a/src/mongo/db/s/drop_database_coordinator.cpp b/src/mongo/db/s/drop_database_coordinator.cpp
new file mode 100644
index 00000000000..f341e4ed6c4
--- /dev/null
+++ b/src/mongo/db/s/drop_database_coordinator.cpp
@@ -0,0 +1,164 @@
+/**
+ * Copyright (C) 2020-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
+
+#include "mongo/db/s/drop_database_coordinator.h"
+
+#include "mongo/db/api_parameters.h"
+#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/catalog_raii.h"
+#include "mongo/db/concurrency/lock_manager_defs.h"
+#include "mongo/db/s/database_sharding_state.h"
+#include "mongo/db/s/dist_lock_manager.h"
+#include "mongo/db/s/drop_collection_coordinator.h"
+#include "mongo/db/s/sharding_ddl_util.h"
+#include "mongo/logv2/log.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
+#include "mongo/s/catalog/type_chunk.h"
+#include "mongo/s/catalog/type_shard.h"
+#include "mongo/s/catalog/type_tags.h"
+#include "mongo/s/client/shard_registry.h"
+#include "mongo/s/grid.h"
+#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
+#include "mongo/util/assert_util.h"
+
+namespace mongo {
+namespace {
+
+void sendCommandToAllShards(OperationContext* opCtx,
+ StringData dbName,
+ StringData cmdName,
+ BSONObj cmd) {
+ auto* const shardRegistry = Grid::get(opCtx)->shardRegistry();
+ const auto participants = shardRegistry->getAllShardIds(opCtx);
+
+ for (const auto& shardId : participants) {
+ const auto& shard = uassertStatusOK(shardRegistry->getShard(opCtx, shardId));
+
+ const auto swDropResult = shard->runCommandWithFixedRetryAttempts(
+ opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ dbName.toString(),
+ CommandHelpers::appendMajorityWriteConcern(cmd),
+ Shard::RetryPolicy::kIdempotent);
+
+ uassertStatusOKWithContext(
+ Shard::CommandResponse::getEffectiveStatus(std::move(swDropResult)),
+ str::stream() << "Error processing " << cmdName << " on shard " << shardId);
+ }
+}
+
+void removeDatabaseMetadataFromConfig(OperationContext* opCtx, StringData dbName) {
+ IgnoreAPIParametersBlock ignoreApiParametersBlock(opCtx);
+ const auto catalogClient = Grid::get(opCtx)->catalogClient();
+
+ ON_BLOCK_EXIT([&, dbName = dbName.toString()] {
+ Grid::get(opCtx)->catalogCache()->purgeDatabase(dbName);
+ });
+
+ // Remove the database entry from the metadata.
+ const Status status =
+ catalogClient->removeConfigDocuments(opCtx,
+ DatabaseType::ConfigNS,
+ BSON(DatabaseType::name(dbName.toString())),
+ ShardingCatalogClient::kMajorityWriteConcern);
+ uassertStatusOKWithContext(status,
+ str::stream()
+ << "Could not remove database metadata from config server for '"
+ << dbName << "'.");
+}
+
+} // namespace
+
+DropDatabaseCoordinator::DropDatabaseCoordinator(OperationContext* opCtx, StringData dbName)
+ : ShardingDDLCoordinator({dbName, ""}), _serviceContext(opCtx->getServiceContext()) {
+ auto authSession = AuthorizationSession::get(opCtx->getClient());
+ _users =
+ userNameIteratorToContainer<std::vector<UserName>>(authSession->getImpersonatedUserNames());
+ _roles =
+ roleNameIteratorToContainer<std::vector<RoleName>>(authSession->getImpersonatedRoleNames());
+}
+
+SemiFuture<void> DropDatabaseCoordinator::runImpl(
+ std::shared_ptr<executor::TaskExecutor> executor) {
+ return ExecutorFuture<void>(executor, Status::OK())
+ .then([this, anchor = shared_from_this()]() {
+ ThreadClient tc{"DropDatabaseCoordinator", _serviceContext};
+ auto opCtxHolder = tc->makeOperationContext();
+ auto* opCtx = opCtxHolder.get();
+
+ auto authSession = AuthorizationSession::get(opCtx->getClient());
+ authSession->setImpersonatedUserData(_users, _roles);
+
+ const auto dbName = _nss.db();
+ auto distLockManager = DistLockManager::get(_serviceContext);
+ const auto dbDistLock = uassertStatusOK(distLockManager->lock(
+ opCtx, dbName, "DropDatabase", DistLockManager::kDefaultLockTimeout));
+
+ // Drop all collections under this DB
+ auto const catalogClient = Grid::get(opCtx)->catalogClient();
+ const auto allCollectionsForDb = catalogClient->getAllShardedCollectionsForDb(
+ opCtx, dbName, repl::ReadConcernLevel::kMajorityReadConcern);
+
+ for (const auto& nss : allCollectionsForDb) {
+ // TODO SERVER-53905: to support failovers here we need to store the
+ // current namespace of this loop before deleting it from the config server,
+ // so that on step-up we will remember to resume the drop collection for
+ // that namespace.
+ sharding_ddl_util::removeCollMetadataFromConfig(opCtx, nss);
+ const auto dropCollParticipantCmd = ShardsvrDropCollectionParticipant(nss);
+ sendCommandToAllShards(opCtx,
+ dbName,
+ ShardsvrDropCollectionParticipant::kCommandName,
+ dropCollParticipantCmd.toBSON({}));
+ }
+
+ // Drop the DB itself.
+ // The DistLockManager will prevent the database from being re-created before
+ // each shard has actually dropped it locally.
+ removeDatabaseMetadataFromConfig(opCtx, dbName);
+ auto dropDatabaseParticipantCmd = ShardsvrDropDatabaseParticipant();
+ dropDatabaseParticipantCmd.setDbName(dbName);
+ sendCommandToAllShards(opCtx,
+ dbName,
+ ShardsvrDropDatabaseParticipant::kCommandName,
+ dropDatabaseParticipantCmd.toBSON({}));
+ })
+ .onError([this, anchor = shared_from_this()](const Status& status) {
+ LOGV2_ERROR(5281131,
+ "Error running drop database",
+ "database"_attr = _nss.db(),
+ "error"_attr = redact(status));
+ return status;
+ })
+ .semi();
+}
+
+} // namespace mongo
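For reference, the participant requests that sendCommandToAllShards() broadcasts are serialized via toBSON({}) with a majority write concern appended. Their approximate wire shape is sketched below; the field names are inferred from the IDL-generated request types and are an assumption, not taken from this patch.

    // Assumed wire shape of the per-collection participant command (one per
    // sharded collection, sent to every shard):
    //   { "_shardsvrDropCollectionParticipant": "<db>.<coll>",
    //     "writeConcern": { "w": "majority" } }
    //
    // Assumed wire shape of the final per-database participant command:
    //   { "_shardsvrDropDatabaseParticipant": 1,
    //     "writeConcern": { "w": "majority" } }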
diff --git a/src/mongo/db/s/drop_database_coordinator.h b/src/mongo/db/s/drop_database_coordinator.h
new file mode 100644
index 00000000000..2a66c832175
--- /dev/null
+++ b/src/mongo/db/s/drop_database_coordinator.h
@@ -0,0 +1,51 @@
+/**
+ * Copyright (C) 2020-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/auth/user_name.h"
+#include "mongo/db/s/sharding_ddl_coordinator.h"
+#include "mongo/s/shard_id.h"
+
+namespace mongo {
+
+class DropDatabaseCoordinator final : public ShardingDDLCoordinator,
+ public std::enable_shared_from_this<DropDatabaseCoordinator> {
+public:
+ DropDatabaseCoordinator(OperationContext* opCtx, StringData dbName);
+
+private:
+ SemiFuture<void> runImpl(std::shared_ptr<executor::TaskExecutor> executor) override;
+
+ ServiceContext* _serviceContext;
+ std::vector<UserName> _users;
+ std::vector<RoleName> _roles;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/s/sharding_ddl_util.cpp b/src/mongo/db/s/sharding_ddl_util.cpp
index 231b6d91863..b36aa2ffd35 100644
--- a/src/mongo/db/s/sharding_ddl_util.cpp
+++ b/src/mongo/db/s/sharding_ddl_util.cpp
@@ -47,6 +47,34 @@ namespace mongo {
namespace sharding_ddl_util {
+void removeCollMetadataFromConfig(OperationContext* opCtx, NamespaceString nss) {
+
+ IgnoreAPIParametersBlock ignoreApiParametersBlock(opCtx);
+ const auto catalogClient = Grid::get(opCtx)->catalogClient();
+
+ ON_BLOCK_EXIT(
+ [&] { Grid::get(opCtx)->catalogCache()->invalidateCollectionEntry_LINEARIZABLE(nss); });
+
+ // Remove chunk data
+ uassertStatusOK(
+ catalogClient->removeConfigDocuments(opCtx,
+ ChunkType::ConfigNS,
+ BSON(ChunkType::ns(nss.ns())),
+ ShardingCatalogClient::kMajorityWriteConcern));
+ // Remove tag data
+ uassertStatusOK(
+ catalogClient->removeConfigDocuments(opCtx,
+ TagsType::ConfigNS,
+ BSON(TagsType::ns(nss.ns())),
+ ShardingCatalogClient::kMajorityWriteConcern));
+ // Remove coll metadata
+ uassertStatusOK(
+ catalogClient->removeConfigDocuments(opCtx,
+ CollectionType::ConfigNS,
+ BSON(CollectionType::kNssFieldName << nss.ns()),
+ ShardingCatalogClient::kMajorityWriteConcern));
+}
+
void shardedRenameMetadata(OperationContext* opCtx,
const NamespaceString& fromNss,
const NamespaceString& toNss) {
diff --git a/src/mongo/db/s/sharding_ddl_util.h b/src/mongo/db/s/sharding_ddl_util.h
index 2161eebb199..16cdb020090 100644
--- a/src/mongo/db/s/sharding_ddl_util.h
+++ b/src/mongo/db/s/sharding_ddl_util.h
@@ -34,6 +34,12 @@ namespace mongo {
namespace sharding_ddl_util {
/**
+ * Erase the collection metadata from the config server and invalidate the locally cached entry.
+ * In particular, remove the chunks, tags, and the description associated with the given namespace.
+ */
+void removeCollMetadataFromConfig(OperationContext* opCtx, NamespaceString nss);
+
+/**
* Rename sharded collection metadata as part of a renameCollection operation.
*
* Transaction:
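A hedged usage sketch of the new helper: the namespace below is an example, not one from the patch, and opCtx is assumed to come from the surrounding DDL coordinator.

    // Clean up everything the config server knows about an example collection.
    // After the call, config.chunks, config.tags and config.collections no longer
    // contain entries for the namespace, and the local catalog cache entry has
    // been invalidated.
    const NamespaceString nss("mydb", "mycoll");  // hypothetical example namespace
    sharding_ddl_util::removeCollMetadataFromConfig(opCtx, nss);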
diff --git a/src/mongo/db/s/shardsvr_drop_database_command.cpp b/src/mongo/db/s/shardsvr_drop_database_command.cpp
index 516bb6a2b79..98bdcfed9ad 100644
--- a/src/mongo/db/s/shardsvr_drop_database_command.cpp
+++ b/src/mongo/db/s/shardsvr_drop_database_command.cpp
@@ -29,12 +29,18 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
+#include "mongo/platform/basic.h"
+
#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/commands.h"
+#include "mongo/db/curop.h"
+#include "mongo/db/s/drop_database_coordinator.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/logv2/log.h"
#include "mongo/s/grid.h"
#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
+#include "mongo/s/sharded_collections_ddl_parameters_gen.h"
namespace mongo {
namespace {
@@ -86,7 +92,31 @@ public:
<< opCtx->getWriteConcern().wMode,
opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
- return dropDatabaseLegacy(opCtx, request().getDbName());
+ const auto dbName = request().getDbName();
+
+ if (!feature_flags::gShardingFullDDLSupport.isEnabled(
+ serverGlobalParams.featureCompatibility) ||
+ feature_flags::gDisableIncompleteShardingDDLSupport.isEnabled(
+ serverGlobalParams.featureCompatibility)) {
+
+ LOGV2_DEBUG(
+ 5281110, 1, "Running legacy drop database procedure", "database"_attr = dbName);
+ return dropDatabaseLegacy(opCtx, dbName);
+ }
+
+ LOGV2_DEBUG(
+ 5281111, 1, "Running new drop database procedure", "database"_attr = dbName);
+
+ // Since this operation does not directly write locally, we need to manually
+ // raise its db profile level so that it gets logged in "<db>.system.profile"
+ CurOp::get(opCtx)->raiseDbProfileLevel(
+ CollectionCatalog::get(opCtx)->getDatabaseProfileLevel(dbName));
+
+ auto dropDatabaseCoordinator = std::make_shared<DropDatabaseCoordinator>(opCtx, dbName);
+ dropDatabaseCoordinator->run(opCtx).get();
+
+ // The following response can be omitted once 5.0 becomes last-lts
+ return Response();
}
private:
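The gating above keeps the legacy procedure unless the full-DDL-support feature flag is enabled and the "disable incomplete sharding DDL support" flag is not. A minimal standalone restatement of that condition, with plain bools standing in for the real feature-flag checks:

    // Stand-in for the two feature-flag checks in the command body above.
    bool useNewDropDatabasePath(bool fullDDLSupportEnabled, bool disableIncompleteDDLSupport) {
        // The new coordinator-based path runs only when full DDL support is on
        // and the escape hatch that disables incomplete DDL support is off.
        return fullDDLSupportEnabled && !disableIncompleteDDLSupport;
    }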
diff --git a/src/mongo/db/s/shardsvr_drop_database_participant_command.cpp b/src/mongo/db/s/shardsvr_drop_database_participant_command.cpp
new file mode 100644
index 00000000000..25b40a522a1
--- /dev/null
+++ b/src/mongo/db/s/shardsvr_drop_database_participant_command.cpp
@@ -0,0 +1,117 @@
+/**
+ * Copyright (C) 2020-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/catalog/drop_database.h"
+#include "mongo/db/catalog_raii.h"
+#include "mongo/db/commands.h"
+#include "mongo/db/s/database_sharding_state.h"
+#include "mongo/db/s/sharding_state.h"
+#include "mongo/logv2/log.h"
+#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
+
+namespace mongo {
+namespace {
+
+class ShardsvrDropDatabaseParticipantCommand final
+ : public TypedCommand<ShardsvrDropDatabaseParticipantCommand> {
+public:
+ bool acceptsAnyApiVersionParameters() const override {
+ return true;
+ }
+
+ AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
+ return Command::AllowedOnSecondary::kNever;
+ }
+
+ std::string help() const override {
+ return "Internal command, which is exported by secondary sharding servers. Do not call "
+ "directly. Participates in dropping a database.";
+ }
+
+ using Request = ShardsvrDropDatabaseParticipant;
+
+ class Invocation final : public InvocationBase {
+ public:
+ using InvocationBase::InvocationBase;
+
+ void typedRun(OperationContext* opCtx) {
+ uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands());
+ uassert(ErrorCodes::InvalidOptions,
+ str::stream() << Request::kCommandName
+ << " must be called with majority writeConcern, got "
+ << opCtx->getWriteConcern().wMode,
+ opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
+
+ const auto& dbName = request().getDbName();
+
+ try {
+ uassertStatusOK(dropDatabase(opCtx, dbName.toString()));
+ } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
+ LOGV2_DEBUG(5281101,
+ 1,
+ "Received a ShardsvrDropDatabaseParticipant but did not find the "
+ "database locally",
+ "database"_attr = dbName);
+ }
+
+ {
+ // Clear the DatabaseShardingState entry
+ UninterruptibleLockGuard noInterrupt(opCtx->lockState());
+ Lock::DBLock dbLock(opCtx, dbName, MODE_X);
+ auto dss = DatabaseShardingState::get(opCtx, dbName);
+ dss->clearDatabaseInfo(opCtx);
+ }
+ }
+
+ private:
+ NamespaceString ns() const override {
+ return {request().getDbName(), ""};
+ }
+
+ bool supportsWriteConcern() const override {
+ return true;
+ }
+
+ void doCheckAuthorization(OperationContext* opCtx) const override {
+ uassert(ErrorCodes::Unauthorized,
+ "Unauthorized",
+ AuthorizationSession::get(opCtx->getClient())
+ ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
+ ActionType::internal));
+ }
+ };
+} shardsvrDropDatabaseParticipantCommand;
+
+} // namespace
+} // namespace mongo
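The participant swallows NamespaceNotFound so that it reports success when the database is already gone. This keeps the command idempotent, which matters because the coordinator sends it with Shard::RetryPolicy::kIdempotent and may re-send it after transient failures. A minimal standalone sketch of that pattern, with stand-in types rather than server code:

    #include <stdexcept>
    #include <string>

    // Stand-in for the server's NamespaceNotFound error.
    struct NamespaceNotFound : std::runtime_error {
        using std::runtime_error::runtime_error;
    };

    // Hypothetical local drop; the real code calls dropDatabase(), which may
    // throw NamespaceNotFound if the database does not exist on this shard.
    void dropDatabaseLocally(const std::string& /*dbName*/) { /* stub */ }

    void participantDropDatabase(const std::string& dbName) {
        try {
            dropDatabaseLocally(dbName);
        } catch (const NamespaceNotFound&) {
            // Already dropped (or never created) on this shard: treat as success
            // so a retried participant command still converges.
        }
    }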