author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2021-01-27 11:27:16 -0500
committer Evergreen Agent <no-reply@evergreen.mongodb.com>   2021-02-09 12:36:58 +0000
commit    4a2571bb8ef9b0844f61dc1c780e88c555172458 (patch)
tree      29a0daffa8d5302f27c9d80ab77e73d96fa4fbe5
parent    5ad5c0409eace36efc15cfaa2d8d2936a09639b1 (diff)
download  mongo-4a2571bb8ef9b0844f61dc1c780e88c555172458.tar.gz
SERVER-54283 Move the legacy dropDatabase/Collection path to execute from the shard rather than forwarding to the config server
-rw-r--r--  jstests/concurrency/fsm_libs/runner.js                                      16
-rw-r--r--  jstests/concurrency/fsm_utils/setup_teardown_functions.js                   22
-rw-r--r--  src/mongo/db/SConscript                                                      1
-rw-r--r--  src/mongo/db/s/SConscript                                                    2
-rw-r--r--  src/mongo/db/s/config/configsvr_drop_collection_command.cpp                 85
-rw-r--r--  src/mongo/db/s/config/configsvr_drop_database_command.cpp                  112
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.h                             21
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp   216
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_drop_coll_test.cpp           18
-rw-r--r--  src/mongo/db/s/drop_collection_legacy.cpp                                   329
-rw-r--r--  src/mongo/db/s/drop_collection_legacy.h                                      48
-rw-r--r--  src/mongo/db/s/drop_database_legacy.cpp                                     140
-rw-r--r--  src/mongo/db/s/drop_database_legacy.h                                        40
-rw-r--r--  src/mongo/db/s/shardsvr_drop_collection_command.cpp                          15
-rw-r--r--  src/mongo/db/s/shardsvr_drop_database_command.cpp                            17
15 files changed, 579 insertions, 503 deletions
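
At a high level, this commit relocates the legacy drop orchestration: the shard now takes the distributed locks and drives the drop itself instead of forwarding to the config server. A rough before/after outline, hedged because the mongos-side routing and the unchanged command bodies lie outside this diff:

    // Before (legacy path):
    //   shard:  _shardsvrDropCollection
    //             -> forwards {_configsvrDropCollection: <ns>} to the config server
    //   config: takes the db and collection dist locks, drops the collection on
    //           every shard, cleans config.collections/config.chunks/config.tags
    //
    // After (this commit):
    //   shard:  _shardsvrDropCollection
    //             -> dropCollectionLegacy(opCtx, nss)       // takes the dist locks
    //                  -> dropCollectionNoDistLock(...)     // same drop + cleanup
    //   config: _configsvrDropCollection is kept for compatibility, but is now a
    //           thin wrapper around the same dropCollectionLegacy()
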
diff --git a/jstests/concurrency/fsm_libs/runner.js b/jstests/concurrency/fsm_libs/runner.js
index e683528898b..94b99530f9e 100644
--- a/jstests/concurrency/fsm_libs/runner.js
+++ b/jstests/concurrency/fsm_libs/runner.js
@@ -5,7 +5,6 @@ load('jstests/concurrency/fsm_libs/cluster.js');
load('jstests/concurrency/fsm_libs/parse_config.js');
load('jstests/concurrency/fsm_libs/thread_mgr.js');
load('jstests/concurrency/fsm_utils/name_utils.js'); // for uniqueCollName and uniqueDBName
-load('jstests/concurrency/fsm_utils/setup_teardown_functions.js');
var runner = (function() {
function validateExecutionMode(mode) {
@@ -403,18 +402,6 @@ var runner = (function() {
config.data, 'threadCount', {enumerable: true, value: config.threadCount});
}
- function useDropDistLockFailPoint(cluster, clusterOptions) {
- assert(cluster.isSharded(), 'cluster is not sharded');
-
- // For sharded clusters, enable a fail point that allows dropCollection to wait longer
- // to acquire the distributed lock. This prevents tests from failing if the distributed
- // lock is already held by the balancer or by a workload operation. The increased wait
- // is shorter than the distributed-lock-takeover period because otherwise the node
- // would be assumed to be down and the lock would be overtaken.
- clusterOptions.setupFunctions.config.push(increaseDropDistLockTimeout);
- clusterOptions.teardownFunctions.config.push(resetDropDistLockTimeout);
- }
-
function loadWorkloadContext(workloads, context, executionOptions, applyMultipliers) {
workloads.forEach(function(workload) {
load(workload); // for $config
@@ -644,9 +631,6 @@ var runner = (function() {
var threadMgr = new ThreadManager(clusterOptions, executionMode);
var cluster = new Cluster(clusterOptions);
- if (cluster.isSharded()) {
- useDropDistLockFailPoint(cluster, clusterOptions);
- }
cluster.setup();
// Clean up the state left behind by other tests in the concurrency suite
diff --git a/jstests/concurrency/fsm_utils/setup_teardown_functions.js b/jstests/concurrency/fsm_utils/setup_teardown_functions.js
deleted file mode 100644
index 8490e273cbf..00000000000
--- a/jstests/concurrency/fsm_utils/setup_teardown_functions.js
+++ /dev/null
@@ -1,22 +0,0 @@
-'use strict';
-
-/**
- * Functions used by runners to set up and tear down their clusters.
- * Each function is called by executeOnMongodNodes and executeOnMongosNodes
- * (if the cluster is sharded). Each function should accept a connection to
- * the 'admin' database.
- */
-
-var increaseDropDistLockTimeout = function increaseDropDistLockTimeout(db) {
- var waitTimeSecs = 10 * 60; // 10 minutes
- assert.commandWorked(db.runCommand({
- configureFailPoint: 'setDropCollDistLockWait',
- mode: 'alwaysOn',
- data: {waitForSecs: waitTimeSecs}
- }));
-};
-
-var resetDropDistLockTimeout = function resetDropDistLockTimeout(db) {
- assert.commandWorked(
- db.runCommand({configureFailPoint: 'setDropCollDistLockWait', mode: 'off'}));
-};
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index 922e63dc38f..ca823f10c88 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -2080,7 +2080,6 @@ env.Library(
'repl/topology_coordinator',
'rw_concern_d',
's/sessions_collection_config_server',
- 's/sharding_catalog_manager',
's/sharding_commands_d',
's/sharding_runtime_d',
'service_context_d',
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 7b7cef23bcc..a31e205c457 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -261,6 +261,8 @@ env.Library(
'dist_lock_catalog.cpp',
'dist_lock_manager_replset.cpp',
'dist_lock_manager.cpp',
+ 'drop_collection_legacy.cpp',
+ 'drop_database_legacy.cpp',
'type_lockpings.cpp',
'type_locks.cpp',
],
diff --git a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
index 11657f40f17..ca91a68242d 100644
--- a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
@@ -34,24 +34,11 @@
#include "mongo/db/commands.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/read_concern_args.h"
-#include "mongo/db/repl/repl_client_info.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
-#include "mongo/db/s/dist_lock_manager.h"
-#include "mongo/db/s/operation_sharding_state.h"
-#include "mongo/s/catalog/type_database.h"
-#include "mongo/s/catalog_cache.h"
-#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/cluster_commands_helpers.h"
-#include "mongo/s/grid.h"
-#include "mongo/s/stale_exception.h"
-#include "mongo/util/fail_point.h"
-#include "mongo/util/scopeguard.h"
+#include "mongo/db/s/drop_collection_legacy.h"
namespace mongo {
namespace {
-MONGO_FAIL_POINT_DEFINE(setDropCollDistLockWait);
-
/**
* Internal sharding command run on config servers to drop a collection from a database.
*/
@@ -117,78 +104,10 @@ public:
<< cmdObj,
opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
- Seconds waitFor(DistLockManager::kDefaultLockTimeout);
- setDropCollDistLockWait.execute(
- [&](const BSONObj& data) { waitFor = Seconds(data["waitForSecs"].numberInt()); });
-
- auto dbDistLock = uassertStatusOK(
- DistLockManager::get(opCtx)->lock(opCtx, nss.db(), "dropCollection", waitFor));
- auto collDistLock = uassertStatusOK(
- DistLockManager::get(opCtx)->lock(opCtx, nss.ns(), "dropCollection", waitFor));
-
- ON_BLOCK_EXIT([opCtx, nss] {
- Grid::get(opCtx)->catalogCache()->invalidateCollectionEntry_LINEARIZABLE(nss);
- });
-
- _dropCollection(opCtx, nss);
+ dropCollectionLegacy(opCtx, nss);
return true;
}
-
-private:
- static void _dropCollection(OperationContext* opCtx, const NamespaceString& nss) {
- auto const catalogClient = Grid::get(opCtx)->catalogClient();
-
- CollectionType collection;
- try {
- catalogClient->getCollection(opCtx, nss, repl::ReadConcernArgs::get(opCtx).getLevel());
- ShardingCatalogManager::get(opCtx)->dropCollection(opCtx, nss);
- } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
- // We checked the sharding catalog and found that this collection doesn't exist. This
- // may be because it never existed, or because a drop command was sent previously. This
- // data might not be majority committed though, so we will set the client's last optime
- // to the system's last optime to ensure the client waits for the writeConcern to be
- // satisfied.
- repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
-
- // If the DB isn't in the sharding catalog either, consider the drop a success.
- DatabaseType dbt;
- try {
- dbt = catalogClient->getDatabase(
- opCtx, nss.db().toString(), repl::ReadConcernArgs::get(opCtx).getLevel());
- } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
- return;
- }
-
- // If we found the DB but not the collection, and the primary shard for the database is
- // the config server, run the drop only against the config server unless the collection
- // is config.system.sessions, since no other collections whose primary shard is the
- // config server can have been sharded.
- if (dbt.getPrimary() == ShardId::kConfigServerId &&
- nss != NamespaceString::kLogicalSessionsNamespace) {
- auto cmdDropResult =
- uassertStatusOK(Grid::get(opCtx)
- ->shardRegistry()
- ->getConfigShard()
- ->runCommandWithFixedRetryAttempts(
- opCtx,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- nss.db().toString(),
- BSON("drop" << nss.coll()),
- Shard::RetryPolicy::kIdempotent));
-
- // If the collection doesn't exist, consider the drop a success.
- if (cmdDropResult.commandStatus == ErrorCodes::NamespaceNotFound) {
- return;
- }
- uassertStatusOK(cmdDropResult.commandStatus);
- return;
- }
-
- ShardingCatalogManager::get(opCtx)->ensureDropCollectionCompleted(opCtx, nss);
- }
- }
-
} configsvrDropCollectionCmd;
} // namespace
diff --git a/src/mongo/db/s/config/configsvr_drop_database_command.cpp b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
index 79d1f5abd8c..de258fab32d 100644
--- a/src/mongo/db/s/config/configsvr_drop_database_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
@@ -36,12 +36,8 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
-#include "mongo/db/s/dist_lock_manager.h"
-#include "mongo/db/s/sharding_logging.h"
+#include "mongo/db/s/drop_database_legacy.h"
#include "mongo/s/catalog/type_database.h"
-#include "mongo/s/catalog_cache.h"
-#include "mongo/s/grid.h"
#include "mongo/util/scopeguard.h"
namespace mongo {
@@ -117,113 +113,13 @@ public:
<< cmdObj,
opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
- auto const catalogClient = Grid::get(opCtx)->catalogClient();
- auto const catalogManager = ShardingCatalogManager::get(opCtx);
-
- auto dbDistLock = uassertStatusOK(DistLockManager::get(opCtx)->lock(
- opCtx, dbname, "dropDatabase", DistLockManager::kDefaultLockTimeout));
-
- // Invalidate the database metadata so the next access kicks off a full reload.
- ON_BLOCK_EXIT([opCtx, dbname] { Grid::get(opCtx)->catalogCache()->purgeDatabase(dbname); });
-
- DatabaseType dbType;
- try {
- dbType = catalogClient->getDatabase(
- opCtx, dbname, repl::ReadConcernArgs::get(opCtx).getLevel());
- } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
- // If the namespace isn't found, treat the drop as a success. In case the drop just
- // happened and has not fully propagated, set the client's last optime to the system's
- // last optime to ensure the client waits.
- result.append("info", "database does not exist");
+ auto reply = dropDatabaseLegacy(opCtx, dbname);
+ if (!reply.getDropped()) {
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
- return true;
- }
-
- uassertStatusOK(ShardingLogging::get(opCtx)->logChangeChecked(
- opCtx,
- "dropDatabase.start",
- dbname,
- BSONObj(),
- ShardingCatalogClient::kMajorityWriteConcern));
-
- // Drop the database's collections.
- for (const auto& nss : catalogClient->getAllShardedCollectionsForDb(
- opCtx, dbname, repl::ReadConcernArgs::get(opCtx).getLevel())) {
- auto collDistLock = uassertStatusOK(DistLockManager::get(opCtx)->lock(
- opCtx, nss.ns(), "dropCollection", DistLockManager::kDefaultLockTimeout));
- catalogManager->dropCollection(opCtx, nss);
}
-
- // Drop the database from the primary shard first.
- _dropDatabaseFromShard(opCtx, dbType.getPrimary(), dbname);
-
- // Drop the database from each of the remaining shards.
- const auto allShardIds = Grid::get(opCtx)->shardRegistry()->getAllShardIdsNoReload();
- for (const ShardId& shardId : allShardIds) {
- _dropDatabaseFromShard(opCtx, shardId, dbname);
- }
-
-
- // Remove the database entry from the metadata.
- const Status status =
- catalogClient->removeConfigDocuments(opCtx,
- DatabaseType::ConfigNS,
- BSON(DatabaseType::name(dbname)),
- ShardingCatalogClient::kMajorityWriteConcern);
- uassertStatusOKWithContext(
- status, str::stream() << "Could not remove database '" << dbname << "' from metadata");
-
- // Send _flushDatabaseCacheUpdates to all shards
- IgnoreAPIParametersBlock ignoreApiParametersBlock{opCtx};
- for (const ShardId& shardId : allShardIds) {
- const auto shard =
- uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
- auto cmdResponse = uassertStatusOK(shard->runCommandWithFixedRetryAttempts(
- opCtx,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- BSON("_flushDatabaseCacheUpdates" << dbname),
- Shard::RetryPolicy::kIdempotent));
- uassertStatusOK(cmdResponse.commandStatus);
- }
-
- ShardingLogging::get(opCtx)->logChange(
- opCtx, "dropDatabase", dbname, BSONObj(), ShardingCatalogClient::kMajorityWriteConcern);
-
- result.append("dropped", dbname);
+ reply.serialize(&result);
return true;
}
-
-private:
- /**
- * Sends the 'dropDatabase' command for the specified database to the specified shard. Throws
- * DBException on failure.
- */
- static void _dropDatabaseFromShard(OperationContext* opCtx,
- const ShardId& shardId,
- const std::string& dbName) {
-
- const auto dropDatabaseCommandBSON = [opCtx] {
- BSONObjBuilder builder;
- builder.append("dropDatabase", 1);
- builder.append(WriteConcernOptions::kWriteConcernField,
- opCtx->getWriteConcern().toBSON());
- return builder.obj();
- }();
-
- const auto shard =
- uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
- auto cmdDropDatabaseResult = uassertStatusOK(shard->runCommandWithFixedRetryAttempts(
- opCtx,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- dbName,
- dropDatabaseCommandBSON,
- Shard::RetryPolicy::kIdempotent));
-
- uassertStatusOK(cmdDropDatabaseResult.commandStatus);
- uassertStatusOK(cmdDropDatabaseResult.writeConcernStatus);
- };
-
} configsvrDropDatabaseCmd;
} // namespace
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index aa3847ff594..c907c044af5 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -336,27 +336,6 @@ public:
//
/**
- * Drops the specified collection from the collection metadata store.
- *
- * Throws a DBException for any failures. These are some of the known failures:
- * - NamespaceNotFound - Collection does not exist
- */
- void dropCollection(OperationContext* opCtx, const NamespaceString& nss);
-
- /**
- * Ensures that a namespace that has received a dropCollection, but no longer has an entry in
- * config.collections, has cleared all relevant metadata entries for the corresponding
- * collection. As part of this, sends dropCollection and setShardVersion to all shards -- in
- * case shards didn't receive these commands as part of the original dropCollection.
- *
- * This function does not guarantee that all shards will eventually receive setShardVersion,
- * unless the client infinitely retries until hearing back success. This function does, however,
- * increase the likelihood of shards having received setShardVersion.
- */
-
- void ensureDropCollectionCompleted(OperationContext* opCtx, const NamespaceString& nss);
-
- /**
* Refines the shard key of an existing collection with namespace 'nss'. Here, 'shardKey'
* denotes the new shard key, which must contain the old shard key as a prefix.
*
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index 92397f38e88..4b4503a3c40 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -68,7 +68,6 @@
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
#include "mongo/s/request_types/flush_routing_table_cache_updates_gen.h"
-#include "mongo/s/request_types/set_shard_version_request.h"
#include "mongo/s/shard_key_pattern.h"
#include "mongo/s/shard_util.h"
#include "mongo/s/sharded_collections_ddl_parameters_gen.h"
@@ -91,7 +90,6 @@ MONGO_FAIL_POINT_DEFINE(hangRefineCollectionShardKeyBeforeCommit);
namespace {
const ReadPreferenceSetting kConfigReadSelector(ReadPreference::Nearest, TagSet{});
-static constexpr int kMaxNumStaleShardVersionRetries = 10;
const WriteConcernOptions kNoWaitWriteConcern(1, WriteConcernOptions::SyncMode::UNSET, Seconds(0));
const char kWriteConcernField[] = "writeConcern";
@@ -183,222 +181,8 @@ void triggerFireAndForgetShardRefreshes(OperationContext* opCtx, const Collectio
}
}
-void sendDropCollectionToAllShards(OperationContext* opCtx, const NamespaceString& nss) {
- const auto catalogClient = Grid::get(opCtx)->catalogClient();
-
- const auto shardsStatus =
- catalogClient->getAllShards(opCtx, repl::ReadConcernLevel::kLocalReadConcern);
- uassertStatusOK(shardsStatus.getStatus());
-
- vector<ShardType> allShards = std::move(shardsStatus.getValue().value);
-
- const auto dropCommandBSON = [opCtx, &nss] {
- BSONObjBuilder builder;
- builder.append("drop", nss.coll());
-
- if (!opCtx->getWriteConcern().usedDefault) {
- builder.append(WriteConcernOptions::kWriteConcernField,
- opCtx->getWriteConcern().toBSON());
- }
-
- ChunkVersion::IGNORED().appendToCommand(&builder);
- return builder.obj();
- }();
-
- auto* const shardRegistry = Grid::get(opCtx)->shardRegistry();
-
- for (const auto& shardEntry : allShards) {
- bool keepTrying;
- size_t numStaleShardVersionAttempts = 0;
- do {
- const auto& shard =
- uassertStatusOK(shardRegistry->getShard(opCtx, shardEntry.getName()));
-
- auto swDropResult = shard->runCommandWithFixedRetryAttempts(
- opCtx,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- nss.db().toString(),
- dropCommandBSON,
- Shard::RetryPolicy::kIdempotent);
-
- const std::string dropCollectionErrMsg = str::stream()
- << "Error dropping collection on shard " << shardEntry.getName();
-
- auto dropResult = uassertStatusOKWithContext(swDropResult, dropCollectionErrMsg);
- uassertStatusOKWithContext(dropResult.writeConcernStatus, dropCollectionErrMsg);
-
- auto dropCommandStatus = std::move(dropResult.commandStatus);
-
- if (dropCommandStatus.code() == ErrorCodes::NamespaceNotFound) {
- // The dropCollection command on the shard is not idempotent, and can return
- // NamespaceNotFound. We can ignore NamespaceNotFound since we have already asserted
- // that there is no writeConcern error.
- keepTrying = false;
- } else if (ErrorCodes::isStaleShardVersionError(dropCommandStatus.code())) {
- numStaleShardVersionAttempts++;
- if (numStaleShardVersionAttempts == kMaxNumStaleShardVersionRetries) {
- uassertStatusOKWithContext(dropCommandStatus,
- str::stream() << dropCollectionErrMsg
- << " due to exceeded retry attempts");
- }
- // No need to refresh cache, the command was sent with ChunkVersion::IGNORED and the
- // shard is allowed to throw, which means that the drop will serialize behind a
- // refresh.
- keepTrying = true;
- } else {
- uassertStatusOKWithContext(dropCommandStatus, dropCollectionErrMsg);
- keepTrying = false;
- }
- } while (keepTrying);
- }
-}
-
-void sendSSVToAllShards(OperationContext* opCtx, const NamespaceString& nss) {
- const auto catalogClient = Grid::get(opCtx)->catalogClient();
-
- const auto shardsStatus =
- catalogClient->getAllShards(opCtx, repl::ReadConcernLevel::kLocalReadConcern);
- uassertStatusOK(shardsStatus.getStatus());
-
- vector<ShardType> allShards = std::move(shardsStatus.getValue().value);
-
- auto* const shardRegistry = Grid::get(opCtx)->shardRegistry();
-
- IgnoreAPIParametersBlock ignoreApiParametersBlock(opCtx);
- for (const auto& shardEntry : allShards) {
- const auto& shard = uassertStatusOK(shardRegistry->getShard(opCtx, shardEntry.getName()));
-
- SetShardVersionRequest ssv(
- nss, ChunkVersion::UNSHARDED(), true /* isAuthoritative */, true /* forceRefresh */);
-
- auto ssvResult = shard->runCommandWithFixedRetryAttempts(
- opCtx,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- ssv.toBSON(),
- Shard::RetryPolicy::kIdempotent);
-
- uassertStatusOK(ssvResult.getStatus());
- uassertStatusOK(ssvResult.getValue().commandStatus);
- }
-}
-
-void removeChunksForDroppedCollection(OperationContext* opCtx,
- const NamespaceStringOrUUID& nssOrUUID) {
- IgnoreAPIParametersBlock ignoreApiParametersBlock(opCtx);
- const auto catalogClient = Grid::get(opCtx)->catalogClient();
-
- // Remove chunk data
- const auto chunksQuery = [&]() {
- if (nssOrUUID.uuid()) {
- return BSON(ChunkType::collectionUUID << *nssOrUUID.uuid());
- } else {
- return BSON(ChunkType::ns(nssOrUUID.nss()->ns()));
- }
- }();
- uassertStatusOK(catalogClient->removeConfigDocuments(
- opCtx, ChunkType::ConfigNS, chunksQuery, ShardingCatalogClient::kMajorityWriteConcern));
-}
-
-void removeTagsForDroppedCollection(OperationContext* opCtx, const NamespaceString& nss) {
- IgnoreAPIParametersBlock ignoreApiParametersBlock(opCtx);
- const auto catalogClient = Grid::get(opCtx)->catalogClient();
-
- // Remove tag data
- uassertStatusOK(
- catalogClient->removeConfigDocuments(opCtx,
- TagsType::ConfigNS,
- BSON(TagsType::ns(nss.ns())),
- ShardingCatalogClient::kMajorityWriteConcern));
-}
-
} // namespace
-void ShardingCatalogManager::dropCollection(OperationContext* opCtx, const NamespaceString& nss) {
- uassertStatusOK(ShardingLogging::get(opCtx)->logChangeChecked(
- opCtx,
- "dropCollection.start",
- nss.ns(),
- BSONObj(),
- ShardingCatalogClient::kMajorityWriteConcern));
-
- LOGV2_DEBUG(21924,
- 1,
- "dropCollection {namespace} started",
- "dropCollection started",
- "namespace"_attr = nss.ns());
-
- sendDropCollectionToAllShards(opCtx, nss);
-
- LOGV2_DEBUG(21925,
- 1,
- "dropCollection {namespace} shard data deleted",
- "dropCollection shard data deleted",
- "namespace"_attr = nss.ns());
-
- try {
- const auto catalogClient = Grid::get(opCtx)->catalogClient();
- auto collType = catalogClient->getCollection(opCtx, nss);
- const auto nssOrUUID = [&]() {
- if (collType.getTimestamp()) {
- return NamespaceStringOrUUID(std::string(), collType.getUuid());
- } else {
- return NamespaceStringOrUUID(collType.getNss());
- }
- }();
- removeChunksForDroppedCollection(opCtx, nssOrUUID);
- removeTagsForDroppedCollection(opCtx, nss);
-
- LOGV2_DEBUG(21926,
- 1,
- "dropCollection {namespace} chunk and tag data deleted",
- "dropCollection chunk and tag data deleted",
- "namespace"_attr = nss.ns());
-
- uassertStatusOK(
- catalogClient->removeConfigDocuments(opCtx,
- CollectionType::ConfigNS,
- BSON(CollectionType::kNssFieldName << nss.ns()),
- ShardingCatalogClient::kMajorityWriteConcern));
- LOGV2_DEBUG(21927,
- 1,
- "dropCollection {namespace} collection entry deleted",
- "dropCollection collection entry deleted",
- "namespace"_attr = nss.ns());
- } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
- LOGV2(5310500,
- "dropCollection {namespace} collection entry not found",
- "dropCollection {namespace} collection entry not found",
- "namespace"_attr = nss.ns());
- }
-
- sendSSVToAllShards(opCtx, nss);
-
- LOGV2_DEBUG(21928,
- 1,
- "dropCollection {namespace} completed",
- "dropCollection completed",
- "namespace"_attr = nss.ns());
-
- ShardingLogging::get(opCtx)->logChange(
- opCtx, "dropCollection", nss.ns(), BSONObj(), ShardingCatalogClient::kMajorityWriteConcern);
-}
-
-void ShardingCatalogManager::ensureDropCollectionCompleted(OperationContext* opCtx,
- const NamespaceString& nss) {
-
- LOGV2_DEBUG(21929,
- 1,
- "Ensuring config entries for {namespace} from previous dropCollection are cleared",
- "Ensuring config entries from previous dropCollection are cleared",
- "namespace"_attr = nss.ns());
- sendDropCollectionToAllShards(opCtx, nss);
-
- IgnoreAPIParametersBlock ignoreApiParametersBlock(opCtx);
- removeTagsForDroppedCollection(opCtx, nss);
- sendSSVToAllShards(opCtx, nss);
-}
-
// Returns the pipeline updates to be used for updating a refined collection's chunk and tag
// documents.
//
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_drop_coll_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_drop_coll_test.cpp
index 743494e85e4..ec669cde486 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_drop_coll_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_drop_coll_test.cpp
@@ -34,13 +34,13 @@
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/drop_collection_legacy.h"
#include "mongo/rpc/metadata/tracking_metadata.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
+#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog/type_tags.h"
-#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/util/scopeguard.h"
@@ -79,7 +79,13 @@ public:
->getTargeter());
shard2Targeter->setFindHostReturnValue(HostAndPort(_shard2.getHost()));
- // insert documents into the config database
+ // Create the database, collection, chunks and zones in the config collection, so the test
+ // starts with a properly created collection
+ DatabaseType dbt(
+ dropNS().db().toString(), _shard1.getName(), true, DatabaseVersion(UUID::gen()));
+ ASSERT_OK(
+ insertToConfigCollection(operationContext(), DatabaseType::ConfigNS, dbt.toBSON()));
+
CollectionType shardedCollection(dropNS(), OID::gen(), Date_t::now(), UUID::gen());
shardedCollection.setKeyPattern(BSON(_shardKey << 1));
ASSERT_OK(insertToConfigCollection(
@@ -168,9 +174,9 @@ public:
}
void doDrop() {
- ThreadClient tc("Test", getGlobalServiceContext());
- auto opCtx = cc().makeOperationContext();
- ShardingCatalogManager::get(opCtx.get())->dropCollection(opCtx.get(), dropNS());
+ ThreadClient tc("Test", getServiceContext());
+ auto opCtx = tc->makeOperationContext();
+ dropCollectionLegacy(opCtx.get(), dropNS());
}
const NamespaceString& dropNS() const {
diff --git a/src/mongo/db/s/drop_collection_legacy.cpp b/src/mongo/db/s/drop_collection_legacy.cpp
new file mode 100644
index 00000000000..dad6be97dc6
--- /dev/null
+++ b/src/mongo/db/s/drop_collection_legacy.cpp
@@ -0,0 +1,329 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/s/drop_collection_legacy.h"
+
+#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/dist_lock_manager.h"
+#include "mongo/db/s/sharding_logging.h"
+#include "mongo/logv2/log.h"
+#include "mongo/s/catalog/type_database.h"
+#include "mongo/s/catalog/type_tags.h"
+#include "mongo/s/catalog_cache.h"
+#include "mongo/s/grid.h"
+#include "mongo/s/request_types/set_shard_version_request.h"
+
+namespace mongo {
+namespace {
+
+static constexpr int kMaxNumStaleShardVersionRetries = 10;
+
+void sendDropCollectionToAllShards(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const std::vector<ShardType>& allShards) {
+ const auto dropCommandBSON = [opCtx, &nss] {
+ BSONObjBuilder builder;
+ builder.append("drop", nss.coll());
+
+ if (!opCtx->getWriteConcern().usedDefault) {
+ builder.append(WriteConcernOptions::kWriteConcernField,
+ opCtx->getWriteConcern().toBSON());
+ }
+
+ ChunkVersion::IGNORED().appendToCommand(&builder);
+ return builder.obj();
+ }();
+
+ auto* const shardRegistry = Grid::get(opCtx)->shardRegistry();
+
+ for (const auto& shardEntry : allShards) {
+ bool keepTrying;
+ size_t numStaleShardVersionAttempts = 0;
+ do {
+ const auto& shard =
+ uassertStatusOK(shardRegistry->getShard(opCtx, shardEntry.getName()));
+
+ auto swDropResult = shard->runCommandWithFixedRetryAttempts(
+ opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ nss.db().toString(),
+ dropCommandBSON,
+ Shard::RetryPolicy::kIdempotent);
+
+ const std::string dropCollectionErrMsg = str::stream()
+ << "Error dropping collection on shard " << shardEntry.getName();
+
+ auto dropResult = uassertStatusOKWithContext(swDropResult, dropCollectionErrMsg);
+ uassertStatusOKWithContext(dropResult.writeConcernStatus, dropCollectionErrMsg);
+
+ auto dropCommandStatus = std::move(dropResult.commandStatus);
+
+ if (dropCommandStatus.code() == ErrorCodes::NamespaceNotFound) {
+ // The dropCollection command on the shard is not idempotent, and can return
+ // NamespaceNotFound. We can ignore NamespaceNotFound since we have already asserted
+ // that there is no writeConcern error.
+ keepTrying = false;
+ } else if (ErrorCodes::isStaleShardVersionError(dropCommandStatus.code())) {
+ numStaleShardVersionAttempts++;
+ if (numStaleShardVersionAttempts == kMaxNumStaleShardVersionRetries) {
+ uassertStatusOKWithContext(dropCommandStatus,
+ str::stream() << dropCollectionErrMsg
+ << " due to exceeded retry attempts");
+ }
+ // No need to refresh cache, the command was sent with ChunkVersion::IGNORED and the
+ // shard is allowed to throw, which means that the drop will serialize behind a
+ // refresh.
+ keepTrying = true;
+ } else {
+ uassertStatusOKWithContext(dropCommandStatus, dropCollectionErrMsg);
+ keepTrying = false;
+ }
+ } while (keepTrying);
+ }
+}
+
+void sendSSVToAllShards(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const std::vector<ShardType>& allShards) {
+ auto* const shardRegistry = Grid::get(opCtx)->shardRegistry();
+
+ IgnoreAPIParametersBlock ignoreApiParametersBlock(opCtx);
+ for (const auto& shardEntry : allShards) {
+ const auto& shard = uassertStatusOK(shardRegistry->getShard(opCtx, shardEntry.getName()));
+
+ SetShardVersionRequest ssv(
+ nss, ChunkVersion::UNSHARDED(), true /* isAuthoritative */, true /* forceRefresh */);
+
+ auto ssvResult = shard->runCommandWithFixedRetryAttempts(
+ opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ ssv.toBSON(),
+ Shard::RetryPolicy::kIdempotent);
+
+ uassertStatusOK(ssvResult.getStatus());
+ uassertStatusOK(ssvResult.getValue().commandStatus);
+ }
+}
+
+void removeChunksForDroppedCollection(OperationContext* opCtx,
+ const NamespaceStringOrUUID& nssOrUUID) {
+ IgnoreAPIParametersBlock ignoreApiParametersBlock(opCtx);
+ const auto catalogClient = Grid::get(opCtx)->catalogClient();
+
+ // Remove chunk data
+ const auto chunksQuery = [&]() {
+ if (nssOrUUID.uuid()) {
+ return BSON(ChunkType::collectionUUID << *nssOrUUID.uuid());
+ } else {
+ return BSON(ChunkType::ns(nssOrUUID.nss()->ns()));
+ }
+ }();
+ uassertStatusOK(catalogClient->removeConfigDocuments(
+ opCtx, ChunkType::ConfigNS, chunksQuery, ShardingCatalogClient::kMajorityWriteConcern));
+}
+
+void removeTagsForDroppedCollection(OperationContext* opCtx, const NamespaceString& nss) {
+ IgnoreAPIParametersBlock ignoreApiParametersBlock(opCtx);
+ const auto catalogClient = Grid::get(opCtx)->catalogClient();
+
+ // Remove tag data
+ uassertStatusOK(
+ catalogClient->removeConfigDocuments(opCtx,
+ TagsType::ConfigNS,
+ BSON(TagsType::ns(nss.ns())),
+ ShardingCatalogClient::kMajorityWriteConcern));
+}
+
+
+/**
+ * Ensures that a namespace that has received a dropCollection, but no longer has an entry in
+ * config.collections, has cleared all relevant metadata entries for the corresponding collection.
+ * As part of this, sends dropCollection and setShardVersion to all shards -- in case shards didn't
+ * receive these commands as part of the original dropCollection.
+ *
+ * This function does not guarantee that all shards will eventually receive setShardVersion, unless
+ * the client infinitely retries until hearing back success. This function does, however, increase
+ * the likelihood of shards having received setShardVersion.
+ */
+void ensureDropCollectionCompleted(OperationContext* opCtx, const NamespaceString& nss) {
+ const auto catalogClient = Grid::get(opCtx)->catalogClient();
+ auto allShards = uassertStatusOK(catalogClient->getAllShards(
+ opCtx, repl::ReadConcernLevel::kMajorityReadConcern))
+ .value;
+
+ LOGV2_DEBUG(21929,
+ 1,
+ "Ensuring config entries for {namespace} from previous dropCollection are cleared",
+ "Ensuring config entries from previous dropCollection are cleared",
+ "namespace"_attr = nss.ns());
+
+ sendDropCollectionToAllShards(opCtx, nss, allShards);
+
+ IgnoreAPIParametersBlock ignoreApiParametersBlock(opCtx);
+ removeTagsForDroppedCollection(opCtx, nss);
+ sendSSVToAllShards(opCtx, nss, allShards);
+}
+
+} // namespace
+
+void dropCollectionLegacy(OperationContext* opCtx, const NamespaceString& nss) {
+ auto dbDistLock = uassertStatusOK(DistLockManager::get(opCtx)->lock(
+ opCtx, nss.db(), "dropCollection", DistLockManager::kDefaultLockTimeout));
+ auto collDistLock = uassertStatusOK(DistLockManager::get(opCtx)->lock(
+ opCtx, nss.ns(), "dropCollection", DistLockManager::kDefaultLockTimeout));
+
+ ON_BLOCK_EXIT([opCtx, nss] {
+ Grid::get(opCtx)->catalogCache()->invalidateCollectionEntry_LINEARIZABLE(nss);
+ });
+
+ auto const catalogClient = Grid::get(opCtx)->catalogClient();
+
+ CollectionType collection;
+ try {
+ catalogClient->getCollection(opCtx, nss, repl::ReadConcernLevel::kMajorityReadConcern);
+ dropCollectionNoDistLock(opCtx, nss);
+ } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
+ // If the DB isn't in the sharding catalog either, consider the drop a success.
+ DatabaseType dbt;
+ try {
+ dbt = catalogClient->getDatabase(
+ opCtx, nss.db().toString(), repl::ReadConcernLevel::kMajorityReadConcern);
+ } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
+ return;
+ }
+
+ // If we found the DB but not the collection, and the primary shard for the database is the
+ // config server, run the drop only against the config server unless the collection is
+ // config.system.sessions, since no other collections whose primary shard is the config
+ // server can have been sharded.
+ if (dbt.getPrimary() == ShardId::kConfigServerId &&
+ nss != NamespaceString::kLogicalSessionsNamespace) {
+ auto cmdDropResult =
+ uassertStatusOK(Grid::get(opCtx)
+ ->shardRegistry()
+ ->getConfigShard()
+ ->runCommandWithFixedRetryAttempts(
+ opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ nss.db().toString(),
+ BSON("drop" << nss.coll()),
+ Shard::RetryPolicy::kIdempotent));
+
+ // If the collection doesn't exist, consider the drop a success.
+ if (cmdDropResult.commandStatus == ErrorCodes::NamespaceNotFound) {
+ return;
+ }
+ uassertStatusOK(cmdDropResult.commandStatus);
+ return;
+ }
+
+ ensureDropCollectionCompleted(opCtx, nss);
+ }
+}
+
+void dropCollectionNoDistLock(OperationContext* opCtx, const NamespaceString& nss) {
+ uassertStatusOK(ShardingLogging::get(opCtx)->logChangeChecked(
+ opCtx,
+ "dropCollection.start",
+ nss.ns(),
+ BSONObj(),
+ ShardingCatalogClient::kMajorityWriteConcern));
+
+ LOGV2_DEBUG(21924,
+ 1,
+ "dropCollection {namespace} started",
+ "dropCollection started",
+ "namespace"_attr = nss.ns());
+
+ const auto catalogClient = Grid::get(opCtx)->catalogClient();
+ auto allShards = uassertStatusOK(catalogClient->getAllShards(
+ opCtx, repl::ReadConcernLevel::kMajorityReadConcern))
+ .value;
+
+ sendDropCollectionToAllShards(opCtx, nss, allShards);
+
+ LOGV2_DEBUG(21925,
+ 1,
+ "dropCollection {namespace} shard data deleted",
+ "dropCollection shard data deleted",
+ "namespace"_attr = nss.ns());
+
+ try {
+ auto collType = catalogClient->getCollection(opCtx, nss);
+ const auto nssOrUUID = [&]() {
+ if (collType.getTimestamp()) {
+ return NamespaceStringOrUUID(std::string(), collType.getUuid());
+ } else {
+ return NamespaceStringOrUUID(collType.getNss());
+ }
+ }();
+ removeChunksForDroppedCollection(opCtx, nssOrUUID);
+ removeTagsForDroppedCollection(opCtx, nss);
+
+ LOGV2_DEBUG(21926,
+ 1,
+ "dropCollection {namespace} chunk and tag data deleted",
+ "dropCollection chunk and tag data deleted",
+ "namespace"_attr = nss.ns());
+
+ uassertStatusOK(
+ catalogClient->removeConfigDocuments(opCtx,
+ CollectionType::ConfigNS,
+ BSON(CollectionType::kNssFieldName << nss.ns()),
+ ShardingCatalogClient::kMajorityWriteConcern));
+ LOGV2_DEBUG(21927,
+ 1,
+ "dropCollection {namespace} collection entry deleted",
+ "dropCollection collection entry deleted",
+ "namespace"_attr = nss.ns());
+ } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
+ LOGV2(5310500,
+ "dropCollection {namespace} collection entry not found",
+ "dropCollection {namespace} collection entry not found",
+ "namespace"_attr = nss.ns());
+ }
+
+ sendSSVToAllShards(opCtx, nss, allShards);
+
+ LOGV2_DEBUG(21928,
+ 1,
+ "dropCollection {namespace} completed",
+ "dropCollection completed",
+ "namespace"_attr = nss.ns());
+
+ ShardingLogging::get(opCtx)->logChange(
+ opCtx, "dropCollection", nss.ns(), BSONObj(), ShardingCatalogClient::kMajorityWriteConcern);
+}
+
+} // namespace mongo
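
The heart of sendDropCollectionToAllShards() above is a bounded retry loop: NamespaceNotFound is treated as success (the shard-side drop is not idempotent), stale shard version errors are retried up to kMaxNumStaleShardVersionRetries, and anything else is rethrown. A minimal self-contained sketch of that pattern, using stand-in types rather than the tree's Status/Shard APIs:

    #include <cstddef>
    #include <stdexcept>
    #include <string>

    // Stand-in for the drop command's commandStatus; not the tree's Status type.
    enum class DropStatus { kOk, kNamespaceNotFound, kStaleShardVersion };

    constexpr std::size_t kMaxNumStaleShardVersionRetries = 10;  // same bound as above

    // 'runDropOnShard' stands in for shard->runCommandWithFixedRetryAttempts().
    template <typename RunDropOnShard>
    void dropWithBoundedStaleRetries(RunDropOnShard runDropOnShard,
                                     const std::string& shardName) {
        std::size_t staleAttempts = 0;
        bool keepTrying;
        do {
            const DropStatus status = runDropOnShard();
            if (status == DropStatus::kNamespaceNotFound) {
                // A repeated drop reports NamespaceNotFound; treat it as success.
                keepTrying = false;
            } else if (status == DropStatus::kStaleShardVersion) {
                // The command is sent with ChunkVersion::IGNORED, so the shard may
                // throw stale version; retrying serializes behind its refresh.
                if (++staleAttempts == kMaxNumStaleShardVersionRetries) {
                    throw std::runtime_error("Error dropping collection on shard " +
                                             shardName +
                                             " due to exceeded retry attempts");
                }
                keepTrying = true;
            } else {
                keepTrying = false;  // kOk; any other error would be rethrown here
            }
        } while (keepTrying);
    }

For instance, dropWithBoundedStaleRetries([] { return DropStatus::kOk; }, "shard0000") returns after a single attempt, while a shard that keeps answering kStaleShardVersion fails after ten tries.
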
diff --git a/src/mongo/db/s/drop_collection_legacy.h b/src/mongo/db/s/drop_collection_legacy.h
new file mode 100644
index 00000000000..00c08090749
--- /dev/null
+++ b/src/mongo/db/s/drop_collection_legacy.h
@@ -0,0 +1,48 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/operation_context.h"
+
+namespace mongo {
+
+void dropCollectionLegacy(OperationContext* opCtx, const NamespaceString& nss);
+
+/**
+ * Contains the underlying logic for drop collection, assuming that the caller calls it in isolation
+ * from other DDLs through the means of the DistLock.
+ *
+ * Throws a DBException for any failures. These are some of the known failures:
+ * - NamespaceNotFound - Collection does not exist
+ */
+void dropCollectionNoDistLock(OperationContext* opCtx, const NamespaceString& nss);
+
+} // namespace mongo
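
The split into two entry points mirrors how dropDatabaseLegacy() consumes them: dropCollectionLegacy() acquires the database and collection dist locks itself, while dropCollectionNoDistLock() assumes the caller already serializes DDL operations. Hypothetical call sites, for illustration only:

    // Hypothetical callers (not from the tree):
    // No DDL lock held yet; let the helper take the db + collection dist locks:
    dropCollectionLegacy(opCtx, nss);
    // Caller already holds a collection dist lock (as dropDatabaseLegacy() does
    // for each sharded collection), so skip re-acquisition:
    dropCollectionNoDistLock(opCtx, nss);
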
diff --git a/src/mongo/db/s/drop_database_legacy.cpp b/src/mongo/db/s/drop_database_legacy.cpp
new file mode 100644
index 00000000000..cd257699ce8
--- /dev/null
+++ b/src/mongo/db/s/drop_database_legacy.cpp
@@ -0,0 +1,140 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/s/drop_database_legacy.h"
+
+#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/dist_lock_manager.h"
+#include "mongo/db/s/drop_collection_legacy.h"
+#include "mongo/db/s/sharding_logging.h"
+#include "mongo/s/grid.h"
+
+namespace mongo {
+namespace {
+
+void dropDatabaseFromShard(OperationContext* opCtx, const ShardId& shardId, StringData dbName) {
+ const auto dropDatabaseCommandBSON = [opCtx] {
+ BSONObjBuilder builder;
+ builder.append("dropDatabase", 1);
+ builder.append(WriteConcernOptions::kWriteConcernField, opCtx->getWriteConcern().toBSON());
+ return builder.obj();
+ }();
+
+ const auto shard = uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
+ auto cmdDropDatabaseResult = uassertStatusOK(
+ shard->runCommandWithFixedRetryAttempts(opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ dbName.toString(),
+ dropDatabaseCommandBSON,
+ Shard::RetryPolicy::kIdempotent));
+
+ uassertStatusOK(cmdDropDatabaseResult.commandStatus);
+ uassertStatusOK(cmdDropDatabaseResult.writeConcernStatus);
+}
+
+} // namespace
+
+DropDatabaseReply dropDatabaseLegacy(OperationContext* opCtx, StringData dbName) {
+ auto dbDistLock = uassertStatusOK(DistLockManager::get(opCtx)->lock(
+ opCtx, dbName, "dropDatabase", DistLockManager::kDefaultLockTimeout));
+
+ ON_BLOCK_EXIT([&] { Grid::get(opCtx)->catalogCache()->purgeDatabase(dbName); });
+
+ auto const catalogClient = Grid::get(opCtx)->catalogClient();
+
+ DatabaseType dbType;
+ try {
+ dbType =
+ catalogClient->getDatabase(opCtx, dbName, repl::ReadConcernLevel::kMajorityReadConcern);
+ } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
+ DropDatabaseReply reply;
+ reply.setInfo("database does not exist"_sd);
+ return reply;
+ }
+
+ uassertStatusOK(ShardingLogging::get(opCtx)->logChangeChecked(
+ opCtx,
+ "dropDatabase.start",
+ dbName,
+ BSONObj(),
+ ShardingCatalogClient::kMajorityWriteConcern));
+
+ // Drop the database's collections.
+ for (const auto& nss : catalogClient->getAllShardedCollectionsForDb(
+ opCtx, dbName, repl::ReadConcernLevel::kMajorityReadConcern)) {
+ auto collDistLock = uassertStatusOK(DistLockManager::get(opCtx)->lock(
+ opCtx, nss.ns(), "dropCollection", DistLockManager::kDefaultLockTimeout));
+ dropCollectionNoDistLock(opCtx, nss);
+ }
+
+ // Drop the database from the primary shard first.
+ dropDatabaseFromShard(opCtx, dbType.getPrimary(), dbName);
+
+ // Drop the database from each of the remaining shards.
+ const auto allShardIds = Grid::get(opCtx)->shardRegistry()->getAllShardIdsNoReload();
+ for (const ShardId& shardId : allShardIds) {
+ dropDatabaseFromShard(opCtx, shardId, dbName);
+ }
+
+ // Remove the database entry from the metadata.
+ const Status status =
+ catalogClient->removeConfigDocuments(opCtx,
+ DatabaseType::ConfigNS,
+ BSON(DatabaseType::name(dbName.toString())),
+ ShardingCatalogClient::kMajorityWriteConcern);
+ uassertStatusOKWithContext(
+ status, str::stream() << "Could not remove database '" << dbName << "' from metadata");
+
+ // Send _flushDatabaseCacheUpdates to all shards
+ IgnoreAPIParametersBlock ignoreApiParametersBlock{opCtx};
+ for (const ShardId& shardId : allShardIds) {
+ const auto shard =
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
+ auto cmdResponse = uassertStatusOK(shard->runCommandWithFixedRetryAttempts(
+ opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ BSON("_flushDatabaseCacheUpdates" << dbName),
+ Shard::RetryPolicy::kIdempotent));
+ uassertStatusOK(cmdResponse.commandStatus);
+ }
+
+ ShardingLogging::get(opCtx)->logChange(
+ opCtx, "dropDatabase", dbName, BSONObj(), ShardingCatalogClient::kMajorityWriteConcern);
+
+ DropDatabaseReply reply;
+ reply.setDropped(dbName);
+ return reply;
+}
+
+} // namespace mongo
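
dropDatabaseLegacy() above is deliberate about ordering: sharded collections are dropped first (each under its own collection dist lock), the database is then dropped on the primary shard before the remaining shards, and only afterwards is the config.databases entry removed and every shard told to refresh. A condensed, self-contained outline of that sequence with stand-in functions (assumptions, not tree code):

    #include <string>
    #include <vector>

    // Stand-ins for the steps of dropDatabaseLegacy(); illustrative only.
    void dropShardedCollectionUnderDistLock(const std::string& ns) {}
    void dropDatabaseOnShard(const std::string& shardId, const std::string& db) {}
    void removeDatabaseMetadataEntry(const std::string& db) {}
    void flushDatabaseCacheUpdates(const std::string& shardId, const std::string& db) {}

    void dropDatabaseSequence(const std::string& db,
                              const std::vector<std::string>& shardedCollections,
                              const std::string& primaryShard,
                              const std::vector<std::string>& allShards) {
        // 1. Drop every sharded collection, each under its own dist lock.
        for (const auto& ns : shardedCollections)
            dropShardedCollectionUnderDistLock(ns);

        // 2. Drop the database on the primary shard first...
        dropDatabaseOnShard(primaryShard, db);

        // 3. ...then on every shard (the repeat on the primary is idempotent).
        for (const auto& shardId : allShards)
            dropDatabaseOnShard(shardId, db);

        // 4. Only once all shards are clean, remove the config.databases entry...
        removeDatabaseMetadataEntry(db);

        // 5. ...and send _flushDatabaseCacheUpdates so shards refresh their view.
        for (const auto& shardId : allShards)
            flushDatabaseCacheUpdates(shardId, db);
    }
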
diff --git a/src/mongo/db/s/drop_database_legacy.h b/src/mongo/db/s/drop_database_legacy.h
new file mode 100644
index 00000000000..f03f06c688d
--- /dev/null
+++ b/src/mongo/db/s/drop_database_legacy.h
@@ -0,0 +1,40 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
+
+namespace mongo {
+
+DropDatabaseReply dropDatabaseLegacy(OperationContext* opCtx, StringData dbName);
+
+} // namespace mongo
diff --git a/src/mongo/db/s/shardsvr_drop_collection_command.cpp b/src/mongo/db/s/shardsvr_drop_collection_command.cpp
index bf0c068bdc2..537286b9b73 100644
--- a/src/mongo/db/s/shardsvr_drop_collection_command.cpp
+++ b/src/mongo/db/s/shardsvr_drop_collection_command.cpp
@@ -32,10 +32,10 @@
#include "mongo/platform/basic.h"
#include "mongo/db/auth/authorization_session.h"
-#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/commands.h"
#include "mongo/db/curop.h"
#include "mongo/db/s/drop_collection_coordinator.h"
+#include "mongo/db/s/drop_collection_legacy.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/logv2/log.h"
#include "mongo/s/grid.h"
@@ -45,19 +45,6 @@
namespace mongo {
namespace {
-void dropCollectionLegacy(OperationContext* opCtx, const NamespaceString& nss) {
- auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- const auto cmdResponse = configShard->runCommandWithFixedRetryAttempts(
- opCtx,
- ReadPreferenceSetting(ReadPreference::PrimaryOnly),
- "admin",
- CommandHelpers::appendMajorityWriteConcern(
- BSON("_configsvrDropCollection" << nss.toString()), opCtx->getWriteConcern()),
- Shard::RetryPolicy::kIdempotent);
-
- uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(cmdResponse));
-}
-
class ShardsvrDropCollectionCommand final : public TypedCommand<ShardsvrDropCollectionCommand> {
public:
using Request = ShardsvrDropCollection;
diff --git a/src/mongo/db/s/shardsvr_drop_database_command.cpp b/src/mongo/db/s/shardsvr_drop_database_command.cpp
index 4ba2286d8e5..90548c530df 100644
--- a/src/mongo/db/s/shardsvr_drop_database_command.cpp
+++ b/src/mongo/db/s/shardsvr_drop_database_command.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/commands.h"
#include "mongo/db/curop.h"
#include "mongo/db/s/drop_database_coordinator.h"
+#include "mongo/db/s/drop_database_legacy.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/logv2/log.h"
#include "mongo/s/grid.h"
@@ -45,22 +46,6 @@
namespace mongo {
namespace {
-DropDatabaseReply dropDatabaseLegacy(OperationContext* opCtx, StringData dbName) {
- const auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- auto cmdResponse = uassertStatusOK(configShard->runCommandWithFixedRetryAttempts(
- opCtx,
- ReadPreferenceSetting(ReadPreference::PrimaryOnly),
- "admin",
- CommandHelpers::appendMajorityWriteConcern(BSON("_configsvrDropDatabase" << dbName),
- opCtx->getWriteConcern()),
- Shard::RetryPolicy::kIdempotent));
-
- uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(cmdResponse));
-
- return DropDatabaseReply::parse(IDLParserErrorContext("dropDatabase-reply"),
- cmdResponse.response);
-}
-
class ShardsvrDropDatabaseCommand final : public TypedCommand<ShardsvrDropDatabaseCommand> {
public:
using Request = ShardsvrDropDatabase;
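
Note that the hunks for the two shardsvr commands delete only the file-local forwarding helpers; the calls in the command bodies are untouched and now resolve, via the added includes, to the new shard-local implementations. Schematically (the unchanged call site shown here is assumed, not part of this diff):

    // Before: a static, file-local dropDatabaseLegacy() forwarded
    // _configsvrDropDatabase to the config server.
    // After: the added include re-points the same, unchanged call site:
    //
    //   #include "mongo/db/s/drop_database_legacy.h"
    //   ...
    //   auto reply = dropDatabaseLegacy(opCtx, dbName);  // now runs on the shard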