summaryrefslogtreecommitdiff
path: root/src/mongo/db/catalog/drop_database.cpp
diff options
context:
space:
mode:
author: Gabriel Marks <gabriel.marks@mongodb.com> 2021-11-11 15:10:42 +0000
committer: Evergreen Agent <no-reply@evergreen.mongodb.com> 2021-11-11 16:54:05 +0000
commit: 63ea241ba9d2224987440735fed9e19eee23b1d9 (patch)
tree: 018d7cf23777c2b59df99edc43ff7df3fd3694af /src/mongo/db/catalog/drop_database.cpp
parent: 858236f313e5e22b04d1690a744f0f60a205cbe9 (diff)
download: mongo-63ea241ba9d2224987440735fed9e19eee23b1d9.tar.gz
SERVER-49866 Remove TempRelease
Diffstat (limited to 'src/mongo/db/catalog/drop_database.cpp')
-rw-r--r-- src/mongo/db/catalog/drop_database.cpp | 137
1 file changed, 63 insertions(+), 74 deletions(-)
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index a75e574a8b6..153f986a1d1 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -304,86 +304,75 @@ Status _dropDatabase(OperationContext* opCtx, const std::string& dbName, bool ab
}
});
- {
- // Holding of any locks is disallowed while awaiting replication because this can
- // potentially block for long time while doing network activity.
- //
- // Even though dropDatabase() does not explicitly acquire any locks before awaiting
- // replication, it is possible that the caller of this function may already have acquired
- // a lock. The applyOps command is an example of a dropDatabase() caller that does this.
- // Therefore, we have to release any locks using a TempRelease RAII object.
- //
- // TODO: Remove the use of this TempRelease object when SERVER-29802 is completed.
- // The work in SERVER-29802 will adjust the locking rules around applyOps operations and
- // dropDatabase is expected to be one of the operations where we expect to no longer acquire
- // the global lock.
- Lock::TempRelease release(opCtx->lockState());
-
- auto awaitOpTime = [&]() {
- if (numCollectionsToDrop > 0U) {
- const auto& clientInfo = repl::ReplClientInfo::forClient(opCtx->getClient());
- return clientInfo.getLastOp();
- }
- invariant(!latestDropPendingOpTime.isNull());
- return latestDropPendingOpTime;
- }();
-
- // The user-supplied wTimeout should be used when waiting for majority write concern.
- const auto& userWriteConcern = opCtx->getWriteConcern();
- const auto wTimeout = !userWriteConcern.isImplicitDefaultWriteConcern()
- ? Milliseconds{userWriteConcern.wTimeout}
- : duration_cast<Milliseconds>(Minutes(10));
-
- // This is used to wait for the collection drops to replicate to a majority of the replica
- // set. Note: Even though we're setting UNSET here, kMajority implies JOURNAL if journaling
- // is supported by mongod and writeConcernMajorityJournalDefault is set to true in the
- // ReplSetConfig.
- const WriteConcernOptions dropDatabaseWriteConcern(
- WriteConcernOptions::kMajority, WriteConcernOptions::SyncMode::UNSET, wTimeout);
-
- LOGV2(20340,
- "dropDatabase {dbName} waiting for {awaitOpTime} to be replicated at "
- "{dropDatabaseWriteConcern}. Dropping {numCollectionsToDrop} collection(s), with "
- "last collection drop at {latestDropPendingOpTime}",
- "dropDatabase waiting for replication and dropping collections",
- "db"_attr = dbName,
- "awaitOpTime"_attr = awaitOpTime,
- "dropDatabaseWriteConcern"_attr = dropDatabaseWriteConcern.toBSON(),
- "numCollectionsToDrop"_attr = numCollectionsToDrop,
- "latestDropPendingOpTime"_attr = latestDropPendingOpTime);
-
- auto result = replCoord->awaitReplication(opCtx, awaitOpTime, dropDatabaseWriteConcern);
-
- // If the user-provided write concern is weaker than majority, this is effectively a no-op.
- if (result.status.isOK() && !userWriteConcern.usedDefaultConstructedWC) {
- LOGV2(20341,
- "dropDatabase {dbName} waiting for {awaitOpTime} to be replicated at "
- "{userWriteConcern}",
- "dropDatabase waiting for replication",
- "db"_attr = dbName,
- "awaitOpTime"_attr = awaitOpTime,
- "writeConcern"_attr = userWriteConcern.toBSON());
- result = replCoord->awaitReplication(opCtx, awaitOpTime, userWriteConcern);
+ // Holding of any locks is disallowed while awaiting replication because this can potentially
+ // block for long time while doing network activity.
+ invariant(!opCtx->lockState()->isLocked());
+
+ auto awaitOpTime = [&]() {
+ if (numCollectionsToDrop > 0U) {
+ const auto& clientInfo = repl::ReplClientInfo::forClient(opCtx->getClient());
+ return clientInfo.getLastOp();
}
+ invariant(!latestDropPendingOpTime.isNull());
+ return latestDropPendingOpTime;
+ }();
+
+ // The user-supplied wTimeout should be used when waiting for majority write concern.
+ const auto& userWriteConcern = opCtx->getWriteConcern();
+ const auto wTimeout = !userWriteConcern.isImplicitDefaultWriteConcern()
+ ? Milliseconds{userWriteConcern.wTimeout}
+ : duration_cast<Milliseconds>(Minutes(10));
+
+ // This is used to wait for the collection drops to replicate to a majority of the replica
+ // set. Note: Even though we're setting UNSET here, kMajority implies JOURNAL if journaling
+ // is supported by mongod and writeConcernMajorityJournalDefault is set to true in the
+ // ReplSetConfig.
+ const WriteConcernOptions dropDatabaseWriteConcern(
+ WriteConcernOptions::kMajority, WriteConcernOptions::SyncMode::UNSET, wTimeout);
+
+ LOGV2(20340,
+ "dropDatabase {dbName} waiting for {awaitOpTime} to be replicated at "
+ "{dropDatabaseWriteConcern}. Dropping {numCollectionsToDrop} collection(s), with "
+ "last collection drop at {latestDropPendingOpTime}",
+ "dropDatabase waiting for replication and dropping collections",
+ "db"_attr = dbName,
+ "awaitOpTime"_attr = awaitOpTime,
+ "dropDatabaseWriteConcern"_attr = dropDatabaseWriteConcern.toBSON(),
+ "numCollectionsToDrop"_attr = numCollectionsToDrop,
+ "latestDropPendingOpTime"_attr = latestDropPendingOpTime);
- if (!result.status.isOK()) {
- return result.status.withContext(str::stream()
- << "dropDatabase " << dbName << " failed waiting for "
- << numCollectionsToDrop
- << " collection drop(s) (most recent drop optime: "
- << awaitOpTime.toString() << ") to replicate.");
- }
+ auto result = replCoord->awaitReplication(opCtx, awaitOpTime, dropDatabaseWriteConcern);
- LOGV2(20342,
- "dropDatabase {dbName} - successfully dropped {numCollectionsToDrop} collection(s) "
- "(most recent drop optime: {awaitOpTime}) after {result_duration}. dropping database",
- "dropDatabase - successfully dropped collections",
+ // If the user-provided write concern is weaker than majority, this is effectively a no-op.
+ if (result.status.isOK() && !userWriteConcern.usedDefaultConstructedWC) {
+ LOGV2(20341,
+ "dropDatabase {dbName} waiting for {awaitOpTime} to be replicated at "
+ "{userWriteConcern}",
+ "dropDatabase waiting for replication",
"db"_attr = dbName,
- "numCollectionsDropped"_attr = numCollectionsToDrop,
- "mostRecentDropOpTime"_attr = awaitOpTime,
- "duration"_attr = result.duration);
+ "awaitOpTime"_attr = awaitOpTime,
+ "writeConcern"_attr = userWriteConcern.toBSON());
+ result = replCoord->awaitReplication(opCtx, awaitOpTime, userWriteConcern);
+ }
+
+ if (!result.status.isOK()) {
+ return result.status.withContext(str::stream()
+ << "dropDatabase " << dbName << " failed waiting for "
+ << numCollectionsToDrop
+ << " collection drop(s) (most recent drop optime: "
+ << awaitOpTime.toString() << ") to replicate.");
}
+ LOGV2(20342,
+ "dropDatabase {dbName} - successfully dropped {numCollectionsToDrop} collection(s) "
+ "(most recent drop optime: {awaitOpTime}) after {result_duration}. dropping database",
+ "dropDatabase - successfully dropped collections",
+ "db"_attr = dbName,
+ "numCollectionsDropped"_attr = numCollectionsToDrop,
+ "mostRecentDropOpTime"_attr = awaitOpTime,
+ "duration"_attr = result.duration);
+
+
if (MONGO_unlikely(dropDatabaseHangAfterAllCollectionsDrop.shouldFail())) {
LOGV2(20343,
"dropDatabase - fail point dropDatabaseHangAfterAllCollectionsDrop enabled. "