Author:    Xiangyu Yao <xiangyu.yao@mongodb.com>  2019-04-25 14:52:27 -0400
Committer: Xiangyu Yao <xiangyu.yao@mongodb.com>  2019-04-25 14:54:20 -0400
Commit:    62aa69d685e1b6cde2fd3ab826cf12d1f20221fd
Tree:      3f07096bd5f5c7f5edde9a43a084400480a41431
Parent:    24d4a296aacf4acc981094f5cdf973ad62ab67fa
Download:  mongo-62aa69d685e1b6cde2fd3ab826cf12d1f20221fd.tar.gz

Revert "SERVER-39520 Use database IX lock for dropCollection"

This reverts commit fdc3712e4cb89c23451061b4c927a78340269d89.
-rw-r--r--  jstests/core/txns/drop_collection_not_blocked_by_txn.js | 32
-rw-r--r--  jstests/core/txns/transactions_profiling_with_drops.js | 87
-rw-r--r--  jstests/replsets/transient_txn_error_labels_with_write_concern.js | 44
-rw-r--r--  src/mongo/db/catalog/SConscript | 13
-rw-r--r--  src/mongo/db/catalog/catalog_control.cpp | 2
-rw-r--r--  src/mongo/db/catalog/collection.h | 1
-rw-r--r--  src/mongo/db/catalog/collection_impl.cpp | 4
-rw-r--r--  src/mongo/db/catalog/collection_impl.h | 1
-rw-r--r--  src/mongo/db/catalog/collection_mock.h | 1
-rw-r--r--  src/mongo/db/catalog/collection_test.cpp | 2
-rw-r--r--  src/mongo/db/catalog/database_holder_impl.cpp | 5
-rw-r--r--  src/mongo/db/catalog/database_impl.cpp | 51
-rw-r--r--  src/mongo/db/catalog/drop_collection.cpp | 14
-rw-r--r--  src/mongo/db/catalog/index_consistency.cpp | 2
-rw-r--r--  src/mongo/db/catalog/index_consistency.h | 2
-rw-r--r--  src/mongo/db/catalog/uuid_catalog.cpp | 34
-rw-r--r--  src/mongo/db/catalog/uuid_catalog.h | 18
-rw-r--r--  src/mongo/db/catalog/uuid_catalog_helper.cpp | 63
-rw-r--r--  src/mongo/db/catalog/uuid_catalog_helper.h | 53
-rw-r--r--  src/mongo/db/catalog/uuid_catalog_test.cpp | 2
-rw-r--r--  src/mongo/db/catalog_raii.cpp | 12
-rw-r--r--  src/mongo/db/commands/SConscript | 2
-rw-r--r--  src/mongo/db/commands/dbhash.cpp | 111
-rw-r--r--  src/mongo/db/commands/list_collections.cpp | 41
-rw-r--r--  src/mongo/db/commands/list_databases.cpp | 2
-rw-r--r--  src/mongo/db/commands/validate.cpp | 5
-rw-r--r--  src/mongo/db/repair_database.cpp | 4
-rw-r--r--  src/mongo/db/repair_database_and_check_version.cpp | 1
-rw-r--r--  src/mongo/db/repl/idempotency_test_fixture.cpp | 3
-rw-r--r--  src/mongo/db/storage/kv/SConscript | 1
-rw-r--r--  src/mongo/db/storage/kv/kv_catalog.cpp | 9
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine.cpp | 28
-rw-r--r--  src/mongo/dbtests/indexupdatetests.cpp | 42
-rw-r--r--  src/mongo/dbtests/rollbacktests.cpp | 102
-rw-r--r--  src/mongo/dbtests/validate_tests.cpp | 1
35 files changed, 290 insertions(+), 505 deletions(-)
diff --git a/jstests/core/txns/drop_collection_not_blocked_by_txn.js b/jstests/core/txns/drop_collection_not_blocked_by_txn.js
deleted file mode 100644
index 626a7128a5c..00000000000
--- a/jstests/core/txns/drop_collection_not_blocked_by_txn.js
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Test that drop collection only takes database IX lock and will not be blocked by transactions.
- *
- * @tags: [uses_transactions, requires_db_locking, assumes_unsharded_collection]
- */
-
-(function() {
- "use strict";
-
- let rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- let db = rst.getPrimary().getDB("test");
-
- assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]}));
- assert.commandWorked(db.runCommand({insert: "b", documents: [{x: 1}]}));
-
- const session = db.getMongo().startSession();
- const sessionDb = session.getDatabase("test");
-
- session.startTransaction();
- // This holds a database IX lock and a collection IX lock on "a".
- sessionDb.a.insert({y: 1});
-
- // This only requires database IX lock.
- assert.commandWorked(db.runCommand({drop: "b"}));
-
- session.commitTransaction();
-
- rst.stopSet();
-})();
diff --git a/jstests/core/txns/transactions_profiling_with_drops.js b/jstests/core/txns/transactions_profiling_with_drops.js
index 4c2c1c0047a..418bc45330b 100644
--- a/jstests/core/txns/transactions_profiling_with_drops.js
+++ b/jstests/core/txns/transactions_profiling_with_drops.js
@@ -7,17 +7,20 @@
const dbName = "test";
const collName = "transactions_profiling_with_drops";
+ const otherCollName = "other";
const adminDB = db.getSiblingDB("admin");
const testDB = db.getSiblingDB(dbName);
+ const otherColl = testDB[otherCollName];
const session = db.getMongo().startSession({causalConsistency: false});
const sessionDb = session.getDatabase(dbName);
const sessionColl = sessionDb[collName];
sessionDb.runCommand({dropDatabase: 1, writeConcern: {w: "majority"}});
assert.commandWorked(sessionColl.insert({_id: "doc"}, {w: "majority"}));
+ assert.commandWorked(otherColl.insert({_id: "doc"}, {w: "majority"}));
assert.commandWorked(sessionDb.runCommand({profile: 1, slowms: 1}));
- jsTest.log("Test read profiling with operation holding database X lock.");
+ jsTest.log("Test read profiling with drops.");
jsTest.log("Start transaction.");
session.startTransaction();
@@ -29,50 +32,39 @@
profilerHasSingleMatchingEntryOrThrow(
{profileDB: testDB, filter: {"command.comment": "read success"}});
- // Lock 'test' database in X mode.
- let lockShell = startParallelShell(function() {
- assert.commandFailed(db.adminCommand({
- sleep: 1,
- secs: 500,
- lock: "w",
- lockTarget: "test",
- $comment: "transaction_profiling_with_drops lock sleep"
- }));
+ jsTest.log("Start a drop, which will hang.");
+ let awaitDrop = startParallelShell(function() {
+ db.getSiblingDB("test").runCommand({drop: "other", writeConcern: {w: "majority"}});
});
- const waitForCommand = function(opFilter) {
- let opId = -1;
- assert.soon(function() {
- const curopRes = testDB.currentOp();
- assert.commandWorked(curopRes);
- const foundOp = curopRes["inprog"].filter(opFilter);
-
- if (foundOp.length == 1) {
- opId = foundOp[0]["opid"];
- }
- return (foundOp.length == 1);
+ // Wait for the drop to have a pending MODE_X lock on the database.
+ assert.soon(
+ function() {
+ return adminDB
+ .aggregate([
+ {$currentOp: {}},
+ {$match: {"command.drop": otherCollName, waitingForLock: true}}
+ ])
+ .itcount() === 1;
+ },
+ function() {
+ return "Failed to find drop in currentOp output: " +
+ tojson(adminDB.aggregate([{$currentOp: {}}]));
});
- return opId;
- };
-
- // Wait for sleep to appear in currentOp
- let opId = waitForCommand(
- op => (op["ns"] == "admin.$cmd" &&
- op["command"]["$comment"] == "transaction_profiling_with_drops lock sleep"));
jsTest.log("Run a slow read. Profiling in the transaction should fail.");
assert.sameMembers(
[{_id: "doc"}],
sessionColl.find({$where: "sleep(1000); return true;"}).comment("read failure").toArray());
session.commitTransaction();
-
- assert.commandWorked(testDB.killOp(opId));
- lockShell();
-
+ awaitDrop();
profilerHasZeroMatchingEntriesOrThrow(
{profileDB: testDB, filter: {"command.comment": "read failure"}});
- jsTest.log("Test write profiling with operation holding database X lock.");
+ jsTest.log("Test write profiling with drops.");
+
+ // Recreate the "other" collection so it can be dropped again.
+ assert.commandWorked(otherColl.insert({_id: "doc"}, {w: "majority"}));
jsTest.log("Start transaction.");
session.startTransaction();
@@ -83,24 +75,31 @@
profilerHasSingleMatchingEntryOrThrow(
{profileDB: testDB, filter: {"command.collation": {locale: "en"}}});
- // Lock 'test' database in X mode.
- lockShell = startParallelShell(function() {
- assert.commandFailed(db.getSiblingDB("test").adminCommand(
- {sleep: 1, secs: 500, lock: "w", lockTarget: "test", $comment: "lock sleep"}));
+ jsTest.log("Start a drop, which will hang.");
+ awaitDrop = startParallelShell(function() {
+ db.getSiblingDB("test").runCommand({drop: "other", writeConcern: {w: "majority"}});
});
- // Wait for sleep to appear in currentOp
- opId = waitForCommand(
- op => (op["ns"] == "admin.$cmd" && op["command"]["$comment"] == "lock sleep"));
+ // Wait for the drop to have a pending MODE_X lock on the database.
+ assert.soon(
+ function() {
+ return adminDB
+ .aggregate([
+ {$currentOp: {}},
+ {$match: {"command.drop": otherCollName, waitingForLock: true}}
+ ])
+ .itcount() === 1;
+ },
+ function() {
+ return "Failed to find drop in currentOp output: " +
+ tojson(adminDB.aggregate([{$currentOp: {}}]));
+ });
jsTest.log("Run a slow write. Profiling in the transaction should fail.");
assert.commandWorked(sessionColl.update(
{$where: "sleep(1000); return true;"}, {$inc: {bad: 1}}, {collation: {locale: "fr"}}));
session.commitTransaction();
-
- assert.commandWorked(testDB.killOp(opId));
- lockShell();
-
+ awaitDrop();
profilerHasZeroMatchingEntriesOrThrow(
{profileDB: testDB, filter: {"command.collation": {locale: "fr"}}});
diff --git a/jstests/replsets/transient_txn_error_labels_with_write_concern.js b/jstests/replsets/transient_txn_error_labels_with_write_concern.js
index b422bb96ccc..55dc269831e 100644
--- a/jstests/replsets/transient_txn_error_labels_with_write_concern.js
+++ b/jstests/replsets/transient_txn_error_labels_with_write_concern.js
@@ -73,35 +73,16 @@
jsTest.log(
"If the noop write for NoSuchTransaction cannot occur, the error is not transient");
-
- // Lock 'local' database in X mode.
- let lockShell = startParallelShell(function() {
- assert.commandFailed(db.adminCommand({
- sleep: 1,
- secs: 500,
- lock: "w",
- lockTarget: "local",
- $comment: "transient_txn_error_labels_with_write_concern lock sleep"
- }));
- }, rst.ports[0]);
-
- // Wait for sleep to appear in currentOp
- let opId = -1;
- assert.soon(function() {
- const curopRes = testDB.currentOp();
- assert.commandWorked(curopRes);
- const foundOp = curopRes["inprog"].filter(
- op => (op["ns"] == "admin.$cmd" &&
- op["command"]["$comment"] ==
- "transient_txn_error_labels_with_write_concern lock sleep"));
- if (foundOp.length == 1) {
- opId = foundOp[0]["opid"];
- }
- return (foundOp.length == 1);
- });
-
+ assert.commandWorked(testDB.getSiblingDB("local").createCollection("todrop"));
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: "hangDuringDropCollection", mode: "alwaysOn"}));
+ // Create a pending drop on a collection in the local database. This will hold an X lock on
+ // the local database.
+ let awaitDrop = startParallelShell(() => assert(db.getSiblingDB("local")["todrop"].drop()),
+ rst.ports[0]);
+ checkLog.contains(testDB.getMongo(), "hangDuringDropCollection fail point enabled");
// The server will attempt to perform a noop write, since the command returns
- // NoSuchTransaction. The noop write will time out acquiring a lock on the 'local' database.
+ // NoSuchTransaction. The noop write will time out acquiring a lock on the local database.
// This should not be a TransientTransactionError, since the server has not successfully
// replicated a write to confirm that it is primary.
// Use a txnNumber that is one higher than the server has tracked.
@@ -113,10 +94,9 @@
}));
assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
assert(!res.hasOwnProperty("errorLabels"));
-
- assert.commandWorked(testDB.killOp(opId));
- lockShell();
-
+ assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "hangDuringDropCollection", mode: "off"}));
+ awaitDrop();
rst.awaitReplication();
}
diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript
index 591691e1b13..e2eacc8d585 100644
--- a/src/mongo/db/catalog/SConscript
+++ b/src/mongo/db/catalog/SConscript
@@ -325,17 +325,6 @@ env.Library(
]
)
-env.Library(
- target='uuid_catalog_helper',
- source=[
- 'uuid_catalog_helper.cpp',
- ],
- LIBDEPS_PRIVATE=[
- 'uuid_catalog',
- '$BUILD_DIR/mongo/db/concurrency/lock_manager',
- ]
-)
-
env.CppUnitTest(
target='namespace_uuid_cache_test',
source=[
@@ -353,6 +342,7 @@ env.CppUnitTest(
],
LIBDEPS=[
'uuid_catalog',
+ '$BUILD_DIR/mongo/db/concurrency/lock_manager',
'$BUILD_DIR/mongo/db/service_context',
'$BUILD_DIR/mongo/db/storage/kv/kv_prefix',
],
@@ -427,7 +417,6 @@ env.Library(
'$BUILD_DIR/mongo/db/views/views_mongod',
],
LIBDEPS_PRIVATE=[
- '$BUILD_DIR/mongo/db/catalog/uuid_catalog_helper',
'$BUILD_DIR/mongo/db/commands/server_status_core',
'$BUILD_DIR/mongo/db/index/index_build_interceptor',
'$BUILD_DIR/mongo/db/logical_clock',
diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
index a13ca2d2650..ffd4a5f60dc 100644
--- a/src/mongo/db/catalog/catalog_control.cpp
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -177,7 +177,7 @@ void openCatalog(OperationContext* opCtx, const MinVisibleTimestampMap& minVisib
LOG_FOR_RECOVERY(1) << "openCatalog: dbholder reopening database " << dbName;
auto db = databaseHolder->openDb(opCtx, dbName);
invariant(db, str::stream() << "failed to reopen database " << dbName);
- for (auto&& collNss : UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(opCtx, dbName)) {
+ for (auto&& collNss : UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(dbName)) {
// Note that the collection name already includes the database component.
auto collection = db->getCollection(opCtx, collNss.ns());
invariant(collection,
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 39cb24a20d7..705cc5c6d44 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -328,6 +328,7 @@ public:
virtual Status validate(OperationContext* const opCtx,
const ValidateCmdLevel level,
bool background,
+ std::unique_ptr<Lock::CollectionLock> collLk,
ValidateResults* const results,
BSONObjBuilder* const output) = 0;
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index aa662981b29..fcb62484ea6 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -1273,6 +1273,7 @@ void _validateCatalogEntry(OperationContext* opCtx,
Status CollectionImpl::validate(OperationContext* opCtx,
ValidateCmdLevel level,
bool background,
+ std::unique_ptr<Lock::CollectionLock> collLk,
ValidateResults* results,
BSONObjBuilder* output) {
dassert(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_IS));
@@ -1280,7 +1281,8 @@ Status CollectionImpl::validate(OperationContext* opCtx,
try {
ValidateResultsMap indexNsResultsMap;
BSONObjBuilder keysPerIndex; // not using subObjStart to be exception safe
- IndexConsistency indexConsistency(opCtx, this, ns(), _recordStore, background);
+ IndexConsistency indexConsistency(
+ opCtx, this, ns(), _recordStore, std::move(collLk), background);
RecordStoreValidateAdaptor indexValidator = RecordStoreValidateAdaptor(
opCtx, &indexConsistency, level, _indexCatalog.get(), &indexNsResultsMap);
diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h
index cc26c48ae73..615a2499c59 100644
--- a/src/mongo/db/catalog/collection_impl.h
+++ b/src/mongo/db/catalog/collection_impl.h
@@ -237,6 +237,7 @@ public:
Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
bool background,
+ std::unique_ptr<Lock::CollectionLock> collLk,
ValidateResults* results,
BSONObjBuilder* output) final;
diff --git a/src/mongo/db/catalog/collection_mock.h b/src/mongo/db/catalog/collection_mock.h
index 14e9f8d1550..c7075786eda 100644
--- a/src/mongo/db/catalog/collection_mock.h
+++ b/src/mongo/db/catalog/collection_mock.h
@@ -171,6 +171,7 @@ public:
Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
bool background,
+ std::unique_ptr<Lock::CollectionLock> collLk,
ValidateResults* results,
BSONObjBuilder* output) {
std::abort();
diff --git a/src/mongo/db/catalog/collection_test.cpp b/src/mongo/db/catalog/collection_test.cpp
index a36c4d71770..a6486ebc9b0 100644
--- a/src/mongo/db/catalog/collection_test.cpp
+++ b/src/mongo/db/catalog/collection_test.cpp
@@ -72,7 +72,7 @@ void CollectionTest::checkValidate(
for (auto level : levels) {
ValidateResults results;
BSONObjBuilder output;
- auto status = coll->validate(opCtx, level, false, &results, &output);
+ auto status = coll->validate(opCtx, level, false, std::move(collLock), &results, &output);
ASSERT_OK(status);
ASSERT_EQ(results.valid, valid);
ASSERT_EQ(results.errors.size(), (long unsigned int)errors);
diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp
index 9560ec6f468..19143026e66 100644
--- a/src/mongo/db/catalog/database_holder_impl.cpp
+++ b/src/mongo/db/catalog/database_holder_impl.cpp
@@ -137,7 +137,7 @@ Database* DatabaseHolderImpl::openDb(OperationContext* opCtx, StringData ns, boo
// different databases for the same name.
lk.unlock();
- if (UUIDCatalog::get(opCtx).getAllCollectionUUIDsFromDb(dbname).empty()) {
+ if (UUIDCatalog::get(opCtx).getAllCatalogEntriesFromDb(dbname).empty()) {
audit::logCreateDatabase(opCtx->getClient(), dbname);
if (justCreated)
*justCreated = true;
@@ -202,7 +202,6 @@ void DatabaseHolderImpl::dropDb(OperationContext* opCtx, Database* db) {
namespace {
void evictDatabaseFromUUIDCatalog(OperationContext* opCtx, Database* db) {
- invariant(opCtx->lockState()->isW());
for (auto collIt = db->begin(opCtx); collIt != db->end(opCtx); ++collIt) {
auto coll = *collIt;
if (!coll) {
@@ -211,7 +210,7 @@ void evictDatabaseFromUUIDCatalog(OperationContext* opCtx, Database* db) {
NamespaceUUIDCache::get(opCtx).evictNamespace(coll->ns());
}
- UUIDCatalog::get(opCtx).onCloseDatabase(opCtx, db);
+ UUIDCatalog::get(opCtx).onCloseDatabase(db);
}
} // namespace
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index b0d6b1ca615..366e15101b9 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -50,7 +50,6 @@
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/catalog/namespace_uuid_cache.h"
#include "mongo/db/catalog/uuid_catalog.h"
-#include "mongo/db/catalog/uuid_catalog_helper.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
@@ -170,7 +169,7 @@ void DatabaseImpl::init(OperationContext* const opCtx) const {
}
auto& uuidCatalog = UUIDCatalog::get(opCtx);
- for (const auto& nss : uuidCatalog.getAllCollectionNamesFromDb(opCtx, _name)) {
+ for (const auto& nss : uuidCatalog.getAllCollectionNamesFromDb(_name)) {
auto ownedCollection = _createCollectionInstance(opCtx, nss);
invariant(ownedCollection);
@@ -197,7 +196,7 @@ void DatabaseImpl::init(OperationContext* const opCtx) const {
void DatabaseImpl::clearTmpCollections(OperationContext* opCtx) const {
invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
- for (const auto& nss : UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(opCtx, _name)) {
+ for (const auto& nss : UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(_name)) {
CollectionCatalogEntry* coll =
UUIDCatalog::get(opCtx).lookupCollectionCatalogEntryByNamespace(nss);
CollectionOptions options = coll->getCollectionOptions(opCtx);
@@ -279,24 +278,24 @@ void DatabaseImpl::getStats(OperationContext* opCtx, BSONObjBuilder* output, dou
invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_IS));
- catalog::forEachCollectionFromDb(
- opCtx,
- name(),
- MODE_IS,
- [&](Collection* collection, CollectionCatalogEntry* catalogEntry) -> bool {
- nCollections += 1;
- objects += collection->numRecords(opCtx);
- size += collection->dataSize(opCtx);
+ for (const auto& nss : UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(_name)) {
+ Lock::CollectionLock colLock(opCtx, nss, MODE_IS);
+ Collection* collection = getCollection(opCtx, nss);
- BSONObjBuilder temp;
- storageSize += collection->getRecordStore()->storageSize(opCtx, &temp);
- numExtents += temp.obj()["numExtents"].numberInt(); // XXX
+ if (!collection)
+ continue;
+
+ nCollections += 1;
+ objects += collection->numRecords(opCtx);
+ size += collection->dataSize(opCtx);
- indexes += collection->getIndexCatalog()->numIndexesTotal(opCtx);
- indexSize += collection->getIndexSize(opCtx);
+ BSONObjBuilder temp;
+ storageSize += collection->getRecordStore()->storageSize(opCtx, &temp);
+ numExtents += temp.obj()["numExtents"].numberInt(); // XXX
- return true;
- });
+ indexes += collection->getIndexCatalog()->numIndexesTotal(opCtx);
+ indexSize += collection->getIndexSize(opCtx);
+ }
ViewCatalog::get(this)->iterate(opCtx, [&](const ViewDefinition& view) { nViews += 1; });
@@ -370,7 +369,7 @@ Status DatabaseImpl::dropCollection(OperationContext* opCtx,
Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
const NamespaceString& fullns,
repl::OpTime dropOpTime) const {
- invariant(opCtx->lockState()->isCollectionLockedForMode(fullns, MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
LOG(1) << "dropCollection: " << fullns;
@@ -515,10 +514,9 @@ Collection* DatabaseImpl::getCollection(OperationContext* opCtx, const Namespace
}
NamespaceUUIDCache& cache = NamespaceUUIDCache::get(opCtx);
- auto uuid = UUIDCatalog::get(opCtx).lookupUUIDByNSS(nss);
- if (uuid) {
- cache.ensureNamespaceInCache(nss, uuid.get());
- }
+ auto uuid = coll->uuid();
+ invariant(uuid);
+ cache.ensureNamespaceInCache(nss, uuid.get());
return coll;
}
@@ -527,10 +525,7 @@ Status DatabaseImpl::renameCollection(OperationContext* opCtx,
StringData toNS,
bool stayTemp) const {
audit::logRenameCollection(&cc(), fromNS, toNS);
- // TODO SERVER-39518 : Temporarily comment this out because dropCollection uses
- // this function and now it only takes a database IX lock. We can change
- // this invariant to IX once renameCollection only MODE_IX as well.
- // invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
const NamespaceString fromNSS(fromNS);
const NamespaceString toNSS(toNS);
@@ -821,7 +816,7 @@ void DatabaseImpl::checkForIdIndexesAndDropPendingCollections(OperationContext*
return;
}
- for (const auto& nss : UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(opCtx, _name)) {
+ for (const auto& nss : UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(_name)) {
if (nss.isDropPendingNamespace()) {
auto dropOpTime = fassert(40459, nss.getDropPendingNamespaceOpTime());
log() << "Found drop-pending namespace " << nss << " with drop optime " << dropOpTime;
diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp
index b1d76ca11b4..391a5a76048 100644
--- a/src/mongo/db/catalog/drop_collection.cpp
+++ b/src/mongo/db/catalog/drop_collection.cpp
@@ -55,6 +55,9 @@ Status _dropView(OperationContext* opCtx,
std::unique_ptr<AutoGetDb>& autoDb,
const NamespaceString& collectionName,
BSONObjBuilder& result) {
+ // TODO(SERVER-39520): No need to relock once createCollection doesn't need X lock.
+ autoDb.reset();
+ autoDb = std::make_unique<AutoGetDb>(opCtx, collectionName.db(), MODE_IX);
Database* db = autoDb->getDb();
if (!db) {
return Status(ErrorCodes::NamespaceNotFound, "ns not found");
@@ -97,16 +100,11 @@ Status _dropView(OperationContext* opCtx,
Status _dropCollection(OperationContext* opCtx,
Database* db,
+ Collection* coll,
const NamespaceString& collectionName,
const repl::OpTime& dropOpTime,
DropCollectionSystemCollectionMode systemCollectionMode,
BSONObjBuilder& result) {
- Lock::CollectionLock collLock(opCtx, collectionName, MODE_X);
- Collection* coll = db->getCollection(opCtx, collectionName);
- if (!coll) {
- return Status(ErrorCodes::NamespaceNotFound, "ns not found");
- }
-
if (MONGO_FAIL_POINT(hangDuringDropCollection)) {
log() << "hangDuringDropCollection fail point enabled. Blocking until fail point is "
"disabled.";
@@ -157,7 +155,7 @@ Status dropCollection(OperationContext* opCtx,
return writeConflictRetry(opCtx, "drop", collectionName.ns(), [&] {
// TODO(SERVER-39520): Get rid of database MODE_X lock.
- auto autoDb = std::make_unique<AutoGetDb>(opCtx, collectionName.db(), MODE_IX);
+ auto autoDb = std::make_unique<AutoGetDb>(opCtx, collectionName.db(), MODE_X);
Database* db = autoDb->getDb();
if (!db) {
@@ -169,7 +167,7 @@ Status dropCollection(OperationContext* opCtx,
return _dropView(opCtx, autoDb, collectionName, result);
} else {
return _dropCollection(
- opCtx, db, collectionName, dropOpTime, systemCollectionMode, result);
+ opCtx, db, coll, collectionName, dropOpTime, systemCollectionMode, result);
}
});
}
diff --git a/src/mongo/db/catalog/index_consistency.cpp b/src/mongo/db/catalog/index_consistency.cpp
index fbc98f9f1de..78b2d62ab81 100644
--- a/src/mongo/db/catalog/index_consistency.cpp
+++ b/src/mongo/db/catalog/index_consistency.cpp
@@ -62,11 +62,13 @@ IndexConsistency::IndexConsistency(OperationContext* opCtx,
Collection* collection,
NamespaceString nss,
RecordStore* recordStore,
+ std::unique_ptr<Lock::CollectionLock> collLk,
const bool background)
: _opCtx(opCtx),
_collection(collection),
_nss(nss),
_recordStore(recordStore),
+ _collLk(std::move(collLk)),
_tracker(opCtx->getServiceContext()->getFastClockSource(),
internalQueryExecYieldIterations.load(),
Milliseconds(internalQueryExecYieldPeriodMS.load())) {
diff --git a/src/mongo/db/catalog/index_consistency.h b/src/mongo/db/catalog/index_consistency.h
index d22696c5b0c..ac90a4278a0 100644
--- a/src/mongo/db/catalog/index_consistency.h
+++ b/src/mongo/db/catalog/index_consistency.h
@@ -90,6 +90,7 @@ public:
Collection* collection,
NamespaceString nss,
RecordStore* recordStore,
+ std::unique_ptr<Lock::CollectionLock> collLk,
const bool background);
/**
@@ -168,6 +169,7 @@ private:
Collection* _collection;
const NamespaceString _nss;
const RecordStore* _recordStore;
+ std::unique_ptr<Lock::CollectionLock> _collLk;
ElapsedTracker _tracker;
// We map the hashed KeyString values to a bucket which contain the count of how many
diff --git a/src/mongo/db/catalog/uuid_catalog.cpp b/src/mongo/db/catalog/uuid_catalog.cpp
index dc83b1d955b..a9046b3b885 100644
--- a/src/mongo/db/catalog/uuid_catalog.cpp
+++ b/src/mongo/db/catalog/uuid_catalog.cpp
@@ -304,10 +304,14 @@ void UUIDCatalog::setCollectionNamespace(OperationContext* opCtx,
});
}
-void UUIDCatalog::onCloseDatabase(OperationContext* opCtx, Database* db) {
- invariant(opCtx->lockState()->isW());
+void UUIDCatalog::onCloseDatabase(Database* db) {
for (auto it = begin(db->name()); it != end(); ++it) {
- deregisterCollectionObject(it.uuid().get());
+ auto coll = *it;
+ if (coll && coll->uuid()) {
+ // While the collection does not actually get dropped, we're going to destroy the
+ // Collection object, so for purposes of the UUIDCatalog it looks the same.
+ deregisterCollectionObject(coll->uuid().get());
+ }
}
auto rid = ResourceId(RESOURCE_DATABASE, db->name());
@@ -392,6 +396,16 @@ boost::optional<CollectionUUID> UUIDCatalog::lookupUUIDByNSS(const NamespaceStri
return boost::none;
}
+std::vector<CollectionCatalogEntry*> UUIDCatalog::getAllCatalogEntriesFromDb(
+ StringData dbName) const {
+ std::vector<UUID> uuids = getAllCollectionUUIDsFromDb(dbName);
+ std::vector<CollectionCatalogEntry*> ret;
+ for (auto& uuid : uuids) {
+ ret.push_back(lookupCollectionCatalogEntryByUUID(uuid));
+ }
+ return ret;
+}
+
std::vector<CollectionUUID> UUIDCatalog::getAllCollectionUUIDsFromDb(StringData dbName) const {
stdx::lock_guard<stdx::mutex> lock(_catalogLock);
auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue();
@@ -405,18 +419,10 @@ std::vector<CollectionUUID> UUIDCatalog::getAllCollectionUUIDsFromDb(StringData
return ret;
}
-std::vector<NamespaceString> UUIDCatalog::getAllCollectionNamesFromDb(OperationContext* opCtx,
- StringData dbName) const {
- invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_S));
-
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
- auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue();
-
+std::vector<NamespaceString> UUIDCatalog::getAllCollectionNamesFromDb(StringData dbName) const {
std::vector<NamespaceString> ret;
- for (auto it = _orderedCollections.lower_bound(std::make_pair(dbName.toString(), minUuid));
- it != _orderedCollections.end() && it->first.first == dbName;
- ++it) {
- ret.push_back(it->second->collectionCatalogEntry->ns());
+ for (auto catalogEntry : getAllCatalogEntriesFromDb(dbName)) {
+ ret.push_back(catalogEntry->ns());
}
return ret;
}
diff --git a/src/mongo/db/catalog/uuid_catalog.h b/src/mongo/db/catalog/uuid_catalog.h
index 7fe29428422..07a32558f67 100644
--- a/src/mongo/db/catalog/uuid_catalog.h
+++ b/src/mongo/db/catalog/uuid_catalog.h
@@ -260,7 +260,7 @@ public:
/**
* Implies onDropCollection for all collections in db, but is not transactional.
*/
- void onCloseDatabase(OperationContext* opCtx, Database* db);
+ void onCloseDatabase(Database* db);
/**
* Register the collection catalog entry with `uuid`. The collection object with `uuid` must not
@@ -346,10 +346,14 @@ public:
boost::optional<CollectionUUID> lookupUUIDByNSS(const NamespaceString& nss) const;
/**
- * This function gets the UUIDs of all collections from `dbName`.
+ * This function gets the pointers of all the CollectionCatalogEntries from `dbName`.
*
- * If the caller does not take a strong database lock, some of UUIDs might no longer exist (due
- * to collection drop) after this function returns.
+ * Returns empty vector if the 'dbName' is not known.
+ */
+ std::vector<CollectionCatalogEntry*> getAllCatalogEntriesFromDb(StringData dbName) const;
+
+ /**
+ * This function gets the UUIDs of all collections from `dbName`.
*
* Returns empty vector if the 'dbName' is not known.
*/
@@ -358,13 +362,9 @@ public:
/**
* This function gets the ns of all collections from `dbName`. The result is not sorted.
*
- * Caller must take a strong database lock; otherwise, collections returned could be dropped or
- * renamed.
- *
* Returns empty vector if the 'dbName' is not known.
*/
- std::vector<NamespaceString> getAllCollectionNamesFromDb(OperationContext* opCtx,
- StringData dbName) const;
+ std::vector<NamespaceString> getAllCollectionNamesFromDb(StringData dbName) const;
/**
* This functions gets all the database names. The result is sorted in alphabetical ascending
diff --git a/src/mongo/db/catalog/uuid_catalog_helper.cpp b/src/mongo/db/catalog/uuid_catalog_helper.cpp
deleted file mode 100644
index 1ba42ec7b04..00000000000
--- a/src/mongo/db/catalog/uuid_catalog_helper.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Copyright (C) 2019-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/db/catalog/uuid_catalog_helper.h"
-#include "mongo/db/catalog/collection.h"
-#include "mongo/db/catalog/collection_catalog_entry.h"
-#include "mongo/db/concurrency/d_concurrency.h"
-
-namespace mongo {
-namespace catalog {
-
-void forEachCollectionFromDb(
- OperationContext* opCtx,
- StringData dbName,
- LockMode collLockMode,
- std::function<bool(Collection* collection, CollectionCatalogEntry* catalogEntry)> callback) {
-
- UUIDCatalog& uuidCatalog = UUIDCatalog::get(opCtx);
- for (auto collectionIt = uuidCatalog.begin(dbName); collectionIt != uuidCatalog.end();
- ++collectionIt) {
- auto uuid = collectionIt.uuid().get();
- auto nss = uuidCatalog.lookupNSSByUUID(uuid);
-
- Lock::CollectionLock clk(opCtx, nss, collLockMode);
-
- auto collection = uuidCatalog.lookupCollectionByUUID(uuid);
- auto catalogEntry = uuidCatalog.lookupCollectionCatalogEntryByUUID(uuid);
- if (!collection || !catalogEntry || catalogEntry->ns() != nss)
- continue;
-
- if (!callback(collection, catalogEntry))
- break;
- }
-}
-
-} // namespace catalog
-} // namespace mongo
diff --git a/src/mongo/db/catalog/uuid_catalog_helper.h b/src/mongo/db/catalog/uuid_catalog_helper.h
deleted file mode 100644
index 0530618b102..00000000000
--- a/src/mongo/db/catalog/uuid_catalog_helper.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Copyright (C) 2019-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include "mongo/db/catalog/uuid_catalog.h"
-#include "mongo/db/concurrency/lock_manager_defs.h"
-
-namespace mongo {
-
-class Collection;
-class CollectionCatalogEntry;
-
-namespace catalog {
-
-/**
- * Looping through all the collections in the database and run callback function on each one of
- * them. The return value of the callback decides whether we should continue the loop.
- */
-void forEachCollectionFromDb(
- OperationContext* opCtx,
- StringData dbName,
- LockMode collLockMode,
- std::function<bool(Collection* collection, CollectionCatalogEntry* catalogEntry)> callback);
-
-} // namespace catalog
-} // namespace mongo
diff --git a/src/mongo/db/catalog/uuid_catalog_test.cpp b/src/mongo/db/catalog/uuid_catalog_test.cpp
index dedb0582120..dd25f8de15b 100644
--- a/src/mongo/db/catalog/uuid_catalog_test.cpp
+++ b/src/mongo/db/catalog/uuid_catalog_test.cpp
@@ -740,7 +740,7 @@ TEST_F(UUIDCatalogTest, GetAllCollectionNamesAndGetAllDbNames) {
}
std::vector<NamespaceString> dCollList = {d1Coll, d2Coll, d3Coll};
- auto res = catalog.getAllCollectionNamesFromDb(&opCtx, "dbD");
+ auto res = catalog.getAllCollectionNamesFromDb("dbD");
std::sort(res.begin(), res.end());
ASSERT(res == dCollList);
diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp
index aee75fed1f8..20bf489ca71 100644
--- a/src/mongo/db/catalog_raii.cpp
+++ b/src/mongo/db/catalog_raii.cpp
@@ -75,17 +75,7 @@ AutoGetCollection::AutoGetCollection(OperationContext* opCtx,
modeDB,
deadline),
_resolvedNss(resolveNamespaceStringOrUUID(opCtx, nsOrUUID)) {
-
- NamespaceString resolvedNssWithLock;
- do {
- _collLock.emplace(opCtx, _resolvedNss, modeColl, deadline);
-
- // We looked up nsOrUUID without a collection lock so it's possible that the
- // collection is dropped now. Look it up again.
- resolvedNssWithLock = resolveNamespaceStringOrUUID(opCtx, nsOrUUID);
- } while (_resolvedNss != resolvedNssWithLock);
- _resolvedNss = resolvedNssWithLock;
-
+ _collLock.emplace(opCtx, _resolvedNss, modeColl, deadline);
// Wait for a configured amount of time after acquiring locks if the failpoint is enabled
MONGO_FAIL_POINT_BLOCK(setAutoGetCollectionWait, customWait) {
const BSONObj& data = customWait.getData();
diff --git a/src/mongo/db/commands/SConscript b/src/mongo/db/commands/SConscript
index 882331f0073..826d195b346 100644
--- a/src/mongo/db/commands/SConscript
+++ b/src/mongo/db/commands/SConscript
@@ -265,7 +265,6 @@ env.Library(
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/base',
'$BUILD_DIR/mongo/db/catalog/catalog_helpers',
- '$BUILD_DIR/mongo/db/catalog/uuid_catalog_helper',
'$BUILD_DIR/mongo/db/catalog/index_key_validate',
'$BUILD_DIR/mongo/db/catalog/multi_index_block',
'$BUILD_DIR/mongo/db/command_can_run_here',
@@ -368,7 +367,6 @@ env.Library(
'$BUILD_DIR/mongo/db/background',
'$BUILD_DIR/mongo/db/catalog/catalog_control',
'$BUILD_DIR/mongo/db/catalog/catalog_helpers',
- '$BUILD_DIR/mongo/db/catalog/uuid_catalog_helper',
'$BUILD_DIR/mongo/db/catalog/index_key_validate',
'$BUILD_DIR/mongo/db/cloner',
'$BUILD_DIR/mongo/db/commands',
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index db939ea91b6..6a6ab9cd1a3 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -33,13 +33,11 @@
#include <boost/optional.hpp>
#include <map>
-#include <set>
#include <string>
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/index_catalog.h"
-#include "mongo/db/catalog/uuid_catalog_helper.h"
#include "mongo/db/commands.h"
#include "mongo/db/commands/test_commands_enabled.h"
#include "mongo/db/db_raii.h"
@@ -187,8 +185,7 @@ public:
if (opCtx->recoveryUnit()->getTimestampReadSource() ==
RecoveryUnit::ReadSource::kProvided) {
// However, if we are performing a read at a timestamp, then we only need to lock the
- // database in intent mode and then collection in intent mode as well to ensure that
- // none of the collections get dropped.
+ // database in intent mode to ensure that none of the collections get dropped.
lockMode = LockMode::MODE_IS;
// Additionally, if we are performing a read at a timestamp, then we allow oplog
@@ -199,6 +196,11 @@ public:
}
AutoGetDb autoDb(opCtx, ns, lockMode);
Database* db = autoDb.getDb();
+ std::vector<NamespaceString> collections;
+ if (db) {
+ collections = UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(db->name());
+ std::sort(collections.begin(), collections.end());
+ }
result.append("host", prettyHostName());
@@ -214,84 +216,51 @@ public:
"system.version",
"system.views"};
- std::map<std::string, std::string> collectionToHashMap;
- std::map<std::string, OptionalCollectionUUID> collectionToUUIDMap;
- std::set<std::string> cappedCollectionSet;
-
- bool noError = true;
- catalog::forEachCollectionFromDb(
- opCtx,
- dbname,
- MODE_IS,
- [&](Collection* collection, CollectionCatalogEntry* catalogEntry) {
- auto collNss = collection->ns();
-
- if (collNss.size() - 1 <= dbname.size()) {
- errmsg = str::stream() << "weird fullCollectionName [" << collNss.toString()
- << "]";
- noError = false;
- return false;
- }
+ BSONArrayBuilder cappedCollections;
+ BSONObjBuilder collectionsByUUID;
- // Only include 'system' collections that are replicated.
- bool isReplicatedSystemColl =
- (replicatedSystemCollections.count(collNss.coll().toString()) > 0);
- if (collNss.isSystem() && !isReplicatedSystemColl)
- return true;
+ BSONObjBuilder bb(result.subobjStart("collections"));
+ for (const auto& collNss : collections) {
+ if (collNss.size() - 1 <= dbname.size()) {
+ errmsg = str::stream() << "weird fullCollectionName [" << collNss.toString() << "]";
+ return false;
+ }
- if (collNss.coll().startsWith("tmp.mr.")) {
- // We skip any incremental map reduce collections as they also aren't
- // replicated.
- return true;
- }
+ // Only include 'system' collections that are replicated.
+ bool isReplicatedSystemColl =
+ (replicatedSystemCollections.count(collNss.coll().toString()) > 0);
+ if (collNss.isSystem() && !isReplicatedSystemColl)
+ continue;
- if (desiredCollections.size() > 0 &&
- desiredCollections.count(collNss.coll().toString()) == 0)
- return true;
+ if (collNss.coll().startsWith("tmp.mr.")) {
+ // We skip any incremental map reduce collections as they also aren't replicated.
+ continue;
+ }
- // Don't include 'drop pending' collections.
- if (collNss.isDropPendingNamespace())
- return true;
+ if (desiredCollections.size() > 0 &&
+ desiredCollections.count(collNss.coll().toString()) == 0)
+ continue;
+ // Don't include 'drop pending' collections.
+ if (collNss.isDropPendingNamespace())
+ continue;
+
+ if (Collection* collection = db->getCollection(opCtx, collNss.ns())) {
if (collection->isCapped()) {
- cappedCollectionSet.insert(collNss.coll().toString());
+ cappedCollections.append(collNss.coll());
}
if (OptionalCollectionUUID uuid = collection->uuid()) {
- collectionToUUIDMap[collNss.coll().toString()] = uuid;
+ uuid->appendToBuilder(&collectionsByUUID, collNss.coll());
}
+ }
- // Compute the hash for this collection.
- std::string hash = _hashCollection(opCtx, db, collNss.toString());
-
- collectionToHashMap[collNss.coll().toString()] = hash;
-
- return true;
- });
- if (!noError)
- return false;
-
- BSONObjBuilder bb(result.subobjStart("collections"));
- BSONArrayBuilder cappedCollections;
- BSONObjBuilder collectionsByUUID;
-
- for (auto elem : cappedCollectionSet) {
- cappedCollections.append(elem);
- }
+ // Compute the hash for this collection.
+ std::string hash = _hashCollection(opCtx, db, collNss.toString());
- for (auto entry : collectionToUUIDMap) {
- auto collName = entry.first;
- auto uuid = entry.second;
- uuid->appendToBuilder(&collectionsByUUID, collName);
- }
-
- for (auto entry : collectionToHashMap) {
- auto collName = entry.first;
- auto hash = entry.second;
- bb.append(collName, hash);
+ bb.append(collNss.coll(), hash);
md5_append(&globalState, (const md5_byte_t*)hash.c_str(), hash.size());
}
-
bb.done();
result.append("capped", BSONArray(cappedCollections.done()));
@@ -315,7 +284,8 @@ private:
NamespaceString ns(fullCollectionName);
Collection* collection = db->getCollection(opCtx, ns);
- invariant(collection);
+ if (!collection)
+ return "";
boost::optional<Lock::CollectionLock> collLock;
if (opCtx->recoveryUnit()->getTimestampReadSource() ==
@@ -324,7 +294,8 @@ private:
// intent mode. We need to also acquire the collection lock in intent mode to ensure
// reading from the consistent snapshot doesn't overlap with any catalog operations on
// the collection.
- invariant(opCtx->lockState()->isCollectionLockedForMode(ns, MODE_IS));
+ invariant(opCtx->lockState()->isDbLockedForMode(db->name(), MODE_IS));
+ collLock.emplace(opCtx, ns, MODE_IS);
auto minSnapshot = collection->getMinimumVisibleSnapshot();
auto mySnapshot = opCtx->recoveryUnit()->getPointInTimeReadTimestamp();
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index e1f20746eb4..8d711c43cf2 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -41,7 +41,6 @@
#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/index_catalog.h"
-#include "mongo/db/catalog/uuid_catalog_helper.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/commands.h"
#include "mongo/db/commands/list_collections_filter.h"
@@ -187,6 +186,7 @@ BSONObj buildCollectionBson(OperationContext* opCtx,
return b.obj();
}
+ Lock::CollectionLock clk(opCtx, nss, MODE_IS);
CollectionOptions options = collection->getCatalogEntry()->getCollectionOptions(opCtx);
// While the UUID is stored as a collection option, from the user's perspective it is an
@@ -311,7 +311,6 @@ public:
continue;
}
- Lock::CollectionLock clk(opCtx, nss, MODE_IS);
Collection* collection = db->getCollection(opCtx, nss);
BSONObj collBson =
buildCollectionBson(opCtx, collection, includePendingDrops, nameOnly);
@@ -321,25 +320,25 @@ public:
}
}
} else {
- mongo::catalog::forEachCollectionFromDb(
- opCtx,
- dbname,
- MODE_IS,
- [&](Collection* collection, CollectionCatalogEntry* catalogEntry) {
- if (authorizedCollections &&
- (collection->ns().coll().startsWith("system.") ||
- !as->isAuthorizedForAnyActionOnResource(
- ResourcePattern::forExactNamespace(collection->ns())))) {
- return true;
- }
- BSONObj collBson = buildCollectionBson(
- opCtx, collection, includePendingDrops, nameOnly);
- if (!collBson.isEmpty()) {
- _addWorkingSetMember(
- opCtx, collBson, matcher.get(), ws.get(), root.get());
- }
- return true;
- });
+ for (auto collIt = db->begin(opCtx); collIt != db->end(opCtx); ++collIt) {
+ auto collection = *collIt;
+ if (!collection) {
+ break;
+ }
+
+ if (authorizedCollections &&
+ (collection->ns().coll().startsWith("system.") ||
+ !as->isAuthorizedForAnyActionOnResource(
+ ResourcePattern::forExactNamespace(collection->ns())))) {
+ continue;
+ }
+ BSONObj collBson =
+ buildCollectionBson(opCtx, collection, includePendingDrops, nameOnly);
+ if (!collBson.isEmpty()) {
+ _addWorkingSetMember(
+ opCtx, collBson, matcher.get(), ws.get(), root.get());
+ }
+ }
}
// Skipping views is only necessary for internal cloning operations.
diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp
index dccec13a8b6..46d3ab65fa8 100644
--- a/src/mongo/db/commands/list_databases.cpp
+++ b/src/mongo/db/commands/list_databases.cpp
@@ -170,7 +170,7 @@ public:
b.append("sizeOnDisk", static_cast<double>(size));
b.appendBool("empty",
- UUIDCatalog::get(opCtx).getAllCollectionUUIDsFromDb(dbname).empty());
+ UUIDCatalog::get(opCtx).getAllCatalogEntriesFromDb(dbname).empty());
}
BSONObj curDbObj = b.obj();
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index df8abe7b222..dbc5ff11323 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -123,7 +123,7 @@ public:
}
AutoGetDb ctx(opCtx, nss.db(), MODE_IX);
- Lock::CollectionLock collLk(opCtx, nss, MODE_X);
+ auto collLk = stdx::make_unique<Lock::CollectionLock>(opCtx, nss, MODE_X);
Collection* collection = ctx.getDb() ? ctx.getDb()->getCollection(opCtx, nss) : NULL;
if (!collection) {
if (ctx.getDb() && ViewCatalog::get(ctx.getDb())->lookup(opCtx, nss.ns())) {
@@ -163,7 +163,8 @@ public:
const bool background = false;
ValidateResults results;
- Status status = collection->validate(opCtx, level, background, &results, &result);
+ Status status =
+ collection->validate(opCtx, level, background, std::move(collLk), &results, &result);
if (!status.isOK()) {
return CommandHelpers::appendCommandStatusNoThrow(result, status);
}
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index 635d9e7a2bb..4d690411b1b 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -142,7 +142,7 @@ Status repairCollections(OperationContext* opCtx,
const std::string& dbName,
stdx::function<void(const std::string& dbName)> onRecordStoreRepair) {
- auto colls = UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(opCtx, dbName);
+ auto colls = UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(dbName);
for (const auto& nss : colls) {
opCtx->checkForInterrupt();
@@ -183,7 +183,7 @@ Status repairDatabase(OperationContext* opCtx,
DisableDocumentValidation validationDisabler(opCtx);
// We must hold some form of lock here
- invariant(opCtx->lockState()->isW());
+ invariant(opCtx->lockState()->isLocked());
invariant(dbName.find('.') == std::string::npos);
log() << "repairDatabase " << dbName;
diff --git a/src/mongo/db/repair_database_and_check_version.cpp b/src/mongo/db/repair_database_and_check_version.cpp
index 7e3ac75d22f..80a520ad5cb 100644
--- a/src/mongo/db/repair_database_and_check_version.cpp
+++ b/src/mongo/db/repair_database_and_check_version.cpp
@@ -188,7 +188,6 @@ Status ensureCollectionProperties(OperationContext* opCtx,
const std::vector<std::string>& dbNames) {
auto databaseHolder = DatabaseHolder::get(opCtx);
auto downgradeError = Status{ErrorCodes::MustDowngrade, mustDowngradeErrorMsg};
- invariant(opCtx->lockState()->isW());
for (const auto& dbName : dbNames) {
auto db = databaseHolder->openDb(opCtx, dbName);
diff --git a/src/mongo/db/repl/idempotency_test_fixture.cpp b/src/mongo/db/repl/idempotency_test_fixture.cpp
index b04e3ea9170..7b48bbb7e3c 100644
--- a/src/mongo/db/repl/idempotency_test_fixture.cpp
+++ b/src/mongo/db/repl/idempotency_test_fixture.cpp
@@ -582,7 +582,8 @@ CollectionState IdempotencyTest::validate(const NamespaceString& nss) {
Lock::DBLock lk(_opCtx.get(), nss.db(), MODE_IX);
auto lock = stdx::make_unique<Lock::CollectionLock>(_opCtx.get(), nss, MODE_X);
- ASSERT_OK(collection->validate(_opCtx.get(), kValidateFull, false, &validateResults, &bob));
+ ASSERT_OK(collection->validate(
+ _opCtx.get(), kValidateFull, false, std::move(lock), &validateResults, &bob));
ASSERT_TRUE(validateResults.valid);
std::string dataHash = computeDataHash(collection);
diff --git a/src/mongo/db/storage/kv/SConscript b/src/mongo/db/storage/kv/SConscript
index 77505aa4747..0504a5b7191 100644
--- a/src/mongo/db/storage/kv/SConscript
+++ b/src/mongo/db/storage/kv/SConscript
@@ -60,7 +60,6 @@ env.Library(
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/db/logical_clock',
'$BUILD_DIR/mongo/db/storage/storage_repair_observer',
- '$BUILD_DIR/mongo/db/catalog/uuid_catalog_helper',
],
)
diff --git a/src/mongo/db/storage/kv/kv_catalog.cpp b/src/mongo/db/storage/kv/kv_catalog.cpp
index 534a83c9e40..d64df979a5c 100644
--- a/src/mongo/db/storage/kv/kv_catalog.cpp
+++ b/src/mongo/db/storage/kv/kv_catalog.cpp
@@ -581,7 +581,7 @@ Status KVCatalog::_replaceEntry(OperationContext* opCtx,
}
Status KVCatalog::_removeEntry(OperationContext* opCtx, StringData ns) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(NamespaceString(ns), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(nsToDatabaseSubstring(ns), MODE_X));
stdx::lock_guard<stdx::mutex> lk(_identsLock);
const NSToIdentMap::iterator it = _idents.find(ns.toString());
if (it == _idents.end()) {
@@ -793,10 +793,7 @@ Status KVCatalog::renameCollection(OperationContext* opCtx,
bool stayTemp) {
const NamespaceString fromNss(fromNS);
const NamespaceString toNss(toNS);
- // TODO SERVER-39518 : Temporarily comment this out because dropCollection uses
- // this function and now it only takes a database IX lock. We can change
- // this invariant to IX once renameCollection only MODE_IX as well.
- // invariant(opCtx->lockState()->isDbLockedForMode(fromNss.db(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(fromNss.db(), MODE_X));
const std::string identFrom = _engine->getCatalog()->getCollectionIdent(fromNS);
@@ -839,7 +836,7 @@ private:
Status KVCatalog::dropCollection(OperationContext* opCtx, StringData ns) {
NamespaceString nss(ns);
- invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(nss.db(), MODE_X));
CollectionCatalogEntry* const entry =
UUIDCatalog::get(opCtx).lookupCollectionCatalogEntryByNamespace(nss);
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.cpp b/src/mongo/db/storage/kv/kv_storage_engine.cpp
index 5e409902648..1386a0104bf 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.cpp
+++ b/src/mongo/db/storage/kv/kv_storage_engine.cpp
@@ -37,7 +37,6 @@
#include "mongo/db/catalog/catalog_control.h"
#include "mongo/db/catalog/uuid_catalog.h"
-#include "mongo/db/catalog/uuid_catalog_helper.h"
#include "mongo/db/client.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/logical_clock.h"
@@ -533,8 +532,7 @@ Status KVStorageEngine::dropDatabase(OperationContext* opCtx, StringData db) {
}
}
- std::vector<NamespaceString> toDrop =
- UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(opCtx, db);
+ std::vector<NamespaceString> toDrop = UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(db);
// Do not timestamp any of the following writes. This will remove entries from the catalog as
// well as drop any underlying tables. It's not expected for dropping tables to be reversible
@@ -913,22 +911,20 @@ void KVStorageEngine::TimestampMonitor::removeListener(TimestampListener* listen
int64_t KVStorageEngine::sizeOnDiskForDb(OperationContext* opCtx, StringData dbName) {
int64_t size = 0;
+ auto catalogEntries = UUIDCatalog::get(opCtx).getAllCatalogEntriesFromDb(dbName);
- catalog::forEachCollectionFromDb(
- opCtx, dbName, MODE_IS, [&](Collection* collection, CollectionCatalogEntry* catalogEntry) {
- size += catalogEntry->getRecordStore()->storageSize(opCtx);
+ for (auto catalogEntry : catalogEntries) {
+ size += catalogEntry->getRecordStore()->storageSize(opCtx);
- std::vector<std::string> indexNames;
- catalogEntry->getAllIndexes(opCtx, &indexNames);
+ std::vector<std::string> indexNames;
+ catalogEntry->getAllIndexes(opCtx, &indexNames);
- for (size_t i = 0; i < indexNames.size(); i++) {
- std::string ident =
- _catalog->getIndexIdent(opCtx, catalogEntry->ns().ns(), indexNames[i]);
- size += _engine->getIdentSize(opCtx, ident);
- }
-
- return true;
- });
+ for (size_t i = 0; i < indexNames.size(); i++) {
+ std::string ident =
+ _catalog->getIndexIdent(opCtx, catalogEntry->ns().ns(), indexNames[i]);
+ size += _engine->getIdentSize(opCtx, ident);
+ }
+ }
return size;
}
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 6b5b7044484..68bbcd7e819 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -111,20 +111,22 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+ db->dropCollection(&_opCtx, _ns).transitional_ignore();
coll = db->createCollection(&_opCtx, _ns);
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(coll->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << 1 << "a"
- << "dup")),
- nullOpDebug,
- true));
- ASSERT_OK(coll->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << 2 << "a"
- << "dup")),
- nullOpDebug,
- true));
+ coll->insertDocument(&_opCtx,
+ InsertStatement(BSON("_id" << 1 << "a"
+ << "dup")),
+ nullOpDebug,
+ true)
+ .transitional_ignore();
+ coll->insertDocument(&_opCtx,
+ InsertStatement(BSON("_id" << 2 << "a"
+ << "dup")),
+ nullOpDebug,
+ true)
+ .transitional_ignore();
wunit.commit();
}
@@ -166,7 +168,7 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+ db->dropCollection(&_opCtx, _ns).transitional_ignore();
coll = db->createCollection(&_opCtx, _ns);
OpDebug* const nullOpDebug = nullptr;
@@ -223,7 +225,7 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+ db->dropCollection(&_opCtx, _ns).transitional_ignore();
coll = db->createCollection(&_opCtx, _ns);
// Drop all indexes including id index.
coll->getIndexCatalog()->dropAllIndexes(&_opCtx, true);
@@ -231,8 +233,8 @@ public:
int32_t nDocs = 1000;
OpDebug* const nullOpDebug = nullptr;
for (int32_t i = 0; i < nDocs; ++i) {
- ASSERT_OK(
- coll->insertDocument(&_opCtx, InsertStatement(BSON("a" << i)), nullOpDebug));
+ coll->insertDocument(&_opCtx, InsertStatement(BSON("a" << i)), nullOpDebug)
+ .transitional_ignore();
}
wunit.commit();
}
@@ -265,7 +267,11 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+ {
+ // TODO SERVER-39520: Remove this DBLock
+ Lock::DBLock lock(&_opCtx, db->name(), MODE_X);
+ db->dropCollection(&_opCtx, _ns).transitional_ignore();
+ }
CollectionOptions options;
options.capped = true;
options.cappedSize = 10 * 1024;
@@ -275,8 +281,8 @@ public:
int32_t nDocs = 1000;
OpDebug* const nullOpDebug = nullptr;
for (int32_t i = 0; i < nDocs; ++i) {
- ASSERT_OK(coll->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << i)), nullOpDebug, true));
+ coll->insertDocument(&_opCtx, InsertStatement(BSON("_id" << i)), nullOpDebug, true)
+ .transitional_ignore();
}
wunit.commit();
}
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index aa90d235361..d114992fb13 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -61,10 +61,10 @@ void dropDatabase(OperationContext* opCtx, const NamespaceString& nss) {
databaseHolder->dropDb(opCtx, db);
}
}
-bool collectionExists(OperationContext* opCtx, OldClientContext* ctx, const string& ns) {
+bool collectionExists(OldClientContext* ctx, const string& ns) {
auto nss = NamespaceString(ns);
std::vector<NamespaceString> collections =
- UUIDCatalog::get(getGlobalServiceContext()).getAllCollectionNamesFromDb(opCtx, nss.db());
+ UUIDCatalog::get(getGlobalServiceContext()).getAllCollectionNamesFromDb(nss.db());
return std::count(collections.begin(), collections.end(), nss) > 0;
}
@@ -73,11 +73,11 @@ void createCollection(OperationContext* opCtx, const NamespaceString& nss) {
OldClientContext ctx(opCtx, nss.ns());
{
WriteUnitOfWork uow(opCtx);
- ASSERT(!collectionExists(opCtx, &ctx, nss.ns()));
+ ASSERT(!collectionExists(&ctx, nss.ns()));
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(BSONObj(), CollectionOptions::parseForCommand));
ASSERT_OK(ctx.db()->userCreateNS(opCtx, nss, collectionOptions, false));
- ASSERT(collectionExists(opCtx, &ctx, nss.ns()));
+ ASSERT(collectionExists(&ctx, nss.ns()));
uow.commit();
}
}
@@ -174,20 +174,20 @@ public:
OldClientContext ctx(&opCtx, ns);
{
WriteUnitOfWork uow(&opCtx);
- ASSERT(!collectionExists(&opCtx, &ctx, ns));
+ ASSERT(!collectionExists(&ctx, ns));
auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj();
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(options, CollectionOptions::parseForCommand));
ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, defaultIndexes));
- ASSERT(collectionExists(&opCtx, &ctx, ns));
+ ASSERT(collectionExists(&ctx, ns));
if (!rollback) {
uow.commit();
}
}
if (rollback) {
- ASSERT(!collectionExists(&opCtx, &ctx, ns));
+ ASSERT(!collectionExists(&ctx, ns));
} else {
- ASSERT(collectionExists(&opCtx, &ctx, ns));
+ ASSERT(collectionExists(&ctx, ns));
}
}
};
@@ -211,30 +211,30 @@ public:
OldClientContext ctx(&opCtx, ns);
{
WriteUnitOfWork uow(&opCtx);
- ASSERT(!collectionExists(&opCtx, &ctx, ns));
+ ASSERT(!collectionExists(&ctx, ns));
auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj();
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(options, CollectionOptions::parseForCommand));
ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, defaultIndexes));
uow.commit();
}
- ASSERT(collectionExists(&opCtx, &ctx, ns));
+ ASSERT(collectionExists(&ctx, ns));
// END OF SETUP / START OF TEST
{
WriteUnitOfWork uow(&opCtx);
- ASSERT(collectionExists(&opCtx, &ctx, ns));
+ ASSERT(collectionExists(&ctx, ns));
ASSERT_OK(ctx.db()->dropCollection(&opCtx, ns));
- ASSERT(!collectionExists(&opCtx, &ctx, ns));
+ ASSERT(!collectionExists(&ctx, ns));
if (!rollback) {
uow.commit();
}
}
if (rollback) {
- ASSERT(collectionExists(&opCtx, &ctx, ns));
+ ASSERT(collectionExists(&ctx, ns));
} else {
- ASSERT(!collectionExists(&opCtx, &ctx, ns));
+ ASSERT(!collectionExists(&ctx, ns));
}
}
};
@@ -261,34 +261,34 @@ public:
{
WriteUnitOfWork uow(&opCtx);
- ASSERT(!collectionExists(&opCtx, &ctx, source.ns()));
- ASSERT(!collectionExists(&opCtx, &ctx, target.ns()));
+ ASSERT(!collectionExists(&ctx, source.ns()));
+ ASSERT(!collectionExists(&ctx, target.ns()));
auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj();
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(options, CollectionOptions::parseForCommand));
ASSERT_OK(ctx.db()->userCreateNS(&opCtx, source, collectionOptions, defaultIndexes));
uow.commit();
}
- ASSERT(collectionExists(&opCtx, &ctx, source.ns()));
- ASSERT(!collectionExists(&opCtx, &ctx, target.ns()));
+ ASSERT(collectionExists(&ctx, source.ns()));
+ ASSERT(!collectionExists(&ctx, target.ns()));
// END OF SETUP / START OF TEST
{
WriteUnitOfWork uow(&opCtx);
ASSERT_OK(renameCollection(&opCtx, source, target));
- ASSERT(!collectionExists(&opCtx, &ctx, source.ns()));
- ASSERT(collectionExists(&opCtx, &ctx, target.ns()));
+ ASSERT(!collectionExists(&ctx, source.ns()));
+ ASSERT(collectionExists(&ctx, target.ns()));
if (!rollback) {
uow.commit();
}
}
if (rollback) {
- ASSERT(collectionExists(&opCtx, &ctx, source.ns()));
- ASSERT(!collectionExists(&opCtx, &ctx, target.ns()));
+ ASSERT(collectionExists(&ctx, source.ns()));
+ ASSERT(!collectionExists(&ctx, target.ns()));
} else {
- ASSERT(!collectionExists(&opCtx, &ctx, source.ns()));
- ASSERT(collectionExists(&opCtx, &ctx, target.ns()));
+ ASSERT(!collectionExists(&ctx, source.ns()));
+ ASSERT(collectionExists(&ctx, target.ns()));
}
}
};
@@ -320,8 +320,8 @@ public:
{
WriteUnitOfWork uow(&opCtx);
- ASSERT(!collectionExists(&opCtx, &ctx, source.ns()));
- ASSERT(!collectionExists(&opCtx, &ctx, target.ns()));
+ ASSERT(!collectionExists(&ctx, source.ns()));
+ ASSERT(!collectionExists(&ctx, target.ns()));
auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj();
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(options, CollectionOptions::parseForCommand));
@@ -334,8 +334,8 @@ public:
uow.commit();
}
- ASSERT(collectionExists(&opCtx, &ctx, source.ns()));
- ASSERT(collectionExists(&opCtx, &ctx, target.ns()));
+ ASSERT(collectionExists(&ctx, source.ns()));
+ ASSERT(collectionExists(&ctx, target.ns()));
assertOnlyRecord(&opCtx, source, sourceDoc);
assertOnlyRecord(&opCtx, target, targetDoc);
@@ -351,21 +351,21 @@ public:
{},
DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops));
ASSERT_OK(renameCollection(&opCtx, source, target));
- ASSERT(!collectionExists(&opCtx, &ctx, source.ns()));
- ASSERT(collectionExists(&opCtx, &ctx, target.ns()));
+ ASSERT(!collectionExists(&ctx, source.ns()));
+ ASSERT(collectionExists(&ctx, target.ns()));
assertOnlyRecord(&opCtx, target, sourceDoc);
if (!rollback) {
uow.commit();
}
}
if (rollback) {
- ASSERT(collectionExists(&opCtx, &ctx, source.ns()));
- ASSERT(collectionExists(&opCtx, &ctx, target.ns()));
+ ASSERT(collectionExists(&ctx, source.ns()));
+ ASSERT(collectionExists(&ctx, target.ns()));
assertOnlyRecord(&opCtx, source, sourceDoc);
assertOnlyRecord(&opCtx, target, targetDoc);
} else {
- ASSERT(!collectionExists(&opCtx, &ctx, source.ns()));
- ASSERT(collectionExists(&opCtx, &ctx, target.ns()));
+ ASSERT(!collectionExists(&ctx, source.ns()));
+ ASSERT(collectionExists(&ctx, target.ns()));
assertOnlyRecord(&opCtx, target, sourceDoc);
}
}
@@ -390,14 +390,14 @@ public:
{
WriteUnitOfWork uow(&opCtx);
- ASSERT(!collectionExists(&opCtx, &ctx, nss.ns()));
+ ASSERT(!collectionExists(&ctx, nss.ns()));
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(BSONObj(), CollectionOptions::parseForCommand));
ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, defaultIndexes));
insertRecord(&opCtx, nss, oldDoc);
uow.commit();
}
- ASSERT(collectionExists(&opCtx, &ctx, nss.ns()));
+ ASSERT(collectionExists(&ctx, nss.ns()));
assertOnlyRecord(&opCtx, nss, oldDoc);
// END OF SETUP / START OF TEST
@@ -411,18 +411,18 @@ public:
result,
{},
DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops));
- ASSERT(!collectionExists(&opCtx, &ctx, nss.ns()));
+ ASSERT(!collectionExists(&ctx, nss.ns()));
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(BSONObj(), CollectionOptions::parseForCommand));
ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, defaultIndexes));
- ASSERT(collectionExists(&opCtx, &ctx, nss.ns()));
+ ASSERT(collectionExists(&ctx, nss.ns()));
insertRecord(&opCtx, nss, newDoc);
assertOnlyRecord(&opCtx, nss, newDoc);
if (!rollback) {
uow.commit();
}
}
- ASSERT(collectionExists(&opCtx, &ctx, nss.ns()));
+ ASSERT(collectionExists(&ctx, nss.ns()));
if (rollback) {
assertOnlyRecord(&opCtx, nss, oldDoc);
} else {
@@ -446,14 +446,14 @@ public:
BSONObj doc = BSON("_id"
<< "example string");
- ASSERT(!collectionExists(&opCtx, &ctx, nss.ns()));
+ ASSERT(!collectionExists(&ctx, nss.ns()));
{
WriteUnitOfWork uow(&opCtx);
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(BSONObj(), CollectionOptions::parseForCommand));
ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, defaultIndexes));
- ASSERT(collectionExists(&opCtx, &ctx, nss.ns()));
+ ASSERT(collectionExists(&ctx, nss.ns()));
insertRecord(&opCtx, nss, doc);
assertOnlyRecord(&opCtx, nss, doc);
@@ -464,13 +464,13 @@ public:
result,
{},
DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops));
- ASSERT(!collectionExists(&opCtx, &ctx, nss.ns()));
+ ASSERT(!collectionExists(&ctx, nss.ns()));
if (!rollback) {
uow.commit();
}
}
- ASSERT(!collectionExists(&opCtx, &ctx, nss.ns()));
+ ASSERT(!collectionExists(&ctx, nss.ns()));
}
};
@@ -489,14 +489,14 @@ public:
BSONObj doc = BSON("_id"
<< "foo");
- ASSERT(!collectionExists(&opCtx, &ctx, nss.ns()));
+ ASSERT(!collectionExists(&ctx, nss.ns()));
{
WriteUnitOfWork uow(&opCtx);
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(BSONObj(), CollectionOptions::parseForCommand));
ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, defaultIndexes));
- ASSERT(collectionExists(&opCtx, &ctx, nss.ns()));
+ ASSERT(collectionExists(&ctx, nss.ns()));
insertRecord(&opCtx, nss, doc);
assertOnlyRecord(&opCtx, nss, doc);
uow.commit();
@@ -509,14 +509,14 @@ public:
WriteUnitOfWork uow(&opCtx);
ASSERT_OK(truncateCollection(&opCtx, nss));
- ASSERT(collectionExists(&opCtx, &ctx, nss.ns()));
+ ASSERT(collectionExists(&ctx, nss.ns()));
assertEmpty(&opCtx, nss);
if (!rollback) {
uow.commit();
}
}
- ASSERT(collectionExists(&opCtx, &ctx, nss.ns()));
+ ASSERT(collectionExists(&ctx, nss.ns()));
if (rollback) {
assertOnlyRecord(&opCtx, nss, doc);
} else {
@@ -753,11 +753,11 @@ public:
{
WriteUnitOfWork uow(&opCtx);
- ASSERT(!collectionExists(&opCtx, &ctx, nss.ns()));
+ ASSERT(!collectionExists(&ctx, nss.ns()));
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(BSONObj(), CollectionOptions::parseForCommand));
ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, false));
- ASSERT(collectionExists(&opCtx, &ctx, nss.ns()));
+ ASSERT(collectionExists(&ctx, nss.ns()));
Collection* coll = ctx.db()->getCollection(&opCtx, nss);
IndexCatalog* catalog = coll->getIndexCatalog();
@@ -770,9 +770,9 @@ public:
}
} // uow
if (rollback) {
- ASSERT(!collectionExists(&opCtx, &ctx, ns));
+ ASSERT(!collectionExists(&ctx, ns));
} else {
- ASSERT(collectionExists(&opCtx, &ctx, ns));
+ ASSERT(collectionExists(&ctx, ns));
ASSERT(indexReady(&opCtx, nss, idxNameA));
ASSERT(indexReady(&opCtx, nss, idxNameB));
ASSERT(indexReady(&opCtx, nss, idxNameC));
diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp
index 8ebebbb676a..2a84eef2168 100644
--- a/src/mongo/dbtests/validate_tests.cpp
+++ b/src/mongo/dbtests/validate_tests.cpp
@@ -94,6 +94,7 @@ protected:
->validate(&_opCtx,
_full ? kValidateFull : kValidateIndex,
_background,
+ std::move(lock),
&results,
&output));