-rw-r--r--  jstests/core/txns/drop_collection_not_blocked_by_txn.js | 32
-rw-r--r--  jstests/core/txns/transactions_profiling_with_drops.js | 87
-rw-r--r--  jstests/replsets/transient_txn_error_labels_with_write_concern.js | 44
-rw-r--r--  src/mongo/db/catalog/SConscript | 13
-rw-r--r--  src/mongo/db/catalog/catalog_control.cpp | 2
-rw-r--r--  src/mongo/db/catalog/collection.h | 1
-rw-r--r--  src/mongo/db/catalog/collection_impl.cpp | 4
-rw-r--r--  src/mongo/db/catalog/collection_impl.h | 1
-rw-r--r--  src/mongo/db/catalog/collection_mock.h | 1
-rw-r--r--  src/mongo/db/catalog/collection_test.cpp | 2
-rw-r--r--  src/mongo/db/catalog/database_holder_impl.cpp | 5
-rw-r--r--  src/mongo/db/catalog/database_impl.cpp | 51
-rw-r--r--  src/mongo/db/catalog/drop_collection.cpp | 14
-rw-r--r--  src/mongo/db/catalog/index_consistency.cpp | 2
-rw-r--r--  src/mongo/db/catalog/index_consistency.h | 2
-rw-r--r--  src/mongo/db/catalog/uuid_catalog.cpp | 34
-rw-r--r--  src/mongo/db/catalog/uuid_catalog.h | 18
-rw-r--r--  src/mongo/db/catalog/uuid_catalog_helper.cpp | 63
-rw-r--r--  src/mongo/db/catalog/uuid_catalog_helper.h | 53
-rw-r--r--  src/mongo/db/catalog/uuid_catalog_test.cpp | 2
-rw-r--r--  src/mongo/db/catalog_raii.cpp | 12
-rw-r--r--  src/mongo/db/commands/SConscript | 2
-rw-r--r--  src/mongo/db/commands/dbhash.cpp | 111
-rw-r--r--  src/mongo/db/commands/list_collections.cpp | 41
-rw-r--r--  src/mongo/db/commands/list_databases.cpp | 2
-rw-r--r--  src/mongo/db/commands/validate.cpp | 5
-rw-r--r--  src/mongo/db/repair_database.cpp | 4
-rw-r--r--  src/mongo/db/repair_database_and_check_version.cpp | 1
-rw-r--r--  src/mongo/db/repl/idempotency_test_fixture.cpp | 3
-rw-r--r--  src/mongo/db/storage/kv/SConscript | 1
-rw-r--r--  src/mongo/db/storage/kv/kv_catalog.cpp | 9
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine.cpp | 28
-rw-r--r--  src/mongo/dbtests/indexupdatetests.cpp | 42
-rw-r--r--  src/mongo/dbtests/rollbacktests.cpp | 102
-rw-r--r--  src/mongo/dbtests/validate_tests.cpp | 1
35 files changed, 505 insertions, 290 deletions
diff --git a/jstests/core/txns/drop_collection_not_blocked_by_txn.js b/jstests/core/txns/drop_collection_not_blocked_by_txn.js
new file mode 100644
index 00000000000..626a7128a5c
--- /dev/null
+++ b/jstests/core/txns/drop_collection_not_blocked_by_txn.js
@@ -0,0 +1,32 @@
+/**
+ * Test that dropping a collection only takes a database IX lock and will not be blocked by transactions.
+ *
+ * @tags: [uses_transactions, requires_db_locking, assumes_unsharded_collection]
+ */
+
+(function() {
+ "use strict";
+
+ let rst = new ReplSetTest({nodes: 1});
+ rst.startSet();
+ rst.initiate();
+
+ let db = rst.getPrimary().getDB("test");
+
+ assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]}));
+ assert.commandWorked(db.runCommand({insert: "b", documents: [{x: 1}]}));
+
+ const session = db.getMongo().startSession();
+ const sessionDb = session.getDatabase("test");
+
+ session.startTransaction();
+ // This holds a database IX lock and a collection IX lock on "a".
+ sessionDb.a.insert({y: 1});
+
+ // This only requires database IX lock.
+ assert.commandWorked(db.runCommand({drop: "b"}));
+
+ session.commitTransaction();
+
+ rst.stopSet();
+})();
diff --git a/jstests/core/txns/transactions_profiling_with_drops.js b/jstests/core/txns/transactions_profiling_with_drops.js
index 418bc45330b..4c2c1c0047a 100644
--- a/jstests/core/txns/transactions_profiling_with_drops.js
+++ b/jstests/core/txns/transactions_profiling_with_drops.js
@@ -7,20 +7,17 @@
const dbName = "test";
const collName = "transactions_profiling_with_drops";
- const otherCollName = "other";
const adminDB = db.getSiblingDB("admin");
const testDB = db.getSiblingDB(dbName);
- const otherColl = testDB[otherCollName];
const session = db.getMongo().startSession({causalConsistency: false});
const sessionDb = session.getDatabase(dbName);
const sessionColl = sessionDb[collName];
sessionDb.runCommand({dropDatabase: 1, writeConcern: {w: "majority"}});
assert.commandWorked(sessionColl.insert({_id: "doc"}, {w: "majority"}));
- assert.commandWorked(otherColl.insert({_id: "doc"}, {w: "majority"}));
assert.commandWorked(sessionDb.runCommand({profile: 1, slowms: 1}));
- jsTest.log("Test read profiling with drops.");
+ jsTest.log("Test read profiling with operation holding database X lock.");
jsTest.log("Start transaction.");
session.startTransaction();
@@ -32,39 +29,50 @@
profilerHasSingleMatchingEntryOrThrow(
{profileDB: testDB, filter: {"command.comment": "read success"}});
- jsTest.log("Start a drop, which will hang.");
- let awaitDrop = startParallelShell(function() {
- db.getSiblingDB("test").runCommand({drop: "other", writeConcern: {w: "majority"}});
+ // Lock 'test' database in X mode.
+ let lockShell = startParallelShell(function() {
+ assert.commandFailed(db.adminCommand({
+ sleep: 1,
+ secs: 500,
+ lock: "w",
+ lockTarget: "test",
+ $comment: "transaction_profiling_with_drops lock sleep"
+ }));
});
- // Wait for the drop to have a pending MODE_X lock on the database.
- assert.soon(
- function() {
- return adminDB
- .aggregate([
- {$currentOp: {}},
- {$match: {"command.drop": otherCollName, waitingForLock: true}}
- ])
- .itcount() === 1;
- },
- function() {
- return "Failed to find drop in currentOp output: " +
- tojson(adminDB.aggregate([{$currentOp: {}}]));
+ const waitForCommand = function(opFilter) {
+ let opId = -1;
+ assert.soon(function() {
+ const curopRes = testDB.currentOp();
+ assert.commandWorked(curopRes);
+ const foundOp = curopRes["inprog"].filter(opFilter);
+
+ if (foundOp.length == 1) {
+ opId = foundOp[0]["opid"];
+ }
+ return (foundOp.length == 1);
});
+ return opId;
+ };
+
+ // Wait for sleep to appear in currentOp
+ let opId = waitForCommand(
+ op => (op["ns"] == "admin.$cmd" &&
+ op["command"]["$comment"] == "transaction_profiling_with_drops lock sleep"));
jsTest.log("Run a slow read. Profiling in the transaction should fail.");
assert.sameMembers(
[{_id: "doc"}],
sessionColl.find({$where: "sleep(1000); return true;"}).comment("read failure").toArray());
session.commitTransaction();
- awaitDrop();
+
+ assert.commandWorked(testDB.killOp(opId));
+ lockShell();
+
profilerHasZeroMatchingEntriesOrThrow(
{profileDB: testDB, filter: {"command.comment": "read failure"}});
- jsTest.log("Test write profiling with drops.");
-
- // Recreate the "other" collection so it can be dropped again.
- assert.commandWorked(otherColl.insert({_id: "doc"}, {w: "majority"}));
+ jsTest.log("Test write profiling with operation holding database X lock.");
jsTest.log("Start transaction.");
session.startTransaction();
@@ -75,31 +83,24 @@
profilerHasSingleMatchingEntryOrThrow(
{profileDB: testDB, filter: {"command.collation": {locale: "en"}}});
- jsTest.log("Start a drop, which will hang.");
- awaitDrop = startParallelShell(function() {
- db.getSiblingDB("test").runCommand({drop: "other", writeConcern: {w: "majority"}});
+ // Lock 'test' database in X mode.
+ lockShell = startParallelShell(function() {
+ assert.commandFailed(db.getSiblingDB("test").adminCommand(
+ {sleep: 1, secs: 500, lock: "w", lockTarget: "test", $comment: "lock sleep"}));
});
- // Wait for the drop to have a pending MODE_X lock on the database.
- assert.soon(
- function() {
- return adminDB
- .aggregate([
- {$currentOp: {}},
- {$match: {"command.drop": otherCollName, waitingForLock: true}}
- ])
- .itcount() === 1;
- },
- function() {
- return "Failed to find drop in currentOp output: " +
- tojson(adminDB.aggregate([{$currentOp: {}}]));
- });
+ // Wait for sleep to appear in currentOp
+ opId = waitForCommand(
+ op => (op["ns"] == "admin.$cmd" && op["command"]["$comment"] == "lock sleep"));
jsTest.log("Run a slow write. Profiling in the transaction should fail.");
assert.commandWorked(sessionColl.update(
{$where: "sleep(1000); return true;"}, {$inc: {bad: 1}}, {collation: {locale: "fr"}}));
session.commitTransaction();
- awaitDrop();
+
+ assert.commandWorked(testDB.killOp(opId));
+ lockShell();
+
profilerHasZeroMatchingEntriesOrThrow(
{profileDB: testDB, filter: {"command.collation": {locale: "fr"}}});
diff --git a/jstests/replsets/transient_txn_error_labels_with_write_concern.js b/jstests/replsets/transient_txn_error_labels_with_write_concern.js
index 55dc269831e..b422bb96ccc 100644
--- a/jstests/replsets/transient_txn_error_labels_with_write_concern.js
+++ b/jstests/replsets/transient_txn_error_labels_with_write_concern.js
@@ -73,16 +73,35 @@
jsTest.log(
"If the noop write for NoSuchTransaction cannot occur, the error is not transient");
- assert.commandWorked(testDB.getSiblingDB("local").createCollection("todrop"));
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangDuringDropCollection", mode: "alwaysOn"}));
- // Create a pending drop on a collection in the local database. This will hold an X lock on
- // the local database.
- let awaitDrop = startParallelShell(() => assert(db.getSiblingDB("local")["todrop"].drop()),
- rst.ports[0]);
- checkLog.contains(testDB.getMongo(), "hangDuringDropCollection fail point enabled");
+
+ // Lock 'local' database in X mode.
+ let lockShell = startParallelShell(function() {
+ assert.commandFailed(db.adminCommand({
+ sleep: 1,
+ secs: 500,
+ lock: "w",
+ lockTarget: "local",
+ $comment: "transient_txn_error_labels_with_write_concern lock sleep"
+ }));
+ }, rst.ports[0]);
+
+ // Wait for sleep to appear in currentOp
+ let opId = -1;
+ assert.soon(function() {
+ const curopRes = testDB.currentOp();
+ assert.commandWorked(curopRes);
+ const foundOp = curopRes["inprog"].filter(
+ op => (op["ns"] == "admin.$cmd" &&
+ op["command"]["$comment"] ==
+ "transient_txn_error_labels_with_write_concern lock sleep"));
+ if (foundOp.length == 1) {
+ opId = foundOp[0]["opid"];
+ }
+ return (foundOp.length == 1);
+ });
+
// The server will attempt to perform a noop write, since the command returns
- // NoSuchTransaction. The noop write will time out acquiring a lock on the local database.
+ // NoSuchTransaction. The noop write will time out acquiring a lock on the 'local' database.
// This should not be a TransientTransactionError, since the server has not successfully
// replicated a write to confirm that it is primary.
// Use a txnNumber that is one higher than the server has tracked.
@@ -94,9 +113,10 @@
}));
assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
assert(!res.hasOwnProperty("errorLabels"));
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: "hangDuringDropCollection", mode: "off"}));
- awaitDrop();
+
+ assert.commandWorked(testDB.killOp(opId));
+ lockShell();
+
rst.awaitReplication();
}
diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript
index e2eacc8d585..591691e1b13 100644
--- a/src/mongo/db/catalog/SConscript
+++ b/src/mongo/db/catalog/SConscript
@@ -325,6 +325,17 @@ env.Library(
]
)
+env.Library(
+ target='uuid_catalog_helper',
+ source=[
+ 'uuid_catalog_helper.cpp',
+ ],
+ LIBDEPS_PRIVATE=[
+ 'uuid_catalog',
+ '$BUILD_DIR/mongo/db/concurrency/lock_manager',
+ ]
+)
+
env.CppUnitTest(
target='namespace_uuid_cache_test',
source=[
@@ -342,7 +353,6 @@ env.CppUnitTest(
],
LIBDEPS=[
'uuid_catalog',
- '$BUILD_DIR/mongo/db/concurrency/lock_manager',
'$BUILD_DIR/mongo/db/service_context',
'$BUILD_DIR/mongo/db/storage/kv/kv_prefix',
],
@@ -417,6 +427,7 @@ env.Library(
'$BUILD_DIR/mongo/db/views/views_mongod',
],
LIBDEPS_PRIVATE=[
+ '$BUILD_DIR/mongo/db/catalog/uuid_catalog_helper',
'$BUILD_DIR/mongo/db/commands/server_status_core',
'$BUILD_DIR/mongo/db/index/index_build_interceptor',
'$BUILD_DIR/mongo/db/logical_clock',
diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
index ffd4a5f60dc..a13ca2d2650 100644
--- a/src/mongo/db/catalog/catalog_control.cpp
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -177,7 +177,7 @@ void openCatalog(OperationContext* opCtx, const MinVisibleTimestampMap& minVisib
LOG_FOR_RECOVERY(1) << "openCatalog: dbholder reopening database " << dbName;
auto db = databaseHolder->openDb(opCtx, dbName);
invariant(db, str::stream() << "failed to reopen database " << dbName);
- for (auto&& collNss : UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(dbName)) {
+ for (auto&& collNss : UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(opCtx, dbName)) {
// Note that the collection name already includes the database component.
auto collection = db->getCollection(opCtx, collNss.ns());
invariant(collection,
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 705cc5c6d44..39cb24a20d7 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -328,7 +328,6 @@ public:
virtual Status validate(OperationContext* const opCtx,
const ValidateCmdLevel level,
bool background,
- std::unique_ptr<Lock::CollectionLock> collLk,
ValidateResults* const results,
BSONObjBuilder* const output) = 0;
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index fcb62484ea6..aa662981b29 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -1273,7 +1273,6 @@ void _validateCatalogEntry(OperationContext* opCtx,
Status CollectionImpl::validate(OperationContext* opCtx,
ValidateCmdLevel level,
bool background,
- std::unique_ptr<Lock::CollectionLock> collLk,
ValidateResults* results,
BSONObjBuilder* output) {
dassert(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_IS));
@@ -1281,8 +1280,7 @@ Status CollectionImpl::validate(OperationContext* opCtx,
try {
ValidateResultsMap indexNsResultsMap;
BSONObjBuilder keysPerIndex; // not using subObjStart to be exception safe
- IndexConsistency indexConsistency(
- opCtx, this, ns(), _recordStore, std::move(collLk), background);
+ IndexConsistency indexConsistency(opCtx, this, ns(), _recordStore, background);
RecordStoreValidateAdaptor indexValidator = RecordStoreValidateAdaptor(
opCtx, &indexConsistency, level, _indexCatalog.get(), &indexNsResultsMap);
diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h
index 615a2499c59..cc26c48ae73 100644
--- a/src/mongo/db/catalog/collection_impl.h
+++ b/src/mongo/db/catalog/collection_impl.h
@@ -237,7 +237,6 @@ public:
Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
bool background,
- std::unique_ptr<Lock::CollectionLock> collLk,
ValidateResults* results,
BSONObjBuilder* output) final;
diff --git a/src/mongo/db/catalog/collection_mock.h b/src/mongo/db/catalog/collection_mock.h
index c7075786eda..14e9f8d1550 100644
--- a/src/mongo/db/catalog/collection_mock.h
+++ b/src/mongo/db/catalog/collection_mock.h
@@ -171,7 +171,6 @@ public:
Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
bool background,
- std::unique_ptr<Lock::CollectionLock> collLk,
ValidateResults* results,
BSONObjBuilder* output) {
std::abort();
diff --git a/src/mongo/db/catalog/collection_test.cpp b/src/mongo/db/catalog/collection_test.cpp
index a6486ebc9b0..a36c4d71770 100644
--- a/src/mongo/db/catalog/collection_test.cpp
+++ b/src/mongo/db/catalog/collection_test.cpp
@@ -72,7 +72,7 @@ void CollectionTest::checkValidate(
for (auto level : levels) {
ValidateResults results;
BSONObjBuilder output;
- auto status = coll->validate(opCtx, level, false, std::move(collLock), &results, &output);
+ auto status = coll->validate(opCtx, level, false, &results, &output);
ASSERT_OK(status);
ASSERT_EQ(results.valid, valid);
ASSERT_EQ(results.errors.size(), (long unsigned int)errors);
diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp
index 19143026e66..9560ec6f468 100644
--- a/src/mongo/db/catalog/database_holder_impl.cpp
+++ b/src/mongo/db/catalog/database_holder_impl.cpp
@@ -137,7 +137,7 @@ Database* DatabaseHolderImpl::openDb(OperationContext* opCtx, StringData ns, boo
// different databases for the same name.
lk.unlock();
- if (UUIDCatalog::get(opCtx).getAllCatalogEntriesFromDb(dbname).empty()) {
+ if (UUIDCatalog::get(opCtx).getAllCollectionUUIDsFromDb(dbname).empty()) {
audit::logCreateDatabase(opCtx->getClient(), dbname);
if (justCreated)
*justCreated = true;
@@ -202,6 +202,7 @@ void DatabaseHolderImpl::dropDb(OperationContext* opCtx, Database* db) {
namespace {
void evictDatabaseFromUUIDCatalog(OperationContext* opCtx, Database* db) {
+ invariant(opCtx->lockState()->isW());
for (auto collIt = db->begin(opCtx); collIt != db->end(opCtx); ++collIt) {
auto coll = *collIt;
if (!coll) {
@@ -210,7 +211,7 @@ void evictDatabaseFromUUIDCatalog(OperationContext* opCtx, Database* db) {
NamespaceUUIDCache::get(opCtx).evictNamespace(coll->ns());
}
- UUIDCatalog::get(opCtx).onCloseDatabase(db);
+ UUIDCatalog::get(opCtx).onCloseDatabase(opCtx, db);
}
} // namespace
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index 366e15101b9..b0d6b1ca615 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -50,6 +50,7 @@
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/catalog/namespace_uuid_cache.h"
#include "mongo/db/catalog/uuid_catalog.h"
+#include "mongo/db/catalog/uuid_catalog_helper.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
@@ -169,7 +170,7 @@ void DatabaseImpl::init(OperationContext* const opCtx) const {
}
auto& uuidCatalog = UUIDCatalog::get(opCtx);
- for (const auto& nss : uuidCatalog.getAllCollectionNamesFromDb(_name)) {
+ for (const auto& nss : uuidCatalog.getAllCollectionNamesFromDb(opCtx, _name)) {
auto ownedCollection = _createCollectionInstance(opCtx, nss);
invariant(ownedCollection);
@@ -196,7 +197,7 @@ void DatabaseImpl::init(OperationContext* const opCtx) const {
void DatabaseImpl::clearTmpCollections(OperationContext* opCtx) const {
invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
- for (const auto& nss : UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(_name)) {
+ for (const auto& nss : UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(opCtx, _name)) {
CollectionCatalogEntry* coll =
UUIDCatalog::get(opCtx).lookupCollectionCatalogEntryByNamespace(nss);
CollectionOptions options = coll->getCollectionOptions(opCtx);
@@ -278,24 +279,24 @@ void DatabaseImpl::getStats(OperationContext* opCtx, BSONObjBuilder* output, dou
invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_IS));
- for (const auto& nss : UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(_name)) {
- Lock::CollectionLock colLock(opCtx, nss, MODE_IS);
- Collection* collection = getCollection(opCtx, nss);
+ catalog::forEachCollectionFromDb(
+ opCtx,
+ name(),
+ MODE_IS,
+ [&](Collection* collection, CollectionCatalogEntry* catalogEntry) -> bool {
+ nCollections += 1;
+ objects += collection->numRecords(opCtx);
+ size += collection->dataSize(opCtx);
- if (!collection)
- continue;
-
- nCollections += 1;
- objects += collection->numRecords(opCtx);
- size += collection->dataSize(opCtx);
+ BSONObjBuilder temp;
+ storageSize += collection->getRecordStore()->storageSize(opCtx, &temp);
+ numExtents += temp.obj()["numExtents"].numberInt(); // XXX
- BSONObjBuilder temp;
- storageSize += collection->getRecordStore()->storageSize(opCtx, &temp);
- numExtents += temp.obj()["numExtents"].numberInt(); // XXX
+ indexes += collection->getIndexCatalog()->numIndexesTotal(opCtx);
+ indexSize += collection->getIndexSize(opCtx);
- indexes += collection->getIndexCatalog()->numIndexesTotal(opCtx);
- indexSize += collection->getIndexSize(opCtx);
- }
+ return true;
+ });
ViewCatalog::get(this)->iterate(opCtx, [&](const ViewDefinition& view) { nViews += 1; });
@@ -369,7 +370,7 @@ Status DatabaseImpl::dropCollection(OperationContext* opCtx,
Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
const NamespaceString& fullns,
repl::OpTime dropOpTime) const {
- invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(fullns, MODE_X));
LOG(1) << "dropCollection: " << fullns;
@@ -514,9 +515,10 @@ Collection* DatabaseImpl::getCollection(OperationContext* opCtx, const Namespace
}
NamespaceUUIDCache& cache = NamespaceUUIDCache::get(opCtx);
- auto uuid = coll->uuid();
- invariant(uuid);
- cache.ensureNamespaceInCache(nss, uuid.get());
+ auto uuid = UUIDCatalog::get(opCtx).lookupUUIDByNSS(nss);
+ if (uuid) {
+ cache.ensureNamespaceInCache(nss, uuid.get());
+ }
return coll;
}
@@ -525,7 +527,10 @@ Status DatabaseImpl::renameCollection(OperationContext* opCtx,
StringData toNS,
bool stayTemp) const {
audit::logRenameCollection(&cc(), fromNS, toNS);
- invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
+ // TODO SERVER-39518 : Temporarily comment this out because dropCollection uses
+ // this function and now it only takes a database IX lock. We can change
+ // this invariant to IX once renameCollection only takes MODE_IX as well.
+ // invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
const NamespaceString fromNSS(fromNS);
const NamespaceString toNSS(toNS);
@@ -816,7 +821,7 @@ void DatabaseImpl::checkForIdIndexesAndDropPendingCollections(OperationContext*
return;
}
- for (const auto& nss : UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(_name)) {
+ for (const auto& nss : UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(opCtx, _name)) {
if (nss.isDropPendingNamespace()) {
auto dropOpTime = fassert(40459, nss.getDropPendingNamespaceOpTime());
log() << "Found drop-pending namespace " << nss << " with drop optime " << dropOpTime;
diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp
index 391a5a76048..b1d76ca11b4 100644
--- a/src/mongo/db/catalog/drop_collection.cpp
+++ b/src/mongo/db/catalog/drop_collection.cpp
@@ -55,9 +55,6 @@ Status _dropView(OperationContext* opCtx,
std::unique_ptr<AutoGetDb>& autoDb,
const NamespaceString& collectionName,
BSONObjBuilder& result) {
- // TODO(SERVER-39520): No need to relock once createCollection doesn't need X lock.
- autoDb.reset();
- autoDb = std::make_unique<AutoGetDb>(opCtx, collectionName.db(), MODE_IX);
Database* db = autoDb->getDb();
if (!db) {
return Status(ErrorCodes::NamespaceNotFound, "ns not found");
@@ -100,11 +97,16 @@ Status _dropView(OperationContext* opCtx,
Status _dropCollection(OperationContext* opCtx,
Database* db,
- Collection* coll,
const NamespaceString& collectionName,
const repl::OpTime& dropOpTime,
DropCollectionSystemCollectionMode systemCollectionMode,
BSONObjBuilder& result) {
+ Lock::CollectionLock collLock(opCtx, collectionName, MODE_X);
+ Collection* coll = db->getCollection(opCtx, collectionName);
+ if (!coll) {
+ return Status(ErrorCodes::NamespaceNotFound, "ns not found");
+ }
+
if (MONGO_FAIL_POINT(hangDuringDropCollection)) {
log() << "hangDuringDropCollection fail point enabled. Blocking until fail point is "
"disabled.";
@@ -155,7 +157,7 @@ Status dropCollection(OperationContext* opCtx,
return writeConflictRetry(opCtx, "drop", collectionName.ns(), [&] {
// TODO(SERVER-39520): Get rid of database MODE_X lock.
- auto autoDb = std::make_unique<AutoGetDb>(opCtx, collectionName.db(), MODE_X);
+ auto autoDb = std::make_unique<AutoGetDb>(opCtx, collectionName.db(), MODE_IX);
Database* db = autoDb->getDb();
if (!db) {
@@ -167,7 +169,7 @@ Status dropCollection(OperationContext* opCtx,
return _dropView(opCtx, autoDb, collectionName, result);
} else {
return _dropCollection(
- opCtx, db, coll, collectionName, dropOpTime, systemCollectionMode, result);
+ opCtx, db, collectionName, dropOpTime, systemCollectionMode, result);
}
});
}
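
With this change, dropCollection acquires the database lock in MODE_IX and takes the collection X lock inside _dropCollection, so dropping one collection no longer blocks behind transactions that hold only IX locks on other collections in the same database (the new jstest above exercises exactly this). A condensed sketch of the lock order the new path relies on, drawn from the hunks above; OpObserver calls, the WriteUnitOfWork, and the actual drop are elided:

    // Sketch only: database IX lock, then collection X lock, then the drop itself.
    Status dropCollectionSketch(OperationContext* opCtx, const NamespaceString& nss) {
        AutoGetDb autoDb(opCtx, nss.db(), MODE_IX);         // was MODE_X before this patch
        Database* db = autoDb.getDb();
        if (!db) {
            return Status(ErrorCodes::NamespaceNotFound, "ns not found");
        }
        Lock::CollectionLock collLock(opCtx, nss, MODE_X);  // exclusive only on this collection
        Collection* coll = db->getCollection(opCtx, nss);
        if (!coll) {
            return Status(ErrorCodes::NamespaceNotFound, "ns not found");
        }
        // ... proceed with the drop as in _dropCollection above ...
        return Status::OK();
    }
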
diff --git a/src/mongo/db/catalog/index_consistency.cpp b/src/mongo/db/catalog/index_consistency.cpp
index 78b2d62ab81..fbc98f9f1de 100644
--- a/src/mongo/db/catalog/index_consistency.cpp
+++ b/src/mongo/db/catalog/index_consistency.cpp
@@ -62,13 +62,11 @@ IndexConsistency::IndexConsistency(OperationContext* opCtx,
Collection* collection,
NamespaceString nss,
RecordStore* recordStore,
- std::unique_ptr<Lock::CollectionLock> collLk,
const bool background)
: _opCtx(opCtx),
_collection(collection),
_nss(nss),
_recordStore(recordStore),
- _collLk(std::move(collLk)),
_tracker(opCtx->getServiceContext()->getFastClockSource(),
internalQueryExecYieldIterations.load(),
Milliseconds(internalQueryExecYieldPeriodMS.load())) {
diff --git a/src/mongo/db/catalog/index_consistency.h b/src/mongo/db/catalog/index_consistency.h
index ac90a4278a0..d22696c5b0c 100644
--- a/src/mongo/db/catalog/index_consistency.h
+++ b/src/mongo/db/catalog/index_consistency.h
@@ -90,7 +90,6 @@ public:
Collection* collection,
NamespaceString nss,
RecordStore* recordStore,
- std::unique_ptr<Lock::CollectionLock> collLk,
const bool background);
/**
@@ -169,7 +168,6 @@ private:
Collection* _collection;
const NamespaceString _nss;
const RecordStore* _recordStore;
- std::unique_ptr<Lock::CollectionLock> _collLk;
ElapsedTracker _tracker;
// We map the hashed KeyString values to a bucket which contain the count of how many
diff --git a/src/mongo/db/catalog/uuid_catalog.cpp b/src/mongo/db/catalog/uuid_catalog.cpp
index a9046b3b885..dc83b1d955b 100644
--- a/src/mongo/db/catalog/uuid_catalog.cpp
+++ b/src/mongo/db/catalog/uuid_catalog.cpp
@@ -304,14 +304,10 @@ void UUIDCatalog::setCollectionNamespace(OperationContext* opCtx,
});
}
-void UUIDCatalog::onCloseDatabase(Database* db) {
+void UUIDCatalog::onCloseDatabase(OperationContext* opCtx, Database* db) {
+ invariant(opCtx->lockState()->isW());
for (auto it = begin(db->name()); it != end(); ++it) {
- auto coll = *it;
- if (coll && coll->uuid()) {
- // While the collection does not actually get dropped, we're going to destroy the
- // Collection object, so for purposes of the UUIDCatalog it looks the same.
- deregisterCollectionObject(coll->uuid().get());
- }
+ deregisterCollectionObject(it.uuid().get());
}
auto rid = ResourceId(RESOURCE_DATABASE, db->name());
@@ -396,16 +392,6 @@ boost::optional<CollectionUUID> UUIDCatalog::lookupUUIDByNSS(const NamespaceStri
return boost::none;
}
-std::vector<CollectionCatalogEntry*> UUIDCatalog::getAllCatalogEntriesFromDb(
- StringData dbName) const {
- std::vector<UUID> uuids = getAllCollectionUUIDsFromDb(dbName);
- std::vector<CollectionCatalogEntry*> ret;
- for (auto& uuid : uuids) {
- ret.push_back(lookupCollectionCatalogEntryByUUID(uuid));
- }
- return ret;
-}
-
std::vector<CollectionUUID> UUIDCatalog::getAllCollectionUUIDsFromDb(StringData dbName) const {
stdx::lock_guard<stdx::mutex> lock(_catalogLock);
auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue();
@@ -419,10 +405,18 @@ std::vector<CollectionUUID> UUIDCatalog::getAllCollectionUUIDsFromDb(StringData
return ret;
}
-std::vector<NamespaceString> UUIDCatalog::getAllCollectionNamesFromDb(StringData dbName) const {
+std::vector<NamespaceString> UUIDCatalog::getAllCollectionNamesFromDb(OperationContext* opCtx,
+ StringData dbName) const {
+ invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_S));
+
+ stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue();
+
std::vector<NamespaceString> ret;
- for (auto catalogEntry : getAllCatalogEntriesFromDb(dbName)) {
- ret.push_back(catalogEntry->ns());
+ for (auto it = _orderedCollections.lower_bound(std::make_pair(dbName.toString(), minUuid));
+ it != _orderedCollections.end() && it->first.first == dbName;
+ ++it) {
+ ret.push_back(it->second->collectionCatalogEntry->ns());
}
return ret;
}
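
The rewritten getAllCollectionNamesFromDb scans the ordered map keyed by (dbName, UUID) directly instead of first materializing catalog entries. A self-contained illustration of the same lower_bound range-scan idiom, with plain std types standing in for the server's UUID and catalog-entry classes:

    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    int main() {
        // Entries for one database are contiguous because the map is ordered by (dbName, id).
        std::map<std::pair<std::string, int>, std::string> orderedCollections = {
            {{"dbA", 1}, "dbA.foo"}, {{"dbB", 2}, "dbB.bar"}, {{"dbB", 7}, "dbB.baz"},
            {{"dbC", 3}, "dbC.qux"}};

        const std::string dbName = "dbB";
        std::vector<std::string> names;
        // Start at the smallest possible id for dbName and stop when the db component changes.
        for (auto it = orderedCollections.lower_bound({dbName, 0});
             it != orderedCollections.end() && it->first.first == dbName;
             ++it) {
            names.push_back(it->second);
        }

        for (const auto& ns : names)
            std::cout << ns << "\n";  // prints dbB.bar then dbB.baz
        return 0;
    }
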
diff --git a/src/mongo/db/catalog/uuid_catalog.h b/src/mongo/db/catalog/uuid_catalog.h
index 07a32558f67..7fe29428422 100644
--- a/src/mongo/db/catalog/uuid_catalog.h
+++ b/src/mongo/db/catalog/uuid_catalog.h
@@ -260,7 +260,7 @@ public:
/**
* Implies onDropCollection for all collections in db, but is not transactional.
*/
- void onCloseDatabase(Database* db);
+ void onCloseDatabase(OperationContext* opCtx, Database* db);
/**
* Register the collection catalog entry with `uuid`. The collection object with `uuid` must not
@@ -346,15 +346,11 @@ public:
boost::optional<CollectionUUID> lookupUUIDByNSS(const NamespaceString& nss) const;
/**
- * This function gets the pointers of all the CollectionCatalogEntries from `dbName`.
- *
- * Returns empty vector if the 'dbName' is not known.
- */
- std::vector<CollectionCatalogEntry*> getAllCatalogEntriesFromDb(StringData dbName) const;
-
- /**
* This function gets the UUIDs of all collections from `dbName`.
*
+ * If the caller does not take a strong database lock, some of the UUIDs might no longer exist (due
+ * to collection drop) after this function returns.
+ *
* Returns empty vector if the 'dbName' is not known.
*/
std::vector<CollectionUUID> getAllCollectionUUIDsFromDb(StringData dbName) const;
@@ -362,9 +358,13 @@ public:
/**
* This function gets the ns of all collections from `dbName`. The result is not sorted.
*
+ * Caller must take a strong database lock; otherwise, collections returned could be dropped or
+ * renamed.
+ *
* Returns empty vector if the 'dbName' is not known.
*/
- std::vector<NamespaceString> getAllCollectionNamesFromDb(StringData dbName) const;
+ std::vector<NamespaceString> getAllCollectionNamesFromDb(OperationContext* opCtx,
+ StringData dbName) const;
/**
* This functions gets all the database names. The result is sorted in alphabetical ascending
diff --git a/src/mongo/db/catalog/uuid_catalog_helper.cpp b/src/mongo/db/catalog/uuid_catalog_helper.cpp
new file mode 100644
index 00000000000..1ba42ec7b04
--- /dev/null
+++ b/src/mongo/db/catalog/uuid_catalog_helper.cpp
@@ -0,0 +1,63 @@
+/**
+ * Copyright (C) 2019-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/catalog/uuid_catalog_helper.h"
+#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_catalog_entry.h"
+#include "mongo/db/concurrency/d_concurrency.h"
+
+namespace mongo {
+namespace catalog {
+
+void forEachCollectionFromDb(
+ OperationContext* opCtx,
+ StringData dbName,
+ LockMode collLockMode,
+ std::function<bool(Collection* collection, CollectionCatalogEntry* catalogEntry)> callback) {
+
+ UUIDCatalog& uuidCatalog = UUIDCatalog::get(opCtx);
+ for (auto collectionIt = uuidCatalog.begin(dbName); collectionIt != uuidCatalog.end();
+ ++collectionIt) {
+ auto uuid = collectionIt.uuid().get();
+ auto nss = uuidCatalog.lookupNSSByUUID(uuid);
+
+ Lock::CollectionLock clk(opCtx, nss, collLockMode);
+
+ auto collection = uuidCatalog.lookupCollectionByUUID(uuid);
+ auto catalogEntry = uuidCatalog.lookupCollectionCatalogEntryByUUID(uuid);
+ if (!collection || !catalogEntry || catalogEntry->ns() != nss)
+ continue;
+
+ if (!callback(collection, catalogEntry))
+ break;
+ }
+}
+
+} // namespace catalog
+} // namespace mongo
diff --git a/src/mongo/db/catalog/uuid_catalog_helper.h b/src/mongo/db/catalog/uuid_catalog_helper.h
new file mode 100644
index 00000000000..0530618b102
--- /dev/null
+++ b/src/mongo/db/catalog/uuid_catalog_helper.h
@@ -0,0 +1,53 @@
+/**
+ * Copyright (C) 2019-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/catalog/uuid_catalog.h"
+#include "mongo/db/concurrency/lock_manager_defs.h"
+
+namespace mongo {
+
+class Collection;
+class CollectionCatalogEntry;
+
+namespace catalog {
+
+/**
+ * Loops through all the collections in the database and runs the callback function on each one of
+ * them. The return value of the callback decides whether to continue the loop.
+ */
+void forEachCollectionFromDb(
+ OperationContext* opCtx,
+ StringData dbName,
+ LockMode collLockMode,
+ std::function<bool(Collection* collection, CollectionCatalogEntry* catalogEntry)> callback);
+
+} // namespace catalog
+} // namespace mongo
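
The helper's contract is that the callback returns true to keep iterating and false to stop early, matching the call sites added elsewhere in this patch. A minimal usage sketch, assuming the same server types used above; the record-counting body is purely illustrative:

    // Sketch only: count records across a database, locking each collection in MODE_IS.
    long long countRecordsInDb(OperationContext* opCtx, StringData dbName) {
        long long total = 0;
        catalog::forEachCollectionFromDb(
            opCtx,
            dbName,
            MODE_IS,
            [&](Collection* collection, CollectionCatalogEntry* catalogEntry) -> bool {
                total += collection->numRecords(opCtx);
                return true;  // returning false here would end the iteration early
            });
        return total;
    }
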
diff --git a/src/mongo/db/catalog/uuid_catalog_test.cpp b/src/mongo/db/catalog/uuid_catalog_test.cpp
index dd25f8de15b..dedb0582120 100644
--- a/src/mongo/db/catalog/uuid_catalog_test.cpp
+++ b/src/mongo/db/catalog/uuid_catalog_test.cpp
@@ -740,7 +740,7 @@ TEST_F(UUIDCatalogTest, GetAllCollectionNamesAndGetAllDbNames) {
}
std::vector<NamespaceString> dCollList = {d1Coll, d2Coll, d3Coll};
- auto res = catalog.getAllCollectionNamesFromDb("dbD");
+ auto res = catalog.getAllCollectionNamesFromDb(&opCtx, "dbD");
std::sort(res.begin(), res.end());
ASSERT(res == dCollList);
diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp
index 20bf489ca71..aee75fed1f8 100644
--- a/src/mongo/db/catalog_raii.cpp
+++ b/src/mongo/db/catalog_raii.cpp
@@ -75,7 +75,17 @@ AutoGetCollection::AutoGetCollection(OperationContext* opCtx,
modeDB,
deadline),
_resolvedNss(resolveNamespaceStringOrUUID(opCtx, nsOrUUID)) {
- _collLock.emplace(opCtx, _resolvedNss, modeColl, deadline);
+
+ NamespaceString resolvedNssWithLock;
+ do {
+ _collLock.emplace(opCtx, _resolvedNss, modeColl, deadline);
+
+ // We looked up nsOrUUID without a collection lock so it's possible that the
+ // collection is dropped now. Look it up again.
+ resolvedNssWithLock = resolveNamespaceStringOrUUID(opCtx, nsOrUUID);
+ } while (_resolvedNss != resolvedNssWithLock);
+ _resolvedNss = resolvedNssWithLock;
+
// Wait for a configured amount of time after acquiring locks if the failpoint is enabled
MONGO_FAIL_POINT_BLOCK(setAutoGetCollectionWait, customWait) {
const BSONObj& data = customWait.getData();
diff --git a/src/mongo/db/commands/SConscript b/src/mongo/db/commands/SConscript
index 826d195b346..882331f0073 100644
--- a/src/mongo/db/commands/SConscript
+++ b/src/mongo/db/commands/SConscript
@@ -265,6 +265,7 @@ env.Library(
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/base',
'$BUILD_DIR/mongo/db/catalog/catalog_helpers',
+ '$BUILD_DIR/mongo/db/catalog/uuid_catalog_helper',
'$BUILD_DIR/mongo/db/catalog/index_key_validate',
'$BUILD_DIR/mongo/db/catalog/multi_index_block',
'$BUILD_DIR/mongo/db/command_can_run_here',
@@ -367,6 +368,7 @@ env.Library(
'$BUILD_DIR/mongo/db/background',
'$BUILD_DIR/mongo/db/catalog/catalog_control',
'$BUILD_DIR/mongo/db/catalog/catalog_helpers',
+ '$BUILD_DIR/mongo/db/catalog/uuid_catalog_helper',
'$BUILD_DIR/mongo/db/catalog/index_key_validate',
'$BUILD_DIR/mongo/db/cloner',
'$BUILD_DIR/mongo/db/commands',
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 6a6ab9cd1a3..db939ea91b6 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -33,11 +33,13 @@
#include <boost/optional.hpp>
#include <map>
+#include <set>
#include <string>
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/index_catalog.h"
+#include "mongo/db/catalog/uuid_catalog_helper.h"
#include "mongo/db/commands.h"
#include "mongo/db/commands/test_commands_enabled.h"
#include "mongo/db/db_raii.h"
@@ -185,7 +187,8 @@ public:
if (opCtx->recoveryUnit()->getTimestampReadSource() ==
RecoveryUnit::ReadSource::kProvided) {
// However, if we are performing a read at a timestamp, then we only need to lock the
- // database in intent mode to ensure that none of the collections get dropped.
+ // database in intent mode and then the collection in intent mode as well to ensure that
+ // none of the collections get dropped.
lockMode = LockMode::MODE_IS;
// Additionally, if we are performing a read at a timestamp, then we allow oplog
@@ -196,11 +199,6 @@ public:
}
AutoGetDb autoDb(opCtx, ns, lockMode);
Database* db = autoDb.getDb();
- std::vector<NamespaceString> collections;
- if (db) {
- collections = UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(db->name());
- std::sort(collections.begin(), collections.end());
- }
result.append("host", prettyHostName());
@@ -216,51 +214,84 @@ public:
"system.version",
"system.views"};
- BSONArrayBuilder cappedCollections;
- BSONObjBuilder collectionsByUUID;
-
- BSONObjBuilder bb(result.subobjStart("collections"));
- for (const auto& collNss : collections) {
- if (collNss.size() - 1 <= dbname.size()) {
- errmsg = str::stream() << "weird fullCollectionName [" << collNss.toString() << "]";
- return false;
- }
+ std::map<std::string, std::string> collectionToHashMap;
+ std::map<std::string, OptionalCollectionUUID> collectionToUUIDMap;
+ std::set<std::string> cappedCollectionSet;
+
+ bool noError = true;
+ catalog::forEachCollectionFromDb(
+ opCtx,
+ dbname,
+ MODE_IS,
+ [&](Collection* collection, CollectionCatalogEntry* catalogEntry) {
+ auto collNss = collection->ns();
+
+ if (collNss.size() - 1 <= dbname.size()) {
+ errmsg = str::stream() << "weird fullCollectionName [" << collNss.toString()
+ << "]";
+ noError = false;
+ return false;
+ }
- // Only include 'system' collections that are replicated.
- bool isReplicatedSystemColl =
- (replicatedSystemCollections.count(collNss.coll().toString()) > 0);
- if (collNss.isSystem() && !isReplicatedSystemColl)
- continue;
+ // Only include 'system' collections that are replicated.
+ bool isReplicatedSystemColl =
+ (replicatedSystemCollections.count(collNss.coll().toString()) > 0);
+ if (collNss.isSystem() && !isReplicatedSystemColl)
+ return true;
- if (collNss.coll().startsWith("tmp.mr.")) {
- // We skip any incremental map reduce collections as they also aren't replicated.
- continue;
- }
+ if (collNss.coll().startsWith("tmp.mr.")) {
+ // We skip any incremental map reduce collections as they also aren't
+ // replicated.
+ return true;
+ }
- if (desiredCollections.size() > 0 &&
- desiredCollections.count(collNss.coll().toString()) == 0)
- continue;
+ if (desiredCollections.size() > 0 &&
+ desiredCollections.count(collNss.coll().toString()) == 0)
+ return true;
- // Don't include 'drop pending' collections.
- if (collNss.isDropPendingNamespace())
- continue;
+ // Don't include 'drop pending' collections.
+ if (collNss.isDropPendingNamespace())
+ return true;
- if (Collection* collection = db->getCollection(opCtx, collNss.ns())) {
if (collection->isCapped()) {
- cappedCollections.append(collNss.coll());
+ cappedCollectionSet.insert(collNss.coll().toString());
}
if (OptionalCollectionUUID uuid = collection->uuid()) {
- uuid->appendToBuilder(&collectionsByUUID, collNss.coll());
+ collectionToUUIDMap[collNss.coll().toString()] = uuid;
}
- }
- // Compute the hash for this collection.
- std::string hash = _hashCollection(opCtx, db, collNss.toString());
+ // Compute the hash for this collection.
+ std::string hash = _hashCollection(opCtx, db, collNss.toString());
- bb.append(collNss.coll(), hash);
+ collectionToHashMap[collNss.coll().toString()] = hash;
+
+ return true;
+ });
+ if (!noError)
+ return false;
+
+ BSONObjBuilder bb(result.subobjStart("collections"));
+ BSONArrayBuilder cappedCollections;
+ BSONObjBuilder collectionsByUUID;
+
+ for (auto elem : cappedCollectionSet) {
+ cappedCollections.append(elem);
+ }
+
+ for (auto entry : collectionToUUIDMap) {
+ auto collName = entry.first;
+ auto uuid = entry.second;
+ uuid->appendToBuilder(&collectionsByUUID, collName);
+ }
+
+ for (auto entry : collectionToHashMap) {
+ auto collName = entry.first;
+ auto hash = entry.second;
+ bb.append(collName, hash);
md5_append(&globalState, (const md5_byte_t*)hash.c_str(), hash.size());
}
+
bb.done();
result.append("capped", BSONArray(cappedCollections.done()));
@@ -284,8 +315,7 @@ private:
NamespaceString ns(fullCollectionName);
Collection* collection = db->getCollection(opCtx, ns);
- if (!collection)
- return "";
+ invariant(collection);
boost::optional<Lock::CollectionLock> collLock;
if (opCtx->recoveryUnit()->getTimestampReadSource() ==
@@ -294,8 +324,7 @@ private:
// intent mode. We need to also acquire the collection lock in intent mode to ensure
// reading from the consistent snapshot doesn't overlap with any catalog operations on
// the collection.
- invariant(opCtx->lockState()->isDbLockedForMode(db->name(), MODE_IS));
- collLock.emplace(opCtx, ns, MODE_IS);
+ invariant(opCtx->lockState()->isCollectionLockedForMode(ns, MODE_IS));
auto minSnapshot = collection->getMinimumVisibleSnapshot();
auto mySnapshot = opCtx->recoveryUnit()->getPointInTimeReadTimestamp();
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index 8d711c43cf2..e1f20746eb4 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/index_catalog.h"
+#include "mongo/db/catalog/uuid_catalog_helper.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/commands.h"
#include "mongo/db/commands/list_collections_filter.h"
@@ -186,7 +187,6 @@ BSONObj buildCollectionBson(OperationContext* opCtx,
return b.obj();
}
- Lock::CollectionLock clk(opCtx, nss, MODE_IS);
CollectionOptions options = collection->getCatalogEntry()->getCollectionOptions(opCtx);
// While the UUID is stored as a collection option, from the user's perspective it is an
@@ -311,6 +311,7 @@ public:
continue;
}
+ Lock::CollectionLock clk(opCtx, nss, MODE_IS);
Collection* collection = db->getCollection(opCtx, nss);
BSONObj collBson =
buildCollectionBson(opCtx, collection, includePendingDrops, nameOnly);
@@ -320,25 +321,25 @@ public:
}
}
} else {
- for (auto collIt = db->begin(opCtx); collIt != db->end(opCtx); ++collIt) {
- auto collection = *collIt;
- if (!collection) {
- break;
- }
-
- if (authorizedCollections &&
- (collection->ns().coll().startsWith("system.") ||
- !as->isAuthorizedForAnyActionOnResource(
- ResourcePattern::forExactNamespace(collection->ns())))) {
- continue;
- }
- BSONObj collBson =
- buildCollectionBson(opCtx, collection, includePendingDrops, nameOnly);
- if (!collBson.isEmpty()) {
- _addWorkingSetMember(
- opCtx, collBson, matcher.get(), ws.get(), root.get());
- }
- }
+ mongo::catalog::forEachCollectionFromDb(
+ opCtx,
+ dbname,
+ MODE_IS,
+ [&](Collection* collection, CollectionCatalogEntry* catalogEntry) {
+ if (authorizedCollections &&
+ (collection->ns().coll().startsWith("system.") ||
+ !as->isAuthorizedForAnyActionOnResource(
+ ResourcePattern::forExactNamespace(collection->ns())))) {
+ return true;
+ }
+ BSONObj collBson = buildCollectionBson(
+ opCtx, collection, includePendingDrops, nameOnly);
+ if (!collBson.isEmpty()) {
+ _addWorkingSetMember(
+ opCtx, collBson, matcher.get(), ws.get(), root.get());
+ }
+ return true;
+ });
}
// Skipping views is only necessary for internal cloning operations.
diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp
index 46d3ab65fa8..dccec13a8b6 100644
--- a/src/mongo/db/commands/list_databases.cpp
+++ b/src/mongo/db/commands/list_databases.cpp
@@ -170,7 +170,7 @@ public:
b.append("sizeOnDisk", static_cast<double>(size));
b.appendBool("empty",
- UUIDCatalog::get(opCtx).getAllCatalogEntriesFromDb(dbname).empty());
+ UUIDCatalog::get(opCtx).getAllCollectionUUIDsFromDb(dbname).empty());
}
BSONObj curDbObj = b.obj();
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index dbc5ff11323..df8abe7b222 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -123,7 +123,7 @@ public:
}
AutoGetDb ctx(opCtx, nss.db(), MODE_IX);
- auto collLk = stdx::make_unique<Lock::CollectionLock>(opCtx, nss, MODE_X);
+ Lock::CollectionLock collLk(opCtx, nss, MODE_X);
Collection* collection = ctx.getDb() ? ctx.getDb()->getCollection(opCtx, nss) : NULL;
if (!collection) {
if (ctx.getDb() && ViewCatalog::get(ctx.getDb())->lookup(opCtx, nss.ns())) {
@@ -163,8 +163,7 @@ public:
const bool background = false;
ValidateResults results;
- Status status =
- collection->validate(opCtx, level, background, std::move(collLk), &results, &result);
+ Status status = collection->validate(opCtx, level, background, &results, &result);
if (!status.isOK()) {
return CommandHelpers::appendCommandStatusNoThrow(result, status);
}
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index 4d690411b1b..635d9e7a2bb 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -142,7 +142,7 @@ Status repairCollections(OperationContext* opCtx,
const std::string& dbName,
stdx::function<void(const std::string& dbName)> onRecordStoreRepair) {
- auto colls = UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(dbName);
+ auto colls = UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(opCtx, dbName);
for (const auto& nss : colls) {
opCtx->checkForInterrupt();
@@ -183,7 +183,7 @@ Status repairDatabase(OperationContext* opCtx,
DisableDocumentValidation validationDisabler(opCtx);
// We must hold some form of lock here
- invariant(opCtx->lockState()->isLocked());
+ invariant(opCtx->lockState()->isW());
invariant(dbName.find('.') == std::string::npos);
log() << "repairDatabase " << dbName;
diff --git a/src/mongo/db/repair_database_and_check_version.cpp b/src/mongo/db/repair_database_and_check_version.cpp
index 80a520ad5cb..7e3ac75d22f 100644
--- a/src/mongo/db/repair_database_and_check_version.cpp
+++ b/src/mongo/db/repair_database_and_check_version.cpp
@@ -188,6 +188,7 @@ Status ensureCollectionProperties(OperationContext* opCtx,
const std::vector<std::string>& dbNames) {
auto databaseHolder = DatabaseHolder::get(opCtx);
auto downgradeError = Status{ErrorCodes::MustDowngrade, mustDowngradeErrorMsg};
+ invariant(opCtx->lockState()->isW());
for (const auto& dbName : dbNames) {
auto db = databaseHolder->openDb(opCtx, dbName);
diff --git a/src/mongo/db/repl/idempotency_test_fixture.cpp b/src/mongo/db/repl/idempotency_test_fixture.cpp
index 7b48bbb7e3c..b04e3ea9170 100644
--- a/src/mongo/db/repl/idempotency_test_fixture.cpp
+++ b/src/mongo/db/repl/idempotency_test_fixture.cpp
@@ -582,8 +582,7 @@ CollectionState IdempotencyTest::validate(const NamespaceString& nss) {
Lock::DBLock lk(_opCtx.get(), nss.db(), MODE_IX);
auto lock = stdx::make_unique<Lock::CollectionLock>(_opCtx.get(), nss, MODE_X);
- ASSERT_OK(collection->validate(
- _opCtx.get(), kValidateFull, false, std::move(lock), &validateResults, &bob));
+ ASSERT_OK(collection->validate(_opCtx.get(), kValidateFull, false, &validateResults, &bob));
ASSERT_TRUE(validateResults.valid);
std::string dataHash = computeDataHash(collection);
diff --git a/src/mongo/db/storage/kv/SConscript b/src/mongo/db/storage/kv/SConscript
index 0504a5b7191..77505aa4747 100644
--- a/src/mongo/db/storage/kv/SConscript
+++ b/src/mongo/db/storage/kv/SConscript
@@ -60,6 +60,7 @@ env.Library(
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/db/logical_clock',
'$BUILD_DIR/mongo/db/storage/storage_repair_observer',
+ '$BUILD_DIR/mongo/db/catalog/uuid_catalog_helper',
],
)
diff --git a/src/mongo/db/storage/kv/kv_catalog.cpp b/src/mongo/db/storage/kv/kv_catalog.cpp
index d64df979a5c..534a83c9e40 100644
--- a/src/mongo/db/storage/kv/kv_catalog.cpp
+++ b/src/mongo/db/storage/kv/kv_catalog.cpp
@@ -581,7 +581,7 @@ Status KVCatalog::_replaceEntry(OperationContext* opCtx,
}
Status KVCatalog::_removeEntry(OperationContext* opCtx, StringData ns) {
- invariant(opCtx->lockState()->isDbLockedForMode(nsToDatabaseSubstring(ns), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(NamespaceString(ns), MODE_X));
stdx::lock_guard<stdx::mutex> lk(_identsLock);
const NSToIdentMap::iterator it = _idents.find(ns.toString());
if (it == _idents.end()) {
@@ -793,7 +793,10 @@ Status KVCatalog::renameCollection(OperationContext* opCtx,
bool stayTemp) {
const NamespaceString fromNss(fromNS);
const NamespaceString toNss(toNS);
- invariant(opCtx->lockState()->isDbLockedForMode(fromNss.db(), MODE_X));
+ // TODO SERVER-39518 : Temporarily comment this out because dropCollection uses
+ // this function and now it only takes a database IX lock. We can change
+ // this invariant to IX once renameCollection only takes MODE_IX as well.
+ // invariant(opCtx->lockState()->isDbLockedForMode(fromNss.db(), MODE_X));
const std::string identFrom = _engine->getCatalog()->getCollectionIdent(fromNS);
@@ -836,7 +839,7 @@ private:
Status KVCatalog::dropCollection(OperationContext* opCtx, StringData ns) {
NamespaceString nss(ns);
- invariant(opCtx->lockState()->isDbLockedForMode(nss.db(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X));
CollectionCatalogEntry* const entry =
UUIDCatalog::get(opCtx).lookupCollectionCatalogEntryByNamespace(nss);
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.cpp b/src/mongo/db/storage/kv/kv_storage_engine.cpp
index 1386a0104bf..5e409902648 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.cpp
+++ b/src/mongo/db/storage/kv/kv_storage_engine.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/catalog/catalog_control.h"
#include "mongo/db/catalog/uuid_catalog.h"
+#include "mongo/db/catalog/uuid_catalog_helper.h"
#include "mongo/db/client.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/logical_clock.h"
@@ -532,7 +533,8 @@ Status KVStorageEngine::dropDatabase(OperationContext* opCtx, StringData db) {
}
}
- std::vector<NamespaceString> toDrop = UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(db);
+ std::vector<NamespaceString> toDrop =
+ UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(opCtx, db);
// Do not timestamp any of the following writes. This will remove entries from the catalog as
// well as drop any underlying tables. It's not expected for dropping tables to be reversible
@@ -911,20 +913,22 @@ void KVStorageEngine::TimestampMonitor::removeListener(TimestampListener* listen
int64_t KVStorageEngine::sizeOnDiskForDb(OperationContext* opCtx, StringData dbName) {
int64_t size = 0;
- auto catalogEntries = UUIDCatalog::get(opCtx).getAllCatalogEntriesFromDb(dbName);
- for (auto catalogEntry : catalogEntries) {
- size += catalogEntry->getRecordStore()->storageSize(opCtx);
+ catalog::forEachCollectionFromDb(
+ opCtx, dbName, MODE_IS, [&](Collection* collection, CollectionCatalogEntry* catalogEntry) {
+ size += catalogEntry->getRecordStore()->storageSize(opCtx);
- std::vector<std::string> indexNames;
- catalogEntry->getAllIndexes(opCtx, &indexNames);
+ std::vector<std::string> indexNames;
+ catalogEntry->getAllIndexes(opCtx, &indexNames);
- for (size_t i = 0; i < indexNames.size(); i++) {
- std::string ident =
- _catalog->getIndexIdent(opCtx, catalogEntry->ns().ns(), indexNames[i]);
- size += _engine->getIdentSize(opCtx, ident);
- }
- }
+ for (size_t i = 0; i < indexNames.size(); i++) {
+ std::string ident =
+ _catalog->getIndexIdent(opCtx, catalogEntry->ns().ns(), indexNames[i]);
+ size += _engine->getIdentSize(opCtx, ident);
+ }
+
+ return true;
+ });
return size;
}
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 68bbcd7e819..6b5b7044484 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -111,22 +111,20 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- db->dropCollection(&_opCtx, _ns).transitional_ignore();
+ ASSERT_OK(db->dropCollection(&_opCtx, _ns));
coll = db->createCollection(&_opCtx, _ns);
OpDebug* const nullOpDebug = nullptr;
- coll->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << 1 << "a"
- << "dup")),
- nullOpDebug,
- true)
- .transitional_ignore();
- coll->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << 2 << "a"
- << "dup")),
- nullOpDebug,
- true)
- .transitional_ignore();
+ ASSERT_OK(coll->insertDocument(&_opCtx,
+ InsertStatement(BSON("_id" << 1 << "a"
+ << "dup")),
+ nullOpDebug,
+ true));
+ ASSERT_OK(coll->insertDocument(&_opCtx,
+ InsertStatement(BSON("_id" << 2 << "a"
+ << "dup")),
+ nullOpDebug,
+ true));
wunit.commit();
}
@@ -168,7 +166,7 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- db->dropCollection(&_opCtx, _ns).transitional_ignore();
+ ASSERT_OK(db->dropCollection(&_opCtx, _ns));
coll = db->createCollection(&_opCtx, _ns);
OpDebug* const nullOpDebug = nullptr;
@@ -225,7 +223,7 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- db->dropCollection(&_opCtx, _ns).transitional_ignore();
+ ASSERT_OK(db->dropCollection(&_opCtx, _ns));
coll = db->createCollection(&_opCtx, _ns);
// Drop all indexes including id index.
coll->getIndexCatalog()->dropAllIndexes(&_opCtx, true);
@@ -233,8 +231,8 @@ public:
int32_t nDocs = 1000;
OpDebug* const nullOpDebug = nullptr;
for (int32_t i = 0; i < nDocs; ++i) {
- coll->insertDocument(&_opCtx, InsertStatement(BSON("a" << i)), nullOpDebug)
- .transitional_ignore();
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, InsertStatement(BSON("a" << i)), nullOpDebug));
}
wunit.commit();
}
@@ -267,11 +265,7 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- {
- // TODO SERVER-39520: Remove this DBLock
- Lock::DBLock lock(&_opCtx, db->name(), MODE_X);
- db->dropCollection(&_opCtx, _ns).transitional_ignore();
- }
+ ASSERT_OK(db->dropCollection(&_opCtx, _ns));
CollectionOptions options;
options.capped = true;
options.cappedSize = 10 * 1024;
@@ -281,8 +275,8 @@ public:
int32_t nDocs = 1000;
OpDebug* const nullOpDebug = nullptr;
for (int32_t i = 0; i < nDocs; ++i) {
- coll->insertDocument(&_opCtx, InsertStatement(BSON("_id" << i)), nullOpDebug, true)
- .transitional_ignore();
+ ASSERT_OK(coll->insertDocument(
+ &_opCtx, InsertStatement(BSON("_id" << i)), nullOpDebug, true));
}
wunit.commit();
}
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index d114992fb13..aa90d235361 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -61,10 +61,10 @@ void dropDatabase(OperationContext* opCtx, const NamespaceString& nss) {
databaseHolder->dropDb(opCtx, db);
}
}
-bool collectionExists(OldClientContext* ctx, const string& ns) {
+bool collectionExists(OperationContext* opCtx, OldClientContext* ctx, const string& ns) {
auto nss = NamespaceString(ns);
std::vector<NamespaceString> collections =
- UUIDCatalog::get(getGlobalServiceContext()).getAllCollectionNamesFromDb(nss.db());
+ UUIDCatalog::get(getGlobalServiceContext()).getAllCollectionNamesFromDb(opCtx, nss.db());
return std::count(collections.begin(), collections.end(), nss) > 0;
}
@@ -73,11 +73,11 @@ void createCollection(OperationContext* opCtx, const NamespaceString& nss) {
OldClientContext ctx(opCtx, nss.ns());
{
WriteUnitOfWork uow(opCtx);
- ASSERT(!collectionExists(&ctx, nss.ns()));
+ ASSERT(!collectionExists(opCtx, &ctx, nss.ns()));
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(BSONObj(), CollectionOptions::parseForCommand));
ASSERT_OK(ctx.db()->userCreateNS(opCtx, nss, collectionOptions, false));
- ASSERT(collectionExists(&ctx, nss.ns()));
+ ASSERT(collectionExists(opCtx, &ctx, nss.ns()));
uow.commit();
}
}
@@ -174,20 +174,20 @@ public:
OldClientContext ctx(&opCtx, ns);
{
WriteUnitOfWork uow(&opCtx);
- ASSERT(!collectionExists(&ctx, ns));
+ ASSERT(!collectionExists(&opCtx, &ctx, ns));
auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj();
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(options, CollectionOptions::parseForCommand));
ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, defaultIndexes));
- ASSERT(collectionExists(&ctx, ns));
+ ASSERT(collectionExists(&opCtx, &ctx, ns));
if (!rollback) {
uow.commit();
}
}
if (rollback) {
- ASSERT(!collectionExists(&ctx, ns));
+ ASSERT(!collectionExists(&opCtx, &ctx, ns));
} else {
- ASSERT(collectionExists(&ctx, ns));
+ ASSERT(collectionExists(&opCtx, &ctx, ns));
}
}
};
@@ -211,30 +211,30 @@ public:
OldClientContext ctx(&opCtx, ns);
{
WriteUnitOfWork uow(&opCtx);
- ASSERT(!collectionExists(&ctx, ns));
+ ASSERT(!collectionExists(&opCtx, &ctx, ns));
auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj();
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(options, CollectionOptions::parseForCommand));
ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, defaultIndexes));
uow.commit();
}
- ASSERT(collectionExists(&ctx, ns));
+ ASSERT(collectionExists(&opCtx, &ctx, ns));
// END OF SETUP / START OF TEST
{
WriteUnitOfWork uow(&opCtx);
- ASSERT(collectionExists(&ctx, ns));
+ ASSERT(collectionExists(&opCtx, &ctx, ns));
ASSERT_OK(ctx.db()->dropCollection(&opCtx, ns));
- ASSERT(!collectionExists(&ctx, ns));
+ ASSERT(!collectionExists(&opCtx, &ctx, ns));
if (!rollback) {
uow.commit();
}
}
if (rollback) {
- ASSERT(collectionExists(&ctx, ns));
+ ASSERT(collectionExists(&opCtx, &ctx, ns));
} else {
- ASSERT(!collectionExists(&ctx, ns));
+ ASSERT(!collectionExists(&opCtx, &ctx, ns));
}
}
};
@@ -261,34 +261,34 @@ public:
{
WriteUnitOfWork uow(&opCtx);
- ASSERT(!collectionExists(&ctx, source.ns()));
- ASSERT(!collectionExists(&ctx, target.ns()));
+ ASSERT(!collectionExists(&opCtx, &ctx, source.ns()));
+ ASSERT(!collectionExists(&opCtx, &ctx, target.ns()));
auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj();
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(options, CollectionOptions::parseForCommand));
ASSERT_OK(ctx.db()->userCreateNS(&opCtx, source, collectionOptions, defaultIndexes));
uow.commit();
}
- ASSERT(collectionExists(&ctx, source.ns()));
- ASSERT(!collectionExists(&ctx, target.ns()));
+ ASSERT(collectionExists(&opCtx, &ctx, source.ns()));
+ ASSERT(!collectionExists(&opCtx, &ctx, target.ns()));
// END OF SETUP / START OF TEST
{
WriteUnitOfWork uow(&opCtx);
ASSERT_OK(renameCollection(&opCtx, source, target));
- ASSERT(!collectionExists(&ctx, source.ns()));
- ASSERT(collectionExists(&ctx, target.ns()));
+ ASSERT(!collectionExists(&opCtx, &ctx, source.ns()));
+ ASSERT(collectionExists(&opCtx, &ctx, target.ns()));
if (!rollback) {
uow.commit();
}
}
if (rollback) {
- ASSERT(collectionExists(&ctx, source.ns()));
- ASSERT(!collectionExists(&ctx, target.ns()));
+ ASSERT(collectionExists(&opCtx, &ctx, source.ns()));
+ ASSERT(!collectionExists(&opCtx, &ctx, target.ns()));
} else {
- ASSERT(!collectionExists(&ctx, source.ns()));
- ASSERT(collectionExists(&ctx, target.ns()));
+ ASSERT(!collectionExists(&opCtx, &ctx, source.ns()));
+ ASSERT(collectionExists(&opCtx, &ctx, target.ns()));
}
}
};
@@ -320,8 +320,8 @@ public:
{
WriteUnitOfWork uow(&opCtx);
- ASSERT(!collectionExists(&ctx, source.ns()));
- ASSERT(!collectionExists(&ctx, target.ns()));
+ ASSERT(!collectionExists(&opCtx, &ctx, source.ns()));
+ ASSERT(!collectionExists(&opCtx, &ctx, target.ns()));
auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj();
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(options, CollectionOptions::parseForCommand));
@@ -334,8 +334,8 @@ public:
uow.commit();
}
- ASSERT(collectionExists(&ctx, source.ns()));
- ASSERT(collectionExists(&ctx, target.ns()));
+ ASSERT(collectionExists(&opCtx, &ctx, source.ns()));
+ ASSERT(collectionExists(&opCtx, &ctx, target.ns()));
assertOnlyRecord(&opCtx, source, sourceDoc);
assertOnlyRecord(&opCtx, target, targetDoc);
@@ -351,21 +351,21 @@ public:
{},
DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops));
ASSERT_OK(renameCollection(&opCtx, source, target));
- ASSERT(!collectionExists(&ctx, source.ns()));
- ASSERT(collectionExists(&ctx, target.ns()));
+ ASSERT(!collectionExists(&opCtx, &ctx, source.ns()));
+ ASSERT(collectionExists(&opCtx, &ctx, target.ns()));
assertOnlyRecord(&opCtx, target, sourceDoc);
if (!rollback) {
uow.commit();
}
}
if (rollback) {
- ASSERT(collectionExists(&ctx, source.ns()));
- ASSERT(collectionExists(&ctx, target.ns()));
+ ASSERT(collectionExists(&opCtx, &ctx, source.ns()));
+ ASSERT(collectionExists(&opCtx, &ctx, target.ns()));
assertOnlyRecord(&opCtx, source, sourceDoc);
assertOnlyRecord(&opCtx, target, targetDoc);
} else {
- ASSERT(!collectionExists(&ctx, source.ns()));
- ASSERT(collectionExists(&ctx, target.ns()));
+ ASSERT(!collectionExists(&opCtx, &ctx, source.ns()));
+ ASSERT(collectionExists(&opCtx, &ctx, target.ns()));
assertOnlyRecord(&opCtx, target, sourceDoc);
}
}
@@ -390,14 +390,14 @@ public:
{
WriteUnitOfWork uow(&opCtx);
- ASSERT(!collectionExists(&ctx, nss.ns()));
+ ASSERT(!collectionExists(&opCtx, &ctx, nss.ns()));
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(BSONObj(), CollectionOptions::parseForCommand));
ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, defaultIndexes));
insertRecord(&opCtx, nss, oldDoc);
uow.commit();
}
- ASSERT(collectionExists(&ctx, nss.ns()));
+ ASSERT(collectionExists(&opCtx, &ctx, nss.ns()));
assertOnlyRecord(&opCtx, nss, oldDoc);
// END OF SETUP / START OF TEST
@@ -411,18 +411,18 @@ public:
result,
{},
DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops));
- ASSERT(!collectionExists(&ctx, nss.ns()));
+ ASSERT(!collectionExists(&opCtx, &ctx, nss.ns()));
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(BSONObj(), CollectionOptions::parseForCommand));
ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, defaultIndexes));
- ASSERT(collectionExists(&ctx, nss.ns()));
+ ASSERT(collectionExists(&opCtx, &ctx, nss.ns()));
insertRecord(&opCtx, nss, newDoc);
assertOnlyRecord(&opCtx, nss, newDoc);
if (!rollback) {
uow.commit();
}
}
- ASSERT(collectionExists(&ctx, nss.ns()));
+ ASSERT(collectionExists(&opCtx, &ctx, nss.ns()));
if (rollback) {
assertOnlyRecord(&opCtx, nss, oldDoc);
} else {
@@ -446,14 +446,14 @@ public:
BSONObj doc = BSON("_id"
<< "example string");
- ASSERT(!collectionExists(&ctx, nss.ns()));
+ ASSERT(!collectionExists(&opCtx, &ctx, nss.ns()));
{
WriteUnitOfWork uow(&opCtx);
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(BSONObj(), CollectionOptions::parseForCommand));
ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, defaultIndexes));
- ASSERT(collectionExists(&ctx, nss.ns()));
+ ASSERT(collectionExists(&opCtx, &ctx, nss.ns()));
insertRecord(&opCtx, nss, doc);
assertOnlyRecord(&opCtx, nss, doc);
@@ -464,13 +464,13 @@ public:
result,
{},
DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops));
- ASSERT(!collectionExists(&ctx, nss.ns()));
+ ASSERT(!collectionExists(&opCtx, &ctx, nss.ns()));
if (!rollback) {
uow.commit();
}
}
- ASSERT(!collectionExists(&ctx, nss.ns()));
+ ASSERT(!collectionExists(&opCtx, &ctx, nss.ns()));
}
};
@@ -489,14 +489,14 @@ public:
BSONObj doc = BSON("_id"
<< "foo");
- ASSERT(!collectionExists(&ctx, nss.ns()));
+ ASSERT(!collectionExists(&opCtx, &ctx, nss.ns()));
{
WriteUnitOfWork uow(&opCtx);
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(BSONObj(), CollectionOptions::parseForCommand));
ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, defaultIndexes));
- ASSERT(collectionExists(&ctx, nss.ns()));
+ ASSERT(collectionExists(&opCtx, &ctx, nss.ns()));
insertRecord(&opCtx, nss, doc);
assertOnlyRecord(&opCtx, nss, doc);
uow.commit();
@@ -509,14 +509,14 @@ public:
WriteUnitOfWork uow(&opCtx);
ASSERT_OK(truncateCollection(&opCtx, nss));
- ASSERT(collectionExists(&ctx, nss.ns()));
+ ASSERT(collectionExists(&opCtx, &ctx, nss.ns()));
assertEmpty(&opCtx, nss);
if (!rollback) {
uow.commit();
}
}
- ASSERT(collectionExists(&ctx, nss.ns()));
+ ASSERT(collectionExists(&opCtx, &ctx, nss.ns()));
if (rollback) {
assertOnlyRecord(&opCtx, nss, doc);
} else {
@@ -753,11 +753,11 @@ public:
{
WriteUnitOfWork uow(&opCtx);
- ASSERT(!collectionExists(&ctx, nss.ns()));
+ ASSERT(!collectionExists(&opCtx, &ctx, nss.ns()));
CollectionOptions collectionOptions;
ASSERT_OK(collectionOptions.parse(BSONObj(), CollectionOptions::parseForCommand));
ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, false));
- ASSERT(collectionExists(&ctx, nss.ns()));
+ ASSERT(collectionExists(&opCtx, &ctx, nss.ns()));
Collection* coll = ctx.db()->getCollection(&opCtx, nss);
IndexCatalog* catalog = coll->getIndexCatalog();
@@ -770,9 +770,9 @@ public:
}
} // uow
if (rollback) {
- ASSERT(!collectionExists(&ctx, ns));
+ ASSERT(!collectionExists(&opCtx, &ctx, ns));
} else {
- ASSERT(collectionExists(&ctx, ns));
+ ASSERT(collectionExists(&opCtx, &ctx, ns));
ASSERT(indexReady(&opCtx, nss, idxNameA));
ASSERT(indexReady(&opCtx, nss, idxNameB));
ASSERT(indexReady(&opCtx, nss, idxNameC));
diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp
index 2a84eef2168..8ebebbb676a 100644
--- a/src/mongo/dbtests/validate_tests.cpp
+++ b/src/mongo/dbtests/validate_tests.cpp
@@ -94,7 +94,6 @@ protected:
->validate(&_opCtx,
_full ? kValidateFull : kValidateIndex,
_background,
- std::move(lock),
&results,
&output));