author    Geert Bosch <geert@mongodb.com>  2019-03-07 19:33:27 -0500
committer Geert Bosch <geert@mongodb.com>  2019-03-07 19:33:27 -0500
commit    7f6279afcc08a84cd980ea193b9962eab0ae4f25 (patch)
tree      c905df0551e07f69808c0b46d6ef3d5b00eeec72 /src/mongo/db
parent    a76788e89bf54abacccefeba62d4e3775f20c555 (diff)
download  mongo-7f6279afcc08a84cd980ea193b9962eab0ae4f25.tar.gz
SERVER-40020 Make isCollectionLockedForMode take NamespaceString, not StringData
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/catalog/collection_compact.cpp                |  2
-rw-r--r--  src/mongo/db/catalog/collection_impl.cpp                   | 26
-rw-r--r--  src/mongo/db/catalog/collection_info_cache_impl.cpp        |  8
-rw-r--r--  src/mongo/db/catalog/database_catalog_entry.h              |  3
-rw-r--r--  src/mongo/db/catalog/database_impl.cpp                     |  2
-rw-r--r--  src/mongo/db/catalog/database_test.cpp                     |  4
-rw-r--r--  src/mongo/db/catalog/index_builds_manager.cpp              |  2
-rw-r--r--  src/mongo/db/catalog/index_catalog_impl.cpp                |  8
-rw-r--r--  src/mongo/db/catalog/index_timestamp_helper.cpp            |  2
-rw-r--r--  src/mongo/db/catalog/rename_collection_test.cpp            |  2
-rw-r--r--  src/mongo/db/catalog_raii_test.cpp                         |  6
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp                   |  2
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_test.cpp            | 14
-rw-r--r--  src/mongo/db/concurrency/lock_state.cpp                    |  7
-rw-r--r--  src/mongo/db/concurrency/lock_state.h                      |  2
-rw-r--r--  src/mongo/db/concurrency/locker.h                          |  2
-rw-r--r--  src/mongo/db/concurrency/locker_noop.h                     |  2
-rw-r--r--  src/mongo/db/index/index_build_interceptor.cpp             |  6
-rw-r--r--  src/mongo/db/index_builds_coordinator.cpp                  |  4
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.cpp                       |  4
-rw-r--r--  src/mongo/db/query/plan_executor_impl.cpp                  |  2
-rw-r--r--  src/mongo/db/repl/oplog.cpp                                | 19
-rw-r--r--  src/mongo/db/repl/sync_tail_test_fixture.cpp               |  3
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime.cpp             |  2
-rw-r--r--  src/mongo/db/s/collection_sharding_state.cpp               |  8
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp    | 10
-rw-r--r--  src/mongo/db/s/shard_server_op_observer.cpp                |  2
-rw-r--r--  src/mongo/db/storage/kv/kv_catalog.cpp                     | 16
-rw-r--r--  src/mongo/db/storage/kv/kv_catalog.h                       |  2
-rw-r--r--  src/mongo/db/storage/kv/kv_collection_catalog_entry_test.cpp |  2
-rw-r--r--  src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp | 27
-rw-r--r--  src/mongo/db/storage/kv/kv_database_catalog_entry_base.h   |  4
-rw-r--r--  src/mongo/db/storage/kv/kv_database_catalog_entry_test.cpp | 29
-rw-r--r--  src/mongo/db/storage/kv/kv_engine_test_harness.cpp         | 27
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine_test.cpp         |  2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp |  4
36 files changed, 132 insertions, 135 deletions
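
The signature change in one hedged sketch (illustrative only; it mirrors the call sites touched in this diff, assuming an OperationContext* opCtx with an attached Locker and a NamespaceString nss in scope):

    // Before this commit: callers passed the full namespace as a string,
    // forcing LockerImpl to re-parse it into a NamespaceString internally.
    dassert(opCtx->lockState()->isCollectionLockedForMode(nss.ns(), MODE_IX));

    // After this commit: callers pass the NamespaceString directly.
    dassert(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IX));
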
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index ff34f9f3a3e..c3e886d27eb 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -45,7 +45,7 @@ namespace mongo {
StatusWith<CompactStats> compactCollection(OperationContext* opCtx,
Collection* collection,
const CompactOptions* compactOptions) {
- dassert(opCtx->lockState()->isCollectionLockedForMode(collection->ns().toString(), MODE_X));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_X));
DisableDocumentValidation validationDisabler(opCtx);
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index bb8cc7e4bb6..a11d2461ba1 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -265,7 +265,7 @@ bool CollectionImpl::requiresIdIndex() const {
std::unique_ptr<SeekableRecordCursor> CollectionImpl::getCursor(OperationContext* opCtx,
bool forward) const {
- dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_IS));
invariant(ok());
return _recordStore->getCursor(opCtx, forward);
@@ -275,7 +275,7 @@ std::unique_ptr<SeekableRecordCursor> CollectionImpl::getCursor(OperationContext
bool CollectionImpl::findDoc(OperationContext* opCtx,
RecordId loc,
Snapshotted<BSONObj>* out) const {
- dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_IS));
RecordData rd;
if (!_recordStore->findRecord(opCtx, loc, &rd))
@@ -456,7 +456,7 @@ Status CollectionImpl::insertDocumentForBulkLoader(OperationContext* opCtx,
return status;
}
- dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_IX));
// TODO SERVER-30638: using timestamp 0 for these inserts, which are non-oplog so we don't yet
// care about their correct timestamps.
@@ -494,7 +494,7 @@ Status CollectionImpl::_insertDocuments(OperationContext* opCtx,
const vector<InsertStatement>::const_iterator begin,
const vector<InsertStatement>::const_iterator end,
OpDebug* opDebug) {
- dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_IX));
const size_t count = std::distance(begin, end);
if (isCapped() && _indexCatalog->haveAnyIndexes() && count > 1) {
@@ -636,7 +636,7 @@ RecordId CollectionImpl::updateDocument(OperationContext* opCtx,
}
}
- dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_IX));
invariant(oldDoc.snapshotId() == opCtx->recoveryUnit()->getSnapshotId());
invariant(newDoc.isOwned());
@@ -710,7 +710,7 @@ StatusWith<RecordData> CollectionImpl::updateDocumentWithDamages(
const char* damageSource,
const mutablebson::DamageVector& damages,
CollectionUpdateArgs* args) {
- dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_IX));
invariant(oldRec.snapshotId() == opCtx->recoveryUnit()->getSnapshotId());
invariant(updateWithDamagesSupported());
@@ -779,7 +779,7 @@ uint64_t CollectionImpl::getIndexSize(OperationContext* opCtx, BSONObjBuilder* d
* 4) re-write indexes
*/
Status CollectionImpl::truncate(OperationContext* opCtx) {
- dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_X));
BackgroundOperation::assertNoBgOpInProgForNs(ns());
invariant(_indexCatalog->numIndexesInProgress(opCtx) == 0);
@@ -813,7 +813,7 @@ Status CollectionImpl::truncate(OperationContext* opCtx) {
}
void CollectionImpl::cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) {
- dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_X));
invariant(isCapped());
BackgroundOperation::assertNoBgOpInProgForNs(ns());
invariant(_indexCatalog->numIndexesInProgress(opCtx) == 0);
@@ -822,7 +822,7 @@ void CollectionImpl::cappedTruncateAfter(OperationContext* opCtx, RecordId end,
}
Status CollectionImpl::setValidator(OperationContext* opCtx, BSONObj validatorDoc) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_X));
// Make owned early so that the parsed match expression refers to the owned object.
if (!validatorDoc.isOwned())
@@ -873,7 +873,7 @@ StringData CollectionImpl::getValidationAction() const {
}
Status CollectionImpl::setValidationLevel(OperationContext* opCtx, StringData newLevel) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_X));
auto levelSW = _parseValidationLevel(newLevel);
if (!levelSW.isOK()) {
@@ -891,7 +891,7 @@ Status CollectionImpl::setValidationLevel(OperationContext* opCtx, StringData ne
}
Status CollectionImpl::setValidationAction(OperationContext* opCtx, StringData newAction) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_X));
auto actionSW = _parseValidationAction(newAction);
if (!actionSW.isOK()) {
@@ -912,7 +912,7 @@ Status CollectionImpl::updateValidator(OperationContext* opCtx,
BSONObj newValidator,
StringData newLevel,
StringData newAction) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_X));
opCtx->recoveryUnit()->onRollback([
this,
@@ -1268,7 +1268,7 @@ Status CollectionImpl::validate(OperationContext* opCtx,
std::unique_ptr<Lock::CollectionLock> collLk,
ValidateResults* results,
BSONObjBuilder* output) {
- dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_IS));
try {
ValidateResultsMap indexNsResultsMap;
diff --git a/src/mongo/db/catalog/collection_info_cache_impl.cpp b/src/mongo/db/catalog/collection_info_cache_impl.cpp
index 5a715d26748..36a5f8acf10 100644
--- a/src/mongo/db/catalog/collection_info_cache_impl.cpp
+++ b/src/mongo/db/catalog/collection_info_cache_impl.cpp
@@ -69,7 +69,7 @@ CollectionInfoCacheImpl::~CollectionInfoCacheImpl() {
const UpdateIndexData& CollectionInfoCacheImpl::getIndexKeys(OperationContext* opCtx) const {
// This requires "some" lock, and MODE_IS is an expression for that, for now.
- dassert(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_IS));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_IS));
invariant(_keysComputed);
return _indexedPaths;
}
@@ -205,7 +205,7 @@ void CollectionInfoCacheImpl::updatePlanCacheIndexEntries(OperationContext* opCt
void CollectionInfoCacheImpl::init(OperationContext* opCtx) {
// Requires exclusive collection lock.
- invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_X));
const bool includeUnfinishedIndexes = false;
std::unique_ptr<IndexCatalog::IndexIterator> ii =
@@ -220,7 +220,7 @@ void CollectionInfoCacheImpl::init(OperationContext* opCtx) {
void CollectionInfoCacheImpl::addedIndex(OperationContext* opCtx, const IndexDescriptor* desc) {
// Requires exclusive collection lock.
- invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_X));
invariant(desc);
rebuildIndexData(opCtx);
@@ -230,7 +230,7 @@ void CollectionInfoCacheImpl::addedIndex(OperationContext* opCtx, const IndexDes
void CollectionInfoCacheImpl::droppedIndex(OperationContext* opCtx, StringData indexName) {
// Requires exclusive collection lock.
- invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_X));
rebuildIndexData(opCtx);
_indexUsageTracker.unregisterIndex(indexName);
diff --git a/src/mongo/db/catalog/database_catalog_entry.h b/src/mongo/db/catalog/database_catalog_entry.h
index bdc11797461..d68ccbc863c 100644
--- a/src/mongo/db/catalog/database_catalog_entry.h
+++ b/src/mongo/db/catalog/database_catalog_entry.h
@@ -34,6 +34,7 @@
#include "mongo/base/status.h"
#include "mongo/base/string_data.h"
+#include "mongo/db/namespace_string.h"
namespace mongo {
@@ -96,7 +97,7 @@ public:
IndexCatalogEntry* index) = 0;
virtual Status createCollection(OperationContext* opCtx,
- StringData ns,
+ const NamespaceString& nss,
const CollectionOptions& options,
bool allocateDefaultSpace) = 0;
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index b8b445649b5..e8db845ce50 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -851,7 +851,7 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
}
massertStatusOK(
- _dbEntry->createCollection(opCtx, ns, optionsWithUUID, true /*allocateDefaultSpace*/));
+ _dbEntry->createCollection(opCtx, nss, optionsWithUUID, true /*allocateDefaultSpace*/));
opCtx->recoveryUnit()->registerChange(new AddCollectionChange(opCtx, this, ns));
Collection* collection = _getOrCreateCollectionInstance(opCtx, nss);
diff --git a/src/mongo/db/catalog/database_test.cpp b/src/mongo/db/catalog/database_test.cpp
index 9b44575217e..e1743a76488 100644
--- a/src/mongo/db/catalog/database_test.cpp
+++ b/src/mongo/db/catalog/database_test.cpp
@@ -508,7 +508,7 @@ TEST_F(DatabaseTest, AutoGetCollectionForReadCommandSucceedsWithDeadlineNow) {
Lock::DBLock dbLock(_opCtx.get(), nss.db(), MODE_X);
ASSERT(_opCtx.get()->lockState()->isDbLockedForMode(nss.db(), MODE_X));
Lock::CollectionLock collLock(_opCtx.get()->lockState(), nss.toString(), MODE_X);
- ASSERT(_opCtx.get()->lockState()->isCollectionLockedForMode(nss.toString(), MODE_X));
+ ASSERT(_opCtx.get()->lockState()->isCollectionLockedForMode(nss, MODE_X));
try {
AutoGetCollectionForReadCommand db(
_opCtx.get(), nss, AutoGetCollection::kViewsForbidden, Date_t::now());
@@ -522,7 +522,7 @@ TEST_F(DatabaseTest, AutoGetCollectionForReadCommandSucceedsWithDeadlineMin) {
Lock::DBLock dbLock(_opCtx.get(), nss.db(), MODE_X);
ASSERT(_opCtx.get()->lockState()->isDbLockedForMode(nss.db(), MODE_X));
Lock::CollectionLock collLock(_opCtx.get()->lockState(), nss.toString(), MODE_X);
- ASSERT(_opCtx.get()->lockState()->isCollectionLockedForMode(nss.toString(), MODE_X));
+ ASSERT(_opCtx.get()->lockState()->isCollectionLockedForMode(nss, MODE_X));
try {
AutoGetCollectionForReadCommand db(
_opCtx.get(), nss, AutoGetCollection::kViewsForbidden, Date_t());
diff --git a/src/mongo/db/catalog/index_builds_manager.cpp b/src/mongo/db/catalog/index_builds_manager.cpp
index 64a8b795149..ebf96a0eb71 100644
--- a/src/mongo/db/catalog/index_builds_manager.cpp
+++ b/src/mongo/db/catalog/index_builds_manager.cpp
@@ -85,7 +85,7 @@ Status IndexBuildsManager::setUpIndexBuild(OperationContext* opCtx,
_registerIndexBuild(buildUUID);
const auto& nss = collection->ns();
- invariant(opCtx->lockState()->isCollectionLockedForMode(nss.ns(), MODE_X),
+ invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X),
str::stream() << "Unable to set up index build " << buildUUID << ": collection "
<< nss.ns()
<< " is not locked in exclusive mode.");
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index 5fdffe0aad3..7f7d98b17c5 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -290,7 +290,7 @@ std::vector<BSONObj> IndexCatalogImpl::removeExistingIndexes(
StatusWith<BSONObj> IndexCatalogImpl::createIndexOnEmptyCollection(OperationContext* opCtx,
BSONObj spec) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_X));
invariant(_collection->numRecords(opCtx) == 0,
str::stream() << "Collection must be empty. Collection: " << _collection->ns()
<< " UUID: "
@@ -738,7 +738,7 @@ BSONObj IndexCatalogImpl::getDefaultIdIndexSpec() const {
void IndexCatalogImpl::dropAllIndexes(OperationContext* opCtx,
bool includingIdIndex,
stdx::function<void(const IndexDescriptor*)> onDropFn) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_X));
BackgroundOperation::assertNoBgOpInProgForNs(_collection->ns().ns());
@@ -812,7 +812,7 @@ void IndexCatalogImpl::dropAllIndexes(OperationContext* opCtx, bool includingIdI
}
Status IndexCatalogImpl::dropIndex(OperationContext* opCtx, const IndexDescriptor* desc) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_X));
BackgroundOperation::assertNoBgOpInProgForNs(_collection->ns().ns());
invariant(_buildingIndexes.size() == 0);
@@ -1123,7 +1123,7 @@ std::vector<std::shared_ptr<const IndexCatalogEntry>> IndexCatalogImpl::getAllRe
const IndexDescriptor* IndexCatalogImpl::refreshEntry(OperationContext* opCtx,
const IndexDescriptor* oldDesc) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_X));
invariant(!BackgroundOperation::inProgForNs(_collection->ns()));
invariant(_buildingIndexes.size() == 0);
diff --git a/src/mongo/db/catalog/index_timestamp_helper.cpp b/src/mongo/db/catalog/index_timestamp_helper.cpp
index 9d1407b26a5..524d5bbc121 100644
--- a/src/mongo/db/catalog/index_timestamp_helper.cpp
+++ b/src/mongo/db/catalog/index_timestamp_helper.cpp
@@ -84,7 +84,7 @@ void IndexTimestampHelper::setGhostCommitTimestampForWrite(OperationContext* opC
const auto mySnapshot = opCtx->recoveryUnit()->getPointInTimeReadTimestamp();
// If a lock that blocks writes is held, there can be no uncommitted writes, so there is no
// need to check snapshot visibility, especially if a caller is not reading with a timestamp.
- invariant(mySnapshot || opCtx->lockState()->isCollectionLockedForMode(nss.ns(), MODE_S),
+ invariant(mySnapshot || opCtx->lockState()->isCollectionLockedForMode(nss, MODE_S),
"a write-blocking lock is required when applying a ghost timestamp without a read "
"timestamp");
invariant(!mySnapshot || *mySnapshot <= commitTimestamp,
diff --git a/src/mongo/db/catalog/rename_collection_test.cpp b/src/mongo/db/catalog/rename_collection_test.cpp
index c9aa1964cba..439b660eaa7 100644
--- a/src/mongo/db/catalog/rename_collection_test.cpp
+++ b/src/mongo/db/catalog/rename_collection_test.cpp
@@ -455,7 +455,7 @@ void _insertDocument(OperationContext* opCtx, const NamespaceString& nss, const
* catalog. The caller must hold the appropriate locks from the lock manager.
*/
Collection* _getCollection_inlock(OperationContext* opCtx, const NamespaceString& nss) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(nss.ns(), MODE_IS));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IS));
auto databaseHolder = DatabaseHolder::get(opCtx);
auto* db = databaseHolder->getDb(opCtx, nss.db());
if (!db) {
diff --git a/src/mongo/db/catalog_raii_test.cpp b/src/mongo/db/catalog_raii_test.cpp
index 5e6aa5a32b7..b8f34187e44 100644
--- a/src/mongo/db/catalog_raii_test.cpp
+++ b/src/mongo/db/catalog_raii_test.cpp
@@ -132,7 +132,7 @@ TEST_F(CatalogRAIITestFixture, AutoGetCollectionCollLockDeadline) {
Lock::DBLock dbLock1(client1.second.get(), nss.db(), MODE_IX);
ASSERT(client1.second->lockState()->isDbLockedForMode(nss.db(), MODE_IX));
Lock::CollectionLock collLock1(client1.second.get()->lockState(), nss.toString(), MODE_X);
- ASSERT(client1.second->lockState()->isCollectionLockedForMode(nss.toString(), MODE_X));
+ ASSERT(client1.second->lockState()->isCollectionLockedForMode(nss, MODE_X));
failsWithLockTimeout(
[&] {
AutoGetCollection coll(client2.second.get(),
@@ -180,7 +180,7 @@ TEST_F(CatalogRAIITestFixture, AutoGetCollectionDeadlineNow) {
Lock::DBLock dbLock1(client1.second.get(), nss.db(), MODE_IX);
ASSERT(client1.second->lockState()->isDbLockedForMode(nss.db(), MODE_IX));
Lock::CollectionLock collLock1(client1.second.get()->lockState(), nss.toString(), MODE_X);
- ASSERT(client1.second->lockState()->isCollectionLockedForMode(nss.toString(), MODE_X));
+ ASSERT(client1.second->lockState()->isCollectionLockedForMode(nss, MODE_X));
failsWithLockTimeout(
[&] {
@@ -198,7 +198,7 @@ TEST_F(CatalogRAIITestFixture, AutoGetCollectionDeadlineMin) {
Lock::DBLock dbLock1(client1.second.get(), nss.db(), MODE_IX);
ASSERT(client1.second->lockState()->isDbLockedForMode(nss.db(), MODE_IX));
Lock::CollectionLock collLock1(client1.second.get()->lockState(), nss.toString(), MODE_X);
- ASSERT(client1.second->lockState()->isCollectionLockedForMode(nss.toString(), MODE_X));
+ ASSERT(client1.second->lockState()->isCollectionLockedForMode(nss, MODE_X));
failsWithLockTimeout(
[&] {
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 5700ad537a1..7b0dbb11cce 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -220,7 +220,7 @@ std::vector<BSONObj> resolveDefaultsAndRemoveExistingIndexes(OperationContext* o
void checkUniqueIndexConstraints(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& newIdxKey) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(nss.ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X));
const auto metadata = CollectionShardingState::get(opCtx, nss)->getCurrentMetadata();
if (!metadata->isSharded())
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 29852943be9..4c919292b80 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -1168,7 +1168,7 @@ TEST_F(DConcurrencyTestFixture, IsDbLockedForXMode) {
}
TEST_F(DConcurrencyTestFixture, IsCollectionLocked_DB_Locked_IS) {
- const std::string ns("db1.coll");
+ const NamespaceString ns("db1.coll");
auto opCtx = makeOperationContext();
opCtx->swapLockState(stdx::make_unique<LockerImpl>());
@@ -1177,7 +1177,7 @@ TEST_F(DConcurrencyTestFixture, IsCollectionLocked_DB_Locked_IS) {
Lock::DBLock dbLock(opCtx.get(), "db1", MODE_IS);
{
- Lock::CollectionLock collLock(lockState, ns, MODE_IS);
+ Lock::CollectionLock collLock(lockState, ns.ns(), MODE_IS);
ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IS));
ASSERT(!lockState->isCollectionLockedForMode(ns, MODE_IX));
@@ -1189,7 +1189,7 @@ TEST_F(DConcurrencyTestFixture, IsCollectionLocked_DB_Locked_IS) {
}
{
- Lock::CollectionLock collLock(lockState, ns, MODE_S);
+ Lock::CollectionLock collLock(lockState, ns.ns(), MODE_S);
ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IS));
ASSERT(!lockState->isCollectionLockedForMode(ns, MODE_IX));
@@ -1199,7 +1199,7 @@ TEST_F(DConcurrencyTestFixture, IsCollectionLocked_DB_Locked_IS) {
}
TEST_F(DConcurrencyTestFixture, IsCollectionLocked_DB_Locked_IX) {
- const std::string ns("db1.coll");
+ const NamespaceString ns("db1.coll");
auto opCtx = makeOperationContext();
opCtx->swapLockState(stdx::make_unique<LockerImpl>());
@@ -1208,7 +1208,7 @@ TEST_F(DConcurrencyTestFixture, IsCollectionLocked_DB_Locked_IX) {
Lock::DBLock dbLock(opCtx.get(), "db1", MODE_IX);
{
- Lock::CollectionLock collLock(lockState, ns, MODE_IX);
+ Lock::CollectionLock collLock(lockState, ns.ns(), MODE_IX);
// TODO: This is TRUE because Lock::CollectionLock converts IX lock to X
ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IS));
@@ -1219,7 +1219,7 @@ TEST_F(DConcurrencyTestFixture, IsCollectionLocked_DB_Locked_IX) {
}
{
- Lock::CollectionLock collLock(lockState, ns, MODE_X);
+ Lock::CollectionLock collLock(lockState, ns.ns(), MODE_X);
ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IS));
ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IX));
@@ -1751,7 +1751,7 @@ TEST_F(DConcurrencyTestFixture, CollectionLockTimeout) {
Lock::DBLock DBL1(opctx1, "testdb"_sd, MODE_IX, Date_t::max());
ASSERT(opctx1->lockState()->isDbLockedForMode("testdb"_sd, MODE_IX));
Lock::CollectionLock CL1(opctx1->lockState(), "testdb.test"_sd, MODE_X, Date_t::max());
- ASSERT(opctx1->lockState()->isCollectionLockedForMode("testdb.test"_sd, MODE_X));
+ ASSERT(opctx1->lockState()->isCollectionLockedForMode(NamespaceString("testdb.test"), MODE_X));
Date_t t1 = Date_t::now();
Lock::DBLock DBL2(opctx2, "testdb"_sd, MODE_IX, Date_t::max());
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 345652a66c8..9a791148931 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -561,15 +561,14 @@ bool LockerImpl::isDbLockedForMode(StringData dbName, LockMode mode) const {
return isLockHeldForMode(resIdDb, mode);
}
-bool LockerImpl::isCollectionLockedForMode(StringData ns, LockMode mode) const {
- invariant(nsIsFull(ns));
+bool LockerImpl::isCollectionLockedForMode(const NamespaceString& nss, LockMode mode) const {
+ invariant(nss.coll().size());
if (isW())
return true;
if (isR() && isSharedLockMode(mode))
return true;
- const NamespaceString nss(ns);
const ResourceId resIdDb(RESOURCE_DATABASE, nss.db());
LockMode dbMode = getLockMode(resIdDb);
@@ -585,7 +584,7 @@ bool LockerImpl::isCollectionLockedForMode(StringData ns, LockMode mode) const {
return isSharedLockMode(mode);
case MODE_IX:
case MODE_IS: {
- const ResourceId resIdColl(RESOURCE_COLLECTION, ns);
+ const ResourceId resIdColl(RESOURCE_COLLECTION, nss.ns());
return isLockHeldForMode(resIdColl, mode);
} break;
case LockModesCount:
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index 91c6e651d4f..b9ecdc3cbfd 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -179,7 +179,7 @@ public:
virtual LockMode getLockMode(ResourceId resId) const;
virtual bool isLockHeldForMode(ResourceId resId, LockMode mode) const;
virtual bool isDbLockedForMode(StringData dbName, LockMode mode) const;
- virtual bool isCollectionLockedForMode(StringData ns, LockMode mode) const;
+ virtual bool isCollectionLockedForMode(const NamespaceString& nss, LockMode mode) const;
virtual ResourceId getWaitingResource() const;
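
A minimal usage sketch for the new signature (assertCanWriteTo is a hypothetical helper, not part of this commit): because the rewritten LockerImpl check walks the lock hierarchy, a covering global W or database MODE_X lock already satisfies the query, and only the intent modes fall through to the per-collection ResourceId lookup.

    // Hypothetical helper, assuming an OperationContext with an attached Locker.
    void assertCanWriteTo(OperationContext* opCtx, const NamespaceString& nss) {
        // Passes when the collection is locked in MODE_IX, or when a stronger
        // covering lock (database MODE_X, global W) is already held.
        invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IX));
    }
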
diff --git a/src/mongo/db/concurrency/locker.h b/src/mongo/db/concurrency/locker.h
index ae951b4715b..158d073ae35 100644
--- a/src/mongo/db/concurrency/locker.h
+++ b/src/mongo/db/concurrency/locker.h
@@ -295,7 +295,7 @@ public:
// hierarchy is properly locked and because of this they are very expensive to call.
// Do not use them in performance critical code paths.
virtual bool isDbLockedForMode(StringData dbName, LockMode mode) const = 0;
- virtual bool isCollectionLockedForMode(StringData ns, LockMode mode) const = 0;
+ virtual bool isCollectionLockedForMode(const NamespaceString& nss, LockMode mode) const = 0;
/**
* Returns the resource that this locker is waiting/blocked on (if any). If the locker is
diff --git a/src/mongo/db/concurrency/locker_noop.h b/src/mongo/db/concurrency/locker_noop.h
index 7e46259cbc5..5e53bd6f6de 100644
--- a/src/mongo/db/concurrency/locker_noop.h
+++ b/src/mongo/db/concurrency/locker_noop.h
@@ -154,7 +154,7 @@ public:
return true;
}
- virtual bool isCollectionLockedForMode(StringData ns, LockMode mode) const {
+ virtual bool isCollectionLockedForMode(const NamespaceString& nss, LockMode mode) const {
return true;
}
diff --git a/src/mongo/db/index/index_build_interceptor.cpp b/src/mongo/db/index/index_build_interceptor.cpp
index 439794e390d..f4eb823e47e 100644
--- a/src/mongo/db/index/index_build_interceptor.cpp
+++ b/src/mongo/db/index/index_build_interceptor.cpp
@@ -314,12 +314,12 @@ void IndexBuildInterceptor::_tryYield(OperationContext* opCtx) {
// Never yield while holding locks that prevent writes to the collection: only yield while
// holding intent locks. This check considers all locks in the hierarchy that would cover this
// mode.
- if (opCtx->lockState()->isCollectionLockedForMode(_indexCatalogEntry->ns(), MODE_S)) {
+ const NamespaceString nss(_indexCatalogEntry->ns());
+ if (opCtx->lockState()->isCollectionLockedForMode(nss, MODE_S)) {
return;
}
DEV {
- const NamespaceString nss(_indexCatalogEntry->ns());
- invariant(!opCtx->lockState()->isCollectionLockedForMode(nss.ns(), MODE_X));
+ invariant(!opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X));
invariant(!opCtx->lockState()->isDbLockedForMode(nss.db(), MODE_X));
}
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index 3b8c0c4800f..daca155a742 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -88,7 +88,7 @@ StatusWith<UUID> getCollectionUUID(OperationContext* opCtx, const NamespaceStrin
void checkShardKeyRestrictions(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& newIdxKey) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(nss.ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X));
const auto metadata = CollectionShardingState::get(opCtx, nss)->getCurrentMetadata();
if (!metadata->isSharded())
@@ -1081,7 +1081,7 @@ std::vector<BSONObj> IndexBuildsCoordinator::_addDefaultsAndFilterExistingIndexe
Collection* collection,
const NamespaceString& nss,
const std::vector<BSONObj>& indexSpecs) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(nss.ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X));
invariant(collection);
// During secondary oplog application, the index specs have already been normalized in the
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index 773321d696c..28f10d4e401 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -105,7 +105,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> createRandomCursorEx
Collection* coll, OperationContext* opCtx, long long sampleSize, long long numRecords) {
// Verify that we are already under a collection lock. We avoid taking locks ourselves in this
// function because double-locking forces any PlanExecutor we create to adopt a NO_YIELD policy.
- invariant(opCtx->lockState()->isCollectionLockedForMode(coll->ns().ns(), MODE_IS));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(coll->ns(), MODE_IS));
static const double kMaxSampleRatioForRandCursor = 0.05;
if (sampleSize > numRecords * kMaxSampleRatioForRandCursor || numRecords <= 100) {
@@ -305,7 +305,7 @@ void PipelineD::prepareCursorSource(Collection* collection,
}
// We are going to generate an input cursor, so we need to be holding the collection lock.
- dassert(expCtx->opCtx->lockState()->isCollectionLockedForMode(nss.ns(), MODE_IS));
+ dassert(expCtx->opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IS));
if (!sources.empty()) {
auto sampleStage = dynamic_cast<DocumentSourceSample*>(sources.front().get());
diff --git a/src/mongo/db/query/plan_executor_impl.cpp b/src/mongo/db/query/plan_executor_impl.cpp
index 88b0da30c31..5ce9ede3350 100644
--- a/src/mongo/db/query/plan_executor_impl.cpp
+++ b/src/mongo/db/query/plan_executor_impl.cpp
@@ -452,7 +452,7 @@ std::shared_ptr<CappedInsertNotifier> PlanExecutorImpl::_getCappedInsertNotifier
// We can only wait if we have a collection; otherwise we should retry immediately when
// we hit EOF.
- dassert(_opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IS));
+ dassert(_opCtx->lockState()->isCollectionLockedForMode(_nss, MODE_IS));
auto databaseHolder = DatabaseHolder::get(_opCtx);
auto db = databaseHolder->getDb(_opCtx, _nss.db());
invariant(db);
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index b383b1b0565..16166e15ca8 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -1434,26 +1434,17 @@ Status applyOperation_inlock(OperationContext* opCtx,
collection);
requestNss = collection->ns();
dassert(opCtx->lockState()->isCollectionLockedForMode(
- requestNss.ns(), supportsDocLocking() ? MODE_IX : MODE_X),
- requestNss.ns());
+ requestNss, supportsDocLocking() ? MODE_IX : MODE_X));
} else {
uassert(ErrorCodes::InvalidNamespace,
"'ns' must be of type String",
fieldNs.type() == BSONType::String);
const StringData ns = fieldNs.valuestrsafe();
requestNss = NamespaceString(ns);
- if (nsIsFull(ns)) {
- if (supportsDocLocking()) {
- // WiredTiger, and others requires MODE_IX since the applier threads driving
- // this allow writes to the same collection on any thread.
- dassert(opCtx->lockState()->isCollectionLockedForMode(ns, MODE_IX),
- requestNss.ns());
- } else {
- // mmapV1 ensures that all operations to the same collection are executed from
- // the same worker thread, so it takes an exclusive lock (MODE_X)
- dassert(opCtx->lockState()->isCollectionLockedForMode(ns, MODE_X), requestNss.ns());
- }
- }
+ invariant(requestNss.coll().size());
+ dassert(opCtx->lockState()->isCollectionLockedForMode(
+ requestNss, supportsDocLocking() ? MODE_IX : MODE_X),
+ requestNss.ns());
collection = db->getCollection(opCtx, requestNss);
}
diff --git a/src/mongo/db/repl/sync_tail_test_fixture.cpp b/src/mongo/db/repl/sync_tail_test_fixture.cpp
index 7d7037d2ba3..e42d1a7d04f 100644
--- a/src/mongo/db/repl/sync_tail_test_fixture.cpp
+++ b/src/mongo/db/repl/sync_tail_test_fixture.cpp
@@ -153,7 +153,8 @@ void SyncTailTest::_testSyncApplyCrudOperation(ErrorCodes::Error expectedError,
ASSERT_TRUE(opCtx);
ASSERT_TRUE(opCtx->lockState()->isDbLockedForMode("test", MODE_IX));
ASSERT_FALSE(opCtx->lockState()->isDbLockedForMode("test", MODE_X));
- ASSERT_TRUE(opCtx->lockState()->isCollectionLockedForMode("test.t", MODE_IX));
+ ASSERT_TRUE(
+ opCtx->lockState()->isCollectionLockedForMode(NamespaceString("test.t"), MODE_IX));
ASSERT_FALSE(opCtx->writesAreReplicated());
ASSERT_TRUE(documentValidationDisabled(opCtx));
};
diff --git a/src/mongo/db/s/collection_sharding_runtime.cpp b/src/mongo/db/s/collection_sharding_runtime.cpp
index c3dfb5618c4..05fbb152475 100644
--- a/src/mongo/db/s/collection_sharding_runtime.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime.cpp
@@ -90,7 +90,7 @@ void CollectionShardingRuntime::setFilteringMetadata(OperationContext* opCtx,
CollectionMetadata newMetadata) {
invariant(!newMetadata.isSharded() || !isNamespaceAlwaysUnsharded(_nss),
str::stream() << "Namespace " << _nss.ns() << " must never be sharded.");
- invariant(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_nss, MODE_X));
_metadataManager->setFilteringMetadata(std::move(newMetadata));
}
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index b9037015042..1ce7bd0f7b2 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -141,7 +141,7 @@ CollectionShardingState::CollectionShardingState(NamespaceString nss)
CollectionShardingState* CollectionShardingState::get(OperationContext* opCtx,
const NamespaceString& nss) {
// Collection lock must be held to have a reference to the collection's sharding state
- dassert(opCtx->lockState()->isCollectionLockedForMode(nss.ns(), MODE_IS));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IS));
auto& collectionsMap = CollectionShardingStateMap::get(opCtx->getServiceContext());
return &collectionsMap->getOrCreate(nss);
@@ -266,17 +266,17 @@ void CollectionShardingState::checkShardVersionOrThrow(OperationContext* opCtx)
}
void CollectionShardingState::enterCriticalSectionCatchUpPhase(OperationContext* opCtx, CSRLock&) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_nss, MODE_X));
_critSec.enterCriticalSectionCatchUpPhase();
}
void CollectionShardingState::enterCriticalSectionCommitPhase(OperationContext* opCtx, CSRLock&) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_nss, MODE_X));
_critSec.enterCriticalSectionCommitPhase();
}
void CollectionShardingState::exitCriticalSection(OperationContext* opCtx, CSRLock&) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_nss, MODE_IX));
_critSec.exitCriticalSection();
}
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index f9a2fbe6144..20114169c40 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -341,7 +341,7 @@ void MigrationChunkClonerSourceLegacy::onInsertOp(OperationContext* opCtx,
const BSONObj& insertedDoc,
const repl::OpTime& opTime,
const bool fromPreparedTransactionCommit) {
- dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss(), MODE_IX));
BSONElement idElement = insertedDoc["_id"];
if (idElement.eoo()) {
@@ -377,7 +377,7 @@ void MigrationChunkClonerSourceLegacy::onUpdateOp(OperationContext* opCtx,
const repl::OpTime& opTime,
const repl::OpTime& prePostImageOpTime,
const bool fromPreparedTransactionCommit) {
- dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss(), MODE_IX));
BSONElement idElement = updatedDoc["_id"];
if (idElement.eoo()) {
@@ -413,7 +413,7 @@ void MigrationChunkClonerSourceLegacy::onDeleteOp(OperationContext* opCtx,
const repl::OpTime& opTime,
const repl::OpTime& preImageOpTime,
const bool fromPreparedTransactionCommit) {
- dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss(), MODE_IX));
BSONElement idElement = deletedDocId["_id"];
if (idElement.eoo()) {
@@ -518,7 +518,7 @@ uint64_t MigrationChunkClonerSourceLegacy::getCloneBatchBufferAllocationSize() {
Status MigrationChunkClonerSourceLegacy::nextCloneBatch(OperationContext* opCtx,
Collection* collection,
BSONArrayBuilder* arrBuilder) {
- dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IS));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss(), MODE_IS));
ElapsedTracker tracker(opCtx->getServiceContext()->getFastClockSource(),
internalQueryExecYieldIterations.load(),
@@ -557,7 +557,7 @@ Status MigrationChunkClonerSourceLegacy::nextCloneBatch(OperationContext* opCtx,
Status MigrationChunkClonerSourceLegacy::nextModsBatch(OperationContext* opCtx,
Database* db,
BSONObjBuilder* builder) {
- dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IS));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss(), MODE_IS));
stdx::lock_guard<stdx::mutex> sl(_mutex);
diff --git a/src/mongo/db/s/shard_server_op_observer.cpp b/src/mongo/db/s/shard_server_op_observer.cpp
index e257deaee70..43653b40a2a 100644
--- a/src/mongo/db/s/shard_server_op_observer.cpp
+++ b/src/mongo/db/s/shard_server_op_observer.cpp
@@ -73,7 +73,7 @@ public:
: _opCtx(opCtx), _nss(nss) {}
void commit(boost::optional<Timestamp>) override {
- invariant(_opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
+ invariant(_opCtx->lockState()->isCollectionLockedForMode(_nss, MODE_IX));
CatalogCacheLoader::get(_opCtx).notifyOfCollectionVersionUpdate(_nss);
diff --git a/src/mongo/db/storage/kv/kv_catalog.cpp b/src/mongo/db/storage/kv/kv_catalog.cpp
index 0642ba44ccb..8df05d06f0f 100644
--- a/src/mongo/db/storage/kv/kv_catalog.cpp
+++ b/src/mongo/db/storage/kv/kv_catalog.cpp
@@ -407,28 +407,28 @@ void KVCatalog::getAllCollections(std::vector<std::string>* out) const {
}
Status KVCatalog::newCollection(OperationContext* opCtx,
- StringData ns,
+ const NamespaceString& nss,
const CollectionOptions& options,
KVPrefix prefix) {
- invariant(opCtx->lockState()->isDbLockedForMode(nsToDatabaseSubstring(ns), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(nss.db(), MODE_X));
- const string ident = _newUniqueIdent(ns, "collection");
+ const string ident = _newUniqueIdent(nss.ns(), "collection");
stdx::lock_guard<stdx::mutex> lk(_identsLock);
- Entry& old = _idents[ns.toString()];
+ Entry& old = _idents[nss.toString()];
if (!old.ident.empty()) {
return Status(ErrorCodes::NamespaceExists, "collection already exists");
}
- opCtx->recoveryUnit()->registerChange(new AddIdentChange(this, ns));
+ opCtx->recoveryUnit()->registerChange(new AddIdentChange(this, nss.ns()));
BSONObj obj;
{
BSONObjBuilder b;
- b.append("ns", ns);
+ b.append("ns", nss.ns());
b.append("ident", ident);
BSONCollectionCatalogEntry::MetaData md;
- md.ns = ns.toString();
+ md.ns = nss.ns();
md.options = options;
md.prefix = prefix;
b.append("md", md.toBSON());
@@ -439,7 +439,7 @@ Status KVCatalog::newCollection(OperationContext* opCtx,
return res.getStatus();
old = Entry(ident, res.getValue());
- LOG(1) << "stored meta data for " << ns << " @ " << res.getValue();
+ LOG(1) << "stored meta data for " << nss.ns() << " @ " << res.getValue();
return Status::OK();
}
diff --git a/src/mongo/db/storage/kv/kv_catalog.h b/src/mongo/db/storage/kv/kv_catalog.h
index bb54bb80019..514d7fef206 100644
--- a/src/mongo/db/storage/kv/kv_catalog.h
+++ b/src/mongo/db/storage/kv/kv_catalog.h
@@ -66,7 +66,7 @@ public:
* @return error or ident for instance
*/
Status newCollection(OperationContext* opCtx,
- StringData ns,
+ const NamespaceString& ns,
const CollectionOptions& options,
KVPrefix prefix);
diff --git a/src/mongo/db/storage/kv/kv_collection_catalog_entry_test.cpp b/src/mongo/db/storage/kv/kv_collection_catalog_entry_test.cpp
index 80c1718ff57..0dc09150287 100644
--- a/src/mongo/db/storage/kv/kv_collection_catalog_entry_test.cpp
+++ b/src/mongo/db/storage/kv/kv_collection_catalog_entry_test.cpp
@@ -83,7 +83,7 @@ public:
WriteUnitOfWork wuow(opCtx.get());
const bool allocateDefaultSpace = true;
ASSERT_OK(dbEntry->createCollection(
- opCtx.get(), _nss.ns(), CollectionOptions(), allocateDefaultSpace));
+ opCtx.get(), _nss, CollectionOptions(), allocateDefaultSpace));
wuow.commit();
}
}
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp b/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp
index 8860427123b..7709125ea7d 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp
@@ -226,30 +226,29 @@ RecordStore* KVDatabaseCatalogEntryBase::getRecordStore(StringData ns) const {
}
Status KVDatabaseCatalogEntryBase::createCollection(OperationContext* opCtx,
- StringData ns,
+ const NamespaceString& nss,
const CollectionOptions& options,
bool allocateDefaultSpace) {
invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
- if (ns.empty()) {
- return Status(ErrorCodes::BadValue, "Collection namespace cannot be empty");
- }
+ invariant(nss.coll().size() > 0);
- if (_collections.count(ns.toString())) {
- invariant(_collections[ns.toString()]);
+ if (_collections.count(nss.toString())) {
+ invariant(_collections[nss.toString()]);
return Status(ErrorCodes::NamespaceExists, "collection already exists");
}
- KVPrefix prefix = KVPrefix::getNextPrefix(NamespaceString(ns));
+ KVPrefix prefix = KVPrefix::getNextPrefix(nss);
// need to create it
- Status status = _engine->getCatalog()->newCollection(opCtx, ns, options, prefix);
+ Status status = _engine->getCatalog()->newCollection(opCtx, nss, options, prefix);
if (!status.isOK())
return status;
- string ident = _engine->getCatalog()->getCollectionIdent(ns);
+ string ident = _engine->getCatalog()->getCollectionIdent(nss.ns());
- status = _engine->getEngine()->createGroupedRecordStore(opCtx, ns, ident, options, prefix);
+ status =
+ _engine->getEngine()->createGroupedRecordStore(opCtx, nss.ns(), ident, options, prefix);
if (!status.isOK())
return status;
@@ -263,13 +262,13 @@ Status KVDatabaseCatalogEntryBase::createCollection(OperationContext* opCtx,
}
}
- opCtx->recoveryUnit()->registerChange(new AddCollectionChange(opCtx, this, ns, ident));
+ opCtx->recoveryUnit()->registerChange(new AddCollectionChange(opCtx, this, nss.ns(), ident));
- auto rs = _engine->getEngine()->getGroupedRecordStore(opCtx, ns, ident, options, prefix);
+ auto rs = _engine->getEngine()->getGroupedRecordStore(opCtx, nss.ns(), ident, options, prefix);
invariant(rs);
- _collections[ns.toString()] =
- new KVCollectionCatalogEntry(_engine, _engine->getCatalog(), ns, ident, std::move(rs));
+ _collections[nss.toString()] = new KVCollectionCatalogEntry(
+ _engine, _engine->getCatalog(), nss.ns(), ident, std::move(rs));
return Status::OK();
}
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry_base.h b/src/mongo/db/storage/kv/kv_database_catalog_entry_base.h
index f9307a11f7f..8e13dee9277 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry_base.h
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry_base.h
@@ -35,6 +35,8 @@
#include "mongo/db/catalog/database_catalog_entry.h"
+#include "mongo/db/namespace_string.h"
+
namespace mongo {
class KVStorageEngineInterface;
@@ -68,7 +70,7 @@ public:
IndexCatalogEntry* index) override = 0;
Status createCollection(OperationContext* opCtx,
- StringData ns,
+ const NamespaceString& nss,
const CollectionOptions& options,
bool allocateDefaultSpace) override;
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry_test.cpp b/src/mongo/db/storage/kv/kv_database_catalog_entry_test.cpp
index f9f701f8096..d3fbb33b16f 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry_test.cpp
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry_test.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/storage/kv/kv_prefix.h"
#include "mongo/db/storage/kv/kv_storage_engine.h"
#include "mongo/stdx/memory.h"
+#include "mongo/unittest/death_test.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -50,24 +51,13 @@ TEST_F(ServiceContextTest, CreateCollectionValidNamespace) {
storageEngine.finishInit();
KVDatabaseCatalogEntryMock dbEntry("mydb", &storageEngine);
OperationContextNoop ctx;
- ASSERT_OK(dbEntry.createCollection(&ctx, "mydb.mycoll", CollectionOptions(), true));
+ ASSERT_OK(
+ dbEntry.createCollection(&ctx, NamespaceString("mydb.mycoll"), CollectionOptions(), true));
std::list<std::string> collectionNamespaces;
dbEntry.getCollectionNamespaces(&collectionNamespaces);
ASSERT_FALSE(collectionNamespaces.empty());
}
-TEST_F(ServiceContextTest, CreateCollectionEmptyNamespace) {
- KVStorageEngine storageEngine(
- new DevNullKVEngine(), KVStorageEngineOptions{}, kvDatabaseCatalogEntryMockFactory);
- storageEngine.finishInit();
- KVDatabaseCatalogEntryMock dbEntry("mydb", &storageEngine);
- OperationContextNoop ctx;
- ASSERT_NOT_OK(dbEntry.createCollection(&ctx, "", CollectionOptions(), true));
- std::list<std::string> collectionNamespaces;
- dbEntry.getCollectionNamespaces(&collectionNamespaces);
- ASSERT_TRUE(collectionNamespaces.empty());
-}
-
/**
* Derived class of devnull KV engine where createRecordStore is overridden to fail
* on an empty namespace (provided by the test).
@@ -93,11 +83,22 @@ TEST_F(ServiceContextTest, CreateCollectionInvalidRecordStore) {
storageEngine.finishInit();
KVDatabaseCatalogEntryMock dbEntry("fail", &storageEngine);
OperationContextNoop ctx;
- ASSERT_NOT_OK(dbEntry.createCollection(&ctx, "fail.me", CollectionOptions(), true));
+ ASSERT_NOT_OK(
+ dbEntry.createCollection(&ctx, NamespaceString("fail.me"), CollectionOptions(), true));
std::list<std::string> collectionNamespaces;
dbEntry.getCollectionNamespaces(&collectionNamespaces);
ASSERT_TRUE(collectionNamespaces.empty());
}
+DEATH_TEST_F(ServiceContextTest, CreateCollectionEmptyNamespace, "Invariant failure") {
+ KVStorageEngine storageEngine(
+ new DevNullKVEngine(), KVStorageEngineOptions{}, kvDatabaseCatalogEntryMockFactory);
+ storageEngine.finishInit();
+ KVDatabaseCatalogEntryMock dbEntry("mydb", &storageEngine);
+ OperationContextNoop ctx;
+ Status status = dbEntry.createCollection(&ctx, NamespaceString(""), CollectionOptions(), true);
+}
+
+
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
index 9a18913ac5e..3e1e507ced6 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
+++ b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
@@ -299,8 +299,8 @@ TEST(KVCatalogTest, Coll1) {
{
MyOperationContext opCtx(engine);
WriteUnitOfWork uow(&opCtx);
- ASSERT_OK(
- catalog->newCollection(&opCtx, "a.b", CollectionOptions(), KVPrefix::kNotPrefixed));
+ ASSERT_OK(catalog->newCollection(
+ &opCtx, NamespaceString("a.b"), CollectionOptions(), KVPrefix::kNotPrefixed));
ASSERT_NOT_EQUALS("a.b", catalog->getCollectionIdent("a.b"));
uow.commit();
}
@@ -319,7 +319,9 @@ TEST(KVCatalogTest, Coll1) {
MyOperationContext opCtx(engine);
WriteUnitOfWork uow(&opCtx);
catalog->dropCollection(&opCtx, "a.b").transitional_ignore();
- catalog->newCollection(&opCtx, "a.b", CollectionOptions(), KVPrefix::kNotPrefixed)
+ catalog
+ ->newCollection(
+ &opCtx, NamespaceString("a.b"), CollectionOptions(), KVPrefix::kNotPrefixed)
.transitional_ignore();
uow.commit();
}
@@ -344,8 +346,8 @@ TEST(KVCatalogTest, Idx1) {
{
MyOperationContext opCtx(engine);
WriteUnitOfWork uow(&opCtx);
- ASSERT_OK(
- catalog->newCollection(&opCtx, "a.b", CollectionOptions(), KVPrefix::kNotPrefixed));
+ ASSERT_OK(catalog->newCollection(
+ &opCtx, NamespaceString("a.b"), CollectionOptions(), KVPrefix::kNotPrefixed));
ASSERT_NOT_EQUALS("a.b", catalog->getCollectionIdent("a.b"));
ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
uow.commit();
@@ -428,8 +430,8 @@ TEST(KVCatalogTest, DirectoryPerDb1) {
{ // collection
MyOperationContext opCtx(engine);
WriteUnitOfWork uow(&opCtx);
- ASSERT_OK(
- catalog->newCollection(&opCtx, "a.b", CollectionOptions(), KVPrefix::kNotPrefixed));
+ ASSERT_OK(catalog->newCollection(
+ &opCtx, NamespaceString("a.b"), CollectionOptions(), KVPrefix::kNotPrefixed));
ASSERT_STRING_CONTAINS(catalog->getCollectionIdent("a.b"), "a/");
ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
uow.commit();
@@ -476,8 +478,8 @@ TEST(KVCatalogTest, Split1) {
{
MyOperationContext opCtx(engine);
WriteUnitOfWork uow(&opCtx);
- ASSERT_OK(
- catalog->newCollection(&opCtx, "a.b", CollectionOptions(), KVPrefix::kNotPrefixed));
+ ASSERT_OK(catalog->newCollection(
+ &opCtx, NamespaceString("a.b"), CollectionOptions(), KVPrefix::kNotPrefixed));
ASSERT_STRING_CONTAINS(catalog->getCollectionIdent("a.b"), "collection/");
ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
uow.commit();
@@ -524,8 +526,8 @@ TEST(KVCatalogTest, DirectoryPerAndSplit1) {
{
MyOperationContext opCtx(engine);
WriteUnitOfWork uow(&opCtx);
- ASSERT_OK(
- catalog->newCollection(&opCtx, "a.b", CollectionOptions(), KVPrefix::kNotPrefixed));
+ ASSERT_OK(catalog->newCollection(
+ &opCtx, NamespaceString("a.b"), CollectionOptions(), KVPrefix::kNotPrefixed));
ASSERT_STRING_CONTAINS(catalog->getCollectionIdent("a.b"), "a/collection/");
ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
uow.commit();
@@ -578,7 +580,8 @@ TEST(KVCatalogTest, RestartForPrefixes) {
{
MyOperationContext opCtx(engine);
WriteUnitOfWork uow(&opCtx);
- ASSERT_OK(catalog->newCollection(&opCtx, "a.b", CollectionOptions(), abCollPrefix));
+ ASSERT_OK(catalog->newCollection(
+ &opCtx, NamespaceString("a.b"), CollectionOptions(), abCollPrefix));
ASSERT_NOT_EQUALS("a.b", catalog->getCollectionIdent("a.b"));
ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
uow.commit();
diff --git a/src/mongo/db/storage/kv/kv_storage_engine_test.cpp b/src/mongo/db/storage/kv/kv_storage_engine_test.cpp
index 9f5d748e7f9..935bef73175 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine_test.cpp
+++ b/src/mongo/db/storage/kv/kv_storage_engine_test.cpp
@@ -70,7 +70,7 @@ public:
StatusWith<std::string> createCollection(OperationContext* opCtx, NamespaceString ns) {
AutoGetDb db(opCtx, ns.db(), LockMode::MODE_X);
DatabaseCatalogEntry* dbce = _storageEngine->getDatabaseCatalogEntry(opCtx, ns.db());
- auto ret = dbce->createCollection(opCtx, ns.ns(), CollectionOptions(), false);
+ auto ret = dbce->createCollection(opCtx, ns, CollectionOptions(), false);
if (!ret.isOK()) {
return ret;
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index 425a1d470fd..8fa398b4fb0 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -2025,7 +2025,7 @@ std::unique_ptr<SeekableRecordCursor> StandardWiredTigerRecordStore::getCursor(
// If we already have a snapshot we don't know what it can see, unless we know no one
// else could be writing (because we hold an exclusive lock).
invariant(!wru->inActiveTxn() ||
- opCtx->lockState()->isCollectionLockedForMode(_ns, MODE_X));
+ opCtx->lockState()->isCollectionLockedForMode(NamespaceString(_ns), MODE_X));
wru->setIsOplogReader();
}
@@ -2076,7 +2076,7 @@ std::unique_ptr<SeekableRecordCursor> PrefixedWiredTigerRecordStore::getCursor(
// If we already have a snapshot we don't know what it can see, unless we know no one
// else could be writing (because we hold an exclusive lock).
invariant(!wru->inActiveTxn() ||
- opCtx->lockState()->isCollectionLockedForMode(_ns, MODE_X));
+ opCtx->lockState()->isCollectionLockedForMode(NamespaceString(_ns), MODE_X));
wru->setIsOplogReader();
}