author     Xiangyu Yao <xiangyu.yao@mongodb.com>    2019-07-11 18:37:42 -0400
committer  Xiangyu Yao <xiangyu.yao@mongodb.com>    2019-07-15 13:30:02 -0400
commit     70a3df4dd5d102b2370a871c9182be281100a487 (patch)
tree       4101519c5505800900caf30b1730c5fc6cdaeb5a
parent     bb4ba528dd4254c33ac77026f94bc287b590ff3c (diff)
download   mongo-70a3df4dd5d102b2370a871c9182be281100a487.tar.gz
SERVER-42194 Make Collection always hold a UUID (rather than optional UUID)
-rw-r--r--  src/mongo/db/catalog/capped_utils.cpp  3
-rw-r--r--  src/mongo/db/catalog/catalog_control.cpp  8
-rw-r--r--  src/mongo/db/catalog/coll_mod.cpp  3
-rw-r--r--  src/mongo/db/catalog/collection.h  2
-rw-r--r--  src/mongo/db/catalog/collection_catalog_test.cpp  6
-rw-r--r--  src/mongo/db/catalog/collection_compact.cpp  2
-rw-r--r--  src/mongo/db/catalog/collection_impl.cpp  19
-rw-r--r--  src/mongo/db/catalog/collection_impl.h  6
-rw-r--r--  src/mongo/db/catalog/collection_mock.h  4
-rw-r--r--  src/mongo/db/catalog/database_impl.cpp  15
-rw-r--r--  src/mongo/db/catalog/drop_collection.cpp  2
-rw-r--r--  src/mongo/db/catalog/drop_database.cpp  2
-rw-r--r--  src/mongo/db/catalog/drop_indexes.cpp  2
-rw-r--r--  src/mongo/db/catalog/index_builds_manager.cpp  2
-rw-r--r--  src/mongo/db/catalog/index_catalog_impl.cpp  4
-rw-r--r--  src/mongo/db/catalog/multi_index_block.cpp  17
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp  12
-rw-r--r--  src/mongo/db/catalog/rename_collection_test.cpp  4
-rw-r--r--  src/mongo/db/cloner.cpp  2
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp  2
-rw-r--r--  src/mongo/db/commands/dbcheck.cpp  10
-rw-r--r--  src/mongo/db/commands/drop_indexes.cpp  3
-rw-r--r--  src/mongo/db/commands/mr.cpp  6
-rw-r--r--  src/mongo/db/commands/run_aggregate.cpp  4
-rw-r--r--  src/mongo/db/commands/test_commands.cpp  2
-rw-r--r--  src/mongo/db/exec/requires_collection_stage.h  2
-rw-r--r--  src/mongo/db/index_builder.cpp  2
-rw-r--r--  src/mongo/db/index_builds_coordinator_mongod.cpp  2
-rw-r--r--  src/mongo/db/op_observer_impl.cpp  1
-rw-r--r--  src/mongo/db/repair_database_and_check_version.cpp  6
-rw-r--r--  src/mongo/db/repl/apply_ops.cpp  4
-rw-r--r--  src/mongo/db/repl/dbcheck.cpp  2
-rw-r--r--  src/mongo/db/repl/oplog.cpp  2
-rw-r--r--  src/mongo/db/repl/rollback_test_fixture.h  2
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp  6
-rw-r--r--  src/mongo/db/repl/rs_rollback_test.cpp  85
-rw-r--r--  src/mongo/db/repl/sync_tail_test.cpp  4
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp  4
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp  4
-rw-r--r--  src/mongo/db/system_index.cpp  6
-rw-r--r--  src/mongo/dbtests/storage_timestamp_tests.cpp  147
41 files changed, 192 insertions, 229 deletions
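The substance of the change is the accessor's return type: Collection::uuid() now returns a UUID by value instead of an OptionalCollectionUUID, which is why call sites throughout the diff below drop their uuid().get(), *uuid(), and invariant(uuid()) unwrapping. The following is a minimal standalone sketch of that before/after pattern; the UUID struct and Collection classes here are hypothetical stand-ins, not the real mongo:: types, and only illustrate the shape of the API change.

// sketch.cpp -- illustrative only; requires C++17 for std::optional
#include <iostream>
#include <optional>
#include <string>

// Hypothetical stand-in for a UUID value type.
struct UUID {
    std::string value;
};

// Before the change (sketch): the accessor exposed an optional value,
// so every caller had to unwrap it and often assert it was present.
class CollectionBefore {
public:
    std::optional<UUID> uuid() const {
        return _uuid;
    }

private:
    std::optional<UUID> _uuid{UUID{"aabbccdd"}};
};

// After the change (sketch): the collection always holds a UUID, so the
// accessor returns it directly and callers use it with no unwrapping.
class CollectionAfter {
public:
    UUID uuid() const {
        return _uuid;
    }

private:
    UUID _uuid{"aabbccdd"};
};

int main() {
    CollectionBefore before;
    if (before.uuid()) {  // old call sites: check, then dereference
        std::cout << "before: " << before.uuid()->value << '\n';
    }

    CollectionAfter after;
    std::cout << "after: " << after.uuid().value << '\n';  // new call sites: use directly
    return 0;
}

In the actual diff this simplification appears as one-line replacements such as collection->uuid().get() becoming collection->uuid() and *collection->uuid() becoming collection->uuid().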
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index 889ca73ddd2..c7ceead85a0 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -91,8 +91,7 @@ Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionNam
}
BackgroundOperation::assertNoBgOpInProgForNs(collectionName.ns());
- IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(
- collection->uuid().get());
+ IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(collection->uuid());
WriteUnitOfWork wuow(opCtx);
diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
index 95c1381f91e..ab925ab0a97 100644
--- a/src/mongo/db/catalog/catalog_control.cpp
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -174,11 +174,9 @@ void openCatalog(OperationContext* opCtx, const MinVisibleTimestampMap& minVisib
str::stream() << "failed to get valid collection pointer for namespace "
<< collNss);
- auto uuid = collection->uuid();
- invariant(uuid);
-
- if (minVisibleTimestampMap.count(*uuid) > 0) {
- collection->setMinimumVisibleSnapshot(minVisibleTimestampMap.find(*uuid)->second);
+ if (minVisibleTimestampMap.count(collection->uuid()) > 0) {
+ collection->setMinimumVisibleSnapshot(
+ minVisibleTimestampMap.find(collection->uuid())->second);
}
// If this is the oplog collection, re-establish the replication system's cached pointer
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 9aac38954e8..45c57acc17a 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -268,8 +268,7 @@ Status _collModInternal(OperationContext* opCtx,
// progress.
BackgroundOperation::assertNoBgOpInProgForNs(nss);
if (coll) {
- IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(
- coll->uuid().get());
+ IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(coll->uuid());
}
// If db/collection/view does not exist, short circuit and return.
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 15057e3124d..750d2ea9411 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -206,7 +206,7 @@ public:
*/
virtual void setNs(NamespaceString nss) = 0;
- virtual OptionalCollectionUUID uuid() const = 0;
+ virtual UUID uuid() const = 0;
virtual const IndexCatalog* getIndexCatalog() const = 0;
virtual IndexCatalog* getIndexCatalog() = 0;
diff --git a/src/mongo/db/catalog/collection_catalog_test.cpp b/src/mongo/db/catalog/collection_catalog_test.cpp
index 32bf6ab8047..e9985074fd8 100644
--- a/src/mongo/db/catalog/collection_catalog_test.cpp
+++ b/src/mongo/db/catalog/collection_catalog_test.cpp
@@ -271,7 +271,7 @@ public:
auto coll = std::make_unique<CollectionMock>(nss);
auto uuid = coll->uuid();
- catalog.registerCollection(uuid.get(), std::move(coll));
+ catalog.registerCollection(uuid, std::move(coll));
}
int numEntries = 0;
@@ -289,7 +289,7 @@ public:
void tearDown() {
for (auto it = catalog.begin("resourceDb"); it != catalog.end(); ++it) {
auto coll = *it;
- auto uuid = coll->uuid().get();
+ auto uuid = coll->uuid();
if (!coll) {
break;
}
@@ -358,7 +358,7 @@ TEST_F(CollectionCatalogResourceTest, LookupMissingCollectionResource) {
TEST_F(CollectionCatalogResourceTest, RemoveCollection) {
const std::string collNs = "resourceDb.coll1";
auto coll = catalog.lookupCollectionByNamespace(NamespaceString(collNs));
- catalog.deregisterCollection(coll->uuid().get());
+ catalog.deregisterCollection(coll->uuid());
auto rid = ResourceId(RESOURCE_COLLECTION, collNs);
ASSERT(!catalog.lookupResourceName(rid));
}
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index ab41a04327c..eae4770532d 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -127,7 +127,7 @@ StatusWith<CompactStats> compactCollection(OperationContext* opCtx,
// If the storage engine doesn't support compacting in place, make sure no background operations
// or indexes are running.
- const UUID collectionUUID = collection->uuid().get();
+ const UUID collectionUUID = collection->uuid();
BackgroundOperation::assertNoBgOpInProgForNs(collectionNss);
IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(collectionUUID);
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index fdde33beeb5..f9a02f46309 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -196,7 +196,7 @@ using logger::LogComponent;
CollectionImpl::CollectionImpl(OperationContext* opCtx,
const NamespaceString& nss,
- OptionalCollectionUUID uuid,
+ UUID uuid,
std::unique_ptr<RecordStore> recordStore)
: _magic(kMagicNumber),
_ns(nss),
@@ -324,13 +324,15 @@ StatusWithMatchExpression CollectionImpl::parseValidator(
if (ns().isSystem() && !ns().isDropPendingNamespace()) {
return {ErrorCodes::InvalidOptions,
str::stream() << "Document validators not allowed on system collection " << ns()
- << (_uuid ? " with UUID " + _uuid->toString() : "")};
+ << " with UUID "
+ << _uuid};
}
if (ns().isOnInternalDb()) {
return {ErrorCodes::InvalidOptions,
str::stream() << "Document validators are not allowed on collection " << ns().ns()
- << (_uuid ? " with UUID " + _uuid->toString() : "")
+ << " with UUID "
+ << _uuid
<< " in the "
<< ns().db()
<< " internal database"};
@@ -701,8 +703,7 @@ RecordId CollectionImpl::updateDocument(OperationContext* opCtx,
invariant(sid == opCtx->recoveryUnit()->getSnapshotId());
args->updatedDoc = newDoc;
- invariant(uuid());
- OplogUpdateEntryArgs entryArgs(*args, ns(), *uuid());
+ OplogUpdateEntryArgs entryArgs(*args, ns(), _uuid);
getGlobalServiceContext()->getOpObserver()->onUpdate(opCtx, entryArgs);
return {oldLocation};
@@ -732,8 +733,7 @@ StatusWith<RecordData> CollectionImpl::updateDocumentWithDamages(
if (newRecStatus.isOK()) {
args->updatedDoc = newRecStatus.getValue().toBson();
- invariant(uuid());
- OplogUpdateEntryArgs entryArgs(*args, ns(), *uuid());
+ OplogUpdateEntryArgs entryArgs(*args, ns(), _uuid);
getGlobalServiceContext()->getOpObserver()->onUpdate(opCtx, entryArgs);
}
return newRecStatus;
@@ -1285,7 +1285,7 @@ void _validateCatalogEntry(OperationContext* opCtx,
BSONObj validatorDoc,
ValidateResults* results) {
CollectionOptions options = DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, coll->ns());
- addErrorIfUnequal(options.uuid, coll->uuid(), "UUID", results);
+ addErrorIfUnequal(*options.uuid, coll->uuid(), "UUID", results);
const CollatorInterface* collation = coll->getDefaultCollator();
addErrorIfUnequal(options.collation.isEmpty(), !collation, "simple collation", results);
if (!options.collation.isEmpty() && collation)
@@ -1333,8 +1333,7 @@ Status CollectionImpl::validate(OperationContext* opCtx,
opCtx, &indexConsistency, level, _indexCatalog.get(), &indexNsResultsMap);
// Validate the record store
- std::string uuidString = str::stream()
- << " (UUID: " << (uuid() ? uuid()->toString() : "none") << ")";
+ std::string uuidString = str::stream() << " (UUID: " << _uuid << ")";
log(LogComponent::kIndex) << "validating collection " << ns() << uuidString;
_validateRecordStore(
opCtx, _recordStore.get(), level, background, &indexValidator, results, output);
diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h
index a9a2c6fbfde..46b56f805e1 100644
--- a/src/mongo/db/catalog/collection_impl.h
+++ b/src/mongo/db/catalog/collection_impl.h
@@ -47,7 +47,7 @@ public:
explicit CollectionImpl(OperationContext* opCtx,
const NamespaceString& nss,
- OptionalCollectionUUID uuid,
+ UUID uuid,
std::unique_ptr<RecordStore> recordStore);
~CollectionImpl();
@@ -78,7 +78,7 @@ public:
void setNs(NamespaceString nss) final;
- OptionalCollectionUUID uuid() const {
+ UUID uuid() const {
return _uuid;
}
@@ -389,7 +389,7 @@ private:
int _magic;
NamespaceString _ns;
- OptionalCollectionUUID _uuid;
+ UUID _uuid;
// The RecordStore may be null during a repair operation.
std::unique_ptr<RecordStore> _recordStore; // owned
diff --git a/src/mongo/db/catalog/collection_mock.h b/src/mongo/db/catalog/collection_mock.h
index cb78c52b72f..19eb457bd7a 100644
--- a/src/mongo/db/catalog/collection_mock.h
+++ b/src/mongo/db/catalog/collection_mock.h
@@ -273,7 +273,7 @@ public:
std::abort();
}
- OptionalCollectionUUID uuid() const {
+ UUID uuid() const {
return _uuid;
}
@@ -282,7 +282,7 @@ public:
}
private:
- OptionalCollectionUUID _uuid = UUID::gen();
+ UUID _uuid = UUID::gen();
NamespaceString _ns;
std::unique_ptr<IndexCatalog> _indexCatalog;
};
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index 1388714f3f7..d5c57cc8db4 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -348,13 +348,12 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
auto numRecords = collection->numRecords(opCtx);
auto uuid = collection->uuid();
- auto uuidString = uuid ? uuid.get().toString() : "no UUID";
// Make sure no indexes builds are in progress.
// Use massert() to be consistent with IndexCatalog::dropAllIndexes().
auto numIndexesInProgress = collection->getIndexCatalog()->numIndexesInProgress(opCtx);
massert(ErrorCodes::BackgroundOperationInProgressForNamespace,
- str::stream() << "cannot drop collection " << nss << " (" << uuidString << ") when "
+ str::stream() << "cannot drop collection " << nss << " (" << uuid << ") when "
<< numIndexesInProgress
<< " index builds in progress.",
numIndexesInProgress == 0);
@@ -385,7 +384,7 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
_dropCollectionIndexes(opCtx, nss, collection);
auto commitTimestamp = opCtx->recoveryUnit()->getCommitTimestamp();
- log() << "dropCollection: " << nss << " (" << uuidString
+ log() << "dropCollection: " << nss << " (" << uuid
<< ") - storage engine will take ownership of drop-pending collection with optime "
<< dropOpTime << " and commit timestamp " << commitTimestamp;
if (dropOpTime.isNull()) {
@@ -425,7 +424,7 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
// Rename collection using drop-pending namespace generated from drop optime.
auto dpns = nss.makeDropPendingNamespace(dropOpTime);
const bool stayTemp = true;
- log() << "dropCollection: " << nss << " (" << uuidString
+ log() << "dropCollection: " << nss << " (" << uuid
<< ") - renaming to drop-pending collection: " << dpns << " with drop optime "
<< dropOpTime;
{
@@ -454,7 +453,7 @@ void DatabaseImpl::_dropCollectionIndexes(OperationContext* opCtx,
Status DatabaseImpl::_finishDropCollection(OperationContext* opCtx,
const NamespaceString& nss,
Collection* collection) const {
- UUID uuid = *collection->uuid();
+ UUID uuid = collection->uuid();
log() << "Finishing collection drop for " << nss << " (" << uuid << ").";
auto status = DurableCatalog::get(opCtx)->dropCollection(opCtx, nss);
@@ -507,8 +506,8 @@ Status DatabaseImpl::renameCollection(OperationContext* opCtx,
<< toNss);
}
- log() << "renameCollection: renaming collection " << collToRename->uuid()->toString()
- << " from " << fromNss << " to " << toNss;
+ log() << "renameCollection: renaming collection " << collToRename->uuid() << " from " << fromNss
+ << " to " << toNss;
Top::get(opCtx->getServiceContext()).collectionDropped(fromNss);
@@ -651,7 +650,7 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
});
auto& catalog = CollectionCatalog::get(opCtx);
- auto uuid = ownedCollection->uuid().get();
+ auto uuid = ownedCollection->uuid();
catalog.registerCollection(uuid, std::move(ownedCollection));
opCtx->recoveryUnit()->onRollback([uuid, &catalog] { catalog.deregisterCollection(uuid); });
diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp
index 5a8c2828f63..b5f30ae2a3e 100644
--- a/src/mongo/db/catalog/drop_collection.cpp
+++ b/src/mongo/db/catalog/drop_collection.cpp
@@ -130,7 +130,7 @@ Status _dropCollection(OperationContext* opCtx,
int numIndexes = coll->getIndexCatalog()->numIndexesTotal(opCtx);
BackgroundOperation::assertNoBgOpInProgForNs(collectionName.ns());
- IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(coll->uuid().get());
+ IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(coll->uuid());
Status status =
systemCollectionMode == DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops
? db->dropCollection(opCtx, collectionName, dropOpTime)
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index 2602c6e59d5..fd471f8ad50 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -189,7 +189,7 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
BackgroundOperation::assertNoBgOpInProgForNs(nss.ns());
IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(
- db->getCollection(opCtx, nss)->uuid().get());
+ db->getCollection(opCtx, nss)->uuid());
writeConflictRetry(opCtx, "dropDatabase_collection", nss.ns(), [&] {
WriteUnitOfWork wunit(opCtx);
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
index b74a55f00cf..cd56e85e2b6 100644
--- a/src/mongo/db/catalog/drop_indexes.cpp
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -228,7 +228,7 @@ Status dropIndexes(OperationContext* opCtx,
BackgroundOperation::assertNoBgOpInProgForNs(nss);
IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(
- collection->uuid().get());
+ collection->uuid());
WriteUnitOfWork wunit(opCtx);
OldClientContext ctx(opCtx, nss.ns());
diff --git a/src/mongo/db/catalog/index_builds_manager.cpp b/src/mongo/db/catalog/index_builds_manager.cpp
index cbc9217ee12..fe816ce7dc4 100644
--- a/src/mongo/db/catalog/index_builds_manager.cpp
+++ b/src/mongo/db/catalog/index_builds_manager.cpp
@@ -112,7 +112,7 @@ Status IndexBuildsManager::setUpIndexBuild(OperationContext* opCtx,
<< ": indexes: " << indexes.size();
} else {
log() << "Index build initialized: " << buildUUID << ": " << nss << " ("
- << *collection->uuid() << " ): indexes: " << indexes.size();
+ << collection->uuid() << " ): indexes: " << indexes.size();
}
return Status::OK();
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index a0a467bccc2..571a5705201 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -1064,9 +1064,7 @@ int IndexCatalogImpl::numIndexesReady(OperationContext* opCtx) const {
log() << " index: " << i;
}
- if (_collection->uuid()) {
- log() << "collection uuid: " << _collection->uuid();
- }
+ log() << "collection uuid: " << _collection->uuid();
invariant(itIndexes.size() == completedIndexes.size(),
"The number of ready indexes reported in the collection metadata catalog did "
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index 6bb769f533a..c3065c1a13b 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -77,7 +77,7 @@ void MultiIndexBlock::cleanUpAfterBuild(OperationContext* opCtx, Collection* col
if (_collectionUUID) {
// init() was previously called with a collection pointer, so ensure that the same
// collection is being provided for clean up and the interface in not being abused.
- invariant(_collectionUUID.get() == collection->uuid().get());
+ invariant(_collectionUUID.get() == collection->uuid());
}
if (!_needToCleanup && !_indexes.empty()) {
@@ -201,18 +201,15 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(OperationContext* opCtx,
str::stream() << "Index build aborted: " << _abortReason
<< ". Cannot initialize index builder: "
<< collection->ns()
- << (collection->uuid()
- ? (" (" + collection->uuid()->toString() + "): ")
- : ": ")
+ << " ("
+ << collection->uuid()
+ << "): "
<< indexSpecs.size()
<< " provided. First index spec: "
<< (indexSpecs.empty() ? BSONObj() : indexSpecs[0])};
}
- // UUIDs are not guaranteed during startup because the check happens after indexes are rebuilt.
- if (collection->uuid()) {
- _collectionUUID = collection->uuid().get();
- }
+ _collectionUUID = collection->uuid();
_buildIsCleanedUp = false;
@@ -366,7 +363,7 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(OperationContext* opCtx,
// UUIDs are not guaranteed during startup because the check happens after indexes are rebuilt.
if (_collectionUUID) {
- invariant(_collectionUUID.get() == collection->uuid().get());
+ invariant(_collectionUUID.get() == collection->uuid());
}
// Refrain from persisting any multikey updates as a result from building the index. Instead,
@@ -707,7 +704,7 @@ Status MultiIndexBlock::commit(OperationContext* opCtx,
OnCommitFn onCommit) {
// UUIDs are not guaranteed during startup because the check happens after indexes are rebuilt.
if (_collectionUUID) {
- invariant(_collectionUUID.get() == collection->uuid().get());
+ invariant(_collectionUUID.get() == collection->uuid());
}
if (State::kAborted == _getState()) {
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 2492fbac0d4..08047b91a31 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -117,8 +117,7 @@ Status checkSourceAndTargetNamespaces(OperationContext* opCtx,
}
BackgroundOperation::assertNoBgOpInProgForNs(source.ns());
- IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(
- sourceColl->uuid().get());
+ IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(sourceColl->uuid());
Collection* targetColl = db->getCollection(opCtx, target);
@@ -255,7 +254,7 @@ Status renameCollectionAndDropTarget(OperationContext* opCtx,
BackgroundOperation::assertNoBgOpInProgForNs(targetColl->ns().ns());
IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(
- targetColl->uuid().get());
+ targetColl->uuid());
auto status = db->dropCollection(opCtx, targetColl->ns(), renameOpTime);
if (!status.isOK())
@@ -381,7 +380,7 @@ Status renameCollectionWithinDBForApplyOps(OperationContext* opCtx,
if (uuidToDrop && uuidToDrop != targetColl->uuid()) {
// We need to rename the targetColl to a temporary name.
auto status = renameTargetCollectionToTmp(
- opCtx, source, sourceColl->uuid().get(), db, target, targetColl->uuid().get());
+ opCtx, source, sourceColl->uuid(), db, target, targetColl->uuid());
if (!status.isOK())
return status;
targetColl = nullptr;
@@ -462,8 +461,7 @@ Status renameBetweenDBs(OperationContext* opCtx,
"Cannot rename collections between a replicated and an unreplicated database"};
BackgroundOperation::assertNoBgOpInProgForNs(source.ns());
- IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(
- sourceColl->uuid().get());
+ IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(sourceColl->uuid());
auto targetDB = DatabaseHolder::get(opCtx)->getDb(opCtx, target.db());
@@ -585,7 +583,7 @@ Status renameBetweenDBs(OperationContext* opCtx,
for (const auto& indexToCopy : indexesToCopy) {
opObserver->onCreateIndex(opCtx,
tmpName,
- *(tmpColl->uuid()),
+ tmpColl->uuid(),
indexToCopy,
false // fromMigrate
);
diff --git a/src/mongo/db/catalog/rename_collection_test.cpp b/src/mongo/db/catalog/rename_collection_test.cpp
index 0b9c2d1639d..903461b9133 100644
--- a/src/mongo/db/catalog/rename_collection_test.cpp
+++ b/src/mongo/db/catalog/rename_collection_test.cpp
@@ -1163,9 +1163,9 @@ TEST_F(RenameCollectionTest, CollectionCatalogMappingRemainsIntactThroughRename)
auto& catalog = CollectionCatalog::get(_opCtx.get());
Collection* sourceColl = _getCollection_inlock(_opCtx.get(), _sourceNss);
ASSERT(sourceColl);
- ASSERT_EQ(sourceColl, catalog.lookupCollectionByUUID(*sourceColl->uuid()));
+ ASSERT_EQ(sourceColl, catalog.lookupCollectionByUUID(sourceColl->uuid()));
ASSERT_OK(renameCollection(_opCtx.get(), _sourceNss, _targetNss, {}));
- ASSERT_EQ(sourceColl, catalog.lookupCollectionByUUID(*sourceColl->uuid()));
+ ASSERT_EQ(sourceColl, catalog.lookupCollectionByUUID(sourceColl->uuid()));
}
TEST_F(RenameCollectionTest, FailRenameCollectionFromReplicatedToUnreplicatedDB) {
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 3ddd265ea1e..4656d7b379a 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -418,7 +418,7 @@ void Cloner::copyIndexes(OperationContext* opCtx,
if (opCtx->writesAreReplicated()) {
for (auto&& infoObj : indexInfoObjs) {
getGlobalServiceContext()->getOpObserver()->onCreateIndex(
- opCtx, collection->ns(), *(collection->uuid()), infoObj, false);
+ opCtx, collection->ns(), collection->uuid(), infoObj, false);
}
}
wunit.commit();
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index c34aebd5bc6..6d0a870a691 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -581,7 +581,7 @@ bool runCreateIndexes(OperationContext* opCtx,
collection,
[opCtx, &ns, collection](const BSONObj& spec) {
opCtx->getServiceContext()->getOpObserver()->onCreateIndex(
- opCtx, ns, *(collection->uuid()), spec, false);
+ opCtx, ns, collection->uuid(), spec, false);
},
MultiIndexBlock::kNoopOnCommitFn));
diff --git a/src/mongo/db/commands/dbcheck.cpp b/src/mongo/db/commands/dbcheck.cpp
index 6cbfc7b6bf9..484cac84eb9 100644
--- a/src/mongo/db/commands/dbcheck.cpp
+++ b/src/mongo/db/commands/dbcheck.cpp
@@ -339,12 +339,6 @@ private:
return false;
}
- auto uuid = collection->uuid();
- // Check if UUID exists.
- if (!uuid) {
- return false;
- }
-
auto[prev, next] = getPrevAndNextUUIDs(opCtx, collection);
// Find and report collection metadata.
@@ -353,7 +347,7 @@ private:
DbCheckOplogCollection entry;
entry.setNss(collection->ns());
- entry.setUuid(*collection->uuid());
+ entry.setUuid(collection->uuid());
if (prev) {
entry.setPrev(*prev);
}
@@ -375,7 +369,7 @@ private:
collectionInfo.options = entry.getOptions();
auto hle = dbCheckCollectionEntry(
- collection->ns(), *collection->uuid(), collectionInfo, collectionInfo, optime);
+ collection->ns(), collection->uuid(), collectionInfo, collectionInfo, optime);
HealthLog::get(opCtx).log(*hle);
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 1b4b9491d7f..25702078378 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -145,9 +145,8 @@ public:
}
BackgroundOperation::assertNoBgOpInProgForNs(toReIndexNss.ns());
- invariant(collection->uuid());
IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(
- collection->uuid().get());
+ collection->uuid());
// This is necessary to set up CurOp and update the Top stats.
OldClientContext ctx(opCtx, toReIndexNss.ns());
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 244acfbd221..7d0d52be379 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -190,7 +190,7 @@ void dropTempCollections(OperationContext* cleanupOpCtx,
->canAcceptWritesFor(cleanupOpCtx, tempNamespace));
BackgroundOperation::assertNoBgOpInProgForNs(tempNamespace.ns());
IndexBuildsCoordinator::get(cleanupOpCtx)
- ->assertNoIndexBuildInProgForCollection(collection->uuid().get());
+ ->assertNoIndexBuildInProgForCollection(collection->uuid());
WriteUnitOfWork wunit(cleanupOpCtx);
uassertStatusOK(db->dropCollection(cleanupOpCtx, tempNamespace));
wunit.commit();
@@ -209,7 +209,7 @@ void dropTempCollections(OperationContext* cleanupOpCtx,
if (auto collection = db->getCollection(cleanupOpCtx, incLong)) {
BackgroundOperation::assertNoBgOpInProgForNs(incLong.ns());
IndexBuildsCoordinator::get(cleanupOpCtx)
- ->assertNoIndexBuildInProgForCollection(collection->uuid().get());
+ ->assertNoIndexBuildInProgForCollection(collection->uuid());
WriteUnitOfWork wunit(cleanupOpCtx);
uassertStatusOK(db->dropCollection(cleanupOpCtx, incLong));
wunit.commit();
@@ -623,7 +623,7 @@ void State::prepTempCollection() {
// Log the createIndex operation.
_opCtx->getServiceContext()->getOpObserver()->onCreateIndex(
- _opCtx, _config.tempNamespace, *(tempColl->uuid()), indexToInsert, false);
+ _opCtx, _config.tempNamespace, tempColl->uuid(), indexToInsert, false);
}
wuow.commit();
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index ab63dec4dbe..7293a0b01cb 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -571,7 +571,9 @@ Status runAggregate(OperationContext* opCtx,
// This is a regular aggregation. Lock the collection or view.
ctx.emplace(opCtx, nss, AutoGetCollection::ViewMode::kViewsPermitted);
collatorToUse.emplace(resolveCollator(opCtx, request, ctx->getCollection()));
- uuid = ctx->getCollection() ? ctx->getCollection()->uuid() : boost::none;
+ if (ctx->getCollection()) {
+ uuid = ctx->getCollection()->uuid();
+ }
}
Collection* collection = ctx ? ctx->getCollection() : nullptr;
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 51373ac0841..f11cb7c1bd4 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -172,7 +172,7 @@ public:
BackgroundOperation::assertNoBgOpInProgForNs(fullNs.ns());
IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(
- collection->uuid().get());
+ collection->uuid());
collection->cappedTruncateAfter(opCtx, end, inc);
diff --git a/src/mongo/db/exec/requires_collection_stage.h b/src/mongo/db/exec/requires_collection_stage.h
index 677336e1fb2..896d9eb5181 100644
--- a/src/mongo/db/exec/requires_collection_stage.h
+++ b/src/mongo/db/exec/requires_collection_stage.h
@@ -58,7 +58,7 @@ public:
RequiresCollectionStageBase(const char* stageType, OperationContext* opCtx, CollectionT coll)
: PlanStage(stageType, opCtx),
_collection(coll),
- _collectionUUID(_collection->uuid().get()),
+ _collectionUUID(_collection->uuid()),
_databaseEpoch(getDatabaseEpoch(_collection)),
_nss(_collection->ns()) {
invariant(_collection);
diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp
index 6f5804d17c4..c741814a663 100644
--- a/src/mongo/db/index_builder.cpp
+++ b/src/mongo/db/index_builder.cpp
@@ -182,7 +182,7 @@ Status IndexBuilder::_build(OperationContext* opCtx,
coll,
[opCtx, coll, &ns](const BSONObj& indexSpec) {
opCtx->getServiceContext()->getOpObserver()->onCreateIndex(
- opCtx, ns, *(coll->uuid()), indexSpec, false);
+ opCtx, ns, coll->uuid(), indexSpec, false);
},
MultiIndexBlock::kNoopOnCommitFn);
if (!status.isOK()) {
diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp
index baba0f332ff..a0e20599e65 100644
--- a/src/mongo/db/index_builds_coordinator_mongod.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod.cpp
@@ -247,7 +247,7 @@ Status IndexBuildsCoordinatorMongod::setCommitQuorum(OperationContext* opCtx,
str::stream() << "Collection '" << nss << "' was not found.");
}
- UUID collectionUUID = *collection->uuid();
+ UUID collectionUUID = collection->uuid();
stdx::unique_lock<stdx::mutex> lk(_mutex);
auto collectionIt = _collectionIndexBuilds.find(collectionUUID);
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index 870c10a7c1f..ca72ef06a95 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -623,7 +623,6 @@ void OpObserverImpl::onCollMod(OperationContext* opCtx,
}
Collection* coll = db->getCollection(opCtx, nss);
- invariant(coll->uuid());
invariant(coll->uuid() == uuid);
invariant(DurableCatalog::get(opCtx)->isEqualToMetadataUUID(opCtx, nss, uuid));
}
diff --git a/src/mongo/db/repair_database_and_check_version.cpp b/src/mongo/db/repair_database_and_check_version.cpp
index f9f2bdb64bb..609db294b31 100644
--- a/src/mongo/db/repair_database_and_check_version.cpp
+++ b/src/mongo/db/repair_database_and_check_version.cpp
@@ -208,12 +208,6 @@ Status ensureCollectionProperties(OperationContext* opCtx,
break;
}
- // We expect all collections to have UUIDs in MongoDB 4.2
- if (!coll->uuid()) {
- error() << "collection " << coll->ns() << " is missing a UUID";
- return downgradeError;
- }
-
// All user-created replicated collections created since MongoDB 4.0 have _id indexes.
auto requiresIndex = coll->requiresIdIndex() && coll->ns().isReplicated();
auto collOptions = DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, coll->ns());
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index 54971bbdee8..df72f0faea8 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -183,7 +183,7 @@ Status _applyOps(OperationContext* opCtx,
// Append completed op, including UUID if available, to 'opsBuilder'.
if (opsBuilder) {
- if (opObj.hasField("ui") || !(collection && collection->uuid())) {
+ if (opObj.hasField("ui") || !collection) {
// No changes needed to operation document.
opsBuilder->append(opObj);
} else {
@@ -191,7 +191,7 @@ Status _applyOps(OperationContext* opCtx,
auto uuid = collection->uuid();
BSONObjBuilder opBuilder;
opBuilder.appendElements(opObj);
- uuid->appendToBuilder(&opBuilder, "ui");
+ uuid.appendToBuilder(&opBuilder, "ui");
opsBuilder->append(opBuilder.obj());
}
}
diff --git a/src/mongo/db/repl/dbcheck.cpp b/src/mongo/db/repl/dbcheck.cpp
index 8a3827db897..cd546957b8d 100644
--- a/src/mongo/db/repl/dbcheck.cpp
+++ b/src/mongo/db/repl/dbcheck.cpp
@@ -241,7 +241,7 @@ std::string hashCollectionInfo(const DbCheckCollectionInformation& info) {
std::pair<boost::optional<UUID>, boost::optional<UUID>> getPrevAndNextUUIDs(
OperationContext* opCtx, Collection* collection) {
const CollectionCatalog& catalog = CollectionCatalog::get(opCtx);
- const UUID uuid = *collection->uuid();
+ const UUID uuid = collection->uuid();
std::vector<CollectionUUID> collectionUUIDs =
catalog.getAllCollectionUUIDsFromDb(collection->ns().db());
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 5d5b2f16ef5..5ee855adb88 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -268,7 +268,7 @@ void createIndexForApplyOps(OperationContext* opCtx,
Lock::TempRelease release(opCtx->lockState());
// TempRelease cannot fail because no recursive locks should be taken.
invariant(!opCtx->lockState()->isLocked());
- auto collUUID = *indexCollection->uuid();
+ auto collUUID = indexCollection->uuid();
auto indexBuildUUID = UUID::gen();
auto indexBuildsCoordinator = IndexBuildsCoordinator::get(opCtx);
diff --git a/src/mongo/db/repl/rollback_test_fixture.h b/src/mongo/db/repl/rollback_test_fixture.h
index 8f03742bfd4..f4c4ce5a13a 100644
--- a/src/mongo/db/repl/rollback_test_fixture.h
+++ b/src/mongo/db/repl/rollback_test_fixture.h
@@ -177,7 +177,7 @@ public:
nsOrUUID.uuid() == _setCollectionCountStatusUUID) {
return *_setCollectionCountStatus;
}
- _newCounts[nsOrUUID.uuid().get()] = newCount;
+ _newCounts[*nsOrUUID.uuid()] = newCount;
return Status::OK();
}
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index a1b76201cdf..a94dc1c42fb 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -784,7 +784,7 @@ void dropCollection(OperationContext* opCtx,
RemoveSaver removeSaver("rollback", "", nss.ns());
log() << "Rolling back createCollection on " << nss
<< ": Preparing to write documents to a rollback file for a collection " << nss
- << " with uuid " << *(collection->uuid()) << " to "
+ << " with uuid " << collection->uuid() << " to "
<< removeSaver.file().generic_string();
// Performs a collection scan and writes all documents in the collection to disk
@@ -860,12 +860,12 @@ void renameOutOfTheWay(OperationContext* opCtx, RenameCollectionInfo info, Datab
LOG(2) << "Attempted to rename collection from " << info.renameFrom << " to " << info.renameTo
<< " but " << info.renameTo << " exists already. Temporarily renaming collection "
- << info.renameTo << " with UUID " << collection->uuid().get() << " out of the way to "
+ << info.renameTo << " with UUID " << collection->uuid() << " out of the way to "
<< tempNss;
// Renaming the collection that was clashing with the attempted rename
// operation to a different collection name.
- auto uuid = collection->uuid().get();
+ auto uuid = collection->uuid();
auto renameStatus = renameCollectionForRollback(opCtx, tempNss, uuid);
if (!renameStatus.isOK()) {
diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp
index 84a3a8d803b..a49055ba339 100644
--- a/src/mongo/db/repl/rs_rollback_test.cpp
+++ b/src/mongo/db/repl/rs_rollback_test.cpp
@@ -89,7 +89,7 @@ OplogInterfaceMock::Operation makeDropIndexOplogEntry(Collection* collection,
BSON("ts" << Timestamp(Seconds(time), 0) << "op"
<< "c"
<< "ui"
- << collection->uuid().get()
+ << collection->uuid()
<< "ns"
<< "test.$cmd"
<< "o"
@@ -116,7 +116,7 @@ OplogInterfaceMock::Operation makeCreateIndexOplogEntry(Collection* collection,
<< "ns"
<< "test.$cmd"
<< "ui"
- << collection->uuid().get()
+ << collection->uuid()
<< "o"
<< indexSpec),
RecordId(time));
@@ -385,11 +385,11 @@ TEST_F(RSRollbackTest, RollbackDeleteNoDocumentAtSourceCollectionExistsNonCapped
options.uuid = UUID::gen();
auto coll = _createCollection(_opCtx.get(), "test.t", options);
_testRollbackDelete(
- _opCtx.get(), _coordinator, _replicationProcess.get(), coll->uuid().get(), BSONObj());
+ _opCtx.get(), _coordinator, _replicationProcess.get(), coll->uuid(), BSONObj());
ASSERT_EQUALS(
0,
_testRollbackDelete(
- _opCtx.get(), _coordinator, _replicationProcess.get(), coll->uuid().get(), BSONObj()));
+ _opCtx.get(), _coordinator, _replicationProcess.get(), coll->uuid(), BSONObj()));
}
TEST_F(RSRollbackTest, RollbackDeleteNoDocumentAtSourceCollectionExistsCapped) {
@@ -401,7 +401,7 @@ TEST_F(RSRollbackTest, RollbackDeleteNoDocumentAtSourceCollectionExistsCapped) {
ASSERT_EQUALS(
0,
_testRollbackDelete(
- _opCtx.get(), _coordinator, _replicationProcess.get(), coll->uuid().get(), BSONObj()));
+ _opCtx.get(), _coordinator, _replicationProcess.get(), coll->uuid(), BSONObj()));
}
TEST_F(RSRollbackTest, RollbackDeleteRestoreDocument) {
@@ -410,12 +410,10 @@ TEST_F(RSRollbackTest, RollbackDeleteRestoreDocument) {
options.uuid = UUID::gen();
auto coll = _createCollection(_opCtx.get(), "test.t", options);
BSONObj doc = BSON("_id" << 0 << "a" << 1);
- _testRollbackDelete(
- _opCtx.get(), _coordinator, _replicationProcess.get(), coll->uuid().get(), doc);
- ASSERT_EQUALS(
- 1,
- _testRollbackDelete(
- _opCtx.get(), _coordinator, _replicationProcess.get(), coll->uuid().get(), doc));
+ _testRollbackDelete(_opCtx.get(), _coordinator, _replicationProcess.get(), coll->uuid(), doc);
+ ASSERT_EQUALS(1,
+ _testRollbackDelete(
+ _opCtx.get(), _coordinator, _replicationProcess.get(), coll->uuid(), doc));
}
TEST_F(RSRollbackTest, RollbackInsertDocumentWithNoId) {
@@ -680,13 +678,8 @@ TEST_F(RSRollbackTest, RollingBackCreateIndexAndRenameWithLongName) {
// The old collection name plus the index name is not too long, but the new collection name
// plus the index name is too long.
auto newName = NamespaceString("test", "collcollcollcollcoll");
- auto renameCollectionOperation =
- makeRenameCollectionOplogEntry(newName,
- nss,
- collection->uuid().get(),
- boost::none,
- false,
- OpTime(Timestamp(Seconds(2), 0), 1));
+ auto renameCollectionOperation = makeRenameCollectionOplogEntry(
+ newName, nss, collection->uuid(), boost::none, false, OpTime(Timestamp(Seconds(2), 0), 1));
RollbackSourceMock rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
commonOperation,
@@ -796,7 +789,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingIndexName) {
<< "ns"
<< "test.$cmd"
<< "ui"
- << collection->uuid().get()
+ << collection->uuid()
<< "o"
<< command),
RecordId(2));
@@ -1028,7 +1021,7 @@ TEST_F(RSRollbackTest, RollbackDropCollectionCommand) {
std::make_pair(BSON("ts" << dropTime.getTimestamp() << "t" << dropTime.getTerm() << "op"
<< "c"
<< "ui"
- << coll->uuid().get()
+ << coll->uuid()
<< "ns"
<< "test.t"
<< "o"
@@ -1075,7 +1068,7 @@ TEST_F(RSRollbackTest, RollbackRenameCollectionInSameDatabaseCommand) {
CollectionOptions options;
options.uuid = UUID::gen();
auto collection = _createCollection(_opCtx.get(), "test.y", options);
- UUID collectionUUID = collection->uuid().get();
+ UUID collectionUUID = collection->uuid();
OpTime renameTime = OpTime(Timestamp(2, 0), 5);
@@ -1133,7 +1126,7 @@ TEST_F(RSRollbackTest,
// Create the collection and save its UUID.
auto collection = _createCollection(_opCtx.get(), renameToNss, options);
- auto collectionUUID = collection->uuid().get();
+ auto collectionUUID = collection->uuid();
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -1186,12 +1179,12 @@ TEST_F(RSRollbackTest, RollbackRenameCollectionInDatabaseWithDropTargetTrueComma
droppedCollOptions.uuid = UUID::gen();
auto droppedColl = _createCollection(_opCtx.get(), dpns, droppedCollOptions);
_dropPendingCollectionReaper->addDropPendingNamespace(dropTime, dpns);
- auto droppedCollectionUUID = droppedColl->uuid().get();
+ auto droppedCollectionUUID = droppedColl->uuid();
CollectionOptions renamedCollOptions;
renamedCollOptions.uuid = UUID::gen();
auto renamedCollection = _createCollection(_opCtx.get(), "test.y", renamedCollOptions);
- auto renamedCollectionUUID = renamedCollection->uuid().get();
+ auto renamedCollectionUUID = renamedCollection->uuid();
auto commonOperation = makeOpAndRecordId(1);
auto renameCollectionOperation = makeRenameCollectionOplogEntry(NamespaceString("test.x"),
@@ -1227,11 +1220,11 @@ TEST_F(RSRollbackTest, RollbackRenameCollectionInDatabaseWithDropTargetTrueComma
AutoGetCollectionForReadCommand renamedColl(_opCtx.get(), NamespaceString("test.x"));
ASSERT_TRUE(renamedColl.getCollection());
- ASSERT_EQUALS(renamedColl.getCollection()->uuid().get(), renamedCollectionUUID);
+ ASSERT_EQUALS(renamedColl.getCollection()->uuid(), renamedCollectionUUID);
AutoGetCollectionForReadCommand droppedColl(_opCtx.get(), NamespaceString("test.y"));
ASSERT_TRUE(droppedColl.getCollection());
- ASSERT_EQUALS(droppedColl.getCollection()->uuid().get(), droppedCollectionUUID);
+ ASSERT_EQUALS(droppedColl.getCollection()->uuid(), droppedCollectionUUID);
}
}
@@ -1244,10 +1237,10 @@ void _testRollbackRenamingCollectionsToEachOther(OperationContext* opCtx,
createOplog(opCtx);
auto collection1 = RollbackTest::_createCollection(opCtx, "test.y", coll1Options);
- auto collection1UUID = collection1->uuid().get();
+ auto collection1UUID = collection1->uuid();
auto collection2 = RollbackTest::_createCollection(opCtx, "test.x", coll2Options);
- auto collection2UUID = collection2->uuid().get();
+ auto collection2UUID = collection2->uuid();
ASSERT_NOT_EQUALS(collection1UUID, collection2UUID);
@@ -1291,11 +1284,11 @@ void _testRollbackRenamingCollectionsToEachOther(OperationContext* opCtx,
AutoGetCollectionForReadCommand coll1(opCtx, NamespaceString("test.x"));
ASSERT_TRUE(coll1.getCollection());
- ASSERT_EQUALS(coll1.getCollection()->uuid().get(), collection1UUID);
+ ASSERT_EQUALS(coll1.getCollection()->uuid(), collection1UUID);
AutoGetCollectionForReadCommand coll2(opCtx, NamespaceString("test.y"));
ASSERT_TRUE(coll2.getCollection());
- ASSERT_EQUALS(coll2.getCollection()->uuid().get(), collection2UUID);
+ ASSERT_EQUALS(coll2.getCollection()->uuid(), collection2UUID);
}
}
@@ -1336,14 +1329,14 @@ TEST_F(RSRollbackTest, RollbackDropCollectionThenRenameCollectionToDroppedCollec
CollectionOptions renamedCollOptions;
renamedCollOptions.uuid = UUID::gen();
auto renamedCollection = _createCollection(_opCtx.get(), "test.x", renamedCollOptions);
- auto renamedCollectionUUID = renamedCollection->uuid().get();
+ auto renamedCollectionUUID = renamedCollection->uuid();
OpTime dropTime = OpTime(Timestamp(2, 0), 5);
auto dpns = NamespaceString("test.x").makeDropPendingNamespace(dropTime);
CollectionOptions droppedCollOptions;
droppedCollOptions.uuid = UUID::gen();
auto droppedCollection = _createCollection(_opCtx.get(), dpns, droppedCollOptions);
- auto droppedCollectionUUID = droppedCollection->uuid().get();
+ auto droppedCollectionUUID = droppedCollection->uuid();
_dropPendingCollectionReaper->addDropPendingNamespace(dropTime, dpns);
auto commonOperation = makeOpAndRecordId(1);
@@ -1393,11 +1386,11 @@ TEST_F(RSRollbackTest, RollbackDropCollectionThenRenameCollectionToDroppedCollec
AutoGetCollectionForReadCommand autoCollX(_opCtx.get(), NamespaceString("test.x"));
ASSERT_TRUE(autoCollX.getCollection());
- ASSERT_EQUALS(autoCollX.getCollection()->uuid().get(), droppedCollectionUUID);
+ ASSERT_EQUALS(autoCollX.getCollection()->uuid(), droppedCollectionUUID);
AutoGetCollectionForReadCommand autoCollY(_opCtx.get(), NamespaceString("test.y"));
ASSERT_TRUE(autoCollY.getCollection());
- ASSERT_EQUALS(autoCollY.getCollection()->uuid().get(), renamedCollectionUUID);
+ ASSERT_EQUALS(autoCollY.getCollection()->uuid(), renamedCollectionUUID);
}
}
@@ -1407,12 +1400,12 @@ TEST_F(RSRollbackTest, RollbackRenameCollectionThenCreateNewCollectionWithOldNam
CollectionOptions renamedCollOptions;
renamedCollOptions.uuid = UUID::gen();
auto renamedCollection = _createCollection(_opCtx.get(), "test.y", renamedCollOptions);
- auto renamedCollectionUUID = renamedCollection->uuid().get();
+ auto renamedCollectionUUID = renamedCollection->uuid();
CollectionOptions createdCollOptions;
createdCollOptions.uuid = UUID::gen();
auto createdCollection = _createCollection(_opCtx.get(), "test.x", createdCollOptions);
- auto createdCollectionUUID = createdCollection->uuid().get();
+ auto createdCollectionUUID = createdCollection->uuid();
auto commonOperation = makeOpAndRecordId(1);
@@ -1457,7 +1450,7 @@ TEST_F(RSRollbackTest, RollbackRenameCollectionThenCreateNewCollectionWithOldNam
AutoGetCollectionForReadCommand renamedColl(_opCtx.get(), NamespaceString("test.x"));
ASSERT_TRUE(renamedColl.getCollection());
- ASSERT_EQUALS(renamedColl.getCollection()->uuid().get(), renamedCollectionUUID);
+ ASSERT_EQUALS(renamedColl.getCollection()->uuid(), renamedCollectionUUID);
AutoGetCollectionForReadCommand createdColl(_opCtx.get(), NamespaceString("test.y"));
ASSERT_FALSE(createdColl.getCollection());
@@ -1474,7 +1467,7 @@ TEST_F(RSRollbackTest, RollbackCollModCommandFailsIfRBIDChangesWhileSyncingColle
auto collModOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
<< "c"
<< "ui"
- << coll->uuid().get()
+ << coll->uuid()
<< "ns"
<< "test.t"
<< "o"
@@ -1585,7 +1578,7 @@ TEST_F(RSRollbackTest, RollbackApplyOpsCommand) {
_opCtx.get(), InsertStatement(BSON("_id" << 4)), nullOpDebug, false));
wuow.commit();
}
- UUID uuid = coll->uuid().get();
+ UUID uuid = coll->uuid();
const auto commonOperation = makeOpAndRecordId(1);
const auto applyOpsOperation =
std::make_pair(makeApplyOpsOplogEntry(Timestamp(Seconds(2), 0),
@@ -1743,7 +1736,7 @@ TEST_F(RSRollbackTest, RollbackCreateCollectionCommand) {
auto createCollectionOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
<< "c"
<< "ui"
- << coll->uuid().get()
+ << coll->uuid()
<< "ns"
<< "test.t"
<< "o"
@@ -1780,7 +1773,7 @@ TEST_F(RSRollbackTest, RollbackCollectionModificationCommand) {
<< "validationLevel"
<< "strict");
auto collectionModificationOperation =
- makeCommandOp(Timestamp(Seconds(2), 0), coll->uuid().get(), "test.t", collModCmd, 2);
+ makeCommandOp(Timestamp(Seconds(2), 0), coll->uuid(), "test.t", collModCmd, 2);
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -1944,7 +1937,7 @@ TEST_F(RSRollbackTest, RollbackCollectionModificationCommandInvalidCollectionOpt
<< "validationLevel"
<< "strict");
auto collectionModificationOperation =
- makeCommandOp(Timestamp(Seconds(2), 0), coll->uuid().get(), "test.t", collModCmd, 2);
+ makeCommandOp(Timestamp(Seconds(2), 0), coll->uuid(), "test.t", collModCmd, 2);
class RollbackSourceLocal : public RollbackSourceMock {
@@ -2418,7 +2411,7 @@ TEST_F(RSRollbackTest, RollbackFetchesTransactionOperationBeforeCommonPoint) {
auto txnTable = _createCollection(_opCtx.get(), "config.transactions", options);
auto commonOperation = makeOpAndRecordId(10);
- UUID uuid = *coll->uuid();
+ UUID uuid = coll->uuid();
auto lsid = makeLogicalSessionIdForTest();
auto commitTxnEntry =
BSON("ts" << Timestamp(Seconds(10), 12) << "t" << 10LL << "op"
@@ -2536,7 +2529,7 @@ TEST_F(RSRollbackTest, RollbackFetchesTransactionOperationBeforeCommonPoint) {
} rollbackSource(std::unique_ptr<OplogInterface>(
new OplogInterfaceMock({commonOperation, operationBeforeCommonPoint})),
- *txnTable->uuid());
+ txnTable->uuid());
ASSERT_OK(syncRollback(_opCtx.get(),
OplogInterfaceMock({commitTxnOperation,
@@ -2562,7 +2555,7 @@ TEST_F(RSRollbackTest, RollbackIncompleteTransactionReturnsUnrecoverableRollback
auto txnTable = _createCollection(_opCtx.get(), "config.transactions", options);
auto commonOperation = makeOpAndRecordId(10);
- UUID uuid = *coll->uuid();
+ UUID uuid = coll->uuid();
auto lsid = makeLogicalSessionIdForTest();
auto commitTxnEntry =
BSON("ts" << Timestamp(Seconds(10), 12) << "t" << 10LL << "op"
@@ -2633,7 +2626,7 @@ TEST_F(RSRollbackTest, RollbackIncompleteTransactionReturnsUnrecoverableRollback
private:
UUID _txnTableUuid;
} rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({commonOperation})),
- *txnTable->uuid());
+ txnTable->uuid());
auto status = syncRollback(
diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp
index 0c11fb1432b..90f18f7eb49 100644
--- a/src/mongo/db/repl/sync_tail_test.cpp
+++ b/src/mongo/db/repl/sync_tail_test.cpp
@@ -2767,7 +2767,7 @@ TEST_F(SyncTailTxnTableTest, RetryableWriteThenMultiStatementTxnWriteOnSameSessi
<< "ns"
<< nss().ns()
<< "ui"
- << *uuid
+ << uuid
<< "o"
<< BSON("_id" << 2)))
<< "partialTxn"
@@ -2826,7 +2826,7 @@ TEST_F(SyncTailTxnTableTest, MultiStatementTxnWriteThenRetryableWriteOnSameSessi
<< "ns"
<< nss().ns()
<< "ui"
- << *uuid
+ << uuid
<< "o"
<< BSON("_id" << 2)))
<< "partialTxn"
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 01d36506bcf..481b0d2a707 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -605,7 +605,7 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
<< "Cannot create collection "
<< nss.ns()
<< " because we already have an identically named collection with UUID "
- << (collection->uuid() ? collection->uuid()->toString() : "(none)")
+ << collection->uuid()
<< ", which differs from the donor's UUID "
<< (donorUUID ? donorUUID->toString() : "(none)")
<< ". Manually drop the collection on this shard if it contains data from "
@@ -674,7 +674,7 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
// before the index is added to the index catalog for correct rollback operation.
// See SERVER-35780 and SERVER-35070.
serviceContext->getOpObserver()->onCreateIndex(
- opCtx, collection->ns(), *(collection->uuid()), spec, true /* fromMigrate */);
+ opCtx, collection->ns(), collection->uuid(), spec, true /* fromMigrate */);
// Since the collection is empty, we can add and commit the index catalog entry
// within a single WUOW.
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index ccd90364e31..e292320ba53 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -160,9 +160,7 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
autoColl.getCollection());
boost::optional<UUID> collectionUUID;
- if (autoColl.getCollection()->uuid()) {
- collectionUUID = autoColl.getCollection()->uuid().value();
- }
+ collectionUUID = autoColl.getCollection()->uuid();
auto optMetadata =
CollectionShardingState::get(opCtx, getNss())->getCurrentMetadataIfKnown();
diff --git a/src/mongo/db/system_index.cpp b/src/mongo/db/system_index.cpp
index d6f6ebca0d2..a09cc9c64dc 100644
--- a/src/mongo/db/system_index.cpp
+++ b/src/mongo/db/system_index.cpp
@@ -182,7 +182,7 @@ Status verifySystemIndexes(OperationContext* opCtx) {
if (indexes.empty()) {
try {
systemUsersFuture = generateSystemIndexForExistingCollection(
- opCtx, collection->uuid().get(), systemUsers, v3SystemUsersIndexSpec);
+ opCtx, collection->uuid(), systemUsers, v3SystemUsersIndexSpec);
} catch (...) {
return exceptionToStatus();
}
@@ -200,7 +200,7 @@ Status verifySystemIndexes(OperationContext* opCtx) {
if (indexes.empty()) {
try {
systemRolesFuture = generateSystemIndexForExistingCollection(
- opCtx, collection->uuid().get(), systemRoles, v3SystemRolesIndexSpec);
+ opCtx, collection->uuid(), systemRoles, v3SystemRolesIndexSpec);
} catch (...) {
return exceptionToStatus();
}
@@ -252,7 +252,7 @@ void createSystemIndexes(OperationContext* opCtx, Collection* collection) {
}
if (!indexSpec.isEmpty()) {
opCtx->getServiceContext()->getOpObserver()->onCreateIndex(
- opCtx, ns, *(collection->uuid()), indexSpec, false /* fromMigrate */);
+ opCtx, ns, collection->uuid(), indexSpec, false /* fromMigrate */);
// Note that the opObserver is called prior to creating the index. This ensures the index
// write gets the same storage timestamp as the oplog entry.
fassert(40456,
diff --git a/src/mongo/dbtests/storage_timestamp_tests.cpp b/src/mongo/dbtests/storage_timestamp_tests.cpp
index 76e74a1d6e0..4a0d996cd55 100644
--- a/src/mongo/dbtests/storage_timestamp_tests.cpp
+++ b/src/mongo/dbtests/storage_timestamp_tests.cpp
@@ -279,7 +279,7 @@ public:
coll,
[&](const BSONObj& indexSpec) {
_opCtx->getServiceContext()->getOpObserver()->onCreateIndex(
- _opCtx, coll->ns(), *(coll->uuid()), indexSpec, false);
+ _opCtx, coll->ns(), coll->uuid(), indexSpec, false);
},
MultiIndexBlock::kNoopOnCommitFn));
// The timestamping repsponsibility is placed on the caller rather than the
@@ -713,7 +713,7 @@ public:
<< "ns"
<< nss.ns()
<< "ui"
- << autoColl.getCollection()->uuid().get()
+ << autoColl.getCollection()->uuid()
<< "o"
<< BSON("_id" << idx))
<< BSON("ts" << firstInsertTime.addTicks(idx).asTimestamp() << "t" << 1LL
@@ -778,7 +778,7 @@ public:
oplogEntryBuilder << "v" << 2 << "op"
<< "i"
- << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid().get();
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid();
auto oplogEntry = oplogEntryBuilder.done();
ASSERT_OK(repl::SyncTail::syncApply(
@@ -834,7 +834,7 @@ public:
<< "ns"
<< nss.ns()
<< "ui"
- << autoColl.getCollection()->uuid().get()
+ << autoColl.getCollection()->uuid()
<< "o"
<< BSON("_id" << num))})
.getStatus());
@@ -899,7 +899,7 @@ public:
<< "ns"
<< nss.ns()
<< "ui"
- << autoColl.getCollection()->uuid().get()
+ << autoColl.getCollection()->uuid()
<< "o2"
<< BSON("_id" << 0)
<< "o"
@@ -944,7 +944,7 @@ public:
<< "ns"
<< nss.ns()
<< "ui"
- << autoColl.getCollection()->uuid().get()
+ << autoColl.getCollection()->uuid()
<< "o"
<< BSON("_id" << 0 << "field" << 0)),
BSON("ts" << insertTime.addTicks(1).asTimestamp() << "t" << 1LL << "op"
@@ -952,7 +952,7 @@ public:
<< "ns"
<< nss.ns()
<< "ui"
- << autoColl.getCollection()->uuid().get()
+ << autoColl.getCollection()->uuid()
<< "o"
<< BSON("_id" << 0))}));
@@ -998,7 +998,7 @@ public:
<< "ns"
<< nss.ns()
<< "ui"
- << autoColl.getCollection()->uuid().get()
+ << autoColl.getCollection()->uuid()
<< "o"
<< BSON("_id" << 0)),
BSON("op"
@@ -1006,7 +1006,7 @@ public:
<< "ns"
<< nss.ns()
<< "ui"
- << autoColl.getCollection()->uuid().get()
+ << autoColl.getCollection()->uuid()
<< "o"
<< BSON("_id" << 1))});
ASSERT_OK(swResult);
@@ -1057,7 +1057,7 @@ public:
<< "ns"
<< nss.ns()
<< "ui"
- << autoColl.getCollection()->uuid().get()
+ << autoColl.getCollection()->uuid()
<< "o"
<< BSON("_id" << 0 << "field" << 0)),
BSON("op"
@@ -1065,7 +1065,7 @@ public:
<< "ns"
<< nss.ns()
<< "ui"
- << autoColl.getCollection()->uuid().get()
+ << autoColl.getCollection()->uuid()
<< "o"
<< BSON("_id" << 0))});
ASSERT_OK(swResult);
@@ -1212,34 +1212,33 @@ public:
{ ASSERT_FALSE(AutoGetCollectionForReadCommand(_opCtx, nss2).getCollection()); }
BSONObjBuilder resultBuilder;
- auto swResult =
- doNonAtomicApplyOps(dbName,
- {
- BSON("ts" << presentTs << "t" << 1LL << "op"
- << "i"
- << "ns"
- << nss1.ns()
- << "ui"
- << autoColl.getCollection()->uuid().get()
- << "o"
- << doc1),
- BSON("ts" << futureTs << "t" << 1LL << "op"
- << "c"
- << "ui"
- << uuid2
- << "ns"
- << nss2.getCommandNS().ns()
- << "o"
- << BSON("create" << nss2.coll())),
- BSON("ts" << insert2Ts << "t" << 1LL << "op"
- << "i"
- << "ns"
- << nss2.ns()
- << "ui"
- << uuid2
- << "o"
- << doc2),
- });
+ auto swResult = doNonAtomicApplyOps(dbName,
+ {
+ BSON("ts" << presentTs << "t" << 1LL << "op"
+ << "i"
+ << "ns"
+ << nss1.ns()
+ << "ui"
+ << autoColl.getCollection()->uuid()
+ << "o"
+ << doc1),
+ BSON("ts" << futureTs << "t" << 1LL << "op"
+ << "c"
+ << "ui"
+ << uuid2
+ << "ns"
+ << nss2.getCommandNS().ns()
+ << "o"
+ << BSON("create" << nss2.coll())),
+ BSON("ts" << insert2Ts << "t" << 1LL << "op"
+ << "i"
+ << "ns"
+ << nss2.ns()
+ << "ui"
+ << uuid2
+ << "o"
+ << doc2),
+ });
ASSERT_OK(swResult);
}
@@ -1329,7 +1328,7 @@ public:
UUID uuid = UUID::gen();
{
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
- uuid = autoColl.getCollection()->uuid().get();
+ uuid = autoColl.getCollection()->uuid();
}
auto indexName = "a_1";
auto indexSpec =
@@ -1414,7 +1413,7 @@ public:
UUID uuid = UUID::gen();
{
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
- uuid = autoColl.getCollection()->uuid().get();
+ uuid = autoColl.getCollection()->uuid();
}
auto indexName = "a_1";
auto indexSpec =
@@ -2019,7 +2018,7 @@ public:
if (SimulatePrimary) {
// The timestamping responsibility for each index is placed on the caller.
_opCtx->getServiceContext()->getOpObserver()->onCreateIndex(
- _opCtx, nss, *(autoColl.getCollection()->uuid()), indexSpec, false);
+ _opCtx, nss, autoColl.getCollection()->uuid(), indexSpec, false);
} else {
ASSERT_OK(_opCtx->recoveryUnit()->setTimestamp(
_clock->getClusterTime().asTimestamp()));
@@ -2200,7 +2199,7 @@ public:
if (SimulatePrimary) {
// The timestamping responsibility for each index is placed on the caller.
_opCtx->getServiceContext()->getOpObserver()->onCreateIndex(
- _opCtx, nss, *(autoColl.getCollection()->uuid()), indexSpec, false);
+ _opCtx, nss, autoColl.getCollection()->uuid(), indexSpec, false);
} else {
ASSERT_OK(_opCtx->recoveryUnit()->setTimestamp(
_clock->getClusterTime().asTimestamp()));
@@ -2495,7 +2494,7 @@ public:
UUID uuid = UUID::gen();
{
AutoGetCollectionForRead autoColl(_opCtx, ns);
- uuid = autoColl.getCollection()->uuid().get();
+ uuid = autoColl.getCollection()->uuid();
ASSERT_EQ(itCount(autoColl.getCollection()), 0);
}
@@ -2609,7 +2608,7 @@ public:
// Create the collection and insert a document.
reset(nss);
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
- collUUID = *(autoColl.getCollection()->uuid());
+ collUUID = autoColl.getCollection()->uuid();
WriteUnitOfWork wuow(_opCtx);
insertDocument(autoColl.getCollection(),
InsertStatement(doc, setupStart.asTimestamp(), presentTerm));
@@ -2807,7 +2806,7 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
auto coll = autoColl.getCollection();
ASSERT(coll);
- ui = coll->uuid().get();
+ ui = coll->uuid();
}
presentTs = _clock->getClusterTime().asTimestamp();
@@ -2977,18 +2976,17 @@ public:
assertFilteredDocumentAtTimestamp(coll, query2, nullTs, doc2);
// Implicit commit oplog entry should exist at commitEntryTs.
- const auto commitFilter =
- BSON("ts" << commitEntryTs << "o"
- << BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << coll->uuid().get()
- << "o"
- << doc2))
- << "count"
- << 2));
+ const auto commitFilter = BSON(
+ "ts" << commitEntryTs << "o" << BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns"
+ << nss.ns()
+ << "ui"
+ << coll->uuid()
+ << "o"
+ << doc2))
+ << "count"
+ << 2));
assertOplogDocumentExistsAtTimestamp(commitFilter, presentTs, false);
assertOplogDocumentExistsAtTimestamp(commitFilter, beforeTxnTs, false);
assertOplogDocumentExistsAtTimestamp(commitFilter, firstOplogEntryTs, false);
@@ -3011,7 +3009,7 @@ public:
<< "ns"
<< nss.ns()
<< "ui"
- << coll->uuid().get()
+ << coll->uuid()
<< "o"
<< doc))
<< "partialTxn"
@@ -3190,7 +3188,7 @@ public:
<< "ns"
<< nss.ns()
<< "ui"
- << coll->uuid().get()
+ << coll->uuid()
<< "o"
<< doc))
<< "partialTxn"
@@ -3202,20 +3200,19 @@ public:
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, commitEntryTs, true);
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, nullTs, true);
// The prepare oplog entry should exist at prepareEntryTs and onwards.
- const auto prepareOplogEntryFilter =
- BSON("ts" << prepareEntryTs << "o"
- << BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << coll->uuid().get()
- << "o"
- << doc2))
- << "prepare"
- << true
- << "count"
- << 2));
+ const auto prepareOplogEntryFilter = BSON(
+ "ts" << prepareEntryTs << "o" << BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns"
+ << nss.ns()
+ << "ui"
+ << coll->uuid()
+ << "o"
+ << doc2))
+ << "prepare"
+ << true
+ << "count"
+ << 2));
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, presentTs, false);
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, beforeTxnTs, false);
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, firstOplogEntryTs, false);
@@ -3324,7 +3321,7 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
auto coll = autoColl.getCollection();
ASSERT(coll);
- ui = coll->uuid().get();
+ ui = coll->uuid();
}
// The prepare oplog entry should exist at firstOplogEntryTs and onwards.