Diffstat (limited to 'src')
77 files changed, 481 insertions, 323 deletions
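The pattern below repeats across most of these files: read-only RecordId parameters change from pass-by-value to pass-by-const-reference, and call sites that give up ownership switch to std::move. The motivation is the RecordId rewrite at the bottom of this diff: the class now owns a heap allocation for large string keys, so a by-value copy is no longer trivially cheap. A minimal sketch of the two conversions, using a hypothetical Id type standing in for RecordId:

#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for the patched RecordId: copying may heap-allocate
// when the id holds a large binary key.
struct Id {
    std::string key;
};

// Read-only access borrows the caller's id instead of copying it
// (as in docFor, findDoc, deleteDocument, updateDocument below).
bool matches(const Id& id, const Id& probe) {
    return id.key == probe.key;
}

// Ownership transfers move instead of copy
// (as in records.emplace_back(Record{std::move(recordId), ...}) below).
void stash(std::vector<Id>& out, Id id) {
    out.push_back(std::move(id));
}

int main() {
    std::vector<Id> ids;
    Id a{std::string(1024, 'x')};
    matches(a, a);            // no copy of the 1 KiB key
    stash(ids, std::move(a)); // the key's buffer is stolen, not duplicated
    return 0;
}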
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h index 83ef0383da8..b0dafbe1d5a 100644 --- a/src/mongo/db/catalog/collection.h +++ b/src/mongo/db/catalog/collection.h @@ -344,14 +344,14 @@ public: virtual bool requiresIdIndex() const = 0; - virtual Snapshotted<BSONObj> docFor(OperationContext* opCtx, RecordId loc) const = 0; + virtual Snapshotted<BSONObj> docFor(OperationContext* opCtx, const RecordId& loc) const = 0; /** * @param out - contents set to the right docs if exists, or nothing. * @return true iff loc exists */ virtual bool findDoc(OperationContext* opCtx, - RecordId loc, + const RecordId& loc, Snapshotted<BSONObj>* out) const = 0; virtual std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx, @@ -363,7 +363,7 @@ public: */ virtual void deleteDocument(OperationContext* opCtx, StmtId stmtId, - RecordId loc, + const RecordId& loc, OpDebug* opDebug, bool fromMigrate = false, bool noWarn = false, @@ -388,7 +388,7 @@ public: virtual void deleteDocument(OperationContext* opCtx, Snapshotted<BSONObj> doc, StmtId stmtId, - RecordId loc, + const RecordId& loc, OpDebug* opDebug, bool fromMigrate = false, bool noWarn = false, @@ -449,7 +449,7 @@ public: * @return the post update location of the doc (may or may not be the same as oldLocation) */ virtual RecordId updateDocument(OperationContext* opCtx, - RecordId oldLocation, + const RecordId& oldLocation, const Snapshotted<BSONObj>& oldDoc, const BSONObj& newDoc, bool indexesAffected, @@ -467,7 +467,7 @@ public: */ virtual StatusWith<RecordData> updateDocumentWithDamages( OperationContext* opCtx, - RecordId loc, + const RecordId& loc, const Snapshotted<RecordData>& oldRec, const char* damageSource, const mutablebson::DamageVector& damages, @@ -495,7 +495,7 @@ public: * on the collection. 
*/ virtual void cappedTruncateAfter(OperationContext* opCtx, - RecordId end, + const RecordId& end, bool inclusive) const = 0; /** @@ -816,7 +816,7 @@ public: const CollectionPtr& yieldableCollection, PlanYieldPolicy::YieldPolicy yieldPolicy, ScanDirection scanDirection, - boost::optional<RecordId> resumeAfterRecordId = boost::none) const = 0; + const boost::optional<RecordId>& resumeAfterRecordId = boost::none) const = 0; virtual void indexBuildSuccess(OperationContext* opCtx, IndexCatalogEntry* index) = 0; diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp index 67864f91184..3a5401e50b7 100644 --- a/src/mongo/db/catalog/collection_impl.cpp +++ b/src/mongo/db/catalog/collection_impl.cpp @@ -437,7 +437,7 @@ CollectionImpl::CollectionImpl(OperationContext* opCtx, const CollectionOptions& options, std::unique_ptr<RecordStore> recordStore) : _ns(nss), - _catalogId(catalogId), + _catalogId(std::move(catalogId)), _uuid(options.uuid.get()), _shared(std::make_shared<SharedState>(this, std::move(recordStore), options)), _indexCatalog(std::make_unique<IndexCatalogImpl>()) {} @@ -447,7 +447,7 @@ CollectionImpl::CollectionImpl(OperationContext* opCtx, RecordId catalogId, std::shared_ptr<BSONCollectionCatalogEntry::MetaData> metadata, std::unique_ptr<RecordStore> recordStore) - : CollectionImpl(opCtx, nss, catalogId, metadata->options, std::move(recordStore)) { + : CollectionImpl(opCtx, nss, std::move(catalogId), metadata->options, std::move(recordStore)) { _metadata = std::move(metadata); } @@ -467,7 +467,8 @@ std::shared_ptr<Collection> CollectionImpl::FactoryImpl::make( RecordId catalogId, const CollectionOptions& options, std::unique_ptr<RecordStore> rs) const { - return std::make_shared<CollectionImpl>(opCtx, nss, catalogId, options, std::move(rs)); + return std::make_shared<CollectionImpl>( + opCtx, nss, std::move(catalogId), options, std::move(rs)); } std::shared_ptr<Collection> CollectionImpl::FactoryImpl::make( @@ -477,7 +478,7 @@ std::shared_ptr<Collection> CollectionImpl::FactoryImpl::make( std::shared_ptr<BSONCollectionCatalogEntry::MetaData> metadata, std::unique_ptr<RecordStore> rs) const { return std::make_shared<CollectionImpl>( - opCtx, nss, catalogId, std::move(metadata), std::move(rs)); + opCtx, nss, std::move(catalogId), std::move(metadata), std::move(rs)); } std::shared_ptr<Collection> CollectionImpl::clone() const { @@ -601,7 +602,7 @@ std::unique_ptr<SeekableRecordCursor> CollectionImpl::getCursor(OperationContext bool CollectionImpl::findDoc(OperationContext* opCtx, - RecordId loc, + const RecordId& loc, Snapshotted<BSONObj>* out) const { RecordData rd; if (!_shared->_recordStore->findRecord(opCtx, loc, &rd)) @@ -1013,12 +1014,13 @@ Status CollectionImpl::_insertDocuments(OperationContext* opCtx, if (MONGO_unlikely(corruptDocumentOnInsert.shouldFail())) { // Insert a truncated record that is half the expected size of the source document. 
- records.emplace_back(Record{recordId, RecordData(doc.objdata(), doc.objsize() / 2)}); + records.emplace_back( + Record{std::move(recordId), RecordData(doc.objdata(), doc.objsize() / 2)}); timestamps.emplace_back(it->oplogSlot.getTimestamp()); continue; } - records.emplace_back(Record{recordId, RecordData(doc.objdata(), doc.objsize())}); + records.emplace_back(Record{std::move(recordId), RecordData(doc.objdata(), doc.objsize())}); timestamps.emplace_back(it->oplogSlot.getTimestamp()); } @@ -1036,8 +1038,9 @@ Status CollectionImpl::_insertDocuments(OperationContext* opCtx, invariant(loc < RecordId::maxLong()); } - BsonRecord bsonRecord = {loc, Timestamp(it->oplogSlot.getTimestamp()), &(it->doc)}; - bsonRecords.push_back(bsonRecord); + BsonRecord bsonRecord = { + std::move(loc), Timestamp(it->oplogSlot.getTimestamp()), &(it->doc)}; + bsonRecords.emplace_back(std::move(bsonRecord)); } int64_t keysInserted = 0; @@ -1207,7 +1210,7 @@ void CollectionImpl::_cappedDeleteAsNeeded(OperationContext* opCtx, &unusedKeysDeleted); // We're about to delete the record our cursor is positioned on, so advance the cursor. - RecordId toDelete = record->id; + RecordId toDelete = std::move(record->id); record = cursor->next(); _shared->_recordStore->deleteRecord(opCtx, toDelete); @@ -1218,15 +1221,16 @@ void CollectionImpl::_cappedDeleteAsNeeded(OperationContext* opCtx, if (!record) { _shared->_cappedFirstRecord = RecordId(); } else { - _shared->_cappedFirstRecord = record->id; + _shared->_cappedFirstRecord = std::move(record->id); } } else { // Update the next record to be deleted. The next record must exist as we're using the same // snapshot the insert was performed on and we can't delete newly inserted records. invariant(record); - opCtx->recoveryUnit()->onCommit([this, recordId = record->id](boost::optional<Timestamp>) { - _shared->_cappedFirstRecord = recordId; - }); + opCtx->recoveryUnit()->onCommit( + [this, recordId = std::move(record->id)](boost::optional<Timestamp>) { + _shared->_cappedFirstRecord = std::move(recordId); + }); } wuow.commit(); @@ -1270,7 +1274,7 @@ Status CollectionImpl::SharedState::aboutToDeleteCapped(OperationContext* opCtx, void CollectionImpl::deleteDocument(OperationContext* opCtx, StmtId stmtId, - RecordId loc, + const RecordId& loc, OpDebug* opDebug, bool fromMigrate, bool noWarn, @@ -1284,7 +1288,7 @@ void CollectionImpl::deleteDocument(OperationContext* opCtx, void CollectionImpl::deleteDocument(OperationContext* opCtx, Snapshotted<BSONObj> doc, StmtId stmtId, - RecordId loc, + const RecordId& loc, OpDebug* opDebug, bool fromMigrate, bool noWarn, @@ -1369,7 +1373,7 @@ bool compareSafeContentElem(const BSONObj& oldDoc, const BSONObj& newDoc) { } RecordId CollectionImpl::updateDocument(OperationContext* opCtx, - RecordId oldLocation, + const RecordId& oldLocation, const Snapshotted<BSONObj>& oldDoc, const BSONObj& newDoc, bool indexesAffected, @@ -1513,7 +1517,7 @@ RecordId CollectionImpl::updateDocument(OperationContext* opCtx, getGlobalServiceContext()->getOpObserver()->onUpdate(opCtx, onUpdateArgs); - return {oldLocation}; + return oldLocation; } bool CollectionImpl::updateWithDamagesSupported() const { @@ -1525,7 +1529,7 @@ bool CollectionImpl::updateWithDamagesSupported() const { StatusWith<RecordData> CollectionImpl::updateDocumentWithDamages( OperationContext* opCtx, - RecordId loc, + const RecordId& loc, const Snapshotted<RecordData>& oldRec, const char* damageSource, const mutablebson::DamageVector& damages, @@ -1833,7 +1837,7 @@ Status 
CollectionImpl::truncate(OperationContext* opCtx) { } void CollectionImpl::cappedTruncateAfter(OperationContext* opCtx, - RecordId end, + const RecordId& end, bool inclusive) const { dassert(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_X)); invariant(isCapped()); @@ -2037,7 +2041,7 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> CollectionImpl::makePlanExe const CollectionPtr& yieldableCollection, PlanYieldPolicy::YieldPolicy yieldPolicy, ScanDirection scanDirection, - boost::optional<RecordId> resumeAfterRecordId) const { + const boost::optional<RecordId>& resumeAfterRecordId) const { auto isForward = scanDirection == ScanDirection::kForward; auto direction = isForward ? InternalPlanner::FORWARD : InternalPlanner::BACKWARD; return InternalPlanner::collectionScan( diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h index 3477cfa7f91..67953a53236 100644 --- a/src/mongo/db/catalog/collection_impl.h +++ b/src/mongo/db/catalog/collection_impl.h @@ -123,7 +123,7 @@ public: bool requiresIdIndex() const final; - Snapshotted<BSONObj> docFor(OperationContext* opCtx, RecordId loc) const final { + Snapshotted<BSONObj> docFor(OperationContext* opCtx, const RecordId& loc) const final { return Snapshotted<BSONObj>(opCtx->recoveryUnit()->getSnapshotId(), _shared->_recordStore->dataFor(opCtx, loc).releaseToBson()); } @@ -132,7 +132,9 @@ public: * @param out - contents set to the right docs if exists, or nothing. * @return true iff loc exists */ - bool findDoc(OperationContext* opCtx, RecordId loc, Snapshotted<BSONObj>* out) const final; + bool findDoc(OperationContext* opCtx, + const RecordId& loc, + Snapshotted<BSONObj>* out) const final; std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx, bool forward = true) const final; @@ -144,7 +146,7 @@ public: void deleteDocument( OperationContext* opCtx, StmtId stmtId, - RecordId loc, + const RecordId& loc, OpDebug* opDebug, bool fromMigrate = false, bool noWarn = false, @@ -172,7 +174,7 @@ public: OperationContext* opCtx, Snapshotted<BSONObj> doc, StmtId stmtId, - RecordId loc, + const RecordId& loc, OpDebug* opDebug, bool fromMigrate = false, bool noWarn = false, @@ -232,7 +234,7 @@ public: * @return the post update location of the doc (may or may not be the same as oldLocation) */ RecordId updateDocument(OperationContext* opCtx, - RecordId oldLocation, + const RecordId& oldLocation, const Snapshotted<BSONObj>& oldDoc, const BSONObj& newDoc, bool indexesAffected, @@ -249,7 +251,7 @@ public: * @return the contents of the updated record. */ StatusWith<RecordData> updateDocumentWithDamages(OperationContext* opCtx, - RecordId loc, + const RecordId& loc, const Snapshotted<RecordData>& oldRec, const char* damageSource, const mutablebson::DamageVector& damages, @@ -276,7 +278,9 @@ public: * The caller should hold a collection X lock and ensure there are no index builds in progress * on the collection. */ - void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) const final; + void cappedTruncateAfter(OperationContext* opCtx, + const RecordId& end, + bool inclusive) const final; /** * Returns a non-ok Status if validator is not legal for this collection. 
@@ -424,7 +428,7 @@ public: const CollectionPtr& yieldableCollection, PlanYieldPolicy::YieldPolicy yieldPolicy, ScanDirection scanDirection, - boost::optional<RecordId> resumeAfterRecordId) const final; + const boost::optional<RecordId>& resumeAfterRecordId) const final; void indexBuildSuccess(OperationContext* opCtx, IndexCatalogEntry* index) final; diff --git a/src/mongo/db/catalog/collection_mock.h b/src/mongo/db/catalog/collection_mock.h index 50b1ec4aa5a..ff233111e34 100644 --- a/src/mongo/db/catalog/collection_mock.h +++ b/src/mongo/db/catalog/collection_mock.h @@ -50,7 +50,7 @@ public: std::unique_ptr<IndexCatalog> indexCatalog) : _uuid(uuid), _nss(nss), _indexCatalog(std::move(indexCatalog)) {} CollectionMock(const NamespaceString& nss, RecordId catalogId) - : _nss(nss), _catalogId(catalogId) {} + : _nss(nss), _catalogId(std::move(catalogId)) {} ~CollectionMock() = default; std::shared_ptr<Collection> clone() const { @@ -71,7 +71,7 @@ public: } void setCatalogId(RecordId catalogId) { - _catalogId = catalogId; + _catalogId = std::move(catalogId); } const NamespaceString& ns() const { @@ -110,11 +110,11 @@ public: MONGO_UNREACHABLE; } - Snapshotted<BSONObj> docFor(OperationContext* opCtx, RecordId loc) const { + Snapshotted<BSONObj> docFor(OperationContext* opCtx, const RecordId& loc) const { MONGO_UNREACHABLE; } - bool findDoc(OperationContext* opCtx, RecordId loc, Snapshotted<BSONObj>* out) const { + bool findDoc(OperationContext* opCtx, const RecordId& loc, Snapshotted<BSONObj>* out) const { MONGO_UNREACHABLE; } @@ -124,7 +124,7 @@ public: void deleteDocument(OperationContext* opCtx, StmtId stmtId, - RecordId loc, + const RecordId& loc, OpDebug* opDebug, bool fromMigrate, bool noWarn, @@ -137,7 +137,7 @@ public: OperationContext* opCtx, Snapshotted<BSONObj> doc, StmtId stmtId, - RecordId loc, + const RecordId& loc, OpDebug* opDebug, bool fromMigrate = false, bool noWarn = false, @@ -174,7 +174,7 @@ public: } RecordId updateDocument(OperationContext* opCtx, - RecordId oldLocation, + const RecordId& oldLocation, const Snapshotted<BSONObj>& oldDoc, const BSONObj& newDoc, bool indexesAffected, @@ -188,7 +188,7 @@ public: } StatusWith<RecordData> updateDocumentWithDamages(OperationContext* opCtx, - RecordId loc, + const RecordId& loc, const Snapshotted<RecordData>& oldRec, const char* damageSource, const mutablebson::DamageVector& damages, @@ -200,7 +200,7 @@ public: MONGO_UNREACHABLE; } - void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) const { + void cappedTruncateAfter(OperationContext* opCtx, const RecordId& end, bool inclusive) const { MONGO_UNREACHABLE; } @@ -375,7 +375,7 @@ public: const CollectionPtr& yieldableCollection, PlanYieldPolicy::YieldPolicy yieldPolicy, ScanDirection scanDirection, - boost::optional<RecordId> resumeAfterRecordId) const { + const boost::optional<RecordId>& resumeAfterRecordId) const { MONGO_UNREACHABLE; } diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp index 98c07a39072..2b98aac4e33 100644 --- a/src/mongo/db/catalog/database_impl.cpp +++ b/src/mongo/db/catalog/database_impl.cpp @@ -883,7 +883,7 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx, std::pair<RecordId, std::unique_ptr<RecordStore>> catalogIdRecordStorePair = uassertStatusOK(storageEngine->getCatalog()->createCollection( opCtx, nss, optionsWithUUID, true /*allocateDefaultSpace*/)); - auto catalogId = catalogIdRecordStorePair.first; + auto& catalogId = catalogIdRecordStorePair.first; 
std::shared_ptr<Collection> ownedCollection = Collection::Factory::get(opCtx)->make( opCtx, nss, catalogId, optionsWithUUID, std::move(catalogIdRecordStorePair.second)); auto collection = ownedCollection.get(); diff --git a/src/mongo/db/catalog/index_builds_manager.cpp b/src/mongo/db/catalog/index_builds_manager.cpp index 553f4dcbf0e..d872fa28daa 100644 --- a/src/mongo/db/catalog/index_builds_manager.cpp +++ b/src/mongo/db/catalog/index_builds_manager.cpp @@ -124,10 +124,11 @@ Status IndexBuildsManager::setUpIndexBuild(OperationContext* opCtx, return Status::OK(); } -Status IndexBuildsManager::startBuildingIndex(OperationContext* opCtx, - const CollectionPtr& collection, - const UUID& buildUUID, - boost::optional<RecordId> resumeAfterRecordId) { +Status IndexBuildsManager::startBuildingIndex( + OperationContext* opCtx, + const CollectionPtr& collection, + const UUID& buildUUID, + const boost::optional<RecordId>& resumeAfterRecordId) { auto builder = invariant(_getBuilder(buildUUID)); return builder->insertAllDocumentsInCollection(opCtx, collection, resumeAfterRecordId); @@ -171,7 +172,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsManager::startBuildingInd } WriteUnitOfWork wunit(opCtx); for (int i = 0; record && i < internalInsertMaxBatchSize.load(); i++) { - RecordId id = record->id; + auto& id = record->id; RecordData& data = record->data; // We retain decimal data when repairing database even if decimal is disabled. auto validStatus = validateBSON(data.data(), data.size()); diff --git a/src/mongo/db/catalog/index_builds_manager.h b/src/mongo/db/catalog/index_builds_manager.h index 159f691894c..8504aab98bd 100644 --- a/src/mongo/db/catalog/index_builds_manager.h +++ b/src/mongo/db/catalog/index_builds_manager.h @@ -101,7 +101,7 @@ public: Status startBuildingIndex(OperationContext* opCtx, const CollectionPtr& collection, const UUID& buildUUID, - boost::optional<RecordId> resumeAfterRecordId = boost::none); + const boost::optional<RecordId>& resumeAfterRecordId = boost::none); Status resumeBuildingIndexFromBulkLoadPhase(OperationContext* opCtx, const CollectionPtr& collection, diff --git a/src/mongo/db/catalog/index_consistency.cpp b/src/mongo/db/catalog/index_consistency.cpp index db58983bec8..3b64708a6d7 100644 --- a/src/mongo/db/catalog/index_consistency.cpp +++ b/src/mongo/db/catalog/index_consistency.cpp @@ -91,7 +91,7 @@ IndexEntryInfo::IndexEntryInfo(const IndexInfo& indexInfo, : indexName(indexInfo.indexName), keyPattern(indexInfo.keyPattern), ord(indexInfo.ord), - recordId(entryRecordId), + recordId(std::move(entryRecordId)), idKey(entryIdKey.getOwned()), keyString(entryKeyString) {} @@ -305,7 +305,7 @@ void IndexConsistency::addDocumentMultikeyPaths(IndexInfo* indexInfo, void IndexConsistency::addDocKey(OperationContext* opCtx, const KeyString::Value& ks, IndexInfo* indexInfo, - RecordId recordId) { + const RecordId& recordId) { auto rawHash = ks.hash(indexInfo->indexNameHash); auto hashLower = rawHash % kNumHashBuckets; auto hashUpper = (rawHash / kNumHashBuckets) % kNumHashBuckets; @@ -360,7 +360,7 @@ void IndexConsistency::addDocKey(OperationContext* opCtx, void IndexConsistency::addIndexKey(OperationContext* opCtx, const KeyString::Value& ks, IndexInfo* indexInfo, - RecordId recordId, + const RecordId& recordId, ValidateResults* results) { auto rawHash = ks.hash(indexInfo->indexNameHash); auto hashLower = rawHash % kNumHashBuckets; @@ -512,7 +512,7 @@ bool IndexConsistency::limitMemoryUsageForSecondPhase(ValidateResults* result) { BSONObj 
IndexConsistency::_generateInfo(const std::string& indexName, const BSONObj& keyPattern, - RecordId recordId, + const RecordId& recordId, const BSONObj& indexKey, const BSONObj& idKey) { diff --git a/src/mongo/db/catalog/index_consistency.h b/src/mongo/db/catalog/index_consistency.h index dab1d2d8d97..ade97d20918 100644 --- a/src/mongo/db/catalog/index_consistency.h +++ b/src/mongo/db/catalog/index_consistency.h @@ -108,7 +108,7 @@ public: void addDocKey(OperationContext* opCtx, const KeyString::Value& ks, IndexInfo* indexInfo, - RecordId recordId); + const RecordId& recordId); /** * During the first phase of validation, given the index entry's KeyString, decrement the @@ -119,7 +119,7 @@ public: void addIndexKey(OperationContext* opCtx, const KeyString::Value& ks, IndexInfo* indexInfo, - RecordId recordId, + const RecordId& recordId, ValidateResults* results); /** @@ -232,7 +232,7 @@ private: */ BSONObj _generateInfo(const std::string& indexName, const BSONObj& keyPattern, - RecordId recordId, + const RecordId& recordId, const BSONObj& indexKey, const BSONObj& idKey); diff --git a/src/mongo/db/catalog/index_repair.cpp b/src/mongo/db/catalog/index_repair.cpp index 4e863cec74c..a62d8d81604 100644 --- a/src/mongo/db/catalog/index_repair.cpp +++ b/src/mongo/db/catalog/index_repair.cpp @@ -41,7 +41,7 @@ namespace index_repair { StatusWith<int> moveRecordToLostAndFound(OperationContext* opCtx, const NamespaceString& nss, const NamespaceString& lostAndFoundNss, - RecordId dupRecord) { + const RecordId& dupRecord) { AutoGetCollection autoColl(opCtx, lostAndFoundNss, MODE_IX); auto catalog = CollectionCatalog::get(opCtx); auto originalCollection = catalog->lookupCollectionByNamespace(opCtx, nss); diff --git a/src/mongo/db/catalog/index_repair.h b/src/mongo/db/catalog/index_repair.h index a4bee38c011..493f3943b65 100644 --- a/src/mongo/db/catalog/index_repair.h +++ b/src/mongo/db/catalog/index_repair.h @@ -44,7 +44,7 @@ namespace index_repair { StatusWith<int> moveRecordToLostAndFound(OperationContext* opCtx, const NamespaceString& ns, const NamespaceString& lostAndFoundNss, - RecordId dupRecord); + const RecordId& dupRecord); /** * If repair mode is enabled, tries the inserting missingIndexEntry into indexes. 
If the diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp index f04cf2712d6..3c693d92d3f 100644 --- a/src/mongo/db/catalog/multi_index_block.cpp +++ b/src/mongo/db/catalog/multi_index_block.cpp @@ -398,7 +398,7 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init( Status MultiIndexBlock::insertAllDocumentsInCollection( OperationContext* opCtx, const CollectionPtr& collection, - boost::optional<RecordId> resumeAfterRecordId) { + const boost::optional<RecordId>& resumeAfterRecordId) { invariant(!_buildIsCleanedUp); invariant(opCtx->lockState()->isNoop() || !opCtx->lockState()->inAWriteUnitOfWork()); @@ -600,7 +600,7 @@ Status MultiIndexBlock::insertAllDocumentsInCollection( void MultiIndexBlock::_doCollectionScan(OperationContext* opCtx, const CollectionPtr& collection, - boost::optional<RecordId> resumeAfterRecordId, + const boost::optional<RecordId>& resumeAfterRecordId, ProgressMeterHolder* progress) { PlanYieldPolicy::YieldPolicy yieldPolicy; if (isBackgroundBuilding()) { diff --git a/src/mongo/db/catalog/multi_index_block.h b/src/mongo/db/catalog/multi_index_block.h index 0ca275e72bf..711f5351562 100644 --- a/src/mongo/db/catalog/multi_index_block.h +++ b/src/mongo/db/catalog/multi_index_block.h @@ -153,7 +153,7 @@ public: Status insertAllDocumentsInCollection( OperationContext* opCtx, const CollectionPtr& collection, - boost::optional<RecordId> resumeAfterRecordId = boost::none); + const boost::optional<RecordId>& resumeAfterRecordId = boost::none); /** * Call this after init() for each document in the collection. @@ -344,7 +344,7 @@ private: */ void _doCollectionScan(OperationContext* opCtx, const CollectionPtr& collection, - boost::optional<RecordId> resumeAfterRecordId, + const boost::optional<RecordId>& resumeAfterRecordId, ProgressMeterHolder* progress); // Is set during init() and ensures subsequent function calls act on the same Collection. diff --git a/src/mongo/db/catalog/validate_state.cpp b/src/mongo/db/catalog/validate_state.cpp index 3b6995cad82..7d0bd1e0463 100644 --- a/src/mongo/db/catalog/validate_state.cpp +++ b/src/mongo/db/catalog/validate_state.cpp @@ -278,8 +278,8 @@ void ValidateState::initializeCursors(OperationContext* opCtx) { // use cursor->next() to get subsequent Records. However, if the Record Store is empty, // there is no first record. In this case, we set the first Record Id to an invalid RecordId // (RecordId()), which will halt iteration at the initialization step. - const boost::optional<Record> record = _traverseRecordStoreCursor->next(opCtx); - _firstRecordId = record ? record->id : RecordId(); + auto record = _traverseRecordStoreCursor->next(opCtx); + _firstRecordId = record ? std::move(record->id) : RecordId(); } void ValidateState::_relockDatabaseAndCollection(OperationContext* opCtx) { diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp index 3fecc0c4d47..edea7336f3e 100644 --- a/src/mongo/db/exec/collection_scan.cpp +++ b/src/mongo/db/exec/collection_scan.cpp @@ -196,7 +196,7 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) { // // Note that we want to return the record *after* this one since we have already // returned this one prior to the resume. 
- auto recordIdToSeek = *_params.resumeAfterRecordId; + auto& recordIdToSeek = *_params.resumeAfterRecordId; if (!_cursor->seekExact(recordIdToSeek)) { uasserted(ErrorCodes::KeyNotFound, str::stream() @@ -328,7 +328,7 @@ bool pastEndOfRange(const CollectionScanParams& params, const WorkingSetMember& return false; } - auto endRecord = params.maxRecord->recordId(); + const auto& endRecord = params.maxRecord->recordId(); return member.recordId > endRecord || (member.recordId == endRecord && !shouldIncludeEndRecord(params)); } else { @@ -336,7 +336,7 @@ bool pastEndOfRange(const CollectionScanParams& params, const WorkingSetMember& if (!params.minRecord) { return false; } - auto endRecord = params.minRecord->recordId(); + const auto& endRecord = params.minRecord->recordId(); return member.recordId < endRecord || (member.recordId == endRecord && !shouldIncludeEndRecord(params)); @@ -350,7 +350,7 @@ bool beforeStartOfRange(const CollectionScanParams& params, const WorkingSetMemb return false; } - auto startRecord = params.minRecord->recordId(); + const auto& startRecord = params.minRecord->recordId(); return member.recordId < startRecord || (member.recordId == startRecord && !shouldIncludeStartRecord(params)); } else { @@ -358,7 +358,7 @@ bool beforeStartOfRange(const CollectionScanParams& params, const WorkingSetMemb if (!params.maxRecord) { return false; } - auto startRecord = params.maxRecord->recordId(); + const auto& startRecord = params.maxRecord->recordId(); return member.recordId > startRecord || (member.recordId == startRecord && !shouldIncludeStartRecord(params)); } diff --git a/src/mongo/db/exec/delete_stage.cpp b/src/mongo/db/exec/delete_stage.cpp index 53f9119e8d9..9a507736bda 100644 --- a/src/mongo/db/exec/delete_stage.cpp +++ b/src/mongo/db/exec/delete_stage.cpp @@ -153,7 +153,10 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) { ScopeGuard memberFreer([&] { _ws->free(id); }); invariant(member->hasRecordId()); - RecordId recordId = member->recordId; + // It's safe to have a reference instead of a copy here due to the member pointer only being + // invalidated if the memberFreer ScopeGuard activates. This will only be the case if the + // document is deleted successfully and thus the existing RecordId becomes invalid. + const auto& recordId = member->recordId; // Deletes can't have projections. This means that covering analysis will always add // a fetch. We should always get fetched data, and never just key data. invariant(member->hasObj()); diff --git a/src/mongo/db/exec/document_value/document_metadata_fields.h b/src/mongo/db/exec/document_value/document_metadata_fields.h index 12932c29686..562ed677330 100644 --- a/src/mongo/db/exec/document_value/document_metadata_fields.h +++ b/src/mongo/db/exec/document_value/document_metadata_fields.h @@ -302,7 +302,7 @@ public: } _holder->metaFields.set(MetaType::kRecordId); - _holder->recordId = rid; + _holder->recordId = std::move(rid); } bool hasSearchScoreDetails() const { diff --git a/src/mongo/db/exec/idhack.cpp b/src/mongo/db/exec/idhack.cpp index 4bb9ae7259f..04600ac4f4a 100644 --- a/src/mongo/db/exec/idhack.cpp +++ b/src/mongo/db/exec/idhack.cpp @@ -106,7 +106,7 @@ PlanStage::StageState IDHackStage::doWork(WorkingSetID* out) { // Create a new WSM for the result document. 
id = _workingSet->allocate(); WorkingSetMember* member = _workingSet->get(id); - member->recordId = recordId; + member->recordId = std::move(recordId); _workingSet->transitionToRecordIdAndIdx(id); const auto& coll = collection(); diff --git a/src/mongo/db/exec/index_scan.cpp b/src/mongo/db/exec/index_scan.cpp index bd765a0404a..0c64eaf721b 100644 --- a/src/mongo/db/exec/index_scan.cpp +++ b/src/mongo/db/exec/index_scan.cpp @@ -244,7 +244,7 @@ PlanStage::StageState IndexScan::doWork(WorkingSetID* out) { // We found something to return, so fill out the WSM. WorkingSetID id = _workingSet->allocate(); WorkingSetMember* member = _workingSet->get(id); - member->recordId = kv->loc; + member->recordId = std::move(kv->loc); member->keyData.push_back(IndexKeyDatum( _keyPattern, kv->key, workingSetIndexId(), opCtx()->recoveryUnit()->getSnapshotId())); _workingSet->transitionToRecordIdAndIdx(id); diff --git a/src/mongo/db/exec/multi_iterator.cpp b/src/mongo/db/exec/multi_iterator.cpp index a2524709051..dc37c738271 100644 --- a/src/mongo/db/exec/multi_iterator.cpp +++ b/src/mongo/db/exec/multi_iterator.cpp @@ -84,7 +84,7 @@ PlanStage::StageState MultiIteratorStage::doWork(WorkingSetID* out) { *out = _ws->allocate(); WorkingSetMember* member = _ws->get(*out); - member->recordId = record->id; + member->recordId = std::move(record->id); member->resetDocument(opCtx()->recoveryUnit()->getSnapshotId(), record->data.releaseToBson()); _ws->transitionToRecordIdAndObj(*out); return PlanStage::ADVANCED; diff --git a/src/mongo/db/exec/sbe/stages/scan.cpp b/src/mongo/db/exec/sbe/stages/scan.cpp index fbbc3a9ae0d..0f6d514c139 100644 --- a/src/mongo/db/exec/sbe/stages/scan.cpp +++ b/src/mongo/db/exec/sbe/stages/scan.cpp @@ -408,7 +408,7 @@ PlanState ScanStage::getNext() { } if (_recordIdAccessor) { - _recordId = nextRecord->id; + _recordId = std::move(nextRecord->id); _recordIdAccessor->reset( false, value::TypeTags::RecordId, value::bitcastFrom<RecordId*>(&_recordId)); } @@ -833,15 +833,15 @@ void ParallelScanStage::open(bool reOpen) { while (ranges--) { auto nextRecord = randomCursor->next(); if (nextRecord) { - rids.emplace(nextRecord->id); + rids.emplace(std::move(nextRecord->id)); } } RecordId lastid{}; - for (auto id : rids) { - _state->ranges.emplace_back(Range{lastid, id}); - lastid = id; + for (auto& id : rids) { + _state->ranges.emplace_back(Range{std::move(lastid), id}); + lastid = std::move(id); } - _state->ranges.emplace_back(Range{lastid, RecordId{}}); + _state->ranges.emplace_back(Range{std::move(lastid), RecordId{}}); } } } diff --git a/src/mongo/db/fts/fts_index_format.cpp b/src/mongo/db/fts/fts_index_format.cpp index 418ea2d8629..cb7977cfdcd 100644 --- a/src/mongo/db/fts/fts_index_format.cpp +++ b/src/mongo/db/fts/fts_index_format.cpp @@ -115,7 +115,7 @@ void FTSIndexFormat::getKeys(SharedBufferFragmentBuilder& pooledBufferBuilder, KeyStringSet* keys, KeyString::Version keyStringVersion, Ordering ordering, - boost::optional<RecordId> id) { + const boost::optional<RecordId>& id) { int extraSize = 0; vector<BSONElement> extrasBefore; vector<BSONElement> extrasAfter; diff --git a/src/mongo/db/fts/fts_index_format.h b/src/mongo/db/fts/fts_index_format.h index f52918aeb4a..522fd577619 100644 --- a/src/mongo/db/fts/fts_index_format.h +++ b/src/mongo/db/fts/fts_index_format.h @@ -51,7 +51,7 @@ public: KeyStringSet* keys, KeyString::Version keyStringVersion, Ordering ordering, - boost::optional<RecordId> id = boost::none); + const boost::optional<RecordId>& id = boost::none); /** * Helper method to get 
return entry from the FTSIndex as a BSONObj diff --git a/src/mongo/db/index/2d_access_method.cpp b/src/mongo/db/index/2d_access_method.cpp index d81a9f6ea30..d5af3a86a97 100644 --- a/src/mongo/db/index/2d_access_method.cpp +++ b/src/mongo/db/index/2d_access_method.cpp @@ -64,7 +64,7 @@ void TwoDAccessMethod::doGetKeys(OperationContext* opCtx, KeyStringSet* keys, KeyStringSet* multikeyMetadataKeys, MultikeyPaths* multikeyPaths, - boost::optional<RecordId> id) const { + const boost::optional<RecordId>& id) const { ExpressionKeysPrivate::get2DKeys(pooledBufferBuilder, obj, _params, diff --git a/src/mongo/db/index/2d_access_method.h b/src/mongo/db/index/2d_access_method.h index e9621fcc7a2..a7ae364b994 100644 --- a/src/mongo/db/index/2d_access_method.h +++ b/src/mongo/db/index/2d_access_method.h @@ -70,7 +70,7 @@ private: KeyStringSet* keys, KeyStringSet* multikeyMetadataKeys, MultikeyPaths* multikeyPaths, - boost::optional<RecordId> id) const final; + const boost::optional<RecordId>& id) const final; TwoDIndexingParams _params; }; diff --git a/src/mongo/db/index/btree_access_method.cpp b/src/mongo/db/index/btree_access_method.cpp index 177f03ad34b..71c9bf86fec 100644 --- a/src/mongo/db/index/btree_access_method.cpp +++ b/src/mongo/db/index/btree_access_method.cpp @@ -79,7 +79,7 @@ void BtreeAccessMethod::doGetKeys(OperationContext* opCtx, KeyStringSet* keys, KeyStringSet* multikeyMetadataKeys, MultikeyPaths* multikeyPaths, - boost::optional<RecordId> id) const { + const boost::optional<RecordId>& id) const { const auto skipMultikey = context == GetKeysContext::kValidatingKeys && !_descriptor->getEntry()->isMultikey(opCtx, collection); _keyGenerator->getKeys(pooledBufferBuilder, diff --git a/src/mongo/db/index/btree_access_method.h b/src/mongo/db/index/btree_access_method.h index 8f0e6304d20..5621c99933f 100644 --- a/src/mongo/db/index/btree_access_method.h +++ b/src/mongo/db/index/btree_access_method.h @@ -60,7 +60,7 @@ private: KeyStringSet* keys, KeyStringSet* multikeyMetadataKeys, MultikeyPaths* multikeyPaths, - boost::optional<RecordId> id) const final; + const boost::optional<RecordId>& id) const final; // Our keys differ for V0 and V1. std::unique_ptr<BtreeKeyGenerator> _keyGenerator; diff --git a/src/mongo/db/index/btree_key_generator.cpp b/src/mongo/db/index/btree_key_generator.cpp index f386a3aba87..b845aa2af1a 100644 --- a/src/mongo/db/index/btree_key_generator.cpp +++ b/src/mongo/db/index/btree_key_generator.cpp @@ -173,7 +173,7 @@ void BtreeKeyGenerator::_getKeysArrEltFixed(const std::vector<const char*>& fiel const std::vector<PositionalPathInfo>& positionalInfo, MultikeyPaths* multikeyPaths, const CollatorInterface* collator, - boost::optional<RecordId> id) const { + const boost::optional<RecordId>& id) const { // fieldNamesTemp and fixedTemp are passed in by the caller to be used as temporary data // structures as we need them to be mutable in the recursion. When they are stored outside we // can reuse their memory. 
@@ -210,7 +210,7 @@ void BtreeKeyGenerator::getKeys(SharedBufferFragmentBuilder& pooledBufferBuilder KeyStringSet* keys, MultikeyPaths* multikeyPaths, const CollatorInterface* collator, - boost::optional<RecordId> id) const { + const boost::optional<RecordId>& id) const { if (_isIdIndex) { // we special case for speed BSONElement e = obj["_id"]; @@ -311,7 +311,7 @@ size_t BtreeKeyGenerator::PositionalPathInfo::getApproximateSize() const { void BtreeKeyGenerator::_getKeysWithoutArray(SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& obj, const CollatorInterface* collator, - boost::optional<RecordId> id, + const boost::optional<RecordId>& id, KeyStringSet* keys) const { KeyString::PooledBuilder keyString{pooledBufferBuilder, _keyStringVersion, _ordering}; @@ -351,7 +351,7 @@ void BtreeKeyGenerator::_getKeysWithArray(std::vector<const char*>* fieldNames, const std::vector<PositionalPathInfo>& positionalInfo, MultikeyPaths* multikeyPaths, const CollatorInterface* collator, - boost::optional<RecordId> id) const { + const boost::optional<RecordId>& id) const { BSONElement arrElt; // A set containing the position of any indexed fields in the key pattern that traverse through diff --git a/src/mongo/db/index/btree_key_generator.h b/src/mongo/db/index/btree_key_generator.h index 7287b68420d..36a5146f80c 100644 --- a/src/mongo/db/index/btree_key_generator.h +++ b/src/mongo/db/index/btree_key_generator.h @@ -84,7 +84,7 @@ public: KeyStringSet* keys, MultikeyPaths* multikeyPaths, const CollatorInterface* collator = nullptr, - boost::optional<RecordId> id = boost::none) const; + const boost::optional<RecordId>& id = boost::none) const; size_t getApproximateSize() const; @@ -157,7 +157,7 @@ private: const std::vector<PositionalPathInfo>& positionalInfo, MultikeyPaths* multikeyPaths, const CollatorInterface* collator, - boost::optional<RecordId> id) const; + const boost::optional<RecordId>& id) const; /** * An optimized version of the key generation algorithm to be used when it is known that 'obj' @@ -166,7 +166,7 @@ private: void _getKeysWithoutArray(SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& obj, const CollatorInterface* collator, - boost::optional<RecordId> id, + const boost::optional<RecordId>& id, KeyStringSet* keys) const; /** @@ -228,7 +228,7 @@ private: const std::vector<PositionalPathInfo>& positionalInfo, MultikeyPaths* multikeyPaths, const CollatorInterface* collator, - boost::optional<RecordId> id) const; + const boost::optional<RecordId>& id) const; KeyString::Value _buildNullKeyString() const; diff --git a/src/mongo/db/index/column_store_sorter.cpp b/src/mongo/db/index/column_store_sorter.cpp index f708518a560..ef82ad9bf13 100644 --- a/src/mongo/db/index/column_store_sorter.cpp +++ b/src/mongo/db/index/column_store_sorter.cpp @@ -82,7 +82,7 @@ ColumnStoreSorter::ColumnStoreSorter(size_t maxMemoryUsageBytes, _maxMemoryUsageBytes(maxMemoryUsageBytes), _spillFile(std::make_shared<Sorter<Key, Value>::File>(pathForNewSpillFile(), _stats)) {} -void ColumnStoreSorter::add(PathView path, RecordId recordId, CellView cellContents) { +void ColumnStoreSorter::add(PathView path, const RecordId& recordId, CellView cellContents) { auto& cellListAtPath = _dataByPath[path]; if (cellListAtPath.empty()) { // Track memory usage of this new path. 
@@ -181,7 +181,7 @@ void ColumnStoreSorter::spill() { writer.writeChunk(); currentChunkSize = 0; } - for (auto ridAndCell : cellVector) { + for (auto& ridAndCell : cellVector) { const auto& cell = ridAndCell.second; currentChunkSize += path.size() + ridAndCell.first.memUsage() + cell.size(); writer.addAlreadySorted(Key{path, ridAndCell.first}, diff --git a/src/mongo/db/index/column_store_sorter.h b/src/mongo/db/index/column_store_sorter.h index 833213b3646..56f203c5f64 100644 --- a/src/mongo/db/index/column_store_sorter.h +++ b/src/mongo/db/index/column_store_sorter.h @@ -54,7 +54,7 @@ class ColumnStoreSorter { public: ColumnStoreSorter(size_t maxMemoryUsageBytes, StringData dbName, SorterFileStats* stats); - void add(PathView path, RecordId recordId, CellView cellContents); + void add(PathView path, const RecordId& recordId, CellView cellContents); size_t numSpills() const { return _numSpills; diff --git a/src/mongo/db/index/expression_keys_private.cpp b/src/mongo/db/index/expression_keys_private.cpp index 0e8fb0d6208..5463cc9de8d 100644 --- a/src/mongo/db/index/expression_keys_private.cpp +++ b/src/mongo/db/index/expression_keys_private.cpp @@ -489,7 +489,7 @@ void ExpressionKeysPrivate::get2DKeys(SharedBufferFragmentBuilder& pooledBufferB KeyStringSet* keys, KeyString::Version keyStringVersion, Ordering ordering, - boost::optional<RecordId> id) { + const boost::optional<RecordId>& id) { BSONElementMultiSet bSet; // Get all the nested location fields, but don't return individual elements from @@ -581,7 +581,7 @@ void ExpressionKeysPrivate::getFTSKeys(SharedBufferFragmentBuilder& pooledBuffer KeyStringSet* keys, KeyString::Version keyStringVersion, Ordering ordering, - boost::optional<RecordId> id) { + const boost::optional<RecordId>& id) { fts::FTSIndexFormat::getKeys( pooledBufferBuilder, ftsSpec, obj, keys, keyStringVersion, ordering, id); } @@ -598,7 +598,7 @@ void ExpressionKeysPrivate::getHashKeys(SharedBufferFragmentBuilder& pooledBuffe KeyString::Version keyStringVersion, Ordering ordering, bool ignoreArraysAlongPath, - boost::optional<RecordId> id) { + const boost::optional<RecordId>& id) { static const BSONObj nullObj = BSON("" << BSONNULL); auto hasFieldValue = false; KeyString::PooledBuilder keyString(pooledBufferBuilder, keyStringVersion, ordering); @@ -672,7 +672,7 @@ void ExpressionKeysPrivate::getS2Keys(SharedBufferFragmentBuilder& pooledBufferB KeyString::Version keyStringVersion, SortedDataIndexAccessMethod::GetKeysContext context, Ordering ordering, - boost::optional<RecordId> id) { + const boost::optional<RecordId>& id) { std::vector<KeyString::HeapBuilder> keysToAdd; // Does one of our documents have a geo field? 
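The ExpressionKeysPrivate helpers above and the access methods below hand the id down as boost::optional<RecordId>, and an optional owns its contained value, so passing it by value copied the RecordId at every layer of the key-generation chain. Taking the optional by const reference borrows it instead, while a defaulted boost::none still binds as an empty temporary. A sketch of the difference, with a hypothetical Rid type and hypothetical function names:

#include <boost/optional.hpp>
#include <iostream>
#include <string>

// Stand-in for RecordId: copying may heap-allocate.
struct Rid {
    std::string key;
};

// Before: the optional, and the Rid inside it, is copied per call.
void getKeysByValue(boost::optional<Rid> id) {
    if (id)
        std::cout << id->key << '\n';
}

// After: the caller's optional is borrowed; boost::none binds to a cheap
// empty temporary for the duration of the call.
void getKeysByRef(const boost::optional<Rid>& id = boost::none) {
    if (id)
        std::cout << id->key << '\n';
}

int main() {
    boost::optional<Rid> id = Rid{std::string(1024, 'x')};
    getKeysByValue(id); // copies the 1 KiB key into the parameter
    getKeysByRef(id);   // no copy
    getKeysByRef();     // defaulted boost::none
    return 0;
}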
diff --git a/src/mongo/db/index/expression_keys_private.h b/src/mongo/db/index/expression_keys_private.h index dae982831ef..c97d48913c9 100644 --- a/src/mongo/db/index/expression_keys_private.h +++ b/src/mongo/db/index/expression_keys_private.h @@ -76,7 +76,7 @@ public: KeyStringSet* keys, KeyString::Version keyStringVersion, Ordering ordering, - boost::optional<RecordId> id = boost::none); + const boost::optional<RecordId>& id = boost::none); // // FTS @@ -88,7 +88,7 @@ public: KeyStringSet* keys, KeyString::Version keyStringVersion, Ordering ordering, - boost::optional<RecordId> id = boost::none); + const boost::optional<RecordId>& id = boost::none); // // Hash @@ -108,7 +108,7 @@ public: KeyString::Version keyStringVersion, Ordering ordering, bool ignoreArraysAlongPath, - boost::optional<RecordId> id = boost::none); + const boost::optional<RecordId>& id = boost::none); /** * Hashing function used by both getHashKeys and the cursors we create. @@ -133,7 +133,7 @@ public: KeyString::Version keyStringVersion, SortedDataIndexAccessMethod::GetKeysContext context, Ordering ordering, - boost::optional<RecordId> id = boost::none); + const boost::optional<RecordId>& id = boost::none); }; } // namespace mongo diff --git a/src/mongo/db/index/fts_access_method.cpp b/src/mongo/db/index/fts_access_method.cpp index 4fbda54dae9..78f2d68b21e 100644 --- a/src/mongo/db/index/fts_access_method.cpp +++ b/src/mongo/db/index/fts_access_method.cpp @@ -47,7 +47,7 @@ void FTSAccessMethod::doGetKeys(OperationContext* opCtx, KeyStringSet* keys, KeyStringSet* multikeyMetadataKeys, MultikeyPaths* multikeyPaths, - boost::optional<RecordId> id) const { + const boost::optional<RecordId>& id) const { ExpressionKeysPrivate::getFTSKeys(pooledBufferBuilder, obj, _ftsSpec, diff --git a/src/mongo/db/index/fts_access_method.h b/src/mongo/db/index/fts_access_method.h index a1343553916..966b959af6a 100644 --- a/src/mongo/db/index/fts_access_method.h +++ b/src/mongo/db/index/fts_access_method.h @@ -60,7 +60,7 @@ private: KeyStringSet* keys, KeyStringSet* multikeyMetadataKeys, MultikeyPaths* multikeyPaths, - boost::optional<RecordId> id) const final; + const boost::optional<RecordId>& id) const final; fts::FTSSpec _ftsSpec; }; diff --git a/src/mongo/db/index/hash_access_method.cpp b/src/mongo/db/index/hash_access_method.cpp index ec7b05b3665..7e7e7862fd3 100644 --- a/src/mongo/db/index/hash_access_method.cpp +++ b/src/mongo/db/index/hash_access_method.cpp @@ -63,7 +63,7 @@ void HashAccessMethod::doGetKeys(OperationContext* opCtx, KeyStringSet* keys, KeyStringSet* multikeyMetadataKeys, MultikeyPaths* multikeyPaths, - boost::optional<RecordId> id) const { + const boost::optional<RecordId>& id) const { ExpressionKeysPrivate::getHashKeys(pooledBufferBuilder, obj, _keyPattern, diff --git a/src/mongo/db/index/hash_access_method.h b/src/mongo/db/index/hash_access_method.h index 851c841dbe2..6dc7df9e48d 100644 --- a/src/mongo/db/index/hash_access_method.h +++ b/src/mongo/db/index/hash_access_method.h @@ -67,7 +67,7 @@ private: KeyStringSet* keys, KeyStringSet* multikeyMetadataKeys, MultikeyPaths* multikeyPaths, - boost::optional<RecordId> id) const final; + const boost::optional<RecordId>& id) const final; BSONObj _keyPattern; diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp index 0144e00d973..f9cd8f7c7c2 100644 --- a/src/mongo/db/index/index_access_method.cpp +++ b/src/mongo/db/index/index_access_method.cpp @@ -437,7 +437,7 @@ RecordId 
SortedDataIndexAccessMethod::findSingle(OperationContext* opCtx, if (auto loc = _newInterface->findLoc(opCtx, actualKey)) { dassert(!loc->isNull()); - return *loc; + return std::move(*loc); } return RecordId(); @@ -771,7 +771,7 @@ Status SortedDataIndexAccessMethod::BulkBuilderImpl::insert( &_multikeyMetadataKeys, multikeyPaths.get(), loc, - [&](Status status, const BSONObj&, boost::optional<RecordId>) { + [&](Status status, const BSONObj&, const boost::optional<RecordId>&) { // If a key generation error was suppressed, record the document as // "skipped" so the index builder can retry at a point when data is // consistent. @@ -1018,7 +1018,7 @@ void SortedDataIndexAccessMethod::getKeys(OperationContext* opCtx, KeyStringSet* keys, KeyStringSet* multikeyMetadataKeys, MultikeyPaths* multikeyPaths, - boost::optional<RecordId> id, + const boost::optional<RecordId>& id, OnSuppressedErrorFn&& onSuppressedError) const { invariant(!id || _newInterface->rsKeyFormat() != KeyFormat::String || id->isStr(), fmt::format("RecordId is not in the same string format as its RecordStore; id: {}", diff --git a/src/mongo/db/index/index_access_method.h b/src/mongo/db/index/index_access_method.h index 7e3a4dd504a..5136faed942 100644 --- a/src/mongo/db/index/index_access_method.h +++ b/src/mongo/db/index/index_access_method.h @@ -387,8 +387,8 @@ public: * If any key generation errors are encountered and suppressed due to the provided GetKeysMode, * 'onSuppressedErrorFn' is called. */ - using OnSuppressedErrorFn = - std::function<void(Status status, const BSONObj& obj, boost::optional<RecordId> loc)>; + using OnSuppressedErrorFn = std::function<void( + Status status, const BSONObj& obj, const boost::optional<RecordId>& loc)>; void getKeys(OperationContext* opCtx, const CollectionPtr& collection, SharedBufferFragmentBuilder& pooledBufferBuilder, @@ -398,7 +398,7 @@ public: KeyStringSet* keys, KeyStringSet* multikeyMetadataKeys, MultikeyPaths* multikeyPaths, - boost::optional<RecordId> id, + const boost::optional<RecordId>& id, OnSuppressedErrorFn&& onSuppressedError = nullptr) const; /** @@ -585,7 +585,7 @@ protected: KeyStringSet* keys, KeyStringSet* multikeyMetadataKeys, MultikeyPaths* multikeyPaths, - boost::optional<RecordId> id) const = 0; + const boost::optional<RecordId>& id) const = 0; const IndexCatalogEntry* const _indexCatalogEntry; // owned by IndexCatalog const IndexDescriptor* const _descriptor; diff --git a/src/mongo/db/index/index_build_interceptor.cpp b/src/mongo/db/index/index_build_interceptor.cpp index df708587511..6061ea3b90f 100644 --- a/src/mongo/db/index/index_build_interceptor.cpp +++ b/src/mongo/db/index/index_build_interceptor.cpp @@ -177,7 +177,7 @@ Status IndexBuildInterceptor::drainWritesIntoIndex(OperationContext* opCtx, while (record) { opCtx->checkForInterrupt(); - RecordId currentRecordId = record->id; + auto& currentRecordId = record->id; BSONObj unownedDoc = record->data.toBson(); // Don't apply this record if the total batch size in bytes would be too large. @@ -207,7 +207,7 @@ Status IndexBuildInterceptor::drainWritesIntoIndex(OperationContext* opCtx, // Save the record ids of the documents inserted into the index for deletion later. // We can't delete records while holding a positioned cursor. - recordsAddedToIndex.push_back(currentRecordId); + recordsAddedToIndex.emplace_back(std::move(currentRecordId)); // Don't continue if the batch is full. Allow the transaction to commit. 
if (batchSize == kBatchMaxSize) { diff --git a/src/mongo/db/index/s2_access_method.cpp b/src/mongo/db/index/s2_access_method.cpp index a2ea40dd9b1..eaba79ce7eb 100644 --- a/src/mongo/db/index/s2_access_method.cpp +++ b/src/mongo/db/index/s2_access_method.cpp @@ -143,7 +143,7 @@ void S2AccessMethod::doGetKeys(OperationContext* opCtx, KeyStringSet* keys, KeyStringSet* multikeyMetadataKeys, MultikeyPaths* multikeyPaths, - boost::optional<RecordId> id) const { + const boost::optional<RecordId>& id) const { ExpressionKeysPrivate::getS2Keys(pooledBufferBuilder, obj, _descriptor->keyPattern(), diff --git a/src/mongo/db/index/s2_access_method.h b/src/mongo/db/index/s2_access_method.h index c25a8b47ce1..38999d95379 100644 --- a/src/mongo/db/index/s2_access_method.h +++ b/src/mongo/db/index/s2_access_method.h @@ -76,7 +76,7 @@ private: KeyStringSet* keys, KeyStringSet* multikeyMetadataKeys, MultikeyPaths* multikeyPaths, - boost::optional<RecordId> id) const final; + const boost::optional<RecordId>& id) const final; S2IndexingParams _params; diff --git a/src/mongo/db/index/s2_bucket_access_method.cpp b/src/mongo/db/index/s2_bucket_access_method.cpp index 0da4cdc6f63..b9e926a753b 100644 --- a/src/mongo/db/index/s2_bucket_access_method.cpp +++ b/src/mongo/db/index/s2_bucket_access_method.cpp @@ -125,7 +125,7 @@ void S2BucketAccessMethod::doGetKeys(OperationContext* opCtx, KeyStringSet* keys, KeyStringSet* multikeyMetadataKeys, MultikeyPaths* multikeyPaths, - boost::optional<RecordId> id) const { + const boost::optional<RecordId>& id) const { ExpressionKeysPrivate::getS2Keys(pooledBufferBuilder, obj, _descriptor->keyPattern(), diff --git a/src/mongo/db/index/s2_bucket_access_method.h b/src/mongo/db/index/s2_bucket_access_method.h index 8996bfe78a5..c2d7186881d 100644 --- a/src/mongo/db/index/s2_bucket_access_method.h +++ b/src/mongo/db/index/s2_bucket_access_method.h @@ -76,7 +76,7 @@ private: KeyStringSet* keys, KeyStringSet* multikeyMetadataKeys, MultikeyPaths* multikeyPaths, - boost::optional<RecordId> id) const final; + const boost::optional<RecordId>& id) const final; S2IndexingParams _params; diff --git a/src/mongo/db/index/wildcard_access_method.cpp b/src/mongo/db/index/wildcard_access_method.cpp index 4ea3cfc261c..0631158e40d 100644 --- a/src/mongo/db/index/wildcard_access_method.cpp +++ b/src/mongo/db/index/wildcard_access_method.cpp @@ -69,7 +69,7 @@ void WildcardAccessMethod::doGetKeys(OperationContext* opCtx, KeyStringSet* keys, KeyStringSet* multikeyMetadataKeys, MultikeyPaths* multikeyPaths, - boost::optional<RecordId> id) const { + const boost::optional<RecordId>& id) const { _keyGen.generateKeys(pooledBufferBuilder, obj, keys, multikeyMetadataKeys, id); } } // namespace mongo diff --git a/src/mongo/db/index/wildcard_access_method.h b/src/mongo/db/index/wildcard_access_method.h index a82e7e616c2..1622941fb8e 100644 --- a/src/mongo/db/index/wildcard_access_method.h +++ b/src/mongo/db/index/wildcard_access_method.h @@ -75,7 +75,7 @@ private: KeyStringSet* keys, KeyStringSet* multikeyMetadataKeys, MultikeyPaths* multikeyPaths, - boost::optional<RecordId> id) const final; + const boost::optional<RecordId>& id) const final; const WildcardKeyGenerator _keyGen; }; diff --git a/src/mongo/db/index/wildcard_key_generator.cpp b/src/mongo/db/index/wildcard_key_generator.cpp index 6aa2aa64d7b..c04ba500eb7 100644 --- a/src/mongo/db/index/wildcard_key_generator.cpp +++ b/src/mongo/db/index/wildcard_key_generator.cpp @@ -114,7 +114,7 @@ void WildcardKeyGenerator::generateKeys(SharedBufferFragmentBuilder& 
pooledBufferBuilder, BSONObj inputDoc, KeyStringSet* keys, KeyStringSet* multikeyPaths, - boost::optional<RecordId> id) const { + const boost::optional<RecordId>& id) const { FieldRef rootPath; auto keysSequence = keys->extract_sequence(); // multikeyPaths is allowed to be nullptr @@ -139,7 +139,7 @@ void WildcardKeyGenerator::_traverseWildcard(SharedBufferFragmentBuilder& pooled FieldRef* path, KeyStringSet::sequence_type* keys, KeyStringSet::sequence_type* multikeyPaths, - boost::optional<RecordId> id) const { + const boost::optional<RecordId>& id) const { for (const auto& elem : obj) { // If the element's fieldName contains a ".", fast-path skip it because it's not queryable. if (elem.fieldNameStringData().find('.', 0) != std::string::npos) @@ -185,7 +185,7 @@ bool WildcardKeyGenerator::_addKeyForNestedArray(SharedBufferFragmentBuilder& po const FieldRef& fullPath, bool enclosingObjIsArray, KeyStringSet::sequence_type* keys, - boost::optional<RecordId> id) const { + const boost::optional<RecordId>& id) const { // If this element is an array whose parent is also an array, index it as a value. if (enclosingObjIsArray && elem.type() == BSONType::Array) { _addKey(pooledBufferBuilder, elem, fullPath, keys, id); @@ -198,7 +198,7 @@ bool WildcardKeyGenerator::_addKeyForEmptyLeaf(SharedBufferFragmentBuilder& pool BSONElement elem, const FieldRef& fullPath, KeyStringSet::sequence_type* keys, - boost::optional<RecordId> id) const { + const boost::optional<RecordId>& id) const { invariant(elem.isABSONObj()); if (elem.embeddedObject().isEmpty()) { // In keeping with the behaviour of regular indexes, an empty object is indexed as-is while @@ -217,7 +217,7 @@ void WildcardKeyGenerator::_addKey(SharedBufferFragmentBuilder& pooledBufferBuil BSONElement elem, const FieldRef& fullPath, KeyStringSet::sequence_type* keys, - boost::optional<RecordId> id) const { + const boost::optional<RecordId>& id) const { // Wildcard keys are of the form { "": "path.to.field", "": <collation-aware value> }. KeyString::PooledBuilder keyString(pooledBufferBuilder, _keyStringVersion, _ordering); keyString.appendString(fullPath.dottedField()); diff --git a/src/mongo/db/index/wildcard_key_generator.h b/src/mongo/db/index/wildcard_key_generator.h index 085fd191f37..4164fa9aa92 100644 --- a/src/mongo/db/index/wildcard_key_generator.h +++ b/src/mongo/db/index/wildcard_key_generator.h @@ -79,7 +79,7 @@ public: BSONObj inputDoc, KeyStringSet* keys, KeyStringSet* multikeyPaths, - boost::optional<RecordId> id = boost::none) const; + const boost::optional<RecordId>& id = boost::none) const; private: // Traverses every path of the post-projection document, adding keys to the set as it goes. @@ -89,7 +89,7 @@ private: FieldRef* path, KeyStringSet::sequence_type* keys, KeyStringSet::sequence_type* multikeyPaths, - boost::optional<RecordId> id) const; + const boost::optional<RecordId>& id) const; // Helper functions to format the entry appropriately before adding it to the key/path tracker. void _addMultiKey(SharedBufferFragmentBuilder& pooledBufferBuilder, @@ -99,7 +99,7 @@ private: BSONElement elem, const FieldRef& fullPath, KeyStringSet::sequence_type* keys, - boost::optional<RecordId> id) const; + const boost::optional<RecordId>& id) const; // Helper to check whether the element is a nested array, and conditionally add it to 'keys'. 
bool _addKeyForNestedArray(SharedBufferFragmentBuilder& pooledBufferBuilder, @@ -107,12 +107,12 @@ private: const FieldRef& fullPath, bool enclosingObjIsArray, KeyStringSet::sequence_type* keys, - boost::optional<RecordId> id) const; + const boost::optional<RecordId>& id) const; bool _addKeyForEmptyLeaf(SharedBufferFragmentBuilder& pooledBufferBuilder, BSONElement elem, const FieldRef& fullPath, KeyStringSet::sequence_type* keys, - boost::optional<RecordId> id) const; + const boost::optional<RecordId>& id) const; WildcardProjection _proj; const CollatorInterface* _collator; diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp index 3fe2f6f7349..898065a7976 100644 --- a/src/mongo/db/index_builds_coordinator.cpp +++ b/src/mongo/db/index_builds_coordinator.cpp @@ -2534,7 +2534,7 @@ void IndexBuildsCoordinator::_buildIndex(OperationContext* opCtx, void IndexBuildsCoordinator::_scanCollectionAndInsertSortedKeysIntoIndex( OperationContext* opCtx, std::shared_ptr<ReplIndexBuildState> replState, - boost::optional<RecordId> resumeAfterRecordId) { + const boost::optional<RecordId>& resumeAfterRecordId) { // Collection scan and insert into index. { indexBuildsSSS.scanCollection.addAndFetch(1); diff --git a/src/mongo/db/index_builds_coordinator.h b/src/mongo/db/index_builds_coordinator.h index ac8193685ac..bb1e372ae8f 100644 --- a/src/mongo/db/index_builds_coordinator.h +++ b/src/mongo/db/index_builds_coordinator.h @@ -690,7 +690,7 @@ protected: void _scanCollectionAndInsertSortedKeysIntoIndex( OperationContext* opCtx, std::shared_ptr<ReplIndexBuildState> replState, - boost::optional<RecordId> resumeAfterRecordId = boost::none); + const boost::optional<RecordId>& resumeAfterRecordId = boost::none); /** * Performs the second phase of the index build, for use when resuming from the second phase. 
*/ diff --git a/src/mongo/db/query/internal_plans.cpp b/src/mongo/db/query/internal_plans.cpp index 2bc1de7d33c..110c76addfe 100644 --- a/src/mongo/db/query/internal_plans.cpp +++ b/src/mongo/db/query/internal_plans.cpp @@ -120,7 +120,7 @@ CollectionScanParams createCollectionScanParams( WorkingSet* ws, const CollectionPtr* coll, InternalPlanner::Direction direction, - boost::optional<RecordId> resumeAfterRecordId, + const boost::optional<RecordId>& resumeAfterRecordId, boost::optional<RecordIdBound> minRecord, boost::optional<RecordIdBound> maxRecord, CollectionScanParams::ScanBoundInclusion boundInclusion) { @@ -148,7 +148,7 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::collection const CollectionPtr* coll, PlanYieldPolicy::YieldPolicy yieldPolicy, const Direction direction, - boost::optional<RecordId> resumeAfterRecordId, + const boost::optional<RecordId>& resumeAfterRecordId, boost::optional<RecordIdBound> minRecord, boost::optional<RecordIdBound> maxRecord, CollectionScanParams::ScanBoundInclusion boundInclusion) { diff --git a/src/mongo/db/query/internal_plans.h b/src/mongo/db/query/internal_plans.h index 2a56d735a40..b0d13bb6d5e 100644 --- a/src/mongo/db/query/internal_plans.h +++ b/src/mongo/db/query/internal_plans.h @@ -79,7 +79,7 @@ public: const CollectionPtr* collection, PlanYieldPolicy::YieldPolicy yieldPolicy, Direction direction = FORWARD, - boost::optional<RecordId> resumeAfterRecordId = boost::none, + const boost::optional<RecordId>& resumeAfterRecordId = boost::none, boost::optional<RecordIdBound> minRecord = boost::none, boost::optional<RecordIdBound> maxRecord = boost::none, CollectionScanParams::ScanBoundInclusion boundInclusion = @@ -192,9 +192,9 @@ private: WorkingSet* ws, const CollectionPtr* collection, Direction direction, - boost::optional<RecordId> resumeAfterRecordId = boost::none, - boost::optional<RecordId> minRecord = boost::none, - boost::optional<RecordId> maxRecord = boost::none); + const boost::optional<RecordId>& resumeAfterRecordId = boost::none, + const boost::optional<RecordId>& minRecord = boost::none, + const boost::optional<RecordId>& maxRecord = boost::none); static std::unique_ptr<PlanStage> _collectionScan( const boost::intrusive_ptr<ExpressionContext>& expCtx, diff --git a/src/mongo/db/query/plan_executor_impl.cpp b/src/mongo/db/query/plan_executor_impl.cpp index d8663b4c4e3..2b9505ed9e5 100644 --- a/src/mongo/db/query/plan_executor_impl.cpp +++ b/src/mongo/db/query/plan_executor_impl.cpp @@ -418,7 +418,7 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<Document>* ob if (nullptr != dlOut) { tassert(6297500, "Working set member has no record ID", member->hasRecordId()); - *dlOut = member->recordId; + *dlOut = std::move(member->recordId); } if (hasRequestedData) { diff --git a/src/mongo/db/query/plan_executor_sbe.cpp b/src/mongo/db/query/plan_executor_sbe.cpp index 70e2e15c8d7..920cd17715b 100644 --- a/src/mongo/db/query/plan_executor_sbe.cpp +++ b/src/mongo/db/query/plan_executor_sbe.cpp @@ -246,7 +246,7 @@ PlanExecutor::ExecState PlanExecutorSBE::getNextImpl(ObjectType* out, RecordId* *out = Document{std::move(doc)}; } if (dlOut && recordId) { - *dlOut = *recordId; + *dlOut = std::move(*recordId); } _stash.pop_front(); return PlanExecutor::ExecState::ADVANCED; diff --git a/src/mongo/db/query/record_id_bound.h b/src/mongo/db/query/record_id_bound.h index 99400ae938d..03a7a205b52 100644 --- a/src/mongo/db/query/record_id_bound.h +++ b/src/mongo/db/query/record_id_bound.h @@ -49,12 +49,12 @@ public: 
diff --git a/src/mongo/db/query/record_id_bound.h b/src/mongo/db/query/record_id_bound.h
index 99400ae938d..03a7a205b52 100644
--- a/src/mongo/db/query/record_id_bound.h
+++ b/src/mongo/db/query/record_id_bound.h
@@ -49,12 +49,12 @@ public:
    RecordIdBound() = default;

    explicit RecordIdBound(RecordId&& recordId, boost::optional<BSONObj> bson = boost::none)
-       : _recordId(recordId), _bson(bson) {}
+       : _recordId(std::move(recordId)), _bson(bson) {}

    explicit RecordIdBound(const RecordId& recordId, boost::optional<BSONObj> bson = boost::none)
        : _recordId(recordId), _bson(bson) {}

-   RecordId recordId() const {
+   const RecordId& recordId() const {
        return _recordId;
    }
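RecordIdBound::recordId() now returns a const reference instead of a copy. The usual caveat applies: the reference is only valid while the owning object is alive. A small illustrative sketch of the same accessor shape (not MongoDB code):

#include <string>

class Bound {
public:
    explicit Bound(std::string id) : _id(std::move(id)) {}
    // Returning by const reference saves one copy per call.
    const std::string& id() const { return _id; }
private:
    std::string _id;
};

int main() {
    Bound b("abc");
    const std::string& ref = b.id();  // fine: 'b' outlives 'ref'
    // const std::string& bad = Bound("xyz").id();  // would dangle: the temporary dies here
    return ref.size() == 3 ? 0 : 1;
}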
diff --git a/src/mongo/db/record_id.h b/src/mongo/db/record_id.h
index 2302630b37b..62a17167482 100644
--- a/src/mongo/db/record_id.h
+++ b/src/mongo/db/record_id.h
@@ -33,8 +33,10 @@
 #include <boost/optional.hpp>
 #include <climits>
 #include <cstdint>
+#include <cstring>
 #include <fmt/format.h>
 #include <ostream>
+#include <type_traits>

 #include "mongo/bson/bsonobjbuilder.h"
 #include "mongo/bson/util/builder.h"
@@ -44,14 +46,27 @@

 namespace mongo {

+namespace details {
+class RecordIdChecks;
+}
+
 /**
  * The key that uniquely identifies a Record in a Collection or RecordStore.
  */
-class RecordId {
+#pragma pack(push, 1)
+class alignas(int64_t) RecordId {
+    // The alignas is necessary in order to comply with memory alignment. Internally we're using
+    // 8-byte aligned data members (int64_t / char *) but as we're packing the structure the
+    // compiler will set the alignment to 1 due to the pragma so we must correct its alignment
+    // information for users of the class.
+
+    // Class used for static assertions that can only happen when RecordId is completely defined.
+    friend class details::RecordIdChecks;
+
 public:
    // This set of constants define the boundaries of the 'normal' id ranges for the int64_t format.
-   static constexpr int64_t kMinRepr = LLONG_MIN;
-   static constexpr int64_t kMaxRepr = LLONG_MAX;
+   static constexpr int64_t kMinRepr = std::numeric_limits<int64_t>::min();
+   static constexpr int64_t kMaxRepr = std::numeric_limits<int64_t>::max();

    // A RecordId binary string cannot be larger than this arbitrary size. RecordIds get written to
    // the key and the value in WiredTiger, so we should avoid large strings.
@@ -73,15 +88,58 @@ public:
        return RecordId(kMaxRepr);
    }

-   RecordId() = default;
+   RecordId() : _format(Format::kNull){};
+
+   ~RecordId() {
+       if (_format == Format::kBigStr) {
+           free(_data.heapStr.stringPtr);
+       }
+   }
+
+   RecordId(RecordId&& other) {
+       std::memcpy(this, &other, sizeof(RecordId));
+       other._format = Format::kNull;
+   };
+
+   RecordId(const RecordId& other) {
+       std::memcpy(this, &other, sizeof(RecordId));
+       if (other._format == Format::kBigStr) {
+           auto ptr = (char*)mongoMalloc(other._data.heapStr.size);
+           std::memcpy(ptr, other._data.heapStr.stringPtr, other._data.heapStr.size);
+           _data.heapStr.stringPtr = ptr;
+       }
+   };
+
+   RecordId& operator=(const RecordId& other) {
+       if (_format == Format::kBigStr) {
+           free(_data.heapStr.stringPtr);
+       }
+       std::memcpy(this, &other, sizeof(RecordId));
+       if (other._format == Format::kBigStr) {
+           auto ptr = (char*)mongoMalloc(other._data.heapStr.size);
+           std::memcpy(ptr, other._data.heapStr.stringPtr, other._data.heapStr.size);
+           _data.heapStr.stringPtr = ptr;
+       }
+       return *this;
+   };
+
+
+   RecordId& operator=(RecordId&& other) {
+       if (_format == Format::kBigStr) {
+           free(_data.heapStr.stringPtr);
+       }
+       std::memcpy(this, &other, sizeof(RecordId));
+       other._format = Format::kNull;
+       return *this;
+   }

    /**
     * Construct a RecordId that holds an int64_t. The raw value for RecordStore storage may be
     * retrieved using getLong().
     */
    explicit RecordId(int64_t s) {
-       memcpy(_buffer, &s, sizeof(s));
        _format = Format::kLong;
+       _data.longId.id = s;
    }

    /**
@@ -90,21 +148,20 @@ public:
     */
    explicit RecordId(const char* str, int32_t size) {
        invariant(size > 0, "key size must be greater than 0");
+       uassert(
+           5894900,
+           fmt::format("Size of RecordId ({}) is above limit of {} bytes", size, kBigStrMaxSize),
+           size <= kBigStrMaxSize);
        if (size <= kSmallStrMaxSize) {
            _format = Format::kSmallStr;
-           // Must fit into the buffer minus 1 byte for size.
-           _buffer[0] = static_cast<uint8_t>(size);
-           memcpy(_buffer + 1, str, size);
-       } else if (size <= kBigStrMaxSize) {
-           _format = Format::kBigStr;
-           auto sharedBuf = SharedBuffer::allocate(size);
-           memcpy(sharedBuf.get(), str, size);
-           _sharedBuffer = std::move(sharedBuf);
+           _data.inlineStr.size = static_cast<uint8_t>(size);
+           std::memcpy(_data.inlineStr.dataArr.data(), str, size);
        } else {
-           uasserted(5894900,
-                     fmt::format("Size of RecordId ({}) is above limit of {} bytes",
-                                 size,
-                                 kBigStrMaxSize));
+           _format = Format::kBigStr;
+           _data.heapStr.size = size;
+           auto ptr = (char*)mongoMalloc(size);
+           _data.heapStr.stringPtr = ptr;
+           std::memcpy(ptr, str, size);
        }
    }

@@ -159,8 +216,8 @@ public:
        if (_format == Format::kNull) {
            return 0;
        }
-       invariant(isLong(),
-                 fmt::format("expected RecordID long format, got: {}", _formatToString(_format)));
+       dassert(isLong(),
+               fmt::format("expected RecordID long format, got: {}", _formatToString(_format)));
        return _getLongNoCheck();
    }

    /**
@@ -169,20 +226,13 @@ public:
     * constructed with a binary string value, and invariants otherwise.
     */
    StringData getStr() const {
-       invariant(
-           isStr(),
-           fmt::format("expected RecordID string format, got: {}", _formatToString(_format)));
+       dassert(isStr(),
+               fmt::format("expected RecordID string format, got: {}", _formatToString(_format)));
        if (_format == Format::kSmallStr) {
            return _getSmallStrNoCheck();
-       } else if (_format == Format::kBigStr) {
+       } else {
            return _getBigStrNoCheck();
        }
-       MONGO_UNREACHABLE;
-   }
-
-   // If this RecordId is holding a large string, returns the ConstSharedBuffer holding it.
-   const ConstSharedBuffer& sharedBuffer() const {
-       return _sharedBuffer;
    }

    /**
@@ -197,6 +247,15 @@ public:
    }

    /**
+    * Returns whether the data for the RecordId is completely stored inline (within the class
+    * memory allocation). The only cases where this won't be true is when the RecordId contains a
+    * large key string that cannot be allocated inline completely.
+    */
+   bool isInlineAllocated_forTest() {
+       return _format != Format::kBigStr;
+   }
+
+   /**
     * Valid RecordIds are the only ones which may be used to represent Records. The range of valid
     * RecordIds includes both "normal" ids that refer to user data, and "reserved" ids that are
    * used internally. All RecordIds outside of the valid range are sentinel values.
@@ -216,13 +275,14 @@ public:
        switch (_format) {
            case Format::kNull:
                return rhs._format == Format::kNull ? 0 : -1;
-           case Format::kLong:
+           case Format::kLong: {
                if (rhs._format == Format::kNull) {
                    return 1;
                }
-               return _getLongNoCheck() == rhs.getLong()
-                   ? 0
-                   : (_getLongNoCheck() > rhs.getLong()) ? 1 : -1;
+               auto ourId = _getLongNoCheck();
+               auto rhsId = rhs.getLong();
+               return ourId == rhsId ? 0 : (ourId > rhsId) ? 1 : -1;
+           }
            case Format::kSmallStr:
                if (rhs._format == Format::kNull) {
                    return 1;
@@ -255,11 +315,12 @@ public:
    }

    /**
-    * Returns the total amount of memory used by this RecordId, including itself and any shared
+    * Returns the total amount of memory used by this RecordId, including itself and any heap
     * buffers.
     */
    size_t memUsage() const {
-       return sizeof(RecordId) + _sharedBuffer.capacity();
+       size_t largeStrSize = (_format == Format::kBigStr) ? _data.heapStr.size : 0;
+       return sizeof(RecordId) + largeStrSize;
    }

    /**
@@ -321,7 +382,7 @@ public:
     * Decode a token created by serializeToken().
     */
    static RecordId deserializeToken(BufReader& buf) {
-       auto format = buf.read<Format>();
+       auto format = static_cast<Format>(buf.read<char>());
        if (format == Format::kNull) {
            return RecordId();
        } else if (format == Format::kLong) {
@@ -341,34 +402,33 @@ public:
     * This maximum size for 'small' strings was chosen as a good tradeoff between keeping the
     * RecordId struct lightweight to copy by value (32 bytes), but also making the struct large
     * enough to hold a wider variety of strings. Larger strings must be stored in the
-    * ConstSharedBuffer, which requires an extra memory allocation and is reference counted, which
-    * makes it more expensive to copy.
+    * heap, which requires an extra memory allocation and makes it more expensive to copy.
     */
-   enum { kSmallStrMaxSize = 22 };
+   static constexpr auto kSmallStrMaxSize = 30;

private:
    /**
     * Format specifies the in-memory representation of this RecordId. This does not represent any
     * durable storage format.
     */
-   enum Format : int8_t {
+   enum Format : uint8_t {
        /* Uninitialized and contains no value */
        kNull,
        /**
-        * Stores an integer. The first 8 bytes of '_buffer' encode the value in machine-endian
-        * order. The RecordId may only be accessed using getLong().
+        * Stores an integer. Data is stored in '_data.longId.id'. The RecordId may only be accessed
+        * using getLong().
         */
        kLong,
        /**
-        * Stores a variable-length binary string smaller than kSmallStrMaxSize. The first byte of
-        * '_buffer' encodes the length and the remaining bytes store the string. This RecordId may
-        * only be accessed using getStr().
+        * Stores a variable-length binary string smaller than kSmallStrMaxSize. Data is stored in
+        * the InlineStr struct at '_data.inlineStr'. This RecordId may only be accessed using
+        * getStr().
         */
        kSmallStr,
        /**
         * Stores a variable-length binary string larger than kSmallStrMaxSize. The value is stored
-        * in a reference-counted buffer, '_sharedBuffer'. This RecordId may only be accessed using
-        * getStr().
+        * in a heap buffer '_data.heapStr.stringPtr' with its size stored in '_data.heapStr.size'.
+        * This RecordId may only be accessed using getStr().
         */
        kBigStr
    };
@@ -388,36 +448,63 @@ private:
    }

    int64_t _getLongNoCheck() const {
-       int64_t val;
-       memcpy(&val, _buffer, sizeof(val));
-       return val;
+       return _data.longId.id;
    }

    StringData _getSmallStrNoCheck() const {
-       char size = _buffer[0];
-       invariant(size > 0);
-       invariant(size <= kSmallStrMaxSize);
-       return StringData(_buffer + 1, size);
+       return StringData(_data.inlineStr.dataArr.data(), _data.inlineStr.size);
    }

    StringData _getBigStrNoCheck() const {
-       // We use a ConstSharedBuffer that is only allocated once and assume the string size is
-       // just the originally allocated capacity.
-       size_t size = _sharedBuffer.capacity();
-       invariant(size > kSmallStrMaxSize);
-       invariant(size <= kBigStrMaxSize);
-       return StringData(_sharedBuffer.get(), size);
+       return StringData(_data.heapStr.stringPtr, _data.heapStr.size);
    }

-   Format _format = Format::kNull;
-   // An extra byte of space is required to store the size for the
-   // kSmallStr Format. Zero the buffer so we don't need to write
-   // explicit lifecycle methods that avoid copying from
-   // uninitialized portions of the buffer.
-   char _buffer[kSmallStrMaxSize + 1] = {};
-   // Used only for the kBigStr Format.
-   ConstSharedBuffer _sharedBuffer;
+   static constexpr auto kTargetSizeInBytes = 32;
+   // In the usual case we would store the data as Format followed by a struct union of the
+   // InlineString (size + array), HeapStr (size + ptr), and LongId (int64_t). This however leaves
+   // 7 bytes unused for padding if Format is 1 byte and 4 if it is 4 bytes (x86) due to data
+   // alignment requirements of the union. To avoid this we manually perform memory padding in the
+   // structs of the union coupled with packing the class so that all items align properly.
+   Format _format;  // offset = 0, size = 1
+   static_assert(sizeof(Format) == 1);
+   // All of this will work if and only if char size is 1 (std::byte) for the InlineString.
+   static_assert(sizeof(std::byte) == sizeof(char));
+   // Offsets/padding will be computed with respect to the whole class by taking into account the
+   // Format data member.
+   struct HeapStr {
+       std::byte _padding[std::alignment_of_v<uint32_t> - sizeof(Format)];  // offset = 1, size = 3
+       uint32_t size;  // offset = 1 + 3, size = 4
+       static constexpr auto ptrPaddingBytes =
+           std::alignment_of_v<char*> - sizeof(Format) - sizeof(_padding) - sizeof(size);
+       static_assert(ptrPaddingBytes == 0,
+                     "No padding should be necessary between the size and pointer of HeapStr");
+       char* stringPtr;  // offset = 1 + 3 + 4, size = 8
+   };
+   struct InlineStr {
+       uint8_t size;  // offset = 1, size = 1
+       std::array<char, kTargetSizeInBytes - sizeof(Format) - sizeof(size)>
+           dataArr;  // offset = 1 + 1, size = 30
+   };
+   struct LongId {
+       std::byte _padding[std::alignment_of_v<int64_t> - sizeof(Format)];  // offset = 1, size = 7
+       int64_t id;  // offset = 1 + 7, size = 8
+   };
+   union Content {
+       HeapStr heapStr;
+       InlineStr inlineStr;
+       LongId longId;
+   };
+   Content _data;  // offset = 1, size = 31
+};
+#pragma pack(pop)
+
+namespace details {
+// Various assertions of RecordId that can only happen when the type is completely defined.
+class RecordIdChecks {
+    static_assert(sizeof(RecordId) == RecordId::kTargetSizeInBytes);
+    static_assert(std::alignment_of_v<RecordId> == std::alignment_of_v<int64_t>);
+};
+}  // namespace details

 inline bool operator==(const RecordId& lhs, const RecordId& rhs) {
     return lhs.compare(rhs) == 0;
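The packed layout above is subtle enough that the patch adds details::RecordIdChecks just to assert it. Below is a stand-alone model of the same trick (illustrative names and simplified members, not the real mongo::RecordId) that can be compiled to see how pragma pack, alignas, and hand-written padding interact:

#include <array>
#include <cstddef>
#include <cstdint>

#pragma pack(push, 1)
class alignas(int64_t) PackedId {
public:
    enum Format : uint8_t { kNull, kLong };
    Format format{kNull};  // offset 0, size 1
    struct LongId {
        std::byte padding[alignof(int64_t) - sizeof(Format)];  // 7 bytes, written by hand
        int64_t id;                                            // ends up 8-byte aligned
    };
    union Content {
        LongId longId;
        std::array<char, 31> inlineStr;  // 31 bytes fill the rest of the object
    };
    Content data;  // offset 1: no compiler padding because of pack(1)
};
#pragma pack(pop)

// The same kind of checks the patch performs via details::RecordIdChecks.
static_assert(sizeof(PackedId) == 32);
static_assert(alignof(PackedId) == alignof(int64_t));
static_assert(offsetof(PackedId, data) == 1);

int main() {}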
diff --git a/src/mongo/db/record_id_helpers.cpp b/src/mongo/db/record_id_helpers.cpp
index bf313976a3b..4ffa7a8e75f 100644
--- a/src/mongo/db/record_id_helpers.cpp
+++ b/src/mongo/db/record_id_helpers.cpp
@@ -60,13 +60,12 @@ StatusWith<RecordId> keyForOptime(const Timestamp& opTime, const KeyFormat keyFo
            if (opTime.getInc() > uint32_t(std::numeric_limits<int32_t>::max()))
                return {ErrorCodes::BadValue, "ts inc too high"};

-           const auto out = RecordId(opTime.getSecs(), opTime.getInc());
+           auto out = RecordId(opTime.getSecs(), opTime.getInc());
            if (out <= RecordId::minLong())
                return {ErrorCodes::BadValue, "ts too low"};
            if (out >= RecordId::maxLong())
                return {ErrorCodes::BadValue, "ts too high"};
-
-           return out;
+           return {std::move(out)};
        }
        case KeyFormat::String: {
            KeyString::Builder keyBuilder(KeyString::Version::kLatestVersion);
@@ -145,7 +144,7 @@ RecordId keyForDate(Date_t date) {
    return RecordId(keyBuilder.getBuffer(), keyBuilder.getSize());
 }

-void appendToBSONAs(RecordId rid, BSONObjBuilder* builder, StringData fieldName) {
+void appendToBSONAs(const RecordId& rid, BSONObjBuilder* builder, StringData fieldName) {
    rid.withFormat([&](RecordId::Null) { builder->appendNull(fieldName); },
                   [&](int64_t val) { builder->append(fieldName, val); },
                   [&](const char* str, int len) {
@@ -153,7 +152,7 @@ void appendToBSONAs(RecordId rid, BSONObjBuilder* builder, StringData fieldName)
                   });
 }

-BSONObj toBSONAs(RecordId rid, StringData fieldName) {
+BSONObj toBSONAs(const RecordId& rid, StringData fieldName) {
    BSONObjBuilder builder;
    appendToBSONAs(rid, &builder, fieldName);
    return builder.obj();
@@ -178,7 +177,7 @@ RecordId reservedIdFor(ReservationId res, KeyFormat keyFormat) {
    }
 }

-bool isReserved(RecordId id) {
+bool isReserved(const RecordId& id) {
    if (id.isNull()) {
        return false;
    }
diff --git a/src/mongo/db/record_id_helpers.h b/src/mongo/db/record_id_helpers.h
index b957b30cce6..282132dd684 100644
--- a/src/mongo/db/record_id_helpers.h
+++ b/src/mongo/db/record_id_helpers.h
@@ -72,8 +72,8 @@ StatusWith<RecordId> extractKeyOptime(const char* data, int len);
  * RecordId because it loses information about the original RecordId format. If you require passing
  * a RecordId as a token or storing for a resumable scan, for example, use RecordId::serializeToken.
  */
-void appendToBSONAs(RecordId rid, BSONObjBuilder* builder, StringData fieldName);
-BSONObj toBSONAs(RecordId rid, StringData fieldName);
+void appendToBSONAs(const RecordId& rid, BSONObjBuilder* builder, StringData fieldName);
+BSONObj toBSONAs(const RecordId& rid, StringData fieldName);

 /**
  * Enumerates all reserved ids that have been allocated for a specific purpose. These IDs may not be
@@ -89,7 +89,7 @@ RecordId reservedIdFor(ReservationId res, KeyFormat keyFormat);

 /**
  * Returns true if this RecordId falls within the reserved range for a given RecordId type.
  */
-bool isReserved(RecordId id);
+bool isReserved(const RecordId& id);

 }  // namespace record_id_helpers
 }  // namespace mongo
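appendToBSONAs() above dispatches on the RecordId's format by handing one callable per case to RecordId::withFormat (Null, int64_t, and string, as visible in the call). For readers unfamiliar with the pattern, here is a self-contained sketch of the same visitor style built on std::variant (illustrative only, not the MongoDB implementation):

#include <cstdint>
#include <iostream>
#include <string>
#include <variant>

struct NullTag {};
using Id = std::variant<NullTag, int64_t, std::string>;

// Standard C++17 overload-set helper for std::visit.
template <typename... Fs>
struct Overloaded : Fs... { using Fs::operator()...; };
template <typename... Fs>
Overloaded(Fs...) -> Overloaded<Fs...>;

int main() {
    Id id = int64_t{42};
    std::visit(Overloaded{[](NullTag) { std::cout << "null\n"; },
                          [](int64_t v) { std::cout << "long: " << v << "\n"; },
                          [](const std::string& s) { std::cout << "str: " << s << "\n"; }},
               id);
}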
diff --git a/src/mongo/db/record_id_test.cpp b/src/mongo/db/record_id_test.cpp
index 2a0e4360caf..ef6e04b1e27 100644
--- a/src/mongo/db/record_id_test.cpp
+++ b/src/mongo/db/record_id_test.cpp
@@ -34,6 +34,7 @@
 #include "mongo/db/record_id_helpers.h"
 #include "mongo/unittest/death_test.h"
 #include "mongo/unittest/unittest.h"
+#include "mongo/util/debug_util.h"

 namespace mongo {
 namespace {
@@ -295,24 +296,24 @@ TEST(RecordId, RecordIdBigStr) {

    // This string should be just enough to qualify for the small string optimization.
    RecordId smallId(buf, RecordId::kSmallStrMaxSize);
-   ASSERT_FALSE(smallId.sharedBuffer().isShared());
+   ASSERT_TRUE(smallId.isInlineAllocated_forTest());
    ASSERT_EQ(smallId.getStr().size(), RecordId::kSmallStrMaxSize);
    ASSERT_EQ(sizeof(RecordId), smallId.memUsage());

    // At a certain size RecordId strings should expand beyond the size of the struct and start
-   // using a shared buffer.
+   // using a heap buffer.
    RecordId bigId(buf, RecordId::kSmallStrMaxSize + 1);
-   ASSERT_FALSE(bigId.sharedBuffer().isShared());
+   ASSERT_FALSE(bigId.isInlineAllocated_forTest());
    ASSERT_EQ(bigId.getStr().size(), RecordId::kSmallStrMaxSize + 1);
    ASSERT_EQ(sizeof(RecordId) + bigId.getStr().size(), bigId.memUsage());
    ASSERT_GT(bigId, smallId);
    ASSERT_LT(smallId, bigId);

-   // Once copied, this RecordId should be sharing its contents.
+   // Once copied, this RecordId should not be sharing its contents.
    RecordId bigCopy = bigId;
-   ASSERT_TRUE(bigId.sharedBuffer().isShared());
-   ASSERT_TRUE(bigCopy.sharedBuffer().isShared());
-   ASSERT_EQ(bigId.getStr().rawData(), bigCopy.getStr().rawData());
+   ASSERT_FALSE(bigId.isInlineAllocated_forTest());
+   ASSERT_FALSE(bigCopy.isInlineAllocated_forTest());
+   ASSERT_NE(bigId.getStr().rawData(), bigCopy.getStr().rawData());
    ASSERT_EQ(bigId.getStr().size(), bigCopy.getStr().size());
    ASSERT_EQ(sizeof(RecordId) + bigId.getStr().size(), bigCopy.memUsage());

@@ -328,9 +329,15 @@ TEST(RecordId, RecordIdBigStr) {

 // RecordIds of different formats may not be compared.
 DEATH_TEST(RecordId, UnsafeComparison, "Invariant failure") {
-   RecordId rid1(1);
-   RecordId rid2 = record_id_helpers::keyForOID(OID::createFromString("000000000000000000000001"));
-   ASSERT_NOT_EQUALS(rid1, rid2);
+   if (kDebugBuild) {
+       RecordId rid1(1);
+       RecordId rid2 =
+           record_id_helpers::keyForOID(OID::createFromString("000000000000000000000001"));
+       ASSERT_NOT_EQUALS(rid1, rid2);
+   } else {
+       // This test should not be run in release builds as the assertion won't be in there.
+       invariant(false, "Deliberately crash here so the test doesn't fail on release builds");
+   }
 }

 }  // namespace
diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
index 86ea5ece9c4..35d39b60f06 100644
--- a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
+++ b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
@@ -146,7 +146,9 @@ public:
        return Status::OK();
    }

-   void doCappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) override {}
+   void doCappedTruncateAfter(OperationContext* opCtx,
+                              const RecordId& end,
+                              bool inclusive) override {}

    virtual void appendNumericCustomStats(OperationContext* opCtx,
                                          BSONObjBuilder* result,
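Several hot accessors (getLong(), getStr()) switched from invariant to dassert above, and the death test now only expects a crash in debug builds. A sketch of that trade-off with hypothetical stand-in macros (the real dassert and kDebugBuild come from MongoDB's assert_util.h and debug_util.h, which the test now includes; these stand-ins only mimic the behavior):

#include <cassert>
#include <cstdio>

#ifdef NDEBUG
constexpr bool kDebugBuild = false;
#define DASSERT(cond) ((void)0)  // compiled out of release builds
#else
constexpr bool kDebugBuild = true;
#define DASSERT(cond) assert(cond)
#endif

long long getLong(bool isLong, long long value) {
    DASSERT(isLong);  // debug-only format check; zero cost in release
    return value;
}

int main() {
    std::printf("debug build: %s\n", kDebugBuild ? "yes" : "no");
    return getLong(true, 7) == 7 ? 0 : 1;
}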
diff --git a/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.cpp b/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.cpp
index e7e661b8d81..a0521026168 100644
--- a/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.cpp
+++ b/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.cpp
@@ -368,7 +368,7 @@ Status EphemeralForTestRecordStore::doInsertRecords(OperationContext* opCtx,
                extractAndCheckLocForOplog(lock, record->data.data(), record->data.size());
            if (!status.isOK())
                return status.getStatus();
-           loc = status.getValue();
+           loc = std::move(status.getValue());
        } else {
            loc = allocateLoc(lock);
        }
@@ -377,7 +377,7 @@ Status EphemeralForTestRecordStore::doInsertRecords(OperationContext* opCtx,
        _data->records[loc] = rec;
        record->id = loc;

-       opCtx->recoveryUnit()->onRollback([this, loc]() {
+       opCtx->recoveryUnit()->onRollback([this, loc = std::move(loc)]() {
            stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);

            Records::iterator it = _data->records.find(loc);
@@ -489,13 +489,13 @@ Status EphemeralForTestRecordStore::doTruncate(OperationContext* opCtx) {
 }

 void EphemeralForTestRecordStore::doCappedTruncateAfter(OperationContext* opCtx,
-                                                        RecordId end,
+                                                        const RecordId& end,
                                                         bool inclusive) {
    stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
    Records::iterator it =
        inclusive ? _data->records.lower_bound(end) : _data->records.upper_bound(end);
    while (it != _data->records.end()) {
-       RecordId id = it->first;
+       auto& id = it->first;
        EphemeralForTestRecord record = it->second;

        if (_cappedCallback) {
diff --git a/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h b/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h
index c3b30e45ef0..1658979bbdf 100644
--- a/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h
+++ b/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h
@@ -90,7 +90,9 @@ public:

    Status doTruncate(OperationContext* opCtx) override;

-   void doCappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) override;
+   void doCappedTruncateAfter(OperationContext* opCtx,
+                              const RecordId& end,
+                              bool inclusive) override;

    virtual void appendNumericCustomStats(OperationContext* opCtx,
                                          BSONObjBuilder* result,
diff --git a/src/mongo/db/storage/durable_catalog.h b/src/mongo/db/storage/durable_catalog.h
index 49db4546918..1a622c9284a 100644
--- a/src/mongo/db/storage/durable_catalog.h
+++ b/src/mongo/db/storage/durable_catalog.h
@@ -60,7 +60,7 @@ public:
    struct Entry {
        Entry() {}
        Entry(RecordId catalogId, std::string ident, NamespaceString nss)
-           : catalogId(catalogId), ident(std::move(ident)), nss(std::move(nss)) {}
+           : catalogId(std::move(catalogId)), ident(std::move(ident)), nss(std::move(nss)) {}
        RecordId catalogId;
        std::string ident;
        NamespaceString nss;
@@ -87,18 +87,19 @@ public:

    virtual std::vector<Entry> getAllCatalogEntries(OperationContext* opCtx) const = 0;

-   virtual Entry getEntry(RecordId catalogId) const = 0;
+   virtual Entry getEntry(const RecordId& catalogId) const = 0;

    virtual std::string getIndexIdent(OperationContext* opCtx,
-                                     RecordId id,
+                                     const RecordId& id,
                                      StringData idxName) const = 0;

-   virtual std::vector<std::string> getIndexIdents(OperationContext* opCtx, RecordId id) const = 0;
+   virtual std::vector<std::string> getIndexIdents(OperationContext* opCtx,
+                                                   const RecordId& id) const = 0;

-   virtual BSONObj getCatalogEntry(OperationContext* opCtx, RecordId catalogId) const = 0;
+   virtual BSONObj getCatalogEntry(OperationContext* opCtx, const RecordId& catalogId) const = 0;

    virtual std::shared_ptr<BSONCollectionCatalogEntry::MetaData> getMetaData(
-       OperationContext* opCtx, RecordId id) const = 0;
+       OperationContext* opCtx, const RecordId& id) const = 0;

    /**
     * Updates the catalog entry for the collection 'nss' with the fields specified in 'md'. If
     * 'md.indexes' contains a new index entry, then this method generates a new index ident and
     * adds it to the catalog entry.
     */
    virtual void putMetaData(OperationContext* opCtx,
-                            RecordId id,
+                            const RecordId& id,
                             BSONCollectionCatalogEntry::MetaData& md) = 0;

    virtual std::vector<std::string> getAllIdents(OperationContext* opCtx) const = 0;
@@ -152,7 +153,7 @@ public:
                                      bool allocateDefaultSpace) = 0;

    virtual Status createIndex(OperationContext* opCtx,
-                              RecordId catalogId,
+                              const RecordId& catalogId,
                               const NamespaceString& nss,
                               const CollectionOptions& collOptions,
                               const IndexDescriptor* spec) = 0;
@@ -174,7 +175,7 @@ public:
     */
    struct ImportResult {
        ImportResult(RecordId catalogId, std::unique_ptr<RecordStore> rs, UUID uuid)
-           : catalogId(catalogId), rs(std::move(rs)), uuid(uuid) {}
+           : catalogId(std::move(catalogId)), rs(std::move(rs)), uuid(uuid) {}
        RecordId catalogId;
        std::unique_ptr<RecordStore> rs;
        UUID uuid;
@@ -187,7 +188,7 @@ public:
                                                    const ImportOptions& importOptions) = 0;

    virtual Status renameCollection(OperationContext* opCtx,
-                                   RecordId catalogId,
+                                   const RecordId& catalogId,
                                    const NamespaceString& toNss,
                                    BSONCollectionCatalogEntry::MetaData& md) = 0;

@@ -197,7 +198,7 @@ public:
     * Expects (invariants) that all of the index catalog entries have been removed already via
     * removeIndex.
     */
-   virtual Status dropCollection(OperationContext* opCtx, RecordId catalogId) = 0;
+   virtual Status dropCollection(OperationContext* opCtx, const RecordId& catalogId) = 0;

    /**
     * Drops the provided ident and recreates it as empty for use in resuming an index build.
@@ -208,14 +209,14 @@ public:
                                                      const IndexDescriptor* spec,
                                                      StringData ident) = 0;

-   virtual int getTotalIndexCount(OperationContext* opCtx, RecordId catalogId) const = 0;
+   virtual int getTotalIndexCount(OperationContext* opCtx, const RecordId& catalogId) const = 0;

    virtual bool isIndexPresent(OperationContext* opCtx,
-                               RecordId catalogId,
+                               const RecordId& catalogId,
                                StringData indexName) const = 0;

    virtual bool isIndexReady(OperationContext* opCtx,
-                             RecordId catalogId,
+                             const RecordId& catalogId,
                              StringData indexName) const = 0;

    /**
@@ -230,7 +231,7 @@ public:
     * number of elements in the index key pattern of empty sets.
     */
    virtual bool isIndexMultikey(OperationContext* opCtx,
-                                RecordId catalogId,
+                                const RecordId& catalogId,
                                 StringData indexName,
                                 MultikeyPaths* multikeyPaths) const = 0;

diff --git a/src/mongo/db/storage/durable_catalog_impl.cpp b/src/mongo/db/storage/durable_catalog_impl.cpp
index b7773e96bd9..9d2c45c69f6 100644
--- a/src/mongo/db/storage/durable_catalog_impl.cpp
+++ b/src/mongo/db/storage/durable_catalog_impl.cpp
@@ -145,7 +145,7 @@ bool indexTypeSupportsPathLevelMultikeyTracking(StringData accessMethod) {
 class DurableCatalogImpl::AddIdentChange : public RecoveryUnit::Change {
 public:
    AddIdentChange(DurableCatalogImpl* catalog, RecordId catalogId)
-       : _catalog(catalog), _catalogId(catalogId) {}
+       : _catalog(catalog), _catalogId(std::move(catalogId)) {}

    virtual void commit(boost::optional<Timestamp>) {}
    virtual void rollback() {
@@ -267,7 +267,7 @@ std::vector<DurableCatalog::Entry> DurableCatalogImpl::getAllCatalogEntries(
    return ret;
 }

-DurableCatalog::Entry DurableCatalogImpl::getEntry(RecordId catalogId) const {
+DurableCatalog::Entry DurableCatalogImpl::getEntry(const RecordId& catalogId) const {
    stdx::lock_guard<Latch> lk(_catalogIdToEntryMapLock);
    auto it = _catalogIdToEntryMap.find(catalogId);
    invariant(it != _catalogIdToEntryMap.end());
@@ -337,7 +337,7 @@ StatusWith<DurableCatalog::Entry> DurableCatalogImpl::_importEntry(OperationCont
 }

 std::string DurableCatalogImpl::getIndexIdent(OperationContext* opCtx,
-                                              RecordId catalogId,
+                                              const RecordId& catalogId,
                                               StringData idxName) const {
    BSONObj obj = _findEntry(opCtx, catalogId);
    BSONObj idxIdent = obj["idxIdent"].Obj();
@@ -345,7 +345,7 @@ std::string DurableCatalogImpl::getIndexIdent(OperationContext* opCtx,
 }

 std::vector<std::string> DurableCatalogImpl::getIndexIdents(OperationContext* opCtx,
-                                                            RecordId catalogId) const {
+                                                            const RecordId& catalogId) const {
    std::vector<std::string> idents;

    BSONObj obj = _findEntry(opCtx, catalogId);
@@ -365,7 +365,7 @@ std::vector<std::string> DurableCatalogImpl::getIndexIdents(OperationContext* op
    return idents;
 }

-BSONObj DurableCatalogImpl::_findEntry(OperationContext* opCtx, RecordId catalogId) const {
+BSONObj DurableCatalogImpl::_findEntry(OperationContext* opCtx, const RecordId& catalogId) const {
    LOGV2_DEBUG(22208, 3, "looking up metadata for: {catalogId}", "catalogId"_attr = catalogId);
    RecordData data;
    if (!_rs->findRecord(opCtx, catalogId, &data)) {
@@ -379,7 +379,7 @@ BSONObj DurableCatalogImpl::_findEntry(OperationContext* opCtx, RecordId catalog
 }

 std::shared_ptr<BSONCollectionCatalogEntry::MetaData> DurableCatalogImpl::getMetaData(
-    OperationContext* opCtx, RecordId catalogId) const {
+    OperationContext* opCtx, const RecordId& catalogId) const {
    BSONObj obj = _findEntry(opCtx, catalogId);
    LOGV2_DEBUG(22209, 3, " fetched CCE metadata: {obj}", "obj"_attr = obj);
    std::shared_ptr<BSONCollectionCatalogEntry::MetaData> md;
@@ -393,7 +393,7 @@ std::shared_ptr<BSONCollectionCatalogEntry::MetaData> DurableCatalogImpl::getMet
 }

 void DurableCatalogImpl::putMetaData(OperationContext* opCtx,
-                                     RecordId catalogId,
+                                     const RecordId& catalogId,
                                      BSONCollectionCatalogEntry::MetaData& md) {
    NamespaceString nss(md.ns);
    BSONObj obj = _findEntry(opCtx, catalogId);
@@ -441,7 +441,7 @@ void DurableCatalogImpl::putMetaData(OperationContext* opCtx,
 }

 Status DurableCatalogImpl::_replaceEntry(OperationContext* opCtx,
-                                         RecordId catalogId,
+                                         const RecordId& catalogId,
                                          const NamespaceString& toNss,
                                          BSONCollectionCatalogEntry::MetaData& md) {
    BSONObj old = _findEntry(opCtx, catalogId).getOwned();
@@ -474,7 +474,7 @@ Status DurableCatalogImpl::_replaceEntry(OperationContext* opCtx,
    return Status::OK();
 }

-Status DurableCatalogImpl::_removeEntry(OperationContext* opCtx, RecordId catalogId) {
+Status DurableCatalogImpl::_removeEntry(OperationContext* opCtx, const RecordId& catalogId) {
    stdx::lock_guard<Latch> lk(_catalogIdToEntryMapLock);
    const auto it = _catalogIdToEntryMap.find(catalogId);
    if (it == _catalogIdToEntryMap.end()) {
@@ -634,7 +634,7 @@ StatusWith<std::pair<RecordId, std::unique_ptr<RecordStore>>> DurableCatalogImpl
 }

 Status DurableCatalogImpl::createIndex(OperationContext* opCtx,
-                                       RecordId catalogId,
+                                       const RecordId& catalogId,
                                        const NamespaceString& nss,
                                        const CollectionOptions& collOptions,
                                        const IndexDescriptor* spec) {
@@ -755,13 +755,13 @@ StatusWith<DurableCatalog::ImportResult> DurableCatalogImpl::importCollection(
 }

 Status DurableCatalogImpl::renameCollection(OperationContext* opCtx,
-                                            RecordId catalogId,
+                                            const RecordId& catalogId,
                                             const NamespaceString& toNss,
                                             BSONCollectionCatalogEntry::MetaData& md) {
    return _replaceEntry(opCtx, catalogId, toNss, md);
 }

-Status DurableCatalogImpl::dropCollection(OperationContext* opCtx, RecordId catalogId) {
+Status DurableCatalogImpl::dropCollection(OperationContext* opCtx, const RecordId& catalogId) {
    Entry entry;
    {
        stdx::lock_guard<Latch> lk(_catalogIdToEntryMapLock);
@@ -795,7 +795,7 @@ Status DurableCatalogImpl::dropAndRecreateIndexIdentForResume(OperationContext*
 }

 bool DurableCatalogImpl::isIndexMultikey(OperationContext* opCtx,
-                                         RecordId catalogId,
+                                         const RecordId& catalogId,
                                          StringData indexName,
                                          MultikeyPaths* multikeyPaths) const {
    auto md = getMetaData(opCtx, catalogId);
@@ -812,7 +812,8 @@ bool DurableCatalogImpl::isIndexMultikey(OperationContext* opCtx,
    return md->indexes[offset].multikey;
 }

-int DurableCatalogImpl::getTotalIndexCount(OperationContext* opCtx, RecordId catalogId) const {
+int DurableCatalogImpl::getTotalIndexCount(OperationContext* opCtx,
+                                           const RecordId& catalogId) const {
    auto md = getMetaData(opCtx, catalogId);
    if (!md)
        return 0;
@@ -821,7 +822,7 @@ int DurableCatalogImpl::getTotalIndexCount(OperationContext* opCtx, RecordId cat
 }

 bool DurableCatalogImpl::isIndexPresent(OperationContext* opCtx,
-                                        RecordId catalogId,
+                                        const RecordId& catalogId,
                                         StringData indexName) const {
    auto md = getMetaData(opCtx, catalogId);
    if (!md)
@@ -832,7 +833,7 @@ bool DurableCatalogImpl::isIndexPresent(OperationContext* opCtx,
 }

 bool DurableCatalogImpl::isIndexReady(OperationContext* opCtx,
-                                      RecordId catalogId,
+                                      const RecordId& catalogId,
                                       StringData indexName) const {
    auto md = getMetaData(opCtx, catalogId);
    if (!md)
diff --git a/src/mongo/db/storage/durable_catalog_impl.h b/src/mongo/db/storage/durable_catalog_impl.h
index 873733d58b4..ecd29d74307 100644
--- a/src/mongo/db/storage/durable_catalog_impl.h
+++ b/src/mongo/db/storage/durable_catalog_impl.h
@@ -64,24 +64,25 @@ public:

    std::vector<Entry> getAllCatalogEntries(OperationContext* opCtx) const;

-   Entry getEntry(RecordId catalogId) const;
+   Entry getEntry(const RecordId& catalogId) const;

-   std::string getCollectionIdent(RecordId catalogId) const;
+   std::string getCollectionIdent(const RecordId& catalogId) const;

    std::string getIndexIdent(OperationContext* opCtx,
-                             RecordId catalogId,
+                             const RecordId& catalogId,
                              StringData idxName) const;

-   std::vector<std::string> getIndexIdents(OperationContext* opCtx, RecordId catalogId) const;
+   std::vector<std::string> getIndexIdents(OperationContext* opCtx,
+                                           const RecordId& catalogId) const;
-   BSONObj getCatalogEntry(OperationContext* opCtx, RecordId catalogId) const {
+   BSONObj getCatalogEntry(OperationContext* opCtx, const RecordId& catalogId) const {
        return _findEntry(opCtx, catalogId);
    }

-   std::shared_ptr<BSONCollectionCatalogEntry::MetaData> getMetaData(OperationContext* opCtx,
-                                                                     RecordId catalogId) const;
+   std::shared_ptr<BSONCollectionCatalogEntry::MetaData> getMetaData(
+       OperationContext* opCtx, const RecordId& catalogId) const;

    void putMetaData(OperationContext* opCtx,
-                    RecordId catalogId,
+                    const RecordId& catalogId,
                     BSONCollectionCatalogEntry::MetaData& md);

    std::vector<std::string> getAllIdents(OperationContext* opCtx) const;
@@ -112,7 +113,7 @@ public:
                                     bool allocateDefaultSpace);

    Status createIndex(OperationContext* opCtx,
-                      RecordId catalogId,
+                      const RecordId& catalogId,
                       const NamespaceString& nss,
                       const CollectionOptions& collOptions,
                       const IndexDescriptor* spec);
@@ -124,11 +125,11 @@ public:
                                            const ImportOptions& importOptions) override;

    Status renameCollection(OperationContext* opCtx,
-                           RecordId catalogId,
+                           const RecordId& catalogId,
                            const NamespaceString& toNss,
                            BSONCollectionCatalogEntry::MetaData& md);

-   Status dropCollection(OperationContext* opCtx, RecordId catalogId);
+   Status dropCollection(OperationContext* opCtx, const RecordId& catalogId);

    Status dropAndRecreateIndexIdentForResume(OperationContext* opCtx,
                                              const NamespaceString& nss,
@@ -137,15 +138,19 @@ public:
                                              StringData ident);

    bool isIndexMultikey(OperationContext* opCtx,
-                        RecordId catalogId,
+                        const RecordId& catalogId,
                         StringData indexName,
                         MultikeyPaths* multikeyPaths) const;

-   int getTotalIndexCount(OperationContext* opCtx, RecordId catalogId) const;
+   int getTotalIndexCount(OperationContext* opCtx, const RecordId& catalogId) const;

-   bool isIndexPresent(OperationContext* opCtx, RecordId catalogId, StringData indexName) const;
+   bool isIndexPresent(OperationContext* opCtx,
+                       const RecordId& catalogId,
+                       StringData indexName) const;

-   bool isIndexReady(OperationContext* opCtx, RecordId catalogId, StringData indexName) const;
+   bool isIndexReady(OperationContext* opCtx,
+                     const RecordId& catalogId,
+                     StringData indexName) const;

    void setRand_forTest(const std::string& rand);

@@ -158,7 +163,7 @@ private:
    friend class DurableCatalogImplTest;
    friend class StorageEngineTest;

-   BSONObj _findEntry(OperationContext* opCtx, RecordId catalogId) const;
+   BSONObj _findEntry(OperationContext* opCtx, const RecordId& catalogId) const;
    StatusWith<Entry> _addEntry(OperationContext* opCtx,
                                NamespaceString nss,
                                const CollectionOptions& options);
@@ -166,10 +171,10 @@ private:
                                   NamespaceString nss,
                                   const BSONObj& metadata);
    Status _replaceEntry(OperationContext* opCtx,
-                        RecordId catalogId,
+                        const RecordId& catalogId,
                         const NamespaceString& toNss,
                         BSONCollectionCatalogEntry::MetaData& md);
-   Status _removeEntry(OperationContext* opCtx, RecordId catalogId);
+   Status _removeEntry(OperationContext* opCtx, const RecordId& catalogId);

    /**
     * Generates a new unique identifier for a new "thing".
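A recurring pattern in the catalog changes above (Entry, ImportResult, AddIdentChange) is taking RecordId by value in constructors and moving it into the member, rather than taking a const reference and copying. Callers with an rvalue pay one move; callers with an lvalue pay one copy plus one move. A minimal sketch of the idiom with a stand-in type:

#include <string>
#include <utility>

struct Entry {
    // Accept by value, then move into the member.
    Entry(std::string ident) : ident(std::move(ident)) {}
    std::string ident;
};

int main() {
    std::string s = "collection-7";
    Entry a(s);             // copy into the parameter, move into the member
    Entry b(std::move(s));  // move, move
    return a.ident.size() == b.ident.size() ? 0 : 1;
}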
diff --git a/src/mongo/db/storage/index_entry_comparison.h b/src/mongo/db/storage/index_entry_comparison.h
index 2f6232459dc..fedc8437e25 100644
--- a/src/mongo/db/storage/index_entry_comparison.h
+++ b/src/mongo/db/storage/index_entry_comparison.h
@@ -92,7 +92,7 @@ inline bool operator!=(const IndexKeyEntry& lhs, const IndexKeyEntry& rhs) {
  * Represents KeyString struct containing a KeyString::Value and its RecordId
  */
 struct KeyStringEntry {
-   KeyStringEntry(KeyString::Value ks, RecordId loc) : keyString(ks), loc(loc) {
+   KeyStringEntry(KeyString::Value ks, RecordId id) : keyString(ks), loc(std::move(id)) {
        if (!kDebugBuild) {
            return;
        }
diff --git a/src/mongo/db/storage/key_string.cpp b/src/mongo/db/storage/key_string.cpp
index 154dd3a59fa..0ba7f9c35af 100644
--- a/src/mongo/db/storage/key_string.cpp
+++ b/src/mongo/db/storage/key_string.cpp
@@ -339,7 +339,7 @@ string readInvertedCStringWithNuls(BufReader* reader) {
 }  // namespace

 template <class BufferT>
-void BuilderBase<BufferT>::resetToKey(const BSONObj& obj, Ordering ord, RecordId recordId) {
+void BuilderBase<BufferT>::resetToKey(const BSONObj& obj, Ordering ord, const RecordId& recordId) {
    resetToEmpty(ord);
    _appendAllElementsForIndexing(obj, Discriminator::kInclusive);
    appendRecordId(recordId);
@@ -551,7 +551,7 @@ void BuilderBase<BufferT>::_appendAllElementsForIndexing(const BSONObj& obj,
 }

 template <class BufferT>
-void BuilderBase<BufferT>::appendRecordId(RecordId loc) {
+void BuilderBase<BufferT>::appendRecordId(const RecordId& loc) {
    _doneAppending();
    _transition(BuildState::kAppendedRecordID);
    loc.withFormat([](RecordId::Null n) { invariant(false); },
diff --git a/src/mongo/db/storage/key_string.h b/src/mongo/db/storage/key_string.h
index e1daa8881e0..b9173d5b876 100644
--- a/src/mongo/db/storage/key_string.h
+++ b/src/mongo/db/storage/key_string.h
@@ -504,7 +504,7 @@ public:
    explicit BuilderBase(Version version)
        : BuilderBase(version, ALL_ASCENDING, Discriminator::kInclusive) {}

-   BuilderBase(Version version, const BSONObj& obj, Ordering ord, RecordId recordId)
+   BuilderBase(Version version, const BSONObj& obj, Ordering ord, const RecordId& recordId)
        : BuilderBase(version, ord) {
        resetToKey(obj, ord, recordId);
    }
@@ -527,7 +527,7 @@ public:
        resetFromBuffer(other.getBuffer(), other.getSize());
    }

-   BuilderBase(Version version, RecordId rid) : BuilderBase(version) {
+   BuilderBase(Version version, const RecordId& rid) : BuilderBase(version) {
        appendRecordId(rid);
    }

@@ -552,7 +552,7 @@ public:
        return {version, _buffer().len(), SharedBufferFragment(newBuf.release(), newBufLen)};
    }

-   void appendRecordId(RecordId loc);
+   void appendRecordId(const RecordId& loc);
    void appendTypeBits(const TypeBits& bits);

    /*
@@ -603,7 +603,7 @@ public:
        _transition(BuildState::kEmpty);
    }

-   void resetToKey(const BSONObj& obj, Ordering ord, RecordId recordId);
+   void resetToKey(const BSONObj& obj, Ordering ord, const RecordId& recordId);
    void resetToKey(const BSONObj& obj,
                    Ordering ord,
                    Discriminator discriminator = Discriminator::kInclusive);
diff --git a/src/mongo/db/storage/record_id_bm.cpp b/src/mongo/db/storage/record_id_bm.cpp
index 8e830aa8999..be816b7e363 100644
--- a/src/mongo/db/storage/record_id_bm.cpp
+++ b/src/mongo/db/storage/record_id_bm.cpp
@@ -27,6 +27,8 @@
  * it in the license file.
  */

+#include <random>
+
 #include "mongo/platform/basic.h"

 #include "mongo/db/record_id.h"
@@ -128,6 +130,32 @@ void BM_RecordIdFormatString(benchmark::State& state) {
    }
 }

+template <typename V>
+void BM_RecordIdSort(benchmark::State& state) {
+    std::mt19937_64 gen(1234);
+    std::uniform_int_distribution<uint64_t> dist;
+    int64_t last = 0;
+    struct KV {
+        uint64_t key;
+        V val;
+    };
+    auto comp = [](const KV& left, const KV& right) { return left.key < right.key; };
+    std::vector<KV> data;
+
+    for (auto j = 0; j < state.range(0); ++j)
+        data.emplace_back(KV{dist(gen), V(++last)});
+    for (auto _ : state) {
+        auto copy = data;
+        std::sort(copy.begin(), copy.end(), comp);
+        benchmark::ClobberMemory();
+    }
+    state.SetItemsProcessed(data.size() * state.iterations());
+    state.SetBytesProcessed(data.size() * state.iterations() * sizeof(KV));
+}
+
+BENCHMARK_TEMPLATE(BM_RecordIdSort, uint64_t)->RangeMultiplier(10)->Range(100, 100'000);
+BENCHMARK_TEMPLATE(BM_RecordIdSort, RecordId)->RangeMultiplier(10)->Range(100, 100'000);
+
 BENCHMARK(BM_RecordIdCopyLong);
 BENCHMARK(BM_RecordIdCopyOID);
 BENCHMARK(BM_RecordIdCopyMedString);
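The new BM_RecordIdSort benchmark sorts vectors of 100 to 100,000 key/value pairs where the value is either a plain uint64_t or a RecordId, which isolates the cost of moving the 32-byte RecordId around during std::sort against an 8-byte baseline. Assuming the standard Google Benchmark runner and that this file builds into a record_id_bm binary (an assumption about the build output, not something stated in the patch), one benchmark can be selected with the stock filter flag:

./record_id_bm --benchmark_filter=BM_RecordIdSort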
diff --git a/src/mongo/db/storage/record_store.cpp b/src/mongo/db/storage/record_store.cpp
index 194bf1f1641..152b4f952a6 100644
--- a/src/mongo/db/storage/record_store.cpp
+++ b/src/mongo/db/storage/record_store.cpp
@@ -76,7 +76,9 @@ Status RecordStore::truncate(OperationContext* opCtx) {
    return doTruncate(opCtx);
 }

-void RecordStore::cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) {
+void RecordStore::cappedTruncateAfter(OperationContext* opCtx,
+                                      const RecordId& end,
+                                      bool inclusive) {
    validateWriteAllowed(opCtx);
    doCappedTruncateAfter(opCtx, end, inclusive);
 }
diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h
index c4473b80c7e..d12c571a355 100644
--- a/src/mongo/db/storage/record_store.h
+++ b/src/mongo/db/storage/record_store.h
@@ -372,13 +372,16 @@ public:
     * A thin wrapper around insertRecords() to simplify handling of single document inserts.
     * If RecordId is null, the storage engine will generate one and return it.
     */
-   StatusWith<RecordId> insertRecord(
-       OperationContext* opCtx, RecordId rid, const char* data, int len, Timestamp timestamp) {
+   StatusWith<RecordId> insertRecord(OperationContext* opCtx,
+                                     const RecordId& rid,
+                                     const char* data,
+                                     int len,
+                                     Timestamp timestamp) {
        std::vector<Record> inOutRecords{Record{rid, RecordData(data, len)}};
        Status status = insertRecords(opCtx, &inOutRecords, std::vector<Timestamp>{timestamp});
        if (!status.isOK())
            return status;
-       return inOutRecords.front().id;
+       return std::move(inOutRecords.front().id);
    }

    /**
@@ -458,7 +461,7 @@ public:
     * function. An assertion will be thrown if that is attempted.
     * @param inclusive - Truncate 'end' as well iff true
     */
-   void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive);
+   void cappedTruncateAfter(OperationContext* opCtx, const RecordId& end, bool inclusive);

    /**
     * does this RecordStore support the compact operation?
@@ -626,7 +629,9 @@ protected:
                                                     const char* damageSource,
                                                     const mutablebson::DamageVector& damages) = 0;

    virtual Status doTruncate(OperationContext* opCtx) = 0;
-   virtual void doCappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) = 0;
+   virtual void doCappedTruncateAfter(OperationContext* opCtx,
+                                      const RecordId& end,
+                                      bool inclusive) = 0;

    virtual Status doCompact(OperationContext* opCtx) {
        MONGO_UNREACHABLE;
    }
diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
index 101a90bbe78..a756d8303ae 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
@@ -68,7 +68,7 @@ void mongo::removeFromIndex(OperationContext* opCtx,

 mongo::KeyString::Value mongo::makeKeyString(SortedDataInterface* sorted,
                                              BSONObj bsonKey,
-                                             boost::optional<RecordId> rid) {
+                                             const boost::optional<RecordId>& rid) {
    KeyString::Builder builder(sorted->getKeyStringVersion(), bsonKey, sorted->getOrdering());
    if (rid) {
        builder.appendRecordId(*rid);
diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.h b/src/mongo/db/storage/sorted_data_interface_test_harness.h
index 1e62e6df981..606f621e6dc 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_harness.h
+++ b/src/mongo/db/storage/sorted_data_interface_test_harness.h
@@ -113,7 +113,7 @@ std::unique_ptr<SortedDataInterfaceHarnessHelper> newSortedDataInterfaceHarnessH

 KeyString::Value makeKeyString(SortedDataInterface* sorted,
                                BSONObj bsonKey,
-                               boost::optional<RecordId> rid = boost::none);
+                               const boost::optional<RecordId>& rid = boost::none);

 KeyString::Value makeKeyStringForSeek(SortedDataInterface* sorted,
                                       BSONObj bsonKey,
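Note the new return statement in insertRecord() above: std::move(inOutRecords.front().id). Implicit move on return only applies to a local variable named directly; a member or element of a local is copied unless moved explicitly, so without the std::move the RecordId would be copied into the StatusWith. A self-contained sketch with stand-in types:

#include <string>
#include <utility>
#include <vector>

struct Record {
    std::string id;  // stands in for RecordId
};

std::string takeFirstId(std::vector<Record> records) {
    // 'records' is expiring, but returning 'records.front().id' by name would
    // still copy; the explicit std::move turns it into a move.
    return std::move(records.front().id);
}

int main() {
    std::string id = takeFirstId({{"rid-1"}, {"rid-2"}});
    return id == "rid-1" ? 0 : 1;
}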
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
index a84b6ca6061..320246d2f45 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
@@ -145,7 +145,7 @@ void WiredTigerOplogManager::waitForAllEarlierOplogWritesToBeVisible(
        opCtx->recoveryUnit()->abandonSnapshot();
        return;
    }
-   const auto waitingFor = lastOplogRecord->id;
+   const auto& waitingFor = lastOplogRecord->id;

    // Close transaction before we wait.
    opCtx->recoveryUnit()->abandonSnapshot();
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index 738b3f24a94..5481f840590 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -89,7 +89,8 @@ struct RecordIdAndWall {
    RecordId id;
    Date_t wall;

-   RecordIdAndWall(RecordId lastRecord, Date_t wallTime) : id(lastRecord), wall(wallTime) {}
+   RecordIdAndWall(RecordId lastRecord, Date_t wallTime)
+       : id(std::move(lastRecord)), wall(wallTime) {}
 };

 WiredTigerRecordStore::CursorKey makeCursorKey(const RecordId& rid, KeyFormat format) {
@@ -314,7 +315,7 @@ void WiredTigerRecordStore::OplogStones::popOldestStone() {
 }

 void WiredTigerRecordStore::OplogStones::createNewStoneIfNeeded(OperationContext* opCtx,
-                                                                RecordId lastRecord,
+                                                                const RecordId& lastRecord,
                                                                 Date_t wallTime) {
    auto logFailedLockAcquisition = [&](const std::string& lock) {
        LOGV2_DEBUG(5384101,
@@ -350,8 +351,8 @@ void WiredTigerRecordStore::OplogStones::createNewStoneIfNeeded(OperationContext
        return;
    }

-   OplogStones::Stone stone(_currentRecords.swap(0), _currentBytes.swap(0), lastRecord, wallTime);
-   _stones.push_back(stone);
+   auto& stone =
+       _stones.emplace_back(_currentRecords.swap(0), _currentBytes.swap(0), lastRecord, wallTime);

    LOGV2_DEBUG(22381,
                2,
@@ -383,7 +384,7 @@ void WiredTigerRecordStore::OplogStones::clearStonesOnCommit(OperationContext* o
 }

 void WiredTigerRecordStore::OplogStones::updateStonesAfterCappedTruncateAfter(
-    int64_t recordsRemoved, int64_t bytesRemoved, RecordId firstRemovedId) {
+    int64_t recordsRemoved, int64_t bytesRemoved, const RecordId& firstRemovedId) {
    stdx::lock_guard<Latch> lk(_mutex);

    int64_t numStonesToRemove = 0;
@@ -1290,7 +1291,7 @@ void WiredTigerRecordStore::reclaimOplog(OperationContext* opCtx, Timestamp mayT

            // Stash the truncate point for next time to cleanly skip over tombstones, etc.
            _oplogStones->firstRecord = stone->lastRecord;
-           _oplogFirstRecord = stone->lastRecord;
+           _oplogFirstRecord = std::move(stone->lastRecord);
        } catch (const WriteConflictException&) {
            LOGV2_DEBUG(
                22400, 1, "Caught WriteConflictException while truncating oplog entries, retrying");
@@ -1347,7 +1348,7 @@ Status WiredTigerRecordStore::_insertRecords(OperationContext* opCtx,
                record_id_helpers::extractKeyOptime(record.data.data(), record.data.size());
            if (!status.isOK())
                return status.getStatus();
-           record.id = status.getValue();
+           record.id = std::move(status.getValue());
        } else {
            // Some RecordStores, like TemporaryRecordStores, may want to set their own
            // RecordIds.
@@ -2066,7 +2067,7 @@ void WiredTigerRecordStore::setDataSize(long long dataSize) {
 }

 void WiredTigerRecordStore::doCappedTruncateAfter(OperationContext* opCtx,
-                                                  RecordId end,
+                                                  const RecordId& end,
                                                   bool inclusive) {
    std::unique_ptr<SeekableRecordCursor> cursor = getCursor(opCtx, true);

@@ -2082,7 +2083,7 @@ void WiredTigerRecordStore::doCappedTruncateAfter(OperationContext* opCtx,
        std::unique_ptr<SeekableRecordCursor> reverseCursor = getCursor(opCtx, false);
        invariant(reverseCursor->seekExact(end));
        auto prev = reverseCursor->next();
-       lastKeptId = prev ? prev->id : RecordId();
+       lastKeptId = prev ? std::move(prev->id) : RecordId();
        firstRemovedId = end;
    } else {
        // If not deleting the record located at 'end', then advance the cursor to the first record
@@ -2394,7 +2395,8 @@ boost::optional<Record> WiredTigerRecordStoreCursorBase::seekNear(const RecordId

    _lastReturnedId = curId;
    _eof = false;
-   return {{curId, {static_cast<const char*>(value.data), static_cast<int>(value.size)}}};
+   return {
+       {std::move(curId), {static_cast<const char*>(value.data), static_cast<int>(value.size)}}};
 }

 void WiredTigerRecordStoreCursorBase::save() {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index 3bb07432f71..69a2340a5c8 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -200,7 +200,7 @@ public:
                                           BSONObjBuilder* result,
                                           double scale) const;

-   void doCappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) final;
+   void doCappedTruncateAfter(OperationContext* opCtx, const RecordId& end, bool inclusive) final;

    virtual void updateStatsAfterRepair(OperationContext* opCtx,
                                        long long numRecords,
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
index f0440a0390c..50bd30a7109 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
@@ -52,7 +52,10 @@ public:
        Date_t wallTime;  // Walltime of when this chunk of the oplog was created.

        Stone(int64_t records, int64_t bytes, RecordId lastRecord, Date_t wallTime)
-           : records(records), bytes(bytes), lastRecord(lastRecord), wallTime(wallTime) {}
+           : records(records),
+             bytes(bytes),
+             lastRecord(std::move(lastRecord)),
+             wallTime(wallTime) {}
    };

    OplogStones(OperationContext* opCtx, WiredTigerRecordStore* rs);
@@ -77,7 +80,9 @@ public:

    void popOldestStone();

-   void createNewStoneIfNeeded(OperationContext* opCtx, RecordId lastRecord, Date_t wallTime);
+   void createNewStoneIfNeeded(OperationContext* opCtx,
+                               const RecordId& lastRecord,
+                               Date_t wallTime);

    void updateCurrentStoneAfterInsertOnCommit(OperationContext* opCtx,
                                               int64_t bytesInserted,
@@ -89,7 +94,7 @@ public:
    // Updates the metadata about the oplog stones after a rollback occurs.
    void updateStonesAfterCappedTruncateAfter(int64_t recordsRemoved,
                                              int64_t bytesRemoved,
-                                             RecordId firstRemovedId);
+                                             const RecordId& firstRemovedId);

    // Resize oplog size
    void adjust(int64_t maxSize);
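createNewStoneIfNeeded() above also switches from construct-then-push_back to emplace_back and binds the return value, which since C++17 is a reference to the element constructed in place. A minimal sketch:

#include <cassert>
#include <vector>

struct Stone {
    long long records;
    long long bytes;
    Stone(long long r, long long b) : records(r), bytes(b) {}
};

int main() {
    std::vector<Stone> stones;
    // Constructed directly inside the vector; no temporary Stone is copied in.
    auto& stone = stones.emplace_back(10, 4096);
    assert(&stone == &stones.back());
    return 0;
}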