Diffstat (limited to 'src')
38 files changed, 597 insertions, 271 deletions
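The diff below is dominated by a mechanical migration at RecordId call sites: direct reads of the old repr() accessor become format-aware calls (as<int64_t>()), and the static min()/max()/reserved-id helpers gain a template parameter naming the storage format. As a minimal sketch of the pattern, the following uses only the API added to src/mongo/db/record_id.h in this patch; the function appendRecordIdInfo and its use of BSONObjBuilder are hypothetical, chosen to mirror the collection_impl.cpp and index_consistency.cpp hunks:

#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/record_id.h"
#include "mongo/util/assert_util.h"

namespace mongo {

// Hypothetical call site: shows the repr() -> as<int64_t>() migration and the
// templated bound/reservation helpers introduced by this patch.
void appendRecordIdInfo(const RecordId& rid, BSONObjBuilder* bob) {
    // Before this patch: bob->append("recordId", rid.repr());
    // After: request the underlying value in an explicit format. This invariants
    // if 'rid' holds an OID rather than an int64_t (a null RecordId yields 0).
    bob->append("recordId", rid.as<int64_t>());

    // The bounds and reserved ids are parameterized on the format as well.
    invariant(RecordId::min<int64_t>() < rid);
    invariant(rid < RecordId::max<int64_t>());

    const RecordId wildcardMeta = RecordId::reservedIdFor<int64_t>(
        RecordId::Reservation::kWildcardMultikeyMetadataId);
    bob->appendBool("isWildcardMetadata", rid == wildcardMeta);
}

}  // namespace mongo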
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp index ff8db42a8f8..774d0f347db 100644 --- a/src/mongo/db/catalog/collection_impl.cpp +++ b/src/mongo/db/catalog/collection_impl.cpp @@ -704,8 +704,8 @@ Status CollectionImpl::_insertDocuments(OperationContext* opCtx, int recordIndex = 0; for (auto it = begin; it != end; it++) { RecordId loc = records[recordIndex++].id; - invariant(RecordId::min() < loc); - invariant(loc < RecordId::max()); + invariant(RecordId::min<int64_t>() < loc); + invariant(loc < RecordId::max<int64_t>()); BsonRecord bsonRecord = {loc, Timestamp(it->oplogSlot.getTimestamp()), &(it->doc)}; bsonRecords.push_back(bsonRecord); diff --git a/src/mongo/db/catalog/index_consistency.cpp b/src/mongo/db/catalog/index_consistency.cpp index 634659d959e..fc5b2f5feee 100644 --- a/src/mongo/db/catalog/index_consistency.cpp +++ b/src/mongo/db/catalog/index_consistency.cpp @@ -493,10 +493,10 @@ BSONObj IndexConsistency::_generateInfo(const std::string& indexName, if (!idKey.isEmpty()) { invariant(idKey.nFields() == 1); - return BSON("indexName" << indexName << "recordId" << recordId.repr() << "idKey" << idKey - << "indexKey" << rehydratedKey); + return BSON("indexName" << indexName << "recordId" << recordId.as<int64_t>() << "idKey" + << idKey << "indexKey" << rehydratedKey); } else { - return BSON("indexName" << indexName << "recordId" << recordId.repr() << "indexKey" + return BSON("indexName" << indexName << "recordId" << recordId.as<int64_t>() << "indexKey" << rehydratedKey); } } diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp index f0c46930775..cf465b46b0d 100644 --- a/src/mongo/db/catalog/multi_index_block.cpp +++ b/src/mongo/db/catalog/multi_index_block.cpp @@ -913,7 +913,7 @@ BSONObj MultiIndexBlock::_constructStateObject(OperationContext* opCtx, // We can be interrupted by shutdown before inserting the first document from the collection // scan, in which case there is no _lastRecordIdInserted. 
if (_phase == IndexBuildPhaseEnum::kCollectionScan && _lastRecordIdInserted) - builder.append("collectionScanPosition", _lastRecordIdInserted->repr()); + builder.append("collectionScanPosition", _lastRecordIdInserted->as<int64_t>()); BSONArrayBuilder indexesArray(builder.subarrayStart("indexes")); for (const auto& index : _indexes) { diff --git a/src/mongo/db/catalog/throttle_cursor.cpp b/src/mongo/db/catalog/throttle_cursor.cpp index 8198391c1c8..5fcd98435d2 100644 --- a/src/mongo/db/catalog/throttle_cursor.cpp +++ b/src/mongo/db/catalog/throttle_cursor.cpp @@ -54,7 +54,7 @@ boost::optional<Record> SeekableRecordThrottleCursor::seekExact(OperationContext const RecordId& id) { boost::optional<Record> record = _cursor->seekExact(id); if (record) { - const int64_t dataSize = record->data.size() + sizeof(record->id.repr()); + const int64_t dataSize = record->data.size() + sizeof(record->id); _dataThrottle->awaitIfNeeded(opCtx, dataSize); } @@ -64,7 +64,7 @@ boost::optional<Record> SeekableRecordThrottleCursor::seekExact(OperationContext boost::optional<Record> SeekableRecordThrottleCursor::next(OperationContext* opCtx) { boost::optional<Record> record = _cursor->next(); if (record) { - const int64_t dataSize = record->data.size() + sizeof(record->id.repr()); + const int64_t dataSize = record->data.size() + sizeof(record->id); _dataThrottle->awaitIfNeeded(opCtx, dataSize); } @@ -82,7 +82,7 @@ boost::optional<IndexKeyEntry> SortedDataInterfaceThrottleCursor::seek( OperationContext* opCtx, const KeyString::Value& key) { boost::optional<IndexKeyEntry> entry = _cursor->seek(key); if (entry) { - const int64_t dataSize = entry->key.objsize() + sizeof(entry->loc.repr()); + const int64_t dataSize = entry->key.objsize() + sizeof(entry->loc); _dataThrottle->awaitIfNeeded(opCtx, dataSize); } @@ -93,7 +93,7 @@ boost::optional<KeyStringEntry> SortedDataInterfaceThrottleCursor::seekForKeyStr OperationContext* opCtx, const KeyString::Value& key) { boost::optional<KeyStringEntry> entry = _cursor->seekForKeyString(key); if (entry) { - const int64_t dataSize = entry->keyString.getSize() + sizeof(entry->loc.repr()); + const int64_t dataSize = entry->keyString.getSize() + sizeof(entry->loc); _dataThrottle->awaitIfNeeded(opCtx, dataSize); } @@ -103,7 +103,7 @@ boost::optional<KeyStringEntry> SortedDataInterfaceThrottleCursor::seekForKeyStr boost::optional<IndexKeyEntry> SortedDataInterfaceThrottleCursor::next(OperationContext* opCtx) { boost::optional<IndexKeyEntry> entry = _cursor->next(); if (entry) { - const int64_t dataSize = entry->key.objsize() + sizeof(entry->loc.repr()); + const int64_t dataSize = entry->key.objsize() + sizeof(entry->loc); _dataThrottle->awaitIfNeeded(opCtx, dataSize); } @@ -114,7 +114,7 @@ boost::optional<KeyStringEntry> SortedDataInterfaceThrottleCursor::nextKeyString OperationContext* opCtx) { boost::optional<KeyStringEntry> entry = _cursor->nextKeyString(); if (entry) { - const int64_t dataSize = entry->keyString.getSize() + sizeof(entry->loc.repr()); + const int64_t dataSize = entry->keyString.getSize() + sizeof(entry->loc); _dataThrottle->awaitIfNeeded(opCtx, dataSize); } diff --git a/src/mongo/db/catalog/validate_adaptor.cpp b/src/mongo/db/catalog/validate_adaptor.cpp index 461968d68ff..ada9feeb27c 100644 --- a/src/mongo/db/catalog/validate_adaptor.cpp +++ b/src/mongo/db/catalog/validate_adaptor.cpp @@ -300,7 +300,7 @@ void ValidateAdaptor::traverseIndex(OperationContext* opCtx, } const RecordId kWildcardMultikeyMetadataRecordId{ - RecordId::ReservedId::kWildcardMultikeyMetadataId}; 
+ RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)}; if (descriptor->getIndexType() == IndexType::INDEX_WILDCARD && indexEntry->loc == kWildcardMultikeyMetadataRecordId) { _indexConsistency->removeMultikeyMetadataPath(indexEntry->keyString, &indexInfo); diff --git a/src/mongo/db/catalog/validate_results.cpp b/src/mongo/db/catalog/validate_results.cpp index 820ddee062e..853ea874748 100644 --- a/src/mongo/db/catalog/validate_results.cpp +++ b/src/mongo/db/catalog/validate_results.cpp @@ -45,7 +45,7 @@ void ValidateResults::appendToResultObj(BSONObjBuilder* resultObj, bool debuggin // Need to convert RecordId to int64_t to append to BSONObjBuilder BSONArrayBuilder builder; for (RecordId corruptRecord : corruptRecords) { - builder.append(corruptRecord.repr()); + builder.append(corruptRecord.as<int64_t>()); } resultObj->append("corruptRecords", builder.arr()); diff --git a/src/mongo/db/exec/collection_scan.h b/src/mongo/db/exec/collection_scan.h index ac9681af48e..8b19ce56ab2 100644 --- a/src/mongo/db/exec/collection_scan.h +++ b/src/mongo/db/exec/collection_scan.h @@ -77,7 +77,7 @@ public: BSONObj getPostBatchResumeToken() const { // Return a resume token compatible with resumable initial sync. if (_params.requestResumeToken) { - return BSON("$recordId" << _lastSeenId.repr()); + return BSON("$recordId" << _lastSeenId.as<int64_t>()); } // Return a resume token compatible with resharding oplog sync. if (_params.shouldTrackLatestOplogTimestamp) { diff --git a/src/mongo/db/exec/sbe/stages/ix_scan.cpp b/src/mongo/db/exec/sbe/stages/ix_scan.cpp index 2fe95dc3980..7a5e376ec2f 100644 --- a/src/mongo/db/exec/sbe/stages/ix_scan.cpp +++ b/src/mongo/db/exec/sbe/stages/ix_scan.cpp @@ -285,7 +285,7 @@ PlanState IndexScanStage::getNext() { if (_recordIdAccessor) { _recordIdAccessor->reset(value::TypeTags::RecordId, - value::bitcastFrom<int64_t>(_nextRecord->loc.repr())); + value::bitcastFrom<int64_t>(_nextRecord->loc.as<int64_t>())); } if (_accessors.size()) { diff --git a/src/mongo/db/exec/sbe/stages/scan.cpp b/src/mongo/db/exec/sbe/stages/scan.cpp index 69d35aa62bd..d03a591394c 100644 --- a/src/mongo/db/exec/sbe/stages/scan.cpp +++ b/src/mongo/db/exec/sbe/stages/scan.cpp @@ -230,7 +230,7 @@ PlanState ScanStage::getNext() { if (_recordIdAccessor) { _recordIdAccessor->reset(value::TypeTags::RecordId, - value::bitcastFrom<int64_t>(nextRecord->id.repr())); + value::bitcastFrom<int64_t>(nextRecord->id.as<int64_t>())); } if (!_fieldAccessors.empty()) { @@ -553,7 +553,7 @@ PlanState ParallelScanStage::getNext() { if (_recordIdAccessor) { _recordIdAccessor->reset(value::TypeTags::RecordId, - value::bitcastFrom<int64_t>(nextRecord->id.repr())); + value::bitcastFrom<int64_t>(nextRecord->id.as<int64_t>())); } diff --git a/src/mongo/db/exec/working_set.cpp b/src/mongo/db/exec/working_set.cpp index c88775669df..c07beee9361 100644 --- a/src/mongo/db/exec/working_set.cpp +++ b/src/mongo/db/exec/working_set.cpp @@ -226,7 +226,7 @@ void WorkingSetMember::serialize(BufBuilder& buf) const { } if (hasRecordId()) { - buf.appendNum(recordId.repr()); + buf.appendNum(recordId.as<int64_t>()); } _metadata.serializeForSorter(buf); diff --git a/src/mongo/db/exec/working_set_test.cpp b/src/mongo/db/exec/working_set_test.cpp index a66d81e5b6a..13af0b45381 100644 --- a/src/mongo/db/exec/working_set_test.cpp +++ b/src/mongo/db/exec/working_set_test.cpp @@ -218,7 +218,7 @@ TEST_F(WorkingSetFixture, RecordIdAndObjStateCanRoundtripThroughSerialization) { ASSERT_EQ(WorkingSetMember::RID_AND_OBJ, 
roundtripped.getState()); ASSERT_DOCUMENT_EQ(roundtripped.doc.value(), doc); ASSERT_EQ(roundtripped.doc.snapshotId().toNumber(), 42u); - ASSERT_EQ(roundtripped.recordId.repr(), 43); + ASSERT_EQ(roundtripped.recordId.as<int64_t>(), 43); ASSERT_FALSE(roundtripped.metadata()); } @@ -244,7 +244,7 @@ TEST_F(WorkingSetFixture, RecordIdAndIdxStateCanRoundtripThroughSerialization) { auto roundtripped = roundtripWsmThroughSerialization(*member); ASSERT_EQ(WorkingSetMember::RID_AND_IDX, roundtripped.getState()); - ASSERT_EQ(roundtripped.recordId.repr(), 43); + ASSERT_EQ(roundtripped.recordId.as<int64_t>(), 43); ASSERT_EQ(roundtripped.keyData.size(), 2u); ASSERT_BSONOBJ_EQ(roundtripped.keyData[0].indexKeyPattern, BSON("a" << 1 << "b" << 1)); @@ -298,7 +298,7 @@ TEST_F(WorkingSetFixture, WsmCanBeExtractedAndReinserted) { ASSERT_EQ(extractedWsm.getState(), WorkingSetMember::RID_AND_OBJ); ASSERT_DOCUMENT_EQ(extractedWsm.doc.value(), doc); ASSERT_EQ(extractedWsm.doc.snapshotId().toNumber(), 42u); - ASSERT_EQ(extractedWsm.recordId.repr(), 43); + ASSERT_EQ(extractedWsm.recordId.as<int64_t>(), 43); ASSERT_FALSE(extractedWsm.metadata()); auto emplacedId = ws->emplace(std::move(extractedWsm)); @@ -309,7 +309,7 @@ TEST_F(WorkingSetFixture, WsmCanBeExtractedAndReinserted) { ASSERT_EQ(emplacedWsm->getState(), WorkingSetMember::RID_AND_OBJ); ASSERT_DOCUMENT_EQ(emplacedWsm->doc.value(), doc); ASSERT_EQ(emplacedWsm->doc.snapshotId().toNumber(), 42u); - ASSERT_EQ(emplacedWsm->recordId.repr(), 43); + ASSERT_EQ(emplacedWsm->recordId.as<int64_t>(), 43); ASSERT_FALSE(emplacedWsm->metadata()); } diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp index 4de61819671..3a53bcf0662 100644 --- a/src/mongo/db/index/index_access_method.cpp +++ b/src/mongo/db/index/index_access_method.cpp @@ -71,7 +71,7 @@ namespace { // Reserved RecordId against which multikey metadata keys are indexed. static const RecordId kMultikeyMetadataKeyId = - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}; + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId); /** * Returns true if at least one prefix of any of the indexed fields causes the index to be diff --git a/src/mongo/db/index/skipped_record_tracker.cpp b/src/mongo/db/index/skipped_record_tracker.cpp index 1a825164ced..88e51bae118 100644 --- a/src/mongo/db/index/skipped_record_tracker.cpp +++ b/src/mongo/db/index/skipped_record_tracker.cpp @@ -67,7 +67,7 @@ void SkippedRecordTracker::finalizeTemporaryTable(OperationContext* opCtx, } void SkippedRecordTracker::record(OperationContext* opCtx, const RecordId& recordId) { - auto toInsert = BSON(kRecordIdField << recordId.repr()); + auto toInsert = BSON(kRecordIdField << recordId.as<int64_t>()); // Lazily initialize table when we record the first document. 
if (!_skippedRecordsTable) { diff --git a/src/mongo/db/index/wildcard_key_generator.cpp b/src/mongo/db/index/wildcard_key_generator.cpp index 6c9b1305e1b..0eddf567007 100644 --- a/src/mongo/db/index/wildcard_key_generator.cpp +++ b/src/mongo/db/index/wildcard_key_generator.cpp @@ -246,7 +246,8 @@ void WildcardKeyGenerator::_addMultiKey(SharedBufferFragmentBuilder& pooledBuffe _keyStringVersion, key, _ordering, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + RecordId{RecordId::reservedIdFor<int64_t>( + RecordId::Reservation::kWildcardMultikeyMetadataId)}); multikeyPaths->push_back(keyString.release()); } } diff --git a/src/mongo/db/index/wildcard_key_generator_test.cpp b/src/mongo/db/index/wildcard_key_generator_test.cpp index beb53dcb3e6..5f37133f72c 100644 --- a/src/mongo/db/index/wildcard_key_generator_test.cpp +++ b/src/mongo/db/index/wildcard_key_generator_test.cpp @@ -163,12 +163,12 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ShouldIndexNonNestedEmptyArrayAsUnd fromjson("{'': 'b.c', '': undefined}"), fromjson("{'': 'd', '': []}"), fromjson("{'': 'd.e', '': undefined}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}"), - fromjson("{'': 1, '': 'b.c'}"), - fromjson("{'': 1, '': 'd'}"), - fromjson("{'': 1, '': 'd.e'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}"), + fromjson("{'': 1, '': 'b.c'}"), + fromjson("{'': 1, '': 'd'}"), + fromjson("{'': 1, '': 'd.e'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -192,9 +192,9 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ExtractMultikeyPath) { fromjson("{'': 'a.c', '': 2}"), fromjson("{'': 'a.d', '': 3}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -218,9 +218,9 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ExtractMultikeyPathAndDedupKeys) { fromjson("{'': 'a.c', '': 2}"), fromjson("{'': 'a.d', '': 3}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -245,9 +245,9 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ExtractZeroElementMultikeyPath) { fromjson("{'': 'a.d', '': 3}"), fromjson("{'': 'e', '': undefined}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'e'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'e'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -281,9 +281,9 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ExtractNestedMultikeyPaths) { fromjson("{'': 'a.e', '': 4}"), fromjson("{'': 'a.e', '': 5}")}); - auto 
expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -321,12 +321,12 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ExtractMixedPathTypesAndAllSubpaths fromjson("{'': 'g.h.k', '': 12.0}"), fromjson("{'': 'l', '': 'string'}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}"), - fromjson("{'': 1, '': 'a.e'}"), - fromjson("{'': 1, '': 'g.h.j'}"), - fromjson("{'': 1, '': 'g.h.j.k'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}"), + fromjson("{'': 1, '': 'a.e'}"), + fromjson("{'': 1, '': 'g.h.j'}"), + fromjson("{'': 1, '': 'g.h.j.k'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -356,9 +356,9 @@ TEST_F(WildcardKeyGeneratorSingleSubtreeTest, ExtractSubtreeWithSinglePathCompon fromjson("{'': 'g.h.j.k', '': 11.5}"), fromjson("{'': 'g.h.k', '': 12.0}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -385,9 +385,9 @@ TEST_F(WildcardKeyGeneratorSingleSubtreeTest, ExtractSubtreeWithMultiplePathComp fromjson("{'': 'g.h.j.k', '': 11.5}"), fromjson("{'': 'g.h.k', '': 12.0}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -412,9 +412,9 @@ TEST_F(WildcardKeyGeneratorSingleSubtreeTest, ExtractMultikeySubtree) { fromjson("{'': 'g.h.j.k', '': 11}"), fromjson("{'': 'g.h.j.k', '': 11.5}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -441,9 +441,9 @@ TEST_F(WildcardKeyGeneratorSingleSubtreeTest, ExtractNestedMultikeySubtree) { fromjson("{'': 'a.e', '': 4}"), fromjson("{'': 'a.e', '': 5}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 
'a.e'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -473,9 +473,9 @@ TEST_F(WildcardKeyGeneratorInclusionTest, InclusionProjectionSingleSubtree) { fromjson("{'': 'g.h.j.k', '': 11.5}"), fromjson("{'': 'g.h.k', '': 12.0}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -502,9 +502,9 @@ TEST_F(WildcardKeyGeneratorInclusionTest, InclusionProjectionNestedSubtree) { fromjson("{'': 'g.h.j.k', '': 11.5}"), fromjson("{'': 'g.h.k', '': 12.0}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -529,9 +529,9 @@ TEST_F(WildcardKeyGeneratorInclusionTest, InclusionProjectionMultikeySubtree) { fromjson("{'': 'g.h.j.k', '': 11}"), fromjson("{'': 'g.h.j.k', '': 11.5}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -556,9 +556,9 @@ TEST_F(WildcardKeyGeneratorInclusionTest, InclusionProjectionNestedMultikeySubtr fromjson("{'': 'a.e', '': 4}"), fromjson("{'': 'a.e', '': 5}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -586,9 +586,9 @@ TEST_F(WildcardKeyGeneratorInclusionTest, InclusionProjectionMultipleSubtrees) { fromjson("{'': 'a.e', '': 5}"), fromjson("{'': 'g.h.i', '': 9}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -623,9 +623,9 @@ TEST_F(WildcardKeyGeneratorExclusionTest, ExclusionProjectionSingleSubtree) { fromjson("{'': 'a.e', '': 5}"), fromjson("{'': 'l', '': 'string'}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}"), 
fromjson("{'': 1, '': 'a.e'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -658,9 +658,9 @@ TEST_F(WildcardKeyGeneratorExclusionTest, ExclusionProjectionNestedSubtree) { fromjson("{'': 'g', '': {}}"), fromjson("{'': 'l', '': 'string'}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -694,9 +694,9 @@ TEST_F(WildcardKeyGeneratorExclusionTest, ExclusionProjectionMultikeySubtree) { fromjson("{'': 'g.h.k', '': 12.0}"), fromjson("{'': 'l', '': 'string'}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -731,11 +731,11 @@ TEST_F(WildcardKeyGeneratorExclusionTest, ExclusionProjectionNestedMultikeySubtr fromjson("{'': 'g.h.k', '': 12}"), fromjson("{'': 'l', '': 'string'}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}"), - fromjson("{'': 1, '': 'g.h.j'}"), - fromjson("{'': 1, '': 'g.h.j.k'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}"), + fromjson("{'': 1, '': 'g.h.j'}"), + fromjson("{'': 1, '': 'g.h.j.k'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -767,11 +767,11 @@ TEST_F(WildcardKeyGeneratorExclusionTest, ExclusionProjectionMultipleSubtrees) { fromjson("{'': 'g.h.k', '': 12.0}"), fromjson("{'': 'l', '': 'string'}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}"), - fromjson("{'': 1, '': 'g.h.j'}"), - fromjson("{'': 1, '': 'g.h.j.k'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}"), + fromjson("{'': 1, '': 'g.h.j'}"), + fromjson("{'': 1, '': 'g.h.j.k'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -801,9 +801,9 @@ TEST_F(WildcardKeyGeneratorIdTest, ExcludeIdFieldIfProjectionIsEmpty) { fromjson("{'': 'g.h.i', '': 9}"), fromjson("{'': 'g.h.k', '': 12.0}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto 
outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -828,9 +828,9 @@ TEST_F(WildcardKeyGeneratorIdTest, ExcludeIdFieldForSingleSubtreeKeyPattern) { fromjson("{'': 'a.b', '': 1}"), fromjson("{'': 'a.e', '': 4}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -900,9 +900,9 @@ TEST_F(WildcardKeyGeneratorIdTest, ExcludeIdFieldByDefaultForInclusionProjection fromjson("{'': 'a.b', '': 1}"), fromjson("{'': 'a.e', '': 4}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -975,9 +975,9 @@ TEST_F(WildcardKeyGeneratorIdTest, PermitIdSubfieldExclusionInExplicitProjection fromjson("{'': 'g.h.i', '': 9}"), fromjson("{'': 'g.h.k', '': 12.0}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -1004,9 +1004,9 @@ TEST_F(WildcardKeyGeneratorIdTest, IncludeIdFieldIfExplicitlySpecifiedInProjecti fromjson("{'': 'a.b', '': 1}"), fromjson("{'': 'a.e', '': 4}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -1031,9 +1031,9 @@ TEST_F(WildcardKeyGeneratorIdTest, ExcludeIdFieldIfExplicitlySpecifiedInProjecti fromjson("{'': 'a.b', '': 1}"), fromjson("{'': 'a.e', '': 4}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -1107,12 +1107,12 @@ TEST_F(WildcardKeyGeneratorCollationTest, CollationMixedPathAndKeyTypes) { fromjson("{'': 'g.h.k', '': 12.0}"), fromjson("{'': 'l', '': 'gnirts'}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'a'}"), - fromjson("{'': 1, '': 'a.e'}"), - fromjson("{'': 1, '': 'g.h.j'}"), - fromjson("{'': 1, '': 'g.h.j.k'}")}, - 
RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'a'}"), + fromjson("{'': 1, '': 'a.e'}"), + fromjson("{'': 1, '': 'g.h.j'}"), + fromjson("{'': 1, '': 'g.h.j.k'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -1140,9 +1140,9 @@ TEST_F(WildcardKeyGeneratorDottedFieldsTest, DoNotIndexDottedFields) { fromjson("{'': 'b', '': [{'a.b': 9}]}"), fromjson("{'': 'c', '': 10}")}); - auto expectedMultikeyPaths = - makeKeySet({fromjson("{'': 1, '': 'b'}")}, - RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId}); + auto expectedMultikeyPaths = makeKeySet( + {fromjson("{'': 1, '': 'b'}")}, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); diff --git a/src/mongo/db/pipeline/expression.cpp b/src/mongo/db/pipeline/expression.cpp index 217005d070e..d86b50c527a 100644 --- a/src/mongo/db/pipeline/expression.cpp +++ b/src/mongo/db/pipeline/expression.cpp @@ -2807,7 +2807,7 @@ Value ExpressionMeta::evaluate(const Document& root, Variables* variables) const static_assert(RecordId::kMinRepr >= std::numeric_limits<long long>::min()); static_assert(RecordId::kMaxRepr <= std::numeric_limits<long long>::max()); return metadata.hasRecordId() - ? Value{static_cast<long long>(metadata.getRecordId().repr())} + ? Value{static_cast<long long>(metadata.getRecordId().as<int64_t>())} : Value(); case MetaType::kIndexKey: return metadata.hasIndexKey() ? Value(metadata.getIndexKey()) : Value(); diff --git a/src/mongo/db/query/sbe_stage_builder_coll_scan.cpp b/src/mongo/db/query/sbe_stage_builder_coll_scan.cpp index 39142a914b5..1484f62cd73 100644 --- a/src/mongo/db/query/sbe_stage_builder_coll_scan.cpp +++ b/src/mongo/db/query/sbe_stage_builder_coll_scan.cpp @@ -189,8 +189,9 @@ std::pair<std::unique_ptr<sbe::PlanStage>, PlanStageSlots> generateOptimizedOplo sbe::makeS<sbe::CoScanStage>(csn->nodeId()), 1, boost::none, csn->nodeId()), csn->nodeId(), *seekRecordIdSlot, - sbe::makeE<sbe::EConstant>(sbe::value::TypeTags::RecordId, - sbe::value::bitcastFrom<int64_t>(seekRecordId->repr()))), + sbe::makeE<sbe::EConstant>( + sbe::value::TypeTags::RecordId, + sbe::value::bitcastFrom<int64_t>(seekRecordId->as<int64_t>()))), std::move(stage), sbe::makeSV(), sbe::makeSV(*seekRecordIdSlot), @@ -366,7 +367,7 @@ std::pair<std::unique_ptr<sbe::PlanStage>, PlanStageSlots> generateGenericCollSc seekSlot, sbe::makeE<sbe::EConstant>( sbe::value::TypeTags::RecordId, - sbe::value::bitcastFrom<int64_t>(csn->resumeAfterRecordId->repr()))); + sbe::value::bitcastFrom<int64_t>(csn->resumeAfterRecordId->as<int64_t>()))); // Construct a 'seek' branch of the 'union'. 
If we're succeeded to reposition the cursor, // the branch will output the 'seekSlot' to start the real scan from, otherwise it will diff --git a/src/mongo/db/query/wildcard_multikey_paths.cpp b/src/mongo/db/query/wildcard_multikey_paths.cpp index 746925cf7ff..d3ca935e8e9 100644 --- a/src/mongo/db/query/wildcard_multikey_paths.cpp +++ b/src/mongo/db/query/wildcard_multikey_paths.cpp @@ -42,8 +42,9 @@ namespace mongo { */ static FieldRef extractMultikeyPathFromIndexKey(const IndexKeyEntry& entry) { invariant(entry.loc.isReserved()); - invariant(entry.loc.repr() == - static_cast<int64_t>(RecordId::ReservedId::kWildcardMultikeyMetadataId)); + invariant(entry.loc.as<int64_t>() == + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId) + .as<int64_t>()); // Validate that the first piece of the key is the integer 1. BSONObjIterator iter(entry.key); diff --git a/src/mongo/db/record_id.h b/src/mongo/db/record_id.h index ac816a44291..78953f29d6d 100644 --- a/src/mongo/db/record_id.h +++ b/src/mongo/db/record_id.h @@ -37,69 +37,148 @@ #include <ostream> #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/bson/util/builder.h" #include "mongo/util/bufreader.h" namespace mongo { + /** * The key that uniquely identifies a Record in a Collection or RecordStore. */ class RecordId { public: - // This set of constants define the boundaries of the 'normal' and 'reserved' id ranges. - static constexpr int64_t kNullRepr = 0; + // This set of constants define the boundaries of the 'normal' and 'reserved' id ranges for + // the kLong format. static constexpr int64_t kMinRepr = LLONG_MIN; static constexpr int64_t kMaxRepr = LLONG_MAX; static constexpr int64_t kMinReservedRepr = kMaxRepr - (1024 * 1024); + // OID Constants + static constexpr unsigned char kMinOID[OID::kOIDSize] = {0x00}; + static constexpr unsigned char kMaxOID[OID::kOIDSize] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + // This reserved range leaves 2^20 possible reserved values. + static constexpr unsigned char kMinReservedOID[OID::kOIDSize] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00, 0x00}; + /** - * Enumerates all ids in the reserved range that have been allocated for a specific purpose. + * A RecordId that compares less than all ids for a given data format. */ - enum class ReservedId : int64_t { kWildcardMultikeyMetadataId = kMinReservedRepr }; + template <typename T> + static RecordId min() { + if constexpr (std::is_same_v<T, int64_t>) { + return RecordId(kMinRepr); + } else { + static_assert(std::is_same_v<T, OID>, "Unsupported RecordID format"); + return RecordId(OID(kMinOID)); + } + } /** - * Constructs a Null RecordId. + * A RecordId that compares greater than all ids that represent documents in a collection. */ - RecordId() : _repr(kNullRepr) {} - - explicit RecordId(int64_t repr) : _repr(repr) {} + template <typename T> + static RecordId max() { + if constexpr (std::is_same_v<T, int64_t>) { + return RecordId(kMaxRepr); + } else { + static_assert(std::is_same_v<T, OID>, "Unsupported RecordID format"); + return RecordId(OID(kMaxOID)); + } + } - explicit RecordId(ReservedId repr) : RecordId(static_cast<int64_t>(repr)) {} + /** + * Returns the first record in the reserved id range at the top of the RecordId space. 
+ */ + template <typename T> + static RecordId minReserved() { + if constexpr (std::is_same_v<T, int64_t>) { + return RecordId(kMinReservedRepr); + } else { + static_assert(std::is_same_v<T, OID>, "Unsupported RecordID format"); + return RecordId(OID(kMinReservedOID)); + } + } /** - * Construct a RecordId from two halves. - * TODO consider removing. + * Enumerates all reserved ids that have been allocated for a specific purpose. + * The underlying value of the reserved Record ID is data-format specific and must be retrieved + * by the getReservedId() helper. */ - RecordId(int high, int low) : _repr((uint64_t(high) << 32) | uint32_t(low)) {} + enum class Reservation { kWildcardMultikeyMetadataId }; /** - * A RecordId that compares less than all ids that represent documents in a collection. + * Returns the reserved RecordId value for a given Reservation. */ - static RecordId min() { - return RecordId(kMinRepr); + template <typename T> + static RecordId reservedIdFor(Reservation res) { + // There is only one reservation at the moment. + invariant(res == Reservation::kWildcardMultikeyMetadataId); + if constexpr (std::is_same_v<T, int64_t>) { + return RecordId(kMinReservedRepr); + } else { + static_assert(std::is_same_v<T, OID>, "Unsupported RecordID format"); + return RecordId(OID(kMinReservedOID)); + } } + RecordId() : _format(Format::kNull) {} + explicit RecordId(int64_t repr) : _storage(repr), _format(Format::kLong) {} + explicit RecordId(const OID& oid) : _storage(oid), _format(Format::kOid) {} + /** - * A RecordId that compares greater than all ids that represent documents in a collection. + * Construct a RecordId from two halves. */ - static RecordId max() { - return RecordId(kMaxRepr); - } + RecordId(int high, int low) : RecordId((uint64_t(high) << 32) | uint32_t(low)) {} + + /** Tag for dispatching on null values */ + class Null {}; /** - * Returns the first record in the reserved id range at the top of the RecordId space. + * Helpers to dispatch based on the underlying type. */ - static RecordId minReserved() { - return RecordId(kMinReservedRepr); + template <typename OnNull, typename OnLong, typename OnOid> + auto withFormat(OnNull&& onNull, OnLong&& onLong, OnOid&& onOid) const { + switch (_format) { + case Format::kNull: + return onNull(Null()); + case Format::kLong: + return onLong(_storage._long); + case Format::kOid: + return onOid(_storage._oid); + default: + MONGO_UNREACHABLE; + } } - bool isNull() const { - return _repr == 0; + /** + * Returns the underlying data for a given format. Will invariant if the RecordId is not storing + * requested format. + */ + template <typename T> + T as() const { + if constexpr (std::is_same_v<T, int64_t>) { + // In the the int64_t format, null can also be represented by '0'. + if (_format == Format::kNull) { + return 0; + } + invariant(_format == Format::kLong); + return _storage._long; + } else { + static_assert(std::is_same_v<T, OID>, "Unsupported RecordID format"); + invariant(_format == Format::kOid); + return _storage._oid; + } } - int64_t repr() const { - return _repr; + bool isNull() const { + // In the the int64_t format, null can also represented by '0'. + if (_format == Format::kLong) { + return _storage._long == 0; + } + return _format == Format::kNull; } /** @@ -116,18 +195,55 @@ public: * excluding the reserved range at the top of the RecordId space. 
*/ bool isNormal() const { - return _repr > 0 && _repr < kMinReservedRepr; + return withFormat([](Null n) { return false; }, + [](int64_t rid) { return rid > 0 && rid < kMinReservedRepr; }, + [](const OID& oid) { return oid.compare(OID(kMinReservedOID)) < 0; }); } /** * Returns true if this RecordId falls within the reserved range at the top of the record space. */ bool isReserved() const { - return _repr >= kMinReservedRepr && _repr < kMaxRepr; + return withFormat([](Null n) { return false; }, + [](int64_t rid) { return rid >= kMinReservedRepr && rid < kMaxRepr; }, + [](const OID& oid) { + return oid.compare(OID(kMinReservedOID)) >= 0 && + oid.compare(OID(kMaxOID)) < 0; + }); + } + + int compare(const RecordId& rhs) const { + // Null always compares less than every other RecordId. + if (isNull() && rhs.isNull()) { + return 0; + } else if (isNull()) { + return -1; + } else if (rhs.isNull()) { + return 1; + } + invariant(_format == rhs._format); + return withFormat([](Null n) { return 0; }, + [&](const int64_t rid) { + return rid == rhs._storage._long ? 0 + : rid < rhs._storage._long ? -1 : 1; + }, + [&](const OID& oid) { return oid.compare(rhs._storage._oid); }); + } + + size_t hash() const { + size_t hash = 0; + withFormat([](Null n) {}, + [&](int64_t rid) { boost::hash_combine(hash, rid); }, + [&](const OID& oid) { + boost::hash_combine(hash, std::string(oid.view().view(), OID::kOIDSize)); + }); + return hash; } - int compare(RecordId rhs) const { - return _repr == rhs._repr ? 0 : _repr < rhs._repr ? -1 : 1; + std::string toString() const { + return withFormat([](Null n) { return std::string("null"); }, + [](int64_t rid) { return std::to_string(rid); }, + [](const OID& oid) { return oid.toString(); }); } /** @@ -135,70 +251,86 @@ public: * may differ across platforms. Hash values should not be persisted. */ struct Hasher { - size_t operator()(RecordId rid) const { - size_t hash = 0; - // TODO consider better hashes - boost::hash_combine(hash, rid.repr()); - return hash; + size_t operator()(const RecordId& rid) const { + return rid.hash(); } }; - /// members for Sorter - struct SorterDeserializeSettings {}; // unused - void serializeForSorter(BufBuilder& buf) const { - buf.appendNum(static_cast<long long>(_repr)); - } - static RecordId deserializeForSorter(BufReader& buf, const SorterDeserializeSettings&) { - return RecordId(buf.read<LittleEndian<int64_t>>()); - } - int memUsageForSorter() const { - return sizeof(RecordId); - } - RecordId getOwned() const { - return *this; - } - void serialize(fmt::memory_buffer& buffer) const { - fmt::format_to(buffer, "RecordId({})", _repr); + withFormat([&](Null n) { fmt::format_to(buffer, "RecordId(null)"); }, + [&](int64_t rid) { fmt::format_to(buffer, "RecordId({})", rid); }, + [&](const OID& oid) { fmt::format_to(buffer, "RecordId({})", oid.toString()); }); } void serialize(BSONObjBuilder* builder) const { - builder->append("RecordId"_sd, _repr); + withFormat([&](Null n) { builder->append("RecordId", "null"); }, + [&](int64_t rid) { builder->append("RecordId"_sd, rid); }, + [&](const OID& oid) { builder->append("RecordId"_sd, oid); }); } private: - int64_t _repr; + /** + * Specifies the storage format of this RecordId. + */ + enum class Format : uint32_t { + /** Contains no value */ + kNull, + /** int64_t */ + kLong, + /** OID = char[12] */ + kOid + }; + +// Pack our union so that it only uses 12 bytes. The union will default to a 8 byte alignment, +// making it 16 bytes total with 4 bytes of padding. 
Instead, we force the union to use a 4 byte +// alignment, so it packs into 12 bytes. This leaves 4 bytes for our Format, allowing the RecordId +// to use 16 bytes total. +#pragma pack(push, 4) + union Storage { + // Format::kLong + int64_t _long; + // Format::kOid + OID _oid; + + Storage() {} + Storage(int64_t s) : _long(s) {} + Storage(const OID& s) : _oid(s) {} + }; +#pragma pack(pop) + + Storage _storage; + Format _format; }; inline bool operator==(RecordId lhs, RecordId rhs) { - return lhs.repr() == rhs.repr(); + return lhs.compare(rhs) == 0; } inline bool operator!=(RecordId lhs, RecordId rhs) { - return lhs.repr() != rhs.repr(); + return lhs.compare(rhs); } inline bool operator<(RecordId lhs, RecordId rhs) { - return lhs.repr() < rhs.repr(); + return lhs.compare(rhs) < 0; } inline bool operator<=(RecordId lhs, RecordId rhs) { - return lhs.repr() <= rhs.repr(); + return lhs.compare(rhs) <= 0; } inline bool operator>(RecordId lhs, RecordId rhs) { - return lhs.repr() > rhs.repr(); + return lhs.compare(rhs) > 0; } inline bool operator>=(RecordId lhs, RecordId rhs) { - return lhs.repr() >= rhs.repr(); + return lhs.compare(rhs) >= 0; } inline StringBuilder& operator<<(StringBuilder& stream, const RecordId& id) { - return stream << "RecordId(" << id.repr() << ')'; + return stream << "RecordId(" << id.toString() << ')'; } inline std::ostream& operator<<(std::ostream& stream, const RecordId& id) { - return stream << "RecordId(" << id.repr() << ')'; + return stream << "RecordId(" << id.toString() << ')'; } inline std::ostream& operator<<(std::ostream& stream, const boost::optional<RecordId>& id) { - return stream << "RecordId(" << (id ? id.get().repr() : 0) << ')'; + return stream << "RecordId(" << (id ? id->toString() : 0) << ')'; } } // namespace mongo diff --git a/src/mongo/db/record_id_test.cpp b/src/mongo/db/record_id_test.cpp index 8a0e1100ad6..8a2649f6d3e 100644 --- a/src/mongo/db/record_id_test.cpp +++ b/src/mongo/db/record_id_test.cpp @@ -31,6 +31,7 @@ #include "mongo/db/record_id.h" +#include "mongo/unittest/death_test.h" #include "mongo/unittest/unittest.h" namespace mongo { @@ -45,6 +46,15 @@ TEST(RecordId, HashEqual) { ASSERT_EQUALS(hasher(locA), hasher(locB)); } +TEST(RecordId, HashEqualOid) { + RecordId locA(OID::gen()); + RecordId locB; + locB = locA; + ASSERT_EQUALS(locA, locB); + RecordId::Hasher hasher; + ASSERT_EQUALS(hasher(locA), hasher(locB)); +} + TEST(RecordId, HashNotEqual) { RecordId original(1, 2); RecordId diffFile(10, 2); @@ -65,5 +75,97 @@ TEST(RecordId, HashNotEqual) { ASSERT_NOT_EQUALS(hasher(original), hasher(reversed)); } +TEST(RecordId, HashNotEqualOid) { + RecordId loc1(OID::gen()); + RecordId loc2(OID::gen()); + RecordId loc3(OID::gen()); + ASSERT_NOT_EQUALS(loc1, loc2); + ASSERT_NOT_EQUALS(loc1, loc3); + ASSERT_NOT_EQUALS(loc2, loc3); + + // Unequal DiskLocs need not produce unequal hashes. But unequal hashes are likely, and + // assumed here for sanity checking of the custom hash implementation. 
+ RecordId::Hasher hasher; + ASSERT_NOT_EQUALS(hasher(loc1), hasher(loc2)); + ASSERT_NOT_EQUALS(hasher(loc1), hasher(loc3)); + ASSERT_NOT_EQUALS(hasher(loc2), hasher(loc3)); +} + +TEST(RecordId, OidTest) { + RecordId ridNull; + ASSERT(ridNull.isNull()); + ASSERT(!ridNull.isReserved()); + ASSERT(!ridNull.isValid()); + ASSERT(!ridNull.isNormal()); + + RecordId null2; + ASSERT(null2 == ridNull); + + OID oid1 = OID::gen(); + RecordId rid1(oid1); + ASSERT(rid1.isNormal()); + ASSERT(!rid1.isReserved()); + ASSERT(rid1.isValid()); + ASSERT_EQ(rid1.as<OID>(), oid1); + ASSERT_GT(rid1, ridNull); + ASSERT_LT(ridNull, rid1); +} + +TEST(RecordId, NullTest) { + // The int64 format should be considered null if its value is 0. Likewise, the value should be + // interpreted as int64_t(0) if it is null. + RecordId nullRid(0); + ASSERT(nullRid.isNull()); + + RecordId rid0; + ASSERT(rid0.isNull()); + ASSERT_EQ(0, rid0.as<int64_t>()); + ASSERT_EQ(nullRid, rid0); +} + +TEST(RecordId, OidTestCompare) { + RecordId ridNull; + RecordId rid0(OID::createFromString("000000000000000000000000")); + ASSERT_GT(rid0, ridNull); + + RecordId rid1(OID::createFromString("000000000000000000000001")); + ASSERT_GT(rid1, rid0); + ASSERT_EQ(RecordId::min<OID>(), rid0); + ASSERT_GT(RecordId::min<OID>(), ridNull); + + RecordId rid2(OID::createFromString("000000000000000000000002")); + ASSERT_GT(rid2, rid1); + RecordId rid3(OID::createFromString("ffffffffffffffffffffffff")); + ASSERT_GT(rid3, rid2); + ASSERT_GT(rid3, rid0); + + ASSERT_EQ(RecordId::max<OID>(), rid3); + ASSERT_GT(RecordId::max<OID>(), rid0); +} + +TEST(RecordId, Reservations) { + // It's important that reserved IDs like this never change. + RecordId ridReserved(RecordId::kMaxRepr - (1024 * 1024)); + ASSERT_EQ(ridReserved, + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + ASSERT(ridReserved.isReserved()); + ASSERT(ridReserved.isValid()); + ASSERT(!ridReserved.isNormal()); + + RecordId oidReserved(OID::createFromString("fffffffffffffffffff00000")); + ASSERT_EQ(oidReserved, + RecordId::reservedIdFor<OID>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + ASSERT(oidReserved.isReserved()); + ASSERT(oidReserved.isValid()); + ASSERT(!oidReserved.isNormal()); +} + +// RecordIds of different formats may not be compared. +DEATH_TEST(RecordId, UnsafeComparison, "Invariant failure") { + RecordId rid1(1); + RecordId rid2(OID::createFromString("000000000000000000000001")); + ASSERT_NOT_EQUALS(rid1, rid2); +} + } // namespace } // namespace mongo diff --git a/src/mongo/db/repl/replication_recovery.cpp b/src/mongo/db/repl/replication_recovery.cpp index a7674cf2499..2539d18929b 100644 --- a/src/mongo/db/repl/replication_recovery.cpp +++ b/src/mongo/db/repl/replication_recovery.cpp @@ -687,7 +687,7 @@ void ReplicationRecoveryImpl::_truncateOplogTo(OperationContext* opCtx, << truncateAfterTimestamp.toString() << ", but instead found " << redact(truncateAfterOplogEntry.toBSONForLogging()) << " with timestamp " - << Timestamp(truncateAfterRecordId.repr()).toString()); + << Timestamp(truncateAfterRecordId.as<int64_t>()).toString()); // Truncate the oplog AFTER the oplog entry found to be <= truncateAfterTimestamp. 
LOGV2(21553, diff --git a/src/mongo/db/storage/SConscript b/src/mongo/db/storage/SConscript index 46fdb63811f..23d13b1a904 100644 --- a/src/mongo/db/storage/SConscript +++ b/src/mongo/db/storage/SConscript @@ -413,6 +413,16 @@ env.Benchmark( ], ) +env.Benchmark( + target='storage_record_id_bm', + source=[ + 'record_id_bm.cpp' + ], + LIBDEPS=[ + '$BUILD_DIR/mongo/base', + ], +) + env.Library( target='remove_saver', source=[ diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h index 2a831d39275..cea85d7ddf4 100644 --- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h +++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h @@ -144,7 +144,7 @@ public: virtual Timestamp getAllDurableTimestamp() const override { RecordId id = _visibilityManager->getAllCommittedRecord(); - return Timestamp(id.repr()); + return Timestamp(id.as<int64_t>()); } boost::optional<Timestamp> getOplogNeededForCrashRecovery() const final { diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp index 1e85c6ed572..e757e70ffb5 100644 --- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp +++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp @@ -126,7 +126,7 @@ int64_t RecordStore::storageSize(OperationContext* opCtx, bool RecordStore::findRecord(OperationContext* opCtx, const RecordId& loc, RecordData* rd) const { StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead()); - auto it = workingCopy->find(createKey(_ident, loc.repr())); + auto it = workingCopy->find(createKey(_ident, loc.as<int64_t>())); if (it == workingCopy->end()) { return false; } @@ -139,7 +139,7 @@ void RecordStore::deleteRecord(OperationContext* opCtx, const RecordId& dl) { auto ru = RecoveryUnit::get(opCtx); StringStore* workingCopy(ru->getHead()); SizeAdjuster adjuster(opCtx, this); - invariant(workingCopy->erase(createKey(_ident, dl.repr()))); + invariant(workingCopy->erase(createKey(_ident, dl.as<int64_t>()))); ru->makeDirty(); } @@ -165,7 +165,7 @@ Status RecordStore::insertRecords(OperationContext* opCtx, oploghack::extractKey(record.data.data(), record.data.size()); if (!status.isOK()) return status.getStatus(); - thisRecordId = status.getValue().repr(); + thisRecordId = status.getValue().as<int64_t>(); _visibilityManager->addUncommittedRecord(opCtx, this, RecordId(thisRecordId)); } else { thisRecordId = _nextRecordId(opCtx); @@ -188,7 +188,7 @@ Status RecordStore::updateRecord(OperationContext* opCtx, StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead()); SizeAdjuster adjuster(opCtx, this); { - std::string key = createKey(_ident, oldLocation.repr()); + std::string key = createKey(_ident, oldLocation.as<int64_t>()); StringStore::const_iterator it = workingCopy->find(key); invariant(it != workingCopy->end()); workingCopy->update(StringStore::value_type{key, std::string(data, len)}); @@ -254,7 +254,7 @@ void RecordStore::cappedTruncateAfter(OperationContext* opCtx, RecordId end, boo auto ru = RecoveryUnit::get(opCtx); StringStore* workingCopy(ru->getHead()); WriteUnitOfWork wuow(opCtx); - const auto recordKey = createKey(_ident, end.repr()); + const auto recordKey = createKey(_ident, end.as<int64_t>()); auto recordIt = inclusive ? 
workingCopy->lower_bound(recordKey) : workingCopy->upper_bound(recordKey); auto endIt = workingCopy->upper_bound(_postfix); @@ -316,7 +316,7 @@ boost::optional<RecordId> RecordStore::oplogStartHack(OperationContext* opCtx, StringStore* workingCopy{RecoveryUnit::get(opCtx)->getHead()}; - std::string key = createKey(_ident, startingPosition.repr()); + std::string key = createKey(_ident, startingPosition.as<int64_t>()); StringStore::const_reverse_iterator it(workingCopy->upper_bound(key)); if (it == workingCopy->rend()) @@ -353,13 +353,13 @@ void RecordStore::_initHighestIdIfNeeded(OperationContext* opCtx) { return; } - // Need to start at 1 so we are always higher than RecordId::min() + // Need to start at 1 so we are always higher than RecordId::min<int64_t>() int64_t nextId = 1; // Find the largest RecordId currently in use. std::unique_ptr<SeekableRecordCursor> cursor = getCursor(opCtx, /*forward=*/false); if (auto record = cursor->next()) { - nextId = record->id.repr() + 1; + nextId = record->id.as<int64_t>() + 1; } _highestRecordId.store(nextId); @@ -457,7 +457,7 @@ boost::optional<Record> RecordStore::Cursor::seekExact(const RecordId& id) { _savedPosition = boost::none; _lastMoveWasRestore = false; StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead()); - std::string key = createKey(_rs._ident, id.repr()); + std::string key = createKey(_rs._ident, id.as<int64_t>()); it = workingCopy->find(key); if (it == workingCopy->end() || !inPrefix(it->first)) @@ -541,7 +541,7 @@ boost::optional<Record> RecordStore::ReverseCursor::seekExact(const RecordId& id _needFirstSeek = false; _savedPosition = boost::none; StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead()); - std::string key = createKey(_rs._ident, id.repr()); + std::string key = createKey(_rs._ident, id.as<int64_t>()); StringStore::const_iterator canFind = workingCopy->find(key); if (canFind == workingCopy->end() || !inPrefix(canFind->first)) { it = workingCopy->rend(); diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl.cpp index 4ee917b60e3..95a5d38eae8 100644 --- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl.cpp +++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl.cpp @@ -140,9 +140,9 @@ IndexDataEntry::IndexDataEntry(const std::string& indexDataEntry) : _buffer(reinterpret_cast<const uint8_t*>(indexDataEntry.data())) {} std::string IndexDataEntry::create(RecordId loc, const KeyString::TypeBits& typeBits) { - uint64_t repr = loc.repr(); + uint64_t repr = loc.as<int64_t>(); uint64_t typebitsSize = typeBits.getSize(); - std::string output(sizeof(loc) + sizeof(typebitsSize) + typebitsSize, '\0'); + std::string output(sizeof(repr) + sizeof(typebitsSize) + typebitsSize, '\0'); // RecordId std::memcpy(output.data(), &repr, sizeof(repr)); @@ -605,9 +605,11 @@ void CursorBase<CursorImpl>::setEndPosition(const BSONObj& key, bool inclusive) // If forward and inclusive or reverse and not inclusive, then we use the last element in this // ident. Otherwise, we use the first as our bound. 
if (_forward == inclusive) - it = workingCopy->upper_bound(createRadixKeyFromObj(key, RecordId::max(), _prefix, _order)); + it = workingCopy->upper_bound( + createRadixKeyFromObj(key, RecordId::max<int64_t>(), _prefix, _order)); else - it = workingCopy->lower_bound(createRadixKeyFromObj(key, RecordId::min(), _prefix, _order)); + it = workingCopy->lower_bound( + createRadixKeyFromObj(key, RecordId::min<int64_t>(), _prefix, _order)); if (_forward) _endPos = it; else @@ -659,10 +661,10 @@ boost::optional<KeyStringEntry> CursorBase<CursorImpl>::seekAfterProcessing( // is also reversed. if (_forward == inclusive) it = _workingCopy->lower_bound( - createRadixKeyFromKSWithoutRecordId(keyStringVal, RecordId::min(), _prefix)); + createRadixKeyFromKSWithoutRecordId(keyStringVal, RecordId::min<int64_t>(), _prefix)); else it = _workingCopy->upper_bound( - createRadixKeyFromKSWithoutRecordId(keyStringVal, RecordId::max(), _prefix)); + createRadixKeyFromKSWithoutRecordId(keyStringVal, RecordId::max<int64_t>(), _prefix)); if (_forward) _forwardIt = it; else @@ -895,10 +897,11 @@ bool CursorUnique::checkCursorValid() { // For unique indexes, we need to check if the cursor moved up a position when it // was restored. This isn't required for non-unique indexes because we store the // RecordId in the KeyString and use a "<" comparison instead of "<=" since we know - // that no RecordId will ever reach RecordId::max() so we don't need to check the - // equal side of things. This assumption doesn't hold for unique index KeyStrings. + // that no RecordId will ever reach RecordId::max<int64_t>() so we don't need to + // check the equal side of things. This assumption doesn't hold for unique index + // KeyStrings. std::string endPosKeyString = - createRadixKeyFromObj(*_endPosKey, RecordId::min(), _prefix, _order); + createRadixKeyFromObj(*_endPosKey, RecordId::min<int64_t>(), _prefix, _order); if (_forwardIt->first.compare(endPosKeyString) <= 0) return true; @@ -920,7 +923,7 @@ bool CursorUnique::checkCursorValid() { return true; std::string endPosKeyString = - createRadixKeyFromObj(*_endPosKey, RecordId::min(), _prefix, _order); + createRadixKeyFromObj(*_endPosKey, RecordId::min<int64_t>(), _prefix, _order); if (_reverseIt->first.compare(endPosKeyString) >= 0) return true; @@ -1461,17 +1464,19 @@ SortedDataInterfaceStandard::SortedDataInterfaceStandard(OperationContext* opCtx // This is the string representation of the KeyString before elements in this ident, which is // ident + \0. This is before all elements in this ident. _KSForIdentStart = createRadixKeyWithLocFromObj( - BSONObj(), RecordId::min(), ident.toString().append(1, '\0'), _ordering); + BSONObj(), RecordId::min<int64_t>(), ident.toString().append(1, '\0'), _ordering); // Similarly, this is the string representation of the KeyString for something greater than // all other elements in this ident. 
- _KSForIdentEnd = createRadixKeyWithLocFromObj(BSONObj(), RecordId::min(), _identEnd, _ordering); + _KSForIdentEnd = + createRadixKeyWithLocFromObj(BSONObj(), RecordId::min<int64_t>(), _identEnd, _ordering); } SortedDataInterfaceStandard::SortedDataInterfaceStandard(const Ordering& ordering, StringData ident) : SortedDataInterfaceBase(ordering, ident) { _KSForIdentStart = createRadixKeyWithLocFromObj( - BSONObj(), RecordId::min(), ident.toString().append(1, '\0'), _ordering); - _KSForIdentEnd = createRadixKeyWithLocFromObj(BSONObj(), RecordId::min(), _identEnd, _ordering); + BSONObj(), RecordId::min<int64_t>(), ident.toString().append(1, '\0'), _ordering); + _KSForIdentEnd = + createRadixKeyWithLocFromObj(BSONObj(), RecordId::min<int64_t>(), _identEnd, _ordering); } Status SortedDataInterfaceStandard::insert(OperationContext* opCtx, diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_visibility_manager.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_visibility_manager.cpp index 3a20f37f214..fb9eadb2841 100644 --- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_visibility_manager.cpp +++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_visibility_manager.cpp @@ -101,7 +101,7 @@ void VisibilityManager::addUncommittedRecord(OperationContext* opCtx, RecordId VisibilityManager::getAllCommittedRecord() { stdx::lock_guard<Latch> lock(_stateLock); return _uncommittedRecords.empty() ? _highestSeen - : RecordId(_uncommittedRecords.begin()->repr() - 1); + : RecordId(_uncommittedRecords.begin()->as<int64_t>() - 1); } bool VisibilityManager::isFirstHidden(RecordId rid) { diff --git a/src/mongo/db/storage/key_string.cpp b/src/mongo/db/storage/key_string.cpp index 96c94000ba1..173febc1313 100644 --- a/src/mongo/db/storage/key_string.cpp +++ b/src/mongo/db/storage/key_string.cpp @@ -463,11 +463,11 @@ void BuilderBase<BufferT>::appendRecordId(RecordId loc) { // big-endian order. This does not encode negative RecordIds to give maximum space to // positive RecordIds which are the only ones that are allowed to be stored in an index. - int64_t raw = loc.repr(); + int64_t raw = loc.as<int64_t>(); if (raw < 0) { - // Note: we encode RecordId::min() and RecordId() the same which is ok, as they are - // never stored so they will never be compared to each other. - invariant(raw == RecordId::min().repr()); + // Note: we encode RecordId::min<int64_t>() and RecordId() the same which is ok, as they + // are never stored so they will never be compared to each other. 
+ invariant(raw == RecordId::min<int64_t>().as<int64_t>()); raw = 0; } const uint64_t value = static_cast<uint64_t>(raw); diff --git a/src/mongo/db/storage/key_string_test.cpp b/src/mongo/db/storage/key_string_test.cpp index e253acb6568..b40f12f74b4 100644 --- a/src/mongo/db/storage/key_string_test.cpp +++ b/src/mongo/db/storage/key_string_test.cpp @@ -444,7 +444,7 @@ TEST_F(KeyStringBuilderTest, Array1) { ROUNDTRIP(version, BSON("" << BSON_ARRAY(1 << 2 << 3))); { - KeyString::Builder a(version, emptyArray, ALL_ASCENDING, RecordId::min()); + KeyString::Builder a(version, emptyArray, ALL_ASCENDING, RecordId::min<int64_t>()); KeyString::Builder b(version, emptyArray, ALL_ASCENDING, RecordId(5)); ASSERT_LESS_THAN(a, b); } @@ -886,7 +886,7 @@ TEST_F(KeyStringBuilderTest, LotsOfNumbers3) { TEST_F(KeyStringBuilderTest, RecordIdOrder1) { Ordering ordering = Ordering::make(BSON("a" << 1)); - KeyString::Builder a(version, BSON("" << 5), ordering, RecordId::min()); + KeyString::Builder a(version, BSON("" << 5), ordering, RecordId::min<int64_t>()); KeyString::Builder b(version, BSON("" << 5), ordering, RecordId(2)); KeyString::Builder c(version, BSON("" << 5), ordering, RecordId(3)); KeyString::Builder d(version, BSON("" << 6), ordering, RecordId()); @@ -901,7 +901,7 @@ TEST_F(KeyStringBuilderTest, RecordIdOrder1) { TEST_F(KeyStringBuilderTest, RecordIdOrder2) { Ordering ordering = Ordering::make(BSON("a" << -1 << "b" << -1)); - KeyString::Builder a(version, BSON("" << 5 << "" << 6), ordering, RecordId::min()); + KeyString::Builder a(version, BSON("" << 5 << "" << 6), ordering, RecordId::min<int64_t>()); KeyString::Builder b(version, BSON("" << 5 << "" << 6), ordering, RecordId(5)); KeyString::Builder c(version, BSON("" << 5 << "" << 5), ordering, RecordId(4)); KeyString::Builder d(version, BSON("" << 3 << "" << 4), ordering, RecordId(3)); @@ -917,7 +917,7 @@ TEST_F(KeyStringBuilderTest, RecordIdOrder2) { TEST_F(KeyStringBuilderTest, RecordIdOrder2Double) { Ordering ordering = Ordering::make(BSON("a" << -1 << "b" << -1)); - KeyString::Builder a(version, BSON("" << 5.0 << "" << 6.0), ordering, RecordId::min()); + KeyString::Builder a(version, BSON("" << 5.0 << "" << 6.0), ordering, RecordId::min<int64_t>()); KeyString::Builder b(version, BSON("" << 5.0 << "" << 6.0), ordering, RecordId(5)); KeyString::Builder c(version, BSON("" << 3.0 << "" << 4.0), ordering, RecordId(3)); @@ -1555,11 +1555,11 @@ TEST_F(KeyStringBuilderTest, RecordIds) { if (rid.isValid()) { ASSERT_GT(ks, KeyString::Builder(version, RecordId())); - ASSERT_GT(ks, KeyString::Builder(version, RecordId::min())); - ASSERT_LT(ks, KeyString::Builder(version, RecordId::max())); + ASSERT_GT(ks, KeyString::Builder(version, RecordId::min<int64_t>())); + ASSERT_LT(ks, KeyString::Builder(version, RecordId::max<int64_t>())); - ASSERT_GT(ks, KeyString::Builder(version, RecordId(rid.repr() - 1))); - ASSERT_LT(ks, KeyString::Builder(version, RecordId(rid.repr() + 1))); + ASSERT_GT(ks, KeyString::Builder(version, RecordId(rid.as<int64_t>() - 1))); + ASSERT_LT(ks, KeyString::Builder(version, RecordId(rid.as<int64_t>() + 1))); } } @@ -1579,7 +1579,7 @@ TEST_F(KeyStringBuilderTest, RecordIds) { { // Test concatenating RecordIds like in a unique index. 
KeyString::Builder ks(version); - ks.appendRecordId(RecordId::max()); // uses all bytes + ks.appendRecordId(RecordId::max<int64_t>()); // uses all bytes ks.appendRecordId(rid); ks.appendRecordId(RecordId(0xDEADBEEF)); // uses some extra bytes ks.appendRecordId(rid); @@ -1591,7 +1591,7 @@ TEST_F(KeyStringBuilderTest, RecordIds) { // forward scan BufReader reader(ks.getBuffer(), ks.getSize()); - ASSERT_EQ(KeyString::decodeRecordId(&reader), RecordId::max()); + ASSERT_EQ(KeyString::decodeRecordId(&reader), RecordId::max<int64_t>()); ASSERT_EQ(KeyString::decodeRecordId(&reader), rid); ASSERT_EQ(KeyString::decodeRecordId(&reader), RecordId(0xDEADBEEF)); ASSERT_EQ(KeyString::decodeRecordId(&reader), rid); diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp index 85c8f16dc87..4c28829fb8e 100644 --- a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp +++ b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp @@ -567,7 +567,7 @@ TEST(KVEngineTestHarness, BasicTimestampSingle) { opCtx1.recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kProvided, kReadTimestamp); - ASSERT(!rs->findRecord(&opCtx1, RecordId::min(), nullptr)); + ASSERT(!rs->findRecord(&opCtx1, RecordId::min<int64_t>(), nullptr)); // Insert a record at a later time. RecordId rid; diff --git a/src/mongo/db/storage/oplog_hack.cpp b/src/mongo/db/storage/oplog_hack.cpp index ed71df2c8e6..69dd1c4d900 100644 --- a/src/mongo/db/storage/oplog_hack.cpp +++ b/src/mongo/db/storage/oplog_hack.cpp @@ -53,9 +53,9 @@ StatusWith<RecordId> keyForOptime(const Timestamp& opTime) { return StatusWith<RecordId>(ErrorCodes::BadValue, "ts inc too high"); const RecordId out = RecordId(opTime.getSecs(), opTime.getInc()); - if (out <= RecordId::min()) + if (out <= RecordId::min<int64_t>()) return StatusWith<RecordId>(ErrorCodes::BadValue, "ts too low"); - if (out >= RecordId::max()) + if (out >= RecordId::max<int64_t>()) return StatusWith<RecordId>(ErrorCodes::BadValue, "ts too high"); return StatusWith<RecordId>(out); diff --git a/src/mongo/db/storage/record_id_bm.cpp b/src/mongo/db/storage/record_id_bm.cpp new file mode 100644 index 00000000000..a246db21e8e --- /dev/null +++ b/src/mongo/db/storage/record_id_bm.cpp @@ -0,0 +1,69 @@ +/** + * Copyright (C) 2018-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * <http://www.mongodb.com/licensing/server-side-public-license>. + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. 
If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/platform/basic.h" + +#include "mongo/db/record_id.h" + +#include <benchmark/benchmark.h> + +namespace mongo { +namespace { + +RecordId incInt(RecordId r) { + return RecordId(r.as<int64_t>() + 1); +} + +RecordId incOID(RecordId r) { + OID o = r.as<OID>(); + o.setTimestamp(o.getTimestamp() + 1); + return RecordId(o); +} + +void BM_RecordIdCopyLong(benchmark::State& state) { + RecordId rid(1 << 31); + for (auto _ : state) { + benchmark::ClobberMemory(); + benchmark::DoNotOptimize(rid = incInt(rid)); + } +} + +void BM_RecordIdCopyOID(benchmark::State& state) { + RecordId rid(OID::gen()); + for (auto _ : state) { + benchmark::ClobberMemory(); + benchmark::DoNotOptimize(rid = incOID(rid)); + } +} + +BENCHMARK(BM_RecordIdCopyLong); +BENCHMARK(BM_RecordIdCopyOID); + +} // namespace +} // namespace mongo diff --git a/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp b/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp index 723a43ce834..9a863745d6c 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp @@ -110,7 +110,8 @@ TEST(SortedDataInterface, BuilderAddKeyWithReservedRecordId) { const std::unique_ptr<SortedDataBuilderInterface> builder( sorted->makeBulkBuilder(opCtx.get(), true)); - RecordId reservedLoc(RecordId::ReservedId::kWildcardMultikeyMetadataId); + RecordId reservedLoc( + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); ASSERT(reservedLoc.isReserved()); WriteUnitOfWork wuow(opCtx.get()); diff --git a/src/mongo/db/storage/sorted_data_interface_test_insert.cpp b/src/mongo/db/storage/sorted_data_interface_test_insert.cpp index 74de3c0ec3e..32b674cae0e 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_insert.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_insert.cpp @@ -707,7 +707,8 @@ TEST(SortedDataInterface, InsertReservedRecordId) { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); ASSERT(sorted->isEmpty(opCtx.get())); WriteUnitOfWork uow(opCtx.get()); - RecordId reservedLoc(RecordId::ReservedId::kWildcardMultikeyMetadataId); + RecordId reservedLoc( + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); ASSERT(reservedLoc.isReserved()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, reservedLoc), diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp index d688e84f3dc..e55281db60f 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp @@ -63,7 +63,7 @@ void WiredTigerOplogManager::startVisibilityThread(OperationContext* opCtx, // event of a secondary crashing, replication recovery will truncate the oplog, resetting // visibility to the truncate point. In the event of a primary crashing, it will perform // rollback before servicing oplog reads. 
- auto topOfOplogTimestamp = Timestamp(lastRecord->id.repr()); + auto topOfOplogTimestamp = Timestamp(lastRecord->id.as<int64_t>()); setOplogReadTimestamp(topOfOplogTimestamp); LOGV2_DEBUG(22368, 1, @@ -174,7 +174,7 @@ void WiredTigerOplogManager::waitForAllEarlierOplogWritesToBeVisible( LOGV2_DEBUG(22371, 2, "Operation is waiting for an entry to become visible in the oplog.", - "awaitedOplogEntryTimestamp"_attr = Timestamp(waitingFor.repr()), + "awaitedOplogEntryTimestamp"_attr = Timestamp(waitingFor.as<int64_t>()), "currentLatestVisibleOplogEntryTimestamp"_attr = Timestamp(currentLatestVisibleTimestamp)); } diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp index d6cb545542c..1fca344df7f 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp @@ -261,7 +261,7 @@ void WiredTigerRecordStore::OplogStones::awaitHasExcessStonesOrDead() { "wallTime"_attr = stone.wallTime, "pinnedOplog"_attr = _rs->getPinnedOplog()); - if (static_cast<std::uint64_t>(stone.lastRecord.repr()) < + if (static_cast<std::uint64_t>(stone.lastRecord.as<int64_t>()) < _rs->getPinnedOplog().asULL()) { break; } @@ -518,7 +518,7 @@ void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationCon _calculateStonesByScanning(opCtx); return; } - earliestOpTime = Timestamp(record->id.repr()); + earliestOpTime = Timestamp(record->id.as<int64_t>()); } { @@ -533,7 +533,7 @@ void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationCon _calculateStonesByScanning(opCtx); return; } - latestOpTime = Timestamp(record->id.repr()); + latestOpTime = Timestamp(record->id.as<int64_t>()); } LOGV2(22389, @@ -1410,7 +1410,8 @@ void WiredTigerRecordStore::reclaimOplog(OperationContext* opCtx, Timestamp mayT while (auto stone = _oplogStones->peekOldestStoneIfNeeded()) { invariant(stone->lastRecord.isValid()); - if (static_cast<std::uint64_t>(stone->lastRecord.repr()) >= mayTruncateUpTo.asULL()) { + if (static_cast<std::uint64_t>(stone->lastRecord.as<int64_t>()) >= + mayTruncateUpTo.asULL()) { // Do not truncate oplogs needed for replication recovery. return; } @@ -1460,12 +1461,12 @@ void WiredTigerRecordStore::reclaimOplog(OperationContext* opCtx, Timestamp mayT } invariantWTOK(ret); RecordId nextRecord = getKey(cursor); - if (static_cast<std::uint64_t>(nextRecord.repr()) > mayTruncateUpTo.asULL()) { + if (static_cast<std::uint64_t>(nextRecord.as<int64_t>()) > mayTruncateUpTo.asULL()) { LOGV2_DEBUG(5140901, 0, "Cannot truncate as there are no oplog entries after the stone but " "before the truncate-up-to point", - "nextRecord"_attr = Timestamp(nextRecord.repr()), + "nextRecord"_attr = Timestamp(nextRecord.as<int64_t>()), "mayTruncateUpTo"_attr = mayTruncateUpTo); return; } @@ -1560,7 +1561,7 @@ Status WiredTigerRecordStore::_insertRecords(OperationContext* opCtx, // flush. Because these are direct writes into the oplog, the machinery to trigger a // journal flush is bypassed. A followup oplog read will require a fresh visibility // value to make progress. 
- ts = Timestamp(record.id.repr()); + ts = Timestamp(record.id.as<int64_t>()); opCtx->recoveryUnit()->setOrderedCommit(false); } else { ts = timestamps[i]; @@ -1598,10 +1599,10 @@ Status WiredTigerRecordStore::_insertRecords(OperationContext* opCtx, } bool WiredTigerRecordStore::isOpHidden_forTest(const RecordId& id) const { - invariant(id.repr() > 0); + invariant(id.as<int64_t>() > 0); invariant(_kvEngine->getOplogManager()->isRunning()); return _kvEngine->getOplogManager()->getOplogReadTimestamp() < - static_cast<std::uint64_t>(id.repr()); + static_cast<std::uint64_t>(id.as<int64_t>()); } bool WiredTigerRecordStore::haveCappedWaiters() { @@ -1637,7 +1638,7 @@ StatusWith<Timestamp> WiredTigerRecordStore::getLatestOplogTimestamp( RecordId recordId = getKey(cursor); - return {Timestamp(static_cast<unsigned long long>(recordId.repr()))}; + return {Timestamp(static_cast<unsigned long long>(recordId.as<int64_t>()))}; } StatusWith<Timestamp> WiredTigerRecordStore::getEarliestOplogTimestamp(OperationContext* opCtx) { @@ -1664,7 +1665,7 @@ StatusWith<Timestamp> WiredTigerRecordStore::getEarliestOplogTimestamp(Operation _cappedFirstRecord = getKey(cursor); } - return {Timestamp(static_cast<unsigned long long>(_cappedFirstRecord.repr()))}; + return {Timestamp(static_cast<unsigned long long>(_cappedFirstRecord.as<int64_t>()))}; } Status WiredTigerRecordStore::updateRecord(OperationContext* opCtx, @@ -1966,7 +1967,7 @@ boost::optional<RecordId> WiredTigerRecordStore::oplogStartHack( RecordId searchFor = startingPosition; auto visibilityTs = wtRu->getOplogVisibilityTs(); - if (visibilityTs && searchFor.repr() > *visibilityTs) { + if (visibilityTs && searchFor.as<int64_t>() > *visibilityTs) { searchFor = RecordId(*visibilityTs); } @@ -2016,13 +2017,13 @@ void WiredTigerRecordStore::_initNextIdIfNeeded(OperationContext* opCtx) { return; } - // Need to start at 1 so we are always higher than RecordId::min() + // Need to start at 1 so we are always higher than RecordId::min<int64_t>() int64_t nextId = 1; // Find the largest RecordId currently in use. std::unique_ptr<SeekableRecordCursor> cursor = getCursor(opCtx, /*forward=*/false); if (auto record = cursor->next()) { - nextId = record->id.repr() + 1; + nextId = record->id.as<int64_t>() + 1; } _nextIdNum.store(nextId); @@ -2191,7 +2192,7 @@ void WiredTigerRecordStore::cappedTruncateAfter(OperationContext* opCtx, if (_isOplog) { // Immediately rewind visibility to our truncation point, to prevent new // transactions from appearing. 
- Timestamp truncTs(lastKeptId.repr()); + Timestamp truncTs(lastKeptId.as<int64_t>()); if (!serverGlobalParams.enableMajorityReadConcern && _kvEngine->getOldestTimestamp() > truncTs) { @@ -2284,7 +2285,7 @@ boost::optional<Record> WiredTigerRecordStoreCursorBase::next() { id = getKey(c); } - if (_forward && _oplogVisibleTs && id.repr() > *_oplogVisibleTs) { + if (_forward && _oplogVisibleTs && id.as<int64_t>() > *_oplogVisibleTs) { _eof = true; return {}; } @@ -2317,7 +2318,7 @@ boost::optional<Record> WiredTigerRecordStoreCursorBase::next() { boost::optional<Record> WiredTigerRecordStoreCursorBase::seekExact(const RecordId& id) { invariant(_hasRestored); - if (_forward && _oplogVisibleTs && id.repr() > *_oplogVisibleTs) { + if (_forward && _oplogVisibleTs && id.as<int64_t>() > *_oplogVisibleTs) { _eof = true; return {}; } @@ -2453,7 +2454,7 @@ RecordId StandardWiredTigerRecordStore::getKey(WT_CURSOR* cursor) const { } void StandardWiredTigerRecordStore::setKey(WT_CURSOR* cursor, RecordId id) const { - cursor->set_key(cursor, id.repr()); + cursor->set_key(cursor, id.as<int64_t>()); } std::unique_ptr<SeekableRecordCursor> StandardWiredTigerRecordStore::getCursor( @@ -2481,7 +2482,7 @@ WiredTigerRecordStoreStandardCursor::WiredTigerRecordStoreStandardCursor( : WiredTigerRecordStoreCursorBase(opCtx, rs, forward) {} void WiredTigerRecordStoreStandardCursor::setKey(WT_CURSOR* cursor, RecordId id) const { - cursor->set_key(cursor, id.repr()); + cursor->set_key(cursor, id.as<int64_t>()); } RecordId WiredTigerRecordStoreStandardCursor::getKey(WT_CURSOR* cursor) const { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp index cfd4b8a43ca..fba746efab0 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp @@ -950,8 +950,8 @@ TEST(WiredTigerRecordStoreTest, GetLatestOplogTest) { // 1) Initialize the top of oplog to "1". ServiceContext::UniqueOperationContext op1(harnessHelper->newOperationContext()); op1->recoveryUnit()->beginUnitOfWork(op1.get()); - Timestamp tsOne = - Timestamp(static_cast<unsigned long long>(_oplogOrderInsertOplog(op1.get(), rs, 1).repr())); + Timestamp tsOne = Timestamp( + static_cast<unsigned long long>(_oplogOrderInsertOplog(op1.get(), rs, 1).as<int64_t>())); op1->recoveryUnit()->commitUnitOfWork(); // Asserting on a recovery unit without a snapshot. ASSERT_EQ(tsOne, wtrs->getLatestOplogTimestamp(op1.get())); @@ -969,8 +969,8 @@ TEST(WiredTigerRecordStoreTest, GetLatestOplogTest) { ServiceContext::UniqueOperationContext op2(harnessHelper->newOperationContext()); op2->recoveryUnit()->beginUnitOfWork(op2.get()); - Timestamp tsThree = - Timestamp(static_cast<unsigned long long>(_oplogOrderInsertOplog(op2.get(), rs, 3).repr())); + Timestamp tsThree = Timestamp( + static_cast<unsigned long long>(_oplogOrderInsertOplog(op2.get(), rs, 3).as<int64_t>())); // Before committing, the query still only sees timestamp "1". ASSERT_EQ(tsOne, wtrs->getLatestOplogTimestamp(op2.get())); op2->recoveryUnit()->commitUnitOfWork(); diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp index 3448c1b22e9..047bf5f1b72 100644 --- a/src/mongo/dbtests/validate_tests.cpp +++ b/src/mongo/dbtests/validate_tests.cpp @@ -987,7 +987,8 @@ public: // Insert additional multikey path metadata index keys. 
lockDb(MODE_X); - const RecordId recordId(RecordId::ReservedId::kWildcardMultikeyMetadataId); + const RecordId recordId( + RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); const IndexCatalog* indexCatalog = coll->getIndexCatalog(); auto descriptor = indexCatalog->findIndexByName(&_opCtx, indexName); auto accessMethod = @@ -1118,7 +1119,8 @@ public: lockDb(MODE_X); { WriteUnitOfWork wunit(&_opCtx); - RecordId recordId(RecordId::ReservedId::kWildcardMultikeyMetadataId); + RecordId recordId(RecordId::reservedIdFor<int64_t>( + RecordId::Reservation::kWildcardMultikeyMetadataId)); const KeyString::Value indexKey = KeyString::HeapBuilder(sortedDataInterface->getKeyStringVersion(), BSON("" << 1 << "" @@ -1169,7 +1171,7 @@ public: // Insert documents. OpDebug* const nullOpDebug = nullptr; - RecordId rid = RecordId::min(); + RecordId rid = RecordId::min<int64_t>(); lockDb(MODE_X); { WriteUnitOfWork wunit(&_opCtx); @@ -1265,7 +1267,7 @@ public: // Insert documents. OpDebug* const nullOpDebug = nullptr; - RecordId rid = RecordId::min(); + RecordId rid = RecordId::min<int64_t>(); lockDb(MODE_X); { WriteUnitOfWork wunit(&_opCtx); @@ -1382,7 +1384,7 @@ public: // Insert documents. OpDebug* const nullOpDebug = nullptr; - RecordId rid = RecordId::min(); + RecordId rid = RecordId::min<int64_t>(); lockDb(MODE_X); { WriteUnitOfWork wunit(&_opCtx); @@ -1650,7 +1652,7 @@ public: // Insert documents. OpDebug* const nullOpDebug = nullptr; - RecordId rid = RecordId::min(); + RecordId rid = RecordId::min<int64_t>(); lockDb(MODE_X); { WriteUnitOfWork wunit(&_opCtx); @@ -1828,7 +1830,7 @@ public: // Insert documents. OpDebug* const nullOpDebug = nullptr; - RecordId rid = RecordId::min(); + RecordId rid = RecordId::min<int64_t>(); lockDb(MODE_X); { WriteUnitOfWork wunit(&_opCtx); @@ -2392,7 +2394,7 @@ public: // Insert a document. OpDebug* const nullOpDebug = nullptr; - RecordId rid = RecordId::min(); + RecordId rid = RecordId::min<int64_t>(); lockDb(MODE_X); { WriteUnitOfWork wunit(&_opCtx); diff --git a/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp b/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp index 19b45551ccb..cb478c627c2 100644 --- a/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp +++ b/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp @@ -45,7 +45,7 @@ namespace { using namespace unittest; -static const RecordId kMetadataId = RecordId::minReserved(); +static const RecordId kMetadataId = RecordId::minReserved<int64_t>(); static const int kIndexVersion = static_cast<int>(IndexDescriptor::kLatestIndexVersion); static const NamespaceString kDefaultNSS{"wildcard_multikey_persistence.test"}; |
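
A few standalone sketches may help when reading the hunks above. The recurring change is that RecordId::repr() gives way to a typed accessor, as<int64_t>(), and that the min()/max()/minReserved() bounds and the reserved-id helpers become templates over the representation type, so a RecordId can carry either an int64_t or an OID. The class below is a minimal, self-contained model of that shape; the name SketchRecordId, its std::variant layout, and the use of numeric_limits for the bounds are illustrative assumptions, not the actual mongo::RecordId implementation.

// record_id_sketch.cpp -- simplified stand-in for a typed record identifier.
// Build with: g++ -std=c++17 record_id_sketch.cpp
#include <array>
#include <cassert>
#include <cstdint>
#include <iostream>
#include <limits>
#include <variant>

// Illustrative only: an id that can hold either a 64-bit integer key or a
// fixed 12-byte key (similar in spirit to an OID).
class SketchRecordId {
public:
    using OidBytes = std::array<unsigned char, 12>;

    SketchRecordId() = default;
    explicit SketchRecordId(int64_t v) : _value(v) {}
    explicit SketchRecordId(const OidBytes& o) : _value(o) {}

    // Typed accessor: the caller states which representation it expects, and
    // the assert fires if the id actually holds the other format.
    template <typename T>
    const T& as() const {
        assert(std::holds_alternative<T>(_value));
        return std::get<T>(_value);
    }

    // Bounds are also parameterized by the representation type.
    template <typename T>
    static SketchRecordId min();
    template <typename T>
    static SketchRecordId max();

private:
    std::variant<std::monostate, int64_t, OidBytes> _value;
};

template <>
SketchRecordId SketchRecordId::min<int64_t>() {
    return SketchRecordId(std::numeric_limits<int64_t>::min());
}
template <>
SketchRecordId SketchRecordId::max<int64_t>() {
    return SketchRecordId(std::numeric_limits<int64_t>::max());
}

int main() {
    SketchRecordId rid(42);
    std::cout << "as<int64_t>()  = " << rid.as<int64_t>() << '\n';
    std::cout << "min<int64_t>() = " << SketchRecordId::min<int64_t>().as<int64_t>() << '\n';
    std::cout << "max<int64_t>() = " << SketchRecordId::max<int64_t>().as<int64_t>() << '\n';
    return 0;
}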
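
In key_string.cpp, appendRecordId() keeps the rule described in its comment: ids are written so that byte order matches numeric order, and the only negative value it accepts, the min sentinel, is encoded the same as a null RecordId because neither is ever stored. The fixed-width big-endian toy below shows only that ordering and clamping property; the real KeyString RecordId encoding is variable-length and is not reproduced here.

// keystring_recordid_sketch.cpp -- illustration of the clamping rule in the
// appendRecordId() hunk above. NOT the real KeyString wire format; a fixed
// 8-byte big-endian stand-in that demonstrates the ordering property only.
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iostream>

std::array<unsigned char, 8> encodeRecordIdSketch(int64_t raw) {
    if (raw < 0) {
        // Mirrors the invariant in the patch: the only negative value seen
        // here is the "min" sentinel, and it encodes the same as a null id.
        raw = 0;
    }
    const uint64_t value = static_cast<uint64_t>(raw);
    std::array<unsigned char, 8> out{};
    for (int i = 0; i < 8; ++i)
        out[i] = static_cast<unsigned char>(value >> (8 * (7 - i)));  // big-endian
    return out;
}

int main() {
    auto a = encodeRecordIdSketch(-1);       // sentinel-style negative id
    auto b = encodeRecordIdSketch(0);        // null id
    auto c = encodeRecordIdSketch(5);
    auto d = encodeRecordIdSketch(1 << 20);

    assert(a == b);  // negatives collapse to the null encoding
    assert(std::lexicographical_compare(b.begin(), b.end(), c.begin(), c.end()));
    assert(std::lexicographical_compare(c.begin(), c.end(), d.begin(), d.end()));
    std::cout << "big-endian encoding preserves RecordId order\n";
    return 0;
}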
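
The setEndPosition() and seekAfterProcessing() hunks pick their radix-tree bound by pairing the search key with either RecordId::min<int64_t>() or RecordId::max<int64_t>(): a bound that includes the key must sit just past every entry carrying that key, so the maximum id is appended, while an excluding bound wants the position before the first such entry, so the minimum id is used. The same trick on a std::map keyed by (key, id) pairs, which stands in here for the radix tree of KeyString-encoded entries:

// bound_sketch.cpp -- standalone model of choosing an index bound by appending
// a minimum or maximum record id to the search key, as the sorted_impl hunks
// above do with createRadixKeyFromObj().
#include <cstdint>
#include <iostream>
#include <limits>
#include <map>
#include <string>
#include <utility>

using Entry = std::pair<std::string, int64_t>;  // (index key, record id)

int main() {
    std::map<Entry, int> index{
        {{"a", 3}, 0}, {{"b", 1}, 0}, {{"b", 7}, 0}, {{"c", 2}, 0}};

    constexpr int64_t kMinId = std::numeric_limits<int64_t>::min();
    constexpr int64_t kMaxId = std::numeric_limits<int64_t>::max();

    // Inclusive of "b": the bound sits after every ("b", id) entry.
    auto afterB = index.upper_bound({"b", kMaxId});
    std::cout << "first key past an inclusive \"b\" bound: " << afterB->first.first << '\n';  // c

    // Exclusive of "b": the bound sits before the first ("b", id) entry.
    auto beforeB = index.lower_bound({"b", kMinId});
    std::cout << "first key at an exclusive \"b\" bound: " << beforeB->first.first << '\n';  // b
    return 0;
}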
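
The validate and sorted-data tests above switch from the RecordId::ReservedId enum to RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId), and the wildcard multikey test now derives its metadata id from RecordId::minReserved<int64_t>(). A sketch of the idea follows, assuming a reserved band at the top of the int64 range; the band width and the enum layout are invented for illustration, only the shape of the API mirrors the patch.

// reserved_id_sketch.cpp -- standalone model of reserved record ids addressed
// through an enum rather than raw numbers.
#include <cassert>
#include <cstdint>
#include <iostream>
#include <limits>

enum class Reservation { kWildcardMultikeyMetadataId = 0 };

constexpr int64_t kMaxId = std::numeric_limits<int64_t>::max();
constexpr int64_t kMinReservedId = kMaxId - 1024;  // illustrative band width

int64_t reservedIdFor(Reservation which) {
    return kMinReservedId + static_cast<int64_t>(which);
}

bool isReserved(int64_t id) {
    return id >= kMinReservedId && id <= kMaxId;
}

int main() {
    const int64_t metadataId = reservedIdFor(Reservation::kWildcardMultikeyMetadataId);
    assert(isReserved(metadataId));
    assert(!isReserved(1));  // ordinary record ids stay below the reserved band
    std::cout << "metadata id = " << metadataId
              << ", reserved = " << isReserved(metadataId) << '\n';
    return 0;
}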
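
Several WiredTiger and oplog_hack.cpp hunks convert between oplog RecordIds and Timestamps with expressions like Timestamp(record->id.as<int64_t>()) and RecordId(opTime.getSecs(), opTime.getInc()). The sketch below shows the 64-bit packing this relies on, with the seconds in the high 32 bits; treat the exact layout as an assumption of the sketch rather than a documented format.

// oplog_key_sketch.cpp -- standalone illustration of the Timestamp <-> oplog
// RecordId correspondence: the optime's (seconds, increment) pair packed into
// one 64-bit value, so Timestamp(rid.as<int64_t>()) recovers the optime.
#include <cassert>
#include <cstdint>
#include <iostream>

int64_t oplogKeyForOptime(uint32_t secs, uint32_t inc) {
    return static_cast<int64_t>((static_cast<uint64_t>(secs) << 32) | inc);
}

int main() {
    const uint32_t secs = 1600000000u;
    const uint32_t inc = 7u;

    const int64_t key = oplogKeyForOptime(secs, inc);

    // Unpack the same way Timestamp(rid.as<int64_t>()) would.
    const uint32_t secsOut = static_cast<uint32_t>(static_cast<uint64_t>(key) >> 32);
    const uint32_t incOut = static_cast<uint32_t>(static_cast<uint64_t>(key) & 0xffffffffu);
    assert(secsOut == secs && incOut == inc);

    // Keys built from seconds values that fit in 31 bits stay positive;
    // keyForOptime() rejects anything that would leave the valid int64
    // RecordId range ("ts too low" / "ts too high").
    assert(key > 0);
    std::cout << "optime (" << secs << ", " << inc << ") -> oplog key " << key << '\n';
    return 0;
}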
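
The record-store cursors above stop early when id.as<int64_t>() exceeds the oplog-visible timestamp, so a forward scan never reads past a point where earlier writes may still be uncommitted. A minimal model of that check, with std::map standing in for the record store and std::optional for the "no visibility bound" case of non-oplog scans:

// oplog_visibility_sketch.cpp -- standalone model of the visibility cut-off
// applied by the WiredTiger cursor hunks above.
#include <cstdint>
#include <iostream>
#include <map>
#include <optional>
#include <string>
#include <vector>

std::vector<int64_t> scanOplog(const std::map<int64_t, std::string>& oplog,
                               std::optional<int64_t> visibleTs) {
    std::vector<int64_t> seen;
    for (const auto& [id, doc] : oplog) {
        if (visibleTs && id > *visibleTs)
            break;  // mirrors the cursor setting _eof once past the visible point
        seen.push_back(id);
    }
    return seen;
}

int main() {
    std::map<int64_t, std::string> oplog{{1, "a"}, {2, "b"}, {3, "c"}, {5, "e"}};

    for (int64_t id : scanOplog(oplog, 3))
        std::cout << id << ' ';  // prints: 1 2 3
    std::cout << '\n';

    for (int64_t id : scanOplog(oplog, std::nullopt))
        std::cout << id << ' ';  // unbounded scan: 1 2 3 5
    std::cout << '\n';
    return 0;
}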
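
Both the ephemeral-for-test and WiredTiger record stores initialize their id counters the same way: start at 1 so the counter is always above the int64 RecordId minimum, then advance it past the largest id found by a reverse scan. A standalone version of that pattern, with std::map in place of the record store and rbegin() in place of a backwards SeekableRecordCursor:

// next_id_sketch.cpp -- standalone model of the _initNextIdIfNeeded() /
// _initHighestIdIfNeeded() pattern shown in the hunks above.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

int64_t computeNextRecordId(const std::map<int64_t, std::string>& records) {
    int64_t nextId = 1;  // always higher than the int64 RecordId minimum
    if (!records.empty()) {
        // rbegin() plays the role of the reverse cursor: the highest id in use.
        nextId = records.rbegin()->first + 1;
    }
    return nextId;
}

int main() {
    std::map<int64_t, std::string> store;
    std::cout << "empty store, next id = " << computeNextRecordId(store) << '\n';  // 1

    store[1] = "a";
    store[2] = "b";
    store[7] = "c";  // ids need not be contiguous
    std::cout << "populated store, next id = " << computeNextRecordId(store) << '\n';  // 8
    return 0;
}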
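
Finally, the new src/mongo/db/storage/record_id_bm.cpp measures how expensive it is to copy a RecordId holding an int64_t versus one holding an OID, using Google Benchmark's DoNotOptimize and ClobberMemory to keep the copies from being optimized away. The harness below applies the same pattern to trivial stand-in types so it can be compiled outside the MongoDB tree; the compile command and the BENCHMARK_MAIN() entry point are assumptions about a typical Google Benchmark setup, not part of the patch.

// copy_cost_bench_sketch.cpp -- the benchmarking pattern used by record_id_bm.cpp,
// applied to stand-in types. Compile roughly as:
//   g++ -std=c++17 copy_cost_bench_sketch.cpp -lbenchmark -lpthread
#include <array>
#include <cstdint>

#include <benchmark/benchmark.h>

struct SmallId {
    int64_t v;
};

struct WideId {
    std::array<unsigned char, 12> bytes;  // roughly OID-sized payload
};

static void BM_CopySmallId(benchmark::State& state) {
    SmallId id{1};
    for (auto _ : state) {
        benchmark::ClobberMemory();                        // force each copy to be observable
        benchmark::DoNotOptimize(id = SmallId{id.v + 1});  // one copy per iteration
    }
}

static void BM_CopyWideId(benchmark::State& state) {
    WideId id{};
    for (auto _ : state) {
        benchmark::ClobberMemory();
        WideId next = id;
        next.bytes[0] += 1;
        benchmark::DoNotOptimize(id = next);
    }
}

BENCHMARK(BM_CopySmallId);
BENCHMARK(BM_CopyWideId);
BENCHMARK_MAIN();  // record_id_bm.cpp omits this and relies on the server's benchmark main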