diff options
60 files changed, 792 insertions, 588 deletions
diff --git a/jstests/core/timeseries/clustered_index_types.js b/jstests/core/timeseries/clustered_index_types.js index 843b60390d2..26d71b7ff43 100644 --- a/jstests/core/timeseries/clustered_index_types.js +++ b/jstests/core/timeseries/clustered_index_types.js @@ -1,5 +1,5 @@ /** - * Tests inserting duplicate _id values on a collection clustered by _id. + * Tests inserting various _id values and duplicates on a collection clustered by _id. * * @tags: [ * assumes_against_mongod_not_mongos, @@ -36,4 +36,34 @@ assert.eq(1, coll.find({_id: oid}).itcount()); // Updates should work. assert.commandWorked(coll.update({_id: oid}, {a: 1})); assert.eq(1, coll.find({_id: oid}).itcount()); + +assert.commandWorked(coll.insert({_id: 0, a: 1})); +assert.eq(1, coll.find({_id: 0}).itcount()); +assert.commandWorked(coll.insert({_id: "", a: 2})); +assert.eq(1, coll.find({_id: ""}).itcount()); +assert.commandWorked(coll.insert({_id: NumberLong("9223372036854775807"), a: 3})); +assert.eq(1, coll.find({_id: NumberLong("9223372036854775807")}).itcount()); +assert.commandWorked(coll.insert({_id: {a: 1, b: 1}, a: 4})); +assert.eq(1, coll.find({_id: {a: 1, b: 1}}).itcount()); +assert.commandFailedWithCode(coll.insert({_id: {a: {b: 1}, c: 1}, a: 5}), ErrorCodes.BadValue); +assert.commandWorked(coll.insert({_id: -1, a: 6})); +assert.eq(1, coll.find({_id: -1}).itcount()); +assert.commandWorked(coll.insert({_id: "123456789012", a: 7})); +assert.eq(1, coll.find({_id: "123456789012"}).itcount()); +assert.commandWorked(coll.insert({a: 8})); +assert.eq(1, coll.find({a: 8}).itcount()); +assert.commandWorked(coll.insert({_id: null, a: 9})); +assert.eq(1, coll.find({_id: null}).itcount()); +assert.commandFailedWithCode(coll.insert({_id: "123456789012345678912387238478142876534", a: 10}), + ErrorCodes.BadValue); + +assert.commandWorked(coll.createIndex({a: 1})); + +// No support for numeric type differentiation. 
+assert.commandWorked(coll.insert({_id: 42.0})); +assert.commandFailedWithCode(coll.insert({_id: 42}), ErrorCodes.DuplicateKey); +assert.commandFailedWithCode(coll.insert({_id: NumberLong("42")}), ErrorCodes.DuplicateKey); +assert.eq(1, coll.find({_id: 42.0}).itcount()); +assert.eq(1, coll.find({_id: 42}).itcount()); +assert.eq(1, coll.find({_id: NumberLong("42")}).itcount()); })(); diff --git a/jstests/core/timeseries/timeseries_show_record_id.js b/jstests/core/timeseries/timeseries_show_record_id.js index 2abacb977be..e5f891770fc 100644 --- a/jstests/core/timeseries/timeseries_show_record_id.js +++ b/jstests/core/timeseries/timeseries_show_record_id.js @@ -46,7 +46,7 @@ for (let i = 0; i < 100; i++) { function checkRecordId(documents) { for (const document of documents) { assert(document.hasOwnProperty("$recordId")); - assert(document["$recordId"].isObjectId); + assert(isString(document["$recordId"])); } } diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript index 2edde6d77ac..fb02b1a78ce 100644 --- a/src/mongo/db/SConscript +++ b/src/mongo/db/SConscript @@ -955,7 +955,6 @@ env.Library( '$BUILD_DIR/mongo/db/catalog/index_build_oplog_entry', '$BUILD_DIR/mongo/db/concurrency/lock_manager', '$BUILD_DIR/mongo/db/dbhelpers', - '$BUILD_DIR/mongo/db/repl/cloner_utils', '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface', '$BUILD_DIR/mongo/db/repl/replica_set_messages', @@ -1133,6 +1132,7 @@ env.Library( ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/commands/fsync_locked', + '$BUILD_DIR/mongo/db/record_id_helpers', '$BUILD_DIR/mongo/db/repl/tenant_migration_access_blocker', '$BUILD_DIR/mongo/idl/server_parameter', 'catalog/database_holder', @@ -1150,7 +1150,9 @@ env.Library( LIBDEPS=[ '$BUILD_DIR/mongo/base', ], -) + LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/storage/key_string', + ],) env.Library( target='query_exec', @@ -1296,6 +1298,7 @@ env.Library( 'catalog/database_holder', 'commands/server_status_core', 'kill_sessions', + 'record_id_helpers', 
'stats/resource_consumption_metrics', ], ) @@ -2412,6 +2415,7 @@ if wiredtiger: 'query_exec', 'range_arithmetic', 'read_write_concern_defaults_mock', + 'record_id_helpers', 'repl/mock_repl_coord_server_fixture', 'repl/oplog_interface_local', 'repl/repl_coordinator_interface', diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript index 62725a206a8..3adc744efc1 100644 --- a/src/mongo/db/catalog/SConscript +++ b/src/mongo/db/catalog/SConscript @@ -393,6 +393,7 @@ env.Library( '$BUILD_DIR/mongo/db/commands/server_status_core', '$BUILD_DIR/mongo/db/index/index_access_methods', '$BUILD_DIR/mongo/db/index/index_build_interceptor', + '$BUILD_DIR/mongo/db/record_id_helpers', '$BUILD_DIR/mongo/db/repl/repl_settings', '$BUILD_DIR/mongo/db/storage/storage_debug_util', '$BUILD_DIR/mongo/db/storage/storage_engine_common', @@ -430,7 +431,7 @@ env.Library( ], LIBDEPS=[ '$BUILD_DIR/mongo/base', - ] + ], ) env.Library( diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp index 4d13f2ea779..27fb8ecbd8b 100644 --- a/src/mongo/db/catalog/collection_impl.cpp +++ b/src/mongo/db/catalog/collection_impl.cpp @@ -63,6 +63,7 @@ #include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collection_query_info.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/record_id_helpers.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/server_options.h" @@ -675,14 +676,8 @@ Status CollectionImpl::insertDocumentForBulkLoader( RecordId recordId; if (isClustered()) { - // Collections clustered by _id require ObjectId values. 
- BSONElement oidElem; - bool foundId = doc.getObjectID(oidElem); - uassert(ErrorCodes::BadValue, - str::stream() << "Document " << redact(doc) << " is missing the '_id' field", - foundId); invariant(_shared->_recordStore->keyFormat() == KeyFormat::String); - recordId = RecordId(oidElem.OID().view().view(), OID::kOIDSize); + recordId = uassertStatusOK(record_id_helpers::keyForDoc(doc)); } // Using timestamp 0 for these inserts, which are non-oplog so we don't have an appropriate @@ -758,16 +753,8 @@ Status CollectionImpl::_insertDocuments(OperationContext* opCtx, RecordId recordId; if (isClustered()) { - // Collections clustered by _id require ObjectId values. - BSONElement oidElem; - if (!doc.getObjectID(oidElem)) { - return Status(ErrorCodes::BadValue, - str::stream() - << "Document " << redact(doc) << " is missing the '_id' field"); - } - invariant(_shared->_recordStore->keyFormat() == KeyFormat::String); - recordId = RecordId(oidElem.OID().view().view(), OID::kOIDSize); + recordId = uassertStatusOK(record_id_helpers::keyForDoc(doc)); } if (MONGO_unlikely(corruptDocumentOnInsert.shouldFail())) { diff --git a/src/mongo/db/catalog/index_consistency.cpp b/src/mongo/db/catalog/index_consistency.cpp index 3f624451007..b8e53a88071 100644 --- a/src/mongo/db/catalog/index_consistency.cpp +++ b/src/mongo/db/catalog/index_consistency.cpp @@ -42,6 +42,7 @@ #include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/multi_key_path_tracker.h" +#include "mongo/db/record_id_helpers.h" #include "mongo/db/storage/storage_debug_util.h" #include "mongo/logv2/log.h" #include "mongo/util/string_map.h" @@ -501,11 +502,7 @@ BSONObj IndexConsistency::_generateInfo(const std::string& indexName, BSONObjBuilder infoBuilder; infoBuilder.append("indexName", indexName); - infoBuilder.append( - "recordId", - recordId.withFormat([](RecordId::Null n) { return std::string("null"); }, - [](int64_t rid) { return std::to_string(rid); }, - [](const 
char* str, int size) { return OID::from(str).toString(); })); + recordId.serializeToken("recordId", &infoBuilder); if (!idKey.isEmpty()) { infoBuilder.append("idKey", idKey); diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp index 54d8dd74aa8..93aa592ef66 100644 --- a/src/mongo/db/catalog/multi_index_block.cpp +++ b/src/mongo/db/catalog/multi_index_block.cpp @@ -890,13 +890,7 @@ BSONObj MultiIndexBlock::_constructStateObject(OperationContext* opCtx, // We can be interrupted by shutdown before inserting the first document from the collection // scan, in which case there is no _lastRecordIdInserted. if (_phase == IndexBuildPhaseEnum::kCollectionScan && _lastRecordIdInserted) { - _lastRecordIdInserted->withFormat( - [](RecordId::Null n) { invariant(false); }, - [&](int64_t rid) { builder.append("collectionScanPosition", rid); }, - [&](const char* str, int size) { - OID oid = OID::from(str); - builder.appendOID("collectionScanPosition", &oid); - }); + _lastRecordIdInserted->serializeToken("collectionScanPosition", &builder); } BSONArrayBuilder indexesArray(builder.subarrayStart("indexes")); diff --git a/src/mongo/db/catalog/validate_adaptor.cpp b/src/mongo/db/catalog/validate_adaptor.cpp index 7785c710102..d7eaf3ab188 100644 --- a/src/mongo/db/catalog/validate_adaptor.cpp +++ b/src/mongo/db/catalog/validate_adaptor.cpp @@ -306,25 +306,19 @@ void ValidateAdaptor::traverseIndex(OperationContext* opCtx, throw; } + const RecordId kWildcardMultikeyMetadataRecordId = + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId); while (indexEntry) { if (!isFirstEntry) { _validateKeyOrder( opCtx, index, indexEntry->keyString, prevIndexKeyStringValue, &indexResults); } - const RecordId kWildcardMultikeyMetadataRecordId = [&]() { - auto keyFormat = _validateState->getCollection()->getRecordStore()->keyFormat(); - if (keyFormat == KeyFormat::Long) { - return RecordId::reservedIdFor<int64_t>( - 
RecordId::Reservation::kWildcardMultikeyMetadataId); - } else { - invariant(keyFormat == KeyFormat::String); - return RecordId::reservedIdFor<OID>( - RecordId::Reservation::kWildcardMultikeyMetadataId); - } - }(); - if (descriptor->getIndexType() == IndexType::INDEX_WILDCARD && - indexEntry->loc == kWildcardMultikeyMetadataRecordId) { + bool isMetadataKey = indexEntry->loc.withFormat( + [](RecordId::Null) { return false; }, + [&](int64_t val) { return val == kWildcardMultikeyMetadataRecordId.getLong(); }, + [](const char* str, int len) { return false; }); + if (descriptor->getIndexType() == IndexType::INDEX_WILDCARD && isMetadataKey) { _indexConsistency->removeMultikeyMetadataPath(indexEntry->keyString, &indexInfo); } else { try { diff --git a/src/mongo/db/catalog/validate_results.cpp b/src/mongo/db/catalog/validate_results.cpp index dca634fc638..46872995fc5 100644 --- a/src/mongo/db/catalog/validate_results.cpp +++ b/src/mongo/db/catalog/validate_results.cpp @@ -42,13 +42,12 @@ void ValidateResults::appendToResultObj(BSONObjBuilder* resultObj, bool debuggin resultObj->append("extraIndexEntries", extraIndexEntries); resultObj->append("missingIndexEntries", missingIndexEntries); - // Need to convert RecordId to the appropriate type. + // Need to convert RecordId to a printable type. 
BSONArrayBuilder builder; for (const RecordId& corruptRecord : corruptRecords) { - corruptRecord.withFormat( - [&](RecordId::Null n) { builder.append("null"); }, - [&](const int64_t rid) { builder.append(rid); }, - [&](const char* str, int size) { builder.append(OID::from(str)); }); + BSONObjBuilder objBuilder; + corruptRecord.serializeToken("", &objBuilder); + builder.append(objBuilder.done().firstElement()); } resultObj->append("corruptRecords", builder.arr()); diff --git a/src/mongo/db/exec/SConscript b/src/mongo/db/exec/SConscript index 0a238aae69c..6a1ad9c6fdf 100644 --- a/src/mongo/db/exec/SConscript +++ b/src/mongo/db/exec/SConscript @@ -152,6 +152,7 @@ env.CppUnitTest( "$BUILD_DIR/mongo/db/query/collation/collator_interface_mock", "$BUILD_DIR/mongo/db/query/query_test_service_context", "$BUILD_DIR/mongo/db/query_exec", + "$BUILD_DIR/mongo/db/record_id_helpers", "$BUILD_DIR/mongo/db/service_context_d", "$BUILD_DIR/mongo/db/service_context_d_test_fixture", "$BUILD_DIR/mongo/dbtests/mocklib", diff --git a/src/mongo/db/exec/collection_scan.h b/src/mongo/db/exec/collection_scan.h index 8fdbb70a1e2..1544b77cf65 100644 --- a/src/mongo/db/exec/collection_scan.h +++ b/src/mongo/db/exec/collection_scan.h @@ -77,10 +77,9 @@ public: BSONObj getPostBatchResumeToken() const { // Return a resume token compatible with resumable initial sync. if (_params.requestResumeToken) { - return _lastSeenId.withFormat( - [](RecordId::Null n) { return BSON("$recordId" << NullLabeler{}); }, - [](int64_t rid) { return BSON("$recordId" << rid); }, - [](const char* str, int size) { return BSON("$recordId" << OID::from(str)); }); + BSONObjBuilder builder; + _lastSeenId.serializeToken("$recordId", &builder); + return builder.obj(); } // Return a resume token compatible with resharding oplog sync. 
if (_params.shouldTrackLatestOplogTimestamp) { diff --git a/src/mongo/db/exec/sbe/stages/ix_scan.cpp b/src/mongo/db/exec/sbe/stages/ix_scan.cpp index 9483505b298..0bed934ea82 100644 --- a/src/mongo/db/exec/sbe/stages/ix_scan.cpp +++ b/src/mongo/db/exec/sbe/stages/ix_scan.cpp @@ -298,7 +298,7 @@ PlanState IndexScanStage::getNext() { if (_recordIdAccessor) { _recordIdAccessor->reset(value::TypeTags::RecordId, - value::bitcastFrom<int64_t>(_nextRecord->loc.asLong())); + value::bitcastFrom<int64_t>(_nextRecord->loc.getLong())); } if (_accessors.size()) { diff --git a/src/mongo/db/exec/sbe/stages/scan.cpp b/src/mongo/db/exec/sbe/stages/scan.cpp index de15801a3d3..b6891640a4a 100644 --- a/src/mongo/db/exec/sbe/stages/scan.cpp +++ b/src/mongo/db/exec/sbe/stages/scan.cpp @@ -236,7 +236,7 @@ PlanState ScanStage::getNext() { if (_recordIdAccessor) { _recordIdAccessor->reset(value::TypeTags::RecordId, - value::bitcastFrom<int64_t>(nextRecord->id.asLong())); + value::bitcastFrom<int64_t>(nextRecord->id.getLong())); } if (!_fieldAccessors.empty()) { @@ -566,7 +566,7 @@ PlanState ParallelScanStage::getNext() { if (_recordIdAccessor) { _recordIdAccessor->reset(value::TypeTags::RecordId, - value::bitcastFrom<int64_t>(nextRecord->id.asLong())); + value::bitcastFrom<int64_t>(nextRecord->id.getLong())); } diff --git a/src/mongo/db/exec/working_set.cpp b/src/mongo/db/exec/working_set.cpp index 4ffd946d102..6cf48a78ca4 100644 --- a/src/mongo/db/exec/working_set.cpp +++ b/src/mongo/db/exec/working_set.cpp @@ -245,10 +245,11 @@ void WorkingSetMember::serialize(BufBuilder& buf) const { recordId.withFormat([&](RecordId::Null n) { MONGO_UNREACHABLE_TASSERT(5472100); }, [&](int64_t rid) { buf.appendChar(static_cast<char>(RecordIdFormat::Long)); - buf.appendNum(recordId.asLong()); + buf.appendNum(recordId.getLong()); }, [&](const char* str, int size) { buf.appendChar(static_cast<char>(RecordIdFormat::String)); + buf.appendNum(size); buf.appendBuf(static_cast<const void*>(str), size); }); } @@ 
-293,9 +294,9 @@ WorkingSetMember WorkingSetMember::deserialize(BufReader& buf) { wsm.recordId = RecordId{buf.read<LittleEndian<int64_t>>()}; } else { invariant(recordIdFormat == RecordIdFormat::String); - invariant(static_cast<int>(RecordId::kSmallStrSize) == static_cast<int>(OID::kOIDSize)); - const char* recordIdStr = static_cast<const char*>(buf.skip(RecordId::kSmallStrSize)); - wsm.recordId = RecordId{recordIdStr, RecordId::kSmallStrSize}; + auto size = buf.read<LittleEndian<int32_t>>(); + const char* recordIdStr = static_cast<const char*>(buf.skip(size)); + wsm.recordId = RecordId{recordIdStr, size}; } } diff --git a/src/mongo/db/exec/working_set_test.cpp b/src/mongo/db/exec/working_set_test.cpp index 79fb463107c..9204f74f652 100644 --- a/src/mongo/db/exec/working_set_test.cpp +++ b/src/mongo/db/exec/working_set_test.cpp @@ -34,6 +34,7 @@ #include "mongo/db/exec/working_set.h" #include "mongo/db/jsobj.h" #include "mongo/db/json.h" +#include "mongo/db/record_id_helpers.h" #include "mongo/db/storage/snapshot.h" #include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/unittest.h" @@ -218,7 +219,7 @@ TEST_F(WorkingSetFixture, RecordIdLongAndObjStateCanRoundtripThroughSerializatio ASSERT_EQ(WorkingSetMember::RID_AND_OBJ, roundtripped.getState()); ASSERT_DOCUMENT_EQ(roundtripped.doc.value(), doc); ASSERT_EQ(roundtripped.doc.snapshotId().toNumber(), 42u); - ASSERT_EQ(roundtripped.recordId.asLong(), 43); + ASSERT_EQ(roundtripped.recordId.getLong(), 43); ASSERT_FALSE(roundtripped.metadata()); } @@ -227,13 +228,14 @@ TEST_F(WorkingSetFixture, RecordIdStrAndObjStateCanRoundtripThroughSerialization member->doc.setValue(doc); member->doc.setSnapshotId(SnapshotId{42u}); const OID oid = OID::gen(); - member->recordId = RecordId{oid.view().view(), RecordId::kSmallStrSize}; + + member->recordId = record_id_helpers::keyForOID(oid); ws->transitionToRecordIdAndObj(id); auto roundtripped = roundtripWsmThroughSerialization(*member); 
ASSERT_EQ(WorkingSetMember::RID_AND_OBJ, roundtripped.getState()); ASSERT_DOCUMENT_EQ(roundtripped.doc.value(), doc); ASSERT_EQ(roundtripped.doc.snapshotId().toNumber(), 42u); - ASSERT_EQ(OID::from(roundtripped.recordId.strData()), oid); + ASSERT_EQ(record_id_helpers::toBSONAs(roundtripped.recordId, "").firstElement().OID(), oid); ASSERT_FALSE(roundtripped.metadata()); } @@ -259,7 +261,7 @@ TEST_F(WorkingSetFixture, RecordIdAndIdxStateCanRoundtripThroughSerialization) { auto roundtripped = roundtripWsmThroughSerialization(*member); ASSERT_EQ(WorkingSetMember::RID_AND_IDX, roundtripped.getState()); - ASSERT_EQ(roundtripped.recordId.asLong(), 43); + ASSERT_EQ(roundtripped.recordId.getLong(), 43); ASSERT_EQ(roundtripped.keyData.size(), 2u); ASSERT_BSONOBJ_EQ(roundtripped.keyData[0].indexKeyPattern, BSON("a" << 1 << "b" << 1)); @@ -313,7 +315,7 @@ TEST_F(WorkingSetFixture, WsmCanBeExtractedAndReinserted) { ASSERT_EQ(extractedWsm.getState(), WorkingSetMember::RID_AND_OBJ); ASSERT_DOCUMENT_EQ(extractedWsm.doc.value(), doc); ASSERT_EQ(extractedWsm.doc.snapshotId().toNumber(), 42u); - ASSERT_EQ(extractedWsm.recordId.asLong(), 43); + ASSERT_EQ(extractedWsm.recordId.getLong(), 43); ASSERT_FALSE(extractedWsm.metadata()); auto emplacedId = ws->emplace(std::move(extractedWsm)); @@ -324,7 +326,7 @@ TEST_F(WorkingSetFixture, WsmCanBeExtractedAndReinserted) { ASSERT_EQ(emplacedWsm->getState(), WorkingSetMember::RID_AND_OBJ); ASSERT_DOCUMENT_EQ(emplacedWsm->doc.value(), doc); ASSERT_EQ(emplacedWsm->doc.snapshotId().toNumber(), 42u); - ASSERT_EQ(emplacedWsm->recordId.asLong(), 43); + ASSERT_EQ(emplacedWsm->recordId.getLong(), 43); ASSERT_FALSE(emplacedWsm->metadata()); } diff --git a/src/mongo/db/index/skipped_record_tracker.cpp b/src/mongo/db/index/skipped_record_tracker.cpp index a25293b643c..7891cda5478 100644 --- a/src/mongo/db/index/skipped_record_tracker.cpp +++ b/src/mongo/db/index/skipped_record_tracker.cpp @@ -68,12 +68,9 @@ void 
SkippedRecordTracker::finalizeTemporaryTable(OperationContext* opCtx, } void SkippedRecordTracker::record(OperationContext* opCtx, const RecordId& recordId) { - BSONObj toInsert; - recordId.withFormat([](RecordId::Null n) { invariant(false); }, - [&](int64_t rid) { toInsert = BSON(kRecordIdField << rid); }, - [&](const char* str, int size) { - toInsert = BSON(kRecordIdField << std::string(str, size)); - }); + BSONObjBuilder builder; + recordId.serializeToken(kRecordIdField, &builder); + BSONObj toInsert = builder.obj(); // Lazily initialize table when we record the first document. if (!_skippedRecordsTable) { @@ -143,16 +140,7 @@ Status SkippedRecordTracker::retrySkippedRecords(OperationContext* opCtx, const BSONObj doc = record->data.toBson(); // This is the RecordId of the skipped record from the collection. - RecordId skippedRecordId; - const KeyFormat keyFormat = collection->getRecordStore()->keyFormat(); - if (keyFormat == KeyFormat::Long) { - skippedRecordId = RecordId(doc[kRecordIdField].Long()); - } else { - invariant(keyFormat == KeyFormat::String); - const std::string recordIdStr = doc[kRecordIdField].String(); - skippedRecordId = RecordId(recordIdStr.c_str(), recordIdStr.size()); - } - + RecordId skippedRecordId = RecordId::deserializeToken(doc[kRecordIdField]); WriteUnitOfWork wuow(opCtx); // If the record still exists, get a potentially new version of the document to index. 
diff --git a/src/mongo/db/index/wildcard_key_generator.cpp b/src/mongo/db/index/wildcard_key_generator.cpp index 7ec1adc5b3d..bcf4655c2db 100644 --- a/src/mongo/db/index/wildcard_key_generator.cpp +++ b/src/mongo/db/index/wildcard_key_generator.cpp @@ -246,7 +246,7 @@ void WildcardKeyGenerator::_addMultiKey(SharedBufferFragmentBuilder& pooledBuffe _keyStringVersion, key, _ordering, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); multikeyPaths->push_back(keyString.release()); } } diff --git a/src/mongo/db/index/wildcard_key_generator_test.cpp b/src/mongo/db/index/wildcard_key_generator_test.cpp index 5f37133f72c..fc5a99ec345 100644 --- a/src/mongo/db/index/wildcard_key_generator_test.cpp +++ b/src/mongo/db/index/wildcard_key_generator_test.cpp @@ -163,12 +163,12 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ShouldIndexNonNestedEmptyArrayAsUnd fromjson("{'': 'b.c', '': undefined}"), fromjson("{'': 'd', '': []}"), fromjson("{'': 'd.e', '': undefined}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}"), - fromjson("{'': 1, '': 'b.c'}"), - fromjson("{'': 1, '': 'd'}"), - fromjson("{'': 1, '': 'd.e'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}"), + fromjson("{'': 1, '': 'b.c'}"), + fromjson("{'': 1, '': 'd'}"), + fromjson("{'': 1, '': 'd.e'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -192,9 +192,9 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ExtractMultikeyPath) { fromjson("{'': 'a.c', '': 2}"), fromjson("{'': 'a.d', '': 3}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}")}, - 
RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -218,9 +218,9 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ExtractMultikeyPathAndDedupKeys) { fromjson("{'': 'a.c', '': 2}"), fromjson("{'': 'a.d', '': 3}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -245,9 +245,9 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ExtractZeroElementMultikeyPath) { fromjson("{'': 'a.d', '': 3}"), fromjson("{'': 'e', '': undefined}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'e'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'e'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -281,9 +281,9 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ExtractNestedMultikeyPaths) { fromjson("{'': 'a.e', '': 4}"), fromjson("{'': 'a.e', '': 5}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + 
RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -321,12 +321,12 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ExtractMixedPathTypesAndAllSubpaths fromjson("{'': 'g.h.k', '': 12.0}"), fromjson("{'': 'l', '': 'string'}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}"), - fromjson("{'': 1, '': 'a.e'}"), - fromjson("{'': 1, '': 'g.h.j'}"), - fromjson("{'': 1, '': 'g.h.j.k'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}"), + fromjson("{'': 1, '': 'a.e'}"), + fromjson("{'': 1, '': 'g.h.j'}"), + fromjson("{'': 1, '': 'g.h.j.k'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -356,9 +356,9 @@ TEST_F(WildcardKeyGeneratorSingleSubtreeTest, ExtractSubtreeWithSinglePathCompon fromjson("{'': 'g.h.j.k', '': 11.5}"), fromjson("{'': 'g.h.k', '': 12.0}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -385,9 +385,9 @@ TEST_F(WildcardKeyGeneratorSingleSubtreeTest, ExtractSubtreeWithMultiplePathComp fromjson("{'': 'g.h.j.k', '': 11.5}"), fromjson("{'': 'g.h.k', '': 12.0}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + 
auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -412,9 +412,9 @@ TEST_F(WildcardKeyGeneratorSingleSubtreeTest, ExtractMultikeySubtree) { fromjson("{'': 'g.h.j.k', '': 11}"), fromjson("{'': 'g.h.j.k', '': 11.5}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -441,9 +441,9 @@ TEST_F(WildcardKeyGeneratorSingleSubtreeTest, ExtractNestedMultikeySubtree) { fromjson("{'': 'a.e', '': 4}"), fromjson("{'': 'a.e', '': 5}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -473,9 +473,9 @@ TEST_F(WildcardKeyGeneratorInclusionTest, InclusionProjectionSingleSubtree) { fromjson("{'': 'g.h.j.k', '': 11.5}"), fromjson("{'': 'g.h.k', '': 12.0}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 
'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -502,9 +502,9 @@ TEST_F(WildcardKeyGeneratorInclusionTest, InclusionProjectionNestedSubtree) { fromjson("{'': 'g.h.j.k', '': 11.5}"), fromjson("{'': 'g.h.k', '': 12.0}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -529,9 +529,9 @@ TEST_F(WildcardKeyGeneratorInclusionTest, InclusionProjectionMultikeySubtree) { fromjson("{'': 'g.h.j.k', '': 11}"), fromjson("{'': 'g.h.j.k', '': 11.5}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'g.h.j'}"), fromjson("{'': 1, '': 'g.h.j.k'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -556,9 +556,9 @@ TEST_F(WildcardKeyGeneratorInclusionTest, InclusionProjectionNestedMultikeySubtr fromjson("{'': 'a.e', '': 4}"), fromjson("{'': 'a.e', '': 5}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + 
RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -586,9 +586,9 @@ TEST_F(WildcardKeyGeneratorInclusionTest, InclusionProjectionMultipleSubtrees) { fromjson("{'': 'a.e', '': 5}"), fromjson("{'': 'g.h.i', '': 9}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -623,9 +623,9 @@ TEST_F(WildcardKeyGeneratorExclusionTest, ExclusionProjectionSingleSubtree) { fromjson("{'': 'a.e', '': 5}"), fromjson("{'': 'l', '': 'string'}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -658,9 +658,9 @@ TEST_F(WildcardKeyGeneratorExclusionTest, ExclusionProjectionNestedSubtree) { fromjson("{'': 'g', '': {}}"), fromjson("{'': 'l', '': 'string'}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = 
makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -694,9 +694,9 @@ TEST_F(WildcardKeyGeneratorExclusionTest, ExclusionProjectionMultikeySubtree) { fromjson("{'': 'g.h.k', '': 12.0}"), fromjson("{'': 'l', '': 'string'}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -731,11 +731,11 @@ TEST_F(WildcardKeyGeneratorExclusionTest, ExclusionProjectionNestedMultikeySubtr fromjson("{'': 'g.h.k', '': 12}"), fromjson("{'': 'l', '': 'string'}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}"), - fromjson("{'': 1, '': 'g.h.j'}"), - fromjson("{'': 1, '': 'g.h.j.k'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}"), + fromjson("{'': 1, '': 'g.h.j'}"), + fromjson("{'': 1, '': 'g.h.j.k'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -767,11 +767,11 @@ TEST_F(WildcardKeyGeneratorExclusionTest, ExclusionProjectionMultipleSubtrees) { fromjson("{'': 'g.h.k', '': 12.0}"), fromjson("{'': 'l', '': 'string'}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}"), - fromjson("{'': 1, '': 'g.h.j'}"), - fromjson("{'': 1, '': 'g.h.j.k'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}"), + fromjson("{'': 1, '': 'g.h.j'}"), + fromjson("{'': 1, '': 'g.h.j.k'}")}, + 
RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -801,9 +801,9 @@ TEST_F(WildcardKeyGeneratorIdTest, ExcludeIdFieldIfProjectionIsEmpty) { fromjson("{'': 'g.h.i', '': 9}"), fromjson("{'': 'g.h.k', '': 12.0}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -828,9 +828,9 @@ TEST_F(WildcardKeyGeneratorIdTest, ExcludeIdFieldForSingleSubtreeKeyPattern) { fromjson("{'': 'a.b', '': 1}"), fromjson("{'': 'a.e', '': 4}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -900,9 +900,9 @@ TEST_F(WildcardKeyGeneratorIdTest, ExcludeIdFieldByDefaultForInclusionProjection fromjson("{'': 'a.b', '': 1}"), fromjson("{'': 'a.e', '': 4}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = 
makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -975,9 +975,9 @@ TEST_F(WildcardKeyGeneratorIdTest, PermitIdSubfieldExclusionInExplicitProjection fromjson("{'': 'g.h.i', '': 9}"), fromjson("{'': 'g.h.k', '': 12.0}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -1004,9 +1004,9 @@ TEST_F(WildcardKeyGeneratorIdTest, IncludeIdFieldIfExplicitlySpecifiedInProjecti fromjson("{'': 'a.b', '': 1}"), fromjson("{'': 'a.e', '': 4}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -1031,9 +1031,9 @@ TEST_F(WildcardKeyGeneratorIdTest, ExcludeIdFieldIfExplicitlySpecifiedInProjecti fromjson("{'': 'a.b', '': 1}"), fromjson("{'': 'a.e', '': 4}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}"), fromjson("{'': 1, '': 'a.e'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -1107,12 +1107,12 @@ 
TEST_F(WildcardKeyGeneratorCollationTest, CollationMixedPathAndKeyTypes) { fromjson("{'': 'g.h.k', '': 12.0}"), fromjson("{'': 'l', '': 'gnirts'}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'a'}"), - fromjson("{'': 1, '': 'a.e'}"), - fromjson("{'': 1, '': 'g.h.j'}"), - fromjson("{'': 1, '': 'g.h.j.k'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'a'}"), + fromjson("{'': 1, '': 'a.e'}"), + fromjson("{'': 1, '': 'g.h.j'}"), + fromjson("{'': 1, '': 'g.h.j.k'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); @@ -1140,9 +1140,9 @@ TEST_F(WildcardKeyGeneratorDottedFieldsTest, DoNotIndexDottedFields) { fromjson("{'': 'b', '': [{'a.b': 9}]}"), fromjson("{'': 'c', '': 10}")}); - auto expectedMultikeyPaths = makeKeySet( - {fromjson("{'': 1, '': 'b'}")}, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + auto expectedMultikeyPaths = + makeKeySet({fromjson("{'': 1, '': 'b'}")}, + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); auto outputKeys = makeKeySet(); auto multikeyMetadataKeys = makeKeySet(); diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp index 59cf52abfcf..271b7757640 100644 --- a/src/mongo/db/index_builds_coordinator.cpp +++ b/src/mongo/db/index_builds_coordinator.cpp @@ -2185,12 +2185,7 @@ void IndexBuildsCoordinator::_resumeIndexBuildFromPhase( resumeInfo.getPhase() == IndexBuildPhaseEnum::kCollectionScan) { boost::optional<RecordId> resumeAfterRecordId; if (resumeInfo.getCollectionScanPosition()) { - auto scanPosition = *resumeInfo.getCollectionScanPosition(); - if (auto recordIdOIDPtr = stdx::get_if<OID>(&scanPosition)) { - resumeAfterRecordId.emplace(recordIdOIDPtr->view().view(), 
OID::kOIDSize); - } else if (auto recordIdLongPtr = stdx::get_if<int64_t>(&scanPosition)) { - resumeAfterRecordId.emplace(RecordId(*recordIdLongPtr)); - } + resumeAfterRecordId = *resumeInfo.getCollectionScanPosition(); } _scanCollectionAndInsertSortedKeysIntoIndex(opCtx, replState, resumeAfterRecordId); diff --git a/src/mongo/db/pipeline/expression.cpp b/src/mongo/db/pipeline/expression.cpp index 530442a9868..e826017290d 100644 --- a/src/mongo/db/pipeline/expression.cpp +++ b/src/mongo/db/pipeline/expression.cpp @@ -2888,7 +2888,7 @@ Value ExpressionMeta::evaluate(const Document& root, Variables* variables) const return metadata.hasGeoNearDistance() ? Value(metadata.getGeoNearDistance()) : Value(); case MetaType::kGeoNearPoint: return metadata.hasGeoNearPoint() ? Value(metadata.getGeoNearPoint()) : Value(); - case MetaType::kRecordId: + case MetaType::kRecordId: { // Be sure that a RecordId can be represented by a long long. static_assert(RecordId::kMinRepr >= std::numeric_limits<long long>::min()); static_assert(RecordId::kMaxRepr <= std::numeric_limits<long long>::max()); @@ -2896,10 +2896,10 @@ Value ExpressionMeta::evaluate(const Document& root, Variables* variables) const return Value(); } - return metadata.getRecordId().withFormat( - [](RecordId::Null n) { return Value(); }, - [](const int64_t rid) { return Value{static_cast<long long>(rid)}; }, - [](const char* str, int len) { return Value(OID::from(str)); }); + BSONObjBuilder builder; + metadata.getRecordId().serializeToken("", &builder); + return Value(builder.done().firstElement()); + } case MetaType::kIndexKey: return metadata.hasIndexKey() ? 
Value(metadata.getIndexKey()) : Value(); case MetaType::kSortKey: diff --git a/src/mongo/db/query/classic_stage_builder.cpp b/src/mongo/db/query/classic_stage_builder.cpp index ea83f27bd56..5e8387e83cd 100644 --- a/src/mongo/db/query/classic_stage_builder.cpp +++ b/src/mongo/db/query/classic_stage_builder.cpp @@ -63,7 +63,6 @@ #include "mongo/db/exec/text_or.h" #include "mongo/db/index/fts_access_method.h" #include "mongo/db/matcher/extensions_callback_real.h" -#include "mongo/db/record_id_helpers.h" #include "mongo/db/s/collection_sharding_state.h" #include "mongo/logv2/log.h" diff --git a/src/mongo/db/query/plan_explainer_impl.cpp b/src/mongo/db/query/plan_explainer_impl.cpp index 17023980407..a9bdd1c1454 100644 --- a/src/mongo/db/query/plan_explainer_impl.cpp +++ b/src/mongo/db/query/plan_explainer_impl.cpp @@ -50,6 +50,7 @@ #include "mongo/db/keypattern.h" #include "mongo/db/query/explain.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/record_id_helpers.h" #include "mongo/util/assert_util.h" namespace mongo { @@ -256,16 +257,10 @@ void statsToBSON(const PlanStageStats& stats, CollectionScanStats* spec = static_cast<CollectionScanStats*>(stats.specific.get()); bob->append("direction", spec->direction > 0 ? 
"forward" : "backward"); if (spec->minRecord) { - spec->minRecord->withFormat( - [&](RecordId::Null n) { bob->appendNull("minRecord"); }, - [&](int64_t rid) { bob->append("minRecord", rid); }, - [&](const char* str, int size) { bob->append("minRecord", OID::from(str)); }); + record_id_helpers::appendToBSONAs(*spec->minRecord, bob, "minRecord"); } if (spec->maxRecord) { - spec->maxRecord->withFormat( - [&](RecordId::Null n) { bob->appendNull("maxRecord"); }, - [&](int64_t rid) { bob->append("maxRecord", rid); }, - [&](const char* str, int size) { bob->append("maxRecord", OID::from(str)); }); + record_id_helpers::appendToBSONAs(*spec->maxRecord, bob, "maxRecord"); } if (verbosity >= ExplainOptions::Verbosity::kExecStats) { bob->appendNumber("docsExamined", static_cast<long long>(spec->docsTested)); diff --git a/src/mongo/db/query/plan_explainer_sbe.cpp b/src/mongo/db/query/plan_explainer_sbe.cpp index d028083e18c..de59388072c 100644 --- a/src/mongo/db/query/plan_explainer_sbe.cpp +++ b/src/mongo/db/query/plan_explainer_sbe.cpp @@ -38,6 +38,7 @@ #include "mongo/db/query/plan_explainer_impl.h" #include "mongo/db/query/projection_ast_util.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/record_id_helpers.h" namespace mongo { namespace { @@ -67,16 +68,10 @@ void statsToBSON(const QuerySolutionNode* node, auto csn = static_cast<const CollectionScanNode*>(node); bob->append("direction", csn->direction > 0 ? 
"forward" : "backward"); if (csn->minRecord) { - csn->minRecord->withFormat( - [&](RecordId::Null n) { bob->appendNull("minRecord"); }, - [&](int64_t rid) { bob->append("minRecord", rid); }, - [&](const char* str, int size) { bob->append("minRecord", OID::from(str)); }); + record_id_helpers::appendToBSONAs(*csn->minRecord, bob, "minRecord"); } if (csn->maxRecord) { - csn->maxRecord->withFormat( - [&](RecordId::Null n) { bob->appendNull("maxRecord"); }, - [&](int64_t rid) { bob->append("maxRecord", rid); }, - [&](const char* str, int size) { bob->append("maxRecord", OID::from(str)); }); + record_id_helpers::appendToBSONAs(*csn->maxRecord, bob, "maxRecord"); } break; } diff --git a/src/mongo/db/query/planner_access.cpp b/src/mongo/db/query/planner_access.cpp index 9e7855a44c7..00fc68f0587 100644 --- a/src/mongo/db/query/planner_access.cpp +++ b/src/mongo/db/query/planner_access.cpp @@ -244,18 +244,7 @@ std::unique_ptr<QuerySolutionNode> QueryPlannerAccess::makeCollectionScan( const BSONObj& resumeAfterObj = query.getFindCommandRequest().getResumeAfter(); if (!resumeAfterObj.isEmpty()) { BSONElement recordIdElem = resumeAfterObj["$recordId"]; - switch (recordIdElem.type()) { - case jstNULL: - csn->resumeAfterRecordId = RecordId(); - break; - case jstOID: - csn->resumeAfterRecordId = - RecordId(recordIdElem.OID().view().view(), OID::kOIDSize); - break; - case NumberLong: - default: - csn->resumeAfterRecordId = RecordId(recordIdElem.numberLong()); - } + csn->resumeAfterRecordId = RecordId::deserializeToken(recordIdElem); } const bool assertMinTsHasNotFallenOffOplog = diff --git a/src/mongo/db/query/query_request_helper.cpp b/src/mongo/db/query/query_request_helper.cpp index a7637728603..b466781d5a3 100644 --- a/src/mongo/db/query/query_request_helper.cpp +++ b/src/mongo/db/query/query_request_helper.cpp @@ -310,7 +310,7 @@ Status validateFindCommandRequest(const FindCommandRequest& findCommand) { if (!findCommand.getResumeAfter().isEmpty()) { if 
(findCommand.getResumeAfter().nFields() != 1 || (findCommand.getResumeAfter()["$recordId"].type() != BSONType::NumberLong && - findCommand.getResumeAfter()["$recordId"].type() != BSONType::jstOID && + findCommand.getResumeAfter()["$recordId"].type() != BSONType::String && findCommand.getResumeAfter()["$recordId"].type() != BSONType::jstNULL)) { return Status( ErrorCodes::BadValue, diff --git a/src/mongo/db/query/sbe_stage_builder_coll_scan.cpp b/src/mongo/db/query/sbe_stage_builder_coll_scan.cpp index 8f48d68aaf7..2930e323994 100644 --- a/src/mongo/db/query/sbe_stage_builder_coll_scan.cpp +++ b/src/mongo/db/query/sbe_stage_builder_coll_scan.cpp @@ -46,7 +46,6 @@ #include "mongo/db/query/sbe_stage_builder_filter.h" #include "mongo/db/query/sbe_stage_builder_helpers.h" #include "mongo/db/query/util/make_data_structure.h" -#include "mongo/db/record_id_helpers.h" #include "mongo/logv2/log.h" #include "mongo/util/str.h" @@ -185,7 +184,7 @@ std::pair<std::unique_ptr<sbe::PlanStage>, PlanStageSlots> generateOptimizedOplo sbe::makeS<sbe::CoScanStage>(csn->nodeId()), 1, boost::none, csn->nodeId()), csn->nodeId(), *seekRecordIdSlot, - makeConstant(sbe::value::TypeTags::RecordId, seekRecordId->asLong())), + makeConstant(sbe::value::TypeTags::RecordId, seekRecordId->getLong())), std::move(stage), sbe::makeSV(), sbe::makeSV(*seekRecordIdSlot), @@ -308,7 +307,7 @@ std::pair<std::unique_ptr<sbe::PlanStage>, PlanStageSlots> generateOptimizedOplo std::move(stage), makeBinaryOp(sbe::EPrimBinary::lessEq, makeVariable(*tsSlot), - makeConstant(sbe::value::TypeTags::Timestamp, csn->maxRecord->asLong())), + makeConstant(sbe::value::TypeTags::Timestamp, csn->maxRecord->getLong())), csn->nodeId()); } @@ -459,7 +458,7 @@ std::pair<std::unique_ptr<sbe::PlanStage>, PlanStageSlots> generateGenericCollSc sbe::makeS<sbe::CoScanStage>(csn->nodeId()), 1, boost::none, csn->nodeId()), csn->nodeId(), seekSlot, - makeConstant(sbe::value::TypeTags::RecordId, csn->resumeAfterRecordId->asLong())); + 
makeConstant(sbe::value::TypeTags::RecordId, csn->resumeAfterRecordId->getLong())); // Construct a 'seek' branch of the 'union'. If we're succeeded to reposition the cursor, // the branch will output the 'seekSlot' to start the real scan from, otherwise it will diff --git a/src/mongo/db/query/wildcard_multikey_paths.cpp b/src/mongo/db/query/wildcard_multikey_paths.cpp index 37577f877f2..b6372dd9cd7 100644 --- a/src/mongo/db/query/wildcard_multikey_paths.cpp +++ b/src/mongo/db/query/wildcard_multikey_paths.cpp @@ -41,10 +41,10 @@ namespace mongo { * Extracts the multikey path from a metadata key stored within a wildcard index. */ static FieldRef extractMultikeyPathFromIndexKey(const IndexKeyEntry& entry) { - invariant(RecordId::isReserved<int64_t>(entry.loc)); - invariant(entry.loc.asLong() == - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId) - .asLong()); + invariant(RecordIdReservations::isReserved(entry.loc)); + invariant( + entry.loc.getLong() == + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId).getLong()); // Validate that the first piece of the key is the integer 1. BSONObjIterator iter(entry.key); diff --git a/src/mongo/db/record_id.h b/src/mongo/db/record_id.h index 9189e77f1ab..27f46d6705d 100644 --- a/src/mongo/db/record_id.h +++ b/src/mongo/db/record_id.h @@ -52,9 +52,6 @@ public: static constexpr int64_t kMinRepr = LLONG_MIN; static constexpr int64_t kMaxRepr = LLONG_MAX; - // Fixed size of a RecordId that holds a char array - enum { kSmallStrSize = 12 }; - /** * A RecordId that compares less than all int64_t RecordIds that represent documents in a * collection. @@ -71,14 +68,33 @@ public: return RecordId(kMaxRepr); } - RecordId() : _format(Format::kNull) {} + RecordId() { + _buffer[kBufEnd] = Format::kNull; + } /** - * RecordId supports holding either an int64_t or a 12 byte char array. + * Construct a RecordId that holds an int64_t. 
The raw value for RecordStore storage may be + * retrieved using getLong(). */ - explicit RecordId(int64_t repr) : _storage(repr), _format(Format::kLong) {} - explicit RecordId(const char* str, int32_t size) - : _storage(str, size), _format(Format::kSmallStr) {} + explicit RecordId(int64_t s) { + memcpy(_buffer, &s, sizeof(s)); + _buffer[kBufEnd] = Format::kLong; + } + + /** + * Construct a RecordId that holds a small binary string. The raw value for RecordStore storage + * may be retrieved using getStr(). + */ + explicit RecordId(const char* str, int32_t size) { + invariant(size > 0, "key size must be greater than 0"); + // Must fit into the 16 byte buffer minus 1 byte for size and 1 format byte. + uassert(ErrorCodes::BadValue, + fmt::format("key size {} greater than maximum {}", size, kBufMaxSize - 2), + size + 2 <= kBufMaxSize); + _buffer[0] = static_cast<char>(size); + memcpy(_buffer + 1, str, size); + _buffer[kBufEnd] = Format::kSmallStr; + } /** * Construct a RecordId from two halves. @@ -89,79 +105,104 @@ public: class Null {}; /** - * Helpers to dispatch based on the underlying type. + * Helper to dispatch based on the underlying type. In most cases the RecordId type will be + * known in advance, but this may be used when the type is not known. */ template <typename OnNull, typename OnLong, typename OnStr> auto withFormat(OnNull&& onNull, OnLong&& onLong, OnStr&& onStr) const { - switch (_format) { + switch (auto f = _format()) { case Format::kNull: return onNull(Null()); case Format::kLong: - return onLong(_storage._long); - case Format::kSmallStr: - return onStr(_storage._str, kSmallStrSize); + return onLong(getLong()); + case Format::kSmallStr: { + auto str = getStr(); + return onStr(str.rawData(), str.size()); + } default: MONGO_UNREACHABLE; } } - int64_t asLong() const { + /** + * Returns the raw value to be used as a key in a RecordStore. Requires that this RecordId was + * constructed with a 64-bit integer value or null; invariants otherwise. 
+ */ + int64_t getLong() const { // In the the int64_t format, null can also be represented by '0'. - if (_format == Format::kNull) { + if (_format() == Format::kNull) { return 0; } - invariant(_format == Format::kLong); - return _storage._long; + invariant(_format() == Format::kLong); + int64_t val; + memcpy(&val, _buffer, sizeof(val)); + return val; } - const char* strData() const { - invariant(_format == Format::kSmallStr); - return _storage._str; + /** + * Returns the raw value to be used as a key in a RecordStore. Requires that this RecordId was + * constructed with a binary string value, and invariants otherwise. + */ + const StringData getStr() const { + invariant(_format() == Format::kSmallStr); + char size = _buffer[0]; + invariant(size > 0); + invariant(size < kBufMaxSize - 1); + return StringData(_buffer + 1, size); } + /** + * Returns true if this RecordId is not suitable for storage in a RecordStore. + */ bool isNull() const { - // In the the int64_t format, null can also represented by '0'. - if (_format == Format::kLong) { - return _storage._long == 0; + // In the the int64_t format, null can also be represented by '0'. + if (_format() == Format::kLong) { + return getLong() == 0; } - return _format == Format::kNull; + return _format() == Format::kNull; } + /** * Valid RecordIds are the only ones which may be used to represent Records. The range of valid * RecordIds includes both "normal" ids that refer to user data, and "reserved" ids that are * used internally. All RecordIds outside of the valid range are sentinel values. */ bool isValid() const { - return withFormat([](Null n) { return false; }, - [&](int64_t rid) { return rid > 0; }, - [&](const char* str, int size) { return true; }); + return withFormat( + [](Null n) { return false; }, + [&](int64_t rid) { return rid > 0; }, + [&](const char* str, int size) { return size > 0 && size + 2 <= kBufMaxSize; }); } + /** + * Compares two RecordIds. 
Requires that both RecordIds are of the same format, unless one or + * both are null. Null always compares less than every other RecordId format. + */ int compare(const RecordId& rhs) const { - // Null always compares less than every other RecordId format. - if (_format == Format::kNull && rhs._format == Format::kNull) { + if (_format() == Format::kNull && rhs._format() == Format::kNull) { return 0; - } else if (_format == Format::kNull) { + } else if (_format() == Format::kNull) { return -1; - } else if (rhs._format == Format::kNull) { + } else if (rhs._format() == Format::kNull) { return 1; } - invariant(_format == rhs._format); + invariant(_format() == rhs._format()); return withFormat( [](Null n) { return 0; }, [&](const int64_t rid) { - return rid == rhs._storage._long ? 0 : rid < rhs._storage._long ? -1 : 1; + return rid == rhs.getLong() ? 0 : (rid > rhs.getLong()) ? 1 : -1; }, - [&](const char* str, int size) { return memcmp(str, rhs._storage._str, size); }); + [&](const char* str, int size) { return StringData(str, size).compare(rhs.getStr()); }); } size_t hash() const { size_t hash = 0; - withFormat( - [](Null n) {}, - [&](int64_t rid) { boost::hash_combine(hash, rid); }, - [&](const char* str, int size) { boost::hash_combine(hash, std::string(str, size)); }); + withFormat([](Null n) {}, + [&](int64_t rid) { boost::hash_combine(hash, rid); }, + [&](const char* str, int size) { + boost::hash_combine(hash, std::string_view(str, size)); + }); return hash; } @@ -182,102 +223,98 @@ public: } }; - void serialize(fmt::memory_buffer& buffer) const { - withFormat([&](Null n) { fmt::format_to(buffer, "RecordId(null)"); }, - [&](int64_t rid) { fmt::format_to(buffer, "RecordId({})", rid); }, + /** + * Formats this RecordId into a human-readable BSON object that may be passed around and + * deserialized with deserializeToken(). + * Note: This is not to be used as a key to a RecordStore. 
+ */ + void serializeToken(StringData fieldName, BSONObjBuilder* builder) const { + // Preserve the underlying format by using a different BSON type for each format. + withFormat([&](Null n) { builder->appendNull(fieldName); }, + [&](int64_t rid) { builder->append(fieldName, rid); }, [&](const char* str, int size) { - fmt::format_to(buffer, "RecordId({})", hexblob::encodeLower(str, size)); + builder->append(fieldName, hexblob::encodeLower(str, size)); }); } - void serialize(BSONObjBuilder* builder) const { - withFormat([&](Null n) { builder->append("RecordId", "null"); }, - [&](int64_t rid) { builder->append("RecordId"_sd, rid); }, - [&](const char* str, int size) { - builder->appendBinData("RecordId"_sd, size, BinDataGeneral, str); - }); + /** + * Decode a token created by serializeToken(). + */ + static RecordId deserializeToken(const BSONElement& elem) { + if (elem.isNull()) { + return RecordId(); + } else if (elem.isNumber()) { + return RecordId(elem.numberLong()); + } else if (elem.type() == BSONType::String) { + auto str = hexblob::decode(elem.String()); + return RecordId(str.c_str(), str.size()); + } else { + uasserted(ErrorCodes::BadValue, + fmt::format("Could not deserialize RecordId with type {}", elem.type())); + } } +private: + enum { kBufEnd = 15, kBufMaxSize = 16 }; + /** - * Enumerates all reserved ids that have been allocated for a specific purpose. - * The underlying value of the reserved Record ID is data-type specific and must be - * retrieved by the reservedIdFor() helper. + * Specifies the storage format of this RecordId. */ - enum class Reservation { kWildcardMultikeyMetadataId }; + enum Format : int8_t { + /** Contains no value */ + kNull, + /** int64_t */ + kLong, + /** variable-length binary string, up to 14 bytes */ + kSmallStr + }; + + Format _format() const { + return static_cast<Format>(_buffer[kBufEnd]); + } - // These reserved ranges leave 2^20 possible reserved values. + // Storage for this RecordId. 
+ // - The last byte stores the Format. + // - For the kLong type, the first 8 bytes encode the value in machine-endian order. + // - For the kSmallStr type, the first byte encodes the length and the remaining bytes encode + // the string. + char _buffer[kBufMaxSize]; +}; + +/** + * Enumerates all reserved ids that have been allocated for a specific purpose. These IDs may not be + * stored in RecordStores. + */ +enum class ReservationId { kWildcardMultikeyMetadataId }; + +/** + * Reservations tracks RecordId values that are reserved for specific usages and may not be stored + * in RecordStores. + */ +class RecordIdReservations { +public: static constexpr int64_t kMinReservedLong = RecordId::kMaxRepr - (1024 * 1024); - static constexpr unsigned char kMinReservedOID[OID::kOIDSize] = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00, 0x00}; /** * Returns the reserved RecordId value for a given Reservation. */ - template <typename T> - static RecordId reservedIdFor(Reservation res) { + static RecordId reservedIdFor(ReservationId res) { // There is only one reservation at the moment. - invariant(res == Reservation::kWildcardMultikeyMetadataId); - if constexpr (std::is_same_v<T, int64_t>) { - return RecordId(kMinReservedLong); - } else { - static_assert(std::is_same_v<T, OID>, "Unsupported RecordId type"); - OID minReserved(kMinReservedOID); - return RecordId(minReserved.view().view(), OID::kOIDSize); - } + invariant(res == ReservationId::kWildcardMultikeyMetadataId); + return RecordId(kMinReservedLong); } /** * Returns true if this RecordId falls within the reserved range for a given RecordId type. 
*/ - template <typename T> static bool isReserved(RecordId id) { if (id.isNull()) { return false; } - if constexpr (std::is_same_v<T, int64_t>) { - return id.asLong() >= kMinReservedLong && id.asLong() < RecordId::kMaxRepr; - } else { - static_assert(std::is_same_v<T, OID>, "Unsupported RecordId type"); - return memcmp(id.strData(), kMinReservedOID, OID::kOIDSize) >= 0; - } + return id.getLong() >= kMinReservedLong && id.getLong() < RecordId::kMaxRepr; } - -private: - /** - * Specifies the storage format of this RecordId. - */ - enum class Format : uint32_t { - /** Contains no value */ - kNull, - /** int64_t */ - kLong, - /** char[12] */ - kSmallStr - }; - -// Pack our union so that it only uses 12 bytes. The union will default to a 8 byte alignment, -// making it 16 bytes total with 4 bytes of padding. Instead, we force the union to use a 4 byte -// alignment, so it packs into 12 bytes. This leaves 4 bytes for our Format, allowing the RecordId -// to use 16 bytes total. -#pragma pack(push, 4) - union Storage { - int64_t _long; - char _str[kSmallStrSize]; - - Storage() {} - Storage(int64_t s) : _long(s) {} - Storage(const char* str, int32_t size) { - invariant(size == kSmallStrSize); - memcpy(_str, str, size); - } - }; -#pragma pack(pop) - - Storage _storage; - Format _format; }; - inline bool operator==(RecordId lhs, RecordId rhs) { return lhs.compare(rhs) == 0; } diff --git a/src/mongo/db/record_id.idl b/src/mongo/db/record_id.idl new file mode 100644 index 00000000000..395400ab7fe --- /dev/null +++ b/src/mongo/db/record_id.idl @@ -0,0 +1,39 @@ +# Copyright (C) 2021-present MongoDB, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# <http://www.mongodb.com/licensing/server-side-public-license>. +# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. + +global: + cpp_namespace: "mongo" + cpp_includes: + - "mongo/db/record_id.h" + +types: + RecordId: + bson_serialization_type: any + cpp_type: RecordId + description: RecordId value + serializer: RecordId::serializeToken + deserializer: RecordId::deserializeToken diff --git a/src/mongo/db/record_id_helpers.cpp b/src/mongo/db/record_id_helpers.cpp index 2cc3bb43353..4a4f2dfcfab 100644 --- a/src/mongo/db/record_id_helpers.cpp +++ b/src/mongo/db/record_id_helpers.cpp @@ -37,6 +37,8 @@ #include "mongo/bson/timestamp.h" #include "mongo/db/jsobj.h" #include "mongo/db/record_id.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/logv2/redaction.h" #include "mongo/util/debug_util.h" namespace mongo { @@ -64,7 +66,7 @@ StatusWith<RecordId> keyForOptime(const Timestamp& opTime) { /** * data and len must be the arguments from RecordStore::insert() on an oplog collection. 
*/ -StatusWith<RecordId> extractKey(const char* data, int len) { +StatusWith<RecordId> extractKeyOptime(const char* data, int len) { // Use the latest BSON validation version. Oplog entries are allowed to contain decimal data // even if decimal is disabled. if (kDebugBuild) @@ -80,5 +82,46 @@ StatusWith<RecordId> extractKey(const char* data, int len) { return keyForOptime(elem.timestamp()); } +StatusWith<RecordId> keyForDoc(const BSONObj& doc) { + // Build a KeyString as the RecordId using the "_id" field. + BSONElement idElem; + bool foundId = doc.getObjectID(idElem); + if (!foundId) { + return {ErrorCodes::BadValue, + str::stream() << "Document " << redact(doc) << " is missing the '_id' field"}; + } + + return keyForElem(idElem); +} + +RecordId keyForElem(const BSONElement& elem) { + // Intentionally discard the TypeBits since the type information will be stored in the _id of + // the original document. The consequence of this behavior is that _id values that compare + // similarly, but are of different types may not be used concurrently. 
+ KeyString::Builder keyBuilder(KeyString::Version::kLatestVersion); + keyBuilder.appendBSONElement(elem); + return RecordId(keyBuilder.getBuffer(), keyBuilder.getSize()); +} + +RecordId keyForOID(OID oid) { + KeyString::Builder keyBuilder(KeyString::Version::kLatestVersion); + keyBuilder.appendOID(oid); + return RecordId(keyBuilder.getBuffer(), keyBuilder.getSize()); +} + +void appendToBSONAs(RecordId rid, BSONObjBuilder* builder, StringData fieldName) { + rid.withFormat([&](RecordId::Null) { builder->appendNull(fieldName); }, + [&](int64_t val) { builder->append(fieldName, val); }, + [&](const char* str, int len) { + KeyString::appendSingleFieldToBSONAs(str, len, fieldName, builder); + }); +} + +BSONObj toBSONAs(RecordId rid, StringData fieldName) { + BSONObjBuilder builder; + appendToBSONAs(rid, &builder, fieldName); + return builder.obj(); +} + } // namespace record_id_helpers } // namespace mongo diff --git a/src/mongo/db/record_id_helpers.h b/src/mongo/db/record_id_helpers.h index ed9016822f3..c4fad5d6086 100644 --- a/src/mongo/db/record_id_helpers.h +++ b/src/mongo/db/record_id_helpers.h @@ -31,11 +31,11 @@ #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" namespace mongo { -class RecordId; class Timestamp; - +class RecordId; namespace record_id_helpers { /** @@ -45,9 +45,27 @@ namespace record_id_helpers { StatusWith<RecordId> keyForOptime(const Timestamp& opTime); /** + * For collections that use clustering by _id, converts various values into a RecordId. + */ +StatusWith<RecordId> keyForDoc(const BSONObj& doc); +RecordId keyForElem(const BSONElement& elem); +RecordId keyForOID(OID oid); + +/** * data and len must be the arguments from RecordStore::insert() on an oplog collection. */ -StatusWith<RecordId> extractKey(const char* data, int len); +StatusWith<RecordId> extractKeyOptime(const char* data, int len); + +/** + * Helpers to append RecordIds to a BSON object builder. 
Note that this resolves the underlying BSON + * type of the RecordId if it stores a KeyString. + * + * This should be used for informational purposes only. This cannot be 'round-tripped' back into a + * RecordId because it loses information about the original RecordId format. If you require passing + * a RecordId as a token or storing for a resumable scan, for example, use RecordId::serializeToken. + */ +void appendToBSONAs(RecordId rid, BSONObjBuilder* builder, StringData fieldName); +BSONObj toBSONAs(RecordId rid, StringData fieldName); } // namespace record_id_helpers } // namespace mongo diff --git a/src/mongo/db/record_id_test.cpp b/src/mongo/db/record_id_test.cpp index 63f36c07029..5e40c3ea05d 100644 --- a/src/mongo/db/record_id_test.cpp +++ b/src/mongo/db/record_id_test.cpp @@ -31,6 +31,7 @@ #include "mongo/db/record_id.h" +#include "mongo/db/record_id_helpers.h" #include "mongo/unittest/death_test.h" #include "mongo/unittest/unittest.h" @@ -47,7 +48,7 @@ TEST(RecordId, HashEqual) { } TEST(RecordId, HashEqualOid) { - RecordId locA(OID::gen().view().view(), OID::kOIDSize); + RecordId locA(record_id_helpers::keyForOID(OID::gen())); RecordId locB; locB = locA; ASSERT_EQUALS(locA, locB); @@ -76,9 +77,9 @@ TEST(RecordId, HashNotEqual) { } TEST(RecordId, HashNotEqualOid) { - RecordId loc1(OID::gen().view().view(), OID::kOIDSize); - RecordId loc2(OID::gen().view().view(), OID::kOIDSize); - RecordId loc3(OID::gen().view().view(), OID::kOIDSize); + RecordId loc1(record_id_helpers::keyForOID(OID::gen())); + RecordId loc2(record_id_helpers::keyForOID(OID::gen())); + RecordId loc3(record_id_helpers::keyForOID(OID::gen())); ASSERT_NOT_EQUALS(loc1, loc2); ASSERT_NOT_EQUALS(loc1, loc3); ASSERT_NOT_EQUALS(loc2, loc3); @@ -91,20 +92,19 @@ TEST(RecordId, HashNotEqualOid) { ASSERT_NOT_EQUALS(hasher(loc2), hasher(loc3)); } -TEST(RecordId, OidTest) { +TEST(RecordId, KeyStringTest) { RecordId ridNull; ASSERT(ridNull.isNull()); - ASSERT(!RecordId::isReserved<OID>(ridNull)); 
ASSERT(!ridNull.isValid()); RecordId null2; ASSERT(null2 == ridNull); OID oid1 = OID::gen(); - RecordId rid1(oid1.view().view(), OID::kOIDSize); - ASSERT(!RecordId::isReserved<OID>(rid1)); + RecordId rid1(record_id_helpers::keyForOID(oid1)); ASSERT(rid1.isValid()); - ASSERT_EQ(OID::from(rid1.strData()), oid1); + auto obj = record_id_helpers::toBSONAs(rid1, ""); + ASSERT_EQ(oid1, obj.firstElement().OID()); ASSERT_GT(rid1, ridNull); ASSERT_LT(ridNull, rid1); } @@ -117,28 +117,28 @@ TEST(RecordId, NullTest) { RecordId nullRid; ASSERT(nullRid.isNull()); - ASSERT_EQ(0, nullRid.asLong()); + ASSERT_EQ(0, nullRid.getLong()); ASSERT_NE(rid0, nullRid); } TEST(RecordId, OidTestCompare) { RecordId ridNull; - RecordId rid0(OID::createFromString("000000000000000000000000").view().view(), OID::kOIDSize); + RecordId rid0 = record_id_helpers::keyForOID(OID::createFromString("000000000000000000000000")); ASSERT_GT(rid0, ridNull); - RecordId rid1(OID::createFromString("000000000000000000000001").view().view(), OID::kOIDSize); + RecordId rid1 = record_id_helpers::keyForOID(OID::createFromString("000000000000000000000001")); ASSERT_GT(rid1, rid0); - RecordId oidMin = RecordId(OID().view().view(), OID::kOIDSize); + RecordId oidMin = record_id_helpers::keyForOID(OID()); ASSERT_EQ(oidMin, rid0); ASSERT_GT(oidMin, ridNull); - RecordId rid2(OID::createFromString("000000000000000000000002").view().view(), OID::kOIDSize); + RecordId rid2 = record_id_helpers::keyForOID(OID::createFromString("000000000000000000000002")); ASSERT_GT(rid2, rid1); - RecordId rid3(OID::createFromString("ffffffffffffffffffffffff").view().view(), OID::kOIDSize); + RecordId rid3 = record_id_helpers::keyForOID(OID::createFromString("ffffffffffffffffffffffff")); ASSERT_GT(rid3, rid2); ASSERT_GT(rid3, rid0); - RecordId oidMax = RecordId(OID::max().view().view(), OID::kOIDSize); + RecordId oidMax = record_id_helpers::keyForOID(OID::max()); ASSERT_EQ(oidMax, rid3); ASSERT_GT(oidMax, rid0); } @@ -147,22 +147,57 @@ 
TEST(RecordId, Reservations) { // It's important that reserved IDs like this never change. RecordId ridReserved(RecordId::kMaxRepr - (1024 * 1024)); ASSERT_EQ(ridReserved, - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); - ASSERT(RecordId::isReserved<int64_t>(ridReserved)); + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); + ASSERT(RecordIdReservations::isReserved(ridReserved)); ASSERT(ridReserved.isValid()); +} - RecordId oidReserved(OID::createFromString("fffffffffffffffffff00000").view().view(), - OID::kOIDSize); - ASSERT_EQ(oidReserved, - RecordId::reservedIdFor<OID>(RecordId::Reservation::kWildcardMultikeyMetadataId)); - ASSERT(RecordId::isReserved<OID>(oidReserved)); - ASSERT(oidReserved.isValid()); +TEST(RecordId, RoundTripSerialize) { + { + RecordId id(1); + BSONObjBuilder builder; + id.serializeToken("rid", &builder); + BSONObj obj = builder.done(); + ASSERT_EQ(id, RecordId::deserializeToken(obj["rid"])); + } + + { + RecordId id(4611686018427387904); + BSONObjBuilder builder; + id.serializeToken("rid", &builder); + BSONObj obj = builder.done(); + ASSERT_EQ(id, RecordId::deserializeToken(obj["rid"])); + } + + { + RecordId id; + BSONObjBuilder builder; + id.serializeToken("rid", &builder); + BSONObj obj = builder.done(); + ASSERT_EQ(id, RecordId::deserializeToken(obj["rid"])); + } + + { + RecordId id(record_id_helpers::keyForOID(OID::gen())); + BSONObjBuilder builder; + id.serializeToken("rid", &builder); + BSONObj obj = builder.done(); + ASSERT_EQ(id, RecordId::deserializeToken(obj["rid"])); + } + + { + BSONObjBuilder builder; + builder.append("rid", OID::gen()); + BSONObj obj = builder.done(); + ASSERT_THROWS_CODE( + RecordId::deserializeToken(obj["rid"]), DBException, ErrorCodes::BadValue); + } } // RecordIds of different formats may not be compared. 
DEATH_TEST(RecordId, UnsafeComparison, "Invariant failure") { RecordId rid1(1); - RecordId rid2(OID::createFromString("000000000000000000000001").view().view(), OID::kOIDSize); + RecordId rid2 = record_id_helpers::keyForOID(OID::createFromString("000000000000000000000001")); ASSERT_NOT_EQUALS(rid1, rid2); } diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript index 994f6a086b8..f5ef2b61f82 100644 --- a/src/mongo/db/repl/SConscript +++ b/src/mongo/db/repl/SConscript @@ -244,6 +244,7 @@ env.Library( LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/catalog/database_holder', '$BUILD_DIR/mongo/db/index_builds_coordinator_interface', + '$BUILD_DIR/mongo/db/record_id_helpers', '$BUILD_DIR/mongo/db/storage/oplog_cap_maintainer_thread', '$BUILD_DIR/mongo/db/storage/storage_control', '$BUILD_DIR/mongo/db/vector_clock', diff --git a/src/mongo/db/repl/replication_recovery.cpp b/src/mongo/db/repl/replication_recovery.cpp index 2355148df65..675800c3cad 100644 --- a/src/mongo/db/repl/replication_recovery.cpp +++ b/src/mongo/db/repl/replication_recovery.cpp @@ -686,7 +686,7 @@ void ReplicationRecoveryImpl::_truncateOplogTo(OperationContext* opCtx, str::stream() << "Should have found a oplog entry timestamp lte to " << truncateAfterTimestamp.toString() << ", but instead found " << redact(truncateAfterOplogEntryBSON.get()) << " with timestamp " - << Timestamp(truncateAfterRecordId.asLong()).toString()); + << Timestamp(truncateAfterRecordId.getLong()).toString()); // Truncate the oplog AFTER the oplog entry found to be <= truncateAfterTimestamp. 
LOGV2(21553, diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp index 6eecb902615..19a1a544494 100644 --- a/src/mongo/db/repl/storage_interface_impl.cpp +++ b/src/mongo/db/repl/storage_interface_impl.cpp @@ -69,6 +69,7 @@ #include "mongo/db/ops/update_request.h" #include "mongo/db/query/get_executor.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/record_id_helpers.h" #include "mongo/db/repl/collection_bulk_loader_impl.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/replication_coordinator.h" @@ -723,13 +724,11 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments( boost::optional<RecordId> minRecord, maxRecord; if (!startKey.isEmpty()) { - auto oid = startKey.firstElement().OID(); - minRecord = RecordId(oid.view().view(), OID::kOIDSize); + minRecord = RecordId(record_id_helpers::keyForElem(startKey.firstElement())); } if (!endKey.isEmpty()) { - auto oid = endKey.firstElement().OID(); - maxRecord = RecordId(oid.view().view(), OID::kOIDSize); + maxRecord = RecordId(record_id_helpers::keyForElem(endKey.firstElement())); } planExecutor = isFind diff --git a/src/mongo/db/resumable_index_builds.idl b/src/mongo/db/resumable_index_builds.idl index f27b45e3ec2..f318d586ffd 100644 --- a/src/mongo/db/resumable_index_builds.idl +++ b/src/mongo/db/resumable_index_builds.idl @@ -37,6 +37,7 @@ global: imports: - "mongo/idl/basic_types.idl" + - "mongo/db/record_id.idl" - "mongo/db/sorter/sorter.idl" enums: @@ -116,8 +117,7 @@ structs: type: uuid collectionScanPosition: description: "The last record id inserted into the sorter before shutdown" - type: - variant: [objectid, safeInt64] + type: RecordId optional: true indexes: description: "The information needed to resume each specific index in this build" diff --git a/src/mongo/db/storage/SConscript b/src/mongo/db/storage/SConscript index 230b3a4c82b..ba1f2108815 100644 --- a/src/mongo/db/storage/SConscript +++ b/src/mongo/db/storage/SConscript 
@@ -245,6 +245,7 @@ env.Library( ], LIBDEPS=[ '$BUILD_DIR/mongo/db/catalog/collection_options', + '$BUILD_DIR/mongo/db/record_id_helpers', '$BUILD_DIR/mongo/db/service_context', '$BUILD_DIR/mongo/db/storage/storage_options', '$BUILD_DIR/mongo/unittest/unittest', @@ -399,6 +400,7 @@ env.Benchmark( ], LIBDEPS=[ '$BUILD_DIR/mongo/base', + '$BUILD_DIR/mongo/db/record_id_helpers', ], ) diff --git a/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.cpp b/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.cpp index 84f67fe8f1b..80f11d50aa8 100644 --- a/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.cpp +++ b/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.cpp @@ -356,7 +356,7 @@ void EphemeralForTestRecordStore::deleteRecord(WithLock lk, StatusWith<RecordId> EphemeralForTestRecordStore::extractAndCheckLocForOplog(WithLock, const char* data, int len) const { - StatusWith<RecordId> status = record_id_helpers::extractKey(data, len); + StatusWith<RecordId> status = record_id_helpers::extractKeyOptime(data, len); if (!status.isOK()) return status; diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h index dd77b1ae635..dfd69a05653 100644 --- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h +++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h @@ -147,7 +147,7 @@ public: virtual Timestamp getAllDurableTimestamp() const override { RecordId id = _visibilityManager->getAllCommittedRecord(); - return Timestamp(id.asLong()); + return Timestamp(id.getLong()); } boost::optional<Timestamp> getOplogNeededForCrashRecovery() const final { diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp index d7072c957a6..798ddf5f960 100644 --- 
a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp +++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp @@ -111,7 +111,7 @@ int64_t RecordStore::storageSize(OperationContext* opCtx, bool RecordStore::findRecord(OperationContext* opCtx, const RecordId& loc, RecordData* rd) const { StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead()); - auto it = workingCopy->find(createKey(_ident, loc.asLong())); + auto it = workingCopy->find(createKey(_ident, loc.getLong())); if (it == workingCopy->end()) { return false; } @@ -124,7 +124,7 @@ void RecordStore::deleteRecord(OperationContext* opCtx, const RecordId& dl) { auto ru = RecoveryUnit::get(opCtx); StringStore* workingCopy(ru->getHead()); SizeAdjuster adjuster(opCtx, this); - invariant(workingCopy->erase(createKey(_ident, dl.asLong()))); + invariant(workingCopy->erase(createKey(_ident, dl.getLong()))); ru->makeDirty(); } @@ -139,10 +139,10 @@ Status RecordStore::insertRecords(OperationContext* opCtx, int64_t thisRecordId = 0; if (_isOplog) { StatusWith<RecordId> status = - record_id_helpers::extractKey(record.data.data(), record.data.size()); + record_id_helpers::extractKeyOptime(record.data.data(), record.data.size()); if (!status.isOK()) return status.getStatus(); - thisRecordId = status.getValue().asLong(); + thisRecordId = status.getValue().getLong(); _visibilityManager->addUncommittedRecord(opCtx, this, RecordId(thisRecordId)); } else { thisRecordId = _nextRecordId(opCtx); @@ -164,7 +164,7 @@ Status RecordStore::updateRecord(OperationContext* opCtx, StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead()); SizeAdjuster adjuster(opCtx, this); { - std::string key = createKey(_ident, oldLocation.asLong()); + std::string key = createKey(_ident, oldLocation.getLong()); StringStore::const_iterator it = workingCopy->find(key); invariant(it != workingCopy->end()); workingCopy->update(StringStore::value_type{key, std::string(data, len)}); @@ -229,7 +229,7 @@ 
void RecordStore::cappedTruncateAfter(OperationContext* opCtx, RecordId end, boo auto ru = RecoveryUnit::get(opCtx); StringStore* workingCopy(ru->getHead()); WriteUnitOfWork wuow(opCtx); - const auto recordKey = createKey(_ident, end.asLong()); + const auto recordKey = createKey(_ident, end.getLong()); auto recordIt = inclusive ? workingCopy->lower_bound(recordKey) : workingCopy->upper_bound(recordKey); auto endIt = workingCopy->upper_bound(_postfix); @@ -301,7 +301,7 @@ void RecordStore::_initHighestIdIfNeeded(OperationContext* opCtx) { // Find the largest RecordId currently in use. std::unique_ptr<SeekableRecordCursor> cursor = getCursor(opCtx, /*forward=*/false); if (auto record = cursor->next()) { - nextId = record->id.asLong() + 1; + nextId = record->id.getLong() + 1; } _highestRecordId.store(nextId); @@ -355,7 +355,7 @@ boost::optional<Record> RecordStore::Cursor::seekExact(const RecordId& id) { _savedPosition = boost::none; _lastMoveWasRestore = false; StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead()); - std::string key = createKey(_rs._ident, id.asLong()); + std::string key = createKey(_rs._ident, id.getLong()); it = workingCopy->find(key); if (it == workingCopy->end() || !inPrefix(it->first)) @@ -384,7 +384,7 @@ boost::optional<Record> RecordStore::Cursor::seekNear(const RecordId& id) { return boost::none; StringStore* workingCopy{RecoveryUnit::get(opCtx)->getHead()}; - std::string key = createKey(_rs._ident, search.asLong()); + std::string key = createKey(_rs._ident, search.getLong()); // We may land higher and that is fine per the API contract. 
it = workingCopy->lower_bound(key); @@ -497,7 +497,7 @@ boost::optional<Record> RecordStore::ReverseCursor::seekExact(const RecordId& id _needFirstSeek = false; _savedPosition = boost::none; StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead()); - std::string key = createKey(_rs._ident, id.asLong()); + std::string key = createKey(_rs._ident, id.getLong()); StringStore::const_iterator canFind = workingCopy->find(key); if (canFind == workingCopy->end() || !inPrefix(canFind->first)) { it = workingCopy->rend(); @@ -518,7 +518,7 @@ boost::optional<Record> RecordStore::ReverseCursor::seekNear(const RecordId& id) return boost::none; StringStore* workingCopy{RecoveryUnit::get(opCtx)->getHead()}; - std::string key = createKey(_rs._ident, id.asLong()); + std::string key = createKey(_rs._ident, id.getLong()); it = StringStore::const_reverse_iterator(workingCopy->upper_bound(key)); // Since there is at least 1 record, if we hit the beginning we need to return the only record. diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl.cpp index 16e7bad74e7..d92a1adefeb 100644 --- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl.cpp +++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_sorted_impl.cpp @@ -140,7 +140,7 @@ IndexDataEntry::IndexDataEntry(const std::string& indexDataEntry) : _buffer(reinterpret_cast<const uint8_t*>(indexDataEntry.data())) {} std::string IndexDataEntry::create(RecordId loc, const KeyString::TypeBits& typeBits) { - uint64_t repr = loc.asLong(); + uint64_t repr = loc.getLong(); uint64_t typebitsSize = typeBits.getSize(); std::string output(sizeof(repr) + sizeof(typebitsSize) + typebitsSize, '\0'); diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_visibility_manager.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_visibility_manager.cpp index 00677757b1d..745e87ddf63 100644 
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_visibility_manager.cpp +++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_visibility_manager.cpp @@ -101,7 +101,7 @@ void VisibilityManager::addUncommittedRecord(OperationContext* opCtx, RecordId VisibilityManager::getAllCommittedRecord() { stdx::lock_guard<Latch> lock(_stateLock); return _uncommittedRecords.empty() ? _highestSeen - : RecordId(_uncommittedRecords.begin()->asLong() - 1); + : RecordId(_uncommittedRecords.begin()->getLong() - 1); } bool VisibilityManager::isFirstHidden(RecordId rid) { diff --git a/src/mongo/db/storage/index_entry_comparison.h b/src/mongo/db/storage/index_entry_comparison.h index 767385283cf..e021bb26f20 100644 --- a/src/mongo/db/storage/index_entry_comparison.h +++ b/src/mongo/db/storage/index_entry_comparison.h @@ -70,7 +70,7 @@ struct IndexKeyEntry { void serialize(BSONObjBuilder* builder) const { builder->append("key"_sd, key); - loc.serialize(builder); + loc.serializeToken("RecordId", builder); } BSONObj key; diff --git a/src/mongo/db/storage/key_string.cpp b/src/mongo/db/storage/key_string.cpp index 89c4561b46b..f5cb04f3b20 100644 --- a/src/mongo/db/storage/key_string.cpp +++ b/src/mongo/db/storage/key_string.cpp @@ -236,6 +236,9 @@ const uint8_t kEnd = 0x4; // the encoding of NUL bytes in strings as "\x00\xff". const uint8_t kLess = 1; const uint8_t kGreater = 254; + +// The maximum length of a RecordId binary string that may be appended to a KeyString. +const int8_t kMaxRecordIdStrLen = 127; } // namespace // some utility functions @@ -275,7 +278,7 @@ StringData readCString(BufReader* reader) { */ StringData readCStringWithNuls(BufReader* reader, std::string* scratch) { const StringData initial = readCString(reader); - if (reader->peek<unsigned char>() != 0xFF) + if (!reader->remaining() || reader->peek<unsigned char>() != 0xFF) return initial; // Don't alloc or copy for simple case with no NUL bytes. 
scratch->append(initial.rawData(), initial.size()); @@ -406,6 +409,13 @@ void BuilderBase<BufferT>::appendSetAsArray(const BSONElementSet& set, const Str } template <class BufferT> +void BuilderBase<BufferT>::appendOID(OID oid) { + _verifyAppendingState(); + _appendOID(oid, _shouldInvertOnAppend()); + _elemCount++; +} + +template <class BufferT> void BuilderBase<BufferT>::appendDiscriminator(const Discriminator discriminator) { // The discriminator forces this KeyString to compare Less/Greater than any KeyString with // the same prefix of keys. As an example, this can be used to land on the first key in the @@ -474,7 +484,7 @@ void BuilderBase<BufferT>::_appendRecordIdLong(int64_t val) { if (raw < 0) { // Note: we encode RecordId::minLong() and RecordId() the same which is ok, as they // are never stored so they will never be compared to each other. - invariant(raw == RecordId::minLong().asLong()); + invariant(raw == RecordId::minLong().getLong()); raw = 0; } const uint64_t value = static_cast<uint64_t>(raw); @@ -506,10 +516,20 @@ void BuilderBase<BufferT>::_appendRecordIdLong(int64_t val) { template <class BufferT> void BuilderBase<BufferT>::_appendRecordIdStr(const char* str, int size) { - // Only 12 byte strings can be encoded as RecordIds. - invariant(size == RecordId::kSmallStrSize); + // This encoding for RecordId binary strings stores the size at the end. This means that a + // RecordId may only be appended at the end of a KeyString. That is, it cannot be appended in + // the middle of a KeyString and also be binary-comparable. + + // The current maximum string length is 127. The high bit is reserved for future usage. + invariant(size <= kMaxRecordIdStrLen); + invariant(size > 0); + const bool invert = false; + + // String is encoded with a single byte for the size at the end. 
_appendBytes(str, size, invert); + auto encodedSize = static_cast<uint8_t>(size); + _append(encodedSize, invert); } template <class BufferT> @@ -2488,19 +2508,20 @@ RecordId decodeRecordIdLong(BufReader* reader) { } RecordId decodeRecordIdStrAtEnd(const void* bufferRaw, size_t bufSize) { + invariant(bufSize > 0); const uint8_t* buffer = static_cast<const uint8_t*>(bufferRaw); - // We currently require all RecordId strings to be 12 bytes. - const int ridSize = RecordId::kSmallStrSize; - invariant(bufSize >= ridSize); - const uint8_t* firstBytePtr = (buffer + bufSize - ridSize); - BufReader reader(firstBytePtr, ridSize); - return decodeRecordIdStr(&reader); -} -RecordId decodeRecordIdStr(BufReader* reader) { - // We currently require all RecordId strings to be 12 bytes. - const int size = RecordId::kSmallStrSize; - return RecordId(static_cast<const char*>(reader->skip(size)), size); + // The current encoding for strings supports strings up to 127 bytes. The high bit is reserved + // for future usage. + uint8_t len = buffer[bufSize - 1]; + keyStringAssert(5577900, + fmt::format("Cannot decode record id string longer than {} bytes; size is {}", + kMaxRecordIdStrLen, + len), + len <= kMaxRecordIdStrLen); + invariant(bufSize > len); + const uint8_t* firstBytePtr = (buffer + bufSize - len - 1); + return RecordId(reinterpret_cast<const char*>(firstBytePtr), len); } int compare(const char* leftBuf, const char* rightBuf, size_t leftSize, size_t rightSize) { @@ -2551,6 +2572,41 @@ bool readSBEValue(BufReader* reader, return true; } +void appendSingleFieldToBSONAs( + const char* buf, int len, StringData fieldName, BSONObjBuilder* builder, Version version) { + const bool inverted = false; + + BufReader reader(buf, len); + invariant(reader.remaining()); + uint8_t ctype = readType<uint8_t>(&reader, inverted); + invariant(ctype != kEnd && ctype > kLess && ctype < kGreater); + + const uint32_t depth = 1; // This function only gets called for a top-level KeyString::Value. 
+ // Callers discard their TypeBits. + TypeBits typeBits(version); + TypeBits::Reader typeBitsReader(typeBits); + + BSONObjBuilderValueStream& stream = *builder << fieldName; + toBsonValue(ctype, &reader, &typeBitsReader, inverted, version, &stream, depth); +} + +void appendToBSONArray(const char* buf, int len, BSONArrayBuilder* builder, Version version) { + const bool inverted = false; + + BufReader reader(buf, len); + invariant(reader.remaining()); + uint8_t ctype = readType<uint8_t>(&reader, inverted); + invariant(ctype != kEnd && ctype > kLess && ctype < kGreater); + + // This function only gets called for a top-level KeyString::Value. + const uint32_t depth = 1; + // All users of this currently discard type bits. + TypeBits typeBits(version); + TypeBits::Reader typeBitsReader(typeBits); + + toBsonValue(ctype, &reader, &typeBitsReader, inverted, version, builder, depth); +} + void Value::serializeWithoutRecordId(BufBuilder& buf) const { dassert(decodeRecordIdLongAtEnd(_buffer.get(), _ksSize).isValid()); diff --git a/src/mongo/db/storage/key_string.h b/src/mongo/db/storage/key_string.h index d4e45aea650..57d16141241 100644 --- a/src/mongo/db/storage/key_string.h +++ b/src/mongo/db/storage/key_string.h @@ -544,6 +544,7 @@ public: void appendUndefined(); void appendBinData(const BSONBinData& data); void appendSetAsArray(const BSONElementSet& set, const StringTransformFn& f = nullptr); + void appendOID(OID oid); /** * Appends a Discriminator byte and kEnd byte to a key string. @@ -949,6 +950,7 @@ RecordId decodeRecordIdLongAtEnd(const void* buf, size_t size); /** * Decodes a RecordId string from the end of a buffer. + * The RecordId string length cannot be determined by looking at the start of the string. */ RecordId decodeRecordIdStrAtEnd(const void* buf, size_t size); @@ -961,7 +963,6 @@ size_t sizeWithoutRecordIdAtEnd(const void* bufferRaw, size_t bufSize); * Decodes a RecordId, consuming all bytes needed from reader. 
*/ RecordId decodeRecordIdLong(BufReader* reader); -RecordId decodeRecordIdStr(BufReader* reader); int compare(const char* leftBuf, const char* rightBuf, size_t leftSize, size_t rightSize); @@ -977,6 +978,16 @@ bool readSBEValue(BufReader* reader, Version version, sbe::value::ValueBuilder* valueBuilder); +/* + * Appends the first field of a key string to a BSON object. + * This does not accept TypeBits because callers of this function discard TypeBits. + */ +void appendSingleFieldToBSONAs(const char* buf, + int len, + StringData fieldName, + BSONObjBuilder* builder, + Version version = KeyString::Version::kLatestVersion); + template <class BufferT> template <class T> int BuilderBase<BufferT>::compare(const T& other) const { diff --git a/src/mongo/db/storage/key_string_test.cpp b/src/mongo/db/storage/key_string_test.cpp index 9a3093f4f93..1721fce9773 100644 --- a/src/mongo/db/storage/key_string_test.cpp +++ b/src/mongo/db/storage/key_string_test.cpp @@ -1279,6 +1279,67 @@ std::vector<BSONObj> thinElements(std::vector<BSONObj> elements, } } // namespace +namespace { +RecordId ridFromOid(const OID& oid) { + KeyString::Builder builder(KeyString::Version::kLatestVersion); + builder.appendOID(oid); + return RecordId(builder.getBuffer(), builder.getSize()); +} +} // namespace + +TEST_F(KeyStringBuilderTest, RecordIdStr) { + const int kSize = 12; + for (int i = 0; i < kSize; i++) { + unsigned char buf[kSize]; + memset(buf, 0x80, kSize); + buf[i] = 0xFE; + + const RecordId rid = ridFromOid(OID::from(buf)); + + { // Test encoding / decoding of single RecordIds + const KeyString::Builder ks(version, rid); + invariant(ks.getSize() == 14); + + ASSERT_EQ(KeyString::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); + + if (rid.isValid()) { + ASSERT_GT(ks, KeyString::Builder(version, RecordId(1))); + ASSERT_GT(ks, KeyString::Builder(version, ridFromOid(OID()))); + ASSERT_LT(ks, KeyString::Builder(version, ridFromOid(OID::max()))); + + char bufLt[kSize]; + memcpy(bufLt, 
buf, kSize); + bufLt[kSize - 1] -= 1; + auto ltRid = ridFromOid(OID::from(bufLt)); + ASSERT(ltRid < rid); + ASSERT_GT(ks, KeyString::Builder(version, ltRid)); + + char bufGt[kSize]; + memcpy(bufGt, buf, kSize); + bufGt[kSize - 1] += 1; + auto gtRid = ridFromOid(OID::from(bufGt)); + ASSERT(gtRid > rid); + ASSERT_LT(ks, KeyString::Builder(version, gtRid)); + } + } + + for (int j = 0; j < kSize; j++) { + unsigned char otherBuf[kSize] = {0}; + otherBuf[j] = 0xFE; + RecordId other = ridFromOid(OID::from(otherBuf)); + + if (rid == other) { + ASSERT_EQ(KeyString::Builder(version, rid), KeyString::Builder(version, other)); + } + if (rid < other) { + ASSERT_LT(KeyString::Builder(version, rid), KeyString::Builder(version, other)); + } + if (rid > other) { + ASSERT_GT(KeyString::Builder(version, rid), KeyString::Builder(version, other)); + } + } + } +} TEST_F(KeyStringBuilderTest, AllPermCompare) { std::vector<BSONObj> elements = getInterestingElements(version); @@ -1435,8 +1496,8 @@ TEST_F(KeyStringBuilderTest, RecordIds) { ASSERT_GT(ks, KeyString::Builder(version, RecordId::minLong())); ASSERT_LT(ks, KeyString::Builder(version, RecordId::maxLong())); - ASSERT_GT(ks, KeyString::Builder(version, RecordId(rid.asLong() - 1))); - ASSERT_LT(ks, KeyString::Builder(version, RecordId(rid.asLong() + 1))); + ASSERT_GT(ks, KeyString::Builder(version, RecordId(rid.getLong() - 1))); + ASSERT_LT(ks, KeyString::Builder(version, RecordId(rid.getLong() + 1))); } } @@ -1481,64 +1542,6 @@ TEST_F(KeyStringBuilderTest, RecordIds) { } } -TEST_F(KeyStringBuilderTest, RecordIdStr) { - const int kSize = 12; - for (int i = 0; i < kSize; i++) { - unsigned char buf[kSize]; - memset(buf, 0x80, kSize); - buf[i] = 0xFE; - const RecordId rid = RecordId(reinterpret_cast<char*>(buf), kSize); - - { // Test encoding / decoding of single RecordIds - const KeyString::Builder ks(version, rid); - ASSERT_EQ(ks.getSize(), 12u); - - ASSERT_EQ(KeyString::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); - - 
{ - BufReader reader(ks.getBuffer(), ks.getSize()); - ASSERT_EQ(KeyString::decodeRecordIdStr(&reader), rid); - ASSERT(reader.atEof()); - } - - if (rid.isValid()) { - ASSERT_GT(ks, KeyString::Builder(version, RecordId(1))); - ASSERT_GT( - ks, KeyString::Builder(version, RecordId(OID().view().view(), OID::kOIDSize))); - ASSERT_LT( - ks, - KeyString::Builder(version, RecordId(OID::max().view().view(), OID::kOIDSize))); - - char bufLt[kSize]; - memcpy(bufLt, buf, kSize); - bufLt[kSize - 1] -= 1; - ASSERT_GT(ks, KeyString::Builder(version, RecordId(bufLt, kSize))); - - char bufGt[kSize]; - memcpy(bufGt, buf, kSize); - bufGt[kSize - 1] += 1; - ASSERT_LT(ks, KeyString::Builder(version, RecordId(bufGt, kSize))); - } - } - - for (int j = 0; j < kSize; j++) { - unsigned char otherBuf[kSize] = {0}; - otherBuf[j] = 0xFE; - RecordId other = RecordId(reinterpret_cast<char*>(otherBuf), kSize); - - if (rid == other) { - ASSERT_EQ(KeyString::Builder(version, rid), KeyString::Builder(version, other)); - } - if (rid < other) { - ASSERT_LT(KeyString::Builder(version, rid), KeyString::Builder(version, other)); - } - if (rid > other) { - ASSERT_GT(KeyString::Builder(version, rid), KeyString::Builder(version, other)); - } - } - } -} - TEST_F(KeyStringBuilderTest, KeyWithLotsOfTypeBits) { BSONObj obj; { diff --git a/src/mongo/db/storage/record_id_bm.cpp b/src/mongo/db/storage/record_id_bm.cpp index 1f28a970f3e..2a79f275e3e 100644 --- a/src/mongo/db/storage/record_id_bm.cpp +++ b/src/mongo/db/storage/record_id_bm.cpp @@ -30,40 +30,56 @@ #include "mongo/platform/basic.h" #include "mongo/db/record_id.h" +#include "mongo/db/record_id_helpers.h" #include <benchmark/benchmark.h> namespace mongo { namespace { -RecordId incInt(RecordId r) { - return RecordId(r.asLong() + 1); +void BM_RecordIdCopyLong(benchmark::State& state) { + RecordId rid(1 << 31); + for (auto _ : state) { + RecordId tmp; + benchmark::ClobberMemory(); + benchmark::DoNotOptimize(tmp = rid); + } } -RecordId incOID(RecordId r) { 
- OID o = OID::from(r.strData()); - o.setTimestamp(o.getTimestamp() + 1); - return RecordId(o.view().view(), OID::kOIDSize); +void BM_RecordIdCopyString(benchmark::State& state) { + RecordId rid = record_id_helpers::keyForOID(OID::gen()); + for (auto _ : state) { + RecordId tmp; + benchmark::ClobberMemory(); + benchmark::DoNotOptimize(tmp = rid); + } } -void BM_RecordIdCopyLong(benchmark::State& state) { +void BM_RecordIdFormatLong(benchmark::State& state) { RecordId rid(1 << 31); for (auto _ : state) { benchmark::ClobberMemory(); - benchmark::DoNotOptimize(rid = incInt(rid)); + benchmark::DoNotOptimize(rid.withFormat([](RecordId::Null) { return false; }, + [](std::int64_t val) { return false; }, + [](const char* str, int size) { return false; })); } } -void BM_RecordIdCopyOID(benchmark::State& state) { - RecordId rid(OID::gen().view().view(), OID::kOIDSize); +void BM_RecordIdFormatString(benchmark::State& state) { + RecordId rid = record_id_helpers::keyForOID(OID::gen()); for (auto _ : state) { benchmark::ClobberMemory(); - benchmark::DoNotOptimize(rid = incOID(rid)); + benchmark::DoNotOptimize(rid.withFormat([](RecordId::Null) { return false; }, + [](std::int64_t val) { return false; }, + [](const char* str, int size) { return false; })); } } BENCHMARK(BM_RecordIdCopyLong); -BENCHMARK(BM_RecordIdCopyOID); +BENCHMARK(BM_RecordIdCopyString); + +BENCHMARK(BM_RecordIdFormatLong); +BENCHMARK(BM_RecordIdFormatString); } // namespace } // namespace mongo diff --git a/src/mongo/db/storage/record_store_test_harness.cpp b/src/mongo/db/storage/record_store_test_harness.cpp index 12e9a4bf912..ec8aed31d1d 100644 --- a/src/mongo/db/storage/record_store_test_harness.cpp +++ b/src/mongo/db/storage/record_store_test_harness.cpp @@ -32,6 +32,7 @@ #include "mongo/db/storage/record_store_test_harness.h" +#include "mongo/db/record_id_helpers.h" #include "mongo/db/storage/record_store.h" #include "mongo/unittest/unittest.h" @@ -425,11 +426,12 @@ TEST(RecordStoreTestHarness, 
ClusteredRecordStore) { std::vector<Timestamp> timestamps(numRecords, Timestamp()); for (int i = 0; i < numRecords; i++) { - BSONObj doc = BSON("i" << i); + BSONObj doc = BSON("_id" << OID::gen() << "i" << i); RecordData recordData = RecordData(doc.objdata(), doc.objsize()); recordData.makeOwned(); - records.push_back({RecordId(OID::gen().view().view(), OID::kOIDSize), recordData}); + RecordId id = uassertStatusOK(record_id_helpers::keyForDoc(doc)); + records.push_back({id, recordData}); } { @@ -476,10 +478,12 @@ TEST(RecordStoreTestHarness, ClusteredRecordStore) { ASSERT_EQ(0, strcmp(records.at(i).data.data(), rd.data())); } - RecordId minOid(OID().view().view(), OID::kOIDSize); - RecordId maxOid(OID::max().view().view(), OID::kOIDSize); - ASSERT_FALSE(rs->findRecord(opCtx.get(), minOid, nullptr)); - ASSERT_FALSE(rs->findRecord(opCtx.get(), maxOid, nullptr)); + + RecordId minId = record_id_helpers::keyForOID(OID()); + ASSERT_FALSE(rs->findRecord(opCtx.get(), minId, nullptr)); + + RecordId maxId = record_id_helpers::keyForOID(OID::max()); + ASSERT_FALSE(rs->findRecord(opCtx.get(), maxId, nullptr)); } { @@ -544,7 +548,9 @@ TEST(RecordStoreTestHarness, ClusteredRecordStoreSeekNear) { auto oid = OID::gen(); oid.setTimestamp(timestamps[i].getSecs()); - auto record = Record{RecordId(oid.view().view(), OID::kOIDSize), recordData}; + + auto id = record_id_helpers::keyForOID(oid); + auto record = Record{id, recordData}; std::vector<Record> recVec = {record}; WriteUnitOfWork wuow(opCtx.get()); @@ -557,9 +563,12 @@ TEST(RecordStoreTestHarness, ClusteredRecordStoreSeekNear) { for (int i = 0; i < numRecords; i++) { // Generate an OID RecordId with a timestamp part and high bits elsewhere such that it // always compares greater than or equal to the OIDs we inserted. 
+ + auto oid = OID::max(); oid.setTimestamp(i); - auto rid = RecordId(oid.view().view(), OID::kOIDSize); + + auto rid = record_id_helpers::keyForOID(oid); auto cur = rs->getCursor(opCtx.get()); auto rec = cur->seekNear(rid); ASSERT(rec); @@ -569,9 +578,11 @@ TEST(RecordStoreTestHarness, ClusteredRecordStoreSeekNear) { for (int i = 0; i < numRecords; i++) { // Generate an OID RecordId with only a timestamp part and zeroes elsewhere such that it // always compares less than or equal to the OIDs we inserted. + auto oid = OID(); oid.setTimestamp(i); - auto rid = RecordId(oid.view().view(), OID::kOIDSize); + + auto rid = record_id_helpers::keyForOID(oid); auto cur = rs->getCursor(opCtx.get(), false /* forward */); auto rec = cur->seekNear(rid); ASSERT(rec); diff --git a/src/mongo/db/storage/record_store_test_oplog.cpp b/src/mongo/db/storage/record_store_test_oplog.cpp index d037bb0bef8..2382f77acaf 100644 --- a/src/mongo/db/storage/record_store_test_oplog.cpp +++ b/src/mongo/db/storage/record_store_test_oplog.cpp @@ -302,7 +302,7 @@ TEST(RecordStoreTestHarness, OplogOrder) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); auto cursor = rs->getCursor(opCtx.get()); - auto record = cursor->seekNear(RecordId(id1.asLong() + 1)); + auto record = cursor->seekNear(RecordId(id1.getLong() + 1)); ASSERT(record); ASSERT_EQ(id1, record->id); ASSERT(!cursor->next()); diff --git a/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp b/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp index 6ae7a83205d..0bdfa6db0b6 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp @@ -111,8 +111,8 @@ TEST(SortedDataInterface, BuilderAddKeyWithReservedRecordId) { sorted->makeBulkBuilder(opCtx.get(), true)); RecordId reservedLoc( - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); - 
invariant(RecordId::isReserved<int64_t>(reservedLoc)); + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); + invariant(RecordIdReservations::isReserved(reservedLoc)); WriteUnitOfWork wuow(opCtx.get()); ASSERT_OK(builder->addKey(makeKeyString(sorted.get(), key1, reservedLoc))); diff --git a/src/mongo/db/storage/sorted_data_interface_test_insert.cpp b/src/mongo/db/storage/sorted_data_interface_test_insert.cpp index eede384528a..62d85fa6128 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_insert.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_insert.cpp @@ -708,8 +708,8 @@ TEST(SortedDataInterface, InsertReservedRecordId) { ASSERT(sorted->isEmpty(opCtx.get())); WriteUnitOfWork uow(opCtx.get()); RecordId reservedLoc( - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); - invariant(RecordId::isReserved<int64_t>(reservedLoc)); + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); + invariant(RecordIdReservations::isReserved(reservedLoc)); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, reservedLoc), /*dupsAllowed*/ true)); diff --git a/src/mongo/db/storage/sorted_data_interface_test_keyformat_string.cpp b/src/mongo/db/storage/sorted_data_interface_test_keyformat_string.cpp index 4b6c465e6a6..2812765b95a 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_keyformat_string.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_keyformat_string.cpp @@ -186,47 +186,6 @@ TEST(SortedDataInterface, KeyFormatStringSetEndPosition) { } } -TEST(SortedDataInterface, KeyFormatStringInsertReserved) { - const auto harnessHelper(newSortedDataInterfaceHarnessHelper()); - const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface( - /*unique=*/false, /*partial=*/false, KeyFormat::String)); - if (!sorted) { - // Not supported by this storage engine. 
- return; - } - const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); - ASSERT(sorted->isEmpty(opCtx.get())); - - RecordId reservedLoc( - RecordId::reservedIdFor<OID>(RecordId::Reservation::kWildcardMultikeyMetadataId)); - invariant(RecordId::isReserved<OID>(reservedLoc)); - { - WriteUnitOfWork uow(opCtx.get()); - ASSERT_OK(sorted->insert(opCtx.get(), - makeKeyString(sorted.get(), key1, reservedLoc), - /*dupsAllowed*/ true)); - uow.commit(); - } - ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); - - auto ksSeek = makeKeyStringForSeek(sorted.get(), key1, true, true); - { - auto cursor = sorted->newCursor(opCtx.get()); - auto entry = cursor->seek(ksSeek); - ASSERT(entry); - ASSERT_EQ(*entry, IndexKeyEntry(key1, reservedLoc)); - } - - { - auto cursor = sorted->newCursor(opCtx.get()); - auto entry = cursor->seekForKeyString(ksSeek); - ASSERT(entry); - ASSERT_EQ(entry->loc, reservedLoc); - auto ks1 = makeKeyString(sorted.get(), key1, reservedLoc); - ASSERT_EQ(entry->keyString, ks1); - } -} - TEST(SortedDataInterface, KeyFormatStringUnindex) { const auto harnessHelper(newSortedDataInterfaceHarnessHelper()); const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface( diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp index 4c4cb283643..7c68769472e 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp @@ -63,7 +63,7 @@ void WiredTigerOplogManager::startVisibilityThread(OperationContext* opCtx, // event of a secondary crashing, replication recovery will truncate the oplog, resetting // visibility to the truncate point. In the event of a primary crashing, it will perform // rollback before servicing oplog reads. 
- auto topOfOplogTimestamp = Timestamp(lastRecord->id.asLong()); + auto topOfOplogTimestamp = Timestamp(lastRecord->id.getLong()); setOplogReadTimestamp(topOfOplogTimestamp); LOGV2_DEBUG(22368, 1, @@ -174,7 +174,7 @@ void WiredTigerOplogManager::waitForAllEarlierOplogWritesToBeVisible( LOGV2_DEBUG(22371, 2, "Operation is waiting for an entry to become visible in the oplog.", - "awaitedOplogEntryTimestamp"_attr = Timestamp(waitingFor.asLong()), + "awaitedOplogEntryTimestamp"_attr = Timestamp(waitingFor.getLong()), "currentLatestVisibleOplogEntryTimestamp"_attr = Timestamp(currentLatestVisibleTimestamp)); } diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp index 481d1ba160e..3f339863f62 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp @@ -90,13 +90,13 @@ struct RecordIdAndWall { RecordIdAndWall(RecordId lastRecord, Date_t wallTime) : id(lastRecord), wall(wallTime) {} }; -WiredTigerRecordStore::CursorKey makeCursorKey(const RecordId& rid) { - WiredTigerRecordStore::CursorKey cursorKey; - rid.withFormat( - [](RecordId::Null n) { invariant(false); }, - [&](int64_t rid) { cursorKey.emplace<int64_t>(rid); }, - [&](const char* str, int size) { cursorKey.emplace<WiredTigerItem>(str, size); }); - return cursorKey; +WiredTigerRecordStore::CursorKey makeCursorKey(const RecordId& rid, KeyFormat format) { + if (format == KeyFormat::Long) { + return rid.getLong(); + } else { + auto str = rid.getStr(); + return WiredTigerItem(str.rawData(), str.size()); + } } static const int kMinimumRecordStoreVersion = 1; @@ -258,7 +258,7 @@ void WiredTigerRecordStore::OplogStones::awaitHasExcessStonesOrDead() { "wallTime"_attr = stone.wallTime, "pinnedOplog"_attr = _rs->getPinnedOplog()); - if (static_cast<std::uint64_t>(stone.lastRecord.asLong()) < + if (static_cast<std::uint64_t>(stone.lastRecord.getLong()) < 
_rs->getPinnedOplog().asULL()) { break; } @@ -515,7 +515,7 @@ void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationCon _calculateStonesByScanning(opCtx); return; } - earliestOpTime = Timestamp(record->id.asLong()); + earliestOpTime = Timestamp(record->id.getLong()); } { @@ -530,7 +530,7 @@ void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationCon _calculateStonesByScanning(opCtx); return; } - latestOpTime = Timestamp(record->id.asLong()); + latestOpTime = Timestamp(record->id.getLong()); } LOGV2(22389, @@ -1040,7 +1040,7 @@ bool WiredTigerRecordStore::findRecord(OperationContext* opCtx, WiredTigerCursor curwrap(_uri, _tableId, true, opCtx); WT_CURSOR* c = curwrap.get(); invariant(c); - CursorKey key = makeCursorKey(id); + CursorKey key = makeCursorKey(id, _keyFormat); setKey(c, &key); int ret = wiredTigerPrepareConflictRetry(opCtx, [&] { return c->search(c); }); if (ret == WT_NOTFOUND) { @@ -1070,7 +1070,7 @@ void WiredTigerRecordStore::deleteRecord(OperationContext* opCtx, const RecordId WiredTigerCursor cursor(_uri, _tableId, true, opCtx); cursor.assertInActiveTxn(); WT_CURSOR* c = cursor.get(); - CursorKey key = makeCursorKey(id); + CursorKey key = makeCursorKey(id, _keyFormat); setKey(c, &key); int ret = wiredTigerPrepareConflictRetry(opCtx, [&] { return c->search(c); }); invariantWTOK(ret); @@ -1136,7 +1136,7 @@ void WiredTigerRecordStore::reclaimOplog(OperationContext* opCtx, Timestamp mayT while (auto stone = _oplogStones->peekOldestStoneIfNeeded()) { invariant(stone->lastRecord.isValid()); - if (static_cast<std::uint64_t>(stone->lastRecord.asLong()) >= mayTruncateUpTo.asULL()) { + if (static_cast<std::uint64_t>(stone->lastRecord.getLong()) >= mayTruncateUpTo.asULL()) { // Do not truncate oplogs needed for replication recovery. 
return; } @@ -1176,7 +1176,7 @@ void WiredTigerRecordStore::reclaimOplog(OperationContext* opCtx, Timestamp mayT // It is necessary that there exists a record after the stone but before or including // the mayTruncateUpTo point. Since the mayTruncateUpTo point may fall between // records, the stone check is not sufficient. - CursorKey key = makeCursorKey(stone->lastRecord); + CursorKey key = makeCursorKey(stone->lastRecord, _keyFormat); setKey(cursor, &key); ret = wiredTigerPrepareConflictRetry(opCtx, [&] { return cursor->search(cursor); }); invariantWTOK(ret); @@ -1187,12 +1187,12 @@ void WiredTigerRecordStore::reclaimOplog(OperationContext* opCtx, Timestamp mayT } invariantWTOK(ret); RecordId nextRecord = getKey(cursor); - if (static_cast<std::uint64_t>(nextRecord.asLong()) > mayTruncateUpTo.asULL()) { + if (static_cast<std::uint64_t>(nextRecord.getLong()) > mayTruncateUpTo.asULL()) { LOGV2_DEBUG(5140901, 0, "Cannot truncate as there are no oplog entries after the stone but " "before the truncate-up-to point", - "nextRecord"_attr = Timestamp(nextRecord.asLong()), + "nextRecord"_attr = Timestamp(nextRecord.getLong()), "mayTruncateUpTo"_attr = mayTruncateUpTo); return; } @@ -1259,7 +1259,7 @@ Status WiredTigerRecordStore::_insertRecords(OperationContext* opCtx, auto& record = records[i]; if (_isOplog) { StatusWith<RecordId> status = - record_id_helpers::extractKey(record.data.data(), record.data.size()); + record_id_helpers::extractKeyOptime(record.data.data(), record.data.size()); if (!status.isOK()) return status.getStatus(); record.id = status.getValue(); @@ -1282,7 +1282,7 @@ Status WiredTigerRecordStore::_insertRecords(OperationContext* opCtx, // flush. Because these are direct writes into the oplog, the machinery to trigger a // journal flush is bypassed. A followup oplog read will require a fresh visibility // value to make progress. 
- ts = Timestamp(record.id.asLong()); + ts = Timestamp(record.id.getLong()); opCtx->recoveryUnit()->setOrderedCommit(false); } else { ts = timestamps[i]; @@ -1291,7 +1291,7 @@ Status WiredTigerRecordStore::_insertRecords(OperationContext* opCtx, LOGV2_DEBUG(22403, 4, "inserting record with timestamp {ts}", "ts"_attr = ts); fassert(39001, opCtx->recoveryUnit()->setTimestamp(ts)); } - CursorKey key = makeCursorKey(record.id); + CursorKey key = makeCursorKey(record.id, _keyFormat); setKey(c, &key); WiredTigerItem value(record.data.data(), record.data.size()); c->set_value(c, value.Get()); @@ -1303,9 +1303,8 @@ Status WiredTigerRecordStore::_insertRecords(OperationContext* opCtx, // Generate a useful error message that is consistent with duplicate key error messages // on indexes. - BSONObjBuilder builder; - builder.append("", OID::from(record.id.strData())); - return buildDupKeyErrorStatus(builder.obj(), + BSONObj obj = record_id_helpers::toBSONAs(record.id, ""); + return buildDupKeyErrorStatus(obj, NamespaceString(ns()), "" /* indexName */, BSON("_id" << 1), @@ -1336,10 +1335,10 @@ Status WiredTigerRecordStore::_insertRecords(OperationContext* opCtx, bool WiredTigerRecordStore::isOpHidden_forTest(const RecordId& id) const { invariant(_isOplog); - invariant(id.asLong() > 0); + invariant(id.getLong() > 0); invariant(_kvEngine->getOplogManager()->isRunning()); return _kvEngine->getOplogManager()->getOplogReadTimestamp() < - static_cast<std::uint64_t>(id.asLong()); + static_cast<std::uint64_t>(id.getLong()); } bool WiredTigerRecordStore::haveCappedWaiters() { @@ -1376,7 +1375,7 @@ StatusWith<Timestamp> WiredTigerRecordStore::getLatestOplogTimestamp( RecordId recordId = getKey(cursor); - return {Timestamp(static_cast<unsigned long long>(recordId.asLong()))}; + return {Timestamp(static_cast<unsigned long long>(recordId.getLong()))}; } StatusWith<Timestamp> WiredTigerRecordStore::getEarliestOplogTimestamp(OperationContext* opCtx) { @@ -1402,7 +1401,7 @@ 
StatusWith<Timestamp> WiredTigerRecordStore::getEarliestOplogTimestamp(Operation _oplogFirstRecord = getKey(cursor); } - return {Timestamp(static_cast<unsigned long long>(_oplogFirstRecord.asLong()))}; + return {Timestamp(static_cast<unsigned long long>(_oplogFirstRecord.getLong()))}; } Status WiredTigerRecordStore::updateRecord(OperationContext* opCtx, @@ -1416,7 +1415,7 @@ Status WiredTigerRecordStore::updateRecord(OperationContext* opCtx, curwrap.assertInActiveTxn(); WT_CURSOR* c = curwrap.get(); invariant(c); - CursorKey key = makeCursorKey(id); + CursorKey key = makeCursorKey(id, _keyFormat); setKey(c, &key); int ret = wiredTigerPrepareConflictRetry(opCtx, [&] { return c->search(c); }); @@ -1526,7 +1525,7 @@ StatusWith<RecordData> WiredTigerRecordStore::updateWithDamages( curwrap.assertInActiveTxn(); WT_CURSOR* c = curwrap.get(); invariant(c); - CursorKey key = makeCursorKey(id); + CursorKey key = makeCursorKey(id, _keyFormat); setKey(c, &key); // The test harness calls us with empty damage vectors which WiredTiger doesn't allow. @@ -1745,7 +1744,7 @@ void WiredTigerRecordStore::_initNextIdIfNeeded(OperationContext* opCtx) { // Find the largest RecordId currently in use. 
std::unique_ptr<SeekableRecordCursor> cursor = getCursor(opCtx, /*forward=*/false); if (auto record = cursor->next()) { - nextId = record->id.asLong() + 1; + nextId = record->id.getLong() + 1; } _nextIdNum.store(nextId); @@ -1900,7 +1899,7 @@ void WiredTigerRecordStore::cappedTruncateAfter(OperationContext* opCtx, WiredTigerCursor startwrap(_uri, _tableId, true, opCtx); WT_CURSOR* start = startwrap.get(); - CursorKey key = makeCursorKey(firstRemovedId); + CursorKey key = makeCursorKey(firstRemovedId, _keyFormat); setKey(start, &key); WT_SESSION* session = WiredTigerRecoveryUnit::get(opCtx)->getSession()->getSession(); @@ -1914,7 +1913,7 @@ void WiredTigerRecordStore::cappedTruncateAfter(OperationContext* opCtx, if (_isOplog) { // Immediately rewind visibility to our truncation point, to prevent new // transactions from appearing. - Timestamp truncTs(lastKeptId.asLong()); + Timestamp truncTs(lastKeptId.getLong()); if (!serverGlobalParams.enableMajorityReadConcern && _kvEngine->getOldestTimestamp() > truncTs) { @@ -2007,7 +2006,7 @@ boost::optional<Record> WiredTigerRecordStoreCursorBase::next() { id = getKey(c); } - if (_forward && _oplogVisibleTs && id.asLong() > *_oplogVisibleTs) { + if (_forward && _oplogVisibleTs && id.getLong() > *_oplogVisibleTs) { _eof = true; return {}; } @@ -2040,7 +2039,7 @@ boost::optional<Record> WiredTigerRecordStoreCursorBase::next() { boost::optional<Record> WiredTigerRecordStoreCursorBase::seekExact(const RecordId& id) { invariant(_hasRestored); - if (_forward && _oplogVisibleTs && id.asLong() > *_oplogVisibleTs) { + if (_forward && _oplogVisibleTs && id.getLong() > *_oplogVisibleTs) { _eof = true; return {}; } @@ -2052,7 +2051,7 @@ boost::optional<Record> WiredTigerRecordStoreCursorBase::seekExact(const RecordI _skipNextAdvance = false; WT_CURSOR* c = _cursor->get(); - WiredTigerRecordStore::CursorKey key = makeCursorKey(id); + auto key = makeCursorKey(id, _rs.keyFormat()); setKey(c, &key); // Nothing after the next line can throw 
WCEs. int seekRet = wiredTigerPrepareConflictRetry(_opCtx, [&] { return c->search(c); }); @@ -2080,7 +2079,7 @@ boost::optional<Record> WiredTigerRecordStoreCursorBase::seekNear(const RecordId // Forward scans on the oplog must round down to the oplog visibility timestamp. RecordId start = id; - if (_forward && _oplogVisibleTs && start.asLong() > *_oplogVisibleTs) { + if (_forward && _oplogVisibleTs && start.getLong() > *_oplogVisibleTs) { start = RecordId(*_oplogVisibleTs); } @@ -2088,7 +2087,7 @@ boost::optional<Record> WiredTigerRecordStoreCursorBase::seekNear(const RecordId WiredTigerRecoveryUnit::get(_opCtx)->getSession(); WT_CURSOR* c = _cursor->get(); - WiredTigerRecordStore::CursorKey key = makeCursorKey(start); + auto key = makeCursorKey(start, _rs.keyFormat()); setKey(c, &key); int cmp; @@ -2129,7 +2128,7 @@ boost::optional<Record> WiredTigerRecordStoreCursorBase::seekNear(const RecordId // For forward cursors on the oplog, the oplog visible timestamp is treated as the end of the // record store. So if we are positioned past this point, then there are no visible records. 
- if (_forward && _oplogVisibleTs && curId.asLong() > *_oplogVisibleTs) { + if (_forward && _oplogVisibleTs && curId.getLong() > *_oplogVisibleTs) { _eof = true; return boost::none; } @@ -2186,7 +2185,7 @@ bool WiredTigerRecordStoreCursorBase::restore() { } WT_CURSOR* c = _cursor->get(); - WiredTigerRecordStore::CursorKey key = makeCursorKey(_lastReturnedId); + auto key = makeCursorKey(_lastReturnedId, _rs.keyFormat()); setKey(c, &key); int cmp; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp index d9c37841b71..879f1147ae2 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp @@ -863,7 +863,7 @@ TEST(WiredTigerRecordStoreTest, GetLatestOplogTest) { ServiceContext::UniqueOperationContext op1(harnessHelper->newOperationContext()); op1->recoveryUnit()->beginUnitOfWork(op1.get()); Timestamp tsOne = Timestamp( - static_cast<unsigned long long>(_oplogOrderInsertOplog(op1.get(), rs, 1).asLong())); + static_cast<unsigned long long>(_oplogOrderInsertOplog(op1.get(), rs, 1).getLong())); op1->recoveryUnit()->commitUnitOfWork(); // Asserting on a recovery unit without a snapshot. ASSERT_EQ(tsOne, wtrs->getLatestOplogTimestamp(op1.get())); @@ -882,7 +882,7 @@ TEST(WiredTigerRecordStoreTest, GetLatestOplogTest) { ServiceContext::UniqueOperationContext op2(harnessHelper->newOperationContext()); op2->recoveryUnit()->beginUnitOfWork(op2.get()); Timestamp tsThree = Timestamp( - static_cast<unsigned long long>(_oplogOrderInsertOplog(op2.get(), rs, 3).asLong())); + static_cast<unsigned long long>(_oplogOrderInsertOplog(op2.get(), rs, 3).getLong())); // Before committing, the query still only sees timestamp "1". 
ASSERT_EQ(tsOne, wtrs->getLatestOplogTimestamp(op2.get())); op2->recoveryUnit()->commitUnitOfWork(); diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache_test.cpp index 7d3326f57db..a894d189afb 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache_test.cpp @@ -33,6 +33,7 @@ #include <string> #include "mongo/base/string_data.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_cursor.h" #include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" #include "mongo/unittest/temp_dir.h" diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp index 547caa92b5a..0e82868891d 100644 --- a/src/mongo/db/ttl.cpp +++ b/src/mongo/db/ttl.cpp @@ -49,6 +49,7 @@ #include "mongo/db/namespace_string.h" #include "mongo/db/ops/insert.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/record_id_helpers.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/tenant_migration_access_blocker_registry.h" #include "mongo/db/service_context.h" @@ -470,7 +471,8 @@ private: // timestamp or lower. 
auto endOID = OID(); endOID.init(expirationDate, true /* max */); - const auto endId = RecordId(endOID.view().view(), OID::kOIDSize); + + const auto endId = record_id_helpers::keyForOID(endOID); auto params = std::make_unique<DeleteStageParams>(); params->isMulti = true; diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp index a79f93c7be2..31da0fd9220 100644 --- a/src/mongo/dbtests/query_stage_collscan.cpp +++ b/src/mongo/dbtests/query_stage_collscan.cpp @@ -48,6 +48,7 @@ #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/namespace_string.h" #include "mongo/db/query/plan_executor_factory.h" +#include "mongo/db/record_id_helpers.h" #include "mongo/db/storage/record_store.h" #include "mongo/dbtests/dbtests.h" #include "mongo/unittest/unittest.h" @@ -600,8 +601,8 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredNonExistentRecord params.tailable = false; // Use RecordIds that don't exist. Expect to see all records. - params.minRecord = RecordId(OID().view().view(), OID::kOIDSize); - params.maxRecord = RecordId(OID::max().view().view(), OID::kOIDSize); + params.minRecord = record_id_helpers::keyForOID(OID()); + params.maxRecord = record_id_helpers::keyForOID(OID::max()); WorkingSet ws; auto scan = std::make_unique<CollectionScan>(_expCtx.get(), coll, params, &ws, nullptr); @@ -708,9 +709,10 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInnerRangeExclusi // Provide RecordId bounds with exclusive filters. 
StatusWithMatchExpression swMatch = MatchExpressionParser::parse( - fromjson(fmt::sprintf("{_id: {$gt: ObjectId('%s'), $lt: ObjectId('%s')}}", - params.minRecord->toString(), - params.maxRecord->toString())), + fromjson(fmt::sprintf( + "{_id: {$gt: ObjectId('%s'), $lt: ObjectId('%s')}}", + record_id_helpers::toBSONAs(*params.minRecord, "").firstElement().OID().toString(), + record_id_helpers::toBSONAs(*params.maxRecord, "").firstElement().OID().toString())), _expCtx.get()); ASSERT_OK(swMatch.getStatus()); auto filter = std::move(swMatch.getValue()); @@ -772,9 +774,10 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInnerRangeExclusi // Provide RecordId bounds with exclusive filters. StatusWithMatchExpression swMatch = MatchExpressionParser::parse( - fromjson(fmt::sprintf("{_id: {$gt: ObjectId('%s'), $lt: ObjectId('%s')}}", - params.minRecord->toString(), - params.maxRecord->toString())), + fromjson(fmt::sprintf( + "{_id: {$gt: ObjectId('%s'), $lt: ObjectId('%s')}}", + record_id_helpers::toBSONAs(*params.minRecord, "").firstElement().OID().toString(), + record_id_helpers::toBSONAs(*params.maxRecord, "").firstElement().OID().toString())), _expCtx.get()); ASSERT_OK(swMatch.getStatus()); auto filter = std::move(swMatch.getValue()); diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp index 058bc026ebf..126d6e37cd6 100644 --- a/src/mongo/dbtests/validate_tests.cpp +++ b/src/mongo/dbtests/validate_tests.cpp @@ -960,7 +960,7 @@ public: // Insert additional multikey path metadata index keys. 
lockDb(MODE_X); const RecordId recordId( - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId)); + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); const IndexCatalog* indexCatalog = coll->getIndexCatalog(); auto descriptor = indexCatalog->findIndexByName(&_opCtx, indexName); auto accessMethod = @@ -1088,8 +1088,8 @@ public: lockDb(MODE_X); { WriteUnitOfWork wunit(&_opCtx); - RecordId recordId(RecordId::reservedIdFor<int64_t>( - RecordId::Reservation::kWildcardMultikeyMetadataId)); + RecordId recordId( + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId)); const KeyString::Value indexKey = KeyString::HeapBuilder(sortedDataInterface->getKeyStringVersion(), BSON("" << 1 << "" diff --git a/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp b/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp index 7ad3bf7a10d..bd912930467 100644 --- a/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp +++ b/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp @@ -46,7 +46,7 @@ namespace { using namespace unittest; static const RecordId kMetadataId = - RecordId::reservedIdFor<int64_t>(RecordId::Reservation::kWildcardMultikeyMetadataId); + RecordIdReservations::reservedIdFor(ReservationId::kWildcardMultikeyMetadataId); static const int kIndexVersion = static_cast<int>(IndexDescriptor::kLatestIndexVersion); static const NamespaceString kDefaultNSS{"wildcard_multikey_persistence.test"}; |