author     Louis Williams <louis.williams@mongodb.com>       2021-02-09 09:17:11 -0500
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-02-09 15:35:45 +0000
commit     b8a9330fe1591763ef40d26595b5668e581a1248 (patch)
tree       2c9a659742c01ab1432c06bb7f063f027643b2e4 /src/mongo
parent     81cf90e5078c7481f1cb6a468c25accdc75c24ce (diff)
SERVER-53989 RecordStore determines its key format from the durable catalog
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/catalog/clustered_index_options.idl  10
-rw-r--r--  src/mongo/db/catalog/collection.h  6
-rw-r--r--  src/mongo/db/catalog/collection_impl.cpp  29
-rw-r--r--  src/mongo/db/catalog/collection_impl.h  5
-rw-r--r--  src/mongo/db/catalog/collection_mock.h  4
-rw-r--r--  src/mongo/db/catalog/create_collection.cpp  1
-rw-r--r--  src/mongo/db/exec/collection_scan.cpp  3
-rw-r--r--  src/mongo/db/exec/collection_scan.h  16
-rw-r--r--  src/mongo/db/storage/SConscript  1
-rw-r--r--  src/mongo/db/storage/devnull/devnull_kv_engine.cpp  4
-rw-r--r--  src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h  4
-rw-r--r--  src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h  4
-rw-r--r--  src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp  5
-rw-r--r--  src/mongo/db/storage/record_store.h  37
-rw-r--r--  src/mongo/db/storage/record_store_test_harness.cpp  6
-rw-r--r--  src/mongo/db/storage/record_store_test_harness.h  8
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp  4
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp  39
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h  8
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp  2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp  17
21 files changed, 130 insertions, 83 deletions
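
This change replaces the boolean RecordStore::isClustered() with a KeyFormat enum and derives that format from the collection's durable catalog options (options.clusteredIndex) rather than from the namespace. A minimal sketch of the mapping this diff applies in WiredTigerKVEngine::getRecordStore(); the helper name below is hypothetical:

    // Clustered collections use String (OID) record keys; everything else
    // keeps signed 64-bit integer keys generated by the storage engine.
    KeyFormat keyFormatForOptions(const CollectionOptions& options) {
        return options.clusteredIndex ? KeyFormat::String : KeyFormat::Long;
    }
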
diff --git a/src/mongo/db/catalog/clustered_index_options.idl b/src/mongo/db/catalog/clustered_index_options.idl
index 7355abff331..32ed09cb0d5 100644
--- a/src/mongo/db/catalog/clustered_index_options.idl
+++ b/src/mongo/db/catalog/clustered_index_options.idl
@@ -31,21 +31,11 @@ global:
imports:
- "mongo/idl/basic_types.idl"
-enums:
- KeyFormat:
- description: "The storage format of the RecordId keys for this collection"
- type: string
- values:
- OID: OID
-
structs:
ClusteredIndexOptions:
description: "The options that define a clustered _id index on a collection."
strict: true
fields:
- keyFormat:
- description: "The storage format of the RecordId keys for this collection"
- type: KeyFormat
expireAfterSeconds:
description: "The number of seconds after which old data should be deleted."
type: safeInt64
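
With the keyFormat field removed from the IDL, a clustered collection's options carry only the clustering marker and an optional TTL; the storage layer infers the key format. A small sketch under that assumption, using the IDL-generated setter seen in create_collection.cpp further down:

    ClusteredIndexOptions clusteredOptions;        // no setKeyFormat() anymore
    clusteredOptions.setExpireAfterSeconds(3600);  // optional TTL, in seconds
    CollectionOptions options;
    options.clusteredIndex = clusteredOptions;     // storage derives KeyFormat::String from this
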
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index f789a492680..1b4aadca79f 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -502,6 +502,12 @@ public:
*/
virtual bool isTemporary(OperationContext* opCtx) const = 0;
+ /**
+ * Returns true if this collection is clustered on _id values. That is, its RecordIds are _id
+ * values and it has no separate _id index.
+ */
+ virtual bool isClustered() const = 0;
+
//
// Stats
//
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index 8565f8cc65e..152859a2d0c 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -329,6 +329,10 @@ void CollectionImpl::init(OperationContext* opCtx) {
"validatorStatus"_attr = _validator.getStatus());
}
+ if (collectionOptions.clusteredIndex) {
+ _clustered = true;
+ }
+
getIndexCatalog()->init(opCtx).transitional_ignore();
_initialized = true;
}
@@ -622,10 +626,21 @@ Status CollectionImpl::insertDocumentForBulkLoader(
dassert(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_IX));
+ RecordId recordId;
+ if (isClustered()) {
+ // Collections clustered by _id require ObjectId values.
+ BSONElement oidElem;
+ bool foundId = doc.getObjectID(oidElem);
+ uassert(ErrorCodes::BadValue,
+ str::stream() << "Document " << redact(doc) << " is missing the '_id' field",
+ foundId);
+ recordId = RecordId(oidElem.OID());
+ }
+
// Using timestamp 0 for these inserts, which are non-oplog so we don't have an appropriate
// timestamp to use.
- StatusWith<RecordId> loc =
- _shared->_recordStore->insertRecord(opCtx, doc.objdata(), doc.objsize(), Timestamp());
+ StatusWith<RecordId> loc = _shared->_recordStore->insertRecord(
+ opCtx, recordId, doc.objdata(), doc.objsize(), Timestamp());
if (!loc.isOK())
return loc.getStatus();
@@ -692,8 +707,8 @@ Status CollectionImpl::_insertDocuments(OperationContext* opCtx,
const auto& doc = it->doc;
RecordId recordId;
- if (_shared->_recordStore->isClustered()) {
- // Extract the ObjectId from the document's _id field.
+ if (isClustered()) {
+ // Collections clustered by _id require ObjectId values.
BSONElement oidElem;
bool foundId = doc.getObjectID(oidElem);
uassert(ErrorCodes::BadValue,
@@ -722,7 +737,7 @@ Status CollectionImpl::_insertDocuments(OperationContext* opCtx,
int recordIndex = 0;
for (auto it = begin; it != end; it++) {
RecordId loc = records[recordIndex++].id;
- if (_shared->_recordStore->isClustered()) {
+ if (isClustered()) {
invariant(RecordId::min<OID>() < loc);
invariant(loc < RecordId::max<OID>());
} else {
@@ -962,6 +977,10 @@ bool CollectionImpl::isTemporary(OperationContext* opCtx) const {
return DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, getCatalogId()).temp;
}
+bool CollectionImpl::isClustered() const {
+ return _clustered;
+}
+
bool CollectionImpl::getRecordPreImages() const {
return _recordPreImages;
}
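
For clustered collections the caller now supplies the RecordId explicitly, derived from the document's _id ObjectId, and hands it to the new insertRecord() overload. A condensed sketch of that path, following the bulk-loader code above (error handling reduced to the single uassert; collection and recordStore are assumed to be in scope):

    RecordId recordId;
    if (collection->isClustered()) {
        BSONElement oidElem;
        uassert(ErrorCodes::BadValue,
                str::stream() << "Document " << redact(doc) << " is missing the '_id' field",
                doc.getObjectID(oidElem));
        recordId = RecordId(oidElem.OID());
    }
    // A null recordId lets a KeyFormat::Long store generate the int64_t key itself.
    StatusWith<RecordId> loc = recordStore->insertRecord(
        opCtx, recordId, doc.objdata(), doc.objsize(), Timestamp());
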
diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h
index 553c9e541f8..8a8ab870a09 100644
--- a/src/mongo/db/catalog/collection_impl.h
+++ b/src/mongo/db/catalog/collection_impl.h
@@ -293,6 +293,8 @@ public:
bool isTemporary(OperationContext* opCtx) const final;
+ bool isClustered() const final;
+
//
// Stats
//
@@ -463,6 +465,9 @@ private:
boost::optional<ValidationActionEnum> _validationAction;
boost::optional<ValidationLevelEnum> _validationLevel;
+ // Whether or not this collection is clustered on _id values.
+ bool _clustered = false;
+
bool _recordPreImages = false;
// The earliest snapshot that is allowed to use this collection.
diff --git a/src/mongo/db/catalog/collection_mock.h b/src/mongo/db/catalog/collection_mock.h
index 5fcd1c5ca83..d5142353fd7 100644
--- a/src/mongo/db/catalog/collection_mock.h
+++ b/src/mongo/db/catalog/collection_mock.h
@@ -228,6 +228,10 @@ public:
std::abort();
}
+ bool isClustered() const {
+ std::abort();
+ }
+
bool getRecordPreImages() const {
std::abort();
}
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index 81df2b85d43..fbf259fe699 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -243,7 +243,6 @@ Status _createTimeseries(OperationContext* opCtx,
// Time-series buckets collections are clustered by _id using the ObjectId type by default.
ClusteredIndexOptions clusteredOptions;
- clusteredOptions.setKeyFormat(KeyFormatEnum::OID);
if (auto expireAfterSeconds = options.timeseries->getExpireAfterSeconds()) {
clusteredOptions.setExpireAfterSeconds(*expireAfterSeconds);
}
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index 04a43ecc30e..559f6cc360a 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -64,8 +64,7 @@ CollectionScan::CollectionScan(ExpressionContext* expCtx,
: RequiresCollectionStage(kStageType, expCtx, collection),
_workingSet(workingSet),
_filter((filter && !filter->isTriviallyTrue()) ? filter : nullptr),
- _params(params),
- _isClustered(collection->getRecordStore()->isClustered()) {
+ _params(params) {
// Explain reports the direction of the collection scan.
_specificStats.direction = params.direction;
_specificStats.minTs = params.minTs;
diff --git a/src/mongo/db/exec/collection_scan.h b/src/mongo/db/exec/collection_scan.h
index b74986a8698..7ec2b58fdcb 100644
--- a/src/mongo/db/exec/collection_scan.h
+++ b/src/mongo/db/exec/collection_scan.h
@@ -77,15 +77,10 @@ public:
BSONObj getPostBatchResumeToken() const {
// Return a resume token compatible with resumable initial sync.
if (_params.requestResumeToken) {
- if (_lastSeenId.isNull()) {
- return BSON("$recordId" << NullLabeler{});
- }
-
- if (_isClustered) {
- return BSON("$recordId" << _lastSeenId.as<OID>());
- } else {
- return BSON("$recordId" << _lastSeenId.as<int64_t>());
- }
+ return _lastSeenId.withFormat(
+ [](RecordId::Null n) { return BSON("$recordId" << NullLabeler{}); },
+ [](int64_t rid) { return BSON("$recordId" << rid); },
+ [](const OID& oid) { return BSON("$recordId" << oid); });
}
// Return a resume token compatible with resharding oplog sync.
if (_params.shouldTrackLatestOplogTimestamp) {
@@ -138,9 +133,6 @@ private:
CollectionScanParams _params;
- // Collections with clustered indexes on _id use the ObjectId format for RecordId. All other
- // collections use int64_t for RecordId.
- const bool _isClustered;
RecordId _lastSeenId; // Null if nothing has been returned from _cursor yet.
// If _params.shouldTrackLatestOplogTimestamp is set and the collection is the oplog, the latest
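
RecordId::withFormat() dispatches on the id's runtime representation, which is why CollectionScan no longer needs to cache whether the collection is clustered. A hypothetical further use, assuming (as the resume-token code above suggests) that withFormat() takes one callable per format and returns their common result type:

    // Render any RecordId as text without knowing the collection's key format.
    std::string text = recordId.withFormat(
        [](RecordId::Null) { return std::string("null"); },
        [](int64_t rid) { return std::to_string(rid); },
        [](const OID& oid) { return oid.toString(); });
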
diff --git a/src/mongo/db/storage/SConscript b/src/mongo/db/storage/SConscript
index 0d5409af7af..43f8ee4820b 100644
--- a/src/mongo/db/storage/SConscript
+++ b/src/mongo/db/storage/SConscript
@@ -255,6 +255,7 @@ env.Library(
'record_store_test_updatewithdamages.cpp',
],
LIBDEPS=[
+ '$BUILD_DIR/mongo/db/catalog/collection_options',
'$BUILD_DIR/mongo/db/service_context',
'$BUILD_DIR/mongo/db/storage/storage_options',
'$BUILD_DIR/mongo/unittest/unittest',
diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
index 7e4c3c6770e..0ddc0af6b6e 100644
--- a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
+++ b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
@@ -81,8 +81,8 @@ public:
return _options.capped;
}
- virtual bool isClustered() const {
- return false;
+ virtual KeyFormat keyFormat() const {
+ return KeyFormat::Long;
}
virtual int64_t storageSize(OperationContext* opCtx,
diff --git a/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h b/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h
index a18d4cefb03..2a405eddb93 100644
--- a/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h
+++ b/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h
@@ -58,8 +58,8 @@ public:
virtual const char* name() const;
- virtual bool isClustered() const {
- return false;
+ virtual KeyFormat keyFormat() const {
+ return KeyFormat::Long;
}
virtual RecordData dataFor(OperationContext* opCtx, const RecordId& loc) const;
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
index bf8eddbaa40..1758343b211 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
@@ -58,8 +58,8 @@ public:
~RecordStore() = default;
virtual const char* name() const;
- virtual bool isClustered() const {
- return false;
+ virtual KeyFormat keyFormat() const {
+ return KeyFormat::Long;
}
virtual long long dataSize(OperationContext* opCtx) const;
virtual long long numRecords(OperationContext* opCtx) const;
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp
index 278510f7c16..af44537dfd1 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp
@@ -52,10 +52,11 @@ public:
RecordStoreHarnessHelper() {}
virtual std::unique_ptr<mongo::RecordStore> newNonCappedRecordStore() {
- return newNonCappedRecordStore("a.b");
+ return newNonCappedRecordStore("a.b", CollectionOptions());
}
- virtual std::unique_ptr<mongo::RecordStore> newNonCappedRecordStore(const std::string& ns) {
+ virtual std::unique_ptr<mongo::RecordStore> newNonCappedRecordStore(
+ const std::string& ns, const CollectionOptions& collOptions) {
return std::make_unique<RecordStore>(ns,
"ident"_sd /* ident */,
false /* isCapped */,
diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h
index f4d377399ca..77fcfdc374c 100644
--- a/src/mongo/db/storage/record_store.h
+++ b/src/mongo/db/storage/record_store.h
@@ -61,6 +61,16 @@ struct Record {
};
/**
+ * The format of a RecordStore's RecordId keys.
+ */
+enum class KeyFormat {
+ /** Signed 64-bit integer */
+ Long,
+ /** Variable-length binary comparable data */
+ String,
+};
+
+/**
* Retrieves Records from a RecordStore.
*
* A cursor is constructed with a direction flag with the following effects:
@@ -228,12 +238,13 @@ public:
}
/**
- * Collections with clustered indexes on _id use the ObjectId format for RecordId. All other
- * RecordStores use int64_t for RecordId. Clustered RecordStores require callers to provide
- * RecordIds and will not generate them automatically. The oplog is already clustered internally
- * by timestamp, and cannot be clustered by ObjectId.
+ * The key format for this RecordStore's RecordIds.
+ *
+ * Collections with clustered indexes on _id may use the String format; however, most
+ * RecordStores use Long. RecordStores with the String format require callers to provide
+ * RecordIds and will not generate them automatically.
*/
- virtual bool isClustered() const = 0;
+ virtual KeyFormat keyFormat() const = 0;
/**
* The dataSize is an approximation of the sum of the sizes (in bytes) of the
@@ -329,11 +340,19 @@ public:
const char* data,
int len,
Timestamp timestamp) {
- // Clustered record stores do not generate unique ObjectIds for RecordIds. The expectation
- // is for the caller to pass a non-null RecordId.
- invariant(!isClustered());
+ // Record stores with the Long key format accept a null RecordId, as the storage engine will
+ // generate one.
+ invariant(keyFormat() == KeyFormat::Long);
+ return insertRecord(opCtx, RecordId(), data, len, timestamp);
+ }
- std::vector<Record> inOutRecords{Record{RecordId(), RecordData(data, len)}};
+ /**
+ * A thin wrapper around insertRecords() to simplify handling of single document inserts.
+ * If RecordId is null, the storage engine will generate one and return it.
+ */
+ StatusWith<RecordId> insertRecord(
+ OperationContext* opCtx, RecordId rid, const char* data, int len, Timestamp timestamp) {
+ std::vector<Record> inOutRecords{Record{rid, RecordData(data, len)}};
Status status = insertRecords(opCtx, &inOutRecords, std::vector<Timestamp>{timestamp});
if (!status.isOK())
return status;
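
The single-record insertRecord() now has two forms: the original three-argument overload asserts KeyFormat::Long and lets the engine generate the key, while the new overload takes an explicit RecordId for String-format (clustered) stores. A sketch of how a caller might branch on keyFormat(); rs, opCtx, doc and oid are assumed to be in scope:

    StatusWith<RecordId> res = (rs->keyFormat() == KeyFormat::String)
        // Clustered stores never generate ids; pass the _id-derived key.
        ? rs->insertRecord(opCtx, RecordId(oid), doc.objdata(), doc.objsize(), Timestamp())
        // Long-format stores accept a null RecordId and return the generated one.
        : rs->insertRecord(opCtx, doc.objdata(), doc.objsize(), Timestamp());
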
diff --git a/src/mongo/db/storage/record_store_test_harness.cpp b/src/mongo/db/storage/record_store_test_harness.cpp
index 4b8317987d5..7d4fed29240 100644
--- a/src/mongo/db/storage/record_store_test_harness.cpp
+++ b/src/mongo/db/storage/record_store_test_harness.cpp
@@ -408,8 +408,10 @@ TEST(RecordStoreTestHarness, Cursor1) {
TEST(RecordStoreTestHarness, ClusteredRecordStore) {
const std::string ns = "test.system.buckets.a";
const auto harnessHelper = newRecordStoreHarnessHelper();
- std::unique_ptr<RecordStore> rs = harnessHelper->newNonCappedRecordStore(ns);
- if (!rs->isClustered()) {
+ CollectionOptions options;
+ options.clusteredIndex = ClusteredIndexOptions{};
+ std::unique_ptr<RecordStore> rs = harnessHelper->newNonCappedRecordStore(ns, options);
+ if (rs->keyFormat() == KeyFormat::Long) {
// ephemeralForTest does not support clustered indexes.
return;
}
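
Tests opt into clustering through CollectionOptions rather than relying on the system.buckets namespace convention. The new two-argument harness method makes that explicit; a usage sketch mirroring the test above:

    CollectionOptions options;
    options.clusteredIndex = ClusteredIndexOptions{};
    auto rs = harnessHelper->newNonCappedRecordStore("test.system.buckets.a", options);
    if (rs->keyFormat() == KeyFormat::Long) {
        return;  // this engine (e.g. ephemeralForTest) does not support clustered indexes
    }
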
diff --git a/src/mongo/db/storage/record_store_test_harness.h b/src/mongo/db/storage/record_store_test_harness.h
index a620cf1aa31..638154c9b03 100644
--- a/src/mongo/db/storage/record_store_test_harness.h
+++ b/src/mongo/db/storage/record_store_test_harness.h
@@ -32,6 +32,7 @@
#include <cstdint>
#include <memory>
+#include "mongo/db/catalog/collection_options.h"
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/test_harness_helper.h"
@@ -45,7 +46,12 @@ class RecordStoreHarnessHelper : public HarnessHelper {
public:
virtual std::unique_ptr<RecordStore> newNonCappedRecordStore() = 0;
- virtual std::unique_ptr<RecordStore> newNonCappedRecordStore(const std::string& ns) = 0;
+ std::unique_ptr<RecordStore> newNonCappedRecordStore(const std::string& ns) {
+ return newNonCappedRecordStore(ns, CollectionOptions());
+ }
+
+ virtual std::unique_ptr<RecordStore> newNonCappedRecordStore(
+ const std::string& ns, const CollectionOptions& options) = 0;
static const int64_t kDefaultCapedSizeBytes = 16 * 1024 * 1024;
virtual std::unique_ptr<RecordStore> newCappedRecordStore(
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 76a85bc5d81..ae00ec30fab 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -1469,7 +1469,7 @@ std::unique_ptr<RecordStore> WiredTigerKVEngine::getRecordStore(OperationContext
params.ident = ident.toString();
params.engineName = _canonicalName;
params.isCapped = options.capped;
- params.isClustered = NamespaceString(ns).isTimeseriesBucketsCollection();
+ params.keyFormat = (options.clusteredIndex) ? KeyFormat::String : KeyFormat::Long;
params.isEphemeral = _ephemeral;
params.cappedCallback = nullptr;
params.sizeStorer = _sizeStorer.get();
@@ -1607,7 +1607,7 @@ std::unique_ptr<RecordStore> WiredTigerKVEngine::makeTemporaryRecordStore(Operat
params.ident = ident.toString();
params.engineName = _canonicalName;
params.isCapped = false;
- params.isClustered = false;
+ params.keyFormat = KeyFormat::Long;
params.isEphemeral = _ephemeral;
params.cappedCallback = nullptr;
// Temporary collections do not need to persist size information to the size storer.
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index 00fb8c68f87..124a3711896 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -205,7 +205,7 @@ WiredTigerRecordStore::OplogStones::OplogStones(OperationContext* opCtx, WiredTi
invariant(rs->isCapped());
invariant(rs->cappedMaxSize() > 0);
- invariant(!rs->isClustered());
+ invariant(rs->keyFormat() == KeyFormat::Long);
unsigned long long maxSize = rs->cappedMaxSize();
// The minimum oplog stone size should be BSONObjMaxInternalSize.
@@ -685,7 +685,7 @@ public:
invariantWTOK(advanceRet);
RecordId id;
- if (_rs->isClustered()) {
+ if (_rs->keyFormat() == KeyFormat::String) {
const char* oidBytes;
invariantWTOK(_cursor->get_key(_cursor, &oidBytes));
id = RecordId(OID::from(oidBytes));
@@ -802,9 +802,8 @@ StatusWith<std::string> WiredTigerRecordStore::generateCreateString(
// WARNING: No user-specified config can appear below this line. These options are required
// for correct behavior of the server.
- if (nss.isTimeseriesBucketsCollection()) {
- // Time-series bucket collections use ObjectIds as their table keys. ObjectIds are
- // described by a 12-byte string key format.
+ if (options.clusteredIndex) {
+ // If the RecordId format is String, assume a 12-byte fixed-length string key format.
ss << "key_format=12s";
} else {
// All other collections use an int64_t as their table keys.
@@ -841,7 +840,7 @@ WiredTigerRecordStore::WiredTigerRecordStore(WiredTigerKVEngine* kvEngine,
_tableId(WiredTigerSession::genTableId()),
_engineName(params.engineName),
_isCapped(params.isCapped),
- _isClustered(params.isClustered),
+ _keyFormat(params.keyFormat),
_isEphemeral(params.isEphemeral),
_isLogged(!isTemp() &&
WiredTigerUtil::useTableLogging(
@@ -888,7 +887,7 @@ WiredTigerRecordStore::WiredTigerRecordStore(WiredTigerKVEngine* kvEngine,
}
if (_isOplog) {
- invariant(!_isClustered);
+ invariant(_keyFormat == KeyFormat::Long);
checkOplogFormatVersion(ctx, _uri);
// The oplog always needs to be marked for size adjustment since it is journaled and also
// may change during replication recovery (if truncated).
@@ -990,8 +989,8 @@ const char* WiredTigerRecordStore::name() const {
return _engineName.c_str();
}
-bool WiredTigerRecordStore::isClustered() const {
- return _isClustered;
+KeyFormat WiredTigerRecordStore::keyFormat() const {
+ return _keyFormat;
}
bool WiredTigerRecordStore::inShutdown() const {
@@ -1089,7 +1088,7 @@ void WiredTigerRecordStore::deleteRecord(OperationContext* opCtx, const RecordId
invariant(opCtx->lockState()->inAWriteUnitOfWork() || opCtx->lockState()->isNoop());
// SERVER-48453: Initialize the next record id counter before deleting. This ensures we won't
// reuse record ids, which can be problematic for the _mdb_catalog.
- if (!_isClustered) {
+ if (_keyFormat == KeyFormat::Long) {
_initNextIdIfNeeded(opCtx);
}
@@ -1430,7 +1429,7 @@ void WiredTigerRecordStore::reclaimOplog(OperationContext* opCtx) {
}
void WiredTigerRecordStore::reclaimOplog(OperationContext* opCtx, Timestamp mayTruncateUpTo) {
- invariant(!_isClustered);
+ invariant(_keyFormat == KeyFormat::Long);
Timer timer;
while (auto stone = _oplogStones->peekOldestStoneIfNeeded()) {
@@ -1558,7 +1557,7 @@ Status WiredTigerRecordStore::_insertRecords(OperationContext* opCtx,
Record highestIdRecord;
invariant(nRecords != 0);
- if (!_isClustered) {
+ if (_keyFormat == KeyFormat::Long) {
// Non-clustered record stores will extract the RecordId key for the oplog and generate
// unique int64_t RecordId's for everything else.
for (size_t i = 0; i < nRecords; i++) {
@@ -1649,7 +1648,7 @@ void WiredTigerRecordStore::notifyCappedWaitersIfNeeded() {
StatusWith<Timestamp> WiredTigerRecordStore::getLatestOplogTimestamp(
OperationContext* opCtx) const {
invariant(_isOplog);
- invariant(!_isClustered);
+ invariant(_keyFormat == KeyFormat::Long);
dassert(opCtx->lockState()->isReadLocked());
WiredTigerSessionCache* cache = WiredTigerRecoveryUnit::get(opCtx)->getSessionCache();
@@ -1672,7 +1671,7 @@ StatusWith<Timestamp> WiredTigerRecordStore::getLatestOplogTimestamp(
StatusWith<Timestamp> WiredTigerRecordStore::getEarliestOplogTimestamp(OperationContext* opCtx) {
invariant(_isOplog);
- invariant(!_isClustered);
+ invariant(_keyFormat == KeyFormat::Long);
dassert(opCtx->lockState()->isReadLocked());
stdx::lock_guard<stdx::timed_mutex> lk(_cappedDeleterMutex);
@@ -2036,7 +2035,7 @@ void WiredTigerRecordStore::updateStatsAfterRepair(OperationContext* opCtx,
void WiredTigerRecordStore::_initNextIdIfNeeded(OperationContext* opCtx) {
// Clustered record stores do not generate unique ObjectId's for RecordId's as the expectation
// is for the caller to set the RecordId using the server generated ObjectId.
- invariant(!_isClustered);
+ invariant(_keyFormat == KeyFormat::Long);
// In the normal case, this will already be initialized, so use a weak load. Since this value
// will only change from 0 to a positive integer, the only risk is reading an outdated value, 0,
@@ -2066,7 +2065,7 @@ void WiredTigerRecordStore::_initNextIdIfNeeded(OperationContext* opCtx) {
RecordId WiredTigerRecordStore::_nextId(OperationContext* opCtx) {
// Clustered record stores do not generate unique ObjectId's for RecordId's as the expectation
// is for the caller to set the RecordId using the server generated ObjectId.
- invariant(!_isClustered);
+ invariant(_keyFormat == KeyFormat::Long);
invariant(!_isOplog);
_initNextIdIfNeeded(opCtx);
RecordId out = RecordId(_nextIdNum.fetchAndAdd(1));
@@ -2485,7 +2484,7 @@ StandardWiredTigerRecordStore::StandardWiredTigerRecordStore(WiredTigerKVEngine*
: WiredTigerRecordStore(kvEngine, opCtx, params) {}
RecordId StandardWiredTigerRecordStore::getKey(WT_CURSOR* cursor) const {
- if (_isClustered) {
+ if (_keyFormat == KeyFormat::String) {
const char* oidBytes;
invariantWTOK(cursor->get_key(cursor, &oidBytes));
return RecordId(OID::from(oidBytes));
@@ -2497,7 +2496,7 @@ RecordId StandardWiredTigerRecordStore::getKey(WT_CURSOR* cursor) const {
}
void StandardWiredTigerRecordStore::setKey(WT_CURSOR* cursor, RecordId id) const {
- if (_isClustered) {
+ if (_keyFormat == KeyFormat::String) {
cursor->set_key(cursor, id.as<OID>().view().view());
} else {
cursor->set_key(cursor, id.as<int64_t>());
@@ -2529,7 +2528,7 @@ WiredTigerRecordStoreStandardCursor::WiredTigerRecordStoreStandardCursor(
: WiredTigerRecordStoreCursorBase(opCtx, rs, forward) {}
void WiredTigerRecordStoreStandardCursor::setKey(WT_CURSOR* cursor, RecordId id) const {
- if (_rs.isClustered()) {
+ if (_rs.keyFormat() == KeyFormat::String) {
cursor->set_key(cursor, id.as<OID>().view().view());
} else {
cursor->set_key(cursor, id.as<int64_t>());
@@ -2537,7 +2536,7 @@ void WiredTigerRecordStoreStandardCursor::setKey(WT_CURSOR* cursor, RecordId id)
}
RecordId WiredTigerRecordStoreStandardCursor::getKey(WT_CURSOR* cursor) const {
- if (_rs.isClustered()) {
+ if (_rs.keyFormat() == KeyFormat::String) {
const char* oidBytes;
invariantWTOK(cursor->get_key(cursor, &oidBytes));
return RecordId(OID::from(oidBytes));
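
At the WiredTiger layer the key format surfaces as the table's key_format configuration: clustered collections get a 12-byte fixed-length string key holding the ObjectId bytes, everything else keeps a signed 64-bit key. A sketch of the relevant fragment of generateCreateString(); the 'q' spelling of the Long branch is assumed from WiredTiger's format notation:

    std::stringstream ss;
    if (options.clusteredIndex) {
        ss << "key_format=12s";  // 12-byte fixed-length string: the ObjectId bytes
    } else {
        ss << "key_format=q";    // signed 64-bit integer record ids
    }
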
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index 82efd45ba07..bdf19060444 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -104,7 +104,7 @@ public:
std::string ident;
std::string engineName;
bool isCapped;
- bool isClustered;
+ KeyFormat keyFormat;
bool isEphemeral;
int64_t cappedMaxSize;
int64_t cappedMaxDocs;
@@ -125,7 +125,7 @@ public:
// name of the RecordStore implementation
virtual const char* name() const;
- virtual bool isClustered() const;
+ virtual KeyFormat keyFormat() const;
virtual long long dataSize(OperationContext* opCtx) const;
@@ -355,8 +355,8 @@ private:
const std::string _engineName;
// The capped settings should not be updated once operations have started
const bool _isCapped;
- // True if this record store is clustered.
- const bool _isClustered;
+ // The format of this RecordStore's RecordId keys.
+ const KeyFormat _keyFormat;
// True if the storage engine is an in-memory storage engine
const bool _isEphemeral;
// True if WiredTiger is logging updates to this table
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp
index d35e178ba6e..008653fe9ff 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp
@@ -97,7 +97,7 @@ public:
params.ident = ident;
params.engineName = kWiredTigerEngineName;
params.isCapped = false;
- params.isClustered = NamespaceString(ns).isTimeseriesBucketsCollection();
+ params.keyFormat = KeyFormat::Long;
params.isEphemeral = false;
params.cappedMaxSize = -1;
params.cappedMaxDocs = -1;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
index 10c518459d0..92b2848b80c 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
@@ -90,18 +90,23 @@ public:
~WiredTigerHarnessHelper() {}
- virtual std::unique_ptr<RecordStore> newNonCappedRecordStore() {
+ virtual std::unique_ptr<RecordStore> newNonCappedRecordStore() override {
return newNonCappedRecordStore("a.b");
}
virtual std::unique_ptr<RecordStore> newNonCappedRecordStore(const std::string& ns) {
+ return newNonCappedRecordStore(ns, CollectionOptions());
+ }
+
+ virtual std::unique_ptr<RecordStore> newNonCappedRecordStore(
+ const std::string& ns, const CollectionOptions& collOptions) override {
WiredTigerRecoveryUnit* ru =
checked_cast<WiredTigerRecoveryUnit*>(_engine.newRecoveryUnit());
OperationContextNoop opCtx(ru);
string uri = WiredTigerKVEngine::kTableUriPrefix + ns;
- StatusWith<std::string> result = WiredTigerRecordStore::generateCreateString(
- kWiredTigerEngineName, ns, CollectionOptions(), "");
+ StatusWith<std::string> result =
+ WiredTigerRecordStore::generateCreateString(kWiredTigerEngineName, ns, collOptions, "");
ASSERT_TRUE(result.isOK());
std::string config = result.getValue();
@@ -117,7 +122,7 @@ public:
params.ident = ns;
params.engineName = kWiredTigerEngineName;
params.isCapped = false;
- params.isClustered = NamespaceString(ns).isTimeseriesBucketsCollection();
+ params.keyFormat = collOptions.clusteredIndex ? KeyFormat::String : KeyFormat::Long;
params.isEphemeral = false;
params.cappedMaxSize = -1;
params.cappedMaxDocs = -1;
@@ -165,7 +170,7 @@ public:
params.ident = ident;
params.engineName = kWiredTigerEngineName;
params.isCapped = true;
- params.isClustered = NamespaceString(ns).isTimeseriesBucketsCollection();
+ params.keyFormat = KeyFormat::Long;
params.isEphemeral = false;
params.cappedMaxSize = cappedMaxSize;
params.cappedMaxDocs = cappedMaxDocs;
@@ -254,7 +259,7 @@ TEST(WiredTigerRecordStoreTest, SizeStorer1) {
params.ident = ident;
params.engineName = kWiredTigerEngineName;
params.isCapped = false;
- params.isClustered = false;
+ params.keyFormat = KeyFormat::Long;
params.isEphemeral = false;
params.cappedMaxSize = -1;
params.cappedMaxDocs = -1;