summaryrefslogtreecommitdiff
path: root/src/mongo/db/storage
diff options
context:
space:
mode:
authorGregory Noma <gregory.noma@gmail.com>2019-07-17 09:20:36 -0400
committerGregory Noma <gregory.noma@gmail.com>2019-07-17 09:20:36 -0400
commit18da05b7e79beb6c6eb68c82d94d2f76003d9f8d (patch)
treed4ec21f409fd827117d9c598ac5af9e1ef748a4b /src/mongo/db/storage
parent8696830b93eaea25cb8573a3b63db52d283933da (diff)
downloadmongo-18da05b7e79beb6c6eb68c82d94d2f76003d9f8d.tar.gz
SERVER-41719 Overload SortedDataInterface::insert, unindex, and addKey to accept KeyString
Diffstat (limited to 'src/mongo/db/storage')
-rw-r--r--src/mongo/db/storage/SConscript1
-rw-r--r--src/mongo/db/storage/biggie/biggie_sorted_impl.cpp134
-rw-r--r--src/mongo/db/storage/biggie/biggie_sorted_impl.h10
-rw-r--r--src/mongo/db/storage/devnull/devnull_kv_engine.cpp19
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/SConscript1
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp51
-rw-r--r--src/mongo/db/storage/key_string.cpp22
-rw-r--r--src/mongo/db/storage/key_string.h1
-rw-r--r--src/mongo/db/storage/mobile/mobile_index.cpp131
-rw-r--r--src/mongo/db/storage/mobile/mobile_index.h38
-rw-r--r--src/mongo/db/storage/sorted_data_interface.h54
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp134
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_insert.cpp162
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_unindex.cpp146
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp367
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index.h58
16 files changed, 1053 insertions, 276 deletions
diff --git a/src/mongo/db/storage/SConscript b/src/mongo/db/storage/SConscript
index 4bc7dbaf540..545fa31e0dc 100644
--- a/src/mongo/db/storage/SConscript
+++ b/src/mongo/db/storage/SConscript
@@ -190,6 +190,7 @@ env.Library(
'$BUILD_DIR/mongo/db/service_context',
'$BUILD_DIR/mongo/unittest/unittest',
'index_entry_comparison',
+ 'key_string',
'test_harness_helper',
],
diff --git a/src/mongo/db/storage/biggie/biggie_sorted_impl.cpp b/src/mongo/db/storage/biggie/biggie_sorted_impl.cpp
index b5fa1d6339c..e4aa5eee384 100644
--- a/src/mongo/db/storage/biggie/biggie_sorted_impl.cpp
+++ b/src/mongo/db/storage/biggie/biggie_sorted_impl.cpp
@@ -82,6 +82,20 @@ std::unique_ptr<KeyString::Builder> keyToKeyStringBuilder(const BSONObj& key, Or
return retKs;
}
+void prefixKeyString(KeyString::Builder* keyString,
+ const RecordId& loc,
+ std::string prefixToUse,
+ bool isUnique) {
+ BSONObjBuilder b;
+ b.append("", prefixToUse); // prefix
+ b.append("", std::string(keyString->getBuffer(), keyString->getSize())); // key
+
+ if (isUnique)
+ keyString->resetToKey(b.obj(), allAscending);
+ else
+ keyString->resetToKey(b.obj(), allAscending, loc);
+}
+
std::string createKeyString(const BSONObj& key,
const RecordId& loc,
std::string prefixToUse,
@@ -90,14 +104,20 @@ std::string createKeyString(const BSONObj& key,
KeyString::Version version = KeyString::Version::V1;
KeyString::Builder ks(version, key, order);
- BSONObjBuilder b;
- b.append("", prefixToUse); // prefix
- b.append("", std::string(ks.getBuffer(), ks.getSize())); // key
+ prefixKeyString(&ks, loc, prefixToUse, isUnique);
+ return std::string(ks.getBuffer(), ks.getSize());
+}
- if (isUnique)
- ks.resetToKey(b.obj(), allAscending);
- else
- ks.resetToKey(b.obj(), allAscending, loc);
+std::string createKeyString(const KeyString::Builder& keyString,
+ const RecordId& loc,
+ std::string prefixToUse,
+ bool isUnique) {
+ KeyString::Builder ks(KeyString::Version::V1);
+ auto sizeWithoutRecordId =
+ KeyString::sizeWithoutRecordIdAtEnd(keyString.getBuffer(), keyString.getSize());
+ ks.resetFromBuffer(keyString.getBuffer(), sizeWithoutRecordId);
+
+ prefixKeyString(&ks, loc, prefixToUse, isUnique);
return std::string(ks.getBuffer(), ks.getSize());
}
@@ -186,13 +206,22 @@ void SortedDataBuilderInterface::commit(bool mayInterrupt) {
}
Status SortedDataBuilderInterface::addKey(const BSONObj& key, const RecordId& loc) {
- StringStore* workingCopy(RecoveryUnit::get(_opCtx)->getHead());
-
invariant(loc.isNormal() || loc.isReserved());
invariant(!hasFieldNames(key));
- std::unique_ptr<KeyString::Builder> newKS = keyToKeyStringBuilder(key, _order);
- std::string newKSToString = std::string(newKS->getBuffer(), newKS->getSize());
+ KeyString::Builder keyString(KeyString::Version::V1, key, _order, loc);
+
+ return addKey(keyString, loc);
+}
+
+Status SortedDataBuilderInterface::addKey(const KeyString::Builder& keyString,
+ const RecordId& loc) {
+ dassert(loc == KeyString::decodeRecordIdAtEnd(keyString.getBuffer(), keyString.getSize()));
+
+ StringStore* workingCopy(RecoveryUnit::get(_opCtx)->getHead());
+ auto sizeWithoutRecordId =
+ KeyString::sizeWithoutRecordIdAtEnd(keyString.getBuffer(), keyString.getSize());
+ std::string newKSToString = std::string(keyString.getBuffer(), sizeWithoutRecordId);
int twoKeyCmp = 1;
int twoRIDCmp = 1;
@@ -208,18 +237,21 @@ Status SortedDataBuilderInterface::addKey(const BSONObj& key, const RecordId& lo
}
std::string workingCopyInsertKey =
- createKeyString(key, loc, _prefix, _order, /* isUnique */ _unique);
+ createKeyString(keyString, loc, _prefix, /* isUnique */ _unique);
if (twoKeyCmp == 0 && twoRIDCmp != 0) {
if (!_dupsAllowed) {
+ auto key = KeyString::toBson(
+ keyString.getBuffer(), keyString.getSize(), _order, keyString.getTypeBits());
return buildDupKeyErrorStatus(key, _collectionNamespace, _indexName, _keyPattern);
}
// Duplicate index entries are allowed on this unique index, so we put the RecordId in the
// KeyString until the unique constraint is resolved.
- workingCopyInsertKey = createKeyString(key, loc, _prefix, _order, /* isUnique */ false);
+ workingCopyInsertKey = createKeyString(keyString, loc, _prefix, /* isUnique */ false);
}
- std::string internalTbString(newKS->getTypeBits().getBuffer(), newKS->getTypeBits().getSize());
+ std::string internalTbString(keyString.getTypeBits().getBuffer(),
+ keyString.getTypeBits().getSize());
// Since this is an in-memory storage engine, we don't need to take endianness into account.
int64_t recIdRepr = loc.repr();
@@ -242,7 +274,7 @@ SortedDataBuilderInterface* SortedDataInterface::getBulkBuilder(OperationContext
return new SortedDataBuilderInterface(opCtx,
_isUnique,
dupsAllowed,
- _order,
+ _ordering,
_prefix,
_identEnd,
_collectionNamespace,
@@ -256,7 +288,7 @@ SortedDataBuilderInterface* SortedDataInterface::getBulkBuilder(OperationContext
SortedDataInterface::SortedDataInterface(OperationContext* opCtx,
StringData ident,
const IndexDescriptor* desc)
- : _order(Ordering::make(desc->keyPattern())),
+ : ::mongo::SortedDataInterface(KeyString::Version::V1, Ordering::make(desc->keyPattern())),
// All entries in this ident will have a prefix of ident + \1.
_prefix(ident.toString().append(1, '\1')),
// Therefore, the string ident + \2 will be greater than all elements in this ident.
@@ -269,21 +301,21 @@ SortedDataInterface::SortedDataInterface(OperationContext* opCtx,
// This is the string representation of the KeyString before elements in this ident, which is
// ident + \0. This is before all elements in this ident.
_KSForIdentStart = createKeyString(
- BSONObj(), RecordId::min(), ident.toString().append(1, '\0'), _order, _isUnique);
+ BSONObj(), RecordId::min(), ident.toString().append(1, '\0'), _ordering, _isUnique);
// Similarly, this is the string representation of the KeyString for something greater than
// all other elements in this ident.
- _KSForIdentEnd = createKeyString(BSONObj(), RecordId::min(), _identEnd, _order, _isUnique);
+ _KSForIdentEnd = createKeyString(BSONObj(), RecordId::min(), _identEnd, _ordering, _isUnique);
}
SortedDataInterface::SortedDataInterface(const Ordering& ordering, bool isUnique, StringData ident)
- : _order(ordering),
+ : ::mongo::SortedDataInterface(KeyString::Version::V1, ordering),
_prefix(ident.toString().append(1, '\1')),
_identEnd(ident.toString().append(1, '\2')),
_isUnique(isUnique),
_isPartial(false) {
_KSForIdentStart = createKeyString(
- BSONObj(), RecordId::min(), ident.toString().append(1, '\0'), _order, _isUnique);
- _KSForIdentEnd = createKeyString(BSONObj(), RecordId::min(), _identEnd, _order, _isUnique);
+ BSONObj(), RecordId::min(), ident.toString().append(1, '\0'), _ordering, _isUnique);
+ _KSForIdentEnd = createKeyString(BSONObj(), RecordId::min(), _identEnd, _ordering, _isUnique);
}
Status SortedDataInterface::insert(OperationContext* opCtx,
@@ -291,10 +323,19 @@ Status SortedDataInterface::insert(OperationContext* opCtx,
const RecordId& loc,
bool dupsAllowed) {
// The KeyString representation of the key.
- std::unique_ptr<KeyString::Builder> workingCopyInternalKs = keyToKeyStringBuilder(key, _order);
+ KeyString::Builder keyString(_keyStringVersion, key, _ordering, loc);
+
+ return insert(opCtx, keyString, loc, dupsAllowed);
+}
+
+Status SortedDataInterface::insert(OperationContext* opCtx,
+ const KeyString::Builder& keyString,
+ const RecordId& loc,
+ bool dupsAllowed) {
+ dassert(loc == KeyString::decodeRecordIdAtEnd(keyString.getBuffer(), keyString.getSize()));
StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead());
- std::string insertKeyString = createKeyString(key, loc, _prefix, _order, _isUnique);
+ std::string insertKeyString = createKeyString(keyString, loc, _prefix, _isUnique);
// For unique indexes, if duplicate keys are allowed then we do the following:
// - Create the KeyString without the RecordId in it and see if anything exists with that.
@@ -307,17 +348,21 @@ Status SortedDataInterface::insert(OperationContext* opCtx,
auto workingCopyIt = workingCopy->find(insertKeyString);
if (workingCopyIt != workingCopy->end()) {
IndexKeyEntry entry =
- keyStringToIndexKeyEntry(workingCopyIt->first, workingCopyIt->second, _order);
+ keyStringToIndexKeyEntry(workingCopyIt->first, workingCopyIt->second, _ordering);
if (entry.loc != loc) {
if (dupsAllowed) {
// Duplicate index entries are allowed on this unique index, so we put the
// RecordId in the KeyString until the unique constraint is resolved.
insertKeyString =
- createKeyString(key, loc, _prefix, _order, /* isUnique */ false);
+ createKeyString(keyString, loc, _prefix, /* isUnique */ false);
} else {
// There was an attempt to create an index entry with a different RecordId while
// dups were not allowed.
+ auto key = KeyString::toBson(keyString.getBuffer(),
+ keyString.getSize(),
+ _ordering,
+ keyString.getTypeBits());
return buildDupKeyErrorStatus(
key, _collectionNamespace, _indexName, _keyPattern);
}
@@ -333,8 +378,8 @@ Status SortedDataInterface::insert(OperationContext* opCtx,
return Status::OK();
// The value we insert is the RecordId followed by the typebits.
- std::string internalTbString = std::string(workingCopyInternalKs->getTypeBits().getBuffer(),
- workingCopyInternalKs->getTypeBits().getSize());
+ std::string internalTbString =
+ std::string(keyString.getTypeBits().getBuffer(), keyString.getTypeBits().getSize());
// Since this is an in-memory storage engine, we don't need to take endianness into account.
int64_t recIdRepr = loc.repr();
@@ -352,6 +397,17 @@ void SortedDataInterface::unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) {
+ KeyString::Builder keyString(_keyStringVersion, key, _ordering, loc);
+
+ unindex(opCtx, keyString, loc, dupsAllowed);
+}
+
+void SortedDataInterface::unindex(OperationContext* opCtx,
+ const KeyString::Builder& keyString,
+ const RecordId& loc,
+ bool dupsAllowed) {
+ dassert(loc == KeyString::decodeRecordIdAtEnd(keyString.getBuffer(), keyString.getSize()));
+
StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead());
std::string removeKeyString;
bool erased;
@@ -365,9 +421,9 @@ void SortedDataInterface::unindex(OperationContext* opCtx,
// RecordId in it.
// This is required because of the way we insert on unique indexes when dups are allowed.
if (dupsAllowed)
- removeKeyString = createKeyString(key, loc, _prefix, _order, /* isUnique */ false);
+ removeKeyString = createKeyString(keyString, loc, _prefix, /* isUnique */ false);
else
- removeKeyString = createKeyString(key, loc, _prefix, _order, /* isUnique */ true);
+ removeKeyString = createKeyString(keyString, loc, _prefix, /* isUnique */ true);
// Check that the record id matches when using partial indexes. We may be called to unindex
// records that are not present in the index due to the partial filter expression.
@@ -380,16 +436,16 @@ void SortedDataInterface::unindex(OperationContext* opCtx,
// the RecordId in it, and erase that. This could only happen on unique indexes where
// duplicate index entries were/are allowed.
if (dupsAllowed)
- removeKeyString = createKeyString(key, loc, _prefix, _order, /* isUnique */ true);
+ removeKeyString = createKeyString(keyString, loc, _prefix, /* isUnique */ true);
else
- removeKeyString = createKeyString(key, loc, _prefix, _order, /* isUnique */ false);
+ removeKeyString = createKeyString(keyString, loc, _prefix, /* isUnique */ false);
if (!ifPartialCheckRecordIdEquals(opCtx, removeKeyString, loc))
return;
erased = workingCopy->erase(removeKeyString);
}
} else {
- removeKeyString = createKeyString(key, loc, _prefix, _order, /* isUnique */ false);
+ removeKeyString = createKeyString(keyString, loc, _prefix, /* isUnique */ false);
erased = workingCopy->erase(removeKeyString);
}
@@ -419,8 +475,8 @@ Status SortedDataInterface::dupKeyCheck(OperationContext* opCtx, const BSONObj&
invariant(_isUnique);
StringStore* workingCopy(RecoveryUnit::get(opCtx)->getHead());
- std::string minKey = createKeyString(key, RecordId::min(), _prefix, _order, _isUnique);
- std::string maxKey = createKeyString(key, RecordId::max(), _prefix, _order, _isUnique);
+ std::string minKey = createKeyString(key, RecordId::min(), _prefix, _ordering, _isUnique);
+ std::string maxKey = createKeyString(key, RecordId::max(), _prefix, _ordering, _isUnique);
// We effectively do the same check as in insert. However, we also check to make sure that
// the iterator returned to us by lower_bound also happens to be inside out ident.
@@ -432,7 +488,7 @@ Status SortedDataInterface::dupKeyCheck(OperationContext* opCtx, const BSONObj&
return Status::OK();
}
auto lower =
- keyStringToIndexKeyEntry(lowerBoundIterator->first, lowerBoundIterator->second, _order);
+ keyStringToIndexKeyEntry(lowerBoundIterator->first, lowerBoundIterator->second, _ordering);
++lowerBoundIterator;
if (lowerBoundIterator == workingCopy->end()) {
@@ -440,8 +496,8 @@ Status SortedDataInterface::dupKeyCheck(OperationContext* opCtx, const BSONObj&
}
auto next =
- keyStringToIndexKeyEntry(lowerBoundIterator->first, lowerBoundIterator->second, _order);
- if (key.woCompare(next.key, _order, false) == 0) {
+ keyStringToIndexKeyEntry(lowerBoundIterator->first, lowerBoundIterator->second, _ordering);
+ if (key.woCompare(next.key, _ordering, false) == 0) {
return buildDupKeyErrorStatus(key, _collectionNamespace, _indexName, _keyPattern);
}
@@ -495,7 +551,7 @@ std::unique_ptr<mongo::SortedDataInterface::Cursor> SortedDataInterface::newCurs
_prefix,
_identEnd,
workingCopy,
- _order,
+ _ordering,
_isUnique,
_KSForIdentStart,
_KSForIdentEnd);
@@ -517,7 +573,7 @@ bool SortedDataInterface::ifPartialCheckRecordIdEquals(OperationContext* opCtx,
return true;
IndexKeyEntry entry =
- keyStringToIndexKeyEntry(workingCopyIt->first, workingCopyIt->second, _order);
+ keyStringToIndexKeyEntry(workingCopyIt->first, workingCopyIt->second, _ordering);
return entry.loc == rid;
}
diff --git a/src/mongo/db/storage/biggie/biggie_sorted_impl.h b/src/mongo/db/storage/biggie/biggie_sorted_impl.h
index 4afd08b633f..6c7e9aacb21 100644
--- a/src/mongo/db/storage/biggie/biggie_sorted_impl.h
+++ b/src/mongo/db/storage/biggie/biggie_sorted_impl.h
@@ -49,6 +49,7 @@ public:
const BSONObj& keyPattern);
void commit(bool mayInterrupt) override;
virtual Status addKey(const BSONObj& key, const RecordId& loc);
+ virtual Status addKey(const KeyString::Builder& keyString, const RecordId& loc);
private:
OperationContext* _opCtx;
@@ -84,10 +85,18 @@ public:
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) override;
+ virtual Status insert(OperationContext* opCtx,
+ const KeyString::Builder& keyString,
+ const RecordId& loc,
+ bool dupsAllowed) override;
virtual void unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) override;
+ virtual void unindex(OperationContext* opCtx,
+ const KeyString::Builder& keyString,
+ const RecordId& loc,
+ bool dupsAllowed) override;
virtual Status dupKeyCheck(OperationContext* opCtx, const BSONObj& key) override;
virtual void fullValidate(OperationContext* opCtx,
long long* numKeysOut,
@@ -184,7 +193,6 @@ private:
bool keyExists(OperationContext* opCtx, const BSONObj& key);
- const Ordering _order;
// These two are the same as before.
std::string _prefix;
std::string _identEnd;
diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
index 9b69642f9b0..a661666c19d 100644
--- a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
+++ b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
@@ -172,10 +172,17 @@ public:
virtual Status addKey(const BSONObj& key, const RecordId& loc) {
return Status::OK();
}
+
+ virtual Status addKey(const KeyString::Builder& keyString, const RecordId& loc) {
+ return Status::OK();
+ }
};
class DevNullSortedDataInterface : public SortedDataInterface {
public:
+ DevNullSortedDataInterface()
+ : SortedDataInterface(KeyString::Version::kLatestVersion, Ordering::make(BSONObj())) {}
+
virtual ~DevNullSortedDataInterface() {}
virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* opCtx, bool dupsAllowed) {
@@ -189,11 +196,23 @@ public:
return Status::OK();
}
+ virtual Status insert(OperationContext* opCtx,
+ const KeyString::Builder& keyString,
+ const RecordId& loc,
+ bool dupsAllowed) {
+ return Status::OK();
+ }
+
virtual void unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) {}
+ virtual void unindex(OperationContext* opCtx,
+ const KeyString::Builder& keyString,
+ const RecordId& loc,
+ bool dupsAllowed) {}
+
virtual Status dupKeyCheck(OperationContext* opCtx, const BSONObj& key) {
return Status::OK();
}
diff --git a/src/mongo/db/storage/ephemeral_for_test/SConscript b/src/mongo/db/storage/ephemeral_for_test/SConscript
index 566f3e893ef..3ca7c7070c9 100644
--- a/src/mongo/db/storage/ephemeral_for_test/SConscript
+++ b/src/mongo/db/storage/ephemeral_for_test/SConscript
@@ -31,6 +31,7 @@ env.Library(
'$BUILD_DIR/mongo/db/repl/repl_coordinator_interface',
'$BUILD_DIR/mongo/db/storage/index_entry_comparison',
'$BUILD_DIR/mongo/db/storage/journal_listener',
+ '$BUILD_DIR/mongo/db/storage/key_string',
'$BUILD_DIR/mongo/db/storage/recovery_unit_base',
'$BUILD_DIR/mongo/db/storage/kv/kv_prefix',
]
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
index f55c4d8a87d..3e9476642bd 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/catalog/index_catalog_entry.h"
#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h"
#include "mongo/db/storage/index_entry_comparison.h"
+#include "mongo/db/storage/key_string.h"
#include "mongo/util/str.h"
namespace mongo {
@@ -90,12 +91,14 @@ class EphemeralForTestBtreeBuilderImpl : public SortedDataBuilderInterface {
public:
EphemeralForTestBtreeBuilderImpl(IndexSet* data,
long long* currentKeySize,
+ const Ordering& ordering,
bool dupsAllowed,
const NamespaceString& collectionNamespace,
const std::string& indexName,
const BSONObj& keyPattern)
: _data(data),
_currentKeySize(currentKeySize),
+ _ordering(ordering),
_dupsAllowed(dupsAllowed),
_comparator(_data->key_comp()),
_collectionNamespace(collectionNamespace),
@@ -128,9 +131,19 @@ public:
return Status::OK();
}
+ Status addKey(const KeyString::Builder& keyString, const RecordId& loc) {
+ dassert(loc == KeyString::decodeRecordIdAtEnd(keyString.getBuffer(), keyString.getSize()));
+
+ auto key = KeyString::toBson(
+ keyString.getBuffer(), keyString.getSize(), _ordering, keyString.getTypeBits());
+
+ return addKey(key, loc);
+ }
+
private:
IndexSet* const _data;
long long* _currentKeySize;
+ const Ordering& _ordering;
const bool _dupsAllowed;
IndexEntryComparison _comparator; // used by the bulk builder to detect duplicate keys
@@ -144,11 +157,13 @@ private:
class EphemeralForTestBtreeImpl : public SortedDataInterface {
public:
EphemeralForTestBtreeImpl(IndexSet* data,
+ const Ordering& ordering,
bool isUnique,
const NamespaceString& collectionNamespace,
const std::string& indexName,
const BSONObj& keyPattern)
- : _data(data),
+ : SortedDataInterface(KeyString::Version::kLatestVersion, ordering),
+ _data(data),
_isUnique(isUnique),
_collectionNamespace(collectionNamespace),
_indexName(indexName),
@@ -157,8 +172,13 @@ public:
}
virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* opCtx, bool dupsAllowed) {
- return new EphemeralForTestBtreeBuilderImpl(
- _data, &_currentKeySize, dupsAllowed, _collectionNamespace, _indexName, _keyPattern);
+ return new EphemeralForTestBtreeBuilderImpl(_data,
+ &_currentKeySize,
+ _ordering,
+ dupsAllowed,
+ _collectionNamespace,
+ _indexName,
+ _keyPattern);
}
virtual Status insert(OperationContext* opCtx,
@@ -181,6 +201,18 @@ public:
return Status::OK();
}
+ virtual Status insert(OperationContext* opCtx,
+ const KeyString::Builder& keyString,
+ const RecordId& loc,
+ bool dupsAllowed) {
+ dassert(loc == KeyString::decodeRecordIdAtEnd(keyString.getBuffer(), keyString.getSize()));
+
+ auto key = KeyString::toBson(
+ keyString.getBuffer(), keyString.getSize(), _ordering, keyString.getTypeBits());
+
+ return insert(opCtx, key, loc, dupsAllowed);
+ }
+
virtual void unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
@@ -197,6 +229,18 @@ public:
}
}
+ virtual void unindex(OperationContext* opCtx,
+ const KeyString::Builder& keyString,
+ const RecordId& loc,
+ bool dupsAllowed) {
+ dassert(loc == KeyString::decodeRecordIdAtEnd(keyString.getBuffer(), keyString.getSize()));
+
+ auto key = KeyString::toBson(
+ keyString.getBuffer(), keyString.getSize(), _ordering, keyString.getTypeBits());
+
+ return unindex(opCtx, key, loc, dupsAllowed);
+ }
+
virtual void fullValidate(OperationContext* opCtx,
long long* numKeysOut,
ValidateResults* fullResults) const {
@@ -533,6 +577,7 @@ std::unique_ptr<SortedDataInterface> getEphemeralForTestBtreeImpl(
*dataInOut = std::make_shared<IndexSet>(IndexEntryComparison(ordering));
}
return std::make_unique<EphemeralForTestBtreeImpl>(static_cast<IndexSet*>(dataInOut->get()),
+ ordering,
isUnique,
collectionNamespace,
indexName,
diff --git a/src/mongo/db/storage/key_string.cpp b/src/mongo/db/storage/key_string.cpp
index e4bd2d19dfc..9155149b189 100644
--- a/src/mongo/db/storage/key_string.cpp
+++ b/src/mongo/db/storage/key_string.cpp
@@ -2036,6 +2036,28 @@ int Builder::compare(const Builder& other) const {
return a < b ? -1 : 1;
}
+int Builder::compareWithoutRecordId(const Builder& other) const {
+ int a = !isEmpty() ? sizeWithoutRecordIdAtEnd(getBuffer(), getSize()) : 0;
+ int b = !other.isEmpty() ? sizeWithoutRecordIdAtEnd(other.getBuffer(), other.getSize()) : 0;
+
+ int min = std::min(a, b);
+
+ int cmp = memcmp(getBuffer(), other.getBuffer(), min);
+
+ if (cmp) {
+ if (cmp < 0)
+ return -1;
+ return 1;
+ }
+
+ // keys match
+
+ if (a == b)
+ return 0;
+
+ return a < b ? -1 : 1;
+}
+
int Value::compare(const Value& other) const {
int a = getSize();
int b = other.getSize();
diff --git a/src/mongo/db/storage/key_string.h b/src/mongo/db/storage/key_string.h
index 88a4b499d07..e2d62f2c094 100644
--- a/src/mongo/db/storage/key_string.h
+++ b/src/mongo/db/storage/key_string.h
@@ -418,6 +418,7 @@ public:
}
int compare(const Builder& other) const;
+ int compareWithoutRecordId(const Builder& other) const;
/**
* @return a hex encoding of this key
diff --git a/src/mongo/db/storage/mobile/mobile_index.cpp b/src/mongo/db/storage/mobile/mobile_index.cpp
index 75de7966381..33d798a42d4 100644
--- a/src/mongo/db/storage/mobile/mobile_index.cpp
+++ b/src/mongo/db/storage/mobile/mobile_index.cpp
@@ -73,7 +73,8 @@ BSONObj stripFieldNames(const BSONObj& query) {
MobileIndex::MobileIndex(OperationContext* opCtx,
const IndexDescriptor* desc,
const std::string& ident)
- : _isUnique(desc->unique()),
+ : SortedDataInterface(KeyString::Version::kLatestVersion, Ordering::make(desc->keyPattern())),
+ _isUnique(desc->unique()),
_ordering(Ordering::make(desc->keyPattern())),
_ident(ident),
_collectionNamespace(desc->parentNS()),
@@ -87,12 +88,23 @@ Status MobileIndex::insert(OperationContext* opCtx,
invariant(recId.isValid());
invariant(!hasFieldNames(key));
- return _insert(opCtx, key, recId, dupsAllowed);
+ KeyString::Builder keyString(_keyStringVersion, key, _ordering, recId);
+
+ return insert(opCtx, keyString, recId, dupsAllowed);
+}
+
+Status MobileIndex::insert(OperationContext* opCtx,
+ const KeyString::Builder& keyString,
+ const RecordId& recId,
+ bool dupsAllowed) {
+ return _insert(opCtx, keyString, recId, dupsAllowed);
}
template <typename ValueType>
Status MobileIndex::doInsert(OperationContext* opCtx,
- const KeyString::Builder& key,
+ const char* keyBuffer,
+ size_t keySize,
+ const KeyString::TypeBits& typeBits,
const ValueType& value,
bool isTransactional) {
MobileSession* session;
@@ -105,7 +117,7 @@ Status MobileIndex::doInsert(OperationContext* opCtx,
SqliteStatement insertStmt(
*session, "INSERT INTO \"", _ident, "\" (key, value) VALUES (?, ?);");
- insertStmt.bindBlob(0, key.getBuffer(), key.getSize());
+ insertStmt.bindBlob(0, keyBuffer, keySize);
insertStmt.bindBlob(1, value.getBuffer(), value.getSize());
int status = insertStmt.step();
@@ -113,8 +125,7 @@ Status MobileIndex::doInsert(OperationContext* opCtx,
insertStmt.setExceptionStatus(status);
if (isUnique()) {
// Return error if duplicate key inserted in a unique index.
- BSONObj bson =
- KeyString::toBson(key.getBuffer(), key.getSize(), _ordering, key.getTypeBits());
+ BSONObj bson = KeyString::toBson(keyBuffer, keySize, _ordering, typeBits);
return buildDupKeyErrorStatus(bson, _collectionNamespace, _indexName, _keyPattern);
} else {
// A record with same key could already be present in a standard index, that is OK. This
@@ -135,18 +146,28 @@ void MobileIndex::unindex(OperationContext* opCtx,
invariant(recId.isValid());
invariant(!hasFieldNames(key));
- return _unindex(opCtx, key, recId, dupsAllowed);
+ KeyString::Builder keyString(_keyStringVersion, key, _ordering, recId);
+
+ return unindex(opCtx, keyString, recId, dupsAllowed);
+}
+
+void MobileIndex::unindex(OperationContext* opCtx,
+ const KeyString::Builder& keyString,
+ const RecordId& recId,
+ bool dupsAllowed) {
+ return _unindex(opCtx, keyString, recId, dupsAllowed);
}
void MobileIndex::_doDelete(OperationContext* opCtx,
- const KeyString::Builder& key,
+ const char* keyBuffer,
+ size_t keySize,
KeyString::Builder* value) {
MobileSession* session = MobileRecoveryUnit::get(opCtx)->getSession(opCtx, false);
SqliteStatement deleteStmt(
*session, "DELETE FROM \"", _ident, "\" WHERE key = ?", value ? " AND value = ?" : "", ";");
- deleteStmt.bindBlob(0, key.getBuffer(), key.getSize());
+ deleteStmt.bindBlob(0, keyBuffer, keySize);
if (value) {
deleteStmt.bindBlob(1, value->getBuffer(), value->getSize());
}
@@ -264,6 +285,7 @@ public:
const BSONObj& keyPattern)
: _index(index),
_opCtx(opCtx),
+ _lastKeyString(index->getKeyStringVersion()),
_dupsAllowed(dupsAllowed),
_collectionNamespace(collectionNamespace),
_indexName(indexName),
@@ -275,14 +297,20 @@ public:
invariant(recId.isValid());
invariant(!hasFieldNames(key));
- Status status = _checkNextKey(key);
+ KeyString::Builder keyString(
+ _index->getKeyStringVersion(), key, _index->getOrdering(), recId);
+ return addKey(keyString, recId);
+ }
+
+ Status addKey(const KeyString::Builder& keyString, const RecordId& recId) override {
+ Status status = _checkNextKey(keyString);
if (!status.isOK()) {
return status;
}
- _lastKey = key.getOwned();
+ _lastKeyString.resetFromBuffer(keyString.getBuffer(), keyString.getSize());
- return _addKey(key, recId);
+ return _addKey(keyString, recId);
}
void commit(bool mayInterrupt) override {}
@@ -292,9 +320,13 @@ protected:
* Checks whether the new key to be inserted is > or >= the previous one depending
* on _dupsAllowed.
*/
- Status _checkNextKey(const BSONObj& key) {
- const int cmp = key.woCompare(_lastKey, _index->getOrdering());
+ Status _checkNextKey(const KeyString::Builder& keyString) {
+ const int cmp = keyString.compare(_lastKeyString);
if (!_dupsAllowed && cmp == 0) {
+ auto key = KeyString::toBson(keyString.getBuffer(),
+ keyString.getSize(),
+ _index->getOrdering(),
+ keyString.getTypeBits());
return buildDupKeyErrorStatus(key, _collectionNamespace, _indexName, _keyPattern);
} else if (cmp < 0) {
return Status(ErrorCodes::InternalError, "expected higher RecordId in bulk builder");
@@ -302,11 +334,11 @@ protected:
return Status::OK();
}
- virtual Status _addKey(const BSONObj& key, const RecordId& recId) = 0;
+ virtual Status _addKey(const KeyString::Builder& keyString, const RecordId& recId) = 0;
MobileIndex* _index;
OperationContext* const _opCtx;
- BSONObj _lastKey;
+ KeyString::Builder _lastKeyString;
const bool _dupsAllowed;
const NamespaceString _collectionNamespace;
const std::string _indexName;
@@ -327,10 +359,10 @@ public:
: BulkBuilderBase(index, opCtx, dupsAllowed, collectionNamespace, indexName, keyPattern) {}
protected:
- Status _addKey(const BSONObj& key, const RecordId& recId) override {
- KeyString::Builder keyStr(_index->getKeyStringVersion(), key, _index->getOrdering(), recId);
- KeyString::TypeBits value = keyStr.getTypeBits();
- return _index->doInsert(_opCtx, keyStr, value, false);
+ Status _addKey(const KeyString::Builder& keyString, const RecordId&) override {
+ KeyString::TypeBits typeBits = keyString.getTypeBits();
+ return _index->doInsert(
+ _opCtx, keyString.getBuffer(), keyString.getSize(), typeBits, typeBits, false);
}
};
@@ -351,16 +383,24 @@ public:
}
protected:
- Status _addKey(const BSONObj& key, const RecordId& recId) override {
- const KeyString::Builder keyStr(_index->getKeyStringVersion(), key, _index->getOrdering());
+ Status _addKey(const KeyString::Builder& keyString, const RecordId& recId) override {
+ dassert(recId ==
+ KeyString::decodeRecordIdAtEnd(keyString.getBuffer(), keyString.getSize()));
KeyString::Builder value(_index->getKeyStringVersion(), recId);
- KeyString::TypeBits typeBits = keyStr.getTypeBits();
+ KeyString::TypeBits typeBits = keyString.getTypeBits();
if (!typeBits.isAllZeros()) {
value.appendTypeBits(typeBits);
}
+ auto sizeWithoutRecordId =
+ KeyString::sizeWithoutRecordIdAtEnd(keyString.getBuffer(), keyString.getSize());
- return _index->doInsert(_opCtx, keyStr, value, false);
+ return _index->doInsert(_opCtx,
+ keyString.getBuffer(),
+ sizeWithoutRecordId,
+ keyString.getTypeBits(),
+ value,
+ false);
}
};
@@ -659,24 +699,23 @@ std::unique_ptr<SortedDataInterface::Cursor> MobileIndexStandard::newCursor(Oper
}
Status MobileIndexStandard::_insert(OperationContext* opCtx,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& recId,
bool dupsAllowed) {
invariant(dupsAllowed);
+ dassert(recId == KeyString::decodeRecordIdAtEnd(keyString.getBuffer(), keyString.getSize()));
- const KeyString::Builder keyStr(_keyStringVersion, key, _ordering, recId);
- const KeyString::TypeBits value = keyStr.getTypeBits();
- return doInsert(opCtx, keyStr, value);
+ const KeyString::TypeBits typeBits = keyString.getTypeBits();
+ return doInsert(opCtx, keyString.getBuffer(), keyString.getSize(), typeBits, typeBits);
}
void MobileIndexStandard::_unindex(OperationContext* opCtx,
- const BSONObj& key,
- const RecordId& recId,
+ const KeyString::Builder& keyString,
+ const RecordId&,
bool dupsAllowed) {
invariant(dupsAllowed);
- const KeyString::Builder keyStr(_keyStringVersion, key, _ordering, recId);
- _doDelete(opCtx, keyStr);
+ _doDelete(opCtx, keyString.getBuffer(), keyString.getSize());
}
MobileIndexUnique::MobileIndexUnique(OperationContext* opCtx,
@@ -698,42 +737,52 @@ std::unique_ptr<SortedDataInterface::Cursor> MobileIndexUnique::newCursor(Operat
}
Status MobileIndexUnique::_insert(OperationContext* opCtx,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& recId,
bool dupsAllowed) {
// Replication is not supported so dups are not allowed.
invariant(!dupsAllowed);
- const KeyString::Builder keyStr(_keyStringVersion, key, _ordering);
+
+ invariant(recId.isValid());
+ dassert(recId == KeyString::decodeRecordIdAtEnd(keyString.getBuffer(), keyString.getSize()));
KeyString::Builder value(_keyStringVersion, recId);
- KeyString::TypeBits typeBits = keyStr.getTypeBits();
+ KeyString::TypeBits typeBits = keyString.getTypeBits();
if (!typeBits.isAllZeros()) {
value.appendTypeBits(typeBits);
}
+ auto sizeWithoutRecordId =
+ KeyString::sizeWithoutRecordIdAtEnd(keyString.getBuffer(), keyString.getSize());
- return doInsert(opCtx, keyStr, value);
+ return doInsert(
+ opCtx, keyString.getBuffer(), sizeWithoutRecordId, keyString.getTypeBits(), value);
}
void MobileIndexUnique::_unindex(OperationContext* opCtx,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& recId,
bool dupsAllowed) {
// Replication is not supported so dups are not allowed.
invariant(!dupsAllowed);
- const KeyString::Builder keyStr(_keyStringVersion, key, _ordering);
// A partial index may attempt to delete a non-existent record id. If it is a partial index, it
// must delete a row that matches both key and value.
+ auto sizeWithoutRecordId =
+ KeyString::sizeWithoutRecordIdAtEnd(keyString.getBuffer(), keyString.getSize());
if (_isPartial) {
+ invariant(recId.isValid());
+ dassert(recId ==
+ KeyString::decodeRecordIdAtEnd(keyString.getBuffer(), keyString.getSize()));
+
KeyString::Builder value(_keyStringVersion, recId);
- KeyString::TypeBits typeBits = keyStr.getTypeBits();
+ KeyString::TypeBits typeBits = keyString.getTypeBits();
if (!typeBits.isAllZeros()) {
value.appendTypeBits(typeBits);
}
- _doDelete(opCtx, keyStr, &value);
+ _doDelete(opCtx, keyString.getBuffer(), sizeWithoutRecordId, &value);
} else {
- _doDelete(opCtx, keyStr);
+ _doDelete(opCtx, keyString.getBuffer(), sizeWithoutRecordId);
}
}
diff --git a/src/mongo/db/storage/mobile/mobile_index.h b/src/mongo/db/storage/mobile/mobile_index.h
index cfbc0c8b64b..eee3befe307 100644
--- a/src/mongo/db/storage/mobile/mobile_index.h
+++ b/src/mongo/db/storage/mobile/mobile_index.h
@@ -57,11 +57,21 @@ public:
const RecordId& recId,
bool dupsAllowed) override;
+ Status insert(OperationContext* opCtx,
+ const KeyString::Builder& keyString,
+ const RecordId& recId,
+ bool dupsAllowed) override;
+
void unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& recId,
bool dupsAllowed) override;
+ void unindex(OperationContext* opCtx,
+ const KeyString::Builder& keyString,
+ const RecordId& recId,
+ bool dupsAllowed) override;
+
void fullValidate(OperationContext* opCtx,
long long* numKeysOut,
ValidateResults* fullResults) const override;
@@ -92,18 +102,12 @@ public:
*/
template <typename ValueType>
Status doInsert(OperationContext* opCtx,
- const KeyString::Builder& key,
+ const char* keyBuffer,
+ size_t keySize,
+ const KeyString::TypeBits& typeBits,
const ValueType& value,
bool isTransactional = true);
- Ordering getOrdering() const {
- return _ordering;
- }
-
- KeyString::Version getKeyStringVersion() const {
- return _keyStringVersion;
- }
-
bool isUnique() {
return _isUnique;
}
@@ -119,16 +123,17 @@ protected:
* Performs the deletion from the table matching the given key.
*/
void _doDelete(OperationContext* opCtx,
- const KeyString::Builder& key,
+ const char* keyBuffer,
+ size_t keySize,
KeyString::Builder* value = nullptr);
virtual Status _insert(OperationContext* opCtx,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& recId,
bool dupsAllowed) = 0;
virtual void _unindex(OperationContext* opCtx,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& recId,
bool dupsAllowed) = 0;
@@ -138,7 +143,6 @@ protected:
const bool _isUnique;
const Ordering _ordering;
- const KeyString::Version _keyStringVersion = KeyString::Version::kLatestVersion;
const std::string _ident;
const NamespaceString _collectionNamespace;
const std::string _indexName;
@@ -158,12 +162,12 @@ public:
protected:
Status _insert(OperationContext* opCtx,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& recId,
bool dupsAllowed) override;
void _unindex(OperationContext* opCtx,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& recId,
bool dupsAllowed) override;
};
@@ -181,12 +185,12 @@ public:
protected:
Status _insert(OperationContext* opCtx,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& recId,
bool dupsAllowed) override;
void _unindex(OperationContext* opCtx,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& recId,
bool dupsAllowed) override;
diff --git a/src/mongo/db/storage/sorted_data_interface.h b/src/mongo/db/storage/sorted_data_interface.h
index dd1fcdbd803..d2c3608a073 100644
--- a/src/mongo/db/storage/sorted_data_interface.h
+++ b/src/mongo/db/storage/sorted_data_interface.h
@@ -35,6 +35,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/record_id.h"
#include "mongo/db/storage/index_entry_comparison.h"
+#include "mongo/db/storage/key_string.h"
#pragma once
@@ -52,6 +53,9 @@ struct ValidateResults;
*/
class SortedDataInterface {
public:
+ SortedDataInterface(KeyString::Version keyStringVersion, Ordering ordering)
+ : _keyStringVersion(keyStringVersion), _ordering(ordering) {}
+
virtual ~SortedDataInterface() {}
//
@@ -91,6 +95,23 @@ public:
bool dupsAllowed) = 0;
/**
+ * Insert an entry into the index with the specified KeyString and RecordId.
+ *
+ * @param opCtx the transaction under which the insert takes place
+ * @param dupsAllowed true if duplicate keys are allowed, and false
+ * otherwise
+ *
+ * @return Status::OK() if the insert succeeded,
+ *
+ * ErrorCodes::DuplicateKey if 'keyString' already exists in 'this' index
+ * at a RecordId other than 'loc' and duplicates were not allowed
+ */
+ virtual Status insert(OperationContext* opCtx,
+ const KeyString::Builder& keyString,
+ const RecordId& loc,
+ bool dupsAllowed) = 0;
+
+ /**
* Remove the entry from the index with the specified key and RecordId.
*
* @param opCtx the transaction under which the remove takes place
@@ -103,6 +124,18 @@ public:
bool dupsAllowed) = 0;
/**
+ * Remove the entry from the index with the specified KeyString and RecordId.
+ *
+ * @param opCtx the transaction under which the remove takes place
+ * @param dupsAllowed true if duplicate keys are allowed, and false
+ * otherwise
+ */
+ virtual void unindex(OperationContext* opCtx,
+ const KeyString::Builder& keyString,
+ const RecordId& loc,
+ bool dupsAllowed) = 0;
+
+ /**
* Return ErrorCodes::DuplicateKey if there is more than one occurence of 'key' in this index,
* and Status::OK() otherwise. This call is only allowed on a unique index, and will invariant
* otherwise.
@@ -174,6 +207,20 @@ public:
return x;
}
+    /**
+ * Return the KeyString version for 'this' index.
+ */
+ KeyString::Version getKeyStringVersion() const {
+ return _keyStringVersion;
+ }
+
+    /**
+ * Return the ordering for 'this' index.
+ */
+ Ordering getOrdering() const {
+ return _ordering;
+ }
+
/**
* Navigates over the sorted data.
*
@@ -354,6 +401,10 @@ public:
//
virtual Status initAsEmpty(OperationContext* opCtx) = 0;
+
+protected:
+ const KeyString::Version _keyStringVersion;
+ const Ordering _ordering;
};
/**
@@ -364,12 +415,13 @@ public:
virtual ~SortedDataBuilderInterface() {}
/**
- * Adds 'key' to intermediate storage.
+ * Adds 'key' or 'keyString' to intermediate storage.
*
* 'key' must be > or >= the last key passed to this function (depends on _dupsAllowed). If
* this is violated an error Status (ErrorCodes::InternalError) will be returned.
*/
virtual Status addKey(const BSONObj& key, const RecordId& loc) = 0;
+ virtual Status addKey(const KeyString::Builder& keyString, const RecordId& loc) = 0;
/**
* Do any necessary work to finish building the tree.
diff --git a/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp b/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp
index fcfdfed03f5..e2cf3ff4757 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp
@@ -63,6 +63,36 @@ TEST(SortedDataInterface, BuilderAddKey) {
}
}
+/*
+ * Add a KeyString using a bulk builder.
+ */
+TEST(SortedDataInterface, BuilderAddKeyString) {
+ const auto harnessHelper(newSortedDataInterfaceHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(
+ harnessHelper->newSortedDataInterface(/*unique=*/false, /*partial=*/false));
+
+ KeyString::Builder keyString1(sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc1);
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataBuilderInterface> builder(
+ sorted->getBulkBuilder(opCtx.get(), true));
+
+ ASSERT_OK(builder->addKey(keyString1, loc1));
+ builder->commit(false);
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+}
+
// Add a reserved RecordId using a bulk builder.
TEST(SortedDataInterface, BuilderAddKeyWithReservedRecordId) {
const auto harnessHelper(newSortedDataInterfaceHarnessHelper());
@@ -146,6 +176,41 @@ TEST(SortedDataInterface, BuilderAddSameKey) {
}
}
+/*
+ * Add the same KeyString multiple times using a bulk builder and verify that the returned status is
+ * ErrorCodes::DuplicateKey when duplicates are not allowed.
+ */
+TEST(SortedDataInterface, BuilderAddSameKeyString) {
+ const auto harnessHelper(newSortedDataInterfaceHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(
+ harnessHelper->newSortedDataInterface(/*unique=*/true, /*partial=*/false));
+
+ KeyString::Builder keyStringLoc1(
+ sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc1);
+ KeyString::Builder keyStringLoc2(
+ sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc2);
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataBuilderInterface> builder(
+ sorted->getBulkBuilder(opCtx.get(), false));
+
+ ASSERT_OK(builder->addKey(keyStringLoc1, loc1));
+ ASSERT_EQUALS(ErrorCodes::DuplicateKey, builder->addKey(keyStringLoc2, loc2));
+ builder->commit(false);
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+}
+
// Add the same key multiple times using a bulk builder and verify that
// the returned status is OK when duplicates are allowed.
TEST(SortedDataInterface, BuilderAddSameKeyWithDupsAllowed) {
@@ -174,6 +239,41 @@ TEST(SortedDataInterface, BuilderAddSameKeyWithDupsAllowed) {
}
}
+/*
+ * Add the same KeyString multiple times using a bulk builder and verify that the returned status is
+ * OK when duplicates are allowed.
+ */
+TEST(SortedDataInterface, BuilderAddSameKeyStringWithDupsAllowed) {
+ const auto harnessHelper(newSortedDataInterfaceHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(
+ harnessHelper->newSortedDataInterface(/*unique=*/false, /*partial=*/false));
+
+ KeyString::Builder keyStringLoc1(
+ sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc1);
+ KeyString::Builder keyStringLoc2(
+ sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc2);
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataBuilderInterface> builder(
+ sorted->getBulkBuilder(opCtx.get(), true /* allow duplicates */));
+
+ ASSERT_OK(builder->addKey(keyStringLoc1, loc1));
+ ASSERT_OK(builder->addKey(keyStringLoc2, loc2));
+ builder->commit(false);
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+ }
+}
+
// Add multiple keys using a bulk builder.
TEST(SortedDataInterface, BuilderAddMultipleKeys) {
const auto harnessHelper(newSortedDataInterfaceHarnessHelper());
@@ -202,6 +302,40 @@ TEST(SortedDataInterface, BuilderAddMultipleKeys) {
}
}
+/*
+ * Add multiple KeyStrings using a bulk builder.
+ */
+TEST(SortedDataInterface, BuilderAddMultipleKeyStrings) {
+ const auto harnessHelper(newSortedDataInterfaceHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(
+ harnessHelper->newSortedDataInterface(/*unique=*/false, /*partial=*/false));
+
+ KeyString::Builder keyString1(sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc1);
+ KeyString::Builder keyString2(sorted->getKeyStringVersion(), key2, sorted->getOrdering(), loc2);
+ KeyString::Builder keyString3(sorted->getKeyStringVersion(), key3, sorted->getOrdering(), loc3);
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataBuilderInterface> builder(
+ sorted->getBulkBuilder(opCtx.get(), true));
+
+ ASSERT_OK(builder->addKey(keyString1, loc1));
+ ASSERT_OK(builder->addKey(keyString2, loc2));
+ ASSERT_OK(builder->addKey(keyString3, loc3));
+ builder->commit(false);
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(3, sorted->numEntries(opCtx.get()));
+ }
+}
+
// Add multiple compound keys using a bulk builder.
TEST(SortedDataInterface, BuilderAddMultipleCompoundKeys) {
const auto harnessHelper(newSortedDataInterfaceHarnessHelper());
diff --git a/src/mongo/db/storage/sorted_data_interface_test_insert.cpp b/src/mongo/db/storage/sorted_data_interface_test_insert.cpp
index 59ce3a72aa2..ebb604cb943 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_insert.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_insert.cpp
@@ -31,6 +31,7 @@
#include <memory>
+#include "mongo/db/storage/key_string.h"
#include "mongo/db/storage/sorted_data_interface.h"
#include "mongo/unittest/unittest.h"
@@ -60,6 +61,40 @@ TEST(SortedDataInterface, Insert) {
{
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ }
+}
+
+// Insert a KeyString and verify that the number of entries in the index equals 1.
+TEST(SortedDataInterface, InsertKeyString) {
+ const auto harnessHelper(newSortedDataInterfaceHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(
+ harnessHelper->newSortedDataInterface(/*unique=*/false, /*partial=*/false));
+
+ KeyString::Builder keyString1(sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc1);
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ {
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), keyString1, loc1, true));
+ uow.commit();
+ }
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
}
}
@@ -200,6 +235,9 @@ TEST(SortedDataInterface, InsertSameKey) {
{
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
}
{
@@ -214,6 +252,64 @@ TEST(SortedDataInterface, InsertSameKey) {
{
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ }
+}
+
+/*
+ * Insert the same KeyString multiple times and verify that only 1 entry exists in the index when
+ * duplicates are not allowed.
+ */
+TEST(SortedDataInterface, InsertSameKeyString) {
+ const auto harnessHelper(newSortedDataInterfaceHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(
+ harnessHelper->newSortedDataInterface(/*unique=*/true, /*partial=*/false));
+
+ KeyString::Builder keyStringLoc1(
+ sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc1);
+ KeyString::Builder keyStringLoc2(
+ sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc2);
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ {
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), keyStringLoc1, loc1, false));
+ ASSERT_NOT_OK(sorted->insert(opCtx.get(), keyStringLoc2, loc2, false));
+ uow.commit();
+ }
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ {
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_NOT_OK(sorted->insert(opCtx.get(), keyStringLoc2, loc2, false));
+ uow.commit();
+ }
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
}
}
@@ -310,6 +406,10 @@ TEST(SortedDataInterface, InsertMultiple) {
{
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->seek(key2, true), IndexKeyEntry(key2, loc2));
}
{
@@ -324,6 +424,68 @@ TEST(SortedDataInterface, InsertMultiple) {
{
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
ASSERT_EQUALS(3, sorted->numEntries(opCtx.get()));
+
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->seek(key2, true), IndexKeyEntry(key2, loc2));
+ ASSERT_EQ(cursor->seek(key3, true), IndexKeyEntry(key3, loc3));
+ }
+}
+
+/*
+ * Insert multiple KeyStrings and verify that the number of entries in the index equals the number
+ * that were inserted.
+ */
+TEST(SortedDataInterface, InsertMultipleKeyStrings) {
+ const auto harnessHelper(newSortedDataInterfaceHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(
+ harnessHelper->newSortedDataInterface(/*unique=*/true, /*partial=*/false));
+
+ KeyString::Builder keyString1(sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc1);
+ KeyString::Builder keyString2(sorted->getKeyStringVersion(), key2, sorted->getOrdering(), loc2);
+ KeyString::Builder keyString3(sorted->getKeyStringVersion(), key3, sorted->getOrdering(), loc3);
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ {
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), keyString1, loc1, false));
+ ASSERT_OK(sorted->insert(opCtx.get(), keyString2, loc2, false));
+ uow.commit();
+ }
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->seek(key2, true), IndexKeyEntry(key2, loc2));
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ {
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), keyString3, loc3, false));
+ uow.commit();
+ }
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(3, sorted->numEntries(opCtx.get()));
+
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->seek(key2, true), IndexKeyEntry(key2, loc2));
+ ASSERT_EQ(cursor->seek(key3, true), IndexKeyEntry(key3, loc3));
}
}
diff --git a/src/mongo/db/storage/sorted_data_interface_test_unindex.cpp b/src/mongo/db/storage/sorted_data_interface_test_unindex.cpp
index 267fff0f569..4281dc9010d 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_unindex.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_unindex.cpp
@@ -86,6 +86,59 @@ TEST(SortedDataInterface, UnindexPartial) {
unindex(true);
}
+/*
+ * Insert a KeyString and verify that it can be unindexed.
+ */
+void unindexKeyString(bool partial) {
+ const auto harnessHelper(newSortedDataInterfaceHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(
+ harnessHelper->newSortedDataInterface(/*unique=*/false, partial));
+
+ KeyString::Builder keyString1(sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc1);
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ {
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), keyString1, loc1, true));
+ uow.commit();
+ }
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ {
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->unindex(opCtx.get(), keyString1, loc1, true);
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ uow.commit();
+ }
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+}
+
+TEST(SortedDataInterface, UnindexKeyString) {
+ unindexKeyString(false);
+}
+
+TEST(SortedDataInterface, UnindexKeyStringPartial) {
+ unindexKeyString(true);
+}
+
// Insert a compound key and verify that it can be unindexed.
void unindexCompoundKey(bool partial) {
const auto harnessHelper(newSortedDataInterfaceHarnessHelper());
@@ -298,6 +351,99 @@ TEST(SortedDataInterface, UnindexMultipleSameKeyPartial) {
unindexMultipleSameKey(true);
}
+/*
+ * Insert the same KeyString multiple times and verify that each occurrence can be unindexed.
+ */
+void unindexMultipleSameKeyString(bool partial) {
+ const auto harnessHelper(newSortedDataInterfaceHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(
+ harnessHelper->newSortedDataInterface(/*unique=*/false, partial));
+
+ KeyString::Builder keyStringLoc1(
+ sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc1);
+ KeyString::Builder keyStringLoc2(
+ sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc2);
+ KeyString::Builder keyStringLoc3(
+ sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc3);
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ {
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), keyStringLoc1, loc1, true));
+ ASSERT_OK(
+ sorted->insert(opCtx.get(), keyStringLoc2, loc2, true /* allow duplicates */));
+ uow.commit();
+ }
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ {
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->unindex(opCtx.get(), keyStringLoc2, loc2, true);
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ uow.commit();
+ }
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ {
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(
+ sorted->insert(opCtx.get(), keyStringLoc3, loc3, true /* allow duplicates */));
+ uow.commit();
+ }
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ {
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->unindex(opCtx.get(), keyStringLoc1, loc1, true);
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ sorted->unindex(opCtx.get(), keyStringLoc3, loc3, true);
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ uow.commit();
+ }
+ }
+
+ {
+ const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+}
+
+
+TEST(SortedDataInterface, UnindexMultipleSameKeyString) {
+ unindexMultipleSameKeyString(false);
+}
+
+TEST(SortedDataInterface, UnindexMultipleSameKeyStringPartial) {
+ unindexMultipleSameKeyString(true);
+}
+
// Call unindex() on a nonexistent key and verify the result is false.
void unindexEmpty(bool partial) {
const auto harnessHelper(newSortedDataInterfaceHarnessHelper());
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
index e24254abc03..caf4bb11cbd 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
@@ -44,7 +44,6 @@
#include "mongo/db/json.h"
#include "mongo/db/repl/repl_settings.h"
#include "mongo/db/service_context.h"
-#include "mongo/db/storage/key_string.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h"
@@ -256,90 +255,71 @@ WiredTigerIndex::WiredTigerIndex(OperationContext* ctx,
const IndexDescriptor* desc,
KVPrefix prefix,
bool isReadOnly)
- : _ordering(Ordering::make(desc->keyPattern())),
+ : SortedDataInterface(_handleVersionInfo(ctx, uri, desc, isReadOnly),
+ Ordering::make(desc->keyPattern())),
_uri(uri),
_tableId(WiredTigerSession::genTableId()),
_collectionNamespace(desc->parentNS()),
_indexName(desc->indexName()),
_keyPattern(desc->keyPattern()),
_prefix(prefix),
- _isIdIndex(desc->isIdIndex()) {
- auto version = WiredTigerUtil::checkApplicationMetadataFormatVersion(
- ctx, uri, kMinimumIndexVersion, kMaximumIndexVersion);
- if (!version.isOK()) {
- Status versionStatus = version.getStatus();
- Status indexVersionStatus(
- ErrorCodes::UnsupportedFormat,
- str::stream() << versionStatus.reason() << " Index: {name: " << desc->indexName()
- << ", ns: "
- << desc->parentNS()
- << "} - version either too old or too new for this mongod.");
- fassertFailedWithStatusNoTrace(28579, indexVersionStatus);
- }
- _dataFormatVersion = version.getValue();
+ _isIdIndex(desc->isIdIndex()) {}
- if (!_isIdIndex && desc->unique()) {
- Status versionStatus = _dataFormatVersion == kDataFormatV3KeyStringV0UniqueIndexVersionV1 ||
- _dataFormatVersion == kDataFormatV4KeyStringV1UniqueIndexVersionV2
- ? Status::OK()
- : Status(ErrorCodes::UnsupportedFormat,
- str::stream() << "Index: {name: " << desc->indexName() << ", ns: "
- << desc->parentNS()
- << "} has incompatible format version: "
- << _dataFormatVersion
- << ". MongoDB 4.2 onwards, WT secondary unique indexes use "
- "either format version 11 or 12. See "
- "https://dochub.mongodb.org/core/upgrade-4.2-procedures for "
- "detailed instructions on upgrading the index format.");
- fassertNoTrace(31179, versionStatus);
- }
+Status WiredTigerIndex::insert(OperationContext* opCtx,
+ const BSONObj& key,
+ const RecordId& id,
+ bool dupsAllowed) {
+ dassert(opCtx->lockState()->isWriteLocked());
+ invariant(id.isValid());
+ dassert(!hasFieldNames(key));
- // Index data format 6 and 11 correspond to KeyString version V0 and data format 8 and 12
- // correspond to KeyString version V1
- _keyStringVersion = (_dataFormatVersion == kDataFormatV2KeyStringV1IndexVersionV2 ||
- _dataFormatVersion == kDataFormatV4KeyStringV1UniqueIndexVersionV2)
- ? KeyString::Version::V1
- : KeyString::Version::V0;
+ TRACE_INDEX << " key: " << key << " id: " << id;
- if (!isReadOnly) {
- bool replicatedWrites = getGlobalReplSettings().usingReplSets() ||
- repl::ReplSettings::shouldRecoverFromOplogAsStandalone();
- uassertStatusOK(WiredTigerUtil::setTableLogging(
- ctx,
- uri,
- WiredTigerUtil::useTableLogging(NamespaceString(desc->parentNS()), replicatedWrites)));
- }
+ KeyString::Builder keyString(getKeyStringVersion(), key, _ordering, id);
+
+ return insert(opCtx, keyString, id, dupsAllowed);
}
Status WiredTigerIndex::insert(OperationContext* opCtx,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& id,
bool dupsAllowed) {
dassert(opCtx->lockState()->isWriteLocked());
- invariant(id.isValid());
- dassert(!hasFieldNames(key));
+ dassert(id == KeyString::decodeRecordIdAtEnd(keyString.getBuffer(), keyString.getSize()));
+
+ TRACE_INDEX << " KeyString: " << keyString;
WiredTigerCursor curwrap(_uri, _tableId, false, opCtx);
curwrap.assertInActiveTxn();
WT_CURSOR* c = curwrap.get();
- return _insert(opCtx, c, key, id, dupsAllowed);
+ return _insert(opCtx, c, keyString, id, dupsAllowed);
}
void WiredTigerIndex::unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& id,
bool dupsAllowed) {
- dassert(opCtx->lockState()->isWriteLocked());
invariant(id.isValid());
dassert(!hasFieldNames(key));
+ KeyString::Builder keyString(getKeyStringVersion(), key, _ordering, id);
+
+ unindex(opCtx, keyString, id, dupsAllowed);
+}
+
+void WiredTigerIndex::unindex(OperationContext* opCtx,
+ const KeyString::Builder& keyString,
+ const RecordId& id,
+ bool dupsAllowed) {
+ dassert(opCtx->lockState()->isWriteLocked());
+ dassert(id == KeyString::decodeRecordIdAtEnd(keyString.getBuffer(), keyString.getSize()));
WiredTigerCursor curwrap(_uri, _tableId, false, opCtx);
curwrap.assertInActiveTxn();
WT_CURSOR* c = curwrap.get();
invariant(c);
- _unindex(opCtx, c, key, id, dupsAllowed);
+ _unindex(opCtx, c, keyString, id, dupsAllowed);
}
void WiredTigerIndex::fullValidate(OperationContext* opCtx,
@@ -508,7 +488,7 @@ bool WiredTigerIndex::isDup(OperationContext* opCtx, WT_CURSOR* c, const BSONObj
invariant(unique());
// First check whether the key exists.
- KeyString::Builder data(keyStringVersion(), key, _ordering);
+ KeyString::Builder data(getKeyStringVersion(), key, _ordering);
WiredTigerItem item(data.getBuffer(), data.getSize());
setKey(c, item.Get());
@@ -528,7 +508,7 @@ bool WiredTigerIndex::isDup(OperationContext* opCtx, WT_CURSOR* c, const BSONObj
KeyString::decodeRecordId(&br);
records++;
- KeyString::TypeBits::fromBuffer(keyStringVersion(), &br); // Just advance the reader.
+ KeyString::TypeBits::fromBuffer(getKeyStringVersion(), &br); // Just advance the reader.
}
return records > 1;
}
@@ -550,6 +530,59 @@ Status WiredTigerIndex::compact(OperationContext* opCtx) {
return Status::OK();
}
+KeyString::Version WiredTigerIndex::_handleVersionInfo(OperationContext* ctx,
+ const std::string& uri,
+ const IndexDescriptor* desc,
+ bool isReadOnly) {
+ auto version = WiredTigerUtil::checkApplicationMetadataFormatVersion(
+ ctx, uri, kMinimumIndexVersion, kMaximumIndexVersion);
+ if (!version.isOK()) {
+ Status versionStatus = version.getStatus();
+ Status indexVersionStatus(
+ ErrorCodes::UnsupportedFormat,
+ str::stream() << versionStatus.reason() << " Index: {name: " << desc->indexName()
+ << ", ns: "
+ << desc->parentNS()
+ << "} - version either too old or too new for this mongod.");
+ fassertFailedWithStatusNoTrace(28579, indexVersionStatus);
+ }
+ _dataFormatVersion = version.getValue();
+
+ if (!desc->isIdIndex() && desc->unique()) {
+ Status versionStatus = _dataFormatVersion == kDataFormatV3KeyStringV0UniqueIndexVersionV1 ||
+ _dataFormatVersion == kDataFormatV4KeyStringV1UniqueIndexVersionV2
+ ? Status::OK()
+ : Status(ErrorCodes::UnsupportedFormat,
+ str::stream() << "Index: {name: " << desc->indexName() << ", ns: "
+ << desc->parentNS()
+ << "} has incompatible format version: "
+ << _dataFormatVersion
+ << ". MongoDB 4.2 onwards, WT secondary unique indexes use "
+ "either format version 11 or 12. See "
+ "https://dochub.mongodb.org/core/upgrade-4.2-procedures for "
+ "detailed instructions on upgrading the index format.");
+ fassertNoTrace(31179, versionStatus);
+ }
+
+ if (!isReadOnly) {
+ bool replicatedWrites = getGlobalReplSettings().usingReplSets() ||
+ repl::ReplSettings::shouldRecoverFromOplogAsStandalone();
+ uassertStatusOK(WiredTigerUtil::setTableLogging(
+ ctx,
+ uri,
+ WiredTigerUtil::useTableLogging(NamespaceString(desc->parentNS()), replicatedWrites)));
+ }
+
+ /*
+ * Index data format 6 and 11 correspond to KeyString version V0 and data format 8 and 12
+ * correspond to KeyString version V1.
+ */
+ return (_dataFormatVersion == kDataFormatV2KeyStringV1IndexVersionV2 ||
+ _dataFormatVersion == kDataFormatV4KeyStringV1UniqueIndexVersionV2)
+ ? KeyString::Version::V1
+ : KeyString::Version::V0;
+}
+
/**
* Base class for WiredTigerIndex bulk builders.
*
@@ -618,15 +651,22 @@ public:
: BulkBuilder(idx, opCtx, prefix), _idx(idx) {}
Status addKey(const BSONObj& key, const RecordId& id) override {
- KeyString::Builder data(_idx->keyStringVersion(), key, _idx->_ordering, id);
+ KeyString::Builder keyString(_idx->getKeyStringVersion(), key, _idx->_ordering, id);
+
+ return addKey(keyString, id);
+ }
+
+ Status addKey(const KeyString::Builder& keyString, const RecordId& id) override {
+ dassert(id == KeyString::decodeRecordIdAtEnd(keyString.getBuffer(), keyString.getSize()));
// Can't use WiredTigerCursor since we aren't using the cache.
- WiredTigerItem item(data.getBuffer(), data.getSize());
+ WiredTigerItem item(keyString.getBuffer(), keyString.getSize());
setKey(_cursor, item.Get());
- WiredTigerItem valueItem = data.getTypeBits().isAllZeros()
+ WiredTigerItem valueItem = keyString.getTypeBits().isAllZeros()
? emptyItem
- : WiredTigerItem(data.getTypeBits().getBuffer(), data.getTypeBits().getSize());
+ : WiredTigerItem(keyString.getTypeBits().getBuffer(),
+ keyString.getTypeBits().getSize());
_cursor->set_value(_cursor, valueItem.Get());
@@ -663,13 +703,22 @@ public:
: BulkBuilder(idx, opCtx, prefix),
_idx(idx),
_dupsAllowed(dupsAllowed),
- _keyString(idx->keyStringVersion()) {}
+ _previousKeyString(idx->getKeyStringVersion()) {}
Status addKey(const BSONObj& newKey, const RecordId& id) override {
+ KeyString::Builder newKeyString(
+ _idx->getKeyStringVersion(), newKey, _idx->getOrdering(), id);
+ return addKey(newKeyString, id);
+ }
+
+ Status addKey(const KeyString::Builder& newKeyString, const RecordId& id) override {
+ dassert(id ==
+ KeyString::decodeRecordIdAtEnd(newKeyString.getBuffer(), newKeyString.getSize()));
+
if (_idx->isTimestampSafeUniqueIdx()) {
- return addKeyTimestampSafe(newKey, id);
+ return addKeyTimestampSafe(newKeyString);
}
- return addKeyTimestampUnsafe(newKey, id);
+ return addKeyTimestampUnsafe(newKeyString, id);
}
void commit(bool mayInterrupt) override {
@@ -682,31 +731,35 @@ public:
}
private:
- Status addKeyTimestampSafe(const BSONObj& newKey, const RecordId& id) {
+ Status addKeyTimestampSafe(const KeyString::Builder& newKeyString) {
// Do a duplicate check, but only if dups aren't allowed.
if (!_dupsAllowed) {
- const int cmp = newKey.woCompare(_previousKey, _ordering);
+ const int cmp = newKeyString.compareWithoutRecordId(_previousKeyString);
if (cmp == 0) {
// Duplicate found!
+ auto newKey = KeyString::toBson(newKeyString.getBuffer(),
+ newKeyString.getSize(),
+ _idx->_ordering,
+ newKeyString.getTypeBits());
return buildDupKeyErrorStatus(
newKey, _idx->collectionNamespace(), _idx->indexName(), _idx->keyPattern());
} else {
- // _previousKey.isEmpty() is only true on the first call to addKey().
- // newKey must be > the last key
- invariant(_previousKey.isEmpty() || cmp > 0);
+ /*
+ * _previousKeyString.isEmpty() is only true on the first call to addKey().
+ * newKeyString must be greater than previous key.
+ */
+ invariant(_previousKeyString.isEmpty() || cmp > 0);
}
}
- _keyString.resetToKey(newKey, _idx->ordering(), id);
-
// Can't use WiredTigerCursor since we aren't using the cache.
- WiredTigerItem keyItem(_keyString.getBuffer(), _keyString.getSize());
+ WiredTigerItem keyItem(newKeyString.getBuffer(), newKeyString.getSize());
setKey(_cursor, keyItem.Get());
- WiredTigerItem valueItem = _keyString.getTypeBits().isAllZeros()
+ WiredTigerItem valueItem = newKeyString.getTypeBits().isAllZeros()
? emptyItem
- : WiredTigerItem(_keyString.getTypeBits().getBuffer(),
- _keyString.getTypeBits().getSize());
+ : WiredTigerItem(newKeyString.getTypeBits().getBuffer(),
+ newKeyString.getTypeBits().getSize());
_cursor->set_value(_cursor, valueItem.Get());
@@ -714,16 +767,16 @@ private:
// Don't copy the key again if dups are allowed.
if (!_dupsAllowed)
- _previousKey = newKey.getOwned();
+ _previousKeyString.resetFromBuffer(newKeyString.getBuffer(), newKeyString.getSize());
return Status::OK();
}
- Status addKeyTimestampUnsafe(const BSONObj& newKey, const RecordId& id) {
- const int cmp = newKey.woCompare(_previousKey, _ordering);
+ Status addKeyTimestampUnsafe(const KeyString::Builder& newKeyString, const RecordId& id) {
+ const int cmp = newKeyString.compareWithoutRecordId(_previousKeyString);
if (cmp != 0) {
- if (!_previousKey.isEmpty()) {
- // _previousKey.isEmpty() is only true on the first call to addKey().
+ if (!_previousKeyString.isEmpty()) {
+ // _previousKeyString.isEmpty() is only true on the first call to addKey().
invariant(cmp > 0); // newKey must be > the last key
// We are done with dups of the last key so we can insert it now.
doInsert();
@@ -732,6 +785,10 @@ private:
} else {
// Dup found!
if (!_dupsAllowed) {
+ auto newKey = KeyString::toBson(newKeyString.getBuffer(),
+ newKeyString.getSize(),
+ _idx->_ordering,
+ newKeyString.getTypeBits());
return buildDupKeyErrorStatus(
newKey, _idx->collectionNamespace(), _idx->indexName(), _idx->keyPattern());
}
@@ -741,9 +798,8 @@ private:
// _previousKey which is correct since any dups seen later are likely to be newer.
}
- _keyString.resetToKey(newKey, _idx->ordering());
- _records.push_back(std::make_pair(id, _keyString.getTypeBits()));
- _previousKey = newKey.getOwned();
+ _records.push_back(std::make_pair(id, newKeyString.getTypeBits()));
+ _previousKeyString.resetFromBuffer(newKeyString.getBuffer(), newKeyString.getSize());
return Status::OK();
}
@@ -751,7 +807,7 @@ private:
void doInsert() {
invariant(!_records.empty());
- KeyString::Builder value(_idx->keyStringVersion());
+ KeyString::Builder value(_idx->getKeyStringVersion());
for (size_t i = 0; i < _records.size(); i++) {
value.appendRecordId(_records[i].first);
// When there is only one record, we can omit AllZeros TypeBits. Otherwise they need
@@ -761,7 +817,9 @@ private:
}
}
- WiredTigerItem keyItem(_keyString.getBuffer(), _keyString.getSize());
+ auto sizeWithoutRecordId = KeyString::sizeWithoutRecordIdAtEnd(
+ _previousKeyString.getBuffer(), _previousKeyString.getSize());
+ WiredTigerItem keyItem(_previousKeyString.getBuffer(), sizeWithoutRecordId);
WiredTigerItem valueItem(value.getBuffer(), value.getSize());
setKey(_cursor, keyItem.Get());
@@ -774,9 +832,8 @@ private:
WiredTigerIndex* _idx;
const bool _dupsAllowed;
- KeyString::Builder _keyString;
+ KeyString::Builder _previousKeyString;
std::vector<std::pair<RecordId, KeyString::TypeBits>> _records;
- BSONObj _previousKey;
};
namespace {
@@ -793,9 +850,9 @@ public:
: _opCtx(opCtx),
_idx(idx),
_forward(forward),
- _key(idx.keyStringVersion()),
- _typeBits(idx.keyStringVersion()),
- _query(idx.keyStringVersion()),
+ _key(idx.getKeyStringVersion()),
+ _typeBits(idx.getKeyStringVersion()),
+ _query(idx.getKeyStringVersion()),
_prefix(prefix) {
_cursor.emplace(_idx.uri(), _idx.tableId(), false, _opCtx);
}
@@ -823,8 +880,8 @@ public:
// end after the key if inclusive and before if exclusive.
const auto discriminator = _forward == inclusive ? KeyString::Builder::kExclusiveAfter
: KeyString::Builder::kExclusiveBefore;
- _endPosition = std::make_unique<KeyString::Builder>(_idx.keyStringVersion());
- _endPosition->resetToKey(stripFieldNames(key), _idx.ordering(), discriminator);
+ _endPosition = std::make_unique<KeyString::Builder>(_idx.getKeyStringVersion());
+ _endPosition->resetToKey(stripFieldNames(key), _idx.getOrdering(), discriminator);
}
boost::optional<IndexKeyEntry> seek(const BSONObj& key,
@@ -837,7 +894,7 @@ public:
// By using a discriminator other than kInclusive, there is no need to distinguish
// unique vs non-unique key formats since both start with the key.
- _query.resetToKey(finalKey, _idx.ordering(), discriminator);
+ _query.resetToKey(finalKey, _idx.getOrdering(), discriminator);
seekWTCursor(_query);
updatePosition();
return curr(parts);
@@ -852,7 +909,7 @@ public:
// makeQueryObject handles the discriminator in the real exclusive cases.
const auto discriminator =
_forward ? KeyString::Builder::kExclusiveBefore : KeyString::Builder::kExclusiveAfter;
- _query.resetToKey(key, _idx.ordering(), discriminator);
+ _query.resetToKey(key, _idx.getOrdering(), discriminator);
seekWTCursor(_query);
updatePosition();
return curr(parts);
@@ -962,7 +1019,8 @@ protected:
BSONObj bson;
if (TRACING_ENABLED || (parts & kWantKey)) {
- bson = KeyString::toBson(_key.getBuffer(), _key.getSize(), _idx.ordering(), _typeBits);
+ bson =
+ KeyString::toBson(_key.getBuffer(), _key.getSize(), _idx.getOrdering(), _typeBits);
TRACE_CURSOR << " returning " << bson << ' ' << _id;
}
@@ -1153,7 +1211,7 @@ public:
}
auto keySize = KeyString::getKeySize(
- _key.getBuffer(), _key.getSize(), _idx.ordering(), _key.getTypeBits());
+ _key.getBuffer(), _key.getSize(), _idx.getOrdering(), _key.getTypeBits());
if (_key.getSize() == keySize) {
_updateIdAndTypeBitsFromValue();
@@ -1189,7 +1247,7 @@ public:
// Get the size of the prefix key
auto keySize = KeyString::getKeySize(
- _key.getBuffer(), _key.getSize(), _idx.ordering(), _key.getTypeBits());
+ _key.getBuffer(), _key.getSize(), _idx.getOrdering(), _key.getTypeBits());
// This check is only to avoid returning the same key again after a restore. Keys
// shorter than _key cannot have "prefix key" same as _key. Therefore we care only about
@@ -1259,8 +1317,9 @@ bool WiredTigerIndexUnique::isTimestampSafeUniqueIdx() const {
bool WiredTigerIndexUnique::_keyExists(OperationContext* opCtx,
WT_CURSOR* c,
- const KeyString::Builder& prefixKey) {
- WiredTigerItem prefixKeyItem(prefixKey.getBuffer(), prefixKey.getSize());
+ const char* buffer,
+ size_t size) {
+ WiredTigerItem prefixKeyItem(buffer, size);
setKey(c, prefixKeyItem.Get());
// An index entry key is KeyString of the prefix key + RecordId. To prevent duplicate prefix
@@ -1278,8 +1337,7 @@ bool WiredTigerIndexUnique::_keyExists(OperationContext* opCtx,
WT_ITEM item;
// Obtain the key from the record returned by search near.
getKey(c, &item);
- if (std::memcmp(prefixKey.getBuffer(), item.data, std::min(prefixKey.getSize(), item.size)) ==
- 0) {
+ if (std::memcmp(buffer, item.data, std::min(size, item.size)) == 0) {
return true;
}
@@ -1298,8 +1356,7 @@ bool WiredTigerIndexUnique::_keyExists(OperationContext* opCtx,
invariantWTOK(ret);
getKey(c, &item);
- return std::memcmp(
- prefixKey.getBuffer(), item.data, std::min(prefixKey.getSize(), item.size)) == 0;
+ return std::memcmp(buffer, item.data, std::min(size, item.size)) == 0;
}
bool WiredTigerIndexUnique::isDup(OperationContext* opCtx, WT_CURSOR* c, const BSONObj& key) {
@@ -1309,10 +1366,10 @@ bool WiredTigerIndexUnique::isDup(OperationContext* opCtx, WT_CURSOR* c, const B
}
// This procedure to determine duplicates is exclusive for timestamp safe unique indexes.
- KeyString::Builder prefixKey(keyStringVersion(), key, _ordering);
+ KeyString::Builder prefixKey(getKeyStringVersion(), key, _ordering);
// Check if a prefix key already exists in the index. When keyExists() returns true, the cursor
// will be positioned on the first occurence of the 'prefixKey'.
- if (!_keyExists(opCtx, c, prefixKey)) {
+ if (!_keyExists(opCtx, c, prefixKey.getBuffer(), prefixKey.getSize())) {
return false;
}
@@ -1338,26 +1395,29 @@ bool WiredTigerIndexUnique::isDup(OperationContext* opCtx, WT_CURSOR* c, const B
Status WiredTigerIndexUnique::_insert(OperationContext* opCtx,
WT_CURSOR* c,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& id,
bool dupsAllowed) {
if (isTimestampSafeUniqueIdx()) {
- return _insertTimestampSafe(opCtx, c, key, id, dupsAllowed);
+ return _insertTimestampSafe(opCtx, c, keyString, dupsAllowed);
}
- return _insertTimestampUnsafe(opCtx, c, key, id, dupsAllowed);
+ return _insertTimestampUnsafe(opCtx, c, keyString, id, dupsAllowed);
}
Status WiredTigerIndexUnique::_insertTimestampUnsafe(OperationContext* opCtx,
WT_CURSOR* c,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& id,
bool dupsAllowed) {
- const KeyString::Builder data(keyStringVersion(), key, _ordering);
- WiredTigerItem keyItem(data.getBuffer(), data.getSize());
+ invariant(id.isValid());
+
+ auto sizeWithoutRecordId =
+ KeyString::sizeWithoutRecordIdAtEnd(keyString.getBuffer(), keyString.getSize());
+ WiredTigerItem keyItem(keyString.getBuffer(), sizeWithoutRecordId);
- KeyString::Builder value(keyStringVersion(), id);
- if (!data.getTypeBits().isAllZeros())
- value.appendTypeBits(data.getTypeBits());
+ KeyString::Builder value(getKeyStringVersion(), id);
+ if (!keyString.getTypeBits().isAllZeros())
+ value.appendTypeBits(keyString.getTypeBits());
WiredTigerItem valueItem(value.getBuffer(), value.getSize());
setKey(c, keyItem.Get());
@@ -1393,22 +1453,25 @@ Status WiredTigerIndexUnique::_insertTimestampUnsafe(OperationContext* opCtx,
if (!insertedId && id < idInIndex) {
value.appendRecordId(id);
- value.appendTypeBits(data.getTypeBits());
+ value.appendTypeBits(keyString.getTypeBits());
insertedId = true;
}
// Copy from old to new value
value.appendRecordId(idInIndex);
- value.appendTypeBits(KeyString::TypeBits::fromBuffer(keyStringVersion(), &br));
+ value.appendTypeBits(KeyString::TypeBits::fromBuffer(getKeyStringVersion(), &br));
}
- if (!dupsAllowed)
+ if (!dupsAllowed) {
+ auto key = KeyString::toBson(
+ keyString.getBuffer(), keyString.getSize(), _ordering, keyString.getTypeBits());
return buildDupKeyErrorStatus(key, _collectionNamespace, _indexName, _keyPattern);
+ }
if (!insertedId) {
// This id is higher than all currently in the index for this key
value.appendRecordId(id);
- value.appendTypeBits(data.getTypeBits());
+ value.appendTypeBits(keyString.getTypeBits());
}
valueItem = WiredTigerItem(value.getBuffer(), value.getSize());
@@ -1423,10 +1486,9 @@ Status WiredTigerIndexUnique::_insertTimestampUnsafe(OperationContext* opCtx,
Status WiredTigerIndexUnique::_insertTimestampSafe(OperationContext* opCtx,
WT_CURSOR* c,
- const BSONObj& key,
- const RecordId& id,
+ const KeyString::Builder& keyString,
bool dupsAllowed) {
- TRACE_INDEX << "Timestamp safe unique idx key: " << key << " id: " << id;
+ TRACE_INDEX << "Timestamp safe unique idx KeyString: " << keyString;
int ret;
@@ -1434,8 +1496,9 @@ Status WiredTigerIndexUnique::_insertTimestampSafe(OperationContext* opCtx,
if (!dupsAllowed) {
// A prefix key is KeyString of index key. It is the component of the index entry that
// should be unique.
- const KeyString::Builder prefixKey(keyStringVersion(), key, _ordering);
- WiredTigerItem prefixKeyItem(prefixKey.getBuffer(), prefixKey.getSize());
+ auto sizeWithoutRecordId =
+ KeyString::sizeWithoutRecordIdAtEnd(keyString.getBuffer(), keyString.getSize());
+ WiredTigerItem prefixKeyItem(keyString.getBuffer(), sizeWithoutRecordId);
// First phase inserts the prefix key to prohibit concurrent insertions of same key
setKey(c, prefixKeyItem.Get());
@@ -1445,6 +1508,8 @@ Status WiredTigerIndexUnique::_insertTimestampSafe(OperationContext* opCtx,
// An entry with prefix key already exists. This can happen only during rolling upgrade when
// both timestamp unsafe and timestamp safe index format keys could be present.
if (ret == WT_DUPLICATE_KEY) {
+ auto key = KeyString::toBson(
+ keyString.getBuffer(), sizeWithoutRecordId, _ordering, keyString.getTypeBits());
return buildDupKeyErrorStatus(key, _collectionNamespace, _indexName, _keyPattern);
}
invariantWTOK(ret);
@@ -1457,17 +1522,19 @@ Status WiredTigerIndexUnique::_insertTimestampSafe(OperationContext* opCtx,
invariantWTOK(ret);
// Second phase looks up for existence of key to avoid insertion of duplicate key
- if (_keyExists(opCtx, c, prefixKey))
+ if (_keyExists(opCtx, c, keyString.getBuffer(), sizeWithoutRecordId)) {
+ auto key = KeyString::toBson(
+ keyString.getBuffer(), sizeWithoutRecordId, _ordering, keyString.getTypeBits());
return buildDupKeyErrorStatus(key, _collectionNamespace, _indexName, _keyPattern);
+ }
}
// Now create the table key/value, the actual data record.
- KeyString::Builder tableKey(keyStringVersion(), key, _ordering, id);
- WiredTigerItem keyItem(tableKey.getBuffer(), tableKey.getSize());
+ WiredTigerItem keyItem(keyString.getBuffer(), keyString.getSize());
- WiredTigerItem valueItem = tableKey.getTypeBits().isAllZeros()
+ WiredTigerItem valueItem = keyString.getTypeBits().isAllZeros()
? emptyItem
- : WiredTigerItem(tableKey.getTypeBits().getBuffer(), tableKey.getTypeBits().getSize());
+ : WiredTigerItem(keyString.getTypeBits().getBuffer(), keyString.getTypeBits().getSize());
setKey(c, keyItem.Get());
c->set_value(c, valueItem.Get());
ret = WT_OP_CHECK(c->insert(c));
@@ -1481,22 +1548,25 @@ Status WiredTigerIndexUnique::_insertTimestampSafe(OperationContext* opCtx,
void WiredTigerIndexUnique::_unindex(OperationContext* opCtx,
WT_CURSOR* c,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& id,
bool dupsAllowed) {
if (isTimestampSafeUniqueIdx()) {
- return _unindexTimestampSafe(opCtx, c, key, id, dupsAllowed);
+ return _unindexTimestampSafe(opCtx, c, keyString, dupsAllowed);
}
- return _unindexTimestampUnsafe(opCtx, c, key, id, dupsAllowed);
+ return _unindexTimestampUnsafe(opCtx, c, keyString, id, dupsAllowed);
}
void WiredTigerIndexUnique::_unindexTimestampUnsafe(OperationContext* opCtx,
WT_CURSOR* c,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& id,
bool dupsAllowed) {
- KeyString::Builder data(keyStringVersion(), key, _ordering);
- WiredTigerItem keyItem(data.getBuffer(), data.getSize());
+ invariant(id.isValid());
+
+ auto sizeWithoutRecordId =
+ KeyString::sizeWithoutRecordIdAtEnd(keyString.getBuffer(), keyString.getSize());
+ WiredTigerItem keyItem(keyString.getBuffer(), sizeWithoutRecordId);
setKey(c, keyItem.Get());
auto triggerWriteConflictAtPoint = [this, &keyItem](WT_CURSOR* point) {
@@ -1528,7 +1598,7 @@ void WiredTigerIndexUnique::_unindexTimestampUnsafe(OperationContext* opCtx,
return;
}
// Ensure there aren't any other values in here.
- KeyString::TypeBits::fromBuffer(keyStringVersion(), &br);
+ KeyString::TypeBits::fromBuffer(getKeyStringVersion(), &br);
fassert(40417, !br.remaining());
}
int ret = WT_OP_CHECK(c->remove(c));
@@ -1558,7 +1628,7 @@ void WiredTigerIndexUnique::_unindexTimestampUnsafe(OperationContext* opCtx,
BufReader br(old.data, old.size);
while (br.remaining()) {
RecordId idInIndex = KeyString::decodeRecordId(&br);
- KeyString::TypeBits typeBits = KeyString::TypeBits::fromBuffer(keyStringVersion(), &br);
+ KeyString::TypeBits typeBits = KeyString::TypeBits::fromBuffer(getKeyStringVersion(), &br);
if (id == idInIndex) {
if (records.empty() && !br.remaining()) {
@@ -1576,12 +1646,14 @@ void WiredTigerIndexUnique::_unindexTimestampUnsafe(OperationContext* opCtx,
}
if (!foundId) {
+ auto key = KeyString::toBson(
+ keyString.getBuffer(), keyString.getSize(), _ordering, keyString.getTypeBits());
warning().stream() << id << " not found in the index for key " << redact(key);
return; // nothing to do
}
// Put other ids for this key back in the index.
- KeyString::Builder newValue(keyStringVersion());
+ KeyString::Builder newValue(getKeyStringVersion());
invariant(!records.empty());
for (size_t i = 0; i < records.size(); i++) {
newValue.appendRecordId(records[i].first);
@@ -1599,11 +1671,9 @@ void WiredTigerIndexUnique::_unindexTimestampUnsafe(OperationContext* opCtx,
void WiredTigerIndexUnique::_unindexTimestampSafe(OperationContext* opCtx,
WT_CURSOR* c,
- const BSONObj& key,
- const RecordId& id,
+ const KeyString::Builder& keyString,
bool dupsAllowed) {
- KeyString::Builder data(keyStringVersion(), key, _ordering, id);
- WiredTigerItem item(data.getBuffer(), data.getSize());
+ WiredTigerItem item(keyString.getBuffer(), keyString.getSize());
setKey(c, item.Get());
int ret = WT_OP_CHECK(c->remove(c));
if (ret != WT_NOTFOUND) {
@@ -1615,8 +1685,9 @@ void WiredTigerIndexUnique::_unindexTimestampSafe(OperationContext* opCtx,
// timestamp safe (new) unique indexes. Old format keys just had the index key while new
// format key has index key + Record id. WT_NOTFOUND is possible if index key is in old format.
// Retry removal of key using old format.
- KeyString::Builder oldFormatKey(keyStringVersion(), key, _ordering);
- WiredTigerItem keyItem(oldFormatKey.getBuffer(), oldFormatKey.getSize());
+ auto sizeWithoutRecordId =
+ KeyString::sizeWithoutRecordIdAtEnd(keyString.getBuffer(), keyString.getSize());
+ WiredTigerItem keyItem(keyString.getBuffer(), sizeWithoutRecordId);
setKey(c, keyItem.Get());
ret = WT_OP_CHECK(c->remove(c));
@@ -1656,19 +1727,16 @@ SortedDataBuilderInterface* WiredTigerIndexStandard::getBulkBuilder(OperationCon
Status WiredTigerIndexStandard::_insert(OperationContext* opCtx,
WT_CURSOR* c,
- const BSONObj& keyBson,
+ const KeyString::Builder& keyString,
const RecordId& id,
bool dupsAllowed) {
invariant(dupsAllowed);
- TRACE_INDEX << " key: " << keyBson << " id: " << id;
-
- KeyString::Builder key(keyStringVersion(), keyBson, _ordering, id);
- WiredTigerItem keyItem(key.getBuffer(), key.getSize());
+ WiredTigerItem keyItem(keyString.getBuffer(), keyString.getSize());
- WiredTigerItem valueItem = key.getTypeBits().isAllZeros()
+ WiredTigerItem valueItem = keyString.getTypeBits().isAllZeros()
? emptyItem
- : WiredTigerItem(key.getTypeBits().getBuffer(), key.getTypeBits().getSize());
+ : WiredTigerItem(keyString.getTypeBits().getBuffer(), keyString.getTypeBits().getSize());
setKey(c, keyItem.Get());
c->set_value(c, valueItem.Get());
@@ -1685,12 +1753,11 @@ Status WiredTigerIndexStandard::_insert(OperationContext* opCtx,
void WiredTigerIndexStandard::_unindex(OperationContext* opCtx,
WT_CURSOR* c,
- const BSONObj& key,
- const RecordId& id,
+ const KeyString::Builder& keyString,
+ const RecordId&,
bool dupsAllowed) {
invariant(dupsAllowed);
- KeyString::Builder data(keyStringVersion(), key, _ordering, id);
- WiredTigerItem item(data.getBuffer(), data.getSize());
+ WiredTigerItem item(keyString.getBuffer(), keyString.getSize());
setKey(c, item.Get());
int ret = WT_OP_CHECK(c->remove(c));
if (ret != WT_NOTFOUND) {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
index dea944021d3..30774f9032e 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
@@ -96,11 +96,21 @@ public:
const RecordId& id,
bool dupsAllowed);
+ virtual Status insert(OperationContext* opCtx,
+ const KeyString::Builder& keyString,
+ const RecordId& id,
+ bool dupsAllowed);
+
virtual void unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& id,
bool dupsAllowed);
+ virtual void unindex(OperationContext* opCtx,
+ const KeyString::Builder& keyString,
+ const RecordId& id,
+ bool dupsAllowed);
+
virtual void fullValidate(OperationContext* opCtx,
long long* numKeysOut,
ValidateResults* fullResults) const;
@@ -130,13 +140,6 @@ public:
uint64_t tableId() const {
return _tableId;
}
- Ordering ordering() const {
- return _ordering;
- }
-
- KeyString::Version keyStringVersion() const {
- return _keyStringVersion;
- }
const NamespaceString& collectionNamespace() const {
return _collectionNamespace;
@@ -160,27 +163,36 @@ public:
protected:
virtual Status _insert(OperationContext* opCtx,
WT_CURSOR* c,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& id,
bool dupsAllowed) = 0;
virtual void _unindex(OperationContext* opCtx,
WT_CURSOR* c,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& id,
bool dupsAllowed) = 0;
void setKey(WT_CURSOR* cursor, const WT_ITEM* item);
void getKey(WT_CURSOR* cursor, WT_ITEM* key);
+ /*
+ * Determines the data format version from application metadata and verifies compatibility.
+ * Returns the corresponding KeyString version.
+ */
+ KeyString::Version _handleVersionInfo(OperationContext* ctx,
+ const std::string& uri,
+ const IndexDescriptor* desc,
+ bool isReadOnly);
+
class BulkBuilder;
class StandardBulkBuilder;
class UniqueBulkBuilder;
- const Ordering _ordering;
- // The keystring and data format version are effectively const after the WiredTigerIndex
- // instance is constructed.
- KeyString::Version _keyStringVersion;
+ /*
+ * The data format version is effectively const after the WiredTigerIndex instance is
+ * constructed.
+ */
int _dataFormatVersion;
std::string _uri;
uint64_t _tableId;
@@ -214,45 +226,43 @@ public:
Status _insert(OperationContext* opCtx,
WT_CURSOR* c,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& id,
bool dupsAllowed) override;
Status _insertTimestampUnsafe(OperationContext* opCtx,
WT_CURSOR* c,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& id,
bool dupsAllowed);
Status _insertTimestampSafe(OperationContext* opCtx,
WT_CURSOR* c,
- const BSONObj& key,
- const RecordId& id,
+ const KeyString::Builder& keyString,
bool dupsAllowed);
void _unindex(OperationContext* opCtx,
WT_CURSOR* c,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& id,
bool dupsAllowed) override;
void _unindexTimestampUnsafe(OperationContext* opCtx,
WT_CURSOR* c,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& id,
bool dupsAllowed);
void _unindexTimestampSafe(OperationContext* opCtx,
WT_CURSOR* c,
- const BSONObj& key,
- const RecordId& id,
+ const KeyString::Builder& keyString,
bool dupsAllowed);
private:
/**
* If this returns true, the cursor will be positioned on the first matching the input 'key'.
*/
- bool _keyExists(OperationContext* opCtx, WT_CURSOR* c, const KeyString::Builder& key);
+ bool _keyExists(OperationContext* opCtx, WT_CURSOR* c, const char* buffer, size_t size);
bool _partial;
};
@@ -280,13 +290,13 @@ public:
Status _insert(OperationContext* opCtx,
WT_CURSOR* c,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& id,
bool dupsAllowed) override;
void _unindex(OperationContext* opCtx,
WT_CURSOR* c,
- const BSONObj& key,
+ const KeyString::Builder& keyString,
const RecordId& id,
bool dupsAllowed) override;
};