author    seantao97 <seantao97@gmail.com>    2018-07-12 15:47:31 -0400
committer Sean Tao <sean.tao@10gen.com>      2018-08-02 10:39:12 -0400
commit    308fc1b550fd7343e9168b8d87e5a4065cd4afda (patch)
tree      fe5e385bfa572a51f0617af1b54dfc926eed832b
parent    684d7fce79006ba7d18ddf44e67a2f1aec6f8fbb (diff)
download  mongo-308fc1b550fd7343e9168b8d87e5a4065cd4afda.tar.gz
SERVER-35947 Implement sorted data interface for the biggie storage engine.
-rw-r--r--  src/mongo/db/storage/biggie/SConscript                    |  20
-rw-r--r--  src/mongo/db/storage/biggie/biggie_kv_engine.cpp          |  16
-rw-r--r--  src/mongo/db/storage/biggie/biggie_kv_engine.h            |   8
-rw-r--r--  src/mongo/db/storage/biggie/biggie_record_store_test.cpp  |   1
-rw-r--r--  src/mongo/db/storage/biggie/biggie_sorted_impl.cpp        | 666
-rw-r--r--  src/mongo/db/storage/biggie/biggie_sorted_impl.h          | 148
-rw-r--r--  src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp   |  68
-rw-r--r--  src/mongo/db/storage/biggie/store.h                       |   4
-rw-r--r--  src/mongo/db/storage/key_string.cpp                       |   9
-rw-r--r--  src/mongo/db/storage/key_string.h                         |   5
10 files changed, 876 insertions, 69 deletions
diff --git a/src/mongo/db/storage/biggie/SConscript b/src/mongo/db/storage/biggie/SConscript
index 6f2936f14eb..da99c69e921 100644
--- a/src/mongo/db/storage/biggie/SConscript
+++ b/src/mongo/db/storage/biggie/SConscript
@@ -13,8 +13,9 @@ env.Library(
'biggie_sorted_impl.cpp',
],
LIBDEPS=[
+ '$BUILD_DIR/mongo/base',
'$BUILD_DIR/mongo/db/concurrency/write_conflict_exception',
- '$BUILD_DIR/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store',
+ '$BUILD_DIR/mongo/db/storage/index_entry_comparison',
'$BUILD_DIR/mongo/db/storage/kv/kv_prefix',
],
LIBDEPS_PRIVATE=[
@@ -29,7 +30,7 @@ env.Library(
'biggie_init.cpp',
],
LIBDEPS=[
- '$BUILD_DIR/mongo/db/storage/kv/kv_engine'
+ '$BUILD_DIR/mongo/db/storage/kv/kv_engine',
'storage_biggie_core',
],
LIBDEPS_PRIVATE=[
@@ -50,6 +51,19 @@ env.CppUnitTest(
],
LIBDEPS=[
'storage_biggie_core',
+ '$BUILD_DIR/mongo/db/storage/key_string',
'$BUILD_DIR/mongo/db/storage/record_store_test_harness'
- ]
+ ],
+)
+
+env.CppUnitTest(
+ target='biggie_sorted_data_interface_test',
+ source=['biggie_sorted_impl_test.cpp'
+ ],
+ LIBDEPS=[
+ 'storage_biggie_core',
+ '$BUILD_DIR/mongo/db/storage/key_string',
+ '$BUILD_DIR/mongo/db/storage/sorted_data_interface_test_harness'
+ ],
)
+
diff --git a/src/mongo/db/storage/biggie/biggie_kv_engine.cpp b/src/mongo/db/storage/biggie/biggie_kv_engine.cpp
index 94065834921..b99d45ccd1c 100644
--- a/src/mongo/db/storage/biggie/biggie_kv_engine.cpp
+++ b/src/mongo/db/storage/biggie/biggie_kv_engine.cpp
@@ -31,8 +31,10 @@
#include "mongo/db/storage/biggie/biggie_kv_engine.h"
#include "mongo/base/disallow_copying.h"
+#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/snapshot_window_options.h"
#include "mongo/db/storage/biggie/biggie_recovery_unit.h"
+#include "mongo/db/storage/key_string.h"
#include "mongo/db/storage/record_store.h"
#include "mongo/db/storage/sorted_data_interface.h"
#include "mongo/platform/basic.h"
@@ -82,6 +84,20 @@ std::shared_ptr<StringStore> KVEngine::getMaster_inlock() const {
return _master;
}
+
+Status KVEngine::createSortedDataInterface(OperationContext* opCtx,
+ StringData ident,
+ const IndexDescriptor* desc) {
+ return Status::OK(); // Nothing needs to happen here; the in-memory store requires no per-index setup.
+}
+
+mongo::SortedDataInterface* KVEngine::getSortedDataInterface(OperationContext* opCtx,
+ StringData ident,
+ const IndexDescriptor* desc) {
+ return new SortedDataInterface(Ordering::make(desc->keyPattern()), desc->unique(), ident);
+}
+
+
class EmptyRecordCursor final : public SeekableRecordCursor {
public:
boost::optional<Record> next() final {
diff --git a/src/mongo/db/storage/biggie/biggie_kv_engine.h b/src/mongo/db/storage/biggie/biggie_kv_engine.h
index 81096017f51..a8610fad5e4 100644
--- a/src/mongo/db/storage/biggie/biggie_kv_engine.h
+++ b/src/mongo/db/storage/biggie/biggie_kv_engine.h
@@ -67,15 +67,11 @@ public:
virtual Status createSortedDataInterface(OperationContext* opCtx,
StringData ident,
- const IndexDescriptor* desc) {
- return Status::OK();
- }
+ const IndexDescriptor* desc);
virtual mongo::SortedDataInterface* getSortedDataInterface(OperationContext* opCtx,
StringData ident,
- const IndexDescriptor* desc) {
- return new SortedDataInterface(); // TODO : implement later.
- }
+ const IndexDescriptor* desc);
virtual Status dropIdent(OperationContext* opCtx, StringData ident) {
return Status::OK();
diff --git a/src/mongo/db/storage/biggie/biggie_record_store_test.cpp b/src/mongo/db/storage/biggie/biggie_record_store_test.cpp
index 9c3b0ad1e8b..af647439207 100644
--- a/src/mongo/db/storage/biggie/biggie_record_store_test.cpp
+++ b/src/mongo/db/storage/biggie/biggie_record_store_test.cpp
@@ -36,7 +36,6 @@
#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
-
namespace mongo {
namespace biggie {
namespace {
diff --git a/src/mongo/db/storage/biggie/biggie_sorted_impl.cpp b/src/mongo/db/storage/biggie/biggie_sorted_impl.cpp
index 8fc2abfc94f..c5609ed371e 100644
--- a/src/mongo/db/storage/biggie/biggie_sorted_impl.cpp
+++ b/src/mongo/db/storage/biggie/biggie_sorted_impl.cpp
@@ -25,117 +25,733 @@
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/storage/biggie/biggie_sorted_impl.h"
-#include <set>
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage
+#include "mongo/db/storage/biggie/biggie_sorted_impl.h"
+#include "mongo/bson/bsonobj.h"
+#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/catalog/index_catalog_entry.h"
+#include "mongo/db/storage/biggie/biggie_recovery_unit.h"
+#include "mongo/db/storage/biggie/store.h"
#include "mongo/db/storage/index_entry_comparison.h"
+#include "mongo/db/storage/key_string.h"
+#include "mongo/platform/basic.h"
#include "mongo/stdx/memory.h"
+#include "mongo/util/bufreader.h"
+#include "mongo/util/hex.h"
+#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
+#include "mongo/util/shared_buffer.h"
+
+#include <cstring>
+#include <iomanip>
+#include <memory>
+#include <set>
+#include <sstream>
+#include <utility>
namespace mongo {
namespace biggie {
+namespace {
+
+const int TempKeyMaxSize = 1024; // this goes away with SERVER-3372
+
+// This function is the same as the one in the record store: using the git analogy, it creates
+// a working branch if one does not already exist.
+StringStore* getRecoveryUnitBranch_forking(OperationContext* opCtx) {
+ RecoveryUnit* biggieRCU = checked_cast<RecoveryUnit*>(opCtx->recoveryUnit());
+ invariant(biggieRCU);
+ biggieRCU->forkIfNeeded();
+ return biggieRCU->getWorkingCopy();
+}
+
+// This just checks to see if the field names are empty or not.
+bool hasFieldNames(const BSONObj& obj) {
+ BSONForEach(e, obj) {
+ if (e.fieldName()[0])
+ return true;
+ }
+ return false;
+}
+
+// This just makes all the field names in a BSON object equal to "".
+BSONObj stripFieldNames(const BSONObj& obj) {
+ BSONObjIterator it(obj);
+ BSONObjBuilder bob;
+ while (it.more()) {
+ bob.appendAs(it.next(), "");
+ }
+ return bob.obj();
+}
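// A minimal illustrative sketch, assuming the standard BSON() builder macro: stripFieldNames()
// turns BSON("a" << 1 << "b" << 2) into an object equivalent to BSON("" << 1 << "" << 2),
// which is the field-name-free form index keys are expected to arrive in (hence the
// !hasFieldNames(key) invariants further down).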
+
+Status dupKeyError(const BSONObj& key) {
+ StringBuilder sb;
+ sb << "E11000 duplicate key error ";
+ sb << "dup key: " << key;
+ return Status(ErrorCodes::DuplicateKey, sb.str());
+}
+
+// This function converts a key and an ordering to a KeyString.
+std::unique_ptr<KeyString> keyToKeyString(const BSONObj& key, Ordering order) {
+ KeyString::Version version = KeyString::Version::V1;
+ std::unique_ptr<KeyString> retKs = std::make_unique<KeyString>(version, key, order);
+ return retKs;
+}
+
+// This combines a key, a record ID, and the ident into a single KeyString. Because we cannot
+// compare BSON keys directly (they require an ordering, since indexes may have descending or
+// multi-field keys), we first convert them into a KeyString, which can then be compared
+// byte-wise. Thus, we use a nested KeyString of keys inside our actual KeyString. The only
+// difference between this function and the one below is that this one calls resetToKey first.
+std::string combineKeyAndRIDWithReset(const BSONObj& key,
+ const RecordId& loc,
+ std::string prefixToUse,
+ Ordering order) {
+ KeyString::Version version = KeyString::Version::V1;
+ std::unique_ptr<KeyString> ks = std::make_unique<KeyString>(version);
+ ks->resetToKey(key, order);
+
+ BSONObjBuilder b;
+ b.append("", prefixToUse); // prefix
+ b.append("", std::string(ks->getBuffer(), ks->getSize())); // key
+
+ Ordering allAscending = Ordering::make(BSONObj());
+ std::unique_ptr<KeyString> retKs =
+ std::make_unique<KeyString>(version, b.obj(), allAscending, loc);
+ return std::string(retKs->getBuffer(), retKs->getSize());
+}
+
+std::unique_ptr<KeyString> combineKeyAndRIDKS(const BSONObj& key,
+ const RecordId& loc,
+ std::string prefixToUse,
+ Ordering order) {
+ KeyString::Version version = KeyString::Version::V1;
+ KeyString ks(version, key, order);
+ BSONObjBuilder b;
+ b.append("", prefixToUse); // prefix
+ b.append("", std::string(ks.getBuffer(), ks.getSize())); // key
+ Ordering allAscending = Ordering::make(BSONObj());
+ return std::make_unique<KeyString>(version, b.obj(), allAscending, loc);
+}
+
+// This is similar to the function above, but it returns a string instead of a KeyString. The
+// reason we need both is that we also need to store the typebits, and therefore, we occasionally
+// need to return the KeyString (in the function above). However, most of the time the string
+// representation of the KeyString is good enough, and therefore we just return the string (this
+// function).
+std::string combineKeyAndRID(const BSONObj& key,
+ const RecordId& loc,
+ std::string prefixToUse,
+ Ordering order) {
+ KeyString::Version version = KeyString::Version::V1;
+ KeyString ks(version, key, order);
+
+ BSONObjBuilder b;
+ b.append("", prefixToUse); // prefix
+ b.append("", std::string(ks.getBuffer(), ks.getSize())); // key
+ Ordering allAscending = Ordering::make(BSONObj());
+ std::unique_ptr<KeyString> retKs =
+ std::make_unique<KeyString>(version, b.obj(), allAscending, loc);
+ return std::string(retKs->getBuffer(), retKs->getSize());
+}
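// A minimal usage sketch (hypothetical values, assuming an all-ascending ordering): because the
// returned string is itself a KeyString over (prefix, key, RecordId), plain byte-wise string
// comparison orders entries the same way the index would:
//
//     Ordering ord = Ordering::make(BSONObj());
//     std::string a = combineKeyAndRID(BSON("" << 1), RecordId(1), "ident\1", ord);
//     std::string b = combineKeyAndRID(BSON("" << 1), RecordId(2), "ident\1", ord);
//     std::string c = combineKeyAndRID(BSON("" << 2), RecordId(1), "ident\1", ord);
//     invariant(a < b && b < c);  // sorted by prefix, then key, then RecordId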
+
+// This function converts a KeyString into an IndexKeyEntry. We don't need to store the typebits
+// for the outer key string (the one consisting of the prefix, the key, and the recordId) since
+// those will not be used. However, we do need to store the typebits for the internal keystring
+// (made from the key itself), as those typebits are potentially important.
+IndexKeyEntry keyStringToIndexKeyEntry(std::string keyString,
+ std::string typeBitsString,
+ Ordering order) {
+ KeyString::Version version = KeyString::Version::V1;
+ KeyString::TypeBits tbInternal = KeyString::TypeBits(version);
+ KeyString::TypeBits tbOuter = KeyString::TypeBits(version);
+
+ BufReader brTbInternal(typeBitsString.c_str(), typeBitsString.length());
+ tbInternal.resetFromBuffer(&brTbInternal);
+
+ Ordering allAscending = Ordering::make(BSONObj());
+
+ BSONObj bsonObj =
+ KeyString::toBsonSafe(keyString.c_str(), keyString.length(), allAscending, tbOuter);
+
+ // First we get the BSONObj key.
+ SharedBuffer sb;
+ int counter = 0;
+ for (auto&& elem : bsonObj) {
+ // The key is the second field.
+ if (counter == 1) {
+ const char* valStart = elem.valuestr();
+ int valSize = elem.valuestrsize();
+ KeyString ks(version);
+ ks.resetFromBuffer(valStart, valSize);
+
+ BSONObj originalKey =
+ KeyString::toBsonSafe(ks.getBuffer(), ks.getSize(), order, tbInternal);
+
+ sb = SharedBuffer::allocate(originalKey.objsize());
+ std::memcpy(sb.get(), originalKey.objdata(), originalKey.objsize());
+ break;
+ }
+ counter++;
+ }
+ RecordId rid = KeyString::decodeRecordIdAtEnd(keyString.c_str(), keyString.length());
+ ConstSharedBuffer csb(sb);
+ BSONObj key(csb);
+
+ return IndexKeyEntry(key, rid);
+}
+
+int compareTwoKeys(
+ std::string ks1, std::string tbs1, std::string ks2, std::string tbs2, Ordering order) {
+ size_t size1 = KeyString::sizeWithoutRecordIdAtEnd(ks1.c_str(), ks1.length());
+ size_t size2 = KeyString::sizeWithoutRecordIdAtEnd(ks2.c_str(), ks2.length());
+ auto cmpSmallerMemory = std::memcmp(ks1.c_str(), ks2.c_str(), std::min(size1, size2));
+
+ if (cmpSmallerMemory != 0) {
+ return cmpSmallerMemory;
+ }
+ if (size1 == size2) {
+ return 0;
+ }
+ return (size1 > size2);
+}
+
+} // namespace
+
+SortedDataBuilderInterface::SortedDataBuilderInterface(OperationContext* opCtx,
+ bool dupsAllowed,
+ Ordering order,
+ std::string prefix,
+ std::string identEnd)
+ : _opCtx(opCtx),
+ _dupsAllowed(dupsAllowed),
+ _order(order),
+ _prefix(prefix),
+ _identEnd(identEnd),
+ _hasLast(false),
+ _lastKeyToString(""),
+ _lastRID(-1) {}
+
+void SortedDataBuilderInterface::commit(bool mayInterrupt) {
+ biggie::RecoveryUnit* ru = checked_cast<biggie::RecoveryUnit*>(_opCtx->recoveryUnit());
+ ru->forkIfNeeded();
+ ru->commitUnitOfWork();
+}
Status SortedDataBuilderInterface::addKey(const BSONObj& key, const RecordId& loc) {
+ StringStore* workingCopy = getRecoveryUnitBranch_forking(_opCtx);
+
+ if (key.objsize() >= TempKeyMaxSize) {
+ return Status(ErrorCodes::KeyTooLong, "key too big");
+ }
+
+ invariant(loc.isNormal());
+ invariant(!hasFieldNames(key));
+
+ std::unique_ptr<KeyString> newKS = keyToKeyString(key, _order);
+ std::string newKSToString = std::string(newKS->getBuffer(), newKS->getSize());
+
+ int twoKeyCmp = 1;
+ int twoRIDCmp = 1;
+
+ if (_hasLast) {
+ twoKeyCmp = newKSToString.compare(_lastKeyToString);
+ twoRIDCmp = loc.repr() - _lastRID;
+ }
+
+ if (twoKeyCmp < 0 || (_dupsAllowed && twoKeyCmp == 0 && twoRIDCmp < 0)) {
+ return Status(ErrorCodes::InternalError,
+ "expected ascending (key, RecordId) order in bulk builder");
+ }
+ if (!_dupsAllowed && twoKeyCmp == 0 && twoRIDCmp != 0) {
+ return dupKeyError(key);
+ }
+
+ std::string workingCopyInsertKey = combineKeyAndRID(key, loc, _prefix, _order);
+ std::unique_ptr<KeyString> workingCopyInternalKs = keyToKeyString(key, _order);
+ std::unique_ptr<KeyString> workingCopyOuterKs = combineKeyAndRIDKS(key, loc, _prefix, _order);
+
+ std::string internalTbString(
+ reinterpret_cast<const char*>(workingCopyInternalKs->getTypeBits().getBuffer()),
+ workingCopyInternalKs->getTypeBits().getSize());
+
+ workingCopy->insert(StringStore::value_type(workingCopyInsertKey, internalTbString));
+
+ _hasLast = true;
+ _lastKeyToString = newKSToString;
+ _lastRID = loc.repr();
+
return Status::OK();
}
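// A minimal usage sketch (hypothetical setup; 'sorted' is assumed to be a SortedDataInterface for
// an index on a single ascending field): the bulk builder expects ascending (key, RecordId) order,
// so the calls below succeed, while swapping the two addKey() calls would hit the InternalError
// status returned above.
//
//     SortedDataBuilderInterface* builder = sorted->getBulkBuilder(opCtx, /*dupsAllowed=*/true);
//     invariant(builder->addKey(BSON("" << 1), RecordId(1)).isOK());
//     invariant(builder->addKey(BSON("" << 2), RecordId(1)).isOK());
//     builder->commit(/*mayInterrupt=*/false);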
SortedDataBuilderInterface* SortedDataInterface::getBulkBuilder(OperationContext* opCtx,
bool dupsAllowed) {
- return new SortedDataBuilderInterface(); // TODO: return real thing.
+ return new SortedDataBuilderInterface(opCtx, dupsAllowed, _order, _prefix, _identEnd);
+}
+
+// We append \1 to all idents we get, and therefore the KeyString with ident + \0 will only be
+// before elements in this ident, and the KeyString with ident + \2 will only be after elements in
+// this ident.
+SortedDataInterface::SortedDataInterface(const Ordering& ordering, bool isUnique, StringData ident)
+ : _order(ordering),
+ // All entries in this ident will have a prefix of ident + \1.
+ _prefix(ident.toString().append(1, '\1')),
+ // Therefore, the string ident + \2 will be greater than all elements in this ident.
+ _identEnd(ident.toString().append(1, '\2')),
+ _isUnique(isUnique) {
+ // This is the string representation of the KeyString just before all elements in this ident,
+ // built with the prefix ident + \0.
+ _KSForIdentStart =
+ combineKeyAndRID(BSONObj(), RecordId::min(), ident.toString().append(1, '\0'), ordering);
+ // Similarly, this is the string representation of the KeyString for something greater than
+ // all other elements in this ident.
+ _KSForIdentEnd = combineKeyAndRID(BSONObj(), RecordId::min(), _identEnd, ordering);
}
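// Put differently (a sketch, assuming ident "foo"): the three prefixes embedded in the KeyStrings
// are "foo\0" < "foo\1" < "foo\2", so
//
//     _KSForIdentStart  <  every entry of this ident  <  _KSForIdentEnd
//
// and range scans bounded by _KSForIdentStart/_KSForIdentEnd never touch another ident's entries.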
Status SortedDataInterface::insert(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) {
- return Status::OK(); // TODO: Implement.
+ // The KeyString representation of the key.
+ std::unique_ptr<KeyString> workingCopyInternalKs = keyToKeyString(key, _order);
+ // The KeyString of prefix (which is ident + \1), key, loc.
+ std::unique_ptr<KeyString> workingCopyOuterKs = combineKeyAndRIDKS(key, loc, _prefix, _order);
+ // The string representation.
+ std::string workingCopyInsertKey = combineKeyAndRID(key, loc, _prefix, _order);
+
+ StringStore* workingCopy = getRecoveryUnitBranch_forking(opCtx);
+
+ if (key.objsize() >= TempKeyMaxSize) {
+ return Status(ErrorCodes::KeyTooLong, "Error: key too long");
+ }
+
+ if (workingCopy->find(workingCopyInsertKey) != workingCopy->end()) {
+ return Status::OK();
+ }
+
+ // If dups are not allowed, then we need to check that we are not inserting something with an
+ // existing key but a different recordId. However, if the combination of key, recordId already
+ // exists, then we are fine, since we are allowed to insert duplicates.
+ if (!dupsAllowed) {
+ std::string workingCopyLowerBound = combineKeyAndRID(key, RecordId::min(), _prefix, _order);
+ std::string workingCopyUpperBound = combineKeyAndRID(key, RecordId::max(), _prefix, _order);
+ StringStore::iterator lowerBoundIterator = workingCopy->lower_bound(workingCopyLowerBound);
+
+ if (lowerBoundIterator != workingCopy->end() &&
+ lowerBoundIterator->first.compare(_KSForIdentEnd) < 0) {
+ IndexKeyEntry ike = keyStringToIndexKeyEntry(
+ lowerBoundIterator->first, lowerBoundIterator->second, _order);
+ // We need a KeyString comparison here since even if the types are different, if the
+ // values are the same, then we need to still return equal.
+ auto ks1 = keyToKeyString(ike.key, _order);
+ auto ks2 = keyToKeyString(key, _order);
+ if (ks1->compare(*ks2) == 0 && ike.loc.repr() != loc.repr()) {
+ return dupKeyError(key);
+ }
+ }
+ }
+
+ // The key we insert is the workingCopyOuterKs as described above. The value is the typebits
+ // for the internal keystring (created from the key/order), which we will use when decoding the
+ // key and creating an IndexKeyEntry.
+ std::string internalTbString =
+ std::string(reinterpret_cast<const char*>(workingCopyInternalKs->getTypeBits().getBuffer()),
+ workingCopyInternalKs->getTypeBits().getSize());
+ workingCopy->insert(StringStore::value_type(workingCopyInsertKey, internalTbString));
+ return Status::OK();
}
void SortedDataInterface::unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) {
- return; // TODO: Implement.
+ std::string workingCopyInsertKey = combineKeyAndRID(key, loc, _prefix, _order);
+ StringStore* workingCopy = getRecoveryUnitBranch_forking(opCtx);
+ workingCopy->erase(workingCopyInsertKey);
+}
+
+// This function is not part of the interface as of now, but a server ticket exists to add
+// truncate to the set of supported operations.
+Status SortedDataInterface::truncate(OperationContext* opCtx) {
+ StringStore* workingCopy = getRecoveryUnitBranch_forking(opCtx);
+ auto workingCopyLowerBound = workingCopy->lower_bound(_KSForIdentStart);
+ auto workingCopyUpperBound = workingCopy->upper_bound(_KSForIdentEnd);
+ workingCopy->erase(workingCopyLowerBound, workingCopyUpperBound);
+ return Status::OK();
}
Status SortedDataInterface::dupKeyCheck(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc) {
+ std::string workingCopyCheckKey = combineKeyAndRID(key, loc, _prefix, _order);
+ StringStore* workingCopy = getRecoveryUnitBranch_forking(opCtx);
+
+ // We effectively do the same check as in insert. However, we also check to make sure that
+ // the iterator returned to us by lower_bound also happens to be inside our ident.
+ if (workingCopy->find(workingCopyCheckKey) != workingCopy->end()) {
+ return Status::OK();
+ }
+
+ if (!_dupsAllowed) {
+ std::string workingCopyLowerBound = combineKeyAndRID(key, RecordId::min(), _prefix, _order);
+ auto lowerBoundIterator = workingCopy->lower_bound(workingCopyLowerBound);
+
+ if (lowerBoundIterator != workingCopy->end() &&
+ lowerBoundIterator->first != workingCopyCheckKey &&
+ lowerBoundIterator->first.compare(_KSForIdentEnd) < 0 &&
+ lowerBoundIterator->first.compare(
+ combineKeyAndRID(key, RecordId::max(), _prefix, _order)) <= 0) {
+ return dupKeyError(key);
+ }
+ }
return Status::OK();
- // TODO: Implement.
}
void SortedDataInterface::fullValidate(OperationContext* opCtx,
long long* numKeysOut,
ValidateResults* fullResults) const {
- // TODO: Implement.
+ StringStore* workingCopy = getRecoveryUnitBranch_forking(opCtx);
+ long long numKeys = 0;
+ auto it = workingCopy->lower_bound(_KSForIdentStart);
+ while (it != workingCopy->end() && it->first.compare(_KSForIdentEnd) < 0) {
+ ++it;
+ numKeys++;
+ }
+ *numKeysOut = numKeys;
}
bool SortedDataInterface::appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* output,
double scale) const {
- return false; // TODO: Implement.
+ return false;
}
long long SortedDataInterface::getSpaceUsedBytes(OperationContext* opCtx) const {
- return -1; // TODO: Implement.
+ StringStore* str = getRecoveryUnitBranch_forking(opCtx);
+ size_t totalSize = 0;
+ StringStore::iterator it = str->lower_bound(_KSForIdentStart);
+ StringStore::iterator end = str->upper_bound(_KSForIdentEnd);
+ int64_t numElements = str->distance(it, end);
+ for (int i = 0; i < numElements; i++) {
+ totalSize += it->first.length();
+ ++it;
+ }
+ return (long long)totalSize;
}
bool SortedDataInterface::isEmpty(OperationContext* opCtx) {
- return true; // TODO: Implement.
+ StringStore* workingCopy = getRecoveryUnitBranch_forking(opCtx);
+ return workingCopy->distance(workingCopy->lower_bound(_KSForIdentStart),
+ workingCopy->upper_bound(_KSForIdentEnd)) == 0;
}
std::unique_ptr<mongo::SortedDataInterface::Cursor> SortedDataInterface::newCursor(
OperationContext* opCtx, bool isForward) const {
- return std::make_unique<SortedDataInterface::Cursor>(opCtx, isForward); // TODO: Implement.
+ StringStore* workingCopy = getRecoveryUnitBranch_forking(opCtx);
+
+ return std::make_unique<SortedDataInterface::Cursor>(opCtx,
+ isForward,
+ _prefix,
+ _identEnd,
+ workingCopy,
+ _order,
+ _isUnique,
+ _KSForIdentStart,
+ _KSForIdentEnd);
}
Status SortedDataInterface::initAsEmpty(OperationContext* opCtx) {
- return Status::OK(); // TODO: Implement.
+ return Status::OK();
}
// Cursor
-SortedDataInterface::Cursor::Cursor(OperationContext* opCtx, bool isForward)
- : _opCtx(opCtx), _isForward(isForward) {}
+SortedDataInterface::Cursor::Cursor(OperationContext* opCtx,
+ bool isForward,
+ std::string _prefix,
+ std::string _identEnd,
+ StringStore* workingCopy,
+ Ordering order,
+ bool isUnique,
+ std::string _KSForIdentStart,
+ std::string identEndBSON)
+ : _opCtx(opCtx),
+ _workingCopy(workingCopy),
+ _endPos(boost::none),
+ _endPosReverse(boost::none),
+ _forward(isForward),
+ _atEOF(false),
+ _lastMoveWasRestore(false),
+ _prefix(_prefix),
+ _identEnd(_identEnd),
+ _forwardIt(workingCopy->begin()),
+ _reverseIt(workingCopy->rbegin()),
+ _order(order),
+ _endPosIncl(false),
+ _isUnique(isUnique),
+ _KSForIdentStart(_KSForIdentStart),
+ _KSForIdentEnd(identEndBSON) {}
+
+// This function checks whether or not the cursor's end position was set by the user.
+bool SortedDataInterface::Cursor::endPosSet() {
+ return (_forward && _endPos != boost::none) || (!_forward && _endPosReverse != boost::none);
+}
+
+// This function checks whether or not a cursor is valid. In particular, it checks 1) whether the
+// cursor is at end() or rend(), 2) whether the cursor is on the wrong side of the end position
+// if it was set, and 3) whether the cursor is still in the ident.
+bool SortedDataInterface::Cursor::checkCursorValid() {
+ if (_forward) {
+ if (_forwardIt == _workingCopy->end()) {
+ return false;
+ }
+ if (endPosSet()) {
+ // The endPos must be in the ident, at most one past the ident, or at end(). Therefore,
+ // comparing against endPos also covers the check for being inside the ident.
+ return _endPos.get() == _workingCopy->end() ||
+ _forwardIt->first.compare(_endPos.get()->first) < 0;
+ }
+ return _forwardIt->first.compare(_KSForIdentEnd) <= 0;
+ } else {
+ // This is a reverse cursor
+ if (_reverseIt == _workingCopy->rend()) {
+ return false;
+ }
+ if (endPosSet()) {
+ return _endPosReverse.get() == _workingCopy->rend() ||
+ _reverseIt->first.compare(_endPosReverse.get()->first) > 0;
+ }
+ return _reverseIt->first.compare(_KSForIdentStart) >= 0;
+ }
+}
void SortedDataInterface::Cursor::setEndPosition(const BSONObj& key, bool inclusive) {
- return;
+ auto finalKey = stripFieldNames(key);
+ StringStore* workingCopy = getRecoveryUnitBranch_forking(_opCtx);
+ if (finalKey.isEmpty()) {
+ _endPos = boost::none;
+ _endPosReverse = boost::none;
+ return;
+ }
+ _endPosIncl = inclusive;
+ _endPosKey = key;
+ std::string _endPosBound;
+ // If forward and inclusive or reverse and not inclusive, then we use the last element in this
+ // ident. Otherwise, we use the first as our bound.
+ if (_forward == inclusive) {
+ _endPosBound = combineKeyAndRID(finalKey, RecordId::max(), _prefix, _order);
+ } else {
+ _endPosBound = combineKeyAndRID(finalKey, RecordId::min(), _prefix, _order);
+ }
+ if (_forward) {
+ _endPos = workingCopy->lower_bound(_endPosBound);
+ } else {
+ // Reverse iterators work with upper bound since upper bound will return the first element
+ // past the argument, so when it becomes a reverse iterator, it goes backwards one,
+ // (according to the C++ standard) and we end up in the right place.
+ _endPosReverse = StringStore::reverse_iterator(workingCopy->upper_bound(_endPosBound));
+ }
}
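// A minimal sketch of the reverse-iterator trick used above, shown with std::set for clarity
// (assumption: StringStore iterators behave like standard bidirectional iterators here):
//
//     std::set<std::string> s{"a", "b", "d"};
//     auto rit = std::set<std::string>::reverse_iterator(s.upper_bound("c"));
//     // upper_bound("c") points at "d"; the reverse iterator steps back one element, so
//     // *rit == "b", i.e. the last element not greater than "c" when walking backwards.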
boost::optional<IndexKeyEntry> SortedDataInterface::Cursor::next(RequestedInfo parts) {
- return boost::none;
+ if (!_atEOF) {
+ // If the last move was restore, then we don't need to advance the cursor, since the user
+ // never got the value the cursor was pointing to in the first place. However,
+ // _lastMoveWasRestore will go through extra logic on a unique index, since unique indexes
+ // are not allowed to return the same key twice.
+ if (_lastMoveWasRestore) {
+ _lastMoveWasRestore = false;
+ } else {
+ // We basically just check to make sure the cursor is in the ident.
+ if (_forward) {
+ if (checkCursorValid()) {
+ ++_forwardIt;
+ }
+ } else {
+ if (checkCursorValid()) {
+ ++_reverseIt;
+ }
+ }
+ // We check here to make sure that we are on the correct side of the end position, and
+ // that the cursor is still in the ident after advancing.
+ if (!checkCursorValid()) {
+ _atEOF = true;
+ return boost::none;
+ }
+ }
+ } else {
+ _lastMoveWasRestore = false;
+ return boost::none;
+ }
+
+ if (_forward) {
+ return keyStringToIndexKeyEntry(_forwardIt->first, _forwardIt->second, _order);
+ }
+ return keyStringToIndexKeyEntry(_reverseIt->first, _reverseIt->second, _order);
+}
+
+boost::optional<IndexKeyEntry> SortedDataInterface::Cursor::seekAfterProcessing(BSONObj finalKey,
+ bool inclusive) {
+ std::string workingCopyBound;
+
+ // Similar to above, if forward and inclusive or reverse and not inclusive, then use min() for
+ // recordId. Else, we should use max().
+ if (_forward == inclusive) {
+ workingCopyBound = combineKeyAndRID(finalKey, RecordId::min(), _prefix, _order);
+ } else {
+ workingCopyBound = combineKeyAndRID(finalKey, RecordId::max(), _prefix, _order);
+ }
+
+ if (finalKey.isEmpty()) {
+ // If the key is empty and it's not inclusive, then no elements satisfy this seek.
+ if (!inclusive) {
+ _atEOF = true;
+ return boost::none;
+ } else {
+ // Otherwise, we just try to find the first element in this ident.
+ if (_forward) {
+ _forwardIt = _workingCopy->lower_bound(workingCopyBound);
+ } else {
+
+ // Reverse iterators work with upper bound since upper bound will return the first
+ // element past the argument, so when it becomes a reverse iterator, it goes
+ // backwards one, (according to the C++ standard) and we end up in the right place.
+ _reverseIt =
+ StringStore::reverse_iterator(_workingCopy->upper_bound(workingCopyBound));
+ }
+ // Here, we check to make sure the iterator doesn't fall off the data structure and is
+ // in the ident. We also check to make sure it is on the correct side of the end
+ // position, if it was set.
+ if (!checkCursorValid()) {
+ _atEOF = true;
+ return boost::none;
+ }
+ }
+ } else {
+ // Otherwise, we seek to the nearest element to our key, but only to the right.
+ if (_forward) {
+ _forwardIt = _workingCopy->lower_bound(workingCopyBound);
+ } else {
+ // Reverse iterators work with upper bound since upper bound will return the first
+ // element past the argument, so when it becomes a reverse iterator, it goes
+ // backwards one, (according to the C++ standard) and we end up in the right place.
+ _reverseIt = StringStore::reverse_iterator(_workingCopy->upper_bound(workingCopyBound));
+ }
+ // Once again, we check to make sure the iterator didn't fall off the data structure and
+ // still is in the ident.
+ if (!checkCursorValid()) {
+ _atEOF = true;
+ return boost::none;
+ }
+ }
+
+ // Everything checks out, so we have successfully seeked and now return.
+ if (_forward) {
+ return keyStringToIndexKeyEntry(_forwardIt->first, _forwardIt->second, _order);
+ }
+ return keyStringToIndexKeyEntry(_reverseIt->first, _reverseIt->second, _order);
}
boost::optional<IndexKeyEntry> SortedDataInterface::Cursor::seek(const BSONObj& key,
bool inclusive,
RequestedInfo parts) {
- return boost::none;
+ BSONObj finalKey = stripFieldNames(key);
+ _lastMoveWasRestore = false;
+ _atEOF = false;
+
+ return seekAfterProcessing(finalKey, inclusive);
}
boost::optional<IndexKeyEntry> SortedDataInterface::Cursor::seek(const IndexSeekPoint& seekPoint,
RequestedInfo parts) {
- return boost::none;
+ const BSONObj key = IndexEntryComparison::makeQueryObject(seekPoint, _forward);
+ _atEOF = false;
+ bool inclusive = true;
+ BSONObj finalKey = key;
+ _lastMoveWasRestore = false;
+
+ return seekAfterProcessing(finalKey, inclusive);
}
void SortedDataInterface::Cursor::save() {
- return;
+ _atEOF = false;
+ if (_lastMoveWasRestore) {
+ return;
+ } else if (_forward && _forwardIt != _workingCopy->end()) {
+ _saveKey = _forwardIt->first;
+ } else if (!_forward && _reverseIt != _workingCopy->rend()) { // reverse
+ _saveKey = _reverseIt->first;
+ } else {
+ _saveKey = "";
+ }
}
void SortedDataInterface::Cursor::restore() {
- return;
+ StringStore* workingCopy = getRecoveryUnitBranch_forking(_opCtx);
+
+ this->_workingCopy = workingCopy;
+
+ // Here, we have to reset the end position if one was set earlier.
+ if (endPosSet()) {
+ setEndPosition(_endPosKey.get(), _endPosIncl);
+ }
+
+ // We reset the cursor, and make sure it's within the end position bounds. It doesn't matter if
+ // the cursor is not in the ident right now, since that will be taken care of upon the call to
+ // next().
+ if (_forward) {
+ if (_saveKey.length() == 0) {
+ _forwardIt = workingCopy->end();
+ } else {
+ _forwardIt = workingCopy->lower_bound(_saveKey);
+ }
+ if (!checkCursorValid()) {
+ _atEOF = true;
+ _lastMoveWasRestore = true;
+ return;
+ }
+
+ if (!_isUnique) {
+ _lastMoveWasRestore = (_forwardIt->first.compare(_saveKey) != 0);
+ } else {
+ // Unique indices cannot return the same key twice. Therefore, if we would normally not
+ // advance on the next call to next() by setting _lastMoveWasRestore, we potentially
+ // won't set it if that would cause us to return the same value twice.
+ int twoKeyCmp = compareTwoKeys(
+ _forwardIt->first, _forwardIt->second, _saveKey, _forwardIt->second, _order);
+ _lastMoveWasRestore = (twoKeyCmp != 0);
+ }
+
+ } else {
+ // Now we are dealing with reverse cursors, and use similar logic.
+ if (_saveKey.length() == 0) {
+ _reverseIt = workingCopy->rend();
+ } else {
+ _reverseIt = StringStore::reverse_iterator(workingCopy->upper_bound(_saveKey));
+ }
+ if (!checkCursorValid()) {
+ _atEOF = true;
+ _lastMoveWasRestore = true;
+ return;
+ }
+
+ if (!_isUnique) {
+ _lastMoveWasRestore = (_reverseIt->first.compare(_saveKey) != 0);
+ } else {
+ // We use similar logic for reverse cursors on unique indexes.
+ int twoKeyCmp = compareTwoKeys(
+ _reverseIt->first, _reverseIt->second, _saveKey, _reverseIt->second, _order);
+ _lastMoveWasRestore = (twoKeyCmp != 0);
+ }
+ }
}
void SortedDataInterface::Cursor::detachFromOperationContext() {
- return;
+ _opCtx = nullptr;
}
void SortedDataInterface::Cursor::reattachToOperationContext(OperationContext* opCtx) {
- return;
+ this->_opCtx = opCtx;
}
-
} // namespace biggie
} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_sorted_impl.h b/src/mongo/db/storage/biggie/biggie_sorted_impl.h
index 3bb4a87b0cd..35a719207f1 100644
--- a/src/mongo/db/storage/biggie/biggie_sorted_impl.h
+++ b/src/mongo/db/storage/biggie/biggie_sorted_impl.h
@@ -30,6 +30,7 @@
#pragma once
#include "mongo/db/storage/biggie/store.h"
+#include "mongo/db/storage/key_string.h"
#include "mongo/db/storage/sorted_data_interface.h"
namespace mongo {
@@ -37,64 +38,143 @@ namespace biggie {
class SortedDataBuilderInterface : public ::mongo::SortedDataBuilderInterface {
public:
+ SortedDataBuilderInterface(OperationContext* opCtx,
+ bool dupsAllowed,
+ Ordering order,
+ std::string prefix,
+ std::string identEnd);
+ void commit(bool mayInterrupt) override;
virtual Status addKey(const BSONObj& key, const RecordId& loc);
-};
+private:
+ OperationContext* _opCtx;
+ bool _dupsAllowed;
+ // Order of the keys.
+ Ordering _order;
+ // Prefix and identEnd for the ident.
+ std::string _prefix;
+ std::string _identEnd;
+ // Whether or not we've already added something before.
+ bool _hasLast;
+ // This is the KeyString of the last key added.
+ std::string _lastKeyToString;
+ // This is the last recordId added.
+ int64_t _lastRID;
+};
class SortedDataInterface : public ::mongo::SortedDataInterface {
- // TODO : all of these probably do not need to be public.
public:
- // TODO : this definitely needs to change in the future.
- // TODO : also, what is ephemeralForTest indexSet referring to.
- std::shared_ptr<Store<std::string, std::string>> _data;
- // TODO : this constructor eventually needs to take arguments like in ephemeralForTest.
- SortedDataInterface(){};
-
- // TODO : figure out if all these functions should be public.
-
- virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* opCtx, bool dupsAllowed);
+ // Truncate is not required at the time of writing, but it will be once the truncate command
+ // is created.
+ Status truncate(OperationContext* opCtx);
+ SortedDataInterface(const Ordering& ordering, bool isUnique, StringData ident);
+ virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* opCtx,
+ bool dupsAllowed) override;
virtual Status insert(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
- bool dupsAllowed);
+ bool dupsAllowed) override;
virtual void unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
- bool dupsAllowed);
- virtual Status dupKeyCheck(OperationContext* opCtx, const BSONObj& key, const RecordId& loc);
+ bool dupsAllowed) override;
+ virtual Status dupKeyCheck(OperationContext* opCtx,
+ const BSONObj& key,
+ const RecordId& loc) override;
virtual void fullValidate(OperationContext* opCtx,
long long* numKeysOut,
- ValidateResults* fullResults) const;
+ ValidateResults* fullResults) const override;
virtual bool appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* output,
- double scale) const;
- virtual long long getSpaceUsedBytes(OperationContext* opCtx) const;
- virtual bool isEmpty(OperationContext* opCtx);
- // TODO : what cursor is this?
- // this is not the right cursor I think.
+ double scale) const override;
+ virtual long long getSpaceUsedBytes(OperationContext* opCtx) const override;
+ virtual bool isEmpty(OperationContext* opCtx) override;
virtual std::unique_ptr<mongo::SortedDataInterface::Cursor> newCursor(
OperationContext* opCtx, bool isForward = true) const override;
- virtual Status initAsEmpty(OperationContext* opCtx);
+ virtual Status initAsEmpty(OperationContext* opCtx) override;
+ /*
+ * This is the cursor class required by the sorted data interface.
+ */
class Cursor final : public ::mongo::SortedDataInterface::Cursor {
- OperationContext* _opCtx;
- bool _isForward;
- // TODO : should all of these be public?
public:
- // TODO : This will need more arguments later on for a real cursor.
- Cursor(OperationContext* opCtx, bool isForward);
- virtual void setEndPosition(const BSONObj& key, bool inclusive);
- virtual boost::optional<IndexKeyEntry> next(RequestedInfo parts = kKeyAndLoc);
+ // All the following public functions just implement the interface.
+ Cursor(OperationContext* opCtx,
+ bool isForward,
+ // This is the prefix for the ident (ident + '\1').
+ std::string _prefix,
+ // This is a string immediately after the ident and before other idents.
+ std::string _identEnd,
+ StringStore* workingCopy,
+ Ordering order,
+ bool isUnique,
+ std::string prefixBSON,
+ std::string KSForIdentEnd);
+ virtual void setEndPosition(const BSONObj& key, bool inclusive) override;
+ virtual boost::optional<IndexKeyEntry> next(RequestedInfo parts = kKeyAndLoc) override;
virtual boost::optional<IndexKeyEntry> seek(const BSONObj& key,
bool inclusive,
- RequestedInfo parts = kKeyAndLoc);
+ RequestedInfo parts = kKeyAndLoc) override;
virtual boost::optional<IndexKeyEntry> seek(const IndexSeekPoint& seekPoint,
- RequestedInfo parts = kKeyAndLoc);
- virtual void save();
- virtual void restore();
- virtual void detachFromOperationContext();
- virtual void reattachToOperationContext(OperationContext* opCtx);
+ RequestedInfo parts = kKeyAndLoc) override;
+ virtual void save() override;
+ virtual void restore() override;
+ virtual void detachFromOperationContext() override;
+ virtual void reattachToOperationContext(OperationContext* opCtx) override;
+
+ private:
+ // This is a helper function to check if the cursor's end position was explicitly set by the user.
+ bool endPosSet();
+ // This is a helper function to check if the cursor is valid or not.
+ bool checkCursorValid();
+ // This is a helper function for seek.
+ boost::optional<IndexKeyEntry> seekAfterProcessing(BSONObj finalKey, bool inclusive);
+ OperationContext* _opCtx;
+ // This is the "working copy" of the master "branch" in the git analogy.
+ StringStore* _workingCopy;
+ // These store the end positions.
+ boost::optional<StringStore::iterator> _endPos;
+ boost::optional<StringStore::reverse_iterator> _endPosReverse;
+ // This indicates whether the cursor is a forward or a reverse cursor.
+ bool _forward;
+ // This indicates whether the cursor has reached EOF (with regard to this index).
+ bool _atEOF;
+ // This indicates whether or not the last move was a restore.
+ bool _lastMoveWasRestore;
+ // This is the keystring for the saved location.
+ std::string _saveKey;
+ // These are the same as before.
+ std::string _prefix;
+ std::string _identEnd;
+ // These two store the iterator, which is the data structure for cursors. The one we use
+ // depends on _forward.
+ StringStore::iterator _forwardIt;
+ StringStore::reverse_iterator _reverseIt;
+ // This is the ordering for the key's values for multi-field keys.
+ Ordering _order;
+ // This stores whether or not the end position is inclusive for restore.
+ bool _endPosIncl;
+ // This stores the key for the end position.
+ boost::optional<BSONObj> _endPosKey;
+ // This stores whether or not the index is unique.
+ bool _isUnique;
+ // The next two are the same as above.
+ std::string _KSForIdentStart;
+ std::string _KSForIdentEnd;
};
+
+private:
+ const Ordering _order;
+ // These two are the same as before.
+ std::string _prefix;
+ std::string _identEnd;
+ // These are the keystring representations of the _prefix and the _identEnd.
+ std::string _KSForIdentStart;
+ std::string _KSForIdentEnd;
+ // This stores whether or not the index is unique.
+ bool _isUnique;
+ // This stores whether or not dups are allowed.
+ bool _dupsAllowed;
};
} // namespace biggie
} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp b/src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp
new file mode 100644
index 00000000000..6fdcb589627
--- /dev/null
+++ b/src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp
@@ -0,0 +1,68 @@
+/**
+ * Copyright (C) 2018 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/storage/biggie/biggie_sorted_impl.h"
+#include "mongo/base/init.h"
+#include "mongo/db/storage/biggie/biggie_kv_engine.h"
+#include "mongo/db/storage/biggie/biggie_recovery_unit.h"
+#include "mongo/db/storage/biggie/store.h"
+#include "mongo/db/storage/sorted_data_interface_test_harness.h"
+#include "mongo/platform/basic.h"
+#include "mongo/stdx/memory.h"
+#include "mongo/unittest/unittest.h"
+
+namespace mongo {
+namespace biggie {
+namespace {
+class SortedDataInterfaceTestHarnessHelper final : public virtual SortedDataInterfaceHarnessHelper {
+private:
+ KVEngine _kvEngine{};
+ Ordering _order;
+
+public:
+ SortedDataInterfaceTestHarnessHelper() : _order(Ordering::make(BSONObj())) {}
+ std::unique_ptr<mongo::SortedDataInterface> newSortedDataInterface(bool unique) final {
+ return std::make_unique<SortedDataInterface>(_order, unique, "ident"_sd);
+ }
+ std::unique_ptr<mongo::RecoveryUnit> newRecoveryUnit() final {
+ // TODO: This is not correct; the recovery unit should eventually be obtained through the KV engine.
+ return std::make_unique<RecoveryUnit>(&_kvEngine);
+ }
+};
+
+std::unique_ptr<HarnessHelper> makeHarnessHelper() {
+ return stdx::make_unique<SortedDataInterfaceTestHarnessHelper>();
+}
+
+MONGO_INITIALIZER(RegisterHarnessFactory)(InitializerContext* const) {
+ mongo::registerHarnessHelperFactory(makeHarnessHelper);
+ return Status::OK();
+}
+} // namespace
+} // namespace biggie
+} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/store.h b/src/mongo/db/storage/biggie/store.h
index ccdbcfce7c8..7b2fd7038af 100644
--- a/src/mongo/db/storage/biggie/store.h
+++ b/src/mongo/db/storage/biggie/store.h
@@ -166,6 +166,10 @@ public:
return map.erase(key);
}
+ void erase(iterator first, iterator last) {
+ map.erase(first.iter, last.iter);
+ }
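// A minimal usage sketch (mirroring SortedDataInterface::truncate() in biggie_sorted_impl.cpp;
// identStartKS and identEndKS are assumed, precomputed ident boundary keys):
//
//     auto first = store.lower_bound(identStartKS);
//     auto last = store.upper_bound(identEndKS);
//     store.erase(first, last);  // removes every entry belonging to that ident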
+
/**
* Returns a Store that has all changes from both 'this' and 'other' compared to base.
* Throws merge_conflict_exception if there are merge conflicts.
diff --git a/src/mongo/db/storage/key_string.cpp b/src/mongo/db/storage/key_string.cpp
index 05b23caf660..085d439e5e5 100644
--- a/src/mongo/db/storage/key_string.cpp
+++ b/src/mongo/db/storage/key_string.cpp
@@ -2082,6 +2082,15 @@ RecordId KeyString::decodeRecordIdAtEnd(const void* bufferRaw, size_t bufSize) {
return decodeRecordId(&reader);
}
+size_t KeyString::sizeWithoutRecordIdAtEnd(const void* bufferRaw, size_t bufSize) {
+ invariant(bufSize >= 2); // smallest possible encoding of a RecordId.
+ const unsigned char* buffer = static_cast<const unsigned char*>(bufferRaw);
+ const unsigned char lastByte = *(buffer + bufSize - 1);
+ const size_t ridSize = 2 + (lastByte & 0x7); // stored in low 3 bits.
+ invariant(bufSize >= ridSize);
+ return bufSize - ridSize;
+}
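// A minimal sketch of how this pairs with decodeRecordIdAtEnd() (assuming 'ks' is a KeyString
// that was built with a RecordId appended):
//
//     size_t keyLen = KeyString::sizeWithoutRecordIdAtEnd(ks.getBuffer(), ks.getSize());
//     RecordId rid = KeyString::decodeRecordIdAtEnd(ks.getBuffer(), ks.getSize());
//     // Bytes [0, keyLen) encode the key itself; the trailing bytes encode 'rid', whose size
//     // is 2 plus the value stored in the low 3 bits of the final byte.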
+
RecordId KeyString::decodeRecordId(BufReader* reader) {
const uint8_t firstByte = readType<uint8_t>(reader, false);
const uint8_t numExtraBytes = firstByte >> 5; // high 3 bits in firstByte
diff --git a/src/mongo/db/storage/key_string.h b/src/mongo/db/storage/key_string.h
index a144a5096aa..31be4a833c6 100644
--- a/src/mongo/db/storage/key_string.h
+++ b/src/mongo/db/storage/key_string.h
@@ -324,6 +324,11 @@ public:
static RecordId decodeRecordIdAtEnd(const void* buf, size_t size);
/**
+ * Given a KeyString with a RecordId, returns the length of the section without the RecordId.
+ */
+ static size_t sizeWithoutRecordIdAtEnd(const void* bufferRaw, size_t bufSize);
+
+ /**
* Decodes a RecordId, consuming all bytes needed from reader.
*/
static RecordId decodeRecordId(BufReader* reader);