path: root/src/mongo/db
author     Eliot Horowitz <eliot@10gen.com>    2014-09-16 17:16:32 -0400
committer  Eliot Horowitz <eliot@10gen.com>    2014-09-17 10:49:29 -0400
commit     bd0eed0a669ec9f77cb207d698c6bed002acdd0e (patch)
tree       ab8ccd978d36c1591501ec8215f2d03e5e639d48 /src/mongo/db
parent     bb0a34d80b4b1e555fab5dda4ddac8ae48e99152 (diff)
download   mongo-bd0eed0a669ec9f77cb207d698c6bed002acdd0e.tar.gz
SERVER-13635: hook up mmap_v1 to SortedDataInterface test harness and fix mmap_v1
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/diskloc.h                                       |   4
-rw-r--r--  src/mongo/db/operation_context.h                             |  15
-rw-r--r--  src/mongo/db/storage/heap1/heap1_btree_impl.cpp              |  31
-rw-r--r--  src/mongo/db/storage/heap1/heap1_recovery_unit.cpp           |  58
-rw-r--r--  src/mongo/db/storage/heap1/heap1_recovery_unit.h             |  17
-rw-r--r--  src/mongo/db/storage/heap1/record_store_heap.cpp             |   3
-rw-r--r--  src/mongo/db/storage/index_entry_comparison.cpp              |   6
-rw-r--r--  src/mongo/db/storage/mmap_v1/SConscript                      |  10
-rw-r--r--  src/mongo/db/storage/mmap_v1/btree/btree_interface_test.cpp  |  72
-rw-r--r--  src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp          |   1
-rw-r--r--  src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp     |  62
-rw-r--r--  src/mongo/db/storage/mmap_v1/heap_record_store_btree.h       |  46
-rw-r--r--  src/mongo/db/storage/sorted_data_interface_test_harness.cpp  | 128
13 files changed, 336 insertions(+), 117 deletions(-)
diff --git a/src/mongo/db/diskloc.h b/src/mongo/db/diskloc.h
index c632f5f051a..d043dc40b00 100644
--- a/src/mongo/db/diskloc.h
+++ b/src/mongo/db/diskloc.h
@@ -179,7 +179,7 @@ namespace mongo {
// Maximum allowed DiskLoc. Note that only three bytes are used to represent the file number
// for consistency with the v1 index DiskLoc storage format, which uses only 7 bytes total.
// No Record may begin at this location because the minimum size of a Record is larger than one
- // byte.
- const DiskLoc maxDiskLoc(0x00ffffff, 0x7fffffff);
+ // byte. Also, the last bit is not able to be used because mmapv1 uses that for "used".
+ const DiskLoc maxDiskLoc(0x00ffffff, 0x7ffffffe);
} // namespace mongo
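
A minimal sketch of the constraint behind the new maxDiskLoc value, assuming the mongo source tree is on the include path; the helper name is hypothetical. mmapv1's 7-byte on-disk DiskLoc (DiskLoc56Bit, patched further down in btree_ondisk.cpp) reuses the lowest offset bit as the "used" flag, so only even offsets are representable and the maximum offset drops from 0x7fffffff to 0x7ffffffe.

    #include "mongo/db/diskloc.h"

    namespace mongo {
        // Hypothetical helper: a DiskLoc is storable in a v1 btree bucket only if
        // its low offset bit is clear; an odd offset would trip the new invariant
        // in DiskLoc56Bit::operator= added later in this commit.
        inline bool isBtreeStorableLoc( const DiskLoc& loc ) {
            return ( loc.getOfs() & 0x1 ) == 0;
        }
    }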
diff --git a/src/mongo/db/operation_context.h b/src/mongo/db/operation_context.h
index 0f49d2cc9c9..5883b601d2a 100644
--- a/src/mongo/db/operation_context.h
+++ b/src/mongo/db/operation_context.h
@@ -140,20 +140,25 @@ namespace mongo {
public:
WriteUnitOfWork(OperationContext* txn)
: _txn(txn) {
- _txn->lockState()->beginWriteUnitOfWork();
+ if ( _txn->lockState() ) {
+ _txn->lockState()->beginWriteUnitOfWork();
+ }
_txn->recoveryUnit()->beginUnitOfWork();
}
~WriteUnitOfWork() {
_txn->recoveryUnit()->endUnitOfWork();
- _txn->lockState()->endWriteUnitOfWork();
+ if ( _txn->lockState() ) {
+ _txn->lockState()->endWriteUnitOfWork();
+ }
}
void commit() {
_txn->recoveryUnit()->commitUnitOfWork();
-
- _txn->lockState()->endWriteUnitOfWork();
- _txn->lockState()->beginWriteUnitOfWork();
+ if ( _txn->lockState() ) {
+ _txn->lockState()->endWriteUnitOfWork();
+ _txn->lockState()->beginWriteUnitOfWork();
+ }
}
private:
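
A minimal usage sketch of what the NULL lockState() checks enable, mirroring the harness tests further down: a test OperationContext whose lockState() may be NULL can still drive a WriteUnitOfWork.

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        WriteUnitOfWork uow( opCtx.get() );     // no longer dereferences a NULL lockState
        sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 2 ), true );
        uow.commit();                           // destruction without commit() rolls back
    }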
diff --git a/src/mongo/db/storage/heap1/heap1_btree_impl.cpp b/src/mongo/db/storage/heap1/heap1_btree_impl.cpp
index e2964bafc27..fbe37187905 100644
--- a/src/mongo/db/storage/heap1/heap1_btree_impl.cpp
+++ b/src/mongo/db/storage/heap1/heap1_btree_impl.cpp
@@ -224,15 +224,17 @@ namespace {
}
virtual bool locate(const BSONObj& keyRaw, const DiskLoc& loc) {
- // An empty key means we should seek to the front
- if (keyRaw.isEmpty()) {
- _it = _data.begin();
+ const BSONObj key = stripFieldNames(keyRaw);
+ _it = _data.lower_bound(IndexKeyEntry(key, loc)); // lower_bound is >= key
+ if ( _it == _data.end() ) {
return false;
}
- const BSONObj key = stripFieldNames(keyRaw);
- _it = _data.lower_bound(IndexKeyEntry(key, loc)); // lower_bound is >= key
- return _it != _data.end() && (_it->key == key); // intentionally not comparing loc
+ if ( _it->key != key ) {
+ return false;
+ }
+
+ return _it->loc == loc;
}
virtual void customLocate(const BSONObj& keyBegin,
@@ -331,16 +333,19 @@ namespace {
}
virtual bool locate(const BSONObj& keyRaw, const DiskLoc& loc) {
- // An empty key means we should seek to the seek to the end,
- // i.e. one past the lowest key in the iterator
- if (keyRaw.isEmpty()) {
- _it = _data.rbegin();
+ const BSONObj key = stripFieldNames(keyRaw);
+ _it = lower_bound(IndexKeyEntry(key, loc)); // lower_bound is <= query
+
+ if ( _it == _data.rend() ) {
return false;
}
- const BSONObj key = stripFieldNames(keyRaw);
- _it = lower_bound(IndexKeyEntry(key, loc)); // lower_bound is <= query
- return _it != _data.rend() && (_it->key == key); // intentionally not comparing loc
+
+ if ( _it->key != key ) {
+ return false;
+ }
+
+ return _it->loc == loc;
}
virtual void customLocate(const BSONObj& keyBegin,
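
A sketch of the tightened locate() contract, mirroring the updated harness tests at the end of this change: locate(key, loc) now returns true only when an entry with exactly that key and DiskLoc exists; on a miss it returns false but still positions the cursor on the first entry at or past the query in the cursor's direction.

    scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
    // The index holds (key=2, loc=1:4); querying with loc 0:0 is now a miss...
    ASSERT( !cursor->locate( BSON( "" << 2 ), DiskLoc( 0, 0 ) ) );
    // ...but the cursor is still positioned on the matching key.
    ASSERT( !cursor->isEOF() );
    ASSERT_EQUALS( BSON( "" << 2 ), cursor->getKey() );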
diff --git a/src/mongo/db/storage/heap1/heap1_recovery_unit.cpp b/src/mongo/db/storage/heap1/heap1_recovery_unit.cpp
index b07ccbbdbd6..f95370851ab 100644
--- a/src/mongo/db/storage/heap1/heap1_recovery_unit.cpp
+++ b/src/mongo/db/storage/heap1/heap1_recovery_unit.cpp
@@ -35,46 +35,48 @@
namespace mongo {
Heap1RecoveryUnit::~Heap1RecoveryUnit() {
- invariant( _depth == 0 );
+ invariant( _frames.empty() );
}
void Heap1RecoveryUnit::beginUnitOfWork() {
- _depth++;
+ _frames.push_back( Frame() );
}
void Heap1RecoveryUnit::commitUnitOfWork() {
- if ( _depth == 1 ) {
+ if ( _frames.size() == 1 ) {
_rollbackPossible = true;
- _indexInserts.clear();
- _indexRemoves.clear();
}
+ else {
+ size_t last = _frames.size() - 1;
+ size_t next = last - 1;
+ _frames[next].indexMods.insert( _frames[next].indexMods.end(),
+ _frames[last].indexMods.begin(),
+ _frames[last].indexMods.end() );
+ }
+ _frames.back().indexMods.clear();
}
void Heap1RecoveryUnit::endUnitOfWork() {
- _depth--;
-
- // effectively do a rollback
- invariant( _rollbackPossible );
-
- for ( size_t i = 0; i < _indexInserts.size(); i++ ) {
- invariant( _depth == 0 ); // todo: fix me
- SortedDataInterface* idx = _indexInserts[i].idx;
- idx->unindex( NULL, _indexInserts[i].obj, _indexInserts[i].loc );
- }
+ // invariant( _rollbackPossible ); // todo
- for ( size_t i = 0; i < _indexRemoves.size(); i++ ) {
- invariant( _depth == 0 ); // todo: fix me
- SortedDataInterface* idx = _indexRemoves[i].idx;
- idx->insert( NULL, _indexRemoves[i].obj, _indexRemoves[i].loc, true );
+ const Frame& frame = _frames.back();
+ for ( size_t i = frame.indexMods.size() ; i > 0; i-- ) {
+ const IndexInfo& ii = frame.indexMods[i-1];
+ SortedDataInterface* idx = ii.idx;
+ if ( ii.insert )
+ idx->unindex( NULL, ii.obj, ii.loc );
+ else
+ idx->insert( NULL, ii.obj, ii.loc, true );
}
+ _frames.pop_back();
}
- void Heap1RecoveryUnit::notifyIndexInsert( SortedDataInterface* idx,
- const BSONObj& obj, const DiskLoc& loc ) {
- IndexInfo ii = { idx, obj, loc };
- _indexInserts.push_back( ii );
+ void Heap1RecoveryUnit::notifyIndexMod( SortedDataInterface* idx,
+ const BSONObj& obj, const DiskLoc& loc, bool insert ) {
+ IndexInfo ii = { idx, obj, loc, insert };
+ _frames.back().indexMods.push_back( ii );
}
// static
@@ -84,13 +86,7 @@ namespace mongo {
return;
Heap1RecoveryUnit* ru = dynamic_cast<Heap1RecoveryUnit*>( ctx->recoveryUnit() );
- ru->notifyIndexInsert( idx, obj, loc );
- }
-
- void Heap1RecoveryUnit::notifyIndexRemove( SortedDataInterface* idx,
- const BSONObj& obj, const DiskLoc& loc ) {
- IndexInfo ii = { idx, obj, loc };
- _indexRemoves.push_back( ii );
+ ru->notifyIndexMod( idx, obj, loc, true );
}
// static
@@ -100,7 +96,7 @@ namespace mongo {
return;
Heap1RecoveryUnit* ru = dynamic_cast<Heap1RecoveryUnit*>( ctx->recoveryUnit() );
- ru->notifyIndexRemove( idx, obj, loc );
+ ru->notifyIndexMod( idx, obj, loc, false );
}
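
A sketch of the rollback behavior this per-frame bookkeeping provides, mirroring the "no commit" harness tests below: each beginUnitOfWork() pushes a frame, commitUnitOfWork() folds a nested frame's index mods into its parent (or finalizes them at the top level), and endUnitOfWork() without a commit replays the current frame's mods in reverse, unindexing inserts and re-inserting removals.

    {
        WriteUnitOfWork uow( opCtx.get() );
        sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 20 ), true );
        // no uow.commit(): ~WriteUnitOfWork calls endUnitOfWork(), and the insert
        // recorded in this frame is undone via unindex().
    }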
diff --git a/src/mongo/db/storage/heap1/heap1_recovery_unit.h b/src/mongo/db/storage/heap1/heap1_recovery_unit.h
index 316eb6f49af..a8064191c85 100644
--- a/src/mongo/db/storage/heap1/heap1_recovery_unit.h
+++ b/src/mongo/db/storage/heap1/heap1_recovery_unit.h
@@ -42,7 +42,6 @@ namespace mongo {
class Heap1RecoveryUnit : public RecoveryUnit {
public:
Heap1RecoveryUnit() {
- _depth = 0;
_rollbackPossible = true;
}
@@ -69,25 +68,29 @@ namespace mongo {
void rollbackImpossible() { _rollbackPossible = false; }
- void notifyIndexInsert( SortedDataInterface* idx, const BSONObj& obj, const DiskLoc& loc );
+ void notifyIndexMod( SortedDataInterface* idx,
+ const BSONObj& obj, const DiskLoc& loc, bool insert );
+
static void notifyIndexInsert( OperationContext* ctx, SortedDataInterface* idx,
const BSONObj& obj, const DiskLoc& loc );
-
- void notifyIndexRemove( SortedDataInterface* idx, const BSONObj& obj, const DiskLoc& loc );
static void notifyIndexRemove( OperationContext* ctx, SortedDataInterface* idx,
const BSONObj& obj, const DiskLoc& loc );
private:
- int _depth;
bool _rollbackPossible;
struct IndexInfo {
SortedDataInterface* idx;
BSONObj obj;
DiskLoc loc;
+ bool insert;
};
- std::vector<IndexInfo> _indexInserts;
- std::vector<IndexInfo> _indexRemoves;
+
+ struct Frame {
+ std::vector<IndexInfo> indexMods;
+ };
+
+ std::vector<Frame> _frames;
};
}
diff --git a/src/mongo/db/storage/heap1/record_store_heap.cpp b/src/mongo/db/storage/heap1/record_store_heap.cpp
index 273ea49f157..98f41926be3 100644
--- a/src/mongo/db/storage/heap1/record_store_heap.cpp
+++ b/src/mongo/db/storage/heap1/record_store_heap.cpp
@@ -69,6 +69,9 @@ namespace mongo {
HeapRecordStore::HeapRecord* HeapRecordStore::recordFor(const DiskLoc& loc) const {
Records::const_iterator it = _records.find(loc);
+ if ( it == _records.end() ) {
+ error() << "HeapRecordStore::recordFor cannot find record for " << ns() << ":" << loc;
+ }
invariant(it != _records.end());
return reinterpret_cast<HeapRecord*>(it->second.get());
}
diff --git a/src/mongo/db/storage/index_entry_comparison.cpp b/src/mongo/db/storage/index_entry_comparison.cpp
index 2e34a5bc64b..1d21a6af9b5 100644
--- a/src/mongo/db/storage/index_entry_comparison.cpp
+++ b/src/mongo/db/storage/index_entry_comparison.cpp
@@ -57,7 +57,8 @@ namespace mongo {
// Iterate through both BSONObjects, comparing individual elements one by one
for (unsigned mask = 1; lhsIt.more(); mask <<= 1) {
- invariant(rhsIt.more());
+ if (!rhsIt.more())
+ return _order.descending(mask) ? -1 : 1;
const BSONElement l = lhsIt.next();
const BSONElement r = rhsIt.next();
@@ -90,7 +91,8 @@ namespace mongo {
}
- invariant(!rhsIt.more());
+ if(rhsIt.more())
+ return -1;
// This means just look at the key, not the loc.
if (lhs.loc.isNull() || rhs.loc.isNull())
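
A worked example of the relaxed comparison, written as comments since only the loop body appears in this hunk; the values are illustrative only. The comparator no longer requires both keys to have the same number of elements: when the right-hand key runs out first, the longer left-hand key sorts after it on an ascending field (before it on a descending one), and when the left-hand key runs out first it sorts before the longer right-hand key.

    // Ascending single-field-plus-extra ordering:
    //   lhs.key = { "": 5, "": 7 }   (stored entry, two elements)
    //   rhs.key = { "": 5 }          (query key, one element)
    // Old code: invariant failure.  New code: rhs is exhausted at the second
    // element, so the comparison returns 1 (lhs sorts after the shorter rhs);
    // with that trailing field descending it would return -1 instead.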
diff --git a/src/mongo/db/storage/mmap_v1/SConscript b/src/mongo/db/storage/mmap_v1/SConscript
index 50dec12ab6f..3273fe3399a 100644
--- a/src/mongo/db/storage/mmap_v1/SConscript
+++ b/src/mongo/db/storage/mmap_v1/SConscript
@@ -125,3 +125,13 @@ env.CppUnitTest(
]
)
+env.CppUnitTest(
+ target='btree_interface_test',
+ source=['btree/btree_interface_test.cpp'
+ ],
+ LIBDEPS=[
+ 'btree_test_help',
+ '$BUILD_DIR/mongo/db/storage/sorted_data_interface_test_harness'
+ ]
+ )
+
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_interface_test.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_interface_test.cpp
new file mode 100644
index 00000000000..cd7ff76989c
--- /dev/null
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_interface_test.cpp
@@ -0,0 +1,72 @@
+// btree_interface_test.cpp
+
+/**
+ * Copyright (C) 2014 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/storage/mmap_v1/btree/btree_interface.h"
+#include "mongo/db/storage/mmap_v1/btree/btree_test_help.h"
+#include "mongo/db/storage/sorted_data_interface_test_harness.h"
+#include "mongo/unittest/unittest.h"
+
+namespace mongo {
+
+ class MyHarnessHelper : public HarnessHelper {
+ public:
+ MyHarnessHelper()
+ : _recordStore( "a.b" ),
+ _order( Ordering::make( BSONObj() ) ) {
+ }
+
+ virtual SortedDataInterface* newSortedDataInterface() {
+ auto_ptr<SortedDataInterface> sorted( getMMAPV1Interface( &_headManager,
+ &_recordStore,
+ _order,
+ "a_1",
+ 1,
+ &_deletionNotification ) );
+ OperationContextNoop op;
+ massertStatusOK( sorted->initAsEmpty( &op ) );
+ return sorted.release();
+ }
+
+ virtual RecoveryUnit* newRecoveryUnit() {
+ return new HeapRecordStoreBtreeRecoveryUnit();
+ }
+
+ private:
+ TestHeadManager _headManager;
+ HeapRecordStoreBtree _recordStore;
+ Ordering _order;
+ BucketDeletionNotification _deletionNotification;
+ };
+
+ HarnessHelper* newHarnessHelper() {
+ return new MyHarnessHelper();
+ }
+
+}
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp
index cdf89eff1c3..bfd62d282c5 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp
@@ -37,6 +37,7 @@ namespace mongo {
void DiskLoc56Bit::operator=(const DiskLoc& loc) {
ofs = loc.getOfs();
+ invariant( (ofs & 0x1) == 0 ); // we use the last bit for used
int la = loc.a();
invariant( la <= 0xffffff ); // must fit in 3 bytes
if( la < 0 ) {
diff --git a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp
index 03bf97f98e7..80a9ce0220f 100644
--- a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp
+++ b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp
@@ -31,6 +31,7 @@
#include "mongo/db/storage/mmap_v1/heap_record_store_btree.h"
+#include "mongo/db/operation_context.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -58,6 +59,8 @@ namespace mongo {
const DiskLoc loc = allocateLoc();
_records[loc] = rec;
+ HeapRecordStoreBtreeRecoveryUnit::notifyInsert( txn, this, loc );
+
return StatusWith<DiskLoc>(loc);
}
@@ -70,6 +73,8 @@ namespace mongo {
const DiskLoc loc = allocateLoc();
_records[loc] = rec;
+ HeapRecordStoreBtreeRecoveryUnit::notifyInsert( txn, this, loc );
+
return StatusWith<DiskLoc>(loc);
}
@@ -86,4 +91,61 @@ namespace mongo {
return Status::OK();
}
+ // ---------------------------
+
+ HeapRecordStoreBtreeRecoveryUnit::~HeapRecordStoreBtreeRecoveryUnit() {
+ invariant( _depth == 0 );
+ }
+
+ void HeapRecordStoreBtreeRecoveryUnit::beginUnitOfWork() {
+ _depth++;
+ }
+
+ void HeapRecordStoreBtreeRecoveryUnit::commitUnitOfWork() {
+ invariant( _depth == 1 );
+ _insertions.clear();
+ _mods.clear();
+ }
+
+ void HeapRecordStoreBtreeRecoveryUnit::endUnitOfWork() {
+ invariant( _depth-- == 1 );
+
+ // reverse in case we write same area twice
+ for ( size_t i = _mods.size(); i > 0; i-- ) {
+ ModEntry& e = _mods[i-1];
+ memcpy( e.data, e.old.get(), e.len );
+ }
+
+ invariant( _insertions.size() == 0 ); // todo
+ }
+
+ void* HeapRecordStoreBtreeRecoveryUnit::writingPtr(void* data, size_t len) {
+ ModEntry e = { data, len, boost::shared_array<char>( new char[len] ) };
+ memcpy( e.old.get(), data, len );
+ _mods.push_back( e );
+ return data;
+ }
+
+ void HeapRecordStoreBtreeRecoveryUnit::notifyInsert( HeapRecordStoreBtree* rs,
+ const DiskLoc& loc ) {
+ InsertEntry e = { rs, loc };
+ _insertions.push_back( e );
+ }
+
+ void HeapRecordStoreBtreeRecoveryUnit::notifyInsert( OperationContext* ctx,
+ HeapRecordStoreBtree* rs,
+ const DiskLoc& loc ) {
+ if ( !ctx )
+ return;
+
+ HeapRecordStoreBtreeRecoveryUnit* ru =
+ dynamic_cast<HeapRecordStoreBtreeRecoveryUnit*>( ctx->recoveryUnit() );
+
+ if ( !ru )
+ return;
+
+ ru->notifyInsert( rs, loc );
+ }
+
+
} // namespace mongo
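
A minimal sketch of the copy-on-write rollback HeapRecordStoreBtreeRecoveryUnit implements, assuming the mmap_v1 test headers above; the buffer and values are illustrative only. writingPtr() snapshots the bytes about to be modified, and an endUnitOfWork() with no prior commit copies the snapshots back newest-first, undoing uncommitted in-place writes.

    #include <cstring>
    #include "mongo/db/storage/mmap_v1/heap_record_store_btree.h"

    void rollbackSketch() {
        char buf[4] = { 'o', 'l', 'd', '!' };

        mongo::HeapRecordStoreBtreeRecoveryUnit ru;
        ru.beginUnitOfWork();
        // Declare intent to write: the current 4 bytes are saved in a ModEntry.
        char* p = static_cast<char*>( ru.writingPtr( buf, sizeof(buf) ) );
        std::memcpy( p, "new!", 4 );
        // No commitUnitOfWork(): endUnitOfWork() copies the saved bytes back,
        // most recent modification first, so buf reads "old!" again.
        ru.endUnitOfWork();
    }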
diff --git a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
index 893ff42a494..4214ee8930c 100644
--- a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
+++ b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
@@ -34,6 +34,7 @@
#include <map>
#include "mongo/db/storage/record_store.h"
+#include "mongo/db/storage/recovery_unit.h"
namespace mongo {
@@ -173,4 +174,49 @@ namespace mongo {
int64_t _nextId;
};
+ /**
+ * A RecoveryUnit for HeapRecordStoreBtree, this is for testing btree only.
+ */
+ class HeapRecordStoreBtreeRecoveryUnit : public RecoveryUnit {
+ public:
+ HeapRecordStoreBtreeRecoveryUnit() {
+ _depth = 0;
+ }
+
+ virtual ~HeapRecordStoreBtreeRecoveryUnit();
+
+ virtual void beginUnitOfWork();
+ virtual void commitUnitOfWork();
+ virtual void endUnitOfWork();
+
+ virtual bool awaitCommit() { return true; }
+
+ virtual void registerChange(Change* change) {}
+
+ virtual void* writingPtr(void* data, size_t len);
+
+ virtual void syncDataAndTruncateJournal() {}
+
+ // -----------------------
+
+ void notifyInsert( HeapRecordStoreBtree* rs, const DiskLoc& loc );
+ static void notifyInsert( OperationContext* ctx,
+ HeapRecordStoreBtree* rs, const DiskLoc& loc );
+
+ private:
+ int _depth;
+ struct InsertEntry {
+ HeapRecordStoreBtree* rs;
+ DiskLoc loc;
+ };
+ std::vector<InsertEntry> _insertions;
+
+ struct ModEntry {
+ void* data;
+ size_t len;
+ boost::shared_array<char> old;
+ };
+ std::vector<ModEntry> _mods;
+ };
+
} // namespace mongo
diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
index ad0b6716a6a..b65b9540718 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
@@ -44,7 +44,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 1 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 2 ), true );
uow.commit();
}
}
@@ -53,7 +53,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 6, 1 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 6, 2 ), true );
uow.commit();
}
}
@@ -61,6 +61,10 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
ASSERT_EQUALS( 2, sorted->numEntries( opCtx.get() ) );
+
+ long long x = 0;
+ sorted->fullValidate( opCtx.get(), &x );
+ ASSERT_EQUALS( 2, x );
}
}
@@ -72,7 +76,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 17 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), true );
uow.commit();
}
}
@@ -81,7 +85,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 20 ), true );
uow.commit();
}
}
@@ -92,7 +96,7 @@ namespace mongo {
}
}
- TEST( SortedDataInterface, InsertWithDups3 ) {
+ TEST( SortedDataInterface, InsertWithDups3AndRollback ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface() );
@@ -100,7 +104,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 17 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), true );
uow.commit();
}
}
@@ -109,7 +113,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 20 ), true );
// no commit
}
}
@@ -128,7 +132,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 17 ), false );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), false );
uow.commit();
}
}
@@ -137,7 +141,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 2 ), DiskLoc( 5, 18 ), false );
+ sorted->insert( opCtx.get(), BSON( "" << 2 ), DiskLoc( 5, 20 ), false );
uow.commit();
}
}
@@ -157,7 +161,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 1 ), false );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 2 ), false );
uow.commit();
}
}
@@ -166,7 +170,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 2 ), false );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 4 ), false );
uow.commit();
}
}
@@ -186,7 +190,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 17 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), true );
uow.commit();
}
}
@@ -200,7 +204,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT( !sorted->unindex( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ) ) );
+ ASSERT( !sorted->unindex( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 20 ) ) );
uow.commit();
}
}
@@ -214,7 +218,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT( !sorted->unindex( opCtx.get(), BSON( "" << 2 ), DiskLoc( 5, 17 ) ) );
+ ASSERT( !sorted->unindex( opCtx.get(), BSON( "" << 2 ), DiskLoc( 5, 18 ) ) );
uow.commit();
}
}
@@ -229,7 +233,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT( sorted->unindex( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 17 ) ) );
+ ASSERT( sorted->unindex( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ) ) );
uow.commit();
}
}
@@ -241,7 +245,7 @@ namespace mongo {
}
- TEST( SortedDataInterface, Unindex2 ) {
+ TEST( SortedDataInterface, Unindex2Rollback ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface() );
@@ -249,7 +253,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 17 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), true );
uow.commit();
}
}
@@ -263,7 +267,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT( sorted->unindex( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 17 ) ) );
+ ASSERT( sorted->unindex( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ) ) );
// no commit
}
}
@@ -285,7 +289,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << i ), DiskLoc( 5, i ), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << i ), DiskLoc( 5, i * 2 ), true ) );
uow.commit();
}
}
@@ -297,7 +301,7 @@ namespace mongo {
int n = 0;
while ( !cursor->isEOF() ) {
DiskLoc loc = cursor->getDiskLoc();
- ASSERT_EQUALS( n, loc.getOfs() );
+ ASSERT_EQUALS( n * 2, loc.getOfs() );
ASSERT_EQUALS( BSON( "" << n ), cursor->getKey() );
n++;
cursor->advance();
@@ -317,7 +321,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << i ), DiskLoc( 5, i ), true );
+ sorted->insert( opCtx.get(), BSON( "" << i ), DiskLoc( 5, i * 2 ), true );
uow.commit();
}
}
@@ -329,7 +333,7 @@ namespace mongo {
int n = 0;
while ( !cursor->isEOF() ) {
DiskLoc loc = cursor->getDiskLoc();
- ASSERT_EQUALS( n, loc.getOfs() );
+ ASSERT_EQUALS( n * 2, loc.getOfs() );
ASSERT_EQUALS( BSON( "" << n ), cursor->getKey() );
n++;
cursor->advance();
@@ -351,7 +355,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 5 ), DiskLoc( 5, i ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 5 ), DiskLoc( 5, i * 2 ), true );
uow.commit();
}
}
@@ -363,7 +367,7 @@ namespace mongo {
int n = 0;
while ( !cursor->isEOF() ) {
DiskLoc loc = cursor->getDiskLoc();
- ASSERT_EQUALS( n, loc.getOfs() );
+ ASSERT_EQUALS( n * 2, loc.getOfs() );
n++;
cursor->advance();
cursor->savePosition();
@@ -416,9 +420,9 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc(1,1), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), DiskLoc(1,2), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 3 ), DiskLoc(1,3), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc(1,2), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), DiskLoc(1,4), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 3 ), DiskLoc(1,6), true ) );
uow.commit();
}
}
@@ -426,14 +430,14 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( cursor->locate( BSON( "a" << 2 ), DiskLoc(0,0) ) );
+ ASSERT( !cursor->locate( BSON( "a" << 2 ), DiskLoc(0,0) ) );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 2 ), cursor->getKey() );
- ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
+ ASSERT_EQUALS( DiskLoc(1,4), cursor->getDiskLoc() );
cursor->advance();
ASSERT_EQUALS( BSON( "" << 3 ), cursor->getKey() );
- ASSERT_EQUALS( DiskLoc(1,3), cursor->getDiskLoc() );
+ ASSERT_EQUALS( DiskLoc(1,6), cursor->getDiskLoc() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -449,9 +453,9 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc(1,1), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), DiskLoc(1,2), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 3 ), DiskLoc(1,3), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc(1,2), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), DiskLoc(1,4), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 3 ), DiskLoc(1,6), true ) );
uow.commit();
}
}
@@ -462,16 +466,14 @@ namespace mongo {
ASSERT( !cursor->locate( BSONObj(), DiskLoc(0,0) ) );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 1 ), cursor->getKey() );
- ASSERT_EQUALS( DiskLoc(1,1), cursor->getDiskLoc() );
+ ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
}
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
ASSERT( !cursor->locate( BSONObj(), DiskLoc(0,0) ) );
- ASSERT( !cursor->isEOF() );
- ASSERT_EQUALS( BSON( "" << 3 ), cursor->getKey() );
- ASSERT_EQUALS( DiskLoc(1,3), cursor->getDiskLoc() );
+ ASSERT( cursor->isEOF() );
}
}
@@ -481,14 +483,19 @@ namespace mongo {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface() );
- for ( int i = 0; i < 10; i++ ) {
- if ( i == 6 )
- continue;
- ASSERT_OK( sorted->insert( NULL, BSON( "" << i ), DiskLoc(1,i), true ) );
+ {
+ scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
+ for ( int i = 0; i < 10; i++ ) {
+ if ( i == 6 )
+ continue;
+ WriteUnitOfWork uow( opCtx.get() );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << i ), DiskLoc(1,i*2), true ) );
+ uow.commit();
+ }
}
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( NULL, 1 ) );
- ASSERT( cursor->locate( BSON( "" << 5 ), DiskLoc(0,0) ) );
+ ASSERT( !cursor->locate( BSON( "" << 5 ), DiskLoc(0,0) ) );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 5 ), cursor->getKey() );
cursor->advance();
@@ -500,21 +507,28 @@ namespace mongo {
ASSERT_EQUALS( BSON( "" << 4 ), cursor->getKey() );
cursor.reset( sorted->newCursor( NULL, -1 ) );
- ASSERT( cursor->locate( BSON( "" << 5 ), DiskLoc() ) );
+ ASSERT( !cursor->locate( BSON( "" << 5 ), maxDiskLoc ) );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 5 ), cursor->getKey() );
cursor->advance();
ASSERT_EQUALS( BSON( "" << 4 ), cursor->getKey() );
cursor.reset( sorted->newCursor( NULL, -1 ) );
- cursor->locate( BSON( "" << 6 ), DiskLoc() );
+ ASSERT( !cursor->locate( BSON( "" << 5 ), minDiskLoc ) );
+ ASSERT( !cursor->isEOF() );
+ ASSERT_EQUALS( BSON( "" << 4 ), cursor->getKey() );
+ cursor->advance();
+ ASSERT_EQUALS( BSON( "" << 3 ), cursor->getKey() );
+
+ cursor.reset( sorted->newCursor( NULL, -1 ) );
+ cursor->locate( BSON( "" << 6 ), maxDiskLoc );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 5 ), cursor->getKey() );
cursor->advance();
ASSERT_EQUALS( BSON( "" << 4 ), cursor->getKey() );
cursor.reset( sorted->newCursor( NULL, -1 ) );
- cursor->locate( BSON( "" << 500 ), DiskLoc() );
+ cursor->locate( BSON( "" << 500 ), maxDiskLoc );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 9 ), cursor->getKey() );
cursor->advance();
@@ -531,10 +545,10 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc(1,1), true ) );
ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc(1,2), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc(1,3), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), DiskLoc(1,4), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc(1,4), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc(1,6), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), DiskLoc(1,8), true ) );
uow.commit();
}
}
@@ -542,18 +556,18 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( cursor->locate( BSON( "a" << 1 ), minDiskLoc ) );
+ ASSERT( !cursor->locate( BSON( "a" << 1 ), minDiskLoc ) );
ASSERT( !cursor->isEOF() );
- ASSERT_EQUALS( DiskLoc(1,1), cursor->getDiskLoc() );
+ ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
cursor->advance();
- ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
+ ASSERT_EQUALS( DiskLoc(1,4), cursor->getDiskLoc() );
cursor->advance();
- ASSERT_EQUALS( DiskLoc(1,3), cursor->getDiskLoc() );
+ ASSERT_EQUALS( DiskLoc(1,6), cursor->getDiskLoc() );
cursor->advance();
- ASSERT_EQUALS( DiskLoc(1,4), cursor->getDiskLoc() );
+ ASSERT_EQUALS( DiskLoc(1,8), cursor->getDiskLoc() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -562,16 +576,16 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( cursor->locate( BSON( "a" << 1 ), maxDiskLoc ) );
+ ASSERT( !cursor->locate( BSON( "a" << 1 ), maxDiskLoc ) );
ASSERT( !cursor->isEOF() );
ASSERT( cursor->getDirection() == -1 );
- ASSERT_EQUALS( DiskLoc(1,3), cursor->getDiskLoc() );
+ ASSERT_EQUALS( DiskLoc(1,6), cursor->getDiskLoc() );
cursor->advance();
- ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
+ ASSERT_EQUALS( DiskLoc(1,4), cursor->getDiskLoc() );
cursor->advance();
- ASSERT_EQUALS( DiskLoc(1,1), cursor->getDiskLoc() );
+ ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
cursor->advance();
ASSERT( cursor->isEOF() );