author     Mathias Stearn <mathias@10gen.com>  2014-11-26 15:04:26 -0500
committer  Mathias Stearn <mathias@10gen.com>  2014-11-26 15:15:59 -0500
commit     16a8ef7ad60d498b69bdc0ad5cbca44757d16fd8 (patch)
tree       2d9f056acf8f1b9b2e943f82e315bdf019475a38 /src/mongo/db/storage
parent     cb5cab88761d9aca75e064665cce89f579c69e36 (diff)
download   mongo-16a8ef7ad60d498b69bdc0ad5cbca44757d16fd8.tar.gz
SERVER-13679 Replace DiskLoc with RecordId outside of MMAPv1
Operations:
    sed -i -e 's/\<DiskLoc\>/RecordId/g'
    sed -i -e 's/\<DiskLocs\>/RecordIds/g'
    sed -i -e 's/\<minDiskLoc\>/RecordId::min()/g'
    sed -i -e 's/\<maxDiskLoc\>/RecordId::max()/g'
    sed -i -e 's/\<getDiskLoc\>/getRecordId/g'
Changes under mmap_v1 were reverted and redone by hand as needed.
Diffstat (limited to 'src/mongo/db/storage')
-rw-r--r-- src/mongo/db/storage/bson_collection_catalog_entry.cpp | 4
-rw-r--r-- src/mongo/db/storage/bson_collection_catalog_entry.h | 6
-rw-r--r-- src/mongo/db/storage/capped_callback.h | 2
-rw-r--r-- src/mongo/db/storage/devnull/devnull_kv_engine.cpp | 34
-rw-r--r-- src/mongo/db/storage/in_memory/in_memory_btree_impl.cpp | 42
-rw-r--r-- src/mongo/db/storage/in_memory/in_memory_record_store.cpp | 114
-rw-r--r-- src/mongo/db/storage/in_memory/in_memory_record_store.h | 58
-rw-r--r-- src/mongo/db/storage/index_entry_comparison.h | 6
-rw-r--r-- src/mongo/db/storage/kv/kv_catalog.cpp | 18
-rw-r--r-- src/mongo/db/storage/kv/kv_catalog.h | 6
-rw-r--r-- src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp | 4
-rw-r--r-- src/mongo/db/storage/kv/kv_collection_catalog_entry.h | 2
-rw-r--r-- src/mongo/db/storage/kv/kv_engine_test_harness.cpp | 14
-rw-r--r-- src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp | 4
-rw-r--r-- src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp | 4
-rw-r--r-- src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp | 4
-rw-r--r-- src/mongo/db/storage/mmap_v1/data_file.cpp | 4
-rw-r--r-- src/mongo/db/storage/mmap_v1/diskloc.h | 28
-rw-r--r-- src/mongo/db/storage/oplog_hack.cpp | 28
-rw-r--r-- src/mongo/db/storage/oplog_hack.h | 8
-rw-r--r-- src/mongo/db/storage/record_store.h | 56
-rw-r--r-- src/mongo/db/storage/record_store_test_datafor.cpp | 12
-rw-r--r-- src/mongo/db/storage/record_store_test_datasize.cpp | 2
-rw-r--r-- src/mongo/db/storage/record_store_test_deleterecord.cpp | 8
-rw-r--r-- src/mongo/db/storage/record_store_test_harness.cpp | 40
-rw-r--r-- src/mongo/db/storage/record_store_test_insertrecord.cpp | 16
-rw-r--r-- src/mongo/db/storage/record_store_test_manyiter.cpp | 20
-rw-r--r-- src/mongo/db/storage/record_store_test_recorditer.cpp | 64
-rw-r--r-- src/mongo/db/storage/record_store_test_repairiter.cpp | 20
-rw-r--r-- src/mongo/db/storage/record_store_test_storagesize.cpp | 2
-rw-r--r-- src/mongo/db/storage/record_store_test_touch.cpp | 4
-rw-r--r-- src/mongo/db/storage/record_store_test_truncate.cpp | 2
-rw-r--r-- src/mongo/db/storage/record_store_test_updaterecord.cpp | 20
-rw-r--r-- src/mongo/db/storage/record_store_test_updaterecord.h | 6
-rw-r--r-- src/mongo/db/storage/record_store_test_updatewithdamages.cpp | 16
-rw-r--r-- src/mongo/db/storage/record_store_test_validate.h | 2
-rw-r--r-- src/mongo/db/storage/rocks/rocks_record_store.cpp | 80
-rw-r--r-- src/mongo/db/storage/rocks/rocks_record_store.h | 46
-rw-r--r-- src/mongo/db/storage/rocks/rocks_sorted_data_impl.cpp | 46
-rw-r--r-- src/mongo/db/storage/rocks/rocks_sorted_data_impl.h | 14
-rw-r--r-- src/mongo/db/storage/sorted_data_interface.h | 34
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_cursor.cpp | 16
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_cursor_advanceto.cpp | 88
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_cursor_locate.cpp | 142
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_cursor_position.cpp | 20
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp | 28
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_dupkeycheck.cpp | 12
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp | 2
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_harness.cpp | 118
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_harness.h | 16
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_insert.cpp | 4
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_spaceused.cpp | 2
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp | 142
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_index.h | 30
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp | 116
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h | 68
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp | 60
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp | 2
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h | 6
59 files changed, 888 insertions, 884 deletions
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.cpp b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
index a1048a4d401..2a2d65007e8 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
@@ -86,7 +86,7 @@ namespace mongo {
return md.indexes[offset].multikey;
}
- DiskLoc BSONCollectionCatalogEntry::getIndexHead( OperationContext* txn,
+ RecordId BSONCollectionCatalogEntry::getIndexHead( OperationContext* txn,
const StringData& indexName ) const {
MetaData md = _getMetaData( txn );
@@ -186,7 +186,7 @@ namespace mongo {
IndexMetaData imd;
imd.spec = idx["spec"].Obj().getOwned();
imd.ready = idx["ready"].trueValue();
- imd.head = DiskLoc( idx["head_a"].Int(),
+ imd.head = RecordId( idx["head_a"].Int(),
idx["head_b"].Int() );
imd.multikey = idx["multikey"].trueValue();
indexes.push_back( imd );
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.h b/src/mongo/db/storage/bson_collection_catalog_entry.h
index 5af770ae4d6..188a7a8430a 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.h
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.h
@@ -62,7 +62,7 @@ namespace mongo {
virtual bool isIndexMultikey( OperationContext* txn,
const StringData& indexName) const;
- virtual DiskLoc getIndexHead( OperationContext* txn,
+ virtual RecordId getIndexHead( OperationContext* txn,
const StringData& indexName ) const;
virtual bool isIndexReady( OperationContext* txn,
@@ -72,7 +72,7 @@ namespace mongo {
struct IndexMetaData {
IndexMetaData() {}
- IndexMetaData( BSONObj s, bool r, DiskLoc h, bool m )
+ IndexMetaData( BSONObj s, bool r, RecordId h, bool m )
: spec( s ), ready( r ), head( h ), multikey( m ) {}
void updateTTLSetting( long long newExpireSeconds );
@@ -81,7 +81,7 @@ namespace mongo {
BSONObj spec;
bool ready;
- DiskLoc head;
+ RecordId head;
bool multikey;
};
diff --git a/src/mongo/db/storage/capped_callback.h b/src/mongo/db/storage/capped_callback.h
index c5b1954f4ff..a86c9e9d2dc 100644
--- a/src/mongo/db/storage/capped_callback.h
+++ b/src/mongo/db/storage/capped_callback.h
@@ -48,7 +48,7 @@ namespace mongo {
/**
* This will be called right before loc is deleted when wrapping.
*/
- virtual Status aboutToDeleteCapped( OperationContext* txn, const DiskLoc& loc ) = 0;
+ virtual Status aboutToDeleteCapped( OperationContext* txn, const RecordId& loc ) = 0;
};
}
diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
index d6f1e8fa0af..499e5f64d6c 100644
--- a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
+++ b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
@@ -38,12 +38,12 @@ namespace mongo {
class EmptyRecordIterator: public RecordIterator {
public:
virtual bool isEOF() { return true; }
- virtual DiskLoc curr() { return DiskLoc(); }
- virtual DiskLoc getNext() { return DiskLoc(); }
- virtual void invalidate(const DiskLoc& dl) { }
+ virtual RecordId curr() { return RecordId(); }
+ virtual RecordId getNext() { return RecordId(); }
+ virtual void invalidate(const RecordId& dl) { }
virtual void saveState() { }
virtual bool restoreState(OperationContext* txn) { return false; }
- virtual RecordData dataFor( const DiskLoc& loc ) const {
+ virtual RecordData dataFor( const RecordId& loc ) const {
invariant( false );
}
};
@@ -72,42 +72,42 @@ namespace mongo {
return 0;
}
- virtual RecordData dataFor( OperationContext* txn, const DiskLoc& loc) const {
+ virtual RecordData dataFor( OperationContext* txn, const RecordId& loc) const {
return RecordData( _dummy.objdata(), _dummy.objsize() );
}
- virtual bool findRecord( OperationContext* txn, const DiskLoc& loc, RecordData* rd ) const {
+ virtual bool findRecord( OperationContext* txn, const RecordId& loc, RecordData* rd ) const {
return false;
}
- virtual void deleteRecord( OperationContext* txn, const DiskLoc& dl ) {}
+ virtual void deleteRecord( OperationContext* txn, const RecordId& dl ) {}
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const char* data,
int len,
bool enforceQuota ) {
_numInserts++;
- return StatusWith<DiskLoc>( DiskLoc( 6, 4 ) );
+ return StatusWith<RecordId>( RecordId( 6, 4 ) );
}
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const DocWriter* doc,
bool enforceQuota ) {
_numInserts++;
- return StatusWith<DiskLoc>( DiskLoc( 6, 4 ) );
+ return StatusWith<RecordId>( RecordId( 6, 4 ) );
}
- virtual StatusWith<DiskLoc> updateRecord( OperationContext* txn,
- const DiskLoc& oldLocation,
+ virtual StatusWith<RecordId> updateRecord( OperationContext* txn,
+ const RecordId& oldLocation,
const char* data,
int len,
bool enforceQuota,
UpdateMoveNotifier* notifier ) {
- return StatusWith<DiskLoc>( oldLocation );
+ return StatusWith<RecordId>( oldLocation );
}
virtual Status updateWithDamages( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages ) {
@@ -115,7 +115,7 @@ namespace mongo {
}
virtual RecordIterator* getIterator( OperationContext* txn,
- const DiskLoc& start,
+ const RecordId& start,
const CollectionScanParams::Direction& dir ) const {
return new EmptyRecordIterator();
}
@@ -133,7 +133,7 @@ namespace mongo {
virtual Status truncate( OperationContext* txn ) { return Status::OK(); }
virtual void temp_cappedTruncateAfter(OperationContext* txn,
- DiskLoc end,
+ RecordId end,
bool inclusive) { }
virtual bool compactSupported() const { return false; }
diff --git a/src/mongo/db/storage/in_memory/in_memory_btree_impl.cpp b/src/mongo/db/storage/in_memory/in_memory_btree_impl.cpp
index 396be5f5375..8a86917287a 100644
--- a/src/mongo/db/storage/in_memory/in_memory_btree_impl.cpp
+++ b/src/mongo/db/storage/in_memory/in_memory_btree_impl.cpp
@@ -74,8 +74,8 @@ namespace {
return Status(ErrorCodes::DuplicateKey, sb.str());
}
- bool isDup(const IndexSet& data, const BSONObj& key, DiskLoc loc) {
- const IndexSet::const_iterator it = data.find(IndexKeyEntry(key, DiskLoc()));
+ bool isDup(const IndexSet& data, const BSONObj& key, RecordId loc) {
+ const IndexSet::const_iterator it = data.find(IndexKeyEntry(key, RecordId()));
if (it == data.end())
return false;
@@ -93,8 +93,8 @@ namespace {
invariant(_data->empty());
}
- Status addKey(const BSONObj& key, const DiskLoc& loc) {
- // inserts should be in ascending (key, DiskLoc) order.
+ Status addKey(const BSONObj& key, const RecordId& loc) {
+ // inserts should be in ascending (key, RecordId) order.
if ( key.objsize() >= TempKeyMaxSize ) {
return Status(ErrorCodes::KeyTooLong, "key too big");
@@ -105,11 +105,11 @@ namespace {
invariant(!hasFieldNames(key));
if (!_data->empty()) {
- // Compare specified key with last inserted key, ignoring its DiskLoc
- int cmp = _comparator.compare(IndexKeyEntry(key, DiskLoc()), *_last);
+ // Compare specified key with last inserted key, ignoring its RecordId
+ int cmp = _comparator.compare(IndexKeyEntry(key, RecordId()), *_last);
if (cmp < 0 || (_dupsAllowed && cmp == 0 && loc < _last->loc)) {
return Status(ErrorCodes::InternalError,
- "expected ascending (key, DiskLoc) order in bulk builder");
+ "expected ascending (key, RecordId) order in bulk builder");
}
else if (!_dupsAllowed && cmp == 0 && loc != _last->loc) {
return dupKeyError(key);
@@ -129,7 +129,7 @@ namespace {
const bool _dupsAllowed;
IndexEntryComparison _comparator; // used by the bulk builder to detect duplicate keys
- IndexSet::const_iterator _last; // or (key, DiskLoc) ordering violations
+ IndexSet::const_iterator _last; // or (key, RecordId) ordering violations
};
class InMemoryBtreeImpl : public SortedDataInterface {
@@ -146,7 +146,7 @@ namespace {
virtual Status insert(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed) {
invariant(!loc.isNull());
@@ -174,7 +174,7 @@ namespace {
virtual void unindex(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed) {
invariant(!loc.isNull());
invariant(loc.isValid());
@@ -199,7 +199,7 @@ namespace {
return _currentKeySize + ( sizeof(IndexKeyEntry) * _data->size() );
}
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const DiskLoc& loc) {
+ virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc) {
invariant(!hasFieldNames(key));
if (isDup(*_data, key, loc))
return dupKeyError(key);
@@ -235,11 +235,11 @@ namespace {
return _it == other._it;
}
- virtual void aboutToDeleteBucket(const DiskLoc& bucket) {
+ virtual void aboutToDeleteBucket(const RecordId& bucket) {
invariant(!"aboutToDeleteBucket should not be called");
}
- virtual bool locate(const BSONObj& keyRaw, const DiskLoc& loc) {
+ virtual bool locate(const BSONObj& keyRaw, const RecordId& loc) {
const BSONObj key = stripFieldNames(keyRaw);
_it = _data.lower_bound(IndexKeyEntry(key, loc)); // lower_bound is >= key
if ( _it == _data.end() ) {
@@ -266,7 +266,7 @@ namespace {
keyEnd,
keyEndInclusive,
1), // forward
- DiskLoc()));
+ RecordId()));
}
void advanceTo(const BSONObj &keyBegin,
@@ -282,7 +282,7 @@ namespace {
return _it->key;
}
- virtual DiskLoc getDiskLoc() const {
+ virtual RecordId getRecordId() const {
return _it->loc;
}
@@ -320,7 +320,7 @@ namespace {
// For save/restorePosition since _it may be invalidated during a yield.
bool _savedAtEnd;
BSONObj _savedKey;
- DiskLoc _savedLoc;
+ RecordId _savedLoc;
};
@@ -345,11 +345,11 @@ namespace {
return _it == other._it;
}
- virtual void aboutToDeleteBucket(const DiskLoc& bucket) {
+ virtual void aboutToDeleteBucket(const RecordId& bucket) {
invariant(!"aboutToDeleteBucket should not be called");
}
- virtual bool locate(const BSONObj& keyRaw, const DiskLoc& loc) {
+ virtual bool locate(const BSONObj& keyRaw, const RecordId& loc) {
const BSONObj key = stripFieldNames(keyRaw);
_it = lower_bound(IndexKeyEntry(key, loc)); // lower_bound is <= query
@@ -378,7 +378,7 @@ namespace {
keyEnd,
keyEndInclusive,
-1), // reverse
- DiskLoc()));
+ RecordId()));
}
void advanceTo(const BSONObj &keyBegin,
@@ -394,7 +394,7 @@ namespace {
return _it->key;
}
- virtual DiskLoc getDiskLoc() const {
+ virtual RecordId getRecordId() const {
return _it->loc;
}
@@ -446,7 +446,7 @@ namespace {
// For save/restorePosition since _it may be invalidated during a yield.
bool _savedAtEnd;
BSONObj _savedKey;
- DiskLoc _savedLoc;
+ RecordId _savedLoc;
};
virtual SortedDataInterface::Cursor* newCursor(OperationContext* txn, int direction) const {
diff --git a/src/mongo/db/storage/in_memory/in_memory_record_store.cpp b/src/mongo/db/storage/in_memory/in_memory_record_store.cpp
index 6afa48c5fa5..fbca40124fb 100644
--- a/src/mongo/db/storage/in_memory/in_memory_record_store.cpp
+++ b/src/mongo/db/storage/in_memory/in_memory_record_store.cpp
@@ -43,7 +43,7 @@
namespace mongo {
class InMemoryRecordStore::InsertChange : public RecoveryUnit::Change {
public:
- InsertChange(Data* data, DiskLoc loc) :_data(data), _loc(loc) {}
+ InsertChange(Data* data, RecordId loc) :_data(data), _loc(loc) {}
virtual void commit() {}
virtual void rollback() {
Records::iterator it = _data->records.find(_loc);
@@ -55,13 +55,13 @@ namespace mongo {
private:
Data* const _data;
- const DiskLoc _loc;
+ const RecordId _loc;
};
// Works for both removes and updates
class InMemoryRecordStore::RemoveChange : public RecoveryUnit::Change {
public:
- RemoveChange(Data* data, DiskLoc loc, const InMemoryRecord& rec)
+ RemoveChange(Data* data, RecordId loc, const InMemoryRecord& rec)
:_data(data), _loc(loc), _rec(rec)
{}
@@ -78,7 +78,7 @@ namespace mongo {
private:
Data* const _data;
- const DiskLoc _loc;
+ const RecordId _loc;
const InMemoryRecord _rec;
};
@@ -136,12 +136,12 @@ namespace mongo {
const char* InMemoryRecordStore::name() const { return "InMemory"; }
- RecordData InMemoryRecordStore::dataFor( OperationContext* txn, const DiskLoc& loc ) const {
+ RecordData InMemoryRecordStore::dataFor( OperationContext* txn, const RecordId& loc ) const {
return recordFor(loc)->toRecordData();
}
const InMemoryRecordStore::InMemoryRecord* InMemoryRecordStore::recordFor(
- const DiskLoc& loc) const {
+ const RecordId& loc) const {
Records::const_iterator it = _data->records.find(loc);
if ( it == _data->records.end() ) {
error() << "InMemoryRecordStore::recordFor cannot find record for " << ns()
@@ -151,7 +151,7 @@ namespace mongo {
return &it->second;
}
- InMemoryRecordStore::InMemoryRecord* InMemoryRecordStore::recordFor(const DiskLoc& loc) {
+ InMemoryRecordStore::InMemoryRecord* InMemoryRecordStore::recordFor(const RecordId& loc) {
Records::iterator it = _data->records.find(loc);
if ( it == _data->records.end() ) {
error() << "InMemoryRecordStore::recordFor cannot find record for " << ns()
@@ -162,7 +162,7 @@ namespace mongo {
}
bool InMemoryRecordStore::findRecord( OperationContext* txn,
- const DiskLoc& loc, RecordData* rd ) const {
+ const RecordId& loc, RecordData* rd ) const {
Records::const_iterator it = _data->records.find(loc);
if ( it == _data->records.end() ) {
return false;
@@ -171,7 +171,7 @@ namespace mongo {
return true;
}
- void InMemoryRecordStore::deleteRecord(OperationContext* txn, const DiskLoc& loc) {
+ void InMemoryRecordStore::deleteRecord(OperationContext* txn, const RecordId& loc) {
InMemoryRecord* rec = recordFor(loc);
txn->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *rec));
_data->dataSize -= rec->size;
@@ -195,7 +195,7 @@ namespace mongo {
while (cappedAndNeedDelete(txn)) {
invariant(!_data->records.empty());
- DiskLoc oldest = _data->records.begin()->first;
+ RecordId oldest = _data->records.begin()->first;
if (_cappedDeleteCallback)
uassertStatusOK(_cappedDeleteCallback->aboutToDeleteCapped(txn, oldest));
@@ -204,35 +204,35 @@ namespace mongo {
}
}
- StatusWith<DiskLoc> InMemoryRecordStore::extractAndCheckLocForOplog(const char* data,
+ StatusWith<RecordId> InMemoryRecordStore::extractAndCheckLocForOplog(const char* data,
int len) const {
- StatusWith<DiskLoc> status = oploghack::extractKey(data, len);
+ StatusWith<RecordId> status = oploghack::extractKey(data, len);
if (!status.isOK())
return status;
if (!_data->records.empty() && status.getValue() <= _data->records.rbegin()->first)
- return StatusWith<DiskLoc>(ErrorCodes::BadValue, "ts not higher than highest");
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "ts not higher than highest");
return status;
}
- StatusWith<DiskLoc> InMemoryRecordStore::insertRecord(OperationContext* txn,
+ StatusWith<RecordId> InMemoryRecordStore::insertRecord(OperationContext* txn,
const char* data,
int len,
bool enforceQuota) {
if (_isCapped && len > _cappedMaxSize) {
// We use dataSize for capped rollover and we don't want to delete everything if we know
// this won't fit.
- return StatusWith<DiskLoc>(ErrorCodes::BadValue,
+ return StatusWith<RecordId>(ErrorCodes::BadValue,
"object to insert exceeds cappedMaxSize");
}
InMemoryRecord rec(len);
memcpy(rec.data.get(), data, len);
- DiskLoc loc;
+ RecordId loc;
if (_data->isOplog) {
- StatusWith<DiskLoc> status = extractAndCheckLocForOplog(data, len);
+ StatusWith<RecordId> status = extractAndCheckLocForOplog(data, len);
if (!status.isOK())
return status;
loc = status.getValue();
@@ -247,26 +247,26 @@ namespace mongo {
cappedDeleteAsNeeded(txn);
- return StatusWith<DiskLoc>(loc);
+ return StatusWith<RecordId>(loc);
}
- StatusWith<DiskLoc> InMemoryRecordStore::insertRecord(OperationContext* txn,
+ StatusWith<RecordId> InMemoryRecordStore::insertRecord(OperationContext* txn,
const DocWriter* doc,
bool enforceQuota) {
const int len = doc->documentSize();
if (_isCapped && len > _cappedMaxSize) {
// We use dataSize for capped rollover and we don't want to delete everything if we know
// this won't fit.
- return StatusWith<DiskLoc>(ErrorCodes::BadValue,
+ return StatusWith<RecordId>(ErrorCodes::BadValue,
"object to insert exceeds cappedMaxSize");
}
InMemoryRecord rec(len);
doc->writeDocument(rec.data.get());
- DiskLoc loc;
+ RecordId loc;
if (_data->isOplog) {
- StatusWith<DiskLoc> status = extractAndCheckLocForOplog(rec.data.get(), len);
+ StatusWith<RecordId> status = extractAndCheckLocForOplog(rec.data.get(), len);
if (!status.isOK())
return status;
loc = status.getValue();
@@ -281,11 +281,11 @@ namespace mongo {
cappedDeleteAsNeeded(txn);
- return StatusWith<DiskLoc>(loc);
+ return StatusWith<RecordId>(loc);
}
- StatusWith<DiskLoc> InMemoryRecordStore::updateRecord(OperationContext* txn,
- const DiskLoc& loc,
+ StatusWith<RecordId> InMemoryRecordStore::updateRecord(OperationContext* txn,
+ const RecordId& loc,
const char* data,
int len,
bool enforceQuota,
@@ -294,7 +294,7 @@ namespace mongo {
int oldLen = oldRecord->size;
if (_isCapped && len > oldLen) {
- return StatusWith<DiskLoc>( ErrorCodes::InternalError,
+ return StatusWith<RecordId>( ErrorCodes::InternalError,
"failing update: objects in a capped ns cannot grow",
10003 );
}
@@ -308,11 +308,11 @@ namespace mongo {
cappedDeleteAsNeeded(txn);
- return StatusWith<DiskLoc>(loc);
+ return StatusWith<RecordId>(loc);
}
Status InMemoryRecordStore::updateWithDamages( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages ) {
@@ -343,7 +343,7 @@ namespace mongo {
RecordIterator* InMemoryRecordStore::getIterator(
OperationContext* txn,
- const DiskLoc& start,
+ const RecordId& start,
const CollectionScanParams::Direction& dir) const {
if (dir == CollectionScanParams::FORWARD) {
@@ -375,7 +375,7 @@ namespace mongo {
}
void InMemoryRecordStore::temp_cappedTruncateAfter(OperationContext* txn,
- DiskLoc end,
+ RecordId end,
bool inclusive) {
Records::iterator it = inclusive ? _data->records.lower_bound(end)
: _data->records.upper_bound(end);
@@ -470,23 +470,23 @@ namespace mongo {
return _data->dataSize + recordOverhead;
}
- DiskLoc InMemoryRecordStore::allocateLoc() {
+ RecordId InMemoryRecordStore::allocateLoc() {
const int64_t id = _data->nextId++;
- // This is a hack, but both the high and low order bits of DiskLoc offset must be 0, and the
+ // This is a hack, but both the high and low order bits of RecordId offset must be 0, and the
// file must fit in 23 bits. This gives us a total of 30 + 23 == 53 bits.
invariant(id < (1LL << 53));
- return DiskLoc(int(id >> 30), int((id << 1) & ~(1<<31)));
+ return RecordId(int(id >> 30), int((id << 1) & ~(1<<31)));
}
- DiskLoc InMemoryRecordStore::oplogStartHack(OperationContext* txn,
- const DiskLoc& startingPosition) const {
+ RecordId InMemoryRecordStore::oplogStartHack(OperationContext* txn,
+ const RecordId& startingPosition) const {
if (!_data->isOplog)
- return DiskLoc().setInvalid();
+ return RecordId().setInvalid();
const Records& records = _data->records;
if (records.empty())
- return DiskLoc();
+ return RecordId();
Records::const_iterator it = records.lower_bound(startingPosition);
if (it == records.end() || it->first > startingPosition)
@@ -502,11 +502,11 @@ namespace mongo {
InMemoryRecordIterator::InMemoryRecordIterator(OperationContext* txn,
const InMemoryRecordStore::Records& records,
const InMemoryRecordStore& rs,
- DiskLoc start,
+ RecordId start,
bool tailable)
: _txn(txn),
_tailable(tailable),
- _lastLoc(minDiskLoc),
+ _lastLoc(RecordId::min()),
_killedByInvalidate(false),
_records(records),
_rs(rs) {
@@ -523,19 +523,19 @@ namespace mongo {
return _it == _records.end();
}
- DiskLoc InMemoryRecordIterator::curr() {
+ RecordId InMemoryRecordIterator::curr() {
if (isEOF())
- return DiskLoc();
+ return RecordId();
return _it->first;
}
- DiskLoc InMemoryRecordIterator::getNext() {
+ RecordId InMemoryRecordIterator::getNext() {
if (isEOF()) {
if (!_tailable)
- return DiskLoc();
+ return RecordId();
if (_records.empty())
- return DiskLoc();
+ return RecordId();
invariant(!_killedByInvalidate);
@@ -545,17 +545,17 @@ namespace mongo {
invariant(_it != _records.end());
if (++_it == _records.end())
- return DiskLoc();
+ return RecordId();
}
- const DiskLoc out = _it->first;
+ const RecordId out = _it->first;
++_it;
if (_tailable && _it == _records.end())
_lastLoc = out;
return out;
}
- void InMemoryRecordIterator::invalidate(const DiskLoc& loc) {
+ void InMemoryRecordIterator::invalidate(const RecordId& loc) {
if (_rs.isCapped()) {
// Capped iterators die on invalidation rather than advancing.
if (isEOF()) {
@@ -582,7 +582,7 @@ namespace mongo {
return !_killedByInvalidate;
}
- RecordData InMemoryRecordIterator::dataFor(const DiskLoc& loc) const {
+ RecordData InMemoryRecordIterator::dataFor(const RecordId& loc) const {
return _rs.dataFor(_txn, loc);
}
@@ -594,7 +594,7 @@ namespace mongo {
OperationContext* txn,
const InMemoryRecordStore::Records& records,
const InMemoryRecordStore& rs,
- DiskLoc start) : _txn(txn),
+ RecordId start) : _txn(txn),
_killedByInvalidate(false),
_records(records),
_rs(rs) {
@@ -615,22 +615,22 @@ namespace mongo {
return _it == _records.rend();
}
- DiskLoc InMemoryRecordReverseIterator::curr() {
+ RecordId InMemoryRecordReverseIterator::curr() {
if (isEOF())
- return DiskLoc();
+ return RecordId();
return _it->first;
}
- DiskLoc InMemoryRecordReverseIterator::getNext() {
+ RecordId InMemoryRecordReverseIterator::getNext() {
if (isEOF())
- return DiskLoc();
+ return RecordId();
- const DiskLoc out = _it->first;
+ const RecordId out = _it->first;
++_it;
return out;
}
- void InMemoryRecordReverseIterator::invalidate(const DiskLoc& loc) {
+ void InMemoryRecordReverseIterator::invalidate(const RecordId& loc) {
if (_killedByInvalidate)
return;
@@ -650,7 +650,7 @@ namespace mongo {
void InMemoryRecordReverseIterator::saveState() {
if (isEOF()) {
- _savedLoc = DiskLoc();
+ _savedLoc = RecordId();
}
else {
_savedLoc = _it->first;
@@ -667,7 +667,7 @@ namespace mongo {
return !_killedByInvalidate;
}
- RecordData InMemoryRecordReverseIterator::dataFor(const DiskLoc& loc) const {
+ RecordData InMemoryRecordReverseIterator::dataFor(const RecordId& loc) const {
return _rs.dataFor(_txn, loc);
}
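
Note: the allocateLoc() packing in the in_memory_record_store.cpp hunk above is easy to sanity-check in isolation. Below is a minimal standalone sketch, not part of the commit; FakeRecordId is a hypothetical stand-in for the real class, and the shift/mask expression is copied verbatim from the hunk:

    #include <cassert>

    // Hypothetical stand-in for RecordId(int a, int ofs) used by allocateLoc().
    struct FakeRecordId { int a; int ofs; };

    FakeRecordId allocateLoc(long long id) {
        // Both the high and low order bits of the offset must be 0, and the
        // file number must fit in 23 bits: 30 + 23 == 53 usable bits total.
        assert(id < (1LL << 53));
        FakeRecordId loc = { int(id >> 30), int((id << 1) & ~(1 << 31)) };
        return loc;
    }

    int main() {
        FakeRecordId loc = allocateLoc(12345);
        assert(loc.a == 0);                       // small ids stay in "file" 0
        assert(loc.ofs == 24690);                 // id << 1
        assert(loc.ofs % 2 == 0 && loc.ofs >= 0); // low and high bits clear
        return 0;
    }
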
diff --git a/src/mongo/db/storage/in_memory/in_memory_record_store.h b/src/mongo/db/storage/in_memory/in_memory_record_store.h
index 75862e22c7c..c5e69f2f72d 100644
--- a/src/mongo/db/storage/in_memory/in_memory_record_store.h
+++ b/src/mongo/db/storage/in_memory/in_memory_record_store.h
@@ -57,36 +57,36 @@ namespace mongo {
virtual const char* name() const;
- virtual RecordData dataFor( OperationContext* txn, const DiskLoc& loc ) const;
+ virtual RecordData dataFor( OperationContext* txn, const RecordId& loc ) const;
- virtual bool findRecord( OperationContext* txn, const DiskLoc& loc, RecordData* rd ) const;
+ virtual bool findRecord( OperationContext* txn, const RecordId& loc, RecordData* rd ) const;
- virtual void deleteRecord( OperationContext* txn, const DiskLoc& dl );
+ virtual void deleteRecord( OperationContext* txn, const RecordId& dl );
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const char* data,
int len,
bool enforceQuota );
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const DocWriter* doc,
bool enforceQuota );
- virtual StatusWith<DiskLoc> updateRecord( OperationContext* txn,
- const DiskLoc& oldLocation,
+ virtual StatusWith<RecordId> updateRecord( OperationContext* txn,
+ const RecordId& oldLocation,
const char* data,
int len,
bool enforceQuota,
UpdateMoveNotifier* notifier );
virtual Status updateWithDamages( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages );
virtual RecordIterator* getIterator( OperationContext* txn,
- const DiskLoc& start,
+ const RecordId& start,
const CollectionScanParams::Direction& dir) const;
virtual RecordIterator* getIteratorForRepair( OperationContext* txn ) const;
@@ -95,7 +95,7 @@ namespace mongo {
virtual Status truncate( OperationContext* txn );
- virtual void temp_cappedTruncateAfter( OperationContext* txn, DiskLoc end, bool inclusive );
+ virtual void temp_cappedTruncateAfter( OperationContext* txn, RecordId end, bool inclusive );
virtual bool compactSupported() const;
virtual Status compact( OperationContext* txn,
@@ -131,8 +131,8 @@ namespace mongo {
return _data->records.size();
}
- virtual DiskLoc oplogStartHack(OperationContext* txn,
- const DiskLoc& startingPosition) const;
+ virtual RecordId oplogStartHack(OperationContext* txn,
+ const RecordId& startingPosition) const;
protected:
struct InMemoryRecord {
@@ -145,15 +145,15 @@ namespace mongo {
boost::shared_array<char> data;
};
- virtual const InMemoryRecord* recordFor( const DiskLoc& loc ) const;
- virtual InMemoryRecord* recordFor( const DiskLoc& loc );
+ virtual const InMemoryRecord* recordFor( const RecordId& loc ) const;
+ virtual InMemoryRecord* recordFor( const RecordId& loc );
public:
//
// Not in RecordStore interface
//
- typedef std::map<DiskLoc, InMemoryRecord> Records;
+ typedef std::map<RecordId, InMemoryRecord> Records;
bool isCapped() const { return _isCapped; }
void setCappedDeleteCallback(CappedDocumentDeleteCallback* cb) {
@@ -167,9 +167,9 @@ namespace mongo {
class RemoveChange;
class TruncateChange;
- StatusWith<DiskLoc> extractAndCheckLocForOplog(const char* data, int len) const;
+ StatusWith<RecordId> extractAndCheckLocForOplog(const char* data, int len) const;
- DiskLoc allocateLoc();
+ RecordId allocateLoc();
bool cappedAndNeedDelete(OperationContext* txn) const;
void cappedDeleteAsNeeded(OperationContext* txn);
@@ -197,28 +197,28 @@ namespace mongo {
InMemoryRecordIterator(OperationContext* txn,
const InMemoryRecordStore::Records& records,
const InMemoryRecordStore& rs,
- DiskLoc start = DiskLoc(),
+ RecordId start = RecordId(),
bool tailable = false);
virtual bool isEOF();
- virtual DiskLoc curr();
+ virtual RecordId curr();
- virtual DiskLoc getNext();
+ virtual RecordId getNext();
- virtual void invalidate(const DiskLoc& dl);
+ virtual void invalidate(const RecordId& dl);
virtual void saveState();
virtual bool restoreState(OperationContext* txn);
- virtual RecordData dataFor( const DiskLoc& loc ) const;
+ virtual RecordData dataFor( const RecordId& loc ) const;
private:
OperationContext* _txn; // not owned
InMemoryRecordStore::Records::const_iterator _it;
bool _tailable;
- DiskLoc _lastLoc; // only for restarting tailable
+ RecordId _lastLoc; // only for restarting tailable
bool _killedByInvalidate;
const InMemoryRecordStore::Records& _records;
@@ -230,27 +230,27 @@ namespace mongo {
InMemoryRecordReverseIterator(OperationContext* txn,
const InMemoryRecordStore::Records& records,
const InMemoryRecordStore& rs,
- DiskLoc start = DiskLoc());
+ RecordId start = RecordId());
virtual bool isEOF();
- virtual DiskLoc curr();
+ virtual RecordId curr();
- virtual DiskLoc getNext();
+ virtual RecordId getNext();
- virtual void invalidate(const DiskLoc& dl);
+ virtual void invalidate(const RecordId& dl);
virtual void saveState();
virtual bool restoreState(OperationContext* txn);
- virtual RecordData dataFor( const DiskLoc& loc ) const;
+ virtual RecordData dataFor( const RecordId& loc ) const;
private:
OperationContext* _txn; // not owned
InMemoryRecordStore::Records::const_reverse_iterator _it;
bool _killedByInvalidate;
- DiskLoc _savedLoc; // isNull if saved at EOF
+ RecordId _savedLoc; // isNull if saved at EOF
const InMemoryRecordStore::Records& _records;
const InMemoryRecordStore& _rs;
diff --git a/src/mongo/db/storage/index_entry_comparison.h b/src/mongo/db/storage/index_entry_comparison.h
index 5765ad6d6bd..ea2eb005e9b 100644
--- a/src/mongo/db/storage/index_entry_comparison.h
+++ b/src/mongo/db/storage/index_entry_comparison.h
@@ -42,10 +42,10 @@ namespace mongo {
* and a disk location.
*/
struct IndexKeyEntry {
- IndexKeyEntry(const BSONObj& key, DiskLoc loc) :key(key), loc(loc) {}
+ IndexKeyEntry(const BSONObj& key, RecordId loc) :key(key), loc(loc) {}
BSONObj key;
- DiskLoc loc;
+ RecordId loc;
};
/**
@@ -66,7 +66,7 @@ namespace mongo {
* otherwise.
*
* IndexKeyEntries are compared lexicographically field by field in the BSONObj, followed by
- * the DiskLoc. Either lhs or rhs (but not both) can be a query object returned by
+ * the RecordId. Either lhs or rhs (but not both) can be a query object returned by
* makeQueryObject(). See makeQueryObject() for a description of how its arguments affect
* the outcome of the comparison.
*/
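
Note: the ordering described above (key fields first, RecordId as tiebreaker) is what lets the in-memory btree code earlier in this diff probe for a key with a null RecordId via lower_bound. A toy illustration, with plain ints standing in for BSON keys and RecordIds (hypothetical, not the real comparator):

    #include <cassert>
    #include <set>
    #include <utility>

    int main() {
        // (key, loc) pairs compare lexicographically, like (BSONObj, RecordId).
        std::set<std::pair<int, int> > index;
        index.insert(std::make_pair(5, 40));
        index.insert(std::make_pair(5, 10));
        index.insert(std::make_pair(7, 20));

        // Probing with a "null" loc of 0 lands on the first entry for key 5,
        // mirroring data.find(IndexKeyEntry(key, RecordId())) in isDup() above.
        std::set<std::pair<int, int> >::const_iterator it =
            index.lower_bound(std::make_pair(5, 0));
        assert(it->first == 5 && it->second == 10);
        return 0;
    }
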
diff --git a/src/mongo/db/storage/kv/kv_catalog.cpp b/src/mongo/db/storage/kv/kv_catalog.cpp
index 2724b57a8e6..21cfaca98fd 100644
--- a/src/mongo/db/storage/kv/kv_catalog.cpp
+++ b/src/mongo/db/storage/kv/kv_catalog.cpp
@@ -118,7 +118,7 @@ namespace {
// No locking needed since called single threaded.
scoped_ptr<RecordIterator> it( _rs->getIterator( opCtx ) );
while ( !it->isEOF() ) {
- DiskLoc loc = it->getNext();
+ RecordId loc = it->getNext();
RecordData data = it->dataFor( loc );
BSONObj obj( data.data() );
@@ -177,7 +177,7 @@ namespace {
obj = b.obj();
}
- StatusWith<DiskLoc> res = _rs->insertRecord( opCtx, obj.objdata(), obj.objsize(), false );
+ StatusWith<RecordId> res = _rs->insertRecord( opCtx, obj.objdata(), obj.objsize(), false );
if ( !res.isOK() )
return res.getStatus();
@@ -203,7 +203,7 @@ namespace {
BSONObj KVCatalog::_findEntry( OperationContext* opCtx,
const StringData& ns,
- DiskLoc* out ) const {
+ RecordId* out ) const {
boost::scoped_ptr<Lock::ResourceLock> rLk;
if (!_isRsThreadSafe && opCtx->lockState()) {
@@ -212,7 +212,7 @@ namespace {
MODE_S));
}
- DiskLoc dl;
+ RecordId dl;
{
boost::mutex::scoped_lock lk( _identsLock );
NSToIdentMap::const_iterator it = _idents.find( ns.toString() );
@@ -256,7 +256,7 @@ namespace {
MODE_X));
}
- DiskLoc loc;
+ RecordId loc;
BSONObj obj = _findEntry( opCtx, ns, &loc );
{
@@ -287,7 +287,7 @@ namespace {
obj = b.obj();
}
- StatusWith<DiskLoc> status = _rs->updateRecord( opCtx,
+ StatusWith<RecordId> status = _rs->updateRecord( opCtx,
loc,
obj.objdata(),
obj.objsize(),
@@ -309,7 +309,7 @@ namespace {
MODE_X));
}
- DiskLoc loc;
+ RecordId loc;
BSONObj old = _findEntry( opCtx, fromNS, &loc ).getOwned();
{
BSONObjBuilder b;
@@ -326,7 +326,7 @@ namespace {
b.appendElementsUnique( old );
BSONObj obj = b.obj();
- StatusWith<DiskLoc> status = _rs->updateRecord( opCtx,
+ StatusWith<RecordId> status = _rs->updateRecord( opCtx,
loc,
obj.objdata(),
obj.objsize(),
@@ -396,7 +396,7 @@ namespace {
scoped_ptr<RecordIterator> it( _rs->getIterator( opCtx ) );
while ( !it->isEOF() ) {
- DiskLoc loc = it->getNext();
+ RecordId loc = it->getNext();
RecordData data = it->dataFor( loc );
BSONObj obj( data.data() );
v.push_back( obj["ident"].String() );
diff --git a/src/mongo/db/storage/kv/kv_catalog.h b/src/mongo/db/storage/kv/kv_catalog.h
index 6370568515e..5c6f1f52e1a 100644
--- a/src/mongo/db/storage/kv/kv_catalog.h
+++ b/src/mongo/db/storage/kv/kv_catalog.h
@@ -95,7 +95,7 @@ namespace mongo {
BSONObj _findEntry( OperationContext* opCtx,
const StringData& ns,
- DiskLoc* out=NULL ) const;
+ RecordId* out=NULL ) const;
std::string _newUniqueIdent(const char* kind);
@@ -112,10 +112,10 @@ namespace mongo {
struct Entry {
Entry(){}
- Entry( std::string i, DiskLoc l )
+ Entry( std::string i, RecordId l )
: ident(i), storedLoc( l ) {}
std::string ident;
- DiskLoc storedLoc;
+ RecordId storedLoc;
};
typedef std::map<std::string,Entry> NSToIdentMap;
NSToIdentMap _idents;
diff --git a/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp b/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
index 1552598e70e..1aeed1a1277 100644
--- a/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
@@ -108,7 +108,7 @@ namespace mongo {
void KVCollectionCatalogEntry::setIndexHead( OperationContext* txn,
const StringData& indexName,
- const DiskLoc& newHead ) {
+ const RecordId& newHead ) {
MetaData md = _getMetaData( txn );
int offset = md.findIndexOffset( indexName );
invariant( offset >= 0 );
@@ -132,7 +132,7 @@ namespace mongo {
Status KVCollectionCatalogEntry::prepareForIndexBuild( OperationContext* txn,
const IndexDescriptor* spec ) {
MetaData md = _getMetaData( txn );
- md.indexes.push_back( IndexMetaData( spec->infoObj(), false, DiskLoc(), false ) );
+ md.indexes.push_back( IndexMetaData( spec->infoObj(), false, RecordId(), false ) );
_catalog->putMetaData( txn, ns().toString(), md );
string ident = _catalog->getIndexIdent( txn, ns().ns(), spec->indexName() );
diff --git a/src/mongo/db/storage/kv/kv_collection_catalog_entry.h b/src/mongo/db/storage/kv/kv_collection_catalog_entry.h
index 678b6722ead..646b0c07fa3 100644
--- a/src/mongo/db/storage/kv/kv_collection_catalog_entry.h
+++ b/src/mongo/db/storage/kv/kv_collection_catalog_entry.h
@@ -57,7 +57,7 @@ namespace mongo {
virtual void setIndexHead( OperationContext* txn,
const StringData& indexName,
- const DiskLoc& newHead );
+ const RecordId& newHead );
virtual Status removeIndex( OperationContext* txn,
const StringData& indexName );
diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
index 86ed0fd7886..6ef9cbe1b3d 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
+++ b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
@@ -64,11 +64,11 @@ namespace mongo {
}
- DiskLoc loc;
+ RecordId loc;
{
MyOperationContext opCtx( engine );
WriteUnitOfWork uow( &opCtx );
- StatusWith<DiskLoc> res = rs->insertRecord( &opCtx, "abc", 4, false );
+ StatusWith<RecordId> res = rs->insertRecord( &opCtx, "abc", 4, false );
ASSERT_OK( res.getStatus() );
loc = res.getValue();
uow.commit();
@@ -96,7 +96,7 @@ namespace mongo {
string ns = "a.b";
// 'loc' holds location of "abc" and is referenced after restarting engine.
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<RecordStore> rs;
{
@@ -109,7 +109,7 @@ namespace mongo {
{
MyOperationContext opCtx( engine );
WriteUnitOfWork uow( &opCtx );
- StatusWith<DiskLoc> res = rs->insertRecord( &opCtx, "abc", 4, false );
+ StatusWith<RecordId> res = rs->insertRecord( &opCtx, "abc", 4, false );
ASSERT_OK( res.getStatus() );
loc = res.getValue();
uow.commit();
@@ -151,7 +151,7 @@ namespace mongo {
{
MyOperationContext opCtx( engine );
WriteUnitOfWork uow( &opCtx );
- ASSERT_OK( sorted->insert( &opCtx, BSON( "" << 5 ), DiskLoc( 6, 4 ), true ) );
+ ASSERT_OK( sorted->insert( &opCtx, BSON( "" << 5 ), RecordId( 6, 4 ), true ) );
uow.commit();
}
@@ -237,7 +237,7 @@ namespace mongo {
md.ns ="a.b";
md.indexes.push_back( BSONCollectionCatalogEntry::IndexMetaData( BSON( "name" << "foo" ),
false,
- DiskLoc(),
+ RecordId(),
false ) );
catalog->putMetaData( &opCtx, "a.b", md );
uow.commit();
@@ -263,7 +263,7 @@ namespace mongo {
catalog->putMetaData( &opCtx, "a.b", md ); // remove index
md.indexes.push_back( BSONCollectionCatalogEntry::IndexMetaData( BSON( "name" << "foo" ),
false,
- DiskLoc(),
+ RecordId(),
false ) );
catalog->putMetaData( &opCtx, "a.b", md );
uow.commit();
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp
index 35bb2080e31..c7644db33ab 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp
@@ -188,7 +188,7 @@ namespace mongo {
return _btree->getKey(_txn, _bucket, _ofs);
}
- virtual DiskLoc getDiskLoc() const {
+ virtual RecordId getRecordId() const {
return _btree->getDiskLoc(_txn, _bucket, _ofs);
}
@@ -201,7 +201,7 @@ namespace mongo {
virtual void savePosition() {
if (!_bucket.isNull()) {
_savedKey = getKey().getOwned();
- _savedLoc = getDiskLoc();
+ _savedLoc = getRecordId();
}
}
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
index 290d6a16123..c05b2453484 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
@@ -1089,7 +1089,7 @@ namespace mongo {
// Find the DiskLoc
bool found;
- DiskLoc bucket = _locate(txn, getRootLoc(txn), key, &position, &found, minDiskLoc, 1);
+ DiskLoc bucket = _locate(txn, getRootLoc(txn), key, &position, &found, DiskLoc::min(), 1);
while (!bucket.isNull()) {
FullKey fullKey = getFullKey(getBucket(txn, bucket), position);
@@ -1121,7 +1121,7 @@ namespace mongo {
int position;
bool found;
- DiskLoc posLoc = _locate(txn, getRootLoc(txn), key, &position, &found, minDiskLoc, 1);
+ DiskLoc posLoc = _locate(txn, getRootLoc(txn), key, &position, &found, DiskLoc::min(), 1);
while (!posLoc.isNull()) {
FullKey fullKey = getFullKey(getBucket(txn, posLoc), position);
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp
index 717e5bcb37b..15997d5681c 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp
@@ -40,8 +40,8 @@ namespace mongo {
void DiskLoc56Bit::operator=(const DiskLoc& loc) {
ofs = loc.getOfs();
int la = loc.a();
- if (la == maxDiskLoc.a()) {
- invariant(ofs == maxDiskLoc.getOfs());
+ if (la == DiskLoc::max().a()) {
+ invariant(ofs == DiskLoc::max().getOfs());
la = OurMaxA;
}
invariant( la <= OurMaxA ); // must fit in 3 bytes
diff --git a/src/mongo/db/storage/mmap_v1/data_file.cpp b/src/mongo/db/storage/mmap_v1/data_file.cpp
index 020bd7a58f1..971af81eb92 100644
--- a/src/mongo/db/storage/mmap_v1/data_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/data_file.cpp
@@ -216,9 +216,9 @@ namespace mongo {
}
void DataFileHeader::checkUpgrade(OperationContext* txn) {
- if ( freeListStart == minDiskLoc ) {
+ if ( freeListStart == DiskLoc(0, 0) ) {
// we are upgrading from 2.4 to 2.6
- invariant( freeListEnd == minDiskLoc ); // both start and end should be (0,0) or real
+ invariant(freeListEnd == DiskLoc(0, 0)); // both start and end should be (0,0) or real
WriteUnitOfWork wunit(txn);
*txn->recoveryUnit()->writing( &freeListStart ) = DiskLoc();
*txn->recoveryUnit()->writing( &freeListEnd ) = DiskLoc();
diff --git a/src/mongo/db/storage/mmap_v1/diskloc.h b/src/mongo/db/storage/mmap_v1/diskloc.h
index 7ac89937842..07c69cc9b25 100644
--- a/src/mongo/db/storage/mmap_v1/diskloc.h
+++ b/src/mongo/db/storage/mmap_v1/diskloc.h
@@ -44,12 +44,16 @@ namespace mongo {
template< class Version > class BtreeBucket;
+ // TODO make DiskLoc and RecordId different types
+ class RecordId;
+ typedef RecordId DiskLoc;
+
#pragma pack(1)
/** represents a disk location/offset on disk in a database. 64 bits.
it is assumed these will be passed around by value a lot so don't do anything to make them large
(such as adding a virtual function)
*/
- class DiskLoc {
+ class RecordId {
int _a; // this will be volume, file #, etc. but is a logical value could be anything depending on storage engine
int ofs;
@@ -65,8 +69,17 @@ namespace mongo {
MaxFiles=16000
};
- DiskLoc(int a, int Ofs) : _a(a), ofs(Ofs) { }
- DiskLoc() { Null(); }
+ RecordId(int a, int Ofs) : _a(a), ofs(Ofs) { }
+ RecordId() { Null(); }
+
+ // Minimum allowed DiskLoc. No Record may begin at this location because file and extent
+ // headers must precede Records in a file.
+ static DiskLoc min() { return DiskLoc(0, 0); }
+
+ // Maximum allowed DiskLoc.
+ // No Record may begin at this location because the minimum size of a Record is larger than
+ // one byte. Also, the last bit is not able to be used because mmapv1 uses that for "used".
+ static DiskLoc max() { return DiskLoc(0x7fffffff, 0x7ffffffe); }
bool questionable() const {
return ofs < -1 ||
@@ -164,13 +177,4 @@ namespace mongo {
return stream << loc.toString();
}
- // Minimum allowed DiskLoc. No Record may begin at this location because file and extent
- // headers must precede Records in a file.
- const DiskLoc minDiskLoc(0, 0);
-
- // Maximum allowed DiskLoc.
- // No Record may begin at this location because the minimum size of a Record is larger than one
- // byte. Also, the last bit is not able to be used because mmapv1 uses that for "used".
- const DiskLoc maxDiskLoc(0x7fffffff, 0x7ffffffe);
-
} // namespace mongo
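
Note: the net effect of the diskloc.h change is that the free minDiskLoc/maxDiskLoc constants become the static members RecordId::min() and RecordId::max(). A standalone sketch of the sentinel semantics, with a hypothetical MiniRecordId standing in for the real class; the (a, ofs) values are taken from the hunk above:

    #include <cassert>

    // Hypothetical stand-in mirroring RecordId::min()/max() above.
    struct MiniRecordId {
        int a, ofs;
        MiniRecordId(int a_, int ofs_) : a(a_), ofs(ofs_) {}
        static MiniRecordId min() { return MiniRecordId(0, 0); }
        static MiniRecordId max() { return MiniRecordId(0x7fffffff, 0x7ffffffe); }
        bool operator<(const MiniRecordId& r) const {
            return a < r.a || (a == r.a && ofs < r.ofs);
        }
    };

    int main() {
        MiniRecordId loc(6, 4); // the sample location the devnull engine returns
        assert(MiniRecordId::min() < loc);  // every real record sorts above min()
        assert(loc < MiniRecordId::max());  // and below max()
        return 0;
    }
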
diff --git a/src/mongo/db/storage/oplog_hack.cpp b/src/mongo/db/storage/oplog_hack.cpp
index f2361fecc6f..f3f6a2e4937 100644
--- a/src/mongo/db/storage/oplog_hack.cpp
+++ b/src/mongo/db/storage/oplog_hack.cpp
@@ -41,37 +41,37 @@
namespace mongo {
namespace oploghack {
- StatusWith<DiskLoc> keyForOptime(const OpTime& opTime) {
+ StatusWith<RecordId> keyForOptime(const OpTime& opTime) {
// Make sure secs and inc wouldn't be negative if treated as signed. This ensures that they
- // don't sort differently when put in a DiskLoc. It also avoids issues with Null/Invalid
- // DiskLocs
+ // don't sort differently when put in a RecordId. It also avoids issues with Null/Invalid
+ // RecordIds
if (opTime.getSecs() > uint32_t(std::numeric_limits<int32_t>::max()))
- return StatusWith<DiskLoc>(ErrorCodes::BadValue, "ts secs too high");
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "ts secs too high");
if (opTime.getInc() > uint32_t(std::numeric_limits<int32_t>::max()))
- return StatusWith<DiskLoc>(ErrorCodes::BadValue, "ts inc too high");
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "ts inc too high");
- const DiskLoc out = DiskLoc(opTime.getSecs(), opTime.getInc());
- if (out <= minDiskLoc)
- return StatusWith<DiskLoc>(ErrorCodes::BadValue, "ts too low");
- if (out >= maxDiskLoc)
- return StatusWith<DiskLoc>(ErrorCodes::BadValue, "ts too high");
+ const RecordId out = RecordId(opTime.getSecs(), opTime.getInc());
+ if (out <= RecordId::min())
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "ts too low");
+ if (out >= RecordId::max())
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "ts too high");
- return StatusWith<DiskLoc>(out);
+ return StatusWith<RecordId>(out);
}
/**
* data and len must be the arguments from RecordStore::insert() on an oplog collection.
*/
- StatusWith<DiskLoc> extractKey(const char* data, int len) {
+ StatusWith<RecordId> extractKey(const char* data, int len) {
DEV invariant(validateBSON(data, len).isOK());
const BSONObj obj(data);
const BSONElement elem = obj["ts"];
if (elem.eoo())
- return StatusWith<DiskLoc>(ErrorCodes::BadValue, "no ts field");
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "no ts field");
if (elem.type() != Timestamp)
- return StatusWith<DiskLoc>(ErrorCodes::BadValue, "ts must be a Timestamp");
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "ts must be a Timestamp");
return keyForOptime(elem._opTime());
}
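
Note: the keyForOptime() contract above is: reject (secs, inc) values that would look negative as signed 32-bit ints, pack them into a RecordId, and bounds-check against the new min()/max() sentinels. A standalone sketch of that validation under simplified types (the real code returns StatusWith<RecordId>, not bool):

    #include <cassert>
    #include <limits>

    // Simplified stand-in for the packed (secs, inc) key.
    struct Key { int secs; int inc; };

    // Returns false where the real keyForOptime() returns a BadValue Status.
    bool keyForOptime(unsigned secs, unsigned inc, Key* out) {
        // secs/inc must not look negative once stored in RecordId's signed
        // 32-bit fields, or the keys would sort incorrectly.
        const unsigned kMax = unsigned(std::numeric_limits<int>::max());
        if (secs > kMax || inc > kMax)
            return false;
        out->secs = int(secs);
        out->inc = int(inc);
        // The real code additionally rejects results <= RecordId::min()
        // or >= RecordId::max().
        return true;
    }

    int main() {
        Key k;
        assert(keyForOptime(1417028066u, 1u, &k));  // a plausible oplog ts
        assert(!keyForOptime(0xffffffffu, 1u, &k)); // "ts secs too high"
        return 0;
    }
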
diff --git a/src/mongo/db/storage/oplog_hack.h b/src/mongo/db/storage/oplog_hack.h
index 8c89b3c1287..20708c1db4c 100644
--- a/src/mongo/db/storage/oplog_hack.h
+++ b/src/mongo/db/storage/oplog_hack.h
@@ -32,21 +32,21 @@
#include "mongo/base/status_with.h"
namespace mongo {
- class DiskLoc;
+ class RecordId;
class OpTime;
namespace oploghack {
/**
- * Converts OpTime to a DiskLoc in an unspecified manner that is safe to use as the key in a
+ * Converts OpTime to a RecordId in an unspecified manner that is safe to use as the key in a
* RecordStore.
*/
- StatusWith<DiskLoc> keyForOptime(const OpTime& opTime);
+ StatusWith<RecordId> keyForOptime(const OpTime& opTime);
/**
* data and len must be the arguments from RecordStore::insert() on an oplog collection.
*/
- StatusWith<DiskLoc> extractKey(const char* data, int len);
+ StatusWith<RecordId> extractKey(const char* data, int len);
} // namespace oploghack
} // namespace mongo
diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h
index 9540067f83c..453f0fdc3ee 100644
--- a/src/mongo/db/storage/record_store.h
+++ b/src/mongo/db/storage/record_store.h
@@ -73,7 +73,7 @@ namespace mongo {
public:
virtual ~UpdateMoveNotifier(){}
virtual Status recordStoreGoingToMove( OperationContext* txn,
- const DiskLoc& oldLocation,
+ const RecordId& oldLocation,
const char* oldBuffer,
size_t oldSize ) = 0;
};
@@ -89,17 +89,17 @@ namespace mongo {
// True if getNext will produce no more data, false otherwise.
virtual bool isEOF() = 0;
- // Return the DiskLoc that the iterator points at. Returns DiskLoc() if isEOF.
- virtual DiskLoc curr() = 0;
+ // Return the RecordId that the iterator points at. Returns RecordId() if isEOF.
+ virtual RecordId curr() = 0;
- // Return the DiskLoc that the iterator points at and move the iterator to the next item
- // from the collection. Returns DiskLoc() if isEOF.
- virtual DiskLoc getNext() = 0;
+ // Return the RecordId that the iterator points at and move the iterator to the next item
+ // from the collection. Returns RecordId() if isEOF.
+ virtual RecordId getNext() = 0;
// Can only be called after saveState and before restoreState.
- virtual void invalidate(const DiskLoc& dl) = 0;
+ virtual void invalidate(const RecordId& dl) = 0;
- // Save any state required to resume operation (without crashing) after DiskLoc deletion or
+ // Save any state required to resume operation (without crashing) after RecordId deletion or
// a collection drop.
virtual void saveState() = 0;
@@ -110,7 +110,7 @@ namespace mongo {
// normally this will just go back to the RecordStore and convert
// but this gives the iterator an opportunity to optimize
- virtual RecordData dataFor( const DiskLoc& loc ) const = 0;
+ virtual RecordData dataFor( const RecordId& loc ) const = 0;
};
@@ -146,24 +146,24 @@ namespace mongo {
// CRUD related
- virtual RecordData dataFor( OperationContext* txn, const DiskLoc& loc) const = 0;
+ virtual RecordData dataFor( OperationContext* txn, const RecordId& loc) const = 0;
/**
* @param out - If the record exists, the contents of this are set.
* @return true iff there is a Record for loc
*/
virtual bool findRecord( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
RecordData* out ) const = 0;
- virtual void deleteRecord( OperationContext* txn, const DiskLoc& dl ) = 0;
+ virtual void deleteRecord( OperationContext* txn, const RecordId& dl ) = 0;
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const char* data,
int len,
bool enforceQuota ) = 0;
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const DocWriter* doc,
bool enforceQuota ) = 0;
@@ -171,17 +171,17 @@ namespace mongo {
* @param notifier - this is called if the document is moved
* it is to be called after the document has been written to new
* location, before deleted from old.
- * @return Status or DiskLoc, DiskLoc might be different
+ * @return Status or RecordId, RecordId might be different
*/
- virtual StatusWith<DiskLoc> updateRecord( OperationContext* txn,
- const DiskLoc& oldLocation,
+ virtual StatusWith<RecordId> updateRecord( OperationContext* txn,
+ const RecordId& oldLocation,
const char* data,
int len,
bool enforceQuota,
UpdateMoveNotifier* notifier ) = 0;
virtual Status updateWithDamages( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages ) = 0;
@@ -202,14 +202,14 @@ namespace mongo {
* Storage engines which support document-level locking need not implement this.
*/
virtual RecordFetcher* recordNeedsFetch( OperationContext* txn,
- const DiskLoc& loc ) const { return NULL; }
+ const RecordId& loc ) const { return NULL; }
/**
* returned iterator owned by caller
* Default arguments return all items in record store.
*/
virtual RecordIterator* getIterator( OperationContext* txn,
- const DiskLoc& start = DiskLoc(),
+ const RecordId& start = RecordId(),
const CollectionScanParams::Direction& dir =
CollectionScanParams::FORWARD
) const = 0;
@@ -245,7 +245,7 @@ namespace mongo {
* XXX: this will go away soon, just needed to move for now
*/
virtual void temp_cappedTruncateAfter(OperationContext* txn,
- DiskLoc end,
+ RecordId end,
bool inclusive) = 0;
// does this RecordStore support the compact operation
@@ -299,15 +299,15 @@ namespace mongo {
BSONObjBuilder* info = NULL ) = 0;
/**
- * Return the DiskLoc of an oplog entry as close to startingPosition as possible without
- * being higher. If there are no entries <= startingPosition, return DiskLoc().
+ * Return the RecordId of an oplog entry as close to startingPosition as possible without
+ * being higher. If there are no entries <= startingPosition, return RecordId().
*
* If you don't implement the oplogStartHack, just use the default implementation which
- * returns an Invalid DiskLoc.
+ * returns an Invalid RecordId.
*/
- virtual DiskLoc oplogStartHack(OperationContext* txn,
- const DiskLoc& startingPosition) const {
- return DiskLoc().setInvalid();
+ virtual RecordId oplogStartHack(OperationContext* txn,
+ const RecordId& startingPosition) const {
+ return RecordId().setInvalid();
}
/**
@@ -330,7 +330,7 @@ namespace mongo {
virtual ~RecordStoreCompactAdaptor(){}
virtual bool isDataValid( const RecordData& recData ) = 0;
virtual size_t dataSize( const RecordData& recData ) = 0;
- virtual void inserted( const RecordData& recData, const DiskLoc& newLocation ) = 0;
+ virtual void inserted( const RecordData& recData, const RecordId& newLocation ) = 0;
};
struct ValidateResults {
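
Note: read together, the iterator contract above is: curr() returns the RecordId under the cursor, getNext() returns it and then advances, and both return a null RecordId() once isEOF() is true. A minimal usage sketch against these declarations (scanAll is a hypothetical caller, not code from this commit; it assumes the RecordStore, RecordIterator, RecordId and RecordData interfaces as declared above):

    // Hypothetical caller; uses only the interface shown in record_store.h.
    void scanAll(OperationContext* txn, const RecordStore* rs) {
        // Default arguments: start at RecordId(), scan FORWARD.
        boost::scoped_ptr<RecordIterator> it(rs->getIterator(txn));
        while (!it->isEOF()) {
            const RecordId loc = it->getNext(); // current position, then advance
            RecordData data = it->dataFor(loc); // iterator may optimize this lookup
            // ... consume data.data() ...
        }
    }
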
diff --git a/src/mongo/db/storage/record_store_test_datafor.cpp b/src/mongo/db/storage/record_store_test_datafor.cpp
index 82f445fd2ab..13acd9270ee 100644
--- a/src/mongo/db/storage/record_store_test_datafor.cpp
+++ b/src/mongo/db/storage/record_store_test_datafor.cpp
@@ -41,7 +41,7 @@ using std::stringstream;
namespace mongo {
// Insert a record and verify its contents by calling dataFor()
- // on the returned DiskLoc.
+ // on the returned RecordId.
TEST( RecordStoreTestHarness, DataFor ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
@@ -52,12 +52,12 @@ namespace mongo {
}
string data = "record-";
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -83,7 +83,7 @@ namespace mongo {
}
// Insert multiple records and verify their contents by calling dataFor()
- // on each of the returned DiskLocs.
+ // on each of the returned RecordIds.
TEST( RecordStoreTestHarness, DataForMultiple ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
@@ -94,7 +94,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -103,7 +103,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
diff --git a/src/mongo/db/storage/record_store_test_datasize.cpp b/src/mongo/db/storage/record_store_test_datasize.cpp
index 4bcb67f8477..f2b7730403b 100644
--- a/src/mongo/db/storage/record_store_test_datasize.cpp
+++ b/src/mongo/db/storage/record_store_test_datasize.cpp
@@ -73,7 +73,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
diff --git a/src/mongo/db/storage/record_store_test_deleterecord.cpp b/src/mongo/db/storage/record_store_test_deleterecord.cpp
index d66708a021c..afc4d78c92c 100644
--- a/src/mongo/db/storage/record_store_test_deleterecord.cpp
+++ b/src/mongo/db/storage/record_store_test_deleterecord.cpp
@@ -51,12 +51,12 @@ namespace mongo {
}
string data = "my record";
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -97,7 +97,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -106,7 +106,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
diff --git a/src/mongo/db/storage/record_store_test_harness.cpp b/src/mongo/db/storage/record_store_test_harness.cpp
index 83eee87db40..cd0694132cc 100644
--- a/src/mongo/db/storage/record_store_test_harness.cpp
+++ b/src/mongo/db/storage/record_store_test_harness.cpp
@@ -45,13 +45,13 @@ namespace mongo {
string s = "eliot was here";
- DiskLoc loc1;
+ RecordId loc1;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
ASSERT_OK( res.getStatus() );
loc1 = res.getValue();
uow.commit();
@@ -66,7 +66,7 @@ namespace mongo {
ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
RecordData rd;
- ASSERT( !rs->findRecord( opCtx.get(), DiskLoc(111,17), &rd ) );
+ ASSERT( !rs->findRecord( opCtx.get(), RecordId(111,17), &rd ) );
ASSERT( rd.data() == NULL );
ASSERT( rs->findRecord( opCtx.get(), loc1, &rd ) );
@@ -77,7 +77,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
ASSERT_OK( res.getStatus() );
uow.commit();
}
@@ -108,7 +108,7 @@ namespace mongo {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
- DiskLoc loc1;
+ RecordId loc1;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
@@ -116,7 +116,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
DummyDocWriter dw;
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), &dw, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), &dw, false );
ASSERT_OK( res.getStatus() );
loc1 = res.getValue();
uow.commit();
@@ -137,13 +137,13 @@ namespace mongo {
string s = "eliot was here";
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
ASSERT_OK( res.getStatus() );
loc = res.getValue();
uow.commit();
@@ -183,13 +183,13 @@ namespace mongo {
string s = "eliot was here";
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
ASSERT_OK( res.getStatus() );
res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
ASSERT_OK( res.getStatus() );
@@ -227,12 +227,12 @@ namespace mongo {
string s1 = "eliot was here";
string s2 = "eliot was here again";
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
s1.c_str(), s1.size() + 1,
false );
ASSERT_OK( res.getStatus() );
@@ -251,7 +251,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->updateRecord( opCtx.get(), loc,
+ StatusWith<RecordId> res = rs->updateRecord( opCtx.get(), loc,
s2.c_str(), s2.size() + 1,
false, NULL );
ASSERT_OK( res.getStatus() );
@@ -276,13 +276,13 @@ namespace mongo {
string s1 = "aaa111bbb";
string s2 = "aaa222bbb";
- DiskLoc loc;
+ RecordId loc;
const RecordData s1Rec(s1.c_str(), s1.size() + 1);
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
s1Rec.data(),
s1Rec.size(),
-1 );
@@ -336,12 +336,12 @@ namespace mongo {
string s = "eliot was here";
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
ASSERT_OK( res.getStatus() );
loc = res.getValue();
uow.commit();
@@ -410,7 +410,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<RecordIterator> it( rs->getIterator( opCtx.get() ) );
while ( !it->isEOF() ) {
- DiskLoc loc = it->getNext();
+ RecordId loc = it->getNext();
RecordData data = it->dataFor( loc );
string s = str::stream() << "eliot" << x++;
ASSERT_EQUALS( s, data.data() );
@@ -422,10 +422,10 @@ namespace mongo {
int x = N;
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<RecordIterator> it( rs->getIterator( opCtx.get(),
- DiskLoc(),
+ RecordId(),
CollectionScanParams::BACKWARD ) );
while ( !it->isEOF() ) {
- DiskLoc loc = it->getNext();
+ RecordId loc = it->getNext();
RecordData data = it->dataFor( loc );
string s = str::stream() << "eliot" << --x;
ASSERT_EQUALS( s, data.data() );
diff --git a/src/mongo/db/storage/record_store_test_insertrecord.cpp b/src/mongo/db/storage/record_store_test_insertrecord.cpp
index 5415f8ad39e..df539a013d8 100644
--- a/src/mongo/db/storage/record_store_test_insertrecord.cpp
+++ b/src/mongo/db/storage/record_store_test_insertrecord.cpp
@@ -52,12 +52,12 @@ namespace mongo {
}
string data = "my record";
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -85,7 +85,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -94,7 +94,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -121,14 +121,14 @@ namespace mongo {
ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
}
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
StringDocWriter docWriter( "my record", false );
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
&docWriter,
false );
ASSERT_OK( res.getStatus() );
@@ -155,7 +155,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -164,7 +164,7 @@ namespace mongo {
StringDocWriter docWriter( ss.str(), false );
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
&docWriter,
false );
ASSERT_OK( res.getStatus() );
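StringDocWriter above drives the DocWriter overload of insertRecord(). A sketch of a comparable writer, assuming the interface exposes the two members this patch visibly relies on: documentSize(), called at the rocks insert site later in this diff, and writeDocument() to fill the allocated buffer:

    // Hypothetical DocWriter: emits a fixed byte buffer.
    class RawBytesDocWriter : public DocWriter {
    public:
        RawBytesDocWriter( const char* data, size_t len ) : _data( data ), _len( len ) { }
        virtual size_t documentSize() const { return _len; }
        virtual void writeDocument( char* buf ) const { memcpy( buf, _data, _len ); }
    private:
        const char* _data;
        size_t _len;
    };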
diff --git a/src/mongo/db/storage/record_store_test_manyiter.cpp b/src/mongo/db/storage/record_store_test_manyiter.cpp
index adbdf550f49..83ddcdc5c8f 100644
--- a/src/mongo/db/storage/record_store_test_manyiter.cpp
+++ b/src/mongo/db/storage/record_store_test_manyiter.cpp
@@ -61,10 +61,10 @@ namespace mongo {
RecordIterator *rIter = *vIter;
ASSERT( rIter->isEOF() );
- ASSERT_EQUALS( DiskLoc(), rIter->curr() );
- ASSERT_EQUALS( DiskLoc(), rIter->getNext() );
+ ASSERT_EQUALS( RecordId(), rIter->curr() );
+ ASSERT_EQUALS( RecordId(), rIter->getNext() );
ASSERT( rIter->isEOF() );
- ASSERT_EQUALS( DiskLoc(), rIter->curr() );
+ ASSERT_EQUALS( RecordId(), rIter->curr() );
delete rIter;
}
@@ -82,7 +82,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -91,7 +91,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -106,7 +106,7 @@ namespace mongo {
ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
}
- set<DiskLoc> remain( locs, locs + nToInsert );
+ set<RecordId> remain( locs, locs + nToInsert );
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
vector<RecordIterator*> v = rs->getManyIterators( opCtx.get() );
@@ -116,15 +116,15 @@ namespace mongo {
RecordIterator *rIter = *vIter;
while ( !rIter->isEOF() ) {
- DiskLoc loc = rIter->curr();
+ RecordId loc = rIter->curr();
ASSERT( 1 == remain.erase( loc ) );
ASSERT_EQUALS( loc, rIter->getNext() );
}
- ASSERT_EQUALS( DiskLoc(), rIter->curr() );
- ASSERT_EQUALS( DiskLoc(), rIter->getNext() );
+ ASSERT_EQUALS( RecordId(), rIter->curr() );
+ ASSERT_EQUALS( RecordId(), rIter->getNext() );
ASSERT( rIter->isEOF() );
- ASSERT_EQUALS( DiskLoc(), rIter->curr() );
+ ASSERT_EQUALS( RecordId(), rIter->curr() );
delete rIter;
}
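The loop above leans on two properties that survive the rename: RecordId is totally ordered, so set<RecordId> works as a coverage check, and getManyIterators() returns raw pointers the caller must delete. Condensed into one sketch using only the calls from this test:

    set<RecordId> remain( locs, locs + nToInsert );
    vector<RecordIterator*> v = rs->getManyIterators( opCtx.get() );
    for ( vector<RecordIterator*>::iterator vIter = v.begin(); vIter != v.end(); ++vIter ) {
        RecordIterator* rIter = *vIter;
        while ( !rIter->isEOF() ) {
            remain.erase( rIter->getNext() ); // each RecordId seen exactly once
        }
        delete rIter; // ownership convention shown in the test above
    }
    // remain is empty once every iterator has been drained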
diff --git a/src/mongo/db/storage/record_store_test_recorditer.cpp b/src/mongo/db/storage/record_store_test_recorditer.cpp
index 932644e2f65..0b0bcb0e7a4 100644
--- a/src/mongo/db/storage/record_store_test_recorditer.cpp
+++ b/src/mongo/db/storage/record_store_test_recorditer.cpp
@@ -45,7 +45,7 @@ namespace mongo {
// Insert multiple records and iterate through them in the forward direction.
// When curr() or getNext() is called on an iterator positioned at EOF,
- // the iterator returns DiskLoc() and stays at EOF.
+ // the iterator returns RecordId() and stays at EOF.
TEST( RecordStoreTestHarness, IterateOverMultipleRecords ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
@@ -56,7 +56,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -65,7 +65,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -80,12 +80,12 @@ namespace mongo {
ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
}
- std::sort( locs, locs + nToInsert ); // inserted records may not be in DiskLoc order
+ std::sort( locs, locs + nToInsert ); // inserted records may not be in RecordId order
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
RecordIterator *it = rs->getIterator( opCtx.get(),
- DiskLoc(),
+ RecordId(),
CollectionScanParams::FORWARD );
for ( int i = 0; i < nToInsert; i++ ) {
@@ -95,10 +95,10 @@ namespace mongo {
}
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
- ASSERT_EQUALS( DiskLoc(), it->getNext() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->getNext() );
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
delete it;
}
@@ -106,7 +106,7 @@ namespace mongo {
// Insert multiple records and iterate through them in the reverse direction.
// When curr() or getNext() is called on an iterator positioned at EOF,
- // the iterator returns DiskLoc() and stays at EOF.
+ // the iterator returns RecordId() and stays at EOF.
TEST( RecordStoreTestHarness, IterateOverMultipleRecordsReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
@@ -117,7 +117,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -126,7 +126,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -141,12 +141,12 @@ namespace mongo {
ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
}
- std::sort( locs, locs + nToInsert ); // inserted records may not be in DiskLoc order
+ std::sort( locs, locs + nToInsert ); // inserted records may not be in RecordId order
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
RecordIterator *it = rs->getIterator( opCtx.get(),
- DiskLoc(),
+ RecordId(),
CollectionScanParams::BACKWARD );
for ( int i = nToInsert - 1; i >= 0; i-- ) {
@@ -156,10 +156,10 @@ namespace mongo {
}
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
- ASSERT_EQUALS( DiskLoc(), it->getNext() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->getNext() );
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
delete it;
}
@@ -177,7 +177,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -186,7 +186,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -201,7 +201,7 @@ namespace mongo {
ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
}
- std::sort( locs, locs + nToInsert ); // inserted records may not be in DiskLoc order
+ std::sort( locs, locs + nToInsert ); // inserted records may not be in RecordId order
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
@@ -217,10 +217,10 @@ namespace mongo {
}
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
- ASSERT_EQUALS( DiskLoc(), it->getNext() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->getNext() );
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
delete it;
}
@@ -238,7 +238,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -247,7 +247,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -262,7 +262,7 @@ namespace mongo {
ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
}
- std::sort( locs, locs + nToInsert ); // inserted records may not be in DiskLoc order
+ std::sort( locs, locs + nToInsert ); // inserted records may not be in RecordId order
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
@@ -278,10 +278,10 @@ namespace mongo {
}
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
- ASSERT_EQUALS( DiskLoc(), it->getNext() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->getNext() );
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
delete it;
}
@@ -300,7 +300,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -309,7 +309,7 @@ namespace mongo {
string data = sb.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -333,7 +333,7 @@ namespace mongo {
// Iterate, checking EOF along the way.
for ( int i = 0; i < nToInsert; i++ ) {
ASSERT( !it->isEOF() );
- DiskLoc nextLoc = it->getNext();
+ RecordId nextLoc = it->getNext();
ASSERT( !nextLoc.isNull() );
}
ASSERT( it->isEOF() );
@@ -347,7 +347,7 @@ namespace mongo {
string data = sb.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
diff --git a/src/mongo/db/storage/record_store_test_repairiter.cpp b/src/mongo/db/storage/record_store_test_repairiter.cpp
index 260db54829a..81d40d9900d 100644
--- a/src/mongo/db/storage/record_store_test_repairiter.cpp
+++ b/src/mongo/db/storage/record_store_test_repairiter.cpp
@@ -60,10 +60,10 @@ namespace mongo {
return;
}
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
- ASSERT_EQUALS( DiskLoc(), it->getNext() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->getNext() );
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
delete it;
}
@@ -81,7 +81,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -90,7 +90,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -105,7 +105,7 @@ namespace mongo {
ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
}
- set<DiskLoc> remain( locs, locs + nToInsert );
+ set<RecordId> remain( locs, locs + nToInsert );
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
RecordIterator *it = rs->getIteratorForRepair( opCtx.get() );
@@ -115,15 +115,15 @@ namespace mongo {
}
while ( !it->isEOF() ) {
- DiskLoc loc = it->getNext();
+ RecordId loc = it->getNext();
remain.erase( loc ); // can happen more than once per doc
}
ASSERT( remain.empty() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
- ASSERT_EQUALS( DiskLoc(), it->getNext() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->getNext() );
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
delete it;
}
diff --git a/src/mongo/db/storage/record_store_test_storagesize.cpp b/src/mongo/db/storage/record_store_test_storagesize.cpp
index f12fe08052f..edbaa28d045 100644
--- a/src/mongo/db/storage/record_store_test_storagesize.cpp
+++ b/src/mongo/db/storage/record_store_test_storagesize.cpp
@@ -57,7 +57,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
diff --git a/src/mongo/db/storage/record_store_test_touch.cpp b/src/mongo/db/storage/record_store_test_touch.cpp
index b692cae2d0e..14084ceadd4 100644
--- a/src/mongo/db/storage/record_store_test_touch.cpp
+++ b/src/mongo/db/storage/record_store_test_touch.cpp
@@ -78,7 +78,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -141,7 +141,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
diff --git a/src/mongo/db/storage/record_store_test_truncate.cpp b/src/mongo/db/storage/record_store_test_truncate.cpp
index 538ddab3c54..1beb17278ca 100644
--- a/src/mongo/db/storage/record_store_test_truncate.cpp
+++ b/src/mongo/db/storage/record_store_test_truncate.cpp
@@ -83,7 +83,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
diff --git a/src/mongo/db/storage/record_store_test_updaterecord.cpp b/src/mongo/db/storage/record_store_test_updaterecord.cpp
index 66bc1a55158..63ca8fd77e1 100644
--- a/src/mongo/db/storage/record_store_test_updaterecord.cpp
+++ b/src/mongo/db/storage/record_store_test_updaterecord.cpp
@@ -52,12 +52,12 @@ namespace mongo {
}
string data = "my record";
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -77,7 +77,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->updateRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->updateRecord( opCtx.get(),
loc,
data.c_str(),
data.size() + 1,
@@ -110,7 +110,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -119,7 +119,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -142,7 +142,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->updateRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->updateRecord( opCtx.get(),
locs[i],
data.c_str(),
data.size() + 1,
@@ -179,12 +179,12 @@ namespace mongo {
}
string oldData = "my record";
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
oldData.c_str(),
oldData.size() + 1,
false );
@@ -206,7 +206,7 @@ namespace mongo {
UpdateMoveNotifierSpy umn( opCtx.get(), loc, oldData.c_str(), oldData.size() );
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->updateRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->updateRecord( opCtx.get(),
loc,
newData.c_str(),
newData.size() + 1,
@@ -214,7 +214,7 @@ namespace mongo {
&umn );
ASSERT_OK( res.getStatus() );
// UpdateMoveNotifier::recordStoreGoingToMove() called only if
- // the DiskLoc for the record changes
+ // the RecordId for the record changes
if ( loc == res.getValue() ) {
ASSERT_EQUALS( 0, umn.getNumCalls() );
} else {
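The comment above states the contract this patch carries across the rename: recordStoreGoingToMove() fires only when an update relocates the record. The caller-side consequence, sketched with the same calls this test uses:

    StatusWith<RecordId> res = rs->updateRecord( opCtx.get(), loc,
                                                 newData.c_str(), newData.size() + 1,
                                                 false, &umn );
    ASSERT_OK( res.getStatus() );
    if ( loc == res.getValue() ) {
        // updated in place; the notifier was never invoked
    } else {
        loc = res.getValue(); // record moved; callers must adopt the new RecordId
    }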
diff --git a/src/mongo/db/storage/record_store_test_updaterecord.h b/src/mongo/db/storage/record_store_test_updaterecord.h
index 76e7653cbcd..479ac9f7748 100644
--- a/src/mongo/db/storage/record_store_test_updaterecord.h
+++ b/src/mongo/db/storage/record_store_test_updaterecord.h
@@ -42,7 +42,7 @@ namespace {
class UpdateMoveNotifierSpy : public UpdateMoveNotifier {
public:
- UpdateMoveNotifierSpy( OperationContext* txn, const DiskLoc &loc,
+ UpdateMoveNotifierSpy( OperationContext* txn, const RecordId &loc,
const char *buf, size_t size )
: _txn( txn ), _loc( loc ), _data( buf, size ), nCalls( 0 ) {
}
@@ -50,7 +50,7 @@ namespace {
~UpdateMoveNotifierSpy() { }
Status recordStoreGoingToMove( OperationContext *txn,
- const DiskLoc &oldLocation,
+ const RecordId &oldLocation,
const char *oldBuffer,
size_t oldSize ) {
nCalls++;
@@ -64,7 +64,7 @@ namespace {
private:
OperationContext *_txn;
- DiskLoc _loc;
+ RecordId _loc;
string _data;
int nCalls; // to verify that recordStoreGoingToMove() gets called once
diff --git a/src/mongo/db/storage/record_store_test_updatewithdamages.cpp b/src/mongo/db/storage/record_store_test_updatewithdamages.cpp
index 52a3d7fc705..63280494557 100644
--- a/src/mongo/db/storage/record_store_test_updatewithdamages.cpp
+++ b/src/mongo/db/storage/record_store_test_updatewithdamages.cpp
@@ -50,13 +50,13 @@ namespace mongo {
}
string data = "00010111";
- DiskLoc loc;
+ RecordId loc;
const RecordData rec(data.c_str(), data.size() + 1);
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
rec.data(),
rec.size(),
false );
@@ -113,13 +113,13 @@ namespace mongo {
}
string data = "00010111";
- DiskLoc loc;
+ RecordId loc;
const RecordData rec(data.c_str(), data.size() + 1);
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
rec.data(),
rec.size(),
false );
@@ -174,13 +174,13 @@ namespace mongo {
}
string data = "00010111";
- DiskLoc loc;
+ RecordId loc;
const RecordData rec(data.c_str(), data.size() + 1);
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
rec.data(),
rec.size(),
false );
@@ -233,13 +233,13 @@ namespace mongo {
}
string data = "my record";
- DiskLoc loc;
+ RecordId loc;
const RecordData rec(data.c_str(), data.size() + 1);
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
rec.data(),
rec.size(),
false );
diff --git a/src/mongo/db/storage/record_store_test_validate.h b/src/mongo/db/storage/record_store_test_validate.h
index 79512193356..1cb66c43c94 100644
--- a/src/mongo/db/storage/record_store_test_validate.h
+++ b/src/mongo/db/storage/record_store_test_validate.h
@@ -97,7 +97,7 @@ namespace {
ASSERT( _remain.insert( data ).second );
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = _rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = _rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
diff --git a/src/mongo/db/storage/rocks/rocks_record_store.cpp b/src/mongo/db/storage/rocks/rocks_record_store.cpp
index fe4f368876b..75bbe1c82ff 100644
--- a/src/mongo/db/storage/rocks/rocks_record_store.cpp
+++ b/src/mongo/db/storage/rocks/rocks_record_store.cpp
@@ -80,11 +80,11 @@ namespace mongo {
iter->SeekToLast();
if (iter->Valid()) {
rocksdb::Slice lastSlice = iter->key();
- DiskLoc lastLoc = _makeDiskLoc( lastSlice );
+ RecordId lastLoc = _makeDiskLoc( lastSlice );
_nextIdNum.store( lastLoc.getOfs() + ( uint64_t( lastLoc.a() ) << 32 ) + 1 );
}
else {
- // Need to start at 1 so we are always higher than minDiskLoc
+ // Need to start at 1 so we are always higher than RecordId::min()
_nextIdNum.store( 1 );
}
@@ -117,16 +117,16 @@ namespace mongo {
BSONObjBuilder* extraInfo,
int infoLevel ) const {
uint64_t storageSize;
- rocksdb::Range wholeRange( _makeKey( minDiskLoc ), _makeKey( maxDiskLoc ) );
+ rocksdb::Range wholeRange( _makeKey( RecordId::min() ), _makeKey( RecordId::max() ) );
_db->GetApproximateSizes(_columnFamily.get(), &wholeRange, 1, &storageSize);
return static_cast<int64_t>( storageSize );
}
- RecordData RocksRecordStore::dataFor( OperationContext* txn, const DiskLoc& loc) const {
+ RecordData RocksRecordStore::dataFor( OperationContext* txn, const RecordId& loc) const {
return _getDataFor(_db, _columnFamily.get(), txn, loc);
}
- void RocksRecordStore::deleteRecord( OperationContext* txn, const DiskLoc& dl ) {
+ void RocksRecordStore::deleteRecord( OperationContext* txn, const RecordId& dl ) {
RocksRecoveryUnit* ru = RocksRecoveryUnit::getRocksRecoveryUnit( txn );
std::string oldValue;
@@ -177,7 +177,7 @@ namespace mongo {
invariant(numRecords(txn) > 0);
rocksdb::Slice slice = iter->key();
- DiskLoc oldest = _makeDiskLoc( slice );
+ RecordId oldest = _makeDiskLoc( slice );
if ( _cappedDeleteCallback )
uassertStatusOK(_cappedDeleteCallback->aboutToDeleteCapped(txn, oldest));
@@ -187,18 +187,18 @@ namespace mongo {
}
}
- StatusWith<DiskLoc> RocksRecordStore::insertRecord( OperationContext* txn,
+ StatusWith<RecordId> RocksRecordStore::insertRecord( OperationContext* txn,
const char* data,
int len,
bool enforceQuota ) {
if ( _isCapped && len > _cappedMaxSize ) {
- return StatusWith<DiskLoc>( ErrorCodes::BadValue,
+ return StatusWith<RecordId>( ErrorCodes::BadValue,
"object to insert exceeds cappedMaxSize" );
}
RocksRecoveryUnit* ru = RocksRecoveryUnit::getRocksRecoveryUnit( txn );
- DiskLoc loc = _nextId();
+ RecordId loc = _nextId();
ru->writeBatch()->Put(_columnFamily.get(), _makeKey(loc), rocksdb::Slice(data, len));
@@ -207,10 +207,10 @@ namespace mongo {
cappedDeleteAsNeeded(txn);
- return StatusWith<DiskLoc>( loc );
+ return StatusWith<RecordId>( loc );
}
- StatusWith<DiskLoc> RocksRecordStore::insertRecord( OperationContext* txn,
+ StatusWith<RecordId> RocksRecordStore::insertRecord( OperationContext* txn,
const DocWriter* doc,
bool enforceQuota ) {
const int len = doc->documentSize();
@@ -220,8 +220,8 @@ namespace mongo {
return insertRecord( txn, buf.get(), len, enforceQuota );
}
- StatusWith<DiskLoc> RocksRecordStore::updateRecord( OperationContext* txn,
- const DiskLoc& loc,
+ StatusWith<RecordId> RocksRecordStore::updateRecord( OperationContext* txn,
+ const RecordId& loc,
const char* data,
int len,
bool enforceQuota,
@@ -232,7 +232,7 @@ namespace mongo {
auto status = ru->Get(_columnFamily.get(), _makeKey(loc), &old_value);
if ( !status.ok() ) {
- return StatusWith<DiskLoc>( ErrorCodes::InternalError, status.ToString() );
+ return StatusWith<RecordId>( ErrorCodes::InternalError, status.ToString() );
}
int old_length = old_value.size();
@@ -243,11 +243,11 @@ namespace mongo {
cappedDeleteAsNeeded(txn);
- return StatusWith<DiskLoc>( loc );
+ return StatusWith<RecordId>( loc );
}
Status RocksRecordStore::updateWithDamages( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages ) {
@@ -284,7 +284,7 @@ namespace mongo {
}
RecordIterator* RocksRecordStore::getIterator( OperationContext* txn,
- const DiskLoc& start,
+ const RecordId& start,
const CollectionScanParams::Direction& dir
) const {
return new Iterator(txn, _db, _columnFamily, dir, start);
@@ -308,7 +308,7 @@ namespace mongo {
//AFB add Clear(ColumnFamilyHandle*)
boost::scoped_ptr<RecordIterator> iter( getIterator( txn ) );
while( !iter->isEOF() ) {
- DiskLoc loc = iter->getNext();
+ RecordId loc = iter->getNext();
deleteRecord( txn, loc );
}
@@ -412,8 +412,8 @@ namespace mongo {
virtual ~RocksCollectionComparator() { }
virtual int Compare( const rocksdb::Slice& a, const rocksdb::Slice& b ) const {
- DiskLoc lhs = reinterpret_cast<const DiskLoc*>( a.data() )[0];
- DiskLoc rhs = reinterpret_cast<const DiskLoc*>( b.data() )[0];
+ RecordId lhs = reinterpret_cast<const RecordId*>( a.data() )[0];
+ RecordId rhs = reinterpret_cast<const RecordId*>( b.data() )[0];
return lhs.compare( rhs );
}
@@ -441,14 +441,14 @@ namespace mongo {
}
void RocksRecordStore::temp_cappedTruncateAfter( OperationContext* txn,
- DiskLoc end,
+ RecordId end,
bool inclusive ) {
boost::scoped_ptr<RecordIterator> iter(
- getIterator( txn, maxDiskLoc, CollectionScanParams::BACKWARD ) );
+ getIterator( txn, RecordId::max(), CollectionScanParams::BACKWARD ) );
while( !iter->isEOF() ) {
WriteUnitOfWork wu( txn );
- DiskLoc loc = iter->getNext();
+ RecordId loc = iter->getNext();
if ( loc < end || ( !inclusive && loc == end))
return;
@@ -476,25 +476,25 @@ namespace mongo {
return options;
}
- DiskLoc RocksRecordStore::_nextId() {
+ RecordId RocksRecordStore::_nextId() {
const uint64_t myId = _nextIdNum.fetchAndAdd(1);
int a = myId >> 32;
// The mask keeps only the lowest 4 bytes of myId
int ofs = myId & 0x00000000FFFFFFFF;
- DiskLoc loc( a, ofs );
+ RecordId loc( a, ofs );
return loc;
}
- rocksdb::Slice RocksRecordStore::_makeKey(const DiskLoc& loc) {
+ rocksdb::Slice RocksRecordStore::_makeKey(const RecordId& loc) {
return rocksdb::Slice(reinterpret_cast<const char*>(&loc), sizeof(loc));
}
- DiskLoc RocksRecordStore::_makeDiskLoc( const rocksdb::Slice& slice ) {
- return reinterpret_cast<const DiskLoc*>( slice.data() )[0];
+ RecordId RocksRecordStore::_makeDiskLoc( const rocksdb::Slice& slice ) {
+ return reinterpret_cast<const RecordId*>( slice.data() )[0];
}
bool RocksRecordStore::findRecord( OperationContext* txn,
- const DiskLoc& loc, RecordData* out ) const {
+ const RecordId& loc, RecordData* out ) const {
RecordData rd = _getDataFor(_db, _columnFamily.get(), txn, loc);
if ( rd.data() == NULL )
return false;
@@ -503,7 +503,7 @@ namespace mongo {
}
RecordData RocksRecordStore::_getDataFor(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* cf,
- OperationContext* txn, const DiskLoc& loc) {
+ OperationContext* txn, const RecordId& loc) {
RocksRecoveryUnit* ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
std::string value_storage;
@@ -549,7 +549,7 @@ namespace mongo {
RocksRecordStore::Iterator::Iterator(
OperationContext* txn, rocksdb::DB* db,
boost::shared_ptr<rocksdb::ColumnFamilyHandle> columnFamily,
- const CollectionScanParams::Direction& dir, const DiskLoc& start)
+ const CollectionScanParams::Direction& dir, const RecordId& start)
: _txn(txn),
_db(db),
_cf(columnFamily),
@@ -570,20 +570,20 @@ namespace mongo {
return _eof;
}
- DiskLoc RocksRecordStore::Iterator::curr() {
+ RecordId RocksRecordStore::Iterator::curr() {
if (_eof) {
- return DiskLoc();
+ return RecordId();
}
return _curr;
}
- DiskLoc RocksRecordStore::Iterator::getNext() {
+ RecordId RocksRecordStore::Iterator::getNext() {
if (_eof) {
- return DiskLoc();
+ return RecordId();
}
- DiskLoc toReturn = _curr;
+ RecordId toReturn = _curr;
if ( _forward() )
_iterator->Next();
@@ -599,7 +599,7 @@ namespace mongo {
return toReturn;
}
- void RocksRecordStore::Iterator::invalidate( const DiskLoc& dl ) {
+ void RocksRecordStore::Iterator::invalidate( const RecordId& dl ) {
_iterator.reset( NULL );
}
@@ -619,7 +619,7 @@ namespace mongo {
return true;
}
- RecordData RocksRecordStore::Iterator::dataFor(const DiskLoc& loc) const {
+ RecordData RocksRecordStore::Iterator::dataFor(const RecordId& loc) const {
if (!_eof && loc == _curr && _iterator->Valid() && _iterator->status().ok()) {
SharedBuffer data = SharedBuffer::allocate(_iterator->value().size());
memcpy(data.get(), _iterator->value().data(), _iterator->value().size());
@@ -628,7 +628,7 @@ namespace mongo {
return RocksRecordStore::_getDataFor(_db, _cf.get(), _txn, loc);
}
- void RocksRecordStore::Iterator::_locate(const DiskLoc& loc) {
+ void RocksRecordStore::Iterator::_locate(const RecordId& loc) {
if (_forward()) {
if (loc.isNull()) {
_iterator->SeekToFirst();
@@ -659,7 +659,7 @@ namespace mongo {
}
}
- DiskLoc RocksRecordStore::Iterator::_decodeCurr() const {
+ RecordId RocksRecordStore::Iterator::_decodeCurr() const {
invariant(_iterator && _iterator->Valid());
return _makeDiskLoc(_iterator->key());
}
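Two details in the hunks above are worth noting: the helper keeps its _makeDiskLoc name even though it now returns a RecordId, and _nextId() carves a monotonically increasing 64-bit counter into the (a, ofs) halves that RecordId's two-argument constructor still takes at this stage of the migration, with the constructor-time seeding reassembling the counter from the last key on disk. A standalone round-trip sketch of that packing (plain C++, no MongoDB headers; the unsigned reconstruction avoids the sign extension a negative ofs would otherwise introduce):

    #include <cassert>
    #include <cstdint>

    // Split a 64-bit id into the (a, ofs) pair, as _nextId() does above.
    void pack( uint64_t id, int32_t* a, int32_t* ofs ) {
        *a   = static_cast<int32_t>( id >> 32 );
        *ofs = static_cast<int32_t>( id & 0x00000000FFFFFFFFULL );
    }

    // Rebuild the id, as the constructor's seeding expression does.
    uint64_t unpack( int32_t a, int32_t ofs ) {
        return ( static_cast<uint64_t>( static_cast<uint32_t>( a ) ) << 32 )
             | static_cast<uint64_t>( static_cast<uint32_t>( ofs ) );
    }

    int main() {
        int32_t a, ofs;
        pack( 0x123456789ULL, &a, &ofs );
        assert( unpack( a, ofs ) == 0x123456789ULL );
        return 0;
    }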
diff --git a/src/mongo/db/storage/rocks/rocks_record_store.h b/src/mongo/db/storage/rocks/rocks_record_store.h
index 0dc97c8dab9..096543fe19d 100644
--- a/src/mongo/db/storage/rocks/rocks_record_store.h
+++ b/src/mongo/db/storage/rocks/rocks_record_store.h
@@ -77,38 +77,38 @@ namespace mongo {
// CRUD related
- virtual RecordData dataFor( OperationContext* txn, const DiskLoc& loc ) const;
+ virtual RecordData dataFor( OperationContext* txn, const RecordId& loc ) const;
virtual bool findRecord( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
RecordData* out ) const;
- virtual void deleteRecord( OperationContext* txn, const DiskLoc& dl );
+ virtual void deleteRecord( OperationContext* txn, const RecordId& dl );
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const char* data,
int len,
bool enforceQuota );
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const DocWriter* doc,
bool enforceQuota );
- virtual StatusWith<DiskLoc> updateRecord( OperationContext* txn,
- const DiskLoc& oldLocation,
+ virtual StatusWith<RecordId> updateRecord( OperationContext* txn,
+ const RecordId& oldLocation,
const char* data,
int len,
bool enforceQuota,
UpdateMoveNotifier* notifier );
virtual Status updateWithDamages( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages );
virtual RecordIterator* getIterator( OperationContext* txn,
- const DiskLoc& start = DiskLoc(),
+ const RecordId& start = RecordId(),
const CollectionScanParams::Direction& dir =
CollectionScanParams::FORWARD ) const;
@@ -141,7 +141,7 @@ namespace mongo {
BSONObjBuilder* info = NULL );
virtual void temp_cappedTruncateAfter(OperationContext* txn,
- DiskLoc end,
+ RecordId end,
bool inclusive);
void setCappedDeleteCallback(CappedDocumentDeleteCallback* cb) {
@@ -163,19 +163,19 @@ namespace mongo {
public:
Iterator(OperationContext* txn, rocksdb::DB* db,
boost::shared_ptr<rocksdb::ColumnFamilyHandle> columnFamily,
- const CollectionScanParams::Direction& dir, const DiskLoc& start);
+ const CollectionScanParams::Direction& dir, const RecordId& start);
virtual bool isEOF();
- virtual DiskLoc curr();
- virtual DiskLoc getNext();
- virtual void invalidate(const DiskLoc& dl);
+ virtual RecordId curr();
+ virtual RecordId getNext();
+ virtual void invalidate(const RecordId& dl);
virtual void saveState();
virtual bool restoreState(OperationContext* txn);
- virtual RecordData dataFor( const DiskLoc& loc ) const;
+ virtual RecordData dataFor( const RecordId& loc ) const;
private:
- void _locate(const DiskLoc& loc);
- DiskLoc _decodeCurr() const;
+ void _locate(const RecordId& loc);
+ RecordId _decodeCurr() const;
bool _forward() const;
void _checkStatus();
@@ -184,7 +184,7 @@ namespace mongo {
boost::shared_ptr<rocksdb::ColumnFamilyHandle> _cf;
CollectionScanParams::Direction _dir;
bool _eof;
- DiskLoc _curr;
+ RecordId _curr;
boost::scoped_ptr<rocksdb::Iterator> _iterator;
};
@@ -194,18 +194,18 @@ namespace mongo {
*/
static rocksdb::ReadOptions _readOptions(OperationContext* opCtx = NULL);
- static DiskLoc _makeDiskLoc( const rocksdb::Slice& slice );
+ static RecordId _makeDiskLoc( const rocksdb::Slice& slice );
static RecordData _getDataFor(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* cf,
- OperationContext* txn, const DiskLoc& loc);
+ OperationContext* txn, const RecordId& loc);
- DiskLoc _nextId();
+ RecordId _nextId();
bool cappedAndNeedDelete(OperationContext* txn) const;
void cappedDeleteAsNeeded(OperationContext* txn);
- // The use of this function requires that the passed in DiskLoc outlives the returned Slice
+ // The use of this function requires that the passed in RecordId outlives the returned Slice
// TODO possibly make this safer in the future
- static rocksdb::Slice _makeKey( const DiskLoc& loc );
+ static rocksdb::Slice _makeKey( const RecordId& loc );
void _changeNumRecords(OperationContext* txn, bool insert);
void _increaseDataSize(OperationContext* txn, int amount);
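The lifetime warning on _makeKey() above is the sharp edge of this encoding: the returned Slice aliases the RecordId's own bytes (a reinterpret_cast of &loc) rather than copying them. Safe versus dangling usage, sketched as if the private helper were callable directly:

    RecordId loc( 42, 0 );
    rocksdb::Slice key = _makeKey( loc ); // 'key' points into 'loc'
    // ... use 'key' while 'loc' is still alive: fine ...

    // Dangling: the temporary RecordId dies at the end of the full
    // expression, leaving the Slice pointing at dead stack bytes.
    // rocksdb::Slice bad = _makeKey( RecordId( 42, 0 ) );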
diff --git a/src/mongo/db/storage/rocks/rocks_sorted_data_impl.cpp b/src/mongo/db/storage/rocks/rocks_sorted_data_impl.cpp
index 69c78a09d3a..34ec1a9c676 100644
--- a/src/mongo/db/storage/rocks/rocks_sorted_data_impl.cpp
+++ b/src/mongo/db/storage/rocks/rocks_sorted_data_impl.cpp
@@ -55,7 +55,7 @@ namespace mongo {
rocksdb::Slice emptyByteSlice( "" );
rocksdb::SliceParts emptyByteSliceParts( &emptyByteSlice, 1 );
- // functions for converting between BSONObj-DiskLoc pairs and strings/rocksdb::Slices
+ // functions for converting between BSONObj-RecordId pairs and strings/rocksdb::Slices
/**
* Strips the field names from a BSON object
@@ -77,21 +77,21 @@ namespace mongo {
* strings, and false otherwise. Useful because field names are not necessary in an index
* key, since the ordering of the fields is already known.
*/
- string makeString( const BSONObj& key, const DiskLoc loc, bool removeFieldNames = true ) {
+ string makeString( const BSONObj& key, const RecordId loc, bool removeFieldNames = true ) {
const BSONObj& finalKey = removeFieldNames ? stripFieldNames( key ) : key;
string s( finalKey.objdata(), finalKey.objsize() );
- s.append( reinterpret_cast<const char*>( &loc ), sizeof( DiskLoc ) );
+ s.append( reinterpret_cast<const char*>( &loc ), sizeof( RecordId ) );
return s;
}
/**
* Constructs an IndexKeyEntry from a slice containing the bytes of a BSONObject followed
- * by the bytes of a DiskLoc
+ * by the bytes of a RecordId
*/
IndexKeyEntry makeIndexKeyEntry( const rocksdb::Slice& slice ) {
BSONObj key = BSONObj( slice.data() ).getOwned();
- DiskLoc loc = *reinterpret_cast<const DiskLoc*>( slice.data() + key.objsize() );
+ RecordId loc = *reinterpret_cast<const RecordId*>( slice.data() + key.objsize() );
return IndexKeyEntry( key, loc );
}
@@ -141,11 +141,11 @@ namespace mongo {
( valid && otherValid && _iterator->key() == realOther->_iterator->key() );
}
- void aboutToDeleteBucket(const DiskLoc& bucket) {
+ void aboutToDeleteBucket(const RecordId& bucket) {
invariant( !"aboutToDeleteBucket should never be called from RocksSortedDataImpl" );
}
- bool locate(const BSONObj& key, const DiskLoc& loc) {
+ bool locate(const BSONObj& key, const RecordId& loc) {
if (_forward) {
return _locate(stripFieldNames(key), loc);
} else {
@@ -169,9 +169,9 @@ namespace mongo {
getDirection() );
if (_forward) {
- _locate(key, minDiskLoc);
+ _locate(key, RecordId::min());
} else {
- _reverseLocate(key, maxDiskLoc);
+ _reverseLocate(key, RecordId::max());
}
}
@@ -194,7 +194,7 @@ namespace mongo {
return _cachedKey;
}
- DiskLoc getDiskLoc() const {
+ RecordId getRecordId() const {
_load();
return _cachedLoc;
}
@@ -216,7 +216,7 @@ namespace mongo {
_savedAtEnd = false;
_savePositionObj = getKey().getOwned();
- _savePositionLoc = getDiskLoc();
+ _savePositionLoc = getRecordId();
}
void restorePosition(OperationContext* txn) {
@@ -252,7 +252,7 @@ namespace mongo {
}
// _locate() for reverse iterators
- bool _reverseLocate( const BSONObj& key, const DiskLoc loc ) {
+ bool _reverseLocate( const BSONObj& key, const RecordId loc ) {
invariant( !_forward );
const IndexKeyEntry keyEntry( key, loc );
@@ -289,7 +289,7 @@ namespace mongo {
* helper so that it's possible to choose whether or not to strip the field names before
* performing the actual locate logic.
*/
- bool _locate( const BSONObj& key, const DiskLoc loc ) {
+ bool _locate( const BSONObj& key, const RecordId loc ) {
invariant(_forward);
_isCached = false;
@@ -325,7 +325,7 @@ namespace mongo {
_isCached = true;
rocksdb::Slice slice = _iterator->key();
_cachedKey = BSONObj( slice.data() ).getOwned();
- _cachedLoc = *reinterpret_cast<const DiskLoc*>( slice.data() +
+ _cachedLoc = *reinterpret_cast<const RecordId*>( slice.data() +
_cachedKey.objsize() );
}
@@ -336,12 +336,12 @@ namespace mongo {
mutable bool _isCached;
mutable BSONObj _cachedKey;
- mutable DiskLoc _cachedLoc;
+ mutable RecordId _cachedLoc;
// not for caching, but rather for savePosition() and restorePosition()
bool _savedAtEnd;
BSONObj _savePositionObj;
- DiskLoc _savePositionLoc;
+ RecordId _savePositionLoc;
// Used for comparing elements in reverse iterators. Because the rocksdb::Iterator is
// only a forward iterator, it is sometimes necessary to compare index keys manually
@@ -350,7 +350,7 @@ namespace mongo {
};
/**
- * Custom comparator for rocksdb used to compare Index Entries by BSONObj and DiskLoc
+ * Custom comparator for rocksdb used to compare Index Entries by BSONObj and RecordId
*/
class RocksIndexEntryComparator : public rocksdb::Comparator {
public:
@@ -408,7 +408,7 @@ namespace mongo {
invariant(index->isEmpty(txn));
}
- Status addKey(const BSONObj& key, const DiskLoc& loc) {
+ Status addKey(const BSONObj& key, const RecordId& loc) {
// TODO maybe optimize based on a fact that index is empty?
return _index->insert(_txn, key, loc, _dupsAllowed);
}
@@ -456,7 +456,7 @@ namespace mongo {
Status RocksSortedDataImpl::insert(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed) {
if (key.objsize() >= kTempKeyMaxSize) {
@@ -485,7 +485,7 @@ namespace mongo {
void RocksSortedDataImpl::unindex(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed) {
RocksRecoveryUnit* ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
@@ -503,11 +503,11 @@ namespace mongo {
Status RocksSortedDataImpl::dupKeyCheck(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc) {
+ const RecordId& loc) {
boost::scoped_ptr<SortedDataInterface::Cursor> cursor(newCursor(txn, 1));
- cursor->locate(key, DiskLoc(0, 0));
+ cursor->locate(key, RecordId(0, 0));
- if (cursor->isEOF() || cursor->getKey() != key || cursor->getDiskLoc() == loc) {
+ if (cursor->isEOF() || cursor->getKey() != key || cursor->getRecordId() == loc) {
return Status::OK();
} else {
return Status(ErrorCodes::DuplicateKey, dupKeyError(key));
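makeString() and makeIndexKeyEntry() above pin down the index entry layout: the owned BSON bytes followed immediately by sizeof(RecordId) raw bytes. A round trip through those file-local helpers (a sketch; it assumes IndexKeyEntry exposes the key and loc members this file reads):

    BSONObj key = BSON( "" << 42 );          // field names already stripped
    RecordId loc( 42, 84 );
    string encoded = makeString( key, loc ); // key.objsize() bytes + sizeof(RecordId)
    IndexKeyEntry entry = makeIndexKeyEntry(
            rocksdb::Slice( encoded.data(), encoded.size() ) );
    invariant( entry.key.woCompare( key ) == 0 );
    invariant( entry.loc == loc );           // RecordId comparison, post-rename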
diff --git a/src/mongo/db/storage/rocks/rocks_sorted_data_impl.h b/src/mongo/db/storage/rocks/rocks_sorted_data_impl.h
index accd8b25abc..404e57b4f12 100644
--- a/src/mongo/db/storage/rocks/rocks_sorted_data_impl.h
+++ b/src/mongo/db/storage/rocks/rocks_sorted_data_impl.h
@@ -50,16 +50,16 @@ namespace mongo {
class RocksSortedDataBuilderImpl : public SortedDataBuilderInterface {
public:
- virtual Status addKey(const BSONObj& key, const DiskLoc& loc) = 0;
+ virtual Status addKey(const BSONObj& key, const RecordId& loc) = 0;
};
/**
* Rocks implementation of the SortedDataInterface. Each index is stored as a single column
- * family. Each mapping from a BSONObj to a DiskLoc is stored as the key of a key-value pair
+ * family. Each mapping from a BSONObj to a RecordId is stored as the key of a key-value pair
* in the column family. Consequently, each value in the database is simply an empty string.
* This is done because RocksDB only supports unique keys, and because RocksDB can take a custom
* comparator to use when ordering keys. We use a custom comparator which orders keys based
- * first upon the BSONObj in the key, and uses the DiskLoc as a tiebreaker.
+ * first upon the BSONObj in the key, and uses the RecordId as a tiebreaker.
*/
class RocksSortedDataImpl : public SortedDataInterface {
MONGO_DISALLOW_COPYING( RocksSortedDataImpl );
@@ -71,13 +71,13 @@ namespace mongo {
virtual Status insert(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed);
- virtual void unindex(OperationContext* txn, const BSONObj& key, const DiskLoc& loc,
+ virtual void unindex(OperationContext* txn, const BSONObj& key, const RecordId& loc,
bool dupsAllowed);
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const DiskLoc& loc);
+ virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc);
virtual void fullValidate(OperationContext* txn, bool full, long long* numKeysOut,
BSONObjBuilder* output) const;
@@ -101,7 +101,7 @@ namespace mongo {
static rocksdb::Comparator* newRocksComparator( const Ordering& order );
private:
- typedef DiskLoc RecordId;
+ typedef RecordId RecordId;
rocksdb::DB* _db; // not owned
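Two notes on the hunks above. First, the mechanical sed leaves "typedef RecordId RecordId;" behind, a legal but now redundant self-typedef. Second, the class comment promises ordering first by BSONObj with the RecordId as tiebreaker; the comparison the custom comparator must implement reduces to this sketch (assuming a woCompare-style BSON comparison under the index's Ordering):

    int compareIndexEntries( const IndexKeyEntry& lhs, const IndexKeyEntry& rhs,
                             const Ordering& order ) {
        int cmp = lhs.key.woCompare( rhs.key, order, /*considerFieldName=*/ false );
        if ( cmp != 0 )
            return cmp;
        return lhs.loc.compare( rhs.loc ); // RecordId breaks ties
    }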
diff --git a/src/mongo/db/storage/sorted_data_interface.h b/src/mongo/db/storage/sorted_data_interface.h
index 1fecf2591f0..d4c9c7be9c6 100644
--- a/src/mongo/db/storage/sorted_data_interface.h
+++ b/src/mongo/db/storage/sorted_data_interface.h
@@ -82,7 +82,7 @@ namespace mongo {
bool dupsAllowed) = 0;
/**
- * Insert an entry into the index with the specified key and DiskLoc.
+ * Insert an entry into the index with the specified key and RecordId.
*
* @param txn the transaction under which the insert takes place
* @param dupsAllowed true if duplicate keys are allowed, and false
@@ -91,15 +91,15 @@ namespace mongo {
* @return Status::OK() if the insert succeeded,
*
* ErrorCodes::DuplicateKey if 'key' already exists in 'this' index
- * at a DiskLoc other than 'loc' and duplicates were not allowed
+ * at a RecordId other than 'loc' and duplicates were not allowed
*/
virtual Status insert(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed) = 0;
/**
- * Remove the entry from the index with the specified key and DiskLoc.
+ * Remove the entry from the index with the specified key and RecordId.
*
* @param txn the transaction under which the remove takes place
* @param dupsAllowed true if duplicate keys are allowed, and false
@@ -107,12 +107,12 @@ namespace mongo {
*/
virtual void unindex(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed) = 0;
/**
* Return ErrorCodes::DuplicateKey if 'key' already exists in 'this'
- * index at a DiskLoc other than 'loc', and Status::OK() otherwise.
+ * index at a RecordId other than 'loc', and Status::OK() otherwise.
*
* @param txn the transaction under which this operation takes place
*
@@ -120,7 +120,7 @@ namespace mongo {
*/
virtual Status dupKeyCheck(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc) = 0;
+ const RecordId& loc) = 0;
//
// Information about the tree
@@ -201,7 +201,7 @@ namespace mongo {
/**
* Return true if 'this' cursor and the 'other' cursor are positioned at
- * the same key and DiskLoc, or if both cursors are at EOF. Otherwise,
+ * the same key and RecordId, or if both cursors are at EOF. Otherwise,
* this function returns false.
*
* Implementations should prohibit the comparison of cursors associated
@@ -214,17 +214,17 @@ namespace mongo {
* be forwarded to all Cursors over that SortedData.
* TODO something better.
*/
- virtual void aboutToDeleteBucket(const DiskLoc& bucket) = 0;
+ virtual void aboutToDeleteBucket(const RecordId& bucket) = 0;
/**
* Position 'this' forward (reverse) cursor either at the entry or
- * immediately after (or immediately before) the specified key and DiskLoc.
+ * immediately after (or immediately before) the specified key and RecordId.
* The cursor should be positioned at EOF if no such entry exists.
*
- * @return true if the entry (key, DiskLoc) exists within the index,
+ * @return true if the entry (key, RecordId) exists within the index,
* and false otherwise
*/
- virtual bool locate(const BSONObj& key, const DiskLoc& loc) = 0;
+ virtual bool locate(const BSONObj& key, const RecordId& loc) = 0;
/**
* Position 'this' forward (reverse) cursor either at the next
@@ -298,9 +298,9 @@ namespace mongo {
virtual BSONObj getKey() const = 0;
/**
- * Return the DiskLoc associated with the current position of 'this' cursor.
+ * Return the RecordId associated with the current position of 'this' cursor.
*/
- virtual DiskLoc getDiskLoc() const = 0;
+ virtual RecordId getRecordId() const = 0;
/**
* Position 'this' forward (reverse) cursor at the next (preceding) entry
@@ -313,7 +313,7 @@ namespace mongo {
//
/**
- * Save the entry in the index (i.e. its key and DiskLoc) of where
+ * Save the entry in the index (i.e. its key and RecordId) of where
* 'this' cursor is currently positioned.
*
* Implementations can assume that no operations other than delete
@@ -326,7 +326,7 @@ namespace mongo {
* Restore 'this' cursor to the previously saved entry in the index.
*
* Implementations should have the same behavior as calling locate()
- * with the saved key and DiskLoc.
+ * with the saved key and RecordId.
*/
virtual void restorePosition(OperationContext* txn) = 0;
};
@@ -369,7 +369,7 @@ namespace mongo {
* 'key' must be > or >= the last key passed to this function (depends on _dupsAllowed). If
* this is violated, an error Status (ErrorCodes::InternalError) will be returned.
*/
- virtual Status addKey(const BSONObj& key, const DiskLoc& loc) = 0;
+ virtual Status addKey(const BSONObj& key, const RecordId& loc) = 0;
/**
* Do any necessary work to finish building the tree.
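Taken together, the renamed cursor contract above (locate(), getKey(), getRecordId(), advance()) composes into the full scan that the tests in the next file exercise. A sketch using the same harness conventions as those tests:

    scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
    cursor->locate( minKey, RecordId::min() ); // at or after the smallest entry
    while ( !cursor->isEOF() ) {
        BSONObj key = cursor->getKey();
        RecordId loc = cursor->getRecordId();  // renamed from getDiskLoc()
        // ... consume (key, loc) ...
        cursor->advance();
    }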
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor.cpp
index 87583fdcdd8..c6adfd3de3f 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor.cpp
@@ -73,7 +73,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( minKey, minDiskLoc ) );
+ ASSERT( !cursor->locate( minKey, RecordId::min() ) );
ASSERT( cursor->isEOF() );
// Cursor at EOF should remain at EOF when advanced
@@ -96,7 +96,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( maxKey, maxDiskLoc ) );
+ ASSERT( !cursor->locate( maxKey, RecordId::max() ) );
ASSERT( cursor->isEOF() );
// Cursor at EOF should remain at EOF when advanced
@@ -122,7 +122,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
BSONObj key = BSON( "" << i );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
uow.commit();
}
@@ -136,11 +136,11 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( minKey, minDiskLoc ) );
+ ASSERT( !cursor->locate( minKey, RecordId::min() ) );
for ( int i = 0; i < nToInsert; i++ ) {
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << i ), cursor->getKey() );
- ASSERT_EQUALS( DiskLoc( 42, i * 2 ), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId( 42, i * 2 ), cursor->getRecordId() );
cursor->advance();
}
ASSERT( cursor->isEOF() );
@@ -168,7 +168,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
BSONObj key = BSON( "" << i );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
uow.commit();
}
@@ -182,11 +182,11 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( maxKey, maxDiskLoc ) );
+ ASSERT( !cursor->locate( maxKey, RecordId::max() ) );
for ( int i = nToInsert - 1; i >= 0; i-- ) {
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << i ), cursor->getKey() );
- ASSERT_EQUALS( DiskLoc( 42, i * 2 ), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId( 42, i * 2 ), cursor->getRecordId() );
cursor->advance();
}
ASSERT( cursor->isEOF() );
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_advanceto.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_advanceto.cpp
index 3f70c28ce1a..da78e71e595 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_advanceto.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_advanceto.cpp
@@ -39,7 +39,7 @@ namespace mongo {
// using a forward cursor by specifying their exact key. When
// advanceTo() is called on a duplicate key, the cursor is
// positioned at the next occurrence of that key in ascending
- // order by DiskLoc.
+ // order by RecordId.
TEST( SortedDataInterface, AdvanceTo ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
@@ -73,7 +73,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -83,7 +83,7 @@ namespace mongo {
// SERVER-15489 forward cursor is positioned at first occurrence of key in index
// when advanceTo() called on duplicate key
// ASSERT_EQUALS( key1, cursor->getKey() );
- // ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ // ASSERT_EQUALS( loc2, cursor->getRecordId() );
}
{
@@ -92,7 +92,7 @@ namespace mongo {
cursor->advanceTo( key2, 1, false, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc4, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc4, cursor->getRecordId() );
}
{
@@ -101,7 +101,7 @@ namespace mongo {
cursor->advanceTo( key3, 1, false, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc5, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc5, cursor->getRecordId() );
}
{
@@ -118,7 +118,7 @@ namespace mongo {
// using a reverse cursor by specifying their exact key. When
// advanceTo() is called on a duplicate key, the cursor is
// positioned at the next occurrence of that key in descending
- // order by DiskLoc.
+ // order by RecordId.
TEST( SortedDataInterface, AdvanceToReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
@@ -152,7 +152,7 @@ namespace mongo {
ASSERT( cursor->locate( key3, loc5 ) );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc5, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc5, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -162,7 +162,7 @@ namespace mongo {
// SERVER-15490 reverse cursor is positioned at last occurrence of key in index
// when advanceTo() called on duplicate key
// ASSERT_EQUALS( key3, cursor->getKey() );
- // ASSERT_EQUALS( loc4, cursor->getDiskLoc() );
+ // ASSERT_EQUALS( loc4, cursor->getRecordId() );
}
{
@@ -171,7 +171,7 @@ namespace mongo {
cursor->advanceTo( key2, 1, false, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
}
{
@@ -180,7 +180,7 @@ namespace mongo {
cursor->advanceTo( key1, 1, false, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
}
{
@@ -226,7 +226,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -236,7 +236,7 @@ namespace mongo {
// SERVER-15489 forward cursor is positioned at first key in index
// when advanceTo() called with key smaller than any entry
// ASSERT_EQUALS( key2, cursor->getKey() );
- // ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ // ASSERT_EQUALS( loc2, cursor->getRecordId() );
}
}
@@ -246,7 +246,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -256,7 +256,7 @@ namespace mongo {
// SERVER-15489 forward cursor is positioned at first key in index
// when advanceTo() called with key smaller than any entry
// ASSERT_EQUALS( key2, cursor->getKey() );
- // ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ // ASSERT_EQUALS( loc2, cursor->getRecordId() );
}
}
}
@@ -294,7 +294,7 @@ namespace mongo {
ASSERT( cursor->locate( key2, loc2 ) );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -304,7 +304,7 @@ namespace mongo {
// SERVER-15490 reverse cursor is positioned at last key in index
// when advanceTo() called with key larger than any entry
// ASSERT_EQUALS( key1, cursor->getKey() );
- // ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ // ASSERT_EQUALS( loc1, cursor->getRecordId() );
}
}
@@ -314,7 +314,7 @@ namespace mongo {
ASSERT( cursor->locate( key2, loc2 ) );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -324,7 +324,7 @@ namespace mongo {
// SERVER-15490 reverse cursor is positioned at last key in index
// when advanceTo() called with key larger than any entry
// ASSERT_EQUALS( key1, cursor->getKey() );
- // ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ // ASSERT_EQUALS( loc1, cursor->getRecordId() );
}
}
}
@@ -363,7 +363,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -382,7 +382,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -428,7 +428,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -447,7 +447,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -496,7 +496,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -504,7 +504,7 @@ namespace mongo {
cursor->advanceTo( key1, 1, true, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc4, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc4, cursor->getRecordId() );
}
{
@@ -513,7 +513,7 @@ namespace mongo {
cursor->advanceTo( key2, 1, true, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc5, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc5, cursor->getRecordId() );
}
{
@@ -573,7 +573,7 @@ namespace mongo {
ASSERT( cursor->locate( key3, loc5 ) );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc5, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc5, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -581,7 +581,7 @@ namespace mongo {
cursor->advanceTo( key3, 1, true, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
}
{
@@ -590,7 +590,7 @@ namespace mongo {
cursor->advanceTo( key2, 1, true, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
}
{
@@ -649,7 +649,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -661,7 +661,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
}
{
@@ -674,7 +674,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key5, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
}
}
}
@@ -715,7 +715,7 @@ namespace mongo {
ASSERT( cursor->locate( key5, loc3 ) );
ASSERT_EQUALS( key5, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -727,7 +727,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
}
{
@@ -740,7 +740,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
}
}
}
@@ -783,7 +783,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -796,7 +796,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
}
{
@@ -810,7 +810,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( key5, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
}
}
@@ -820,7 +820,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -833,7 +833,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( key5, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
}
}
}
@@ -876,7 +876,7 @@ namespace mongo {
ASSERT( cursor->locate( key5, loc3 ) );
ASSERT_EQUALS( key5, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -889,7 +889,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
}
{
@@ -903,7 +903,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
}
}
@@ -913,7 +913,7 @@ namespace mongo {
ASSERT( cursor->locate( key5, loc3 ) );
ASSERT_EQUALS( key5, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -926,7 +926,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
}
}
}
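Every advanceTo() call in the tests above follows one pattern; a minimal sketch, assuming the advanceTo( keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive ) signature these tests exercise (the keyEndInclusive declaration mirrors the test bodies):

    vector<const BSONElement*> keyEnd( 1 );      // suffix slots; unused when
    vector<bool> keyEndInclusive( 1, false );    // keyBeginLen spans the key
    // afterKey=false: advance to the first entry for key2 at or after the
    // current position; duplicate keys order by (key, RecordId).
    cursor->advanceTo( key2, 1, false, keyEnd, keyEndInclusive );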
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_locate.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_locate.cpp
index b1ffe0ff3bf..5c21f00616e 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_locate.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_locate.cpp
@@ -36,7 +36,7 @@
namespace mongo {
// Insert a key and try to locate it using a forward cursor
- // by specifying its exact key and DiskLoc.
+ // by specifying its exact key and RecordId.
TEST( SortedDataInterface, Locate ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -62,7 +62,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -70,7 +70,7 @@ namespace mongo {
}
// Insert a key and try to locate it using a reverse cursor
- // by specifying its exact key and DiskLoc.
+ // by specifying its exact key and RecordId.
TEST( SortedDataInterface, LocateReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -96,7 +96,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -104,7 +104,7 @@ namespace mongo {
}
// Insert a compound key and try to locate it using a forward cursor
- // by specifying its exact key and DiskLoc.
+ // by specifying its exact key and RecordId.
TEST( SortedDataInterface, LocateCompoundKey ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -130,7 +130,7 @@ namespace mongo {
ASSERT( cursor->locate( compoundKey1a, loc1 ) );
ASSERT_EQUALS( compoundKey1a, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -138,7 +138,7 @@ namespace mongo {
}
// Insert a compound key and try to locate it using a reverse cursor
- // by specifying its exact key and DiskLoc.
+ // by specifying its exact key and RecordId.
TEST( SortedDataInterface, LocateCompoundKeyReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -164,7 +164,7 @@ namespace mongo {
ASSERT( cursor->locate( compoundKey1a, loc1 ) );
ASSERT_EQUALS( compoundKey1a, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -172,7 +172,7 @@ namespace mongo {
}
// Insert multiple keys and try to locate them using a forward cursor
- // by specifying their exact key and DiskLoc.
+ // by specifying their exact key and RecordId.
TEST( SortedDataInterface, LocateMultiple ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -199,11 +199,11 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -224,26 +224,26 @@ namespace mongo {
ASSERT( cursor->locate( key2, loc2 ) );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -251,7 +251,7 @@ namespace mongo {
}
// Insert multiple keys and try to locate them using a reverse cursor
- // by specifying their exact key and DiskLoc.
+ // by specifying their exact key and RecordId.
TEST( SortedDataInterface, LocateMultipleReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -278,11 +278,11 @@ namespace mongo {
ASSERT( cursor->locate( key2, loc2 ) );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -303,26 +303,26 @@ namespace mongo {
ASSERT( cursor->locate( key2, loc2 ) );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
ASSERT( cursor->locate( key3, loc3 ) );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -330,7 +330,7 @@ namespace mongo {
}
// Insert multiple compound keys and try to locate them using a forward cursor
- // by specifying their exact key and DiskLoc.
+ // by specifying their exact key and RecordId.
TEST( SortedDataInterface, LocateMultipleCompoundKeys ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -358,15 +358,15 @@ namespace mongo {
ASSERT( cursor->locate( compoundKey1a, loc1 ) );
ASSERT_EQUALS( compoundKey1a, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1b, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey2b, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -388,23 +388,23 @@ namespace mongo {
ASSERT( cursor->locate( compoundKey1a, loc1 ) );
ASSERT_EQUALS( compoundKey1a, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1b, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1c, cursor->getKey() );
- ASSERT_EQUALS( loc4, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc4, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey2b, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey3a, cursor->getKey() );
- ASSERT_EQUALS( loc5, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc5, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -412,7 +412,7 @@ namespace mongo {
}
// Insert multiple compound keys and try to locate them using a reverse cursor
- // by specifying their exact key and DiskLoc.
+ // by specifying their exact key and RecordId.
TEST( SortedDataInterface, LocateMultipleCompoundKeysReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -440,15 +440,15 @@ namespace mongo {
ASSERT( cursor->locate( compoundKey2b, loc3 ) );
ASSERT_EQUALS( compoundKey2b, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1b, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1a, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -470,23 +470,23 @@ namespace mongo {
ASSERT( cursor->locate( compoundKey3a, loc5 ) );
ASSERT_EQUALS( compoundKey3a, cursor->getKey() );
- ASSERT_EQUALS( loc5, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc5, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey2b, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1c, cursor->getKey() );
- ASSERT_EQUALS( loc4, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc4, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1b, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1a, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -494,7 +494,7 @@ namespace mongo {
}
// Insert multiple keys and try to locate them using a forward cursor
- // by specifying either a smaller key or DiskLoc.
+ // by specifying either a smaller key or RecordId.
TEST( SortedDataInterface, LocateIndirect ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -519,9 +519,9 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( key1, maxDiskLoc ) );
+ ASSERT( !cursor->locate( key1, RecordId::max() ) );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -540,17 +540,17 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( key1, minDiskLoc ) );
+ ASSERT( !cursor->locate( key1, RecordId::min() ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -558,7 +558,7 @@ namespace mongo {
}
// Insert multiple keys and try to locate them using a reverse cursor
- // by specifying either a larger key or DiskLoc.
+ // by specifying either a larger key or RecordId.
TEST( SortedDataInterface, LocateIndirectReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -583,9 +583,9 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( key2, minDiskLoc ) );
+ ASSERT( !cursor->locate( key2, RecordId::min() ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -604,17 +604,17 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( key3, maxDiskLoc ) );
+ ASSERT( !cursor->locate( key3, RecordId::max() ) );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -622,7 +622,7 @@ namespace mongo {
}
// Insert multiple compound keys and try to locate them using a forward cursor
- // by specifying either a smaller key or DiskLoc.
+ // by specifying either a smaller key or RecordId.
TEST( SortedDataInterface, LocateIndirectCompoundKeys ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -648,13 +648,13 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( compoundKey1a, maxDiskLoc ) );
+ ASSERT( !cursor->locate( compoundKey1a, RecordId::max() ) );
ASSERT_EQUALS( compoundKey1b, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey2b, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -676,11 +676,11 @@ namespace mongo {
ASSERT( !cursor->locate( compoundKey2a, loc1 ) );
ASSERT_EQUALS( compoundKey2b, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey3a, cursor->getKey() );
- ASSERT_EQUALS( loc5, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc5, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -688,7 +688,7 @@ namespace mongo {
}
// Insert multiple compound keys and try to locate them using a reverse cursor
- // by specifying either a larger key or DiskLoc.
+ // by specifying either a larger key or RecordId.
TEST( SortedDataInterface, LocateIndirectCompoundKeysReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -714,13 +714,13 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( compoundKey2b, minDiskLoc ) );
+ ASSERT( !cursor->locate( compoundKey2b, RecordId::min() ) );
ASSERT_EQUALS( compoundKey1b, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1a, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -742,15 +742,15 @@ namespace mongo {
ASSERT( !cursor->locate( compoundKey1d, loc1 ) );
ASSERT_EQUALS( compoundKey1c, cursor->getKey() );
- ASSERT_EQUALS( loc4, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc4, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1b, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1a, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -772,7 +772,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( BSONObj(), minDiskLoc ) );
+ ASSERT( !cursor->locate( BSONObj(), RecordId::min() ) );
ASSERT( cursor->isEOF() );
}
}
@@ -792,7 +792,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( BSONObj(), maxDiskLoc ) );
+ ASSERT( !cursor->locate( BSONObj(), RecordId::max() ) );
ASSERT( cursor->isEOF() );
}
}
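The RecordId::min()/RecordId::max() arguments in the tests above bound the scan within a run of equal keys; a minimal sketch of the idiom, using the same cursor API:

    // Position a forward cursor at the first entry for key1, whatever
    // RecordId it carries:
    cursor->locate( key1, RecordId::min() );
    // Skip past every entry for key1; locate() returns false (no exact
    // match) and the cursor lands on the next key — key2 in LocateIndirect:
    ASSERT( !cursor->locate( key1, RecordId::max() ) );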
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_position.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_position.cpp
index 8f9b8906e92..ba368de3e34 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_position.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_position.cpp
@@ -51,8 +51,8 @@ namespace mongo {
scoped_ptr<SortedDataInterface::Cursor> cursor1( sorted->newCursor( opCtx.get(), 1 ) );
scoped_ptr<SortedDataInterface::Cursor> cursor2( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor1->locate( minKey, minDiskLoc ) );
- ASSERT( !cursor2->locate( minKey, minDiskLoc ) );
+ ASSERT( !cursor1->locate( minKey, RecordId::min() ) );
+ ASSERT( !cursor2->locate( minKey, RecordId::min() ) );
ASSERT( cursor1->isEOF() );
ASSERT( cursor2->isEOF() );
ASSERT( cursor1->pointsToSamePlaceAs( *cursor2 ) );
@@ -76,8 +76,8 @@ namespace mongo {
scoped_ptr<SortedDataInterface::Cursor> cursor1( sorted->newCursor( opCtx.get(), -1 ) );
scoped_ptr<SortedDataInterface::Cursor> cursor2( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor1->locate( maxKey, maxDiskLoc ) );
- ASSERT( !cursor2->locate( maxKey, maxDiskLoc ) );
+ ASSERT( !cursor1->locate( maxKey, RecordId::max() ) );
+ ASSERT( !cursor2->locate( maxKey, RecordId::max() ) );
ASSERT( cursor1->isEOF() );
ASSERT( cursor2->isEOF() );
ASSERT( cursor1->pointsToSamePlaceAs( *cursor2 ) );
@@ -258,7 +258,7 @@ namespace mongo {
}
// Verify that two forward cursors positioned at a duplicate key, but with
- // different DiskLocs are not considered to point to the same place.
+ // different RecordIds are not considered to point to the same place.
TEST( SortedDataInterface, CursorsPointToDifferentDiskLocs ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
@@ -296,7 +296,7 @@ namespace mongo {
}
// Verify that two reverse cursors positioned at a duplicate key, but with
- // different DiskLocs are not considered to point to the same place.
+ // different RecordIds are not considered to point to the same place.
TEST( SortedDataInterface, CursorsPointToDifferentDiskLocsReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
@@ -410,7 +410,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
BSONObj key = BSON( "" << i );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
uow.commit();
}
@@ -424,7 +424,7 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( minKey, minDiskLoc ) );
+ ASSERT( !cursor->locate( minKey, RecordId::min() ) );
for ( int i = 0; i < nToInsert; i++ ) {
ASSERT( !cursor->isEOF() );
ASSERT( cursor->pointsToSamePlaceAs( *cursor ) );
@@ -450,7 +450,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
BSONObj key = BSON( "" << i );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
uow.commit();
}
@@ -464,7 +464,7 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( maxKey, maxDiskLoc ) );
+ ASSERT( !cursor->locate( maxKey, RecordId::max() ) );
for ( int i = nToInsert - 1; i >= 0; i-- ) {
ASSERT( !cursor->isEOF() );
ASSERT( cursor->pointsToSamePlaceAs( *cursor ) );
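The position that pointsToSamePlaceAs() compares is the full (key, RecordId) pair, which is what the two CursorsPointToDifferentDiskLocs tests above rely on; a minimal sketch:

    cursor1->locate( key1, loc1 );
    cursor2->locate( key1, loc2 );  // same key, different RecordId
    ASSERT( !cursor1->pointsToSamePlaceAs( *cursor2 ) );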
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
index 36ee79d930b..2031e1cf278 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
@@ -53,7 +53,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
BSONObj key = BSON( "" << i );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
uow.commit();
}
@@ -67,11 +67,11 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( minKey, minDiskLoc ) );
+ ASSERT( !cursor->locate( minKey, RecordId::min() ) );
for ( int i = 0; i < nToInsert; i++ ) {
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << i ), cursor->getKey() );
- ASSERT_EQUALS( DiskLoc( 42, i * 2 ), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId( 42, i * 2 ), cursor->getRecordId() );
cursor->advance();
cursor->savePosition();
cursor->restorePosition( opCtx.get() );
@@ -98,7 +98,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
BSONObj key = BSON( "" << i );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
uow.commit();
}
@@ -112,11 +112,11 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( maxKey, maxDiskLoc ) );
+ ASSERT( !cursor->locate( maxKey, RecordId::max() ) );
for ( int i = nToInsert - 1; i >= 0; i-- ) {
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << i ), cursor->getKey() );
- ASSERT_EQUALS( DiskLoc( 42, i * 2 ), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId( 42, i * 2 ), cursor->getRecordId() );
cursor->advance();
cursor->savePosition();
cursor->restorePosition( opCtx.get() );
@@ -127,7 +127,7 @@ namespace mongo {
// Insert the same key multiple times and try to iterate through each
// occurrence using a forward cursor while calling savePosition() and
- // restorePosition() in succession. Verify that the DiskLoc is saved
+ // restorePosition() in succession. Verify that the RecordId is saved
// as part of the current position of the cursor.
TEST( SortedDataInterface, SaveAndRestorePositionWhileIterateCursorWithDupKeys ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
@@ -143,7 +143,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key1, loc, true /* allow duplicates */ ) );
uow.commit();
}
@@ -157,11 +157,11 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( minKey, minDiskLoc ) );
+ ASSERT( !cursor->locate( minKey, RecordId::min() ) );
for ( int i = 0; i < nToInsert; i++ ) {
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( DiskLoc( 42, i * 2 ), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId( 42, i * 2 ), cursor->getRecordId() );
cursor->advance();
cursor->savePosition();
cursor->restorePosition( opCtx.get() );
@@ -172,7 +172,7 @@ namespace mongo {
// Insert the same key multiple times and try to iterate through each
// occurrence using a reverse cursor while calling savePosition() and
- // restorePosition() in succession. Verify that the DiskLoc is saved
+ // restorePosition() in succession. Verify that the RecordId is saved
// as part of the current position of the cursor.
TEST( SortedDataInterface, SaveAndRestorePositionWhileIterateCursorWithDupKeysReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
@@ -188,7 +188,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key1, loc, true /* allow duplicates */ ) );
uow.commit();
}
@@ -202,11 +202,11 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( maxKey, maxDiskLoc ) );
+ ASSERT( !cursor->locate( maxKey, RecordId::max() ) );
for ( int i = nToInsert - 1; i >= 0; i-- ) {
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( DiskLoc( 42, i * 2 ), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId( 42, i * 2 ), cursor->getRecordId() );
cursor->advance();
cursor->savePosition();
cursor->restorePosition( opCtx.get() );
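The save/restore pattern repeated above, in isolation; the RecordId is captured as part of the saved position, so iteration resumes at the exact (key, RecordId) pair:

    cursor->savePosition();                  // capture (key, RecordId)
    // ... resources may be released here, e.g. across a commit ...
    cursor->restorePosition( opCtx.get() );  // reseat at the saved pair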
diff --git a/src/mongo/db/storage/sorted_data_interface_test_dupkeycheck.cpp b/src/mongo/db/storage/sorted_data_interface_test_dupkeycheck.cpp
index b160ae9b56e..a8644536ea2 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_dupkeycheck.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_dupkeycheck.cpp
@@ -36,7 +36,7 @@
namespace mongo {
// Insert a key and verify that dupKeyCheck() returns a non-OK status for
- // the same key. When dupKeyCheck() is called with the exact (key, DiskLoc)
+ // the same key. When dupKeyCheck() is called with the exact (key, RecordId)
// pair that was inserted, it should still return an OK status.
TEST( SortedDataInterface, DupKeyCheckAfterInsert ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
@@ -66,7 +66,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
ASSERT_OK( sorted->dupKeyCheck( opCtx.get(), key1, loc1 ) );
- ASSERT_NOT_OK( sorted->dupKeyCheck( opCtx.get(), key1, minDiskLoc ) );
+ ASSERT_NOT_OK( sorted->dupKeyCheck( opCtx.get(), key1, RecordId::min() ) );
uow.commit();
}
}
@@ -94,7 +94,7 @@ namespace mongo {
}
// Insert a key and verify that dupKeyCheck() acknowledges the duplicate key, even
- // when the inserted key is located at a DiskLoc that comes after the one specified.
+ // when the inserted key is located at a RecordId that comes after the one specified.
TEST( SortedDataInterface, DupKeyCheckWhenDiskLocBefore ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
@@ -122,14 +122,14 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT_NOT_OK( sorted->dupKeyCheck( opCtx.get(), key1, minDiskLoc ) );
+ ASSERT_NOT_OK( sorted->dupKeyCheck( opCtx.get(), key1, RecordId::min() ) );
uow.commit();
}
}
}
// Insert a key and verify that dupKeyCheck() acknowledges the duplicate key, even
- // when the inserted key is located at a DiskLoc that comes before the one specified.
+ // when the inserted key is located at a RecordId that comes before the one specified.
TEST( SortedDataInterface, DupKeyCheckWhenDiskLocAfter ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
@@ -157,7 +157,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT_NOT_OK( sorted->dupKeyCheck( opCtx.get(), key1, maxDiskLoc ) );
+ ASSERT_NOT_OK( sorted->dupKeyCheck( opCtx.get(), key1, RecordId::max() ) );
uow.commit();
}
}
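The dupKeyCheck() contract these tests pin down, restated as a minimal sketch: the check passes only when the lone entry for the key is the caller's own RecordId:

    ASSERT_OK( sorted->dupKeyCheck( opCtx.get(), key1, loc1 ) );
    ASSERT_NOT_OK( sorted->dupKeyCheck( opCtx.get(), key1, RecordId::min() ) );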
diff --git a/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp b/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp
index a1cd7669a31..703d281b520 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp
@@ -52,7 +52,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
BSONObj key = BSON( "" << i );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
uow.commit();
}
diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
index a0bd37aa780..be03fbcc917 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
@@ -43,7 +43,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 2 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 2 ), true );
uow.commit();
}
}
@@ -52,7 +52,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 6, 2 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 6, 2 ), true );
uow.commit();
}
}
@@ -75,7 +75,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), true );
uow.commit();
}
}
@@ -84,7 +84,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 20 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 20 ), true );
uow.commit();
}
}
@@ -103,7 +103,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), true );
uow.commit();
}
}
@@ -112,7 +112,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 20 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 20 ), true );
// no commit
}
}
@@ -131,7 +131,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), false );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), false );
uow.commit();
}
}
@@ -140,7 +140,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 2 ), DiskLoc( 5, 20 ), false );
+ sorted->insert( opCtx.get(), BSON( "" << 2 ), RecordId( 5, 20 ), false );
uow.commit();
}
}
@@ -160,7 +160,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 2 ), false );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 2 ), false );
uow.commit();
}
}
@@ -169,7 +169,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 4 ), false );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 4 ), false );
uow.commit();
}
}
@@ -189,7 +189,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), true );
uow.commit();
}
}
@@ -203,7 +203,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 20 ), true );
+ sorted->unindex( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 20 ), true );
ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
uow.commit();
}
@@ -218,7 +218,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), BSON( "" << 2 ), DiskLoc( 5, 18 ), true );
+ sorted->unindex( opCtx.get(), BSON( "" << 2 ), RecordId( 5, 18 ), true );
ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
uow.commit();
}
@@ -234,7 +234,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), true );
+ sorted->unindex( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), true );
ASSERT( sorted->isEmpty( opCtx.get() ) );
uow.commit();
}
@@ -255,7 +255,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), true );
uow.commit();
}
}
@@ -269,7 +269,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), true );
+ sorted->unindex( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), true );
ASSERT( sorted->isEmpty( opCtx.get() ) );
// no commit
}
@@ -292,7 +292,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << i ), DiskLoc( 5, i * 2 ), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << i ), RecordId( 5, i * 2 ), true ) );
uow.commit();
}
}
@@ -300,10 +300,10 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- cursor->locate( BSONObj(), minDiskLoc );
+ cursor->locate( BSONObj(), RecordId::min() );
int n = 0;
while ( !cursor->isEOF() ) {
- DiskLoc loc = cursor->getDiskLoc();
+ RecordId loc = cursor->getRecordId();
ASSERT_EQUALS( n * 2, loc.getOfs() );
ASSERT_EQUALS( BSON( "" << n ), cursor->getKey() );
n++;
@@ -324,7 +324,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << i ), DiskLoc( 5, i * 2 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << i ), RecordId( 5, i * 2 ), true );
uow.commit();
}
}
@@ -332,10 +332,10 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- cursor->locate( BSONObj(), minDiskLoc );
+ cursor->locate( BSONObj(), RecordId::min() );
int n = 0;
while ( !cursor->isEOF() ) {
- DiskLoc loc = cursor->getDiskLoc();
+ RecordId loc = cursor->getRecordId();
ASSERT_EQUALS( n * 2, loc.getOfs() );
ASSERT_EQUALS( BSON( "" << n ), cursor->getKey() );
n++;
@@ -358,7 +358,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 5 ), DiskLoc( 5, i * 2 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 5 ), RecordId( 5, i * 2 ), true );
uow.commit();
}
}
@@ -366,10 +366,10 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- cursor->locate( BSONObj(), minDiskLoc );
+ cursor->locate( BSONObj(), RecordId::min() );
int n = 0;
while ( !cursor->isEOF() ) {
- DiskLoc loc = cursor->getDiskLoc();
+ RecordId loc = cursor->getRecordId();
ASSERT_EQUALS( n * 2, loc.getOfs() );
n++;
cursor->advance();
@@ -387,7 +387,7 @@ namespace mongo {
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
BSONObj key = BSON( "" << 1 );
- DiskLoc loc( 5, 16 );
+ RecordId loc( 5, 16 );
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
@@ -410,7 +410,7 @@ namespace mongo {
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
ASSERT( cursor->locate( key, loc ) );
ASSERT_EQUALS( key, cursor->getKey() );
- ASSERT_EQUALS( loc, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc, cursor->getRecordId() );
}
}
@@ -423,9 +423,9 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc(1,2), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), DiskLoc(1,4), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 3 ), DiskLoc(1,6), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId(1,2), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), RecordId(1,4), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 3 ), RecordId(1,6), true ) );
uow.commit();
}
}
@@ -433,14 +433,14 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( BSON( "a" << 2 ), DiskLoc(0,0) ) );
+ ASSERT( !cursor->locate( BSON( "a" << 2 ), RecordId(0,0) ) );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 2 ), cursor->getKey() );
- ASSERT_EQUALS( DiskLoc(1,4), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,4), cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( BSON( "" << 3 ), cursor->getKey() );
- ASSERT_EQUALS( DiskLoc(1,6), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,6), cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -456,9 +456,9 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc(1,2), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), DiskLoc(1,4), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 3 ), DiskLoc(1,6), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId(1,2), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), RecordId(1,4), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 3 ), RecordId(1,6), true ) );
uow.commit();
}
}
@@ -466,16 +466,16 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( BSONObj(), DiskLoc(0,0) ) );
+ ASSERT( !cursor->locate( BSONObj(), RecordId(0,0) ) );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 1 ), cursor->getKey() );
- ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,2), cursor->getRecordId() );
}
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( BSONObj(), DiskLoc(0,0) ) );
+ ASSERT( !cursor->locate( BSONObj(), RecordId(0,0) ) );
ASSERT( cursor->isEOF() );
}
@@ -492,47 +492,47 @@ namespace mongo {
if ( i == 6 )
continue;
WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << i ), DiskLoc(1,i*2), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << i ), RecordId(1,i*2), true ) );
uow.commit();
}
}
scoped_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( BSON( "" << 5 ), DiskLoc(0,0) ) );
+ ASSERT( !cursor->locate( BSON( "" << 5 ), RecordId(0,0) ) );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 5 ), cursor->getKey() );
cursor->advance();
ASSERT_EQUALS( BSON( "" << 7 ), cursor->getKey() );
cursor.reset( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( BSON( "" << 5 ), DiskLoc(0,0) ) );
+ ASSERT( !cursor->locate( BSON( "" << 5 ), RecordId(0,0) ) );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 4 ), cursor->getKey() );
cursor.reset( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( BSON( "" << 5 ), maxDiskLoc ) );
+ ASSERT( !cursor->locate( BSON( "" << 5 ), RecordId::max() ) );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 5 ), cursor->getKey() );
cursor->advance();
ASSERT_EQUALS( BSON( "" << 4 ), cursor->getKey() );
cursor.reset( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( BSON( "" << 5 ), minDiskLoc ) );
+ ASSERT( !cursor->locate( BSON( "" << 5 ), RecordId::min() ) );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 4 ), cursor->getKey() );
cursor->advance();
ASSERT_EQUALS( BSON( "" << 3 ), cursor->getKey() );
cursor.reset( sorted->newCursor( opCtx.get(), -1 ) );
- cursor->locate( BSON( "" << 6 ), maxDiskLoc );
+ cursor->locate( BSON( "" << 6 ), RecordId::max() );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 5 ), cursor->getKey() );
cursor->advance();
ASSERT_EQUALS( BSON( "" << 4 ), cursor->getKey() );
cursor.reset( sorted->newCursor( opCtx.get(), -1 ) );
- cursor->locate( BSON( "" << 500 ), maxDiskLoc );
+ cursor->locate( BSON( "" << 500 ), RecordId::max() );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 9 ), cursor->getKey() );
cursor->advance();
@@ -549,10 +549,10 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc(1,2), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc(1,4), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc(1,6), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), DiskLoc(1,8), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId(1,2), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId(1,4), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId(1,6), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), RecordId(1,8), true ) );
uow.commit();
}
}
@@ -560,18 +560,18 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( BSON( "a" << 1 ), minDiskLoc ) );
+ ASSERT( !cursor->locate( BSON( "a" << 1 ), RecordId::min() ) );
ASSERT( !cursor->isEOF() );
- ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,2), cursor->getRecordId() );
cursor->advance();
- ASSERT_EQUALS( DiskLoc(1,4), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,4), cursor->getRecordId() );
cursor->advance();
- ASSERT_EQUALS( DiskLoc(1,6), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,6), cursor->getRecordId() );
cursor->advance();
- ASSERT_EQUALS( DiskLoc(1,8), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,8), cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -580,16 +580,16 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( BSON( "a" << 1 ), maxDiskLoc ) );
+ ASSERT( !cursor->locate( BSON( "a" << 1 ), RecordId::max() ) );
ASSERT( !cursor->isEOF() );
ASSERT( cursor->getDirection() == -1 );
- ASSERT_EQUALS( DiskLoc(1,6), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,6), cursor->getRecordId() );
cursor->advance();
- ASSERT_EQUALS( DiskLoc(1,4), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,4), cursor->getRecordId() );
cursor->advance();
- ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,2), cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.h b/src/mongo/db/storage/sorted_data_interface_test_harness.h
index f059cc114ec..3b830e1a313 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_harness.h
+++ b/src/mongo/db/storage/sorted_data_interface_test_harness.h
@@ -55,14 +55,14 @@ namespace mongo {
const BSONObj compoundKey3b = BSON( "" << 3 << "" << "b" );
const BSONObj compoundKey3c = BSON( "" << 3 << "" << "c" );
- const DiskLoc loc1( 10, 42 );
- const DiskLoc loc2( 10, 44 );
- const DiskLoc loc3( 10, 46 );
- const DiskLoc loc4( 10, 48 );
- const DiskLoc loc5( 10, 50 );
- const DiskLoc loc6( 10, 52 );
- const DiskLoc loc7( 10, 54 );
- const DiskLoc loc8( 10, 56 );
+ const RecordId loc1( 10, 42 );
+ const RecordId loc2( 10, 44 );
+ const RecordId loc3( 10, 46 );
+ const RecordId loc4( 10, 48 );
+ const RecordId loc5( 10, 50 );
+ const RecordId loc6( 10, 52 );
+ const RecordId loc7( 10, 54 );
+ const RecordId loc8( 10, 56 );
class RecoveryUnit;
class SortedDataInterface;
diff --git a/src/mongo/db/storage/sorted_data_interface_test_insert.cpp b/src/mongo/db/storage/sorted_data_interface_test_insert.cpp
index 03e2f98c485..2ae1675ca74 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_insert.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_insert.cpp
@@ -85,7 +85,7 @@ namespace mongo {
}
}
- // Insert multiple, distinct keys at the same DiskLoc and verify that the
+ // Insert multiple, distinct keys at the same RecordId and verify that the
// number of entries in the index equals the number that were inserted, even
// when duplicates are not allowed.
TEST( SortedDataInterface, InsertSameDiskLoc ) {
@@ -127,7 +127,7 @@ namespace mongo {
}
}
- // Insert multiple, distinct keys at the same DiskLoc and verify that the
+ // Insert multiple, distinct keys at the same RecordId and verify that the
// number of entries in the index equals the number that were inserted, even
// when duplicates are allowed.
TEST( SortedDataInterface, InsertSameDiskLocWithDupsAllowed ) {
diff --git a/src/mongo/db/storage/sorted_data_interface_test_spaceused.cpp b/src/mongo/db/storage/sorted_data_interface_test_spaceused.cpp
index 30c4ee75569..f11bbe3b49b 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_spaceused.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_spaceused.cpp
@@ -69,7 +69,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
BSONObj key = BSON( "" << i );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
uow.commit();
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
index c240ef94fe5..108a691a822 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
@@ -73,38 +73,38 @@ namespace {
/**
* Constructs an IndexKeyEntry from a slice containing the bytes of a BSONObject followed
- * by the bytes of a DiskLoc
+ * by the bytes of a RecordId
*/
static IndexKeyEntry makeIndexKeyEntry(const WT_ITEM *keyCols) {
const char* data = reinterpret_cast<const char*>( keyCols->data );
BSONObj key( data );
if ( keyCols->size == static_cast<size_t>( key.objsize() ) ) {
// in unique mode
- return IndexKeyEntry( key, DiskLoc() );
+ return IndexKeyEntry( key, RecordId() );
}
- invariant( keyCols->size == key.objsize() + sizeof(DiskLoc) );
- DiskLoc loc = reinterpret_cast<const DiskLoc*>( data + key.objsize() )[0];
+ invariant( keyCols->size == key.objsize() + sizeof(RecordId) );
+ RecordId loc = reinterpret_cast<const RecordId*>( data + key.objsize() )[0];
return IndexKeyEntry( key, loc );
}
- WiredTigerItem _toItem( const BSONObj& key, const DiskLoc& loc,
+ WiredTigerItem _toItem( const BSONObj& key, const RecordId& loc,
boost::scoped_array<char>*out ) {
- size_t keyLen = key.objsize() + sizeof(DiskLoc);
+ size_t keyLen = key.objsize() + sizeof(RecordId);
out->reset( new char[keyLen] );
memcpy( out->get(), key.objdata(), key.objsize() );
- memcpy( out->get() + key.objsize(), reinterpret_cast<const char*>(&loc), sizeof(DiskLoc) );
+ memcpy( out->get() + key.objsize(), reinterpret_cast<const char*>(&loc), sizeof(RecordId) );
return WiredTigerItem( out->get(), keyLen );
}
- DiskLoc _toDiskLoc( const WT_ITEM& item ) {
- DiskLoc l;
- memcpy( &l, item.data, sizeof(DiskLoc) );
+ RecordId _toDiskLoc( const WT_ITEM& item ) {
+ RecordId l;
+ memcpy( &l, item.data, sizeof(RecordId) );
return l;
}
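
For context on the layout these helpers assume: a non-unique index entry's WiredTiger key is the BSON key bytes immediately followed by the raw RecordId bytes, so decoding is a size check plus pointer arithmetic. A minimal stand-alone sketch of that round trip (a plain struct stands in for RecordId and an opaque byte buffer for the BSON key; both are assumptions of the sketch, not the real types):

    #include <cassert>
    #include <cstring>
    #include <vector>

    struct Loc { int a; int ofs; };   // stand-in for RecordId's raw bytes

    int main() {
        const char key[12] = { 12 };  // pretend BSON blob whose self-reported size is 12
        const int keySize = 12;
        Loc loc; loc.a = 10; loc.ofs = 42;

        // encode: key bytes first, RecordId bytes appended (mirrors _toItem)
        std::vector<char> item(keySize + sizeof(Loc));
        std::memcpy(&item[0], key, keySize);
        std::memcpy(&item[0] + keySize, &loc, sizeof(Loc));

        // decode: whatever follows the key's own size is the RecordId
        // (mirrors makeIndexKeyEntry's non-unique branch)
        Loc out;
        std::memcpy(&out, &item[0] + keySize, sizeof(Loc));
        assert(out.a == 10 && out.ofs == 42);
        return 0;
    }
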
/**
- * Custom comparator used to compare Index Entries by BSONObj and DiskLoc
+ * Custom comparator used to compare Index Entries by BSONObj and RecordId
*/
struct WiredTigerIndexCollator : public WT_COLLATOR {
public:
@@ -223,7 +223,7 @@ namespace {
Status WiredTigerIndex::insert(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed) {
invariant(!loc.isNull());
invariant(loc.isValid());
@@ -241,7 +241,7 @@ namespace {
void WiredTigerIndex::unindex(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed ) {
invariant(!loc.isNull());
invariant(loc.isValid());
@@ -257,7 +257,7 @@ namespace {
void WiredTigerIndex::fullValidate(OperationContext* txn, bool full, long long *numKeysOut,
BSONObjBuilder* output) const {
IndexCursor cursor(*this, txn, true );
- cursor.locate( minKey, minDiskLoc );
+ cursor.locate( minKey, RecordId::min() );
long long count = 0;
while ( !cursor.isEOF() ) {
cursor.advance();
@@ -286,7 +286,7 @@ namespace {
Status WiredTigerIndex::dupKeyCheck( OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc) {
+ const RecordId& loc) {
invariant(!hasFieldNames(key));
invariant(unique());
@@ -316,7 +316,7 @@ namespace {
_uri ) );
}
- bool WiredTigerIndex::isDup(WT_CURSOR *c, const BSONObj& key, const DiskLoc& loc ) {
+ bool WiredTigerIndex::isDup(WT_CURSOR *c, const BSONObj& key, const RecordId& loc ) {
invariant( unique() );
// First check whether the key exists.
WiredTigerItem item( key.objdata(), key.objsize() );
@@ -328,7 +328,7 @@ namespace {
WT_ITEM value;
invariantWTOK( c->get_value(c,&value) );
- DiskLoc found = _toDiskLoc( value );
+ RecordId found = _toDiskLoc( value );
return found != loc;
}
@@ -399,7 +399,7 @@ namespace {
: BulkBuilder(idx, txn) {
}
- Status addKey(const BSONObj& key, const DiskLoc& loc) {
+ Status addKey(const BSONObj& key, const RecordId& loc) {
{
const Status s = checkKeySize(key);
if (!s.isOK())
@@ -407,10 +407,10 @@ namespace {
}
// Build a buffer with the key and loc concatenated.
- const size_t keyLen = key.objsize() + sizeof(DiskLoc);
+ const size_t keyLen = key.objsize() + sizeof(RecordId);
invariant(keyLen <= kBufferSize);
memcpy(_buffer, key.objdata(), key.objsize());
- memcpy(_buffer + key.objsize(), &loc, sizeof(DiskLoc));
+ memcpy(_buffer + key.objsize(), &loc, sizeof(RecordId));
WiredTigerItem item(_buffer, keyLen);
// Can't use WiredTigerCursor since we aren't using the cache.
@@ -431,7 +431,7 @@ namespace {
private:
// Will need to support dynamic sizing if we remove TempKeyMaxSize.
- static const size_t kBufferSize = TempKeyMaxSize + sizeof(DiskLoc);
+ static const size_t kBufferSize = TempKeyMaxSize + sizeof(RecordId);
char _buffer[kBufferSize];
};
@@ -449,7 +449,7 @@ namespace {
: BulkBuilder(idx, txn), _dupsAllowed(dupsAllowed) {
}
- Status addKey(const BSONObj& newKey, const DiskLoc& loc) {
+ Status addKey(const BSONObj& newKey, const RecordId& loc) {
{
const Status s = checkKeySize(newKey);
if (!s.isOK())
@@ -500,7 +500,7 @@ namespace {
_cursor->set_key(_cursor, keyItem.Get());
invariant(_locs.size() > 0);
- WiredTigerItem valueItem(&_locs.front(), _locs.size() * sizeof(DiskLoc));
+ WiredTigerItem valueItem(&_locs.front(), _locs.size() * sizeof(RecordId));
_cursor->set_value(_cursor, valueItem.Get());
invariantWTOK(_cursor->insert(_cursor));
@@ -511,7 +511,7 @@ namespace {
const bool _dupsAllowed;
BSONObj _key;
- std::vector<DiskLoc> _locs;
+ std::vector<RecordId> _locs;
};
SortedDataBuilderInterface* WiredTigerIndex::getBulkBuilder( OperationContext* txn,
@@ -553,26 +553,26 @@ namespace {
else if ( _eof || other._eof )
return false;
- if ( getDiskLoc() != other.getDiskLoc() )
+ if ( getRecordId() != other.getRecordId() )
return false;
return getKey() == other.getKey();
}
- void WiredTigerIndex::IndexCursor::aboutToDeleteBucket(const DiskLoc& bucket) {
+ void WiredTigerIndex::IndexCursor::aboutToDeleteBucket(const RecordId& bucket) {
invariant(!"aboutToDeleteBucket should not be called");
}
- bool WiredTigerIndex::IndexCursor::_locate(const BSONObj &key, const DiskLoc& loc) {
+ bool WiredTigerIndex::IndexCursor::_locate(const BSONObj &key, const RecordId& loc) {
_uniqueLen = -1;
WT_CURSOR *c = _cursor.get();
- DiskLoc searchLoc = loc;
+ RecordId searchLoc = loc;
// A null loc means the search starts at the zero key, to maintain search
// ordering in the collator.
// Reverse cursors should start on the last matching key.
if (loc.isNull())
- searchLoc = _forward ? DiskLoc(0, 0) : DiskLoc(INT_MAX, INT_MAX);
+ searchLoc = _forward ? RecordId(0, 0) : RecordId(INT_MAX, INT_MAX);
boost::scoped_array<char> data;
WiredTigerItem myKey = _toItem( key, searchLoc, &data );
@@ -613,20 +613,20 @@ namespace {
return true;
}
- // we're looking for a specific DiskLoc, lets see if we can find
+ // we're looking for a specific RecordId, let's see if we can find it
WT_ITEM item;
invariantWTOK( c->get_value(c, &item ) );
- _uniqueLen = item.size / sizeof(DiskLoc);
+ _uniqueLen = item.size / sizeof(RecordId);
invariant( _uniqueLen > 0 );
if ( _forward ) {
_uniquePos = 0;
for ( ; _uniquePos < _uniqueLen; _uniquePos++ ) {
- DiskLoc temp;
+ RecordId temp;
memcpy( &temp,
- reinterpret_cast<const char*>(item.data) + ( _uniquePos * sizeof(DiskLoc) ),
- sizeof(DiskLoc) );
+ reinterpret_cast<const char*>(item.data) + ( _uniquePos * sizeof(RecordId) ),
+ sizeof(RecordId) );
if ( temp == loc )
break;
@@ -637,10 +637,10 @@ namespace {
else {
_uniquePos = _uniqueLen-1;
for ( ; _uniquePos >= 0; _uniquePos-- ) {
- DiskLoc temp;
+ RecordId temp;
memcpy( &temp,
- reinterpret_cast<const char*>(item.data) + ( _uniquePos * sizeof(DiskLoc) ),
- sizeof(DiskLoc) );
+ reinterpret_cast<const char*>(item.data) + ( _uniquePos * sizeof(RecordId) ),
+ sizeof(RecordId) );
if ( temp == loc )
break;
@@ -658,12 +658,12 @@ namespace {
return true;
}
- bool WiredTigerIndex::IndexCursor::locate(const BSONObj &key, const DiskLoc& loc) {
+ bool WiredTigerIndex::IndexCursor::locate(const BSONObj &key, const RecordId& loc) {
const BSONObj finalKey = stripFieldNames(key);
bool result = _locate(finalKey, loc);
// An explicit search at the start of the range should always return false
- if (loc == minDiskLoc || loc == maxDiskLoc )
+ if (loc == RecordId::min() || loc == RecordId::max() )
return false;
return result;
}
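
The min()/max() special case above is the caller-facing contract the earlier cursor tests exercise: seeking with RecordId::min() or RecordId::max() never counts as an exact match, but it still positions the cursor. A usage sketch, assuming sorted and opCtx come from the same test harness used above:

    scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
    // An explicit seek from the start of the range always reports false...
    bool exact = cursor->locate( BSON( "" << 5 ), RecordId::min() );
    invariant( !exact );
    // ...yet the cursor is positioned at the first entry >= the seek key.
    if ( !cursor->isEOF() ) {
        BSONObj key = cursor->getKey();
        RecordId loc = cursor->getRecordId();
    }
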
@@ -678,7 +678,7 @@ namespace {
keyBegin, keyBeginLen,
afterKey, keyEnd, keyEndInclusive, getDirection() );
- _locate(key, DiskLoc());
+ _locate(key, RecordId());
}
void WiredTigerIndex::IndexCursor::customLocate(const BSONObj& keyBegin,
@@ -697,9 +697,9 @@ namespace {
return makeIndexKeyEntry(&keyItem).key;
}
- DiskLoc WiredTigerIndex::IndexCursor::getDiskLoc() const {
+ RecordId WiredTigerIndex::IndexCursor::getRecordId() const {
if ( _eof )
- return DiskLoc();
+ return RecordId();
WT_CURSOR *c = _cursor.get();
WT_ITEM item;
@@ -707,12 +707,12 @@ namespace {
invariantWTOK( c->get_value(c, &item ) );
if ( _uniqueLen == -1 ) {
// first time at this spot
- _uniqueLen = item.size / sizeof(DiskLoc);
+ _uniqueLen = item.size / sizeof(RecordId);
invariant( _uniqueLen > 0 );
_uniquePos = 0;
}
- DiskLoc loc;
+ RecordId loc;
int posToUse = _uniquePos;
if ( !_forward )
posToUse = _uniqueLen - 1 - _uniquePos;
@@ -720,8 +720,8 @@ namespace {
memcpy( &loc,
- reinterpret_cast<const char*>(item.data) + ( posToUse * sizeof(DiskLoc) ),
- sizeof(DiskLoc) );
+ reinterpret_cast<const char*>(item.data) + ( posToUse * sizeof(RecordId) ),
+ sizeof(RecordId) );
invariant( posToUse >= 0 && posToUse < _uniqueLen );
@@ -739,7 +739,7 @@ namespace {
if ( _idx.unique() ) {
if ( _uniqueLen == -1 ) {
// we need to investigate
- getDiskLoc();
+ getRecordId();
}
_uniquePos++; // advance
@@ -768,7 +768,7 @@ namespace {
if ( !wt_keeptxnopen() && !_eof ) {
_savedKey = getKey().getOwned();
- _savedLoc = getDiskLoc();
+ _savedLoc = getRecordId();
_cursor.reset();
}
@@ -794,7 +794,7 @@ namespace {
Status WiredTigerIndexUnique::_insert( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed ) {
WiredTigerItem keyItem( key.objdata(), key.objsize() );
@@ -821,14 +821,14 @@ namespace {
WT_ITEM old;
invariantWTOK( c->get_value(c, &old ) );
- std::set<DiskLoc> all;
+ std::set<RecordId> all;
// see if it's already in the array
- for ( size_t i = 0; i < (old.size/sizeof(DiskLoc)); i++ ) {
- DiskLoc temp;
+ for ( size_t i = 0; i < (old.size/sizeof(RecordId)); i++ ) {
+ RecordId temp;
memcpy( &temp,
- reinterpret_cast<const char*>( old.data ) + ( i * sizeof(DiskLoc) ),
- sizeof(DiskLoc) );
+ reinterpret_cast<const char*>( old.data ) + ( i * sizeof(RecordId) ),
+ sizeof(RecordId) );
if ( loc == temp )
return Status::OK();
all.insert( temp );
@@ -841,14 +841,14 @@ namespace {
all.insert( loc );
// not already in the array, so re-serialize the set with it included
- size_t newSize = all.size() * sizeof(DiskLoc);
+ size_t newSize = all.size() * sizeof(RecordId);
boost::scoped_array<char> bigger( new char[newSize] );
size_t offset = 0;
- for ( std::set<DiskLoc>::const_iterator it = all.begin(); it != all.end(); ++it ) {
- DiskLoc dl = *it;
- memcpy( bigger.get() + offset, &dl, sizeof(DiskLoc) );
- offset += sizeof(DiskLoc);
+ for ( std::set<RecordId>::const_iterator it = all.begin(); it != all.end(); ++it ) {
+ RecordId dl = *it;
+ memcpy( bigger.get() + offset, &dl, sizeof(RecordId) );
+ offset += sizeof(RecordId);
}
valueItem = WiredTigerItem( bigger.get(), newSize );
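
The value for a unique key with duplicates is thus nothing more than the sorted RecordIds laid end to end; std::set supplies the ordering and the byte size encodes the count. A self-contained round trip of that packing, with uint64_t standing in for RecordId's raw bytes (an assumption of the sketch):

    #include <cassert>
    #include <cstring>
    #include <set>
    #include <stdint.h>
    #include <vector>

    int main() {
        std::set<uint64_t> all;
        all.insert(2); all.insert(4); all.insert(6);

        // pack: set iteration is already sorted, so the buffer stays ordered
        std::vector<char> value(all.size() * sizeof(uint64_t));
        size_t offset = 0;
        for (std::set<uint64_t>::const_iterator it = all.begin(); it != all.end(); ++it) {
            uint64_t id = *it;
            std::memcpy(&value[0] + offset, &id, sizeof(id));
            offset += sizeof(id);
        }

        // unpack: the entry count falls out of the byte size, as in _insert/_unindex
        size_t num = value.size() / sizeof(uint64_t);
        assert(num == 3);
        for (size_t i = 0; i < num; i++) {
            uint64_t id;
            std::memcpy(&id, &value[0] + i * sizeof(uint64_t), sizeof(id));
            assert(all.count(id) == 1);
        }
        return 0;
    }
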
@@ -858,7 +858,7 @@ namespace {
void WiredTigerIndexUnique::_unindex( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed ) {
WiredTigerItem keyItem( key.objdata(), key.objsize() );
c->set_key( c, keyItem.Get() );
@@ -873,7 +873,7 @@ namespace {
return;
}
- // ups are allowed, so we have to deal with a vector of DiskLoc
+ // dups are allowed, so we have to deal with a vector of RecordId
int ret = c->search(c);
if ( ret == WT_NOTFOUND )
@@ -884,17 +884,17 @@ namespace {
invariantWTOK( c->get_value(c, &old ) );
// see if it's in the array
- size_t num = old.size / sizeof(DiskLoc);
+ size_t num = old.size / sizeof(RecordId);
for ( size_t i = 0; i < num; i++ ) {
- DiskLoc temp;
+ RecordId temp;
memcpy( &temp,
- reinterpret_cast<const char*>( old.data ) + ( i * sizeof(DiskLoc) ),
- sizeof(DiskLoc) );
+ reinterpret_cast<const char*>( old.data ) + ( i * sizeof(RecordId) ),
+ sizeof(RecordId) );
if ( loc != temp )
continue;
// we found it, now let's re-save the array without it
- size_t newSize = old.size - sizeof(DiskLoc);
+ size_t newSize = old.size - sizeof(RecordId);
if ( newSize == 0 ) {
// nothing left, just delete entry
@@ -903,11 +903,11 @@ namespace {
}
boost::scoped_array<char> smaller( new char[newSize] );
- size_t offset = i * sizeof(DiskLoc);
+ size_t offset = i * sizeof(RecordId);
memcpy( smaller.get(), old.data, offset );
memcpy( smaller.get() + offset,
- reinterpret_cast<const char*>( old.data ) + offset + sizeof(DiskLoc),
- old.size - sizeof(DiskLoc) - offset );
+ reinterpret_cast<const char*>( old.data ) + offset + sizeof(RecordId),
+ old.size - sizeof(RecordId) - offset );
WiredTigerItem valueItem = WiredTigerItem( smaller.get(), newSize );
c->set_value( c, valueItem.Get() );
invariantWTOK( c->update( c ) );
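
The splice above is easier to see in isolation: removing entry i from a packed array is one copy for the prefix and one for the suffix, leaving exactly one entry's worth of bytes behind. A toy version, again with uint64_t standing in for RecordId:

    #include <cassert>
    #include <cstring>
    #include <stdint.h>
    #include <vector>

    int main() {
        uint64_t ids[] = { 2, 4, 6 };
        const size_t entry = sizeof(uint64_t);
        const size_t oldSize = sizeof(ids);
        const size_t i = 1;                         // remove the middle entry (4)

        std::vector<char> smaller(oldSize - entry);
        const size_t offset = i * entry;            // bytes before the removed id
        std::memcpy(&smaller[0], ids, offset);      // keep the prefix
        std::memcpy(&smaller[0] + offset,
                    reinterpret_cast<const char*>(ids) + offset + entry,
                    oldSize - entry - offset);      // keep the suffix

        uint64_t out[2];
        std::memcpy(out, &smaller[0], sizeof(out));
        assert(out[0] == 2 && out[1] == 6);
        return 0;
    }
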
@@ -923,7 +923,7 @@ namespace {
Status WiredTigerIndexStandard::_insert( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed ) {
invariant( dupsAllowed );
@@ -943,7 +943,7 @@ namespace {
void WiredTigerIndexStandard::_unindex( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed ) {
invariant( dupsAllowed );
boost::scoped_array<char> data;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
index 53c280aafcb..d97749334df 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
@@ -59,24 +59,24 @@ namespace mongo {
virtual Status insert(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed);
virtual void unindex(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed);
virtual void fullValidate(OperationContext* txn, bool full, long long *numKeysOut,
BSONObjBuilder* output) const;
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const DiskLoc& loc);
+ virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc);
virtual bool isEmpty(OperationContext* txn);
virtual long long getSpaceUsedBytes( OperationContext* txn ) const;
- bool isDup(WT_CURSOR *c, const BSONObj& key, const DiskLoc& loc );
+ bool isDup(WT_CURSOR *c, const BSONObj& key, const RecordId& loc );
virtual SortedDataInterface::Cursor* newCursor(
OperationContext* txn, int direction) const;
@@ -93,12 +93,12 @@ namespace mongo {
virtual Status _insert( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed ) = 0;
virtual void _unindex( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed ) = 0;
class BulkBuilder;
@@ -119,9 +119,9 @@ namespace mongo {
virtual bool pointsToSamePlaceAs(const SortedDataInterface::Cursor &genother) const;
- virtual void aboutToDeleteBucket(const DiskLoc& bucket);
+ virtual void aboutToDeleteBucket(const RecordId& bucket);
- virtual bool locate(const BSONObj &key, const DiskLoc& loc);
+ virtual bool locate(const BSONObj &key, const RecordId& loc);
virtual void customLocate(const BSONObj& keyBegin,
int keyBeginLen,
@@ -137,7 +137,7 @@ namespace mongo {
virtual BSONObj getKey() const;
- virtual DiskLoc getDiskLoc() const;
+ virtual RecordId getRecordId() const;
virtual void advance();
@@ -146,7 +146,7 @@ namespace mongo {
virtual void restorePosition( OperationContext *txn );
private:
- bool _locate(const BSONObj &key, const DiskLoc& loc);
+ bool _locate(const BSONObj &key, const RecordId& loc);
OperationContext *_txn;
WiredTigerCursor _cursor;
@@ -160,7 +160,7 @@ namespace mongo {
// For save/restorePosition check
RecoveryUnit* _savedForCheck;
BSONObj _savedKey;
- DiskLoc _savedLoc;
+ RecordId _savedLoc;
};
const Ordering _ordering;
@@ -177,12 +177,12 @@ namespace mongo {
virtual Status _insert( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed );
virtual void _unindex( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed );
};
@@ -194,12 +194,12 @@ namespace mongo {
virtual Status _insert( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed );
virtual void _unindex( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed );
};
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index 257265d41ec..7d39bf51dd4 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -93,7 +93,7 @@ namespace {
class CappedInsertChange : public RecoveryUnit::Change {
public:
- CappedInsertChange( WiredTigerRecordStore* rs, const DiskLoc& loc )
+ CappedInsertChange( WiredTigerRecordStore* rs, const RecordId& loc )
: _rs( rs ), _loc( loc ) {
}
@@ -107,7 +107,7 @@ namespace {
private:
WiredTigerRecordStore* _rs;
- DiskLoc _loc;
+ RecordId _loc;
};
} // namespace
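
CappedInsertChange is an instance of the RecoveryUnit::Change idiom: a hook registered during a write whose commit() or rollback() fires when the unit of work resolves, which is how an uncommitted capped insert gets published or forgotten. A toy model of the idiom (the real interface lives on RecoveryUnit; the names here are illustrative only):

    #include <iostream>
    #include <vector>

    struct Change {                     // mirrors the shape of RecoveryUnit::Change
        virtual ~Change() {}
        virtual void commit() = 0;
        virtual void rollback() = 0;
    };

    struct ToyCappedInsertChange : public Change {
        explicit ToyCappedInsertChange(long loc) : _loc(loc) {}
        virtual void commit()   { std::cout << "publish " << _loc << std::endl; }
        virtual void rollback() { std::cout << "forget "  << _loc << std::endl; }
        long _loc;
    };

    int main() {
        std::vector<Change*> changes;   // a recovery unit would own these
        changes.push_back(new ToyCappedInsertChange(42));
        const bool committed = true;    // outcome of the unit of work
        for (size_t i = 0; i < changes.size(); i++) {
            if (committed) changes[i]->commit(); else changes[i]->rollback();
            delete changes[i];
        }
        return 0;
    }
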
@@ -193,19 +193,19 @@ namespace {
invariant(_cappedMaxDocs == -1);
}
- // Find the largest DiskLoc currently in use and estimate the number of records.
- scoped_ptr<RecordIterator> iterator( getIterator( ctx, DiskLoc(),
+ // Find the largest RecordId currently in use and estimate the number of records.
+ scoped_ptr<RecordIterator> iterator( getIterator( ctx, RecordId(),
CollectionScanParams::BACKWARD ) );
if ( iterator->isEOF() ) {
_dataSize.store(0);
_numRecords.store(0);
- // Need to start at 1 so we are always higher than minDiskLoc
+ // Need to start at 1 so we are always higher than RecordId::min()
_nextIdNum.store( 1 );
if ( sizeStorer )
_sizeStorer->onCreate( this, 0, 0 );
}
else {
- DiskLoc maxLoc = iterator->curr();
+ RecordId maxLoc = iterator->curr();
uint64_t max = _makeKey( maxLoc );
_oplog_highestSeen = maxLoc;
_nextIdNum.store( 1 + max );
@@ -226,7 +226,7 @@ namespace {
_dataSize.store(0);
while( !iterator->isEOF() ) {
- DiskLoc loc = iterator->getNext();
+ RecordId loc = iterator->getNext();
RecordData data = iterator->dataFor( loc );
_numRecords.fetchAndAdd(1);
_dataSize.fetchAndAdd(data.size());
@@ -314,7 +314,7 @@ namespace {
return RecordData(data.moveFrom(), value.size);
}
- RecordData WiredTigerRecordStore::dataFor(OperationContext* txn, const DiskLoc& loc) const {
+ RecordData WiredTigerRecordStore::dataFor(OperationContext* txn, const RecordId& loc) const {
// ownership passes to the shared_array created below
WiredTigerCursor curwrap( _uri, _instanceId, txn);
WT_CURSOR *c = curwrap.get();
@@ -322,14 +322,14 @@ namespace {
c->set_key(c, _makeKey(loc));
int ret = c->search(c);
massert( 28556,
- "Didn't find DiskLoc in WiredTigerRecordStore",
+ "Didn't find RecordId in WiredTigerRecordStore",
ret != WT_NOTFOUND );
invariantWTOK(ret);
return _getData(curwrap);
}
bool WiredTigerRecordStore::findRecord( OperationContext* txn,
- const DiskLoc& loc, RecordData* out ) const {
+ const RecordId& loc, RecordData* out ) const {
WiredTigerCursor curwrap( _uri, _instanceId, txn);
WT_CURSOR *c = curwrap.get();
invariant( c );
@@ -342,7 +342,7 @@ namespace {
return true;
}
- void WiredTigerRecordStore::deleteRecord( OperationContext* txn, const DiskLoc& loc ) {
+ void WiredTigerRecordStore::deleteRecord( OperationContext* txn, const RecordId& loc ) {
WiredTigerCursor cursor( _uri, _instanceId, txn );
WT_CURSOR *c = cursor.get();
c->set_key(c, _makeKey(loc));
@@ -380,7 +380,7 @@ namespace {
}
void WiredTigerRecordStore::cappedDeleteAsNeeded(OperationContext* txn,
- const DiskLoc& justInserted ) {
+ const RecordId& justInserted ) {
if ( _isOplog ) {
if ( oplogCounter++ % 100 > 0 )
@@ -408,7 +408,7 @@ namespace {
WiredTigerCursor curwrap( _uri, _instanceId, txn);
WT_CURSOR *c = curwrap.get();
int ret = c->next(c);
- DiskLoc oldest;
+ RecordId oldest;
while ( ret == 0 && cappedAndNeedDelete() ) {
WriteUnitOfWork wuow( txn );
@@ -459,23 +459,23 @@ namespace {
}
}
- StatusWith<DiskLoc> WiredTigerRecordStore::extractAndCheckLocForOplog(const char* data,
+ StatusWith<RecordId> WiredTigerRecordStore::extractAndCheckLocForOplog(const char* data,
int len) {
return oploghack::extractKey(data, len);
}
- StatusWith<DiskLoc> WiredTigerRecordStore::insertRecord( OperationContext* txn,
+ StatusWith<RecordId> WiredTigerRecordStore::insertRecord( OperationContext* txn,
const char* data,
int len,
bool enforceQuota ) {
if ( _isCapped && len > _cappedMaxSize ) {
- return StatusWith<DiskLoc>( ErrorCodes::BadValue,
+ return StatusWith<RecordId>( ErrorCodes::BadValue,
"object to insert exceeds cappedMaxSize" );
}
- DiskLoc loc;
+ RecordId loc;
if ( _useOplogHack ) {
- StatusWith<DiskLoc> status = extractAndCheckLocForOplog(data, len);
+ StatusWith<RecordId> status = extractAndCheckLocForOplog(data, len);
if (!status.isOK())
return status;
loc = status.getValue();
@@ -504,7 +504,7 @@ namespace {
c->set_value(c, value.Get());
int ret = c->insert(c);
if ( ret ) {
- return StatusWith<DiskLoc>( wtRCToStatus( ret,
+ return StatusWith<RecordId>( wtRCToStatus( ret,
"WiredTigerRecordStore::insertRecord" ) );
}
@@ -513,10 +513,10 @@ namespace {
cappedDeleteAsNeeded(txn, loc);
- return StatusWith<DiskLoc>( loc );
+ return StatusWith<RecordId>( loc );
}
- void WiredTigerRecordStore::dealtWithCappedLoc( const DiskLoc& loc ) {
+ void WiredTigerRecordStore::dealtWithCappedLoc( const RecordId& loc ) {
boost::mutex::scoped_lock lk( _uncommittedDiskLocsMutex );
SortedDiskLocs::iterator it = std::find(_uncommittedDiskLocs.begin(),
_uncommittedDiskLocs.end(),
@@ -525,7 +525,7 @@ namespace {
_uncommittedDiskLocs.erase(it);
}
- bool WiredTigerRecordStore::isCappedHidden( const DiskLoc& loc ) const {
+ bool WiredTigerRecordStore::isCappedHidden( const RecordId& loc ) const {
boost::mutex::scoped_lock lk( _uncommittedDiskLocsMutex );
if (_uncommittedDiskLocs.empty()) {
return false;
@@ -533,7 +533,7 @@ namespace {
return _uncommittedDiskLocs.front() <= loc;
}
- StatusWith<DiskLoc> WiredTigerRecordStore::insertRecord( OperationContext* txn,
+ StatusWith<RecordId> WiredTigerRecordStore::insertRecord( OperationContext* txn,
const DocWriter* doc,
bool enforceQuota ) {
const int len = doc->documentSize();
@@ -544,8 +544,8 @@ namespace {
return insertRecord( txn, buf.get(), len, enforceQuota );
}
- StatusWith<DiskLoc> WiredTigerRecordStore::updateRecord( OperationContext* txn,
- const DiskLoc& loc,
+ StatusWith<RecordId> WiredTigerRecordStore::updateRecord( OperationContext* txn,
+ const RecordId& loc,
const char* data,
int len,
bool enforceQuota,
@@ -573,11 +573,11 @@ namespace {
cappedDeleteAsNeeded(txn, loc);
- return StatusWith<DiskLoc>( loc );
+ return StatusWith<RecordId>( loc );
}
Status WiredTigerRecordStore::updateWithDamages( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
const RecordData& oldRec,
const char* damangeSource,
const mutablebson::DamageVector& damages ) {
@@ -620,7 +620,7 @@ namespace {
}
RecordIterator* WiredTigerRecordStore::getIterator( OperationContext* txn,
- const DiskLoc& start,
+ const RecordId& start,
const CollectionScanParams::Direction& dir ) const {
if ( _isOplog && dir == CollectionScanParams::FORWARD ) {
WiredTigerRecoveryUnit* wru = WiredTigerRecoveryUnit::get(txn);
@@ -639,7 +639,7 @@ namespace {
// XXX do we want this to actually return a set of iterators?
std::vector<RecordIterator*> iterators;
- iterators.push_back( new Iterator(*this, txn, DiskLoc(),
+ iterators.push_back( new Iterator(*this, txn, RecordId(),
CollectionScanParams::FORWARD, true) );
return iterators;
@@ -649,7 +649,7 @@ namespace {
// TODO: use a WiredTiger fast truncate
boost::scoped_ptr<RecordIterator> iter( getIterator( txn ) );
while( !iter->isEOF() ) {
- DiskLoc loc = iter->getNext();
+ RecordId loc = iter->getNext();
deleteRecord( txn, loc );
}
@@ -684,7 +684,7 @@ namespace {
++nrecords;
if ( full && scanData ) {
size_t dataSize;
- DiskLoc loc = iter->curr();
+ RecordId loc = iter->curr();
RecordData data = dataFor( txn, loc );
Status status = adaptor->validate( data, &dataSize );
if ( !status.isOK() ) {
@@ -760,7 +760,7 @@ namespace {
Status WiredTigerRecordStore::oplogDiskLocRegister( OperationContext* txn,
const OpTime& opTime ) {
- StatusWith<DiskLoc> loc = oploghack::keyForOptime( opTime );
+ StatusWith<RecordId> loc = oploghack::keyForOptime( opTime );
if ( !loc.isOK() )
return loc.getStatus();
@@ -770,7 +770,7 @@ namespace {
}
void WiredTigerRecordStore::_addUncommitedDiskLoc_inlock( OperationContext* txn,
- const DiskLoc& loc ) {
+ const RecordId& loc ) {
// todo: make this a dassert at some point
invariant( _uncommittedDiskLocs.empty() ||
_uncommittedDiskLocs.back() < loc );
@@ -779,10 +779,10 @@ namespace {
_oplog_highestSeen = loc;
}
- DiskLoc WiredTigerRecordStore::oplogStartHack(OperationContext* txn,
- const DiskLoc& startingPosition) const {
+ RecordId WiredTigerRecordStore::oplogStartHack(OperationContext* txn,
+ const RecordId& startingPosition) const {
if (!_useOplogHack)
- return DiskLoc().setInvalid();
+ return RecordId().setInvalid();
{
WiredTigerRecoveryUnit* wru = WiredTigerRecoveryUnit::get(txn);
@@ -796,7 +796,7 @@ namespace {
c->set_key(c, _makeKey(startingPosition));
int ret = c->search_near(c, &cmp);
if (ret == 0 && cmp > 0) ret = c->prev(c); // landed one higher than startingPosition
- if (ret == WT_NOTFOUND) return DiskLoc(); // nothing <= startingPosition
+ if (ret == WT_NOTFOUND) return RecordId(); // nothing <= startingPosition
invariantWTOK(ret);
uint64_t key;
@@ -805,13 +805,13 @@ namespace {
return _fromKey(key);
}
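
The search_near/prev pair above computes the greatest key less than or equal to startingPosition. The same operation over an ordinary ordered map makes the intuition plain (a sketch, with 0 standing in for the null RecordId):

    #include <cassert>
    #include <map>
    #include <stdint.h>

    uint64_t greatestLE(const std::map<uint64_t, int>& m, uint64_t target) {
        std::map<uint64_t, int>::const_iterator it = m.upper_bound(target);
        if (it == m.begin())
            return 0;             // nothing <= target, like returning RecordId()
        --it;                     // step back, like c->prev(c) after search_near
        return it->first;
    }

    int main() {
        std::map<uint64_t, int> m;
        m[10] = 0; m[20] = 0;
        assert(greatestLE(m, 5)  == 0);    // below everything
        assert(greatestLE(m, 15) == 10);   // lands between entries
        assert(greatestLE(m, 20) == 20);   // exact hit
        return 0;
    }
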
- DiskLoc WiredTigerRecordStore::_nextId() {
+ RecordId WiredTigerRecordStore::_nextId() {
invariant(!_useOplogHack);
const uint64_t myId = _nextIdNum.fetchAndAdd(1);
int a = myId >> 32;
// This keeps only the lowest 4 bytes of myId
int ofs = myId & 0x00000000FFFFFFFF;
- DiskLoc loc( a, ofs );
+ RecordId loc( a, ofs );
return loc;
}
@@ -873,13 +873,13 @@ namespace {
}
}
- uint64_t WiredTigerRecordStore::_makeKey( const DiskLoc& loc ) {
+ uint64_t WiredTigerRecordStore::_makeKey( const RecordId& loc ) {
return ((uint64_t)loc.a() << 32 | loc.getOfs());
}
- DiskLoc WiredTigerRecordStore::_fromKey( uint64_t key ) {
+ RecordId WiredTigerRecordStore::_fromKey( uint64_t key ) {
uint32_t a = key >> 32;
uint32_t ofs = (uint32_t)key;
- return DiskLoc(a, ofs);
+ return RecordId(a, ofs);
}
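
This packing is the whole bridge between RecordId and WiredTiger's integer keys: the high 32 bits carry a(), the low 32 bits carry getOfs(), so unsigned order on the packed key matches (a, ofs) order. A worked round trip:

    #include <cassert>
    #include <stdint.h>

    int main() {
        uint32_t a = 2, ofs = 3;
        uint64_t key = ((uint64_t)a << 32) | ofs;    // _makeKey
        assert(key == 0x0000000200000003ULL);
        assert((((uint64_t)1 << 32) | 9) < key);     // (1,9) sorts before (2,3)
        uint32_t a2 = (uint32_t)(key >> 32);         // _fromKey, high half
        uint32_t ofs2 = (uint32_t)key;               // _fromKey, low half
        assert(a2 == a && ofs2 == ofs);
        return 0;
    }
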
// --------
@@ -887,7 +887,7 @@ namespace {
WiredTigerRecordStore::Iterator::Iterator(
const WiredTigerRecordStore& rs,
OperationContext *txn,
- const DiskLoc& start,
+ const RecordId& start,
const CollectionScanParams::Direction& dir,
bool forParallelCollectionScan)
: _rs( rs ),
@@ -904,7 +904,7 @@ namespace {
WiredTigerRecordStore::Iterator::~Iterator() {
}
- void WiredTigerRecordStore::Iterator::_locate(const DiskLoc &loc, bool exact) {
+ void WiredTigerRecordStore::Iterator::_locate(const RecordId &loc, bool exact) {
RS_ITERATOR_TRACE("_locate " << loc);
WT_CURSOR *c = _cursor->get();
invariant( c );
@@ -931,7 +931,7 @@ namespace {
ret = c->search_near(c, &cmp);
if ( ret == WT_NOTFOUND ) {
_eof = true;
- _loc = DiskLoc();
+ _loc = RecordId();
return;
}
invariantWTOK(ret);
@@ -958,10 +958,10 @@ namespace {
}
// Allow const functions to use curr to find current location.
- DiskLoc WiredTigerRecordStore::Iterator::_curr() const {
+ RecordId WiredTigerRecordStore::Iterator::_curr() const {
RS_ITERATOR_TRACE( "_curr" );
if (_eof)
- return DiskLoc();
+ return RecordId();
WT_CURSOR *c = _cursor->get();
dassert( c );
@@ -971,7 +971,7 @@ namespace {
return _fromKey(key);
}
- DiskLoc WiredTigerRecordStore::Iterator::curr() {
+ RecordId WiredTigerRecordStore::Iterator::curr() {
return _loc;
}
@@ -990,7 +990,7 @@ namespace {
_loc = _curr();
RS_ITERATOR_TRACE("_getNext " << ret << " " << _eof << " " << _loc );
if ( _rs._isCapped ) {
- DiskLoc loc = _curr();
+ RecordId loc = _curr();
if ( _readUntilForOplog.isNull() ) {
// this is the normal capped case
if ( _rs.isCappedHidden( loc ) ) {
@@ -1011,13 +1011,13 @@ namespace {
}
if (_eof) {
- _loc = DiskLoc();
+ _loc = RecordId();
}
}
- DiskLoc WiredTigerRecordStore::Iterator::getNext() {
+ RecordId WiredTigerRecordStore::Iterator::getNext() {
RS_ITERATOR_TRACE( "getNext" );
- const DiskLoc toReturn = _loc;
+ const RecordId toReturn = _loc;
RS_ITERATOR_TRACE( "getNext toReturn: " << toReturn );
_getNext();
RS_ITERATOR_TRACE( " ----" );
@@ -1025,7 +1025,7 @@ namespace {
return toReturn;
}
- void WiredTigerRecordStore::Iterator::invalidate( const DiskLoc& dl ) {
+ void WiredTigerRecordStore::Iterator::invalidate( const RecordId& dl ) {
// this should never be called
}
@@ -1069,11 +1069,11 @@ namespace {
invariant( _savedRecoveryUnit == txn->recoveryUnit() );
if ( needRestore || !wt_keeptxnopen() ) {
- DiskLoc saved = _lastLoc;
+ RecordId saved = _lastLoc;
_locate(_lastLoc, false);
RS_ITERATOR_TRACE( "isEOF check " << _eof );
if ( _eof ) {
- _lastLoc = DiskLoc();
+ _lastLoc = RecordId();
}
else if ( _loc != saved ) {
// old doc deleted, we're ok
@@ -1089,7 +1089,7 @@ namespace {
return true;
}
- RecordData WiredTigerRecordStore::Iterator::dataFor( const DiskLoc& loc ) const {
+ RecordData WiredTigerRecordStore::Iterator::dataFor( const RecordId& loc ) const {
// Retrieve the data if the iterator is already positioned at loc, otherwise
// open a new cursor and find the data to avoid upsetting the iterator's
// cursor position.
@@ -1103,12 +1103,12 @@ namespace {
}
void WiredTigerRecordStore::temp_cappedTruncateAfter( OperationContext* txn,
- DiskLoc end,
+ RecordId end,
bool inclusive ) {
WriteUnitOfWork wuow(txn);
boost::scoped_ptr<RecordIterator> iter( getIterator( txn, end ) );
while( !iter->isEOF() ) {
- DiskLoc loc = iter->getNext();
+ RecordId loc = iter->getNext();
if ( end < loc || ( inclusive && end == loc ) ) {
deleteRecord( txn, loc );
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index 3f07d83f055..49e91b81129 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -94,36 +94,36 @@ namespace mongo {
// CRUD related
- virtual RecordData dataFor( OperationContext* txn, const DiskLoc& loc ) const;
+ virtual RecordData dataFor( OperationContext* txn, const RecordId& loc ) const;
- virtual bool findRecord( OperationContext* txn, const DiskLoc& loc, RecordData* out ) const;
+ virtual bool findRecord( OperationContext* txn, const RecordId& loc, RecordData* out ) const;
- virtual void deleteRecord( OperationContext* txn, const DiskLoc& dl );
+ virtual void deleteRecord( OperationContext* txn, const RecordId& dl );
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const char* data,
int len,
bool enforceQuota );
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const DocWriter* doc,
bool enforceQuota );
- virtual StatusWith<DiskLoc> updateRecord( OperationContext* txn,
- const DiskLoc& oldLocation,
+ virtual StatusWith<RecordId> updateRecord( OperationContext* txn,
+ const RecordId& oldLocation,
const char* data,
int len,
bool enforceQuota,
UpdateMoveNotifier* notifier );
virtual Status updateWithDamages( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
const RecordData& oldRec,
const char* damangeSource,
const mutablebson::DamageVector& damages );
virtual RecordIterator* getIterator( OperationContext* txn,
- const DiskLoc& start = DiskLoc(),
+ const RecordId& start = RecordId(),
const CollectionScanParams::Direction& dir =
CollectionScanParams::FORWARD ) const;
@@ -154,11 +154,11 @@ namespace mongo {
BSONObjBuilder* info = NULL );
virtual void temp_cappedTruncateAfter(OperationContext* txn,
- DiskLoc end,
+ RecordId end,
bool inclusive);
- virtual DiskLoc oplogStartHack(OperationContext* txn,
- const DiskLoc& startingPosition) const;
+ virtual RecordId oplogStartHack(OperationContext* txn,
+ const RecordId& startingPosition) const;
virtual Status oplogDiskLocRegister( OperationContext* txn,
const OpTime& opTime );
@@ -177,8 +177,8 @@ namespace mongo {
void setSizeStorer( WiredTigerSizeStorer* ss ) { _sizeStorer = ss; }
- void dealtWithCappedLoc( const DiskLoc& loc );
- bool isCappedHidden( const DiskLoc& loc ) const;
+ void dealtWithCappedLoc( const RecordId& loc );
+ bool isCappedHidden( const RecordId& loc ) const;
private:
@@ -186,24 +186,24 @@ namespace mongo {
public:
Iterator( const WiredTigerRecordStore& rs,
OperationContext* txn,
- const DiskLoc& start,
+ const RecordId& start,
const CollectionScanParams::Direction& dir,
bool forParallelCollectionScan );
virtual ~Iterator();
virtual bool isEOF();
- virtual DiskLoc curr();
- virtual DiskLoc getNext();
- virtual void invalidate(const DiskLoc& dl);
+ virtual RecordId curr();
+ virtual RecordId getNext();
+ virtual void invalidate(const RecordId& dl);
virtual void saveState();
virtual bool restoreState(OperationContext *txn);
- virtual RecordData dataFor( const DiskLoc& loc ) const;
+ virtual RecordData dataFor( const RecordId& loc ) const;
private:
void _getNext();
- void _locate( const DiskLoc &loc, bool exact );
- DiskLoc _curr() const; // const version of public curr method
+ void _locate( const RecordId &loc, bool exact );
+ RecordId _curr() const; // const version of public curr method
const WiredTigerRecordStore& _rs;
OperationContext* _txn;
@@ -212,10 +212,10 @@ namespace mongo {
bool _forParallelCollectionScan;
scoped_ptr<WiredTigerCursor> _cursor;
bool _eof;
- const DiskLoc _readUntilForOplog;
+ const RecordId _readUntilForOplog;
- DiskLoc _loc; // Cached key of _cursor. Update any time _cursor is moved.
- DiskLoc _lastLoc; // the last thing returned from getNext()
+ RecordId _loc; // Cached key of _cursor. Update any time _cursor is moved.
+ RecordId _lastLoc; // the last thing returned from getNext()
};
class NumRecordsChange;
@@ -223,19 +223,19 @@ namespace mongo {
static WiredTigerRecoveryUnit* _getRecoveryUnit( OperationContext* txn );
- static uint64_t _makeKey(const DiskLoc &loc);
- static DiskLoc _fromKey(uint64_t k);
+ static uint64_t _makeKey(const RecordId &loc);
+ static RecordId _fromKey(uint64_t k);
- void _addUncommitedDiskLoc_inlock( OperationContext* txn, const DiskLoc& loc );
+ void _addUncommitedDiskLoc_inlock( OperationContext* txn, const RecordId& loc );
- DiskLoc _nextId();
- void _setId(DiskLoc loc);
+ RecordId _nextId();
+ void _setId(RecordId loc);
bool cappedAndNeedDelete() const;
- void cappedDeleteAsNeeded(OperationContext* txn, const DiskLoc& justInserted );
+ void cappedDeleteAsNeeded(OperationContext* txn, const RecordId& justInserted );
void _changeNumRecords(OperationContext* txn, bool insert);
void _increaseDataSize(OperationContext* txn, int amount);
RecordData _getData( const WiredTigerCursor& cursor) const;
- StatusWith<DiskLoc> extractAndCheckLocForOplog(const char* data, int len);
+ StatusWith<RecordId> extractAndCheckLocForOplog(const char* data, int len);
void _oplogSetStartHack( WiredTigerRecoveryUnit* wru ) const;
const std::string _uri;
@@ -251,10 +251,10 @@ namespace mongo {
const bool _useOplogHack;
- typedef std::vector<DiskLoc> SortedDiskLocs;
+ typedef std::vector<RecordId> SortedDiskLocs;
SortedDiskLocs _uncommittedDiskLocs;
- DiskLoc _oplog_visibleTo;
- DiskLoc _oplog_highestSeen;
+ RecordId _oplog_visibleTo;
+ RecordId _oplog_highestSeen;
mutable boost::mutex _uncommittedDiskLocsMutex;
AtomicUInt64 _nextIdNum;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
index 570eeace31f..1aecd888cee 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
@@ -151,15 +151,15 @@ namespace mongo {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
- DiskLoc loc1;
- DiskLoc loc2;
+ RecordId loc1;
+ RecordId loc2;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), "a", 2, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
ASSERT_OK( res.getStatus() );
loc1 = res.getValue();
@@ -202,15 +202,15 @@ namespace mongo {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
- DiskLoc loc1;
- DiskLoc loc2;
+ RecordId loc1;
+ RecordId loc2;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), "a", 2, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
ASSERT_OK( res.getStatus() );
loc1 = res.getValue();
@@ -268,7 +268,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
for ( int i = 0; i < N; i++ ) {
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), "a", 2, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
ASSERT_OK( res.getStatus() );
}
uow.commit();
@@ -333,7 +333,7 @@ namespace mongo {
rs.reset( NULL ); // this has to be deleted before ss
}
- StatusWith<DiskLoc> insertBSON(scoped_ptr<OperationContext>& opCtx,
+ StatusWith<RecordId> insertBSON(scoped_ptr<OperationContext>& opCtx,
scoped_ptr<RecordStore>& rs,
const OpTime& opTime) {
BSONObj obj = BSON( "ts" << opTime );
@@ -342,8 +342,8 @@ namespace mongo {
invariant( wrs );
Status status = wrs->oplogDiskLocRegister( opCtx.get(), opTime );
if (!status.isOK())
- return StatusWith<DiskLoc>( status );
- StatusWith<DiskLoc> res = rs->insertRecord(opCtx.get(),
+ return StatusWith<RecordId>( status );
+ StatusWith<RecordId> res = rs->insertRecord(opCtx.get(),
obj.objdata(),
obj.objsize(),
false);
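
The assertions just below pin down the oplog-hack mapping: keyForOptime turns OpTime(secs, inc) into RecordId(secs, inc), so timestamp order and record order coincide. A toy check of that ordering claim, with std::pair standing in for both types (an assumption of the sketch):

    #include <cassert>
    #include <utility>

    int main() {
        typedef std::pair<int, int> Id;   // (secs/a, inc/ofs) stand-in
        Id t11(1, 1), t12(1, 2), t22(2, 2);
        // pair's lexicographic order matches both OpTime and RecordId comparison,
        // which is what lets the oplog reuse timestamps as record ids
        assert(t11 < t12 && t12 < t22);
        return 0;
    }
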
@@ -381,52 +381,52 @@ namespace mongo {
// success cases
ASSERT_EQ(insertBSON(opCtx, rs, OpTime(1,1)).getValue(),
- DiskLoc(1,1));
+ RecordId(1,1));
ASSERT_EQ(insertBSON(opCtx, rs, OpTime(1,2)).getValue(),
- DiskLoc(1,2));
+ RecordId(1,2));
ASSERT_EQ(insertBSON(opCtx, rs, OpTime(2,2)).getValue(),
- DiskLoc(2,2));
+ RecordId(2,2));
}
{
scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
// find start
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), DiskLoc(0,1)), DiskLoc()); // nothing <=
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), DiskLoc(2,1)), DiskLoc(1,2)); // between
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), DiskLoc(2,2)), DiskLoc(2,2)); // ==
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), DiskLoc(2,3)), DiskLoc(2,2)); // > highest
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(0,1)), RecordId()); // nothing <=
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,1)), RecordId(1,2)); // between
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,2)), RecordId(2,2)); // ==
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId(2,2)); // > highest
}
{
scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- rs->temp_cappedTruncateAfter(opCtx.get(), DiskLoc(2,2), false); // no-op
+ rs->temp_cappedTruncateAfter(opCtx.get(), RecordId(2,2), false); // no-op
}
{
scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), DiskLoc(2,3)), DiskLoc(2,2));
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId(2,2));
}
{
scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- rs->temp_cappedTruncateAfter(opCtx.get(), DiskLoc(1,2), false); // deletes 2,2
+ rs->temp_cappedTruncateAfter(opCtx.get(), RecordId(1,2), false); // deletes 2,2
}
{
scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), DiskLoc(2,3)), DiskLoc(1,2));
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId(1,2));
}
{
scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- rs->temp_cappedTruncateAfter(opCtx.get(), DiskLoc(1,2), true); // deletes 1,2
+ rs->temp_cappedTruncateAfter(opCtx.get(), RecordId(1,2), true); // deletes 1,2
}
{
scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), DiskLoc(2,3)), DiskLoc(1,1));
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId(1,1));
}
{
@@ -438,7 +438,7 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), DiskLoc(2,3)), DiskLoc());
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId());
}
}
@@ -455,20 +455,20 @@ namespace mongo {
obj.objsize(), false ).getStatus());
wuow.commit();
}
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), DiskLoc(0,1)), DiskLoc().setInvalid());
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(0,1)), RecordId().setInvalid());
}
TEST(WiredTigerRecordStoreTest, CappedOrder) {
scoped_ptr<WiredTigerHarnessHelper> harnessHelper( new WiredTigerHarnessHelper() );
scoped_ptr<RecordStore> rs(harnessHelper->newCappedRecordStore("a.b", 100000,10000));
- DiskLoc loc1;
+ RecordId loc1;
{ // first insert a document
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), "a", 2, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
ASSERT_OK( res.getStatus() );
loc1 = res.getValue();
uow.commit();
@@ -524,7 +524,7 @@ namespace mongo {
}
}
- DiskLoc _oplogOrderInsertOplog( OperationContext* txn,
+ RecordId _oplogOrderInsertOplog( OperationContext* txn,
scoped_ptr<RecordStore>& rs,
int inc ) {
OpTime opTime = OpTime(5,inc);
@@ -532,7 +532,7 @@ namespace mongo {
Status status = wrs->oplogDiskLocRegister( txn, opTime );
ASSERT_OK( status );
BSONObj obj = BSON( "ts" << opTime );
- StatusWith<DiskLoc> res = rs->insertRecord( txn, obj.objdata(), obj.objsize(), false );
+ StatusWith<RecordId> res = rs->insertRecord( txn, obj.objdata(), obj.objsize(), false );
ASSERT_OK( res.getStatus() );
return res.getValue();
}
@@ -549,7 +549,7 @@ namespace mongo {
ASSERT( wrs->usingOplogHack() );
}
- DiskLoc loc1;
+ RecordId loc1;
{ // first insert a document
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
index 8da8ae5772e..9e2cceaf75d 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
@@ -188,7 +188,7 @@ namespace mongo {
}
}
- void WiredTigerRecoveryUnit::setOplogReadTill( const DiskLoc& loc ) {
+ void WiredTigerRecoveryUnit::setOplogReadTill( const RecordId& loc ) {
_oplogReadTill = loc;
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
index a1eb26215c3..b15efcbe75e 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
@@ -85,8 +85,8 @@ namespace mongo {
bool everStartedWrite() const { return _everStartedWrite; }
int depth() const { return _depth; }
- void setOplogReadTill( const DiskLoc& loc );
- DiskLoc getOplogReadTill() const { return _oplogReadTill; }
+ void setOplogReadTill( const RecordId& loc );
+ RecordId getOplogReadTill() const { return _oplogReadTill; }
static WiredTigerRecoveryUnit* get(OperationContext *txn);
@@ -107,7 +107,7 @@ namespace mongo {
Timer _timer;
bool _currentlySquirreled;
bool _syncing;
- DiskLoc _oplogReadTill;
+ RecordId _oplogReadTill;
typedef OwnedPointerVector<Change> Changes;
Changes _changes;