Diffstat (limited to 'src/mongo')
-rw-r--r-- src/mongo/db/catalog/collection.cpp | 62
-rw-r--r-- src/mongo/db/catalog/collection.h | 30
-rw-r--r-- src/mongo/db/catalog/collection_catalog_entry.h | 4
-rw-r--r-- src/mongo/db/catalog/collection_compact.cpp | 2
-rw-r--r-- src/mongo/db/catalog/collection_cursor_cache.cpp | 2
-rw-r--r-- src/mongo/db/catalog/collection_cursor_cache.h | 4
-rw-r--r-- src/mongo/db/catalog/head_manager.h | 4
-rw-r--r-- src/mongo/db/catalog/index_catalog.cpp | 8
-rw-r--r-- src/mongo/db/catalog/index_catalog.h | 8
-rw-r--r-- src/mongo/db/catalog/index_catalog_entry.cpp | 14
-rw-r--r-- src/mongo/db/catalog/index_catalog_entry.h | 8
-rw-r--r-- src/mongo/db/catalog/index_create.cpp | 8
-rw-r--r-- src/mongo/db/catalog/index_create.h | 6
-rw-r--r-- src/mongo/db/cloner.cpp | 6
-rw-r--r-- src/mongo/db/commands/parallel_collection_scan.cpp | 4
-rw-r--r-- src/mongo/db/commands/test_commands.cpp | 4
-rw-r--r-- src/mongo/db/commands/write_commands/batch_executor.cpp | 2
-rw-r--r-- src/mongo/db/dbcommands.cpp | 2
-rw-r--r-- src/mongo/db/dbhelpers.cpp | 20
-rw-r--r-- src/mongo/db/dbhelpers.h | 6
-rw-r--r-- src/mongo/db/exec/and_hash.cpp | 12
-rw-r--r-- src/mongo/db/exec/and_hash.h | 14
-rw-r--r-- src/mongo/db/exec/and_sorted.cpp | 22
-rw-r--r-- src/mongo/db/exec/and_sorted.h | 14
-rw-r--r-- src/mongo/db/exec/cached_plan.cpp | 2
-rw-r--r-- src/mongo/db/exec/cached_plan.h | 4
-rw-r--r-- src/mongo/db/exec/collection_scan.cpp | 4
-rw-r--r-- src/mongo/db/exec/collection_scan.h | 10
-rw-r--r-- src/mongo/db/exec/collection_scan_common.h | 6
-rw-r--r-- src/mongo/db/exec/count.cpp | 2
-rw-r--r-- src/mongo/db/exec/count.h | 2
-rw-r--r-- src/mongo/db/exec/count_scan.cpp | 10
-rw-r--r-- src/mongo/db/exec/count_scan.h | 4
-rw-r--r-- src/mongo/db/exec/delete.cpp | 4
-rw-r--r-- src/mongo/db/exec/delete.h | 4
-rw-r--r-- src/mongo/db/exec/distinct_scan.cpp | 4
-rw-r--r-- src/mongo/db/exec/distinct_scan.h | 4
-rw-r--r-- src/mongo/db/exec/eof.cpp | 2
-rw-r--r-- src/mongo/db/exec/eof.h | 2
-rw-r--r-- src/mongo/db/exec/fetch.cpp | 6
-rw-r--r-- src/mongo/db/exec/fetch.h | 8
-rw-r--r-- src/mongo/db/exec/group.cpp | 2
-rw-r--r-- src/mongo/db/exec/group.h | 2
-rw-r--r-- src/mongo/db/exec/idhack.cpp | 4
-rw-r--r-- src/mongo/db/exec/idhack.h | 4
-rw-r--r-- src/mongo/db/exec/index_scan.cpp | 10
-rw-r--r-- src/mongo/db/exec/index_scan.h | 8
-rw-r--r-- src/mongo/db/exec/keep_mutations.cpp | 2
-rw-r--r-- src/mongo/db/exec/keep_mutations.h | 2
-rw-r--r-- src/mongo/db/exec/limit.cpp | 2
-rw-r--r-- src/mongo/db/exec/limit.h | 2
-rw-r--r-- src/mongo/db/exec/merge_sort.cpp | 6
-rw-r--r-- src/mongo/db/exec/merge_sort.h | 10
-rw-r--r-- src/mongo/db/exec/mock_stage.cpp | 2
-rw-r--r-- src/mongo/db/exec/mock_stage.h | 4
-rw-r--r-- src/mongo/db/exec/mock_stage_test.cpp | 2
-rw-r--r-- src/mongo/db/exec/multi_iterator.cpp | 14
-rw-r--r-- src/mongo/db/exec/multi_iterator.h | 6
-rw-r--r-- src/mongo/db/exec/multi_plan.cpp | 4
-rw-r--r-- src/mongo/db/exec/multi_plan.h | 4
-rw-r--r-- src/mongo/db/exec/near.cpp | 12
-rw-r--r-- src/mongo/db/exec/near.h | 4
-rw-r--r-- src/mongo/db/exec/oplogstart.cpp | 6
-rw-r--r-- src/mongo/db/exec/oplogstart.h | 4
-rw-r--r-- src/mongo/db/exec/or.cpp | 6
-rw-r--r-- src/mongo/db/exec/or.h | 14
-rw-r--r-- src/mongo/db/exec/pipeline_proxy.cpp | 2
-rw-r--r-- src/mongo/db/exec/pipeline_proxy.h | 2
-rw-r--r-- src/mongo/db/exec/plan_stage.h | 8
-rw-r--r-- src/mongo/db/exec/plan_stats.h | 4
-rw-r--r-- src/mongo/db/exec/projection.cpp | 4
-rw-r--r-- src/mongo/db/exec/projection.h | 2
-rw-r--r-- src/mongo/db/exec/projection_exec.cpp | 4
-rw-r--r-- src/mongo/db/exec/shard_filter.cpp | 2
-rw-r--r-- src/mongo/db/exec/shard_filter.h | 2
-rw-r--r-- src/mongo/db/exec/skip.cpp | 2
-rw-r--r-- src/mongo/db/exec/skip.h | 2
-rw-r--r-- src/mongo/db/exec/sort.cpp | 18
-rw-r--r-- src/mongo/db/exec/sort.h | 14
-rw-r--r-- src/mongo/db/exec/subplan.cpp | 2
-rw-r--r-- src/mongo/db/exec/subplan.h | 2
-rw-r--r-- src/mongo/db/exec/text.cpp | 12
-rw-r--r-- src/mongo/db/exec/text.h | 6
-rw-r--r-- src/mongo/db/exec/update.cpp | 14
-rw-r--r-- src/mongo/db/exec/update.h | 8
-rw-r--r-- src/mongo/db/exec/working_set.cpp | 2
-rw-r--r-- src/mongo/db/exec/working_set.h | 6
-rw-r--r-- src/mongo/db/exec/working_set_common.cpp | 6
-rw-r--r-- src/mongo/db/exec/working_set_common.h | 2
-rw-r--r-- src/mongo/db/index/btree_based_access_method.cpp | 26
-rw-r--r-- src/mongo/db/index/btree_based_access_method.h | 16
-rw-r--r-- src/mongo/db/index/btree_based_bulk_access_method.cpp | 8
-rw-r--r-- src/mongo/db/index/btree_based_bulk_access_method.h | 12
-rw-r--r-- src/mongo/db/index/btree_index_cursor.cpp | 10
-rw-r--r-- src/mongo/db/index/btree_index_cursor.h | 4
-rw-r--r-- src/mongo/db/index/haystack_access_method.cpp | 8
-rw-r--r-- src/mongo/db/index/haystack_access_method_internal.h | 6
-rw-r--r-- src/mongo/db/index/index_access_method.h | 10
-rw-r--r-- src/mongo/db/index/index_cursor.h | 2
-rw-r--r-- src/mongo/db/instance.cpp | 2
-rw-r--r-- src/mongo/db/invalidation_type.h | 6
-rw-r--r-- src/mongo/db/query/internal_plans.h | 6
-rw-r--r-- src/mongo/db/query/new_find.cpp | 6
-rw-r--r-- src/mongo/db/query/plan_executor.cpp | 4
-rw-r--r-- src/mongo/db/query/plan_executor.h | 8
-rw-r--r-- src/mongo/db/query/query_solution.cpp | 4
-rw-r--r-- src/mongo/db/query/query_solution.h | 4
-rw-r--r-- src/mongo/db/repl/oplog.cpp | 2
-rw-r--r-- src/mongo/db/repl/rs_rollback.cpp | 8
-rw-r--r-- src/mongo/db/repl/sync.cpp | 2
-rw-r--r-- src/mongo/db/storage/bson_collection_catalog_entry.cpp | 4
-rw-r--r-- src/mongo/db/storage/bson_collection_catalog_entry.h | 6
-rw-r--r-- src/mongo/db/storage/capped_callback.h | 2
-rw-r--r-- src/mongo/db/storage/devnull/devnull_kv_engine.cpp | 34
-rw-r--r-- src/mongo/db/storage/in_memory/in_memory_btree_impl.cpp | 42
-rw-r--r-- src/mongo/db/storage/in_memory/in_memory_record_store.cpp | 114
-rw-r--r-- src/mongo/db/storage/in_memory/in_memory_record_store.h | 58
-rw-r--r-- src/mongo/db/storage/index_entry_comparison.h | 6
-rw-r--r-- src/mongo/db/storage/kv/kv_catalog.cpp | 18
-rw-r--r-- src/mongo/db/storage/kv/kv_catalog.h | 6
-rw-r--r-- src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp | 4
-rw-r--r-- src/mongo/db/storage/kv/kv_collection_catalog_entry.h | 2
-rw-r--r-- src/mongo/db/storage/kv/kv_engine_test_harness.cpp | 14
-rw-r--r-- src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp | 4
-rw-r--r-- src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp | 4
-rw-r--r-- src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp | 4
-rw-r--r-- src/mongo/db/storage/mmap_v1/data_file.cpp | 4
-rw-r--r-- src/mongo/db/storage/mmap_v1/diskloc.h | 28
-rw-r--r-- src/mongo/db/storage/oplog_hack.cpp | 28
-rw-r--r-- src/mongo/db/storage/oplog_hack.h | 8
-rw-r--r-- src/mongo/db/storage/record_store.h | 56
-rw-r--r-- src/mongo/db/storage/record_store_test_datafor.cpp | 12
-rw-r--r-- src/mongo/db/storage/record_store_test_datasize.cpp | 2
-rw-r--r-- src/mongo/db/storage/record_store_test_deleterecord.cpp | 8
-rw-r--r-- src/mongo/db/storage/record_store_test_harness.cpp | 40
-rw-r--r-- src/mongo/db/storage/record_store_test_insertrecord.cpp | 16
-rw-r--r-- src/mongo/db/storage/record_store_test_manyiter.cpp | 20
-rw-r--r-- src/mongo/db/storage/record_store_test_recorditer.cpp | 64
-rw-r--r-- src/mongo/db/storage/record_store_test_repairiter.cpp | 20
-rw-r--r-- src/mongo/db/storage/record_store_test_storagesize.cpp | 2
-rw-r--r-- src/mongo/db/storage/record_store_test_touch.cpp | 4
-rw-r--r-- src/mongo/db/storage/record_store_test_truncate.cpp | 2
-rw-r--r-- src/mongo/db/storage/record_store_test_updaterecord.cpp | 20
-rw-r--r-- src/mongo/db/storage/record_store_test_updaterecord.h | 6
-rw-r--r-- src/mongo/db/storage/record_store_test_updatewithdamages.cpp | 16
-rw-r--r-- src/mongo/db/storage/record_store_test_validate.h | 2
-rw-r--r-- src/mongo/db/storage/rocks/rocks_record_store.cpp | 80
-rw-r--r-- src/mongo/db/storage/rocks/rocks_record_store.h | 46
-rw-r--r-- src/mongo/db/storage/rocks/rocks_sorted_data_impl.cpp | 46
-rw-r--r-- src/mongo/db/storage/rocks/rocks_sorted_data_impl.h | 14
-rw-r--r-- src/mongo/db/storage/sorted_data_interface.h | 34
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_cursor.cpp | 16
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_cursor_advanceto.cpp | 88
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_cursor_locate.cpp | 142
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_cursor_position.cpp | 20
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp | 28
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_dupkeycheck.cpp | 12
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp | 2
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_harness.cpp | 118
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_harness.h | 16
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_insert.cpp | 4
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_spaceused.cpp | 2
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp | 142
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_index.h | 30
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp | 116
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h | 68
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp | 60
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp | 2
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h | 6
-rw-r--r-- src/mongo/dbtests/dbhelper_tests.cpp | 8
-rw-r--r-- src/mongo/dbtests/indexupdatetests.cpp | 22
-rw-r--r-- src/mongo/dbtests/namespacetests.cpp | 48
-rw-r--r-- src/mongo/dbtests/pdfiletests.cpp | 2
-rw-r--r-- src/mongo/dbtests/plan_ranking.cpp | 2
-rw-r--r-- src/mongo/dbtests/query_stage_and.cpp | 34
-rw-r--r-- src/mongo/dbtests/query_stage_collscan.cpp | 10
-rw-r--r-- src/mongo/dbtests/query_stage_count.cpp | 6
-rw-r--r-- src/mongo/dbtests/query_stage_delete.cpp | 6
-rw-r--r-- src/mongo/dbtests/query_stage_distinct.cpp | 2
-rw-r--r-- src/mongo/dbtests/query_stage_fetch.cpp | 10
-rw-r--r-- src/mongo/dbtests/query_stage_keep.cpp | 6
-rw-r--r-- src/mongo/dbtests/query_stage_merge_sort.cpp | 8
-rw-r--r-- src/mongo/dbtests/query_stage_near.cpp | 2
-rw-r--r-- src/mongo/dbtests/query_stage_sort.cpp | 12
-rw-r--r-- src/mongo/dbtests/query_stage_tests.cpp | 2
-rw-r--r-- src/mongo/dbtests/query_stage_update.cpp | 6
-rw-r--r-- src/mongo/dbtests/querytests.cpp | 2
-rw-r--r-- src/mongo/dbtests/repltests.cpp | 8
-rw-r--r-- src/mongo/dbtests/rollbacktests.cpp | 10
-rw-r--r-- src/mongo/s/d_migrate.cpp | 20
-rw-r--r-- src/mongo/s/d_split.cpp | 2
-rw-r--r-- src/mongo/s/d_state.h | 2
192 files changed, 1383 insertions(+), 1379 deletions(-)
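
This commit mechanically renames DiskLoc to RecordId throughout the storage and query layers; the point of the rename is that a record identifier is an opaque token handed out by the storage engine, no longer a literal on-disk location. A minimal before/after sketch of the call-site pattern the rename produces (an illustration built from the hunks below, not a hunk from the commit; error handling elided):

    // Before:
    StatusWith<DiskLoc> loc = collection->insertDocument(txn, doc, true);
    // After:
    StatusWith<RecordId> loc = collection->insertDocument(txn, doc, true);
    if (loc.isOK()) {
        RecordId rid = loc.getValue();  // opaque id; only the storage engine
    }                                   // knows what it maps to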
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 3ee01cbb42c..af1511b9ad4 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -131,7 +131,7 @@ namespace mongo {
}
RecordIterator* Collection::getIterator( OperationContext* txn,
- const DiskLoc& start,
+ const RecordId& start,
const CollectionScanParams::Direction& dir) const {
invariant( ok() );
return _recordStore->getIterator( txn, start, dir );
@@ -143,11 +143,11 @@ namespace mongo {
int64_t Collection::countTableScan( OperationContext* txn, const MatchExpression* expression ) {
scoped_ptr<RecordIterator> iterator( getIterator( txn,
- DiskLoc(),
+ RecordId(),
CollectionScanParams::FORWARD ) );
int64_t count = 0;
while ( !iterator->isEOF() ) {
- DiskLoc loc = iterator->getNext();
+ RecordId loc = iterator->getNext();
BSONObj obj = docFor( txn, loc );
if ( expression->matchesBSON( obj ) )
count++;
@@ -156,11 +156,11 @@ namespace mongo {
return count;
}
- BSONObj Collection::docFor(OperationContext* txn, const DiskLoc& loc) const {
+ BSONObj Collection::docFor(OperationContext* txn, const RecordId& loc) const {
return _recordStore->dataFor( txn, loc ).releaseToBson();
}
- bool Collection::findDoc(OperationContext* txn, const DiskLoc& loc, BSONObj* out) const {
+ bool Collection::findDoc(OperationContext* txn, const RecordId& loc, BSONObj* out) const {
RecordData rd;
if ( !_recordStore->findRecord( txn, loc, &rd ) )
return false;
@@ -168,26 +168,26 @@ namespace mongo {
return true;
}
- StatusWith<DiskLoc> Collection::insertDocument( OperationContext* txn,
+ StatusWith<RecordId> Collection::insertDocument( OperationContext* txn,
const DocWriter* doc,
bool enforceQuota ) {
invariant( !_indexCatalog.haveAnyIndexes() ); // eventually can implement, just not done
- StatusWith<DiskLoc> loc = _recordStore->insertRecord( txn,
+ StatusWith<RecordId> loc = _recordStore->insertRecord( txn,
doc,
_enforceQuota( enforceQuota ) );
if ( !loc.isOK() )
return loc;
- return StatusWith<DiskLoc>( loc );
+ return StatusWith<RecordId>( loc );
}
- StatusWith<DiskLoc> Collection::insertDocument( OperationContext* txn,
+ StatusWith<RecordId> Collection::insertDocument( OperationContext* txn,
const BSONObj& docToInsert,
bool enforceQuota ) {
if ( _indexCatalog.findIdIndex( txn ) ) {
if ( docToInsert["_id"].eoo() ) {
- return StatusWith<DiskLoc>( ErrorCodes::InternalError,
+ return StatusWith<RecordId>( ErrorCodes::InternalError,
str::stream() << "Collection::insertDocument got "
"document without _id for ns:" << _ns.ns() );
}
@@ -196,11 +196,11 @@ namespace mongo {
return _insertDocument( txn, docToInsert, enforceQuota );
}
- StatusWith<DiskLoc> Collection::insertDocument( OperationContext* txn,
+ StatusWith<RecordId> Collection::insertDocument( OperationContext* txn,
const BSONObj& doc,
MultiIndexBlock* indexBlock,
bool enforceQuota ) {
- StatusWith<DiskLoc> loc = _recordStore->insertRecord( txn,
+ StatusWith<RecordId> loc = _recordStore->insertRecord( txn,
doc.objdata(),
doc.objsize(),
_enforceQuota(enforceQuota) );
@@ -210,18 +210,18 @@ namespace mongo {
Status status = indexBlock->insert( doc, loc.getValue() );
if ( !status.isOK() )
- return StatusWith<DiskLoc>( status );
+ return StatusWith<RecordId>( status );
return loc;
}
RecordFetcher* Collection::documentNeedsFetch( OperationContext* txn,
- const DiskLoc& loc ) const {
+ const RecordId& loc ) const {
return _recordStore->recordNeedsFetch( txn, loc );
}
- StatusWith<DiskLoc> Collection::_insertDocument( OperationContext* txn,
+ StatusWith<RecordId> Collection::_insertDocument( OperationContext* txn,
const BSONObj& docToInsert,
bool enforceQuota ) {
@@ -229,26 +229,26 @@ namespace mongo {
// under the RecordStore, this feels broken since that should be a
// collection access method probably
- StatusWith<DiskLoc> loc = _recordStore->insertRecord( txn,
+ StatusWith<RecordId> loc = _recordStore->insertRecord( txn,
docToInsert.objdata(),
docToInsert.objsize(),
_enforceQuota( enforceQuota ) );
if ( !loc.isOK() )
return loc;
- invariant( minDiskLoc < loc.getValue() );
- invariant( loc.getValue() < maxDiskLoc );
+ invariant( RecordId::min() < loc.getValue() );
+ invariant( loc.getValue() < RecordId::max() );
_infoCache.notifyOfWriteOp();
Status s = _indexCatalog.indexRecord(txn, docToInsert, loc.getValue());
if (!s.isOK())
- return StatusWith<DiskLoc>(s);
+ return StatusWith<RecordId>(s);
return loc;
}
- Status Collection::aboutToDeleteCapped( OperationContext* txn, const DiskLoc& loc ) {
+ Status Collection::aboutToDeleteCapped( OperationContext* txn, const RecordId& loc ) {
BSONObj doc = docFor( txn, loc );
@@ -261,7 +261,7 @@ namespace mongo {
}
void Collection::deleteDocument( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
bool cappedOK,
bool noWarn,
BSONObj* deletedId ) {
@@ -293,8 +293,8 @@ namespace mongo {
Counter64 moveCounter;
ServerStatusMetricField<Counter64> moveCounterDisplay( "record.moves", &moveCounter );
- StatusWith<DiskLoc> Collection::updateDocument( OperationContext* txn,
- const DiskLoc& oldLocation,
+ StatusWith<RecordId> Collection::updateDocument( OperationContext* txn,
+ const RecordId& oldLocation,
const BSONObj& objNew,
bool enforceQuota,
OpDebug* debug ) {
@@ -305,7 +305,7 @@ namespace mongo {
BSONElement oldId = objOld["_id"];
BSONElement newId = objNew["_id"];
if ( oldId != newId )
- return StatusWith<DiskLoc>( ErrorCodes::InternalError,
+ return StatusWith<RecordId>( ErrorCodes::InternalError,
"in Collection::updateDocument _id mismatch",
13596 );
}
@@ -332,13 +332,13 @@ namespace mongo {
updateTickets.mutableMap()[descriptor] = updateTicket;
Status ret = iam->validateUpdate(txn, objOld, objNew, oldLocation, options, updateTicket );
if ( !ret.isOK() ) {
- return StatusWith<DiskLoc>( ret );
+ return StatusWith<RecordId>( ret );
}
}
// This can call back into Collection::recordStoreGoingToMove. If that happens, the old
// object is removed from all indexes.
- StatusWith<DiskLoc> newLocation = _recordStore->updateRecord( txn,
+ StatusWith<RecordId> newLocation = _recordStore->updateRecord( txn,
oldLocation,
objNew.objdata(),
objNew.objsize(),
@@ -366,7 +366,7 @@ namespace mongo {
Status s = _indexCatalog.indexRecord(txn, objNew, newLocation.getValue());
if (!s.isOK())
- return StatusWith<DiskLoc>(s);
+ return StatusWith<RecordId>(s);
return newLocation;
}
@@ -384,7 +384,7 @@ namespace mongo {
int64_t updatedKeys;
Status ret = iam->update(txn, *updateTickets.mutableMap()[descriptor], &updatedKeys);
if ( !ret.isOK() )
- return StatusWith<DiskLoc>( ret );
+ return StatusWith<RecordId>( ret );
if ( debug )
debug->keyUpdates += updatedKeys;
}
@@ -396,7 +396,7 @@ namespace mongo {
}
Status Collection::recordStoreGoingToMove( OperationContext* txn,
- const DiskLoc& oldLocation,
+ const RecordId& oldLocation,
const char* oldBuffer,
size_t oldSize ) {
moveCounter.increment();
@@ -407,7 +407,7 @@ namespace mongo {
Status Collection::updateDocumentWithDamages( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages ) {
@@ -514,7 +514,7 @@ namespace mongo {
}
void Collection::temp_cappedTruncateAfter(OperationContext* txn,
- DiskLoc end,
+ RecordId end,
bool inclusive) {
invariant( isCapped() );
reinterpret_cast<CappedRecordStoreV1*>(
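
Note how _insertDocument above now bounds-checks against RecordId::min() and RecordId::max() instead of the old minDiskLoc/maxDiskLoc globals. A self-contained toy model (not the real class) of the RecordId surface that collection.cpp relies on: default-null construction, isNull(), total ordering, and min()/max() sentinels. This compiles and runs on its own under C++14:

    #include <cassert>
    #include <cstdint>

    struct ToyRecordId {                      // stand-in for mongo::RecordId
        int64_t repr = 0;                     // 0 plays the role of the null id in this toy
        bool isNull() const { return repr == 0; }
        static ToyRecordId min() { return ToyRecordId{INT64_MIN}; }
        static ToyRecordId max() { return ToyRecordId{INT64_MAX}; }
        bool operator<(const ToyRecordId& o) const { return repr < o.repr; }
    };

    int main() {
        ToyRecordId loc{42};                  // a valid, non-null id
        assert(!loc.isNull());
        assert(ToyRecordId::min() < loc && loc < ToyRecordId::max());
        return 0;
    }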
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index c0b73b40a40..d5f61e1ec21 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -129,20 +129,20 @@ namespace mongo {
bool requiresIdIndex() const;
- BSONObj docFor(OperationContext* txn, const DiskLoc& loc) const;
+ BSONObj docFor(OperationContext* txn, const RecordId& loc) const;
/**
* @param out - contents set to the right docs if exists, or nothing.
* @return true iff loc exists
*/
- bool findDoc(OperationContext* txn, const DiskLoc& loc, BSONObj* out) const;
+ bool findDoc(OperationContext* txn, const RecordId& loc, BSONObj* out) const;
// ---- things that should move to a CollectionAccessMethod like thing
/**
* Default arguments will return all items in the collection.
*/
RecordIterator* getIterator( OperationContext* txn,
- const DiskLoc& start = DiskLoc(),
+ const RecordId& start = RecordId(),
const CollectionScanParams::Direction& dir = CollectionScanParams::FORWARD ) const;
/**
@@ -161,7 +161,7 @@ namespace mongo {
int64_t countTableScan( OperationContext* txn, const MatchExpression* expression );
void deleteDocument( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
bool cappedOK = false,
bool noWarn = false,
BSONObj* deletedId = 0 );
@@ -172,15 +172,15 @@ namespace mongo {
*
* If enforceQuota is false, quotas will be ignored.
*/
- StatusWith<DiskLoc> insertDocument( OperationContext* txn,
+ StatusWith<RecordId> insertDocument( OperationContext* txn,
const BSONObj& doc,
bool enforceQuota );
- StatusWith<DiskLoc> insertDocument( OperationContext* txn,
+ StatusWith<RecordId> insertDocument( OperationContext* txn,
const DocWriter* doc,
bool enforceQuota );
- StatusWith<DiskLoc> insertDocument( OperationContext* txn,
+ StatusWith<RecordId> insertDocument( OperationContext* txn,
const BSONObj& doc,
MultiIndexBlock* indexBlock,
bool enforceQuota );
@@ -195,7 +195,7 @@ namespace mongo {
* Caller takes ownership of the returned RecordFetcher*.
*/
RecordFetcher* documentNeedsFetch( OperationContext* txn,
- const DiskLoc& loc ) const;
+ const RecordId& loc ) const;
/**
* updates the document @ oldLocation with newDoc
@@ -203,8 +203,8 @@ namespace mongo {
* if not, it is moved
* @return the post update location of the doc (may or may not be the same as oldLocation)
*/
- StatusWith<DiskLoc> updateDocument( OperationContext* txn,
- const DiskLoc& oldLocation,
+ StatusWith<RecordId> updateDocument( OperationContext* txn,
+ const RecordId& oldLocation,
const BSONObj& newDoc,
bool enforceQuota,
OpDebug* debug );
@@ -213,7 +213,7 @@ namespace mongo {
* right now not allowed to modify indexes
*/
Status updateDocumentWithDamages( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages );
@@ -254,7 +254,7 @@ namespace mongo {
* @param inclusive - Truncate 'end' as well iff true
* XXX: this will go away soon, just needed to move for now
*/
- void temp_cappedTruncateAfter( OperationContext* txn, DiskLoc end, bool inclusive );
+ void temp_cappedTruncateAfter( OperationContext* txn, RecordId end, bool inclusive );
// -----------
@@ -284,18 +284,18 @@ namespace mongo {
private:
Status recordStoreGoingToMove( OperationContext* txn,
- const DiskLoc& oldLocation,
+ const RecordId& oldLocation,
const char* oldBuffer,
size_t oldSize );
- Status aboutToDeleteCapped( OperationContext* txn, const DiskLoc& loc );
+ Status aboutToDeleteCapped( OperationContext* txn, const RecordId& loc );
/**
* same semantics as insertDocument, but doesn't do:
* - some user error checks
* - adjust padding
*/
- StatusWith<DiskLoc> _insertDocument( OperationContext* txn,
+ StatusWith<RecordId> _insertDocument( OperationContext* txn,
const BSONObj& doc,
bool enforceQuota );
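
A hedged usage sketch of the updated write API above (txn, coll, oldLoc, newDoc, and debug are assumed to be in scope; per the header comment, the post-update location may or may not equal the old one, since the document can move):

    StatusWith<RecordId> res = coll->updateDocument(txn, oldLoc, newDoc,
                                                    true /* enforceQuota */, &debug);
    if (!res.isOK())
        return res.getStatus();
    RecordId newLoc = res.getValue();  // may differ from oldLoc if the doc moved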
diff --git a/src/mongo/db/catalog/collection_catalog_entry.h b/src/mongo/db/catalog/collection_catalog_entry.h
index db4db802f95..8f91f72402e 100644
--- a/src/mongo/db/catalog/collection_catalog_entry.h
+++ b/src/mongo/db/catalog/collection_catalog_entry.h
@@ -72,12 +72,12 @@ namespace mongo {
const StringData& indexName,
bool multikey = true) = 0;
- virtual DiskLoc getIndexHead( OperationContext* txn,
+ virtual RecordId getIndexHead( OperationContext* txn,
const StringData& indexName ) const = 0;
virtual void setIndexHead( OperationContext* txn,
const StringData& indexName,
- const DiskLoc& newHead ) = 0;
+ const RecordId& newHead ) = 0;
virtual bool isIndexReady( OperationContext* txn,
const StringData& indexName ) const = 0;
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index 84417161493..911ef28979a 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -85,7 +85,7 @@ namespace mongo {
return recData.toBson().objsize();
}
- virtual void inserted( const RecordData& recData, const DiskLoc& newLocation ) {
+ virtual void inserted( const RecordData& recData, const RecordId& newLocation ) {
_multiIndexBlock->insert( recData.toBson(), newLocation );
}
diff --git a/src/mongo/db/catalog/collection_cursor_cache.cpp b/src/mongo/db/catalog/collection_cursor_cache.cpp
index 582ca22c715..cd5535f1c97 100644
--- a/src/mongo/db/catalog/collection_cursor_cache.cpp
+++ b/src/mongo/db/catalog/collection_cursor_cache.cpp
@@ -347,7 +347,7 @@ namespace mongo {
}
void CollectionCursorCache::invalidateDocument( OperationContext* txn,
- const DiskLoc& dl,
+ const RecordId& dl,
InvalidationType type ) {
if ( supportsDocLocking() ) {
// If a storage engine supports doc locking, then we do not need to invalidate.
diff --git a/src/mongo/db/catalog/collection_cursor_cache.h b/src/mongo/db/catalog/collection_cursor_cache.h
index 3af0975284b..9b0de262ceb 100644
--- a/src/mongo/db/catalog/collection_cursor_cache.h
+++ b/src/mongo/db/catalog/collection_cursor_cache.h
@@ -63,10 +63,10 @@ namespace mongo {
/**
* Broadcast a document invalidation to all relevant PlanExecutor(s). invalidateDocument
- * must called *before* the provided DiskLoc is about to be deleted or mutated.
+ * must be called *before* the provided RecordId is about to be deleted or mutated.
*/
void invalidateDocument( OperationContext* txn,
- const DiskLoc& dl,
+ const RecordId& dl,
InvalidationType type );
/*
diff --git a/src/mongo/db/catalog/head_manager.h b/src/mongo/db/catalog/head_manager.h
index de1b81d11b8..f3b198b276e 100644
--- a/src/mongo/db/catalog/head_manager.h
+++ b/src/mongo/db/catalog/head_manager.h
@@ -42,9 +42,9 @@ namespace mongo {
public:
virtual ~HeadManager() { }
- virtual const DiskLoc getHead(OperationContext* txn) const = 0;
+ virtual const RecordId getHead(OperationContext* txn) const = 0;
- virtual void setHead(OperationContext* txn, const DiskLoc newHead) = 0;
+ virtual void setHead(OperationContext* txn, const RecordId newHead) = 0;
};
} // namespace mongo
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 81c339b6656..2a4f1ed04d7 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -1030,7 +1030,7 @@ namespace {
Status IndexCatalog::_indexRecord(OperationContext* txn,
IndexCatalogEntry* index,
const BSONObj& obj,
- const DiskLoc &loc ) {
+ const RecordId &loc ) {
InsertDeleteOptions options;
options.logIfError = false;
options.dupsAllowed = isDupsAllowed( index->descriptor() );
@@ -1042,7 +1042,7 @@ namespace {
Status IndexCatalog::_unindexRecord(OperationContext* txn,
IndexCatalogEntry* index,
const BSONObj& obj,
- const DiskLoc &loc,
+ const RecordId &loc,
bool logIfError) {
InsertDeleteOptions options;
options.logIfError = logIfError;
@@ -1063,7 +1063,7 @@ namespace {
Status IndexCatalog::indexRecord(OperationContext* txn,
const BSONObj& obj,
- const DiskLoc &loc ) {
+ const RecordId &loc ) {
for ( IndexCatalogEntryContainer::const_iterator i = _entries.begin();
i != _entries.end();
@@ -1078,7 +1078,7 @@ namespace {
void IndexCatalog::unindexRecord(OperationContext* txn,
const BSONObj& obj,
- const DiskLoc& loc,
+ const RecordId& loc,
bool noWarn) {
for ( IndexCatalogEntryContainer::const_iterator i = _entries.begin();
diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h
index d79806ca9c5..9c890b5a6fd 100644
--- a/src/mongo/db/catalog/index_catalog.h
+++ b/src/mongo/db/catalog/index_catalog.h
@@ -272,11 +272,11 @@ namespace mongo {
// ----- data modifiers ------
// this throws for now
- Status indexRecord(OperationContext* txn, const BSONObj& obj, const DiskLoc &loc);
+ Status indexRecord(OperationContext* txn, const BSONObj& obj, const RecordId &loc);
void unindexRecord(OperationContext* txn,
const BSONObj& obj,
- const DiskLoc& loc,
+ const RecordId& loc,
bool noWarn);
// ------- temp internal -------
@@ -311,12 +311,12 @@ namespace mongo {
Status _indexRecord(OperationContext* txn,
IndexCatalogEntry* index,
const BSONObj& obj,
- const DiskLoc &loc );
+ const RecordId &loc );
Status _unindexRecord(OperationContext* txn,
IndexCatalogEntry* index,
const BSONObj& obj,
- const DiskLoc &loc,
+ const RecordId &loc,
bool logIfError);
/**
diff --git a/src/mongo/db/catalog/index_catalog_entry.cpp b/src/mongo/db/catalog/index_catalog_entry.cpp
index 45c2695d244..31a0aaba355 100644
--- a/src/mongo/db/catalog/index_catalog_entry.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry.cpp
@@ -52,11 +52,11 @@ namespace mongo {
HeadManagerImpl(IndexCatalogEntry* ice) : _catalogEntry(ice) { }
virtual ~HeadManagerImpl() { }
- const DiskLoc getHead(OperationContext* txn) const {
+ const RecordId getHead(OperationContext* txn) const {
return _catalogEntry->head(txn);
}
- void setHead(OperationContext* txn, const DiskLoc newHead) {
+ void setHead(OperationContext* txn, const RecordId newHead) {
_catalogEntry->setHead(txn, newHead);
}
@@ -99,7 +99,7 @@ namespace mongo {
_isMultikey = _catalogIsMultikey( txn );
}
- const DiskLoc& IndexCatalogEntry::head( OperationContext* txn ) const {
+ const RecordId& IndexCatalogEntry::head( OperationContext* txn ) const {
DEV invariant( _head == _catalogHead( txn ) );
return _head;
}
@@ -121,17 +121,17 @@ namespace mongo {
class IndexCatalogEntry::SetHeadChange : public RecoveryUnit::Change {
public:
- SetHeadChange(IndexCatalogEntry* ice, DiskLoc oldHead) :_ice(ice), _oldHead(oldHead) {
+ SetHeadChange(IndexCatalogEntry* ice, RecordId oldHead) :_ice(ice), _oldHead(oldHead) {
}
virtual void commit() {}
virtual void rollback() { _ice->_head = _oldHead; }
IndexCatalogEntry* _ice;
- const DiskLoc _oldHead;
+ const RecordId _oldHead;
};
- void IndexCatalogEntry::setHead( OperationContext* txn, DiskLoc newHead ) {
+ void IndexCatalogEntry::setHead( OperationContext* txn, RecordId newHead ) {
_collection->setIndexHead( txn,
_descriptor->indexName(),
newHead );
@@ -207,7 +207,7 @@ namespace mongo {
return _collection->isIndexReady( txn, _descriptor->indexName() );
}
- DiskLoc IndexCatalogEntry::_catalogHead( OperationContext* txn ) const {
+ RecordId IndexCatalogEntry::_catalogHead( OperationContext* txn ) const {
return _collection->getIndexHead( txn, _descriptor->indexName() );
}
diff --git a/src/mongo/db/catalog/index_catalog_entry.h b/src/mongo/db/catalog/index_catalog_entry.h
index a71531ac1de..aed569daa0d 100644
--- a/src/mongo/db/catalog/index_catalog_entry.h
+++ b/src/mongo/db/catalog/index_catalog_entry.h
@@ -70,9 +70,9 @@ namespace mongo {
/// ---------------------
- const DiskLoc& head( OperationContext* txn ) const;
+ const RecordId& head( OperationContext* txn ) const;
- void setHead( OperationContext* txn, DiskLoc newHead );
+ void setHead( OperationContext* txn, RecordId newHead );
void setIsReady( bool newIsReady );
@@ -93,7 +93,7 @@ namespace mongo {
class SetHeadChange;
bool _catalogIsReady( OperationContext* txn ) const;
- DiskLoc _catalogHead( OperationContext* txn ) const;
+ RecordId _catalogHead( OperationContext* txn ) const;
bool _catalogIsMultikey( OperationContext* txn ) const;
// -----
@@ -115,7 +115,7 @@ namespace mongo {
Ordering _ordering; // TODO: this might be b-tree specific
bool _isReady; // cache of NamespaceDetails info
- DiskLoc _head; // cache of IndexDetails
+ RecordId _head; // cache of IndexDetails
bool _isMultikey; // cache of NamespaceDetails info
};
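
The SetHeadChange class above is an instance of the RecoveryUnit::Change pattern: capture the old cached value when the change is registered, do nothing on commit, restore the cache on rollback. A self-contained toy model of that pattern (ToyChange stands in for RecoveryUnit::Change; the real class is registered with the recovery unit rather than invoked by hand):

    #include <cassert>
    #include <cstdint>

    struct ToyChange {                        // models RecoveryUnit::Change
        int64_t* cachedHead;
        int64_t oldHead;
        void commit() {}                      // new head survives the transaction
        void rollback() { *cachedHead = oldHead; }  // restore the cached value
    };

    int main() {
        int64_t head = 7;
        ToyChange change{&head, head};        // capture state before mutating
        head = 9;                             // speculative update
        change.rollback();                    // transaction aborted
        assert(head == 7);
        return 0;
    }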
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index efe40f754b6..83f49b2cb41 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -203,7 +203,7 @@ namespace mongo {
_collection->getIndexCatalog()->unregisterIndexBuild(descriptor);
}
- Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<DiskLoc>* dupsOut) {
+ Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsOut) {
const char* curopMessage = _buildInBackground ? "Index Build (background)" : "Index Build";
ProgressMeter* progress = _txn->setMessage(curopMessage,
curopMessage,
@@ -222,7 +222,7 @@ namespace mongo {
}
BSONObj objToIndex;
- DiskLoc loc;
+ RecordId loc;
PlanExecutor::ExecState state;
while (PlanExecutor::ADVANCED == (state = exec->getNext(&objToIndex, &loc))) {
{
@@ -271,7 +271,7 @@ namespace mongo {
return Status::OK();
}
- Status MultiIndexBlock::insert(const BSONObj& doc, const DiskLoc& loc) {
+ Status MultiIndexBlock::insert(const BSONObj& doc, const RecordId& loc) {
for ( size_t i = 0; i < _indexes.size(); i++ ) {
int64_t unused;
Status idxStatus = _indexes[i].forInsert()->insert( _txn,
@@ -285,7 +285,7 @@ namespace mongo {
return Status::OK();
}
- Status MultiIndexBlock::doneInserting(std::set<DiskLoc>* dupsOut) {
+ Status MultiIndexBlock::doneInserting(std::set<RecordId>* dupsOut) {
for ( size_t i = 0; i < _indexes.size(); i++ ) {
if ( _indexes[i].bulk == NULL )
continue;
diff --git a/src/mongo/db/catalog/index_create.h b/src/mongo/db/catalog/index_create.h
index a0830e6147d..d28fe33cc23 100644
--- a/src/mongo/db/catalog/index_create.h
+++ b/src/mongo/db/catalog/index_create.h
@@ -139,7 +139,7 @@ namespace mongo {
*
* Should not be called inside of a WriteUnitOfWork.
*/
- Status insertAllDocumentsInCollection(std::set<DiskLoc>* dupsOut = NULL);
+ Status insertAllDocumentsInCollection(std::set<RecordId>* dupsOut = NULL);
/**
* Call this after init() for each document in the collection.
@@ -148,7 +148,7 @@ namespace mongo {
*
* Should be called inside of a WriteUnitOfWork.
*/
- Status insert(const BSONObj& wholeDocument, const DiskLoc& loc);
+ Status insert(const BSONObj& wholeDocument, const RecordId& loc);
/**
* Call this after the last insert(). This gives the index builder a chance to do any
@@ -162,7 +162,7 @@ namespace mongo {
*
* Should not be called inside of a WriteUnitOfWork.
*/
- Status doneInserting(std::set<DiskLoc>* dupsOut = NULL);
+ Status doneInserting(std::set<RecordId>* dupsOut = NULL);
/**
* Marks the index ready for use. Should only be called as the last method after
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 452916c15cf..dc9420c2e2b 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -171,7 +171,7 @@ namespace mongo {
BSONObj js = tmp;
- StatusWith<DiskLoc> loc = collection->insertDocument( txn, js, true );
+ StatusWith<RecordId> loc = collection->insertDocument( txn, js, true );
if ( !loc.isOK() ) {
error() << "error: exception cloning object in " << from_collection
<< ' ' << loc.toString() << " obj:" << js;
@@ -540,7 +540,7 @@ namespace mongo {
// We need to drop objects with duplicate _ids because we didn't do a true
// snapshot and this is before applying oplog operations that occur during the
// initial sync.
- set<DiskLoc> dups;
+ set<RecordId> dups;
MultiIndexBlock indexer(txn, c);
if (opts.mayBeInterrupted)
@@ -549,7 +549,7 @@ namespace mongo {
uassertStatusOK(indexer.init(c->getIndexCatalog()->getDefaultIdIndexSpec()));
uassertStatusOK(indexer.insertAllDocumentsInCollection(&dups));
- for (set<DiskLoc>::const_iterator it = dups.begin(); it != dups.end(); ++it) {
+ for (set<RecordId>::const_iterator it = dups.begin(); it != dups.end(); ++it) {
WriteUnitOfWork wunit(txn);
BSONObj id;
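
The cloner hunk above is the main consumer of the dup-set API from index_create.h: bulk-build the _id index, collect the RecordIds of documents with duplicate _ids, then delete them. A condensed sketch of that flow (txn and c are assumed in scope; the per-duplicate WriteUnitOfWork and oplog details are elided):

    std::set<RecordId> dups;
    MultiIndexBlock indexer(txn, c);
    uassertStatusOK(indexer.init(c->getIndexCatalog()->getDefaultIdIndexSpec()));
    uassertStatusOK(indexer.insertAllDocumentsInCollection(&dups));
    for (std::set<RecordId>::const_iterator it = dups.begin(); it != dups.end(); ++it) {
        // each *it names a document whose _id is a duplicate; the cloner deletes it
    }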
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index 2b951a865b3..471987ee655 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -43,10 +43,10 @@ namespace mongo {
public:
struct ExtentInfo {
- ExtentInfo( DiskLoc dl, size_t s )
+ ExtentInfo( RecordId dl, size_t s )
: diskLoc(dl), size(s) {
}
- DiskLoc diskLoc;
+ RecordId diskLoc;
size_t size;
};
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index cb1245508da..28ed71e00e8 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -79,7 +79,7 @@ namespace mongo {
return false;
}
}
- StatusWith<DiskLoc> res = collection->insertDocument( txn, obj, false );
+ StatusWith<RecordId> res = collection->insertDocument( txn, obj, false );
Status status = res.getStatus();
if (status.isOK()) {
wunit.commit();
@@ -156,7 +156,7 @@ namespace mongo {
Collection* collection = ctx.getCollection();
massert( 13417, "captrunc collection not found or empty", collection);
- DiskLoc end;
+ RecordId end;
{
boost::scoped_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(txn,
nss.ns(),
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index 7fed228ed3f..7c3f805dc1a 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -1106,7 +1106,7 @@ namespace mongo {
txn->lockState()->assertWriteLocked( insertNS );
WriteUnitOfWork wunit(txn);
- StatusWith<DiskLoc> status = collection->insertDocument( txn, docToInsert, true );
+ StatusWith<RecordId> status = collection->insertDocument( txn, docToInsert, true );
if ( !status.isOK() ) {
result->setError(toWriteError(status.getStatus()));
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index fcade5aaaf3..2655ca810a9 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -852,7 +852,7 @@ namespace mongo {
long long size = 0;
long long numObjects = 0;
- DiskLoc loc;
+ RecordId loc;
PlanExecutor::ExecState state;
while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
if ( estimate )
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 3aeb2d6b875..ad9e5c60a15 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -104,7 +104,7 @@ namespace mongo {
const BSONObj &query,
BSONObj& result,
bool requireIndex) {
- DiskLoc loc = findOne( txn, collection, query, requireIndex );
+ RecordId loc = findOne( txn, collection, query, requireIndex );
if ( loc.isNull() )
return false;
result = collection->docFor(txn, loc);
@@ -114,12 +114,12 @@ namespace mongo {
/* fetch a single object from collection ns that matches query
set your db SavedContext first
*/
- DiskLoc Helpers::findOne(OperationContext* txn,
+ RecordId Helpers::findOne(OperationContext* txn,
Collection* collection,
const BSONObj &query,
bool requireIndex) {
if ( !collection )
- return DiskLoc();
+ return RecordId();
CanonicalQuery* cq;
const WhereCallbackReal whereCallback(txn, collection->ns().db());
@@ -139,11 +139,11 @@ namespace mongo {
auto_ptr<PlanExecutor> exec(rawExec);
PlanExecutor::ExecState state;
- DiskLoc loc;
+ RecordId loc;
if (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
return loc;
}
- return DiskLoc();
+ return RecordId();
}
bool Helpers::findById(OperationContext* txn,
@@ -177,14 +177,14 @@ namespace mongo {
BtreeBasedAccessMethod* accessMethod =
static_cast<BtreeBasedAccessMethod*>(catalog->getIndex( desc ));
- DiskLoc loc = accessMethod->findSingle( txn, query["_id"].wrap() );
+ RecordId loc = accessMethod->findSingle( txn, query["_id"].wrap() );
if ( loc.isNull() )
return false;
result = collection->docFor( txn, loc );
return true;
}
- DiskLoc Helpers::findById(OperationContext* txn,
+ RecordId Helpers::findById(OperationContext* txn,
Collection* collection,
const BSONObj& idquery) {
verify(collection);
@@ -388,7 +388,7 @@ namespace mongo {
InternalPlanner::IXSCAN_FETCH));
exec->setYieldPolicy(PlanExecutor::YIELD_AUTO);
- DiskLoc rloc;
+ RecordId rloc;
BSONObj obj;
PlanExecutor::ExecState state;
// This may yield so we cannot touch nsd after this.
@@ -497,7 +497,7 @@ namespace mongo {
Status Helpers::getLocsInRange( OperationContext* txn,
const KeyRange& range,
long long maxChunkSizeBytes,
- set<DiskLoc>* locs,
+ set<RecordId>* locs,
long long* numDocs,
long long* estChunkSizeBytes )
{
@@ -556,7 +556,7 @@ namespace mongo {
// already being queued and will be migrated in the 'transferMods' stage
exec->setYieldPolicy(PlanExecutor::YIELD_AUTO);
- DiskLoc loc;
+ RecordId loc;
PlanExecutor::ExecState state;
while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
if ( !isLargeChunk ) {
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index 06a9b2348de..04ac4c611e1 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -86,7 +86,7 @@ namespace mongo {
BSONObj& result,
bool requireIndex = false);
- static DiskLoc findOne(OperationContext* txn,
+ static RecordId findOne(OperationContext* txn,
Collection* collection,
const BSONObj &query,
bool requireIndex);
@@ -102,7 +102,7 @@ namespace mongo {
/* TODO: should this move into Collection?
* uasserts if no _id index.
* @return null loc if not found */
- static DiskLoc findById(OperationContext* txn,
+ static RecordId findById(OperationContext* txn,
Collection* collection, const BSONObj& query);
/** Get/put the first (or last) object from a collection. Generally only useful if the collection
@@ -191,7 +191,7 @@ namespace mongo {
static Status getLocsInRange( OperationContext* txn,
const KeyRange& range,
long long maxChunkSizeBytes,
- std::set<DiskLoc>* locs,
+ std::set<RecordId>* locs,
long long* numDocs,
long long* estChunkSizeBytes );
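
In the Helpers API above, the null RecordId doubles as "not found", which is why callers test isNull() rather than a status code. A usage sketch following the findOne hunk earlier in this diff (txn, collection, and query assumed in scope):

    RecordId loc = Helpers::findOne(txn, collection, query, false /* requireIndex */);
    if (loc.isNull())
        return false;                           // no matching document
    BSONObj doc = collection->docFor(txn, loc); // fetch by the returned id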
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index 531ff3c432c..8c8ef9e2980 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -220,7 +220,7 @@ namespace mongo {
// We know that we've ADVANCED. See if the WSM is in our table.
WorkingSetMember* member = _ws->get(*out);
- // Maybe the child had an invalidation. We intersect DiskLoc(s) so we can't do anything
+ // Maybe the child had an invalidation. We intersect RecordId(s) so we can't do anything
// with this WSM.
if (!member->hasLoc()) {
_ws->flagForReview(*out);
@@ -279,7 +279,7 @@ namespace mongo {
if (PlanStage::ADVANCED == childStatus) {
WorkingSetMember* member = _ws->get(id);
- // Maybe the child had an invalidation. We intersect DiskLoc(s) so we can't do anything
+ // Maybe the child had an invalidation. We intersect RecordId(s) so we can't do anything
// with this WSM.
if (!member->hasLoc()) {
_ws->flagForReview(id);
@@ -347,7 +347,7 @@ namespace mongo {
if (PlanStage::ADVANCED == childStatus) {
WorkingSetMember* member = _ws->get(id);
- // Maybe the child had an invalidation. We intersect DiskLoc(s) so we can't do anything
+ // Maybe the child had an invalidation. We intersect RecordId(s) so we can't do anything
// with this WSM.
if (!member->hasLoc()) {
_ws->flagForReview(id);
@@ -457,7 +457,7 @@ namespace mongo {
}
}
- void AndHashStage::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void AndHashStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
++_commonStats.invalidates;
if (isEOF()) { return; }
@@ -479,8 +479,8 @@ namespace mongo {
}
}
- // If it's a deletion, we have to forget about the DiskLoc, and since the AND-ing is by
- // DiskLoc we can't continue processing it even with the object.
+ // If it's a deletion, we have to forget about the RecordId, and since the AND-ing is by
+ // RecordId we can't continue processing it even with the object.
//
// If it's a mutation the predicates implied by the AND-ing may no longer be true.
//
diff --git a/src/mongo/db/exec/and_hash.h b/src/mongo/db/exec/and_hash.h
index c4cd77645fb..7a9eb3228f3 100644
--- a/src/mongo/db/exec/and_hash.h
+++ b/src/mongo/db/exec/and_hash.h
@@ -40,14 +40,14 @@
namespace mongo {
/**
- * Reads from N children, each of which must have a valid DiskLoc. Uses a hash table to
+ * Reads from N children, each of which must have a valid RecordId. Uses a hash table to
* intersect the outputs of the N children, and outputs the intersection.
*
- * Preconditions: Valid DiskLoc. More than one child.
+ * Preconditions: Valid RecordId. More than one child.
*
- * Any DiskLoc that we keep a reference to that is invalidated before we are able to return it
+ * Any RecordId that we keep a reference to that is invalidated before we are able to return it
* is fetched and added to the WorkingSet as "flagged for further review." Because this stage
- * operates with DiskLocs, we are unable to evaluate the AND for the invalidated DiskLoc, and it
+ * operates with RecordIds, we are unable to evaluate the AND for the invalidated RecordId, and it
* must be fully matched later.
*/
class AndHashStage : public PlanStage {
@@ -79,7 +79,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
@@ -119,12 +119,12 @@ namespace mongo {
// _dataMap is filled out by the first child and probed by subsequent children. This is the
// hash table that we create by intersecting _children and probe with the last child.
- typedef unordered_map<DiskLoc, WorkingSetID, DiskLoc::Hasher> DataMap;
+ typedef unordered_map<RecordId, WorkingSetID, RecordId::Hasher> DataMap;
DataMap _dataMap;
// Keeps track of what elements from _dataMap subsequent children have seen.
// Only used while _hashingChildren.
- typedef unordered_set<DiskLoc, DiskLoc::Hasher> SeenMap;
+ typedef unordered_set<RecordId, RecordId::Hasher> SeenMap;
SeenMap _seenMap;
// True if we're still intersecting _children[0..._children.size()-1].
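
The DataMap and SeenMap typedefs above pass RecordId::Hasher explicitly because the standard unordered containers need a hash functor for user-defined key types. A self-contained model of the same shape (toy types, not the real ones):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>

    struct ToyRecordId {
        int64_t repr;
        bool operator==(const ToyRecordId& o) const { return repr == o.repr; }
    };
    struct ToyHasher {                        // plays the role of RecordId::Hasher
        std::size_t operator()(const ToyRecordId& r) const {
            return static_cast<std::size_t>(r.repr);
        }
    };

    int main() {
        std::unordered_map<ToyRecordId, int, ToyHasher> dataMap;  // id -> WorkingSetID
        dataMap[ToyRecordId{5}] = 1;
        assert(dataMap.count(ToyRecordId{5}) == 1);
        return 0;
    }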
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index 5405ff76fe0..362a12fdbab 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -71,14 +71,14 @@ namespace mongo {
_specificStats.failedAnd.resize(_children.size());
}
- // If we don't have any nodes that we're work()-ing until they hit a certain DiskLoc...
+ // If we don't have any nodes that we're work()-ing until they hit a certain RecordId...
if (0 == _workingTowardRep.size()) {
- // Get a target DiskLoc.
+ // Get a target RecordId.
return getTargetLoc(out);
}
- // Move nodes toward the target DiskLoc.
- // If all nodes reach the target DiskLoc, return it. The next call to work() will set a new
+ // Move nodes toward the target RecordId.
+ // If all nodes reach the target RecordId, return it. The next call to work() will set a new
// target.
return moveTowardTargetLoc(out);
}
@@ -86,7 +86,7 @@ namespace mongo {
PlanStage::StageState AndSortedStage::getTargetLoc(WorkingSetID* out) {
verify(numeric_limits<size_t>::max() == _targetNode);
verify(WorkingSet::INVALID_ID == _targetId);
- verify(DiskLoc() == _targetLoc);
+ verify(RecordId() == _targetLoc);
// Pick one, and get a loc to work toward.
WorkingSetID id = WorkingSet::INVALID_ID;
@@ -95,7 +95,7 @@ namespace mongo {
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = _ws->get(id);
- // Maybe the child had an invalidation. We intersect DiskLoc(s) so we can't do anything
+ // Maybe the child had an invalidation. We intersect RecordId(s) so we can't do anything
// with this WSM.
if (!member->hasLoc()) {
_ws->flagForReview(id);
@@ -162,7 +162,7 @@ namespace mongo {
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = _ws->get(id);
- // Maybe the child had an invalidation. We intersect DiskLoc(s) so we can't do anything
+ // Maybe the child had an invalidation. We intersect RecordId(s) so we can't do anything
// with this WSM.
if (!member->hasLoc()) {
_ws->flagForReview(id);
@@ -184,7 +184,7 @@ namespace mongo {
_targetNode = numeric_limits<size_t>::max();
_targetId = WorkingSet::INVALID_ID;
- _targetLoc = DiskLoc();
+ _targetLoc = RecordId();
// Everyone hit it, hooray. Return it, if it matches.
if (Filter::passes(toMatchTest, _filter)) {
@@ -284,7 +284,7 @@ namespace mongo {
}
void AndSortedStage::invalidate(OperationContext* txn,
- const DiskLoc& dl,
+ const RecordId& dl,
InvalidationType type) {
++_commonStats.invalidates;
@@ -301,13 +301,13 @@ namespace mongo {
// fetch it, flag for review, and find another _targetLoc.
++_specificStats.flagged;
- // The DiskLoc could still be a valid result so flag it and save it for later.
+ // The RecordId could still be a valid result so flag it and save it for later.
WorkingSetCommon::fetchAndInvalidateLoc(txn, _ws->get(_targetId), _collection);
_ws->flagForReview(_targetId);
_targetId = WorkingSet::INVALID_ID;
_targetNode = numeric_limits<size_t>::max();
- _targetLoc = DiskLoc();
+ _targetLoc = RecordId();
_workingTowardRep = std::queue<size_t>();
}
}
diff --git a/src/mongo/db/exec/and_sorted.h b/src/mongo/db/exec/and_sorted.h
index 285cd9614ac..b6c3a9ed10e 100644
--- a/src/mongo/db/exec/and_sorted.h
+++ b/src/mongo/db/exec/and_sorted.h
@@ -40,15 +40,15 @@
namespace mongo {
/**
- * Reads from N children, each of which must have a valid DiskLoc. Assumes each child produces
- * DiskLocs in sorted order. Outputs the intersection of the DiskLocs outputted by the
+ * Reads from N children, each of which must have a valid RecordId. Assumes each child produces
+ * RecordIds in sorted order. Outputs the intersection of the RecordIds outputted by the
* children.
*
- * Preconditions: Valid DiskLoc. More than one child.
+ * Preconditions: Valid RecordId. More than one child.
*
- * Any DiskLoc that we keep a reference to that is invalidated before we are able to return it
+ * Any RecordId that we keep a reference to that is invalidated before we are able to return it
* is fetched and added to the WorkingSet as "flagged for further review." Because this stage
- * operates with DiskLocs, we are unable to evaluate the AND for the invalidated DiskLoc, and it
+ * operates with RecordIds, we are unable to evaluate the AND for the invalidated RecordId, and it
* must be fully matched later.
*/
class AndSortedStage : public PlanStage {
@@ -63,7 +63,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
@@ -99,7 +99,7 @@ namespace mongo {
// The current node we're AND-ing against.
size_t _targetNode;
- DiskLoc _targetLoc;
+ RecordId _targetLoc;
WorkingSetID _targetId;
// Nodes we're moving forward until they hit the element we're AND-ing.
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 42dce27c931..1996fab826a 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -128,7 +128,7 @@ namespace mongo {
}
void CachedPlanStage::invalidate(OperationContext* txn,
- const DiskLoc& dl,
+ const RecordId& dl,
InvalidationType type) {
if (! _usingBackupChild) {
_mainChildPlan->invalidate(txn, dl, type);
diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h
index 2de911411f7..5f9fe877e3f 100644
--- a/src/mongo/db/exec/cached_plan.h
+++ b/src/mongo/db/exec/cached_plan.h
@@ -41,7 +41,7 @@ namespace mongo {
* This stage outputs its mainChild, and possibly its backup child
* and also updates the cache.
*
- * Preconditions: Valid DiskLoc.
+ * Preconditions: Valid RecordId.
*
*/
class CachedPlanStage : public PlanStage {
@@ -64,7 +64,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index 7539ea96289..4c062ec5d8e 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -112,7 +112,7 @@ namespace mongo {
if (isEOF())
return PlanStage::IS_EOF;
- const DiskLoc curr = _iter->curr();
+ const RecordId curr = _iter->curr();
if (curr.isNull()) {
// We just hit EOF
if (_params.tailable)
@@ -180,7 +180,7 @@ namespace mongo {
}
void CollectionScan::invalidate(OperationContext* txn,
- const DiskLoc& dl,
+ const RecordId& dl,
InvalidationType type) {
++_commonStats.invalidates;
diff --git a/src/mongo/db/exec/collection_scan.h b/src/mongo/db/exec/collection_scan.h
index fc3d05e0fee..f7f434ad86c 100644
--- a/src/mongo/db/exec/collection_scan.h
+++ b/src/mongo/db/exec/collection_scan.h
@@ -40,10 +40,10 @@ namespace mongo {
class OperationContext;
/**
- * Scans over a collection, starting at the DiskLoc provided in params and continuing until
+ * Scans over a collection, starting at the RecordId provided in params and continuing until
* there are no more records in the collection.
*
- * Preconditions: Valid DiskLoc.
+ * Preconditions: Valid RecordId.
*/
class CollectionScan : public PlanStage {
public:
@@ -55,7 +55,7 @@ namespace mongo {
virtual StageState work(WorkingSetID* out);
virtual bool isEOF();
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
@@ -95,10 +95,10 @@ namespace mongo {
bool _isDead;
- DiskLoc _lastSeenLoc;
+ RecordId _lastSeenLoc;
// We allocate a working set member with this id on construction of the stage. It gets
- // used for all fetch requests, changing the DiskLoc as appropriate.
+ // used for all fetch requests, changing the RecordId as appropriate.
const WorkingSetID _wsidForFetch;
// Stats
diff --git a/src/mongo/db/exec/collection_scan_common.h b/src/mongo/db/exec/collection_scan_common.h
index 799f5bdd1bf..9b327598fa5 100644
--- a/src/mongo/db/exec/collection_scan_common.h
+++ b/src/mongo/db/exec/collection_scan_common.h
@@ -41,7 +41,7 @@ namespace mongo {
};
CollectionScanParams() : collection(NULL),
- start(DiskLoc()),
+ start(RecordId()),
direction(FORWARD),
tailable(false),
maxScan(0) { }
@@ -50,9 +50,9 @@ namespace mongo {
// not owned
const Collection* collection;
- // isNull by default. If you specify any value for this, you're responsible for the DiskLoc
+ // isNull by default. If you specify any value for this, you're responsible for the RecordId
// not being invalidated before the first call to work(...).
- DiskLoc start;
+ RecordId start;
Direction direction;
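
A hedged sketch of how these params are filled in by callers (coll assumed in scope; the constructor above supplies the defaults):

    CollectionScanParams params;              // start defaults to RecordId(), i.e. null
    params.collection = coll;                 // not owned
    params.direction = CollectionScanParams::FORWARD;
    // A null start means "scan from the beginning". Per the comment above, a
    // non-null start must not be invalidated before the first work(...) call.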
diff --git a/src/mongo/db/exec/count.cpp b/src/mongo/db/exec/count.cpp
index 58cf1bec598..cc31f30898b 100644
--- a/src/mongo/db/exec/count.cpp
+++ b/src/mongo/db/exec/count.cpp
@@ -180,7 +180,7 @@ namespace mongo {
}
}
- void CountStage::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void CountStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
++_commonStats.invalidates;
if (_child.get()) {
_child->invalidate(txn, dl, type);
diff --git a/src/mongo/db/exec/count.h b/src/mongo/db/exec/count.h
index a099ca71394..e0b7d261bd2 100644
--- a/src/mongo/db/exec/count.h
+++ b/src/mongo/db/exec/count.h
@@ -88,7 +88,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
diff --git a/src/mongo/db/exec/count_scan.cpp b/src/mongo/db/exec/count_scan.cpp
index 09d4b098d1d..b860032f33f 100644
--- a/src/mongo/db/exec/count_scan.cpp
+++ b/src/mongo/db/exec/count_scan.cpp
@@ -117,7 +117,7 @@ namespace mongo {
if (isEOF()) { return PlanStage::IS_EOF; }
- DiskLoc loc = _btreeCursor->getValue();
+ RecordId loc = _btreeCursor->getValue();
_btreeCursor->next();
checkEnd();
@@ -203,18 +203,18 @@ namespace mongo {
checkEnd();
}
- void CountScan::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void CountScan::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
++_commonStats.invalidates;
- // The only state we're responsible for holding is what DiskLocs to drop. If a document
+ // The only state we're responsible for holding is what RecordIds to drop. If a document
// mutates the underlying index cursor will deal with it.
if (INVALIDATION_MUTATION == type) {
return;
}
- // If we see this DiskLoc again, it may not be the same document it was before, so we want
+ // If we see this RecordId again, it may not be the same document it was before, so we want
// to return it if we see it again.
- unordered_set<DiskLoc, DiskLoc::Hasher>::iterator it = _returned.find(dl);
+ unordered_set<RecordId, RecordId::Hasher>::iterator it = _returned.find(dl);
if (it != _returned.end()) {
_returned.erase(it);
}
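
One property of the dedup logic above worth noting: unordered_set::erase(key) already ignores absent keys, so the find-then-erase pair is behaviorally equivalent to the single call below (a simplification note, not a change made by this commit):

    _returned.erase(dl);  // no-op if dl was never returned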
diff --git a/src/mongo/db/exec/count_scan.h b/src/mongo/db/exec/count_scan.h
index 26013004cc0..29c5d030fe7 100644
--- a/src/mongo/db/exec/count_scan.h
+++ b/src/mongo/db/exec/count_scan.h
@@ -75,7 +75,7 @@ namespace mongo {
virtual bool isEOF();
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
@@ -117,7 +117,7 @@ namespace mongo {
boost::scoped_ptr<BtreeIndexCursor> _endCursor;
// Could our index have duplicates? If so, we use _returned to dedup.
- unordered_set<DiskLoc, DiskLoc::Hasher> _returned;
+ unordered_set<RecordId, RecordId::Hasher> _returned;
CountScanParams _params;
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index d1244f1219b..6d9e2829cbf 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -89,7 +89,7 @@ namespace mongo {
errmsg));
return PlanStage::FAILURE;
}
- DiskLoc rloc = member->loc;
+ RecordId rloc = member->loc;
_ws->free(id);
BSONObj deletedDoc;
@@ -177,7 +177,7 @@ namespace mongo {
repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(ns.db()));
}
- void DeleteStage::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void DeleteStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
++_commonStats.invalidates;
_child->invalidate(txn, dl, type);
}
diff --git a/src/mongo/db/exec/delete.h b/src/mongo/db/exec/delete.h
index 12a62697261..aa7512eb1e8 100644
--- a/src/mongo/db/exec/delete.h
+++ b/src/mongo/db/exec/delete.h
@@ -58,7 +58,7 @@ namespace mongo {
};
/**
- * This stage delete documents by DiskLoc that are returned from its child. NEED_TIME
+ * This stage deletes documents by RecordId that are returned from its child. NEED_TIME
* is returned after deleting a document.
*
* Callers of work() must be holding a write lock (and, for shouldCallLogOp=true deletes,
@@ -79,7 +79,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
diff --git a/src/mongo/db/exec/distinct_scan.cpp b/src/mongo/db/exec/distinct_scan.cpp
index 5f3f516cea0..57e70786a32 100644
--- a/src/mongo/db/exec/distinct_scan.cpp
+++ b/src/mongo/db/exec/distinct_scan.cpp
@@ -127,7 +127,7 @@ namespace mongo {
if (GETTING_NEXT == _scanState) {
// Grab the next (key, value) from the index.
BSONObj ownedKeyObj = _btreeCursor->getKey().getOwned();
- DiskLoc loc = _btreeCursor->getValue();
+ RecordId loc = _btreeCursor->getValue();
// The underlying IndexCursor points at the *next* thing we want to return. We do this
// so that if we're scanning an index looking for docs to delete we don't continually
@@ -203,7 +203,7 @@ namespace mongo {
}
}
- void DistinctScan::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void DistinctScan::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
++_commonStats.invalidates;
}
diff --git a/src/mongo/db/exec/distinct_scan.h b/src/mongo/db/exec/distinct_scan.h
index e74a6ecb9d3..9ee1d084f5c 100644
--- a/src/mongo/db/exec/distinct_scan.h
+++ b/src/mongo/db/exec/distinct_scan.h
@@ -104,7 +104,7 @@ namespace mongo {
virtual bool isEOF();
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
@@ -145,7 +145,7 @@ namespace mongo {
// For yielding.
BSONObj _savedKey;
- DiskLoc _savedLoc;
+ RecordId _savedLoc;
DistinctParams _params;
diff --git a/src/mongo/db/exec/eof.cpp b/src/mongo/db/exec/eof.cpp
index 0ab4cbb166c..c21e6cf6668 100644
--- a/src/mongo/db/exec/eof.cpp
+++ b/src/mongo/db/exec/eof.cpp
@@ -60,7 +60,7 @@ namespace mongo {
++_commonStats.unyields;
}
- void EOFStage::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void EOFStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
++_commonStats.invalidates;
}
diff --git a/src/mongo/db/exec/eof.h b/src/mongo/db/exec/eof.h
index 776df4f2946..7d83b4e2e96 100644
--- a/src/mongo/db/exec/eof.h
+++ b/src/mongo/db/exec/eof.h
@@ -47,7 +47,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp
index bc1fd838385..f6e0dd7094b 100644
--- a/src/mongo/db/exec/fetch.cpp
+++ b/src/mongo/db/exec/fetch.cpp
@@ -85,7 +85,7 @@ namespace mongo {
return returnIfMatches(member, id, out);
}
- // If we're here, we're not waiting for a DiskLoc to be fetched. Get another to-be-fetched
+ // If we're here, we're not waiting for a RecordId to be fetched. Get another to-be-fetched
// result from our child.
WorkingSetID id = WorkingSet::INVALID_ID;
StageState status = _child->work(&id);
@@ -118,7 +118,7 @@ namespace mongo {
}
}
- // The doc is already in memory, so go ahead and grab it. Now we have a DiskLoc
+ // The doc is already in memory, so go ahead and grab it. Now we have a RecordId
// as well as an unowned object
member->obj = _collection->docFor(_txn, member->loc);
member->keyData.clear();
@@ -164,7 +164,7 @@ namespace mongo {
_child->restoreState(opCtx);
}
- void FetchStage::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void FetchStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
++_commonStats.invalidates;
_child->invalidate(txn, dl, type);
diff --git a/src/mongo/db/exec/fetch.h b/src/mongo/db/exec/fetch.h
index 8ea33b0a1ae..3c8b5c00b6a 100644
--- a/src/mongo/db/exec/fetch.h
+++ b/src/mongo/db/exec/fetch.h
@@ -36,12 +36,12 @@
namespace mongo {
/**
- * This stage turns a DiskLoc into a BSONObj.
+ * This stage turns a RecordId into a BSONObj.
*
* In WorkingSetMember terms, it transitions from LOC_AND_IDX to LOC_AND_UNOWNED_OBJ by reading
* the record at the provided loc. Returns verbatim any data that already has an object.
*
- * Preconditions: Valid DiskLoc.
+ * Preconditions: Valid RecordId.
*/
class FetchStage : public PlanStage {
public:
@@ -58,7 +58,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
@@ -94,7 +94,7 @@ namespace mongo {
// The filter is not owned by us.
const MatchExpression* _filter;
- // If we want to return a DiskLoc and it points to something that's not in memory,
+ // If we want to return a RecordId and it points to something that's not in memory,
// we return a "please page this in" result. We add a RecordFetcher given back to us by the
// storage engine to the WSM. The RecordFetcher is used by the PlanExecutor when it handles
// the fetch request.
diff --git a/src/mongo/db/exec/group.cpp b/src/mongo/db/exec/group.cpp
index 90a28c8f3f7..1a7d5b174ac 100644
--- a/src/mongo/db/exec/group.cpp
+++ b/src/mongo/db/exec/group.cpp
@@ -269,7 +269,7 @@ namespace mongo {
_child->restoreState(opCtx);
}
- void GroupStage::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void GroupStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
++_commonStats.invalidates;
_child->invalidate(txn, dl, type);
}
diff --git a/src/mongo/db/exec/group.h b/src/mongo/db/exec/group.h
index 96e06274ab4..6e24c2c0664 100644
--- a/src/mongo/db/exec/group.h
+++ b/src/mongo/db/exec/group.h
@@ -90,7 +90,7 @@ namespace mongo {
virtual bool isEOF();
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
diff --git a/src/mongo/db/exec/idhack.cpp b/src/mongo/db/exec/idhack.cpp
index ffa727ec808..8a220eba97f 100644
--- a/src/mongo/db/exec/idhack.cpp
+++ b/src/mongo/db/exec/idhack.cpp
@@ -120,7 +120,7 @@ namespace mongo {
static_cast<const BtreeBasedAccessMethod*>(catalog->getIndex(idDesc));
// Look up the key by going directly to the Btree.
- DiskLoc loc = accessMethod->findSingle(_txn, _key);
+ RecordId loc = accessMethod->findSingle(_txn, _key);
// Key not found.
if (loc.isNull()) {
@@ -183,7 +183,7 @@ namespace mongo {
++_commonStats.unyields;
}
- void IDHackStage::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void IDHackStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
++_commonStats.invalidates;
// It's possible that the loc getting invalidated is the one we're about to
diff --git a/src/mongo/db/exec/idhack.h b/src/mongo/db/exec/idhack.h
index 5c404fdf145..a58d0fc393f 100644
--- a/src/mongo/db/exec/idhack.h
+++ b/src/mongo/db/exec/idhack.h
@@ -55,7 +55,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
/**
* ID Hack has a very strict criteria for the queries it supports.
@@ -103,7 +103,7 @@ namespace mongo {
// Do we need to add index key metadata for $returnKey?
bool _addKeyMetadata;
- // If we want to return a DiskLoc and it points to something that's not in memory,
+ // If we want to return a RecordId and it points to something that's not in memory,
// we return a "please page this in" result. We add a RecordFetcher given back to us by the
// storage engine to the WSM. The RecordFetcher is used by the PlanExecutor when it handles
// the fetch request.
diff --git a/src/mongo/db/exec/index_scan.cpp b/src/mongo/db/exec/index_scan.cpp
index 06e821f14ed..2a8ab64305b 100644
--- a/src/mongo/db/exec/index_scan.cpp
+++ b/src/mongo/db/exec/index_scan.cpp
@@ -169,7 +169,7 @@ namespace mongo {
if (GETTING_NEXT == _scanState) {
// Grab the next (key, value) from the index.
BSONObj keyObj = _indexCursor->getKey();
- DiskLoc loc = _indexCursor->getValue();
+ RecordId loc = _indexCursor->getValue();
bool filterPasses = Filter::passes(keyObj, _keyPattern, _filter);
if ( filterPasses ) {
@@ -278,18 +278,18 @@ namespace mongo {
}
}
- void IndexScan::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void IndexScan::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
++_commonStats.invalidates;
- // The only state we're responsible for holding is what DiskLocs to drop. If a document
+ // The only state we're responsible for holding is what RecordIds to drop. If a document
// mutates, the underlying index cursor will deal with it.
if (INVALIDATION_MUTATION == type) {
return;
}
- // If we see this DiskLoc again, it may not be the same document it was before, so we want
+ // If we see this RecordId again, it may not be the same document it was before, so we want
// to return it if we see it again.
- unordered_set<DiskLoc, DiskLoc::Hasher>::iterator it = _returned.find(dl);
+ unordered_set<RecordId, RecordId::Hasher>::iterator it = _returned.find(dl);
if (it != _returned.end()) {
++_specificStats.seenInvalidated;
_returned.erase(it);
diff --git a/src/mongo/db/exec/index_scan.h b/src/mongo/db/exec/index_scan.h
index 0626d5db716..54481c77ff1 100644
--- a/src/mongo/db/exec/index_scan.h
+++ b/src/mongo/db/exec/index_scan.h
@@ -68,7 +68,7 @@ namespace mongo {
/**
* Stage scans over an index from startKey to endKey, returning results that pass the provided
- * filter. Internally dedups on DiskLoc.
+ * filter. Internally dedups on RecordId.
*
* TODO: we probably should split this into 2 stages: one btree-only "fast" ixscan and one that
* strictly talks through the index API. Need to figure out what we really want to ship down
@@ -110,7 +110,7 @@ namespace mongo {
virtual bool isEOF();
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
@@ -154,11 +154,11 @@ namespace mongo {
// Could our index have duplicates? If so, we use _returned to dedup.
bool _shouldDedup;
- unordered_set<DiskLoc, DiskLoc::Hasher> _returned;
+ unordered_set<RecordId, RecordId::Hasher> _returned;
// For yielding.
BSONObj _savedKey;
- DiskLoc _savedLoc;
+ RecordId _savedLoc;
IndexScanParams _params;
diff --git a/src/mongo/db/exec/keep_mutations.cpp b/src/mongo/db/exec/keep_mutations.cpp
index 025e1ee5e27..c35793c4300 100644
--- a/src/mongo/db/exec/keep_mutations.cpp
+++ b/src/mongo/db/exec/keep_mutations.cpp
@@ -125,7 +125,7 @@ namespace mongo {
}
void KeepMutationsStage::invalidate(OperationContext* txn,
- const DiskLoc& dl,
+ const RecordId& dl,
InvalidationType type) {
++_commonStats.invalidates;
_child->invalidate(txn, dl, type);
diff --git a/src/mongo/db/exec/keep_mutations.h b/src/mongo/db/exec/keep_mutations.h
index 1828edd5b45..74538d6b5e2 100644
--- a/src/mongo/db/exec/keep_mutations.h
+++ b/src/mongo/db/exec/keep_mutations.h
@@ -53,7 +53,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
diff --git a/src/mongo/db/exec/limit.cpp b/src/mongo/db/exec/limit.cpp
index b92cb45b548..0d0dfcb596a 100644
--- a/src/mongo/db/exec/limit.cpp
+++ b/src/mongo/db/exec/limit.cpp
@@ -98,7 +98,7 @@ namespace mongo {
_child->restoreState(opCtx);
}
- void LimitStage::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void LimitStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
++_commonStats.invalidates;
_child->invalidate(txn, dl, type);
}
diff --git a/src/mongo/db/exec/limit.h b/src/mongo/db/exec/limit.h
index 0377247b919..2426732b904 100644
--- a/src/mongo/db/exec/limit.h
+++ b/src/mongo/db/exec/limit.h
@@ -51,7 +51,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp
index f30991aeaba..d175d612787 100644
--- a/src/mongo/db/exec/merge_sort.cpp
+++ b/src/mongo/db/exec/merge_sort.cpp
@@ -86,13 +86,13 @@ namespace mongo {
WorkingSetMember* member = _ws->get(id);
if (!member->hasLoc()) {
- // Can't dedup data unless there's a DiskLoc. We go ahead and use its
+ // Can't dedup data unless there's a RecordId. We go ahead and use its
// result.
_noResultToMerge.pop();
}
else {
++_specificStats.dupsTested;
- // ...and there's a diskloc and and we've seen the DiskLoc before
+ // ...and there's a RecordId and we've seen the RecordId before
if (_seen.end() != _seen.find(member->loc)) {
// ...drop it.
_ws->free(id);
@@ -203,7 +203,7 @@ namespace mongo {
}
void MergeSortStage::invalidate(OperationContext* txn,
- const DiskLoc& dl,
+ const RecordId& dl,
InvalidationType type) {
++_commonStats.invalidates;
for (size_t i = 0; i < _children.size(); ++i) {
diff --git a/src/mongo/db/exec/merge_sort.h b/src/mongo/db/exec/merge_sort.h
index db4fb189199..353fbaa14ec 100644
--- a/src/mongo/db/exec/merge_sort.h
+++ b/src/mongo/db/exec/merge_sort.h
@@ -67,7 +67,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
@@ -91,11 +91,11 @@ namespace mongo {
// The pattern that we're sorting by.
BSONObj _pattern;
- // Are we deduplicating on DiskLoc?
+ // Are we deduplicating on RecordId?
bool _dedup;
- // Which DiskLocs have we seen?
- unordered_set<DiskLoc, DiskLoc::Hasher> _seen;
+ // Which RecordIds have we seen?
+ unordered_set<RecordId, RecordId::Hasher> _seen;
// Owned by us. All the children we're reading from.
std::vector<PlanStage*> _children;
@@ -160,7 +160,7 @@ namespace mongo {
// How we're sorting.
BSONObj pattern;
- // Do we deduplicate on DiskLoc?
+ // Do we deduplicate on RecordId?
bool dedup;
};
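
The merge-sort changes keep the same dedup idea, applied across several sorted inputs. As a hedged sketch (not the stage's real control flow, which pulls lazily from child stages), a k-way merge over pre-sorted runs of ids that emits each id at most once looks like:

    #include <cstdint>
    #include <functional>
    #include <queue>
    #include <unordered_set>
    #include <utility>
    #include <vector>

    typedef int64_t RecordId;  // illustrative stand-in for mongo::RecordId

    // Merge sorted runs, dropping ids already emitted (the _seen set).
    std::vector<RecordId> mergeDedup(const std::vector<std::vector<RecordId> >& runs) {
        typedef std::pair<RecordId, size_t> Entry;  // (value, run index)
        std::priority_queue<Entry, std::vector<Entry>, std::greater<Entry> > heap;
        std::vector<size_t> pos(runs.size(), 0);
        for (size_t i = 0; i < runs.size(); ++i)
            if (!runs[i].empty()) heap.push(Entry(runs[i][0], i));

        std::unordered_set<RecordId> seen;
        std::vector<RecordId> out;
        while (!heap.empty()) {
            Entry top = heap.top();
            heap.pop();
            if (seen.insert(top.first).second) out.push_back(top.first);
            if (++pos[top.second] < runs[top.second].size())
                heap.push(Entry(runs[top.second][pos[top.second]], top.second));
        }
        return out;
    }
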
diff --git a/src/mongo/db/exec/mock_stage.cpp b/src/mongo/db/exec/mock_stage.cpp
index e95680bd2cd..c40265399f2 100644
--- a/src/mongo/db/exec/mock_stage.cpp
+++ b/src/mongo/db/exec/mock_stage.cpp
@@ -72,7 +72,7 @@ namespace mongo {
++_commonStats.unyields;
}
- void MockStage::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void MockStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
++_commonStats.invalidates;
}
diff --git a/src/mongo/db/exec/mock_stage.h b/src/mongo/db/exec/mock_stage.h
index f3347c84d9a..57bd337bf21 100644
--- a/src/mongo/db/exec/mock_stage.h
+++ b/src/mongo/db/exec/mock_stage.h
@@ -35,7 +35,7 @@
namespace mongo {
- class DiskLoc;
+ class RecordId;
/**
* MockStage is a data-producing stage that is used for testing. Unlike the other two leaf
@@ -60,7 +60,7 @@ namespace mongo {
// have correct yielding behavior.
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
diff --git a/src/mongo/db/exec/mock_stage_test.cpp b/src/mongo/db/exec/mock_stage_test.cpp
index 8a3c3b07d24..a7f7ec8652f 100644
--- a/src/mongo/db/exec/mock_stage_test.cpp
+++ b/src/mongo/db/exec/mock_stage_test.cpp
@@ -93,7 +93,7 @@ namespace {
ASSERT_EQUALS(stats->unyields, 1U);
// invalidates
- const DiskLoc dl(0, 0);
+ const RecordId dl(0, 0);
mock->invalidate(NULL, dl, INVALIDATION_MUTATION);
ASSERT_EQUALS(stats->invalidates, 1U);
diff --git a/src/mongo/db/exec/multi_iterator.cpp b/src/mongo/db/exec/multi_iterator.cpp
index 6dab3080da4..9f5d0268c44 100644
--- a/src/mongo/db/exec/multi_iterator.cpp
+++ b/src/mongo/db/exec/multi_iterator.cpp
@@ -57,10 +57,10 @@ namespace mongo {
if ( _collection == NULL )
return PlanStage::DEAD;
- // The DiskLoc we're about to look at it might not be in memory. In this case
+ // The RecordId we're about to look at might not be in memory. In this case
// we request a yield while we fetch the document.
if (!_iterators.empty()) {
- DiskLoc curr = _iterators.back()->curr();
+ RecordId curr = _iterators.back()->curr();
if (!curr.isNull()) {
std::auto_ptr<RecordFetcher> fetcher(_collection->documentNeedsFetch(_txn, curr));
if (NULL != fetcher.get()) {
@@ -74,7 +74,7 @@ namespace mongo {
}
}
- DiskLoc next = _advance();
+ RecordId next = _advance();
if (next.isNull())
return PlanStage::IS_EOF;
@@ -113,7 +113,7 @@ namespace mongo {
}
void MultiIteratorStage::invalidate(OperationContext* txn,
- const DiskLoc& dl,
+ const RecordId& dl,
InvalidationType type) {
switch ( type ) {
case INVALIDATION_DELETION:
@@ -132,16 +132,16 @@ namespace mongo {
return empty;
}
- DiskLoc MultiIteratorStage::_advance() {
+ RecordId MultiIteratorStage::_advance() {
while (!_iterators.empty()) {
- DiskLoc out = _iterators.back()->getNext();
+ RecordId out = _iterators.back()->getNext();
if (!out.isNull())
return out;
_iterators.popAndDeleteBack();
}
- return DiskLoc();
+ return RecordId();
}
} // namespace mongo
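
The _advance() rewrite is a good illustration of the null-RecordId convention: drain a stack of iterators, popping each exhausted one, and signal overall EOF with a default-constructed id. A standalone sketch, with a hypothetical RecordIterator interface standing in for the storage engine's:

    #include <memory>
    #include <vector>

    struct RecordId {
        long long repr;                        // 0 plays "null" in this sketch
        RecordId() : repr(0) {}
        explicit RecordId(long long r) : repr(r) {}
        bool isNull() const { return repr == 0; }
    };

    // Hypothetical iterator API; the real one lives in the storage layer.
    struct RecordIterator {
        virtual ~RecordIterator() {}
        virtual RecordId getNext() = 0;        // null id when exhausted
    };

    RecordId advance(std::vector<std::unique_ptr<RecordIterator> >& iterators) {
        while (!iterators.empty()) {
            RecordId out = iterators.back()->getNext();
            if (!out.isNull())
                return out;
            iterators.pop_back();              // drop the exhausted iterator
        }
        return RecordId();                     // overall EOF: the null id
    }
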
diff --git a/src/mongo/db/exec/multi_iterator.h b/src/mongo/db/exec/multi_iterator.h
index 823fa2f567f..c421c51ec26 100644
--- a/src/mongo/db/exec/multi_iterator.h
+++ b/src/mongo/db/exec/multi_iterator.h
@@ -62,7 +62,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
//
// These should not be used.
@@ -81,7 +81,7 @@ namespace mongo {
/**
* @return if more data
*/
- DiskLoc _advance();
+ RecordId _advance();
OperationContext* _txn;
Collection* _collection;
@@ -91,7 +91,7 @@ namespace mongo {
WorkingSet* _ws;
// We allocate a working set member with this id on construction of the stage. It gets
- // used for all fetch requests, changing the DiskLoc as appropriate.
+ // used for all fetch requests, changing the RecordId as appropriate.
const WorkingSetID _wsidForFetch;
};
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index fc28e914acf..1cd5ead1ca0 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -426,7 +426,7 @@ namespace mongo {
void invalidateHelper(OperationContext* txn,
WorkingSet* ws, // may flag for review
- const DiskLoc& dl,
+ const RecordId& dl,
list<WorkingSetID>* idsToInvalidate,
const Collection* collection) {
for (list<WorkingSetID>::iterator it = idsToInvalidate->begin();
@@ -448,7 +448,7 @@ namespace mongo {
}
void MultiPlanStage::invalidate(OperationContext* txn,
- const DiskLoc& dl,
+ const RecordId& dl,
InvalidationType type) {
if (_failure) { return; }
diff --git a/src/mongo/db/exec/multi_plan.h b/src/mongo/db/exec/multi_plan.h
index fe2af28d4e8..e0e2070847d 100644
--- a/src/mongo/db/exec/multi_plan.h
+++ b/src/mongo/db/exec/multi_plan.h
@@ -44,7 +44,7 @@ namespace mongo {
* This stage outputs its mainChild, and possibly its backup child
* and also updates the cache.
*
- * Preconditions: Valid DiskLoc.
+ * Preconditions: Valid RecordId.
*
* Owns the query solutions and PlanStage roots for all candidate plans.
*/
@@ -63,7 +63,7 @@ namespace mongo {
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
diff --git a/src/mongo/db/exec/near.cpp b/src/mongo/db/exec/near.cpp
index 294808970bd..8b318921f54 100644
--- a/src/mongo/db/exec/near.cpp
+++ b/src/mongo/db/exec/near.cpp
@@ -228,7 +228,7 @@ namespace mongo {
StatusWith<double> distanceStatus = computeDistance(nextMember);
- // Store the member's DiskLoc, if available, for quick invalidation
+ // Store the member's RecordId, if available, for quick invalidation
if (nextMember->hasLoc()) {
_nextIntervalSeen.insert(make_pair(nextMember->loc, nextMemberID));
}
@@ -319,15 +319,15 @@ namespace mongo {
}
}
- void NearStage::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void NearStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
++_stats->common.invalidates;
for (size_t i = 0; i < _childrenIntervals.size(); i++) {
_childrenIntervals[i]->covering->invalidate(txn, dl, type);
}
- // If a result is in _resultBuffer and has a DiskLoc it will be in _nextIntervalSeen as
- // well. It's safe to return the result w/o the DiskLoc, so just fetch the result.
- unordered_map<DiskLoc, WorkingSetID, DiskLoc::Hasher>::iterator seenIt = _nextIntervalSeen
+ // If a result is in _resultBuffer and has a RecordId it will be in _nextIntervalSeen as
+ // well. It's safe to return the result w/o the RecordId, so just fetch the result.
+ unordered_map<RecordId, WorkingSetID, RecordId::Hasher>::iterator seenIt = _nextIntervalSeen
.find(dl);
if (seenIt != _nextIntervalSeen.end()) {
@@ -337,7 +337,7 @@ namespace mongo {
WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
verify(!member->hasLoc());
- // Don't keep it around in the seen map since there's no valid DiskLoc anymore
+ // Don't keep it around in the seen map since there's no valid RecordId anymore
_nextIntervalSeen.erase(seenIt);
}
}
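
NearStage's invalidate shows the buffered-results variant of the pattern: a side map from RecordId to the buffered WorkingSetID lets a deletion find the affected member, force-fetch it into an owned copy, and drop the stale id. A simplified, self-contained sketch (the WorkingSet and document types here are stand-ins, not the real classes):

    #include <functional>
    #include <string>
    #include <unordered_map>

    struct RecordId {
        long long repr;
        RecordId() : repr(0) {}
        bool operator==(const RecordId& r) const { return repr == r.repr; }
        struct Hasher {
            size_t operator()(const RecordId& r) const {
                return std::hash<long long>()(r.repr);
            }
        };
    };

    typedef size_t WorkingSetID;

    struct Member {
        RecordId loc;
        std::string ownedObj;  // stand-in for an owned BSONObj
        bool hasLoc;
    };

    class NearBuffer {
    public:
        void remember(const RecordId& rid, WorkingSetID id) { _seen[rid] = id; }

        // On deletion of 'rid': detach the buffered result from the record.
        void invalidate(const RecordId& rid,
                        std::unordered_map<WorkingSetID, Member>& ws,
                        const std::string& currentDoc) {
            std::unordered_map<RecordId, WorkingSetID, RecordId::Hasher>::iterator
                it = _seen.find(rid);
            if (it == _seen.end()) return;   // nothing buffered for this id
            Member& m = ws[it->second];
            m.ownedObj = currentDoc;         // fetch: keep our own copy
            m.hasLoc = false;                // the RecordId is no longer valid
            m.loc = RecordId();
            _seen.erase(it);                 // no valid id to key on anymore
        }

    private:
        std::unordered_map<RecordId, WorkingSetID, RecordId::Hasher> _seen;
    };
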
diff --git a/src/mongo/db/exec/near.h b/src/mongo/db/exec/near.h
index fbdba1e56db..62741f8014b 100644
--- a/src/mongo/db/exec/near.h
+++ b/src/mongo/db/exec/near.h
@@ -85,7 +85,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual vector<PlanStage*> getChildren() const;
@@ -176,7 +176,7 @@ namespace mongo {
// May need to track disklocs from the child stage to do our own deduping, also to do
// invalidation of buffered results.
- unordered_map<DiskLoc, WorkingSetID, DiskLoc::Hasher> _nextIntervalSeen;
+ unordered_map<RecordId, WorkingSetID, RecordId::Hasher> _nextIntervalSeen;
// Stats for the stage covering this interval
scoped_ptr<IntervalStats> _nextIntervalStats;
diff --git a/src/mongo/db/exec/oplogstart.cpp b/src/mongo/db/exec/oplogstart.cpp
index c73eaeb3087..1edc3bc2b94 100644
--- a/src/mongo/db/exec/oplogstart.cpp
+++ b/src/mongo/db/exec/oplogstart.cpp
@@ -83,7 +83,7 @@ namespace mongo {
}
// we work from the back to the front since the back has the newest data.
- const DiskLoc loc = _subIterators.back()->getNext();
+ const RecordId loc = _subIterators.back()->getNext();
_subIterators.popAndDeleteBack();
// TODO: should we ever try and return NEED_FETCH here?
@@ -130,7 +130,7 @@ namespace mongo {
if (!_filter->matchesBSON(member->obj)) {
_done = true;
- // DiskLoc is returned in *out.
+ // RecordId is returned in *out.
return PlanStage::ADVANCED;
}
else {
@@ -141,7 +141,7 @@ namespace mongo {
bool OplogStart::isEOF() { return _done; }
- void OplogStart::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void OplogStart::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
if (_needInit) { return; }
if (INVALIDATION_DELETION != type) { return; }
diff --git a/src/mongo/db/exec/oplogstart.h b/src/mongo/db/exec/oplogstart.h
index f8eeba337ff..d18b2450e7f 100644
--- a/src/mongo/db/exec/oplogstart.h
+++ b/src/mongo/db/exec/oplogstart.h
@@ -54,7 +54,7 @@ namespace mongo {
* inserted before documents in a subsequent extent. As such we can skip through entire extents
* looking only at the first document.
*
- * Why is this a stage? Because we want to yield, and we want to be notified of DiskLoc
+ * Why is this a stage? Because we want to yield, and we want to be notified of RecordId
* invalidations. :(
*/
class OplogStart : public PlanStage {
@@ -69,7 +69,7 @@ namespace mongo {
virtual StageState work(WorkingSetID* out);
virtual bool isEOF();
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
diff --git a/src/mongo/db/exec/or.cpp b/src/mongo/db/exec/or.cpp
index e115edbbef0..91364d9807e 100644
--- a/src/mongo/db/exec/or.cpp
+++ b/src/mongo/db/exec/or.cpp
@@ -73,7 +73,7 @@ namespace mongo {
if (_dedup && member->hasLoc()) {
++_specificStats.dupsTested;
- // ...and we've seen the DiskLoc before
+ // ...and we've seen the RecordId before
if (_seen.end() != _seen.find(member->loc)) {
// ...drop it.
++_specificStats.dupsDropped;
@@ -155,7 +155,7 @@ namespace mongo {
}
}
- void OrStage::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void OrStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
++_commonStats.invalidates;
if (isEOF()) { return; }
@@ -167,7 +167,7 @@ namespace mongo {
// If we see DL again it is not the same record as it once was so we still want to
// return it.
if (_dedup && INVALIDATION_DELETION == type) {
- unordered_set<DiskLoc, DiskLoc::Hasher>::iterator it = _seen.find(dl);
+ unordered_set<RecordId, RecordId::Hasher>::iterator it = _seen.find(dl);
if (_seen.end() != it) {
++_specificStats.locsForgotten;
_seen.erase(dl);
diff --git a/src/mongo/db/exec/or.h b/src/mongo/db/exec/or.h
index 3aaa72e0de9..c0f92a01327 100644
--- a/src/mongo/db/exec/or.h
+++ b/src/mongo/db/exec/or.h
@@ -37,11 +37,11 @@
namespace mongo {
/**
- * This stage outputs the union of its children. It optionally deduplicates on DiskLoc.
+ * This stage outputs the union of its children. It optionally deduplicates on RecordId.
*
- * Preconditions: Valid DiskLoc.
+ * Preconditions: Valid RecordId.
*
- * If we're deduping, we may fail to dedup any invalidated DiskLoc properly.
+ * If we're deduping, we may fail to dedup any invalidated RecordId properly.
*/
class OrStage : public PlanStage {
public:
@@ -56,7 +56,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
@@ -83,11 +83,11 @@ namespace mongo {
// Which of _children are we calling work(...) on now?
size_t _currentChild;
- // True if we dedup on DiskLoc, false otherwise.
+ // True if we dedup on RecordId, false otherwise.
bool _dedup;
- // Which DiskLocs have we returned?
- unordered_set<DiskLoc, DiskLoc::Hasher> _seen;
+ // Which RecordIds have we returned?
+ unordered_set<RecordId, RecordId::Hasher> _seen;
// Stats
CommonStats _commonStats;
diff --git a/src/mongo/db/exec/pipeline_proxy.cpp b/src/mongo/db/exec/pipeline_proxy.cpp
index 69c18d5c060..77125fbd616 100644
--- a/src/mongo/db/exec/pipeline_proxy.cpp
+++ b/src/mongo/db/exec/pipeline_proxy.cpp
@@ -82,7 +82,7 @@ namespace mongo {
}
void PipelineProxyStage::invalidate(OperationContext* txn,
- const DiskLoc& dl,
+ const RecordId& dl,
InvalidationType type) {
// propagate to child executor if still in use
if (boost::shared_ptr<PlanExecutor> exec = _childExec.lock()) {
diff --git a/src/mongo/db/exec/pipeline_proxy.h b/src/mongo/db/exec/pipeline_proxy.h
index 82955833fef..58024c917d1 100644
--- a/src/mongo/db/exec/pipeline_proxy.h
+++ b/src/mongo/db/exec/pipeline_proxy.h
@@ -52,7 +52,7 @@ namespace mongo {
virtual bool isEOF();
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
//
// Manage our OperationContext. We intentionally don't propagate to the child
diff --git a/src/mongo/db/exec/plan_stage.h b/src/mongo/db/exec/plan_stage.h
index 1adba4bd906..da85ceb2d58 100644
--- a/src/mongo/db/exec/plan_stage.h
+++ b/src/mongo/db/exec/plan_stage.h
@@ -35,7 +35,7 @@
namespace mongo {
class Collection;
- class DiskLoc;
+ class RecordId;
class OperationContext;
/**
@@ -208,9 +208,9 @@ namespace mongo {
virtual void restoreState(OperationContext* opCtx) = 0;
/**
- * Notifies a stage that a DiskLoc is going to be deleted (or in-place updated) so that the
+ * Notifies a stage that a RecordId is going to be deleted (or in-place updated) so that the
* stage can invalidate or modify any state required to continue processing without this
- * DiskLoc.
+ * RecordId.
*
* Can only be called after a saveState but before a restoreState.
*
@@ -219,7 +219,7 @@ namespace mongo {
* stage's own OperationContext is inactive during the invalidate and should not be used).
*/
virtual void invalidate(OperationContext* txn,
- const DiskLoc& dl,
+ const RecordId& dl,
InvalidationType type) = 0;
/**
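
The contract in the plan_stage.h hunk above is worth restating: invalidate() runs only between saveState() and restoreState(), and every stage must either absorb the notification or forward it to its children. Most hunks in this patch are the minimal "bump stats, propagate" version, sketched here against a skeletal (hypothetical) stage hierarchy:

    #include <cstddef>
    #include <memory>

    struct RecordId { long long repr; RecordId() : repr(0) {} };
    enum InvalidationType { INVALIDATION_DELETION, INVALIDATION_MUTATION };
    class OperationContext;  // opaque here, as in the header

    class PlanStage {
    public:
        virtual ~PlanStage() {}
        virtual void invalidate(OperationContext* txn,
                                const RecordId& dl,
                                InvalidationType type) = 0;
    };

    // The default most stages in this patch implement: count and pass down.
    class PassThroughStage : public PlanStage {
    public:
        explicit PassThroughStage(PlanStage* child) : _child(child) {}

        virtual void invalidate(OperationContext* txn,
                                const RecordId& dl,
                                InvalidationType type) {
            ++_invalidates;                     // _commonStats.invalidates
            _child->invalidate(txn, dl, type);  // propagate to the subtree
        }

    private:
        std::unique_ptr<PlanStage> _child;      // owned, as in the real stages
        size_t _invalidates = 0;
    };
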
diff --git a/src/mongo/db/exec/plan_stats.h b/src/mongo/db/exec/plan_stats.h
index 803d822c26c..a8ed7bd2184 100644
--- a/src/mongo/db/exec/plan_stats.h
+++ b/src/mongo/db/exec/plan_stats.h
@@ -164,7 +164,7 @@ namespace mongo {
size_t flaggedInProgress;
// How many entries are in the map after each child?
- // child 'i' produced children[i].common.advanced DiskLocs, of which mapAfterChild[i] were
+ // child 'i' produced children[i].common.advanced RecordIds, of which mapAfterChild[i] were
// intersections.
std::vector<size_t> mapAfterChild;
@@ -446,7 +446,7 @@ namespace mongo {
size_t dupsTested;
size_t dupsDropped;
- // How many calls to invalidate(...) actually removed a DiskLoc from our deduping map?
+ // How many calls to invalidate(...) actually removed a RecordId from our deduping map?
size_t locsForgotten;
// We know how many passed (it's the # of advanced) and therefore how many failed.
diff --git a/src/mongo/db/exec/projection.cpp b/src/mongo/db/exec/projection.cpp
index 451fa1c8b2f..9d13644f1a7 100644
--- a/src/mongo/db/exec/projection.cpp
+++ b/src/mongo/db/exec/projection.cpp
@@ -189,7 +189,7 @@ namespace mongo {
member->state = WorkingSetMember::OWNED_OBJ;
member->keyData.clear();
- member->loc = DiskLoc();
+ member->loc = RecordId();
member->obj = bob.obj();
return Status::OK();
}
@@ -257,7 +257,7 @@ namespace mongo {
}
void ProjectionStage::invalidate(OperationContext* txn,
- const DiskLoc& dl,
+ const RecordId& dl,
InvalidationType type) {
++_commonStats.invalidates;
_child->invalidate(txn, dl, type);
diff --git a/src/mongo/db/exec/projection.h b/src/mongo/db/exec/projection.h
index d6b4990e200..9302ba836cb 100644
--- a/src/mongo/db/exec/projection.h
+++ b/src/mongo/db/exec/projection.h
@@ -85,7 +85,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
diff --git a/src/mongo/db/exec/projection_exec.cpp b/src/mongo/db/exec/projection_exec.cpp
index 3c4f9e6dee9..f44dc3c551e 100644
--- a/src/mongo/db/exec/projection_exec.cpp
+++ b/src/mongo/db/exec/projection_exec.cpp
@@ -247,7 +247,7 @@ namespace mongo {
member->state = WorkingSetMember::OWNED_OBJ;
member->obj = keyObj;
member->keyData.clear();
- member->loc = DiskLoc();
+ member->loc = RecordId();
return Status::OK();
}
@@ -344,7 +344,7 @@ namespace mongo {
member->state = WorkingSetMember::OWNED_OBJ;
member->obj = newObj;
member->keyData.clear();
- member->loc = DiskLoc();
+ member->loc = RecordId();
return Status::OK();
}
diff --git a/src/mongo/db/exec/shard_filter.cpp b/src/mongo/db/exec/shard_filter.cpp
index 4dc828b1a58..d5344af6dc2 100644
--- a/src/mongo/db/exec/shard_filter.cpp
+++ b/src/mongo/db/exec/shard_filter.cpp
@@ -132,7 +132,7 @@ namespace mongo {
}
void ShardFilterStage::invalidate(OperationContext* txn,
- const DiskLoc& dl,
+ const RecordId& dl,
InvalidationType type) {
++_commonStats.invalidates;
_child->invalidate(txn, dl, type);
diff --git a/src/mongo/db/exec/shard_filter.h b/src/mongo/db/exec/shard_filter.h
index 4fcd087125f..614b0bb6ad9 100644
--- a/src/mongo/db/exec/shard_filter.h
+++ b/src/mongo/db/exec/shard_filter.h
@@ -81,7 +81,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
diff --git a/src/mongo/db/exec/skip.cpp b/src/mongo/db/exec/skip.cpp
index d16801adf36..34d4e07c9f3 100644
--- a/src/mongo/db/exec/skip.cpp
+++ b/src/mongo/db/exec/skip.cpp
@@ -101,7 +101,7 @@ namespace mongo {
_child->restoreState(opCtx);
}
- void SkipStage::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void SkipStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
++_commonStats.invalidates;
_child->invalidate(txn, dl, type);
}
diff --git a/src/mongo/db/exec/skip.h b/src/mongo/db/exec/skip.h
index 82d73cec57d..b22ee7734a6 100644
--- a/src/mongo/db/exec/skip.h
+++ b/src/mongo/db/exec/skip.h
@@ -50,7 +50,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index 2105c370fcf..aaa2b852206 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -276,7 +276,7 @@ namespace mongo {
if (0 != result) {
return result < 0;
}
- // Indices use DiskLoc as an additional sort key so we must as well.
+ // Indices use RecordId as an additional sort key so we must as well.
return lhs.loc < rhs.loc;
}
@@ -340,8 +340,8 @@ namespace mongo {
StageState code = _child->work(&id);
if (PlanStage::ADVANCED == code) {
- // Add it into the map for quick invalidation if it has a valid DiskLoc.
- // A DiskLoc may be invalidated at any time (during a yield). We need to get into
+ // Add it into the map for quick invalidation if it has a valid RecordId.
+ // A RecordId may be invalidated at any time (during a yield). We need to get into
// the WorkingSet as quickly as possible to handle it.
WorkingSetMember* member = _ws->get(id);
@@ -362,7 +362,7 @@ namespace mongo {
}
item.wsid = id;
if (member->hasLoc()) {
- // The DiskLoc breaks ties when sorting two WSMs with the same sort key.
+ // The RecordId breaks ties when sorting two WSMs with the same sort key.
item.loc = member->loc;
}
@@ -431,7 +431,7 @@ namespace mongo {
_child->restoreState(opCtx);
}
- void SortStage::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void SortStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
++_commonStats.invalidates;
_child->invalidate(txn, dl, type);
@@ -440,11 +440,11 @@ namespace mongo {
// So, no matter what, fetch and keep the doc in play.
// _data contains indices into the WorkingSet, not actual data. If a WorkingSetMember in
- // the WorkingSet needs to change state as a result of a DiskLoc invalidation, it will still
+ // the WorkingSet needs to change state as a result of a RecordId invalidation, it will still
// be at the same spot in the WorkingSet. As such, we don't need to modify _data.
DataMap::iterator it = _wsidByDiskLoc.find(dl);
- // If we're holding on to data that's got the DiskLoc we're invalidating...
+ // If we're holding on to data that's got the RecordId we're invalidating...
if (_wsidByDiskLoc.end() != it) {
// Grab the WSM that we're nuking.
WorkingSetMember* member = _ws->get(it->second);
@@ -452,7 +452,7 @@ namespace mongo {
WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
- // Remove the DiskLoc from our set of active DLs.
+ // Remove the RecordId from our set of active DLs.
_wsidByDiskLoc.erase(it);
++_specificStats.forcedFetches;
}
@@ -559,7 +559,7 @@ namespace mongo {
}
// If the working set ID is valid, remove from
- // DiskLoc invalidation map and free from working set.
+ // RecordId invalidation map and free from working set.
if (wsidToFree != WorkingSet::INVALID_ID) {
WorkingSetMember* member = _ws->get(wsidToFree);
if (member->hasLoc()) {
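
The sort.cpp comparator hunk is the heart of the change in this file: items are ordered on (sortKey, loc), with the RecordId as the tie-breaker so output order matches what a covered index scan would produce. A small sketch over illustrative items:

    #include <algorithm>
    #include <string>
    #include <vector>

    struct RecordId {
        long long repr;
        RecordId() : repr(0) {}
        bool operator<(const RecordId& r) const { return repr < r.repr; }
    };

    struct SortableDataItem {
        std::string sortKey;  // stand-in for the extracted BSON sort key
        RecordId loc;         // tie-breaker, mirroring index ordering
    };

    void sortItems(std::vector<SortableDataItem>& items) {
        std::sort(items.begin(), items.end(),
                  [](const SortableDataItem& lhs, const SortableDataItem& rhs) {
                      if (lhs.sortKey != rhs.sortKey)
                          return lhs.sortKey < rhs.sortKey;  // primary key
                      return lhs.loc < rhs.loc;  // RecordId breaks ties
                  });
    }
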
diff --git a/src/mongo/db/exec/sort.h b/src/mongo/db/exec/sort.h
index 4d8ac5f68d0..44453974be4 100644
--- a/src/mongo/db/exec/sort.h
+++ b/src/mongo/db/exec/sort.h
@@ -49,7 +49,7 @@ namespace mongo {
public:
SortStageParams() : collection(NULL), limit(0) { }
- // Used for resolving DiskLocs to BSON
+ // Used for resolving RecordIds to BSON
const Collection* collection;
// How we're sorting.
@@ -153,7 +153,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
@@ -209,15 +209,15 @@ namespace mongo {
WorkingSetID wsid;
BSONObj sortKey;
// Since we must replicate the behavior of a covered sort as much as possible we use the
- // DiskLoc to break sortKey ties.
+ // RecordId to break sortKey ties.
// See sorta.js.
- DiskLoc loc;
+ RecordId loc;
};
// Comparison object for data buffers (vector and set).
// Items are compared on (sortKey, loc). This is also how the items are
// ordered in the indices.
- // Keys are compared using BSONObj::woCompare() with DiskLoc as a tie-breaker.
+ // Keys are compared using BSONObj::woCompare() with RecordId as a tie-breaker.
struct WorkingSetComparator {
explicit WorkingSetComparator(BSONObj p);
@@ -258,8 +258,8 @@ namespace mongo {
// Iterates through _data post-sort returning it.
std::vector<SortableDataItem>::iterator _resultIterator;
- // We buffer a lot of data and we want to look it up by DiskLoc quickly upon invalidation.
- typedef unordered_map<DiskLoc, WorkingSetID, DiskLoc::Hasher> DataMap;
+ // We buffer a lot of data and we want to look it up by RecordId quickly upon invalidation.
+ typedef unordered_map<RecordId, WorkingSetID, RecordId::Hasher> DataMap;
DataMap _wsidByDiskLoc;
//
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 7c177377c8a..a83b346cc03 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -518,7 +518,7 @@ namespace mongo {
}
}
- void SubplanStage::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void SubplanStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
++_commonStats.invalidates;
if (_killed) {
return;
diff --git a/src/mongo/db/exec/subplan.h b/src/mongo/db/exec/subplan.h
index c436b848266..c2ae5b78ed1 100644
--- a/src/mongo/db/exec/subplan.h
+++ b/src/mongo/db/exec/subplan.h
@@ -67,7 +67,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
diff --git a/src/mongo/db/exec/text.cpp b/src/mongo/db/exec/text.cpp
index d06f302e43b..f95257308e2 100644
--- a/src/mongo/db/exec/text.cpp
+++ b/src/mongo/db/exec/text.cpp
@@ -127,7 +127,7 @@ namespace mongo {
}
}
- void TextStage::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void TextStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
++_commonStats.invalidates;
// Propagate invalidate to children.
@@ -135,7 +135,7 @@ namespace mongo {
_scanners.mutableVector()[i]->invalidate(txn, dl, type);
}
- // We store the score keyed by DiskLoc. We have to toss out our state when the DiskLoc
+ // We store the score keyed by RecordId. We have to toss out our state when the RecordId
// changes.
// TODO: If we're RETURNING_RESULTS we could somehow buffer the object.
ScoreMap::iterator scoreIt = _scores.find(dl);
@@ -269,7 +269,7 @@ namespace mongo {
}
// Filter for phrases and negative terms, score and truncate.
- DiskLoc loc = _scoreIterator->first;
+ RecordId loc = _scoreIterator->first;
double score = _scoreIterator->second;
_scoreIterator++;
@@ -299,7 +299,7 @@ namespace mongo {
TextMatchableDocument(OperationContext* txn,
const BSONObj& keyPattern,
const BSONObj& key,
- DiskLoc loc,
+ RecordId loc,
const Collection* collection,
bool *fetched)
: _txn(txn),
@@ -348,11 +348,11 @@ namespace mongo {
const Collection* _collection;
BSONObj _keyPattern;
BSONObj _key;
- DiskLoc _loc;
+ RecordId _loc;
bool* _fetched;
};
- void TextStage::addTerm(const BSONObj& key, const DiskLoc& loc) {
+ void TextStage::addTerm(const BSONObj& key, const RecordId& loc) {
double *documentAggregateScore = &_scores[loc];
++_specificStats.keysExamined;
diff --git a/src/mongo/db/exec/text.h b/src/mongo/db/exec/text.h
index 5118a3f7541..a4e851bbedb 100644
--- a/src/mongo/db/exec/text.h
+++ b/src/mongo/db/exec/text.h
@@ -110,7 +110,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
@@ -141,7 +141,7 @@ namespace mongo {
* score) pair for this document. Also rejects documents that don't match this stage's
* filter.
*/
- void addTerm(const BSONObj& key, const DiskLoc& loc);
+ void addTerm(const BSONObj& key, const RecordId& loc);
/**
* Possibly return a result. FYI, this may perform a fetch directly if it is needed to
@@ -181,7 +181,7 @@ namespace mongo {
// Temporary score data filled out by sub-scans. Used in READING_TERMS and
// RETURNING_RESULTS.
// Maps from diskloc -> aggregate score for doc.
- typedef unordered_map<DiskLoc, double, DiskLoc::Hasher> ScoreMap;
+ typedef unordered_map<RecordId, double, RecordId::Hasher> ScoreMap;
ScoreMap _scores;
ScoreMap::const_iterator _scoreIterator;
};
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index dcba59dd5a6..c7032a68b34 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -410,7 +410,7 @@ namespace mongo {
params.driver->setContext(ModifierInterface::ExecInfo::UPDATE_CONTEXT);
}
- void UpdateStage::transformAndUpdate(BSONObj& oldObj, DiskLoc& loc) {
+ void UpdateStage::transformAndUpdate(BSONObj& oldObj, RecordId& loc) {
const UpdateRequest* request = _params.request;
UpdateDriver* driver = _params.driver;
CanonicalQuery* cq = _params.canonicalQuery;
@@ -520,13 +520,13 @@ namespace mongo {
// Don't actually do the write if this is an explain.
if (!request->isExplain()) {
invariant(_collection);
- StatusWith<DiskLoc> res = _collection->updateDocument(_txn,
+ StatusWith<RecordId> res = _collection->updateDocument(_txn,
loc,
newObj,
true,
_params.opDebug);
uassertStatusOK(res.getStatus());
- DiskLoc newLoc = res.getValue();
+ RecordId newLoc = res.getValue();
// If the document moved, we might see it again in a collection scan (maybe it's
// a document after our current document).
@@ -641,7 +641,7 @@ namespace mongo {
WriteUnitOfWork wunit(_txn);
invariant(_collection);
- StatusWith<DiskLoc> newLoc = _collection->insertDocument(_txn,
+ StatusWith<RecordId> newLoc = _collection->insertDocument(_txn,
newObj,
!request->isGod()/*enforceQuota*/);
uassertStatusOK(newLoc.getStatus());
@@ -707,7 +707,7 @@ namespace mongo {
if (PlanStage::ADVANCED == status) {
// Need to get these things from the result returned by the child.
- DiskLoc loc;
+ RecordId loc;
BSONObj oldObj;
WorkingSetMember* member = _ws->get(id);
@@ -726,7 +726,7 @@ namespace mongo {
invariant(member->hasObj());
oldObj = member->obj;
- // If we're here, then we have retrieved both a DiskLoc and the corresponding
+ // If we're here, then we have retrieved both a RecordId and the corresponding
// unowned object from the child stage. Since we have the object and the diskloc,
// we can free the WSM.
_ws->free(id);
@@ -858,7 +858,7 @@ namespace mongo {
uassertStatusOK(restoreUpdateState(opCtx));
}
- void UpdateStage::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void UpdateStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
++_commonStats.invalidates;
_child->invalidate(txn, dl, type);
}
diff --git a/src/mongo/db/exec/update.h b/src/mongo/db/exec/update.h
index 8fd8fc3df2d..0713c542819 100644
--- a/src/mongo/db/exec/update.h
+++ b/src/mongo/db/exec/update.h
@@ -87,7 +87,7 @@ namespace mongo {
virtual void saveState();
virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual std::vector<PlanStage*> getChildren() const;
@@ -103,10 +103,10 @@ namespace mongo {
private:
/**
- * Computes the result of applying mods to the document 'oldObj' at DiskLoc 'loc' in
+ * Computes the result of applying mods to the document 'oldObj' at RecordId 'loc' in
* memory, then commits these changes to the database.
*/
- void transformAndUpdate(BSONObj& oldObj, DiskLoc& loc);
+ void transformAndUpdate(BSONObj& oldObj, RecordId& loc);
/**
* Computes the document to insert and inserts it into the collection. Used if the
@@ -161,7 +161,7 @@ namespace mongo {
// document and we wouldn't want to update that.
//
// So, no matter what, we keep track of where the doc wound up.
- typedef unordered_set<DiskLoc, DiskLoc::Hasher> DiskLocSet;
+ typedef unordered_set<RecordId, RecordId::Hasher> DiskLocSet;
const boost::scoped_ptr<DiskLocSet> _updatedLocs;
// These get reused for each update.
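
The _updatedLocs set above guards against the Halloween problem: an update that moves a document can make it reappear later in the same collection scan, and it must not be updated twice. A compact sketch of that guard (types illustrative):

    #include <functional>
    #include <unordered_set>

    struct RecordId {
        long long repr;
        RecordId() : repr(0) {}
        bool operator==(const RecordId& r) const { return repr == r.repr; }
        struct Hasher {
            size_t operator()(const RecordId& r) const {
                return std::hash<long long>()(r.repr);
            }
        };
    };

    class UpdateGuard {
    public:
        // True if 'loc' has not been updated yet; records it either way.
        bool claim(const RecordId& loc) {
            return _updatedLocs.insert(loc).second;
        }

        // If the update moved the document, remember its new home too.
        void movedTo(const RecordId& newLoc) { _updatedLocs.insert(newLoc); }

    private:
        std::unordered_set<RecordId, RecordId::Hasher> _updatedLocs;
    };
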
diff --git a/src/mongo/db/exec/working_set.cpp b/src/mongo/db/exec/working_set.cpp
index 2bf14b22d48..bf624ffa917 100644
--- a/src/mongo/db/exec/working_set.cpp
+++ b/src/mongo/db/exec/working_set.cpp
@@ -189,7 +189,7 @@ namespace mongo {
size_t memUsage = 0;
if (hasLoc()) {
- memUsage += sizeof(DiskLoc);
+ memUsage += sizeof(RecordId);
}
// XXX: Unowned objects count towards current size.
diff --git a/src/mongo/db/exec/working_set.h b/src/mongo/db/exec/working_set.h
index 287198a1cf7..bcdea8f2cf9 100644
--- a/src/mongo/db/exec/working_set.h
+++ b/src/mongo/db/exec/working_set.h
@@ -84,7 +84,7 @@ namespace mongo {
void free(const WorkingSetID& i);
/**
- * The DiskLoc in WSM 'i' was invalidated while being processed. Any predicates over the
+ * The RecordId in WSM 'i' was invalidated while being processed. Any predicates over the
* WSM could not be fully evaluated, so the WSM may or may not satisfy them. As such, if we
* wish to output the WSM, we must do some clean-up work later. Adds the WSM with id 'i' to
* the list of flagged WSIDs.
@@ -217,7 +217,7 @@ namespace mongo {
// Data is from a collection scan, or data is from an index scan and was fetched.
LOC_AND_UNOWNED_OBJ,
- // DiskLoc has been invalidated, or the obj doesn't correspond to an on-disk document
+ // RecordId has been invalidated, or the obj doesn't correspond to an on-disk document
// anymore (e.g. is a computed expression).
OWNED_OBJ,
};
@@ -226,7 +226,7 @@ namespace mongo {
// Core attributes
//
- DiskLoc loc;
+ RecordId loc;
BSONObj obj;
std::vector<IndexKeyDatum> keyData;
MemberState state;
diff --git a/src/mongo/db/exec/working_set_common.cpp b/src/mongo/db/exec/working_set_common.cpp
index 843297a92bc..cd382e00298 100644
--- a/src/mongo/db/exec/working_set_common.cpp
+++ b/src/mongo/db/exec/working_set_common.cpp
@@ -39,14 +39,14 @@ namespace mongo {
// Already in our desired state.
if (member->state == WorkingSetMember::OWNED_OBJ) { return true; }
- // We can't do anything without a DiskLoc.
+ // We can't do anything without a RecordId.
if (!member->hasLoc()) { return false; }
// Do the fetch, invalidate the DL.
member->obj = collection->docFor(txn, member->loc).getOwned();
member->state = WorkingSetMember::OWNED_OBJ;
- member->loc = DiskLoc();
+ member->loc = RecordId();
return true;
}
@@ -63,7 +63,7 @@ namespace mongo {
return;
}
- // We should have a DiskLoc but need to retrieve the obj. Get the obj now and reset all WSM
+ // We should have a RecordId but need to retrieve the obj. Get the obj now and reset all WSM
// state appropriately.
invariant(member->hasLoc());
member->obj = collection->docFor(txn, member->loc);
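
fetchAndInvalidateLoc is the canonical state transition named in working_set.h: copy the document into an owned object, null the RecordId, and flip the member to OWNED_OBJ. A self-contained sketch, assuming a string stands in for BSONObj and the caller supplies the fetched document:

    #include <string>

    struct RecordId {
        long long repr;
        RecordId() : repr(0) {}
        bool isNull() const { return repr == 0; }
    };

    enum MemberState { LOC_AND_UNOWNED_OBJ, OWNED_OBJ };

    struct WorkingSetMember {
        RecordId loc;
        std::string obj;  // stand-in for BSONObj
        MemberState state;
        bool hasLoc() const { return !loc.isNull(); }
    };

    // 'fetchedDoc' stands in for Collection::docFor(txn, member->loc).
    bool fetchAndInvalidateLoc(WorkingSetMember* member,
                               const std::string& fetchedDoc) {
        if (member->state == OWNED_OBJ) return true;   // already detached
        if (!member->hasLoc()) return false;           // nothing to fetch from
        member->obj = fetchedDoc;                      // take an owned copy
        member->state = OWNED_OBJ;
        member->loc = RecordId();                      // id must not be reused
        return true;
    }
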
diff --git a/src/mongo/db/exec/working_set_common.h b/src/mongo/db/exec/working_set_common.h
index 2efb94da036..88f41a0653f 100644
--- a/src/mongo/db/exec/working_set_common.h
+++ b/src/mongo/db/exec/working_set_common.h
@@ -36,7 +36,7 @@ namespace mongo {
public:
/**
* Get an owned copy of the BSONObj the WSM refers to.
- * Requires either a valid BSONObj or valid DiskLoc.
+ * Requires either a valid BSONObj or valid RecordId.
* Returns true if the fetch and invalidate succeeded, false otherwise.
*/
static bool fetchAndInvalidateLoc(OperationContext* txn,
diff --git a/src/mongo/db/index/btree_based_access_method.cpp b/src/mongo/db/index/btree_based_access_method.cpp
index 776fd9485dd..1d9d9cc400d 100644
--- a/src/mongo/db/index/btree_based_access_method.cpp
+++ b/src/mongo/db/index/btree_based_access_method.cpp
@@ -50,7 +50,7 @@ namespace mongo {
MONGO_EXPORT_SERVER_PARAMETER(failIndexKeyTooLong, bool, true);
void BtreeBasedAccessMethod::InvalidateCursorsNotification::aboutToDeleteBucket(
- const DiskLoc& bucket) {
+ const RecordId& bucket) {
BtreeIndexCursor::aboutToDeleteBucket(bucket);
}
@@ -67,7 +67,7 @@ namespace mongo {
// Find the keys for obj, put them in the tree pointing to loc
Status BtreeBasedAccessMethod::insert(OperationContext* txn,
const BSONObj& obj,
- const DiskLoc& loc,
+ const RecordId& loc,
const InsertDeleteOptions& options,
int64_t* numInserted) {
*numInserted = 0;
@@ -127,7 +127,7 @@ namespace mongo {
void BtreeBasedAccessMethod::removeOneKey(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed) {
try {
_newInterface->unindex(txn, key, loc, dupsAllowed);
@@ -149,7 +149,7 @@ namespace mongo {
// Remove the provided doc from the index.
Status BtreeBasedAccessMethod::remove(OperationContext* txn,
const BSONObj &obj,
- const DiskLoc& loc,
+ const RecordId& loc,
const InsertDeleteOptions &options,
int64_t* numDeleted) {
@@ -195,7 +195,7 @@ namespace mongo {
boost::scoped_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(txn, 1));
for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
- cursor->locate(*i, DiskLoc());
+ cursor->locate(*i, RecordId());
}
return Status::OK();
@@ -206,23 +206,23 @@ namespace mongo {
return _newInterface->touch(txn);
}
- DiskLoc BtreeBasedAccessMethod::findSingle(OperationContext* txn, const BSONObj& key) const {
+ RecordId BtreeBasedAccessMethod::findSingle(OperationContext* txn, const BSONObj& key) const {
boost::scoped_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(txn, 1));
- cursor->locate(key, minDiskLoc);
+ cursor->locate(key, RecordId::min());
// A null bucket means the key wasn't found (nor was anything found after it).
if (cursor->isEOF()) {
- return DiskLoc();
+ return RecordId();
}
// We found something but it could be a key after 'key'. Examine what we're pointing at.
if (0 != key.woCompare(cursor->getKey(), BSONObj(), false)) {
// If the keys don't match, return "not found."
- return DiskLoc();
+ return RecordId();
}
- // Return the DiskLoc found.
- return cursor->getDiskLoc();
+ // Return the RecordId found.
+ return cursor->getRecordId();
}
Status BtreeBasedAccessMethod::validate(OperationContext* txn, bool full, int64_t* numKeys,
@@ -241,7 +241,7 @@ namespace mongo {
Status BtreeBasedAccessMethod::validateUpdate(OperationContext* txn,
const BSONObj &from,
const BSONObj &to,
- const DiskLoc &record,
+ const RecordId &record,
const InsertDeleteOptions &options,
UpdateTicket* status) {
@@ -312,7 +312,7 @@ namespace mongo {
Status BtreeBasedAccessMethod::commitBulk(IndexAccessMethod* bulkRaw,
bool mayInterrupt,
bool dupsAllowed,
- set<DiskLoc>* dupsToDrop) {
+ set<RecordId>* dupsToDrop) {
BtreeBasedBulkAccessMethod* bulk = static_cast<BtreeBasedBulkAccessMethod*>(bulkRaw);
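
The findSingle rewrite above is the cleanest illustration of the RecordId::min() idiom: seek the cursor to (key, RecordId::min()) so that any entry for that key is at or after the cursor, then verify the key actually matches before trusting the id. A sketch over std::set standing in for the sorted-data interface:

    #include <set>
    #include <string>
    #include <utility>

    struct RecordId {
        long long repr;
        RecordId() : repr(0) {}                         // null id: "not found"
        explicit RecordId(long long r) : repr(r) {}
        static RecordId min() { return RecordId(-1); }  // sorts before real ids
        bool operator<(const RecordId& r) const { return repr < r.repr; }
    };

    typedef std::string Key;
    // Sorted (key, RecordId) entries, as in a B-tree; duplicate keys allowed.
    typedef std::set<std::pair<Key, RecordId> > Index;

    RecordId findSingle(const Index& index, const Key& key) {
        // Seek to the first entry >= (key, RecordId::min()).
        Index::const_iterator it = index.lower_bound(
            std::make_pair(key, RecordId::min()));
        if (it == index.end()) return RecordId();   // EOF: not found
        if (it->first != key) return RecordId();    // later key: not found
        return it->second;                          // the id for 'key'
    }
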
diff --git a/src/mongo/db/index/btree_based_access_method.h b/src/mongo/db/index/btree_based_access_method.h
index 50fb49f0b66..586d3d6523e 100644
--- a/src/mongo/db/index/btree_based_access_method.h
+++ b/src/mongo/db/index/btree_based_access_method.h
@@ -64,20 +64,20 @@ namespace mongo {
virtual Status insert(OperationContext* txn,
const BSONObj& obj,
- const DiskLoc& loc,
+ const RecordId& loc,
const InsertDeleteOptions& options,
int64_t* numInserted);
virtual Status remove(OperationContext* txn,
const BSONObj& obj,
- const DiskLoc& loc,
+ const RecordId& loc,
const InsertDeleteOptions& options,
int64_t* numDeleted);
virtual Status validateUpdate(OperationContext* txn,
const BSONObj& from,
const BSONObj& to,
- const DiskLoc& loc,
+ const RecordId& loc,
const InsertDeleteOptions& options,
UpdateTicket* ticket);
@@ -96,7 +96,7 @@ namespace mongo {
virtual Status commitBulk( IndexAccessMethod* bulk,
bool mayInterrupt,
bool dupsAllowed,
- std::set<DiskLoc>* dups );
+ std::set<RecordId>* dups );
virtual Status touch(OperationContext* txn, const BSONObj& obj);
@@ -108,7 +108,7 @@ namespace mongo {
virtual long long getSpaceUsedBytes( OperationContext* txn ) const;
// XXX: consider migrating callers to use IndexCursor instead
- virtual DiskLoc findSingle( OperationContext* txn, const BSONObj& key ) const;
+ virtual RecordId findSingle( OperationContext* txn, const BSONObj& key ) const;
/**
* Invalidates all active cursors, which point at the bucket being deleted.
@@ -116,7 +116,7 @@ namespace mongo {
*/
class InvalidateCursorsNotification : public BucketDeletionNotification {
public:
- virtual void aboutToDeleteBucket(const DiskLoc& bucket);
+ virtual void aboutToDeleteBucket(const RecordId& bucket);
};
static InvalidateCursorsNotification invalidateCursors;
@@ -135,7 +135,7 @@ namespace mongo {
private:
void removeOneKey(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed);
scoped_ptr<SortedDataInterface> _newInterface;
@@ -154,7 +154,7 @@ namespace mongo {
// These point into the sets oldKeys and newKeys.
std::vector<BSONObj*> removed, added;
- DiskLoc loc;
+ RecordId loc;
bool dupsAllowed;
};
diff --git a/src/mongo/db/index/btree_based_bulk_access_method.cpp b/src/mongo/db/index/btree_based_bulk_access_method.cpp
index 1936b83ef65..b50769c351e 100644
--- a/src/mongo/db/index/btree_based_bulk_access_method.cpp
+++ b/src/mongo/db/index/btree_based_bulk_access_method.cpp
@@ -56,7 +56,7 @@ namespace mongo {
invariant(version == 1 || version == 0);
}
- typedef std::pair<BSONObj, DiskLoc> Data;
+ typedef std::pair<BSONObj, RecordId> Data;
int operator() (const Data& l, const Data& r) const {
int x = (_version == 1
@@ -91,7 +91,7 @@ namespace mongo {
Status BtreeBasedBulkAccessMethod::insert(OperationContext* txn,
const BSONObj& obj,
- const DiskLoc& loc,
+ const RecordId& loc,
const InsertDeleteOptions& options,
int64_t* numInserted) {
BSONObjSet keys;
@@ -114,7 +114,7 @@ namespace mongo {
return Status::OK();
}
- Status BtreeBasedBulkAccessMethod::commit(set<DiskLoc>* dupsToDrop,
+ Status BtreeBasedBulkAccessMethod::commit(set<RecordId>* dupsToDrop,
bool mayInterrupt,
bool dupsAllowed) {
Timer timer;
@@ -187,4 +187,4 @@ namespace mongo {
} // namespace mongo
#include "mongo/db/sorter/sorter.cpp"
-MONGO_CREATE_SORTER(mongo::BSONObj, mongo::DiskLoc, mongo::BtreeExternalSortComparison);
+MONGO_CREATE_SORTER(mongo::BSONObj, mongo::RecordId, mongo::BtreeExternalSortComparison);
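The MONGO_CREATE_SORTER change re-instantiates the external sorter over (BSONObj, RecordId) pairs, so spilled runs stay ordered by index key with the RecordId as tie-breaker. A sketch of that total order, assuming BSONObj::woCompare with an Ordering; compareData is illustrative, not the comparator from this patch:

    // Data matches the typedef std::pair<BSONObj, RecordId> in the hunk above.
    typedef std::pair<BSONObj, RecordId> Data;
    int compareData(const Data& l, const Data& r, const Ordering& ordering) {
        const int x = l.first.woCompare(r.first, ordering, false /*considerFieldName*/);
        if (x != 0)
            return x;
        // Equal keys: fall back to the RecordId so the order stays total.
        return l.second < r.second ? -1 : (l.second == r.second ? 0 : 1);
    }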
diff --git a/src/mongo/db/index/btree_based_bulk_access_method.h b/src/mongo/db/index/btree_based_bulk_access_method.h
index eb82ec359af..0c731609403 100644
--- a/src/mongo/db/index/btree_based_bulk_access_method.h
+++ b/src/mongo/db/index/btree_based_bulk_access_method.h
@@ -55,11 +55,11 @@ namespace mongo {
virtual Status insert(OperationContext* txn,
const BSONObj& obj,
- const DiskLoc& loc,
+ const RecordId& loc,
const InsertDeleteOptions& options,
int64_t* numInserted);
- Status commit(std::set<DiskLoc>* dupsToDrop, bool mayInterrupt, bool dupsAllowed);
+ Status commit(std::set<RecordId>* dupsToDrop, bool mayInterrupt, bool dupsAllowed);
// Exposed for testing.
static ExternalSortComparison* getComparison(int version, const BSONObj& keyPattern);
@@ -71,7 +71,7 @@ namespace mongo {
virtual Status commitBulk(IndexAccessMethod* bulk,
bool mayInterrupt,
bool dupsAllowed,
- std::set<DiskLoc>* dups) {
+ std::set<RecordId>* dups) {
invariant(this == bulk);
return Status::OK();
}
@@ -90,7 +90,7 @@ namespace mongo {
virtual Status remove(OperationContext* txn,
const BSONObj& obj,
- const DiskLoc& loc,
+ const RecordId& loc,
const InsertDeleteOptions& options,
int64_t* numDeleted) {
return _notAllowed();
@@ -99,7 +99,7 @@ namespace mongo {
virtual Status validateUpdate(OperationContext* txn,
const BSONObj& from,
const BSONObj& to,
- const DiskLoc& loc,
+ const RecordId& loc,
const InsertDeleteOptions& options,
UpdateTicket* ticket) {
return _notAllowed();
@@ -132,7 +132,7 @@ namespace mongo {
OperationContext* getOperationContext() { return _txn; }
private:
- typedef Sorter<BSONObj, DiskLoc> BSONObjExternalSorter;
+ typedef Sorter<BSONObj, RecordId> BSONObjExternalSorter;
Status _notAllowed() const {
return Status(ErrorCodes::InternalError, "cannot use bulk for this yet");
diff --git a/src/mongo/db/index/btree_index_cursor.cpp b/src/mongo/db/index/btree_index_cursor.cpp
index eb843abe30a..ea305a6e417 100644
--- a/src/mongo/db/index/btree_index_cursor.cpp
+++ b/src/mongo/db/index/btree_index_cursor.cpp
@@ -58,7 +58,7 @@ namespace mongo {
bool BtreeIndexCursor::isEOF() const { return _cursor->isEOF(); }
- void BtreeIndexCursor::aboutToDeleteBucket(const DiskLoc& bucket) {
+ void BtreeIndexCursor::aboutToDeleteBucket(const RecordId& bucket) {
SimpleMutex::scoped_lock lock(_activeCursorsMutex);
for (unordered_set<BtreeIndexCursor*>::iterator i = _activeCursors.begin();
i != _activeCursors.end(); ++i) {
@@ -69,7 +69,7 @@ namespace mongo {
Status BtreeIndexCursor::seek(const BSONObj& position) {
_cursor->locate(position,
- 1 == _cursor->getDirection() ? minDiskLoc : maxDiskLoc);
+ 1 == _cursor->getDirection() ? RecordId::min() : RecordId::max());
return Status::OK();
}
@@ -77,7 +77,7 @@ namespace mongo {
// XXX This used a hard-coded direction of 1 and is only correct in the forward direction.
invariant(_cursor->getDirection() == 1);
_cursor->locate(position,
- afterKey ? maxDiskLoc : minDiskLoc);
+ afterKey ? RecordId::max() : RecordId::min());
}
bool BtreeIndexCursor::pointsAt(const BtreeIndexCursor& other) {
@@ -115,8 +115,8 @@ namespace mongo {
return _cursor->getKey();
}
- DiskLoc BtreeIndexCursor::getValue() const {
- return _cursor->getDiskLoc();
+ RecordId BtreeIndexCursor::getValue() const {
+ return _cursor->getRecordId();
}
void BtreeIndexCursor::next() {
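Seeding locate() with RecordId::min() on forward scans and RecordId::max() on reverse scans lands the cursor at the correct end of the run of entries sharing the sought key. The dispatch, condensed into a hypothetical helper over the same cursor API:

    // Illustrative only; folds the two locate() call sites above into one.
    void seekDirectional(SortedDataInterface::Cursor* cursor, const BSONObj& position) {
        const bool forward = (1 == cursor->getDirection());
        cursor->locate(position, forward ? RecordId::min() : RecordId::max());
    }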
diff --git a/src/mongo/db/index/btree_index_cursor.h b/src/mongo/db/index/btree_index_cursor.h
index d92e0099143..bff946979e3 100644
--- a/src/mongo/db/index/btree_index_cursor.h
+++ b/src/mongo/db/index/btree_index_cursor.h
@@ -48,7 +48,7 @@ namespace mongo {
/**
* Called from btree_logic.cpp when we're about to delete a Btree bucket.
*/
- static void aboutToDeleteBucket(const DiskLoc& bucket);
+ static void aboutToDeleteBucket(const RecordId& bucket);
virtual Status seek(const BSONObj& position);
@@ -71,7 +71,7 @@ namespace mongo {
const std::vector<bool>& keyEndInclusive);
virtual BSONObj getKey() const;
- virtual DiskLoc getValue() const;
+ virtual RecordId getValue() const;
virtual void next();
/**
diff --git a/src/mongo/db/index/haystack_access_method.cpp b/src/mongo/db/index/haystack_access_method.cpp
index c79fe77c8c9..d282ce32d43 100644
--- a/src/mongo/db/index/haystack_access_method.cpp
+++ b/src/mongo/db/index/haystack_access_method.cpp
@@ -97,18 +97,18 @@ namespace mongo {
BSONObj key = bb.obj();
- unordered_set<DiskLoc, DiskLoc::Hasher> thisPass;
+ unordered_set<RecordId, RecordId::Hasher> thisPass;
scoped_ptr<PlanExecutor> exec(InternalPlanner::indexScan(txn, collection,
_descriptor, key, key, true));
PlanExecutor::ExecState state;
- DiskLoc loc;
+ RecordId loc;
while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
if (hopper.limitReached()) { break; }
- pair<unordered_set<DiskLoc, DiskLoc::Hasher>::iterator, bool> p
+ pair<unordered_set<RecordId, RecordId::Hasher>::iterator, bool> p
= thisPass.insert(loc);
- // If a new element was inserted (haven't seen the DiskLoc before), p.second
+ // If a new element was inserted (haven't seen the RecordId before), p.second
// is true.
if (p.second) {
hopper.consider(loc);
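The thisPass set above exploits std::unordered_set::insert returning an (iterator, bool) pair whose bool is true only when the element was newly inserted, so each RecordId is considered at most once per pass. A self-contained sketch of the same idiom (keyed on a raw integer here; the real code keys the set on RecordId with RecordId::Hasher):

    #include <cstdint>
    #include <unordered_set>

    bool firstSighting(std::unordered_set<int64_t>& seen, int64_t id) {
        // insert().second is true only if 'id' was not already present.
        return seen.insert(id).second;
    }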
diff --git a/src/mongo/db/index/haystack_access_method_internal.h b/src/mongo/db/index/haystack_access_method_internal.h
index 792a708977f..14a5bf9f05e 100644
--- a/src/mongo/db/index/haystack_access_method_internal.h
+++ b/src/mongo/db/index/haystack_access_method_internal.h
@@ -43,7 +43,7 @@ namespace mongo {
* @param n The centroid that we're searching
* @param maxDistance The maximum distance to consider from that point
* @param limit The maximum number of results to return
- * @param geoField Which field in the provided DiskLoc has the point to test.
+ * @param geoField Which field of the document at the provided RecordId holds the point to test.
*/
GeoHaystackSearchHopper(OperationContext* txn,
const BSONObj& nearObj,
@@ -60,7 +60,7 @@ namespace mongo {
// Consider the point in loc, and keep it if it's within _maxDistance (and we have space for
// it)
- void consider(const DiskLoc& loc) {
+ void consider(const RecordId& loc) {
if (limitReached()) return;
Point p(_collection->docFor(_txn, loc).getFieldDotted(_geoField));
if (distance(_near, p) > _maxDistance)
@@ -86,7 +86,7 @@ namespace mongo {
double _maxDistance;
unsigned _limit;
const std::string _geoField;
- std::vector<DiskLoc> _locs;
+ std::vector<RecordId> _locs;
};
} // namespace mongo
diff --git a/src/mongo/db/index/index_access_method.h b/src/mongo/db/index/index_access_method.h
index 59cf48b9f1c..58d889edaf7 100644
--- a/src/mongo/db/index/index_access_method.h
+++ b/src/mongo/db/index/index_access_method.h
@@ -68,7 +68,7 @@ namespace mongo {
*/
virtual Status insert(OperationContext* txn,
const BSONObj& obj,
- const DiskLoc& loc,
+ const RecordId& loc,
const InsertDeleteOptions& options,
int64_t* numInserted) = 0;
@@ -78,7 +78,7 @@ namespace mongo {
*/
virtual Status remove(OperationContext* txn,
const BSONObj& obj,
- const DiskLoc& loc,
+ const RecordId& loc,
const InsertDeleteOptions& options,
int64_t* numDeleted) = 0;
@@ -95,7 +95,7 @@ namespace mongo {
virtual Status validateUpdate(OperationContext* txn,
const BSONObj& from,
const BSONObj& to,
- const DiskLoc& loc,
+ const RecordId& loc,
const InsertDeleteOptions& options,
UpdateTicket* ticket) = 0;
@@ -191,12 +191,12 @@ namespace mongo {
* @param mayInterrupt - is this commit interruptible (will cancel)
* @param dupsAllowed - if false, error or fill 'dups' if any duplicate values are found
* @param dups - if NULL, error out on dups if not allowed
- * if not NULL, put the bad DiskLocs there
+ * if not NULL, put the bad RecordIds there
*/
virtual Status commitBulk( IndexAccessMethod* bulk,
bool mayInterrupt,
bool dupsAllowed,
- std::set<DiskLoc>* dups ) = 0;
+ std::set<RecordId>* dups ) = 0;
};
/**
diff --git a/src/mongo/db/index/index_cursor.h b/src/mongo/db/index/index_cursor.h
index 31dbb62fc49..38187055a92 100644
--- a/src/mongo/db/index/index_cursor.h
+++ b/src/mongo/db/index/index_cursor.h
@@ -85,7 +85,7 @@ namespace mongo {
virtual BSONObj getKey() const = 0;
// Current value we point at. Assumes !isEOF().
- virtual DiskLoc getValue() const = 0;
+ virtual RecordId getValue() const = 0;
//
// Yielding support
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index 57f5cf5fb2b..5d0199aef9a 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -830,7 +830,7 @@ namespace {
BSON("create" << nsToCollectionSubstring(ns)));
}
- StatusWith<DiskLoc> status = collection->insertDocument( txn, js, true );
+ StatusWith<RecordId> status = collection->insertDocument( txn, js, true );
uassertStatusOK( status.getStatus() );
repl::logOp(txn, "i", ns, js);
wunit.commit();
diff --git a/src/mongo/db/invalidation_type.h b/src/mongo/db/invalidation_type.h
index b9aa7dde213..d51df69a67c 100644
--- a/src/mongo/db/invalidation_type.h
+++ b/src/mongo/db/invalidation_type.h
@@ -31,11 +31,11 @@
namespace mongo {
enum InvalidationType {
- // The DiskLoc is about to be deleted. The receiver of this invalidate call cannot use
- // the DiskLoc after it returns from the invalidate.
+ // The RecordId is about to be deleted. The receiver of this invalidate call cannot use
+ // the RecordId after it returns from the invalidate.
INVALIDATION_DELETION,
- // The DiskLoc's contents are about to change.
+ // The RecordId's contents are about to change.
INVALIDATION_MUTATION,
};
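A stage that buffers RecordIds has to honor both variants: after INVALIDATION_DELETION the RecordId may not be dereferenced again, while INVALIDATION_MUTATION only signals that the record's bytes are changing. A hedged sketch with hypothetical member names (_buffered is assumed, not from this patch):

    // Hypothetical stage fragment, not from this patch.
    void invalidate(const RecordId& dl, InvalidationType type) {
        if (INVALIDATION_DELETION == type) {
            _buffered.erase(dl);  // unusable once invalidate() returns
            return;
        }
        // INVALIDATION_MUTATION: the location stays valid; a stage caching
        // the document bytes would re-fetch or discard its copy here.
    }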
diff --git a/src/mongo/db/query/internal_plans.h b/src/mongo/db/query/internal_plans.h
index 88c27b8132b..5f992a65b73 100644
--- a/src/mongo/db/query/internal_plans.h
+++ b/src/mongo/db/query/internal_plans.h
@@ -53,10 +53,10 @@ namespace mongo {
enum IndexScanOptions {
// The client is interested in the default outputs of an index scan: BSONObj of the key,
- // DiskLoc of the record that's indexed. The client does its own fetching if required.
+ // RecordId of the record that's indexed. The client does its own fetching if required.
IXSCAN_DEFAULT = 0,
- // The client wants the fetched object and the DiskLoc that refers to it. Delegating
+ // The client wants the fetched object and the RecordId that refers to it. Delegating
// the fetch to the runner allows fetching outside of a lock.
IXSCAN_FETCH = 1,
};
@@ -68,7 +68,7 @@ namespace mongo {
const StringData& ns,
Collection* collection,
const Direction direction = FORWARD,
- const DiskLoc startLoc = DiskLoc()) {
+ const RecordId startLoc = RecordId()) {
WorkingSet* ws = new WorkingSet();
if (NULL == collection) {
diff --git a/src/mongo/db/query/new_find.cpp b/src/mongo/db/query/new_find.cpp
index 3d567f9efe9..7e0642ee75a 100644
--- a/src/mongo/db/query/new_find.cpp
+++ b/src/mongo/db/query/new_find.cpp
@@ -457,12 +457,12 @@ namespace mongo {
"$gt or $gte over the 'ts' field.");
}
- DiskLoc startLoc = DiskLoc().setInvalid();
+ RecordId startLoc = RecordId().setInvalid();
// See if the RecordStore supports the oplogStartHack
const BSONElement tsElem = extractOplogTsOptime(tsExpr);
if (tsElem.type() == Timestamp) {
- StatusWith<DiskLoc> goal = oploghack::keyForOptime(tsElem._opTime());
+ StatusWith<RecordId> goal = oploghack::keyForOptime(tsElem._opTime());
if (goal.isOK()) {
startLoc = collection->getRecordStore()->oplogStartHack(txn, goal.getValue());
}
@@ -485,7 +485,7 @@ namespace mongo {
invariant(execStatus.isOK());
scoped_ptr<PlanExecutor> exec(rawExec);
- // The stage returns a DiskLoc of where to start.
+ // The stage returns a RecordId of where to start.
PlanExecutor::ExecState state = exec->getNext(NULL, &startLoc);
// This is normal. The start of the oplog is the beginning of the collection.
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index 3a46d47a9ce..2dc8a7db2e2 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -251,11 +251,11 @@ namespace mongo {
return !_killed;
}
- void PlanExecutor::invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ void PlanExecutor::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
if (!_killed) { _root->invalidate(txn, dl, type); }
}
- PlanExecutor::ExecState PlanExecutor::getNext(BSONObj* objOut, DiskLoc* dlOut) {
+ PlanExecutor::ExecState PlanExecutor::getNext(BSONObj* objOut, RecordId* dlOut) {
if (_killed) { return PlanExecutor::DEAD; }
// When a stage requests a yield for document fetch, it gives us back a RecordFetcher*
diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h
index 50a11c0d2f8..aa5c845fe17 100644
--- a/src/mongo/db/query/plan_executor.h
+++ b/src/mongo/db/query/plan_executor.h
@@ -38,7 +38,7 @@ namespace mongo {
class BSONObj;
class Collection;
- class DiskLoc;
+ class RecordId;
class PlanStage;
class PlanExecutor;
struct PlanStageStats;
@@ -88,7 +88,7 @@ namespace mongo {
// 0. Let's say you have PlanExecutor* exec.
//
// 1. Register your PlanExecutor with ClientCursor. Registered executors are informed
- // about DiskLoc deletions and namespace invalidation, as well as other important
+ // about RecordId deletions and namespace invalidation, as well as other important
// events. Do this by calling registerExec() on the executor. Alternatively, this can
// be done per-yield (as described below).
//
@@ -247,7 +247,7 @@ namespace mongo {
*
* If a YIELD_AUTO policy is set, then this method may yield.
*/
- ExecState getNext(BSONObj* objOut, DiskLoc* dlOut);
+ ExecState getNext(BSONObj* objOut, RecordId* dlOut);
/**
* Returns 'true' if the plan is done producing results (or writing), 'false' otherwise.
@@ -296,7 +296,7 @@ namespace mongo {
* state. As such, if the plan yields, it must be notified of relevant writes so that
* we can ensure that it doesn't crash if we try to access invalid state.
*/
- void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type);
+ void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
/**
* Helper method to aid in displaying an ExecState for debug or other recreational purposes.
diff --git a/src/mongo/db/query/query_solution.cpp b/src/mongo/db/query/query_solution.cpp
index 34a98a46d9c..5f1f698886c 100644
--- a/src/mongo/db/query/query_solution.cpp
+++ b/src/mongo/db/query/query_solution.cpp
@@ -429,9 +429,9 @@ namespace mongo {
}
bool IndexScanNode::sortedByDiskLoc() const {
- // Indices use DiskLoc as an additional key after the actual index key.
+ // Indices use RecordId as an additional key after the actual index key.
// Therefore, if we're only examining one index key, the output is sorted
- // by DiskLoc.
+ // by RecordId.
// If it's a simple range query, it's easy to determine if the range is a point.
if (bounds.isSimpleRange) {
diff --git a/src/mongo/db/query/query_solution.h b/src/mongo/db/query/query_solution.h
index 15921450f9d..81a93d2c26c 100644
--- a/src/mongo/db/query/query_solution.h
+++ b/src/mongo/db/query/query_solution.h
@@ -459,10 +459,10 @@ namespace mongo {
}
bool sortedByDiskLoc() const {
- // Projections destroy the DiskLoc. By returning true here, this kind of implies that a
+ // Projections destroy the RecordId. By returning true here, this kind of implies that a
// fetch could still be done upstream.
//
- // Perhaps this should be false to not imply that there *is* a DiskLoc? Kind of a
+ // Perhaps this should be false to not imply that there *is* a RecordId? Kind of a
// corner case.
return children[0]->sortedByDiskLoc();
}
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 0f68a52a6a8..aeb85b9fde5 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -97,7 +97,7 @@ namespace repl {
}
// so we can fail the same way
- void checkOplogInsert( StatusWith<DiskLoc> result ) {
+ void checkOplogInsert( StatusWith<RecordId> result ) {
massert( 17322,
str::stream() << "write to oplog failed: " << result.getStatus().toString(),
result.isOK() );
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 7a602df6304..9af312db360 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -134,7 +134,7 @@ namespace {
set<string> collectionsToResync;
OpTime commonPoint;
- DiskLoc commonPointOurDiskloc;
+ RecordId commonPointOurDiskloc;
int rbid; // remote server's current rollback sequence #
};
@@ -256,7 +256,7 @@ namespace {
InternalPlanner::BACKWARD));
BSONObj ourObj;
- DiskLoc ourLoc;
+ RecordId ourLoc;
if (PlanExecutor::ADVANCED != exec->getNext(&ourObj, &ourLoc)) {
throw RSFatalException("our oplog empty or unreadable");
@@ -589,12 +589,12 @@ namespace {
// TODO: IIRC cappedTruncateAfter does not handle a completely empty collection.
// this will be crazy slow if there is no _id index.
long long start = Listener::getElapsedTimeMillis();
- DiskLoc loc = Helpers::findOne(txn, collection, pattern, false);
+ RecordId loc = Helpers::findOne(txn, collection, pattern, false);
if (Listener::getElapsedTimeMillis() - start > 200)
log() << "replSet warning roll back slow no _id index for "
<< doc.ns << " perhaps?";
// would be faster but requires index:
- // DiskLoc loc = Helpers::findById(nsd, pattern);
+ // RecordId loc = Helpers::findById(nsd, pattern);
if (!loc.isNull()) {
try {
collection->temp_cappedTruncateAfter(txn, loc, true);
diff --git a/src/mongo/db/repl/sync.cpp b/src/mongo/db/repl/sync.cpp
index ee0790fa78a..bf2294b34d6 100644
--- a/src/mongo/db/repl/sync.cpp
+++ b/src/mongo/db/repl/sync.cpp
@@ -133,7 +133,7 @@ namespace repl {
Collection* collection = ctx.db()->getOrCreateCollection(txn, ns);
invariant(collection != NULL); // should never happen
- StatusWith<DiskLoc> result = collection->insertDocument(txn, missingObj, true);
+ StatusWith<RecordId> result = collection->insertDocument(txn, missingObj, true);
uassert(15917,
str::stream() << "failed to insert missing doc: " << result.toString(),
result.isOK() );
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.cpp b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
index a1048a4d401..2a2d65007e8 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
@@ -86,7 +86,7 @@ namespace mongo {
return md.indexes[offset].multikey;
}
- DiskLoc BSONCollectionCatalogEntry::getIndexHead( OperationContext* txn,
+ RecordId BSONCollectionCatalogEntry::getIndexHead( OperationContext* txn,
const StringData& indexName ) const {
MetaData md = _getMetaData( txn );
@@ -186,7 +186,7 @@ namespace mongo {
IndexMetaData imd;
imd.spec = idx["spec"].Obj().getOwned();
imd.ready = idx["ready"].trueValue();
- imd.head = DiskLoc( idx["head_a"].Int(),
+ imd.head = RecordId( idx["head_a"].Int(),
idx["head_b"].Int() );
imd.multikey = idx["multikey"].trueValue();
indexes.push_back( imd );
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.h b/src/mongo/db/storage/bson_collection_catalog_entry.h
index 5af770ae4d6..188a7a8430a 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.h
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.h
@@ -62,7 +62,7 @@ namespace mongo {
virtual bool isIndexMultikey( OperationContext* txn,
const StringData& indexName) const;
- virtual DiskLoc getIndexHead( OperationContext* txn,
+ virtual RecordId getIndexHead( OperationContext* txn,
const StringData& indexName ) const;
virtual bool isIndexReady( OperationContext* txn,
@@ -72,7 +72,7 @@ namespace mongo {
struct IndexMetaData {
IndexMetaData() {}
- IndexMetaData( BSONObj s, bool r, DiskLoc h, bool m )
+ IndexMetaData( BSONObj s, bool r, RecordId h, bool m )
: spec( s ), ready( r ), head( h ), multikey( m ) {}
void updateTTLSetting( long long newExpireSeconds );
@@ -81,7 +81,7 @@ namespace mongo {
BSONObj spec;
bool ready;
- DiskLoc head;
+ RecordId head;
bool multikey;
};
diff --git a/src/mongo/db/storage/capped_callback.h b/src/mongo/db/storage/capped_callback.h
index c5b1954f4ff..a86c9e9d2dc 100644
--- a/src/mongo/db/storage/capped_callback.h
+++ b/src/mongo/db/storage/capped_callback.h
@@ -48,7 +48,7 @@ namespace mongo {
/**
* This will be called right before loc is deleted when wrapping.
*/
- virtual Status aboutToDeleteCapped( OperationContext* txn, const DiskLoc& loc ) = 0;
+ virtual Status aboutToDeleteCapped( OperationContext* txn, const RecordId& loc ) = 0;
};
}
diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
index d6f1e8fa0af..499e5f64d6c 100644
--- a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
+++ b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
@@ -38,12 +38,12 @@ namespace mongo {
class EmptyRecordIterator: public RecordIterator {
public:
virtual bool isEOF() { return true; }
- virtual DiskLoc curr() { return DiskLoc(); }
- virtual DiskLoc getNext() { return DiskLoc(); }
- virtual void invalidate(const DiskLoc& dl) { }
+ virtual RecordId curr() { return RecordId(); }
+ virtual RecordId getNext() { return RecordId(); }
+ virtual void invalidate(const RecordId& dl) { }
virtual void saveState() { }
virtual bool restoreState(OperationContext* txn) { return false; }
- virtual RecordData dataFor( const DiskLoc& loc ) const {
+ virtual RecordData dataFor( const RecordId& loc ) const {
invariant( false );
}
};
@@ -72,42 +72,42 @@ namespace mongo {
return 0;
}
- virtual RecordData dataFor( OperationContext* txn, const DiskLoc& loc) const {
+ virtual RecordData dataFor( OperationContext* txn, const RecordId& loc) const {
return RecordData( _dummy.objdata(), _dummy.objsize() );
}
- virtual bool findRecord( OperationContext* txn, const DiskLoc& loc, RecordData* rd ) const {
+ virtual bool findRecord( OperationContext* txn, const RecordId& loc, RecordData* rd ) const {
return false;
}
- virtual void deleteRecord( OperationContext* txn, const DiskLoc& dl ) {}
+ virtual void deleteRecord( OperationContext* txn, const RecordId& dl ) {}
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const char* data,
int len,
bool enforceQuota ) {
_numInserts++;
- return StatusWith<DiskLoc>( DiskLoc( 6, 4 ) );
+ return StatusWith<RecordId>( RecordId( 6, 4 ) );
}
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const DocWriter* doc,
bool enforceQuota ) {
_numInserts++;
- return StatusWith<DiskLoc>( DiskLoc( 6, 4 ) );
+ return StatusWith<RecordId>( RecordId( 6, 4 ) );
}
- virtual StatusWith<DiskLoc> updateRecord( OperationContext* txn,
- const DiskLoc& oldLocation,
+ virtual StatusWith<RecordId> updateRecord( OperationContext* txn,
+ const RecordId& oldLocation,
const char* data,
int len,
bool enforceQuota,
UpdateMoveNotifier* notifier ) {
- return StatusWith<DiskLoc>( oldLocation );
+ return StatusWith<RecordId>( oldLocation );
}
virtual Status updateWithDamages( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages ) {
@@ -115,7 +115,7 @@ namespace mongo {
}
virtual RecordIterator* getIterator( OperationContext* txn,
- const DiskLoc& start,
+ const RecordId& start,
const CollectionScanParams::Direction& dir ) const {
return new EmptyRecordIterator();
}
@@ -133,7 +133,7 @@ namespace mongo {
virtual Status truncate( OperationContext* txn ) { return Status::OK(); }
virtual void temp_cappedTruncateAfter(OperationContext* txn,
- DiskLoc end,
+ RecordId end,
bool inclusive) { }
virtual bool compactSupported() const { return false; }
diff --git a/src/mongo/db/storage/in_memory/in_memory_btree_impl.cpp b/src/mongo/db/storage/in_memory/in_memory_btree_impl.cpp
index 396be5f5375..8a86917287a 100644
--- a/src/mongo/db/storage/in_memory/in_memory_btree_impl.cpp
+++ b/src/mongo/db/storage/in_memory/in_memory_btree_impl.cpp
@@ -74,8 +74,8 @@ namespace {
return Status(ErrorCodes::DuplicateKey, sb.str());
}
- bool isDup(const IndexSet& data, const BSONObj& key, DiskLoc loc) {
- const IndexSet::const_iterator it = data.find(IndexKeyEntry(key, DiskLoc()));
+ bool isDup(const IndexSet& data, const BSONObj& key, RecordId loc) {
+ const IndexSet::const_iterator it = data.find(IndexKeyEntry(key, RecordId()));
if (it == data.end())
return false;
@@ -93,8 +93,8 @@ namespace {
invariant(_data->empty());
}
- Status addKey(const BSONObj& key, const DiskLoc& loc) {
- // inserts should be in ascending (key, DiskLoc) order.
+ Status addKey(const BSONObj& key, const RecordId& loc) {
+ // inserts should be in ascending (key, RecordId) order.
if ( key.objsize() >= TempKeyMaxSize ) {
return Status(ErrorCodes::KeyTooLong, "key too big");
@@ -105,11 +105,11 @@ namespace {
invariant(!hasFieldNames(key));
if (!_data->empty()) {
- // Compare specified key with last inserted key, ignoring its DiskLoc
- int cmp = _comparator.compare(IndexKeyEntry(key, DiskLoc()), *_last);
+ // Compare specified key with last inserted key, ignoring its RecordId
+ int cmp = _comparator.compare(IndexKeyEntry(key, RecordId()), *_last);
if (cmp < 0 || (_dupsAllowed && cmp == 0 && loc < _last->loc)) {
return Status(ErrorCodes::InternalError,
- "expected ascending (key, DiskLoc) order in bulk builder");
+ "expected ascending (key, RecordId) order in bulk builder");
}
else if (!_dupsAllowed && cmp == 0 && loc != _last->loc) {
return dupKeyError(key);
@@ -129,7 +129,7 @@ namespace {
const bool _dupsAllowed;
IndexEntryComparison _comparator; // used by the bulk builder to detect duplicate keys
- IndexSet::const_iterator _last; // or (key, DiskLoc) ordering violations
+ IndexSet::const_iterator _last; // or (key, RecordId) ordering violations
};
class InMemoryBtreeImpl : public SortedDataInterface {
@@ -146,7 +146,7 @@ namespace {
virtual Status insert(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed) {
invariant(!loc.isNull());
@@ -174,7 +174,7 @@ namespace {
virtual void unindex(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed) {
invariant(!loc.isNull());
invariant(loc.isValid());
@@ -199,7 +199,7 @@ namespace {
return _currentKeySize + ( sizeof(IndexKeyEntry) * _data->size() );
}
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const DiskLoc& loc) {
+ virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc) {
invariant(!hasFieldNames(key));
if (isDup(*_data, key, loc))
return dupKeyError(key);
@@ -235,11 +235,11 @@ namespace {
return _it == other._it;
}
- virtual void aboutToDeleteBucket(const DiskLoc& bucket) {
+ virtual void aboutToDeleteBucket(const RecordId& bucket) {
invariant(!"aboutToDeleteBucket should not be called");
}
- virtual bool locate(const BSONObj& keyRaw, const DiskLoc& loc) {
+ virtual bool locate(const BSONObj& keyRaw, const RecordId& loc) {
const BSONObj key = stripFieldNames(keyRaw);
_it = _data.lower_bound(IndexKeyEntry(key, loc)); // lower_bound is >= key
if ( _it == _data.end() ) {
@@ -266,7 +266,7 @@ namespace {
keyEnd,
keyEndInclusive,
1), // forward
- DiskLoc()));
+ RecordId()));
}
void advanceTo(const BSONObj &keyBegin,
@@ -282,7 +282,7 @@ namespace {
return _it->key;
}
- virtual DiskLoc getDiskLoc() const {
+ virtual RecordId getRecordId() const {
return _it->loc;
}
@@ -320,7 +320,7 @@ namespace {
// For save/restorePosition since _it may be invalidated during a yield.
bool _savedAtEnd;
BSONObj _savedKey;
- DiskLoc _savedLoc;
+ RecordId _savedLoc;
};
@@ -345,11 +345,11 @@ namespace {
return _it == other._it;
}
- virtual void aboutToDeleteBucket(const DiskLoc& bucket) {
+ virtual void aboutToDeleteBucket(const RecordId& bucket) {
invariant(!"aboutToDeleteBucket should not be called");
}
- virtual bool locate(const BSONObj& keyRaw, const DiskLoc& loc) {
+ virtual bool locate(const BSONObj& keyRaw, const RecordId& loc) {
const BSONObj key = stripFieldNames(keyRaw);
_it = lower_bound(IndexKeyEntry(key, loc)); // lower_bound is <= query
@@ -378,7 +378,7 @@ namespace {
keyEnd,
keyEndInclusive,
-1), // reverse
- DiskLoc()));
+ RecordId()));
}
void advanceTo(const BSONObj &keyBegin,
@@ -394,7 +394,7 @@ namespace {
return _it->key;
}
- virtual DiskLoc getDiskLoc() const {
+ virtual RecordId getRecordId() const {
return _it->loc;
}
@@ -446,7 +446,7 @@ namespace {
// For save/restorePosition since _it may be invalidated during a yield.
bool _savedAtEnd;
BSONObj _savedKey;
- DiskLoc _savedLoc;
+ RecordId _savedLoc;
};
virtual SortedDataInterface::Cursor* newCursor(OperationContext* txn, int direction) const {
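The bulk builder's addKey above accepts input only in non-decreasing (key, RecordId) order: a strictly larger key always passes, and an equal key passes only if its RecordId does not go backwards (and, when duplicates are disallowed, only if it repeats the previous RecordId exactly). The rule condensed into a hypothetical predicate, where cmp is the key-only comparison used above:

    // acceptsNext is illustrative; addKey above reports the two failure
    // modes as InternalError and DuplicateKey respectively.
    bool acceptsNext(int cmp, const RecordId& lastLoc, const RecordId& nextLoc,
                     bool dupsAllowed) {
        if (cmp < 0) return false;          // keys must not decrease
        if (cmp > 0) return true;           // strictly larger key: always fine
        if (dupsAllowed)
            return !(nextLoc < lastLoc);    // same key: loc must not decrease
        return nextLoc == lastLoc;          // same key allowed only as exact repeat
    }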
diff --git a/src/mongo/db/storage/in_memory/in_memory_record_store.cpp b/src/mongo/db/storage/in_memory/in_memory_record_store.cpp
index 6afa48c5fa5..fbca40124fb 100644
--- a/src/mongo/db/storage/in_memory/in_memory_record_store.cpp
+++ b/src/mongo/db/storage/in_memory/in_memory_record_store.cpp
@@ -43,7 +43,7 @@
namespace mongo {
class InMemoryRecordStore::InsertChange : public RecoveryUnit::Change {
public:
- InsertChange(Data* data, DiskLoc loc) :_data(data), _loc(loc) {}
+ InsertChange(Data* data, RecordId loc) :_data(data), _loc(loc) {}
virtual void commit() {}
virtual void rollback() {
Records::iterator it = _data->records.find(_loc);
@@ -55,13 +55,13 @@ namespace mongo {
private:
Data* const _data;
- const DiskLoc _loc;
+ const RecordId _loc;
};
// Works for both removes and updates
class InMemoryRecordStore::RemoveChange : public RecoveryUnit::Change {
public:
- RemoveChange(Data* data, DiskLoc loc, const InMemoryRecord& rec)
+ RemoveChange(Data* data, RecordId loc, const InMemoryRecord& rec)
:_data(data), _loc(loc), _rec(rec)
{}
@@ -78,7 +78,7 @@ namespace mongo {
private:
Data* const _data;
- const DiskLoc _loc;
+ const RecordId _loc;
const InMemoryRecord _rec;
};
@@ -136,12 +136,12 @@ namespace mongo {
const char* InMemoryRecordStore::name() const { return "InMemory"; }
- RecordData InMemoryRecordStore::dataFor( OperationContext* txn, const DiskLoc& loc ) const {
+ RecordData InMemoryRecordStore::dataFor( OperationContext* txn, const RecordId& loc ) const {
return recordFor(loc)->toRecordData();
}
const InMemoryRecordStore::InMemoryRecord* InMemoryRecordStore::recordFor(
- const DiskLoc& loc) const {
+ const RecordId& loc) const {
Records::const_iterator it = _data->records.find(loc);
if ( it == _data->records.end() ) {
error() << "InMemoryRecordStore::recordFor cannot find record for " << ns()
@@ -151,7 +151,7 @@ namespace mongo {
return &it->second;
}
- InMemoryRecordStore::InMemoryRecord* InMemoryRecordStore::recordFor(const DiskLoc& loc) {
+ InMemoryRecordStore::InMemoryRecord* InMemoryRecordStore::recordFor(const RecordId& loc) {
Records::iterator it = _data->records.find(loc);
if ( it == _data->records.end() ) {
error() << "InMemoryRecordStore::recordFor cannot find record for " << ns()
@@ -162,7 +162,7 @@ namespace mongo {
}
bool InMemoryRecordStore::findRecord( OperationContext* txn,
- const DiskLoc& loc, RecordData* rd ) const {
+ const RecordId& loc, RecordData* rd ) const {
Records::const_iterator it = _data->records.find(loc);
if ( it == _data->records.end() ) {
return false;
@@ -171,7 +171,7 @@ namespace mongo {
return true;
}
- void InMemoryRecordStore::deleteRecord(OperationContext* txn, const DiskLoc& loc) {
+ void InMemoryRecordStore::deleteRecord(OperationContext* txn, const RecordId& loc) {
InMemoryRecord* rec = recordFor(loc);
txn->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *rec));
_data->dataSize -= rec->size;
@@ -195,7 +195,7 @@ namespace mongo {
while (cappedAndNeedDelete(txn)) {
invariant(!_data->records.empty());
- DiskLoc oldest = _data->records.begin()->first;
+ RecordId oldest = _data->records.begin()->first;
if (_cappedDeleteCallback)
uassertStatusOK(_cappedDeleteCallback->aboutToDeleteCapped(txn, oldest));
@@ -204,35 +204,35 @@ namespace mongo {
}
}
- StatusWith<DiskLoc> InMemoryRecordStore::extractAndCheckLocForOplog(const char* data,
+ StatusWith<RecordId> InMemoryRecordStore::extractAndCheckLocForOplog(const char* data,
int len) const {
- StatusWith<DiskLoc> status = oploghack::extractKey(data, len);
+ StatusWith<RecordId> status = oploghack::extractKey(data, len);
if (!status.isOK())
return status;
if (!_data->records.empty() && status.getValue() <= _data->records.rbegin()->first)
- return StatusWith<DiskLoc>(ErrorCodes::BadValue, "ts not higher than highest");
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "ts not higher than highest");
return status;
}
- StatusWith<DiskLoc> InMemoryRecordStore::insertRecord(OperationContext* txn,
+ StatusWith<RecordId> InMemoryRecordStore::insertRecord(OperationContext* txn,
const char* data,
int len,
bool enforceQuota) {
if (_isCapped && len > _cappedMaxSize) {
// We use dataSize for capped rollover and we don't want to delete everything if we know
// this won't fit.
- return StatusWith<DiskLoc>(ErrorCodes::BadValue,
+ return StatusWith<RecordId>(ErrorCodes::BadValue,
"object to insert exceeds cappedMaxSize");
}
InMemoryRecord rec(len);
memcpy(rec.data.get(), data, len);
- DiskLoc loc;
+ RecordId loc;
if (_data->isOplog) {
- StatusWith<DiskLoc> status = extractAndCheckLocForOplog(data, len);
+ StatusWith<RecordId> status = extractAndCheckLocForOplog(data, len);
if (!status.isOK())
return status;
loc = status.getValue();
@@ -247,26 +247,26 @@ namespace mongo {
cappedDeleteAsNeeded(txn);
- return StatusWith<DiskLoc>(loc);
+ return StatusWith<RecordId>(loc);
}
- StatusWith<DiskLoc> InMemoryRecordStore::insertRecord(OperationContext* txn,
+ StatusWith<RecordId> InMemoryRecordStore::insertRecord(OperationContext* txn,
const DocWriter* doc,
bool enforceQuota) {
const int len = doc->documentSize();
if (_isCapped && len > _cappedMaxSize) {
// We use dataSize for capped rollover and we don't want to delete everything if we know
// this won't fit.
- return StatusWith<DiskLoc>(ErrorCodes::BadValue,
+ return StatusWith<RecordId>(ErrorCodes::BadValue,
"object to insert exceeds cappedMaxSize");
}
InMemoryRecord rec(len);
doc->writeDocument(rec.data.get());
- DiskLoc loc;
+ RecordId loc;
if (_data->isOplog) {
- StatusWith<DiskLoc> status = extractAndCheckLocForOplog(rec.data.get(), len);
+ StatusWith<RecordId> status = extractAndCheckLocForOplog(rec.data.get(), len);
if (!status.isOK())
return status;
loc = status.getValue();
@@ -281,11 +281,11 @@ namespace mongo {
cappedDeleteAsNeeded(txn);
- return StatusWith<DiskLoc>(loc);
+ return StatusWith<RecordId>(loc);
}
- StatusWith<DiskLoc> InMemoryRecordStore::updateRecord(OperationContext* txn,
- const DiskLoc& loc,
+ StatusWith<RecordId> InMemoryRecordStore::updateRecord(OperationContext* txn,
+ const RecordId& loc,
const char* data,
int len,
bool enforceQuota,
@@ -294,7 +294,7 @@ namespace mongo {
int oldLen = oldRecord->size;
if (_isCapped && len > oldLen) {
- return StatusWith<DiskLoc>( ErrorCodes::InternalError,
+ return StatusWith<RecordId>( ErrorCodes::InternalError,
"failing update: objects in a capped ns cannot grow",
10003 );
}
@@ -308,11 +308,11 @@ namespace mongo {
cappedDeleteAsNeeded(txn);
- return StatusWith<DiskLoc>(loc);
+ return StatusWith<RecordId>(loc);
}
Status InMemoryRecordStore::updateWithDamages( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages ) {
@@ -343,7 +343,7 @@ namespace mongo {
RecordIterator* InMemoryRecordStore::getIterator(
OperationContext* txn,
- const DiskLoc& start,
+ const RecordId& start,
const CollectionScanParams::Direction& dir) const {
if (dir == CollectionScanParams::FORWARD) {
@@ -375,7 +375,7 @@ namespace mongo {
}
void InMemoryRecordStore::temp_cappedTruncateAfter(OperationContext* txn,
- DiskLoc end,
+ RecordId end,
bool inclusive) {
Records::iterator it = inclusive ? _data->records.lower_bound(end)
: _data->records.upper_bound(end);
@@ -470,23 +470,23 @@ namespace mongo {
return _data->dataSize + recordOverhead;
}
- DiskLoc InMemoryRecordStore::allocateLoc() {
+ RecordId InMemoryRecordStore::allocateLoc() {
const int64_t id = _data->nextId++;
- // This is a hack, but both the high and low order bits of DiskLoc offset must be 0, and the
+ // This is a hack, but both the high and low order bits of RecordId offset must be 0, and the
// file must fit in 23 bits. This gives us a total of 30 + 23 == 53 bits.
invariant(id < (1LL << 53));
- return DiskLoc(int(id >> 30), int((id << 1) & ~(1<<31)));
+ return RecordId(int(id >> 30), int((id << 1) & ~(1<<31)));
}
- DiskLoc InMemoryRecordStore::oplogStartHack(OperationContext* txn,
- const DiskLoc& startingPosition) const {
+ RecordId InMemoryRecordStore::oplogStartHack(OperationContext* txn,
+ const RecordId& startingPosition) const {
if (!_data->isOplog)
- return DiskLoc().setInvalid();
+ return RecordId().setInvalid();
const Records& records = _data->records;
if (records.empty())
- return DiskLoc();
+ return RecordId();
Records::const_iterator it = records.lower_bound(startingPosition);
if (it == records.end() || it->first > startingPosition)
@@ -502,11 +502,11 @@ namespace mongo {
InMemoryRecordIterator::InMemoryRecordIterator(OperationContext* txn,
const InMemoryRecordStore::Records& records,
const InMemoryRecordStore& rs,
- DiskLoc start,
+ RecordId start,
bool tailable)
: _txn(txn),
_tailable(tailable),
- _lastLoc(minDiskLoc),
+ _lastLoc(RecordId::min()),
_killedByInvalidate(false),
_records(records),
_rs(rs) {
@@ -523,19 +523,19 @@ namespace mongo {
return _it == _records.end();
}
- DiskLoc InMemoryRecordIterator::curr() {
+ RecordId InMemoryRecordIterator::curr() {
if (isEOF())
- return DiskLoc();
+ return RecordId();
return _it->first;
}
- DiskLoc InMemoryRecordIterator::getNext() {
+ RecordId InMemoryRecordIterator::getNext() {
if (isEOF()) {
if (!_tailable)
- return DiskLoc();
+ return RecordId();
if (_records.empty())
- return DiskLoc();
+ return RecordId();
invariant(!_killedByInvalidate);
@@ -545,17 +545,17 @@ namespace mongo {
invariant(_it != _records.end());
if (++_it == _records.end())
- return DiskLoc();
+ return RecordId();
}
- const DiskLoc out = _it->first;
+ const RecordId out = _it->first;
++_it;
if (_tailable && _it == _records.end())
_lastLoc = out;
return out;
}
- void InMemoryRecordIterator::invalidate(const DiskLoc& loc) {
+ void InMemoryRecordIterator::invalidate(const RecordId& loc) {
if (_rs.isCapped()) {
// Capped iterators die on invalidation rather than advancing.
if (isEOF()) {
@@ -582,7 +582,7 @@ namespace mongo {
return !_killedByInvalidate;
}
- RecordData InMemoryRecordIterator::dataFor(const DiskLoc& loc) const {
+ RecordData InMemoryRecordIterator::dataFor(const RecordId& loc) const {
return _rs.dataFor(_txn, loc);
}
@@ -594,7 +594,7 @@ namespace mongo {
OperationContext* txn,
const InMemoryRecordStore::Records& records,
const InMemoryRecordStore& rs,
- DiskLoc start) : _txn(txn),
+ RecordId start) : _txn(txn),
_killedByInvalidate(false),
_records(records),
_rs(rs) {
@@ -615,22 +615,22 @@ namespace mongo {
return _it == _records.rend();
}
- DiskLoc InMemoryRecordReverseIterator::curr() {
+ RecordId InMemoryRecordReverseIterator::curr() {
if (isEOF())
- return DiskLoc();
+ return RecordId();
return _it->first;
}
- DiskLoc InMemoryRecordReverseIterator::getNext() {
+ RecordId InMemoryRecordReverseIterator::getNext() {
if (isEOF())
- return DiskLoc();
+ return RecordId();
- const DiskLoc out = _it->first;
+ const RecordId out = _it->first;
++_it;
return out;
}
- void InMemoryRecordReverseIterator::invalidate(const DiskLoc& loc) {
+ void InMemoryRecordReverseIterator::invalidate(const RecordId& loc) {
if (_killedByInvalidate)
return;
@@ -650,7 +650,7 @@ namespace mongo {
void InMemoryRecordReverseIterator::saveState() {
if (isEOF()) {
- _savedLoc = DiskLoc();
+ _savedLoc = RecordId();
}
else {
_savedLoc = _it->first;
@@ -667,7 +667,7 @@ namespace mongo {
return !_killedByInvalidate;
}
- RecordData InMemoryRecordReverseIterator::dataFor(const DiskLoc& loc) const {
+ RecordData InMemoryRecordReverseIterator::dataFor(const RecordId& loc) const {
return _rs.dataFor(_txn, loc);
}
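allocateLoc above packs a 53-bit counter into the (a, ofs) pair: the top 23 bits become 'a' and the low 30 bits are shifted left one place, so the offset's low bit is always 0 and its sign bit is masked away. A worked check of that arithmetic (the mask is written against unsigned here to keep the sketch well-defined):

    #include <cassert>
    #include <cstdint>

    int main() {
        const int64_t id = (int64_t(3) << 30) | 5;  // a-part 3, low bits 5
        const int a = int(id >> 30);                // -> 3
        // Same bits the hunk computes with (id << 1) & ~(1 << 31):
        const int ofs = int((uint64_t(id) << 1) & 0x7fffffffu);  // -> 10
        assert(a == 3);
        assert(ofs == 10 && (ofs & 1) == 0);        // low bit forced to 0
        return 0;
    }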
diff --git a/src/mongo/db/storage/in_memory/in_memory_record_store.h b/src/mongo/db/storage/in_memory/in_memory_record_store.h
index 75862e22c7c..c5e69f2f72d 100644
--- a/src/mongo/db/storage/in_memory/in_memory_record_store.h
+++ b/src/mongo/db/storage/in_memory/in_memory_record_store.h
@@ -57,36 +57,36 @@ namespace mongo {
virtual const char* name() const;
- virtual RecordData dataFor( OperationContext* txn, const DiskLoc& loc ) const;
+ virtual RecordData dataFor( OperationContext* txn, const RecordId& loc ) const;
- virtual bool findRecord( OperationContext* txn, const DiskLoc& loc, RecordData* rd ) const;
+ virtual bool findRecord( OperationContext* txn, const RecordId& loc, RecordData* rd ) const;
- virtual void deleteRecord( OperationContext* txn, const DiskLoc& dl );
+ virtual void deleteRecord( OperationContext* txn, const RecordId& dl );
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const char* data,
int len,
bool enforceQuota );
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const DocWriter* doc,
bool enforceQuota );
- virtual StatusWith<DiskLoc> updateRecord( OperationContext* txn,
- const DiskLoc& oldLocation,
+ virtual StatusWith<RecordId> updateRecord( OperationContext* txn,
+ const RecordId& oldLocation,
const char* data,
int len,
bool enforceQuota,
UpdateMoveNotifier* notifier );
virtual Status updateWithDamages( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages );
virtual RecordIterator* getIterator( OperationContext* txn,
- const DiskLoc& start,
+ const RecordId& start,
const CollectionScanParams::Direction& dir) const;
virtual RecordIterator* getIteratorForRepair( OperationContext* txn ) const;
@@ -95,7 +95,7 @@ namespace mongo {
virtual Status truncate( OperationContext* txn );
- virtual void temp_cappedTruncateAfter( OperationContext* txn, DiskLoc end, bool inclusive );
+ virtual void temp_cappedTruncateAfter( OperationContext* txn, RecordId end, bool inclusive );
virtual bool compactSupported() const;
virtual Status compact( OperationContext* txn,
@@ -131,8 +131,8 @@ namespace mongo {
return _data->records.size();
}
- virtual DiskLoc oplogStartHack(OperationContext* txn,
- const DiskLoc& startingPosition) const;
+ virtual RecordId oplogStartHack(OperationContext* txn,
+ const RecordId& startingPosition) const;
protected:
struct InMemoryRecord {
@@ -145,15 +145,15 @@ namespace mongo {
boost::shared_array<char> data;
};
- virtual const InMemoryRecord* recordFor( const DiskLoc& loc ) const;
- virtual InMemoryRecord* recordFor( const DiskLoc& loc );
+ virtual const InMemoryRecord* recordFor( const RecordId& loc ) const;
+ virtual InMemoryRecord* recordFor( const RecordId& loc );
public:
//
// Not in RecordStore interface
//
- typedef std::map<DiskLoc, InMemoryRecord> Records;
+ typedef std::map<RecordId, InMemoryRecord> Records;
bool isCapped() const { return _isCapped; }
void setCappedDeleteCallback(CappedDocumentDeleteCallback* cb) {
@@ -167,9 +167,9 @@ namespace mongo {
class RemoveChange;
class TruncateChange;
- StatusWith<DiskLoc> extractAndCheckLocForOplog(const char* data, int len) const;
+ StatusWith<RecordId> extractAndCheckLocForOplog(const char* data, int len) const;
- DiskLoc allocateLoc();
+ RecordId allocateLoc();
bool cappedAndNeedDelete(OperationContext* txn) const;
void cappedDeleteAsNeeded(OperationContext* txn);
@@ -197,28 +197,28 @@ namespace mongo {
InMemoryRecordIterator(OperationContext* txn,
const InMemoryRecordStore::Records& records,
const InMemoryRecordStore& rs,
- DiskLoc start = DiskLoc(),
+ RecordId start = RecordId(),
bool tailable = false);
virtual bool isEOF();
- virtual DiskLoc curr();
+ virtual RecordId curr();
- virtual DiskLoc getNext();
+ virtual RecordId getNext();
- virtual void invalidate(const DiskLoc& dl);
+ virtual void invalidate(const RecordId& dl);
virtual void saveState();
virtual bool restoreState(OperationContext* txn);
- virtual RecordData dataFor( const DiskLoc& loc ) const;
+ virtual RecordData dataFor( const RecordId& loc ) const;
private:
OperationContext* _txn; // not owned
InMemoryRecordStore::Records::const_iterator _it;
bool _tailable;
- DiskLoc _lastLoc; // only for restarting tailable
+ RecordId _lastLoc; // only for restarting tailable
bool _killedByInvalidate;
const InMemoryRecordStore::Records& _records;
@@ -230,27 +230,27 @@ namespace mongo {
InMemoryRecordReverseIterator(OperationContext* txn,
const InMemoryRecordStore::Records& records,
const InMemoryRecordStore& rs,
- DiskLoc start = DiskLoc());
+ RecordId start = RecordId());
virtual bool isEOF();
- virtual DiskLoc curr();
+ virtual RecordId curr();
- virtual DiskLoc getNext();
+ virtual RecordId getNext();
- virtual void invalidate(const DiskLoc& dl);
+ virtual void invalidate(const RecordId& dl);
virtual void saveState();
virtual bool restoreState(OperationContext* txn);
- virtual RecordData dataFor( const DiskLoc& loc ) const;
+ virtual RecordData dataFor( const RecordId& loc ) const;
private:
OperationContext* _txn; // not owned
InMemoryRecordStore::Records::const_reverse_iterator _it;
bool _killedByInvalidate;
- DiskLoc _savedLoc; // isNull if saved at EOF
+ RecordId _savedLoc; // isNull if saved at EOF
const InMemoryRecordStore::Records& _records;
const InMemoryRecordStore& _rs;
diff --git a/src/mongo/db/storage/index_entry_comparison.h b/src/mongo/db/storage/index_entry_comparison.h
index 5765ad6d6bd..ea2eb005e9b 100644
--- a/src/mongo/db/storage/index_entry_comparison.h
+++ b/src/mongo/db/storage/index_entry_comparison.h
@@ -42,10 +42,10 @@ namespace mongo {
* and a disk location.
*/
struct IndexKeyEntry {
- IndexKeyEntry(const BSONObj& key, DiskLoc loc) :key(key), loc(loc) {}
+ IndexKeyEntry(const BSONObj& key, RecordId loc) :key(key), loc(loc) {}
BSONObj key;
- DiskLoc loc;
+ RecordId loc;
};
/**
@@ -66,7 +66,7 @@ namespace mongo {
* otherwise.
*
* IndexKeyEntries are compared lexicographically field by field in the BSONObj, followed by
- * the DiskLoc. Either lhs or rhs (but not both) can be a query object returned by
+ * the RecordId. Either lhs or rhs (but not both) can be a query object returned by
* makeQueryObject(). See makeQueryObject() for a description of how its arguments affect
* the outcome of the comparison.
*/
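Because IndexKeyEntry compares the BSON key first and the RecordId second, duplicate keys still occupy distinct, deterministically ordered slots, and (key, RecordId::min()) works as a lower bound for the whole run of duplicates. A sketch of the lexicographic rule (illustrative; the real IndexEntryComparison also handles query objects from makeQueryObject, which this omits):

    bool lessThan(const IndexKeyEntry& lhs, const IndexKeyEntry& rhs,
                  const Ordering& ordering) {
        const int cmp = lhs.key.woCompare(rhs.key, ordering, false);
        if (cmp != 0)
            return cmp < 0;
        return lhs.loc < rhs.loc;  // tie-break on RecordId
    }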
diff --git a/src/mongo/db/storage/kv/kv_catalog.cpp b/src/mongo/db/storage/kv/kv_catalog.cpp
index 2724b57a8e6..21cfaca98fd 100644
--- a/src/mongo/db/storage/kv/kv_catalog.cpp
+++ b/src/mongo/db/storage/kv/kv_catalog.cpp
@@ -118,7 +118,7 @@ namespace {
// No locking needed since called single threaded.
scoped_ptr<RecordIterator> it( _rs->getIterator( opCtx ) );
while ( !it->isEOF() ) {
- DiskLoc loc = it->getNext();
+ RecordId loc = it->getNext();
RecordData data = it->dataFor( loc );
BSONObj obj( data.data() );
@@ -177,7 +177,7 @@ namespace {
obj = b.obj();
}
- StatusWith<DiskLoc> res = _rs->insertRecord( opCtx, obj.objdata(), obj.objsize(), false );
+ StatusWith<RecordId> res = _rs->insertRecord( opCtx, obj.objdata(), obj.objsize(), false );
if ( !res.isOK() )
return res.getStatus();
@@ -203,7 +203,7 @@ namespace {
BSONObj KVCatalog::_findEntry( OperationContext* opCtx,
const StringData& ns,
- DiskLoc* out ) const {
+ RecordId* out ) const {
boost::scoped_ptr<Lock::ResourceLock> rLk;
if (!_isRsThreadSafe && opCtx->lockState()) {
@@ -212,7 +212,7 @@ namespace {
MODE_S));
}
- DiskLoc dl;
+ RecordId dl;
{
boost::mutex::scoped_lock lk( _identsLock );
NSToIdentMap::const_iterator it = _idents.find( ns.toString() );
@@ -256,7 +256,7 @@ namespace {
MODE_X));
}
- DiskLoc loc;
+ RecordId loc;
BSONObj obj = _findEntry( opCtx, ns, &loc );
{
@@ -287,7 +287,7 @@ namespace {
obj = b.obj();
}
- StatusWith<DiskLoc> status = _rs->updateRecord( opCtx,
+ StatusWith<RecordId> status = _rs->updateRecord( opCtx,
loc,
obj.objdata(),
obj.objsize(),
@@ -309,7 +309,7 @@ namespace {
MODE_X));
}
- DiskLoc loc;
+ RecordId loc;
BSONObj old = _findEntry( opCtx, fromNS, &loc ).getOwned();
{
BSONObjBuilder b;
@@ -326,7 +326,7 @@ namespace {
b.appendElementsUnique( old );
BSONObj obj = b.obj();
- StatusWith<DiskLoc> status = _rs->updateRecord( opCtx,
+ StatusWith<RecordId> status = _rs->updateRecord( opCtx,
loc,
obj.objdata(),
obj.objsize(),
@@ -396,7 +396,7 @@ namespace {
scoped_ptr<RecordIterator> it( _rs->getIterator( opCtx ) );
while ( !it->isEOF() ) {
- DiskLoc loc = it->getNext();
+ RecordId loc = it->getNext();
RecordData data = it->dataFor( loc );
BSONObj obj( data.data() );
v.push_back( obj["ident"].String() );
diff --git a/src/mongo/db/storage/kv/kv_catalog.h b/src/mongo/db/storage/kv/kv_catalog.h
index 6370568515e..5c6f1f52e1a 100644
--- a/src/mongo/db/storage/kv/kv_catalog.h
+++ b/src/mongo/db/storage/kv/kv_catalog.h
@@ -95,7 +95,7 @@ namespace mongo {
BSONObj _findEntry( OperationContext* opCtx,
const StringData& ns,
- DiskLoc* out=NULL ) const;
+ RecordId* out=NULL ) const;
std::string _newUniqueIdent(const char* kind);
@@ -112,10 +112,10 @@ namespace mongo {
struct Entry {
Entry(){}
- Entry( std::string i, DiskLoc l )
+ Entry( std::string i, RecordId l )
: ident(i), storedLoc( l ) {}
std::string ident;
- DiskLoc storedLoc;
+ RecordId storedLoc;
};
typedef std::map<std::string,Entry> NSToIdentMap;
NSToIdentMap _idents;
diff --git a/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp b/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
index 1552598e70e..1aeed1a1277 100644
--- a/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
@@ -108,7 +108,7 @@ namespace mongo {
void KVCollectionCatalogEntry::setIndexHead( OperationContext* txn,
const StringData& indexName,
- const DiskLoc& newHead ) {
+ const RecordId& newHead ) {
MetaData md = _getMetaData( txn );
int offset = md.findIndexOffset( indexName );
invariant( offset >= 0 );
@@ -132,7 +132,7 @@ namespace mongo {
Status KVCollectionCatalogEntry::prepareForIndexBuild( OperationContext* txn,
const IndexDescriptor* spec ) {
MetaData md = _getMetaData( txn );
- md.indexes.push_back( IndexMetaData( spec->infoObj(), false, DiskLoc(), false ) );
+ md.indexes.push_back( IndexMetaData( spec->infoObj(), false, RecordId(), false ) );
_catalog->putMetaData( txn, ns().toString(), md );
string ident = _catalog->getIndexIdent( txn, ns().ns(), spec->indexName() );
diff --git a/src/mongo/db/storage/kv/kv_collection_catalog_entry.h b/src/mongo/db/storage/kv/kv_collection_catalog_entry.h
index 678b6722ead..646b0c07fa3 100644
--- a/src/mongo/db/storage/kv/kv_collection_catalog_entry.h
+++ b/src/mongo/db/storage/kv/kv_collection_catalog_entry.h
@@ -57,7 +57,7 @@ namespace mongo {
virtual void setIndexHead( OperationContext* txn,
const StringData& indexName,
- const DiskLoc& newHead );
+ const RecordId& newHead );
virtual Status removeIndex( OperationContext* txn,
const StringData& indexName );
diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
index 86ed0fd7886..6ef9cbe1b3d 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
+++ b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
@@ -64,11 +64,11 @@ namespace mongo {
}
- DiskLoc loc;
+ RecordId loc;
{
MyOperationContext opCtx( engine );
WriteUnitOfWork uow( &opCtx );
- StatusWith<DiskLoc> res = rs->insertRecord( &opCtx, "abc", 4, false );
+ StatusWith<RecordId> res = rs->insertRecord( &opCtx, "abc", 4, false );
ASSERT_OK( res.getStatus() );
loc = res.getValue();
uow.commit();
@@ -96,7 +96,7 @@ namespace mongo {
string ns = "a.b";
// 'loc' holds location of "abc" and is referenced after restarting engine.
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<RecordStore> rs;
{
@@ -109,7 +109,7 @@ namespace mongo {
{
MyOperationContext opCtx( engine );
WriteUnitOfWork uow( &opCtx );
- StatusWith<DiskLoc> res = rs->insertRecord( &opCtx, "abc", 4, false );
+ StatusWith<RecordId> res = rs->insertRecord( &opCtx, "abc", 4, false );
ASSERT_OK( res.getStatus() );
loc = res.getValue();
uow.commit();
@@ -151,7 +151,7 @@ namespace mongo {
{
MyOperationContext opCtx( engine );
WriteUnitOfWork uow( &opCtx );
- ASSERT_OK( sorted->insert( &opCtx, BSON( "" << 5 ), DiskLoc( 6, 4 ), true ) );
+ ASSERT_OK( sorted->insert( &opCtx, BSON( "" << 5 ), RecordId( 6, 4 ), true ) );
uow.commit();
}
@@ -237,7 +237,7 @@ namespace mongo {
md.ns ="a.b";
md.indexes.push_back( BSONCollectionCatalogEntry::IndexMetaData( BSON( "name" << "foo" ),
false,
- DiskLoc(),
+ RecordId(),
false ) );
catalog->putMetaData( &opCtx, "a.b", md );
uow.commit();
@@ -263,7 +263,7 @@ namespace mongo {
catalog->putMetaData( &opCtx, "a.b", md ); // remove index
md.indexes.push_back( BSONCollectionCatalogEntry::IndexMetaData( BSON( "name" << "foo" ),
false,
- DiskLoc(),
+ RecordId(),
false ) );
catalog->putMetaData( &opCtx, "a.b", md );
uow.commit();
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp
index 35bb2080e31..c7644db33ab 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp
@@ -188,7 +188,7 @@ namespace mongo {
return _btree->getKey(_txn, _bucket, _ofs);
}
- virtual DiskLoc getDiskLoc() const {
+ virtual RecordId getRecordId() const {
return _btree->getDiskLoc(_txn, _bucket, _ofs);
}
@@ -201,7 +201,7 @@ namespace mongo {
virtual void savePosition() {
if (!_bucket.isNull()) {
_savedKey = getKey().getOwned();
- _savedLoc = getDiskLoc();
+ _savedLoc = getRecordId();
}
}
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
index 290d6a16123..c05b2453484 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
@@ -1089,7 +1089,7 @@ namespace mongo {
// Find the DiskLoc
bool found;
- DiskLoc bucket = _locate(txn, getRootLoc(txn), key, &position, &found, minDiskLoc, 1);
+ DiskLoc bucket = _locate(txn, getRootLoc(txn), key, &position, &found, DiskLoc::min(), 1);
while (!bucket.isNull()) {
FullKey fullKey = getFullKey(getBucket(txn, bucket), position);
@@ -1121,7 +1121,7 @@ namespace mongo {
int position;
bool found;
- DiskLoc posLoc = _locate(txn, getRootLoc(txn), key, &position, &found, minDiskLoc, 1);
+ DiskLoc posLoc = _locate(txn, getRootLoc(txn), key, &position, &found, DiskLoc::min(), 1);
while (!posLoc.isNull()) {
FullKey fullKey = getFullKey(getBucket(txn, posLoc), position);
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp
index 717e5bcb37b..15997d5681c 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp
@@ -40,8 +40,8 @@ namespace mongo {
void DiskLoc56Bit::operator=(const DiskLoc& loc) {
ofs = loc.getOfs();
int la = loc.a();
- if (la == maxDiskLoc.a()) {
- invariant(ofs == maxDiskLoc.getOfs());
+ if (la == DiskLoc::max().a()) {
+ invariant(ofs == DiskLoc::max().getOfs());
la = OurMaxA;
}
invariant( la <= OurMaxA ); // must fit in 3 bytes
diff --git a/src/mongo/db/storage/mmap_v1/data_file.cpp b/src/mongo/db/storage/mmap_v1/data_file.cpp
index 020bd7a58f1..971af81eb92 100644
--- a/src/mongo/db/storage/mmap_v1/data_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/data_file.cpp
@@ -216,9 +216,9 @@ namespace mongo {
}
void DataFileHeader::checkUpgrade(OperationContext* txn) {
- if ( freeListStart == minDiskLoc ) {
+ if ( freeListStart == DiskLoc(0, 0) ) {
// we are upgrading from 2.4 to 2.6
- invariant( freeListEnd == minDiskLoc ); // both start and end should be (0,0) or real
+ invariant(freeListEnd == DiskLoc(0, 0)); // both start and end should be (0,0) or real
WriteUnitOfWork wunit(txn);
*txn->recoveryUnit()->writing( &freeListStart ) = DiskLoc();
*txn->recoveryUnit()->writing( &freeListEnd ) = DiskLoc();
diff --git a/src/mongo/db/storage/mmap_v1/diskloc.h b/src/mongo/db/storage/mmap_v1/diskloc.h
index 7ac89937842..07c69cc9b25 100644
--- a/src/mongo/db/storage/mmap_v1/diskloc.h
+++ b/src/mongo/db/storage/mmap_v1/diskloc.h
@@ -44,12 +44,16 @@ namespace mongo {
template< class Version > class BtreeBucket;
+ // TODO make DiskLoc and RecordId different types
+ class RecordId;
+ typedef RecordId DiskLoc;
+
#pragma pack(1)
/** represents a disk location/offset on disk in a database. 64 bits.
it is assumed these will be passed around by value a lot so don't do anything to make them large
(such as adding a virtual function)
*/
- class DiskLoc {
+ class RecordId {
int _a; // this will be volume, file #, etc.; it is a logical value and could be anything depending on the storage engine
int ofs;
@@ -65,8 +69,17 @@ namespace mongo {
MaxFiles=16000
};
- DiskLoc(int a, int Ofs) : _a(a), ofs(Ofs) { }
- DiskLoc() { Null(); }
+ RecordId(int a, int Ofs) : _a(a), ofs(Ofs) { }
+ RecordId() { Null(); }
+
+ // Minimum allowed DiskLoc. No Record may begin at this location because file and extent
+ // headers must precede Records in a file.
+ static DiskLoc min() { return DiskLoc(0, 0); }
+
+ // Maximum allowed DiskLoc.
+ // No Record may begin at this location because the minimum size of a Record is larger than
+ // one byte. Also, the last bit cannot be used because mmapv1 reserves it as the "used" flag.
+ static DiskLoc max() { return DiskLoc(0x7fffffff, 0x7ffffffe); }
bool questionable() const {
return ofs < -1 ||
@@ -164,13 +177,4 @@ namespace mongo {
return stream << loc.toString();
}
- // Minimum allowed DiskLoc. No Record may begin at this location because file and extent
- // headers must precede Records in a file.
- const DiskLoc minDiskLoc(0, 0);
-
- // Maximum allowed DiskLoc.
- // No Record may begin at this location because the minimum size of a Record is larger than one
- // byte. Also, the last bit is not able to be used because mmapv1 uses that for "used".
- const DiskLoc maxDiskLoc(0x7fffffff, 0x7ffffffe);
-
} // namespace mongo
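Hoisting the sentinels into static members also makes the invariant they encode easy to state: every real record location compares strictly between min() and max(). A compilable toy illustrating that ordering (the stand-in type and its comparator are assumptions mirroring the operators in this header):

    #include <cassert>

    struct Rid {
        int a, ofs;
        bool operator<(const Rid& o) const {
            return a < o.a || (a == o.a && ofs < o.ofs);
        }
        static Rid min() { return Rid{0, 0}; }
        static Rid max() { return Rid{0x7fffffff, 0x7ffffffe}; }
    };

    int main() {
        // No real record may sit on either sentinel, so any valid location
        // compares strictly between them; this is what lets callers use
        // min()/max() as scan bounds.
        Rid someRecord{3, 4096};
        assert(Rid::min() < someRecord && someRecord < Rid::max());
        return 0;
    }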
diff --git a/src/mongo/db/storage/oplog_hack.cpp b/src/mongo/db/storage/oplog_hack.cpp
index f2361fecc6f..f3f6a2e4937 100644
--- a/src/mongo/db/storage/oplog_hack.cpp
+++ b/src/mongo/db/storage/oplog_hack.cpp
@@ -41,37 +41,37 @@
namespace mongo {
namespace oploghack {
- StatusWith<DiskLoc> keyForOptime(const OpTime& opTime) {
+ StatusWith<RecordId> keyForOptime(const OpTime& opTime) {
// Make sure secs and inc wouldn't be negative if treated as signed. This ensures that they
- // don't sort differently when put in a DiskLoc. It also avoids issues with Null/Invalid
- // DiskLocs
+ // don't sort differently when put in a RecordId. It also avoids issues with Null/Invalid
+ // RecordIds
if (opTime.getSecs() > uint32_t(std::numeric_limits<int32_t>::max()))
- return StatusWith<DiskLoc>(ErrorCodes::BadValue, "ts secs too high");
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "ts secs too high");
if (opTime.getInc() > uint32_t(std::numeric_limits<int32_t>::max()))
- return StatusWith<DiskLoc>(ErrorCodes::BadValue, "ts inc too high");
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "ts inc too high");
- const DiskLoc out = DiskLoc(opTime.getSecs(), opTime.getInc());
- if (out <= minDiskLoc)
- return StatusWith<DiskLoc>(ErrorCodes::BadValue, "ts too low");
- if (out >= maxDiskLoc)
- return StatusWith<DiskLoc>(ErrorCodes::BadValue, "ts too high");
+ const RecordId out = RecordId(opTime.getSecs(), opTime.getInc());
+ if (out <= RecordId::min())
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "ts too low");
+ if (out >= RecordId::max())
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "ts too high");
- return StatusWith<DiskLoc>(out);
+ return StatusWith<RecordId>(out);
}
/**
* data and len must be the arguments from RecordStore::insert() on an oplog collection.
*/
- StatusWith<DiskLoc> extractKey(const char* data, int len) {
+ StatusWith<RecordId> extractKey(const char* data, int len) {
DEV invariant(validateBSON(data, len).isOK());
const BSONObj obj(data);
const BSONElement elem = obj["ts"];
if (elem.eoo())
- return StatusWith<DiskLoc>(ErrorCodes::BadValue, "no ts field");
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "no ts field");
if (elem.type() != Timestamp)
- return StatusWith<DiskLoc>(ErrorCodes::BadValue, "ts must be a Timestamp");
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "ts must be a Timestamp");
return keyForOptime(elem._opTime());
}
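The conversion above is small enough to sketch in isolation: the optime's seconds and increment become the two 32-bit halves of a RecordId, with range checks so the result sorts sanely and avoids the sentinels. A hedged reimplementation with plain types (error reporting via bool instead of StatusWith):

    #include <cstdint>
    #include <limits>

    struct Rid {
        int32_t a, ofs;
        bool operator<=(const Rid& o) const {
            return a < o.a || (a == o.a && ofs <= o.ofs);
        }
    };

    // Maps (secs, inc) -> Rid(secs, inc). Returns false if either half
    // would be negative as a signed int (which would break sort order) or
    // if the result collides with the min()/max() sentinels.
    bool keyForOptimeSketch(uint32_t secs, uint32_t inc, Rid* out) {
        const uint32_t kMaxI32 = uint32_t(std::numeric_limits<int32_t>::max());
        if (secs > kMaxI32 || inc > kMaxI32)
            return false;
        Rid r = { int32_t(secs), int32_t(inc) };
        Rid lo = { 0, 0 };                    // RecordId::min()
        Rid hi = { 0x7fffffff, 0x7ffffffe };  // RecordId::max()
        if (r <= lo || hi <= r)
            return false;
        *out = r;
        return true;
    }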
diff --git a/src/mongo/db/storage/oplog_hack.h b/src/mongo/db/storage/oplog_hack.h
index 8c89b3c1287..20708c1db4c 100644
--- a/src/mongo/db/storage/oplog_hack.h
+++ b/src/mongo/db/storage/oplog_hack.h
@@ -32,21 +32,21 @@
#include "mongo/base/status_with.h"
namespace mongo {
- class DiskLoc;
+ class RecordId;
class OpTime;
namespace oploghack {
/**
- * Converts OpTime to a DiskLoc in an unspecified manor that is safe to use as the key to in a
+ * Converts OpTime to a RecordId in an unspecified manner that is safe to use as a key in a
* RecordStore.
*/
- StatusWith<DiskLoc> keyForOptime(const OpTime& opTime);
+ StatusWith<RecordId> keyForOptime(const OpTime& opTime);
/**
* data and len must be the arguments from RecordStore::insert() on an oplog collection.
*/
- StatusWith<DiskLoc> extractKey(const char* data, int len);
+ StatusWith<RecordId> extractKey(const char* data, int len);
} // namespace oploghack
} // namespace mongo
diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h
index 9540067f83c..453f0fdc3ee 100644
--- a/src/mongo/db/storage/record_store.h
+++ b/src/mongo/db/storage/record_store.h
@@ -73,7 +73,7 @@ namespace mongo {
public:
virtual ~UpdateMoveNotifier(){}
virtual Status recordStoreGoingToMove( OperationContext* txn,
- const DiskLoc& oldLocation,
+ const RecordId& oldLocation,
const char* oldBuffer,
size_t oldSize ) = 0;
};
@@ -89,17 +89,17 @@ namespace mongo {
// True if getNext will produce no more data, false otherwise.
virtual bool isEOF() = 0;
- // Return the DiskLoc that the iterator points at. Returns DiskLoc() if isEOF.
- virtual DiskLoc curr() = 0;
+ // Return the RecordId that the iterator points at. Returns RecordId() if isEOF.
+ virtual RecordId curr() = 0;
- // Return the DiskLoc that the iterator points at and move the iterator to the next item
- // from the collection. Returns DiskLoc() if isEOF.
- virtual DiskLoc getNext() = 0;
+ // Return the RecordId that the iterator points at and move the iterator to the next item
+ // from the collection. Returns RecordId() if isEOF.
+ virtual RecordId getNext() = 0;
// Can only be called after saveState and before restoreState.
- virtual void invalidate(const DiskLoc& dl) = 0;
+ virtual void invalidate(const RecordId& dl) = 0;
- // Save any state required to resume operation (without crashing) after DiskLoc deletion or
+ // Save any state required to resume operation (without crashing) after RecordId deletion or
// a collection drop.
virtual void saveState() = 0;
@@ -110,7 +110,7 @@ namespace mongo {
// normally this will just go back to the RecordStore and convert
// but this gives the iterator an opportunity to optimize
- virtual RecordData dataFor( const DiskLoc& loc ) const = 0;
+ virtual RecordData dataFor( const RecordId& loc ) const = 0;
};
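Given that contract -- curr() and getNext() pin to the null RecordId once the iterator hits EOF -- a typical scan loop looks like the sketch below. It uses only the interface declared here; `rs` and `txn` are assumed to come from elsewhere:

    // Sketch of a caller consuming the RecordIterator contract above.
    void scanAll(RecordStore* rs, OperationContext* txn) {
        boost::scoped_ptr<RecordIterator> it(rs->getIterator(txn));
        while (!it->isEOF()) {
            RecordId loc = it->getNext();   // never null before EOF
            RecordData data = it->dataFor(loc);
            // ... consume data.data(), data.size() ...
        }
        // At EOF both accessors return the null RecordId, per the comments.
        invariant(it->curr().isNull() && it->getNext().isNull());
    }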
@@ -146,24 +146,24 @@ namespace mongo {
// CRUD related
- virtual RecordData dataFor( OperationContext* txn, const DiskLoc& loc) const = 0;
+ virtual RecordData dataFor( OperationContext* txn, const RecordId& loc) const = 0;
/**
* @param out - If the record exists, the contents of this are set.
* @return true iff there is a Record for loc
*/
virtual bool findRecord( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
RecordData* out ) const = 0;
- virtual void deleteRecord( OperationContext* txn, const DiskLoc& dl ) = 0;
+ virtual void deleteRecord( OperationContext* txn, const RecordId& dl ) = 0;
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const char* data,
int len,
bool enforceQuota ) = 0;
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const DocWriter* doc,
bool enforceQuota ) = 0;
@@ -171,17 +171,17 @@ namespace mongo {
* @param notifier - this is called if the document is moved.
* It is called after the document has been written to its new
* location and before it is deleted from the old one.
- * @return Status or DiskLoc, DiskLoc might be different
+ * @return Status or RecordId, RecordId might be different
*/
- virtual StatusWith<DiskLoc> updateRecord( OperationContext* txn,
- const DiskLoc& oldLocation,
+ virtual StatusWith<RecordId> updateRecord( OperationContext* txn,
+ const RecordId& oldLocation,
const char* data,
int len,
bool enforceQuota,
UpdateMoveNotifier* notifier ) = 0;
virtual Status updateWithDamages( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages ) = 0;
@@ -202,14 +202,14 @@ namespace mongo {
* Storage engines which support document-level locking need not implement this.
*/
virtual RecordFetcher* recordNeedsFetch( OperationContext* txn,
- const DiskLoc& loc ) const { return NULL; }
+ const RecordId& loc ) const { return NULL; }
/**
* The returned iterator is owned by the caller.
* Default arguments return all items in the record store.
*/
virtual RecordIterator* getIterator( OperationContext* txn,
- const DiskLoc& start = DiskLoc(),
+ const RecordId& start = RecordId(),
const CollectionScanParams::Direction& dir =
CollectionScanParams::FORWARD
) const = 0;
@@ -245,7 +245,7 @@ namespace mongo {
* XXX: this will go away soon, just needed to move for now
*/
virtual void temp_cappedTruncateAfter(OperationContext* txn,
- DiskLoc end,
+ RecordId end,
bool inclusive) = 0;
// does this RecordStore support the compact operation
@@ -299,15 +299,15 @@ namespace mongo {
BSONObjBuilder* info = NULL ) = 0;
/**
- * Return the DiskLoc of an oplog entry as close to startingPosition as possible without
- * being higher. If there are no entries <= startingPosition, return DiskLoc().
+ * Return the RecordId of an oplog entry as close to startingPosition as possible without
+ * being higher. If there are no entries <= startingPosition, return RecordId().
*
* If you don't implement the oplogStartHack, just use the default implementation which
- * returns an Invalid DiskLoc.
+ * returns an Invalid RecordId.
*/
- virtual DiskLoc oplogStartHack(OperationContext* txn,
- const DiskLoc& startingPosition) const {
- return DiskLoc().setInvalid();
+ virtual RecordId oplogStartHack(OperationContext* txn,
+ const RecordId& startingPosition) const {
+ return RecordId().setInvalid();
}
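For engines that do implement the hack, the contract amounts to a reverse lower-bound over the oplog. A deliberately naive sketch on top of the iterator API declared in this header (purely illustrative; real engines answer this directly from their key ordering):

    // Walk backward from the end; the first entry at or below
    // startingPosition is the answer. Returns the null RecordId when no
    // entry qualifies. (The base-class default above instead returns an
    // *invalid* RecordId to signal "hack not supported".)
    RecordId naiveOplogStartHack(OperationContext* txn, const RecordStore* rs,
                                 const RecordId& startingPosition) {
        boost::scoped_ptr<RecordIterator> it(
            rs->getIterator(txn, RecordId(), CollectionScanParams::BACKWARD));
        while (!it->isEOF()) {
            const RecordId loc = it->getNext();
            if (!(startingPosition < loc))   // i.e. loc <= startingPosition
                return loc;
        }
        return RecordId();
    }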
/**
@@ -330,7 +330,7 @@ namespace mongo {
virtual ~RecordStoreCompactAdaptor(){}
virtual bool isDataValid( const RecordData& recData ) = 0;
virtual size_t dataSize( const RecordData& recData ) = 0;
- virtual void inserted( const RecordData& recData, const DiskLoc& newLocation ) = 0;
+ virtual void inserted( const RecordData& recData, const RecordId& newLocation ) = 0;
};
struct ValidateResults {
diff --git a/src/mongo/db/storage/record_store_test_datafor.cpp b/src/mongo/db/storage/record_store_test_datafor.cpp
index 82f445fd2ab..13acd9270ee 100644
--- a/src/mongo/db/storage/record_store_test_datafor.cpp
+++ b/src/mongo/db/storage/record_store_test_datafor.cpp
@@ -41,7 +41,7 @@ using std::stringstream;
namespace mongo {
// Insert a record and verify its contents by calling dataFor()
- // on the returned DiskLoc.
+ // on the returned RecordId.
TEST( RecordStoreTestHarness, DataFor ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
@@ -52,12 +52,12 @@ namespace mongo {
}
string data = "record-";
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -83,7 +83,7 @@ namespace mongo {
}
// Insert multiple records and verify their contents by calling dataFor()
- // on each of the returned DiskLocs.
+ // on each of the returned RecordIds.
TEST( RecordStoreTestHarness, DataForMultiple ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
@@ -94,7 +94,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -103,7 +103,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
diff --git a/src/mongo/db/storage/record_store_test_datasize.cpp b/src/mongo/db/storage/record_store_test_datasize.cpp
index 4bcb67f8477..f2b7730403b 100644
--- a/src/mongo/db/storage/record_store_test_datasize.cpp
+++ b/src/mongo/db/storage/record_store_test_datasize.cpp
@@ -73,7 +73,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
diff --git a/src/mongo/db/storage/record_store_test_deleterecord.cpp b/src/mongo/db/storage/record_store_test_deleterecord.cpp
index d66708a021c..afc4d78c92c 100644
--- a/src/mongo/db/storage/record_store_test_deleterecord.cpp
+++ b/src/mongo/db/storage/record_store_test_deleterecord.cpp
@@ -51,12 +51,12 @@ namespace mongo {
}
string data = "my record";
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -97,7 +97,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -106,7 +106,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
diff --git a/src/mongo/db/storage/record_store_test_harness.cpp b/src/mongo/db/storage/record_store_test_harness.cpp
index 83eee87db40..cd0694132cc 100644
--- a/src/mongo/db/storage/record_store_test_harness.cpp
+++ b/src/mongo/db/storage/record_store_test_harness.cpp
@@ -45,13 +45,13 @@ namespace mongo {
string s = "eliot was here";
- DiskLoc loc1;
+ RecordId loc1;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
ASSERT_OK( res.getStatus() );
loc1 = res.getValue();
uow.commit();
@@ -66,7 +66,7 @@ namespace mongo {
ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
RecordData rd;
- ASSERT( !rs->findRecord( opCtx.get(), DiskLoc(111,17), &rd ) );
+ ASSERT( !rs->findRecord( opCtx.get(), RecordId(111,17), &rd ) );
ASSERT( rd.data() == NULL );
ASSERT( rs->findRecord( opCtx.get(), loc1, &rd ) );
@@ -77,7 +77,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
ASSERT_OK( res.getStatus() );
uow.commit();
}
@@ -108,7 +108,7 @@ namespace mongo {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
- DiskLoc loc1;
+ RecordId loc1;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
@@ -116,7 +116,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
DummyDocWriter dw;
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), &dw, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), &dw, false );
ASSERT_OK( res.getStatus() );
loc1 = res.getValue();
uow.commit();
@@ -137,13 +137,13 @@ namespace mongo {
string s = "eliot was here";
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
ASSERT_OK( res.getStatus() );
loc = res.getValue();
uow.commit();
@@ -183,13 +183,13 @@ namespace mongo {
string s = "eliot was here";
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
ASSERT_OK( res.getStatus() );
res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
ASSERT_OK( res.getStatus() );
@@ -227,12 +227,12 @@ namespace mongo {
string s1 = "eliot was here";
string s2 = "eliot was here again";
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
s1.c_str(), s1.size() + 1,
false );
ASSERT_OK( res.getStatus() );
@@ -251,7 +251,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->updateRecord( opCtx.get(), loc,
+ StatusWith<RecordId> res = rs->updateRecord( opCtx.get(), loc,
s2.c_str(), s2.size() + 1,
false, NULL );
ASSERT_OK( res.getStatus() );
@@ -276,13 +276,13 @@ namespace mongo {
string s1 = "aaa111bbb";
string s2 = "aaa222bbb";
- DiskLoc loc;
+ RecordId loc;
const RecordData s1Rec(s1.c_str(), s1.size() + 1);
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
s1Rec.data(),
s1Rec.size(),
-1 );
@@ -336,12 +336,12 @@ namespace mongo {
string s = "eliot was here";
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
ASSERT_OK( res.getStatus() );
loc = res.getValue();
uow.commit();
@@ -410,7 +410,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<RecordIterator> it( rs->getIterator( opCtx.get() ) );
while ( !it->isEOF() ) {
- DiskLoc loc = it->getNext();
+ RecordId loc = it->getNext();
RecordData data = it->dataFor( loc );
string s = str::stream() << "eliot" << x++;
ASSERT_EQUALS( s, data.data() );
@@ -422,10 +422,10 @@ namespace mongo {
int x = N;
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<RecordIterator> it( rs->getIterator( opCtx.get(),
- DiskLoc(),
+ RecordId(),
CollectionScanParams::BACKWARD ) );
while ( !it->isEOF() ) {
- DiskLoc loc = it->getNext();
+ RecordId loc = it->getNext();
RecordData data = it->dataFor( loc );
string s = str::stream() << "eliot" << --x;
ASSERT_EQUALS( s, data.data() );
diff --git a/src/mongo/db/storage/record_store_test_insertrecord.cpp b/src/mongo/db/storage/record_store_test_insertrecord.cpp
index 5415f8ad39e..df539a013d8 100644
--- a/src/mongo/db/storage/record_store_test_insertrecord.cpp
+++ b/src/mongo/db/storage/record_store_test_insertrecord.cpp
@@ -52,12 +52,12 @@ namespace mongo {
}
string data = "my record";
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -85,7 +85,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -94,7 +94,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -121,14 +121,14 @@ namespace mongo {
ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
}
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
StringDocWriter docWriter( "my record", false );
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
&docWriter,
false );
ASSERT_OK( res.getStatus() );
@@ -155,7 +155,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -164,7 +164,7 @@ namespace mongo {
StringDocWriter docWriter( ss.str(), false );
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
&docWriter,
false );
ASSERT_OK( res.getStatus() );
diff --git a/src/mongo/db/storage/record_store_test_manyiter.cpp b/src/mongo/db/storage/record_store_test_manyiter.cpp
index adbdf550f49..83ddcdc5c8f 100644
--- a/src/mongo/db/storage/record_store_test_manyiter.cpp
+++ b/src/mongo/db/storage/record_store_test_manyiter.cpp
@@ -61,10 +61,10 @@ namespace mongo {
RecordIterator *rIter = *vIter;
ASSERT( rIter->isEOF() );
- ASSERT_EQUALS( DiskLoc(), rIter->curr() );
- ASSERT_EQUALS( DiskLoc(), rIter->getNext() );
+ ASSERT_EQUALS( RecordId(), rIter->curr() );
+ ASSERT_EQUALS( RecordId(), rIter->getNext() );
ASSERT( rIter->isEOF() );
- ASSERT_EQUALS( DiskLoc(), rIter->curr() );
+ ASSERT_EQUALS( RecordId(), rIter->curr() );
delete rIter;
}
@@ -82,7 +82,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -91,7 +91,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -106,7 +106,7 @@ namespace mongo {
ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
}
- set<DiskLoc> remain( locs, locs + nToInsert );
+ set<RecordId> remain( locs, locs + nToInsert );
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
vector<RecordIterator*> v = rs->getManyIterators( opCtx.get() );
@@ -116,15 +116,15 @@ namespace mongo {
RecordIterator *rIter = *vIter;
while ( !rIter->isEOF() ) {
- DiskLoc loc = rIter->curr();
+ RecordId loc = rIter->curr();
ASSERT( 1 == remain.erase( loc ) );
ASSERT_EQUALS( loc, rIter->getNext() );
}
- ASSERT_EQUALS( DiskLoc(), rIter->curr() );
- ASSERT_EQUALS( DiskLoc(), rIter->getNext() );
+ ASSERT_EQUALS( RecordId(), rIter->curr() );
+ ASSERT_EQUALS( RecordId(), rIter->getNext() );
ASSERT( rIter->isEOF() );
- ASSERT_EQUALS( DiskLoc(), rIter->curr() );
+ ASSERT_EQUALS( RecordId(), rIter->curr() );
delete rIter;
}
diff --git a/src/mongo/db/storage/record_store_test_recorditer.cpp b/src/mongo/db/storage/record_store_test_recorditer.cpp
index 932644e2f65..0b0bcb0e7a4 100644
--- a/src/mongo/db/storage/record_store_test_recorditer.cpp
+++ b/src/mongo/db/storage/record_store_test_recorditer.cpp
@@ -45,7 +45,7 @@ namespace mongo {
// Insert multiple records and iterate through them in the forward direction.
// When curr() or getNext() is called on an iterator positioned at EOF,
- // the iterator returns DiskLoc() and stays at EOF.
+ // the iterator returns RecordId() and stays at EOF.
TEST( RecordStoreTestHarness, IterateOverMultipleRecords ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
@@ -56,7 +56,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -65,7 +65,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -80,12 +80,12 @@ namespace mongo {
ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
}
- std::sort( locs, locs + nToInsert ); // inserted records may not be in DiskLoc order
+ std::sort( locs, locs + nToInsert ); // inserted records may not be in RecordId order
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
RecordIterator *it = rs->getIterator( opCtx.get(),
- DiskLoc(),
+ RecordId(),
CollectionScanParams::FORWARD );
for ( int i = 0; i < nToInsert; i++ ) {
@@ -95,10 +95,10 @@ namespace mongo {
}
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
- ASSERT_EQUALS( DiskLoc(), it->getNext() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->getNext() );
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
delete it;
}
@@ -106,7 +106,7 @@ namespace mongo {
// Insert multiple records and iterate through them in the reverse direction.
// When curr() or getNext() is called on an iterator positioned at EOF,
- // the iterator returns DiskLoc() and stays at EOF.
+ // the iterator returns RecordId() and stays at EOF.
TEST( RecordStoreTestHarness, IterateOverMultipleRecordsReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
@@ -117,7 +117,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -126,7 +126,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -141,12 +141,12 @@ namespace mongo {
ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
}
- std::sort( locs, locs + nToInsert ); // inserted records may not be in DiskLoc order
+ std::sort( locs, locs + nToInsert ); // inserted records may not be in RecordId order
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
RecordIterator *it = rs->getIterator( opCtx.get(),
- DiskLoc(),
+ RecordId(),
CollectionScanParams::BACKWARD );
for ( int i = nToInsert - 1; i >= 0; i-- ) {
@@ -156,10 +156,10 @@ namespace mongo {
}
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
- ASSERT_EQUALS( DiskLoc(), it->getNext() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->getNext() );
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
delete it;
}
@@ -177,7 +177,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -186,7 +186,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -201,7 +201,7 @@ namespace mongo {
ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
}
- std::sort( locs, locs + nToInsert ); // inserted records may not be in DiskLoc order
+ std::sort( locs, locs + nToInsert ); // inserted records may not be in RecordId order
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
@@ -217,10 +217,10 @@ namespace mongo {
}
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
- ASSERT_EQUALS( DiskLoc(), it->getNext() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->getNext() );
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
delete it;
}
@@ -238,7 +238,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -247,7 +247,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -262,7 +262,7 @@ namespace mongo {
ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
}
- std::sort( locs, locs + nToInsert ); // inserted records may not be in DiskLoc order
+ std::sort( locs, locs + nToInsert ); // inserted records may not be in RecordId order
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
@@ -278,10 +278,10 @@ namespace mongo {
}
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
- ASSERT_EQUALS( DiskLoc(), it->getNext() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->getNext() );
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
delete it;
}
@@ -300,7 +300,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -309,7 +309,7 @@ namespace mongo {
string data = sb.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -333,7 +333,7 @@ namespace mongo {
// Iterate, checking EOF along the way.
for ( int i = 0; i < nToInsert; i++ ) {
ASSERT( !it->isEOF() );
- DiskLoc nextLoc = it->getNext();
+ RecordId nextLoc = it->getNext();
ASSERT( !nextLoc.isNull() );
}
ASSERT( it->isEOF() );
@@ -347,7 +347,7 @@ namespace mongo {
string data = sb.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
diff --git a/src/mongo/db/storage/record_store_test_repairiter.cpp b/src/mongo/db/storage/record_store_test_repairiter.cpp
index 260db54829a..81d40d9900d 100644
--- a/src/mongo/db/storage/record_store_test_repairiter.cpp
+++ b/src/mongo/db/storage/record_store_test_repairiter.cpp
@@ -60,10 +60,10 @@ namespace mongo {
return;
}
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
- ASSERT_EQUALS( DiskLoc(), it->getNext() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->getNext() );
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
delete it;
}
@@ -81,7 +81,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -90,7 +90,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -105,7 +105,7 @@ namespace mongo {
ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
}
- set<DiskLoc> remain( locs, locs + nToInsert );
+ set<RecordId> remain( locs, locs + nToInsert );
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
RecordIterator *it = rs->getIteratorForRepair( opCtx.get() );
@@ -115,15 +115,15 @@ namespace mongo {
}
while ( !it->isEOF() ) {
- DiskLoc loc = it->getNext();
+ RecordId loc = it->getNext();
remain.erase( loc ); // can happen more than once per doc
}
ASSERT( remain.empty() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
- ASSERT_EQUALS( DiskLoc(), it->getNext() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->getNext() );
ASSERT( it->isEOF() );
- ASSERT_EQUALS( DiskLoc(), it->curr() );
+ ASSERT_EQUALS( RecordId(), it->curr() );
delete it;
}
diff --git a/src/mongo/db/storage/record_store_test_storagesize.cpp b/src/mongo/db/storage/record_store_test_storagesize.cpp
index f12fe08052f..edbaa28d045 100644
--- a/src/mongo/db/storage/record_store_test_storagesize.cpp
+++ b/src/mongo/db/storage/record_store_test_storagesize.cpp
@@ -57,7 +57,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
diff --git a/src/mongo/db/storage/record_store_test_touch.cpp b/src/mongo/db/storage/record_store_test_touch.cpp
index b692cae2d0e..14084ceadd4 100644
--- a/src/mongo/db/storage/record_store_test_touch.cpp
+++ b/src/mongo/db/storage/record_store_test_touch.cpp
@@ -78,7 +78,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -141,7 +141,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
diff --git a/src/mongo/db/storage/record_store_test_truncate.cpp b/src/mongo/db/storage/record_store_test_truncate.cpp
index 538ddab3c54..1beb17278ca 100644
--- a/src/mongo/db/storage/record_store_test_truncate.cpp
+++ b/src/mongo/db/storage/record_store_test_truncate.cpp
@@ -83,7 +83,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
diff --git a/src/mongo/db/storage/record_store_test_updaterecord.cpp b/src/mongo/db/storage/record_store_test_updaterecord.cpp
index 66bc1a55158..63ca8fd77e1 100644
--- a/src/mongo/db/storage/record_store_test_updaterecord.cpp
+++ b/src/mongo/db/storage/record_store_test_updaterecord.cpp
@@ -52,12 +52,12 @@ namespace mongo {
}
string data = "my record";
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -77,7 +77,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->updateRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->updateRecord( opCtx.get(),
loc,
data.c_str(),
data.size() + 1,
@@ -110,7 +110,7 @@ namespace mongo {
}
const int nToInsert = 10;
- DiskLoc locs[nToInsert];
+ RecordId locs[nToInsert];
for ( int i = 0; i < nToInsert; i++ ) {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
@@ -119,7 +119,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
@@ -142,7 +142,7 @@ namespace mongo {
string data = ss.str();
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->updateRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->updateRecord( opCtx.get(),
locs[i],
data.c_str(),
data.size() + 1,
@@ -179,12 +179,12 @@ namespace mongo {
}
string oldData = "my record";
- DiskLoc loc;
+ RecordId loc;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
oldData.c_str(),
oldData.size() + 1,
false );
@@ -206,7 +206,7 @@ namespace mongo {
UpdateMoveNotifierSpy umn( opCtx.get(), loc, oldData.c_str(), oldData.size() );
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->updateRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->updateRecord( opCtx.get(),
loc,
newData.c_str(),
newData.size() + 1,
@@ -214,7 +214,7 @@ namespace mongo {
&umn );
ASSERT_OK( res.getStatus() );
// UpdateMoveNotifier::recordStoreGoingToMove() is called only if
- // the DiskLoc for the record changes
+ // the RecordId for the record changes
if ( loc == res.getValue() ) {
ASSERT_EQUALS( 0, umn.getNumCalls() );
} else {
diff --git a/src/mongo/db/storage/record_store_test_updaterecord.h b/src/mongo/db/storage/record_store_test_updaterecord.h
index 76e7653cbcd..479ac9f7748 100644
--- a/src/mongo/db/storage/record_store_test_updaterecord.h
+++ b/src/mongo/db/storage/record_store_test_updaterecord.h
@@ -42,7 +42,7 @@ namespace {
class UpdateMoveNotifierSpy : public UpdateMoveNotifier {
public:
- UpdateMoveNotifierSpy( OperationContext* txn, const DiskLoc &loc,
+ UpdateMoveNotifierSpy( OperationContext* txn, const RecordId &loc,
const char *buf, size_t size )
: _txn( txn ), _loc( loc ), _data( buf, size ), nCalls( 0 ) {
}
@@ -50,7 +50,7 @@ namespace {
~UpdateMoveNotifierSpy() { }
Status recordStoreGoingToMove( OperationContext *txn,
- const DiskLoc &oldLocation,
+ const RecordId &oldLocation,
const char *oldBuffer,
size_t oldSize ) {
nCalls++;
@@ -64,7 +64,7 @@ namespace {
private:
OperationContext *_txn;
- DiskLoc _loc;
+ RecordId _loc;
string _data;
int nCalls; // to verify that recordStoreGoingToMove() gets called once
diff --git a/src/mongo/db/storage/record_store_test_updatewithdamages.cpp b/src/mongo/db/storage/record_store_test_updatewithdamages.cpp
index 52a3d7fc705..63280494557 100644
--- a/src/mongo/db/storage/record_store_test_updatewithdamages.cpp
+++ b/src/mongo/db/storage/record_store_test_updatewithdamages.cpp
@@ -50,13 +50,13 @@ namespace mongo {
}
string data = "00010111";
- DiskLoc loc;
+ RecordId loc;
const RecordData rec(data.c_str(), data.size() + 1);
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
rec.data(),
rec.size(),
false );
@@ -113,13 +113,13 @@ namespace mongo {
}
string data = "00010111";
- DiskLoc loc;
+ RecordId loc;
const RecordData rec(data.c_str(), data.size() + 1);
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
rec.data(),
rec.size(),
false );
@@ -174,13 +174,13 @@ namespace mongo {
}
string data = "00010111";
- DiskLoc loc;
+ RecordId loc;
const RecordData rec(data.c_str(), data.size() + 1);
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
rec.data(),
rec.size(),
false );
@@ -233,13 +233,13 @@ namespace mongo {
}
string data = "my record";
- DiskLoc loc;
+ RecordId loc;
const RecordData rec(data.c_str(), data.size() + 1);
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
rec.data(),
rec.size(),
false );
diff --git a/src/mongo/db/storage/record_store_test_validate.h b/src/mongo/db/storage/record_store_test_validate.h
index 79512193356..1cb66c43c94 100644
--- a/src/mongo/db/storage/record_store_test_validate.h
+++ b/src/mongo/db/storage/record_store_test_validate.h
@@ -97,7 +97,7 @@ namespace {
ASSERT( _remain.insert( data ).second );
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = _rs->insertRecord( opCtx.get(),
+ StatusWith<RecordId> res = _rs->insertRecord( opCtx.get(),
data.c_str(),
data.size() + 1,
false );
diff --git a/src/mongo/db/storage/rocks/rocks_record_store.cpp b/src/mongo/db/storage/rocks/rocks_record_store.cpp
index fe4f368876b..75bbe1c82ff 100644
--- a/src/mongo/db/storage/rocks/rocks_record_store.cpp
+++ b/src/mongo/db/storage/rocks/rocks_record_store.cpp
@@ -80,11 +80,11 @@ namespace mongo {
iter->SeekToLast();
if (iter->Valid()) {
rocksdb::Slice lastSlice = iter->key();
- DiskLoc lastLoc = _makeDiskLoc( lastSlice );
+ RecordId lastLoc = _makeDiskLoc( lastSlice );
_nextIdNum.store( lastLoc.getOfs() + ( uint64_t( lastLoc.a() ) << 32 ) + 1) ;
}
else {
- // Need to start at 1 so we are always higher than minDiskLoc
+ // Need to start at 1 so we are always higher than RecordId::min()
_nextIdNum.store( 1 );
}
@@ -117,16 +117,16 @@ namespace mongo {
BSONObjBuilder* extraInfo,
int infoLevel ) const {
uint64_t storageSize;
- rocksdb::Range wholeRange( _makeKey( minDiskLoc ), _makeKey( maxDiskLoc ) );
+ rocksdb::Range wholeRange( _makeKey( RecordId::min() ), _makeKey( RecordId::max() ) );
_db->GetApproximateSizes(_columnFamily.get(), &wholeRange, 1, &storageSize);
return static_cast<int64_t>( storageSize );
}
- RecordData RocksRecordStore::dataFor( OperationContext* txn, const DiskLoc& loc) const {
+ RecordData RocksRecordStore::dataFor( OperationContext* txn, const RecordId& loc) const {
return _getDataFor(_db, _columnFamily.get(), txn, loc);
}
- void RocksRecordStore::deleteRecord( OperationContext* txn, const DiskLoc& dl ) {
+ void RocksRecordStore::deleteRecord( OperationContext* txn, const RecordId& dl ) {
RocksRecoveryUnit* ru = RocksRecoveryUnit::getRocksRecoveryUnit( txn );
std::string oldValue;
@@ -177,7 +177,7 @@ namespace mongo {
invariant(numRecords(txn) > 0);
rocksdb::Slice slice = iter->key();
- DiskLoc oldest = _makeDiskLoc( slice );
+ RecordId oldest = _makeDiskLoc( slice );
if ( _cappedDeleteCallback )
uassertStatusOK(_cappedDeleteCallback->aboutToDeleteCapped(txn, oldest));
@@ -187,18 +187,18 @@ namespace mongo {
}
}
- StatusWith<DiskLoc> RocksRecordStore::insertRecord( OperationContext* txn,
+ StatusWith<RecordId> RocksRecordStore::insertRecord( OperationContext* txn,
const char* data,
int len,
bool enforceQuota ) {
if ( _isCapped && len > _cappedMaxSize ) {
- return StatusWith<DiskLoc>( ErrorCodes::BadValue,
+ return StatusWith<RecordId>( ErrorCodes::BadValue,
"object to insert exceeds cappedMaxSize" );
}
RocksRecoveryUnit* ru = RocksRecoveryUnit::getRocksRecoveryUnit( txn );
- DiskLoc loc = _nextId();
+ RecordId loc = _nextId();
ru->writeBatch()->Put(_columnFamily.get(), _makeKey(loc), rocksdb::Slice(data, len));
@@ -207,10 +207,10 @@ namespace mongo {
cappedDeleteAsNeeded(txn);
- return StatusWith<DiskLoc>( loc );
+ return StatusWith<RecordId>( loc );
}
- StatusWith<DiskLoc> RocksRecordStore::insertRecord( OperationContext* txn,
+ StatusWith<RecordId> RocksRecordStore::insertRecord( OperationContext* txn,
const DocWriter* doc,
bool enforceQuota ) {
const int len = doc->documentSize();
@@ -220,8 +220,8 @@ namespace mongo {
return insertRecord( txn, buf.get(), len, enforceQuota );
}
- StatusWith<DiskLoc> RocksRecordStore::updateRecord( OperationContext* txn,
- const DiskLoc& loc,
+ StatusWith<RecordId> RocksRecordStore::updateRecord( OperationContext* txn,
+ const RecordId& loc,
const char* data,
int len,
bool enforceQuota,
@@ -232,7 +232,7 @@ namespace mongo {
auto status = ru->Get(_columnFamily.get(), _makeKey(loc), &old_value);
if ( !status.ok() ) {
- return StatusWith<DiskLoc>( ErrorCodes::InternalError, status.ToString() );
+ return StatusWith<RecordId>( ErrorCodes::InternalError, status.ToString() );
}
int old_length = old_value.size();
@@ -243,11 +243,11 @@ namespace mongo {
cappedDeleteAsNeeded(txn);
- return StatusWith<DiskLoc>( loc );
+ return StatusWith<RecordId>( loc );
}
Status RocksRecordStore::updateWithDamages( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages ) {
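updateWithDamages patches a record in place rather than rewriting it, which is why it takes a damage source plus a vector of events instead of a full buffer. A minimal sketch of the semantics, with an assumed DamageEvent layout (the real definition lives in mutablebson's damage_vector.h; treat the field names here as an assumption):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Assumed shape of mutablebson::DamageEvent.
    struct DamageEvent {
        uint32_t sourceOffset;
        uint32_t targetOffset;
        uint32_t size;
    };
    typedef std::vector<DamageEvent> DamageVector;

    // In-place patching: each event copies bytes from the damage source
    // into the existing record. The record never grows or shrinks, which
    // is why this path can avoid moving the document.
    void applyDamagesSketch(char* record, const char* damageSource,
                            const DamageVector& damages) {
        for (size_t i = 0; i < damages.size(); ++i) {
            const DamageEvent& d = damages[i];
            std::memcpy(record + d.targetOffset,
                        damageSource + d.sourceOffset,
                        d.size);
        }
    }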
@@ -284,7 +284,7 @@ namespace mongo {
}
RecordIterator* RocksRecordStore::getIterator( OperationContext* txn,
- const DiskLoc& start,
+ const RecordId& start,
const CollectionScanParams::Direction& dir
) const {
return new Iterator(txn, _db, _columnFamily, dir, start);
@@ -308,7 +308,7 @@ namespace mongo {
//AFB add Clear(ColumnFamilyHandle*)
boost::scoped_ptr<RecordIterator> iter( getIterator( txn ) );
while( !iter->isEOF() ) {
- DiskLoc loc = iter->getNext();
+ RecordId loc = iter->getNext();
deleteRecord( txn, loc );
}
@@ -412,8 +412,8 @@ namespace mongo {
virtual ~RocksCollectionComparator() { }
virtual int Compare( const rocksdb::Slice& a, const rocksdb::Slice& b ) const {
- DiskLoc lhs = reinterpret_cast<const DiskLoc*>( a.data() )[0];
- DiskLoc rhs = reinterpret_cast<const DiskLoc*>( b.data() )[0];
+ RecordId lhs = reinterpret_cast<const RecordId*>( a.data() )[0];
+ RecordId rhs = reinterpret_cast<const RecordId*>( b.data() )[0];
return lhs.compare( rhs );
}
@@ -441,14 +441,14 @@ namespace mongo {
}
void RocksRecordStore::temp_cappedTruncateAfter( OperationContext* txn,
- DiskLoc end,
+ RecordId end,
bool inclusive ) {
boost::scoped_ptr<RecordIterator> iter(
- getIterator( txn, maxDiskLoc, CollectionScanParams::BACKWARD ) );
+ getIterator( txn, RecordId::max(), CollectionScanParams::BACKWARD ) );
while( !iter->isEOF() ) {
WriteUnitOfWork wu( txn );
- DiskLoc loc = iter->getNext();
+ RecordId loc = iter->getNext();
if ( loc < end || ( !inclusive && loc == end))
return;
@@ -476,25 +476,25 @@ namespace mongo {
return options;
}
- DiskLoc RocksRecordStore::_nextId() {
+ RecordId RocksRecordStore::_nextId() {
const uint64_t myId = _nextIdNum.fetchAndAdd(1);
int a = myId >> 32;
// This masks the lowest 4 bytes of myId
int ofs = myId & 0x00000000FFFFFFFF;
- DiskLoc loc( a, ofs );
+ RecordId loc( a, ofs );
return loc;
}
- rocksdb::Slice RocksRecordStore::_makeKey(const DiskLoc& loc) {
+ rocksdb::Slice RocksRecordStore::_makeKey(const RecordId& loc) {
return rocksdb::Slice(reinterpret_cast<const char*>(&loc), sizeof(loc));
}
- DiskLoc RocksRecordStore::_makeDiskLoc( const rocksdb::Slice& slice ) {
- return reinterpret_cast<const DiskLoc*>( slice.data() )[0];
+ RecordId RocksRecordStore::_makeDiskLoc( const rocksdb::Slice& slice ) {
+ return reinterpret_cast<const RecordId*>( slice.data() )[0];
}
bool RocksRecordStore::findRecord( OperationContext* txn,
- const DiskLoc& loc, RecordData* out ) const {
+ const RecordId& loc, RecordData* out ) const {
RecordData rd = _getDataFor(_db, _columnFamily.get(), txn, loc);
if ( rd.data() == NULL )
return false;
@@ -503,7 +503,7 @@ namespace mongo {
}
RecordData RocksRecordStore::_getDataFor(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* cf,
- OperationContext* txn, const DiskLoc& loc) {
+ OperationContext* txn, const RecordId& loc) {
RocksRecoveryUnit* ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
std::string value_storage;
@@ -549,7 +549,7 @@ namespace mongo {
RocksRecordStore::Iterator::Iterator(
OperationContext* txn, rocksdb::DB* db,
boost::shared_ptr<rocksdb::ColumnFamilyHandle> columnFamily,
- const CollectionScanParams::Direction& dir, const DiskLoc& start)
+ const CollectionScanParams::Direction& dir, const RecordId& start)
: _txn(txn),
_db(db),
_cf(columnFamily),
@@ -570,20 +570,20 @@ namespace mongo {
return _eof;
}
- DiskLoc RocksRecordStore::Iterator::curr() {
+ RecordId RocksRecordStore::Iterator::curr() {
if (_eof) {
- return DiskLoc();
+ return RecordId();
}
return _curr;
}
- DiskLoc RocksRecordStore::Iterator::getNext() {
+ RecordId RocksRecordStore::Iterator::getNext() {
if (_eof) {
- return DiskLoc();
+ return RecordId();
}
- DiskLoc toReturn = _curr;
+ RecordId toReturn = _curr;
if ( _forward() )
_iterator->Next();
@@ -599,7 +599,7 @@ namespace mongo {
return toReturn;
}
- void RocksRecordStore::Iterator::invalidate( const DiskLoc& dl ) {
+ void RocksRecordStore::Iterator::invalidate( const RecordId& dl ) {
_iterator.reset( NULL );
}
@@ -619,7 +619,7 @@ namespace mongo {
return true;
}
- RecordData RocksRecordStore::Iterator::dataFor(const DiskLoc& loc) const {
+ RecordData RocksRecordStore::Iterator::dataFor(const RecordId& loc) const {
if (!_eof && loc == _curr && _iterator->Valid() && _iterator->status().ok()) {
SharedBuffer data = SharedBuffer::allocate(_iterator->value().size());
memcpy(data.get(), _iterator->value().data(), _iterator->value().size());
@@ -628,7 +628,7 @@ namespace mongo {
return RocksRecordStore::_getDataFor(_db, _cf.get(), _txn, loc);
}
- void RocksRecordStore::Iterator::_locate(const DiskLoc& loc) {
+ void RocksRecordStore::Iterator::_locate(const RecordId& loc) {
if (_forward()) {
if (loc.isNull()) {
_iterator->SeekToFirst();
@@ -659,7 +659,7 @@ namespace mongo {
}
}
- DiskLoc RocksRecordStore::Iterator::_decodeCurr() const {
+ RecordId RocksRecordStore::Iterator::_decodeCurr() const {
invariant(_iterator && _iterator->Valid());
return _makeDiskLoc(_iterator->key());
}
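Two details above are worth isolating: _nextId() splits a monotonically increasing 64-bit counter across the RecordId's two halves, and startup rebuilds that counter from the last key on disk. A self-contained sketch of the round trip (plain structs; the real code additionally installs a custom comparator because raw memcpy'd keys do not sort correctly byte-wise on little-endian machines):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    struct Rid { int32_t a, ofs; };

    // Counter -> RecordId: high 32 bits become `a`, low 32 bits become `ofs`.
    Rid nextIdSketch(uint64_t counter) {
        Rid r;
        r.a = int32_t(counter >> 32);
        r.ofs = int32_t(counter & 0x00000000FFFFFFFFull);
        return r;
    }

    // RecordId -> counter, mirroring the initialization path that seeds
    // the counter from the last key in the column family.
    uint64_t counterFromRid(const Rid& r) {
        return uint64_t(uint32_t(r.ofs)) + (uint64_t(uint32_t(r.a)) << 32);
    }

    int main() {
        uint64_t c = 0x0000000200000007ull;  // arbitrary counter value
        Rid r = nextIdSketch(c);
        assert(counterFromRid(r) == c);
        // The store then uses the raw bytes of `r` as the RocksDB key:
        char key[sizeof(Rid)];
        std::memcpy(key, &r, sizeof(r));
        (void)key;
        return 0;
    }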
diff --git a/src/mongo/db/storage/rocks/rocks_record_store.h b/src/mongo/db/storage/rocks/rocks_record_store.h
index 0dc97c8dab9..096543fe19d 100644
--- a/src/mongo/db/storage/rocks/rocks_record_store.h
+++ b/src/mongo/db/storage/rocks/rocks_record_store.h
@@ -77,38 +77,38 @@ namespace mongo {
// CRUD related
- virtual RecordData dataFor( OperationContext* txn, const DiskLoc& loc ) const;
+ virtual RecordData dataFor( OperationContext* txn, const RecordId& loc ) const;
virtual bool findRecord( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
RecordData* out ) const;
- virtual void deleteRecord( OperationContext* txn, const DiskLoc& dl );
+ virtual void deleteRecord( OperationContext* txn, const RecordId& dl );
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const char* data,
int len,
bool enforceQuota );
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const DocWriter* doc,
bool enforceQuota );
- virtual StatusWith<DiskLoc> updateRecord( OperationContext* txn,
- const DiskLoc& oldLocation,
+ virtual StatusWith<RecordId> updateRecord( OperationContext* txn,
+ const RecordId& oldLocation,
const char* data,
int len,
bool enforceQuota,
UpdateMoveNotifier* notifier );
virtual Status updateWithDamages( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages );
virtual RecordIterator* getIterator( OperationContext* txn,
- const DiskLoc& start = DiskLoc(),
+ const RecordId& start = RecordId(),
const CollectionScanParams::Direction& dir =
CollectionScanParams::FORWARD ) const;
@@ -141,7 +141,7 @@ namespace mongo {
BSONObjBuilder* info = NULL );
virtual void temp_cappedTruncateAfter(OperationContext* txn,
- DiskLoc end,
+ RecordId end,
bool inclusive);
void setCappedDeleteCallback(CappedDocumentDeleteCallback* cb) {
@@ -163,19 +163,19 @@ namespace mongo {
public:
Iterator(OperationContext* txn, rocksdb::DB* db,
boost::shared_ptr<rocksdb::ColumnFamilyHandle> columnFamily,
- const CollectionScanParams::Direction& dir, const DiskLoc& start);
+ const CollectionScanParams::Direction& dir, const RecordId& start);
virtual bool isEOF();
- virtual DiskLoc curr();
- virtual DiskLoc getNext();
- virtual void invalidate(const DiskLoc& dl);
+ virtual RecordId curr();
+ virtual RecordId getNext();
+ virtual void invalidate(const RecordId& dl);
virtual void saveState();
virtual bool restoreState(OperationContext* txn);
- virtual RecordData dataFor( const DiskLoc& loc ) const;
+ virtual RecordData dataFor( const RecordId& loc ) const;
private:
- void _locate(const DiskLoc& loc);
- DiskLoc _decodeCurr() const;
+ void _locate(const RecordId& loc);
+ RecordId _decodeCurr() const;
bool _forward() const;
void _checkStatus();
@@ -184,7 +184,7 @@ namespace mongo {
boost::shared_ptr<rocksdb::ColumnFamilyHandle> _cf;
CollectionScanParams::Direction _dir;
bool _eof;
- DiskLoc _curr;
+ RecordId _curr;
boost::scoped_ptr<rocksdb::Iterator> _iterator;
};
@@ -194,18 +194,18 @@ namespace mongo {
*/
static rocksdb::ReadOptions _readOptions(OperationContext* opCtx = NULL);
- static DiskLoc _makeDiskLoc( const rocksdb::Slice& slice );
+ static RecordId _makeDiskLoc( const rocksdb::Slice& slice );
static RecordData _getDataFor(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* cf,
- OperationContext* txn, const DiskLoc& loc);
+ OperationContext* txn, const RecordId& loc);
- DiskLoc _nextId();
+ RecordId _nextId();
bool cappedAndNeedDelete(OperationContext* txn) const;
void cappedDeleteAsNeeded(OperationContext* txn);
- // The use of this function requires that the passed in DiskLoc outlives the returned Slice
+ // The use of this function requires that the passed-in RecordId outlives the returned Slice
// TODO possibly make this safer in the future
- static rocksdb::Slice _makeKey( const DiskLoc& loc );
+ static rocksdb::Slice _makeKey( const RecordId& loc );
void _changeNumRecords(OperationContext* txn, bool insert);
void _increaseDataSize(OperationContext* txn, int amount);
diff --git a/src/mongo/db/storage/rocks/rocks_sorted_data_impl.cpp b/src/mongo/db/storage/rocks/rocks_sorted_data_impl.cpp
index 69c78a09d3a..34ec1a9c676 100644
--- a/src/mongo/db/storage/rocks/rocks_sorted_data_impl.cpp
+++ b/src/mongo/db/storage/rocks/rocks_sorted_data_impl.cpp
@@ -55,7 +55,7 @@ namespace mongo {
rocksdb::Slice emptyByteSlice( "" );
rocksdb::SliceParts emptyByteSliceParts( &emptyByteSlice, 1 );
- // functions for converting between BSONObj-DiskLoc pairs and strings/rocksdb::Slices
+ // functions for converting between BSONObj-RecordId pairs and strings/rocksdb::Slices
/**
* Strips the field names from a BSON object
@@ -77,21 +77,21 @@ namespace mongo {
* strings, and false otherwise. Useful because field names are not necessary in an index
* key, since the ordering of the fields is already known.
*/
- string makeString( const BSONObj& key, const DiskLoc loc, bool removeFieldNames = true ) {
+ string makeString( const BSONObj& key, const RecordId loc, bool removeFieldNames = true ) {
const BSONObj& finalKey = removeFieldNames ? stripFieldNames( key ) : key;
string s( finalKey.objdata(), finalKey.objsize() );
- s.append( reinterpret_cast<const char*>( &loc ), sizeof( DiskLoc ) );
+ s.append( reinterpret_cast<const char*>( &loc ), sizeof( RecordId ) );
return s;
}
/**
* Constructs an IndexKeyEntry from a slice containing the bytes of a BSONObject followed
- * by the bytes of a DiskLoc
+ * by the bytes of a RecordId
*/
IndexKeyEntry makeIndexKeyEntry( const rocksdb::Slice& slice ) {
BSONObj key = BSONObj( slice.data() ).getOwned();
- DiskLoc loc = *reinterpret_cast<const DiskLoc*>( slice.data() + key.objsize() );
+ RecordId loc = *reinterpret_cast<const RecordId*>( slice.data() + key.objsize() );
return IndexKeyEntry( key, loc );
}
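The two helpers above are inverses, and the layout they agree on is simply the stripped BSONObj bytes followed immediately by the raw RecordId bytes; makeIndexKeyEntry() finds the boundary by reading objsize() out of the BSON header. A self-contained sketch of the round trip (memcpy is used for the decode because the reinterpret_cast in the code above additionally assumes suitably aligned input; <cstring> provides memcpy):

    // Layout: [BSONObj bytes][RecordId bytes], no separator needed.
    std::string encodeEntry(const BSONObj& strippedKey, const RecordId& loc) {
        std::string s(strippedKey.objdata(), strippedKey.objsize());
        s.append(reinterpret_cast<const char*>(&loc), sizeof(RecordId));
        return s;
    }

    IndexKeyEntry decodeEntry(const rocksdb::Slice& slice) {
        BSONObj key = BSONObj(slice.data()).getOwned();   // BSON length is self-describing
        RecordId loc;
        std::memcpy(&loc, slice.data() + key.objsize(), sizeof(RecordId));
        return IndexKeyEntry(key, loc);
    }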
@@ -141,11 +141,11 @@ namespace mongo {
( valid && otherValid && _iterator->key() == realOther->_iterator->key() );
}
- void aboutToDeleteBucket(const DiskLoc& bucket) {
+ void aboutToDeleteBucket(const RecordId& bucket) {
invariant( !"aboutToDeleteBucket should never be called from RocksSortedDataImpl" );
}
- bool locate(const BSONObj& key, const DiskLoc& loc) {
+ bool locate(const BSONObj& key, const RecordId& loc) {
if (_forward) {
return _locate(stripFieldNames(key), loc);
} else {
@@ -169,9 +169,9 @@ namespace mongo {
getDirection() );
if (_forward) {
- _locate(key, minDiskLoc);
+ _locate(key, RecordId::min());
} else {
- _reverseLocate(key, maxDiskLoc);
+ _reverseLocate(key, RecordId::max());
}
}
@@ -194,7 +194,7 @@ namespace mongo {
return _cachedKey;
}
- DiskLoc getDiskLoc() const {
+ RecordId getRecordId() const {
_load();
return _cachedLoc;
}
@@ -216,7 +216,7 @@ namespace mongo {
_savedAtEnd = false;
_savePositionObj = getKey().getOwned();
- _savePositionLoc = getDiskLoc();
+ _savePositionLoc = getRecordId();
}
void restorePosition(OperationContext* txn) {
@@ -252,7 +252,7 @@ namespace mongo {
}
// _locate() for reverse iterators
- bool _reverseLocate( const BSONObj& key, const DiskLoc loc ) {
+ bool _reverseLocate( const BSONObj& key, const RecordId loc ) {
invariant( !_forward );
const IndexKeyEntry keyEntry( key, loc );
@@ -289,7 +289,7 @@ namespace mongo {
* helper so that it's possible to choose whether or not to strip the field names before
* performing the actual locate logic.
*/
- bool _locate( const BSONObj& key, const DiskLoc loc ) {
+ bool _locate( const BSONObj& key, const RecordId loc ) {
invariant(_forward);
_isCached = false;
@@ -325,7 +325,7 @@ namespace mongo {
_isCached = true;
rocksdb::Slice slice = _iterator->key();
_cachedKey = BSONObj( slice.data() ).getOwned();
- _cachedLoc = *reinterpret_cast<const DiskLoc*>( slice.data() +
+ _cachedLoc = *reinterpret_cast<const RecordId*>( slice.data() +
_cachedKey.objsize() );
}
@@ -336,12 +336,12 @@ namespace mongo {
mutable bool _isCached;
mutable BSONObj _cachedKey;
- mutable DiskLoc _cachedLoc;
+ mutable RecordId _cachedLoc;
// not for caching, but rather for savePosition() and restorePosition()
bool _savedAtEnd;
BSONObj _savePositionObj;
- DiskLoc _savePositionLoc;
+ RecordId _savePositionLoc;
// Used for comparing elements in reverse iterators. Because the rocksdb::Iterator is
// only a forward iterator, it is sometimes necessary to compare index keys manually
@@ -350,7 +350,7 @@ namespace mongo {
};
/**
- * Custom comparator for rocksdb used to compare Index Entries by BSONObj and DiskLoc
+ * Custom comparator for rocksdb used to compare Index Entries by BSONObj and RecordId
*/
class RocksIndexEntryComparator : public rocksdb::Comparator {
public:
@@ -408,7 +408,7 @@ namespace mongo {
invariant(index->isEmpty(txn));
}
- Status addKey(const BSONObj& key, const DiskLoc& loc) {
+ Status addKey(const BSONObj& key, const RecordId& loc) {
// TODO maybe optimize based on a fact that index is empty?
return _index->insert(_txn, key, loc, _dupsAllowed);
}
@@ -456,7 +456,7 @@ namespace mongo {
Status RocksSortedDataImpl::insert(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed) {
if (key.objsize() >= kTempKeyMaxSize) {
@@ -485,7 +485,7 @@ namespace mongo {
void RocksSortedDataImpl::unindex(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed) {
RocksRecoveryUnit* ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);
@@ -503,11 +503,11 @@ namespace mongo {
Status RocksSortedDataImpl::dupKeyCheck(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc) {
+ const RecordId& loc) {
boost::scoped_ptr<SortedDataInterface::Cursor> cursor(newCursor(txn, 1));
- cursor->locate(key, DiskLoc(0, 0));
+ cursor->locate(key, RecordId(0, 0));
- if (cursor->isEOF() || cursor->getKey() != key || cursor->getDiskLoc() == loc) {
+ if (cursor->isEOF() || cursor->getKey() != key || cursor->getRecordId() == loc) {
return Status::OK();
} else {
return Status(ErrorCodes::DuplicateKey, dupKeyError(key));
diff --git a/src/mongo/db/storage/rocks/rocks_sorted_data_impl.h b/src/mongo/db/storage/rocks/rocks_sorted_data_impl.h
index accd8b25abc..404e57b4f12 100644
--- a/src/mongo/db/storage/rocks/rocks_sorted_data_impl.h
+++ b/src/mongo/db/storage/rocks/rocks_sorted_data_impl.h
@@ -50,16 +50,16 @@ namespace mongo {
class RocksSortedDataBuilderImpl : public SortedDataBuilderInterface {
public:
- virtual Status addKey(const BSONObj& key, const DiskLoc& loc) = 0;
+ virtual Status addKey(const BSONObj& key, const RecordId& loc) = 0;
};
/**
* Rocks implementation of the SortedDataInterface. Each index is stored as a single column
- * family. Each mapping from a BSONObj to a DiskLoc is stored as the key of a key-value pair
+ * family. Each mapping from a BSONObj to a RecordId is stored as the key of a key-value pair
* in the column family. Consequently, each value in the database is simply an empty string.
* This is done because RocksDB only supports unique keys, and because RocksDB can take a custom
* comparator to use when ordering keys. We use a custom comparator which orders keys based
- * first upon the BSONObj in the key, and uses the DiskLoc as a tiebreaker.
+ * first upon the BSONObj in the key, and uses the RecordId as a tiebreaker.
*/
class RocksSortedDataImpl : public SortedDataInterface {
MONGO_DISALLOW_COPYING( RocksSortedDataImpl );
@@ -71,13 +71,13 @@ namespace mongo {
virtual Status insert(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed);
- virtual void unindex(OperationContext* txn, const BSONObj& key, const DiskLoc& loc,
+ virtual void unindex(OperationContext* txn, const BSONObj& key, const RecordId& loc,
bool dupsAllowed);
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const DiskLoc& loc);
+ virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc);
virtual void fullValidate(OperationContext* txn, bool full, long long* numKeysOut,
BSONObjBuilder* output) const;
@@ -101,7 +101,6 @@ namespace mongo {
static rocksdb::Comparator* newRocksComparator( const Ordering& order );
private:
- typedef DiskLoc RecordId;
rocksdb::DB* _db; // not owned
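The comparator this header refers to can be pictured as follows: decode both slices with the helpers from the .cpp, compare the BSON keys under the index Ordering, and consult the RecordId only on ties, which is what keeps duplicate keys distinct in the column family. A hedged sketch, assuming the woCompare(other, ordering, considerFieldName) overload of BSONObj and the IndexKeyEntry members used elsewhere in this diff:

    // Rough shape of RocksIndexEntryComparator::Compare().
    int compareIndexEntries(const rocksdb::Slice& a, const rocksdb::Slice& b,
                            const Ordering& order) {
        IndexKeyEntry lhs = makeIndexKeyEntry(a);
        IndexKeyEntry rhs = makeIndexKeyEntry(b);

        int cmp = lhs.key.woCompare(rhs.key, order, /*considerFieldName=*/false);
        if (cmp != 0)
            return cmp;

        // Equal BSON keys: fall back to the RecordId tiebreaker.
        if (lhs.loc == rhs.loc)
            return 0;
        return lhs.loc < rhs.loc ? -1 : 1;
    }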
diff --git a/src/mongo/db/storage/sorted_data_interface.h b/src/mongo/db/storage/sorted_data_interface.h
index 1fecf2591f0..d4c9c7be9c6 100644
--- a/src/mongo/db/storage/sorted_data_interface.h
+++ b/src/mongo/db/storage/sorted_data_interface.h
@@ -82,7 +82,7 @@ namespace mongo {
bool dupsAllowed) = 0;
/**
- * Insert an entry into the index with the specified key and DiskLoc.
+ * Insert an entry into the index with the specified key and RecordId.
*
* @param txn the transaction under which the insert takes place
* @param dupsAllowed true if duplicate keys are allowed, and false
@@ -91,15 +91,15 @@ namespace mongo {
* @return Status::OK() if the insert succeeded,
*
* ErrorCodes::DuplicateKey if 'key' already exists in 'this' index
- * at a DiskLoc other than 'loc' and duplicates were not allowed
+ * at a RecordId other than 'loc' and duplicates were not allowed
*/
virtual Status insert(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed) = 0;
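The DuplicateKey clause above is easier to read as a usage sketch: with dupsAllowed set to false, re-inserting an existing key under a different RecordId must fail, while the exact (key, RecordId) pair already stored is not treated as a duplicate. Hypothetical calls against this interface, where idx is a concrete SortedDataInterface and txn an OperationContext*:

    BSONObj key = BSON("" << 1);
    Status s1 = idx->insert(txn, key, RecordId(5, 2), /*dupsAllowed=*/false);
    invariant(s1.isOK());                               // first insert succeeds

    Status s2 = idx->insert(txn, key, RecordId(5, 4), /*dupsAllowed=*/false);
    invariant(s2.code() == ErrorCodes::DuplicateKey);   // same key, other RecordId

    Status s3 = idx->insert(txn, key, RecordId(5, 4), /*dupsAllowed=*/true);
    invariant(s3.isOK());                               // duplicates explicitly allowed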
/**
- * Remove the entry from the index with the specified key and DiskLoc.
+ * Remove the entry from the index with the specified key and RecordId.
*
* @param txn the transaction under which the remove takes place
* @param dupsAllowed true if duplicate keys are allowed, and false
@@ -107,12 +107,12 @@ namespace mongo {
*/
virtual void unindex(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed) = 0;
/**
* Return ErrorCodes::DuplicateKey if 'key' already exists in 'this'
- * index at a DiskLoc other than 'loc', and Status::OK() otherwise.
+ * index at a RecordId other than 'loc', and Status::OK() otherwise.
*
* @param txn the transaction under which this operation takes place
*
@@ -120,7 +120,7 @@ namespace mongo {
*/
virtual Status dupKeyCheck(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc) = 0;
+ const RecordId& loc) = 0;
//
// Information about the tree
@@ -201,7 +201,7 @@ namespace mongo {
/**
* Return true if 'this' cursor and the 'other' cursor are positioned at
- * the same key and DiskLoc, or if both cursors are at EOF. Otherwise,
+ * the same key and RecordId, or if both cursors are at EOF. Otherwise,
* this function returns false.
*
* Implementations should prohibit the comparison of cursors associated
@@ -214,17 +214,17 @@ namespace mongo {
* be forwarded to all Cursors over that SortedData.
* TODO something better.
*/
- virtual void aboutToDeleteBucket(const DiskLoc& bucket) = 0;
+ virtual void aboutToDeleteBucket(const RecordId& bucket) = 0;
/**
* Position 'this' forward (reverse) cursor either at the entry or
- * immediately after (or immediately before) the specified key and DiskLoc.
+ * immediately after (or immediately before) the specified key and RecordId.
* The cursor should be positioned at EOF if no such entry exists.
*
- * @return true if the entry (key, DiskLoc) exists within the index,
+ * @return true if the entry (key, RecordId) exists within the index,
* and false otherwise
*/
- virtual bool locate(const BSONObj& key, const DiskLoc& loc) = 0;
+ virtual bool locate(const BSONObj& key, const RecordId& loc) = 0;
/**
* Position 'this' forward (reverse) cursor either at the next
@@ -298,9 +298,9 @@ namespace mongo {
virtual BSONObj getKey() const = 0;
/**
- * Return the DiskLoc associated with the current position of 'this' cursor.
+ * Return the RecordId associated with the current position of 'this' cursor.
*/
- virtual DiskLoc getDiskLoc() const = 0;
+ virtual RecordId getRecordId() const = 0;
/**
* Position 'this' forward (reverse) cursor at the next (preceding) entry
@@ -313,7 +313,7 @@ namespace mongo {
//
/**
- * Save the entry in the index (i.e. its key and DiskLoc) of where
+ * Save the entry in the index (i.e. its key and RecordId) of where
* 'this' cursor is currently positioned.
*
* Implementations can assume that no operations other than delete
@@ -326,7 +326,7 @@ namespace mongo {
* Restore 'this' cursor to the previously saved entry in the index.
*
* Implementations should have the same behavior as calling locate()
- * with the saved key and DiskLoc.
+ * with the saved key and RecordId.
*/
virtual void restorePosition(OperationContext* txn) = 0;
};
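Taken together, savePosition() and restorePosition() support the yield pattern the save/restore tests later in this diff exercise: remember the current (key, RecordId), give up locks, then re-find the entry exactly, even among duplicate keys. A hedged sketch, where process() is a hypothetical callback and txn an OperationContext*:

    cursor->locate(startKey, RecordId::min());
    while (!cursor->isEOF()) {
        process(cursor->getKey(), cursor->getRecordId());

        cursor->savePosition();           // capture (key, RecordId)
        // ... locks may be dropped and reacquired here ...
        cursor->restorePosition(txn);     // same effect as locate(savedKey, savedLoc)

        cursor->advance();
    }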
@@ -369,7 +369,7 @@ namespace mongo {
* 'key' must be > or >= the last key passed to this function (depends on _dupsAllowed). If
* this is violated, an error Status (ErrorCodes::InternalError) will be returned.
*/
- virtual Status addKey(const BSONObj& key, const DiskLoc& loc) = 0;
+ virtual Status addKey(const BSONObj& key, const RecordId& loc) = 0;
/**
* Do any necessary work to finish building the tree.
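The ordering requirement on addKey() is what makes bulk building cheap: each key appends past the previous one. A sketch of how a builder might be driven, assuming a getBulkBuilder(txn, dupsAllowed) factory and a commit() finalizer on this revision of the interface (both names are assumptions, not confirmed by the hunk above):

    boost::scoped_ptr<SortedDataBuilderInterface> builder(
        idx->getBulkBuilder(txn, /*dupsAllowed=*/true));

    for (int i = 0; i < 10; i++) {
        // Keys arrive in ascending order; an out-of-order key would produce
        // the ErrorCodes::InternalError described above.
        Status s = builder->addKey(BSON("" << i), RecordId(42, i * 2));
        invariant(s.isOK());
    }
    builder->commit(/*mayInterrupt=*/false);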
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor.cpp
index 87583fdcdd8..c6adfd3de3f 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor.cpp
@@ -73,7 +73,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( minKey, minDiskLoc ) );
+ ASSERT( !cursor->locate( minKey, RecordId::min() ) );
ASSERT( cursor->isEOF() );
// Cursor at EOF should remain at EOF when advanced
@@ -96,7 +96,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( maxKey, maxDiskLoc ) );
+ ASSERT( !cursor->locate( maxKey, RecordId::max() ) );
ASSERT( cursor->isEOF() );
// Cursor at EOF should remain at EOF when advanced
@@ -122,7 +122,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
BSONObj key = BSON( "" << i );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
uow.commit();
}
@@ -136,11 +136,11 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( minKey, minDiskLoc ) );
+ ASSERT( !cursor->locate( minKey, RecordId::min() ) );
for ( int i = 0; i < nToInsert; i++ ) {
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << i ), cursor->getKey() );
- ASSERT_EQUALS( DiskLoc( 42, i * 2 ), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId( 42, i * 2 ), cursor->getRecordId() );
cursor->advance();
}
ASSERT( cursor->isEOF() );
@@ -168,7 +168,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
BSONObj key = BSON( "" << i );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
uow.commit();
}
@@ -182,11 +182,11 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( maxKey, maxDiskLoc ) );
+ ASSERT( !cursor->locate( maxKey, RecordId::max() ) );
for ( int i = nToInsert - 1; i >= 0; i-- ) {
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << i ), cursor->getKey() );
- ASSERT_EQUALS( DiskLoc( 42, i * 2 ), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId( 42, i * 2 ), cursor->getRecordId() );
cursor->advance();
}
ASSERT( cursor->isEOF() );
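A pattern that recurs throughout these tests: the old minDiskLoc/maxDiskLoc globals become the RecordId::min() and RecordId::max() factories, used as sentinels so that locate() means "position at the first (or last) entry at or beyond this key". The two seek idioms, condensed from the tests above (minKey/maxKey are the BSON sentinels the harness already uses):

    // Forward cursor: seek to the very first entry in the index.
    scoped_ptr<SortedDataInterface::Cursor> fwd(sorted->newCursor(opCtx.get(), 1));
    fwd->locate(minKey, RecordId::min());   // returns false: no exact match, so the
                                            // cursor lands on the first real entry (or EOF)

    // Reverse cursor: seek to the very last entry.
    scoped_ptr<SortedDataInterface::Cursor> rev(sorted->newCursor(opCtx.get(), -1));
    rev->locate(maxKey, RecordId::max());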
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_advanceto.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_advanceto.cpp
index 3f70c28ce1a..da78e71e595 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_advanceto.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_advanceto.cpp
@@ -39,7 +39,7 @@ namespace mongo {
// using a forward cursor by specifying their exact key. When
// advanceTo() is called on a duplicate key, the cursor is
// positioned at the next occurrence of that key in ascending
- // order by DiskLoc.
+ // order by RecordId.
TEST( SortedDataInterface, AdvanceTo ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
@@ -73,7 +73,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -83,7 +83,7 @@ namespace mongo {
// SERVER-15489 forward cursor is positioned at first occurrence of key in index
// when advanceTo() called on duplicate key
// ASSERT_EQUALS( key1, cursor->getKey() );
- // ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ // ASSERT_EQUALS( loc2, cursor->getRecordId() );
}
{
@@ -92,7 +92,7 @@ namespace mongo {
cursor->advanceTo( key2, 1, false, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc4, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc4, cursor->getRecordId() );
}
{
@@ -101,7 +101,7 @@ namespace mongo {
cursor->advanceTo( key3, 1, false, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc5, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc5, cursor->getRecordId() );
}
{
@@ -118,7 +118,7 @@ namespace mongo {
// using a reverse cursor by specifying their exact key. When
// advanceTo() is called on a duplicate key, the cursor is
// positioned at the next occurrence of that key in descending
- // order by DiskLoc.
+ // order by RecordId.
TEST( SortedDataInterface, AdvanceToReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
@@ -152,7 +152,7 @@ namespace mongo {
ASSERT( cursor->locate( key3, loc5 ) );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc5, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc5, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -162,7 +162,7 @@ namespace mongo {
// SERVER-15490 reverse cursor is positioned at last occurrence of key in index
// when advanceTo() called on duplicate key
// ASSERT_EQUALS( key3, cursor->getKey() );
- // ASSERT_EQUALS( loc4, cursor->getDiskLoc() );
+ // ASSERT_EQUALS( loc4, cursor->getRecordId() );
}
{
@@ -171,7 +171,7 @@ namespace mongo {
cursor->advanceTo( key2, 1, false, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
}
{
@@ -180,7 +180,7 @@ namespace mongo {
cursor->advanceTo( key1, 1, false, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
}
{
@@ -226,7 +226,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -236,7 +236,7 @@ namespace mongo {
// SERVER-15489 forward cursor is positioned at first key in index
// when advanceTo() called with key smaller than any entry
// ASSERT_EQUALS( key2, cursor->getKey() );
- // ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ // ASSERT_EQUALS( loc2, cursor->getRecordId() );
}
}
@@ -246,7 +246,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -256,7 +256,7 @@ namespace mongo {
// SERVER-15489 forward cursor is positioned at first key in index
// when advanceTo() called with key smaller than any entry
// ASSERT_EQUALS( key2, cursor->getKey() );
- // ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ // ASSERT_EQUALS( loc2, cursor->getRecordId() );
}
}
}
@@ -294,7 +294,7 @@ namespace mongo {
ASSERT( cursor->locate( key2, loc2 ) );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -304,7 +304,7 @@ namespace mongo {
// SERVER-15490 reverse cursor is positioned at last key in index
// when advanceTo() called with key larger than any entry
// ASSERT_EQUALS( key1, cursor->getKey() );
- // ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ // ASSERT_EQUALS( loc1, cursor->getRecordId() );
}
}
@@ -314,7 +314,7 @@ namespace mongo {
ASSERT( cursor->locate( key2, loc2 ) );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -324,7 +324,7 @@ namespace mongo {
// SERVER-15490 reverse cursor is positioned at last key in index
// when advanceTo() called with key larger than any entry
// ASSERT_EQUALS( key1, cursor->getKey() );
- // ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ // ASSERT_EQUALS( loc1, cursor->getRecordId() );
}
}
}
@@ -363,7 +363,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -382,7 +382,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -428,7 +428,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -447,7 +447,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -496,7 +496,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -504,7 +504,7 @@ namespace mongo {
cursor->advanceTo( key1, 1, true, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc4, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc4, cursor->getRecordId() );
}
{
@@ -513,7 +513,7 @@ namespace mongo {
cursor->advanceTo( key2, 1, true, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc5, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc5, cursor->getRecordId() );
}
{
@@ -573,7 +573,7 @@ namespace mongo {
ASSERT( cursor->locate( key3, loc5 ) );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc5, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc5, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -581,7 +581,7 @@ namespace mongo {
cursor->advanceTo( key3, 1, true, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
}
{
@@ -590,7 +590,7 @@ namespace mongo {
cursor->advanceTo( key2, 1, true, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
}
{
@@ -649,7 +649,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -661,7 +661,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
}
{
@@ -674,7 +674,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key5, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
}
}
}
@@ -715,7 +715,7 @@ namespace mongo {
ASSERT( cursor->locate( key5, loc3 ) );
ASSERT_EQUALS( key5, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -727,7 +727,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
}
{
@@ -740,7 +740,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
}
}
}
@@ -783,7 +783,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -796,7 +796,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
}
{
@@ -810,7 +810,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( key5, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
}
}
@@ -820,7 +820,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -833,7 +833,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( key5, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
}
}
}
@@ -876,7 +876,7 @@ namespace mongo {
ASSERT( cursor->locate( key5, loc3 ) );
ASSERT_EQUALS( key5, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -889,7 +889,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
}
{
@@ -903,7 +903,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
}
}
@@ -913,7 +913,7 @@ namespace mongo {
ASSERT( cursor->locate( key5, loc3 ) );
ASSERT_EQUALS( key5, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
{
vector<const BSONElement*> keyEnd( 1 );
@@ -926,7 +926,7 @@ namespace mongo {
cursor->advanceTo( unusedKey, 0, false, keyEnd, keyEndInclusive );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
}
}
}
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_locate.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_locate.cpp
index b1ffe0ff3bf..5c21f00616e 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_locate.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_locate.cpp
@@ -36,7 +36,7 @@
namespace mongo {
// Insert a key and try to locate it using a forward cursor
- // by specifying its exact key and DiskLoc.
+ // by specifying its exact key and RecordId.
TEST( SortedDataInterface, Locate ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -62,7 +62,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -70,7 +70,7 @@ namespace mongo {
}
// Insert a key and try to locate it using a reverse cursor
- // by specifying its exact key and DiskLoc.
+ // by specifying its exact key and RecordId.
TEST( SortedDataInterface, LocateReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -96,7 +96,7 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -104,7 +104,7 @@ namespace mongo {
}
// Insert a compound key and try to locate it using a forward cursor
- // by specifying its exact key and DiskLoc.
+ // by specifying its exact key and RecordId.
TEST( SortedDataInterface, LocateCompoundKey ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -130,7 +130,7 @@ namespace mongo {
ASSERT( cursor->locate( compoundKey1a, loc1 ) );
ASSERT_EQUALS( compoundKey1a, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -138,7 +138,7 @@ namespace mongo {
}
// Insert a compound key and try to locate it using a reverse cursor
- // by specifying its exact key and DiskLoc.
+ // by specifying its exact key and RecordId.
TEST( SortedDataInterface, LocateCompoundKeyReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -164,7 +164,7 @@ namespace mongo {
ASSERT( cursor->locate( compoundKey1a, loc1 ) );
ASSERT_EQUALS( compoundKey1a, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -172,7 +172,7 @@ namespace mongo {
}
// Insert multiple keys and try to locate them using a forward cursor
- // by specifying their exact key and DiskLoc.
+ // by specifying their exact key and RecordId.
TEST( SortedDataInterface, LocateMultiple ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -199,11 +199,11 @@ namespace mongo {
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -224,26 +224,26 @@ namespace mongo {
ASSERT( cursor->locate( key2, loc2 ) );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
ASSERT( cursor->locate( key1, loc1 ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -251,7 +251,7 @@ namespace mongo {
}
// Insert multiple keys and try to locate them using a reverse cursor
- // by specifying their exact key and DiskLoc.
+ // by specifying their exact key and RecordId.
TEST( SortedDataInterface, LocateMultipleReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -278,11 +278,11 @@ namespace mongo {
ASSERT( cursor->locate( key2, loc2 ) );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -303,26 +303,26 @@ namespace mongo {
ASSERT( cursor->locate( key2, loc2 ) );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
ASSERT( cursor->locate( key3, loc3 ) );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -330,7 +330,7 @@ namespace mongo {
}
// Insert multiple compound keys and try to locate them using a forward cursor
- // by specifying their exact key and DiskLoc.
+ // by specifying their exact key and RecordId.
TEST( SortedDataInterface, LocateMultipleCompoundKeys ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -358,15 +358,15 @@ namespace mongo {
ASSERT( cursor->locate( compoundKey1a, loc1 ) );
ASSERT_EQUALS( compoundKey1a, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1b, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey2b, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -388,23 +388,23 @@ namespace mongo {
ASSERT( cursor->locate( compoundKey1a, loc1 ) );
ASSERT_EQUALS( compoundKey1a, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1b, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1c, cursor->getKey() );
- ASSERT_EQUALS( loc4, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc4, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey2b, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey3a, cursor->getKey() );
- ASSERT_EQUALS( loc5, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc5, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -412,7 +412,7 @@ namespace mongo {
}
// Insert multiple compound keys and try to locate them using a reverse cursor
- // by specifying their exact key and DiskLoc.
+ // by specifying their exact key and RecordId.
TEST( SortedDataInterface, LocateMultipleCompoundKeysReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -440,15 +440,15 @@ namespace mongo {
ASSERT( cursor->locate( compoundKey2b, loc3 ) );
ASSERT_EQUALS( compoundKey2b, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1b, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1a, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -470,23 +470,23 @@ namespace mongo {
ASSERT( cursor->locate( compoundKey3a, loc5 ) );
ASSERT_EQUALS( compoundKey3a, cursor->getKey() );
- ASSERT_EQUALS( loc5, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc5, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey2b, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1c, cursor->getKey() );
- ASSERT_EQUALS( loc4, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc4, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1b, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1a, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -494,7 +494,7 @@ namespace mongo {
}
// Insert multiple keys and try to locate them using a forward cursor
- // by specifying either a smaller key or DiskLoc.
+ // by specifying either a smaller key or RecordId.
TEST( SortedDataInterface, LocateIndirect ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -519,9 +519,9 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( key1, maxDiskLoc ) );
+ ASSERT( !cursor->locate( key1, RecordId::max() ) );
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -540,17 +540,17 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( key1, minDiskLoc ) );
+ ASSERT( !cursor->locate( key1, RecordId::min() ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -558,7 +558,7 @@ namespace mongo {
}
// Insert multiple keys and try to locate them using a reverse cursor
- // by specifying either a larger key or DiskLoc.
+ // by specifying either a larger key or RecordId.
TEST( SortedDataInterface, LocateIndirectReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -583,9 +583,9 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( key2, minDiskLoc ) );
+ ASSERT( !cursor->locate( key2, RecordId::min() ) );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -604,17 +604,17 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( key3, maxDiskLoc ) );
+ ASSERT( !cursor->locate( key3, RecordId::max() ) );
ASSERT_EQUALS( key3, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key2, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -622,7 +622,7 @@ namespace mongo {
}
// Insert multiple compound keys and try to locate them using a forward cursor
- // by specifying either a smaller key or DiskLoc.
+ // by specifying either a smaller key or RecordId.
TEST( SortedDataInterface, LocateIndirectCompoundKeys ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -648,13 +648,13 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( compoundKey1a, maxDiskLoc ) );
+ ASSERT( !cursor->locate( compoundKey1a, RecordId::max() ) );
ASSERT_EQUALS( compoundKey1b, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey2b, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -676,11 +676,11 @@ namespace mongo {
ASSERT( !cursor->locate( compoundKey2a, loc1 ) );
ASSERT_EQUALS( compoundKey2b, cursor->getKey() );
- ASSERT_EQUALS( loc3, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc3, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey3a, cursor->getKey() );
- ASSERT_EQUALS( loc5, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc5, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -688,7 +688,7 @@ namespace mongo {
}
// Insert multiple compound keys and try to locate them using a reverse cursor
- // by specifying either a larger key or DiskLoc.
+ // by specifying either a larger key or RecordId.
TEST( SortedDataInterface, LocateIndirectCompoundKeysReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
@@ -714,13 +714,13 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( compoundKey2b, minDiskLoc ) );
+ ASSERT( !cursor->locate( compoundKey2b, RecordId::min() ) );
ASSERT_EQUALS( compoundKey1b, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1a, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -742,15 +742,15 @@ namespace mongo {
ASSERT( !cursor->locate( compoundKey1d, loc1 ) );
ASSERT_EQUALS( compoundKey1c, cursor->getKey() );
- ASSERT_EQUALS( loc4, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc4, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1b, cursor->getKey() );
- ASSERT_EQUALS( loc2, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc2, cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( compoundKey1a, cursor->getKey() );
- ASSERT_EQUALS( loc1, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc1, cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -772,7 +772,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( BSONObj(), minDiskLoc ) );
+ ASSERT( !cursor->locate( BSONObj(), RecordId::min() ) );
ASSERT( cursor->isEOF() );
}
}
@@ -792,7 +792,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( BSONObj(), maxDiskLoc ) );
+ ASSERT( !cursor->locate( BSONObj(), RecordId::max() ) );
ASSERT( cursor->isEOF() );
}
}
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_position.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_position.cpp
index 8f9b8906e92..ba368de3e34 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_position.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_position.cpp
@@ -51,8 +51,8 @@ namespace mongo {
scoped_ptr<SortedDataInterface::Cursor> cursor1( sorted->newCursor( opCtx.get(), 1 ) );
scoped_ptr<SortedDataInterface::Cursor> cursor2( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor1->locate( minKey, minDiskLoc ) );
- ASSERT( !cursor2->locate( minKey, minDiskLoc ) );
+ ASSERT( !cursor1->locate( minKey, RecordId::min() ) );
+ ASSERT( !cursor2->locate( minKey, RecordId::min() ) );
ASSERT( cursor1->isEOF() );
ASSERT( cursor2->isEOF() );
ASSERT( cursor1->pointsToSamePlaceAs( *cursor2 ) );
@@ -76,8 +76,8 @@ namespace mongo {
scoped_ptr<SortedDataInterface::Cursor> cursor1( sorted->newCursor( opCtx.get(), -1 ) );
scoped_ptr<SortedDataInterface::Cursor> cursor2( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor1->locate( maxKey, maxDiskLoc ) );
- ASSERT( !cursor2->locate( maxKey, maxDiskLoc ) );
+ ASSERT( !cursor1->locate( maxKey, RecordId::max() ) );
+ ASSERT( !cursor2->locate( maxKey, RecordId::max() ) );
ASSERT( cursor1->isEOF() );
ASSERT( cursor2->isEOF() );
ASSERT( cursor1->pointsToSamePlaceAs( *cursor2 ) );
@@ -258,7 +258,7 @@ namespace mongo {
}
// Verify that two forward cursors positioned at a duplicate key, but with
- // different DiskLocs are not considered to point to the same place.
+ // different RecordIds are not considered to point to the same place.
TEST( SortedDataInterface, CursorsPointToDifferentDiskLocs ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
@@ -296,7 +296,7 @@ namespace mongo {
}
// Verify that two reverse cursors positioned at a duplicate key, but with
- // different DiskLocs are not considered to point to the same place.
+ // different RecordIds are not considered to point to the same place.
TEST( SortedDataInterface, CursorsPointToDifferentDiskLocsReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
@@ -410,7 +410,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
BSONObj key = BSON( "" << i );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
uow.commit();
}
@@ -424,7 +424,7 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( minKey, minDiskLoc ) );
+ ASSERT( !cursor->locate( minKey, RecordId::min() ) );
for ( int i = 0; i < nToInsert; i++ ) {
ASSERT( !cursor->isEOF() );
ASSERT( cursor->pointsToSamePlaceAs( *cursor ) );
@@ -450,7 +450,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
BSONObj key = BSON( "" << i );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
uow.commit();
}
@@ -464,7 +464,7 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( maxKey, maxDiskLoc ) );
+ ASSERT( !cursor->locate( maxKey, RecordId::max() ) );
for ( int i = nToInsert - 1; i >= 0; i-- ) {
ASSERT( !cursor->isEOF() );
ASSERT( cursor->pointsToSamePlaceAs( *cursor ) );
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
index 36ee79d930b..2031e1cf278 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
@@ -53,7 +53,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
BSONObj key = BSON( "" << i );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
uow.commit();
}
@@ -67,11 +67,11 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( minKey, minDiskLoc ) );
+ ASSERT( !cursor->locate( minKey, RecordId::min() ) );
for ( int i = 0; i < nToInsert; i++ ) {
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << i ), cursor->getKey() );
- ASSERT_EQUALS( DiskLoc( 42, i * 2 ), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId( 42, i * 2 ), cursor->getRecordId() );
cursor->advance();
cursor->savePosition();
cursor->restorePosition( opCtx.get() );
@@ -98,7 +98,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
BSONObj key = BSON( "" << i );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
uow.commit();
}
@@ -112,11 +112,11 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( maxKey, maxDiskLoc ) );
+ ASSERT( !cursor->locate( maxKey, RecordId::max() ) );
for ( int i = nToInsert - 1; i >= 0; i-- ) {
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << i ), cursor->getKey() );
- ASSERT_EQUALS( DiskLoc( 42, i * 2 ), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId( 42, i * 2 ), cursor->getRecordId() );
cursor->advance();
cursor->savePosition();
cursor->restorePosition( opCtx.get() );
@@ -127,7 +127,7 @@ namespace mongo {
// Insert the same key multiple times and try to iterate through each
// occurrence using a forward cursor while calling savePosition() and
- // restorePosition() in succession. Verify that the DiskLoc is saved
+ // restorePosition() in succession. Verify that the RecordId is saved
// as part of the current position of the cursor.
TEST( SortedDataInterface, SaveAndRestorePositionWhileIterateCursorWithDupKeys ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
@@ -143,7 +143,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key1, loc, true /* allow duplicates */ ) );
uow.commit();
}
@@ -157,11 +157,11 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( minKey, minDiskLoc ) );
+ ASSERT( !cursor->locate( minKey, RecordId::min() ) );
for ( int i = 0; i < nToInsert; i++ ) {
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( DiskLoc( 42, i * 2 ), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId( 42, i * 2 ), cursor->getRecordId() );
cursor->advance();
cursor->savePosition();
cursor->restorePosition( opCtx.get() );
@@ -172,7 +172,7 @@ namespace mongo {
// Insert the same key multiple times and try to iterate through each
// occurrence using a reverse cursor while calling savePosition() and
- // restorePosition() in succession. Verify that the DiskLoc is saved
+ // restorePosition() in succession. Verify that the RecordId is saved
// as part of the current position of the cursor.
TEST( SortedDataInterface, SaveAndRestorePositionWhileIterateCursorWithDupKeysReversed ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
@@ -188,7 +188,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key1, loc, true /* allow duplicates */ ) );
uow.commit();
}
@@ -202,11 +202,11 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( maxKey, maxDiskLoc ) );
+ ASSERT( !cursor->locate( maxKey, RecordId::max() ) );
for ( int i = nToInsert - 1; i >= 0; i-- ) {
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( key1, cursor->getKey() );
- ASSERT_EQUALS( DiskLoc( 42, i * 2 ), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId( 42, i * 2 ), cursor->getRecordId() );
cursor->advance();
cursor->savePosition();
cursor->restorePosition( opCtx.get() );
diff --git a/src/mongo/db/storage/sorted_data_interface_test_dupkeycheck.cpp b/src/mongo/db/storage/sorted_data_interface_test_dupkeycheck.cpp
index b160ae9b56e..a8644536ea2 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_dupkeycheck.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_dupkeycheck.cpp
@@ -36,7 +36,7 @@
namespace mongo {
// Insert a key and verify that dupKeyCheck() returns a non-OK status for
- // the same key. When dupKeyCheck() is called with the exact (key, DiskLoc)
+ // the same key. When dupKeyCheck() is called with the exact (key, RecordId)
// pair that was inserted, it should still return an OK status.
TEST( SortedDataInterface, DupKeyCheckAfterInsert ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
@@ -66,7 +66,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
ASSERT_OK( sorted->dupKeyCheck( opCtx.get(), key1, loc1 ) );
- ASSERT_NOT_OK( sorted->dupKeyCheck( opCtx.get(), key1, minDiskLoc ) );
+ ASSERT_NOT_OK( sorted->dupKeyCheck( opCtx.get(), key1, RecordId::min() ) );
uow.commit();
}
}
@@ -94,7 +94,7 @@ namespace mongo {
}
// Insert a key and verify that dupKeyCheck() acknowledges the duplicate key, even
- // when the insert key is located at a DiskLoc that comes after the one specified.
+ // when the insert key is located at a RecordId that comes after the one specified.
TEST( SortedDataInterface, DupKeyCheckWhenDiskLocBefore ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
@@ -122,14 +122,14 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT_NOT_OK( sorted->dupKeyCheck( opCtx.get(), key1, minDiskLoc ) );
+ ASSERT_NOT_OK( sorted->dupKeyCheck( opCtx.get(), key1, RecordId::min() ) );
uow.commit();
}
}
}
// Insert a key and verify that dupKeyCheck() acknowledges the duplicate key, even
- // when the insert key is located at a DiskLoc that comes before the one specified.
+ // when the insert key is located at a RecordId that comes before the one specified.
TEST( SortedDataInterface, DupKeyCheckWhenDiskLocAfter ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
@@ -157,7 +157,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT_NOT_OK( sorted->dupKeyCheck( opCtx.get(), key1, maxDiskLoc ) );
+ ASSERT_NOT_OK( sorted->dupKeyCheck( opCtx.get(), key1, RecordId::max() ) );
uow.commit();
}
}
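The three tests above pin down that dupKeyCheck() is direction-independent: the probe RecordId may sort before or after the stored entry and the check still fires; only the exact stored pair is exempt. Condensed to its essence (key1 stored at loc1, names as in the tests):

    ASSERT_OK( sorted->dupKeyCheck( opCtx.get(), key1, loc1 ) );                  // same pair
    ASSERT_NOT_OK( sorted->dupKeyCheck( opCtx.get(), key1, RecordId::min() ) );   // probe sorts before
    ASSERT_NOT_OK( sorted->dupKeyCheck( opCtx.get(), key1, RecordId::max() ) );   // probe sorts after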
diff --git a/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp b/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp
index a1cd7669a31..703d281b520 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp
@@ -52,7 +52,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
BSONObj key = BSON( "" << i );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
uow.commit();
}
diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
index a0bd37aa780..be03fbcc917 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
@@ -43,7 +43,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 2 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 2 ), true );
uow.commit();
}
}
@@ -52,7 +52,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 6, 2 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 6, 2 ), true );
uow.commit();
}
}
@@ -75,7 +75,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), true );
uow.commit();
}
}
@@ -84,7 +84,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 20 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 20 ), true );
uow.commit();
}
}
@@ -103,7 +103,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), true );
uow.commit();
}
}
@@ -112,7 +112,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 20 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 20 ), true );
// no commit
}
}
@@ -131,7 +131,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), false );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), false );
uow.commit();
}
}
@@ -140,7 +140,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 2 ), DiskLoc( 5, 20 ), false );
+ sorted->insert( opCtx.get(), BSON( "" << 2 ), RecordId( 5, 20 ), false );
uow.commit();
}
}
@@ -160,7 +160,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 2 ), false );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 2 ), false );
uow.commit();
}
}
@@ -169,7 +169,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 4 ), false );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 4 ), false );
uow.commit();
}
}
@@ -189,7 +189,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), true );
uow.commit();
}
}
@@ -203,7 +203,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 20 ), true );
+ sorted->unindex( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 20 ), true );
ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
uow.commit();
}
@@ -218,7 +218,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), BSON( "" << 2 ), DiskLoc( 5, 18 ), true );
+ sorted->unindex( opCtx.get(), BSON( "" << 2 ), RecordId( 5, 18 ), true );
ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
uow.commit();
}
@@ -234,7 +234,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), true );
+ sorted->unindex( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), true );
ASSERT( sorted->isEmpty( opCtx.get() ) );
uow.commit();
}
@@ -255,7 +255,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), true );
uow.commit();
}
}
@@ -269,7 +269,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), BSON( "" << 1 ), DiskLoc( 5, 18 ), true );
+ sorted->unindex( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), true );
ASSERT( sorted->isEmpty( opCtx.get() ) );
// no commit
}
@@ -292,7 +292,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << i ), DiskLoc( 5, i * 2 ), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << i ), RecordId( 5, i * 2 ), true ) );
uow.commit();
}
}
@@ -300,10 +300,10 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- cursor->locate( BSONObj(), minDiskLoc );
+ cursor->locate( BSONObj(), RecordId::min() );
int n = 0;
while ( !cursor->isEOF() ) {
- DiskLoc loc = cursor->getDiskLoc();
+ RecordId loc = cursor->getRecordId();
ASSERT_EQUALS( n * 2, loc.getOfs() );
ASSERT_EQUALS( BSON( "" << n ), cursor->getKey() );
n++;
@@ -324,7 +324,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << i ), DiskLoc( 5, i * 2 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << i ), RecordId( 5, i * 2 ), true );
uow.commit();
}
}
@@ -332,10 +332,10 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- cursor->locate( BSONObj(), minDiskLoc );
+ cursor->locate( BSONObj(), RecordId::min() );
int n = 0;
while ( !cursor->isEOF() ) {
- DiskLoc loc = cursor->getDiskLoc();
+ RecordId loc = cursor->getRecordId();
ASSERT_EQUALS( n * 2, loc.getOfs() );
ASSERT_EQUALS( BSON( "" << n ), cursor->getKey() );
n++;
@@ -358,7 +358,7 @@ namespace mongo {
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 5 ), DiskLoc( 5, i * 2 ), true );
+ sorted->insert( opCtx.get(), BSON( "" << 5 ), RecordId( 5, i * 2 ), true );
uow.commit();
}
}
@@ -366,10 +366,10 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- cursor->locate( BSONObj(), minDiskLoc );
+ cursor->locate( BSONObj(), RecordId::min() );
int n = 0;
while ( !cursor->isEOF() ) {
- DiskLoc loc = cursor->getDiskLoc();
+ RecordId loc = cursor->getRecordId();
ASSERT_EQUALS( n * 2, loc.getOfs() );
n++;
cursor->advance();
@@ -387,7 +387,7 @@ namespace mongo {
scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
BSONObj key = BSON( "" << 1 );
- DiskLoc loc( 5, 16 );
+ RecordId loc( 5, 16 );
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
@@ -410,7 +410,7 @@ namespace mongo {
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
ASSERT( cursor->locate( key, loc ) );
ASSERT_EQUALS( key, cursor->getKey() );
- ASSERT_EQUALS( loc, cursor->getDiskLoc() );
+ ASSERT_EQUALS( loc, cursor->getRecordId() );
}
}
@@ -423,9 +423,9 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc(1,2), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), DiskLoc(1,4), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 3 ), DiskLoc(1,6), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId(1,2), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), RecordId(1,4), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 3 ), RecordId(1,6), true ) );
uow.commit();
}
}
@@ -433,14 +433,14 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( BSON( "a" << 2 ), DiskLoc(0,0) ) );
+ ASSERT( !cursor->locate( BSON( "a" << 2 ), RecordId(0,0) ) );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 2 ), cursor->getKey() );
- ASSERT_EQUALS( DiskLoc(1,4), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,4), cursor->getRecordId() );
cursor->advance();
ASSERT_EQUALS( BSON( "" << 3 ), cursor->getKey() );
- ASSERT_EQUALS( DiskLoc(1,6), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,6), cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -456,9 +456,9 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc(1,2), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), DiskLoc(1,4), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 3 ), DiskLoc(1,6), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId(1,2), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), RecordId(1,4), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 3 ), RecordId(1,6), true ) );
uow.commit();
}
}
@@ -466,16 +466,16 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( BSONObj(), DiskLoc(0,0) ) );
+ ASSERT( !cursor->locate( BSONObj(), RecordId(0,0) ) );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 1 ), cursor->getKey() );
- ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,2), cursor->getRecordId() );
}
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( BSONObj(), DiskLoc(0,0) ) );
+ ASSERT( !cursor->locate( BSONObj(), RecordId(0,0) ) );
ASSERT( cursor->isEOF() );
}
@@ -492,47 +492,47 @@ namespace mongo {
if ( i == 6 )
continue;
WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << i ), DiskLoc(1,i*2), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << i ), RecordId(1,i*2), true ) );
uow.commit();
}
}
scoped_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( BSON( "" << 5 ), DiskLoc(0,0) ) );
+ ASSERT( !cursor->locate( BSON( "" << 5 ), RecordId(0,0) ) );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 5 ), cursor->getKey() );
cursor->advance();
ASSERT_EQUALS( BSON( "" << 7 ), cursor->getKey() );
cursor.reset( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( BSON( "" << 5 ), DiskLoc(0,0) ) );
+ ASSERT( !cursor->locate( BSON( "" << 5 ), RecordId(0,0) ) );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 4 ), cursor->getKey() );
cursor.reset( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( BSON( "" << 5 ), maxDiskLoc ) );
+ ASSERT( !cursor->locate( BSON( "" << 5 ), RecordId::max() ) );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 5 ), cursor->getKey() );
cursor->advance();
ASSERT_EQUALS( BSON( "" << 4 ), cursor->getKey() );
cursor.reset( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( BSON( "" << 5 ), minDiskLoc ) );
+ ASSERT( !cursor->locate( BSON( "" << 5 ), RecordId::min() ) );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 4 ), cursor->getKey() );
cursor->advance();
ASSERT_EQUALS( BSON( "" << 3 ), cursor->getKey() );
cursor.reset( sorted->newCursor( opCtx.get(), -1 ) );
- cursor->locate( BSON( "" << 6 ), maxDiskLoc );
+ cursor->locate( BSON( "" << 6 ), RecordId::max() );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 5 ), cursor->getKey() );
cursor->advance();
ASSERT_EQUALS( BSON( "" << 4 ), cursor->getKey() );
cursor.reset( sorted->newCursor( opCtx.get(), -1 ) );
- cursor->locate( BSON( "" << 500 ), maxDiskLoc );
+ cursor->locate( BSON( "" << 500 ), RecordId::max() );
ASSERT( !cursor->isEOF() );
ASSERT_EQUALS( BSON( "" << 9 ), cursor->getKey() );
cursor->advance();
@@ -549,10 +549,10 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc(1,2), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc(1,4), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), DiskLoc(1,6), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), DiskLoc(1,8), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId(1,2), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId(1,4), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId(1,6), true ) );
+ ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), RecordId(1,8), true ) );
uow.commit();
}
}
@@ -560,18 +560,18 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) );
- ASSERT( !cursor->locate( BSON( "a" << 1 ), minDiskLoc ) );
+ ASSERT( !cursor->locate( BSON( "a" << 1 ), RecordId::min() ) );
ASSERT( !cursor->isEOF() );
- ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,2), cursor->getRecordId() );
cursor->advance();
- ASSERT_EQUALS( DiskLoc(1,4), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,4), cursor->getRecordId() );
cursor->advance();
- ASSERT_EQUALS( DiskLoc(1,6), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,6), cursor->getRecordId() );
cursor->advance();
- ASSERT_EQUALS( DiskLoc(1,8), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,8), cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
@@ -580,16 +580,16 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
- ASSERT( !cursor->locate( BSON( "a" << 1 ), maxDiskLoc ) );
+ ASSERT( !cursor->locate( BSON( "a" << 1 ), RecordId::max() ) );
ASSERT( !cursor->isEOF() );
ASSERT( cursor->getDirection() == -1 );
- ASSERT_EQUALS( DiskLoc(1,6), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,6), cursor->getRecordId() );
cursor->advance();
- ASSERT_EQUALS( DiskLoc(1,4), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,4), cursor->getRecordId() );
cursor->advance();
- ASSERT_EQUALS( DiskLoc(1,2), cursor->getDiskLoc() );
+ ASSERT_EQUALS( RecordId(1,2), cursor->getRecordId() );
cursor->advance();
ASSERT( cursor->isEOF() );
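The cursor tests above lean on index entries sorting by (key, RecordId): duplicates of one key come back in RecordId order on a forward scan and reversed on a backward scan. A rough standalone model of that ordering, with stand-in types:

    #include <cassert>
    #include <set>
    #include <utility>

    using RecordId = std::pair<int, int>;    // (a, ofs)
    using Entry = std::pair<int, RecordId>;  // (key, loc)

    int main() {
        std::set<Entry> idx;
        idx.insert(Entry(1, RecordId(1, 2)));
        idx.insert(Entry(1, RecordId(1, 4)));
        idx.insert(Entry(1, RecordId(1, 6)));
        idx.insert(Entry(2, RecordId(1, 8)));

        // Forward scan: duplicates of key 1 surface as loc (1,2), (1,4), (1,6).
        std::set<Entry>::iterator it = idx.begin();
        assert(it->second == RecordId(1, 2));
        ++it;
        assert(it->second == RecordId(1, 4));

        // Backward scan: the highest (key, loc) entry comes first.
        std::set<Entry>::reverse_iterator rit = idx.rbegin();
        assert(rit->first == 2 && rit->second == RecordId(1, 8));
        ++rit;
        assert(rit->second == RecordId(1, 6));
    }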
diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.h b/src/mongo/db/storage/sorted_data_interface_test_harness.h
index f059cc114ec..3b830e1a313 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_harness.h
+++ b/src/mongo/db/storage/sorted_data_interface_test_harness.h
@@ -55,14 +55,14 @@ namespace mongo {
const BSONObj compoundKey3b = BSON( "" << 3 << "" << "b" );
const BSONObj compoundKey3c = BSON( "" << 3 << "" << "c" );
- const DiskLoc loc1( 10, 42 );
- const DiskLoc loc2( 10, 44 );
- const DiskLoc loc3( 10, 46 );
- const DiskLoc loc4( 10, 48 );
- const DiskLoc loc5( 10, 50 );
- const DiskLoc loc6( 10, 52 );
- const DiskLoc loc7( 10, 54 );
- const DiskLoc loc8( 10, 56 );
+ const RecordId loc1( 10, 42 );
+ const RecordId loc2( 10, 44 );
+ const RecordId loc3( 10, 46 );
+ const RecordId loc4( 10, 48 );
+ const RecordId loc5( 10, 50 );
+ const RecordId loc6( 10, 52 );
+ const RecordId loc7( 10, 54 );
+ const RecordId loc8( 10, 56 );
class RecoveryUnit;
class SortedDataInterface;
diff --git a/src/mongo/db/storage/sorted_data_interface_test_insert.cpp b/src/mongo/db/storage/sorted_data_interface_test_insert.cpp
index 03e2f98c485..2ae1675ca74 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_insert.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_insert.cpp
@@ -85,7 +85,7 @@ namespace mongo {
}
}
- // Insert multiple, distinct keys at the same DiskLoc and verify that the
+ // Insert multiple, distinct keys at the same RecordId and verify that the
// number of entries in the index equals the number that were inserted, even
// when duplicates are not allowed.
TEST( SortedDataInterface, InsertSameDiskLoc ) {
@@ -127,7 +127,7 @@ namespace mongo {
}
}
- // Insert multiple, distinct keys at the same DiskLoc and verify that the
+ // Insert multiple, distinct keys at the same RecordId and verify that the
// number of entries in the index equals the number that were inserted, even
// when duplicates are allowed.
TEST( SortedDataInterface, InsertSameDiskLocWithDupsAllowed ) {
diff --git a/src/mongo/db/storage/sorted_data_interface_test_spaceused.cpp b/src/mongo/db/storage/sorted_data_interface_test_spaceused.cpp
index 30c4ee75569..f11bbe3b49b 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_spaceused.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_spaceused.cpp
@@ -69,7 +69,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
BSONObj key = BSON( "" << i );
- DiskLoc loc( 42, i * 2 );
+ RecordId loc( 42, i * 2 );
ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
uow.commit();
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
index c240ef94fe5..108a691a822 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
@@ -73,38 +73,38 @@ namespace {
/**
* Constructs an IndexKeyEntry from a slice containing the bytes of a BSONObject followed
- * by the bytes of a DiskLoc
+ * by the bytes of a RecordId
*/
static IndexKeyEntry makeIndexKeyEntry(const WT_ITEM *keyCols) {
const char* data = reinterpret_cast<const char*>( keyCols->data );
BSONObj key( data );
if ( keyCols->size == static_cast<size_t>( key.objsize() ) ) {
// in unique mode
- return IndexKeyEntry( key, DiskLoc() );
+ return IndexKeyEntry( key, RecordId() );
}
- invariant( keyCols->size == key.objsize() + sizeof(DiskLoc) );
- DiskLoc loc = reinterpret_cast<const DiskLoc*>( data + key.objsize() )[0];
+ invariant( keyCols->size == key.objsize() + sizeof(RecordId) );
+ RecordId loc = reinterpret_cast<const RecordId*>( data + key.objsize() )[0];
return IndexKeyEntry( key, loc );
}
- WiredTigerItem _toItem( const BSONObj& key, const DiskLoc& loc,
+ WiredTigerItem _toItem( const BSONObj& key, const RecordId& loc,
boost::scoped_array<char>*out ) {
- size_t keyLen = key.objsize() + sizeof(DiskLoc);
+ size_t keyLen = key.objsize() + sizeof(RecordId);
out->reset( new char[keyLen] );
memcpy( out->get(), key.objdata(), key.objsize() );
- memcpy( out->get() + key.objsize(), reinterpret_cast<const char*>(&loc), sizeof(DiskLoc) );
+ memcpy( out->get() + key.objsize(), reinterpret_cast<const char*>(&loc), sizeof(RecordId) );
return WiredTigerItem( out->get(), keyLen );
}
- DiskLoc _toDiskLoc( const WT_ITEM& item ) {
- DiskLoc l;
- memcpy( &l, item.data, sizeof(DiskLoc) );
+ RecordId _toDiskLoc( const WT_ITEM& item ) {
+ RecordId l;
+ memcpy( &l, item.data, sizeof(RecordId) );
return l;
}
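The helpers above store a standard index entry as the BSON key bytes immediately followed by the raw RecordId bytes, which is what _toItem() builds and makeIndexKeyEntry() takes apart. A hypothetical standalone sketch of that buffer layout, using plain char buffers in place of BSONObj and WiredTigerItem:

    #include <cassert>
    #include <cstring>
    #include <vector>

    struct RecordId { int a, ofs; };

    // Key bytes first, then the raw loc bytes, mirroring _toItem() above.
    std::vector<char> makeIndexKey(const char* keyData, size_t keyLen, const RecordId& loc) {
        std::vector<char> buf(keyLen + sizeof(RecordId));
        std::memcpy(buf.data(), keyData, keyLen);
        std::memcpy(buf.data() + keyLen, &loc, sizeof(RecordId));
        return buf;
    }

    int main() {
        const char key[] = "stand-in for BSON key bytes";
        const RecordId loc = {5, 18};
        std::vector<char> buf = makeIndexKey(key, sizeof(key), loc);

        // Decoding peels the loc back off the tail, as makeIndexKeyEntry() does.
        RecordId out;
        std::memcpy(&out, buf.data() + sizeof(key), sizeof(RecordId));
        assert(out.a == 5 && out.ofs == 18);
    }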
/**
- * Custom comparator used to compare Index Entries by BSONObj and DiskLoc
+ * Custom comparator used to compare Index Entries by BSONObj and RecordId
*/
struct WiredTigerIndexCollator : public WT_COLLATOR {
public:
@@ -223,7 +223,7 @@ namespace {
Status WiredTigerIndex::insert(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed) {
invariant(!loc.isNull());
invariant(loc.isValid());
@@ -241,7 +241,7 @@ namespace {
void WiredTigerIndex::unindex(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed ) {
invariant(!loc.isNull());
invariant(loc.isValid());
@@ -257,7 +257,7 @@ namespace {
void WiredTigerIndex::fullValidate(OperationContext* txn, bool full, long long *numKeysOut,
BSONObjBuilder* output) const {
IndexCursor cursor(*this, txn, true );
- cursor.locate( minKey, minDiskLoc );
+ cursor.locate( minKey, RecordId::min() );
long long count = 0;
while ( !cursor.isEOF() ) {
cursor.advance();
@@ -286,7 +286,7 @@ namespace {
Status WiredTigerIndex::dupKeyCheck( OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc) {
+ const RecordId& loc) {
invariant(!hasFieldNames(key));
invariant(unique());
@@ -316,7 +316,7 @@ namespace {
_uri ) );
}
- bool WiredTigerIndex::isDup(WT_CURSOR *c, const BSONObj& key, const DiskLoc& loc ) {
+ bool WiredTigerIndex::isDup(WT_CURSOR *c, const BSONObj& key, const RecordId& loc ) {
invariant( unique() );
// First check whether the key exists.
WiredTigerItem item( key.objdata(), key.objsize() );
@@ -328,7 +328,7 @@ namespace {
WT_ITEM value;
invariantWTOK( c->get_value(c,&value) );
- DiskLoc found = _toDiskLoc( value );
+ RecordId found = _toDiskLoc( value );
return found != loc;
}
@@ -399,7 +399,7 @@ namespace {
: BulkBuilder(idx, txn) {
}
- Status addKey(const BSONObj& key, const DiskLoc& loc) {
+ Status addKey(const BSONObj& key, const RecordId& loc) {
{
const Status s = checkKeySize(key);
if (!s.isOK())
@@ -407,10 +407,10 @@ namespace {
}
// Build a buffer with the key and loc concatenated.
- const size_t keyLen = key.objsize() + sizeof(DiskLoc);
+ const size_t keyLen = key.objsize() + sizeof(RecordId);
invariant(keyLen <= kBufferSize);
memcpy(_buffer, key.objdata(), key.objsize());
- memcpy(_buffer + key.objsize(), &loc, sizeof(DiskLoc));
+ memcpy(_buffer + key.objsize(), &loc, sizeof(RecordId));
WiredTigerItem item(_buffer, keyLen);
// Can't use WiredTigerCursor since we aren't using the cache.
@@ -431,7 +431,7 @@ namespace {
private:
// Will need to support dynamic sizing if we remove TempKeyMaxSize.
- static const size_t kBufferSize = TempKeyMaxSize + sizeof(DiskLoc);
+ static const size_t kBufferSize = TempKeyMaxSize + sizeof(RecordId);
char _buffer[kBufferSize];
};
@@ -449,7 +449,7 @@ namespace {
: BulkBuilder(idx, txn), _dupsAllowed(dupsAllowed) {
}
- Status addKey(const BSONObj& newKey, const DiskLoc& loc) {
+ Status addKey(const BSONObj& newKey, const RecordId& loc) {
{
const Status s = checkKeySize(newKey);
if (!s.isOK())
@@ -500,7 +500,7 @@ namespace {
_cursor->set_key(_cursor, keyItem.Get());
invariant(_locs.size() > 0);
- WiredTigerItem valueItem(&_locs.front(), _locs.size() * sizeof(DiskLoc));
+ WiredTigerItem valueItem(&_locs.front(), _locs.size() * sizeof(RecordId));
_cursor->set_value(_cursor, valueItem.Get());
invariantWTOK(_cursor->insert(_cursor));
@@ -511,7 +511,7 @@ namespace {
const bool _dupsAllowed;
BSONObj _key;
- std::vector<DiskLoc> _locs;
+ std::vector<RecordId> _locs;
};
SortedDataBuilderInterface* WiredTigerIndex::getBulkBuilder( OperationContext* txn,
@@ -553,26 +553,26 @@ namespace {
else if ( _eof || other._eof )
return false;
- if ( getDiskLoc() != other.getDiskLoc() )
+ if ( getRecordId() != other.getRecordId() )
return false;
return getKey() == other.getKey();
}
- void WiredTigerIndex::IndexCursor::aboutToDeleteBucket(const DiskLoc& bucket) {
+ void WiredTigerIndex::IndexCursor::aboutToDeleteBucket(const RecordId& bucket) {
invariant(!"aboutToDeleteBucket should not be called");
}
- bool WiredTigerIndex::IndexCursor::_locate(const BSONObj &key, const DiskLoc& loc) {
+ bool WiredTigerIndex::IndexCursor::_locate(const BSONObj &key, const RecordId& loc) {
_uniqueLen = -1;
WT_CURSOR *c = _cursor.get();
- DiskLoc searchLoc = loc;
+ RecordId searchLoc = loc;
// Null cursors should start at the zero key to maintain search ordering in the
// collator.
// Reverse cursors should start on the last matching key.
if (loc.isNull())
- searchLoc = _forward ? DiskLoc(0, 0) : DiskLoc(INT_MAX, INT_MAX);
+ searchLoc = _forward ? RecordId(0, 0) : RecordId(INT_MAX, INT_MAX);
boost::scoped_array<char> data;
WiredTigerItem myKey = _toItem( key, searchLoc, &data );
@@ -613,20 +613,20 @@ namespace {
return true;
}
- // we're looking for a specific DiskLoc, lets see if we can find
+ // we're looking for a specific RecordId; let's see if we can find it
WT_ITEM item;
invariantWTOK( c->get_value(c, &item ) );
- _uniqueLen = item.size / sizeof(DiskLoc);
+ _uniqueLen = item.size / sizeof(RecordId);
invariant( _uniqueLen > 0 );
if ( _forward ) {
_uniquePos = 0;
for ( ; _uniquePos < _uniqueLen; _uniquePos++ ) {
- DiskLoc temp;
+ RecordId temp;
memcpy( &temp,
- reinterpret_cast<const char*>(item.data) + ( _uniquePos * sizeof(DiskLoc) ),
- sizeof(DiskLoc) );
+ reinterpret_cast<const char*>(item.data) + ( _uniquePos * sizeof(RecordId) ),
+ sizeof(RecordId) );
if ( temp == loc )
break;
@@ -637,10 +637,10 @@ namespace {
else {
_uniquePos = _uniqueLen-1;
for ( ; _uniquePos >= 0; _uniquePos-- ) {
- DiskLoc temp;
+ RecordId temp;
memcpy( &temp,
- reinterpret_cast<const char*>(item.data) + ( _uniquePos * sizeof(DiskLoc) ),
- sizeof(DiskLoc) );
+ reinterpret_cast<const char*>(item.data) + ( _uniquePos * sizeof(RecordId) ),
+ sizeof(RecordId) );
if ( temp == loc )
break;
@@ -658,12 +658,12 @@ namespace {
return true;
}
- bool WiredTigerIndex::IndexCursor::locate(const BSONObj &key, const DiskLoc& loc) {
+ bool WiredTigerIndex::IndexCursor::locate(const BSONObj &key, const RecordId& loc) {
const BSONObj finalKey = stripFieldNames(key);
bool result = _locate(finalKey, loc);
// An explicit search at the start of the range should always return false
- if (loc == minDiskLoc || loc == maxDiskLoc )
+ if (loc == RecordId::min() || loc == RecordId::max() )
return false;
return result;
}
@@ -678,7 +678,7 @@ namespace {
keyBegin, keyBeginLen,
afterKey, keyEnd, keyEndInclusive, getDirection() );
- _locate(key, DiskLoc());
+ _locate(key, RecordId());
}
void WiredTigerIndex::IndexCursor::customLocate(const BSONObj& keyBegin,
@@ -697,9 +697,9 @@ namespace {
return makeIndexKeyEntry(&keyItem).key;
}
- DiskLoc WiredTigerIndex::IndexCursor::getDiskLoc() const {
+ RecordId WiredTigerIndex::IndexCursor::getRecordId() const {
if ( _eof )
- return DiskLoc();
+ return RecordId();
WT_CURSOR *c = _cursor.get();
WT_ITEM item;
@@ -707,12 +707,12 @@ namespace {
invariantWTOK( c->get_value(c, &item ) );
if ( _uniqueLen == -1 ) {
// first time at this spot
- _uniqueLen = item.size / sizeof(DiskLoc);
+ _uniqueLen = item.size / sizeof(RecordId);
invariant( _uniqueLen > 0 );
_uniquePos = 0;
}
- DiskLoc loc;
+ RecordId loc;
int posToUse = _uniquePos;
if ( !_forward )
posToUse = _uniqueLen - 1 - _uniquePos;
@@ -720,8 +720,8 @@ namespace {
memcpy( &loc,
- reinterpret_cast<const char*>(item.data) + ( posToUse * sizeof(DiskLoc) ),
- sizeof(DiskLoc) );
+ reinterpret_cast<const char*>(item.data) + ( posToUse * sizeof(RecordId) ),
+ sizeof(RecordId) );
invariant( posToUse >= 0 && posToUse < _uniqueLen );
@@ -739,7 +739,7 @@ namespace {
if ( _idx.unique() ) {
if ( _uniqueLen == -1 ) {
// we need to investigate
- getDiskLoc();
+ getRecordId();
}
_uniquePos++; // advance
@@ -768,7 +768,7 @@ namespace {
if ( !wt_keeptxnopen() && !_eof ) {
_savedKey = getKey().getOwned();
- _savedLoc = getDiskLoc();
+ _savedLoc = getRecordId();
_cursor.reset();
}
@@ -794,7 +794,7 @@ namespace {
Status WiredTigerIndexUnique::_insert( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed ) {
WiredTigerItem keyItem( key.objdata(), key.objsize() );
@@ -821,14 +821,14 @@ namespace {
WT_ITEM old;
invariantWTOK( c->get_value(c, &old ) );
- std::set<DiskLoc> all;
+ std::set<RecordId> all;
// see if it's already in the array
- for ( size_t i = 0; i < (old.size/sizeof(DiskLoc)); i++ ) {
- DiskLoc temp;
+ for ( size_t i = 0; i < (old.size/sizeof(RecordId)); i++ ) {
+ RecordId temp;
memcpy( &temp,
- reinterpret_cast<const char*>( old.data ) + ( i * sizeof(DiskLoc) ),
- sizeof(DiskLoc) );
+ reinterpret_cast<const char*>( old.data ) + ( i * sizeof(RecordId) ),
+ sizeof(RecordId) );
if ( loc == temp )
return Status::OK();
all.insert( temp );
@@ -841,14 +841,14 @@ namespace {
all.insert( loc );
// not in the array, add it in sorted order
- size_t newSize = all.size() * sizeof(DiskLoc);
+ size_t newSize = all.size() * sizeof(RecordId);
boost::scoped_array<char> bigger( new char[newSize] );
size_t offset = 0;
- for ( std::set<DiskLoc>::const_iterator it = all.begin(); it != all.end(); ++it ) {
- DiskLoc dl = *it;
- memcpy( bigger.get() + offset, &dl, sizeof(DiskLoc) );
- offset += sizeof(DiskLoc);
+ for ( std::set<RecordId>::const_iterator it = all.begin(); it != all.end(); ++it ) {
+ RecordId dl = *it;
+ memcpy( bigger.get() + offset, &dl, sizeof(RecordId) );
+ offset += sizeof(RecordId);
}
valueItem = WiredTigerItem( bigger.get(), newSize );
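When a unique index allows duplicates, the value for a key holds every RecordId packed back-to-back; the code above funnels the old array plus the new loc through a std::set so the re-serialized array stays sorted and repeats are dropped. A simplified standalone version of that update step, illustrative types only:

    #include <cassert>
    #include <cstring>
    #include <set>
    #include <vector>

    struct RecordId {
        int a, ofs;
        bool operator<(const RecordId& o) const {
            return a != o.a ? a < o.a : ofs < o.ofs;
        }
    };

    // Append loc to the packed array unless it is already present.
    std::vector<char> addLoc(const std::vector<char>& value, const RecordId& loc) {
        std::set<RecordId> all;
        for (size_t i = 0; i < value.size() / sizeof(RecordId); i++) {
            RecordId temp;
            std::memcpy(&temp, value.data() + i * sizeof(RecordId), sizeof(RecordId));
            all.insert(temp);
        }
        all.insert(loc);

        // Re-serialize the set: sorted, duplicate-free, back-to-back.
        std::vector<char> bigger(all.size() * sizeof(RecordId));
        size_t offset = 0;
        for (std::set<RecordId>::const_iterator it = all.begin(); it != all.end(); ++it) {
            RecordId dl = *it;
            std::memcpy(bigger.data() + offset, &dl, sizeof(RecordId));
            offset += sizeof(RecordId);
        }
        return bigger;
    }

    int main() {
        std::vector<char> value;
        value = addLoc(value, RecordId{1, 4});
        value = addLoc(value, RecordId{1, 2});
        value = addLoc(value, RecordId{1, 4});  // already present, no growth
        assert(value.size() == 2 * sizeof(RecordId));
    }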
@@ -858,7 +858,7 @@ namespace {
void WiredTigerIndexUnique::_unindex( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed ) {
WiredTigerItem keyItem( key.objdata(), key.objsize() );
c->set_key( c, keyItem.Get() );
@@ -873,7 +873,7 @@ namespace {
return;
}
- // ups are allowed, so we have to deal with a vector of DiskLoc
+ // dups are allowed, so we have to deal with a vector of RecordId
int ret = c->search(c);
if ( ret == WT_NOTFOUND )
@@ -884,17 +884,17 @@ namespace {
invariantWTOK( c->get_value(c, &old ) );
// see if it's in the array
- size_t num = old.size / sizeof(DiskLoc);
+ size_t num = old.size / sizeof(RecordId);
for ( size_t i = 0; i < num; i++ ) {
- DiskLoc temp;
+ RecordId temp;
memcpy( &temp,
- reinterpret_cast<const char*>( old.data ) + ( i * sizeof(DiskLoc) ),
- sizeof(DiskLoc) );
+ reinterpret_cast<const char*>( old.data ) + ( i * sizeof(RecordId) ),
+ sizeof(RecordId) );
if ( loc != temp )
continue;
// we found it, now let's re-save the array without it
- size_t newSize = old.size - sizeof(DiskLoc);
+ size_t newSize = old.size - sizeof(RecordId);
if ( newSize == 0 ) {
// nothing left, just delete entry
@@ -903,11 +903,11 @@ namespace {
}
boost::scoped_array<char> smaller( new char[newSize] );
- size_t offset = i * sizeof(DiskLoc);
+ size_t offset = i * sizeof(RecordId);
memcpy( smaller.get(), old.data, offset );
memcpy( smaller.get() + offset,
- reinterpret_cast<const char*>( old.data ) + offset + sizeof(DiskLoc),
- old.size - sizeof(DiskLoc) - offset );
+ reinterpret_cast<const char*>( old.data ) + offset + sizeof(RecordId),
+ old.size - sizeof(RecordId) - offset );
WiredTigerItem valueItem = WiredTigerItem( smaller.get(), newSize );
c->set_value( c, valueItem.Get() );
invariantWTOK( c->update( c ) );
@@ -923,7 +923,7 @@ namespace {
Status WiredTigerIndexStandard::_insert( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed ) {
invariant( dupsAllowed );
@@ -943,7 +943,7 @@ namespace {
void WiredTigerIndexStandard::_unindex( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed ) {
invariant( dupsAllowed );
boost::scoped_array<char> data;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
index 53c280aafcb..d97749334df 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
@@ -59,24 +59,24 @@ namespace mongo {
virtual Status insert(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed);
virtual void unindex(OperationContext* txn,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed);
virtual void fullValidate(OperationContext* txn, bool full, long long *numKeysOut,
BSONObjBuilder* output) const;
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const DiskLoc& loc);
+ virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc);
virtual bool isEmpty(OperationContext* txn);
virtual long long getSpaceUsedBytes( OperationContext* txn ) const;
- bool isDup(WT_CURSOR *c, const BSONObj& key, const DiskLoc& loc );
+ bool isDup(WT_CURSOR *c, const BSONObj& key, const RecordId& loc );
virtual SortedDataInterface::Cursor* newCursor(
OperationContext* txn, int direction) const;
@@ -93,12 +93,12 @@ namespace mongo {
virtual Status _insert( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed ) = 0;
virtual void _unindex( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed ) = 0;
class BulkBuilder;
@@ -119,9 +119,9 @@ namespace mongo {
virtual bool pointsToSamePlaceAs(const SortedDataInterface::Cursor &genother) const;
- virtual void aboutToDeleteBucket(const DiskLoc& bucket);
+ virtual void aboutToDeleteBucket(const RecordId& bucket);
- virtual bool locate(const BSONObj &key, const DiskLoc& loc);
+ virtual bool locate(const BSONObj &key, const RecordId& loc);
virtual void customLocate(const BSONObj& keyBegin,
int keyBeginLen,
@@ -137,7 +137,7 @@ namespace mongo {
virtual BSONObj getKey() const;
- virtual DiskLoc getDiskLoc() const;
+ virtual RecordId getRecordId() const;
virtual void advance();
@@ -146,7 +146,7 @@ namespace mongo {
virtual void restorePosition( OperationContext *txn );
private:
- bool _locate(const BSONObj &key, const DiskLoc& loc);
+ bool _locate(const BSONObj &key, const RecordId& loc);
OperationContext *_txn;
WiredTigerCursor _cursor;
@@ -160,7 +160,7 @@ namespace mongo {
// For save/restorePosition check
RecoveryUnit* _savedForCheck;
BSONObj _savedKey;
- DiskLoc _savedLoc;
+ RecordId _savedLoc;
};
const Ordering _ordering;
@@ -177,12 +177,12 @@ namespace mongo {
virtual Status _insert( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed );
virtual void _unindex( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed );
};
@@ -194,12 +194,12 @@ namespace mongo {
virtual Status _insert( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed );
virtual void _unindex( WT_CURSOR* c,
const BSONObj& key,
- const DiskLoc& loc,
+ const RecordId& loc,
bool dupsAllowed );
};
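The class layout above is a template-method split: the public insert()/unindex() entry points own the shared checks, while WiredTigerIndexUnique and WiredTigerIndexStandard supply _insert()/_unindex(). A minimal sketch of that shape, not the real classes:

    #include <cassert>
    #include <iostream>

    struct RecordId {
        int a, ofs;
        RecordId(int a_ = 0, int ofs_ = 0) : a(a_), ofs(ofs_) {}
        bool isNull() const { return a == 0 && ofs == 0; }
    };

    class IndexBase {
    public:
        bool insert(const RecordId& loc, bool dupsAllowed) {
            assert(!loc.isNull());            // shared validation lives in the base
            return _insert(loc, dupsAllowed); // storage detail lives in the subclass
        }
        virtual ~IndexBase() {}
    protected:
        virtual bool _insert(const RecordId& loc, bool dupsAllowed) = 0;
    };

    class StandardIndex : public IndexBase {
    protected:
        virtual bool _insert(const RecordId&, bool dupsAllowed) {
            assert(dupsAllowed); // standard (non-unique) indexes always allow dups
            return true;
        }
    };

    int main() {
        StandardIndex idx;
        std::cout << idx.insert(RecordId(5, 18), true) << std::endl; // prints 1
    }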
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index 257265d41ec..7d39bf51dd4 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -93,7 +93,7 @@ namespace {
class CappedInsertChange : public RecoveryUnit::Change {
public:
- CappedInsertChange( WiredTigerRecordStore* rs, const DiskLoc& loc )
+ CappedInsertChange( WiredTigerRecordStore* rs, const RecordId& loc )
: _rs( rs ), _loc( loc ) {
}
@@ -107,7 +107,7 @@ namespace {
private:
WiredTigerRecordStore* _rs;
- DiskLoc _loc;
+ RecordId _loc;
};
} // namespace
@@ -193,19 +193,19 @@ namespace {
invariant(_cappedMaxDocs == -1);
}
- // Find the largest DiskLoc currently in use and estimate the number of records.
- scoped_ptr<RecordIterator> iterator( getIterator( ctx, DiskLoc(),
+ // Find the largest RecordId currently in use and estimate the number of records.
+ scoped_ptr<RecordIterator> iterator( getIterator( ctx, RecordId(),
CollectionScanParams::BACKWARD ) );
if ( iterator->isEOF() ) {
_dataSize.store(0);
_numRecords.store(0);
- // Need to start at 1 so we are always higher than minDiskLoc
+ // Need to start at 1 so we are always higher than RecordId::min()
_nextIdNum.store( 1 );
if ( sizeStorer )
_sizeStorer->onCreate( this, 0, 0 );
}
else {
- DiskLoc maxLoc = iterator->curr();
+ RecordId maxLoc = iterator->curr();
uint64_t max = _makeKey( maxLoc );
_oplog_highestSeen = maxLoc;
_nextIdNum.store( 1 + max );
@@ -226,7 +226,7 @@ namespace {
_dataSize.store(0);
while( !iterator->isEOF() ) {
- DiskLoc loc = iterator->getNext();
+ RecordId loc = iterator->getNext();
RecordData data = iterator->dataFor( loc );
_numRecords.fetchAndAdd(1);
_dataSize.fetchAndAdd(data.size());
@@ -314,7 +314,7 @@ namespace {
return RecordData(data.moveFrom(), value.size);
}
- RecordData WiredTigerRecordStore::dataFor(OperationContext* txn, const DiskLoc& loc) const {
+ RecordData WiredTigerRecordStore::dataFor(OperationContext* txn, const RecordId& loc) const {
// ownership passes to the shared_array created below
WiredTigerCursor curwrap( _uri, _instanceId, txn);
WT_CURSOR *c = curwrap.get();
@@ -322,14 +322,14 @@ namespace {
c->set_key(c, _makeKey(loc));
int ret = c->search(c);
massert( 28556,
- "Didn't find DiskLoc in WiredTigerRecordStore",
+ "Didn't find RecordId in WiredTigerRecordStore",
ret != WT_NOTFOUND );
invariantWTOK(ret);
return _getData(curwrap);
}
bool WiredTigerRecordStore::findRecord( OperationContext* txn,
- const DiskLoc& loc, RecordData* out ) const {
+ const RecordId& loc, RecordData* out ) const {
WiredTigerCursor curwrap( _uri, _instanceId, txn);
WT_CURSOR *c = curwrap.get();
invariant( c );
@@ -342,7 +342,7 @@ namespace {
return true;
}
- void WiredTigerRecordStore::deleteRecord( OperationContext* txn, const DiskLoc& loc ) {
+ void WiredTigerRecordStore::deleteRecord( OperationContext* txn, const RecordId& loc ) {
WiredTigerCursor cursor( _uri, _instanceId, txn );
WT_CURSOR *c = cursor.get();
c->set_key(c, _makeKey(loc));
@@ -380,7 +380,7 @@ namespace {
}
void WiredTigerRecordStore::cappedDeleteAsNeeded(OperationContext* txn,
- const DiskLoc& justInserted ) {
+ const RecordId& justInserted ) {
if ( _isOplog ) {
if ( oplogCounter++ % 100 > 0 )
@@ -408,7 +408,7 @@ namespace {
WiredTigerCursor curwrap( _uri, _instanceId, txn);
WT_CURSOR *c = curwrap.get();
int ret = c->next(c);
- DiskLoc oldest;
+ RecordId oldest;
while ( ret == 0 && cappedAndNeedDelete() ) {
WriteUnitOfWork wuow( txn );
@@ -459,23 +459,23 @@ namespace {
}
}
- StatusWith<DiskLoc> WiredTigerRecordStore::extractAndCheckLocForOplog(const char* data,
+ StatusWith<RecordId> WiredTigerRecordStore::extractAndCheckLocForOplog(const char* data,
int len) {
return oploghack::extractKey(data, len);
}
- StatusWith<DiskLoc> WiredTigerRecordStore::insertRecord( OperationContext* txn,
+ StatusWith<RecordId> WiredTigerRecordStore::insertRecord( OperationContext* txn,
const char* data,
int len,
bool enforceQuota ) {
if ( _isCapped && len > _cappedMaxSize ) {
- return StatusWith<DiskLoc>( ErrorCodes::BadValue,
+ return StatusWith<RecordId>( ErrorCodes::BadValue,
"object to insert exceeds cappedMaxSize" );
}
- DiskLoc loc;
+ RecordId loc;
if ( _useOplogHack ) {
- StatusWith<DiskLoc> status = extractAndCheckLocForOplog(data, len);
+ StatusWith<RecordId> status = extractAndCheckLocForOplog(data, len);
if (!status.isOK())
return status;
loc = status.getValue();
@@ -504,7 +504,7 @@ namespace {
c->set_value(c, value.Get());
int ret = c->insert(c);
if ( ret ) {
- return StatusWith<DiskLoc>( wtRCToStatus( ret,
+ return StatusWith<RecordId>( wtRCToStatus( ret,
"WiredTigerRecordStore::insertRecord" ) );
}
@@ -513,10 +513,10 @@ namespace {
cappedDeleteAsNeeded(txn, loc);
- return StatusWith<DiskLoc>( loc );
+ return StatusWith<RecordId>( loc );
}
- void WiredTigerRecordStore::dealtWithCappedLoc( const DiskLoc& loc ) {
+ void WiredTigerRecordStore::dealtWithCappedLoc( const RecordId& loc ) {
boost::mutex::scoped_lock lk( _uncommittedDiskLocsMutex );
SortedDiskLocs::iterator it = std::find(_uncommittedDiskLocs.begin(),
_uncommittedDiskLocs.end(),
@@ -525,7 +525,7 @@ namespace {
_uncommittedDiskLocs.erase(it);
}
- bool WiredTigerRecordStore::isCappedHidden( const DiskLoc& loc ) const {
+ bool WiredTigerRecordStore::isCappedHidden( const RecordId& loc ) const {
boost::mutex::scoped_lock lk( _uncommittedDiskLocsMutex );
if (_uncommittedDiskLocs.empty()) {
return false;
@@ -533,7 +533,7 @@ namespace {
return _uncommittedDiskLocs.front() <= loc;
}
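isCappedHidden() above treats _uncommittedDiskLocs, kept in ascending order, as a visibility frontier: a loc is still hidden whenever the earliest uncommitted loc is at or before it. A tiny standalone model, illustrative only:

    #include <cassert>
    #include <deque>

    bool isCappedHidden(const std::deque<int>& uncommitted, int loc) {
        if (uncommitted.empty())
            return false;
        return uncommitted.front() <= loc;
    }

    int main() {
        std::deque<int> uncommitted;
        uncommitted.push_back(7);
        uncommitted.push_back(9);
        assert(!isCappedHidden(uncommitted, 5));  // before every pending insert
        assert(isCappedHidden(uncommitted, 8));   // past the visibility frontier
        assert(!isCappedHidden(std::deque<int>(), 8));  // nothing pending
    }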
- StatusWith<DiskLoc> WiredTigerRecordStore::insertRecord( OperationContext* txn,
+ StatusWith<RecordId> WiredTigerRecordStore::insertRecord( OperationContext* txn,
const DocWriter* doc,
bool enforceQuota ) {
const int len = doc->documentSize();
@@ -544,8 +544,8 @@ namespace {
return insertRecord( txn, buf.get(), len, enforceQuota );
}
- StatusWith<DiskLoc> WiredTigerRecordStore::updateRecord( OperationContext* txn,
- const DiskLoc& loc,
+ StatusWith<RecordId> WiredTigerRecordStore::updateRecord( OperationContext* txn,
+ const RecordId& loc,
const char* data,
int len,
bool enforceQuota,
@@ -573,11 +573,11 @@ namespace {
cappedDeleteAsNeeded(txn, loc);
- return StatusWith<DiskLoc>( loc );
+ return StatusWith<RecordId>( loc );
}
Status WiredTigerRecordStore::updateWithDamages( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
const RecordData& oldRec,
const char* damangeSource,
const mutablebson::DamageVector& damages ) {
@@ -620,7 +620,7 @@ namespace {
}
RecordIterator* WiredTigerRecordStore::getIterator( OperationContext* txn,
- const DiskLoc& start,
+ const RecordId& start,
const CollectionScanParams::Direction& dir ) const {
if ( _isOplog && dir == CollectionScanParams::FORWARD ) {
WiredTigerRecoveryUnit* wru = WiredTigerRecoveryUnit::get(txn);
@@ -639,7 +639,7 @@ namespace {
// XXX do we want this to actually return a set of iterators?
std::vector<RecordIterator*> iterators;
- iterators.push_back( new Iterator(*this, txn, DiskLoc(),
+ iterators.push_back( new Iterator(*this, txn, RecordId(),
CollectionScanParams::FORWARD, true) );
return iterators;
@@ -649,7 +649,7 @@ namespace {
// TODO: use a WiredTiger fast truncate
boost::scoped_ptr<RecordIterator> iter( getIterator( txn ) );
while( !iter->isEOF() ) {
- DiskLoc loc = iter->getNext();
+ RecordId loc = iter->getNext();
deleteRecord( txn, loc );
}
@@ -684,7 +684,7 @@ namespace {
++nrecords;
if ( full && scanData ) {
size_t dataSize;
- DiskLoc loc = iter->curr();
+ RecordId loc = iter->curr();
RecordData data = dataFor( txn, loc );
Status status = adaptor->validate( data, &dataSize );
if ( !status.isOK() ) {
@@ -760,7 +760,7 @@ namespace {
Status WiredTigerRecordStore::oplogDiskLocRegister( OperationContext* txn,
const OpTime& opTime ) {
- StatusWith<DiskLoc> loc = oploghack::keyForOptime( opTime );
+ StatusWith<RecordId> loc = oploghack::keyForOptime( opTime );
if ( !loc.isOK() )
return loc.getStatus();
@@ -770,7 +770,7 @@ namespace {
}
void WiredTigerRecordStore::_addUncommitedDiskLoc_inlock( OperationContext* txn,
- const DiskLoc& loc ) {
+ const RecordId& loc ) {
// todo: make this a dassert at some point
invariant( _uncommittedDiskLocs.empty() ||
_uncommittedDiskLocs.back() < loc );
@@ -779,10 +779,10 @@ namespace {
_oplog_highestSeen = loc;
}
- DiskLoc WiredTigerRecordStore::oplogStartHack(OperationContext* txn,
- const DiskLoc& startingPosition) const {
+ RecordId WiredTigerRecordStore::oplogStartHack(OperationContext* txn,
+ const RecordId& startingPosition) const {
if (!_useOplogHack)
- return DiskLoc().setInvalid();
+ return RecordId().setInvalid();
{
WiredTigerRecoveryUnit* wru = WiredTigerRecoveryUnit::get(txn);
@@ -796,7 +796,7 @@ namespace {
c->set_key(c, _makeKey(startingPosition));
int ret = c->search_near(c, &cmp);
if (ret == 0 && cmp > 0) ret = c->prev(c); // landed one higher than startingPosition
- if (ret == WT_NOTFOUND) return DiskLoc(); // nothing <= startingPosition
+ if (ret == WT_NOTFOUND) return RecordId(); // nothing <= startingPosition
invariantWTOK(ret);
uint64_t key;
@@ -805,13 +805,13 @@ namespace {
return _fromKey(key);
}
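oplogStartHack() above returns the greatest record at or below startingPosition: search_near() may land one key high (cmp > 0), in which case the code steps back with prev(), and WT_NOTFOUND maps to a null RecordId. A standalone model of the same rule using std::map in place of a WT_CURSOR, purely illustrative:

    #include <cassert>
    #include <cstdint>
    #include <iterator>
    #include <map>

    // Greatest key <= start, or -1 when nothing qualifies (the RecordId() case).
    int64_t oplogStartHack(const std::map<int64_t, int>& oplog, int64_t start) {
        // upper_bound is the first key > start; stepping back one entry is the
        // same correction the real code makes when search_near() lands high.
        std::map<int64_t, int>::const_iterator it = oplog.upper_bound(start);
        if (it == oplog.begin())
            return -1;
        return std::prev(it)->first;
    }

    int main() {
        std::map<int64_t, int> oplog;
        oplog[11] = 0; oplog[12] = 0; oplog[22] = 0;
        assert(oplogStartHack(oplog, 1) == -1);   // nothing <= start
        assert(oplogStartHack(oplog, 21) == 12);  // between entries
        assert(oplogStartHack(oplog, 22) == 22);  // exact match
        assert(oplogStartHack(oplog, 23) == 22);  // above the highest entry
    }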
- DiskLoc WiredTigerRecordStore::_nextId() {
+ RecordId WiredTigerRecordStore::_nextId() {
invariant(!_useOplogHack);
const uint64_t myId = _nextIdNum.fetchAndAdd(1);
int a = myId >> 32;
// This masks the lowest 4 bytes of myId
int ofs = myId & 0x00000000FFFFFFFF;
- DiskLoc loc( a, ofs );
+ RecordId loc( a, ofs );
return loc;
}
@@ -873,13 +873,13 @@ namespace {
}
}
- uint64_t WiredTigerRecordStore::_makeKey( const DiskLoc& loc ) {
+ uint64_t WiredTigerRecordStore::_makeKey( const RecordId& loc ) {
return ((uint64_t)loc.a() << 32 | loc.getOfs());
}
- DiskLoc WiredTigerRecordStore::_fromKey( uint64_t key ) {
+ RecordId WiredTigerRecordStore::_fromKey( uint64_t key ) {
uint32_t a = key >> 32;
uint32_t ofs = (uint32_t)key;
- return DiskLoc(a, ofs);
+ return RecordId(a, ofs);
}
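The packing above is a plain 32/32 split: the high word of the 64-bit WiredTiger key is loc.a() and the low word is loc.getOfs(), so key order matches (a, ofs) order. A standalone round-trip check with a pared-down RecordId:

    #include <cassert>
    #include <cstdint>

    struct RecordId { uint32_t a, ofs; };

    uint64_t makeKey(const RecordId& loc) {
        return (uint64_t)loc.a << 32 | loc.ofs;  // high word: a, low word: ofs
    }

    RecordId fromKey(uint64_t key) {
        return RecordId{(uint32_t)(key >> 32), (uint32_t)key};
    }

    int main() {
        RecordId loc = {5, 18};
        RecordId back = fromKey(makeKey(loc));
        assert(back.a == 5 && back.ofs == 18);

        // Keys preserve (a, ofs) ordering, which the record store relies on.
        assert(makeKey(RecordId{1, 2}) < makeKey(RecordId{1, 4}));
        assert(makeKey(RecordId{1, 4}) < makeKey(RecordId{2, 2}));
    }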
// --------
@@ -887,7 +887,7 @@ namespace {
WiredTigerRecordStore::Iterator::Iterator(
const WiredTigerRecordStore& rs,
OperationContext *txn,
- const DiskLoc& start,
+ const RecordId& start,
const CollectionScanParams::Direction& dir,
bool forParallelCollectionScan)
: _rs( rs ),
@@ -904,7 +904,7 @@ namespace {
WiredTigerRecordStore::Iterator::~Iterator() {
}
- void WiredTigerRecordStore::Iterator::_locate(const DiskLoc &loc, bool exact) {
+ void WiredTigerRecordStore::Iterator::_locate(const RecordId &loc, bool exact) {
RS_ITERATOR_TRACE("_locate " << loc);
WT_CURSOR *c = _cursor->get();
invariant( c );
@@ -931,7 +931,7 @@ namespace {
ret = c->search_near(c, &cmp);
if ( ret == WT_NOTFOUND ) {
_eof = true;
- _loc = DiskLoc();
+ _loc = RecordId();
return;
}
invariantWTOK(ret);
@@ -958,10 +958,10 @@ namespace {
}
// Allow const functions to use curr to find current location.
- DiskLoc WiredTigerRecordStore::Iterator::_curr() const {
+ RecordId WiredTigerRecordStore::Iterator::_curr() const {
RS_ITERATOR_TRACE( "_curr" );
if (_eof)
- return DiskLoc();
+ return RecordId();
WT_CURSOR *c = _cursor->get();
dassert( c );
@@ -971,7 +971,7 @@ namespace {
return _fromKey(key);
}
- DiskLoc WiredTigerRecordStore::Iterator::curr() {
+ RecordId WiredTigerRecordStore::Iterator::curr() {
return _loc;
}
@@ -990,7 +990,7 @@ namespace {
_loc = _curr();
RS_ITERATOR_TRACE("_getNext " << ret << " " << _eof << " " << _loc );
if ( _rs._isCapped ) {
- DiskLoc loc = _curr();
+ RecordId loc = _curr();
if ( _readUntilForOplog.isNull() ) {
// this is the normal capped case
if ( _rs.isCappedHidden( loc ) ) {
@@ -1011,13 +1011,13 @@ namespace {
}
if (_eof) {
- _loc = DiskLoc();
+ _loc = RecordId();
}
}
- DiskLoc WiredTigerRecordStore::Iterator::getNext() {
+ RecordId WiredTigerRecordStore::Iterator::getNext() {
RS_ITERATOR_TRACE( "getNext" );
- const DiskLoc toReturn = _loc;
+ const RecordId toReturn = _loc;
RS_ITERATOR_TRACE( "getNext toReturn: " << toReturn );
_getNext();
RS_ITERATOR_TRACE( " ----" );
@@ -1025,7 +1025,7 @@ namespace {
return toReturn;
}
- void WiredTigerRecordStore::Iterator::invalidate( const DiskLoc& dl ) {
+ void WiredTigerRecordStore::Iterator::invalidate( const RecordId& dl ) {
// this should never be called
}
@@ -1069,11 +1069,11 @@ namespace {
invariant( _savedRecoveryUnit == txn->recoveryUnit() );
if ( needRestore || !wt_keeptxnopen() ) {
- DiskLoc saved = _lastLoc;
+ RecordId saved = _lastLoc;
_locate(_lastLoc, false);
RS_ITERATOR_TRACE( "isEOF check " << _eof );
if ( _eof ) {
- _lastLoc = DiskLoc();
+ _lastLoc = RecordId();
}
else if ( _loc != saved ) {
// old doc deleted, we're ok
@@ -1089,7 +1089,7 @@ namespace {
return true;
}
- RecordData WiredTigerRecordStore::Iterator::dataFor( const DiskLoc& loc ) const {
+ RecordData WiredTigerRecordStore::Iterator::dataFor( const RecordId& loc ) const {
// Retrieve the data if the iterator is already positioned at loc, otherwise
// open a new cursor and find the data to avoid upsetting the iterator's
// cursor position.
@@ -1103,12 +1103,12 @@ namespace {
}
void WiredTigerRecordStore::temp_cappedTruncateAfter( OperationContext* txn,
- DiskLoc end,
+ RecordId end,
bool inclusive ) {
WriteUnitOfWork wuow(txn);
boost::scoped_ptr<RecordIterator> iter( getIterator( txn, end ) );
while( !iter->isEOF() ) {
- DiskLoc loc = iter->getNext();
+ RecordId loc = iter->getNext();
if ( end < loc || ( inclusive && end == loc ) ) {
deleteRecord( txn, loc );
}
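temp_cappedTruncateAfter() above walks from end and deletes every later record, removing end itself only when inclusive is set. A simplified map-based model of that boundary handling, with stand-in types:

    #include <cassert>
    #include <map>

    void cappedTruncateAfter(std::map<int, int>& rs, int end, bool inclusive) {
        // lower_bound keeps nothing from end onward; upper_bound spares end itself.
        std::map<int, int>::iterator it =
            inclusive ? rs.lower_bound(end) : rs.upper_bound(end);
        rs.erase(it, rs.end());
    }

    int main() {
        std::map<int, int> rs;
        rs[1] = 0; rs[2] = 0; rs[3] = 0;
        cappedTruncateAfter(rs, 2, false);  // deletes loc 3 only
        assert(rs.count(2) == 1 && rs.count(3) == 0);
        cappedTruncateAfter(rs, 2, true);   // now deletes loc 2 as well
        assert(rs.count(2) == 0 && rs.size() == 1);
    }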
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index 3f07d83f055..49e91b81129 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -94,36 +94,36 @@ namespace mongo {
// CRUD related
- virtual RecordData dataFor( OperationContext* txn, const DiskLoc& loc ) const;
+ virtual RecordData dataFor( OperationContext* txn, const RecordId& loc ) const;
- virtual bool findRecord( OperationContext* txn, const DiskLoc& loc, RecordData* out ) const;
+ virtual bool findRecord( OperationContext* txn, const RecordId& loc, RecordData* out ) const;
- virtual void deleteRecord( OperationContext* txn, const DiskLoc& dl );
+ virtual void deleteRecord( OperationContext* txn, const RecordId& dl );
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const char* data,
int len,
bool enforceQuota );
- virtual StatusWith<DiskLoc> insertRecord( OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
const DocWriter* doc,
bool enforceQuota );
- virtual StatusWith<DiskLoc> updateRecord( OperationContext* txn,
- const DiskLoc& oldLocation,
+ virtual StatusWith<RecordId> updateRecord( OperationContext* txn,
+ const RecordId& oldLocation,
const char* data,
int len,
bool enforceQuota,
UpdateMoveNotifier* notifier );
virtual Status updateWithDamages( OperationContext* txn,
- const DiskLoc& loc,
+ const RecordId& loc,
const RecordData& oldRec,
const char* damangeSource,
const mutablebson::DamageVector& damages );
virtual RecordIterator* getIterator( OperationContext* txn,
- const DiskLoc& start = DiskLoc(),
+ const RecordId& start = RecordId(),
const CollectionScanParams::Direction& dir =
CollectionScanParams::FORWARD ) const;
@@ -154,11 +154,11 @@ namespace mongo {
BSONObjBuilder* info = NULL );
virtual void temp_cappedTruncateAfter(OperationContext* txn,
- DiskLoc end,
+ RecordId end,
bool inclusive);
- virtual DiskLoc oplogStartHack(OperationContext* txn,
- const DiskLoc& startingPosition) const;
+ virtual RecordId oplogStartHack(OperationContext* txn,
+ const RecordId& startingPosition) const;
virtual Status oplogDiskLocRegister( OperationContext* txn,
const OpTime& opTime );
@@ -177,8 +177,8 @@ namespace mongo {
void setSizeStorer( WiredTigerSizeStorer* ss ) { _sizeStorer = ss; }
- void dealtWithCappedLoc( const DiskLoc& loc );
- bool isCappedHidden( const DiskLoc& loc ) const;
+ void dealtWithCappedLoc( const RecordId& loc );
+ bool isCappedHidden( const RecordId& loc ) const;
private:
@@ -186,24 +186,24 @@ namespace mongo {
public:
Iterator( const WiredTigerRecordStore& rs,
OperationContext* txn,
- const DiskLoc& start,
+ const RecordId& start,
const CollectionScanParams::Direction& dir,
bool forParallelCollectionScan );
virtual ~Iterator();
virtual bool isEOF();
- virtual DiskLoc curr();
- virtual DiskLoc getNext();
- virtual void invalidate(const DiskLoc& dl);
+ virtual RecordId curr();
+ virtual RecordId getNext();
+ virtual void invalidate(const RecordId& dl);
virtual void saveState();
virtual bool restoreState(OperationContext *txn);
- virtual RecordData dataFor( const DiskLoc& loc ) const;
+ virtual RecordData dataFor( const RecordId& loc ) const;
private:
void _getNext();
- void _locate( const DiskLoc &loc, bool exact );
- DiskLoc _curr() const; // const version of public curr method
+ void _locate( const RecordId &loc, bool exact );
+ RecordId _curr() const; // const version of public curr method
const WiredTigerRecordStore& _rs;
OperationContext* _txn;
@@ -212,10 +212,10 @@ namespace mongo {
bool _forParallelCollectionScan;
scoped_ptr<WiredTigerCursor> _cursor;
bool _eof;
- const DiskLoc _readUntilForOplog;
+ const RecordId _readUntilForOplog;
- DiskLoc _loc; // Cached key of _cursor. Update any time _cursor is moved.
- DiskLoc _lastLoc; // the last thing returned from getNext()
+ RecordId _loc; // Cached key of _cursor. Update any time _cursor is moved.
+ RecordId _lastLoc; // the last thing returned from getNext()
};
class NumRecordsChange;
@@ -223,19 +223,19 @@ namespace mongo {
static WiredTigerRecoveryUnit* _getRecoveryUnit( OperationContext* txn );
- static uint64_t _makeKey(const DiskLoc &loc);
- static DiskLoc _fromKey(uint64_t k);
+ static uint64_t _makeKey(const RecordId &loc);
+ static RecordId _fromKey(uint64_t k);
- void _addUncommitedDiskLoc_inlock( OperationContext* txn, const DiskLoc& loc );
+ void _addUncommitedDiskLoc_inlock( OperationContext* txn, const RecordId& loc );
- DiskLoc _nextId();
- void _setId(DiskLoc loc);
+ RecordId _nextId();
+ void _setId(RecordId loc);
bool cappedAndNeedDelete() const;
- void cappedDeleteAsNeeded(OperationContext* txn, const DiskLoc& justInserted );
+ void cappedDeleteAsNeeded(OperationContext* txn, const RecordId& justInserted );
void _changeNumRecords(OperationContext* txn, bool insert);
void _increaseDataSize(OperationContext* txn, int amount);
RecordData _getData( const WiredTigerCursor& cursor) const;
- StatusWith<DiskLoc> extractAndCheckLocForOplog(const char* data, int len);
+ StatusWith<RecordId> extractAndCheckLocForOplog(const char* data, int len);
void _oplogSetStartHack( WiredTigerRecoveryUnit* wru ) const;
const std::string _uri;
@@ -251,10 +251,10 @@ namespace mongo {
const bool _useOplogHack;
- typedef std::vector<DiskLoc> SortedDiskLocs;
+ typedef std::vector<RecordId> SortedDiskLocs;
SortedDiskLocs _uncommittedDiskLocs;
- DiskLoc _oplog_visibleTo;
- DiskLoc _oplog_highestSeen;
+ RecordId _oplog_visibleTo;
+ RecordId _oplog_highestSeen;
mutable boost::mutex _uncommittedDiskLocsMutex;
AtomicUInt64 _nextIdNum;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
index 570eeace31f..1aecd888cee 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
@@ -151,15 +151,15 @@ namespace mongo {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
- DiskLoc loc1;
- DiskLoc loc2;
+ RecordId loc1;
+ RecordId loc2;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), "a", 2, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
ASSERT_OK( res.getStatus() );
loc1 = res.getValue();
@@ -202,15 +202,15 @@ namespace mongo {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
- DiskLoc loc1;
- DiskLoc loc2;
+ RecordId loc1;
+ RecordId loc2;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), "a", 2, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
ASSERT_OK( res.getStatus() );
loc1 = res.getValue();
@@ -268,7 +268,7 @@ namespace mongo {
{
WriteUnitOfWork uow( opCtx.get() );
for ( int i = 0; i < N; i++ ) {
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), "a", 2, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
ASSERT_OK( res.getStatus() );
}
uow.commit();
@@ -333,7 +333,7 @@ namespace mongo {
rs.reset( NULL ); // this has to be deleted before ss
}
- StatusWith<DiskLoc> insertBSON(scoped_ptr<OperationContext>& opCtx,
+ StatusWith<RecordId> insertBSON(scoped_ptr<OperationContext>& opCtx,
scoped_ptr<RecordStore>& rs,
const OpTime& opTime) {
BSONObj obj = BSON( "ts" << opTime );
@@ -342,8 +342,8 @@ namespace mongo {
invariant( wrs );
Status status = wrs->oplogDiskLocRegister( opCtx.get(), opTime );
if (!status.isOK())
- return StatusWith<DiskLoc>( status );
- StatusWith<DiskLoc> res = rs->insertRecord(opCtx.get(),
+ return StatusWith<RecordId>( status );
+ StatusWith<RecordId> res = rs->insertRecord(opCtx.get(),
obj.objdata(),
obj.objsize(),
false);
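The insertBSON() helper relies on the oplog key mapping checked below: oploghack::keyForOptime() turns OpTime(secs, inc) into RecordId(secs, inc), so oplog records sort by timestamp. A standalone sketch of that mapping with pared-down types:

    #include <cassert>
    #include <cstdint>

    struct OpTime { uint32_t secs, inc; };

    struct RecordId {
        uint32_t a, ofs;
        bool operator==(const RecordId& o) const { return a == o.a && ofs == o.ofs; }
    };

    // High word seconds, low word increment, matching the assertions below.
    RecordId keyForOptime(const OpTime& t) {
        RecordId loc = {t.secs, t.inc};
        return loc;
    }

    int main() {
        assert(keyForOptime(OpTime{1, 2}) == (RecordId{1, 2}));
        assert(keyForOptime(OpTime{2, 2}) == (RecordId{2, 2}));
    }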
@@ -381,52 +381,52 @@ namespace mongo {
// success cases
ASSERT_EQ(insertBSON(opCtx, rs, OpTime(1,1)).getValue(),
- DiskLoc(1,1));
+ RecordId(1,1));
ASSERT_EQ(insertBSON(opCtx, rs, OpTime(1,2)).getValue(),
- DiskLoc(1,2));
+ RecordId(1,2));
ASSERT_EQ(insertBSON(opCtx, rs, OpTime(2,2)).getValue(),
- DiskLoc(2,2));
+ RecordId(2,2));
}
{
scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
// find start
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), DiskLoc(0,1)), DiskLoc()); // nothing <=
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), DiskLoc(2,1)), DiskLoc(1,2)); // between
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), DiskLoc(2,2)), DiskLoc(2,2)); // ==
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), DiskLoc(2,3)), DiskLoc(2,2)); // > highest
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(0,1)), RecordId()); // nothing <=
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,1)), RecordId(1,2)); // between
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,2)), RecordId(2,2)); // ==
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId(2,2)); // > highest
}
{
scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- rs->temp_cappedTruncateAfter(opCtx.get(), DiskLoc(2,2), false); // no-op
+ rs->temp_cappedTruncateAfter(opCtx.get(), RecordId(2,2), false); // no-op
}
{
scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), DiskLoc(2,3)), DiskLoc(2,2));
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId(2,2));
}
{
scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- rs->temp_cappedTruncateAfter(opCtx.get(), DiskLoc(1,2), false); // deletes 2,2
+ rs->temp_cappedTruncateAfter(opCtx.get(), RecordId(1,2), false); // deletes 2,2
}
{
scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), DiskLoc(2,3)), DiskLoc(1,2));
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId(1,2));
}
{
scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- rs->temp_cappedTruncateAfter(opCtx.get(), DiskLoc(1,2), true); // deletes 1,2
+ rs->temp_cappedTruncateAfter(opCtx.get(), RecordId(1,2), true); // deletes 1,2
}
{
scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), DiskLoc(2,3)), DiskLoc(1,1));
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId(1,1));
}
{
@@ -438,7 +438,7 @@ namespace mongo {
{
scoped_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), DiskLoc(2,3)), DiskLoc());
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId());
}
}
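
The assertions above pin down the oplogStartHack contract: given a RecordId, return the newest stored id at or before it, or a null id when nothing qualifies. A sketch of that lookup over a sorted std::set, with uint64_t standing in for RecordId:

    #include <cstdint>
    #include <set>

    // Return the greatest stored id <= target, or 0 (the null id) if none.
    uint64_t oplogStartHackSketch(const std::set<uint64_t>& ids, uint64_t target) {
        std::set<uint64_t>::const_iterator it = ids.upper_bound(target);
        if (it == ids.begin())
            return 0;  // nothing <= target, mirrors the "nothing <=" case above
        --it;          // step back to the last element not greater than target
        return *it;
    }
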
@@ -455,20 +455,20 @@ namespace mongo {
obj.objsize(), false ).getStatus());
wuow.commit();
}
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), DiskLoc(0,1)), DiskLoc().setInvalid());
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(0,1)), RecordId().setInvalid());
}
TEST(WiredTigerRecordStoreTest, CappedOrder) {
scoped_ptr<WiredTigerHarnessHelper> harnessHelper( new WiredTigerHarnessHelper() );
scoped_ptr<RecordStore> rs(harnessHelper->newCappedRecordStore("a.b", 100000,10000));
- DiskLoc loc1;
+ RecordId loc1;
{ // first insert a document
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
- StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), "a", 2, false );
+ StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
ASSERT_OK( res.getStatus() );
loc1 = res.getValue();
uow.commit();
@@ -524,7 +524,7 @@ namespace mongo {
}
}
- DiskLoc _oplogOrderInsertOplog( OperationContext* txn,
+ RecordId _oplogOrderInsertOplog( OperationContext* txn,
scoped_ptr<RecordStore>& rs,
int inc ) {
OpTime opTime = OpTime(5,inc);
@@ -532,7 +532,7 @@ namespace mongo {
Status status = wrs->oplogDiskLocRegister( txn, opTime );
ASSERT_OK( status );
BSONObj obj = BSON( "ts" << opTime );
- StatusWith<DiskLoc> res = rs->insertRecord( txn, obj.objdata(), obj.objsize(), false );
+ StatusWith<RecordId> res = rs->insertRecord( txn, obj.objdata(), obj.objsize(), false );
ASSERT_OK( res.getStatus() );
return res.getValue();
}
@@ -549,7 +549,7 @@ namespace mongo {
ASSERT( wrs->usingOplogHack() );
}
- DiskLoc loc1;
+ RecordId loc1;
{ // first insert a document
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
index 8da8ae5772e..9e2cceaf75d 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
@@ -188,7 +188,7 @@ namespace mongo {
}
}
- void WiredTigerRecoveryUnit::setOplogReadTill( const DiskLoc& loc ) {
+ void WiredTigerRecoveryUnit::setOplogReadTill( const RecordId& loc ) {
_oplogReadTill = loc;
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
index a1eb26215c3..b15efcbe75e 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
@@ -85,8 +85,8 @@ namespace mongo {
bool everStartedWrite() const { return _everStartedWrite; }
int depth() const { return _depth; }
- void setOplogReadTill( const DiskLoc& loc );
- DiskLoc getOplogReadTill() const { return _oplogReadTill; }
+ void setOplogReadTill( const RecordId& loc );
+ RecordId getOplogReadTill() const { return _oplogReadTill; }
static WiredTigerRecoveryUnit* get(OperationContext *txn);
@@ -107,7 +107,7 @@ namespace mongo {
Timer _timer;
bool _currentlySquirreled;
bool _syncing;
- DiskLoc _oplogReadTill;
+ RecordId _oplogReadTill;
typedef OwnedPointerVector<Change> Changes;
Changes _changes;
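
setOplogReadTill() records a high-water mark on the recovery unit; oplog readers must not return records newer than the mark. A sketch of the visibility check this implies; the class and member names here are illustrative, not mongo API:

    #include <cstdint>

    class VisibilitySketch {
    public:
        void setOplogReadTill(uint64_t id) { _readTill = id; }
        uint64_t getOplogReadTill() const { return _readTill; }

        // A record is visible if no mark is set, or it is not newer than it.
        bool isVisible(uint64_t recordId) const {
            return _readTill == 0 || recordId <= _readTill;
        }

    private:
        uint64_t _readTill = 0;  // 0 == no mark, everything visible
    };
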
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index d84990e80fe..081d37108a1 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -134,7 +134,7 @@ namespace mongo {
long long maxSizeBytes = 1024 * 1024 * 1024;
- set<DiskLoc> locs;
+ set<RecordId> locs;
long long numDocsFound;
long long estSizeBytes;
{
@@ -162,7 +162,7 @@ namespace mongo {
const Collection* collection = db->getCollection(&txn, ns);
// Make sure all the disklocs actually correspond to the right info
- for ( set<DiskLoc>::const_iterator it = locs.begin(); it != locs.end(); ++it ) {
+ for ( set<RecordId>::const_iterator it = locs.begin(); it != locs.end(); ++it ) {
const BSONObj obj = collection->docFor(&txn, *it);
ASSERT_EQUALS(obj["tag"].OID(), tag);
}
@@ -182,7 +182,7 @@ namespace mongo {
long long maxSizeBytes = 1024 * 1024 * 1024;
- set<DiskLoc> locs;
+ set<RecordId> locs;
long long numDocsFound;
long long estSizeBytes;
{
@@ -227,7 +227,7 @@ namespace mongo {
// Very small max size
long long maxSizeBytes = 10;
- set<DiskLoc> locs;
+ set<RecordId> locs;
long long numDocsFound;
long long estSizeBytes;
{
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 1b43af2a093..504d04c528a 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -77,7 +77,7 @@ namespace IndexUpdateTests {
"name" << "a_1" );
int32_t lenWHdr = indexInfo.objsize() + Record::HeaderSize;
const char* systemIndexes = "unittests.system.indexes";
- DiskLoc infoLoc = allocateSpaceForANewRecord( systemIndexes,
+ RecordId infoLoc = allocateSpaceForANewRecord( systemIndexes,
nsdetails( systemIndexes ),
lenWHdr,
false );
@@ -216,12 +216,12 @@ namespace IndexUpdateTests {
// Add index keys to the phaseOne.
int32_t nKeys = 130;
for( int32_t i = 0; i < nKeys; ++i ) {
- phaseOne.sorter->add( BSON( "a" << i ), /* dummy disk loc */ DiskLoc(), false );
+ phaseOne.sorter->add( BSON( "a" << i ), /* dummy disk loc */ RecordId(), false );
}
phaseOne.nkeys = phaseOne.n = nKeys;
phaseOne.sorter->sort( false );
// Set up remaining arguments.
- set<DiskLoc> dups;
+ set<RecordId> dups;
CurOp* op = txn.getCurOp();
ProgressMeterHolder pm (op->setMessage("BuildBottomUp",
"BuildBottomUp Progress",
@@ -282,12 +282,12 @@ namespace IndexUpdateTests {
int32_t nKeys = 130;
// Add index keys to the phaseOne.
for( int32_t i = 0; i < nKeys; ++i ) {
- phaseOne.sorter->add( BSON( "a" << i ), /* dummy disk loc */ DiskLoc(), false );
+ phaseOne.sorter->add( BSON( "a" << i ), /* dummy disk loc */ RecordId(), false );
}
phaseOne.nkeys = phaseOne.n = nKeys;
phaseOne.sorter->sort( false );
// Set up remaining arguments.
- set<DiskLoc> dups;
+ set<RecordId> dups;
CurOp* op = txn.getCurOp();
ProgressMeterHolder pm (op->setMessage("InterruptBuildBottomUp",
"InterruptBuildBottomUp Progress",
@@ -417,17 +417,17 @@ namespace IndexUpdateTests {
// Create a new collection.
Database* db = _ctx.ctx().db();
Collection* coll;
- DiskLoc loc1;
- DiskLoc loc2;
+ RecordId loc1;
+ RecordId loc2;
{
WriteUnitOfWork wunit(&_txn);
db->dropCollection( &_txn, _ns );
coll = db->createCollection( &_txn, _ns );
- StatusWith<DiskLoc> swLoc1 = coll->insertDocument(&_txn,
+ StatusWith<RecordId> swLoc1 = coll->insertDocument(&_txn,
BSON("_id" << 1 << "a" << "dup"),
true);
- StatusWith<DiskLoc> swLoc2 = coll->insertDocument(&_txn,
+ StatusWith<RecordId> swLoc2 = coll->insertDocument(&_txn,
BSON("_id" << 2 << "a" << "dup"),
true);
ASSERT_OK(swLoc1.getStatus());
@@ -450,7 +450,7 @@ namespace IndexUpdateTests {
ASSERT_OK(indexer.init(spec));
- std::set<DiskLoc> dups;
+ std::set<RecordId> dups;
ASSERT_OK(indexer.insertAllDocumentsInCollection(&dups));
// either loc1 or loc2 should be in dups but not both.
@@ -668,7 +668,7 @@ namespace IndexUpdateTests {
"name" << name );
int32_t lenWHdr = indexInfo.objsize() + Record::HeaderSize;
const char* systemIndexes = "unittests.system.indexes";
- DiskLoc infoLoc = allocateSpaceForANewRecord( systemIndexes,
+ RecordId infoLoc = allocateSpaceForANewRecord( systemIndexes,
nsdetails( systemIndexes ),
lenWHdr,
false );
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index 762b14d8e69..ca4171e22a0 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -156,15 +156,15 @@ namespace NamespaceTests {
int nRecords() const {
int count = 0;
const Extent* ext;
- for ( DiskLoc extLoc = nsd()->firstExtent();
+ for ( RecordId extLoc = nsd()->firstExtent();
!extLoc.isNull();
extLoc = ext->xnext) {
ext = extentManager()->getExtent(extLoc);
int fileNo = ext->firstRecord.a();
if ( fileNo == -1 )
continue;
- for ( int recOfs = ext->firstRecord.getOfs(); recOfs != DiskLoc::NullOfs;
- recOfs = recordStore()->recordFor(DiskLoc(fileNo, recOfs))->nextOfs() ) {
+ for ( int recOfs = ext->firstRecord.getOfs(); recOfs != RecordId::NullOfs;
+ recOfs = recordStore()->recordFor(RecordId(fileNo, recOfs))->nextOfs() ) {
++count;
}
}
@@ -173,7 +173,7 @@ namespace NamespaceTests {
}
int nExtents() const {
int count = 0;
- for ( DiskLoc extLoc = nsd()->firstExtent();
+ for ( RecordId extLoc = nsd()->firstExtent();
!extLoc.isNull();
extLoc = extentManager()->getExtent(extLoc)->xnext ) {
++count;
@@ -222,7 +222,7 @@ namespace NamespaceTests {
ASSERT( nsd() );
ASSERT_EQUALS( 0, nRecords() );
ASSERT( nsd()->firstExtent() == nsd()->capExtent() );
- DiskLoc initial = DiskLoc();
+ RecordId initial = RecordId();
initial.setInvalid();
ASSERT( initial == nsd()->capFirstNewRecord() );
}
@@ -249,10 +249,10 @@ namespace NamespaceTests {
const int N = 20;
const int Q = 16; // these constants depend on the size of the bson object and on the extent size the system allocates
- DiskLoc l[ N ];
+ RecordId l[ N ];
for ( int i = 0; i < N; ++i ) {
BSONObj b = bigObj();
- StatusWith<DiskLoc> status = collection()->insertDocument( &txn, b, true );
+ StatusWith<RecordId> status = collection()->insertDocument( &txn, b, true );
ASSERT( status.isOK() );
l[ i ] = status.getValue();
ASSERT( !l[ i ].isNull() );
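
This test leans on capped-collection semantics: once the collection is full, each insert evicts the oldest record, ring-buffer style. A sketch of that behavior, under the stated assumption that ids are handed out in increasing order:

    #include <cstddef>
    #include <cstdint>
    #include <deque>

    class CappedSketch {
    public:
        explicit CappedSketch(size_t maxDocs) : _maxDocs(maxDocs) {}

        uint64_t insert() {
            if (_ids.size() == _maxDocs)
                _ids.pop_front();  // oldest record is clobbered, as the test expects
            _ids.push_back(++_nextId);
            return _ids.back();
        }

        size_t nRecords() const { return _ids.size(); }

    private:
        size_t _maxDocs;
        uint64_t _nextId = 0;
        std::deque<uint64_t> _ids;
    };
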
@@ -272,9 +272,9 @@ namespace NamespaceTests {
create();
ASSERT_EQUALS( 2, nExtents() );
- DiskLoc l[ 8 ];
+ RecordId l[ 8 ];
for ( int i = 0; i < 8; ++i ) {
- StatusWith<DiskLoc> status = collection()->insertDocument( &txn, bigObj(), true );
+ StatusWith<RecordId> status = collection()->insertDocument( &txn, bigObj(), true );
ASSERT( status.isOK() );
l[ i ] = status.getValue();
ASSERT( !l[ i ].isNull() );
@@ -289,7 +289,7 @@ namespace NamespaceTests {
bob.appendOID( "_id", NULL, true );
bob.append( "a", string( MinExtentSize + 500, 'a' ) ); // min extent size is now 4096
BSONObj bigger = bob.done();
- StatusWith<DiskLoc> status = collection()->insertDocument( &txn, bigger, false );
+ StatusWith<RecordId> status = collection()->insertDocument( &txn, bigger, false );
ASSERT( !status.isOK() );
ASSERT_EQUALS( 0, nRecords() );
}
@@ -322,7 +322,7 @@ namespace NamespaceTests {
ASSERT( nsd()->isCapped() );
ASSERT( !nsd()->isUserFlagSet( NamespaceDetails::Flag_UsePowerOf2Sizes ) );
- StatusWith<DiskLoc> result =
+ StatusWith<RecordId> result =
collection()->insertDocument( &txn, docForRecordSize( 300 ), false );
ASSERT( result.isOK() );
Record* record = collection()->getRecordStore()->recordFor( result.getValue() );
@@ -333,7 +333,7 @@ namespace NamespaceTests {
};
- /* test NamespaceDetails::cappedTruncateAfter(const char *ns, DiskLoc loc)
+ /* test NamespaceDetails::cappedTruncateAfter(const char *ns, RecordId loc)
*/
class TruncateCapped : public Base {
virtual string spec() const {
@@ -349,13 +349,13 @@ namespace NamespaceTests {
int N = MinExtentSize / b.objsize() * nExtents() + 5;
int T = N - 4;
- DiskLoc truncAt;
- //DiskLoc l[ 8 ];
+ RecordId truncAt;
+ //RecordId l[ 8 ];
for ( int i = 0; i < N; ++i ) {
BSONObj bb = bigObj();
- StatusWith<DiskLoc> status = collection()->insertDocument( &txn, bb, true );
+ StatusWith<RecordId> status = collection()->insertDocument( &txn, bb, true );
ASSERT( status.isOK() );
- DiskLoc a = status.getValue();
+ RecordId a = status.getValue();
if( T == i )
truncAt = a;
ASSERT( !a.isNull() );
@@ -365,7 +365,7 @@ namespace NamespaceTests {
}
ASSERT( nRecords() < N );
- DiskLoc last, first;
+ RecordId last, first;
{
auto_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
ns(),
@@ -388,7 +388,7 @@ namespace NamespaceTests {
ASSERT_EQUALS( collection()->numRecords() , 28u );
{
- DiskLoc loc;
+ RecordId loc;
auto_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
ns(),
collection(),
@@ -401,7 +401,7 @@ namespace NamespaceTests {
ns(),
collection(),
InternalPlanner::BACKWARD));
- DiskLoc loc;
+ RecordId loc;
runner->getNext(NULL, &loc);
ASSERT( last != loc );
ASSERT( !last.isNull() );
@@ -412,7 +412,7 @@ namespace NamespaceTests {
bob.appendOID("_id", 0, true);
bob.append( "a", string( MinExtentSize + 300, 'a' ) );
BSONObj bigger = bob.done();
- StatusWith<DiskLoc> status = collection()->insertDocument( &txn, bigger, true );
+ StatusWith<RecordId> status = collection()->insertDocument( &txn, bigger, true );
ASSERT( !status.isOK() );
ASSERT_EQUALS( 0, nRecords() );
}
@@ -429,7 +429,7 @@ namespace NamespaceTests {
void run() {
create();
nsd()->deletedListEntry( 2 ) = nsd()->cappedListOfAllDeletedRecords().drec()->nextDeleted().drec()->nextDeleted();
- nsd()->cappedListOfAllDeletedRecords().drec()->nextDeleted().drec()->nextDeleted().writing() = DiskLoc();
+ nsd()->cappedListOfAllDeletedRecords().drec()->nextDeleted().drec()->nextDeleted().writing() = RecordId();
nsd()->cappedLastDelRecLastExtent().Null();
NamespaceDetails *d = nsd();
@@ -443,13 +443,13 @@ namespace NamespaceTests {
ASSERT( nsd()->capExtent().getOfs() != 0 );
ASSERT( !nsd()->capFirstNewRecord().isValid() );
int nDeleted = 0;
- for ( DiskLoc i = nsd()->cappedListOfAllDeletedRecords(); !i.isNull(); i = i.drec()->nextDeleted(), ++nDeleted );
+ for ( RecordId i = nsd()->cappedListOfAllDeletedRecords(); !i.isNull(); i = i.drec()->nextDeleted(), ++nDeleted );
ASSERT_EQUALS( 10, nDeleted );
ASSERT( nsd()->cappedLastDelRecLastExtent().isNull() );
}
private:
- static void zero( DiskLoc *d ) {
- memset( d, 0, sizeof( DiskLoc ) );
+ static void zero( RecordId *d ) {
+ memset( d, 0, sizeof( RecordId ) );
}
virtual string spec() const {
return "{\"capped\":true,\"size\":512,\"$nExtents\":10}";
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index 828530905c6..0808a666b2d 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -75,7 +75,7 @@ namespace PdfileTests {
BSONObj x = BSON( "x" << 1 );
ASSERT( x["_id"].type() == 0 );
Collection* collection = _context.db()->getOrCreateCollection( &_txn, ns() );
- StatusWith<DiskLoc> dl = collection->insertDocument( &_txn, x, true );
+ StatusWith<RecordId> dl = collection->insertDocument( &_txn, x, true );
ASSERT( !dl.isOK() );
StatusWith<BSONObj> fixed = fixDocumentForInsert( x );
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index a8bd8516031..0680d1c4889 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -403,7 +403,7 @@ namespace PlanRankingTests {
* equality predicate on "a". The presence of the range predicate has an impact on the
* intersection plan that is raced against the single-index plans: since "a" no longer generates
* point interval bounds, the results of the index scan aren't guaranteed to be returned in
- * DiskLoc order, and so the intersection plan uses the AND_HASHED stage instead of the
+ * RecordId order, and so the intersection plan uses the AND_HASHED stage instead of the
* AND_SORTED stage. It is still the case that the query should pick the plan that uses index
* "b", instead of the plan that uses index "a" or the (hashed) intersection plan.
*/
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index 30f2c00c547..c3bb6e7b56b 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -27,7 +27,7 @@
*/
/**
- * This file tests db/exec/and_*.cpp and DiskLoc invalidation. DiskLoc invalidation forces a fetch
+ * This file tests db/exec/and_*.cpp and RecordId invalidation. RecordId invalidation forces a fetch
* so we cannot test it outside of a dbtest.
*/
@@ -72,11 +72,11 @@ namespace QueryStageAnd {
return descriptor;
}
- void getLocs(set<DiskLoc>* out, Collection* coll) {
- RecordIterator* it = coll->getIterator(&_txn, DiskLoc(),
+ void getLocs(set<RecordId>* out, Collection* coll) {
+ RecordIterator* it = coll->getIterator(&_txn, RecordId(),
CollectionScanParams::FORWARD);
while (!it->isEOF()) {
- DiskLoc nextLoc = it->getNext();
+ RecordId nextLoc = it->getNext();
out->insert(nextLoc);
}
delete it;
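
getLocs() above walks a raw RecordIterator and frees it by hand. Under the same interface assumptions (isEOF(), getNext()), an RAII version of the walk looks like this; the iterator type is reconstructed from the calls in this diff, not the full mongo class:

    #include <cstdint>
    #include <memory>
    #include <set>

    struct RecordIteratorSketch {
        virtual ~RecordIteratorSketch() {}
        virtual bool isEOF() const = 0;
        virtual uint64_t getNext() = 0;  // uint64_t stands in for RecordId
    };

    void getLocsSketch(std::set<uint64_t>* out, RecordIteratorSketch* rawIt) {
        std::unique_ptr<RecordIteratorSketch> it(rawIt);  // owns and frees the iterator
        while (!it->isEOF())
            out->insert(it->getNext());
    }
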
@@ -150,8 +150,8 @@ namespace QueryStageAnd {
//
/**
- * Invalidate a DiskLoc held by a hashed AND before the AND finishes evaluating. The AND should
- * process all other data just fine and flag the invalidated DiskLoc in the WorkingSet.
+ * Invalidate a RecordId held by a hashed AND before the AND finishes evaluating. The AND should
+ * process all other data just fine and flag the invalidated RecordId in the WorkingSet.
*/
class QueryStageAndHashInvalidation : public QueryStageAndBase {
public:
@@ -205,10 +205,10 @@ namespace QueryStageAnd {
// ...yield
ah->saveState();
// ...invalidate one of the read objects
- set<DiskLoc> data;
+ set<RecordId> data;
getLocs(&data, coll);
size_t memUsageBefore = ah->getMemUsage();
- for (set<DiskLoc>::const_iterator it = data.begin(); it != data.end(); ++it) {
+ for (set<RecordId>::const_iterator it = data.begin(); it != data.end(); ++it) {
if (coll->docFor(&_txn, *it)["foo"].numberInt() == 15) {
ah->invalidate(&_txn, *it, INVALIDATION_DELETION);
remove(coll->docFor(&_txn, *it));
@@ -304,14 +304,14 @@ namespace QueryStageAnd {
const unordered_set<WorkingSetID>& flagged = ws.getFlagged();
ASSERT_EQUALS(size_t(0), flagged.size());
- // "delete" deletedObj (by invalidating the DiskLoc of the obj that matches it).
+ // "delete" deletedObj (by invalidating the RecordId of the obj that matches it).
BSONObj deletedObj = BSON("_id" << 20 << "foo" << 20 << "bar" << 20 << "baz" << 20);
ah->saveState();
- set<DiskLoc> data;
+ set<RecordId> data;
getLocs(&data, coll);
size_t memUsageBefore = ah->getMemUsage();
- for (set<DiskLoc>::const_iterator it = data.begin(); it != data.end(); ++it) {
+ for (set<RecordId>::const_iterator it = data.begin(); it != data.end(); ++it) {
if (0 == deletedObj.woCompare(coll->docFor(&_txn, *it))) {
ah->invalidate(&_txn, *it, INVALIDATION_DELETION);
break;
@@ -902,8 +902,8 @@ namespace QueryStageAnd {
//
/**
- * Invalidate a DiskLoc held by a sorted AND before the AND finishes evaluating. The AND should
- * process all other data just fine and flag the invalidated DiskLoc in the WorkingSet.
+ * Invalidate a RecordId held by a sorted AND before the AND finishes evaluating. The AND should
+ * process all other data just fine and flag the invalidated RecordId in the WorkingSet.
*/
class QueryStageAndSortedInvalidation : public QueryStageAndBase {
public:
@@ -942,18 +942,18 @@ namespace QueryStageAnd {
ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// Get the set of disklocs in our collection to use later.
- set<DiskLoc> data;
+ set<RecordId> data;
getLocs(&data, coll);
// We're making an assumption here that happens to be true because we clear out the
- // collection before running this: increasing inserts have increasing DiskLocs.
+ // collection before running this: increasing inserts have increasing RecordIds.
// This isn't true in general if the collection is not dropped beforehand.
WorkingSetID id = WorkingSet::INVALID_ID;
// Sorted AND looks at the first child, which is an index scan over foo==1.
ah->work(&id);
- // The first thing that the index scan returns (due to increasing DiskLoc trick) is the
+ // The first thing that the index scan returns (due to increasing RecordId trick) is the
// very first insert, which should be the very first thing in data. Let's invalidate it
// and make sure it shows up in the flagged results.
ah->saveState();
@@ -971,7 +971,7 @@ namespace QueryStageAnd {
ASSERT_TRUE(member->getFieldDotted("bar", &elt));
ASSERT_EQUALS(1, elt.numberInt());
- set<DiskLoc>::iterator it = data.begin();
+ set<RecordId>::iterator it = data.begin();
// Proceed along, AND-ing results.
int count = 0;
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index f2e406e29fb..42bd6931bb5 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -103,7 +103,7 @@ namespace QueryStageCollectionScan {
void getLocs(Collection* collection,
CollectionScanParams::Direction direction,
- vector<DiskLoc>* out) {
+ vector<RecordId>* out) {
WorkingSet ws;
CollectionScanParams params;
@@ -261,8 +261,8 @@ namespace QueryStageCollectionScan {
Collection* coll = ctx.getCollection();
- // Get the DiskLocs that would be returned by an in-order scan.
- vector<DiskLoc> locs;
+ // Get the RecordIds that would be returned by an in-order scan.
+ vector<RecordId> locs;
getLocs(coll, CollectionScanParams::FORWARD, &locs);
// Configure the scan.
@@ -322,8 +322,8 @@ namespace QueryStageCollectionScan {
Client::WriteContext ctx(&_txn, ns());
Collection* coll = ctx.getCollection();
- // Get the DiskLocs that would be returned by an in-order scan.
- vector<DiskLoc> locs;
+ // Get the RecordIds that would be returned by an in-order scan.
+ vector<RecordId> locs;
getLocs(coll, CollectionScanParams::BACKWARD, &locs);
// Configure the scan.
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 3593d0841e3..4338880ec73 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -106,13 +106,13 @@ namespace QueryStageCount {
wunit.commit();
}
- void remove(const DiskLoc& loc) {
+ void remove(const RecordId& loc) {
WriteUnitOfWork wunit(&_txn);
_coll->deleteDocument(&_txn, loc, false, false, NULL);
wunit.commit();
}
- void update(const DiskLoc& oldLoc, const BSONObj& newDoc) {
+ void update(const RecordId& oldLoc, const BSONObj& newDoc) {
WriteUnitOfWork wunit(&_txn);
_coll->updateDocument(&_txn, oldLoc, newDoc, false, NULL);
wunit.commit();
@@ -216,7 +216,7 @@ namespace QueryStageCount {
static const char* ns() { return "unittest.QueryStageCount"; }
protected:
- vector<DiskLoc> _locs;
+ vector<RecordId> _locs;
OperationContextImpl _txn;
Lock::DBLock _dbLock;
Client::Context _ctx;
diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp
index ed13459cc77..f12ea3e8f1f 100644
--- a/src/mongo/dbtests/query_stage_delete.cpp
+++ b/src/mongo/dbtests/query_stage_delete.cpp
@@ -67,7 +67,7 @@ namespace QueryStageDelete {
void getLocs(Collection* collection,
CollectionScanParams::Direction direction,
- vector<DiskLoc>* out) {
+ vector<RecordId>* out) {
WorkingSet ws;
CollectionScanParams params;
@@ -110,8 +110,8 @@ namespace QueryStageDelete {
Collection* coll = ctx.getCollection();
- // Get the DiskLocs that would be returned by an in-order scan.
- vector<DiskLoc> locs;
+ // Get the RecordIds that would be returned by an in-order scan.
+ vector<RecordId> locs;
getLocs(coll, CollectionScanParams::FORWARD, &locs);
// Configure the scan.
diff --git a/src/mongo/dbtests/query_stage_distinct.cpp b/src/mongo/dbtests/query_stage_distinct.cpp
index 049ecb7caf2..135942faee7 100644
--- a/src/mongo/dbtests/query_stage_distinct.cpp
+++ b/src/mongo/dbtests/query_stage_distinct.cpp
@@ -79,7 +79,7 @@ namespace QueryStageDistinct {
// Distinct hack execution is always covered.
// Key value is retrieved from working set key data
- // instead of DiskLoc.
+ // instead of RecordId.
ASSERT_FALSE(member->hasObj());
BSONElement keyElt;
ASSERT_TRUE(member->getFieldDotted(field, &keyElt));
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index c126f3e0d7f..a827dafd570 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -56,10 +56,10 @@ namespace QueryStageFetch {
_client.dropCollection(ns());
}
- void getLocs(set<DiskLoc>* out, Collection* coll) {
+ void getLocs(set<RecordId>* out, Collection* coll) {
RecordIterator* it = coll->getIterator(&_txn);
while (!it->isEOF()) {
- DiskLoc nextLoc = it->getNext();
+ RecordId nextLoc = it->getNext();
out->insert(nextLoc);
}
delete it;
@@ -100,7 +100,7 @@ namespace QueryStageFetch {
// Add an object to the DB.
insert(BSON("foo" << 5));
- set<DiskLoc> locs;
+ set<RecordId> locs;
getLocs(&locs, coll);
ASSERT_EQUALS(size_t(1), locs.size());
@@ -117,7 +117,7 @@ namespace QueryStageFetch {
mockStage->pushBack(mockMember);
mockMember.state = WorkingSetMember::OWNED_OBJ;
- mockMember.loc = DiskLoc();
+ mockMember.loc = RecordId();
mockMember.obj = BSON("foo" << 6);
ASSERT_TRUE(mockMember.obj.isOwned());
mockStage->pushBack(mockMember);
@@ -162,7 +162,7 @@ namespace QueryStageFetch {
// Add an object to the DB.
insert(BSON("foo" << 5));
- set<DiskLoc> locs;
+ set<RecordId> locs;
getLocs(&locs, coll);
ASSERT_EQUALS(size_t(1), locs.size());
diff --git a/src/mongo/dbtests/query_stage_keep.cpp b/src/mongo/dbtests/query_stage_keep.cpp
index 1589a267933..40c49905102 100644
--- a/src/mongo/dbtests/query_stage_keep.cpp
+++ b/src/mongo/dbtests/query_stage_keep.cpp
@@ -61,10 +61,10 @@ namespace QueryStageKeep {
_client.dropCollection(ns());
}
- void getLocs(set<DiskLoc>* out, Collection* coll) {
+ void getLocs(set<RecordId>* out, Collection* coll) {
RecordIterator* it = coll->getIterator(&_txn);
while (!it->isEOF()) {
- DiskLoc nextLoc = it->getNext();
+ RecordId nextLoc = it->getNext();
out->insert(nextLoc);
}
delete it;
@@ -135,7 +135,7 @@ namespace QueryStageKeep {
params.collection = coll;
params.direction = CollectionScanParams::FORWARD;
params.tailable = false;
- params.start = DiskLoc();
+ params.start = RecordId();
CollectionScan* cs = new CollectionScan(&_txn, params, &ws, NULL);
// Create a KeepMutations stage to merge in the 10 flagged objects.
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 059cfe9e7e7..53173b26780 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -72,10 +72,10 @@ namespace QueryStageMergeSortTests {
_client.remove(ns(), obj);
}
- void getLocs(set<DiskLoc>* out, Collection* coll) {
+ void getLocs(set<RecordId>* out, Collection* coll) {
RecordIterator* it = coll->getIterator(&_txn);
while (!it->isEOF()) {
- DiskLoc nextLoc = it->getNext();
+ RecordId nextLoc = it->getNext();
out->insert(nextLoc);
}
delete it;
@@ -547,10 +547,10 @@ namespace QueryStageMergeSortTests {
ms->addChild(new IndexScan(&_txn, params, &ws, NULL));
}
- set<DiskLoc> locs;
+ set<RecordId> locs;
getLocs(&locs, coll);
- set<DiskLoc>::iterator it = locs.begin();
+ set<RecordId>::iterator it = locs.begin();
// Get 10 results. Should be getting results in order of 'locs'.
int count = 0;
diff --git a/src/mongo/dbtests/query_stage_near.cpp b/src/mongo/dbtests/query_stage_near.cpp
index 961c2022897..c44a9157a27 100644
--- a/src/mongo/dbtests/query_stage_near.cpp
+++ b/src/mongo/dbtests/query_stage_near.cpp
@@ -88,7 +88,7 @@ namespace {
virtual void restoreState(OperationContext* opCtx) {
}
- virtual void invalidate(OperationContext* txn, const DiskLoc& dl, InvalidationType type) {
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
}
virtual vector<PlanStage*> getChildren() const {
return vector<PlanStage*>();
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index 4dfa777d720..58506417a70 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -65,10 +65,10 @@ namespace QueryStageSortTests {
_client.insert(ns(), obj);
}
- void getLocs(set<DiskLoc>* out, Collection* coll) {
+ void getLocs(set<RecordId>* out, Collection* coll) {
RecordIterator* it = coll->getIterator(&_txn);
while (!it->isEOF()) {
- DiskLoc nextLoc = it->getNext();
+ RecordId nextLoc = it->getNext();
out->insert(nextLoc);
}
delete it;
@@ -78,10 +78,10 @@ namespace QueryStageSortTests {
* We feed a mix of (key, unowned, owned) data to the sort stage.
*/
void insertVarietyOfObjects(MockStage* ms, Collection* coll) {
- set<DiskLoc> locs;
+ set<RecordId> locs;
getLocs(&locs, coll);
- set<DiskLoc>::iterator it = locs.begin();
+ set<RecordId>::iterator it = locs.begin();
for (int i = 0; i < numObj(); ++i, ++it) {
ASSERT_FALSE(it == locs.end());
@@ -269,7 +269,7 @@ namespace QueryStageSortTests {
fillData();
// The data we're going to later invalidate.
- set<DiskLoc> locs;
+ set<RecordId> locs;
getLocs(&locs, coll);
// Build the mock scan stage which feeds the data.
@@ -294,7 +294,7 @@ namespace QueryStageSortTests {
// We should have read in the first 'firstRead' locs. Invalidate the first.
ss->saveState();
- set<DiskLoc>::iterator it = locs.begin();
+ set<RecordId>::iterator it = locs.begin();
ss->invalidate(&_txn, *it++, INVALIDATION_DELETION);
ss->restoreState(&_txn);
diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp
index bc87aaef668..2ed9925716c 100644
--- a/src/mongo/dbtests/query_stage_tests.cpp
+++ b/src/mongo/dbtests/query_stage_tests.cpp
@@ -90,7 +90,7 @@ namespace QueryStageTests {
boost::scoped_ptr<PlanExecutor> exec(rawExec);
int count = 0;
- for (DiskLoc dl; PlanExecutor::ADVANCED == exec->getNext(NULL, &dl); ) {
+ for (RecordId dl; PlanExecutor::ADVANCED == exec->getNext(NULL, &dl); ) {
++count;
}
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index 75df12f9978..624eec54a71 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -124,7 +124,7 @@ namespace QueryStageUpdate {
void getLocs(Collection* collection,
CollectionScanParams::Direction direction,
- vector<DiskLoc>* out) {
+ vector<RecordId>* out) {
WorkingSet ws;
CollectionScanParams params;
@@ -257,8 +257,8 @@ namespace QueryStageUpdate {
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(&_txn, ns());
- // Get the DiskLocs that would be returned by an in-order scan.
- vector<DiskLoc> locs;
+ // Get the RecordIds that would be returned by an in-order scan.
+ vector<RecordId> locs;
getLocs(coll, CollectionScanParams::FORWARD, &locs);
UpdateRequest request(nsString());
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 43e882e8250..fd87c858afb 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -486,7 +486,7 @@ namespace QueryTests {
// Inserting a document into a capped collection can force another document out.
// In this case, the capped collection has 2 documents, so inserting two more clobbers
- // whatever DiskLoc that the underlying cursor had as its state.
+ // whatever RecordId the underlying cursor had as its state.
//
// In the Cursor world, the ClientCursor was responsible for manipulating cursors. It
// would detect that the cursor's "refloc" (translation: diskloc required to maintain
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index f8ebd0d45bd..914a6d1ec1e 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -183,7 +183,7 @@ namespace ReplTests {
RecordIterator* it = coll->getIterator(&_txn);
while ( !it->isEOF() ) {
- DiskLoc currLoc = it->getNext();
+ RecordId currLoc = it->getNext();
ops.push_back(coll->docFor(&_txn, currLoc));
}
delete it;
@@ -220,7 +220,7 @@ namespace ReplTests {
RecordIterator* it = coll->getIterator(&_txn);
::mongo::log() << "all for " << ns << endl;
while ( !it->isEOF() ) {
- DiskLoc currLoc = it->getNext();
+ RecordId currLoc = it->getNext();
::mongo::log() << coll->docFor(&_txn, currLoc).toString() << endl;
}
delete it;
@@ -237,13 +237,13 @@ namespace ReplTests {
coll = db->createCollection( &_txn, ns );
}
- vector< DiskLoc > toDelete;
+ vector< RecordId > toDelete;
RecordIterator* it = coll->getIterator(&_txn);
while ( !it->isEOF() ) {
toDelete.push_back( it->getNext() );
}
delete it;
- for( vector< DiskLoc >::iterator i = toDelete.begin(); i != toDelete.end(); ++i ) {
+ for( vector< RecordId >::iterator i = toDelete.begin(); i != toDelete.end(); ++i ) {
coll->deleteDocument( &_txn, *i, true );
}
wunit.commit();
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index 842b58832d8..9f124d9a647 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -83,11 +83,11 @@ namespace {
Collection* coll = dbHolder().get( txn, nss.db() )->getCollection( txn, nss.ns() );
return coll->truncate( txn );
}
- DiskLoc insertRecord( OperationContext* txn,
+ RecordId insertRecord( OperationContext* txn,
const NamespaceString& nss,
const BSONObj& data ) {
Collection* coll = dbHolder().get( txn, nss.db() )->getCollection( txn, nss.ns() );
- StatusWith<DiskLoc> status = coll->insertDocument( txn, data, false );
+ StatusWith<RecordId> status = coll->insertDocument( txn, data, false );
ASSERT_OK( status.getStatus() );
return status.getValue();
}
@@ -97,7 +97,7 @@ namespace {
Collection* coll = dbHolder().get( txn, nss.db() )->getCollection( txn, nss.ns() );
scoped_ptr<RecordIterator> iter( coll->getIterator( txn ) );
ASSERT( !iter->isEOF() );
- DiskLoc loc = iter->getNext();
+ RecordId loc = iter->getNext();
ASSERT( iter->isEOF() );
ASSERT_EQ( data, coll->docFor( txn, loc ) );
}
@@ -642,10 +642,10 @@ namespace {
invariant(ice);
HeadManager* headManager = ice->headManager();
- const DiskLoc oldHead = headManager->getHead(&txn);
+ const RecordId oldHead = headManager->getHead(&txn);
ASSERT_EQ(oldHead, ice->head(&txn));
- const DiskLoc dummyHead(123, 456);
+ const RecordId dummyHead(123, 456);
ASSERT_NE(oldHead, dummyHead);
// END SETUP / START TEST
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index d76a32997cd..1cb77ca6f5a 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -516,7 +516,7 @@ namespace mongo {
// in that case we still want the number of records, for better reporting
bool isLargeChunk = false;
unsigned long long recCount = 0;
- DiskLoc dl;
+ RecordId dl;
while (PlanExecutor::ADVANCED == exec->getNext(NULL, &dl)) {
if ( ! isLargeChunk ) {
scoped_spinlock lk( _trackerLocks );
@@ -577,14 +577,14 @@ namespace mongo {
Collection* collection = ctx.getCollection();
scoped_spinlock lk( _trackerLocks );
- set<DiskLoc>::iterator i = _cloneLocs.begin();
+ set<RecordId>::iterator i = _cloneLocs.begin();
for ( ; i!=_cloneLocs.end(); ++i ) {
if (tracker.intervalHasElapsed()) // should I yield?
break;
invariant( collection );
- DiskLoc dl = *i;
+ RecordId dl = *i;
BSONObj o;
if ( !collection->findDoc( txn, dl, &o ) ) {
// doc was deleted
@@ -612,7 +612,7 @@ namespace mongo {
return true;
}
- void aboutToDelete( const DiskLoc& dl ) {
+ void aboutToDelete( const RecordId& dl ) {
// Even though we call findDoc above to check for existence,
// that check only works for non-mmapv1 engines, and this hook
// is still needed for mmapv1.
@@ -680,7 +680,7 @@ namespace mongo {
// no locking needed because built initially by 1 thread in a read lock
// emptied by 1 thread in a read lock
// updates applied by 1 thread in a write lock
- set<DiskLoc> _cloneLocs;
+ set<RecordId> _cloneLocs;
list<BSONObj> _reload; // objects that were modified that must be recloned
list<BSONObj> _deleted; // objects deleted during clone that should be deleted later
@@ -697,7 +697,7 @@ namespace mongo {
class DeleteNotificationStage : public PlanStage {
public:
virtual void invalidate(OperationContext* txn,
- const DiskLoc& dl,
+ const RecordId& dl,
InvalidationType type);
virtual StageState work(WorkingSetID* out) {
@@ -741,7 +741,7 @@ namespace mongo {
} migrateFromStatus;
void MigrateFromStatus::DeleteNotificationStage::invalidate(OperationContext *txn,
- const DiskLoc& dl,
+ const RecordId& dl,
InvalidationType type) {
if ( type == INVALIDATION_DELETION ) {
migrateFromStatus.aboutToDelete( dl );
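
The filter above forwards only deletions to the migration state; other invalidation types are ignored. A sketch of that dispatch, with the enum and types as stand-ins assumed from the call sites in this diff:

    #include <cstdint>
    #include <set>

    enum InvalidationTypeSketch { INVALIDATION_DELETION_S, INVALIDATION_MUTATION_S };

    struct MigrateFromSketch {
        std::set<uint64_t> cloneLocs;
        void aboutToDelete(uint64_t id) { cloneLocs.erase(id); }
    };

    void onInvalidate(MigrateFromSketch& status, uint64_t id, InvalidationTypeSketch type) {
        if (type == INVALIDATION_DELETION_S)
            status.aboutToDelete(id);  // drop the id before its document vanishes
    }
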
@@ -875,10 +875,10 @@ namespace mongo {
// 2. Make sure my view is complete and lock the distributed lock to ensure shard
// metadata stability.
// 3. Migration
- // Retrieve all DiskLocs, which need to be migrated in order to do as little seeking
- // as possible during transfer. Retrieval of the DiskLocs happens under a collection
+ // Retrieve all RecordIds, which need to be migrated in order to do as little seeking
+ // as possible during transfer. Retrieval of the RecordIds happens under a collection
// lock, but then the collection lock is dropped. This opens up an opportunity for
- // repair or compact to invalidate these DiskLocs, because these commands do not
+ // repair or compact to invalidate these RecordIds, because these commands do not
// synchronize with migration. Note that data modifications are not a problem,
// because we are registered for change notifications.
//
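
Step 3 above snapshots every RecordId under the collection lock, then fetches the documents later and simply skips ids whose documents have since been deleted (the change-notification path keeps modifications safe). A sketch of that drain over stand-in types:

    #include <cstdint>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    typedef std::map<uint64_t, std::string> DocStoreSketch;  // id -> document

    std::vector<std::string> drainCloneLocs(const DocStoreSketch& store,
                                            std::set<uint64_t>& cloneLocs) {
        std::vector<std::string> batch;
        for (std::set<uint64_t>::iterator i = cloneLocs.begin();
             i != cloneLocs.end();
             /* erase advances the iterator */) {
            DocStoreSketch::const_iterator doc = store.find(*i);
            if (doc != store.end())
                batch.push_back(doc->second);  // still present: ship it
            cloneLocs.erase(i++);              // deleted docs are skipped silently
        }
        return batch;
    }
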
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index fc897132594..dae9292fca3 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -165,7 +165,7 @@ namespace mongo {
// TODO if $exist for nulls were picking the index, it could be used instead efficiently
int keyPatternLength = keyPattern.nFields();
- DiskLoc loc;
+ RecordId loc;
BSONObj currKey;
while (PlanExecutor::ADVANCED == exec->getNext(&currKey, &loc)) {
//check that current key contains non missing elements for all fields in keyPattern
diff --git a/src/mongo/s/d_state.h b/src/mongo/s/d_state.h
index 2baab3a3ad4..d9edb1159b9 100644
--- a/src/mongo/s/d_state.h
+++ b/src/mongo/s/d_state.h
@@ -38,7 +38,7 @@
namespace mongo {
class Database;
- class DiskLoc;
+ class RecordId;
class OperationContext;
// --------------