author     Eliot Horowitz <eliot@10gen.com>  2014-08-25 17:25:39 -0400
committer  Eliot Horowitz <eliot@10gen.com>  2014-08-25 18:08:37 -0400
commit     f0bb5123f50bb617eb9499539f01779c6e9f5e95 (patch)
tree       98cc4abf3c89881691158345435523c5dd6ae37f /src/mongo/db
parent     cb3f5cfa43d9565675d2f36c4b0f7cecbad47a49 (diff)
download   mongo-f0bb5123f50bb617eb9499539f01779c6e9f5e95.tar.gz
SERVER-13635: OperationContext on read paths
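The change is mechanical but wide: read-path accessors that previously took no arguments now receive the caller's OperationContext* ("txn"), and each caller threads the txn it already holds down to the storage layer. Below is a minimal C++ sketch of the before/after shape; the types are simplified stand-ins for illustration, not the real MongoDB classes (compare the Collection::averageObjectSize hunk in collection.h further down).

    #include <stdint.h>

    struct OperationContext {};  // stand-in for mongo::OperationContext

    class RecordStoreLike {      // stand-in for RecordStore
    public:
        virtual ~RecordStoreLike() {}
        // Before: no-argument const accessors, e.g.
        //   virtual int64_t numRecords() const = 0;
        // After: the operation's context is passed explicitly.
        virtual int64_t numRecords(OperationContext* txn) const = 0;
        virtual int64_t dataSize(OperationContext* txn) const = 0;
    };

    class CollectionLike {       // stand-in for Collection
    public:
        explicit CollectionLike(RecordStoreLike* rs) : _recordStore(rs) {}

        // Collection-level accessors forward the same txn downward rather
        // than consulting any ambient global state.
        int64_t numRecords(OperationContext* txn) const {
            return _recordStore->numRecords(txn);
        }
        int64_t dataSize(OperationContext* txn) const {
            return _recordStore->dataSize(txn);
        }
        int averageObjectSize(OperationContext* txn) const {
            const int64_t n = numRecords(txn);
            if (n == 0)
                return 5;  // same fallback the real code uses for empty collections
            return static_cast<int>(dataSize(txn) / n);
        }

    private:
        RecordStoreLike* _recordStore;  // not owned
    };

The same pattern repeats in IndexCatalog (numIndexesTotal, findIdIndex, getIndexIterator and the find* helpers), in the catalog entry interfaces, and in the executor stages, which instead take and store the txn at construction time.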
Diffstat (limited to 'src/mongo/db')
 src/mongo/db/auth/auth_index_d.cpp | 2
 src/mongo/db/catalog/collection.cpp | 40
 src/mongo/db/catalog/collection.h | 12
 src/mongo/db/catalog/collection_catalog_entry.h | 19
 src/mongo/db/catalog/collection_compact.cpp | 4
 src/mongo/db/catalog/collection_info_cache.cpp | 4
 src/mongo/db/catalog/collection_info_cache.h | 6
 src/mongo/db/catalog/database.cpp | 12
 src/mongo/db/catalog/head_manager.h | 2
 src/mongo/db/catalog/index_catalog.cpp | 106
 src/mongo/db/catalog/index_catalog.h | 37
 src/mongo/db/catalog/index_catalog_entry.cpp | 40
 src/mongo/db/catalog/index_catalog_entry.h | 15
 src/mongo/db/catalog/index_create.cpp | 4
 src/mongo/db/cloner.cpp | 2
 src/mongo/db/commands/collection_to_capped.cpp | 2
 src/mongo/db/commands/count.cpp | 2
 src/mongo/db/commands/create_indexes.cpp | 4
 src/mongo/db/commands/dbhash.cpp | 2
 src/mongo/db/commands/drop_indexes.cpp | 12
 src/mongo/db/commands/geo_near_cmd.cpp | 10
 src/mongo/db/commands/list_indexes.cpp | 4
 src/mongo/db/commands/mr.cpp | 4
 src/mongo/db/commands/parallel_collection_scan.cpp | 2
 src/mongo/db/commands/rename_collection.cpp | 6
 src/mongo/db/db.cpp | 2
 src/mongo/db/dbcommands.cpp | 24
 src/mongo/db/dbhelpers.cpp | 20
 src/mongo/db/exec/and_hash.cpp | 16
 src/mongo/db/exec/and_hash.h | 9
 src/mongo/db/exec/and_sorted.cpp | 8
 src/mongo/db/exec/and_sorted.h | 4
 src/mongo/db/exec/collection_scan.cpp | 2
 src/mongo/db/exec/count.cpp | 6
 src/mongo/db/exec/fetch.cpp | 8
 src/mongo/db/exec/fetch.h | 11
 src/mongo/db/exec/geo_near.cpp | 10
 src/mongo/db/exec/idhack.cpp | 4
 src/mongo/db/exec/index_scan.cpp | 4
 src/mongo/db/exec/merge_sort.cpp | 8
 src/mongo/db/exec/merge_sort.h | 4
 src/mongo/db/exec/multi_iterator.cpp | 2
 src/mongo/db/exec/multi_iterator.h | 6
 src/mongo/db/exec/multi_plan.cpp | 20
 src/mongo/db/exec/multi_plan.h | 3
 src/mongo/db/exec/near.cpp | 2
 src/mongo/db/exec/oplogstart.cpp | 4
 src/mongo/db/exec/sort.cpp | 10
 src/mongo/db/exec/sort.h | 6
 src/mongo/db/exec/sort_test.cpp | 4
 src/mongo/db/exec/stagedebug_cmd.cpp | 18
 src/mongo/db/exec/subplan.cpp | 5
 src/mongo/db/exec/text.cpp | 18
 src/mongo/db/exec/working_set_common.cpp | 7
 src/mongo/db/exec/working_set_common.h | 4
 src/mongo/db/geo/haystack.cpp | 2
 src/mongo/db/index/btree_based_access_method.cpp | 10
 src/mongo/db/index/btree_based_bulk_access_method.h | 2
 src/mongo/db/index/haystack_access_method.cpp | 2
 src/mongo/db/index/haystack_access_method_internal.h | 11
 src/mongo/db/index/index_descriptor.h | 5
 src/mongo/db/index_rebuilder.cpp | 2
 src/mongo/db/ops/update.cpp | 2
 src/mongo/db/ops/update_lifecycle.h | 4
 src/mongo/db/ops/update_lifecycle_impl.cpp | 4
 src/mongo/db/ops/update_lifecycle_impl.h | 2
 src/mongo/db/prefetch.cpp | 7
 src/mongo/db/query/get_executor.cpp | 24
 src/mongo/db/query/get_executor.h | 3
 src/mongo/db/query/internal_plans.h | 2
 src/mongo/db/query/new_find.cpp | 2
 src/mongo/db/query/stage_builder.cpp | 26
 src/mongo/db/repl/oplog.cpp | 8
 src/mongo/db/repl/rs_initialsync.cpp | 2
 src/mongo/db/repl/rs_rollback.cpp | 2
 src/mongo/db/storage/SConscript | 8
 src/mongo/db/storage/bson_collection_catalog_entry.cpp | 162
 src/mongo/db/storage/bson_collection_catalog_entry.h | 105
 src/mongo/db/storage/heap1/heap1_btree_impl.cpp | 2
 src/mongo/db/storage/heap1/heap1_database_catalog_entry.cpp | 19
 src/mongo/db/storage/heap1/heap1_database_catalog_entry.h | 19
 src/mongo/db/storage/heap1/heap1_test.cpp | 4
 src/mongo/db/storage/heap1/record_store_heap.cpp | 14
 src/mongo/db/storage/heap1/record_store_heap.h | 8
 src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp | 12
 src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp | 185
 src/mongo/db/storage/mmap_v1/btree/btree_logic.h | 25
 src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp | 240
 src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp | 12
 src/mongo/db/storage/mmap_v1/btree/btree_test_help.h | 2
 src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp | 5
 src/mongo/db/storage/mmap_v1/catalog/namespace_details.h | 3
 src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp | 70
 src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h | 21
 src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp | 2
 src/mongo/db/storage/mmap_v1/heap_record_store_btree.h | 6
 src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp | 2
 src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp | 2
 src/mongo/db/storage/mmap_v1/record_store_v1_base.h | 8
 src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp | 2
 src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp | 2
 src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp | 2
 src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp | 2
 src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp | 2
 src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp | 8
 src/mongo/db/storage/mmap_v1/repair_database.cpp | 6
 src/mongo/db/storage/record_data.h | 2
 src/mongo/db/storage/record_store.h | 6
 src/mongo/db/storage/rocks/SConscript | 1
 src/mongo/db/storage/rocks/rocks_collection_catalog_entry.cpp | 124
 src/mongo/db/storage/rocks/rocks_collection_catalog_entry.h | 50
 src/mongo/db/storage/rocks/rocks_engine.cpp | 2
 src/mongo/db/storage/rocks/rocks_engine_test.cpp | 49
 src/mongo/db/storage/rocks/rocks_record_store.cpp | 14
 src/mongo/db/storage/rocks/rocks_record_store.h | 10
 src/mongo/db/storage/rocks/rocks_record_store_test.cpp | 60
 src/mongo/db/storage/rocks/rocks_sorted_data_impl.cpp | 2
 src/mongo/db/storage/rocks/rocks_sorted_data_impl.h | 2
 src/mongo/db/storage/sorted_data_interface.h | 2
 src/mongo/db/ttl.cpp | 2
 120 files changed, 1149 insertions(+), 861 deletions(-)
diff --git a/src/mongo/db/auth/auth_index_d.cpp b/src/mongo/db/auth/auth_index_d.cpp
index b614e8e1602..841a740e7ba 100644
--- a/src/mongo/db/auth/auth_index_d.cpp
+++ b/src/mongo/db/auth/auth_index_d.cpp
@@ -92,7 +92,7 @@ namespace {
}
IndexCatalog* indexCatalog = collection->getIndexCatalog();
IndexDescriptor* oldIndex = NULL;
- while ((oldIndex = indexCatalog->findIndexByKeyPattern(v1SystemUsersKeyPattern))) {
+ while ((oldIndex = indexCatalog->findIndexByKeyPattern(txn, v1SystemUsersKeyPattern))) {
indexCatalog->dropIndex(txn, oldIndex);
}
wctx.commit();
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 3966ccae972..614841758bd 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -149,7 +149,7 @@ namespace mongo {
int64_t count = 0;
while ( !iterator->isEOF() ) {
DiskLoc loc = iterator->getNext();
- BSONObj obj = docFor( loc );
+ BSONObj obj = docFor( txn, loc );
if ( expression->matchesBSON( obj ) )
count++;
}
@@ -157,14 +157,14 @@ namespace mongo {
return count;
}
- BSONObj Collection::docFor(const DiskLoc& loc) const {
- return _recordStore->dataFor( loc ).toBson();
+ BSONObj Collection::docFor(OperationContext* txn, const DiskLoc& loc) const {
+ return _recordStore->dataFor( txn, loc ).toBson();
}
StatusWith<DiskLoc> Collection::insertDocument( OperationContext* txn,
const DocWriter* doc,
bool enforceQuota ) {
- verify( _indexCatalog.numIndexesTotal() == 0 ); // eventually can implement, just not done
+ verify( _indexCatalog.numIndexesTotal( txn ) == 0 ); // eventually can implement, just not done
StatusWith<DiskLoc> loc = _recordStore->insertRecord( txn,
doc,
@@ -178,7 +178,7 @@ namespace mongo {
StatusWith<DiskLoc> Collection::insertDocument( OperationContext* txn,
const BSONObj& docToInsert,
bool enforceQuota ) {
- if ( _indexCatalog.findIdIndex() ) {
+ if ( _indexCatalog.findIdIndex( txn ) ) {
if ( docToInsert["_id"].eoo() ) {
return StatusWith<DiskLoc>( ErrorCodes::InternalError,
str::stream() << "Collection::insertDocument got "
@@ -255,7 +255,7 @@ namespace mongo {
Status Collection::aboutToDeleteCapped( OperationContext* txn, const DiskLoc& loc ) {
- BSONObj doc = docFor( loc );
+ BSONObj doc = docFor( txn, loc );
/* check if any cursors point to us. if so, advance them. */
_cursorCache.invalidateDocument(loc, INVALIDATION_DELETION);
@@ -276,7 +276,7 @@ namespace mongo {
return;
}
- BSONObj doc = docFor( loc );
+ BSONObj doc = docFor( txn, loc );
if ( deletedId ) {
BSONElement e = doc["_id"];
@@ -304,7 +304,7 @@ namespace mongo {
bool enforceQuota,
OpDebug* debug ) {
- BSONObj objOld = _recordStore->dataFor( oldLocation ).toBson();
+ BSONObj objOld = _recordStore->dataFor( txn, oldLocation ).toBson();
if ( objOld.hasElement( "_id" ) ) {
BSONElement oldId = objOld["_id"];
@@ -327,7 +327,7 @@ namespace mongo {
below. that is suboptimal, but it's pretty complicated to do it the other way without rollbacks...
*/
OwnedPointerMap<IndexDescriptor*,UpdateTicket> updateTickets;
- IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( true );
+ IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( txn, true );
while ( ii.more() ) {
IndexDescriptor* descriptor = ii.next();
IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
@@ -376,7 +376,7 @@ namespace mongo {
if ( debug )
debug->keyUpdates = 0;
- ii = _indexCatalog.getIndexIterator( true );
+ ii = _indexCatalog.getIndexIterator( txn, true );
while ( ii.more() ) {
IndexDescriptor* descriptor = ii.next();
IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
@@ -438,12 +438,12 @@ namespace mongo {
return _recordStore->isCapped();
}
- uint64_t Collection::numRecords() const {
- return _recordStore->numRecords();
+ uint64_t Collection::numRecords( OperationContext* txn ) const {
+ return _recordStore->numRecords( txn );
}
- uint64_t Collection::dataSize() const {
- return _recordStore->dataSize();
+ uint64_t Collection::dataSize( OperationContext* txn ) const {
+ return _recordStore->dataSize( txn );
}
/**
@@ -454,12 +454,12 @@ namespace mongo {
* 4) re-write indexes
*/
Status Collection::truncate(OperationContext* txn) {
- massert( 17445, "index build in progress", _indexCatalog.numIndexesInProgress() == 0 );
+ massert( 17445, "index build in progress", _indexCatalog.numIndexesInProgress( txn ) == 0 );
// 1) store index specs
vector<BSONObj> indexSpecs;
{
- IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( false );
+ IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( txn, false );
while ( ii.more() ) {
const IndexDescriptor* idx = ii.next();
indexSpecs.push_back( idx->infoObj().getOwned() );
@@ -522,11 +522,11 @@ namespace mongo {
return status;
{ // indexes
- output->append("nIndexes", _indexCatalog.numIndexesReady() );
+ output->append("nIndexes", _indexCatalog.numIndexesReady( txn ) );
int idxn = 0;
try {
BSONObjBuilder indexes; // not using subObjStart to be exception safe
- IndexCatalog::IndexIterator i = _indexCatalog.getIndexIterator(false);
+ IndexCatalog::IndexIterator i = _indexCatalog.getIndexIterator(txn, false);
while( i.more() ) {
const IndexDescriptor* descriptor = i.next();
log(LogComponent::kIndexing) << "validating index " << descriptor->indexNamespace() << endl;
@@ -567,7 +567,7 @@ namespace mongo {
if ( touchIndexes ) {
Timer t;
- IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( false );
+ IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( txn, false );
while ( ii.more() ) {
const IndexDescriptor* desc = ii.next();
const IndexAccessMethod* iam = _indexCatalog.getIndex( desc );
@@ -576,7 +576,7 @@ namespace mongo {
return status;
}
- output->append( "indexes", BSON( "num" << _indexCatalog.numIndexesTotal() <<
+ output->append( "indexes", BSON( "num" << _indexCatalog.numIndexesTotal( txn ) <<
"millis" << t.millis() ) );
}
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 9ca4c76e97b..bfc9add1235 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -128,7 +128,7 @@ namespace mongo {
bool requiresIdIndex() const;
- BSONObj docFor(const DiskLoc& loc) const;
+ BSONObj docFor(OperationContext* txn, const DiskLoc& loc) const;
// ---- things that should move to a CollectionAccessMethod like thing
/**
@@ -246,15 +246,15 @@ namespace mongo {
bool isCapped() const;
- uint64_t numRecords() const;
+ uint64_t numRecords( OperationContext* txn ) const;
- uint64_t dataSize() const;
+ uint64_t dataSize( OperationContext* txn ) const;
- int averageObjectSize() const {
- uint64_t n = numRecords();
+ int averageObjectSize( OperationContext* txn ) const {
+ uint64_t n = numRecords( txn );
if ( n == 0 )
return 5;
- return static_cast<int>( dataSize() / n );
+ return static_cast<int>( dataSize( txn ) / n );
}
// --- end suspect things
diff --git a/src/mongo/db/catalog/collection_catalog_entry.h b/src/mongo/db/catalog/collection_catalog_entry.h
index 1b0175b59c8..d537df5d223 100644
--- a/src/mongo/db/catalog/collection_catalog_entry.h
+++ b/src/mongo/db/catalog/collection_catalog_entry.h
@@ -53,29 +53,34 @@ namespace mongo {
virtual CollectionOptions getCollectionOptions( OperationContext* txn ) const = 0;
- virtual int getTotalIndexCount() const = 0;
+ virtual int getTotalIndexCount( OperationContext* txn ) const = 0;
- virtual int getCompletedIndexCount() const = 0;
+ virtual int getCompletedIndexCount( OperationContext* txn ) const = 0;
virtual int getMaxAllowedIndexes() const = 0;
- virtual void getAllIndexes( std::vector<std::string>* names ) const = 0;
+ virtual void getAllIndexes( OperationContext* txn,
+ std::vector<std::string>* names ) const = 0;
- virtual BSONObj getIndexSpec( const StringData& idxName ) const = 0;
+ virtual BSONObj getIndexSpec( OperationContext* txn,
+ const StringData& idxName ) const = 0;
- virtual bool isIndexMultikey( const StringData& indexName) const = 0;
+ virtual bool isIndexMultikey( OperationContext* txn,
+ const StringData& indexName) const = 0;
virtual bool setIndexIsMultikey(OperationContext* txn,
const StringData& indexName,
bool multikey = true) = 0;
- virtual DiskLoc getIndexHead( const StringData& indexName ) const = 0;
+ virtual DiskLoc getIndexHead( OperationContext* txn,
+ const StringData& indexName ) const = 0;
virtual void setIndexHead( OperationContext* txn,
const StringData& indexName,
const DiskLoc& newHead ) = 0;
- virtual bool isIndexReady( const StringData& indexName ) const = 0;
+ virtual bool isIndexReady( OperationContext* txn,
+ const StringData& indexName ) const = 0;
virtual Status removeIndex( OperationContext* txn,
const StringData& indexName ) = 0;
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index b1d0594ee2e..06c20a8030a 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -104,7 +104,7 @@ namespace mongo {
"cannot compact collection with record store: " <<
_recordStore->name() );
- if ( _indexCatalog.numIndexesInProgress() )
+ if ( _indexCatalog.numIndexesInProgress( txn ) )
return StatusWith<CompactStats>( ErrorCodes::BadValue,
"cannot compact when indexes in progress" );
@@ -114,7 +114,7 @@ namespace mongo {
vector<BSONObj> indexSpecs;
{
- IndexCatalog::IndexIterator ii( _indexCatalog.getIndexIterator( false ) );
+ IndexCatalog::IndexIterator ii( _indexCatalog.getIndexIterator( txn, false ) );
while ( ii.more() ) {
IndexDescriptor* descriptor = ii.next();
diff --git a/src/mongo/db/catalog/collection_info_cache.cpp b/src/mongo/db/catalog/collection_info_cache.cpp
index f0482cad178..d9783b6db07 100644
--- a/src/mongo/db/catalog/collection_info_cache.cpp
+++ b/src/mongo/db/catalog/collection_info_cache.cpp
@@ -60,10 +60,10 @@ namespace mongo {
// index filters should persist throughout life of collection
}
- void CollectionInfoCache::computeIndexKeys() {
+ void CollectionInfoCache::computeIndexKeys( OperationContext* txn ) {
_indexedPaths.clear();
- IndexCatalog::IndexIterator i = _collection->getIndexCatalog()->getIndexIterator(true);
+ IndexCatalog::IndexIterator i = _collection->getIndexCatalog()->getIndexIterator(txn, true);
while (i.more()) {
IndexDescriptor* descriptor = i.next();
diff --git a/src/mongo/db/catalog/collection_info_cache.h b/src/mongo/db/catalog/collection_info_cache.h
index 3948c55e5c0..092d436b2c7 100644
--- a/src/mongo/db/catalog/collection_info_cache.h
+++ b/src/mongo/db/catalog/collection_info_cache.h
@@ -73,9 +73,9 @@ namespace mongo {
/* get set of index keys for this namespace. handy to quickly check if a given
field is indexed (Note it might be a secondary component of a compound index.)
*/
- const UpdateIndexData& indexKeys() {
+ const UpdateIndexData& indexKeys( OperationContext* txn ) {
if ( !_keysComputed )
- computeIndexKeys();
+ computeIndexKeys( txn );
return _indexedPaths;
}
@@ -109,7 +109,7 @@ namespace mongo {
/**
* Must be called under exclusive DB lock.
*/
- void computeIndexKeys();
+ void computeIndexKeys( OperationContext* txn );
};
} // namespace mongo
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index e0a4e02f259..34c7186e782 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -253,7 +253,7 @@ namespace mongo {
IndexCatalog* idxCatalog = coll->getIndexCatalog();
- IndexCatalog::IndexIterator ii = idxCatalog->getIndexIterator( true );
+ IndexCatalog::IndexIterator ii = idxCatalog->getIndexIterator( opCtx, true );
long long totalSize = 0;
@@ -292,14 +292,14 @@ namespace mongo {
continue;
ncollections += 1;
- objects += collection->numRecords();
- size += collection->dataSize();
+ objects += collection->numRecords(opCtx);
+ size += collection->dataSize(opCtx);
BSONObjBuilder temp;
storageSize += collection->getRecordStore()->storageSize( opCtx, &temp );
numExtents += temp.obj()["numExtents"].numberInt(); // XXX
- indexes += collection->getIndexCatalog()->numIndexesTotal();
+ indexes += collection->getIndexCatalog()->numIndexesTotal( opCtx );
indexSize += getIndexSizeForCollection(opCtx, collection);
}
@@ -364,7 +364,7 @@ namespace mongo {
return Status( ErrorCodes::InternalError, ss.str() );
}
- verify( collection->_details->getTotalIndexCount() == 0 );
+ verify( collection->_details->getTotalIndexCount( txn ) == 0 );
LOG(1) << "\t dropIndexes done" << endl;
Top::global.collectionDropped( fullns );
@@ -445,7 +445,7 @@ namespace mongo {
Collection* coll = getCollection( txn, fromNS );
if ( !coll )
return Status( ErrorCodes::NamespaceNotFound, "collection not found to rename" );
- IndexCatalog::IndexIterator ii = coll->getIndexCatalog()->getIndexIterator( true );
+ IndexCatalog::IndexIterator ii = coll->getIndexCatalog()->getIndexIterator( txn, true );
while ( ii.more() ) {
IndexDescriptor* desc = ii.next();
_clearCollectionCache( desc->indexNamespace() );
diff --git a/src/mongo/db/catalog/head_manager.h b/src/mongo/db/catalog/head_manager.h
index bdb993ee3e3..9b9ceeb84c3 100644
--- a/src/mongo/db/catalog/head_manager.h
+++ b/src/mongo/db/catalog/head_manager.h
@@ -42,7 +42,7 @@ namespace mongo {
public:
virtual ~HeadManager() { }
- virtual const DiskLoc getHead() const = 0;
+ virtual const DiskLoc getHead(OperationContext* txn) const = 0;
virtual void setHead(OperationContext* txn, const DiskLoc newHead) = 0;
};
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 58f27c6a35b..48f640c0b89 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -88,24 +88,26 @@ namespace mongo {
Status IndexCatalog::init(OperationContext* txn) {
vector<string> indexNames;
- _collection->getCatalogEntry()->getAllIndexes( &indexNames );
+ _collection->getCatalogEntry()->getAllIndexes( txn, &indexNames );
for ( size_t i = 0; i < indexNames.size(); i++ ) {
const string& indexName = indexNames[i];
- BSONObj spec = _collection->getCatalogEntry()->getIndexSpec( indexName ).getOwned();
+ BSONObj spec = _collection->getCatalogEntry()->getIndexSpec( txn,
+ indexName ).getOwned();
- if ( !_collection->getCatalogEntry()->isIndexReady( indexName ) ) {
+ if ( !_collection->getCatalogEntry()->isIndexReady( txn, indexName ) ) {
_unfinishedIndexes.push_back( spec );
continue;
}
BSONObj keyPattern = spec.getObjectField("key");
IndexDescriptor* descriptor = new IndexDescriptor( _collection,
- _getAccessMethodName(txn, keyPattern),
+ _getAccessMethodName(txn,
+ keyPattern),
spec );
IndexCatalogEntry* entry = _setupInMemoryStructures( txn, descriptor );
- fassert( 17340, entry->isReady() );
+ fassert( 17340, entry->isReady( txn ) );
}
if ( _unfinishedIndexes.size() ) {
@@ -128,9 +130,9 @@ namespace mongo {
descriptorCleanup.release(),
_collection->infoCache() ) );
- entry->init( _collection->_database->_dbEntry->getIndex( txn,
- _collection->getCatalogEntry(),
- entry.get() ) );
+ entry->init( txn, _collection->_database->_dbEntry->getIndex( txn,
+ _collection->getCatalogEntry(),
+ entry.get() ) );
IndexCatalogEntry* save = entry.get();
_entries.add( entry.release() );
@@ -302,7 +304,7 @@ namespace {
Status IndexCatalog::createIndexOnEmptyCollection(OperationContext* txn, BSONObj spec) {
txn->lockState()->assertWriteLocked( _collection->_database->name() );
- invariant(_collection->numRecords() == 0);
+ invariant(_collection->numRecords(txn) == 0);
_checkMagic();
Status status = checkUnfinished();
@@ -345,7 +347,7 @@ namespace {
indexBuildBlock.success();
// sanity check
- invariant(_collection->getCatalogEntry()->isIndexReady(descriptor->indexName()));
+ invariant(_collection->getCatalogEntry()->isIndexReady(txn, descriptor->indexName()));
return Status::OK();
}
@@ -449,7 +451,7 @@ namespace {
_catalog->_collection->infoCache()->addedIndex();
- IndexDescriptor* desc = _catalog->findIndexByName( _indexName, true );
+ IndexDescriptor* desc = _catalog->findIndexByName( _txn, _indexName, true );
fassert( 17330, desc );
IndexCatalogEntry* entry = _catalog->_entries.find( desc );
fassert( 17331, entry && entry == _entry );
@@ -548,7 +550,7 @@ namespace {
{
// Check both existing and in-progress indexes (2nd param = true)
- const IndexDescriptor* desc = findIndexByName( name, true );
+ const IndexDescriptor* desc = findIndexByName( txn, name, true );
if ( desc ) {
// index already exists with same name
@@ -575,7 +577,7 @@ namespace {
{
// Check both existing and in-progress indexes (2nd param = true)
- const IndexDescriptor* desc = findIndexByKeyPattern(key, true);
+ const IndexDescriptor* desc = findIndexByKeyPattern(txn, key, true);
if (desc) {
LOG(2) << "index already exists with diff name " << name
<< ' ' << key << endl;
@@ -592,7 +594,7 @@ namespace {
}
}
- if ( _collection->getCatalogEntry()->getTotalIndexCount() >=
+ if ( _collection->getCatalogEntry()->getTotalIndexCount( txn ) >=
_collection->getCatalogEntry()->getMaxAllowedIndexes() ) {
string s = str::stream() << "add index fails, too many indexes for "
<< _collection->ns().ns() << " key:" << key.toString();
@@ -606,7 +608,7 @@ namespace {
if ( pluginName == IndexNames::TEXT ) {
vector<IndexDescriptor*> textIndexes;
const bool includeUnfinishedIndexes = true;
- findIndexByType( IndexNames::TEXT, textIndexes, includeUnfinishedIndexes );
+ findIndexByType( txn, IndexNames::TEXT, textIndexes, includeUnfinishedIndexes );
if ( textIndexes.size() > 0 ) {
return Status( ErrorCodes::CannotCreateIndex,
str::stream() << "only one text index per collection allowed, "
@@ -641,14 +643,14 @@ namespace {
// make sure nothing in progress
massert( 17348,
"cannot dropAllIndexes when index builds in progress",
- numIndexesTotal() == numIndexesReady() );
+ numIndexesTotal(txn) == numIndexesReady(txn) );
bool haveIdIndex = false;
vector<string> indexNamesToDrop;
{
int seen = 0;
- IndexIterator ii = getIndexIterator( true );
+ IndexIterator ii = getIndexIterator( txn, true );
while ( ii.more() ) {
seen++;
IndexDescriptor* desc = ii.next();
@@ -658,12 +660,12 @@ namespace {
}
indexNamesToDrop.push_back( desc->indexName() );
}
- invariant( seen == numIndexesTotal() );
+ invariant( seen == numIndexesTotal(txn) );
}
for ( size_t i = 0; i < indexNamesToDrop.size(); i++ ) {
string indexName = indexNamesToDrop[i];
- IndexDescriptor* desc = findIndexByName( indexName, true );
+ IndexDescriptor* desc = findIndexByName( txn, indexName, true );
invariant( desc );
LOG(1) << "\t dropAllIndexes dropping: " << desc->toString();
IndexCatalogEntry* entry = _entries.find( desc );
@@ -674,24 +676,24 @@ namespace {
// verify state is sane post cleaning
long long numIndexesInCollectionCatalogEntry =
- _collection->getCatalogEntry()->getTotalIndexCount();
+ _collection->getCatalogEntry()->getTotalIndexCount( txn );
if ( haveIdIndex ) {
- fassert( 17324, numIndexesTotal() == 1 );
- fassert( 17325, numIndexesReady() == 1 );
+ fassert( 17324, numIndexesTotal(txn) == 1 );
+ fassert( 17325, numIndexesReady(txn) == 1 );
fassert( 17326, numIndexesInCollectionCatalogEntry == 1 );
fassert( 17336, _entries.size() == 1 );
}
else {
- if ( numIndexesTotal() || numIndexesInCollectionCatalogEntry || _entries.size() ) {
+ if ( numIndexesTotal(txn) || numIndexesInCollectionCatalogEntry || _entries.size() ) {
error() << "About to fassert - "
- << " numIndexesTotal(): " << numIndexesTotal()
+ << " numIndexesTotal(): " << numIndexesTotal(txn)
<< " numSystemIndexesEntries: " << numIndexesInCollectionCatalogEntry
<< " _entries.size(): " << _entries.size()
<< " indexNamesToDrop: " << indexNamesToDrop.size()
<< " haveIdIndex: " << haveIdIndex;
}
- fassert( 17327, numIndexesTotal() == 0 );
+ fassert( 17327, numIndexesTotal(txn) == 0 );
fassert( 17328, numIndexesInCollectionCatalogEntry == 0 );
fassert( 17337, _entries.size() == 0 );
}
@@ -708,7 +710,7 @@ namespace {
if ( !entry )
return Status( ErrorCodes::InternalError, "cannot find index to delete" );
- if ( !entry->isReady() )
+ if ( !entry->isReady( txn ) )
return Status( ErrorCodes::InternalError, "cannot delete not ready index" );
BackgroundOperation::assertNoBgOpInProgForNs( _collection->ns().ns() );
@@ -808,30 +810,32 @@ namespace {
return toReturn;
}
- bool IndexCatalog::isMultikey( const IndexDescriptor* idx ) {
+ bool IndexCatalog::isMultikey( OperationContext* txn, const IndexDescriptor* idx ) {
IndexCatalogEntry* entry = _entries.find( idx );
invariant( entry );
- return entry->isMultikey();
+ return entry->isMultikey( txn );
}
// ---------------------------
- int IndexCatalog::numIndexesTotal() const {
- return _collection->getCatalogEntry()->getTotalIndexCount();
+ int IndexCatalog::numIndexesTotal( OperationContext* txn ) const {
+ return _collection->getCatalogEntry()->getTotalIndexCount( txn );
}
- int IndexCatalog::numIndexesReady() const {
- return _collection->getCatalogEntry()->getCompletedIndexCount();
+ int IndexCatalog::numIndexesReady( OperationContext* txn ) const {
+ return _collection->getCatalogEntry()->getCompletedIndexCount( txn );
}
- bool IndexCatalog::haveIdIndex() const {
- return findIdIndex() != NULL;
+ bool IndexCatalog::haveIdIndex( OperationContext* txn ) const {
+ return findIdIndex( txn ) != NULL;
}
- IndexCatalog::IndexIterator::IndexIterator( const IndexCatalog* cat,
+ IndexCatalog::IndexIterator::IndexIterator( OperationContext* txn,
+ const IndexCatalog* cat,
bool includeUnfinishedIndexes )
: _includeUnfinishedIndexes( includeUnfinishedIndexes ),
+ _txn( txn ),
_catalog( cat ),
_iterator( cat->_entries.begin() ),
_start( true ),
@@ -868,7 +872,7 @@ namespace {
++_iterator;
if ( _includeUnfinishedIndexes ||
- entry->isReady() ) {
+ entry->isReady(_txn) ) {
_next = entry;
return;
}
@@ -877,8 +881,8 @@ namespace {
}
- IndexDescriptor* IndexCatalog::findIdIndex() const {
- IndexIterator ii = getIndexIterator( false );
+ IndexDescriptor* IndexCatalog::findIdIndex( OperationContext* txn ) const {
+ IndexIterator ii = getIndexIterator( txn, false );
while ( ii.more() ) {
IndexDescriptor* desc = ii.next();
if ( desc->isIdIndex() )
@@ -887,9 +891,10 @@ namespace {
return NULL;
}
- IndexDescriptor* IndexCatalog::findIndexByName( const StringData& name,
+ IndexDescriptor* IndexCatalog::findIndexByName( OperationContext* txn,
+ const StringData& name,
bool includeUnfinishedIndexes ) const {
- IndexIterator ii = getIndexIterator( includeUnfinishedIndexes );
+ IndexIterator ii = getIndexIterator( txn, includeUnfinishedIndexes );
while ( ii.more() ) {
IndexDescriptor* desc = ii.next();
if ( desc->indexName() == name )
@@ -898,9 +903,10 @@ namespace {
return NULL;
}
- IndexDescriptor* IndexCatalog::findIndexByKeyPattern( const BSONObj& key,
+ IndexDescriptor* IndexCatalog::findIndexByKeyPattern( OperationContext* txn,
+ const BSONObj& key,
bool includeUnfinishedIndexes ) const {
- IndexIterator ii = getIndexIterator( includeUnfinishedIndexes );
+ IndexIterator ii = getIndexIterator( txn, includeUnfinishedIndexes );
while ( ii.more() ) {
IndexDescriptor* desc = ii.next();
if ( desc->keyPattern() == key )
@@ -909,18 +915,19 @@ namespace {
return NULL;
}
- IndexDescriptor* IndexCatalog::findIndexByPrefix( const BSONObj &keyPattern,
+ IndexDescriptor* IndexCatalog::findIndexByPrefix( OperationContext* txn,
+ const BSONObj &keyPattern,
bool requireSingleKey ) const {
IndexDescriptor* best = NULL;
- IndexIterator ii = getIndexIterator( false );
+ IndexIterator ii = getIndexIterator( txn, false );
while ( ii.more() ) {
IndexDescriptor* desc = ii.next();
if ( !keyPattern.isPrefixOf( desc->keyPattern() ) )
continue;
- if( !desc->isMultikey() )
+ if( !desc->isMultikey( txn ) )
return desc;
if ( !requireSingleKey )
@@ -930,9 +937,10 @@ namespace {
return best;
}
- void IndexCatalog::findIndexByType( const string& type , vector<IndexDescriptor*>& matches,
+ void IndexCatalog::findIndexByType( OperationContext* txn,
+ const string& type, vector<IndexDescriptor*>& matches,
bool includeUnfinishedIndexes ) const {
- IndexIterator ii = getIndexIterator( includeUnfinishedIndexes );
+ IndexIterator ii = getIndexIterator( txn, includeUnfinishedIndexes );
while ( ii.more() ) {
IndexDescriptor* desc = ii.next();
if ( IndexNames::findPluginName( desc->keyPattern() ) == type ) {
@@ -1051,13 +1059,13 @@ namespace {
IndexCatalogEntry* entry = *i;
// If it's a background index, we DO NOT want to log anything.
- bool logIfError = entry->isReady() ? !noWarn : false;
+ bool logIfError = entry->isReady(txn) ? !noWarn : false;
_unindexRecord(txn, entry, obj, loc, logIfError);
}
}
Status IndexCatalog::checkNoIndexConflicts( OperationContext* txn, const BSONObj &obj ) {
- IndexIterator ii = getIndexIterator( true );
+ IndexIterator ii = getIndexIterator( txn, true );
while ( ii.more() ) {
IndexDescriptor* descriptor = ii.next();
diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h
index 2f4ed0b677e..a3646d9ead2 100644
--- a/src/mongo/db/catalog/index_catalog.h
+++ b/src/mongo/db/catalog/index_catalog.h
@@ -62,44 +62,50 @@ namespace mongo {
// ---- accessors -----
- int numIndexesTotal() const;
- int numIndexesReady() const;
- int numIndexesInProgress() const { return numIndexesTotal() - numIndexesReady(); }
+ int numIndexesTotal( OperationContext* txn ) const;
+ int numIndexesReady( OperationContext* txn ) const;
+ int numIndexesInProgress( OperationContext* txn ) const {
+ return numIndexesTotal(txn) - numIndexesReady(txn);
+ }
/**
* this is in "alive" until the Collection goes away
* in which case everything from this tree has to go away
*/
- bool haveIdIndex() const;
+ bool haveIdIndex( OperationContext* txn ) const;
/**
* Returns the spec for the id index to create by default for this collection.
*/
BSONObj getDefaultIdIndexSpec() const;
- IndexDescriptor* findIdIndex() const;
+ IndexDescriptor* findIdIndex( OperationContext* txn ) const;
/**
* @return null if cannot find
*/
- IndexDescriptor* findIndexByName( const StringData& name,
+ IndexDescriptor* findIndexByName( OperationContext* txn,
+ const StringData& name,
bool includeUnfinishedIndexes = false ) const;
/**
* @return null if cannot find
*/
- IndexDescriptor* findIndexByKeyPattern( const BSONObj& key,
+ IndexDescriptor* findIndexByKeyPattern( OperationContext* txn,
+ const BSONObj& key,
bool includeUnfinishedIndexes = false ) const;
/* Returns the index entry for the first index whose prefix contains
* 'keyPattern'. If 'requireSingleKey' is true, skip indices that contain
* array attributes. Otherwise, returns NULL.
*/
- IndexDescriptor* findIndexByPrefix( const BSONObj &keyPattern,
+ IndexDescriptor* findIndexByPrefix( OperationContext* txn,
+ const BSONObj &keyPattern,
bool requireSingleKey ) const;
- void findIndexByType( const std::string& type,
+ void findIndexByType( OperationContext* txn,
+ const std::string& type,
std::vector<IndexDescriptor*>& matches,
bool includeUnfinishedIndexes = false ) const;
@@ -123,11 +129,15 @@ namespace mongo {
// returns the access method for the last return IndexDescriptor
IndexAccessMethod* accessMethod( IndexDescriptor* desc );
private:
- IndexIterator( const IndexCatalog* cat, bool includeUnfinishedIndexes );
+ IndexIterator( OperationContext* txn,
+ const IndexCatalog* cat,
+ bool includeUnfinishedIndexes );
void _advance();
bool _includeUnfinishedIndexes;
+
+ OperationContext* _txn;
const IndexCatalog* _catalog;
IndexCatalogEntryContainer::const_iterator _iterator;
@@ -139,8 +149,9 @@ namespace mongo {
friend class IndexCatalog;
};
- IndexIterator getIndexIterator( bool includeUnfinishedIndexes ) const {
- return IndexIterator( this, includeUnfinishedIndexes );
+ IndexIterator getIndexIterator( OperationContext* txn,
+ bool includeUnfinishedIndexes ) const {
+ return IndexIterator( txn, this, includeUnfinishedIndexes );
};
// ---- index set modifiers ------
@@ -182,7 +193,7 @@ namespace mongo {
// ---- modify single index
- bool isMultikey( const IndexDescriptor* idex );
+ bool isMultikey( OperationContext* txn, const IndexDescriptor* idex );
// --- these probably become private?
diff --git a/src/mongo/db/catalog/index_catalog_entry.cpp b/src/mongo/db/catalog/index_catalog_entry.cpp
index 511bddf7cb0..fc0c171975c 100644
--- a/src/mongo/db/catalog/index_catalog_entry.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry.cpp
@@ -49,8 +49,8 @@ namespace mongo {
HeadManagerImpl(IndexCatalogEntry* ice) : _catalogEntry(ice) { }
virtual ~HeadManagerImpl() { }
- const DiskLoc getHead() const {
- return _catalogEntry->head();
+ const DiskLoc getHead(OperationContext* txn) const {
+ return _catalogEntry->head(txn);
}
void setHead(OperationContext* txn, const DiskLoc newHead) {
@@ -85,27 +85,28 @@ namespace mongo {
delete _descriptor;
}
- void IndexCatalogEntry::init( IndexAccessMethod* accessMethod ) {
+ void IndexCatalogEntry::init( OperationContext* txn,
+ IndexAccessMethod* accessMethod ) {
verify( _accessMethod == NULL );
_accessMethod = accessMethod;
- _isReady = _catalogIsReady();
- _head = _catalogHead();
- _isMultikey = _catalogIsMultikey();
+ _isReady = _catalogIsReady( txn );
+ _head = _catalogHead( txn );
+ _isMultikey = _catalogIsMultikey( txn );
}
- const DiskLoc& IndexCatalogEntry::head() const {
- DEV verify( _head == _catalogHead() );
+ const DiskLoc& IndexCatalogEntry::head( OperationContext* txn ) const {
+ DEV invariant( _head == _catalogHead( txn ) );
return _head;
}
- bool IndexCatalogEntry::isReady() const {
- DEV verify( _isReady == _catalogIsReady() );
+ bool IndexCatalogEntry::isReady( OperationContext* txn ) const {
+ DEV invariant( _isReady == _catalogIsReady( txn ) );
return _isReady;
}
- bool IndexCatalogEntry::isMultikey() const {
- DEV verify( _isMultikey == _catalogIsMultikey() );
+ bool IndexCatalogEntry::isMultikey( OperationContext* txn ) const {
+ DEV invariant( _isMultikey == _catalogIsMultikey( txn ) );
return _isMultikey;
}
@@ -113,7 +114,6 @@ namespace mongo {
void IndexCatalogEntry::setIsReady( bool newIsReady ) {
_isReady = newIsReady;
- verify( isReady() == newIsReady );
}
void IndexCatalogEntry::setHead( OperationContext* txn, DiskLoc newHead ) {
@@ -124,7 +124,7 @@ namespace mongo {
}
void IndexCatalogEntry::setMultikey( OperationContext* txn ) {
- if ( isMultikey() )
+ if ( isMultikey( txn ) )
return;
if ( _collection->setIndexIsMultikey( txn,
_descriptor->indexName(),
@@ -140,16 +140,16 @@ namespace mongo {
// ----
- bool IndexCatalogEntry::_catalogIsReady() const {
- return _collection->isIndexReady( _descriptor->indexName() );
+ bool IndexCatalogEntry::_catalogIsReady( OperationContext* txn ) const {
+ return _collection->isIndexReady( txn, _descriptor->indexName() );
}
- DiskLoc IndexCatalogEntry::_catalogHead() const {
- return _collection->getIndexHead( _descriptor->indexName() );
+ DiskLoc IndexCatalogEntry::_catalogHead( OperationContext* txn ) const {
+ return _collection->getIndexHead( txn, _descriptor->indexName() );
}
- bool IndexCatalogEntry::_catalogIsMultikey() const {
- return _collection->isIndexMultikey( _descriptor->indexName() );
+ bool IndexCatalogEntry::_catalogIsMultikey( OperationContext* txn ) const {
+ return _collection->isIndexMultikey( txn, _descriptor->indexName() );
}
// ------------------
diff --git a/src/mongo/db/catalog/index_catalog_entry.h b/src/mongo/db/catalog/index_catalog_entry.h
index 77f19c8d99b..79f33f06f91 100644
--- a/src/mongo/db/catalog/index_catalog_entry.h
+++ b/src/mongo/db/catalog/index_catalog_entry.h
@@ -57,7 +57,8 @@ namespace mongo {
const string& ns() const { return _ns; }
- void init( IndexAccessMethod* accessMethod );
+ void init( OperationContext* txn,
+ IndexAccessMethod* accessMethod );
IndexDescriptor* descriptor() { return _descriptor; }
const IndexDescriptor* descriptor() const { return _descriptor; }
@@ -69,7 +70,7 @@ namespace mongo {
/// ---------------------
- const DiskLoc& head() const;
+ const DiskLoc& head( OperationContext* txn ) const;
void setHead( OperationContext* txn, DiskLoc newHead );
@@ -79,18 +80,18 @@ namespace mongo {
// --
- bool isMultikey() const;
+ bool isMultikey( OperationContext* txn ) const;
void setMultikey( OperationContext* txn );
        // if this index is ready for queries
- bool isReady() const;
+ bool isReady( OperationContext* txn ) const;
private:
- bool _catalogIsReady() const;
- DiskLoc _catalogHead() const;
- bool _catalogIsMultikey() const;
+ bool _catalogIsReady( OperationContext* txn ) const;
+ DiskLoc _catalogHead( OperationContext* txn ) const;
+ bool _catalogIsMultikey( OperationContext* txn ) const;
// -----
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index 86bed4182fe..07031334378 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -195,7 +195,7 @@ namespace mongo {
const char* curopMessage = _buildInBackground ? "Index Build (background)" : "Index Build";
ProgressMeter* progress = _txn->setMessage(curopMessage,
curopMessage,
- _collection->numRecords());
+ _collection->numRecords(_txn));
Timer t;
@@ -233,7 +233,7 @@ namespace mongo {
if (_allowInterruption)
_txn->checkForInterrupt();
- progress->setTotalWhileRunning( _collection->numRecords() );
+ progress->setTotalWhileRunning( _collection->numRecords(_txn) );
}
progress->finished();
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index abe3883284e..3453f4b2976 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -530,7 +530,7 @@ namespace mongo {
db);
Collection* c = db->getCollection( txn, to_name );
- if ( c && !c->getIndexCatalog()->haveIdIndex() ) {
+ if ( c && !c->getIndexCatalog()->haveIdIndex( txn ) ) {
// We need to drop objects with duplicate _ids because we didn't do a true
// snapshot and this is before applying oplog operations that occur during the
// initial sync.
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 8cf2b0d8ad2..2a4f60cee31 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -86,7 +86,7 @@ namespace mongo {
std::max( static_cast<long long>(size * 2),
static_cast<long long>(toCollection->getRecordStore()->storageSize(txn) * 2));
- long long excessSize = fromCollection->dataSize() - allocatedSpaceGuess;
+ long long excessSize = fromCollection->dataSize(txn) - allocatedSpaceGuess;
scoped_ptr<PlanExecutor> exec( InternalPlanner::collectionScan(txn,
fromNs,
diff --git a/src/mongo/db/commands/count.cpp b/src/mongo/db/commands/count.cpp
index ea74c4e7c2b..89c0de4940b 100644
--- a/src/mongo/db/commands/count.cpp
+++ b/src/mongo/db/commands/count.cpp
@@ -89,7 +89,7 @@ namespace mongo {
// count of all objects
if (query.isEmpty()) {
- return applySkipLimit(collection->numRecords(), cmd);
+ return applySkipLimit(collection->numRecords(txn), cmd);
}
long long skip = cmd["skip"].numberLong();
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 4b5c1a869ba..d7c3e05ccdc 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -150,7 +150,7 @@ namespace mongo {
wunit.commit();
}
- result.append( "numIndexesBefore", collection->getIndexCatalog()->numIndexesTotal() );
+ result.append( "numIndexesBefore", collection->getIndexCatalog()->numIndexesTotal(txn) );
MultiIndexBlock indexer(txn, collection);
indexer.allowBackgroundBuilding();
@@ -198,7 +198,7 @@ namespace mongo {
wunit.commit();
}
- result.append( "numIndexesAfter", collection->getIndexCatalog()->numIndexesTotal() );
+ result.append( "numIndexesAfter", collection->getIndexCatalog()->numIndexesTotal(txn) );
return true;
}
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 2125dd85e8c..7518365a53a 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -81,7 +81,7 @@ namespace mongo {
if ( !collection )
return "";
- IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex();
+ IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex( opCtx );
auto_ptr<PlanExecutor> exec;
if ( desc ) {
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 51484b2f39a..fd3e8e6b57e 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -138,7 +138,7 @@ namespace mongo {
stopIndexBuilds(txn, db, jsobj);
IndexCatalog* indexCatalog = collection->getIndexCatalog();
- anObjBuilder.appendNumber("nIndexesWas", indexCatalog->numIndexesTotal() );
+ anObjBuilder.appendNumber("nIndexesWas", indexCatalog->numIndexesTotal(txn) );
BSONElement f = jsobj.getField("index");
@@ -156,7 +156,8 @@ namespace mongo {
return true;
}
- IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName( indexToDelete );
+ IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName( txn,
+ indexToDelete );
if ( desc == NULL ) {
errmsg = str::stream() << "index not found with name [" << indexToDelete << "]";
return false;
@@ -177,7 +178,8 @@ namespace mongo {
}
if ( f.type() == Object ) {
- IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByKeyPattern( f.embeddedObject() );
+ IndexDescriptor* desc =
+ collection->getIndexCatalog()->findIndexByKeyPattern( txn, f.embeddedObject() );
if ( desc == NULL ) {
errmsg = "can't find index with key:";
errmsg += f.embeddedObject().toString();
@@ -254,10 +256,10 @@ namespace mongo {
vector<BSONObj> all;
{
vector<string> indexNames;
- collection->getCatalogEntry()->getAllIndexes( &indexNames );
+ collection->getCatalogEntry()->getAllIndexes( txn, &indexNames );
for ( size_t i = 0; i < indexNames.size(); i++ ) {
const string& name = indexNames[i];
- BSONObj spec = collection->getCatalogEntry()->getIndexSpec( name );
+ BSONObj spec = collection->getCatalogEntry()->getIndexSpec( txn, name );
all.push_back(spec.removeField("v").getOwned());
const BSONObj key = spec.getObjectField("key");
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index 187fdf1caad..7b1be4adc82 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -97,7 +97,7 @@ namespace mongo {
// We seek to populate this.
string nearFieldName;
bool using2DIndex = false;
- if (!getFieldName(collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
+ if (!getFieldName(txn, collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
return false;
}
@@ -261,12 +261,12 @@ namespace mongo {
}
private:
- bool getFieldName(Collection* collection, IndexCatalog* indexCatalog, string* fieldOut,
- string* errOut, bool *isFrom2D) {
+ bool getFieldName(OperationContext* txn, Collection* collection, IndexCatalog* indexCatalog,
+ string* fieldOut, string* errOut, bool *isFrom2D) {
vector<IndexDescriptor*> idxs;
// First, try 2d.
- collection->getIndexCatalog()->findIndexByType(IndexNames::GEO_2D, idxs);
+ collection->getIndexCatalog()->findIndexByType(txn, IndexNames::GEO_2D, idxs);
if (idxs.size() > 1) {
*errOut = "more than one 2d index, not sure which to run geoNear on";
return false;
@@ -287,7 +287,7 @@ namespace mongo {
// Next, 2dsphere.
idxs.clear();
- collection->getIndexCatalog()->findIndexByType(IndexNames::GEO_2DSPHERE, idxs);
+ collection->getIndexCatalog()->findIndexByType(txn, IndexNames::GEO_2DSPHERE, idxs);
if (0 == idxs.size()) {
*errOut = "no geo indices for geoNear";
return false;
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index 8f88b9260db..7849417e370 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -84,11 +84,11 @@ namespace mongo {
}
vector<string> indexNames;
- cce->getAllIndexes( &indexNames );
+ cce->getAllIndexes( txn, &indexNames );
BSONArrayBuilder arr;
for ( size_t i = 0; i < indexNames.size(); i++ ) {
- arr.append( cce->getIndexSpec( indexNames[i] ) );
+ arr.append( cce->getIndexSpec( txn, indexNames[i] ) );
}
result.append( "indexes", arr.arr() );
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 1721868f018..2a5570d8d72 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -376,7 +376,7 @@ namespace mongo {
finalCtx.ctx().db()->getCollection(_txn, _config.outputOptions.finalNamespace);
if ( finalColl ) {
IndexCatalog::IndexIterator ii =
- finalColl->getIndexCatalog()->getIndexIterator( true );
+ finalColl->getIndexCatalog()->getIndexIterator( _txn, true );
// Iterate over finalColl's indexes.
while ( ii.more() ) {
IndexDescriptor* currIndex = ii.next();
@@ -961,7 +961,7 @@ namespace mongo {
bool foundIndex = false;
IndexCatalog::IndexIterator ii =
- incColl->getIndexCatalog()->getIndexIterator( true );
+ incColl->getIndexCatalog()->getIndexIterator( _txn, true );
// Iterate over incColl's indexes.
while ( ii.more() ) {
IndexDescriptor* currIndex = ii.next();
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index b4d8fa440e8..8e4cac7f76e 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -103,7 +103,7 @@ namespace mongo {
OwnedPointerVector<PlanExecutor> execs;
for ( size_t i = 0; i < numCursors; i++ ) {
WorkingSet* ws = new WorkingSet();
- MultiIteratorStage* mis = new MultiIteratorStage(ws, collection);
+ MultiIteratorStage* mis = new MultiIteratorStage(txn, ws, collection);
// Takes ownership of 'ws' and 'mis'.
execs.push_back(new PlanExecutor(ws, mis, collection));
}
diff --git a/src/mongo/db/commands/rename_collection.cpp b/src/mongo/db/commands/rename_collection.cpp
index 872f115377c..0eb4555ad0e 100644
--- a/src/mongo/db/commands/rename_collection.cpp
+++ b/src/mongo/db/commands/rename_collection.cpp
@@ -152,7 +152,7 @@ namespace mongo {
// Ensure that index names do not push the length over the max.
// Iterator includes unfinished indexes.
IndexCatalog::IndexIterator sourceIndIt =
- sourceColl->getIndexCatalog()->getIndexIterator( true );
+ sourceColl->getIndexCatalog()->getIndexIterator( txn, true );
int longestIndexNameLength = 0;
while ( sourceIndIt.more() ) {
int thisLength = sourceIndIt.next()->indexName().length();
@@ -257,7 +257,7 @@ namespace mongo {
{
std::vector<BSONObj> indexesToCopy;
IndexCatalog::IndexIterator sourceIndIt =
- sourceColl->getIndexCatalog()->getIndexIterator( true );
+ sourceColl->getIndexCatalog()->getIndexIterator( txn, true );
while (sourceIndIt.more()) {
const BSONObj currIndex = sourceIndIt.next()->infoObj();
@@ -275,7 +275,7 @@ namespace mongo {
while (!sourceIt->isEOF()) {
txn->checkForInterrupt(false);
- const BSONObj obj = sourceColl->docFor(sourceIt->getNext());
+ const BSONObj obj = sourceColl->docFor(txn, sourceIt->getNext());
WriteUnitOfWork wunit(txn);
// No logOp necessary because the entire renameCollection command is one logOp.
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index c2d2888f942..e763f1e23af 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -313,7 +313,7 @@ namespace mongo {
if ( !coll )
continue;
- if ( coll->getIndexCatalog()->findIdIndex() )
+ if ( coll->getIndexCatalog()->findIdIndex( txn ) )
continue;
log() << "WARNING: the collection '" << *i
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index f38db4a747d..309b7fab336 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -461,7 +461,7 @@ namespace mongo {
return false;
}
- int numIndexes = coll->getIndexCatalog()->numIndexesTotal();
+ int numIndexes = coll->getIndexCatalog()->numIndexesTotal( txn );
stopIndexBuilds(txn, db, cmdObj);
@@ -785,7 +785,7 @@ namespace mongo {
Collection* collection = ctx.ctx().db()->getCollection( txn, ns );
- if ( !collection || collection->numRecords() == 0 ) {
+ if ( !collection || collection->numRecords(txn) == 0 ) {
result.appendNumber( "size" , 0 );
result.appendNumber( "numObjects" , 0 );
result.append( "millis" , timer.millis() );
@@ -797,9 +797,9 @@ namespace mongo {
auto_ptr<PlanExecutor> exec;
if ( min.isEmpty() && max.isEmpty() ) {
if ( estimate ) {
- result.appendNumber( "size" , static_cast<long long>(collection->dataSize()) );
+ result.appendNumber( "size" , static_cast<long long>(collection->dataSize(txn)) );
result.appendNumber( "numObjects",
- static_cast<long long>( collection->numRecords() ) );
+ static_cast<long long>( collection->numRecords(txn) ) );
result.append( "millis" , timer.millis() );
return 1;
}
@@ -817,7 +817,7 @@ namespace mongo {
}
IndexDescriptor *idx =
- collection->getIndexCatalog()->findIndexByPrefix( keyPattern, true ); /* require single key */
+ collection->getIndexCatalog()->findIndexByPrefix( txn, keyPattern, true ); /* require single key */
if ( idx == NULL ) {
errmsg = "couldn't find valid index containing key pattern";
@@ -831,7 +831,7 @@ namespace mongo {
exec.reset(InternalPlanner::indexScan(txn, collection, idx, min, max, false));
}
- long long avgObjSize = collection->dataSize() / collection->numRecords();
+ long long avgObjSize = collection->dataSize(txn) / collection->numRecords(txn);
long long maxSize = jsobj["maxSize"].numberLong();
long long maxObjects = jsobj["maxObjects"].numberLong();
@@ -845,7 +845,7 @@ namespace mongo {
if ( estimate )
size += avgObjSize;
else
- size += collection->getRecordStore()->dataFor(loc).size();
+ size += collection->getRecordStore()->dataFor(txn, loc).size();
numObjects++;
@@ -923,18 +923,18 @@ namespace mongo {
bool verbose = jsobj["verbose"].trueValue();
- long long size = collection->dataSize() / scale;
- long long numRecords = collection->numRecords();
+ long long size = collection->dataSize(txn) / scale;
+ long long numRecords = collection->numRecords(txn);
result.appendNumber( "count" , numRecords );
result.appendNumber( "size" , size );
if( numRecords )
- result.append( "avgObjSize" , collection->averageObjectSize() );
+ result.append( "avgObjSize" , collection->averageObjectSize(txn) );
result.appendNumber( "storageSize",
static_cast<long long>(collection->getRecordStore()->storageSize( txn, &result,
verbose ? 1 : 0 ) ) /
scale );
- result.append( "nindexes" , collection->getIndexCatalog()->numIndexesReady() );
+ result.append( "nindexes" , collection->getIndexCatalog()->numIndexesReady( txn ) );
collection->getRecordStore()->appendCustomStats( txn, &result, scale );
@@ -1017,7 +1017,7 @@ namespace mongo {
continue;
}
- IndexDescriptor* idx = coll->getIndexCatalog()->findIndexByKeyPattern( keyPattern );
+ IndexDescriptor* idx = coll->getIndexCatalog()->findIndexByKeyPattern( txn, keyPattern );
if ( idx == NULL ) {
errmsg = str::stream() << "cannot find index " << keyPattern
<< " for ns " << ns;
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index baccd4ab7df..959c4ca44ad 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -103,7 +103,7 @@ namespace mongo {
DiskLoc loc = findOne( txn, collection, query, requireIndex );
if ( loc.isNull() )
return false;
- result = collection->docFor(loc);
+ result = collection->docFor(txn, loc);
return true;
}
@@ -156,7 +156,7 @@ namespace mongo {
*nsFound = true;
IndexCatalog* catalog = collection->getIndexCatalog();
- const IndexDescriptor* desc = catalog->findIdIndex();
+ const IndexDescriptor* desc = catalog->findIdIndex( txn );
if ( !desc )
return false;
@@ -171,7 +171,7 @@ namespace mongo {
DiskLoc loc = accessMethod->findSingle( txn, query["_id"].wrap() );
if ( loc.isNull() )
return false;
- result = collection->docFor( loc );
+ result = collection->docFor( txn, loc );
return true;
}
@@ -180,7 +180,7 @@ namespace mongo {
const BSONObj& idquery) {
verify(collection);
IndexCatalog* catalog = collection->getIndexCatalog();
- const IndexDescriptor* desc = catalog->findIdIndex();
+ const IndexDescriptor* desc = catalog->findIdIndex( txn );
uassert(13430, "no _id index", desc);
// See SERVER-12397. This may not always be true.
BtreeBasedAccessMethod* accessMethod =
@@ -304,7 +304,8 @@ namespace mongo {
// Therefore, any multi-key index prefixed by shard key cannot be multikey over
// the shard key fields.
const IndexDescriptor* idx =
- collection->getIndexCatalog()->findIndexByPrefix(shardKeyPattern,
+ collection->getIndexCatalog()->findIndexByPrefix(txn,
+ shardKeyPattern,
false /* allow multi key */);
if ( idx == NULL )
@@ -369,7 +370,8 @@ namespace mongo {
break;
IndexDescriptor* desc =
- collection->getIndexCatalog()->findIndexByKeyPattern( indexKeyPattern.toBSON() );
+ collection->getIndexCatalog()->findIndexByKeyPattern( txn,
+ indexKeyPattern.toBSON() );
auto_ptr<PlanExecutor> exec(InternalPlanner::indexScan(txn, collection, desc,
min, max,
@@ -498,7 +500,7 @@ namespace mongo {
// Require single key
IndexDescriptor *idx =
- collection->getIndexCatalog()->findIndexByPrefix( range.keyPattern, true );
+ collection->getIndexCatalog()->findIndexByPrefix( txn, range.keyPattern, true );
if ( idx == NULL ) {
return Status( ErrorCodes::IndexNotFound, range.keyPattern.toString() );
@@ -510,10 +512,10 @@ namespace mongo {
// sizes will vary
long long avgDocsWhenFull;
long long avgDocSizeBytes;
- const long long totalDocsInNS = collection->numRecords();
+ const long long totalDocsInNS = collection->numRecords( txn );
if ( totalDocsInNS > 0 ) {
// TODO: Figure out what's up here
- avgDocSizeBytes = collection->dataSize() / totalDocsInNS;
+ avgDocSizeBytes = collection->dataSize( txn ) / totalDocsInNS;
avgDocsWhenFull = maxChunkSizeBytes / avgDocSizeBytes;
avgDocsWhenFull = std::min( kMaxDocsPerChunk + 1,
130 * avgDocsWhenFull / 100 /* slack */);
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index 26a4a72d7ea..fde17410232 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -51,10 +51,12 @@ namespace mongo {
// static
const char* AndHashStage::kStageType = "AND_HASH";
- AndHashStage::AndHashStage(WorkingSet* ws,
+ AndHashStage::AndHashStage(OperationContext* txn,
+ WorkingSet* ws,
const MatchExpression* filter,
const Collection* collection)
- : _collection(collection),
+ : _txn(txn),
+ _collection(collection),
_ws(ws),
_filter(filter),
_hashingChildren(true),
@@ -63,11 +65,13 @@ namespace mongo {
_memUsage(0),
_maxMemUsage(kDefaultMaxMemUsageBytes) {}
- AndHashStage::AndHashStage(WorkingSet* ws,
+ AndHashStage::AndHashStage(OperationContext* txn,
+ WorkingSet* ws,
const MatchExpression* filter,
const Collection* collection,
size_t maxMemUsage)
- : _collection(collection),
+ : _txn(txn),
+ _collection(collection),
_ws(ws),
_filter(filter),
_hashingChildren(true),
@@ -463,7 +467,7 @@ namespace mongo {
if (WorkingSet::INVALID_ID != _lookAheadResults[i]) {
WorkingSetMember* member = _ws->get(_lookAheadResults[i]);
if (member->hasLoc() && member->loc == dl) {
- WorkingSetCommon::fetchAndInvalidateLoc(member, _collection);
+ WorkingSetCommon::fetchAndInvalidateLoc(_txn, member, _collection);
_ws->flagForReview(_lookAheadResults[i]);
_lookAheadResults[i] = WorkingSet::INVALID_ID;
}
@@ -493,7 +497,7 @@ namespace mongo {
_memUsage -= member->getMemUsage();
// The loc is about to be invalidated. Fetch it and clear the loc.
- WorkingSetCommon::fetchAndInvalidateLoc(member, _collection);
+ WorkingSetCommon::fetchAndInvalidateLoc(_txn, member, _collection);
// Add the WSID to the to-be-reviewed list in the WS.
_ws->flagForReview(id);
diff --git a/src/mongo/db/exec/and_hash.h b/src/mongo/db/exec/and_hash.h
index 16197bb5c93..6f9e62f8931 100644
--- a/src/mongo/db/exec/and_hash.h
+++ b/src/mongo/db/exec/and_hash.h
@@ -52,12 +52,16 @@ namespace mongo {
*/
class AndHashStage : public PlanStage {
public:
- AndHashStage(WorkingSet* ws, const MatchExpression* filter, const Collection* collection);
+ AndHashStage(OperationContext* txn,
+ WorkingSet* ws,
+ const MatchExpression* filter,
+ const Collection* collection);
/**
* For testing only. Allows tests to set memory usage threshold.
*/
- AndHashStage(WorkingSet* ws,
+ AndHashStage(OperationContext* txn,
+ WorkingSet* ws,
const MatchExpression* filter,
const Collection* collection,
size_t maxMemUsage);
@@ -99,6 +103,7 @@ namespace mongo {
StageState workChild(size_t childNo, WorkingSetID* out);
// Not owned by us.
+ OperationContext* _txn;
const Collection* _collection;
// Not owned by us.
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index 8d8ff94d27e..e81401a7bd3 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -38,10 +38,12 @@ namespace mongo {
// static
const char* AndSortedStage::kStageType = "AND_SORTED";
- AndSortedStage::AndSortedStage(WorkingSet* ws,
+ AndSortedStage::AndSortedStage(OperationContext* txn,
+ WorkingSet* ws,
const MatchExpression* filter,
const Collection* collection)
- : _collection(collection),
+ : _txn(txn),
+ _collection(collection),
_ws(ws),
_filter(filter),
_targetNode(numeric_limits<size_t>::max()),
@@ -290,7 +292,7 @@ namespace mongo {
++_specificStats.flagged;
// The DiskLoc could still be a valid result so flag it and save it for later.
- WorkingSetCommon::fetchAndInvalidateLoc(_ws->get(_targetId), _collection);
+ WorkingSetCommon::fetchAndInvalidateLoc(_txn, _ws->get(_targetId), _collection);
_ws->flagForReview(_targetId);
_targetId = WorkingSet::INVALID_ID;
diff --git a/src/mongo/db/exec/and_sorted.h b/src/mongo/db/exec/and_sorted.h
index fe37d5be76e..870cbd1b1ab 100644
--- a/src/mongo/db/exec/and_sorted.h
+++ b/src/mongo/db/exec/and_sorted.h
@@ -53,7 +53,8 @@ namespace mongo {
*/
class AndSortedStage : public PlanStage {
public:
- AndSortedStage(WorkingSet* ws, const MatchExpression* filter, const Collection* collection);
+ AndSortedStage(OperationContext* txn,
+ WorkingSet* ws, const MatchExpression* filter, const Collection* collection);
virtual ~AndSortedStage();
void addChild(PlanStage* child);
@@ -86,6 +87,7 @@ namespace mongo {
PlanStage::StageState moveTowardTargetLoc(WorkingSetID* out);
// Not owned by us.
+ OperationContext* _txn;
const Collection* _collection;
// Not owned by us.
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index ab03475676e..5b8219db320 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -102,7 +102,7 @@ namespace mongo {
WorkingSetID id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
member->loc = nextLoc;
- member->obj = _params.collection->docFor(member->loc);
+ member->obj = _params.collection->docFor(_txn, member->loc);
member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
++_specificStats.docsTested;
diff --git a/src/mongo/db/exec/count.cpp b/src/mongo/db/exec/count.cpp
index b43f8227ad1..216e058c205 100644
--- a/src/mongo/db/exec/count.cpp
+++ b/src/mongo/db/exec/count.cpp
@@ -44,10 +44,10 @@ namespace mongo {
_btreeCursor(NULL),
_params(params),
_hitEnd(false),
- _shouldDedup(params.descriptor->isMultikey()),
+ _shouldDedup(params.descriptor->isMultikey(txn)),
_commonStats(kStageType) {
_specificStats.keyPattern = _params.descriptor->keyPattern();
- _specificStats.isMultiKey = _params.descriptor->isMultikey();
+ _specificStats.isMultiKey = _params.descriptor->isMultikey(txn);
}
void Count::initIndexCursor() {
@@ -192,7 +192,7 @@ namespace mongo {
_endCursor->seek(_params.endKey, _params.endKeyInclusive);
// This can change during yielding.
- _shouldDedup = _descriptor->isMultikey();
+ _shouldDedup = _descriptor->isMultikey(_txn);
checkEnd();
}
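The dedup flag above matters because a multikey index stores one key per array element, so a single document can surface once per key; a sketch of the guard (the _returned set of already-seen DiskLocs is assumed, not shown in this hunk):

    if ( _shouldDedup ) {
        if ( _returned.end() != _returned.find( loc ) )
            return PlanStage::NEED_TIME;      // this DiskLoc was already counted
        _returned.insert( loc );
    }

The flag is re-read through isMultikey(_txn) after a yield because a concurrent write may have turned the index multikey in the meantime.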
diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp
index 2290a77835f..5e42d09e21d 100644
--- a/src/mongo/db/exec/fetch.cpp
+++ b/src/mongo/db/exec/fetch.cpp
@@ -39,11 +39,13 @@ namespace mongo {
// static
const char* FetchStage::kStageType = "FETCH";
- FetchStage::FetchStage(WorkingSet* ws,
+ FetchStage::FetchStage(OperationContext* txn,
+ WorkingSet* ws,
PlanStage* child,
const MatchExpression* filter,
const Collection* collection)
- : _collection(collection),
+ : _txn(txn),
+ _collection(collection),
_ws(ws),
_child(child),
_filter(filter),
@@ -82,7 +84,7 @@ namespace mongo {
// Don't need index data anymore as we have an obj.
member->keyData.clear();
- member->obj = _collection->docFor(member->loc);
+ member->obj = _collection->docFor(_txn, member->loc);
member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
}
diff --git a/src/mongo/db/exec/fetch.h b/src/mongo/db/exec/fetch.h
index 6c5b4a3be23..5cce36d3225 100644
--- a/src/mongo/db/exec/fetch.h
+++ b/src/mongo/db/exec/fetch.h
@@ -45,10 +45,11 @@ namespace mongo {
*/
class FetchStage : public PlanStage {
public:
- FetchStage(WorkingSet* ws,
- PlanStage* child,
- const MatchExpression* filter,
- const Collection* collection);
+ FetchStage(OperationContext* txn,
+ WorkingSet* ws,
+ PlanStage* child,
+ const MatchExpression* filter,
+ const Collection* collection);
virtual ~FetchStage();
@@ -80,6 +81,8 @@ namespace mongo {
StageState returnIfMatches(WorkingSetMember* member, WorkingSetID memberID,
WorkingSetID* out);
+ OperationContext* _txn;
+
// Collection which is used by this stage. Used to resolve record ids retrieved by child
// stages. The lifetime of the collection must supersede that of the stage.
const Collection* _collection;
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index c9ac11ced74..f5217d068b6 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -437,11 +437,12 @@ namespace mongo {
class FetchStageWithMatch : public FetchStage {
public:
- FetchStageWithMatch(WorkingSet* ws,
+ FetchStageWithMatch(OperationContext* txn,
+ WorkingSet* ws,
PlanStage* child,
MatchExpression* filter,
const Collection* collection)
- : FetchStage(ws, child, filter, collection), _matcher(filter) {
+ : FetchStage(txn, ws, child, filter, collection), _matcher(filter) {
}
virtual ~FetchStageWithMatch() {
@@ -647,7 +648,8 @@ namespace mongo {
}
// FetchStage owns index scan
- FetchStage* fetcher(new FetchStageWithMatch(workingSet,
+ FetchStage* fetcher(new FetchStageWithMatch(txn,
+ workingSet,
scan,
docMatcher,
collection));
@@ -881,7 +883,7 @@ namespace mongo {
IndexScan* scan = new IndexScanWithMatch(txn, scanParams, workingSet, keyMatcher);
// FetchStage owns index scan
- FetchStage* fetcher(new FetchStage(workingSet, scan, _nearParams.filter, collection));
+ FetchStage* fetcher(new FetchStage(txn, workingSet, scan, _nearParams.filter, collection));
return StatusWith<CoveredInterval*>(new CoveredInterval(fetcher,
true,
diff --git a/src/mongo/db/exec/idhack.cpp b/src/mongo/db/exec/idhack.cpp
index 83fe0e8c5ea..549bb8ce7f6 100644
--- a/src/mongo/db/exec/idhack.cpp
+++ b/src/mongo/db/exec/idhack.cpp
@@ -88,7 +88,7 @@ namespace mongo {
const IndexCatalog* catalog = _collection->getIndexCatalog();
// Find the index we use.
- IndexDescriptor* idDesc = catalog->findIdIndex();
+ IndexDescriptor* idDesc = catalog->findIdIndex(_txn);
if (NULL == idDesc) {
_done = true;
return PlanStage::IS_EOF;
@@ -114,7 +114,7 @@ namespace mongo {
WorkingSetID id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
member->loc = loc;
- member->obj = _collection->docFor(loc);
+ member->obj = _collection->docFor(_txn, loc);
member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
if (_addKeyMetadata) {
diff --git a/src/mongo/db/exec/index_scan.cpp b/src/mongo/db/exec/index_scan.cpp
index ff4f22ec954..e5a8cfe2019 100644
--- a/src/mongo/db/exec/index_scan.cpp
+++ b/src/mongo/db/exec/index_scan.cpp
@@ -77,13 +77,13 @@ namespace mongo {
_shouldDedup = false;
}
else {
- _shouldDedup = _params.descriptor->isMultikey();
+ _shouldDedup = _params.descriptor->isMultikey(_txn);
}
// We can't always access the descriptor in the call to getStats() so we pull
// the status-only information we need out here.
_specificStats.indexName = _params.descriptor->infoObj()["name"].String();
- _specificStats.isMultiKey = _params.descriptor->isMultikey();
+ _specificStats.isMultiKey = _params.descriptor->isMultikey(_txn);
// Set up the index cursor.
CursorOptions cursorOptions;
diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp
index 372bb912d47..4b11e711843 100644
--- a/src/mongo/db/exec/merge_sort.cpp
+++ b/src/mongo/db/exec/merge_sort.cpp
@@ -37,10 +37,12 @@ namespace mongo {
// static
const char* MergeSortStage::kStageType = "SORT_MERGE";
- MergeSortStage::MergeSortStage(const MergeSortStageParams& params,
+ MergeSortStage::MergeSortStage(OperationContext* txn,
+ const MergeSortStageParams& params,
WorkingSet* ws,
const Collection* collection)
- : _collection(collection),
+ : _txn(txn),
+ _collection(collection),
_ws(ws),
_pattern(params.pattern),
_dedup(params.dedup),
@@ -207,7 +209,7 @@ namespace mongo {
WorkingSetMember* member = _ws->get(valueIt->id);
if (member->hasLoc() && (dl == member->loc)) {
// Force a fetch and flag. We could possibly merge this result back in later.
- WorkingSetCommon::fetchAndInvalidateLoc(member, _collection);
+ WorkingSetCommon::fetchAndInvalidateLoc(_txn, member, _collection);
_ws->flagForReview(valueIt->id);
++_specificStats.forcedFetches;
}
diff --git a/src/mongo/db/exec/merge_sort.h b/src/mongo/db/exec/merge_sort.h
index 042778bc8fa..55d7a08660b 100644
--- a/src/mongo/db/exec/merge_sort.h
+++ b/src/mongo/db/exec/merge_sort.h
@@ -55,7 +55,8 @@ namespace mongo {
*/
class MergeSortStage : public PlanStage {
public:
- MergeSortStage(const MergeSortStageParams& params,
+ MergeSortStage(OperationContext* txn,
+ const MergeSortStageParams& params,
WorkingSet* ws,
const Collection* collection);
virtual ~MergeSortStage();
@@ -83,6 +84,7 @@ namespace mongo {
private:
// Not owned by us.
+ OperationContext* _txn;
const Collection* _collection;
// Not owned by us.
diff --git a/src/mongo/db/exec/multi_iterator.cpp b/src/mongo/db/exec/multi_iterator.cpp
index 3c806ffebc5..d9da77613b3 100644
--- a/src/mongo/db/exec/multi_iterator.cpp
+++ b/src/mongo/db/exec/multi_iterator.cpp
@@ -47,7 +47,7 @@ namespace mongo {
*out = _ws->allocate();
WorkingSetMember* member = _ws->get(*out);
member->loc = next;
- member->obj = _collection->docFor(next);
+ member->obj = _collection->docFor(_txn, next);
member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
return PlanStage::ADVANCED;
}
diff --git a/src/mongo/db/exec/multi_iterator.h b/src/mongo/db/exec/multi_iterator.h
index eb889b9e31f..3ad512a1682 100644
--- a/src/mongo/db/exec/multi_iterator.h
+++ b/src/mongo/db/exec/multi_iterator.h
@@ -44,8 +44,9 @@ namespace mongo {
*/
class MultiIteratorStage : public PlanStage {
public:
- MultiIteratorStage(WorkingSet* ws, Collection* collection)
- : _collection(collection),
+ MultiIteratorStage(OperationContext* txn, WorkingSet* ws, Collection* collection)
+ : _txn(txn),
+ _collection(collection),
_ws(ws) { }
~MultiIteratorStage() { }
@@ -85,6 +86,7 @@ namespace mongo {
*/
DiskLoc _advance();
+ OperationContext* _txn;
Collection* _collection;
OwnedPointerVector<RecordIterator> _iterators;
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index d2a27f2511c..2dd765d9553 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -52,8 +52,11 @@ namespace mongo {
// static
const char* MultiPlanStage::kStageType = "MULTI_PLAN";
- MultiPlanStage::MultiPlanStage(const Collection* collection, CanonicalQuery* cq)
- : _collection(collection),
+ MultiPlanStage::MultiPlanStage(OperationContext* txn,
+ const Collection* collection,
+ CanonicalQuery* cq)
+ : _txn(txn),
+ _collection(collection),
_query(cq),
_bestPlanIdx(kNoSuchPlan),
_backupPlanIdx(kNoSuchPlan),
@@ -150,7 +153,7 @@ namespace mongo {
double fraction = internalQueryPlanEvaluationCollFraction;
numWorks = std::max(size_t(internalQueryPlanEvaluationWorks),
- size_t(fraction * _collection->numRecords()));
+ size_t(fraction * _collection->numRecords(_txn)));
}
// We treat ntoreturn as though it is a limit during plan ranking.
@@ -414,7 +417,8 @@ namespace mongo {
namespace {
- void invalidateHelper(WorkingSet* ws, // may flag for review
+ void invalidateHelper(OperationContext* txn,
+ WorkingSet* ws, // may flag for review
const DiskLoc& dl,
list<WorkingSetID>* idsToInvalidate,
const Collection* collection) {
@@ -424,7 +428,7 @@ namespace mongo {
if (member->hasLoc() && member->loc == dl) {
list<WorkingSetID>::iterator next = it;
next++;
- WorkingSetCommon::fetchAndInvalidateLoc(member, collection);
+ WorkingSetCommon::fetchAndInvalidateLoc(txn, member, collection);
ws->flagForReview(*it);
idsToInvalidate->erase(it);
it = next;
@@ -442,17 +446,17 @@ namespace mongo {
if (bestPlanChosen()) {
CandidatePlan& bestPlan = _candidates[_bestPlanIdx];
bestPlan.root->invalidate(dl, type);
- invalidateHelper(bestPlan.ws, dl, &bestPlan.results, _collection);
+ invalidateHelper(_txn, bestPlan.ws, dl, &bestPlan.results, _collection);
if (hasBackupPlan()) {
CandidatePlan& backupPlan = _candidates[_backupPlanIdx];
backupPlan.root->invalidate(dl, type);
- invalidateHelper(backupPlan.ws, dl, &backupPlan.results, _collection);
+ invalidateHelper(_txn, backupPlan.ws, dl, &backupPlan.results, _collection);
}
}
else {
for (size_t ix = 0; ix < _candidates.size(); ++ix) {
_candidates[ix].root->invalidate(dl, type);
- invalidateHelper(_candidates[ix].ws, dl, &_candidates[ix].results, _collection);
+ invalidateHelper(_txn, _candidates[ix].ws, dl, &_candidates[ix].results, _collection);
}
}
}
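For context on the numWorks computation above, a worked example with hypothetical knob values:

    // Hypothetical: internalQueryPlanEvaluationWorks = 10000,
    // internalQueryPlanEvaluationCollFraction = 0.3, and 100,000 records.
    size_t numWorks = std::max( size_t( 10000 ),
                                size_t( 0.3 * 100000 ) );  // 30,000 ranking rounds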
diff --git a/src/mongo/db/exec/multi_plan.h b/src/mongo/db/exec/multi_plan.h
index 3cf9bbda0f7..3304227d66a 100644
--- a/src/mongo/db/exec/multi_plan.h
+++ b/src/mongo/db/exec/multi_plan.h
@@ -50,7 +50,7 @@ namespace mongo {
class MultiPlanStage : public PlanStage {
public:
/** Takes no ownership */
- MultiPlanStage(const Collection* collection, CanonicalQuery* cq);
+ MultiPlanStage(OperationContext* txn, const Collection* collection, CanonicalQuery* cq);
virtual ~MultiPlanStage();
@@ -148,6 +148,7 @@ namespace mongo {
static const int kNoSuchPlan = -1;
// not owned here
+ OperationContext* _txn;
const Collection* _collection;
// The query that we're trying to figure out the best solution to.
diff --git a/src/mongo/db/exec/near.cpp b/src/mongo/db/exec/near.cpp
index 462b02abc40..21aee43a7f7 100644
--- a/src/mongo/db/exec/near.cpp
+++ b/src/mongo/db/exec/near.cpp
@@ -294,7 +294,7 @@ namespace mongo {
WorkingSetMember* member = _workingSet->get(seenIt->second);
verify(member->hasLoc());
- WorkingSetCommon::fetchAndInvalidateLoc(member, _collection);
+ WorkingSetCommon::fetchAndInvalidateLoc(_txn, member, _collection);
verify(!member->hasLoc());
// Don't keep it around in the seen map since there's no valid DiskLoc anymore
diff --git a/src/mongo/db/exec/oplogstart.cpp b/src/mongo/db/exec/oplogstart.cpp
index 7727946a5df..e6b2c445b26 100644
--- a/src/mongo/db/exec/oplogstart.cpp
+++ b/src/mongo/db/exec/oplogstart.cpp
@@ -86,12 +86,12 @@ namespace mongo {
const DiskLoc loc = _subIterators.back()->getNext();
_subIterators.popAndDeleteBack();
- if (!loc.isNull() && !_filter->matchesBSON(_collection->docFor(loc))) {
+ if (!loc.isNull() && !_filter->matchesBSON(_collection->docFor(_txn, loc))) {
_done = true;
WorkingSetID id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
member->loc = loc;
- member->obj = _collection->docFor(member->loc);
+ member->obj = _collection->docFor(_txn, member->loc);
member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
*out = id;
return PlanStage::ADVANCED;
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index 236103ee364..22d60076104 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -280,8 +280,12 @@ namespace mongo {
return lhs.loc < rhs.loc;
}
- SortStage::SortStage(const SortStageParams& params, WorkingSet* ws, PlanStage* child)
- : _collection(params.collection),
+ SortStage::SortStage(OperationContext* txn,
+ const SortStageParams& params,
+ WorkingSet* ws,
+ PlanStage* child)
+ : _txn(txn),
+ _collection(params.collection),
_ws(ws),
_child(child),
_pattern(params.pattern),
@@ -444,7 +448,7 @@ namespace mongo {
WorkingSetMember* member = _ws->get(it->second);
verify(member->loc == dl);
- WorkingSetCommon::fetchAndInvalidateLoc(member, _collection);
+ WorkingSetCommon::fetchAndInvalidateLoc(_txn, member, _collection);
// Remove the DiskLoc from our set of active DLs.
_wsidByDiskLoc.erase(it);
diff --git a/src/mongo/db/exec/sort.h b/src/mongo/db/exec/sort.h
index 1ff6855aee2..9a10ced5c2a 100644
--- a/src/mongo/db/exec/sort.h
+++ b/src/mongo/db/exec/sort.h
@@ -142,7 +142,10 @@ namespace mongo {
*/
class SortStage : public PlanStage {
public:
- SortStage(const SortStageParams& params, WorkingSet* ws, PlanStage* child);
+ SortStage(OperationContext* txn,
+ const SortStageParams& params,
+ WorkingSet* ws,
+ PlanStage* child);
virtual ~SortStage();
@@ -172,6 +175,7 @@ namespace mongo {
//
// Not owned by us.
+ OperationContext* _txn;
const Collection* _collection;
// Not owned by us.
diff --git a/src/mongo/db/exec/sort_test.cpp b/src/mongo/db/exec/sort_test.cpp
index c890fb4dd15..61afd9f9e9c 100644
--- a/src/mongo/db/exec/sort_test.cpp
+++ b/src/mongo/db/exec/sort_test.cpp
@@ -47,7 +47,7 @@ namespace {
// MockStage will be owned by SortStage.
MockStage* ms = new MockStage(&ws);
SortStageParams params;
- SortStage sort(params, &ws, ms);
+ SortStage sort(NULL, params, &ws, ms);
// Check initial EOF state.
ASSERT_TRUE(ms->isEOF());
@@ -109,7 +109,7 @@ namespace {
params.query = fromjson(queryStr);
params.limit = limit;
- SortStage sort(params, &ws, ms);
+ SortStage sort(NULL, params, &ws, ms);
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = PlanStage::NEED_TIME;
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 44f27bc7a73..0f804e890ee 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -145,7 +145,7 @@ namespace mongo {
// Add a fetch at the top for the user so we can get obj back for sure.
// TODO: Do we want to do this for the user? I think so.
- PlanStage* rootFetch = new FetchStage(ws.get(), userRoot, NULL, collection);
+ PlanStage* rootFetch = new FetchStage(txn, ws.get(), userRoot, NULL, collection);
PlanExecutor runner(ws.release(), rootFetch, collection);
@@ -207,7 +207,7 @@ namespace mongo {
BSONObj keyPatternObj = nodeArgs["keyPattern"].Obj();
IndexDescriptor* desc =
- collection->getIndexCatalog()->findIndexByKeyPattern(keyPatternObj);
+ collection->getIndexCatalog()->findIndexByKeyPattern(txn, keyPatternObj);
uassert(16890, "Can't find index: " + keyPatternObj.toString(), desc);
IndexScanParams params;
@@ -224,7 +224,7 @@ namespace mongo {
uassert(16921, "Nodes argument must be provided to AND",
nodeArgs["nodes"].isABSONObj());
- auto_ptr<AndHashStage> andStage(new AndHashStage(workingSet, matcher, collection));
+ auto_ptr<AndHashStage> andStage(new AndHashStage(txn, workingSet, matcher, collection));
int nodesAdded = 0;
BSONObjIterator it(nodeArgs["nodes"].Obj());
@@ -249,8 +249,8 @@ namespace mongo {
uassert(16924, "Nodes argument must be provided to AND",
nodeArgs["nodes"].isABSONObj());
- auto_ptr<AndSortedStage> andStage(
- new AndSortedStage(workingSet, matcher, collection));
+ auto_ptr<AndSortedStage> andStage(new AndSortedStage(txn, workingSet,
+ matcher, collection));
int nodesAdded = 0;
BSONObjIterator it(nodeArgs["nodes"].Obj());
@@ -299,7 +299,7 @@ namespace mongo {
nodeArgs["node"].Obj(),
workingSet,
exprs);
- return new FetchStage(workingSet, subNode, matcher, collection);
+ return new FetchStage(txn, workingSet, subNode, matcher, collection);
}
else if ("limit" == nodeName) {
uassert(16937, "Limit stage doesn't have a filter (put it on the child)",
@@ -368,8 +368,8 @@ namespace mongo {
params.pattern = nodeArgs["pattern"].Obj();
// Dedup is true by default.
- auto_ptr<MergeSortStage> mergeStage(
- new MergeSortStage(params, workingSet, collection));
+ auto_ptr<MergeSortStage> mergeStage(new MergeSortStage(txn, params,
+ workingSet, collection));
BSONObjIterator it(nodeArgs["nodes"].Obj());
while (it.more()) {
@@ -389,7 +389,7 @@ namespace mongo {
string search = nodeArgs["search"].String();
vector<IndexDescriptor*> idxMatches;
- collection->getIndexCatalog()->findIndexByType("text", idxMatches);
+ collection->getIndexCatalog()->findIndexByType(txn, "text", idxMatches);
uassert(17194, "Expected exactly one text index", idxMatches.size() == 1);
IndexDescriptor* index = idxMatches[0];
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 39aaa83a2ee..4e871c663b1 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -268,7 +268,8 @@ namespace mongo {
_ws->clear();
- auto_ptr<MultiPlanStage> multiPlanStage(new MultiPlanStage(_collection,
+ auto_ptr<MultiPlanStage> multiPlanStage(new MultiPlanStage(_txn,
+ _collection,
orChildCQ.get()));
// Dump all the solutions into the MPR.
@@ -354,7 +355,7 @@ namespace mongo {
// with stats obtained in the same fashion as a competitive ranking would have obtained
// them.
_ws->clear();
- auto_ptr<MultiPlanStage> multiPlanStage(new MultiPlanStage(_collection, _query));
+ auto_ptr<MultiPlanStage> multiPlanStage(new MultiPlanStage(_txn, _collection, _query));
PlanStage* root;
verify(StageBuilder::build(_txn, _collection, *soln, _ws, &root));
multiPlanStage->addPlan(soln, root, _ws); // Takes ownership first two arguments.
diff --git a/src/mongo/db/exec/text.cpp b/src/mongo/db/exec/text.cpp
index 46800e96335..f040a274c01 100644
--- a/src/mongo/db/exec/text.cpp
+++ b/src/mongo/db/exec/text.cpp
@@ -273,7 +273,7 @@ namespace mongo {
// Filter for phrases and negated terms
if (_params.query.hasNonTermPieces()) {
- if (!_ftsMatcher.matchesNonTerm(_params.index->getCollection()->docFor(loc))) {
+ if (!_ftsMatcher.matchesNonTerm(_params.index->getCollection()->docFor(_txn, loc))) {
return PlanStage::NEED_TIME;
}
}
@@ -281,7 +281,7 @@ namespace mongo {
*out = _ws->allocate();
WorkingSetMember* member = _ws->get(*out);
member->loc = loc;
- member->obj = _params.index->getCollection()->docFor(member->loc);
+ member->obj = _params.index->getCollection()->docFor(_txn, member->loc);
member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
member->addComputed(new TextScoreComputedData(score));
return PlanStage::ADVANCED;
@@ -289,12 +289,14 @@ namespace mongo {
class TextMatchableDocument : public MatchableDocument {
public:
- TextMatchableDocument(const BSONObj& keyPattern,
+ TextMatchableDocument(OperationContext* txn,
+ const BSONObj& keyPattern,
const BSONObj& key,
DiskLoc loc,
const Collection* collection,
bool *fetched)
- : _collection(collection),
+ : _txn(txn),
+ _collection(collection),
_keyPattern(keyPattern),
_key(key),
_loc(loc),
@@ -302,7 +304,7 @@ namespace mongo {
BSONObj toBSON() const {
*_fetched = true;
- return _collection->docFor(_loc);
+ return _collection->docFor(_txn, _loc);
}
virtual ElementIterator* allocateIterator(const ElementPath* path) const {
@@ -327,7 +329,7 @@ namespace mongo {
// All else fails, fetch.
*_fetched = true;
- return new BSONElementIterator(path, _collection->docFor(_loc));
+ return new BSONElementIterator(path, _collection->docFor(_txn, _loc));
}
virtual void releaseIterator( ElementIterator* iterator ) const {
@@ -335,6 +337,7 @@ namespace mongo {
}
private:
+ OperationContext* _txn;
const Collection* _collection;
BSONObj _keyPattern;
BSONObj _key;
@@ -368,7 +371,8 @@ namespace mongo {
if (_filter) {
// We have not seen this document before and need to apply a filter.
bool fetched = false;
- TextMatchableDocument tdoc(_params.index->keyPattern(),
+ TextMatchableDocument tdoc(_txn,
+ _params.index->keyPattern(),
key,
loc,
_params.index->getCollection(),
diff --git a/src/mongo/db/exec/working_set_common.cpp b/src/mongo/db/exec/working_set_common.cpp
index a3929d0b620..502f740f6da 100644
--- a/src/mongo/db/exec/working_set_common.cpp
+++ b/src/mongo/db/exec/working_set_common.cpp
@@ -33,8 +33,9 @@
namespace mongo {
// static
- bool WorkingSetCommon::fetchAndInvalidateLoc(
- WorkingSetMember* member, const Collection* collection) {
+ bool WorkingSetCommon::fetchAndInvalidateLoc(OperationContext* txn,
+ WorkingSetMember* member,
+ const Collection* collection) {
// Already in our desired state.
if (member->state == WorkingSetMember::OWNED_OBJ) { return true; }
@@ -42,7 +43,7 @@ namespace mongo {
if (!member->hasLoc()) { return false; }
// Do the fetch, invalidate the DL.
- member->obj = collection->docFor(member->loc).getOwned();
+ member->obj = collection->docFor(txn, member->loc).getOwned();
member->state = WorkingSetMember::OWNED_OBJ;
member->loc = DiskLoc();
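Every stage touched above follows the same invalidation pattern; a minimal sketch (SomeStage and wsid are hypothetical placeholders):

    void SomeStage::invalidate( const DiskLoc& dl, InvalidationType type ) {
        WorkingSetMember* member = _ws->get( wsid );
        if ( member->hasLoc() && member->loc == dl ) {
            // Copy the doc out while _txn can still read it, then clear the loc.
            WorkingSetCommon::fetchAndInvalidateLoc( _txn, member, _collection );
            _ws->flagForReview( wsid );
        }
    }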
diff --git a/src/mongo/db/exec/working_set_common.h b/src/mongo/db/exec/working_set_common.h
index 2bd177edf86..3019aac1d15 100644
--- a/src/mongo/db/exec/working_set_common.h
+++ b/src/mongo/db/exec/working_set_common.h
@@ -39,7 +39,9 @@ namespace mongo {
* Requires either a valid BSONObj or valid DiskLoc.
* Returns true if the fetch and invalidate succeeded, false otherwise.
*/
- static bool fetchAndInvalidateLoc(WorkingSetMember* member, const Collection* collection);
+ static bool fetchAndInvalidateLoc(OperationContext* txn,
+ WorkingSetMember* member,
+ const Collection* collection);
/**
* Initialize the fields in 'dest' from 'src', creating copies of owned objects as needed.
diff --git a/src/mongo/db/geo/haystack.cpp b/src/mongo/db/geo/haystack.cpp
index ad8b06f00eb..42cae74d1e6 100644
--- a/src/mongo/db/geo/haystack.cpp
+++ b/src/mongo/db/geo/haystack.cpp
@@ -85,7 +85,7 @@ namespace mongo {
}
vector<IndexDescriptor*> idxs;
- collection->getIndexCatalog()->findIndexByType(IndexNames::GEO_HAYSTACK, idxs);
+ collection->getIndexCatalog()->findIndexByType(txn, IndexNames::GEO_HAYSTACK, idxs);
if (idxs.size() == 0) {
errmsg = "no geoSearch index";
return false;
diff --git a/src/mongo/db/index/btree_based_access_method.cpp b/src/mongo/db/index/btree_based_access_method.cpp
index 88264581e53..36978cf04bc 100644
--- a/src/mongo/db/index/btree_based_access_method.cpp
+++ b/src/mongo/db/index/btree_based_access_method.cpp
@@ -100,7 +100,7 @@ namespace mongo {
if (ErrorCodes::UniqueIndexViolation == status.code()) {
// We ignore it for some reason in BG indexing.
- if (!_btreeState->isReady()) {
+ if (!_btreeState->isReady(txn)) {
DEV log() << "info: key already in index during bg indexing (ok)\n";
continue;
}
@@ -315,7 +315,7 @@ namespace mongo {
IndexAccessMethod* BtreeBasedAccessMethod::initiateBulk(OperationContext* txn) {
// If there's already data in the index, don't do anything.
- if (!_newInterface->isEmpty()) {
+ if (!_newInterface->isEmpty(txn)) {
return NULL;
}
@@ -329,11 +329,13 @@ namespace mongo {
bool mayInterrupt,
bool dupsAllowed,
set<DiskLoc>* dupsToDrop) {
- if (!_newInterface->isEmpty()) {
+
+ BtreeBasedBulkAccessMethod* bulk = static_cast<BtreeBasedBulkAccessMethod*>(bulkRaw);
+
+ if (!_newInterface->isEmpty(bulk->getOperationContext())) {
return Status(ErrorCodes::InternalError, "trying to commit but has data already");
}
- BtreeBasedBulkAccessMethod* bulk = static_cast<BtreeBasedBulkAccessMethod*>(bulkRaw);
return bulk->commit(dupsToDrop, mayInterrupt, dupsAllowed);
}
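The cast is hoisted above the emptiness check so that check can reuse the bulk builder's own OperationContext. A hedged sketch of the caller-side sequence (names and argument order assumed, not confirmed by this hunk):

    IndexAccessMethod* bulkRaw = accessMethod->initiateBulk( txn );  // NULL if index has data
    if ( bulkRaw ) {
        // ... feed keys through the bulk builder ...
        std::set<DiskLoc> dupsToDrop;
        uassertStatusOK( accessMethod->commitBulk( bulkRaw,
                                                   true /* mayInterrupt */,
                                                   false /* dupsAllowed */,
                                                   &dupsToDrop ) );
    }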
diff --git a/src/mongo/db/index/btree_based_bulk_access_method.h b/src/mongo/db/index/btree_based_bulk_access_method.h
index 49fc1f53028..39a1a8b3508 100644
--- a/src/mongo/db/index/btree_based_bulk_access_method.h
+++ b/src/mongo/db/index/btree_based_bulk_access_method.h
@@ -129,6 +129,8 @@ namespace mongo {
return NULL;
}
+ OperationContext* getOperationContext() { return _txn; }
+
private:
typedef Sorter<BSONObj, DiskLoc> BSONObjExternalSorter;
diff --git a/src/mongo/db/index/haystack_access_method.cpp b/src/mongo/db/index/haystack_access_method.cpp
index 62afc01bd11..c79fe77c8c9 100644
--- a/src/mongo/db/index/haystack_access_method.cpp
+++ b/src/mongo/db/index/haystack_access_method.cpp
@@ -77,7 +77,7 @@ namespace mongo {
}
int scale = static_cast<int>(ceil(maxDistance / _bucketSize));
- GeoHaystackSearchHopper hopper(nearObj, maxDistance, limit, _geoField, collection);
+ GeoHaystackSearchHopper hopper(txn, nearObj, maxDistance, limit, _geoField, collection);
long long btreeMatches = 0;
diff --git a/src/mongo/db/index/haystack_access_method_internal.h b/src/mongo/db/index/haystack_access_method_internal.h
index 67c97d25b90..663aa10b204 100644
--- a/src/mongo/db/index/haystack_access_method_internal.h
+++ b/src/mongo/db/index/haystack_access_method_internal.h
@@ -45,12 +45,14 @@ namespace mongo {
* @param limit The maximum number of results to return
* @param geoField Which field in the provided DiskLoc has the point to test.
*/
- GeoHaystackSearchHopper(const BSONObj& nearObj,
+ GeoHaystackSearchHopper(OperationContext* txn,
+ const BSONObj& nearObj,
double maxDistance,
unsigned limit,
const std::string& geoField,
const Collection* collection)
- : _collection(collection),
+ : _txn(txn),
+ _collection(collection),
_near(nearObj),
_maxDistance(maxDistance),
_limit(limit),
@@ -60,7 +62,7 @@ namespace mongo {
// it)
void consider(const DiskLoc& loc) {
if (limitReached()) return;
- Point p(_collection->docFor(loc).getFieldDotted(_geoField));
+ Point p(_collection->docFor(_txn, loc).getFieldDotted(_geoField));
if (distance(_near, p) > _maxDistance)
return;
_locs.push_back(loc);
@@ -68,7 +70,7 @@ namespace mongo {
int appendResultsTo(BSONArrayBuilder* b) {
for (unsigned i = 0; i <_locs.size(); i++)
- b->append(_collection->docFor(_locs[i]));
+ b->append(_collection->docFor(_txn, _locs[i]));
return _locs.size();
}
@@ -77,6 +79,7 @@ namespace mongo {
return _locs.size() >= _limit;
}
private:
+ OperationContext* _txn;
const Collection* _collection;
Point _near;
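A sketch of how the hopper is driven (candidateLocs stands in for the real btree matches, which this hunk does not show):

    GeoHaystackSearchHopper hopper( txn, nearObj, maxDistance, limit,
                                    _geoField, collection );
    for ( size_t i = 0; i < candidateLocs.size(); i++ ) {
        // consider() fetches via docFor(txn, loc) and filters by distance.
        hopper.consider( candidateLocs[i] );
    }
    BSONArrayBuilder results;
    int found = hopper.appendResultsTo( &results );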
diff --git a/src/mongo/db/index/index_descriptor.h b/src/mongo/db/index/index_descriptor.h
index 2e259136747..01a12cf3d5f 100644
--- a/src/mongo/db/index/index_descriptor.h
+++ b/src/mongo/db/index/index_descriptor.h
@@ -127,7 +127,10 @@ namespace mongo {
bool isSparse() const { return _sparse; }
// Is this index multikey?
- bool isMultikey() const { _checkOk(); return _collection->getIndexCatalog()->isMultikey( this ); }
+ bool isMultikey( OperationContext* txn ) const {
+ _checkOk();
+ return _collection->getIndexCatalog()->isMultikey( txn, this );
+ }
bool isIdIndex() const { _checkOk(); return _isIdIndex; }
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index 39d9544a30c..34cb8fcc597 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -73,7 +73,7 @@ namespace {
IndexCatalog* indexCatalog = collection->getIndexCatalog();
- if ( collection->ns().isOplog() && indexCatalog->numIndexesTotal() > 0 ) {
+ if ( collection->ns().isOplog() && indexCatalog->numIndexesTotal( txn ) > 0 ) {
warning() << ns << " had illegal indexes, removing";
indexCatalog->dropAllIndexes(txn, true);
continue;
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 2a3f9a5e549..301e89a13fd 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -99,7 +99,7 @@ namespace mongo {
if (lifecycle) {
lifecycle->setCollection(collection);
- driver->refreshIndexKeys(lifecycle->getIndexKeys());
+ driver->refreshIndexKeys(lifecycle->getIndexKeys(request.getOpCtx()));
}
PlanExecutor* rawExec;
diff --git a/src/mongo/db/ops/update_lifecycle.h b/src/mongo/db/ops/update_lifecycle.h
index ed5b55d3e5d..c0aa86695da 100644
--- a/src/mongo/db/ops/update_lifecycle.h
+++ b/src/mongo/db/ops/update_lifecycle.h
@@ -34,6 +34,8 @@
namespace mongo {
+ class OperationContext;
+
class UpdateLifecycle {
public:
@@ -56,7 +58,7 @@ namespace mongo {
/**
* Return a pointer to any indexes if there is a collection.
*/
- virtual const UpdateIndexData* getIndexKeys() const = 0;
+ virtual const UpdateIndexData* getIndexKeys( OperationContext* opCtx ) const = 0;
/**
* Returns the shard keys as immutable fields
diff --git a/src/mongo/db/ops/update_lifecycle_impl.cpp b/src/mongo/db/ops/update_lifecycle_impl.cpp
index 87648c37403..9a0c15e61fd 100644
--- a/src/mongo/db/ops/update_lifecycle_impl.cpp
+++ b/src/mongo/db/ops/update_lifecycle_impl.cpp
@@ -61,9 +61,9 @@ namespace mongo {
return _collection;
}
- const UpdateIndexData* UpdateLifecycleImpl::getIndexKeys() const {
+ const UpdateIndexData* UpdateLifecycleImpl::getIndexKeys(OperationContext* opCtx) const {
if (_collection)
- return &_collection->infoCache()->indexKeys();
+ return &_collection->infoCache()->indexKeys(opCtx);
return NULL;
}
diff --git a/src/mongo/db/ops/update_lifecycle_impl.h b/src/mongo/db/ops/update_lifecycle_impl.h
index fa612463757..c321b43912d 100644
--- a/src/mongo/db/ops/update_lifecycle_impl.h
+++ b/src/mongo/db/ops/update_lifecycle_impl.h
@@ -52,7 +52,7 @@ namespace mongo {
virtual bool canContinue() const;
- virtual const UpdateIndexData* getIndexKeys() const;
+ virtual const UpdateIndexData* getIndexKeys(OperationContext* opCtx) const;
virtual const std::vector<FieldRef*>* getImmutableFields() const;
diff --git a/src/mongo/db/prefetch.cpp b/src/mongo/db/prefetch.cpp
index f0b948fcd3c..21b0d15d770 100644
--- a/src/mongo/db/prefetch.cpp
+++ b/src/mongo/db/prefetch.cpp
@@ -148,11 +148,11 @@ namespace mongo {
// on the update op case, the call to prefetchRecordPages will touch the _id index.
// thus perhaps this option isn't very useful?
try {
- IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex();
+ IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex(txn);
if ( !desc )
return;
IndexAccessMethod* iam = collection->getIndexCatalog()->getIndex( desc );
- verify( iam );
+ invariant( iam );
iam->touch(txn, obj);
}
catch (const DBException& e) {
@@ -164,7 +164,8 @@ namespace mongo {
{
// indexCount includes all indexes, including ones
// in the process of being built
- IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator( true );
+ IndexCatalog::IndexIterator ii =
+ collection->getIndexCatalog()->getIndexIterator( txn, true );
while ( ii.more() ) {
TimerHolder timer( &prefetchIndexStats);
// This will page in all index pages for the given object.
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 81e33790e52..b2936c80e61 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -100,16 +100,18 @@ namespace mongo {
} // namespace
- void fillOutPlannerParams(Collection* collection,
+ void fillOutPlannerParams(OperationContext* txn,
+ Collection* collection,
CanonicalQuery* canonicalQuery,
QueryPlannerParams* plannerParams) {
// If it's not NULL, we may have indices. Access the catalog and fill out IndexEntry(s)
- IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(false);
+ IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(txn,
+ false);
while (ii.more()) {
const IndexDescriptor* desc = ii.next();
plannerParams->indices.push_back(IndexEntry(desc->keyPattern(),
desc->getAccessMethodName(),
- desc->isMultikey(),
+ desc->isMultikey(txn),
desc->isSparse(),
desc->indexName(),
desc->infoObj()));
@@ -200,11 +202,11 @@ namespace mongo {
// Fill out the planning params. We use these for both cached solutions and non-cached.
QueryPlannerParams plannerParams;
plannerParams.options = plannerOptions;
- fillOutPlannerParams(collection, canonicalQuery, &plannerParams);
+ fillOutPlannerParams(opCtx, collection, canonicalQuery, &plannerParams);
// If we have an _id index we can use an idhack plan.
if (IDHackStage::supportsQuery(*canonicalQuery) &&
- collection->getIndexCatalog()->findIdIndex()) {
+ collection->getIndexCatalog()->findIdIndex(opCtx)) {
LOG(2) << "Using idhack: " << canonicalQuery->toStringShort();
@@ -366,7 +368,7 @@ namespace mongo {
else {
// Many solutions. Create a MultiPlanStage to pick the best, update the cache,
// and so on. The working set will be shared by all candidate plans.
- MultiPlanStage* multiPlanStage = new MultiPlanStage(collection, canonicalQuery);
+ MultiPlanStage* multiPlanStage = new MultiPlanStage(opCtx, collection, canonicalQuery);
for (size_t ix = 0; ix < solutions.size(); ++ix) {
if (solutions[ix]->cacheData.get()) {
@@ -431,7 +433,7 @@ namespace mongo {
}
if (!CanonicalQuery::isSimpleIdQuery(unparsedQuery) ||
- !collection->getIndexCatalog()->findIdIndex()) {
+ !collection->getIndexCatalog()->findIdIndex(txn)) {
const WhereCallbackReal whereCallback(txn, collection->ns().db());
CanonicalQuery* cq;
@@ -515,7 +517,7 @@ namespace mongo {
}
if (CanonicalQuery::isSimpleIdQuery(unparsedQuery) &&
- collection->getIndexCatalog()->findIdIndex()) {
+ collection->getIndexCatalog()->findIdIndex(txn)) {
LOG(2) << "Using idhack: " << unparsedQuery.toString();
PlanStage* idHackStage = new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(),
@@ -595,7 +597,7 @@ namespace mongo {
}
if (CanonicalQuery::isSimpleIdQuery(unparsedQuery) &&
- collection->getIndexCatalog()->findIdIndex()) {
+ collection->getIndexCatalog()->findIdIndex(txn)) {
LOG(2) << "Using idhack: " << unparsedQuery.toString();
PlanStage* idHackStage = new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(),
@@ -891,7 +893,7 @@ namespace mongo {
QueryPlannerParams plannerParams;
plannerParams.options = QueryPlannerParams::NO_TABLE_SCAN;
- IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(false);
+ IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(txn,false);
while (ii.more()) {
const IndexDescriptor* desc = ii.next();
// The distinct hack can work if any field is in the index but it's not always clear
@@ -899,7 +901,7 @@ namespace mongo {
if (desc->keyPattern().firstElement().fieldName() == field) {
plannerParams.indices.push_back(IndexEntry(desc->keyPattern(),
desc->getAccessMethodName(),
- desc->isMultikey(),
+ desc->isMultikey(txn),
desc->isSparse(),
desc->indexName(),
desc->infoObj()));
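Call sites of the reworked helpers now look like this (a sketch assembled from the hunks above; variable names assumed):

    QueryPlannerParams plannerParams;
    plannerParams.options = plannerOptions;
    fillOutPlannerParams( txn, collection, canonicalQuery, &plannerParams );

    // Every catalog read threads the OperationContext as well:
    if ( collection->getIndexCatalog()->findIdIndex( txn ) ) {
        // eligible for the idhack plan
    }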
diff --git a/src/mongo/db/query/get_executor.h b/src/mongo/db/query/get_executor.h
index bfdb53fe98b..76f2054d96a 100644
--- a/src/mongo/db/query/get_executor.h
+++ b/src/mongo/db/query/get_executor.h
@@ -52,7 +52,8 @@ namespace mongo {
* Fill out the provided 'plannerParams' for the 'canonicalQuery' operating on the collection
* 'collection'. Exposed for testing.
*/
- void fillOutPlannerParams(Collection* collection,
+ void fillOutPlannerParams(OperationContext* txn,
+ Collection* collection,
CanonicalQuery* canonicalQuery,
QueryPlannerParams* plannerParams);
diff --git a/src/mongo/db/query/internal_plans.h b/src/mongo/db/query/internal_plans.h
index 9cfff406b8f..f51d33baf59 100644
--- a/src/mongo/db/query/internal_plans.h
+++ b/src/mongo/db/query/internal_plans.h
@@ -122,7 +122,7 @@ namespace mongo {
PlanStage* root = ix;
if (IXSCAN_FETCH & options) {
- root = new FetchStage(ws, root, NULL, collection);
+ root = new FetchStage(txn, ws, root, NULL, collection);
}
PlanExecutor* exec = new PlanExecutor(ws, root, collection);
diff --git a/src/mongo/db/query/new_find.cpp b/src/mongo/db/query/new_find.cpp
index f34d097aa58..507cbb0117a 100644
--- a/src/mongo/db/query/new_find.cpp
+++ b/src/mongo/db/query/new_find.cpp
@@ -729,7 +729,7 @@ namespace mongo {
// collection is empty. Otherwise, the semantics of the tailable cursor is that the
// client will keep trying to read from it. So we'll keep it around.
Collection* collection = ctx.ctx().db()->getCollection(txn, cq->ns());
- if (collection && collection->numRecords() != 0 && pq.getNumToReturn() != 1) {
+ if (collection && collection->numRecords(txn) != 0 && pq.getNumToReturn() != 1) {
saveClientCursor = true;
}
}
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index f270b799681..0770eefde28 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -79,7 +79,7 @@ namespace mongo {
IndexScanParams params;
params.descriptor =
- collection->getIndexCatalog()->findIndexByKeyPattern( ixn->indexKeyPattern );
+ collection->getIndexCatalog()->findIndexByKeyPattern( txn, ixn->indexKeyPattern );
if ( params.descriptor == NULL ) {
warning() << "Can't find index " << ixn->indexKeyPattern.toString()
<< "in namespace " << collection->ns() << endl;
@@ -96,7 +96,7 @@ namespace mongo {
const FetchNode* fn = static_cast<const FetchNode*>(root);
PlanStage* childStage = buildStages(txn, collection, qsol, fn->children[0], ws);
if (NULL == childStage) { return NULL; }
- return new FetchStage(ws, childStage, fn->filter.get(), collection);
+ return new FetchStage(txn, ws, childStage, fn->filter.get(), collection);
}
else if (STAGE_SORT == root->getType()) {
const SortNode* sn = static_cast<const SortNode*>(root);
@@ -107,7 +107,7 @@ namespace mongo {
params.pattern = sn->pattern;
params.query = sn->query;
params.limit = sn->limit;
- return new SortStage(params, ws, childStage);
+ return new SortStage(txn, params, ws, childStage);
}
else if (STAGE_PROJECTION == root->getType()) {
const ProjectionNode* pn = static_cast<const ProjectionNode*>(root);
@@ -148,7 +148,7 @@ namespace mongo {
}
else if (STAGE_AND_HASH == root->getType()) {
const AndHashNode* ahn = static_cast<const AndHashNode*>(root);
- auto_ptr<AndHashStage> ret(new AndHashStage(ws, ahn->filter.get(), collection));
+ auto_ptr<AndHashStage> ret(new AndHashStage(txn, ws, ahn->filter.get(), collection));
for (size_t i = 0; i < ahn->children.size(); ++i) {
PlanStage* childStage = buildStages(txn, collection, qsol, ahn->children[i], ws);
if (NULL == childStage) { return NULL; }
@@ -168,7 +168,7 @@ namespace mongo {
}
else if (STAGE_AND_SORTED == root->getType()) {
const AndSortedNode* asn = static_cast<const AndSortedNode*>(root);
- auto_ptr<AndSortedStage> ret(new AndSortedStage(ws, asn->filter.get(), collection));
+ auto_ptr<AndSortedStage> ret(new AndSortedStage(txn, ws, asn->filter.get(), collection));
for (size_t i = 0; i < asn->children.size(); ++i) {
PlanStage* childStage = buildStages(txn, collection, qsol, asn->children[i], ws);
if (NULL == childStage) { return NULL; }
@@ -181,7 +181,7 @@ namespace mongo {
MergeSortStageParams params;
params.dedup = msn->dedup;
params.pattern = msn->sort;
- auto_ptr<MergeSortStage> ret(new MergeSortStage(params, ws, collection));
+ auto_ptr<MergeSortStage> ret(new MergeSortStage(txn, params, ws, collection));
for (size_t i = 0; i < msn->children.size(); ++i) {
PlanStage* childStage = buildStages(txn, collection, qsol, msn->children[i], ws);
if (NULL == childStage) { return NULL; }
@@ -199,8 +199,8 @@ namespace mongo {
params.addPointMeta = node->addPointMeta;
params.addDistMeta = node->addDistMeta;
- IndexDescriptor* twoDIndex = collection->getIndexCatalog()->findIndexByKeyPattern(node
- ->indexKeyPattern);
+ IndexDescriptor* twoDIndex = collection->getIndexCatalog()->findIndexByKeyPattern(txn,
+ node->indexKeyPattern);
if (twoDIndex == NULL) {
warning() << "Can't find 2D index " << node->indexKeyPattern.toString()
@@ -222,8 +222,8 @@ namespace mongo {
params.addPointMeta = node->addPointMeta;
params.addDistMeta = node->addDistMeta;
- IndexDescriptor* s2Index = collection->getIndexCatalog()->findIndexByKeyPattern(node
- ->indexKeyPattern);
+ IndexDescriptor* s2Index = collection->getIndexCatalog()->findIndexByKeyPattern(txn,
+ node->indexKeyPattern);
if (s2Index == NULL) {
warning() << "Can't find 2DSphere index " << node->indexKeyPattern.toString()
@@ -241,7 +241,7 @@ namespace mongo {
return NULL;
}
vector<IndexDescriptor*> idxMatches;
- collection->getIndexCatalog()->findIndexByType("text", idxMatches);
+ collection->getIndexCatalog()->findIndexByType(txn, "text", idxMatches);
if (1 != idxMatches.size()) {
warning() << "No text index, or more than one text index";
return NULL;
@@ -293,7 +293,7 @@ namespace mongo {
DistinctParams params;
params.descriptor =
- collection->getIndexCatalog()->findIndexByKeyPattern(dn->indexKeyPattern);
+ collection->getIndexCatalog()->findIndexByKeyPattern(txn, dn->indexKeyPattern);
params.direction = dn->direction;
params.bounds = dn->bounds;
params.fieldNo = dn->fieldNo;
@@ -310,7 +310,7 @@ namespace mongo {
CountParams params;
params.descriptor =
- collection->getIndexCatalog()->findIndexByKeyPattern(cn->indexKeyPattern);
+ collection->getIndexCatalog()->findIndexByKeyPattern(txn, cn->indexKeyPattern);
params.startKey = cn->startKey;
params.startKeyInclusive = cn->startKeyInclusive;
params.endKey = cn->endKey;
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 8baef920496..2a86ec23402 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -635,7 +635,7 @@ namespace repl {
// but keep it just in case
RARELY if ( indexCatalog
&& !collection->isCapped()
- && !indexCatalog->haveIdIndex() ) {
+ && !indexCatalog->haveIdIndex(txn) ) {
try {
Helpers::ensureIndex(txn, collection, BSON("_id" << 1), true, "_id_");
}
@@ -670,7 +670,7 @@ namespace repl {
// probably don't need this since all replicated colls have _id indexes now
// but keep it just in case
- RARELY if ( indexCatalog && !collection->isCapped() && !indexCatalog->haveIdIndex() ) {
+ RARELY if ( indexCatalog && !collection->isCapped() && !indexCatalog->haveIdIndex(txn) ) {
try {
Helpers::ensureIndex(txn, collection, BSON("_id" << 1), true, "_id_");
}
@@ -710,9 +710,9 @@ namespace repl {
// thus this is not ideal.
else {
if (collection == NULL ||
- (indexCatalog->haveIdIndex() && Helpers::findById(txn, collection, updateCriteria).isNull()) ||
+ (indexCatalog->haveIdIndex(txn) && Helpers::findById(txn, collection, updateCriteria).isNull()) ||
// capped collections won't have an _id index
- (!indexCatalog->haveIdIndex() && Helpers::findOne(txn, collection, updateCriteria, false).isNull())) {
+ (!indexCatalog->haveIdIndex(txn) && Helpers::findOne(txn, collection, updateCriteria, false).isNull())) {
failedUpdate = true;
log() << "replication couldn't find doc: " << op.toString() << endl;
}
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index 46a35d9d0a1..fa3dd087206 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -141,7 +141,7 @@ namespace repl {
Collection* collection = ctx.ctx().db()->getCollection(txn, rsoplog);
// temp
- if( collection->numRecords() == 0 )
+ if( collection->numRecords(txn) == 0 )
return; // already empty, ok.
LOG(1) << "replSet empty oplog" << rsLog;
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 49505fff78f..c309aa51f0d 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -620,7 +620,7 @@ namespace repl {
}
// did we just empty the collection? if so let's check if it even
// exists on the source.
- if (collection->numRecords() == 0) {
+ if (collection->numRecords(txn) == 0) {
try {
string sys = ctx.db()->name() + ".system.namespaces";
BSONObj nsResult = them->findOne(sys, QUERY("name" << doc.ns));
diff --git a/src/mongo/db/storage/SConscript b/src/mongo/db/storage/SConscript
index cd5110be3b7..c8c8d9f2099 100644
--- a/src/mongo/db/storage/SConscript
+++ b/src/mongo/db/storage/SConscript
@@ -7,3 +7,11 @@ env.Library(
],
LIBDEPS=[]
)
+
+env.Library(
+ target='bson_collection_catalog_entry',
+ source=[
+ 'bson_collection_catalog_entry.cpp',
+ ],
+ LIBDEPS=[]
+ )
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.cpp b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
new file mode 100644
index 00000000000..e9913f7934a
--- /dev/null
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
@@ -0,0 +1,162 @@
+// bson_collection_catalog_entry.cpp
+
+/**
+ * Copyright (C) 2014 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/storage/bson_collection_catalog_entry.h"
+
+namespace mongo {
+
+ BSONCollectionCatalogEntry::BSONCollectionCatalogEntry( const StringData& ns )
+ : CollectionCatalogEntry( ns ) {
+ }
+
+ CollectionOptions BSONCollectionCatalogEntry::getCollectionOptions( OperationContext* txn ) const {
+ // TODO: support everything
+ return CollectionOptions();
+ }
+
+ int BSONCollectionCatalogEntry::getTotalIndexCount( OperationContext* txn ) const {
+ MetaData md = _getMetaData( txn );
+
+ return static_cast<int>( md.indexes.size() );
+ }
+
+ int BSONCollectionCatalogEntry::getCompletedIndexCount( OperationContext* txn ) const {
+ MetaData md = _getMetaData( txn );
+
+ int num = 0;
+ for ( unsigned i = 0; i < md.indexes.size(); i++ ) {
+ if ( md.indexes[i].ready )
+ num++;
+ }
+ return num;
+ }
+
+ BSONObj BSONCollectionCatalogEntry::getIndexSpec( OperationContext* txn,
+ const StringData& indexName ) const {
+ MetaData md = _getMetaData( txn );
+
+ int offset = md.findIndexOffset( indexName );
+ invariant( offset >= 0 );
+ return md.indexes[offset].spec.getOwned();
+ }
+
+
+ void BSONCollectionCatalogEntry::getAllIndexes( OperationContext* txn,
+ std::vector<std::string>* names ) const {
+ MetaData md = _getMetaData( txn );
+
+ for ( unsigned i = 0; i < md.indexes.size(); i++ ) {
+ names->push_back( md.indexes[i].spec["name"].String() );
+ }
+ }
+
+ bool BSONCollectionCatalogEntry::isIndexMultikey( OperationContext* txn,
+ const StringData& indexName) const {
+ MetaData md = _getMetaData( txn );
+
+ int offset = md.findIndexOffset( indexName );
+ invariant( offset >= 0 );
+ return md.indexes[offset].multikey;
+ }
+
+ DiskLoc BSONCollectionCatalogEntry::getIndexHead( OperationContext* txn,
+ const StringData& indexName ) const {
+ MetaData md = _getMetaData( txn );
+
+ int offset = md.findIndexOffset( indexName );
+ invariant( offset >= 0 );
+ return md.indexes[offset].head;
+ }
+
+ bool BSONCollectionCatalogEntry::isIndexReady( OperationContext* txn,
+ const StringData& indexName ) const {
+ MetaData md = _getMetaData( txn );
+
+ int offset = md.findIndexOffset( indexName );
+ invariant( offset >= 0 );
+ return md.indexes[offset].ready;
+ }
+
+ int BSONCollectionCatalogEntry::MetaData::findIndexOffset( const StringData& name ) const {
+ for ( unsigned i = 0; i < indexes.size(); i++ )
+ if ( indexes[i].spec["name"].String() == name )
+ return i;
+ return -1;
+ }
+
+ bool BSONCollectionCatalogEntry::MetaData::eraseIndex( const StringData& name ) {
+ int indexOffset = findIndexOffset( name );
+
+ if ( indexOffset < 0 ) {
+ return false;
+ }
+
+ indexes.erase( indexes.begin() + indexOffset );
+ return true;
+ }
+
+ BSONObj BSONCollectionCatalogEntry::MetaData::toBSON() const {
+ BSONObjBuilder b;
+ b.append( "ns", ns );
+ {
+ BSONArrayBuilder arr( b.subarrayStart( "indexes" ) );
+ for ( unsigned i = 0; i < indexes.size(); i++ ) {
+ BSONObjBuilder sub( arr.subobjStart() );
+ sub.append( "spec", indexes[i].spec );
+ sub.appendBool( "ready", indexes[i].ready );
+ sub.appendBool( "multikey", indexes[i].multikey );
+ sub.append( "head_a", indexes[i].head.a() );
+ sub.append( "head_b", indexes[i].head.getOfs() );
+ sub.done();
+ }
+ arr.done();
+ }
+ return b.obj();
+ }
+
+ void BSONCollectionCatalogEntry::MetaData::parse( const BSONObj& obj ) {
+ ns = obj["ns"].valuestrsafe();
+
+ BSONElement e = obj["indexes"];
+ if ( e.isABSONObj() ) {
+ std::vector<BSONElement> entries = e.Array();
+ for ( unsigned i = 0; i < entries.size(); i++ ) {
+ BSONObj idx = entries[i].Obj();
+ IndexMetaData imd;
+ imd.spec = idx["spec"].Obj().getOwned();
+ imd.ready = idx["ready"].trueValue();
+ imd.head = DiskLoc( idx["head_a"].Int(),
+ idx["head_b"].Int() );
+ imd.multikey = idx["multikey"].trueValue();
+ indexes.push_back( imd );
+ }
+ }
+ }
+}
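For reference, the document shape MetaData::toBSON() produces, with hypothetical values:

    {
        ns: "test.foo",
        indexes: [ { spec: { v: 1, key: { _id: 1 }, name: "_id_", ns: "test.foo" },
                     ready: true,
                     multikey: false,
                     head_a: 0,        // DiskLoc file number, head.a()
                     head_b: 4096 } ]  // DiskLoc offset, head.getOfs()
    }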
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.h b/src/mongo/db/storage/bson_collection_catalog_entry.h
new file mode 100644
index 00000000000..85fa1f887d4
--- /dev/null
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.h
@@ -0,0 +1,105 @@
+// bson_collection_catalog_entry.h
+
+/**
+ * Copyright (C) 2014 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include "mongo/db/catalog/collection_catalog_entry.h"
+
+namespace mongo {
+
+ /**
+ * This is a helper class for any storage engine that wants to store catalog information
+ * as BSON. It is totally optional to use this.
+ */
+ class BSONCollectionCatalogEntry : public CollectionCatalogEntry {
+ public:
+ BSONCollectionCatalogEntry( const StringData& ns );
+
+ virtual ~BSONCollectionCatalogEntry(){}
+
+ virtual CollectionOptions getCollectionOptions( OperationContext* txn ) const;
+
+ virtual int getTotalIndexCount( OperationContext* txn ) const;
+
+ virtual int getCompletedIndexCount( OperationContext* txn ) const;
+
+ virtual BSONObj getIndexSpec( OperationContext* txn,
+ const StringData& idxName ) const;
+
+ virtual void getAllIndexes( OperationContext* txn,
+ std::vector<std::string>* names ) const;
+
+ virtual bool isIndexMultikey( OperationContext* txn,
+ const StringData& indexName) const;
+
+ virtual DiskLoc getIndexHead( OperationContext* txn,
+ const StringData& indexName ) const;
+
+ virtual bool isIndexReady( OperationContext* txn,
+ const StringData& indexName ) const;
+
+ // ------ for implementors
+
+ struct IndexMetaData {
+ IndexMetaData() {}
+ IndexMetaData( BSONObj s, bool r, DiskLoc h, bool m )
+ : spec( s ), ready( r ), head( h ), multikey( m ) {}
+
+ BSONObj spec;
+ bool ready;
+ DiskLoc head;
+ bool multikey;
+ };
+
+ struct MetaData {
+ void parse( const BSONObj& obj );
+ BSONObj toBSON() const;
+
+ int findIndexOffset( const StringData& name ) const;
+
+ /**
+ * Removes information about an index from the MetaData. Returns true if an index
+ * called name existed and was deleted, and false otherwise.
+ */
+ bool eraseIndex( const StringData& name );
+
+ std::string ns;
+ std::vector<IndexMetaData> indexes;
+ };
+
+ protected:
+ virtual MetaData _getMetaData( OperationContext* txn ) const = 0;
+
+ };
+
+}
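
A minimal sketch of how a storage engine might plug into this helper: only the _getMetaData() hook must be supplied, and every read-side accessor above is answered from it. MyEngineCatalogEntry and _persisted are invented for illustration:

    class MyEngineCatalogEntry : public BSONCollectionCatalogEntry {
    public:
        MyEngineCatalogEntry( const StringData& ns, const BSONObj& persisted )
            : BSONCollectionCatalogEntry( ns ), _persisted( persisted.getOwned() ) {}

    protected:
        virtual MetaData _getMetaData( OperationContext* txn ) const {
            MetaData md;
            md.parse( _persisted );   // one hook feeds getIndexSpec(), isIndexReady(), etc.
            return md;
        }

    private:
        BSONObj _persisted;   // engine-specific copy of MetaData::toBSON()
    };
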
diff --git a/src/mongo/db/storage/heap1/heap1_btree_impl.cpp b/src/mongo/db/storage/heap1/heap1_btree_impl.cpp
index b5f8f6f3c75..4f4e34f6813 100644
--- a/src/mongo/db/storage/heap1/heap1_btree_impl.cpp
+++ b/src/mongo/db/storage/heap1/heap1_btree_impl.cpp
@@ -184,7 +184,7 @@ namespace {
return Status::OK();
}
- virtual bool isEmpty() {
+ virtual bool isEmpty(OperationContext* txn) {
return _data->empty();
}
diff --git a/src/mongo/db/storage/heap1/heap1_database_catalog_entry.cpp b/src/mongo/db/storage/heap1/heap1_database_catalog_entry.cpp
index fb752a0f295..24bc5e5438b 100644
--- a/src/mongo/db/storage/heap1/heap1_database_catalog_entry.cpp
+++ b/src/mongo/db/storage/heap1/heap1_database_catalog_entry.cpp
@@ -166,11 +166,11 @@ namespace mongo {
indexes.clear();
}
- int Heap1DatabaseCatalogEntry::Entry::getTotalIndexCount() const {
+ int Heap1DatabaseCatalogEntry::Entry::getTotalIndexCount( OperationContext* txn ) const {
return static_cast<int>( indexes.size() );
}
- int Heap1DatabaseCatalogEntry::Entry::getCompletedIndexCount() const {
+ int Heap1DatabaseCatalogEntry::Entry::getCompletedIndexCount( OperationContext* txn ) const {
int ready = 0;
for ( Indexes::const_iterator i = indexes.begin(); i != indexes.end(); ++i )
if ( i->second->ready )
@@ -178,18 +178,21 @@ namespace mongo {
return ready;
}
- void Heap1DatabaseCatalogEntry::Entry::getAllIndexes( std::vector<std::string>* names ) const {
+ void Heap1DatabaseCatalogEntry::Entry::getAllIndexes( OperationContext* txn,
+ std::vector<std::string>* names ) const {
for ( Indexes::const_iterator i = indexes.begin(); i != indexes.end(); ++i )
names->push_back( i->second->name );
}
- BSONObj Heap1DatabaseCatalogEntry::Entry::getIndexSpec( const StringData& idxName ) const {
+ BSONObj Heap1DatabaseCatalogEntry::Entry::getIndexSpec( OperationContext* txn,
+ const StringData& idxName ) const {
Indexes::const_iterator i = indexes.find( idxName.toString() );
invariant( i != indexes.end() );
return i->second->spec;
}
- bool Heap1DatabaseCatalogEntry::Entry::isIndexMultikey( const StringData& idxName) const {
+ bool Heap1DatabaseCatalogEntry::Entry::isIndexMultikey( OperationContext* txn,
+ const StringData& idxName) const {
Indexes::const_iterator i = indexes.find( idxName.toString() );
invariant( i != indexes.end() );
return i->second->isMultikey;
@@ -207,7 +210,8 @@ namespace mongo {
return true;
}
- DiskLoc Heap1DatabaseCatalogEntry::Entry::getIndexHead( const StringData& idxName ) const {
+ DiskLoc Heap1DatabaseCatalogEntry::Entry::getIndexHead( OperationContext* txn,
+ const StringData& idxName ) const {
Indexes::const_iterator i = indexes.find( idxName.toString() );
invariant( i != indexes.end() );
return i->second->head;
@@ -221,7 +225,8 @@ namespace mongo {
i->second->head = newHead;
}
- bool Heap1DatabaseCatalogEntry::Entry::isIndexReady( const StringData& idxName ) const {
+ bool Heap1DatabaseCatalogEntry::Entry::isIndexReady( OperationContext* txn,
+ const StringData& idxName ) const {
Indexes::const_iterator i = indexes.find( idxName.toString() );
invariant( i != indexes.end() );
return i->second->ready;
diff --git a/src/mongo/db/storage/heap1/heap1_database_catalog_entry.h b/src/mongo/db/storage/heap1/heap1_database_catalog_entry.h
index 1268d26ff3e..167a7440465 100644
--- a/src/mongo/db/storage/heap1/heap1_database_catalog_entry.h
+++ b/src/mongo/db/storage/heap1/heap1_database_catalog_entry.h
@@ -113,29 +113,34 @@ namespace mongo {
Entry( const StringData& ns, const CollectionOptions& options );
virtual ~Entry();
- int getTotalIndexCount() const;
+ int getTotalIndexCount( OperationContext* txn ) const;
- int getCompletedIndexCount() const;
+ int getCompletedIndexCount( OperationContext* txn ) const;
int getMaxAllowedIndexes() const { return 64; }
- void getAllIndexes( std::vector<std::string>* names ) const;
+ void getAllIndexes( OperationContext* txn,
+ std::vector<std::string>* names ) const;
- BSONObj getIndexSpec( const StringData& idxName ) const;
+ BSONObj getIndexSpec( OperationContext* txn,
+ const StringData& idxName ) const;
- bool isIndexMultikey( const StringData& indexName) const;
+ bool isIndexMultikey( OperationContext* txn,
+ const StringData& indexName) const;
bool setIndexIsMultikey(OperationContext* txn,
const StringData& indexName,
bool multikey = true);
- DiskLoc getIndexHead( const StringData& indexName ) const;
+ DiskLoc getIndexHead( OperationContext* txn,
+ const StringData& indexName ) const;
void setIndexHead( OperationContext* txn,
const StringData& indexName,
const DiskLoc& newHead );
- bool isIndexReady( const StringData& indexName ) const;
+ bool isIndexReady( OperationContext* txn,
+ const StringData& indexName ) const;
Status removeIndex( OperationContext* txn,
const StringData& indexName );
diff --git a/src/mongo/db/storage/heap1/heap1_test.cpp b/src/mongo/db/storage/heap1/heap1_test.cpp
index 1a5dc7b5f80..f6863b333ed 100644
--- a/src/mongo/db/storage/heap1/heap1_test.cpp
+++ b/src/mongo/db/storage/heap1/heap1_test.cpp
@@ -80,8 +80,8 @@ namespace {
RecordStore* rs = db.getRecordStore( &op, "foo.bar" );
StatusWith<DiskLoc> loc = rs->insertRecord( &op, "abc", 4, -1 );
ASSERT_OK( loc.getStatus() );
- ASSERT_EQUALS( 1, rs->numRecords() );
- ASSERT_EQUALS( std::string( "abc" ), rs->dataFor( loc.getValue() ).data() );
+ ASSERT_EQUALS( 1, rs->numRecords( &op ) );
+ ASSERT_EQUALS( std::string( "abc" ), rs->dataFor( &op, loc.getValue() ).data() );
}
}
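
The test change above is the new calling convention in miniature: the context already in scope is handed to every read. A distilled sketch, where op stands for whatever OperationContext the test constructed:

    ASSERT_EQUALS( 1, rs->numRecords( &op ) );                      // was: rs->numRecords()
    ASSERT_EQUALS( std::string( "abc" ),
                   rs->dataFor( &op, loc.getValue() ).data() );     // was: rs->dataFor( loc )
    // Engines that ignore the context also tolerate NULL; the btree tests
    // further down lean on that with numRecords(NULL) against the heap store.
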
diff --git a/src/mongo/db/storage/heap1/record_store_heap.cpp b/src/mongo/db/storage/heap1/record_store_heap.cpp
index 3145e6f1c75..06d55847b1b 100644
--- a/src/mongo/db/storage/heap1/record_store_heap.cpp
+++ b/src/mongo/db/storage/heap1/record_store_heap.cpp
@@ -63,7 +63,7 @@ namespace mongo {
const char* HeapRecordStore::name() const { return "heap"; }
- RecordData HeapRecordStore::dataFor( const DiskLoc& loc ) const {
+ RecordData HeapRecordStore::dataFor( OperationContext* txn, const DiskLoc& loc ) const {
return recordFor(loc)->toRecordData();
}
@@ -79,21 +79,21 @@ namespace mongo {
invariant(_records.erase(loc) == 1);
}
- bool HeapRecordStore::cappedAndNeedDelete() const {
+ bool HeapRecordStore::cappedAndNeedDelete(OperationContext* txn) const {
if (!_isCapped)
return false;
if (_dataSize > _cappedMaxSize)
return true;
- if ((_cappedMaxDocs != -1) && (numRecords() > _cappedMaxDocs))
+ if ((_cappedMaxDocs != -1) && (numRecords(txn) > _cappedMaxDocs))
return true;
return false;
}
void HeapRecordStore::cappedDeleteAsNeeded(OperationContext* txn) {
- while (cappedAndNeedDelete()) {
+ while (cappedAndNeedDelete(txn)) {
invariant(!_records.empty());
DiskLoc oldest = _records.begin()->first;
@@ -340,7 +340,7 @@ namespace mongo {
BSONObjBuilder* extraInfo,
int infoLevel) const {
// Note: not making use of extraInfo or infoLevel since we don't have extents
- const int64_t recordOverhead = numRecords() * HeapRecord::HeaderSize;
+ const int64_t recordOverhead = numRecords(txn) * HeapRecord::HeaderSize;
return _dataSize + recordOverhead;
}
@@ -439,7 +439,7 @@ namespace mongo {
}
RecordData HeapRecordIterator::dataFor(const DiskLoc& loc) const {
- return _rs.dataFor(loc);
+ return _rs.dataFor(_txn, loc);
}
//
@@ -504,7 +504,7 @@ namespace mongo {
}
RecordData HeapRecordReverseIterator::dataFor(const DiskLoc& loc) const {
- return _rs.dataFor(loc);
+ return _rs.dataFor(_txn, loc);
}
} // namespace mongo
diff --git a/src/mongo/db/storage/heap1/record_store_heap.h b/src/mongo/db/storage/heap1/record_store_heap.h
index 05180436023..4ac30ac4a2f 100644
--- a/src/mongo/db/storage/heap1/record_store_heap.h
+++ b/src/mongo/db/storage/heap1/record_store_heap.h
@@ -55,7 +55,7 @@ namespace mongo {
virtual const char* name() const;
- virtual RecordData dataFor( const DiskLoc& loc ) const;
+ virtual RecordData dataFor( OperationContext* txn, const DiskLoc& loc ) const;
virtual void deleteRecord( OperationContext* txn, const DiskLoc& dl );
@@ -121,9 +121,9 @@ namespace mongo {
BSONObjBuilder* extraInfo = NULL,
int infoLevel = 0) const;
- virtual long long dataSize() const { return _dataSize; }
+ virtual long long dataSize( OperationContext* txn ) const { return _dataSize; }
- virtual long long numRecords() const { return _records.size(); }
+ virtual long long numRecords( OperationContext* txn ) const { return _records.size(); }
protected:
class HeapRecord {
@@ -161,7 +161,7 @@ namespace mongo {
private:
DiskLoc allocateLoc();
- bool cappedAndNeedDelete() const;
+ bool cappedAndNeedDelete(OperationContext* txn) const;
void cappedDeleteAsNeeded(OperationContext* txn);
// TODO figure out a proper solution to metadata
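
The header change above is the whole patch in one file: each read-only accessor gains a leading OperationContext*, even where this in-memory engine has no use for it, so every engine exposes the same read-side signatures. A self-contained distillation, with all names invented for the sketch:

    #include <map>

    struct OperationContext { /* locks, recovery unit, stats... */ };

    class RecordStoreSketch {
    public:
        // Accepted even though this engine ignores it; disk-backed engines use
        // the context to tie the read to the caller's unit of work.
        long long numRecords( OperationContext* txn ) const {
            (void)txn;
            return static_cast<long long>( _records.size() );
        }

    private:
        std::map<long, int> _records;
    };
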
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp
index f1ca5d55427..f21f1dac523 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp
@@ -99,7 +99,7 @@ namespace mongo {
}
virtual long long getSpaceUsedBytes( OperationContext* txn ) const {
- return _btree->getRecordStore()->dataSize();
+ return _btree->getRecordStore()->dataSize( txn );
}
virtual Status dupKeyCheck(OperationContext* txn,
@@ -108,8 +108,8 @@ namespace mongo {
return _btree->dupKeyCheck(txn, key, loc);
}
- virtual bool isEmpty() {
- return _btree->isEmpty();
+ virtual bool isEmpty(OperationContext* txn) {
+ return _btree->isEmpty(txn);
}
virtual Status touch(OperationContext* txn) const{
@@ -124,7 +124,7 @@ namespace mongo {
: _txn(txn),
_btree(btree),
_direction(direction),
-              _bucket(btree->getHead()), // XXX this shouldn't be necessary, but is.
+              _bucket(btree->getHead(txn)), // XXX this shouldn't be necessary, but is.
_ofs(0) {
}
@@ -185,11 +185,11 @@ namespace mongo {
}
virtual BSONObj getKey() const {
- return _btree->getKey(_bucket, _ofs);
+ return _btree->getKey(_txn, _bucket, _ofs);
}
virtual DiskLoc getDiskLoc() const {
- return _btree->getDiskLoc(_bucket, _ofs);
+ return _btree->getDiskLoc(_txn, _bucket, _ofs);
}
virtual void advance() {
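
The cursor hunk above shows the other half of the idiom: const accessors such as getKey() cannot grow a parameter without breaking every call site, so the cursor captures _txn at construction and forwards it. A minimal sketch of that shape; BtreeSketch and readKey() are invented:

    class CursorSketch {
    public:
        CursorSketch( OperationContext* txn, BtreeSketch* btree )
            : _txn( txn ), _btree( btree ) {}

        // Stays const and parameterless; the stored context is forwarded,
        // mirroring _btree->getKey(_txn, _bucket, _ofs) in the hunk above.
        BSONObj getKey() const { return _btree->readKey( _txn ); }

    private:
        OperationContext* _txn;   // not owned; must outlive the cursor
        BtreeSketch* _btree;
    };
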
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
index b177cb42343..120c9738b25 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
@@ -76,7 +76,7 @@ namespace mongo {
// The normal bulk building path calls initAsEmpty, so we already have an empty root bucket.
// This isn't the case in some unit tests that use the Builder directly rather than going
// through an IndexAccessMethod.
- _rightLeafLoc = _logic->_headManager->getHead();
+ _rightLeafLoc = _logic->_headManager->getHead(txn);
if (_rightLeafLoc.isNull()) {
_rightLeafLoc = _logic->_addBucket(txn);
_logic->_headManager->setHead(_txn, _rightLeafLoc);
@@ -130,12 +130,13 @@ namespace mongo {
//
template <class BtreeLayout>
- DiskLoc BtreeLogic<BtreeLayout>::Builder::newBucket(BucketType* leftSib, DiskLoc leftSibLoc) {
+ DiskLoc BtreeLogic<BtreeLayout>::Builder::newBucket(BucketType* leftSib,
+ DiskLoc leftSibLoc) {
invariant(leftSib->n >= 2); // Guaranteed by sufficiently small KeyMax.
if (leftSib->parent.isNull()) {
// Making a new root
- invariant(leftSibLoc == _logic->_headManager->getHead());
+ invariant(leftSibLoc == _logic->_headManager->getHead(_txn));
const DiskLoc newRootLoc = _logic->_addBucket(_txn);
leftSib->parent = newRootLoc;
_logic->_headManager->setHead(_txn, newRootLoc);
@@ -177,13 +178,13 @@ namespace mongo {
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::BucketType*
BtreeLogic<BtreeLayout>::Builder::_getModifiableBucket(DiskLoc loc) {
- return _logic->btreemod(_txn, _logic->getBucket(loc));
+ return _logic->btreemod(_txn, _logic->getBucket(_txn, loc));
}
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::BucketType*
BtreeLogic<BtreeLayout>::Builder::_getBucket(DiskLoc loc) {
- return _logic->getBucket(loc);
+ return _logic->getBucket(_txn, loc);
}
//
@@ -420,7 +421,7 @@ namespace mongo {
}
}
- invariant(getBucket(bucketLoc) == bucket);
+ invariant(getBucket(txn, bucketLoc) == bucket);
{
// declare that we will write to [k(keypos),k(n)]
@@ -492,7 +493,7 @@ namespace mongo {
const DiskLoc thisLoc,
int &refPos) {
- invariant(getBucket(thisLoc) == bucket);
+ invariant(getBucket(txn, thisLoc) == bucket);
if (bucket->flags & Packed) {
return;
@@ -687,7 +688,7 @@ namespace mongo {
DiskLoc* loc,
int* pos,
int direction) const {
- while (!loc->isNull() && !keyIsUsed(*loc, *pos)) {
+ while (!loc->isNull() && !keyIsUsed(txn, *loc, *pos)) {
*loc = advance(txn, *loc, pos, direction);
}
}
@@ -736,7 +737,7 @@ namespace mongo {
const vector<bool>& keyEndInclusive,
int direction) const {
- BucketType* bucket = getBucket(*thisLocInOut);
+ BucketType* bucket = getBucket(txn, *thisLocInOut);
int l, h;
bool dontGoUp;
@@ -772,7 +773,8 @@ namespace mongo {
if (dontGoUp) {
// this comparison result assures h > l
- if (!customFind(l,
+ if (!customFind(txn,
+ l,
h,
keyBegin,
keyBeginLen,
@@ -791,7 +793,8 @@ namespace mongo {
// go up parents until rightmost/leftmost node is >=/<= target or at top
while (!bucket->parent.isNull()) {
*thisLocInOut = bucket->parent;
- bucket = getBucket(*thisLocInOut);
+ bucket = getBucket(txn,
+ *thisLocInOut);
if (direction > 0) {
if (customBSONCmp(getFullKey(bucket, bucket->n - 1).data.toBson(),
@@ -844,7 +847,7 @@ namespace mongo {
int direction,
pair<DiskLoc, int>& bestParent) const {
- BucketType* bucket = getBucket(*locInOut);
+ BucketType* bucket = getBucket(txn, *locInOut);
if (0 == bucket->n) {
*locInOut = DiskLoc();
@@ -885,7 +888,7 @@ namespace mongo {
if (!next.isNull()) {
bestParent = pair<DiskLoc, int>(*locInOut, *keyOfsInOut);
*locInOut = next;
- bucket = getBucket(*locInOut);
+ bucket = getBucket(txn, *locInOut);
continue;
}
else {
@@ -919,12 +922,13 @@ namespace mongo {
}
else {
*locInOut = next;
- bucket = getBucket(*locInOut);
+ bucket = getBucket(txn, *locInOut);
continue;
}
}
- if (!customFind(l,
+ if (!customFind(txn,
+ l,
h,
keyBegin,
keyBeginLen,
@@ -939,12 +943,13 @@ namespace mongo {
return;
}
- bucket = getBucket(*locInOut);
+ bucket = getBucket(txn, *locInOut);
}
}
template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::customFind(int low,
+ bool BtreeLogic<BtreeLayout>::customFind(OperationContext* txn,
+ int low,
int high,
const BSONObj& keyBegin,
int keyBeginLen,
@@ -957,7 +962,7 @@ namespace mongo {
int* keyOfsInOut,
pair<DiskLoc, int>& bestParent) const {
- const BucketType* bucket = getBucket(*thisLocInOut);
+ const BucketType* bucket = getBucket(txn, *thisLocInOut);
for (;;) {
if (low + 1 == high) {
@@ -1068,10 +1073,10 @@ namespace mongo {
// Find the DiskLoc
bool found;
- DiskLoc bucket = _locate(txn, getRootLoc(), key, &position, &found, minDiskLoc, 1);
+ DiskLoc bucket = _locate(txn, getRootLoc(txn), key, &position, &found, minDiskLoc, 1);
while (!bucket.isNull()) {
- FullKey fullKey = getFullKey(getBucket(bucket), position);
+ FullKey fullKey = getFullKey(getBucket(txn, bucket), position);
if (fullKey.header.isUsed()) {
return fullKey.data.woEqual(key);
}
@@ -1100,10 +1105,10 @@ namespace mongo {
int position;
bool found;
- DiskLoc posLoc = _locate(txn, getRootLoc(), key, &position, &found, minDiskLoc, 1);
+ DiskLoc posLoc = _locate(txn, getRootLoc(txn), key, &position, &found, minDiskLoc, 1);
while (!posLoc.isNull()) {
- FullKey fullKey = getFullKey(getBucket(posLoc), position);
+ FullKey fullKey = getFullKey(getBucket(txn, posLoc), position);
if (fullKey.header.isUsed()) {
// TODO: we may not need fullKey.data until we know fullKey.header.isUsed() here
// and elsewhere.
@@ -1251,11 +1256,11 @@ namespace mongo {
void BtreeLogic<BtreeLayout>::delBucket(OperationContext* txn,
BucketType* bucket,
const DiskLoc bucketLoc) {
- invariant(bucketLoc != getRootLoc());
+ invariant(bucketLoc != getRootLoc(txn));
_bucketDeletion->aboutToDeleteBucket(bucketLoc);
- BucketType* p = getBucket(bucket->parent);
+ BucketType* p = getBucket(txn, bucket->parent);
int parentIdx = indexInParent(txn, bucket, bucketLoc);
*txn->recoveryUnit()->writing(&childLocForPos(p, parentIdx)) = DiskLoc();
deallocBucket(txn, bucket, bucketLoc);
@@ -1289,7 +1294,7 @@ namespace mongo {
invariant(*keyOffsetInOut >= 0);
- BucketType* bucket = getBucket(*bucketLocInOut);
+ BucketType* bucket = getBucket(txn, *bucketLocInOut);
invariant(bucket);
invariant(BtreeLayout::INVALID_N_SENTINEL != bucket->n);
@@ -1401,7 +1406,7 @@ namespace mongo {
// advanceLoc must be a descendant of thisLoc, because thisLoc has a
// child in the proper direction and all descendants of thisLoc must be
// nonempty because they are not the root.
- BucketType* advanceBucket = getBucket(advanceLoc);
+ BucketType* advanceBucket = getBucket(txn, advanceLoc);
if (!childLocForPos(advanceBucket, advanceKeyOfs).isNull()
|| !childLocForPos(advanceBucket, advanceKeyOfs + 1).isNull()) {
@@ -1427,17 +1432,17 @@ namespace mongo {
invariant(bucket->n == 0 && !bucket->nextChild.isNull() );
if (bucket->parent.isNull()) {
- invariant(getRootLoc() == bucketLoc);
+ invariant(getRootLoc(txn) == bucketLoc);
_headManager->setHead(txn, bucket->nextChild);
}
else {
- BucketType* parentBucket = getBucket(bucket->parent);
+ BucketType* parentBucket = getBucket(txn, bucket->parent);
int bucketIndexInParent = indexInParent(txn, bucket, bucketLoc);
*txn->recoveryUnit()->writing(&childLocForPos(parentBucket, bucketIndexInParent)) =
bucket->nextChild;
}
- *txn->recoveryUnit()->writing(&getBucket(bucket->nextChild)->parent) = bucket->parent;
+ *txn->recoveryUnit()->writing(&getBucket(txn, bucket->nextChild)->parent) = bucket->parent;
_bucketDeletion->aboutToDeleteBucket(bucketLoc);
deallocBucket(txn, bucket, bucketLoc);
}
@@ -1458,8 +1463,8 @@ namespace mongo {
int pos = 0;
- BucketType* leftBucket = getBucket(leftNodeLoc);
- BucketType* rightBucket = getBucket(rightNodeLoc);
+ BucketType* leftBucket = getBucket(txn, leftNodeLoc);
+ BucketType* rightBucket = getBucket(txn, rightNodeLoc);
int sum = BucketType::HeaderSize
+ _packedDataSize(leftBucket, pos)
@@ -1481,8 +1486,8 @@ namespace mongo {
int split = -1;
int rightSize = 0;
- const BucketType* l = childForPos(bucket, leftIndex);
- const BucketType* r = childForPos(bucket, leftIndex + 1);
+ const BucketType* l = childForPos(txn, bucket, leftIndex);
+ const BucketType* r = childForPos(txn, bucket, leftIndex + 1);
int KNS = sizeof(KeyHeaderType);
int rightSizeLimit = ( l->topSize
@@ -1541,8 +1546,8 @@ namespace mongo {
DiskLoc leftNodeLoc = childLocForPos(bucket, leftIndex);
DiskLoc rightNodeLoc = childLocForPos(bucket, leftIndex + 1);
- BucketType* l = btreemod(txn, getBucket(leftNodeLoc));
- BucketType* r = btreemod(txn, getBucket(rightNodeLoc));
+ BucketType* l = btreemod(txn, getBucket(txn, leftNodeLoc));
+ BucketType* r = btreemod(txn, getBucket(txn, rightNodeLoc));
int pos = 0;
_packReadyForMod(l, pos);
@@ -1584,7 +1589,7 @@ namespace mongo {
BucketType* bucket,
const DiskLoc bucketLoc) const {
invariant(!bucket->parent.isNull());
- const BucketType* p = getBucket(bucket->parent);
+ const BucketType* p = getBucket(txn, bucket->parent);
if (p->nextChild == bucketLoc) {
return p->n;
}
@@ -1715,10 +1720,10 @@ namespace mongo {
DiskLoc rchild = childLocForPos(bucket, leftIndex + 1);
int zeropos = 0;
- BucketType* l = btreemod(txn, getBucket(lchild));
+ BucketType* l = btreemod(txn, getBucket(txn, lchild));
_packReadyForMod(l, zeropos);
- BucketType* r = btreemod(txn, getBucket(rchild));
+ BucketType* r = btreemod(txn, getBucket(txn, rchild));
_packReadyForMod(r, zeropos);
int split = _rebalancedSeparatorPos(txn, bucket, leftIndex);
@@ -1746,7 +1751,7 @@ namespace mongo {
return false;
}
- BucketType* p = getBucket(bucket->parent);
+ BucketType* p = getBucket(txn, bucket->parent);
int parentIdx = indexInParent(txn, bucket, bucketLoc);
// TODO will missing neighbor case be possible long term? Should we try to merge/balance
@@ -1765,7 +1770,7 @@ namespace mongo {
return true;
}
- BucketType* pm = btreemod(txn, getBucket(bucket->parent));
+ BucketType* pm = btreemod(txn, getBucket(txn, bucket->parent));
if (mayBalanceRight) {
doMergeChildren(txn, pm, bucket->parent, parentIdx);
return true;
@@ -1786,18 +1791,18 @@ namespace mongo {
bool found = false;
KeyDataOwnedType ownedKey(key);
- DiskLoc loc = _locate(txn, getRootLoc(), ownedKey, &pos, &found, recordLoc, 1);
+ DiskLoc loc = _locate(txn, getRootLoc(txn), ownedKey, &pos, &found, recordLoc, 1);
if (found) {
- BucketType* bucket = btreemod(txn, getBucket(loc));
+ BucketType* bucket = btreemod(txn, getBucket(txn, loc));
delKeyAtPos(txn, bucket, loc, pos);
- assertValid(_indexName, getRoot(), _ordering);
+ assertValid(_indexName, getRoot(txn), _ordering);
}
return found;
}
template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::isEmpty() const {
- return getRoot()->n == 0;
+ bool BtreeLogic<BtreeLayout>::isEmpty(OperationContext* txn) const {
+ return getRoot(txn)->n == 0;
}
/**
@@ -1811,7 +1816,7 @@ namespace mongo {
int firstIndex,
int lastIndex) {
- invariant(getBucket(bucketLoc) == bucket);
+ invariant(getBucket(txn, bucketLoc) == bucket);
if (lastIndex == -1) {
lastIndex = bucket->n;
@@ -1820,7 +1825,7 @@ namespace mongo {
for (int i = firstIndex; i <= lastIndex; i++) {
const DiskLoc childLoc = childLocForPos(bucket, i);
if (!childLoc.isNull()) {
- *txn->recoveryUnit()->writing(&getBucket(childLoc)->parent) = bucketLoc;
+ *txn->recoveryUnit()->writing(&getBucket(txn, childLoc)->parent) = bucketLoc;
}
}
}
@@ -1868,7 +1873,7 @@ namespace mongo {
const DiskLoc leftChildLoc,
const DiskLoc rightChildLoc) {
- BucketType* bucket = getBucket(bucketLoc);
+ BucketType* bucket = getBucket(txn, bucketLoc);
if (!basicInsert(txn, bucket, bucketLoc, pos, key, recordLoc)) {
// If basicInsert() fails, the bucket will be packed as required by split().
@@ -1887,7 +1892,7 @@ namespace mongo {
invariant(kn->prevChildBucket == leftChildLoc);
*txn->recoveryUnit()->writing(&bucket->nextChild) = rightChildLoc;
if (!rightChildLoc.isNull()) {
- *txn->recoveryUnit()->writing(&getBucket(rightChildLoc)->parent) = bucketLoc;
+ *txn->recoveryUnit()->writing(&getBucket(txn, rightChildLoc)->parent) = bucketLoc;
}
}
else {
@@ -1900,7 +1905,7 @@ namespace mongo {
// Intent declared in basicInsert()
*const_cast<LocType*>(pc) = rightChildLoc;
if (!rightChildLoc.isNull()) {
- *txn->recoveryUnit()->writing(&getBucket(rightChildLoc)->parent) = bucketLoc;
+ *txn->recoveryUnit()->writing(&getBucket(txn, rightChildLoc)->parent) = bucketLoc;
}
}
}
@@ -1917,7 +1922,7 @@ namespace mongo {
int split = splitPos(bucket, keypos);
DiskLoc rLoc = _addBucket(txn);
- BucketType* r = btreemod(txn, getBucket(rLoc));
+ BucketType* r = btreemod(txn, getBucket(txn, rLoc));
for (int i = split + 1; i < bucket->n; i++) {
FullKey kn = getFullKey(bucket, i);
@@ -1927,7 +1932,7 @@ namespace mongo {
assertValid(_indexName, r, _ordering);
r = NULL;
- fixParentPtrs(txn, getBucket(rLoc), rLoc);
+ fixParentPtrs(txn, getBucket(txn, rLoc), rLoc);
FullKey splitkey = getFullKey(bucket, split);
// splitkey key gets promoted, its children will be thisLoc (l) and rLoc (r)
@@ -1939,20 +1944,20 @@ namespace mongo {
if (bucket->parent.isNull()) {
// promote splitkey to a parent node; make a new parent if we were the root
DiskLoc L = _addBucket(txn);
- BucketType* p = btreemod(txn, getBucket(L));
+ BucketType* p = btreemod(txn, getBucket(txn, L));
invariant(pushBack(p, splitkey.recordLoc, splitkey.data, bucketLoc));
p->nextChild = rLoc;
assertValid(_indexName, p, _ordering);
bucket->parent = L;
_headManager->setHead(txn, L);
- *txn->recoveryUnit()->writing(&getBucket(rLoc)->parent) = bucket->parent;
+ *txn->recoveryUnit()->writing(&getBucket(txn, rLoc)->parent) = bucket->parent;
}
else {
// set this before calling _insert - if it splits it will do fixParent() logic and
// change the value.
- *txn->recoveryUnit()->writing(&getBucket(rLoc)->parent) = bucket->parent;
+ *txn->recoveryUnit()->writing(&getBucket(txn, rLoc)->parent) = bucket->parent;
_insert(txn,
- getBucket(bucket->parent),
+ getBucket(txn, bucket->parent),
bucket->parent,
splitkey.data,
splitkey.recordLoc,
@@ -1987,7 +1992,7 @@ namespace mongo {
template <class BtreeLayout>
Status BtreeLogic<BtreeLayout>::initAsEmpty(OperationContext* txn) {
- if (!_headManager->getHead().isNull()) {
+ if (!_headManager->getHead(txn).isNull()) {
return Status(ErrorCodes::InternalError, "index already initialized");
}
@@ -2003,7 +2008,7 @@ namespace mongo {
uassertStatusOK(loc.getStatus());
// this is a new bucket, not referenced by anyone, probably don't need this lock
- BucketType* b = btreemod(txn, getBucket(loc.getValue()));
+ BucketType* b = btreemod(txn, getBucket(txn, loc.getValue()));
init(b);
return loc.getValue();
}
@@ -2031,16 +2036,20 @@ namespace mongo {
}
template <class BtreeLayout>
- DiskLoc BtreeLogic<BtreeLayout>::getDiskLoc(const DiskLoc& bucketLoc, const int keyOffset) const {
+ DiskLoc BtreeLogic<BtreeLayout>::getDiskLoc(OperationContext* txn,
+ const DiskLoc& bucketLoc,
+ const int keyOffset) const {
invariant(!bucketLoc.isNull());
- BucketType* bucket = getBucket(bucketLoc);
+ BucketType* bucket = getBucket(txn, bucketLoc);
return getKeyHeader(bucket, keyOffset).recordLoc;
}
template <class BtreeLayout>
- BSONObj BtreeLogic<BtreeLayout>::getKey(const DiskLoc& bucketLoc, const int keyOffset) const {
+ BSONObj BtreeLogic<BtreeLayout>::getKey(OperationContext* txn,
+ const DiskLoc& bucketLoc,
+ const int keyOffset) const {
invariant(!bucketLoc.isNull());
- BucketType* bucket = getBucket(bucketLoc);
+ BucketType* bucket = getBucket(txn, bucketLoc);
int n = bucket->n;
invariant(n != BtreeLayout::INVALID_N_SENTINEL);
invariant(n >= 0);
@@ -2070,7 +2079,7 @@ namespace mongo {
bool strict,
bool dumpBuckets,
unsigned depth) {
- return _fullValidate(txn, getRootLoc(), unusedCount, strict, dumpBuckets, depth);
+ return _fullValidate(txn, getRootLoc(txn), unusedCount, strict, dumpBuckets, depth);
}
template <class BtreeLayout>
@@ -2080,7 +2089,7 @@ namespace mongo {
bool strict,
bool dumpBuckets,
unsigned depth) {
- BucketType* bucket = getBucket(bucketLoc);
+ BucketType* bucket = getBucket(txn, bucketLoc);
assertValid(_indexName, bucket, _ordering, true);
if (dumpBuckets) {
@@ -2102,7 +2111,7 @@ namespace mongo {
if (!kn.prevChildBucket.isNull()) {
DiskLoc left = kn.prevChildBucket;
- BucketType* b = getBucket(left);
+ BucketType* b = getBucket(txn, left);
if (strict) {
invariant(b->parent == bucketLoc);
@@ -2116,7 +2125,7 @@ namespace mongo {
}
if (!bucket->nextChild.isNull()) {
- BucketType* b = getBucket(bucket->nextChild);
+ BucketType* b = getBucket(txn, bucket->nextChild);
if (strict) {
invariant(b->parent == bucketLoc);
}
@@ -2213,15 +2222,15 @@ namespace mongo {
}
Status status = _insert(txn,
- getRoot(),
- getRootLoc(),
+ getRoot(txn),
+ getRootLoc(txn),
key,
value,
dupsAllowed,
DiskLoc(),
DiskLoc());
- assertValid(_indexName, getRoot(), _ordering);
+ assertValid(_indexName, getRoot(txn), _ordering);
return status;
}
@@ -2266,7 +2275,7 @@ namespace mongo {
}
else {
return _insert(txn,
- getBucket(childLoc),
+ getBucket(txn, childLoc),
childLoc,
key,
recordLoc,
@@ -2281,7 +2290,7 @@ namespace mongo {
const DiskLoc& bucketLoc,
int* posInOut,
int direction) const {
- BucketType* bucket = getBucket(bucketLoc);
+ BucketType* bucket = getBucket(txn, bucketLoc);
if (*posInOut < 0 || *posInOut >= bucket->n ) {
log() << "ASSERT failure advancing btree bucket" << endl;
@@ -2297,7 +2306,7 @@ namespace mongo {
// Look down if we need to.
DiskLoc nextDownLoc = childLocForPos(bucket, ko + adj);
- BucketType* nextDown = getBucket(nextDownLoc);
+ BucketType* nextDown = getBucket(txn, nextDownLoc);
if (NULL != nextDown) {
for (;;) {
if (direction > 0) {
@@ -2307,7 +2316,7 @@ namespace mongo {
*posInOut = nextDown->n - 1;
}
DiskLoc newNextDownLoc = childLocForPos(nextDown, *posInOut + adj);
- BucketType* newNextDownBucket = getBucket(newNextDownLoc);
+ BucketType* newNextDownBucket = getBucket(txn, newNextDownLoc);
if (NULL == newNextDownBucket) {
break;
}
@@ -2325,12 +2334,12 @@ namespace mongo {
// Hit the end of the bucket, move up and over.
DiskLoc childLoc = bucketLoc;
- DiskLoc ancestor = getBucket(bucketLoc)->parent;
+ DiskLoc ancestor = getBucket(txn, bucketLoc)->parent;
for (;;) {
if (ancestor.isNull()) {
break;
}
- BucketType* an = getBucket(ancestor);
+ BucketType* an = getBucket(txn, ancestor);
for (int i = 0; i < an->n; i++) {
if (childLocForPos(an, i + adj) == childLoc) {
*posInOut = i;
@@ -2347,8 +2356,10 @@ namespace mongo {
}
template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::keyIsUsed(const DiskLoc& loc, const int& pos) const {
- return getKeyHeader(getBucket(loc), pos).isUsed();
+ bool BtreeLogic<BtreeLayout>::keyIsUsed(OperationContext* txn,
+ const DiskLoc& loc,
+ const int& pos) const {
+ return getKeyHeader(getBucket(txn, loc), pos).isUsed();
}
template <class BtreeLayout>
@@ -2365,7 +2376,7 @@ namespace mongo {
bool found = false;
KeyDataOwnedType owned(key);
- *bucketLocOut = _locate(txn, getRootLoc(), owned, posOut, &found, recordLoc, direction);
+ *bucketLocOut = _locate(txn, getRootLoc(txn), owned, posOut, &found, recordLoc, direction);
if (!found) {
return false;
@@ -2389,7 +2400,7 @@ namespace mongo {
const DiskLoc& recordLoc,
const int direction) const {
int position;
- BucketType* bucket = getBucket(bucketLoc);
+ BucketType* bucket = getBucket(txn, bucketLoc);
// XXX: owned to not owned conversion(?)
_find(txn, bucket, key, recordLoc, false, &position, foundOut);
@@ -2442,12 +2453,12 @@ namespace mongo {
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::BucketType*
- BtreeLogic<BtreeLayout>::getBucket(const DiskLoc dl) const {
+ BtreeLogic<BtreeLayout>::getBucket(OperationContext* txn, const DiskLoc dl) const {
if (dl.isNull()) {
return NULL;
}
- RecordData recordData = _recordStore->dataFor(dl);
+ RecordData recordData = _recordStore->dataFor(txn, dl);
// we need to be working on the raw bytes, not a transient copy
invariant(!recordData.isOwned());
@@ -2457,21 +2468,21 @@ namespace mongo {
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::BucketType*
- BtreeLogic<BtreeLayout>::getRoot() const {
- return getBucket(_headManager->getHead());
+ BtreeLogic<BtreeLayout>::getRoot(OperationContext* txn) const {
+ return getBucket(txn, _headManager->getHead(txn));
}
template <class BtreeLayout>
DiskLoc
- BtreeLogic<BtreeLayout>::getRootLoc() const {
- return _headManager->getHead();
+ BtreeLogic<BtreeLayout>::getRootLoc(OperationContext* txn) const {
+ return _headManager->getHead(txn);
}
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::BucketType*
- BtreeLogic<BtreeLayout>::childForPos(BucketType* bucket, int pos) const {
+ BtreeLogic<BtreeLayout>::childForPos(OperationContext* txn, BucketType* bucket, int pos) const {
DiskLoc loc = childLocForPos(bucket, pos);
- return getBucket(loc);
+ return getBucket(txn, loc);
}
template <class BtreeLayout>
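
One detail of the getBucket() change above deserves a note: the bucket pointer comes straight from _recordStore->dataFor(txn, dl), and the result is asserted unowned, i.e. the btree works on the raw mapped bytes rather than a transient copy, which is what lets btreemod() later declare write intent on those same addresses. A sketch of the invariant in isolation, with viewBucket and BucketSketch invented:

    BucketSketch* viewBucket( OperationContext* txn, const DiskLoc& dl ) {
        RecordData rd = recordStore->dataFor( txn, dl );
        invariant( !rd.isOwned() );   // must alias storage, never a copy
        return reinterpret_cast<BucketSketch*>( const_cast<char*>( rd.data() ) );
    }
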
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic.h b/src/mongo/db/storage/mmap_v1/btree/btree_logic.h
index 6b086567af2..9128fdedd05 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic.h
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic.h
@@ -166,7 +166,7 @@ namespace mongo {
const BSONObj& key,
const DiskLoc& recordLoc);
- bool isEmpty() const;
+ bool isEmpty(OperationContext* txn) const;
long long fullValidate(OperationContext*,
long long *unusedCount,
@@ -174,11 +174,15 @@ namespace mongo {
bool dumpBuckets,
unsigned depth);
- DiskLoc getDiskLoc(const DiskLoc& bucketLoc, const int keyOffset) const;
+ DiskLoc getDiskLoc(OperationContext* txn,
+ const DiskLoc& bucketLoc,
+ const int keyOffset) const;
- BSONObj getKey(const DiskLoc& bucketLoc, const int keyOffset) const;
+ BSONObj getKey(OperationContext* txn,
+ const DiskLoc& bucketLoc,
+ const int keyOffset) const;
- DiskLoc getHead() const { return _headManager->getHead(); }
+ DiskLoc getHead(OperationContext* txn) const { return _headManager->getHead(txn); }
Status touch(OperationContext* txn) const;
@@ -355,7 +359,8 @@ namespace mongo {
int* keyPositionOut,
bool* foundOut) const;
- bool customFind(int low,
+ bool customFind(OperationContext* txn,
+ int low,
int high,
const BSONObj& keyBegin,
int keyBeginLen,
@@ -382,7 +387,7 @@ namespace mongo {
const KeyDataType& key,
const DiskLoc self) const;
- bool keyIsUsed(const DiskLoc& loc, const int& pos) const;
+ bool keyIsUsed(OperationContext* txn, const DiskLoc& loc, const int& pos) const;
void skipUnusedKeys(OperationContext* txn,
DiskLoc* loc,
@@ -561,13 +566,13 @@ namespace mongo {
const DiskLoc prevChild);
- BucketType* childForPos(BucketType* bucket, int pos) const;
+ BucketType* childForPos(OperationContext* txn, BucketType* bucket, int pos) const;
- BucketType* getBucket(const DiskLoc dl) const;
+ BucketType* getBucket(OperationContext* txn, const DiskLoc dl) const;
- BucketType* getRoot() const;
+ BucketType* getRoot(OperationContext* txn) const;
- DiskLoc getRootLoc() const;
+ DiskLoc getRootLoc(OperationContext* txn) const;
//
// Data
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
index 88fb0653c42..87a7f03feb2 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
@@ -106,15 +106,16 @@ namespace mongo {
verify(!diskLoc.isNull());
- return _helper.btree.getBucket(diskLoc);
+ return _helper.btree.getBucket(NULL, diskLoc);
}
BucketType* head() const {
- return _helper.btree.getBucket(_helper.headManager.getHead());
+ OperationContextNoop txn;
+ return _helper.btree.getBucket(&txn, _helper.headManager.getHead(&txn));
}
void forcePackBucket(const DiskLoc bucketLoc) {
- BucketType* bucket = _helper.btree.getBucket(bucketLoc);
+ BucketType* bucket = _helper.btree.getBucket(NULL, bucketLoc);
bucket->topSize += bucket->emptySize;
bucket->emptySize = 0;
@@ -130,18 +131,18 @@ namespace mongo {
}
int bucketRebalancedSeparatorPos(const DiskLoc bucketLoc, int leftIndex) {
- BucketType* bucket = _helper.btree.getBucket(bucketLoc);
+ BucketType* bucket = _helper.btree.getBucket(NULL, bucketLoc);
OperationContextNoop txn;
return _helper.btree._rebalancedSeparatorPos(&txn, bucket, leftIndex);
}
FullKey getKey(const DiskLoc bucketLoc, int pos) const {
- const BucketType* bucket = _helper.btree.getBucket(bucketLoc);
+ const BucketType* bucket = _helper.btree.getBucket(NULL, bucketLoc);
return BtreeLogic<BtreeLayoutType>::getFullKey(bucket, pos);
}
void markKeyUnused(const DiskLoc bucketLoc, int keyPos) {
- BucketType* bucket = _helper.btree.getBucket(bucketLoc);
+ BucketType* bucket = _helper.btree.getBucket(NULL, bucketLoc);
invariant(keyPos >= 0 && keyPos < bucket->n);
_helper.btree.getKeyHeader(bucket, keyPos).setUnused();
@@ -158,7 +159,7 @@ namespace mongo {
void setBucketNextChild(const DiskLoc bucketLoc, const DiskLoc nextChild) {
OperationContextNoop txn;
- BucketType* bucket = _helper.btree.getBucket(bucketLoc);
+ BucketType* bucket = _helper.btree.getBucket(&txn, bucketLoc);
bucket->nextChild = nextChild;
_helper.btree.fixParentPtrs(&txn, bucket, bucketLoc);
@@ -194,7 +195,7 @@ namespace mongo {
this->insert(key, this->_helper.dummyDiskLoc);
this->checkValidNumKeys(1);
- this->locate(key, 0, true, this->_helper.headManager.getHead(), 1);
+ this->locate(key, 0, true, this->_helper.headManager.getHead(&txn), 1);
this->unindex(key);
@@ -279,9 +280,9 @@ namespace mongo {
this->insert(k, this->_helper.dummyDiskLoc);
}
- locateExtended(1, 'a', 'b', this->_helper.headManager.getHead());
- locateExtended(1, 'c', 'd', this->_helper.headManager.getHead());
- locateExtended(1, 'e', 'f', this->_helper.headManager.getHead());
+ locateExtended(1, 'a', 'b', this->_helper.headManager.getHead(&txn));
+ locateExtended(1, 'c', 'd', this->_helper.headManager.getHead(&txn));
+ locateExtended(1, 'e', 'f', this->_helper.headManager.getHead(&txn));
locateExtended(1, 'g', 'g' + 1, DiskLoc()); // of course, 'h' isn't in the index.
// old behavior
@@ -291,9 +292,9 @@ namespace mongo {
// locateExtended( -1, 'g', 'f', dl() );
locateExtended(-1, 'a', 'a' - 1, DiskLoc()); // of course, 'a' - 1 isn't in the index
- locateExtended(-1, 'c', 'b', this->_helper.headManager.getHead());
- locateExtended(-1, 'e', 'd', this->_helper.headManager.getHead());
- locateExtended(-1, 'g', 'f', this->_helper.headManager.getHead());
+ locateExtended(-1, 'c', 'b', this->_helper.headManager.getHead(&txn));
+ locateExtended(-1, 'e', 'd', this->_helper.headManager.getHead(&txn));
+ locateExtended(-1, 'g', 'f', this->_helper.headManager.getHead(&txn));
}
private:
@@ -332,7 +333,7 @@ namespace mongo {
// 'E' is the split point and should be in the head; the rest should be ~50/50
const BSONObj splitPoint = simpleKey('E', 800);
this->_helper.btree.locate(&txn, splitPoint, this->_helper.dummyDiskLoc, 1, &pos, &loc);
- ASSERT_EQUALS(this->_helper.headManager.getHead(), loc);
+ ASSERT_EQUALS(this->_helper.headManager.getHead(&txn), loc);
ASSERT_EQUALS(0, pos);
// Find the one before 'E'
@@ -382,7 +383,7 @@ namespace mongo {
// 'H' is the maximum 'large' interval key, 90% should be < 'H' and 10% larger
const BSONObj splitPoint = simpleKey('H', 800);
this->_helper.btree.locate(&txn, splitPoint, this->_helper.dummyDiskLoc, 1, &pos, &loc);
- ASSERT_EQUALS(this->_helper.headManager.getHead(), loc);
+ ASSERT_EQUALS(this->_helper.headManager.getHead(&txn), loc);
ASSERT_EQUALS(0, pos);
// Find the one before 'H'
@@ -438,10 +439,10 @@ namespace mongo {
}
// numRecords() - 1, because this->_helper.dummyDiskLoc is actually in the record store too
- ASSERT_EQUALS(3, this->_helper.recordStore.numRecords() - 1);
+ ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL) - 1);
long long expectedCount = 10 - unindexKeys();
- ASSERT_EQUALS(1, this->_helper.recordStore.numRecords() - 1);
+ ASSERT_EQUALS(1, this->_helper.recordStore.numRecords(NULL) - 1);
long long unusedCount = 0;
ASSERT_EQUALS(expectedCount, this->_helper.btree.fullValidate(&txn, &unusedCount, true, true, 0));
@@ -492,12 +493,12 @@ namespace mongo {
this->insert(k, this->_helper.dummyDiskLoc);
}
- // numRecords() - 1, because fixedDiskLoc is actually in the record store too
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords() - 1);
+ // numRecords(NULL) - 1, because fixedDiskLoc is actually in the record store too
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL) - 1);
const BSONObj k = simpleKey('a' + 17, 800);
this->unindex(k);
- ASSERT_EQUALS(3, this->_helper.recordStore.numRecords() - 1);
+ ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL) - 1);
long long unusedCount = 0;
ASSERT_EQUALS(17, this->_helper.btree.fullValidate(&txn, &unusedCount, true, true, 0));
@@ -516,7 +517,7 @@ namespace mongo {
ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(8, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << "bb");
verify(this->unindex(k));
@@ -524,7 +525,7 @@ namespace mongo {
ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(6, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{b:{a:null},d:{c:null},f:{e:null},_:{g:null}}");
}
@@ -541,7 +542,7 @@ namespace mongo {
ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(8, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << "bb");
verify(this->unindex(k));
@@ -549,7 +550,7 @@ namespace mongo {
ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(6, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}");
}
@@ -570,7 +571,7 @@ namespace mongo {
ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << "c");
verify(this->unindex(k));
@@ -578,7 +579,7 @@ namespace mongo {
ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{d:{b:{a:null}}}");
}
@@ -596,7 +597,7 @@ namespace mongo {
ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(8, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << "bb");
verify(this->unindex(k));
@@ -605,7 +606,7 @@ namespace mongo {
// Child does not currently replace parent in this case. Also, the tree
// has 6 buckets + 1 for the this->_helper.dummyDiskLoc.
- ASSERT_EQUALS(7, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{i:{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}}");
}
@@ -623,7 +624,7 @@ namespace mongo {
ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(8, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << "ff");
verify(this->unindex(k));
@@ -632,7 +633,7 @@ namespace mongo {
// Child does not currently replace parent in this case. Also, the tree
// has 6 buckets + 1 for the this->_helper.dummyDiskLoc.
- ASSERT_EQUALS(7, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{i:{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}}");
}
@@ -652,7 +653,7 @@ namespace mongo {
ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(8, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << "bb");
verify(this->unindex(k));
@@ -660,7 +661,7 @@ namespace mongo {
ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(8, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{d:{b:{a:null},cc:{c:null}},"
"dd:null,"
@@ -680,7 +681,7 @@ namespace mongo {
ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(6, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << "g");
verify(this->unindex(k));
@@ -688,7 +689,7 @@ namespace mongo {
ASSERT_EQUALS(6, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(6, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{c:{b:{a:null}},d:null,_:{f:{e:null}}}");
}
@@ -706,7 +707,7 @@ namespace mongo {
ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(8, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << "ee");
verify(this->unindex(k));
@@ -714,7 +715,7 @@ namespace mongo {
ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(7, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{c:{b:{a:null}},_:{e:{d:null},f:null,h:{g:null}}}");
}
@@ -732,7 +733,7 @@ namespace mongo {
ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(8, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << "ee");
verify(this->unindex(k));
@@ -740,7 +741,7 @@ namespace mongo {
ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(7, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{f:{b:{a:null},c:null,e:{d:null}},ff:null,_:{h:{g:null}}}");
}
@@ -758,7 +759,7 @@ namespace mongo {
ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(8, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << "ee");
verify(this->unindex(k));
@@ -766,7 +767,7 @@ namespace mongo {
ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(7, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{c:{b:{a:null}},cc:null,_:{e:{d:null},f:null,h:{g:null}}}");
}
@@ -784,7 +785,7 @@ namespace mongo {
ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(7, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << "c");
verify(this->unindex(k));
@@ -792,7 +793,7 @@ namespace mongo {
ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
// Height is not currently reduced in this case
builder.checkStructure("{j:{g:{b:{a:null},d:null,e:null,f:null},h:null,i:null}}");
@@ -811,7 +812,7 @@ namespace mongo {
ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(7, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << "c");
verify(this->unindex(k));
@@ -819,7 +820,7 @@ namespace mongo {
ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{g:{b:{a:null},d:null,e:null,f:null},h:null,i:null}");
}
@@ -837,7 +838,7 @@ namespace mongo {
ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(7, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << "c");
verify(this->unindex(k));
@@ -845,7 +846,7 @@ namespace mongo {
ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
// no recursion currently in this case
builder.checkStructure("{h:{b:{a:null},d:null,e:null,f:null},_:{i:null}}");
@@ -868,11 +869,11 @@ namespace mongo {
const BSONObj& topKey = biggestKey('m');
DiskLoc leftChild = this->newBucket();
- builder.push(this->_helper.headManager.getHead(), topKey, leftChild);
+ builder.push(this->_helper.headManager.getHead(&txn), topKey, leftChild);
_count++;
DiskLoc rightChild = this->newBucket();
- this->setBucketNextChild(this->_helper.headManager.getHead(), rightChild);
+ this->setBucketNextChild(this->_helper.headManager.getHead(&txn), rightChild);
_count += builder.fillBucketToExactSize(leftChild, leftSize(), 'a');
_count += builder.fillBucketToExactSize(rightChild, rightSize(), 'n');
@@ -904,7 +905,7 @@ namespace mongo {
ASSERT_EQUALS(0, unused);
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = bigKey(*i);
this->unindex(k);
@@ -920,11 +921,11 @@ namespace mongo {
if (!merge()) {
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
}
else {
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(2, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
}
}
@@ -1094,11 +1095,14 @@ namespace mongo {
virtual bool merge() const { return false; }
virtual void initCheck() {
- _oldTop = this->getKey(this->_helper.headManager.getHead(), 0).data.toBson();
+ OperationContextNoop txn;
+ _oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
}
virtual void validate() {
- ASSERT_NOT_EQUALS(_oldTop, this->getKey(this->_helper.headManager.getHead(), 0).data.toBson());
+ OperationContextNoop txn;
+ ASSERT_NOT_EQUALS(_oldTop,
+ this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
}
private:
@@ -1117,11 +1121,13 @@ namespace mongo {
virtual bool merge() const { return false; }
virtual void initCheck() {
- _oldTop = this->getKey(this->_helper.headManager.getHead(), 0).data.toBson();
+ OperationContextNoop txn;
+ _oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
}
- virtual void validate() {
- ASSERT_TRUE(_oldTop != this->getKey(this->_helper.headManager.getHead(), 0).data.toBson());
+ virtual void validate() {
+ OperationContextNoop txn;
+ ASSERT_TRUE(_oldTop != this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
}
private:
@@ -1142,7 +1148,7 @@ namespace mongo {
ASSERT_EQUALS(14, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << bigNumString(0x40, 800));
ASSERT(this->unindex(k));
@@ -1150,7 +1156,7 @@ namespace mongo {
ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},"
"b:{$10:null,$20:null,$30:null,$50:null,a:null},"
@@ -1172,7 +1178,7 @@ namespace mongo {
ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << bigNumString(0x3, 800));
ASSERT(this->unindex(k));
@@ -1180,7 +1186,7 @@ namespace mongo {
ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{$20:{$1:null,$2:null,$4:null,$10:null},"
"b:{$30:null,$40:null,$50:null,$60:null,$70:null},"
@@ -1203,7 +1209,7 @@ namespace mongo {
ASSERT_EQUALS(23, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 14 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(15, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(15, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << bigNumString(0x30, 800));
ASSERT(this->unindex(k));
@@ -1211,7 +1217,7 @@ namespace mongo {
ASSERT_EQUALS(22, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 14 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(15, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(15, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{$9:{$1:{$0:null},$3:{$2:null},"
"$5:{$4:null},$7:{$6:null},_:{$8:null}},"
@@ -1237,7 +1243,7 @@ namespace mongo {
ASSERT_EQUALS(25, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 15 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(16, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(16, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << bigNumString(0x5, 800));
ASSERT(this->unindex(k));
@@ -1245,7 +1251,7 @@ namespace mongo {
ASSERT_EQUALS(24, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 15 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(16, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(16, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{$50:{$1:{$0:null},$3:{$2:null},$20:{$14:null},"
"$30:{$25:null},$40:{$35:null},_:{$45:null}},"
@@ -1268,7 +1274,7 @@ namespace mongo {
ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << bigNumString(0x40, 800));
ASSERT(this->unindex(k));
@@ -1276,7 +1282,7 @@ namespace mongo {
ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},"
"_:{$10:null,$20:null,$30:null,$50:null,a:null}}");
@@ -1295,7 +1301,7 @@ namespace mongo {
const BSONObj k = BSON("" << "a");
ASSERT(this->unindex(k));
- this->forcePackBucket(this->_helper.headManager.getHead());
+ this->forcePackBucket(this->_helper.headManager.getHead(&txn));
typename BtreeLogicTestBase<OnDiskFormat>::BucketType* headBucket = this->head();
@@ -1324,7 +1330,7 @@ namespace mongo {
const BSONObj k = BSON("" << "a");
ASSERT(this->unindex(k));
- this->forcePackBucket(this->_helper.headManager.getHead());
+ this->forcePackBucket(this->_helper.headManager.getHead(&txn));
typename BtreeLogicTestBase<OnDiskFormat>::BucketType* headBucket = this->head();
@@ -1348,10 +1354,10 @@ namespace mongo {
ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
// force parent pack
- this->forcePackBucket(this->_helper.headManager.getHead());
+ this->forcePackBucket(this->_helper.headManager.getHead(&txn));
const BSONObj k = BSON("" << bigNumString(0x40, 800));
ASSERT(this->unindex(k));
@@ -1359,7 +1365,7 @@ namespace mongo {
ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},"
"_:{$10:null,$20:null,$30:null,$50:null,a:null}}");
@@ -1382,7 +1388,7 @@ namespace mongo {
ASSERT_EQUALS(22, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << bigNumString(0x3, 800));
ASSERT(this->unindex(k));
@@ -1390,7 +1396,7 @@ namespace mongo {
ASSERT_EQUALS(21, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(7, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{$500:{ $30:{$1:null,$2:null,$4:null,$10$10:null,$20:null},"
"$100:{$40:null,$50:null,$60:null,$70:null,$80:null},"
@@ -1411,7 +1417,7 @@ namespace mongo {
ASSERT_EQUALS(expectedSeparator(),
this->bucketRebalancedSeparatorPos(
- this->_helper.headManager.getHead(), 0));
+ this->_helper.headManager.getHead(&txn), 0));
}
virtual string treeSpec() const = 0;
@@ -1497,11 +1503,13 @@ namespace mongo {
virtual int rightSize() const { return MergeSizeJustRightRight<OnDiskFormat>::rightSize() + 1; }
virtual void initCheck() {
- _oldTop = this->getKey(this->_helper.headManager.getHead(), 0).data.toBson();
+ OperationContextNoop txn;
+ _oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
}
virtual void validate() {
- ASSERT_EQUALS(_oldTop, this->getKey(this->_helper.headManager.getHead(), 0).data.toBson());
+ OperationContextNoop txn;
+ ASSERT_EQUALS(_oldTop, this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
}
virtual bool merge() const { return false; }
@@ -1516,9 +1524,10 @@ namespace mongo {
virtual int leftSize() const { return MergeSizeJustRightRight<OnDiskFormat>::leftSize() + 1; }
virtual void validate() {
+ OperationContextNoop txn;
// Different top means we rebalanced
- ASSERT_NOT_EQUALS(this->_oldTop,
- this->getKey(this->_helper.headManager.getHead(), 0).data.toBson());
+ ASSERT_NOT_EQUALS(this->_oldTop,
+ this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
}
};
@@ -1526,12 +1535,14 @@ namespace mongo {
class NoMoveAtLowWaterMarkLeft : public MergeSizeJustRightLeft<OnDiskFormat> {
virtual int leftSize() const { return MergeSizeJustRightLeft<OnDiskFormat>::leftSize() + 1; }
virtual void initCheck() {
- this->_oldTop = this->getKey(this->_helper.headManager.getHead(), 0).data.toBson();
+ OperationContextNoop txn;
+ this->_oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
}
virtual void validate() {
+ OperationContextNoop txn;
ASSERT_EQUALS(this->_oldTop,
- this->getKey(this->_helper.headManager.getHead(), 0).data.toBson());
+ this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
}
virtual bool merge() const { return false; }
@@ -1545,9 +1556,10 @@ namespace mongo {
virtual int rightSize() const { return MergeSizeJustRightLeft<OnDiskFormat>::rightSize() + 1; }
virtual void validate() {
+ OperationContextNoop txn;
// Different top means we rebalanced
ASSERT_NOT_EQUALS(this->_oldTop,
- this->getKey(this->_helper.headManager.getHead(), 0).data.toBson());
+ this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
}
};
@@ -1565,7 +1577,7 @@ namespace mongo {
ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << bigNumString(0x12, 800));
ASSERT(this->unindex(k));
@@ -1573,7 +1585,7 @@ namespace mongo {
ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{$5:{$1:null,$2:null,$3:null,$4:null},"
"$20:{$6:null,$10:null,$11:null,$13:null,$14:null},"
@@ -1595,7 +1607,7 @@ namespace mongo {
ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << bigNumString(0x12, 800));
ASSERT(this->unindex(k));
@@ -1603,7 +1615,7 @@ namespace mongo {
ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{$10:{$1:null},"
"$31:{$11:null,$13:null,$14:null,$20:null},"
@@ -1625,7 +1637,7 @@ namespace mongo {
ASSERT_EQUALS(15, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(6, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << bigNumString(0x7, 800));
ASSERT(this->unindex(k));
@@ -1633,7 +1645,7 @@ namespace mongo {
ASSERT_EQUALS(14, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure(
"{$40:{$8:{$1:null,$2:null,$5:null,$6:null},$10:null,$20:null,$30:null},"
@@ -1653,7 +1665,7 @@ namespace mongo {
ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(3, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << "a");
ASSERT(this->unindex(k));
@@ -1661,7 +1673,7 @@ namespace mongo {
ASSERT_EQUALS(1, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(2, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{b:null}");
}
@@ -1679,7 +1691,7 @@ namespace mongo {
ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(3, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
const BSONObj k = BSON("" << "b");
ASSERT(this->unindex(k));
@@ -1687,7 +1699,7 @@ namespace mongo {
ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(2, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
builder.checkStructure("{a:null,c:null,d:null}");
}
@@ -1706,7 +1718,7 @@ namespace mongo {
ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(3, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
ASSERT_EQUALS(0, unused);
const BSONObj k = BSON("" << "c");
@@ -1715,7 +1727,7 @@ namespace mongo {
ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(2, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
ASSERT_EQUALS(0, unused);
builder.checkStructure("{a:null,b:null,d:null}");
@@ -1732,14 +1744,14 @@ namespace mongo {
builder.makeTree("{a:null,c:{b:null},d:null}");
const DiskLoc prevChildBucket =
- this->getKey(this->_helper.headManager.getHead(), 1).prevChildBucket;
+ this->getKey(this->_helper.headManager.getHead(&txn), 1).prevChildBucket;
this->markKeyUnused(prevChildBucket, 0);
long long unused = 0;
ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(3, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
ASSERT_EQUALS(1, unused);
const BSONObj k = BSON("" << "c");
@@ -1749,7 +1761,7 @@ namespace mongo {
ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(2, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
ASSERT_EQUALS(1, unused);
// doesn't discriminate between used and unused
@@ -1770,7 +1782,7 @@ namespace mongo {
ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(3, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
ASSERT_EQUALS(0, unused);
const BSONObj k = BSON("" << "a");
@@ -1780,7 +1792,7 @@ namespace mongo {
ASSERT_EQUALS(1, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(2, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
ASSERT_EQUALS(0, unused);
builder.checkStructure("{b:null}");
@@ -1800,7 +1812,7 @@ namespace mongo {
ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(6, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
ASSERT_EQUALS(0, unused);
const BSONObj k = BSON("" << "y");
@@ -1810,7 +1822,7 @@ namespace mongo {
ASSERT_EQUALS(6, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
ASSERT_EQUALS(0, unused);
builder.checkStructure("{a:null,e:{c:{b:null},d:null},z:null}");
@@ -1830,7 +1842,7 @@ namespace mongo {
ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
ASSERT_EQUALS(0, unused);
const BSONObj k = BSON("" << "a");
@@ -1840,7 +1852,7 @@ namespace mongo {
ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(3, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
ASSERT_EQUALS(0, unused);
builder.checkStructure("{c:null,_:{e:null,f:null}}");
@@ -1860,7 +1872,7 @@ namespace mongo {
ASSERT_EQUALS(5, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
ASSERT_EQUALS(0, unused);
const BSONObj k = BSON("" << "d");
@@ -1869,13 +1881,13 @@ namespace mongo {
ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
ASSERT_EQUALS(1, unused);
builder.checkStructure("{a:null,d:{c:{b:null}},e:null}");
// Check 'unused' key
- ASSERT(this->getKey(this->_helper.headManager.getHead(), 1).recordLoc.getOfs() & 1);
+ ASSERT(this->getKey(this->_helper.headManager.getHead(&txn), 1).recordLoc.getOfs() & 1);
}
};
@@ -1892,7 +1904,7 @@ namespace mongo {
ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
ASSERT_EQUALS(0, unused);
const BSONObj k = BSON("" << "a");
@@ -1901,13 +1913,13 @@ namespace mongo {
ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
ASSERT_EQUALS(1, unused);
builder.checkStructure("{a:null,_:{c:null,_:{d:null}}}");
// Check 'unused' key
- ASSERT(this->getKey(this->_helper.headManager.getHead(), 0).recordLoc.getOfs() & 1);
+ ASSERT(this->getKey(this->_helper.headManager.getHead(&txn), 0).recordLoc.getOfs() & 1);
}
};
@@ -1925,7 +1937,7 @@ namespace mongo {
ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
ASSERT_EQUALS(0, unused);
const BSONObj k = BSON("" << bigNumString(0x30, 0x10));
@@ -1934,7 +1946,7 @@ namespace mongo {
ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
ASSERT_EQUALS(0, unused);
builder.checkStructure("{$60:{$10:null,$20:null,"
@@ -1957,7 +1969,7 @@ namespace mongo {
ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
ASSERT_EQUALS(0, unused);
const BSONObj k = BSON("" << bigNumString(0x100, 0x10));
@@ -1966,7 +1978,7 @@ namespace mongo {
ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
ASSERT_EQUALS(0, unused);
builder.checkStructure(
@@ -1990,7 +2002,7 @@ namespace mongo {
this->insert(key3, this->_helper.dummyDiskLoc);
this->checkValidNumKeys(3);
- this->locate(BSONObj(), 0, false, this->_helper.headManager.getHead(), 1);
+ this->locate(BSONObj(), 0, false, this->_helper.headManager.getHead(&txn), 1);
}
};
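
Every test fix above follows the same recipe: the read helpers (fullValidate, numRecords, getHead, getKey) now take an operation context, so the fixtures either reuse the test's existing txn or stack-allocate an OperationContextNoop on the spot, and engines that do not yet consume the context accept NULL. A minimal standalone sketch of that calling convention — OpCtx and RecordStore here are illustrative stand-ins, not MongoDB's real classes:

    #include <cassert>
    #include <cstddef>
    #include <map>

    struct OpCtx {};  // plays the role of OperationContextNoop in the tests

    class RecordStore {
    public:
        void insert(int loc) { _records[loc] = loc; }
        // Read path now takes the context, even if this engine ignores it.
        long long numRecords(OpCtx* /*txn*/) const {
            return static_cast<long long>(_records.size());
        }
    private:
        std::map<int, int> _records;
    };

    int main() {
        OpCtx txn;                          // tests stack-allocate a no-op context
        RecordStore rs;
        rs.insert(1);
        assert(rs.numRecords(&txn) == 1);   // pass &txn, as the asserts above do
        assert(rs.numRecords(NULL) == 1);   // NULL tolerated while plumbing lands
        return 0;
    }
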
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp
index fd2c89c3704..47e610d8ab1 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp
@@ -78,7 +78,7 @@ namespace mongo {
recordStore.insertRecord(&txn, randomData.c_str(), randomData.length(), false);
ASSERT_TRUE(s.isOK());
- ASSERT_EQUALS(1, recordStore.numRecords());
+ ASSERT_EQUALS(1, recordStore.numRecords(NULL));
dummyDiskLoc = s.getValue();
}
@@ -96,7 +96,7 @@ namespace mongo {
template <class OnDiskFormat>
DiskLoc ArtificialTreeBuilder<OnDiskFormat>::makeTree(const BSONObj &spec) {
DiskLoc bucketLoc = _helper->btree._addBucket(_txn);
- BucketType* bucket = _helper->btree.getBucket(bucketLoc);
+ BucketType* bucket = _helper->btree.getBucket(_txn, bucketLoc);
BSONObjIterator i(spec);
while (i.more()) {
@@ -121,14 +121,14 @@ namespace mongo {
template <class OnDiskFormat>
void ArtificialTreeBuilder<OnDiskFormat>::checkStructure(const string &spec) const {
- checkStructure(fromjson(spec), _helper->headManager.getHead());
+ checkStructure(fromjson(spec), _helper->headManager.getHead(_txn));
}
template <class OnDiskFormat>
void ArtificialTreeBuilder<OnDiskFormat>::push(
const DiskLoc bucketLoc, const BSONObj& key, const DiskLoc child) {
KeyDataOwnedType k(key);
- BucketType* bucket = _helper->btree.getBucket(bucketLoc);
+ BucketType* bucket = _helper->btree.getBucket(_txn, bucketLoc);
invariant(_helper->btree.pushBack(bucket, _helper->dummyDiskLoc, k, child));
_helper->btree.fixParentPtrs(_txn, bucket, bucketLoc);
@@ -137,7 +137,7 @@ namespace mongo {
template <class OnDiskFormat>
void ArtificialTreeBuilder<OnDiskFormat>::checkStructure(
const BSONObj &spec, const DiskLoc node) const {
- BucketType* bucket = _helper->btree.getBucket(node);
+ BucketType* bucket = _helper->btree.getBucket(_txn, node);
BSONObjIterator j(spec);
for (int i = 0; i < bucket->n; ++i) {
@@ -201,7 +201,7 @@ namespace mongo {
const DiskLoc bucketLoc, int targetSize, char startKey) {
ASSERT_FALSE(bucketLoc.isNull());
- BucketType* bucket = _helper->btree.getBucket(bucketLoc);
+ BucketType* bucket = _helper->btree.getBucket(_txn, bucketLoc);
ASSERT_EQUALS(0, bucket->n);
static const int bigSize = KeyDataOwnedType(simpleKey('a', 801)).dataSize();
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_test_help.h b/src/mongo/db/storage/mmap_v1/btree/btree_test_help.h
index b460b108d1b..bb488a54111 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_test_help.h
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_test_help.h
@@ -54,7 +54,7 @@ namespace mongo {
*/
class TestHeadManager : public HeadManager {
public:
- virtual const DiskLoc getHead() const {
+ virtual const DiskLoc getHead( OperationContext* txn ) const {
return _head;
}
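
Changing the signature of a virtual accessor like getHead forces every override to change in lockstep; an override that kept the old zero-argument signature would silently become a new, unrelated function rather than an override. A sketch of the lockstep fix with illustrative names (pre-C++11 code like this tree cannot lean on the override keyword, which would turn such a mistake into a compile error):

    #include <cassert>

    struct OpCtx {};

    class HeadManager {                    // illustrative, not the real interface
    public:
        virtual ~HeadManager() {}
        virtual int getHead(OpCtx* txn) const = 0;  // parameter added on the read path
    };

    class TestHeadManager : public HeadManager {
    public:
        TestHeadManager() : _head(7) {}
        // Must gain the same parameter; getHead() const would not override.
        virtual int getHead(OpCtx* /*txn*/) const { return _head; }
    private:
        int _head;
    };

    int main() {
        TestHeadManager hm;
        OpCtx txn;
        HeadManager& base = hm;
        assert(base.getHead(&txn) == 7);
        return 0;
    }
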
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
index c84bb6a3cdf..45cfe0d5d4b 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
@@ -225,12 +225,13 @@ namespace mongo {
/* ------------------------------------------------------------------------- */
- int NamespaceDetails::_catalogFindIndexByName(const Collection* coll,
+ int NamespaceDetails::_catalogFindIndexByName(OperationContext* txn,
+ const Collection* coll,
const StringData& name,
bool includeBackgroundInProgress) const {
IndexIterator i = ii(includeBackgroundInProgress);
while( i.more() ) {
- const BSONObj obj = coll->docFor(i.next().info);
+ const BSONObj obj = coll->docFor(txn, i.next().info);
if ( name == obj.getStringField("name") )
return i.pos()-1;
}
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
index cdda3b89d11..48ad001b90b 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
@@ -201,7 +201,8 @@ namespace mongo {
*
* @return >= 0 if index name was found, -1 otherwise.
*/
- int _catalogFindIndexByName(const Collection* coll,
+ int _catalogFindIndexByName(OperationContext* txn,
+ const Collection* coll,
const StringData& name,
bool includeBackgroundInProgress) const;
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
index 27957a297a5..b6425151e12 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
@@ -51,11 +51,11 @@ namespace mongo {
return _db->getCollectionOptions( txn, ns().ns() );
}
- int NamespaceDetailsCollectionCatalogEntry::getTotalIndexCount() const {
+ int NamespaceDetailsCollectionCatalogEntry::getTotalIndexCount( OperationContext* txn ) const {
return _details->nIndexes + _details->indexBuildsInProgress;
}
- int NamespaceDetailsCollectionCatalogEntry::getCompletedIndexCount() const {
+ int NamespaceDetailsCollectionCatalogEntry::getCompletedIndexCount( OperationContext* txn ) const {
return _details->nIndexes;
}
@@ -63,17 +63,19 @@ namespace mongo {
return NamespaceDetails::NIndexesMax;
}
- void NamespaceDetailsCollectionCatalogEntry::getAllIndexes( std::vector<std::string>* names ) const {
+ void NamespaceDetailsCollectionCatalogEntry::getAllIndexes( OperationContext* txn,
+ std::vector<std::string>* names ) const {
NamespaceDetails::IndexIterator i = _details->ii( true );
while ( i.more() ) {
const IndexDetails& id = i.next();
- const BSONObj obj = _indexRecordStore->dataFor( id.info ).toBson();
+ const BSONObj obj = _indexRecordStore->dataFor( txn, id.info ).toBson();
names->push_back( obj.getStringField("name") );
}
}
- bool NamespaceDetailsCollectionCatalogEntry::isIndexMultikey(const StringData& idxName) const {
- int idxNo = _findIndexNumber( idxName );
+ bool NamespaceDetailsCollectionCatalogEntry::isIndexMultikey(OperationContext* txn,
+ const StringData& idxName) const {
+ int idxNo = _findIndexNumber( txn, idxName );
invariant( idxNo >= 0 );
return isIndexMultikey( idxNo );
}
@@ -86,7 +88,7 @@ namespace mongo {
const StringData& indexName,
bool multikey ) {
- int idxNo = _findIndexNumber( indexName );
+ int idxNo = _findIndexNumber( txn, indexName );
invariant( idxNo >= 0 );
return setIndexIsMultikey( txn, idxNo, multikey );
}
@@ -118,39 +120,43 @@ namespace mongo {
return true;
}
- DiskLoc NamespaceDetailsCollectionCatalogEntry::getIndexHead( const StringData& idxName ) const {
- int idxNo = _findIndexNumber( idxName );
+ DiskLoc NamespaceDetailsCollectionCatalogEntry::getIndexHead( OperationContext* txn,
+ const StringData& idxName ) const {
+ int idxNo = _findIndexNumber( txn, idxName );
invariant( idxNo >= 0 );
return _details->idx( idxNo ).head;
}
- BSONObj NamespaceDetailsCollectionCatalogEntry::getIndexSpec( const StringData& idxName ) const {
- int idxNo = _findIndexNumber( idxName );
+ BSONObj NamespaceDetailsCollectionCatalogEntry::getIndexSpec( OperationContext* txn,
+ const StringData& idxName ) const {
+ int idxNo = _findIndexNumber( txn, idxName );
invariant( idxNo >= 0 );
const IndexDetails& id = _details->idx( idxNo );
- return _indexRecordStore->dataFor( id.info ).toBson();
+ return _indexRecordStore->dataFor( txn, id.info ).toBson();
}
void NamespaceDetailsCollectionCatalogEntry::setIndexHead( OperationContext* txn,
const StringData& idxName,
const DiskLoc& newHead ) {
- int idxNo = _findIndexNumber( idxName );
+ int idxNo = _findIndexNumber( txn, idxName );
invariant( idxNo >= 0 );
*txn->recoveryUnit()->writing( &_details->idx( idxNo ).head) = newHead;
}
- bool NamespaceDetailsCollectionCatalogEntry::isIndexReady( const StringData& idxName ) const {
- int idxNo = _findIndexNumber( idxName );
+ bool NamespaceDetailsCollectionCatalogEntry::isIndexReady( OperationContext* txn,
+ const StringData& idxName ) const {
+ int idxNo = _findIndexNumber( txn, idxName );
invariant( idxNo >= 0 );
- return idxNo < getCompletedIndexCount();
+ return idxNo < getCompletedIndexCount( txn );
}
- int NamespaceDetailsCollectionCatalogEntry::_findIndexNumber( const StringData& idxName ) const {
+ int NamespaceDetailsCollectionCatalogEntry::_findIndexNumber( OperationContext* txn,
+ const StringData& idxName ) const {
NamespaceDetails::IndexIterator i = _details->ii( true );
while ( i.more() ) {
const IndexDetails& id = i.next();
int idxNo = i.pos() - 1;
- const BSONObj obj = _indexRecordStore->dataFor( id.info ).toBson();
+ const BSONObj obj = _indexRecordStore->dataFor( txn, id.info ).toBson();
if ( idxName == obj.getStringField("name") )
return idxNo;
}
@@ -183,14 +189,14 @@ namespace mongo {
Status NamespaceDetailsCollectionCatalogEntry::removeIndex( OperationContext* txn,
const StringData& indexName ) {
- int idxNo = _findIndexNumber( indexName );
+ int idxNo = _findIndexNumber( txn, indexName );
if ( idxNo < 0 )
return Status( ErrorCodes::NamespaceNotFound, "index not found to remove" );
DiskLoc infoLocation = _details->idx( idxNo ).info;
{ // sanity check
- BSONObj info = _indexRecordStore->dataFor( infoLocation ).toBson();
+ BSONObj info = _indexRecordStore->dataFor( txn, infoLocation ).toBson();
invariant( info["name"].String() == indexName );
}
@@ -213,10 +219,10 @@ namespace mongo {
else
d->nIndexes--;
- for ( int i = idxNo; i < getTotalIndexCount(); i++ )
+ for ( int i = idxNo; i < getTotalIndexCount( txn ); i++ )
d->idx(i) = d->idx(i+1);
- d->idx( getTotalIndexCount() ) = IndexDetails();
+ d->idx( getTotalIndexCount( txn ) ) = IndexDetails();
}
// remove from system.indexes
@@ -239,14 +245,14 @@ namespace mongo {
// 2) NamespaceDetails mods
IndexDetails *id;
try {
- id = &_details->idx(getTotalIndexCount(), true);
+ id = &_details->idx(getTotalIndexCount( txn ), true);
}
catch( DBException& ) {
_details->allocExtra(txn,
ns().ns(),
_db->_namespaceIndex,
- getTotalIndexCount());
- id = &_details->idx(getTotalIndexCount(), false);
+ getTotalIndexCount( txn ));
+ id = &_details->idx(getTotalIndexCount( txn ), false);
}
*txn->recoveryUnit()->writing( &id->info ) = systemIndexesEntry.getValue();
@@ -267,12 +273,12 @@ namespace mongo {
void NamespaceDetailsCollectionCatalogEntry::indexBuildSuccess( OperationContext* txn,
const StringData& indexName ) {
- int idxNo = _findIndexNumber( indexName );
+ int idxNo = _findIndexNumber( txn, indexName );
fassert( 17202, idxNo >= 0 );
// Make sure the newly created index is relocated to nIndexes, if it isn't already there
- if ( idxNo != getCompletedIndexCount() ) {
- int toIdxNo = getCompletedIndexCount();
+ if ( idxNo != getCompletedIndexCount( txn ) ) {
+ int toIdxNo = getCompletedIndexCount( txn );
//_details->swapIndex( txn, idxNo, toIdxNo );
@@ -287,24 +293,24 @@ namespace mongo {
setIndexIsMultikey( txn, toIdxNo, tempMultikey );
idxNo = toIdxNo;
- invariant( idxNo = _findIndexNumber( indexName ) );
+ invariant( idxNo == _findIndexNumber( txn, indexName ) );
}
txn->recoveryUnit()->writingInt( _details->indexBuildsInProgress ) -= 1;
txn->recoveryUnit()->writingInt( _details->nIndexes ) += 1;
- invariant( isIndexReady( indexName ) );
+ invariant( isIndexReady( txn, indexName ) );
}
void NamespaceDetailsCollectionCatalogEntry::updateTTLSetting( OperationContext* txn,
const StringData& idxName,
long long newExpireSeconds ) {
- int idx = _findIndexNumber( idxName );
+ int idx = _findIndexNumber( txn, idxName );
invariant( idx >= 0 );
IndexDetails& indexDetails = _details->idx( idx );
- BSONObj obj = _indexRecordStore->dataFor( indexDetails.info ).toBson();
+ BSONObj obj = _indexRecordStore->dataFor( txn, indexDetails.info ).toBson();
const BSONElement oldExpireSecs = obj.getField("expireAfterSeconds");
// Important that we set the new value in-place. We are writing directly to the
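
The recurring _findIndexNumber(txn, idxName) call that now threads through this file is a linear scan over the catalog's index records, comparing each record's "name" field and returning its position, or -1 when absent (which is why position 0 is a legal hit). A standalone sketch of that lookup shape, with plain std::strings standing in for the BSON index specs:

    #include <cassert>
    #include <string>
    #include <vector>

    struct OpCtx {};

    // Illustrative catalog; the context would be used to read each index record.
    class Catalog {
    public:
        void add(const std::string& name) { _names.push_back(name); }
        int findIndexNumber(OpCtx* /*txn*/, const std::string& idxName) const {
            for (size_t i = 0; i < _names.size(); i++)
                if (_names[i] == idxName)
                    return static_cast<int>(i);  // position, possibly 0
            return -1;                           // not found
        }
    private:
        std::vector<std::string> _names;
    };

    int main() {
        Catalog c;
        OpCtx txn;
        c.add("_id_");
        c.add("a_1");
        assert(c.findIndexNumber(&txn, "a_1") == 1);
        assert(c.findIndexNumber(&txn, "missing") == -1);
        return 0;
    }
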
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h
index 78a5b96f181..111fb25c711 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h
@@ -54,17 +54,20 @@ namespace mongo {
virtual CollectionOptions getCollectionOptions(OperationContext* txn) const;
- virtual int getTotalIndexCount() const;
+ virtual int getTotalIndexCount(OperationContext* txn) const;
- virtual int getCompletedIndexCount() const;
+ virtual int getCompletedIndexCount(OperationContext* txn) const;
virtual int getMaxAllowedIndexes() const;
- virtual void getAllIndexes( std::vector<std::string>* names ) const;
+ virtual void getAllIndexes( OperationContext* txn,
+ std::vector<std::string>* names ) const;
- virtual BSONObj getIndexSpec( const StringData& idxName ) const;
+ virtual BSONObj getIndexSpec( OperationContext* txn,
+ const StringData& idxName ) const;
- virtual bool isIndexMultikey(const StringData& indexName) const;
+ virtual bool isIndexMultikey(OperationContext* txn,
+ const StringData& indexName) const;
virtual bool isIndexMultikey(int idxNo) const;
virtual bool setIndexIsMultikey(OperationContext* txn,
@@ -74,13 +77,15 @@ namespace mongo {
const StringData& indexName,
bool multikey = true);
- virtual DiskLoc getIndexHead( const StringData& indexName ) const;
+ virtual DiskLoc getIndexHead( OperationContext* txn,
+ const StringData& indexName ) const;
virtual void setIndexHead( OperationContext* txn,
const StringData& indexName,
const DiskLoc& newHead );
- virtual bool isIndexReady( const StringData& indexName ) const;
+ virtual bool isIndexReady( OperationContext* txn,
+ const StringData& indexName ) const;
virtual Status removeIndex( OperationContext* txn,
const StringData& indexName );
@@ -97,7 +102,7 @@ namespace mongo {
// not part of interface, but available to my storage engine
- int _findIndexNumber( const StringData& indexName) const;
+ int _findIndexNumber( OperationContext* txn, const StringData& indexName) const;
private:
NamespaceDetails* _details;
diff --git a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp
index 28bad934aa1..03bf97f98e7 100644
--- a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp
+++ b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp
@@ -36,7 +36,7 @@
namespace mongo {
- RecordData HeapRecordStoreBtree::dataFor(const DiskLoc& loc) const {
+ RecordData HeapRecordStoreBtree::dataFor(OperationContext* txn, const DiskLoc& loc) const {
Records::const_iterator it = _records.find(loc);
invariant(it != _records.end());
const Record& rec = it->second;
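
For the heap-backed store the new txn parameter is pure plumbing: dataFor is still an in-memory map lookup that invariants on a stale location. A sketch of that shape, with assert standing in for invariant:

    #include <cassert>
    #include <map>
    #include <string>

    struct OpCtx {};

    class HeapStore {                        // illustrative, not the real class
    public:
        void put(int loc, const std::string& s) { _records[loc] = s; }
        const std::string& dataFor(OpCtx* /*txn*/, int loc) const {
            std::map<int, std::string>::const_iterator it = _records.find(loc);
            assert(it != _records.end());    // plays the role of invariant()
            return it->second;
        }
    private:
        std::map<int, std::string> _records;
    };

    int main() {
        HeapStore hs;
        OpCtx txn;
        hs.put(5, "abc");
        assert(hs.dataFor(&txn, 5) == "abc");
        return 0;
    }
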
diff --git a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
index 64d7f5f8ea9..893ff42a494 100644
--- a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
+++ b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
@@ -48,7 +48,7 @@ namespace mongo {
// DiskLoc(0,0) isn't valid for records.
explicit HeapRecordStoreBtree(const StringData& ns): RecordStore(ns), _nextId(1) { }
- virtual RecordData dataFor(const DiskLoc& loc) const;
+ virtual RecordData dataFor(OperationContext* txn, const DiskLoc& loc) const;
virtual void deleteRecord(OperationContext* txn, const DiskLoc& dl);
@@ -61,7 +61,7 @@ namespace mongo {
const DocWriter* doc,
bool enforceQuota);
- virtual long long numRecords() const { return _records.size(); }
+ virtual long long numRecords( OperationContext* txn ) const { return _records.size(); }
virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const;
@@ -149,7 +149,7 @@ namespace mongo {
invariant(false);
}
- virtual long long dataSize() const { invariant(false); }
+ virtual long long dataSize(OperationContext* txn) const { invariant(false); }
virtual Record* recordFor(const DiskLoc& loc) const { invariant(false); }
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
index 96768ef5983..4fc964d6769 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
@@ -273,7 +273,7 @@ namespace mongo {
details,
systemIndexRecordStore,
this );
- int indexI = ce._findIndexNumber( indexName );
+ int indexI = ce._findIndexNumber( txn, indexName );
IndexDetails& indexDetails = details->idx(indexI);
*txn->recoveryUnit()->writing(&indexDetails.info) = newIndexSpecLoc.getValue(); // XXX: dur
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
index 47ca3498893..213254993a6 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
@@ -104,7 +104,7 @@ namespace mongo {
return total;
}
- RecordData RecordStoreV1Base::dataFor( const DiskLoc& loc ) const {
+ RecordData RecordStoreV1Base::dataFor( OperationContext* txn, const DiskLoc& loc ) const {
return recordFor(loc)->toRecordData();
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
index a71adc04a19..429866dd705 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
@@ -124,14 +124,14 @@ namespace mongo {
virtual ~RecordStoreV1Base();
- virtual long long dataSize() const { return _details->dataSize(); }
- virtual long long numRecords() const { return _details->numRecords(); }
+ virtual long long dataSize( OperationContext* txn ) const { return _details->dataSize(); }
+ virtual long long numRecords( OperationContext* txn ) const { return _details->numRecords(); }
virtual int64_t storageSize( OperationContext* txn,
BSONObjBuilder* extraInfo = NULL,
int level = 0 ) const;
- virtual RecordData dataFor( const DiskLoc& loc ) const;
+ virtual RecordData dataFor( OperationContext* txn, const DiskLoc& loc ) const;
void deleteRecord( OperationContext* txn,
const DiskLoc& dl );
@@ -290,7 +290,7 @@ namespace mongo {
virtual bool restoreState() { return true; }
- virtual RecordData dataFor( const DiskLoc& loc ) const { return _rs->dataFor(loc); }
+ virtual RecordData dataFor( const DiskLoc& loc ) const { return _rs->dataFor(_txn, loc); }
private:
virtual const Record* recordFor( const DiskLoc& loc ) const { return _rs->recordFor(loc); }
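
The iterator hunk above shows the second half of the plumbing pattern: iterators capture the OperationContext* once at construction and forward it on every dataFor call, so their public per-record API keeps its old shape. A compact sketch of that forwarding, with illustrative names:

    #include <cassert>

    struct OpCtx {};

    class Store {                            // illustrative record store
    public:
        int dataFor(OpCtx* /*txn*/, int loc) const { return loc * 2; }

        class Iterator {
        public:
            Iterator(OpCtx* txn, const Store* rs) : _txn(txn), _rs(rs) {}
            // Same signature as before; the stored context is forwarded.
            int dataFor(int loc) const { return _rs->dataFor(_txn, loc); }
        private:
            OpCtx* _txn;
            const Store* _rs;
        };
    };

    int main() {
        OpCtx txn;
        Store rs;
        Store::Iterator it(&txn, &rs);
        assert(it.dataFor(3) == 6);
        return 0;
    }
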
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
index 79d4775d309..2a44aadc81b 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
@@ -667,7 +667,7 @@ namespace mongo {
RARELY {
std::stringstream buf;
buf << "couldn't make room for record len: " << len << " in capped ns " << _ns << '\n';
- buf << "numRecords: " << numRecords() << '\n';
+ buf << "numRecords: " << numRecords(txn) << '\n';
int i = 0;
for ( DiskLoc e = _details->firstExtent(txn);
!e.isNull();
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp
index 98557fa9193..beb52ddf739 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp
@@ -219,7 +219,7 @@ namespace mongo {
}
RecordData CappedRecordStoreV1Iterator::dataFor( const DiskLoc& loc ) const {
- return _recordStore->dataFor( loc );
+ return _recordStore->dataFor( _txn, loc );
}
Extent* CappedRecordStoreV1Iterator::_getExtent( const DiskLoc& loc ) {
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp
index be42a991929..6dbc9f99a1a 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp
@@ -187,7 +187,7 @@ namespace mongo {
}
RecordData RecordStoreV1RepairIterator::dataFor(const DiskLoc& loc) const {
- return _recordStore->dataFor( loc );
+ return _recordStore->dataFor( _txn, loc );
}
} // namespace mongo
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
index 076cc3da79a..b654fc4c4d3 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
@@ -413,7 +413,7 @@ namespace mongo {
uassertStatusOK( status.getStatus() );
datasize += recordFor( status.getValue() )->netLength();
- adaptor->inserted( dataFor( status.getValue() ), status.getValue() );
+ adaptor->inserted( dataFor( txn, status.getValue() ), status.getValue() );
}
if( L.isNull() ) {
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp
index f6823bfb424..20122fea94d 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp
@@ -124,7 +124,7 @@ namespace mongo {
}
RecordData SimpleRecordStoreV1Iterator::dataFor( const DiskLoc& loc ) const {
- return _recordStore->dataFor( loc );
+ return _recordStore->dataFor( _txn, loc );
}
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
index 31f17f42b28..de3d9c2c5bf 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
@@ -164,7 +164,7 @@ namespace {
ASSERT( result.isOK() );
// The length of the allocated record is quantized.
- ASSERT_EQUALS( 320, rs.dataFor( result.getValue() ).size() + Record::HeaderSize );
+ ASSERT_EQUALS( 320, rs.dataFor( &txn, result.getValue() ).size() + Record::HeaderSize );
}
/**
@@ -184,7 +184,7 @@ namespace {
ASSERT( result.isOK() );
// The length of the allocated record is not quantized.
- ASSERT_EQUALS( 300, rs.dataFor( result.getValue() ).size() + Record::HeaderSize );
+ ASSERT_EQUALS( 300, rs.dataFor( &txn, result.getValue() ).size() + Record::HeaderSize );
}
@@ -201,7 +201,7 @@ namespace {
StatusWith<DiskLoc> result = rs.insertRecord( &txn, obj.objdata(), obj.objsize(), false);
ASSERT( result.isOK() );
- ASSERT_EQUALS( 300, rs.dataFor( result.getValue() ).size() + Record::HeaderSize );
+ ASSERT_EQUALS( 300, rs.dataFor( &txn, result.getValue() ).size() + Record::HeaderSize );
}
/** alloc() returns a non quantized record larger than the requested size. */
@@ -448,7 +448,7 @@ namespace {
StatusWith<DiskLoc> result = rs.insertRecord( &txn, "abc", 4, 1000 );
ASSERT_TRUE( result.isOK() );
ASSERT_EQUALS( 1, md->numRecords() );
- RecordData recordData = rs.dataFor( result.getValue() );
+ RecordData recordData = rs.dataFor( &txn, result.getValue() );
ASSERT_EQUALS( string("abc"), string(recordData.data()) );
}
diff --git a/src/mongo/db/storage/mmap_v1/repair_database.cpp b/src/mongo/db/storage/mmap_v1/repair_database.cpp
index 925dfd2c639..fc01db60723 100644
--- a/src/mongo/db/storage/mmap_v1/repair_database.cpp
+++ b/src/mongo/db/storage/mmap_v1/repair_database.cpp
@@ -339,7 +339,7 @@ namespace mongo {
CollectionScanParams::FORWARD ) );
while ( !it->isEOF() ) {
DiskLoc loc = it->getNext();
- BSONObj obj = coll->docFor( loc );
+ BSONObj obj = coll->docFor( txn, loc );
string ns = obj["name"].String();
@@ -389,7 +389,7 @@ namespace mongo {
{
vector<BSONObj> indexes;
IndexCatalog::IndexIterator ii =
- originalCollection->getIndexCatalog()->getIndexIterator( false );
+ originalCollection->getIndexCatalog()->getIndexIterator( txn, false );
while ( ii.more() ) {
IndexDescriptor* desc = ii.next();
indexes.push_back( desc->infoObj() );
@@ -407,7 +407,7 @@ namespace mongo {
DiskLoc loc = iterator->getNext();
invariant( !loc.isNull() );
- BSONObj doc = originalCollection->docFor( loc );
+ BSONObj doc = originalCollection->docFor( txn, loc );
WriteUnitOfWork wunit(txn);
StatusWith<DiskLoc> result = tempCollection->insertDocument(txn,
diff --git a/src/mongo/db/storage/record_data.h b/src/mongo/db/storage/record_data.h
index dc2f673c253..61428b7ca61 100644
--- a/src/mongo/db/storage/record_data.h
+++ b/src/mongo/db/storage/record_data.h
@@ -60,7 +60,7 @@ namespace mongo {
private:
const char* _data;
int _size;
- const boost::shared_array<char> _dataPtr;
+ boost::shared_array<char> _dataPtr;
};
} // namespace mongo
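
The one-character record_data.h change — dropping const from the _dataPtr member — matters because a const non-static data member suppresses the implicit copy-assignment operator, making the whole type non-assignable; plausibly that friction is what prompted the change as RecordData values now flow through more call chains. A tiny standalone demonstration of the rule:

    #include <cassert>

    struct WithConst    { const int p; };
    struct WithoutConst { int p; };

    int main() {
        WithoutConst a = { 1 };
        WithoutConst b = { 2 };
        a = b;                    // fine: implicit copy assignment exists
        // WithConst c = { 1 };
        // WithConst d = { 2 };
        // c = d;                 // error: copy assignment is suppressed
        assert(a.p == 2);
        return 0;
    }
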
diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h
index 8cf6d2a351e..e850bf13d3f 100644
--- a/src/mongo/db/storage/record_store.h
+++ b/src/mongo/db/storage/record_store.h
@@ -125,9 +125,9 @@ namespace mongo {
virtual const std::string& ns() const { return _ns; }
- virtual long long dataSize() const = 0;
+ virtual long long dataSize( OperationContext* txn ) const = 0;
- virtual long long numRecords() const = 0;
+ virtual long long numRecords( OperationContext* txn ) const = 0;
virtual bool isCapped() const = 0;
@@ -143,7 +143,7 @@ namespace mongo {
// CRUD related
- virtual RecordData dataFor( const DiskLoc& loc) const = 0;
+ virtual RecordData dataFor( OperationContext* txn, const DiskLoc& loc) const = 0;
virtual void deleteRecord( OperationContext* txn, const DiskLoc& dl ) = 0;
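
This hunk is the heart of SERVER-13635 in this section: the abstract RecordStore read API now requires an OperationContext, so every engine — mmapv1, the heap test store, RocksDB — must accept it whether or not it uses it yet. A condensed sketch of the reshaped interface, with illustrative types standing in for RecordData and DiskLoc:

    #include <cassert>

    struct OpCtx {};
    typedef int Loc;                 // stands in for DiskLoc
    typedef int Data;                // stands in for RecordData

    class RecordStoreAPI {           // illustrative slice of the interface
    public:
        virtual ~RecordStoreAPI() {}
        // Reads, like writes, are now tied to an operation:
        virtual long long dataSize(OpCtx* txn) const = 0;
        virtual long long numRecords(OpCtx* txn) const = 0;
        virtual Data dataFor(OpCtx* txn, const Loc& loc) const = 0;
        virtual void deleteRecord(OpCtx* txn, const Loc& loc) = 0;
    };

    class TrivialStore : public RecordStoreAPI {
    public:
        TrivialStore() : _n(0), _bytes(0) {}
        virtual long long dataSize(OpCtx*) const { return _bytes; }
        virtual long long numRecords(OpCtx*) const { return _n; }
        virtual Data dataFor(OpCtx*, const Loc& loc) const { return loc; }
        virtual void deleteRecord(OpCtx*, const Loc&) { _n--; }
        void insert() { _n++; _bytes += 4; }
    private:
        long long _n, _bytes;
    };

    int main() {
        TrivialStore s;
        OpCtx txn;
        s.insert();
        assert(s.numRecords(&txn) == 1);
        assert(s.dataFor(&txn, 9) == 9);
        return 0;
    }
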
diff --git a/src/mongo/db/storage/rocks/SConscript b/src/mongo/db/storage/rocks/SConscript
index e7f6f23f330..721e498f1f0 100644
--- a/src/mongo/db/storage/rocks/SConscript
+++ b/src/mongo/db/storage/rocks/SConscript
@@ -17,6 +17,7 @@ if has_option("rocksdb"):
'$BUILD_DIR/mongo/bson',
'$BUILD_DIR/mongo/db/catalog/collection_options',
'$BUILD_DIR/mongo/db/index/index_descriptor',
+ '$BUILD_DIR/mongo/db/storage/bson_collection_catalog_entry',
'$BUILD_DIR/mongo/db/storage/index_entry_comparison',
'$BUILD_DIR/mongo/foundation',
'$BUILD_DIR/third_party/shim_snappy',
diff --git a/src/mongo/db/storage/rocks/rocks_collection_catalog_entry.cpp b/src/mongo/db/storage/rocks/rocks_collection_catalog_entry.cpp
index ecd7b374c1e..40487469f07 100644
--- a/src/mongo/db/storage/rocks/rocks_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/rocks/rocks_collection_catalog_entry.cpp
@@ -51,53 +51,16 @@ namespace mongo {
RocksCollectionCatalogEntry::RocksCollectionCatalogEntry( RocksEngine* engine,
const StringData& ns )
- : CollectionCatalogEntry( ns ),
+ : BSONCollectionCatalogEntry( ns ),
_engine( engine ),
_metaDataKey( string( "metadata-" ) + ns.toString() ) { }
- CollectionOptions RocksCollectionCatalogEntry::getCollectionOptions(
- OperationContext* txn ) const {
- // TODO: put more options in here?
- return CollectionOptions();
- }
-
- // ------- indexes ----------
-
- int RocksCollectionCatalogEntry::getTotalIndexCount() const {
- MetaData md = _getMetaData();
-
- return static_cast<int>( md.indexes.size() );
- }
-
- int RocksCollectionCatalogEntry::getCompletedIndexCount() const {
- MetaData md = _getMetaData();
-
- int num = 0;
- for ( unsigned i = 0; i < md.indexes.size(); i++ ) {
- if ( md.indexes[i].ready )
- num++;
- }
- return num;
- }
-
int RocksCollectionCatalogEntry::getMaxAllowedIndexes() const {
return _maxAllowedIndexes;
}
- void RocksCollectionCatalogEntry::getAllIndexes( std::vector<std::string>* names ) const {
- MetaData md = _getMetaData();
-
- for ( unsigned i = 0; i < md.indexes.size(); i++ ) {
- names->push_back( md.indexes[i].spec["name"].String() );
- }
- }
-
- BSONObj RocksCollectionCatalogEntry::getIndexSpec( const StringData& indexName ) const {
- return getIndexSpec( indexName, _engine->getDB() );
- }
-
- BSONObj RocksCollectionCatalogEntry::getIndexSpec( const StringData& indexName,
- rocksdb::DB* db ) const {
+ BSONObj RocksCollectionCatalogEntry::getOtherIndexSpec( const StringData& indexName,
+ rocksdb::DB* db ) const {
MetaData md = _getMetaData( db );
int offset = md.findIndexOffset( indexName );
@@ -105,30 +68,6 @@ namespace mongo {
return md.indexes[offset].spec.getOwned();
}
- bool RocksCollectionCatalogEntry::isIndexMultikey( const StringData& indexName) const {
- MetaData md = _getMetaData();
-
- int offset = md.findIndexOffset( indexName );
- invariant( offset >= 0 );
- return md.indexes[offset].multikey;
- }
-
- DiskLoc RocksCollectionCatalogEntry::getIndexHead( const StringData& indexName ) const {
- MetaData md = _getMetaData();
-
- int offset = md.findIndexOffset( indexName );
- invariant( offset >= 0 );
- return md.indexes[offset].head;
- }
-
- bool RocksCollectionCatalogEntry::isIndexReady( const StringData& indexName ) const {
- MetaData md = _getMetaData();
-
- int offset = md.findIndexOffset( indexName );
- invariant( offset >= 0 );
- return md.indexes[offset].ready;
- }
-
bool RocksCollectionCatalogEntry::setIndexIsMultikey(OperationContext* txn,
const StringData& indexName,
bool multikey ) {
@@ -233,7 +172,7 @@ namespace mongo {
invariant( status.ok() );
}
- RocksCollectionCatalogEntry::MetaData RocksCollectionCatalogEntry::_getMetaData() const {
+ RocksCollectionCatalogEntry::MetaData RocksCollectionCatalogEntry::_getMetaData( OperationContext* txn ) const {
return _getMetaData( _engine->getDB() );
}
@@ -274,59 +213,4 @@ namespace mongo {
invariant( status.ok() );
}
- int RocksCollectionCatalogEntry::MetaData::findIndexOffset( const StringData& name ) const {
- for ( unsigned i = 0; i < indexes.size(); i++ )
- if ( indexes[i].spec["name"].String() == name )
- return i;
- return -1;
- }
-
- bool RocksCollectionCatalogEntry::MetaData::eraseIndex( const StringData& name ) {
- int indexOffset = findIndexOffset( name );
-
- if ( indexOffset < 0 ) {
- return false;
- }
-
- indexes.erase( indexes.begin() + indexOffset );
- return true;
- }
-
- BSONObj RocksCollectionCatalogEntry::MetaData::toBSON() const {
- BSONObjBuilder b;
- b.append( "ns", ns );
- {
- BSONArrayBuilder arr( b.subarrayStart( "indexes" ) );
- for ( unsigned i = 0; i < indexes.size(); i++ ) {
- BSONObjBuilder sub( arr.subobjStart() );
- sub.append( "spec", indexes[i].spec );
- sub.appendBool( "ready", indexes[i].ready );
- sub.appendBool( "multikey", indexes[i].multikey );
- sub.append( "head_a", indexes[i].head.a() );
- sub.append( "head_b", indexes[i].head.getOfs() );
- sub.done();
- }
- arr.done();
- }
- return b.obj();
- }
-
- void RocksCollectionCatalogEntry::MetaData::parse( const BSONObj& obj ) {
- ns = obj["ns"].valuestrsafe();
-
- BSONElement e = obj["indexes"];
- if ( e.isABSONObj() ) {
- std::vector<BSONElement> entries = e.Array();
- for ( unsigned i = 0; i < entries.size(); i++ ) {
- BSONObj idx = entries[i].Obj();
- IndexMetaData imd;
- imd.spec = idx["spec"].Obj().getOwned();
- imd.ready = idx["ready"].trueValue();
- imd.head = DiskLoc( idx["head_a"].Int(),
- idx["head_b"].Int() );
- imd.multikey = idx["multikey"].trueValue();
- indexes.push_back( imd );
- }
- }
- }
}
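
All of the deleted Rocks methods — index counts, multikey/ready flags, head lookups, MetaData parsing — were generic over a MetaData snapshot, so the commit hoists them into the new BSONCollectionCatalogEntry base and leaves Rocks implementing only the protected _getMetaData(txn) fetch. A minimal sketch of that template-method refactor, with illustrative names:

    #include <cassert>
    #include <string>
    #include <vector>

    struct OpCtx {};

    class CatalogEntryBase {         // plays the role of BSONCollectionCatalogEntry
    public:
        struct IndexMeta { std::string name; bool ready; };
        struct MetaData  { std::vector<IndexMeta> indexes; };

        virtual ~CatalogEntryBase() {}

        // Generic logic lives in the base, written against MetaData only.
        int getTotalIndexCount(OpCtx* txn) const {
            return static_cast<int>(_getMetaData(txn).indexes.size());
        }
        int getCompletedIndexCount(OpCtx* txn) const {
            const MetaData md = _getMetaData(txn);
            int n = 0;
            for (size_t i = 0; i < md.indexes.size(); i++)
                if (md.indexes[i].ready)
                    n++;
            return n;
        }

    protected:
        virtual MetaData _getMetaData(OpCtx* txn) const = 0;  // engine-specific
    };

    class RocksLikeEntry : public CatalogEntryBase {
    protected:
        virtual MetaData _getMetaData(OpCtx*) const {
            MetaData md;                     // a real engine would read storage
            IndexMeta a = { "_id_", true };
            IndexMeta b = { "a_1", false };
            md.indexes.push_back(a);
            md.indexes.push_back(b);
            return md;
        }
    };

    int main() {
        RocksLikeEntry e;
        OpCtx txn;
        assert(e.getTotalIndexCount(&txn) == 2);
        assert(e.getCompletedIndexCount(&txn) == 1);
        return 0;
    }
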
diff --git a/src/mongo/db/storage/rocks/rocks_collection_catalog_entry.h b/src/mongo/db/storage/rocks/rocks_collection_catalog_entry.h
index d8618fd68aa..52f6574cb67 100644
--- a/src/mongo/db/storage/rocks/rocks_collection_catalog_entry.h
+++ b/src/mongo/db/storage/rocks/rocks_collection_catalog_entry.h
@@ -31,6 +31,7 @@
#pragma once
#include "mongo/db/catalog/collection_catalog_entry.h"
+#include "mongo/db/storage/bson_collection_catalog_entry.h"
namespace rocksdb {
class DB;
@@ -40,40 +41,24 @@ namespace mongo {
class RocksEngine;
- class RocksCollectionCatalogEntry : public CollectionCatalogEntry {
+ class RocksCollectionCatalogEntry : public BSONCollectionCatalogEntry {
public:
RocksCollectionCatalogEntry( RocksEngine* engine, const StringData& ns );
virtual ~RocksCollectionCatalogEntry(){}
- virtual CollectionOptions getCollectionOptions(OperationContext* txn) const;
-
// ------- indexes ----------
- virtual int getTotalIndexCount() const;
-
- virtual int getCompletedIndexCount() const;
-
virtual int getMaxAllowedIndexes() const;
- virtual void getAllIndexes( std::vector<std::string>* names ) const;
-
- virtual BSONObj getIndexSpec( const StringData& idxName ) const;
-
- virtual bool isIndexMultikey( const StringData& indexName) const;
-
virtual bool setIndexIsMultikey(OperationContext* txn,
const StringData& indexName,
bool multikey = true);
- virtual DiskLoc getIndexHead( const StringData& indexName ) const;
-
virtual void setIndexHead( OperationContext* txn,
const StringData& indexName,
const DiskLoc& newHead );
- virtual bool isIndexReady( const StringData& indexName ) const;
-
virtual Status removeIndex( OperationContext* txn,
const StringData& indexName );
@@ -93,7 +78,7 @@ namespace mongo {
// ------ internal api
- BSONObj getIndexSpec( const StringData& idxName, rocksdb::DB* db ) const;
+ BSONObj getOtherIndexSpec( const StringData& idxName, rocksdb::DB* db ) const;
// called once when collection is created.
void createMetaData();
@@ -104,35 +89,10 @@ namespace mongo {
const string metaDataKey() { return _metaDataKey; }
- struct IndexMetaData {
- IndexMetaData() {}
- IndexMetaData( BSONObj s, bool r, DiskLoc h, bool m )
- : spec( s ), ready( r ), head( h ), multikey( m ) {}
-
- BSONObj spec;
- bool ready;
- DiskLoc head;
- bool multikey;
- };
-
- struct MetaData {
- void parse( const BSONObj& obj );
- BSONObj toBSON() const;
-
- int findIndexOffset( const StringData& name ) const;
-
- /**
- * Removes information about an index from the MetaData. Returns true if an index
- * called name existed and was deleted, and false otherwise.
- */
- bool eraseIndex( const StringData& name );
-
- std::string ns;
- std::vector<IndexMetaData> indexes;
- };
+ protected:
+ virtual MetaData _getMetaData( OperationContext* txn ) const;
private:
- MetaData _getMetaData() const;
MetaData _getMetaData( rocksdb::DB* db ) const;
MetaData _getMetaData_inlock() const;
diff --git a/src/mongo/db/storage/rocks/rocks_engine.cpp b/src/mongo/db/storage/rocks/rocks_engine.cpp
index 05aeebf7a15..2816119b01e 100644
--- a/src/mongo/db/storage/rocks/rocks_engine.cpp
+++ b/src/mongo/db/storage/rocks/rocks_engine.cpp
@@ -474,7 +474,7 @@ namespace mongo {
// Generate the Ordering object for each index, allowing the column families
// representing these indexes to eventually be opened
const string indexName = ns.substr( sepPos + 1 );
- const BSONObj spec = entry->collectionEntry->getIndexSpec( indexName, db );
+ const BSONObj spec = entry->collectionEntry->getOtherIndexSpec( indexName, db );
const Ordering order = Ordering::make( spec["key"].Obj() );
indexOrderings.insert( std::make_pair( indexName, order ) );
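
Renaming the two-argument variant to getOtherIndexSpec is more than taste: once the base class declares a virtual getIndexSpec(txn, name), keeping a same-named getIndexSpec(name, db) overload in the derived class would hide the inherited one from unqualified lookup through the derived type. A sketch of that name-hiding pitfall and the rename that sidesteps it (names illustrative):

    #include <cassert>

    struct OpCtx {};

    struct Base {
        virtual ~Base() {}
        virtual int getSpec(OpCtx*) const { return 1; }
    };

    struct Derived : Base {
        // Had this been named getSpec(int), it would hide Base::getSpec
        // for calls made through Derived; the distinct name avoids that.
        int getOtherSpec(int db) const { return db + 1; }
    };

    int main() {
        Derived d;
        OpCtx txn;
        assert(d.getSpec(&txn) == 1);     // inherited overload still reachable
        assert(d.getOtherSpec(41) == 42);
        return 0;
    }
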
diff --git a/src/mongo/db/storage/rocks/rocks_engine_test.cpp b/src/mongo/db/storage/rocks/rocks_engine_test.cpp
index 92f1a8f0eae..72993c79a25 100644
--- a/src/mongo/db/storage/rocks/rocks_engine_test.cpp
+++ b/src/mongo/db/storage/rocks/rocks_engine_test.cpp
@@ -94,7 +94,7 @@ namespace mongo {
loc = res.getValue();
}
- ASSERT_EQUALS( s, rs->dataFor( loc ).data() );
+ ASSERT_EQUALS( s, rs->dataFor( &opCtx, loc ).data() );
}
}
@@ -196,7 +196,10 @@ namespace mongo {
{
RocksCollectionCatalogEntry coll( &engine, "test.foo" );
coll.createMetaData();
- ASSERT_EQUALS( 0, coll.getTotalIndexCount() );
+ {
+ MyOperationContext opCtx( &engine );
+ ASSERT_EQUALS( 0, coll.getTotalIndexCount(&opCtx) );
+ }
BSONObj spec = BSON( "key" << BSON( "a" << 1 ) <<
"name" << "silly" <<
@@ -210,32 +213,50 @@ namespace mongo {
ASSERT_OK( status );
}
- ASSERT_EQUALS( 1, coll.getTotalIndexCount() );
- ASSERT_EQUALS( 0, coll.getCompletedIndexCount() );
- ASSERT( !coll.isIndexReady( "silly" ) );
+ {
+ MyOperationContext opCtx( &engine );
+ ASSERT_EQUALS( 1, coll.getTotalIndexCount(&opCtx) );
+ ASSERT_EQUALS( 0, coll.getCompletedIndexCount(&opCtx) );
+ ASSERT( !coll.isIndexReady( &opCtx, "silly" ) );
+ }
{
MyOperationContext opCtx( &engine );
coll.indexBuildSuccess( &opCtx, "silly" );
}
- ASSERT_EQUALS( 1, coll.getTotalIndexCount() );
- ASSERT_EQUALS( 1, coll.getCompletedIndexCount() );
- ASSERT( coll.isIndexReady( "silly" ) );
+ {
+ MyOperationContext opCtx( &engine );
+ ASSERT_EQUALS( 1, coll.getTotalIndexCount(&opCtx) );
+ ASSERT_EQUALS( 1, coll.getCompletedIndexCount(&opCtx) );
+ ASSERT( coll.isIndexReady( &opCtx, "silly" ) );
+ }
+
+ {
+ MyOperationContext opCtx( &engine );
+ ASSERT_EQUALS( DiskLoc(), coll.getIndexHead( &opCtx, "silly" ) );
+ }
- ASSERT_EQUALS( DiskLoc(), coll.getIndexHead( "silly" ) );
{
MyOperationContext opCtx( &engine );
coll.setIndexHead( &opCtx, "silly", DiskLoc( 123,321 ) );
}
- ASSERT_EQUALS( DiskLoc(123, 321), coll.getIndexHead( "silly" ) );
- ASSERT( !coll.isIndexMultikey( "silly" ) );
+ {
+ MyOperationContext opCtx( &engine );
+ ASSERT_EQUALS( DiskLoc(123, 321), coll.getIndexHead( &opCtx, "silly" ) );
+ ASSERT( !coll.isIndexMultikey( &opCtx, "silly" ) );
+ }
+
{
MyOperationContext opCtx( &engine );
coll.setIndexIsMultikey( &opCtx, "silly", true );
}
- ASSERT( coll.isIndexMultikey( "silly" ) );
+
+ {
+ MyOperationContext opCtx( &engine );
+ ASSERT( coll.isIndexMultikey( &opCtx, "silly" ) );
+ }
}
}
@@ -273,7 +294,7 @@ namespace mongo {
uow.commit();
}
- ASSERT_EQUALS( s, rs->dataFor( loc ).data() );
+ ASSERT_EQUALS( s, rs->dataFor( &opCtx, loc ).data() );
engine.cleanShutdown( &opCtx );
}
}
@@ -281,7 +302,7 @@ namespace mongo {
{
RocksEngine engine( path );
RocksRecordStore* rs = engine.getEntry( "test.foo" )->recordStore.get();
- ASSERT_EQUALS( s, rs->dataFor( loc ).data() );
+ ASSERT_EQUALS( s, rs->dataFor( NULL, loc ).data() );
}
}
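
Note the asymmetry in the reopened-engine block above: with no operation in flight the test passes NULL for the context, and the store must cope. A sketch of a read path written to tolerate a missing context, assuming the context is only needed for optional bookkeeping:

    #include <cassert>
    #include <cstddef>

    struct OpCtx {};

    class Store {                      // illustrative
    public:
        int dataFor(OpCtx* txn, int loc) const {
            if (txn) {
                // e.g. charge the read to the operation's stats / snapshot
            }
            return loc * 10;           // the read itself needs no context here
        }
    };

    int main() {
        Store s;
        OpCtx txn;
        assert(s.dataFor(&txn, 4) == 40);
        assert(s.dataFor(NULL, 4) == 40);  // NULL allowed while plumbing lands
        return 0;
    }
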
diff --git a/src/mongo/db/storage/rocks/rocks_record_store.cpp b/src/mongo/db/storage/rocks/rocks_record_store.cpp
index 36804cc9e31..e20cbdb3055 100644
--- a/src/mongo/db/storage/rocks/rocks_record_store.cpp
+++ b/src/mongo/db/storage/rocks/rocks_record_store.cpp
@@ -127,7 +127,7 @@ namespace mongo {
return static_cast<int64_t>( storageSize );
}
- RecordData RocksRecordStore::dataFor( const DiskLoc& loc) const {
+ RecordData RocksRecordStore::dataFor( OperationContext* txn, const DiskLoc& loc) const {
// TODO investigate using cursor API to get a Slice and avoid double copying.
std::string value;
@@ -307,7 +307,7 @@ namespace mongo {
) const {
invariant( !tailable );
- return new Iterator( this, dir, start );
+ return new Iterator( txn, this, dir, start );
}
@@ -357,7 +357,7 @@ namespace mongo {
bool invalidObject = false;
boost::scoped_ptr<RecordIterator> iter( getIterator( txn ) );
while( !iter->isEOF() ) {
- RecordData data = dataFor( iter->curr() );
+ RecordData data = dataFor( txn, iter->curr() );
size_t dataSize;
const Status status = adaptor->validate( data, &dataSize );
if (!status.isOK()) {
@@ -524,10 +524,12 @@ namespace mongo {
// --------
- RocksRecordStore::Iterator::Iterator( const RocksRecordStore* rs,
+ RocksRecordStore::Iterator::Iterator( OperationContext* txn,
+ const RocksRecordStore* rs,
const CollectionScanParams::Direction& dir,
const DiskLoc& start )
- : _rs( rs ),
+ : _txn( txn ),
+ _rs( rs ),
_dir( dir ),
// XXX not using a snapshot here
_iterator( _rs->_db->NewIterator( rs->_readOptions(), rs->_columnFamily ) ) {
@@ -597,7 +599,7 @@ namespace mongo {
}
RecordData RocksRecordStore::Iterator::dataFor( const DiskLoc& loc ) const {
- return _rs->dataFor( loc );
+ return _rs->dataFor( _txn, loc );
}
bool RocksRecordStore::Iterator::_forward() const {
diff --git a/src/mongo/db/storage/rocks/rocks_record_store.h b/src/mongo/db/storage/rocks/rocks_record_store.h
index 16756fdbf22..39d26c6d84e 100644
--- a/src/mongo/db/storage/rocks/rocks_record_store.h
+++ b/src/mongo/db/storage/rocks/rocks_record_store.h
@@ -64,9 +64,9 @@ namespace mongo {
// name of the RecordStore implementation
virtual const char* name() const { return "rocks"; }
- virtual long long dataSize() const { return _dataSize; }
+ virtual long long dataSize( OperationContext* txn ) const { return _dataSize; }
- virtual long long numRecords() const { return _numRecords; }
+ virtual long long numRecords( OperationContext* txn ) const { return _numRecords; }
virtual bool isCapped() const { return _isCapped; }
@@ -76,7 +76,7 @@ namespace mongo {
// CRUD related
- virtual RecordData dataFor( const DiskLoc& loc ) const;
+ virtual RecordData dataFor( OperationContext* txn, const DiskLoc& loc ) const;
virtual void deleteRecord( OperationContext* txn, const DiskLoc& dl );
@@ -155,7 +155,8 @@ namespace mongo {
class Iterator : public RecordIterator {
public:
- Iterator( const RocksRecordStore* rs,
+ Iterator( OperationContext* txn,
+ const RocksRecordStore* rs,
const CollectionScanParams::Direction& dir,
const DiskLoc& start );
@@ -171,6 +172,7 @@ namespace mongo {
bool _forward() const;
void _checkStatus();
+ OperationContext* _txn;
const RocksRecordStore* _rs;
CollectionScanParams::Direction _dir;
boost::scoped_ptr<rocksdb::Iterator> _iterator;
diff --git a/src/mongo/db/storage/rocks/rocks_record_store_test.cpp b/src/mongo/db/storage/rocks/rocks_record_store_test.cpp
index b5a4584da62..7ca0eecc55e 100644
--- a/src/mongo/db/storage/rocks/rocks_record_store_test.cpp
+++ b/src/mongo/db/storage/rocks/rocks_record_store_test.cpp
@@ -197,13 +197,13 @@ namespace mongo {
loc = res.getValue();
}
- ASSERT_EQUALS( s, rs.dataFor( loc ).data() );
+ ASSERT_EQUALS( s, rs.dataFor( NULL, loc ).data() );
}
{
RocksRecordStore rs( "foo.bar", db.get(), cfh.get(), db->DefaultColumnFamily() );
- ASSERT_EQUALS( 1, rs.numRecords() );
- ASSERT_EQUALS( size, rs.dataSize() );
+ ASSERT_EQUALS( 1, rs.numRecords( NULL ) );
+ ASSERT_EQUALS( size, rs.dataSize( NULL ) );
}
}
@@ -229,20 +229,20 @@ namespace mongo {
loc = res.getValue();
}
- ASSERT_EQUALS( s, rs.dataFor( loc ).data() );
- ASSERT_EQUALS( 1, rs.numRecords() );
- ASSERT_EQUALS( static_cast<long long> ( s.length() + 1 ), rs.dataSize() );
+ ASSERT_EQUALS( s, rs.dataFor( NULL, loc ).data() );
+ ASSERT_EQUALS( 1, rs.numRecords( NULL ) );
+ ASSERT_EQUALS( static_cast<long long> ( s.length() + 1 ), rs.dataSize( NULL ) );
}
- ASSERT( rs.dataFor( loc ).data() != NULL );
+ ASSERT( rs.dataFor( NULL, loc ).data() != NULL );
{
MyOperationContext opCtx( db.get() );
WriteUnitOfWork uow( &opCtx );
rs.deleteRecord( &opCtx, loc );
- ASSERT_EQUALS( 0, rs.numRecords() );
- ASSERT_EQUALS( 0, rs.dataSize() );
+ ASSERT_EQUALS( 0, rs.numRecords( NULL ) );
+ ASSERT_EQUALS( 0, rs.dataSize( NULL ) );
}
}
}
@@ -270,7 +270,7 @@ namespace mongo {
loc = res.getValue();
}
- ASSERT_EQUALS( s1, rs.dataFor( loc ).data() );
+ ASSERT_EQUALS( s1, rs.dataFor( NULL, loc ).data() );
}
{
@@ -287,7 +287,7 @@ namespace mongo {
ASSERT( loc == res.getValue() );
}
- ASSERT_EQUALS( s2, rs.dataFor( loc ).data() );
+ ASSERT_EQUALS( s2, rs.dataFor( NULL, loc ).data() );
}
}
@@ -316,7 +316,7 @@ namespace mongo {
loc = res.getValue();
}
- ASSERT_EQUALS( s1, rs.dataFor( loc ).data() );
+ ASSERT_EQUALS( s1, rs.dataFor( NULL, loc ).data() );
}
{
@@ -335,7 +335,7 @@ namespace mongo {
dv );
ASSERT_OK( res );
}
- ASSERT_EQUALS( s2, rs.dataFor( loc ).data() );
+ ASSERT_EQUALS( s2, rs.dataFor( NULL, loc ).data() );
}
}
@@ -383,8 +383,8 @@ namespace mongo {
ASSERT_EQUALS( a, b );
- ASSERT_EQUALS( string("a"), rs1.dataFor( a ).data() );
- ASSERT_EQUALS( string("b"), rs2.dataFor( b ).data() );
+ ASSERT_EQUALS( string("a"), rs1.dataFor( NULL, a ).data() );
+ ASSERT_EQUALS( string("b"), rs2.dataFor( NULL, b ).data() );
delete cf2;
delete cf1;
@@ -408,7 +408,7 @@ namespace mongo {
loc = res.getValue();
}
- ASSERT_EQUALS( s, rs.dataFor( loc ).data() );
+ ASSERT_EQUALS( s, rs.dataFor( NULL, loc ).data() );
}
{
@@ -442,7 +442,7 @@ namespace mongo {
loc = res.getValue();
}
- ASSERT_EQUALS( origStr, rs.dataFor( loc ).data() );
+ ASSERT_EQUALS( origStr, rs.dataFor( NULL, loc ).data() );
}
}
@@ -455,8 +455,8 @@ namespace mongo {
dbAndCfh.second.get(),
db->DefaultColumnFamily() );
- ASSERT_EQUALS( static_cast<long long> ( origStr.size() + 1 ), rs.dataSize() );
- ASSERT_EQUALS( 1, rs.numRecords() );
+ ASSERT_EQUALS( static_cast<long long> ( origStr.size() + 1 ), rs.dataSize( NULL ) );
+ ASSERT_EQUALS( 1, rs.numRecords( NULL ) );
{
MyOperationContext opCtx( db.get() );
@@ -467,7 +467,7 @@ namespace mongo {
ASSERT_OK( res.getStatus() );
}
- ASSERT_EQUALS( newStr, rs.dataFor( loc ).data() );
+ ASSERT_EQUALS( newStr, rs.dataFor( NULL, loc ).data() );
}
}
@@ -480,8 +480,8 @@ namespace mongo {
dbAndCfh.second.get(),
db->DefaultColumnFamily() );
- ASSERT_EQUALS( static_cast<long long>( newStr.size() + 1 ), rs.dataSize() );
- ASSERT_EQUALS( 1, rs.numRecords() );
+ ASSERT_EQUALS( static_cast<long long>( newStr.size() + 1 ), rs.dataSize( NULL ) );
+ ASSERT_EQUALS( 1, rs.numRecords( NULL ) );
{
MyOperationContext opCtx( db.get() );
@@ -491,8 +491,8 @@ namespace mongo {
}
}
- ASSERT_EQUALS( 0, rs.dataSize() );
- ASSERT_EQUALS( 0, rs.numRecords() );
+ ASSERT_EQUALS( 0, rs.dataSize( NULL ) );
+ ASSERT_EQUALS( 0, rs.numRecords( NULL ) );
}
}
@@ -648,8 +648,8 @@ namespace mongo {
Status stat = rs.truncate( &opCtx );
ASSERT_OK( stat );
- ASSERT_EQUALS( 0, rs.numRecords() );
- ASSERT_EQUALS( 0, rs.dataSize() );
+ ASSERT_EQUALS( 0, rs.numRecords( NULL ) );
+ ASSERT_EQUALS( 0, rs.dataSize( NULL ) );
}
// Test that truncate does not fail on an empty collection
@@ -659,8 +659,8 @@ namespace mongo {
Status stat = rs.truncate( &opCtx );
ASSERT_OK( stat );
- ASSERT_EQUALS( 0, rs.numRecords() );
- ASSERT_EQUALS( 0, rs.dataSize() );
+ ASSERT_EQUALS( 0, rs.numRecords( NULL ) );
+ ASSERT_EQUALS( 0, rs.dataSize( NULL ) );
}
}
}
@@ -695,13 +695,13 @@ namespace mongo {
rs.deleteRecord( &opCtx, loc );
- RecordData recData = rs.dataFor( loc/*, &opCtx */ );
+ RecordData recData = rs.dataFor( NULL, loc/*, &opCtx */ );
ASSERT( !recData.data() && recData.size() == 0 );
// XXX this test doesn't yet work, but there should be some notion of snapshots,
// and an op context whose snapshot predates the deletion should still be able
// to read this data
- RecordData recData2 = rs.dataFor( loc/*, &opCtx2 */ );
+ RecordData recData2 = rs.dataFor( NULL, loc/*, &opCtx2 */ );
ASSERT( recData2.data() && recData2.size() == size );
}
}
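The last test above records intended snapshot semantics, not current behavior; without snapshots, both reads observe the delete, which is why it is marked XXX. Once dataFor() reads through the context's RecoveryUnit, the commented-out contexts become real and the test's intent reads roughly as follows (a sketch; opCtx2 is the second context the comments allude to):

    rs.deleteRecord( &opCtx, loc );
    // The deleting context sees the delete immediately...
    RecordData recData = rs.dataFor( &opCtx, loc );
    ASSERT( !recData.data() && recData.size() == 0 );
    // ...while a context whose snapshot predates the delete still sees the record.
    RecordData recData2 = rs.dataFor( &opCtx2, loc );
    ASSERT( recData2.data() && recData2.size() == size );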
diff --git a/src/mongo/db/storage/rocks/rocks_sorted_data_impl.cpp b/src/mongo/db/storage/rocks/rocks_sorted_data_impl.cpp
index 95f10ecdffb..fcbbdf3e518 100644
--- a/src/mongo/db/storage/rocks/rocks_sorted_data_impl.cpp
+++ b/src/mongo/db/storage/rocks/rocks_sorted_data_impl.cpp
@@ -462,7 +462,7 @@ namespace mongo {
}
}
- bool RocksSortedDataImpl::isEmpty() {
+ bool RocksSortedDataImpl::isEmpty( OperationContext* txn ) {
// XXX doesn't use snapshot
boost::scoped_ptr<rocksdb::Iterator> it( _db->NewIterator( rocksdb::ReadOptions(),
_columnFamily ) );
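The truncated body of isEmpty() simply seeks a fresh iterator, which is why the XXX notes it ignores snapshots. The whole check is roughly this (the completion after the two lines shown is a sketch):

    bool RocksSortedDataImpl::isEmpty( OperationContext* txn ) {
        // XXX doesn't use snapshot: a fresh iterator always sees the latest
        // committed state, regardless of the caller's context.
        boost::scoped_ptr<rocksdb::Iterator> it( _db->NewIterator( rocksdb::ReadOptions(),
                                                                   _columnFamily ) );
        it->SeekToFirst();
        return !it->Valid();  // empty iff there is no first key
    }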
diff --git a/src/mongo/db/storage/rocks/rocks_sorted_data_impl.h b/src/mongo/db/storage/rocks/rocks_sorted_data_impl.h
index 1c8d10c416a..645c0a177fd 100644
--- a/src/mongo/db/storage/rocks/rocks_sorted_data_impl.h
+++ b/src/mongo/db/storage/rocks/rocks_sorted_data_impl.h
@@ -77,7 +77,7 @@ namespace mongo {
virtual void fullValidate(OperationContext* txn, long long* numKeysOut);
- virtual bool isEmpty();
+ virtual bool isEmpty(OperationContext* txn);
virtual Status touch(OperationContext* txn) const;
diff --git a/src/mongo/db/storage/sorted_data_interface.h b/src/mongo/db/storage/sorted_data_interface.h
index 486440f1252..33a38f29da1 100644
--- a/src/mongo/db/storage/sorted_data_interface.h
+++ b/src/mongo/db/storage/sorted_data_interface.h
@@ -98,7 +98,7 @@ namespace mongo {
*/
virtual long long getSpaceUsedBytes( OperationContext* txn ) const = 0;
- virtual bool isEmpty() = 0;
+ virtual bool isEmpty(OperationContext* txn) = 0;
/**
* Attempt to bring whole index into memory. No-op is ok if not supported.
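Because isEmpty() is pure virtual here, every implementation fails to compile until it adopts the widened signature; in a codebase that predates C++11's override keyword, that pure-virtual error is the only guard against an overrider silently becoming a new, never-called function. For any concrete class the change is mechanical (illustrative stub; _keys is an assumed member):

    // Before: virtual bool isEmpty() { return _keys.empty(); }
    virtual bool isEmpty( OperationContext* txn ) { return _keys.empty(); }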
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index a90c78d29a1..833fdf2a2ec 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -133,7 +133,7 @@ namespace mongo {
break;
}
- if ( collection->getIndexCatalog()->findIndexByKeyPattern( key ) == NULL ) {
+ if ( collection->getIndexCatalog()->findIndexByKeyPattern( txn, key ) == NULL ) {
// index not finished yet
LOG(1) << " skipping index because not finished";
continue;
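The TTL monitor is a representative caller of the catalog changes: findIndexByKeyPattern() now takes the context, and a NULL result still just means the index build has not finished. The surrounding pattern, condensed (collection, txn, and key come from the TTL loop above):

    IndexDescriptor* desc =
        collection->getIndexCatalog()->findIndexByKeyPattern( txn, key );
    if ( desc == NULL ) {
        LOG(1) << " skipping index because not finished";  // still building
        continue;
    }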