author     Craig Harris <craig.harris@10gen.com>    2014-06-25 17:06:03 -0400
committer  CraigHarris <craig.harris@10gen.com>     2014-07-08 10:53:53 -0400
commit     0450a0f25d4509f6515e939a5ef3a671f744cc2a (patch)
tree       bd5004de51df5ebe4923a6af0863ab5c4b10f8fc /src
parent     b1048dc6f42e184c08853fe98c21a90ecfb40d6b (diff)
download   mongo-0450a0f25d4509f6515e939a5ef3a671f744cc2a.tar.gz
SERVER-14387 Propagate OperationContext through calls requiring document read locks, without doing the locking.
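
The change is mechanical: each read path that will eventually need a document-level
read lock grows a leading OperationContext* parameter, and the context is threaded
down to the storage layer without any locking being performed yet. A minimal sketch
of the pattern, using hypothetical simplified types (the real signatures appear in
the hunks below):

    class RecordIterator;
    class OperationContext;   // opaque per-operation state

    class RecordStore {
    public:
        // The storage layer now sees the caller's context on every read.
        RecordIterator* getIterator(OperationContext* txn) const;
    };

    class Collection {
    public:
        // Before: RecordIterator* getIterator() const;
        // After: accept the context and forward it; no locks are taken here yet.
        RecordIterator* getIterator(OperationContext* txn) const {
            return _recordStore->getIterator(txn);
        }
    private:
        RecordStore* _recordStore;
    };
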
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/catalog/collection.cpp  29
-rw-r--r--  src/mongo/db/catalog/collection.h  9
-rw-r--r--  src/mongo/db/catalog/collection_catalog_entry.h  2
-rw-r--r--  src/mongo/db/catalog/database.cpp  6
-rw-r--r--  src/mongo/db/catalog/index_catalog.cpp  11
-rw-r--r--  src/mongo/db/catalog/index_catalog.h  4
-rw-r--r--  src/mongo/db/catalog/index_create.cpp  4
-rw-r--r--  src/mongo/db/commands/collection_to_capped.cpp  7
-rw-r--r--  src/mongo/db/commands/count.cpp  9
-rw-r--r--  src/mongo/db/commands/count.h  3
-rw-r--r--  src/mongo/db/commands/dbhash.cpp  8
-rw-r--r--  src/mongo/db/commands/distinct.cpp  4
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp  4
-rw-r--r--  src/mongo/db/commands/geonear.cpp  4
-rw-r--r--  src/mongo/db/commands/group.cpp  4
-rw-r--r--  src/mongo/db/commands/list_collections.cpp  4
-rw-r--r--  src/mongo/db/commands/mr.cpp  4
-rw-r--r--  src/mongo/db/commands/parallel_collection_scan.cpp  2
-rw-r--r--  src/mongo/db/commands/pipeline_command.cpp  5
-rw-r--r--  src/mongo/db/commands/rename_collection.cpp  6
-rw-r--r--  src/mongo/db/commands/test_commands.cpp  5
-rw-r--r--  src/mongo/db/concurrency/SConscript  3
-rw-r--r--  src/mongo/db/concurrency/lock_mgr.cpp  31
-rw-r--r--  src/mongo/db/concurrency/lock_mgr.h  46
-rw-r--r--  src/mongo/db/d_concurrency.cpp  9
-rw-r--r--  src/mongo/db/d_concurrency.h  5
-rw-r--r--  src/mongo/db/db.cpp  4
-rw-r--r--  src/mongo/db/dbcommands.cpp  12
-rw-r--r--  src/mongo/db/dbhelpers.cpp  18
-rw-r--r--  src/mongo/db/exec/2dcommon.cpp  18
-rw-r--r--  src/mongo/db/exec/2dcommon.h  20
-rw-r--r--  src/mongo/db/exec/2dnear.cpp  4
-rw-r--r--  src/mongo/db/exec/2dnear.h  6
-rw-r--r--  src/mongo/db/exec/collection_scan.cpp  11
-rw-r--r--  src/mongo/db/exec/collection_scan.h  9
-rw-r--r--  src/mongo/db/exec/count.cpp  9
-rw-r--r--  src/mongo/db/exec/count.h  6
-rw-r--r--  src/mongo/db/exec/distinct_scan.cpp  7
-rw-r--r--  src/mongo/db/exec/distinct_scan.h  5
-rw-r--r--  src/mongo/db/exec/idhack.cpp  16
-rw-r--r--  src/mongo/db/exec/idhack.h  11
-rw-r--r--  src/mongo/db/exec/index_scan.cpp  11
-rw-r--r--  src/mongo/db/exec/index_scan.h  9
-rw-r--r--  src/mongo/db/exec/oplogstart.cpp  14
-rw-r--r--  src/mongo/db/exec/oplogstart.h  10
-rw-r--r--  src/mongo/db/exec/s2near.cpp  8
-rw-r--r--  src/mongo/db/exec/s2near.h  9
-rw-r--r--  src/mongo/db/exec/stagedebug_cmd.cpp  32
-rw-r--r--  src/mongo/db/exec/subplan.cpp  22
-rw-r--r--  src/mongo/db/exec/subplan.h  13
-rw-r--r--  src/mongo/db/exec/text.cpp  10
-rw-r--r--  src/mongo/db/exec/text.h  12
-rw-r--r--  src/mongo/db/fts/fts_command_mongod.cpp  4
-rw-r--r--  src/mongo/db/geo/haystack.cpp  4
-rw-r--r--  src/mongo/db/index/btree_based_access_method.cpp  23
-rw-r--r--  src/mongo/db/index/btree_based_access_method.h  15
-rw-r--r--  src/mongo/db/index/btree_based_bulk_access_method.h  13
-rw-r--r--  src/mongo/db/index/haystack_access_method.cpp  6
-rw-r--r--  src/mongo/db/index/haystack_access_method.h  5
-rw-r--r--  src/mongo/db/index/index_access_method.h  13
-rw-r--r--  src/mongo/db/ops/delete_executor.cpp  5
-rw-r--r--  src/mongo/db/ops/update.cpp  6
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.cpp  7
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.h  4
-rw-r--r--  src/mongo/db/prefetch.cpp  14
-rw-r--r--  src/mongo/db/query/explain.h  2
-rw-r--r--  src/mongo/db/query/get_executor.cpp  47
-rw-r--r--  src/mongo/db/query/get_executor.h  14
-rw-r--r--  src/mongo/db/query/get_runner.cpp  47
-rw-r--r--  src/mongo/db/query/get_runner.h  18
-rw-r--r--  src/mongo/db/query/idhack_runner.cpp  16
-rw-r--r--  src/mongo/db/query/idhack_runner.h  15
-rw-r--r--  src/mongo/db/query/internal_plans.h  16
-rw-r--r--  src/mongo/db/query/internal_runner.h  5
-rw-r--r--  src/mongo/db/query/new_find.cpp  19
-rw-r--r--  src/mongo/db/query/stage_builder.cpp  44
-rw-r--r--  src/mongo/db/query/stage_builder.h  5
-rw-r--r--  src/mongo/db/query/subplan_runner.cpp  19
-rw-r--r--  src/mongo/db/query/subplan_runner.h  12
-rw-r--r--  src/mongo/db/repl/master_slave.cpp  8
-rw-r--r--  src/mongo/db/repl/oplog.cpp  4
-rw-r--r--  src/mongo/db/repl/repl_info.cpp  5
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp  5
-rw-r--r--  src/mongo/db/storage/heap1/heap1_btree_impl.cpp  22
-rw-r--r--  src/mongo/db/storage/heap1/heap1_database_catalog_entry.h  2
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp  19
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h  3
-rw-r--r--  src/mongo/db/storage/mmap_v1/repair_database.cpp  13
-rw-r--r--  src/mongo/db/storage/rocks/rocks_btree_impl.cpp  11
-rw-r--r--  src/mongo/db/storage/rocks/rocks_btree_impl.h  8
-rw-r--r--  src/mongo/db/storage/rocks/rocks_collection_catalog_entry.cpp  2
-rw-r--r--  src/mongo/db/storage/rocks/rocks_collection_catalog_entry.h  2
-rw-r--r--  src/mongo/db/storage/rocks/rocks_record_store.cpp  13
-rw-r--r--  src/mongo/db/storage/rocks/rocks_record_store.h  11
-rw-r--r--  src/mongo/db/storage/rocks/rocks_record_store_test.cpp  3
-rw-r--r--  src/mongo/db/structure/SConscript  1
-rw-r--r--  src/mongo/db/structure/btree/btree_interface.cpp  35
-rw-r--r--  src/mongo/db/structure/btree/btree_interface.h  8
-rw-r--r--  src/mongo/db/structure/btree/btree_logic.cpp  178
-rw-r--r--  src/mongo/db/structure/btree/btree_logic.h  96
-rw-r--r--  src/mongo/db/structure/btree/btree_logic_test.cpp  178
-rw-r--r--  src/mongo/db/structure/btree/btree_test_help.cpp  3
-rw-r--r--  src/mongo/db/structure/catalog/namespace_details_collection_entry.cpp  4
-rw-r--r--  src/mongo/db/structure/catalog/namespace_details_collection_entry.h  2
-rw-r--r--  src/mongo/db/structure/catalog/namespace_details_rsv1_metadata.cpp  9
-rw-r--r--  src/mongo/db/structure/catalog/namespace_details_rsv1_metadata.h  6
-rw-r--r--  src/mongo/db/structure/record_store.h  18
-rw-r--r--  src/mongo/db/structure/record_store_heap.cpp  36
-rw-r--r--  src/mongo/db/structure/record_store_heap.h  24
-rw-r--r--  src/mongo/db/structure/record_store_v1_base.cpp  160
-rw-r--r--  src/mongo/db/structure/record_store_v1_base.h  50
-rw-r--r--  src/mongo/db/structure/record_store_v1_capped.cpp  75
-rw-r--r--  src/mongo/db/structure/record_store_v1_capped.h  13
-rw-r--r--  src/mongo/db/structure/record_store_v1_capped_iterator.cpp  17
-rw-r--r--  src/mongo/db/structure/record_store_v1_capped_iterator.h  6
-rw-r--r--  src/mongo/db/structure/record_store_v1_capped_test.cpp  20
-rw-r--r--  src/mongo/db/structure/record_store_v1_repair_iterator.cpp  13
-rw-r--r--  src/mongo/db/structure/record_store_v1_repair_iterator.h  6
-rw-r--r--  src/mongo/db/structure/record_store_v1_simple.cpp  43
-rw-r--r--  src/mongo/db/structure/record_store_v1_simple.h  6
-rw-r--r--  src/mongo/db/structure/record_store_v1_simple_iterator.cpp  17
-rw-r--r--  src/mongo/db/structure/record_store_v1_simple_iterator.h  6
-rw-r--r--  src/mongo/db/structure/record_store_v1_simple_test.cpp  18
-rw-r--r--  src/mongo/db/structure/record_store_v1_test_help.cpp  29
-rw-r--r--  src/mongo/db/structure/record_store_v1_test_help.h  9
-rw-r--r--  src/mongo/dbtests/documentsourcetests.cpp  4
-rw-r--r--  src/mongo/dbtests/jstests.cpp  3
-rw-r--r--  src/mongo/dbtests/namespacetests.cpp  14
-rw-r--r--  src/mongo/dbtests/oplogstarttests.cpp  4
-rw-r--r--  src/mongo/dbtests/plan_ranking.cpp  2
-rw-r--r--  src/mongo/dbtests/query_multi_plan_runner.cpp  4
-rw-r--r--  src/mongo/dbtests/query_single_solution_runner.cpp  6
-rw-r--r--  src/mongo/dbtests/query_stage_and.cpp  76
-rw-r--r--  src/mongo/dbtests/query_stage_collscan.cpp  14
-rw-r--r--  src/mongo/dbtests/query_stage_count.cpp  26
-rw-r--r--  src/mongo/dbtests/query_stage_distinct.cpp  6
-rw-r--r--  src/mongo/dbtests/query_stage_fetch.cpp  6
-rw-r--r--  src/mongo/dbtests/query_stage_keep.cpp  6
-rw-r--r--  src/mongo/dbtests/query_stage_merge_sort.cpp  32
-rw-r--r--  src/mongo/dbtests/query_stage_sort.cpp  6
-rw-r--r--  src/mongo/dbtests/query_stage_tests.cpp  4
-rw-r--r--  src/mongo/dbtests/repltests.cpp  22
-rw-r--r--  src/mongo/dbtests/runner_registry.cpp  4
-rw-r--r--  src/mongo/s/d_migrate.cpp  4
-rw-r--r--  src/mongo/s/d_split.cpp  10
-rw-r--r--  src/mongo/tools/dump.cpp  8
146 files changed, 1398 insertions, 1012 deletions
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index f21a00abfc6..37d5ae84104 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -1,7 +1,7 @@
// collection.cpp
/**
-* Copyright (C) 2013 10gen Inc.
+* Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -122,20 +122,23 @@ namespace mongo {
return true;
}
- RecordIterator* Collection::getIterator( const DiskLoc& start, bool tailable,
- const CollectionScanParams::Direction& dir) const {
+ RecordIterator* Collection::getIterator( OperationContext* txn,
+ const DiskLoc& start,
+ bool tailable,
+ const CollectionScanParams::Direction& dir) const {
invariant( ok() );
- return _recordStore->getIterator( start, tailable, dir );
+ return _recordStore->getIterator( txn, start, tailable, dir );
}
- vector<RecordIterator*> Collection::getManyIterators() const {
- return _recordStore->getManyIterators();
+ vector<RecordIterator*> Collection::getManyIterators( OperationContext* txn ) const {
+ return _recordStore->getManyIterators(txn);
}
- int64_t Collection::countTableScan( const MatchExpression* expression ) {
- scoped_ptr<RecordIterator> iterator( getIterator( DiskLoc(),
- false,
- CollectionScanParams::FORWARD ) );
+ int64_t Collection::countTableScan( OperationContext* txn, const MatchExpression* expression ) {
+ scoped_ptr<RecordIterator> iterator( getIterator( txn,
+ DiskLoc(),
+ false,
+ CollectionScanParams::FORWARD ) );
int64_t count = 0;
while ( !iterator->isEOF() ) {
DiskLoc loc = iterator->getNext();
@@ -178,7 +181,7 @@ namespace mongo {
if ( isCapped() ) {
// TODO: old god not done
- Status ret = _indexCatalog.checkNoIndexConflicts( docToInsert );
+ Status ret = _indexCatalog.checkNoIndexConflicts( txn, docToInsert );
if ( !ret.isOK() )
return StatusWith<DiskLoc>( ret );
}
@@ -332,7 +335,7 @@ namespace mongo {
|| repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(descriptor);
UpdateTicket* updateTicket = new UpdateTicket();
updateTickets.mutableMap()[descriptor] = updateTicket;
- Status ret = iam->validateUpdate(objOld, objNew, oldLocation, options, updateTicket );
+ Status ret = iam->validateUpdate(txn, objOld, objNew, oldLocation, options, updateTicket );
if ( !ret.isOK() ) {
return StatusWith<DiskLoc>( ret );
}
@@ -527,7 +530,7 @@ namespace mongo {
invariant( iam );
int64_t keys;
- iam->validate(&keys);
+ iam->validate(txn, &keys);
indexes.appendNumber(descriptor->indexNamespace(),
static_cast<long long>(keys));
idxn++;
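
Seen from a caller, the context simply rides along the whole read chain: command
and catalog code that already holds an OperationContext* hands it to
countTableScan, which hands it to getIterator, which hands it to the RecordStore.
For example, mirroring the index_catalog.cpp hunk further down:

    // Inside catalog code that already holds an OperationContext* txn:
    EqualityMatchExpression expr;
    BSONObj nsBSON = BSON( "ns" << _collection->ns() );
    invariant( expr.init( "ns", nsBSON.firstElement() ).isOK() );
    int64_t n = systemIndexes->countTableScan( txn, &expr );  // txn now required
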
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 6ddf8bd9cf4..dea27fdc77b 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -1,7 +1,7 @@
// collection.h
/**
-* Copyright (C) 2012 10gen Inc.
+* Copyright (C) 2012-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -137,7 +137,8 @@ namespace mongo {
* canonical to get all would be
* getIterator( DiskLoc(), false, CollectionScanParams::FORWARD )
*/
- RecordIterator* getIterator( const DiskLoc& start = DiskLoc(),
+ RecordIterator* getIterator( OperationContext* txn,
+ const DiskLoc& start = DiskLoc(),
bool tailable = false,
const CollectionScanParams::Direction& dir = CollectionScanParams::FORWARD ) const;
@@ -146,7 +147,7 @@ namespace mongo {
* all returned iterators is equivalent to iterating the full collection.
* Caller owns all pointers in the vector.
*/
- std::vector<RecordIterator*> getManyIterators() const;
+ std::vector<RecordIterator*> getManyIterators( OperationContext* txn ) const;
/**
@@ -154,7 +155,7 @@ namespace mongo {
* this should only be used at a very low level
* does no yielding, indexes, etc...
*/
- int64_t countTableScan( const MatchExpression* expression );
+ int64_t countTableScan( OperationContext* txn, const MatchExpression* expression );
void deleteDocument( OperationContext* txn,
const DiskLoc& loc,
diff --git a/src/mongo/db/catalog/collection_catalog_entry.h b/src/mongo/db/catalog/collection_catalog_entry.h
index 28d2def59ff..1b0175b59c8 100644
--- a/src/mongo/db/catalog/collection_catalog_entry.h
+++ b/src/mongo/db/catalog/collection_catalog_entry.h
@@ -51,7 +51,7 @@ namespace mongo {
// ------- indexes ----------
- virtual CollectionOptions getCollectionOptions() const = 0;
+ virtual CollectionOptions getCollectionOptions( OperationContext* txn ) const = 0;
virtual int getTotalIndexCount() const = 0;
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index cbfd70a1502..e9ff8027e50 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -1,7 +1,7 @@
// database.cpp
/**
-* Copyright (C) 2008 10gen Inc.
+* Copyright (C) 2008-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -172,7 +172,7 @@ namespace mongo {
CollectionCatalogEntry* coll = _dbEntry->getCollectionCatalogEntry( txn, ns );
- CollectionOptions options = coll->getCollectionOptions();
+ CollectionOptions options = coll->getCollectionOptions( txn );
if ( !options.temp )
continue;
@@ -267,7 +267,7 @@ namespace mongo {
size += collection->dataSize();
BSONObjBuilder temp;
- storageSize += collection->getRecordStore()->storageSize( &temp );
+ storageSize += collection->getRecordStore()->storageSize( opCtx, &temp );
numExtents += temp.obj()["numExtents"].numberInt(); // XXX
indexes += collection->getIndexCatalog()->numIndexesTotal();
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 840d0042872..52df950c274 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -1,7 +1,7 @@
// index_catalog.cpp
/**
-* Copyright (C) 2013 10gen Inc.
+* Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -222,7 +222,8 @@ namespace mongo {
}
auto_ptr<Runner> runner(
- InternalPlanner::collectionScan(db->_indexesName,
+ InternalPlanner::collectionScan(txn,
+ db->_indexesName,
db->getCollection(txn, db->_indexesName)));
BSONObj index;
@@ -691,7 +692,7 @@ namespace mongo {
EqualityMatchExpression expr;
BSONObj nsBSON = BSON( "ns" << _collection->ns() );
invariant( expr.init( "ns", nsBSON.firstElement() ).isOK() );
- numSystemIndexesEntries = systemIndexes->countTableScan( &expr );
+ numSystemIndexesEntries = systemIndexes->countTableScan( txn, &expr );
}
else {
// this is ok, 0 is the right number
@@ -1078,7 +1079,7 @@ namespace mongo {
}
}
- Status IndexCatalog::checkNoIndexConflicts( const BSONObj &obj ) {
+ Status IndexCatalog::checkNoIndexConflicts( OperationContext* txn, const BSONObj &obj ) {
IndexIterator ii = getIndexIterator( true );
while ( ii.more() ) {
IndexDescriptor* descriptor = ii.next();
@@ -1096,7 +1097,7 @@ namespace mongo {
options.dupsAllowed = false;
UpdateTicket ticket;
- Status ret = iam->validateUpdate(BSONObj(), obj, DiskLoc(), options, &ticket);
+ Status ret = iam->validateUpdate(txn, BSONObj(), obj, DiskLoc(), options, &ticket);
if ( !ret.isOK() )
return ret;
}
diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h
index d4f3edcb281..9d509b9ebbe 100644
--- a/src/mongo/db/catalog/index_catalog.h
+++ b/src/mongo/db/catalog/index_catalog.h
@@ -1,7 +1,7 @@
// index_catalog.h
/**
-* Copyright (C) 2013 10gen Inc.
+* Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -247,7 +247,7 @@ namespace mongo {
* checks all unique indexes and checks for conflicts
* should not throw
*/
- Status checkNoIndexConflicts( const BSONObj& obj );
+ Status checkNoIndexConflicts( OperationContext* txn, const BSONObj& obj );
// ------- temp internal -------
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index 176939669c4..b230121d533 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -1,7 +1,7 @@
// index_create.cpp
/**
-* Copyright (C) 2008 10gen Inc.
+* Copyright (C) 2008-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -104,7 +104,7 @@ namespace mongo {
unsigned long long n = 0;
unsigned long long numDropped = 0;
- auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns,collection));
+ auto_ptr<Runner> runner(InternalPlanner::collectionScan(txn,ns,collection));
std::string idxName = descriptor->indexName();
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index e7859976de0..d2dbf60e565 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -1,7 +1,7 @@
// collection_to_capped.cpp
/**
-* Copyright (C) 2013 10gen Inc.
+* Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -81,9 +81,10 @@ namespace mongo {
// datasize and extentSize can't be compared exactly, so add some padding to 'size'
long long excessSize =
static_cast<long long>( fromCollection->dataSize() -
- ( toCollection->getRecordStore()->storageSize() * 2 ) );
+ ( toCollection->getRecordStore()->storageSize( txn ) * 2 ) );
- scoped_ptr<Runner> runner( InternalPlanner::collectionScan(fromNs,
+ scoped_ptr<Runner> runner( InternalPlanner::collectionScan(txn,
+ fromNs,
fromCollection,
InternalPlanner::FORWARD ) );
diff --git a/src/mongo/db/commands/count.cpp b/src/mongo/db/commands/count.cpp
index aa43882ea78..9c901e71025 100644
--- a/src/mongo/db/commands/count.cpp
+++ b/src/mongo/db/commands/count.cpp
@@ -98,7 +98,7 @@ namespace mongo {
limit = -limit;
}
- uassertStatusOK(getRunnerCount(collection, query, hintObj, &rawRunner));
+ uassertStatusOK(getRunnerCount(txn, collection, query, hintObj, &rawRunner));
auto_ptr<Runner> runner(rawRunner);
// Store the plan summary string in CurOp.
@@ -177,7 +177,7 @@ namespace mongo {
// Get an executor for the command and use it to generate the explain output.
CanonicalQuery* rawCq;
PlanExecutor* rawExec;
- Status execStatus = parseCountToExecutor(cmdObj, dbname, ns, collection,
+ Status execStatus = parseCountToExecutor(txn, cmdObj, dbname, ns, collection,
&rawCq, &rawExec);
if (!execStatus.isOK()) {
return execStatus;
@@ -189,7 +189,8 @@ namespace mongo {
return Explain::explainStages(exec.get(), cq.get(), verbosity, out);
}
- Status CmdCount::parseCountToExecutor(const BSONObj& cmdObj,
+ Status CmdCount::parseCountToExecutor(OperationContext* txn,
+ const BSONObj& cmdObj,
const std::string& dbname,
const std::string& ns,
Collection* collection,
@@ -229,7 +230,7 @@ namespace mongo {
auto_ptr<CanonicalQuery> autoCq(cq);
- Status execStat = getExecutor(collection, cq, execOut,
+ Status execStat = getExecutor(txn, collection, cq, execOut,
QueryPlannerParams::PRIVATE_IS_COUNT);
if (!execStat.isOK()) {
return execStat;
diff --git a/src/mongo/db/commands/count.h b/src/mongo/db/commands/count.h
index eeaaac204f1..f6dcc19c535 100644
--- a/src/mongo/db/commands/count.h
+++ b/src/mongo/db/commands/count.h
@@ -98,7 +98,8 @@ namespace mongo {
* TODO: the regular run() command for count should call this instead of getting
* a runner.
*/
- Status parseCountToExecutor(const BSONObj& cmdObj,
+ Status parseCountToExecutor(OperationContext* txn,
+ const BSONObj& cmdObj,
const std::string& dbname,
const std::string& ns,
Collection* collection,
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 9b4a2a385aa..47405dd9888 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -1,7 +1,7 @@
// dbhash.cpp
/**
-* Copyright (C) 2013 10gen Inc.
+* Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -84,7 +84,8 @@ namespace mongo {
auto_ptr<Runner> runner;
if ( desc ) {
- runner.reset(InternalPlanner::indexScan(collection,
+ runner.reset(InternalPlanner::indexScan(opCtx,
+ collection,
desc,
BSONObj(),
BSONObj(),
@@ -93,7 +94,8 @@ namespace mongo {
InternalPlanner::IXSCAN_FETCH));
}
else if ( collection->isCapped() ) {
- runner.reset(InternalPlanner::collectionScan(fullCollectionName,
+ runner.reset(InternalPlanner::collectionScan(opCtx,
+ fullCollectionName,
collection));
}
else {
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 9e35ef3b0c7..ce582933d7a 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -1,7 +1,7 @@
// distinct.cpp
/**
-* Copyright (C) 2012 10gen Inc.
+* Copyright (C) 2012-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -115,7 +115,7 @@ namespace mongo {
}
Runner* rawRunner;
- Status status = getRunnerDistinct(collection, query, key, &rawRunner);
+ Status status = getRunnerDistinct(txn, collection, query, key, &rawRunner);
if (!status.isOK()) {
uasserted(17216, mongoutils::str::stream() << "Can't get runner for query "
<< query << ": " << status.toString());
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 125826760a0..f242327f935 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -1,7 +1,7 @@
// find_and_modify.cpp
/**
-* Copyright (C) 2012 10gen Inc.
+* Copyright (C) 2012-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -153,7 +153,7 @@ namespace mongo {
Runner* rawRunner;
massert(17384, "Could not get runner for query " + queryOriginal.toString(),
- getRunner(collection, cq, &rawRunner, QueryPlannerParams::DEFAULT).isOK());
+ getRunner(txn, collection, cq, &rawRunner, QueryPlannerParams::DEFAULT).isOK());
auto_ptr<Runner> runner(rawRunner);
diff --git a/src/mongo/db/commands/geonear.cpp b/src/mongo/db/commands/geonear.cpp
index bbcc5ab19f5..f994004e2b2 100644
--- a/src/mongo/db/commands/geonear.cpp
+++ b/src/mongo/db/commands/geonear.cpp
@@ -1,5 +1,5 @@
/**
-* Copyright (C) 2012 10gen Inc.
+* Copyright (C) 2012-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -187,7 +187,7 @@ namespace mongo {
}
Runner* rawRunner;
- if (!getRunner(collection, cq, &rawRunner, 0).isOK()) {
+ if (!getRunner(txn, collection, cq, &rawRunner, 0).isOK()) {
errmsg = "can't get query runner";
return false;
}
diff --git a/src/mongo/db/commands/group.cpp b/src/mongo/db/commands/group.cpp
index b65f4f85fcd..519a5989dd1 100644
--- a/src/mongo/db/commands/group.cpp
+++ b/src/mongo/db/commands/group.cpp
@@ -1,7 +1,7 @@
// group.cpp
/**
-* Copyright (C) 2012 10gen Inc.
+* Copyright (C) 2012-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -147,7 +147,7 @@ namespace mongo {
}
Runner* rawRunner;
- if (!getRunner(collection, cq, &rawRunner).isOK()) {
+ if (!getRunner(txn, collection, cq, &rawRunner).isOK()) {
uasserted(17213, "Can't get runner for query " + query.toString());
return 0;
}
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index 46667c1a319..f7c32549627 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -1,7 +1,7 @@
// list_collections.cpp
/**
-* Copyright (C) 2014 10gen Inc.
+* Copyright (C) 2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -83,7 +83,7 @@ namespace mongo {
b.append( "name", nsToCollectionSubstring( ns ) );
CollectionOptions options =
- dbEntry->getCollectionCatalogEntry( txn, ns )->getCollectionOptions();
+ dbEntry->getCollectionCatalogEntry( txn, ns )->getCollectionOptions(txn);
b.append( "options", options.toBSON() );
arr.append( b.obj() );
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index a55597b58b2..b319dbd9e8a 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -981,7 +981,7 @@ namespace mongo {
whereCallback).isOK());
Runner* rawRunner;
- verify(getRunner(ctx->ctx().db()->getCollection(_txn, _config.incLong),
+ verify(getRunner(_txn, ctx->ctx().db()->getCollection(_txn, _config.incLong),
cq, &rawRunner, QueryPlannerParams::NO_TABLE_SCAN).isOK());
auto_ptr<Runner> runner(rawRunner);
@@ -1324,7 +1324,7 @@ namespace mongo {
}
Runner* rawRunner;
- if (!getRunner(ctx->db()->getCollection(txn, config.ns), cq, &rawRunner).isOK()) {
+ if (!getRunner(txn, ctx->db()->getCollection(txn, config.ns), cq, &rawRunner).isOK()) {
uasserted(17239, "Can't get runner for query " + config.filter.toString());
return 0;
}
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index 4f41daa71f1..c10cf053534 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -185,7 +185,7 @@ namespace mongo {
"numCursors has to be between 1 and 10000" <<
" was: " << numCursors ) );
- OwnedPointerVector<RecordIterator> iterators(collection->getManyIterators());
+ OwnedPointerVector<RecordIterator> iterators(collection->getManyIterators(txn));
if (iterators.size() < numCursors) {
numCursors = iterators.size();
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index 6e5232d4c12..5033dd05da7 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (c) 2011 10gen Inc.
+ * Copyright (c) 2011-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -324,7 +324,8 @@ namespace {
// This does mongod-specific stuff like creating the input Runner and adding to the
// front of the pipeline if needed.
- boost::shared_ptr<Runner> input = PipelineD::prepareCursorSource(collection,
+ boost::shared_ptr<Runner> input = PipelineD::prepareCursorSource(txn,
+ collection,
pPipeline,
pCtx);
pPipeline->stitch();
diff --git a/src/mongo/db/commands/rename_collection.cpp b/src/mongo/db/commands/rename_collection.cpp
index a878e8222f1..904041aa648 100644
--- a/src/mongo/db/commands/rename_collection.cpp
+++ b/src/mongo/db/commands/rename_collection.cpp
@@ -1,7 +1,7 @@
// rename_collection.cpp
/**
-* Copyright (C) 2013 10gen Inc.
+* Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -181,7 +181,7 @@ namespace mongo {
indexesInProg = stopIndexBuilds( txn, srcCtx.db(), cmdObj );
capped = sourceColl->isCapped();
if ( capped ) {
- size = sourceColl->getRecordStore()->storageSize();
+ size = sourceColl->getRecordStore()->storageSize( txn );
}
}
}
@@ -252,7 +252,7 @@ namespace mongo {
{
Client::Context srcCtx(txn, source);
sourceColl = srcCtx.db()->getCollection( txn, source );
- sourceIt.reset( sourceColl->getIterator( DiskLoc(), false, CollectionScanParams::FORWARD ) );
+ sourceIt.reset( sourceColl->getIterator( txn, DiskLoc(), false, CollectionScanParams::FORWARD ) );
}
Collection* targetColl = NULL;
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 8f7e91d04f0..9036445a88b 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -1,7 +1,7 @@
// test_commands.cpp
/**
-* Copyright (C) 2013 10gen Inc.
+* Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -150,7 +150,8 @@ namespace mongo {
Collection* collection = ctx.ctx().db()->getCollection( txn, nss.ns() );
massert( 13417, "captrunc collection not found or empty", collection);
- boost::scoped_ptr<Runner> runner(InternalPlanner::collectionScan(nss.ns(),
+ boost::scoped_ptr<Runner> runner(InternalPlanner::collectionScan(txn,
+ nss.ns(),
collection,
InternalPlanner::BACKWARD));
DiskLoc end;
diff --git a/src/mongo/db/concurrency/SConscript b/src/mongo/db/concurrency/SConscript
index f032e337bd2..97507d58197 100644
--- a/src/mongo/db/concurrency/SConscript
+++ b/src/mongo/db/concurrency/SConscript
@@ -10,7 +10,8 @@ env.Library(
LIBDEPS=[
'$BUILD_DIR/mongo/base/base',
'$BUILD_DIR/third_party/shim_boost',
- '$BUILD_DIR/mongo/foundation'
+ '$BUILD_DIR/mongo/foundation',
+ '$BUILD_DIR/mongo/server_parameters'
],
)
diff --git a/src/mongo/db/concurrency/lock_mgr.cpp b/src/mongo/db/concurrency/lock_mgr.cpp
index c25636dad52..68e161fc0a9 100644
--- a/src/mongo/db/concurrency/lock_mgr.cpp
+++ b/src/mongo/db/concurrency/lock_mgr.cpp
@@ -33,10 +33,11 @@
#include <boost/thread/locks.hpp>
#include <sstream>
+#include "mongo/base/init.h"
+#include "mongo/db/server_parameters.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
#include "mongo/util/timer.h"
-#include "mongo/base/init.h"
using boost::unique_lock;
@@ -274,6 +275,11 @@ namespace mongo {
/*---------- LockManager public functions (mutex guarded) ---------*/
+
+ // This startup parameter enables experimental document-level locking features
+ // It should be removed once full document-level locking is checked-in.
+ MONGO_EXPORT_STARTUP_SERVER_PARAMETER(useExperimentalDocLocking, bool, false);
+
static LockManager* _singleton = NULL;
MONGO_INITIALIZER(InstantiateLockManager)(InitializerContext* context) {
@@ -379,7 +385,12 @@ namespace mongo {
}
void LockManager::acquireLock(LockRequest* lr, Notifier* notifier) {
- if (NULL == lr) return;
+ if (!useExperimentalDocLocking) {
+ return;
+ }
+
+ invariant(lr);
+
{
unique_lock<boost::mutex> lk(_mutex);
_throwIfShuttingDown();
@@ -408,6 +419,10 @@ namespace mongo {
const LockMode& mode,
const ResourceId& resId,
Notifier* notifier) {
+ if (kReservedResourceId == resId || !useExperimentalDocLocking) {
+ return;
+ }
+
{
unique_lock<boost::mutex> lk(_mutex);
_throwIfShuttingDown();
@@ -472,7 +487,8 @@ namespace mongo {
}
LockManager::LockStatus LockManager::releaseLock(LockRequest* lr) {
- if (NULL == lr) return kLockNotFound;
+ if (!useExperimentalDocLocking) return kLockNotFound;
+ invariant(lr);
{
unique_lock<boost::mutex> lk(_mutex);
_throwIfShuttingDown(lr->requestor);
@@ -485,6 +501,10 @@ namespace mongo {
LockManager::LockStatus LockManager::release(const Transaction* holder,
const LockMode& mode,
const ResourceId& resId) {
+ if (kReservedResourceId == resId || !useExperimentalDocLocking) {
+ return kLockNotFound;
+ }
+
{
unique_lock<boost::mutex> lk(_mutex);
_throwIfShuttingDown(holder);
@@ -529,7 +549,7 @@ namespace mongo {
return numLocksReleased;
}
#endif
- void LockManager::abort(Transaction* goner) {
+ void LockManager::abort(Transaction* goner) {
{
unique_lock<boost::mutex> lk(_mutex);
_throwIfShuttingDown(goner);
@@ -619,6 +639,9 @@ namespace mongo {
bool LockManager::isLocked(const Transaction* holder,
const LockMode& mode,
const ResourceId& resId) const {
+ if (!useExperimentalDocLocking) {
+ return false;
+ }
{
unique_lock<boost::mutex> lk(_mutex);
_throwIfShuttingDown(holder);
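
With these hunks, every public LockManager entry point is gated on the same
startup flag, so running without the parameter enabled (it is settable at startup,
e.g. via --setParameter useExperimentalDocLocking=true) turns each acquire and
release into a cheap early return: release reports kLockNotFound and isLocked
reports false. The parameter definition also moves into lock_mgr.cpp from
d_concurrency.cpp (see the later hunks), which is why the SConscript change above
adds the server_parameters dependency. Condensed, with bodies elided:

    MONGO_EXPORT_STARTUP_SERVER_PARAMETER(useExperimentalDocLocking, bool, false);

    void LockManager::acquireLock(LockRequest* lr, Notifier* notifier) {
        if (!useExperimentalDocLocking) {
            return;            // feature off: acquiring is a no-op
        }
        invariant(lr);         // feature on: a null request is now a bug
        // ... enqueue the request under _mutex ...
    }

    LockManager::LockStatus LockManager::releaseLock(LockRequest* lr) {
        if (!useExperimentalDocLocking) return kLockNotFound;
        invariant(lr);
        // ... find and release the request under _mutex ...
    }

Note the behavioral shift buried here: previously a NULL LockRequest was tolerated
with an early return; now, when the feature is enabled, passing NULL trips an
invariant.
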
diff --git a/src/mongo/db/concurrency/lock_mgr.h b/src/mongo/db/concurrency/lock_mgr.h
index b19cb528077..9a94bd33799 100644
--- a/src/mongo/db/concurrency/lock_mgr.h
+++ b/src/mongo/db/concurrency/lock_mgr.h
@@ -39,7 +39,6 @@
#include "mongo/platform/compiler.h"
#include "mongo/platform/cstdint.h"
#include "mongo/util/timer.h"
-
#include "mongo/bson/util/atomic_int.h"
/*
@@ -60,6 +59,9 @@
namespace mongo {
+ // Defined in lock_mgr.cpp
+ extern bool useExperimentalDocLocking;
+
class ResourceId {
public:
ResourceId() : _rid(0) { }
@@ -69,7 +71,7 @@ namespace mongo {
operator size_t() const { return _rid; }
private:
- size_t _rid;
+ uint64_t _rid;
};
static const ResourceId kReservedResourceId = 0;
@@ -706,29 +708,29 @@ namespace mongo {
class SharedResourceLock : public ResourceLock {
public:
- SharedResourceLock(Transaction* requestor, void* resource)
- : ResourceLock(LockManager::getSingleton(),
- requestor,
- kShared,
- (size_t)resource) { }
- SharedResourceLock(Transaction* requestor, size_t resource)
- : ResourceLock(LockManager::getSingleton(),
- requestor,
- kShared,
- resource) { }
+ SharedResourceLock(Transaction* requestor, void* resource)
+ : ResourceLock(LockManager::getSingleton(),
+ requestor,
+ kShared,
+ (size_t)resource) { }
+ SharedResourceLock(Transaction* requestor, uint64_t resource)
+ : ResourceLock(LockManager::getSingleton(),
+ requestor,
+ kShared,
+ resource) { }
};
class ExclusiveResourceLock : public ResourceLock {
public:
- ExclusiveResourceLock(Transaction* requestor, void* resource)
- : ResourceLock(LockManager::getSingleton(),
- requestor,
- kExclusive,
- (size_t)resource) { }
- ExclusiveResourceLock(Transaction* requestor, size_t resource)
- : ResourceLock(LockManager::getSingleton(),
- requestor,
- kExclusive,
- resource) { }
+ ExclusiveResourceLock(Transaction* requestor, void* resource)
+ : ResourceLock(LockManager::getSingleton(),
+ requestor,
+ kExclusive,
+ (size_t)resource) { }
+ ExclusiveResourceLock(Transaction* requestor, uint64_t resource)
+ : ResourceLock(LockManager::getSingleton(),
+ requestor,
+ kExclusive,
+ resource) { }
};
} // namespace mongo
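
A detail worth noting in this header: ResourceId's _rid field changes from size_t
to uint64_t, and the matching constructor overloads on SharedResourceLock and
ExclusiveResourceLock follow suit. Presumably this pins resource ids to a fixed
64 bits on 32-bit builds, where size_t would truncate them; the void* overloads
still cast through size_t, and the implicit conversion back out can still narrow:

    class ResourceId {
    public:
        ResourceId() : _rid(0) { }
        operator size_t() const { return _rid; }   // may narrow on 32-bit targets
    private:
        uint64_t _rid;                             // fixed width (was size_t)
    };
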
diff --git a/src/mongo/db/d_concurrency.cpp b/src/mongo/db/d_concurrency.cpp
index 530dc4d3879..bccc4d977d0 100644
--- a/src/mongo/db/d_concurrency.cpp
+++ b/src/mongo/db/d_concurrency.cpp
@@ -1,7 +1,7 @@
// @file d_concurrency.cpp
/**
-* Copyright (C) 2008 10gen Inc.
+* Copyright (C) 2008-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -847,11 +847,4 @@ namespace mongo {
}
} lockStatsServerStatusSection;
-
-
- // This startup parameter enables experimental document-level locking features, which work
- // for update-in-place changes only (i.e., no index updates and no document growth or
- // movement). It should be removed once full document-level locking is checked-in.
- MONGO_EXPORT_STARTUP_SERVER_PARAMETER(useExperimentalDocLocking, bool, false);
-
}
diff --git a/src/mongo/db/d_concurrency.h b/src/mongo/db/d_concurrency.h
index a11351b5a84..7a93b0936ae 100644
--- a/src/mongo/db/d_concurrency.h
+++ b/src/mongo/db/d_concurrency.h
@@ -1,7 +1,7 @@
// @file d_concurrency.h
/**
-* Copyright (C) 2008 10gen Inc.
+* Copyright (C) 2008-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -250,7 +250,4 @@ namespace mongo {
~writelocktry();
bool got() const { return _got; }
};
-
- // Defined in instance.cpp
- extern bool useExperimentalDocLocking;
}
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index b3ba3020306..612c1c28d3d 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -1,7 +1,7 @@
// @file db.cpp : Defines main() for the mongod program.
/**
-* Copyright (C) 2008 10gen Inc.
+* Copyright (C) 2008-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -366,7 +366,7 @@ namespace mongo {
const string systemIndexes = ctx.db()->name() + ".system.indexes";
Collection* coll = ctx.db()->getCollection( &txn, systemIndexes );
- auto_ptr<Runner> runner(InternalPlanner::collectionScan(systemIndexes,coll));
+ auto_ptr<Runner> runner(InternalPlanner::collectionScan(&txn, systemIndexes,coll));
BSONObj index;
Runner::RunnerState state;
while (Runner::RUNNER_ADVANCED == (state = runner->getNext(&index, NULL))) {
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 96804e2dfc0..e8bfba4218c 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -1,7 +1,7 @@
// dbcommands.cpp
/**
-* Copyright (C) 2012 10gen Inc.
+* Copyright (C) 2012-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -674,7 +674,7 @@ namespace mongo {
}
Runner* rawRunner;
- if (!getRunner(coll, cq, &rawRunner, QueryPlannerParams::NO_TABLE_SCAN).isOK()) {
+ if (!getRunner(txn, coll, cq, &rawRunner, QueryPlannerParams::NO_TABLE_SCAN).isOK()) {
uasserted(17241, "Can't get runner for query " + query.toString());
return 0;
}
@@ -797,7 +797,7 @@ namespace mongo {
result.append( "millis" , timer.millis() );
return 1;
}
- runner.reset(InternalPlanner::collectionScan(ns,collection));
+ runner.reset(InternalPlanner::collectionScan(txn, ns,collection));
}
else if ( min.isEmpty() || max.isEmpty() ) {
errmsg = "only one of min or max specified";
@@ -822,7 +822,7 @@ namespace mongo {
min = Helpers::toKeyFormat( kp.extendRangeBound( min, false ) );
max = Helpers::toKeyFormat( kp.extendRangeBound( max, false ) );
- runner.reset(InternalPlanner::indexScan(collection, idx, min, max, false));
+ runner.reset(InternalPlanner::indexScan(txn, collection, idx, min, max, false));
}
long long avgObjSize = collection->dataSize() / collection->numRecords();
@@ -925,12 +925,12 @@ namespace mongo {
result.append( "avgObjSize" , collection->averageObjectSize() );
result.appendNumber( "storageSize",
- static_cast<long long>(collection->getRecordStore()->storageSize( &result,
+ static_cast<long long>(collection->getRecordStore()->storageSize( txn, &result,
verbose ? 1 : 0 ) ) /
scale );
result.append( "nindexes" , collection->getIndexCatalog()->numIndexesReady() );
- collection->getRecordStore()->appendCustomStats( &result, scale );
+ collection->getRecordStore()->appendCustomStats( txn, &result, scale );
BSONObjBuilder indexSizes;
result.appendNumber( "totalIndexSize" , db->getIndexSizeForCollection(txn,
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index f0ac18ca3bc..83b5b0b7ec5 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -1,7 +1,7 @@
// dbhelpers.cpp
/**
-* Copyright (C) 2008 10gen Inc.
+* Copyright (C) 2008-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -114,7 +114,7 @@ namespace mongo {
Runner* rawRunner;
size_t options = requireIndex ? QueryPlannerParams::NO_TABLE_SCAN : QueryPlannerParams::DEFAULT;
massert(17245, "Could not get runner for query " + query.toString(),
- getRunner(collection, cq, &rawRunner, options).isOK());
+ getRunner(txn, collection, cq, &rawRunner, options).isOK());
auto_ptr<Runner> runner(rawRunner);
Runner::RunnerState state;
@@ -156,7 +156,7 @@ namespace mongo {
BtreeBasedAccessMethod* accessMethod =
static_cast<BtreeBasedAccessMethod*>(catalog->getIndex( desc ));
- DiskLoc loc = accessMethod->findSingle( query["_id"].wrap() );
+ DiskLoc loc = accessMethod->findSingle( txn, query["_id"].wrap() );
if ( loc.isNull() )
return false;
result = collection->docFor( loc );
@@ -173,7 +173,7 @@ namespace mongo {
// See SERVER-12397. This may not always be true.
BtreeBasedAccessMethod* accessMethod =
static_cast<BtreeBasedAccessMethod*>(catalog->getIndex( desc ));
- return accessMethod->findSingle( idquery["_id"].wrap() );
+ return accessMethod->findSingle( txn, idquery["_id"].wrap() );
}
/* Get the first object from a collection. Generally only useful if the collection
@@ -183,7 +183,8 @@ namespace mongo {
*/
bool Helpers::getSingleton(OperationContext* txn, const char *ns, BSONObj& result) {
Client::Context context(txn, ns);
- auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns,
+ auto_ptr<Runner> runner(InternalPlanner::collectionScan(txn,
+ ns,
context.db()->getCollection(txn,
ns)));
Runner::RunnerState state = runner->getNext(&result, NULL);
@@ -194,7 +195,8 @@ namespace mongo {
bool Helpers::getLast(OperationContext* txn, const char *ns, BSONObj& result) {
Client::Context ctx(txn, ns);
Collection* coll = ctx.db()->getCollection( txn, ns );
- auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns,
+ auto_ptr<Runner> runner(InternalPlanner::collectionScan(txn,
+ ns,
coll,
InternalPlanner::BACKWARD));
Runner::RunnerState state = runner->getNext(&result, NULL);
@@ -359,7 +361,7 @@ namespace mongo {
IndexDescriptor* desc =
collection->getIndexCatalog()->findIndexByKeyPattern( indexKeyPattern.toBSON() );
- auto_ptr<Runner> runner(InternalPlanner::indexScan(collection, desc, min, max,
+ auto_ptr<Runner> runner(InternalPlanner::indexScan(txn, collection, desc, min, max,
maxInclusive,
InternalPlanner::FORWARD,
InternalPlanner::IXSCAN_FETCH));
@@ -520,7 +522,7 @@ namespace mongo {
bool isLargeChunk = false;
long long docCount = 0;
- auto_ptr<Runner> runner(InternalPlanner::indexScan(collection, idx, min, max, false));
+ auto_ptr<Runner> runner(InternalPlanner::indexScan(txn, collection, idx, min, max, false));
// we can afford to yield here because any change to the base data that we might miss is
// already being queued and will be migrated in the 'transferMods' stage
diff --git a/src/mongo/db/exec/2dcommon.cpp b/src/mongo/db/exec/2dcommon.cpp
index 7ffc110d8f6..f7c01ac3a25 100644
--- a/src/mongo/db/exec/2dcommon.cpp
+++ b/src/mongo/db/exec/2dcommon.cpp
@@ -231,8 +231,12 @@ namespace twod_exec {
// The only time these may be equal is when we actually equal the location
// itself, otherwise our expanding algorithm will fail.
// static
- bool BtreeLocation::initial(const IndexDescriptor* descriptor, const TwoDIndexingParams& params,
- BtreeLocation& min, BtreeLocation& max, GeoHash start) {
+ bool BtreeLocation::initial(OperationContext* txn,
+ const IndexDescriptor* descriptor,
+ const TwoDIndexingParams& params,
+ BtreeLocation& min,
+ BtreeLocation& max,
+ GeoHash start) {
verify(descriptor);
min._eof = false;
@@ -294,10 +298,10 @@ namespace twod_exec {
verify(maxParams.bounds.isValidFor(descriptor->keyPattern(), 1));
min._ws.reset(new WorkingSet());
- min._scan.reset(new IndexScan(minParams, min._ws.get(), NULL));
+ min._scan.reset(new IndexScan(txn, minParams, min._ws.get(), NULL));
max._ws.reset(new WorkingSet());
- max._scan.reset(new IndexScan(maxParams, max._ws.get(), NULL));
+ max._scan.reset(new IndexScan(txn, maxParams, max._ws.get(), NULL));
min.advance();
max.advance();
@@ -396,7 +400,9 @@ namespace twod_exec {
// Are we finished getting points?
bool GeoBrowse::moreToDo() { return _state != DONE; }
- bool GeoBrowse::checkAndAdvance(BtreeLocation* bl, const GeoHash& hash, int& totalFound) {
+ bool GeoBrowse::checkAndAdvance(BtreeLocation* bl,
+ const GeoHash& hash,
+ int& totalFound) {
if (bl->eof()) { return false; }
//cout << "looking at " << bl->_loc.obj().toString() << " dl " << bl->_loc.toString() << endl;
@@ -433,7 +439,7 @@ namespace twod_exec {
if(! isNeighbor)
_prefix = expandStartHash();
- if (!BtreeLocation::initial(_descriptor, _params, _min, _max, _prefix)) {
+ if (!BtreeLocation::initial(&_txn, _descriptor, _params, _min, _max, _prefix)) {
_state = isNeighbor ? DONE_NEIGHBOR : DONE;
} else {
_state = DOING_EXPAND;
diff --git a/src/mongo/db/exec/2dcommon.h b/src/mongo/db/exec/2dcommon.h
index 9283e327ee0..1d27b610a70 100644
--- a/src/mongo/db/exec/2dcommon.h
+++ b/src/mongo/db/exec/2dcommon.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -32,10 +32,14 @@
#include "mongo/db/geo/shapes.h"
#include "mongo/db/index/2d_access_method.h"
+#include "mongo/db/operation_context_noop.h"
#pragma once
namespace mongo {
+
+ class OperationContext;
+
namespace twod_exec {
//
@@ -111,7 +115,6 @@ namespace twod_exec {
struct BtreeLocation {
BtreeLocation() : _eof(false) { }
-
scoped_ptr<IndexScan> _scan;
scoped_ptr<WorkingSet> _ws;
DiskLoc _loc;
@@ -130,8 +133,12 @@ namespace twod_exec {
// Returns the min and max keys which bound a particular location.
// The only time these may be equal is when we actually equal the location
// itself, otherwise our expanding algorithm will fail.
- static bool initial(const IndexDescriptor* descriptor, const TwoDIndexingParams& params,
- BtreeLocation& min, BtreeLocation& max, GeoHash start);
+ static bool initial(OperationContext* txn,
+ const IndexDescriptor* descriptor,
+ const TwoDIndexingParams& params,
+ BtreeLocation& min,
+ BtreeLocation& max,
+ GeoHash start);
};
//
@@ -207,7 +214,9 @@ namespace twod_exec {
// Fills the stack, but only checks a maximum number of maxToCheck points at a time.
// Further calls to this function will continue the expand/check neighbors algorithm.
- virtual void fillStack(int maxToCheck, int maxToAdd = -1, bool onlyExpand = false);
+ virtual void fillStack(int maxToCheck,
+ int maxToAdd = -1,
+ bool onlyExpand = false);
bool checkAndAdvance(BtreeLocation* bl, const GeoHash& hash, int& totalFound);
@@ -273,6 +282,7 @@ namespace twod_exec {
private:
const Collection* _collection;
+ OperationContextNoop _txn;
};
} // namespace twod_exec
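
The 2d geo path is the one place that does not yet receive a caller's context:
GeoBrowse instead owns an OperationContextNoop and passes its address to
BtreeLocation::initial (see the 2dcommon.cpp hunk above). That satisfies the new
txn-taking signatures without touching the geo callers yet; a stopgap rather than
real propagation:

    class GeoBrowse /* other members elided */ {
    private:
        const Collection* _collection;
        OperationContextNoop _txn;   // placeholder until geo callers pass a real txn
    };

    // at the call site shown above:
    //   BtreeLocation::initial(&_txn, _descriptor, _params, _min, _max, _prefix)
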
diff --git a/src/mongo/db/exec/2dnear.cpp b/src/mongo/db/exec/2dnear.cpp
index d1f276c233a..a42ad52d5de 100644
--- a/src/mongo/db/exec/2dnear.cpp
+++ b/src/mongo/db/exec/2dnear.cpp
@@ -40,8 +40,8 @@ namespace mongo {
// static
const char* TwoDNear::kStageType = "GEO_NEAR_2D";
- TwoDNear::TwoDNear(const TwoDNearParams& params, WorkingSet* ws)
- : _commonStats(kStageType) {
+ TwoDNear::TwoDNear(OperationContext* txn, const TwoDNearParams& params, WorkingSet* ws)
+ : _txn(txn), _commonStats(kStageType) {
_params = params;
_workingSet = ws;
_initted = false;
diff --git a/src/mongo/db/exec/2dnear.h b/src/mongo/db/exec/2dnear.h
index 265f3ff6402..2216f76c41d 100644
--- a/src/mongo/db/exec/2dnear.h
+++ b/src/mongo/db/exec/2dnear.h
@@ -64,7 +64,7 @@ namespace mongo {
class TwoDNear : public PlanStage {
public:
- TwoDNear(const TwoDNearParams& params, WorkingSet* ws);
+ TwoDNear(OperationContext* txn, const TwoDNearParams& params, WorkingSet* ws);
virtual ~TwoDNear();
virtual bool isEOF();
@@ -83,6 +83,10 @@ namespace mongo {
static const char* kStageType;
private:
+
+ // transactional context for read locks. Not owned by us
+ OperationContext* _txn;
+
WorkingSet* _workingSet;
// Stats
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index 195ab21e551..d76ce92e19c 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -42,10 +42,12 @@ namespace mongo {
// static
const char* CollectionScan::kStageType = "COLLSCAN";
- CollectionScan::CollectionScan(const CollectionScanParams& params,
+ CollectionScan::CollectionScan(OperationContext* txn,
+ const CollectionScanParams& params,
WorkingSet* workingSet,
const MatchExpression* filter)
- : _workingSet(workingSet),
+ : _txn(txn),
+ _workingSet(workingSet),
_filter(filter),
_params(params),
_nsDropped(false),
@@ -66,7 +68,8 @@ namespace mongo {
return PlanStage::DEAD;
}
- _iter.reset( _params.collection->getIterator( _params.start,
+ _iter.reset( _params.collection->getIterator( _txn,
+ _params.start,
_params.tailable,
_params.direction ) );
diff --git a/src/mongo/db/exec/collection_scan.h b/src/mongo/db/exec/collection_scan.h
index f752c69e4c3..fbc47701d84 100644
--- a/src/mongo/db/exec/collection_scan.h
+++ b/src/mongo/db/exec/collection_scan.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -37,6 +37,7 @@ namespace mongo {
class RecordIterator;
class WorkingSet;
+ class OperationContext;
/**
* Scans over a collection, starting at the DiskLoc provided in params and continuing until
@@ -46,7 +47,8 @@ namespace mongo {
*/
class CollectionScan : public PlanStage {
public:
- CollectionScan(const CollectionScanParams& params,
+ CollectionScan(OperationContext* txn,
+ const CollectionScanParams& params,
WorkingSet* workingSet,
const MatchExpression* filter);
@@ -71,6 +73,9 @@ namespace mongo {
*/
bool diskLocInMemory(DiskLoc loc);
+ // transactional context for read locks. Not owned by us
+ OperationContext* _txn;
+
// WorkingSet is not owned by us.
WorkingSet* _workingSet;
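
The same three-line edit that TwoDNear and CollectionScan received above now
repeats for every remaining PlanStage in this commit (Count, DistinctScan,
IDHackStage, IndexScan, OplogStart, ...): the constructor grows a leading
OperationContext*, the stage stores it in a _txn member it does not own, and _txn
is used wherever the stage opens a storage iterator or index cursor. The skeleton,
with a hypothetical stage name:

    class SomeScanStage : public PlanStage {   // hypothetical
    public:
        SomeScanStage(OperationContext* txn, WorkingSet* ws)
            : _txn(txn), _workingSet(ws) { }

        virtual StageState work(WorkingSetID* out) {
            // e.g. _iter.reset(_collection->getIterator(_txn, ...));
            return PlanStage::IS_EOF;
        }

    private:
        OperationContext* _txn;    // transactional context for read locks; not owned
        WorkingSet* _workingSet;   // not owned
    };
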
diff --git a/src/mongo/db/exec/count.cpp b/src/mongo/db/exec/count.cpp
index eeb860e9e92..5b5264e0e63 100644
--- a/src/mongo/db/exec/count.cpp
+++ b/src/mongo/db/exec/count.cpp
@@ -36,8 +36,9 @@ namespace mongo {
// static
const char* Count::kStageType = "COUNT";
- Count::Count(const CountParams& params, WorkingSet* workingSet)
- : _workingSet(workingSet),
+ Count::Count(OperationContext* txn, const CountParams& params, WorkingSet* workingSet)
+ : _txn(txn),
+ _workingSet(workingSet),
_descriptor(params.descriptor),
_iam(params.descriptor->getIndexCatalog()->getIndex(params.descriptor)),
_btreeCursor(NULL),
@@ -54,7 +55,7 @@ namespace mongo {
cursorOptions.direction = CursorOptions::INCREASING;
IndexCursor *cursor;
- Status s = _iam->newCursor(cursorOptions, &cursor);
+ Status s = _iam->newCursor(_txn, cursorOptions, &cursor);
verify(s.isOK());
verify(cursor);
@@ -69,7 +70,7 @@ namespace mongo {
// Create the cursor that points at our end position.
IndexCursor* endCursor;
- verify(_iam->newCursor(cursorOptions, &endCursor).isOK());
+ verify(_iam->newCursor(_txn, cursorOptions, &endCursor).isOK());
verify(endCursor);
// Is this assumption always valid? See SERVER-12397
diff --git a/src/mongo/db/exec/count.h b/src/mongo/db/exec/count.h
index a3a82ab0032..3371587c368 100644
--- a/src/mongo/db/exec/count.h
+++ b/src/mongo/db/exec/count.h
@@ -36,6 +36,7 @@
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression.h"
+#include "mongo/db/operation_context.h"
#include "mongo/platform/unordered_set.h"
namespace mongo {
@@ -67,7 +68,7 @@ namespace mongo {
*/
class Count : public PlanStage {
public:
- Count(const CountParams& params, WorkingSet* workingSet);
+ Count(OperationContext* txn, const CountParams& params, WorkingSet* workingSet);
virtual ~Count() { }
virtual StageState work(WorkingSetID* out);
@@ -95,6 +96,9 @@ namespace mongo {
*/
void checkEnd();
+ // transactional context for read locks. Not owned by us
+ OperationContext* _txn;
+
// The WorkingSet we annotate with results. Not owned by us.
WorkingSet* _workingSet;
diff --git a/src/mongo/db/exec/distinct_scan.cpp b/src/mongo/db/exec/distinct_scan.cpp
index 3cbd9e84d08..a55ea4e997e 100644
--- a/src/mongo/db/exec/distinct_scan.cpp
+++ b/src/mongo/db/exec/distinct_scan.cpp
@@ -39,8 +39,9 @@ namespace mongo {
// static
const char* DistinctScan::kStageType = "DISTINCT";
- DistinctScan::DistinctScan(const DistinctParams& params, WorkingSet* workingSet)
- : _workingSet(workingSet),
+ DistinctScan::DistinctScan(OperationContext* txn, const DistinctParams& params, WorkingSet* workingSet)
+ : _txn(txn),
+ _workingSet(workingSet),
_descriptor(params.descriptor),
_iam(params.descriptor->getIndexCatalog()->getIndex(params.descriptor)),
_btreeCursor(NULL),
@@ -60,7 +61,7 @@ namespace mongo {
}
IndexCursor *cursor;
- Status s = _iam->newCursor(cursorOptions, &cursor);
+ Status s = _iam->newCursor(_txn, cursorOptions, &cursor);
verify(s.isOK());
verify(cursor);
// Is this assumption always valid? See SERVER-12397
diff --git a/src/mongo/db/exec/distinct_scan.h b/src/mongo/db/exec/distinct_scan.h
index 9e088da5068..d81a936937f 100644
--- a/src/mongo/db/exec/distinct_scan.h
+++ b/src/mongo/db/exec/distinct_scan.h
@@ -79,7 +79,7 @@ namespace mongo {
*/
class DistinctScan : public PlanStage {
public:
- DistinctScan(const DistinctParams& params, WorkingSet* workingSet);
+ DistinctScan(OperationContext* txn, const DistinctParams& params, WorkingSet* workingSet);
virtual ~DistinctScan() { }
virtual StageState work(WorkingSetID* out);
@@ -105,6 +105,9 @@ namespace mongo {
/** See if the cursor is pointing at or past _endKey, if _endKey is non-empty. */
void checkEnd();
+ // transactional context for read locks. Not owned by us
+ OperationContext* _txn;
+
// The WorkingSet we annotate with results. Not owned by us.
WorkingSet* _workingSet;
diff --git a/src/mongo/db/exec/idhack.cpp b/src/mongo/db/exec/idhack.cpp
index d27f7a34b5b..e577210226f 100644
--- a/src/mongo/db/exec/idhack.cpp
+++ b/src/mongo/db/exec/idhack.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -40,8 +40,10 @@ namespace mongo {
// static
const char* IDHackStage::kStageType = "IDHACK";
- IDHackStage::IDHackStage(const Collection* collection, CanonicalQuery* query, WorkingSet* ws)
- : _collection(collection),
+ IDHackStage::IDHackStage(OperationContext* txn, const Collection* collection,
+ CanonicalQuery* query, WorkingSet* ws)
+ : _txn(txn),
+ _collection(collection),
_workingSet(ws),
_key(query->getQueryObj()["_id"].wrap()),
_query(query),
@@ -49,8 +51,10 @@ namespace mongo {
_done(false),
_commonStats(kStageType) { }
- IDHackStage::IDHackStage(Collection* collection, const BSONObj& key, WorkingSet* ws)
- : _collection(collection),
+ IDHackStage::IDHackStage(OperationContext* txn, Collection* collection,
+ const BSONObj& key, WorkingSet* ws)
+ : _txn(txn),
+ _collection(collection),
_workingSet(ws),
_key(key),
_query(NULL),
@@ -88,7 +92,7 @@ namespace mongo {
static_cast<const BtreeBasedAccessMethod*>(catalog->getIndex(idDesc));
// Look up the key by going directly to the Btree.
- DiskLoc loc = accessMethod->findSingle( _key );
+ DiskLoc loc = accessMethod->findSingle( _txn, _key );
// Key not found.
if (loc.isNull()) {
diff --git a/src/mongo/db/exec/idhack.h b/src/mongo/db/exec/idhack.h
index 3f266638d9e..85cdd78abae 100644
--- a/src/mongo/db/exec/idhack.h
+++ b/src/mongo/db/exec/idhack.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -42,9 +42,11 @@ namespace mongo {
class IDHackStage : public PlanStage {
public:
/** Takes ownership of all the arguments except 'collection'. */
- IDHackStage(const Collection* collection, CanonicalQuery* query, WorkingSet* ws);
+ IDHackStage(OperationContext* txn, const Collection* collection,
+ CanonicalQuery* query, WorkingSet* ws);
- IDHackStage(Collection* collection, const BSONObj& key, WorkingSet* ws);
+ IDHackStage(OperationContext* txn, Collection* collection,
+ const BSONObj& key, WorkingSet* ws);
virtual ~IDHackStage();
@@ -69,6 +71,9 @@ namespace mongo {
static const char* kStageType;
private:
+ // transactional context for read locks. Not owned by us
+ OperationContext* _txn;
+
// Not owned here.
const Collection* _collection;
diff --git a/src/mongo/db/exec/index_scan.cpp b/src/mongo/db/exec/index_scan.cpp
index 1982f134b8d..b4ee4706cf0 100644
--- a/src/mongo/db/exec/index_scan.cpp
+++ b/src/mongo/db/exec/index_scan.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -51,9 +51,12 @@ namespace mongo {
// static
const char* IndexScan::kStageType = "IXSCAN";
- IndexScan::IndexScan(const IndexScanParams& params, WorkingSet* workingSet,
+ IndexScan::IndexScan(OperationContext* txn,
+ const IndexScanParams& params,
+ WorkingSet* workingSet,
const MatchExpression* filter)
- : _workingSet(workingSet),
+ : _txn(txn),
+ _workingSet(workingSet),
_hitEnd(false),
_filter(filter),
_shouldDedup(true),
@@ -90,7 +93,7 @@ namespace mongo {
}
IndexCursor *cursor;
- Status s = _iam->newCursor(cursorOptions, &cursor);
+ Status s = _iam->newCursor(_txn, cursorOptions, &cursor);
verify(s.isOK());
_indexCursor.reset(cursor);
diff --git a/src/mongo/db/exec/index_scan.h b/src/mongo/db/exec/index_scan.h
index 6b993f387ad..02b14f1e6d8 100644
--- a/src/mongo/db/exec/index_scan.h
+++ b/src/mongo/db/exec/index_scan.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -80,7 +80,9 @@ namespace mongo {
*/
class IndexScan : public PlanStage {
public:
- IndexScan(const IndexScanParams& params, WorkingSet* workingSet,
+ IndexScan(OperationContext* txn,
+ const IndexScanParams& params,
+ WorkingSet* workingSet,
const MatchExpression* filter);
virtual ~IndexScan() { }
@@ -108,6 +110,9 @@ namespace mongo {
/** See if the cursor is pointing at or past _endKey, if _endKey is non-empty. */
void checkEnd();
+ // Transactional context for read locks. Not owned by us.
+ OperationContext* _txn;
+
// The WorkingSet we annotate with results. Not owned by us.
WorkingSet* _workingSet;
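
One detail worth noting in the IndexScan hunks: _txn is both declared ahead of the other members and initialized first, so declaration order and initializer order stay in sync. A small sketch of why that matters, with hypothetical types:

    class OperationContext;
    class WorkingSet;

    class SketchScan {
    public:
        SketchScan(OperationContext* txn, WorkingSet* ws)
            : _txn(txn),         // listed first, matching declaration order
              _workingSet(ws) {}

    private:
        // Members are initialized in declaration order no matter how the
        // initializer list is written, so keeping the two in sync (as the
        // hunks above do) avoids -Wreorder warnings and surprises.
        OperationContext* _txn;
        WorkingSet* _workingSet;
    };
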
diff --git a/src/mongo/db/exec/oplogstart.cpp b/src/mongo/db/exec/oplogstart.cpp
index c569bfb8a34..93d4028ed10 100644
--- a/src/mongo/db/exec/oplogstart.cpp
+++ b/src/mongo/db/exec/oplogstart.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -35,8 +35,12 @@
namespace mongo {
// Does not take ownership.
- OplogStart::OplogStart(const Collection* collection, MatchExpression* filter, WorkingSet* ws)
- : _needInit(true),
+ OplogStart::OplogStart(OperationContext* txn,
+ const Collection* collection,
+ MatchExpression* filter,
+ WorkingSet* ws)
+ : _txn(txn),
+ _needInit(true),
_backwardsScanning(false),
_extentHopping(false),
_done(false),
@@ -52,7 +56,7 @@ namespace mongo {
CollectionScanParams params;
params.collection = _collection;
params.direction = CollectionScanParams::BACKWARD;
- _cs.reset(new CollectionScan(params, _workingSet, NULL));
+ _cs.reset(new CollectionScan(_txn, params, _workingSet, NULL));
_needInit = false;
_backwardsScanning = true;
_timer.reset();
@@ -105,7 +109,7 @@ namespace mongo {
_cs.reset();
// Set up our extent hopping state.
- _subIterators = _collection->getManyIterators();
+ _subIterators = _collection->getManyIterators(_txn);
}
PlanStage::StageState OplogStart::workBackwardsScan(WorkingSetID* out) {
diff --git a/src/mongo/db/exec/oplogstart.h b/src/mongo/db/exec/oplogstart.h
index e10721a9064..5d313368532 100644
--- a/src/mongo/db/exec/oplogstart.h
+++ b/src/mongo/db/exec/oplogstart.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -60,7 +60,10 @@ namespace mongo {
class OplogStart : public PlanStage {
public:
// Does not take ownership.
- OplogStart(const Collection* collection, MatchExpression* filter, WorkingSet* ws);
+ OplogStart(OperationContext* txn,
+ const Collection* collection,
+ MatchExpression* filter,
+ WorkingSet* ws);
virtual ~OplogStart();
virtual StageState work(WorkingSetID* out);
@@ -88,6 +91,9 @@ namespace mongo {
StageState workExtentHopping(WorkingSetID* out);
+ // Transactional context for read locks. Not owned by us.
+ OperationContext* _txn;
+
// If we're backwards scanning we just punt to a collscan.
scoped_ptr<CollectionScan> _cs;
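
OplogStart saves the context at construction and only hands it to the CollectionScan it builds once backwards scanning actually starts. A sketch of that deferred hand-off, with stand-in types rather than the real stages:

    #include <memory>

    class OperationContext;

    struct ChildScan {
        explicit ChildScan(OperationContext* txn) : _txn(txn) {}
        OperationContext* _txn;   // borrowed, never owned
    };

    class ParentStage {
    public:
        explicit ParentStage(OperationContext* txn) : _txn(txn) {}

        // Mirrors OplogStart's fallback to a backwards collection scan: the
        // child is created on demand from the context saved at construction.
        void startBackwardsScan() {
            _child.reset(new ChildScan(_txn));
        }

    private:
        OperationContext* _txn;
        std::unique_ptr<ChildScan> _child;
    };
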
diff --git a/src/mongo/db/exec/s2near.cpp b/src/mongo/db/exec/s2near.cpp
index c7d37111840..66babb6d832 100644
--- a/src/mongo/db/exec/s2near.cpp
+++ b/src/mongo/db/exec/s2near.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 MongoDB Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -43,8 +43,8 @@ namespace mongo {
// static
const char* S2NearStage::kStageType = "GEO_NEAR_2DSPHERE";
- S2NearStage::S2NearStage(const S2NearParams& params, WorkingSet* ws)
- : _commonStats(kStageType) {
+ S2NearStage::S2NearStage(OperationContext* txn, const S2NearParams& params, WorkingSet* ws)
+ : _txn(txn), _commonStats(kStageType) {
_initted = false;
_params = params;
_ws = ws;
@@ -282,7 +282,7 @@ namespace mongo {
// Owns geo filter.
_keyGeoFilter.reset(new GeoS2KeyMatchExpression(
&_annulus, _params.baseBounds.fields[_nearFieldIndex].name));
- IndexScan* scan = new IndexScan(params, _ws, _keyGeoFilter.get());
+ IndexScan* scan = new IndexScan(_txn, params, _ws, _keyGeoFilter.get());
// Owns 'scan'.
_child.reset(new FetchStage(_ws, scan, _params.filter, _params.collection));
diff --git a/src/mongo/db/exec/s2near.h b/src/mongo/db/exec/s2near.h
index f273b8bf992..e71f697c23e 100644
--- a/src/mongo/db/exec/s2near.h
+++ b/src/mongo/db/exec/s2near.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 MongoDB Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -43,6 +43,8 @@
namespace mongo {
+ class OperationContext;
+
struct S2NearParams {
S2NearParams() : collection(NULL) { }
Collection* collection;
@@ -63,7 +65,7 @@ namespace mongo {
* Takes: index to scan over, MatchExpression with near point, other MatchExpressions for
* covered data,
*/
- S2NearStage(const S2NearParams& params, WorkingSet* ws);
+ S2NearStage(OperationContext* txn, const S2NearParams& params, WorkingSet* ws);
virtual ~S2NearStage();
@@ -87,6 +89,9 @@ namespace mongo {
StageState addResultToQueue(WorkingSetID* out);
void nextAnnulus();
+ // Transactional context for read locks. Not owned by us.
+ OperationContext* _txn;
+
bool _worked;
S2NearParams _params;
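
Headers such as s2near.h add only a forward declaration of OperationContext rather than an include, since a pointer member or parameter does not need the complete type. A hypothetical header following the same rule:

    // demo_stage.h -- forward declare rather than include when only a
    // pointer crosses the interface; the .cpp sees the full definition.
    class OperationContext;

    class DemoNearStage {
    public:
        explicit DemoNearStage(OperationContext* txn);

    private:
        OperationContext* _txn;  // an incomplete type is fine for a pointer member
    };
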
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index c2f6698414d..346d481d8be 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -134,7 +134,7 @@ namespace mongo {
OwnedPointerVector<MatchExpression> exprs;
auto_ptr<WorkingSet> ws(new WorkingSet());
- PlanStage* userRoot = parseQuery(collection, planObj, ws.get(), &exprs);
+ PlanStage* userRoot = parseQuery(txn, collection, planObj, ws.get(), &exprs);
uassert(16911, "Couldn't parse plan from " + cmdObj.toString(), NULL != userRoot);
// Add a fetch at the top for the user so we can get obj back for sure.
@@ -153,7 +153,8 @@ namespace mongo {
return true;
}
- PlanStage* parseQuery(Collection* collection,
+ PlanStage* parseQuery(OperationContext* txn,
+ Collection* collection,
BSONObj obj,
WorkingSet* workingSet,
OwnedPointerVector<MatchExpression>* exprs) {
@@ -211,7 +212,7 @@ namespace mongo {
params.bounds.endKeyInclusive = nodeArgs["endKeyInclusive"].Bool();
params.direction = nodeArgs["direction"].numberInt();
- return new IndexScan(params, workingSet, matcher);
+ return new IndexScan(txn, params, workingSet, matcher);
}
else if ("andHash" == nodeName) {
uassert(16921, "Nodes argument must be provided to AND",
@@ -226,7 +227,7 @@ namespace mongo {
uassert(16922, "node of AND isn't an obj?: " + e.toString(),
e.isABSONObj());
- PlanStage* subNode = parseQuery(collection, e.Obj(), workingSet, exprs);
+ PlanStage* subNode = parseQuery(txn, collection, e.Obj(), workingSet, exprs);
uassert(16923, "Can't parse sub-node of AND: " + e.Obj().toString(),
NULL != subNode);
// takes ownership
@@ -252,7 +253,7 @@ namespace mongo {
uassert(16925, "node of AND isn't an obj?: " + e.toString(),
e.isABSONObj());
- PlanStage* subNode = parseQuery(collection, e.Obj(), workingSet, exprs);
+ PlanStage* subNode = parseQuery(txn, collection, e.Obj(), workingSet, exprs);
uassert(16926, "Can't parse sub-node of AND: " + e.Obj().toString(),
NULL != subNode);
// takes ownership
@@ -275,7 +276,7 @@ namespace mongo {
while (it.more()) {
BSONElement e = it.next();
if (!e.isABSONObj()) { return NULL; }
- PlanStage* subNode = parseQuery(collection, e.Obj(), workingSet, exprs);
+ PlanStage* subNode = parseQuery(txn, collection, e.Obj(), workingSet, exprs);
uassert(16936, "Can't parse sub-node of OR: " + e.Obj().toString(),
NULL != subNode);
// takes ownership
@@ -287,7 +288,8 @@ namespace mongo {
else if ("fetch" == nodeName) {
uassert(16929, "Node argument must be provided to fetch",
nodeArgs["node"].isABSONObj());
- PlanStage* subNode = parseQuery(collection,
+ PlanStage* subNode = parseQuery(txn,
+ collection,
nodeArgs["node"].Obj(),
workingSet,
exprs);
@@ -300,7 +302,8 @@ namespace mongo {
nodeArgs["node"].isABSONObj());
uassert(16931, "Num argument must be provided to limit",
nodeArgs["num"].isNumber());
- PlanStage* subNode = parseQuery(collection,
+ PlanStage* subNode = parseQuery(txn,
+ collection,
nodeArgs["node"].Obj(),
workingSet,
exprs);
@@ -313,7 +316,8 @@ namespace mongo {
nodeArgs["node"].isABSONObj());
uassert(16933, "Num argument must be provided to skip",
nodeArgs["num"].isNumber());
- PlanStage* subNode = parseQuery(collection,
+ PlanStage* subNode = parseQuery(txn,
+ collection,
nodeArgs["node"].Obj(),
workingSet,
exprs);
@@ -333,7 +337,7 @@ namespace mongo {
params.direction = CollectionScanParams::BACKWARD;
}
- return new CollectionScan(params, workingSet, matcher);
+ return new CollectionScan(txn, params, workingSet, matcher);
}
// sort is disabled for now.
#if 0
@@ -342,7 +346,7 @@ namespace mongo {
nodeArgs["node"].isABSONObj());
uassert(16970, "Pattern argument must be provided to sort",
nodeArgs["pattern"].isABSONObj());
- PlanStage* subNode = parseQuery(db, nodeArgs["node"].Obj(), workingSet, exprs);
+ PlanStage* subNode = parseQuery(txn, db, nodeArgs["node"].Obj(), workingSet, exprs);
SortStageParams params;
params.pattern = nodeArgs["pattern"].Obj();
return new SortStage(params, workingSet, subNode);
@@ -367,7 +371,7 @@ namespace mongo {
uassert(16973, "node of mergeSort isn't an obj?: " + e.toString(),
e.isABSONObj());
- PlanStage* subNode = parseQuery(collection, e.Obj(), workingSet, exprs);
+ PlanStage* subNode = parseQuery(txn, collection, e.Obj(), workingSet, exprs);
uassert(16974, "Can't parse sub-node of mergeSort: " + e.Obj().toString(),
NULL != subNode);
// takes ownership
@@ -403,7 +407,7 @@ namespace mongo {
return NULL;
}
- return new TextStage(params, workingSet, matcher);
+ return new TextStage(txn, params, workingSet, matcher);
}
else {
return NULL;
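
Because parseQuery recurses over the plan description, the context has to be threaded through every level so that the leaf stages (IndexScan, CollectionScan, TextStage) that actually touch storage can receive it. A compact sketch of that shape, with stand-in types and ownership elided:

    class OperationContext;

    struct StageBase {
        virtual ~StageBase() {}
    };

    struct LeafStage : StageBase {
        explicit LeafStage(OperationContext* txn) : _txn(txn) {}
        OperationContext* _txn;   // only the leaves consume the context
    };

    struct WrapperStage : StageBase {
        explicit WrapperStage(StageBase* child) : _child(child) {}
        StageBase* _child;        // ownership/cleanup elided in this sketch
    };

    // Interior nodes just thread txn through; the leaf constructor uses it.
    StageBase* parseSketch(OperationContext* txn, int depth) {
        if (depth == 0) {
            return new LeafStage(txn);
        }
        return new WrapperStage(parseSketch(txn, depth - 1));
    }
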
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 67e631abebd..6875a53f977 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -46,11 +46,13 @@ namespace mongo {
// static
const char* SubplanStage::kStageType = "SUBPLAN";
- SubplanStage::SubplanStage(Collection* collection,
+ SubplanStage::SubplanStage(OperationContext* txn,
+ Collection* collection,
WorkingSet* ws,
const QueryPlannerParams& params,
CanonicalQuery* cq)
- : _state(SubplanStage::PLANNING),
+ : _txn(txn),
+ _state(SubplanStage::PLANNING),
_collection(collection),
_ws(ws),
_plannerParams(params),
@@ -75,12 +77,13 @@ namespace mongo {
}
// static
- Status SubplanStage::make(Collection* collection,
+ Status SubplanStage::make(OperationContext* txn,
+ Collection* collection,
WorkingSet* ws,
const QueryPlannerParams& params,
CanonicalQuery* cq,
SubplanStage** out) {
- auto_ptr<SubplanStage> autoStage(new SubplanStage(collection, ws, params, cq));
+ auto_ptr<SubplanStage> autoStage(new SubplanStage(txn, collection, ws, params, cq));
Status planningStatus = autoStage->planSubqueries();
if (!planningStatus.isOK()) {
return planningStatus;
@@ -262,7 +265,8 @@ namespace mongo {
// Dump all the solutions into the MPR.
for (size_t ix = 0; ix < solutions.size(); ++ix) {
PlanStage* nextPlanRoot;
- verify(StageBuilder::build(_collection,
+ verify(StageBuilder::build(_txn,
+ _collection,
*solutions[ix],
sharedWorkingSet,
&nextPlanRoot));
@@ -349,7 +353,7 @@ namespace mongo {
auto_ptr<MultiPlanStage> multiPlanStage(new MultiPlanStage(_collection, _query));
WorkingSet* ws = new WorkingSet();
PlanStage* root;
- verify(StageBuilder::build(_collection, *soln, ws, &root));
+ verify(StageBuilder::build(_txn, _collection, *soln, ws, &root));
multiPlanStage->addPlan(soln, root, ws); // Takes ownership first two arguments.
multiPlanStage->pickBestPlan();
@@ -402,7 +406,9 @@ namespace mongo {
else if (!_killed) {
// Couldn't run as subplans so we'll just call normal getExecutor.
PlanExecutor* exec;
- Status status = getExecutorAlwaysPlan(_collection, _query, _plannerParams, &exec);
+
+ Status status = getExecutorAlwaysPlan(
+ _txn, _collection, _query, _plannerParams, &exec);
if (!status.isOK()) {
// We utterly failed.
diff --git a/src/mongo/db/exec/subplan.h b/src/mongo/db/exec/subplan.h
index 1fb6c698523..5e63d82286c 100644
--- a/src/mongo/db/exec/subplan.h
+++ b/src/mongo/db/exec/subplan.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -40,6 +40,8 @@
namespace mongo {
+ class OperationContext;
+
/**
* The SubplanStage is used for rooted $or queries. It plans each clause of the $or
* individually, and then creates an overall query plan based on the winning plan from
@@ -55,7 +57,8 @@ namespace mongo {
*
* 'out' is valid only if an OK status is returned.
*/
- static Status make(Collection* collection,
+ static Status make(OperationContext* txn,
+ Collection* collection,
WorkingSet* ws,
const QueryPlannerParams& params,
CanonicalQuery* cq,
@@ -91,7 +94,8 @@ namespace mongo {
Status planSubqueries();
private:
- SubplanStage(Collection* collection,
+ SubplanStage(OperationContext* txn,
+ Collection* collection,
WorkingSet* ws,
const QueryPlannerParams& params,
CanonicalQuery* cq);
@@ -103,6 +107,9 @@ namespace mongo {
RUNNING,
};
+ // Transactional context for read locks. Not owned by us.
+ OperationContext* _txn;
+
SubplanningState _state;
Collection* _collection;
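
SubplanStage keeps its constructor private and exposes a static make() that runs planSubqueries() before releasing the stage, so a half-initialized stage never escapes. A self-contained sketch of the same factory shape, with a hypothetical DemoStatus standing in for the real Status:

    class OperationContext;

    struct DemoStatus {
        bool ok;
        static DemoStatus OK() { DemoStatus s = { true }; return s; }
    };

    class DemoSubplan {
    public:
        // Construct, run the fallible setup step, and transfer ownership to
        // the caller only on success, as SubplanStage::make does.
        static DemoStatus make(OperationContext* txn, DemoSubplan** out) {
            DemoSubplan* stage = new DemoSubplan(txn);
            DemoStatus planning = stage->planSketch();
            if (!planning.ok) {
                delete stage;
                return planning;
            }
            *out = stage;
            return DemoStatus::OK();
        }

    private:
        explicit DemoSubplan(OperationContext* txn) : _txn(txn) {}
        DemoStatus planSketch() { return DemoStatus::OK(); }  // stands in for planSubqueries()
        OperationContext* _txn;
    };
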
diff --git a/src/mongo/db/exec/text.cpp b/src/mongo/db/exec/text.cpp
index 50f7e3f99e4..85ddde49100 100644
--- a/src/mongo/db/exec/text.cpp
+++ b/src/mongo/db/exec/text.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -41,10 +41,12 @@ namespace mongo {
// static
const char* TextStage::kStageType = "TEXT";
- TextStage::TextStage(const TextStageParams& params,
+ TextStage::TextStage(OperationContext* txn,
+ const TextStageParams& params,
WorkingSet* ws,
const MatchExpression* filter)
- : _params(params),
+ : _txn(txn),
+ _params(params),
_ftsMatcher(params.query, params.spec),
_ws(ws),
_filter(filter),
@@ -180,7 +182,7 @@ namespace mongo {
params.bounds.isSimpleRange = true;
params.descriptor = _params.index;
params.direction = -1;
- _scanners.mutableVector().push_back(new IndexScan(params, _ws, NULL));
+ _scanners.mutableVector().push_back(new IndexScan(_txn, params, _ws, NULL));
}
// If we have no terms we go right to EOF.
diff --git a/src/mongo/db/exec/text.h b/src/mongo/db/exec/text.h
index 851d3cdada7..48f4c595b27 100644
--- a/src/mongo/db/exec/text.h
+++ b/src/mongo/db/exec/text.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -52,6 +52,8 @@ namespace mongo {
using fts::FTSSpec;
using fts::MAX_WEIGHT;
+ class OperationContext;
+
struct TextStageParams {
TextStageParams(const FTSSpec& s) : spec(s) {}
@@ -93,7 +95,10 @@ namespace mongo {
DONE,
};
- TextStage(const TextStageParams& params, WorkingSet* ws, const MatchExpression* filter);
+ TextStage(OperationContext* txn,
+ const TextStageParams& params,
+ WorkingSet* ws,
+ const MatchExpression* filter);
virtual ~TextStage();
@@ -137,6 +142,9 @@ namespace mongo {
*/
StageState returnResults(WorkingSetID* out);
+ // Transactional context for read locks. Not owned by us.
+ OperationContext* _txn;
+
// Parameters of this text stage.
TextStageParams _params;
diff --git a/src/mongo/db/fts/fts_command_mongod.cpp b/src/mongo/db/fts/fts_command_mongod.cpp
index 4c0915df8db..a2c4b35e165 100644
--- a/src/mongo/db/fts/fts_command_mongod.cpp
+++ b/src/mongo/db/fts/fts_command_mongod.cpp
@@ -1,7 +1,7 @@
// fts_command_mongod.h
/**
-* Copyright (C) 2012 10gen Inc.
+* Copyright (C) 2012-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -113,7 +113,7 @@ namespace mongo {
}
Runner* rawRunner;
- Status getRunnerStatus = getRunner(ctx.ctx().db()->getCollection(txn, ns), cq, &rawRunner);
+ Status getRunnerStatus = getRunner(txn, ctx.ctx().db()->getCollection(txn, ns), cq, &rawRunner);
if (!getRunnerStatus.isOK()) {
errmsg = getRunnerStatus.reason();
return false;
diff --git a/src/mongo/db/geo/haystack.cpp b/src/mongo/db/geo/haystack.cpp
index 27cc3cb0633..ad8b06f00eb 100644
--- a/src/mongo/db/geo/haystack.cpp
+++ b/src/mongo/db/geo/haystack.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2008-2013 10gen Inc.
+ * Copyright (C) 2008-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -110,7 +110,7 @@ namespace mongo {
IndexDescriptor* desc = idxs[0];
HaystackAccessMethod* ham =
static_cast<HaystackAccessMethod*>( collection->getIndexCatalog()->getIndex(desc) );
- ham->searchCommand(collection, nearElt.Obj(), maxDistance.numberDouble(), search.Obj(),
+ ham->searchCommand(txn, collection, nearElt.Obj(), maxDistance.numberDouble(), search.Obj(),
&result, limit);
return 1;
}
diff --git a/src/mongo/db/index/btree_based_access_method.cpp b/src/mongo/db/index/btree_based_access_method.cpp
index 0e09b1cb98a..664aa7de94c 100644
--- a/src/mongo/db/index/btree_based_access_method.cpp
+++ b/src/mongo/db/index/btree_based_access_method.cpp
@@ -1,5 +1,5 @@
/**
-* Copyright (C) 2013 10gen Inc.
+* Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -143,8 +143,8 @@ namespace mongo {
return ret;
}
- Status BtreeBasedAccessMethod::newCursor(const CursorOptions& opts, IndexCursor** out) const {
- *out = new BtreeIndexCursor(_newInterface->newCursor(opts.direction));
+ Status BtreeBasedAccessMethod::newCursor(OperationContext* txn, const CursorOptions& opts, IndexCursor** out) const {
+ *out = new BtreeIndexCursor(_newInterface->newCursor(txn, opts.direction));
return Status::OK();
}
@@ -197,11 +197,11 @@ namespace mongo {
return _newInterface->initAsEmpty(txn);
}
- Status BtreeBasedAccessMethod::touch(const BSONObj& obj) {
+ Status BtreeBasedAccessMethod::touch(OperationContext* txn, const BSONObj& obj) {
BSONObjSet keys;
getKeys(obj, &keys);
- boost::scoped_ptr<BtreeInterface::Cursor> cursor(_newInterface->newCursor(1));
+ boost::scoped_ptr<BtreeInterface::Cursor> cursor(_newInterface->newCursor(txn, 1));
for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
cursor->locate(*i, DiskLoc());
}
@@ -214,8 +214,8 @@ namespace mongo {
return _newInterface->touch(txn);
}
- DiskLoc BtreeBasedAccessMethod::findSingle(const BSONObj& key) const {
- boost::scoped_ptr<BtreeInterface::Cursor> cursor(_newInterface->newCursor(1));
+ DiskLoc BtreeBasedAccessMethod::findSingle( OperationContext* txn, const BSONObj& key ) const {
+ boost::scoped_ptr<BtreeInterface::Cursor> cursor(_newInterface->newCursor(txn, 1));
cursor->locate(key, minDiskLoc);
// A null bucket means the key wasn't found (nor was anything found after it).
@@ -233,15 +233,16 @@ namespace mongo {
return cursor->getDiskLoc();
}
- Status BtreeBasedAccessMethod::validate(int64_t* numKeys) {
+ Status BtreeBasedAccessMethod::validate(OperationContext* txn, int64_t* numKeys) {
// XXX: long long vs int64_t
long long keys;
- _newInterface->fullValidate(&keys);
+ _newInterface->fullValidate(txn, &keys);
*numKeys = keys;
return Status::OK();
}
- Status BtreeBasedAccessMethod::validateUpdate(const BSONObj &from,
+ Status BtreeBasedAccessMethod::validateUpdate(OperationContext* txn,
+ const BSONObj &from,
const BSONObj &to,
const DiskLoc &record,
const InsertDeleteOptions &options,
@@ -264,7 +265,7 @@ namespace mongo {
if (checkForDups) {
for (vector<BSONObj*>::iterator i = data->added.begin(); i != data->added.end(); i++) {
- Status check = _newInterface->dupKeyCheck(**i, record);
+ Status check = _newInterface->dupKeyCheck(txn, **i, record);
if (!check.isOK()) {
status->_isValid = false;
return check;
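
findSingle and the touch/validate family take the context purely to open short-lived Btree cursors; nothing retains it past the call. A reduced sketch of that scoped-cursor pattern, with stand-in types rather than the real BtreeInterface:

    #include <memory>

    class OperationContext;

    struct DemoCursor {
        bool locate(long long key) { return key >= 0; }  // stand-in for a key lookup
    };

    struct DemoBtree {
        // Mirrors BtreeInterface::newCursor(txn, direction) in shape only.
        DemoCursor* newCursor(OperationContext* /*txn*/, int /*direction*/) {
            return new DemoCursor();
        }
    };

    // Like findSingle: the context is needed just long enough to open a
    // short-lived cursor, which is destroyed before returning.
    bool findSingleSketch(DemoBtree* btree, OperationContext* txn, long long key) {
        std::unique_ptr<DemoCursor> cursor(btree->newCursor(txn, 1));
        return cursor->locate(key);
    }
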
diff --git a/src/mongo/db/index/btree_based_access_method.h b/src/mongo/db/index/btree_based_access_method.h
index 5b1aa351ae1..710a323b60a 100644
--- a/src/mongo/db/index/btree_based_access_method.h
+++ b/src/mongo/db/index/btree_based_access_method.h
@@ -1,5 +1,5 @@
/**
-* Copyright (C) 2013 10gen Inc.
+* Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -74,7 +74,8 @@ namespace mongo {
const InsertDeleteOptions& options,
int64_t* numDeleted);
- virtual Status validateUpdate(const BSONObj& from,
+ virtual Status validateUpdate(OperationContext* txn,
+ const BSONObj& from,
const BSONObj& to,
const DiskLoc& loc,
const InsertDeleteOptions& options,
@@ -84,7 +85,9 @@ namespace mongo {
const UpdateTicket& ticket,
int64_t* numUpdated);
- virtual Status newCursor(const CursorOptions& opts, IndexCursor** out) const;
+ virtual Status newCursor(OperationContext* txn,
+ const CursorOptions& opts,
+ IndexCursor** out) const;
virtual Status initializeAsEmpty(OperationContext* txn);
@@ -94,14 +97,14 @@ namespace mongo {
bool mayInterrupt,
std::set<DiskLoc>* dups );
- virtual Status touch(const BSONObj& obj);
+ virtual Status touch(OperationContext* txn, const BSONObj& obj);
virtual Status touch(OperationContext* txn) const;
- virtual Status validate(int64_t* numKeys);
+ virtual Status validate(OperationContext* txn, int64_t* numKeys);
// XXX: consider migrating callers to use IndexCursor instead
- virtual DiskLoc findSingle( const BSONObj& key ) const;
+ virtual DiskLoc findSingle( OperationContext* txn, const BSONObj& key ) const;
/**
* Invalidates all active cursors, which point at the bucket being deleted.
diff --git a/src/mongo/db/index/btree_based_bulk_access_method.h b/src/mongo/db/index/btree_based_bulk_access_method.h
index 88dad32ea04..0c766aad1ec 100644
--- a/src/mongo/db/index/btree_based_bulk_access_method.h
+++ b/src/mongo/db/index/btree_based_bulk_access_method.h
@@ -1,5 +1,5 @@
/**
-* Copyright (C) 2013 10gen Inc.
+* Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -75,7 +75,7 @@ namespace mongo {
return Status::OK();
}
- virtual Status touch(const BSONObj& obj) {
+ virtual Status touch(OperationContext* txn, const BSONObj& obj) {
return _notAllowed();
}
@@ -83,7 +83,7 @@ namespace mongo {
return _notAllowed();
}
- virtual Status validate(int64_t* numKeys) {
+ virtual Status validate(OperationContext* txn, int64_t* numKeys) {
return _notAllowed();
}
@@ -95,7 +95,8 @@ namespace mongo {
return _notAllowed();
}
- virtual Status validateUpdate(const BSONObj& from,
+ virtual Status validateUpdate(OperationContext* txn,
+ const BSONObj& from,
const BSONObj& to,
const DiskLoc& loc,
const InsertDeleteOptions& options,
@@ -109,7 +110,9 @@ namespace mongo {
return _notAllowed();
}
- virtual Status newCursor(const CursorOptions& opts, IndexCursor** out) const {
+ virtual Status newCursor(OperationContext* txn,
+ const CursorOptions& opts,
+ IndexCursor** out) const {
return _notAllowed();
}
diff --git a/src/mongo/db/index/haystack_access_method.cpp b/src/mongo/db/index/haystack_access_method.cpp
index 44492bc87a2..a2fbe010a3f 100644
--- a/src/mongo/db/index/haystack_access_method.cpp
+++ b/src/mongo/db/index/haystack_access_method.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -61,7 +61,7 @@ namespace mongo {
ExpressionKeysPrivate::getHaystackKeys(obj, _geoField, _otherFields, _bucketSize, keys);
}
- void HaystackAccessMethod::searchCommand(Collection* collection,
+ void HaystackAccessMethod::searchCommand(OperationContext* txn, Collection* collection,
const BSONObj& nearObj, double maxDistance,
const BSONObj& search, BSONObjBuilder* result,
unsigned limit) {
@@ -100,7 +100,7 @@ namespace mongo {
unordered_set<DiskLoc, DiskLoc::Hasher> thisPass;
- scoped_ptr<Runner> runner(InternalPlanner::indexScan(collection,
+ scoped_ptr<Runner> runner(InternalPlanner::indexScan(txn, collection,
_descriptor, key, key, true));
Runner::RunnerState state;
DiskLoc loc;
diff --git a/src/mongo/db/index/haystack_access_method.h b/src/mongo/db/index/haystack_access_method.h
index 5ce9d787450..ebab7177a59 100644
--- a/src/mongo/db/index/haystack_access_method.h
+++ b/src/mongo/db/index/haystack_access_method.h
@@ -1,5 +1,5 @@
/**
-* Copyright (C) 2013 10gen Inc.
+* Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -36,6 +36,7 @@
namespace mongo {
class Collection;
+ class OperationContext;
/**
* Maps (lat, lng) to the bucketSize-sided square bucket that contains it.
@@ -62,7 +63,7 @@ namespace mongo {
protected:
friend class GeoHaystackSearchCommand;
- void searchCommand(Collection* collection,
+ void searchCommand(OperationContext* txn, Collection* collection,
const BSONObj& nearObj, double maxDistance, const BSONObj& search,
BSONObjBuilder* result, unsigned limit);
diff --git a/src/mongo/db/index/index_access_method.h b/src/mongo/db/index/index_access_method.h
index 90bed6811d1..8cef5a5ef29 100644
--- a/src/mongo/db/index/index_access_method.h
+++ b/src/mongo/db/index/index_access_method.h
@@ -1,5 +1,5 @@
/**
-* Copyright (C) 2013 10gen Inc.
+* Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -91,7 +91,8 @@ namespace mongo {
*
* There is no obligation to perform the update after performing validation.
*/
- virtual Status validateUpdate(const BSONObj& from,
+ virtual Status validateUpdate(OperationContext* txn,
+ const BSONObj& from,
const BSONObj& to,
const DiskLoc& loc,
const InsertDeleteOptions& options,
@@ -113,7 +114,7 @@ namespace mongo {
* Fills in '*out' with an IndexCursor. Return a status indicating success or reason of
* failure. If the latter, '*out' contains NULL. See index_cursor.h for IndexCursor usage.
*/
- virtual Status newCursor(const CursorOptions& opts, IndexCursor** out) const = 0;
+ virtual Status newCursor(OperationContext* txn, const CursorOptions& opts, IndexCursor** out) const = 0;
// ------ index level operations ------
@@ -131,12 +132,12 @@ namespace mongo {
* appropriate pages are not swapped out.
* See prefetch.cpp.
*/
- virtual Status touch(const BSONObj& obj) = 0;
+ virtual Status touch(OperationContext* txn, const BSONObj& obj) = 0;
/**
* this pages in the entire index
*/
- virtual Status touch( OperationContext* txn ) const = 0;
+ virtual Status touch(OperationContext* txn) const = 0;
/**
* Walk the entire index, checking the internal structure for consistency.
@@ -147,7 +148,7 @@ namespace mongo {
* Currently wasserts that the index is invalid. This could/should be changed in
* the future to return a Status.
*/
- virtual Status validate(int64_t* numKeys) = 0;
+ virtual Status validate(OperationContext* txn, int64_t* numKeys) = 0;
//
// Bulk operations support
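
After this hunk IndexAccessMethod carries two touch() overloads that both begin with the context: touch(txn, obj) pages in the keys for one document, while touch(txn) const pages in the whole index. A sketch of the resulting interface, with hypothetical stub types:

    class OperationContext;
    struct DemoObj {};
    struct DemoStatus {};

    class DemoAccessMethod {
    public:
        virtual ~DemoAccessMethod() {}

        // Pages in the index keys derived from a single document.
        virtual DemoStatus touch(OperationContext* txn, const DemoObj& obj) = 0;

        // Pages in the entire index; the overloads now differ by arity and
        // constness rather than by whether a context is present.
        virtual DemoStatus touch(OperationContext* txn) const = 0;
    };
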
diff --git a/src/mongo/db/ops/delete_executor.cpp b/src/mongo/db/ops/delete_executor.cpp
index 2d776984236..e7eb2a9546c 100644
--- a/src/mongo/db/ops/delete_executor.cpp
+++ b/src/mongo/db/ops/delete_executor.cpp
@@ -118,11 +118,12 @@ namespace mongo {
Runner* rawRunner;
if (_canonicalQuery.get()) {
- uassertStatusOK(getRunner(collection, _canonicalQuery.release(), &rawRunner));
+ uassertStatusOK(getRunner(txn, collection, _canonicalQuery.release(), &rawRunner));
}
else {
CanonicalQuery* ignored;
- uassertStatusOK(getRunner(collection,
+ uassertStatusOK(getRunner(txn,
+ collection,
ns.ns(),
_request->getQuery(),
&rawRunner,
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index b39008a1bef..e675fd142ac 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -1,7 +1,7 @@
//@file update.cpp
/**
- * Copyright (C) 2008 10gen Inc.
+ * Copyright (C) 2008-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -457,8 +457,8 @@ namespace mongo {
Runner* rawRunner;
Status status = cq ?
- getRunner(collection, cqHolder.release(), &rawRunner) :
- getRunner(collection, nsString.ns(), request.getQuery(), &rawRunner, &cq);
+ getRunner(txn, collection, cqHolder.release(), &rawRunner) :
+ getRunner(txn, collection, nsString.ns(), request.getQuery(), &rawRunner, &cq);
uassert(17243,
"could not get runner " + request.getQuery().toString() + "; " + causedBy(status),
status.isOK());
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index 6f62767f999..1f6f93f1274 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (c) 2012 10gen Inc.
+ * Copyright (c) 2012-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -75,6 +75,7 @@ namespace {
}
boost::shared_ptr<Runner> PipelineD::prepareCursorSource(
+ OperationContext* txn,
Collection* collection,
const intrusive_ptr<Pipeline>& pPipeline,
const intrusive_ptr<ExpressionContext>& pExpCtx) {
@@ -178,7 +179,7 @@ namespace {
&cq,
whereCallback);
Runner* rawRunner;
- if (status.isOK() && getRunner(collection, cq, &rawRunner, runnerOptions).isOK()) {
+ if (status.isOK() && getRunner(txn, collection, cq, &rawRunner, runnerOptions).isOK()) {
// success: The Runner will handle sorting for us using an index.
runner.reset(rawRunner);
sortInRunner = true;
@@ -203,7 +204,7 @@ namespace {
whereCallback));
Runner* rawRunner;
- uassertStatusOK(getRunner(collection, cq, &rawRunner, runnerOptions));
+ uassertStatusOK(getRunner(txn, collection, cq, &rawRunner, runnerOptions));
runner.reset(rawRunner);
}
diff --git a/src/mongo/db/pipeline/pipeline_d.h b/src/mongo/db/pipeline/pipeline_d.h
index b6b34e6f378..b9d85a69dbb 100644
--- a/src/mongo/db/pipeline/pipeline_d.h
+++ b/src/mongo/db/pipeline/pipeline_d.h
@@ -1,5 +1,5 @@
/**
- * Copyright 2012 (c) 10gen Inc.
+ * Copyright (C) 2012-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -34,6 +34,7 @@ namespace mongo {
class Collection;
class DocumentSourceCursor;
struct ExpressionContext;
+ class OperationContext;
class Pipeline;
class Runner;
@@ -71,6 +72,7 @@ namespace mongo {
* @param pExpCtx the expression context for this pipeline
*/
static boost::shared_ptr<Runner> prepareCursorSource(
+ OperationContext* txn,
Collection* collection,
const intrusive_ptr<Pipeline> &pPipeline,
const intrusive_ptr<ExpressionContext> &pExpCtx);
diff --git a/src/mongo/db/prefetch.cpp b/src/mongo/db/prefetch.cpp
index 88ba08716e7..fe2500f4b16 100644
--- a/src/mongo/db/prefetch.cpp
+++ b/src/mongo/db/prefetch.cpp
@@ -1,5 +1,5 @@
/**
-* Copyright (C) 2008 10gen Inc.
+* Copyright (C) 2008-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -58,7 +58,8 @@ namespace mongo {
"repl.preload.docs",
&prefetchDocStats );
- void prefetchIndexPages(Collection* collection,
+ void prefetchIndexPages(OperationContext* txn,
+ Collection* collection,
const repl::ReplSetImpl::IndexPrefetchConfig& prefetchConfig,
const BSONObj& obj);
@@ -111,7 +112,7 @@ namespace mongo {
// a way to achieve that would be to prefetch the record first, and then afterwards do
// this part.
//
- prefetchIndexPages(collection, prefetchConfig, obj);
+ prefetchIndexPages(txn, collection, prefetchConfig, obj);
// do not prefetch the data for inserts; it doesn't exist yet
//
@@ -128,7 +129,8 @@ namespace mongo {
}
// page in pages needed for all index lookups on a given object
- void prefetchIndexPages(Collection* collection,
+ void prefetchIndexPages(OperationContext* txn,
+ Collection* collection,
const repl::ReplSetImpl::IndexPrefetchConfig& prefetchConfig,
const BSONObj& obj) {
DiskLoc unusedDl; // unused
@@ -151,7 +153,7 @@ namespace mongo {
return;
IndexAccessMethod* iam = collection->getIndexCatalog()->getIndex( desc );
verify( iam );
- iam->touch(obj);
+ iam->touch(txn, obj);
}
catch (const DBException& e) {
LOG(2) << "ignoring exception in prefetchIndexPages(): " << e.what() << endl;
@@ -170,7 +172,7 @@ namespace mongo {
IndexDescriptor* desc = ii.next();
IndexAccessMethod* iam = collection->getIndexCatalog()->getIndex( desc );
verify( iam );
- iam->touch(obj);
+ iam->touch(txn, obj);
}
catch (const DBException& e) {
LOG(2) << "ignoring exception in prefetchIndexPages(): " << e.what() << endl;
diff --git a/src/mongo/db/query/explain.h b/src/mongo/db/query/explain.h
index 32d144466f5..bf4b5b4742c 100644
--- a/src/mongo/db/query/explain.h
+++ b/src/mongo/db/query/explain.h
@@ -38,6 +38,7 @@
namespace mongo {
class Collection;
+ class OperationContext;
// Temporarily hide the new explain implementation behind a setParameter.
// TODO: take this out, and make the new implementation the default.
@@ -131,7 +132,6 @@ namespace mongo {
* are no stages, this requires a special explain format.
*/
static void explainCountEmptyQuery(BSONObjBuilder* out);
-
};
} // namespace
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 74ec3f62f38..2a547a465c9 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -162,7 +162,8 @@ namespace mongo {
plannerParams->options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
}
- Status getExecutorIDHack(Collection* collection,
+ Status getExecutorIDHack(OperationContext* txn,
+ Collection* collection,
CanonicalQuery* query,
const QueryPlannerParams& plannerParams,
PlanExecutor** out) {
@@ -170,7 +171,7 @@ namespace mongo {
LOG(2) << "Using idhack: " << query->toStringShort();
WorkingSet* ws = new WorkingSet();
- PlanStage* root = new IDHackStage(collection, query, ws);
+ PlanStage* root = new IDHackStage(txn, collection, query, ws);
// Might have to filter out orphaned docs.
if (plannerParams.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
@@ -201,7 +202,8 @@ namespace mongo {
return Status::OK();
}
- Status getExecutor(Collection* collection,
+ Status getExecutor(OperationContext* txn,
+ Collection* collection,
CanonicalQuery* canonicalQuery,
PlanExecutor** out,
size_t plannerOptions) {
@@ -226,7 +228,7 @@ namespace mongo {
// If we have an _id index we can use the idhack runner.
if (IDHackStage::supportsQuery(*canonicalQuery) &&
collection->getIndexCatalog()->findIdIndex()) {
- return getExecutorIDHack(collection, canonicalQuery, plannerParams, out);
+ return getExecutorIDHack(txn, collection, canonicalQuery, plannerParams, out);
}
// Tailable: If the query requests tailable the collection must be capped.
@@ -267,7 +269,7 @@ namespace mongo {
WorkingSet* sharedWs = new WorkingSet();
PlanStage *root, *backupRoot=NULL;
- verify(StageBuilder::build(collection, *qs, sharedWs, &root));
+ verify(StageBuilder::build(txn, collection, *qs, sharedWs, &root));
if ((plannerParams.options & QueryPlannerParams::PRIVATE_IS_COUNT)
&& turnIxscanIntoCount(qs)) {
LOG(2) << "Using fast count: " << canonicalQuery->toStringShort()
@@ -278,7 +280,7 @@ namespace mongo {
}
}
else if (NULL != backupQs) {
- verify(StageBuilder::build(collection, *backupQs, sharedWs, &backupRoot));
+ verify(StageBuilder::build(txn, collection, *backupQs, sharedWs, &backupRoot));
}
// add a CachedPlanStage on top of the previous root
@@ -298,7 +300,7 @@ namespace mongo {
auto_ptr<WorkingSet> ws(new WorkingSet());
SubplanStage* subplan;
- Status runnerStatus = SubplanStage::make(collection, ws.get(), plannerParams,
+ Status runnerStatus = SubplanStage::make(txn, collection, ws.get(), plannerParams,
canonicalQuery, &subplan);
if (!runnerStatus.isOK()) {
return runnerStatus;
@@ -308,10 +310,11 @@ namespace mongo {
return Status::OK();
}
- return getExecutorAlwaysPlan(collection, canonicalQuery, plannerParams, out);
+ return getExecutorAlwaysPlan(txn, collection, canonicalQuery, plannerParams, out);
}
- Status getExecutorAlwaysPlan(Collection* collection,
+ Status getExecutorAlwaysPlan(OperationContext* txn,
+ Collection* collection,
CanonicalQuery* canonicalQuery,
const QueryPlannerParams& plannerParams,
PlanExecutor** execOut) {
@@ -355,7 +358,8 @@ namespace mongo {
// We're not going to cache anything that's fast count.
WorkingSet* ws = new WorkingSet();
PlanStage* root;
- verify(StageBuilder::build(collection, *solutions[i], ws, &root));
+
+ verify(StageBuilder::build(txn, collection, *solutions[i], ws, &root));
*execOut = new PlanExecutor(ws, root, solutions[i], collection);
return Status::OK();
@@ -371,7 +375,8 @@ namespace mongo {
// Only one possible plan. Run it. Build the stages from the solution.
WorkingSet* ws = new WorkingSet();
PlanStage* root;
- verify(StageBuilder::build(collection, *solutions[0], ws, &root));
+
+ verify(StageBuilder::build(txn, collection, *solutions[0], ws, &root));
*execOut = new PlanExecutor(ws, root, solutions[0], collection);
return Status::OK();
@@ -391,7 +396,7 @@ namespace mongo {
// version of StageBuild::build when WorkingSet is shared
PlanStage* nextPlanRoot;
- verify(StageBuilder::build(collection, *solutions[ix],
+ verify(StageBuilder::build(txn, collection, *solutions[ix],
sharedWorkingSet, &nextPlanRoot));
// Owns none of the arguments
@@ -581,7 +586,8 @@ namespace mongo {
} // namespace
- Status getExecutorCount(Collection* collection,
+ Status getExecutorCount(OperationContext* txn,
+ Collection* collection,
const BSONObj& query,
const BSONObj& hintObj,
PlanExecutor** execOut) {
@@ -602,7 +608,7 @@ namespace mongo {
scoped_ptr<CanonicalQuery> cleanupCq(cq);
- return getExecutor(collection, cq, execOut, QueryPlannerParams::PRIVATE_IS_COUNT);
+ return getExecutor(txn, collection, cq, execOut, QueryPlannerParams::PRIVATE_IS_COUNT);
}
//
@@ -658,7 +664,8 @@ namespace mongo {
return false;
}
- Status getExecutorDistinct(Collection* collection,
+ Status getExecutorDistinct(OperationContext* txn,
+ Collection* collection,
const BSONObj& query,
const std::string& field,
PlanExecutor** out) {
@@ -709,7 +716,7 @@ namespace mongo {
scoped_ptr<CanonicalQuery> cleanupCq(cq);
// Does not take ownership of its args.
- return getExecutor(collection, cq, out);
+ return getExecutor(txn, collection, cq, out);
}
//
@@ -758,7 +765,7 @@ namespace mongo {
WorkingSet* ws = new WorkingSet();
PlanStage* root;
- verify(StageBuilder::build(collection, *soln, ws, &root));
+ verify(StageBuilder::build(txn, collection, *soln, ws, &root));
// Takes ownership of 'ws', 'root', and 'soln'.
*out = new PlanExecutor(ws, root, soln, collection);
return Status::OK();
@@ -768,7 +775,7 @@ namespace mongo {
vector<QuerySolution*> solutions;
status = QueryPlanner::plan(*cq, plannerParams, &solutions);
if (!status.isOK()) {
- return getExecutor(collection, cq, out);
+ return getExecutor(txn, collection, cq, out);
}
// We look for a solution that has an ixscan we can turn into a distinctixscan
@@ -787,7 +794,7 @@ namespace mongo {
// Build and return the SSR over solutions[i].
WorkingSet* ws = new WorkingSet();
PlanStage* root;
- verify(StageBuilder::build(collection, *solutions[i], ws, &root));
+ verify(StageBuilder::build(txn, collection, *solutions[i], ws, &root));
// Takes ownership of 'ws', 'root', and 'solutions[i]'.
*out = new PlanExecutor(ws, root, solutions[i], collection);
return Status::OK();
@@ -810,7 +817,7 @@ namespace mongo {
cleanupCq.reset(cq);
// Does not take ownership.
- return getExecutor(collection, cq, out);
+ return getExecutor(txn, collection, cq, out);
}
} // namespace mongo
diff --git a/src/mongo/db/query/get_executor.h b/src/mongo/db/query/get_executor.h
index fc78e302709..f1eb40db2db 100644
--- a/src/mongo/db/query/get_executor.h
+++ b/src/mongo/db/query/get_executor.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -62,7 +62,8 @@ namespace mongo {
* If the query cannot be executed, returns a Status indicating why. Deletes
* rawCanonicalQuery.
*/
- Status getExecutor(Collection* collection,
+ Status getExecutor(OperationContext* txn,
+ Collection* collection,
CanonicalQuery* canonicalQuery,
PlanExecutor** out,
size_t plannerOptions = 0);
@@ -94,7 +95,8 @@ namespace mongo {
* possible values of a certain field. As such, we can skip lots of data in certain cases (see
* body of method for detail).
*/
- Status getExecutorDistinct(Collection* collection,
+ Status getExecutorDistinct(OperationContext* txn,
+ Collection* collection,
const BSONObj& query,
const std::string& field,
PlanExecutor** out);
@@ -106,7 +108,8 @@ namespace mongo {
* As such, with certain covered queries, we can skip the overhead of fetching etc. when
* executing a count.
*/
- Status getExecutorCount(Collection* collection,
+ Status getExecutorCount(OperationContext* txn,
+ Collection* collection,
const BSONObj& query,
const BSONObj& hintObj,
PlanExecutor** execOut);
@@ -119,7 +122,8 @@ namespace mongo {
* Returns the resulting executor through 'execOut'. The caller must delete 'execOut',
* if an OK status is returned.
*/
- Status getExecutorAlwaysPlan(Collection* collection,
+ Status getExecutorAlwaysPlan(OperationContext* txn,
+ Collection* collection,
CanonicalQuery* canonicalQuery,
const QueryPlannerParams& plannerParams,
PlanExecutor** execOut);
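
The get_executor entry points all accept the context first and merely forward it; only the stages built at the bottom of the chain store it. A sketch of that borrow-and-forward layering, hypothetical names throughout:

    class OperationContext;

    struct DemoExecutor {
        explicit DemoExecutor(OperationContext* txn) : _txn(txn) {}
        OperationContext* _txn;   // stored only at the lowest layer
    };

    // Upper layers forward the pointer without storing it, mirroring the
    // getExecutor -> getExecutorAlwaysPlan -> StageBuilder::build chain.
    DemoExecutor* buildSketch(OperationContext* txn) {
        return new DemoExecutor(txn);
    }

    DemoExecutor* getExecutorSketch(OperationContext* txn) {
        return buildSketch(txn);
    }
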
diff --git a/src/mongo/db/query/get_runner.cpp b/src/mongo/db/query/get_runner.cpp
index 3e027d60cff..429cbc15d33 100644
--- a/src/mongo/db/query/get_runner.cpp
+++ b/src/mongo/db/query/get_runner.cpp
@@ -69,7 +69,8 @@ namespace mongo {
MONGO_LOG_DEFAULT_COMPONENT_FILE(::mongo::logger::LogComponent::kQuery);
- Status getRunner(Collection* collection,
+ Status getRunner(OperationContext* txn,
+ Collection* collection,
const std::string& ns,
const BSONObj& unparsedQuery,
Runner** outRunner,
@@ -92,13 +93,13 @@ namespace mongo {
collection->ns(), unparsedQuery, outCanonicalQuery, whereCallback);
if (!status.isOK())
return status;
- return getRunner(collection, *outCanonicalQuery, outRunner, plannerOptions);
+ return getRunner(txn, collection, *outCanonicalQuery, outRunner, plannerOptions);
}
LOG(2) << "Using idhack: " << unparsedQuery.toString();
*outCanonicalQuery = NULL;
- *outRunner = new IDHackRunner(collection, unparsedQuery["_id"].wrap());
+ *outRunner = new IDHackRunner(txn, collection, unparsedQuery["_id"].wrap());
return Status::OK();
}
@@ -111,7 +112,8 @@ namespace mongo {
/**
* For a given query, get a runner.
*/
- Status getRunner(Collection* collection,
+ Status getRunner(OperationContext* txn,
+ Collection* collection,
CanonicalQuery* rawCanonicalQuery,
Runner** out,
size_t plannerOptions) {
@@ -132,7 +134,7 @@ namespace mongo {
if (IDHackStage::supportsQuery(*canonicalQuery) &&
collection->getIndexCatalog()->findIdIndex()) {
LOG(2) << "Using idhack: " << canonicalQuery->toStringShort();
- *out = new IDHackRunner(collection, canonicalQuery.release());
+ *out = new IDHackRunner(txn, collection, canonicalQuery.release());
return Status::OK();
}
@@ -179,7 +181,7 @@ namespace mongo {
WorkingSet* sharedWs = new WorkingSet();
PlanStage *root, *backupRoot=NULL;
- verify(StageBuilder::build(collection, *qs, sharedWs, &root));
+ verify(StageBuilder::build(txn, collection, *qs, sharedWs, &root));
if ((plannerParams.options & QueryPlannerParams::PRIVATE_IS_COUNT)
&& turnIxscanIntoCount(qs)) {
LOG(2) << "Using fast count: " << canonicalQuery->toStringShort()
@@ -190,7 +192,7 @@ namespace mongo {
}
}
else if (NULL != backupQs) {
- verify(StageBuilder::build(collection, *backupQs, sharedWs, &backupRoot));
+ verify(StageBuilder::build(txn, collection, *backupQs, sharedWs, &backupRoot));
}
// add a CachedPlanStage on top of the previous root
@@ -210,7 +212,7 @@ namespace mongo {
LOG(2) << "Running query as sub-queries: " << canonicalQuery->toStringShort();
SubplanRunner* runner;
- Status runnerStatus = SubplanRunner::make(collection, plannerParams,
+ Status runnerStatus = SubplanRunner::make(txn, collection, plannerParams,
canonicalQuery.release(), &runner);
if (!runnerStatus.isOK()) {
return runnerStatus;
@@ -220,10 +222,11 @@ namespace mongo {
return Status::OK();
}
- return getRunnerAlwaysPlan(collection, canonicalQuery.release(), plannerParams, out);
+ return getRunnerAlwaysPlan(txn, collection, canonicalQuery.release(), plannerParams, out);
}
- Status getRunnerAlwaysPlan(Collection* collection,
+ Status getRunnerAlwaysPlan(OperationContext* txn,
+ Collection* collection,
CanonicalQuery* rawCanonicalQuery,
const QueryPlannerParams& plannerParams,
Runner** out) {
@@ -266,7 +269,7 @@ namespace mongo {
// We're not going to cache anything that's fast count.
WorkingSet* ws = new WorkingSet();
PlanStage* root;
- verify(StageBuilder::build(collection, *solutions[i], ws, &root));
+ verify(StageBuilder::build(txn, collection, *solutions[i], ws, &root));
*out = new SingleSolutionRunner(collection,
canonicalQuery.release(),
solutions[i],
@@ -285,7 +288,7 @@ namespace mongo {
// Only one possible plan. Run it. Build the stages from the solution.
WorkingSet* ws = new WorkingSet();
PlanStage* root;
- verify(StageBuilder::build(collection, *solutions[0], ws, &root));
+ verify(StageBuilder::build(txn, collection, *solutions[0], ws, &root));
// And, run the plan.
*out = new SingleSolutionRunner(collection,
@@ -310,7 +313,7 @@ namespace mongo {
// version of StageBuild::build when WorkingSet is shared
PlanStage* nextPlanRoot;
- verify(StageBuilder::build(collection, *solutions[ix],
+ verify(StageBuilder::build(txn, collection, *solutions[ix],
sharedWorkingSet, &nextPlanRoot));
// Owns none of the arguments
@@ -506,7 +509,8 @@ namespace mongo {
} // namespace
- Status getRunnerCount(Collection* collection,
+ Status getRunnerCount(OperationContext* txn,
+ Collection* collection,
const BSONObj& query,
const BSONObj& hintObj,
Runner** out) {
@@ -525,14 +529,15 @@ namespace mongo {
&cq,
whereCallback));
- return getRunner(collection, cq, out, QueryPlannerParams::PRIVATE_IS_COUNT);
+ return getRunner(txn, collection, cq, out, QueryPlannerParams::PRIVATE_IS_COUNT);
}
//
// Distinct hack
//
- Status getRunnerDistinct(Collection* collection,
+ Status getRunnerDistinct(OperationContext* txn,
+ Collection* collection,
const BSONObj& query,
const string& field,
Runner** out) {
@@ -581,7 +586,7 @@ namespace mongo {
}
// Takes ownership of cq.
- return getRunner(collection, cq, out);
+ return getRunner(txn, collection, cq, out);
}
//
@@ -628,7 +633,7 @@ namespace mongo {
WorkingSet* ws = new WorkingSet();
PlanStage* root;
- verify(StageBuilder::build(collection, *soln, ws, &root));
+ verify(StageBuilder::build(txn, collection, *soln, ws, &root));
*out = new SingleSolutionRunner(collection, cq, soln, root, ws);
return Status::OK();
}
@@ -637,7 +642,7 @@ namespace mongo {
vector<QuerySolution*> solutions;
status = QueryPlanner::plan(*cq, plannerParams, &solutions);
if (!status.isOK()) {
- return getRunner(collection, cq, out);
+ return getRunner(txn, collection, cq, out);
}
// We look for a solution that has an ixscan we can turn into a distinctixscan
@@ -656,7 +661,7 @@ namespace mongo {
// Build and return the SSR over solutions[i].
WorkingSet* ws = new WorkingSet();
PlanStage* root;
- verify(StageBuilder::build(collection, *solutions[i], ws, &root));
+ verify(StageBuilder::build(txn, collection, *solutions[i], ws, &root));
*out = new SingleSolutionRunner(collection, cq, solutions[i], root, ws);
return Status::OK();
}
@@ -677,7 +682,7 @@ namespace mongo {
}
// Takes ownership of cq.
- return getRunner(collection, cq, out);
+ return getRunner(txn, collection, cq, out);
}
ScopedRunnerRegistration::ScopedRunnerRegistration(Runner* runner)
diff --git a/src/mongo/db/query/get_runner.h b/src/mongo/db/query/get_runner.h
index eabc78d3bdd..2d9dd75c88e 100644
--- a/src/mongo/db/query/get_runner.h
+++ b/src/mongo/db/query/get_runner.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -37,6 +37,7 @@
namespace mongo {
class Collection;
+ class OperationContext;
/**
* Get a runner for a query. Takes ownership of rawCanonicalQuery.
@@ -47,7 +48,8 @@ namespace mongo {
* If the query cannot be executed, returns a Status indicating why. Deletes
* rawCanonicalQuery.
*/
- Status getRunner(Collection* collection,
+ Status getRunner(OperationContext* txn,
+ Collection* collection,
CanonicalQuery* rawCanonicalQuery,
Runner** out,
size_t plannerOptions = 0);
@@ -63,7 +65,8 @@ namespace mongo {
* the returned runner. On failure, returns other status values, and '*outRunner' and
* '*outCanonicalQuery' have unspecified values.
*/
- Status getRunner(Collection* collection,
+ Status getRunner(OperationContext* txn,
+ Collection* collection,
const std::string& ns,
const BSONObj& unparsedQuery,
Runner** outRunner,
@@ -76,7 +79,8 @@ namespace mongo {
* possible values of a certain field. As such, we can skip lots of data in certain cases (see
* body of method for detail).
*/
- Status getRunnerDistinct(Collection* collection,
+ Status getRunnerDistinct(OperationContext* txn,
+ Collection* collection,
const BSONObj& query,
const std::string& field,
Runner** out);
@@ -88,7 +92,8 @@ namespace mongo {
* As such, with certain covered queries, we can skip the overhead of fetching etc. when
* executing a count.
*/
- Status getRunnerCount(Collection* collection,
+ Status getRunnerCount(OperationContext* txn,
+ Collection* collection,
const BSONObj& query,
const BSONObj& hintObj,
Runner** out);
@@ -96,7 +101,8 @@ namespace mongo {
/**
* Get a runner for a query. Ignores the cache and always plans the full query.
*/
- Status getRunnerAlwaysPlan(Collection* collection,
+ Status getRunnerAlwaysPlan(OperationContext* txn,
+ Collection* collection,
CanonicalQuery* rawCanonicalQuery,
const QueryPlannerParams& plannerParams,
Runner** out);
diff --git a/src/mongo/db/query/idhack_runner.cpp b/src/mongo/db/query/idhack_runner.cpp
index b9c0da6f82b..5382a4e4bfa 100644
--- a/src/mongo/db/query/idhack_runner.cpp
+++ b/src/mongo/db/query/idhack_runner.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright 2013 MongoDB Inc.
+ * Copyright 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -43,8 +43,11 @@
namespace mongo {
- IDHackRunner::IDHackRunner(const Collection* collection, CanonicalQuery* query)
- : _collection(collection),
+ IDHackRunner::IDHackRunner(OperationContext* txn,
+ const Collection* collection,
+ CanonicalQuery* query)
+ : _txn(txn),
+ _collection(collection),
_key(query->getQueryObj()["_id"].wrap()),
_query(query),
_killed(false),
@@ -52,8 +55,9 @@ namespace mongo {
_nscanned(0),
_nscannedObjects(0) { }
- IDHackRunner::IDHackRunner(Collection* collection, const BSONObj& key)
- : _collection(collection),
+ IDHackRunner::IDHackRunner(OperationContext* txn, Collection* collection, const BSONObj& key)
+ : _txn(txn),
+ _collection(collection),
_key(key),
_query(NULL),
_killed(false),
@@ -80,7 +84,7 @@ namespace mongo {
static_cast<const BtreeBasedAccessMethod*>(catalog->getIndex(idDesc));
// Look up the key by going directly to the Btree.
- DiskLoc loc = accessMethod->findSingle( _key );
+ DiskLoc loc = accessMethod->findSingle( _txn, _key );
// Key not found.
if (loc.isNull()) {
diff --git a/src/mongo/db/query/idhack_runner.h b/src/mongo/db/query/idhack_runner.h
index 23e2d02691c..a6747a6d83b 100644
--- a/src/mongo/db/query/idhack_runner.h
+++ b/src/mongo/db/query/idhack_runner.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -50,10 +50,14 @@ namespace mongo {
class IDHackRunner : public Runner {
public:
- /** Takes ownership of all the arguments -collection. */
- IDHackRunner(const Collection* collection, CanonicalQuery* query);
+ /** Takes ownership of all the arguments except 'txn' and 'collection'. */
+ IDHackRunner(OperationContext* txn,
+ const Collection* collection,
+ CanonicalQuery* query);
- IDHackRunner(Collection* collection, const BSONObj& key);
+ IDHackRunner(OperationContext* txn,
+ Collection* collection,
+ const BSONObj& key);
virtual ~IDHackRunner();
@@ -88,6 +92,9 @@ namespace mongo {
*/
BSONObj applyProjection(const BSONObj& docObj) const;
+ // Transactional context for read locks. Not owned by us.
+ OperationContext* _txn;
+
// Not owned here.
const Collection* _collection;
diff --git a/src/mongo/db/query/internal_plans.h b/src/mongo/db/query/internal_plans.h
index c6a576d80f9..dae7af1c521 100644
--- a/src/mongo/db/query/internal_plans.h
+++ b/src/mongo/db/query/internal_plans.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -38,6 +38,8 @@
namespace mongo {
+ class OperationContext;
+
/**
* The internal planner is a one-stop shop for "off-the-shelf" plans. Most internal procedures
* that do not require advanced queries could be served by plans already in here.
@@ -62,7 +64,8 @@ namespace mongo {
/**
* Return a collection scan. Caller owns pointer.
*/
- static Runner* collectionScan(const StringData& ns,
+ static Runner* collectionScan(OperationContext* txn,
+ const StringData& ns,
Collection* collection,
const Direction direction = FORWARD,
const DiskLoc startLoc = DiskLoc()) {
@@ -84,14 +87,15 @@ namespace mongo {
}
WorkingSet* ws = new WorkingSet();
- CollectionScan* cs = new CollectionScan(params, ws, NULL);
+ CollectionScan* cs = new CollectionScan(txn, params, ws, NULL);
return new InternalRunner(collection, cs, ws);
}
/**
* Return an index scan. Caller owns returned pointer.
*/
- static Runner* indexScan(const Collection* collection,
+ static Runner* indexScan(OperationContext* txn,
+ const Collection* collection,
const IndexDescriptor* descriptor,
const BSONObj& startKey, const BSONObj& endKey,
bool endKeyInclusive, Direction direction = FORWARD,
@@ -108,11 +112,11 @@ namespace mongo {
params.bounds.endKeyInclusive = endKeyInclusive;
WorkingSet* ws = new WorkingSet();
- IndexScan* ix = new IndexScan(params, ws, NULL);
+ IndexScan* ix = new IndexScan(txn, params, ws, NULL);
if (IXSCAN_FETCH & options) {
return new InternalRunner(
- collection, new FetchStage(ws, ix, NULL, collection), ws);
+ collection, new FetchStage(ws, ix, NULL, collection), ws);
}
else {
return new InternalRunner(collection, ix, ws);
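
A hypothetical call site for the revised helper: the context is now the first argument, the caller owns the returned Runner, and the Runner merely borrows 'txn' ('ns' and 'collection' come from the caller):

    auto_ptr<Runner> runner(
        InternalPlanner::collectionScan(txn, ns, collection));

    BSONObj obj;
    while (Runner::RUNNER_ADVANCED == runner->getNext(&obj, NULL)) {
        // consume 'obj' under the locks associated with 'txn'
    }
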
diff --git a/src/mongo/db/query/internal_runner.h b/src/mongo/db/query/internal_runner.h
index d6af5e43db2..d01a39606f6 100644
--- a/src/mongo/db/query/internal_runner.h
+++ b/src/mongo/db/query/internal_runner.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -39,11 +39,12 @@ namespace mongo {
class BSONObj;
class CanonicalQuery;
class DiskLoc;
+ class OperationContext;
class PlanExecutor;
+ struct PlanInfo;
class PlanStage;
struct QuerySolution;
class TypeExplain;
- struct PlanInfo;
class WorkingSet;
/**
diff --git a/src/mongo/db/query/new_find.cpp b/src/mongo/db/query/new_find.cpp
index ac4e4d1766b..db2afd847fb 100644
--- a/src/mongo/db/query/new_find.cpp
+++ b/src/mongo/db/query/new_find.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -365,7 +365,10 @@ namespace mongo {
return qr;
}
- Status getOplogStartHack(Collection* collection, CanonicalQuery* cq, Runner** runnerOut) {
+ Status getOplogStartHack(OperationContext* txn,
+ Collection* collection,
+ CanonicalQuery* cq,
+ Runner** runnerOut) {
if ( collection == NULL )
return Status(ErrorCodes::InternalError,
"getOplogStartHack called with a NULL collection" );
@@ -398,7 +401,7 @@ namespace mongo {
// Make an oplog start finding stage.
WorkingSet* oplogws = new WorkingSet();
- OplogStart* stage = new OplogStart(collection, tsExpr, oplogws);
+ OplogStart* stage = new OplogStart(txn, collection, tsExpr, oplogws);
// Takes ownership of ws and stage.
auto_ptr<InternalRunner> runner(new InternalRunner(collection, stage, oplogws));
@@ -408,7 +411,7 @@ namespace mongo {
Runner::RunnerState state = runner->getNext(NULL, &startLoc);
// This is normal. The start of the oplog is the beginning of the collection.
- if (Runner::RUNNER_EOF == state) { return getRunner(collection, cq, runnerOut); }
+ if (Runner::RUNNER_EOF == state) { return getRunner(txn, collection, cq, runnerOut); }
// This is not normal. An error was encountered.
if (Runner::RUNNER_ADVANCED != state) {
@@ -426,7 +429,7 @@ namespace mongo {
params.tailable = cq->getParsed().hasOption(QueryOption_CursorTailable);
WorkingSet* ws = new WorkingSet();
- CollectionScan* cs = new CollectionScan(params, ws, cq->root());
+ CollectionScan* cs = new CollectionScan(txn, params, ws, cq->root());
// Takes ownership of cq, cs, ws.
*runnerOut = new SingleSolutionRunner(collection, cq, NULL, cs, ws);
return Status::OK();
@@ -528,7 +531,7 @@ namespace mongo {
bb.skip(sizeof(QueryResult));
PlanExecutor* rawExec;
- Status execStatus = getExecutor(collection, cq, &rawExec, options);
+ Status execStatus = getExecutor(txn, collection, cq, &rawExec, options);
if (!execStatus.isOK()) {
uasserted(17510, "Explain error: " + execStatus.reason());
}
@@ -581,7 +584,7 @@ namespace mongo {
rawRunner = new EOFRunner(cq, cq->ns());
}
else if (pq.hasOption(QueryOption_OplogReplay)) {
- status = getOplogStartHack(collection, cq, &rawRunner);
+ status = getOplogStartHack(txn, collection, cq, &rawRunner);
}
else {
// Takes ownership of cq.
@@ -589,7 +592,7 @@ namespace mongo {
if (shardingState.needCollectionMetadata(pq.ns())) {
options |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
}
- status = getRunner(collection, cq, &rawRunner, options);
+ status = getRunner(txn, collection, cq, &rawRunner, options);
}
if (!status.isOK()) {
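
Plan acquisition follows the same rule on both the explain path (getExecutor) and the normal path (getRunner); a condensed, hypothetical caller with error handling shortened:

    Runner* rawRunner;
    Status status = getRunner(txn, collection, cq, &rawRunner, options);
    if (!status.isOK()) {
        return status;                  // no plan could be built
    }
    auto_ptr<Runner> runner(rawRunner); // caller owns the runner, not 'txn'
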
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index a332088c1b2..5e29d472dfb 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -53,7 +53,8 @@
namespace mongo {
- PlanStage* buildStages(Collection* collection,
+ PlanStage* buildStages(OperationContext* txn,
+ Collection* collection,
const QuerySolution& qsol,
const QuerySolutionNode* root,
WorkingSet* ws) {
@@ -65,7 +66,7 @@ namespace mongo {
params.direction = (csn->direction == 1) ? CollectionScanParams::FORWARD
: CollectionScanParams::BACKWARD;
params.maxScan = csn->maxScan;
- return new CollectionScan(params, ws, csn->filter.get());
+ return new CollectionScan(txn, params, ws, csn->filter.get());
}
else if (STAGE_IXSCAN == root->getType()) {
const IndexScanNode* ixn = static_cast<const IndexScanNode*>(root);
@@ -89,17 +90,17 @@ namespace mongo {
params.direction = ixn->direction;
params.maxScan = ixn->maxScan;
params.addKeyMetadata = ixn->addKeyMetadata;
- return new IndexScan(params, ws, ixn->filter.get());
+ return new IndexScan(txn, params, ws, ixn->filter.get());
}
else if (STAGE_FETCH == root->getType()) {
const FetchNode* fn = static_cast<const FetchNode*>(root);
- PlanStage* childStage = buildStages(collection, qsol, fn->children[0], ws);
+ PlanStage* childStage = buildStages(txn, collection, qsol, fn->children[0], ws);
if (NULL == childStage) { return NULL; }
return new FetchStage(ws, childStage, fn->filter.get(), collection);
}
else if (STAGE_SORT == root->getType()) {
const SortNode* sn = static_cast<const SortNode*>(root);
- PlanStage* childStage = buildStages(collection, qsol, sn->children[0], ws);
+ PlanStage* childStage = buildStages(txn, collection, qsol, sn->children[0], ws);
if (NULL == childStage) { return NULL; }
SortStageParams params;
params.collection = collection;
@@ -110,7 +111,7 @@ namespace mongo {
}
else if (STAGE_PROJECTION == root->getType()) {
const ProjectionNode* pn = static_cast<const ProjectionNode*>(root);
- PlanStage* childStage = buildStages(collection, qsol, pn->children[0], ws);
+ PlanStage* childStage = buildStages(txn, collection, qsol, pn->children[0], ws);
if (NULL == childStage) { return NULL; }
ProjectionStageParams params(WhereCallbackReal(collection->ns().db()));
@@ -135,13 +136,13 @@ namespace mongo {
}
else if (STAGE_LIMIT == root->getType()) {
const LimitNode* ln = static_cast<const LimitNode*>(root);
- PlanStage* childStage = buildStages(collection, qsol, ln->children[0], ws);
+ PlanStage* childStage = buildStages(txn, collection, qsol, ln->children[0], ws);
if (NULL == childStage) { return NULL; }
return new LimitStage(ln->limit, ws, childStage);
}
else if (STAGE_SKIP == root->getType()) {
const SkipNode* sn = static_cast<const SkipNode*>(root);
- PlanStage* childStage = buildStages(collection, qsol, sn->children[0], ws);
+ PlanStage* childStage = buildStages(txn, collection, qsol, sn->children[0], ws);
if (NULL == childStage) { return NULL; }
return new SkipStage(sn->skip, ws, childStage);
}
@@ -149,7 +150,7 @@ namespace mongo {
const AndHashNode* ahn = static_cast<const AndHashNode*>(root);
auto_ptr<AndHashStage> ret(new AndHashStage(ws, ahn->filter.get(), collection));
for (size_t i = 0; i < ahn->children.size(); ++i) {
- PlanStage* childStage = buildStages(collection, qsol, ahn->children[i], ws);
+ PlanStage* childStage = buildStages(txn, collection, qsol, ahn->children[i], ws);
if (NULL == childStage) { return NULL; }
ret->addChild(childStage);
}
@@ -159,7 +160,7 @@ namespace mongo {
const OrNode * orn = static_cast<const OrNode*>(root);
auto_ptr<OrStage> ret(new OrStage(ws, orn->dedup, orn->filter.get()));
for (size_t i = 0; i < orn->children.size(); ++i) {
- PlanStage* childStage = buildStages(collection, qsol, orn->children[i], ws);
+ PlanStage* childStage = buildStages(txn, collection, qsol, orn->children[i], ws);
if (NULL == childStage) { return NULL; }
ret->addChild(childStage);
}
@@ -169,7 +170,7 @@ namespace mongo {
const AndSortedNode* asn = static_cast<const AndSortedNode*>(root);
auto_ptr<AndSortedStage> ret(new AndSortedStage(ws, asn->filter.get(), collection));
for (size_t i = 0; i < asn->children.size(); ++i) {
- PlanStage* childStage = buildStages(collection, qsol, asn->children[i], ws);
+ PlanStage* childStage = buildStages(txn, collection, qsol, asn->children[i], ws);
if (NULL == childStage) { return NULL; }
ret->addChild(childStage);
}
@@ -182,7 +183,7 @@ namespace mongo {
params.pattern = msn->sort;
auto_ptr<MergeSortStage> ret(new MergeSortStage(params, ws, collection));
for (size_t i = 0; i < msn->children.size(); ++i) {
- PlanStage* childStage = buildStages(collection, qsol, msn->children[i], ws);
+ PlanStage* childStage = buildStages(txn, collection, qsol, msn->children[i], ws);
if (NULL == childStage) { return NULL; }
ret->addChild(childStage);
}
@@ -198,7 +199,7 @@ namespace mongo {
params.numWanted = node->numWanted;
params.addPointMeta = node->addPointMeta;
params.addDistMeta = node->addDistMeta;
- return new TwoDNear(params, ws);
+ return new TwoDNear(txn, params, ws);
}
else if (STAGE_GEO_NEAR_2DSPHERE == root->getType()) {
const GeoNear2DSphereNode* node = static_cast<const GeoNear2DSphereNode*>(root);
@@ -210,7 +211,7 @@ namespace mongo {
params.filter = node->filter.get();
params.addPointMeta = node->addPointMeta;
params.addDistMeta = node->addDistMeta;
- return new S2NearStage(params, ws);
+ return new S2NearStage(txn, params, ws);
}
else if (STAGE_TEXT == root->getType()) {
const TextNode* node = static_cast<const TextNode*>(root);
@@ -245,18 +246,18 @@ namespace mongo {
return NULL;
}
- return new TextStage(params, ws, node->filter.get());
+ return new TextStage(txn, params, ws, node->filter.get());
}
else if (STAGE_SHARDING_FILTER == root->getType()) {
const ShardingFilterNode* fn = static_cast<const ShardingFilterNode*>(root);
- PlanStage* childStage = buildStages(collection, qsol, fn->children[0], ws);
+ PlanStage* childStage = buildStages(txn, collection, qsol, fn->children[0], ws);
if (NULL == childStage) { return NULL; }
return new ShardFilterStage(shardingState.getCollectionMetadata(collection->ns()),
ws, childStage);
}
else if (STAGE_KEEP_MUTATIONS == root->getType()) {
const KeepMutationsNode* km = static_cast<const KeepMutationsNode*>(root);
- PlanStage* childStage = buildStages(collection, qsol, km->children[0], ws);
+ PlanStage* childStage = buildStages(txn, collection, qsol, km->children[0], ws);
if (NULL == childStage) { return NULL; }
return new KeepMutationsStage(km->filter.get(), ws, childStage);
}
@@ -275,7 +276,7 @@ namespace mongo {
params.direction = dn->direction;
params.bounds = dn->bounds;
params.fieldNo = dn->fieldNo;
- return new DistinctScan(params, ws);
+ return new DistinctScan(txn, params, ws);
}
else if (STAGE_COUNT == root->getType()) {
const CountNode* cn = static_cast<const CountNode*>(root);
@@ -294,7 +295,7 @@ namespace mongo {
params.endKey = cn->endKey;
params.endKeyInclusive = cn->endKeyInclusive;
- return new Count(params, ws);
+ return new Count(txn, params, ws);
}
else {
mongoutils::str::stream ss;
@@ -306,14 +307,15 @@ namespace mongo {
}
// static (this one is used for Cached and MultiPlanStage)
- bool StageBuilder::build(Collection* collection,
+ bool StageBuilder::build(OperationContext* txn,
+ Collection* collection,
const QuerySolution& solution,
WorkingSet* wsIn,
PlanStage** rootOut) {
if (NULL == wsIn || NULL == rootOut) { return false; }
QuerySolutionNode* solutionNode = solution.root.get();
if (NULL == solutionNode) { return false; }
- return NULL != (*rootOut = buildStages(collection, solution, solutionNode, wsIn));
+ return NULL != (*rootOut = buildStages(txn, collection, solution, solutionNode, wsIn));
}
} // namespace mongo
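
Because buildStages re-passes 'txn' at every recursive call, callers supply it exactly once at the root; a hedged sketch of the entry point in use ('solution' is a planned QuerySolution provided by the caller):

    WorkingSet* ws = new WorkingSet();
    PlanStage* root;
    if (StageBuilder::build(txn, collection, *solution, ws, &root)) {
        // 'root' heads a stage tree in which every stage holds the same
        // non-owned OperationContext*.
    }
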
diff --git a/src/mongo/db/query/stage_builder.h b/src/mongo/db/query/stage_builder.h
index 9f3c9f20d60..c9c88e800bd 100644
--- a/src/mongo/db/query/stage_builder.h
+++ b/src/mongo/db/query/stage_builder.h
@@ -34,6 +34,8 @@
namespace mongo {
+ class OperationContext;
+
/**
* The StageBuilder converts a QuerySolution to an executable tree of PlanStage(s).
*/
@@ -47,7 +49,8 @@ namespace mongo {
*
* Returns false otherwise. *rootOut and *wsOut are invalid.
*/
- static bool build(Collection* collection,
+ static bool build(OperationContext* txn,
+ Collection* collection,
const QuerySolution& solution,
WorkingSet* wsIn,
PlanStage** rootOut);
diff --git a/src/mongo/db/query/subplan_runner.cpp b/src/mongo/db/query/subplan_runner.cpp
index 765bd236a90..c107f829c77 100644
--- a/src/mongo/db/query/subplan_runner.cpp
+++ b/src/mongo/db/query/subplan_runner.cpp
@@ -93,11 +93,12 @@ namespace mongo {
}
// static
- Status SubplanRunner::make(Collection* collection,
+ Status SubplanRunner::make(OperationContext* txn,
+ Collection* collection,
const QueryPlannerParams& params,
CanonicalQuery* cq,
SubplanRunner** out) {
- auto_ptr<SubplanRunner> autoRunner(new SubplanRunner(collection, params, cq));
+ auto_ptr<SubplanRunner> autoRunner(new SubplanRunner(txn, collection, params, cq));
Status planningStatus = autoRunner->planSubqueries();
if (!planningStatus.isOK()) {
return planningStatus;
@@ -107,10 +108,12 @@ namespace mongo {
return Status::OK();
}
- SubplanRunner::SubplanRunner(Collection* collection,
+ SubplanRunner::SubplanRunner(OperationContext* txn,
+ Collection* collection,
const QueryPlannerParams& params,
CanonicalQuery* cq)
- : _state(SubplanRunner::PLANNING),
+ : _txn(txn),
+ _state(SubplanRunner::PLANNING),
_collection(collection),
_plannerParams(params),
_query(cq),
@@ -150,7 +153,8 @@ namespace mongo {
Runner* runner;
Status status = getRunnerAlwaysPlan(
- _collection, _query.release(), _plannerParams, &runner);
+            _txn, _collection, _query.release(), _plannerParams, &runner);
if (!status.isOK()) {
// We utterly failed.
@@ -315,7 +319,8 @@ namespace mongo {
// Dump all the solutions into the MPR.
for (size_t ix = 0; ix < solutions.size(); ++ix) {
PlanStage* nextPlanRoot;
- verify(StageBuilder::build(_collection,
+ verify(StageBuilder::build(_txn,
+ _collection,
*solutions[ix],
sharedWorkingSet,
&nextPlanRoot));
@@ -404,7 +409,7 @@ namespace mongo {
MultiPlanStage* multiPlanStage = new MultiPlanStage(_collection, _query.get());
WorkingSet* ws = new WorkingSet();
PlanStage* root;
- verify(StageBuilder::build(_collection, *soln, ws, &root));
+ verify(StageBuilder::build(_txn, _collection, *soln, ws, &root));
multiPlanStage->addPlan(soln, root, ws); // Takes ownership first two arguments.
multiPlanStage->pickBestPlan();
diff --git a/src/mongo/db/query/subplan_runner.h b/src/mongo/db/query/subplan_runner.h
index 907fe719363..896772450ad 100644
--- a/src/mongo/db/query/subplan_runner.h
+++ b/src/mongo/db/query/subplan_runner.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -42,6 +42,7 @@ namespace mongo {
class BSONObj;
class CanonicalQuery;
class DiskLoc;
+ class OperationContext;
class TypeExplain;
struct PlanInfo;
@@ -53,7 +54,8 @@ namespace mongo {
*
* 'out' is valid only if an OK status is returned.
*/
- static Status make(Collection* collection,
+ static Status make(OperationContext* txn,
+ Collection* collection,
const QueryPlannerParams& params,
CanonicalQuery* cq,
SubplanRunner** out);
@@ -94,7 +96,8 @@ namespace mongo {
Status planSubqueries();
private:
- SubplanRunner(Collection* collection,
+ SubplanRunner(OperationContext* txn,
+ Collection* collection,
const QueryPlannerParams& params,
CanonicalQuery* cq);
@@ -105,6 +108,9 @@ namespace mongo {
RUNNING,
};
+        // Transactional context for read locks. Not owned here.
+ OperationContext* _txn;
+
SubplanRunnerState _state;
Collection* _collection;
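
Construction goes through the static factory, which likewise takes the context first; a hypothetical caller ('plannerParams' and 'cq' supplied by the caller, 'rawRunner' valid only on an OK status):

    SubplanRunner* rawRunner;
    Status status = SubplanRunner::make(txn, collection, plannerParams,
                                        cq, &rawRunner);
    if (status.isOK()) {
        boost::scoped_ptr<SubplanRunner> runner(rawRunner);
    }
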
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 54afae7e969..00c7e40d86f 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -1,5 +1,5 @@
/**
-* Copyright (C) 2008 10gen Inc.
+* Copyright (C) 2008-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -252,7 +252,8 @@ namespace repl {
// add if missing
int n = 0;
auto_ptr<Runner> runner(
- InternalPlanner::collectionScan(localSources,
+ InternalPlanner::collectionScan(txn,
+ localSources,
ctx.db()->getCollection(txn, localSources)));
BSONObj obj;
Runner::RunnerState state;
@@ -296,7 +297,8 @@ namespace repl {
}
auto_ptr<Runner> runner(
- InternalPlanner::collectionScan(localSources,
+ InternalPlanner::collectionScan(txn,
+ localSources,
ctx.db()->getCollection(txn, localSources)));
BSONObj obj;
Runner::RunnerState state;
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index dbe5fd62b79..f8414766eea 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -1,7 +1,7 @@
// @file oplog.cpp
/**
-* Copyright (C) 2008 10gen Inc.
+* Copyright (C) 2008-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -473,7 +473,7 @@ namespace repl {
if ( collection ) {
if (replSettings.oplogSize != 0) {
- int o = (int)(collection->getRecordStore()->storageSize() / ( 1024 * 1024 ) );
+ int o = (int)(collection->getRecordStore()->storageSize(&txn) / ( 1024 * 1024 ) );
int n = (int)(replSettings.oplogSize / (1024 * 1024));
if ( n != o ) {
stringstream ss;
diff --git a/src/mongo/db/repl/repl_info.cpp b/src/mongo/db/repl/repl_info.cpp
index 17f5b893cd5..740cc8f044a 100644
--- a/src/mongo/db/repl/repl_info.cpp
+++ b/src/mongo/db/repl/repl_info.cpp
@@ -1,5 +1,5 @@
/**
-* Copyright (C) 2008 10gen Inc.
+* Copyright (C) 2008-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -83,7 +83,8 @@ namespace repl {
const char* localSources = "local.sources";
Client::ReadContext ctx(txn, localSources);
auto_ptr<Runner> runner(
- InternalPlanner::collectionScan(localSources,
+ InternalPlanner::collectionScan(txn,
+ localSources,
ctx.ctx().db()->getCollection(txn,
localSources)));
BSONObj obj;
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 321885f60d7..32093b02374 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -1,6 +1,6 @@
/* @file rs_rollback.cpp
*
-* Copyright (C) 2008 10gen Inc.
+* Copyright (C) 2008-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -237,7 +237,8 @@ namespace repl {
Client::Context ctx(txn, rsoplog);
boost::scoped_ptr<Runner> runner(
- InternalPlanner::collectionScan(rsoplog,
+ InternalPlanner::collectionScan(txn,
+ rsoplog,
ctx.db()->getCollection(txn, rsoplog),
InternalPlanner::BACKWARD));
diff --git a/src/mongo/db/storage/heap1/heap1_btree_impl.cpp b/src/mongo/db/storage/heap1/heap1_btree_impl.cpp
index 25695ccef65..2d5ae2fc63b 100644
--- a/src/mongo/db/storage/heap1/heap1_btree_impl.cpp
+++ b/src/mongo/db/storage/heap1/heap1_btree_impl.cpp
@@ -279,12 +279,12 @@ namespace {
}
- virtual void fullValidate(long long *numKeysOut) {
+ virtual void fullValidate(OperationContext* txn, long long *numKeysOut) {
// TODO check invariants?
*numKeysOut = _data->size();
}
- virtual Status dupKeyCheck(const BSONObj& key, const DiskLoc& loc) {
+ virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const DiskLoc& loc) {
invariant(!hasFieldNames(key));
if (isDup(*_data, key, loc))
return dupKeyError(key);
@@ -302,8 +302,9 @@ namespace {
class ForwardCursor : public BtreeInterface::Cursor {
public:
- ForwardCursor(const IndexSet& data)
- : _data(data),
+ ForwardCursor(const IndexSet& data, OperationContext* txn)
+ : _txn(txn),
+ _data(data),
_it(data.end())
{}
@@ -387,6 +388,7 @@ namespace {
}
private:
+ OperationContext* _txn; // not owned
const IndexSet& _data;
IndexSet::const_iterator _it;
@@ -399,8 +401,9 @@ namespace {
// TODO see if this can share any code with ForwardIterator
class ReverseCursor : public BtreeInterface::Cursor {
public:
- ReverseCursor(const IndexSet& data)
- : _data(data),
+ ReverseCursor(const IndexSet& data, OperationContext* txn)
+ : _txn(txn),
+ _data(data),
_it(data.rend())
{}
@@ -499,6 +502,7 @@ namespace {
return IndexSet::const_reverse_iterator(it);
}
+ OperationContext* _txn; // not owned
const IndexSet& _data;
IndexSet::const_reverse_iterator _it;
@@ -508,12 +512,12 @@ namespace {
DiskLoc _savedLoc;
};
- virtual BtreeInterface::Cursor* newCursor(int direction) const {
+ virtual BtreeInterface::Cursor* newCursor(OperationContext* txn, int direction) const {
if (direction == 1)
- return new ForwardCursor(*_data);
+ return new ForwardCursor(*_data, txn);
invariant(direction == -1);
- return new ReverseCursor(*_data);
+ return new ReverseCursor(*_data, txn);
}
virtual Status initAsEmpty(OperationContext* txn) {
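
Cursor construction is the interesting read path here: the factory binds the caller's context into the cursor for its whole lifetime. A hedged sketch against the interface ('btree' is the caller's BtreeInterface*; both it and 'txn' must outlive the cursor):

    boost::scoped_ptr<BtreeInterface::Cursor> cursor(btree->newCursor(txn, 1));
    if (cursor->locate(key, DiskLoc())) {
        cursor->advance();   // each step reads under the same context
    }
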
diff --git a/src/mongo/db/storage/heap1/heap1_database_catalog_entry.h b/src/mongo/db/storage/heap1/heap1_database_catalog_entry.h
index 825477c5ffb..1268d26ff3e 100644
--- a/src/mongo/db/storage/heap1/heap1_database_catalog_entry.h
+++ b/src/mongo/db/storage/heap1/heap1_database_catalog_entry.h
@@ -150,7 +150,7 @@ namespace mongo {
const StringData& idxName,
long long newExpireSeconds );
- CollectionOptions getCollectionOptions() const { return options; }
+ CollectionOptions getCollectionOptions(OperationContext* txn) const { return options; }
CollectionOptions options;
scoped_ptr<HeapRecordStore> rs;
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
index cf1d805900d..a9ed7d8650f 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
@@ -180,7 +180,7 @@ namespace mongo {
invariant( details );
RecordStoreV1Base* systemIndexRecordStore = _getIndexRecordStore();
- scoped_ptr<RecordIterator> it( systemIndexRecordStore->getIterator() );
+ scoped_ptr<RecordIterator> it( systemIndexRecordStore->getIterator(txn) );
while ( !it->isEOF() ) {
DiskLoc loc = it->getNext();
@@ -289,7 +289,7 @@ namespace mongo {
BSONObj oldSpec;
{
RecordStoreV1Base* rs = _getNamespaceRecordStore();
- scoped_ptr<RecordIterator> it( rs->getIterator() );
+ scoped_ptr<RecordIterator> it( rs->getIterator(txn) );
while ( !it->isEOF() ) {
DiskLoc loc = it->getNext();
BSONObj entry = it->dataFor( loc ).toBson();
@@ -481,7 +481,7 @@ namespace mongo {
&_extentManager,
false ) );
- if ( nsEntry->recordStore->storageSize() == 0 )
+ if ( nsEntry->recordStore->storageSize( txn ) == 0 )
nsEntry->recordStore->increaseStorageSize( txn, _extentManager.initialSize( 128 ), -1 );
}
@@ -497,7 +497,7 @@ namespace mongo {
&_extentManager,
true ) );
- if ( indexEntry->recordStore->storageSize() == 0 )
+ if ( indexEntry->recordStore->storageSize( txn ) == 0 )
indexEntry->recordStore->increaseStorageSize( txn, _extentManager.initialSize( 128 ), -1 );
}
@@ -581,10 +581,10 @@ namespace mongo {
// Must do this at least once, otherwise we leave the collection with no
// extents, which is invalid.
int sz = _massageExtentSize( &_extentManager,
- options.cappedSize - rs->storageSize() );
+ options.cappedSize - rs->storageSize(txn) );
sz &= 0xffffff00;
rs->increaseStorageSize( txn, sz, -1 );
- } while( rs->storageSize() < options.cappedSize );
+ } while( rs->storageSize(txn) < options.cappedSize );
}
else {
rs->increaseStorageSize( txn, _extentManager.initialSize( 128 ), -1 );
@@ -773,7 +773,7 @@ namespace mongo {
RecordStoreV1Base* rs = _getNamespaceRecordStore();
invariant( rs );
- scoped_ptr<RecordIterator> it( rs->getIterator() );
+ scoped_ptr<RecordIterator> it( rs->getIterator(txn) );
while ( !it->isEOF() ) {
DiskLoc loc = it->getNext();
BSONObj entry = it->dataFor( loc ).toBson();
@@ -785,7 +785,8 @@ namespace mongo {
}
}
- CollectionOptions MMAPV1DatabaseCatalogEntry::getCollectionOptions( const StringData& ns ) const {
+ CollectionOptions MMAPV1DatabaseCatalogEntry::getCollectionOptions( OperationContext* txn,
+ const StringData& ns ) const {
if ( nsToCollectionSubstring( ns ) == "system.namespaces" ) {
return CollectionOptions();
}
@@ -793,7 +794,7 @@ namespace mongo {
RecordStoreV1Base* rs = _getNamespaceRecordStore();
invariant( rs );
- scoped_ptr<RecordIterator> it( rs->getIterator() );
+ scoped_ptr<RecordIterator> it( rs->getIterator(txn) );
while ( !it->isEOF() ) {
DiskLoc loc = it->getNext();
BSONObj entry = it->dataFor( loc ).toBson();
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
index 9215ef37d93..16a88b84ede 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
@@ -101,7 +101,8 @@ namespace mongo {
const MmapV1ExtentManager* getExtentManager() const { return &_extentManager; }
MmapV1ExtentManager* getExtentManager() { return &_extentManager; }
- CollectionOptions getCollectionOptions( const StringData& ns ) const;
+ CollectionOptions getCollectionOptions( OperationContext* txn,
+ const StringData& ns ) const;
private:
diff --git a/src/mongo/db/storage/mmap_v1/repair_database.cpp b/src/mongo/db/storage/mmap_v1/repair_database.cpp
index 836827a0ee6..8fb06a5eae6 100644
--- a/src/mongo/db/storage/mmap_v1/repair_database.cpp
+++ b/src/mongo/db/storage/mmap_v1/repair_database.cpp
@@ -345,9 +345,10 @@ namespace mongo {
Client::Context ctx(txn, ns );
Collection* coll = originalDatabase->getCollection( txn, ns );
if ( coll ) {
- scoped_ptr<RecordIterator> it( coll->getIterator( DiskLoc(),
- false,
- CollectionScanParams::FORWARD ) );
+ scoped_ptr<RecordIterator> it( coll->getIterator( txn,
+ DiskLoc(),
+ false,
+ CollectionScanParams::FORWARD ) );
while ( !it->isEOF() ) {
DiskLoc loc = it->getNext();
BSONObj obj = coll->docFor( loc );
@@ -411,9 +412,9 @@ namespace mongo {
}
- scoped_ptr<RecordIterator> iterator( originalCollection->getIterator( DiskLoc(),
- false,
- CollectionScanParams::FORWARD ) );
+ scoped_ptr<RecordIterator> iterator(
+ originalCollection->getIterator( txn, DiskLoc(), false,
+ CollectionScanParams::FORWARD ));
while ( !iterator->isEOF() ) {
DiskLoc loc = iterator->getNext();
invariant( !loc.isNull() );
diff --git a/src/mongo/db/storage/rocks/rocks_btree_impl.cpp b/src/mongo/db/storage/rocks/rocks_btree_impl.cpp
index 385081bbf2d..00cbbf1c580 100644
--- a/src/mongo/db/storage/rocks/rocks_btree_impl.cpp
+++ b/src/mongo/db/storage/rocks/rocks_btree_impl.cpp
@@ -202,6 +202,7 @@ namespace mongo {
}
scoped_ptr<rocksdb::Iterator> _iterator;
+ OperationContext* _txn; // not owned
bool _direction;
mutable bool _cached;
@@ -231,7 +232,7 @@ namespace mongo {
if ( !dupsAllowed ) {
// XXX: this is slow
- Status status = dupKeyCheck( key, loc );
+ Status status = dupKeyCheck( txn, key, loc );
if ( !status.isOK() )
return status;
}
@@ -263,12 +264,12 @@ namespace mongo {
            return 1; // XXX: fix? does it matter since it's so slow to check?
}
- Status RocksBtreeImpl::dupKeyCheck(const BSONObj& key, const DiskLoc& loc) {
+ Status RocksBtreeImpl::dupKeyCheck(OperationContext* txn, const BSONObj& key, const DiskLoc& loc) {
// XXX: not done yet!
return Status::OK();
}
- void RocksBtreeImpl::fullValidate(long long* numKeysOut) {
+ void RocksBtreeImpl::fullValidate(OperationContext* txn, long long* numKeysOut) {
// XXX: no key counts
if ( numKeysOut )
numKeysOut[0] = -1;
@@ -284,9 +285,11 @@ namespace mongo {
return Status::OK();
}
- BtreeInterface::Cursor* RocksBtreeImpl::newCursor(int direction) const {
+ BtreeInterface::Cursor* RocksBtreeImpl::newCursor(OperationContext* txn,
+ int direction) const {
return new RocksCursor( _db->NewIterator( rocksdb::ReadOptions(),
_columnFamily ),
+ txn,
direction );
}
diff --git a/src/mongo/db/storage/rocks/rocks_btree_impl.h b/src/mongo/db/storage/rocks/rocks_btree_impl.h
index 5bb419f1d55..2a15e46aad5 100644
--- a/src/mongo/db/storage/rocks/rocks_btree_impl.h
+++ b/src/mongo/db/storage/rocks/rocks_btree_impl.h
@@ -64,15 +64,17 @@ namespace mongo {
const BSONObj& key,
const DiskLoc& loc);
- virtual Status dupKeyCheck(const BSONObj& key, const DiskLoc& loc);
+ virtual Status dupKeyCheck(OperationContext* txn,
+ const BSONObj& key,
+ const DiskLoc& loc);
- virtual void fullValidate(long long* numKeysOut);
+ virtual void fullValidate(OperationContext* txn, long long* numKeysOut);
virtual bool isEmpty();
virtual Status touch(OperationContext* txn) const;
- virtual Cursor* newCursor(int direction) const;
+        virtual Cursor* newCursor(OperationContext* txn, int direction) const;
virtual Status initAsEmpty(OperationContext* txn);
diff --git a/src/mongo/db/storage/rocks/rocks_collection_catalog_entry.cpp b/src/mongo/db/storage/rocks/rocks_collection_catalog_entry.cpp
index 5b45242a9ec..fdbae7d4442 100644
--- a/src/mongo/db/storage/rocks/rocks_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/rocks/rocks_collection_catalog_entry.cpp
@@ -53,7 +53,7 @@ namespace mongo {
_metaDataKey = string("metadata-") + ns.toString();
}
- CollectionOptions RocksCollectionCatalogEntry::getCollectionOptions() const {
+ CollectionOptions RocksCollectionCatalogEntry::getCollectionOptions( OperationContext* txn ) const {
// todo: put more options in here?
return CollectionOptions();
}
diff --git a/src/mongo/db/storage/rocks/rocks_collection_catalog_entry.h b/src/mongo/db/storage/rocks/rocks_collection_catalog_entry.h
index a32387a2da7..6a9a71804bf 100644
--- a/src/mongo/db/storage/rocks/rocks_collection_catalog_entry.h
+++ b/src/mongo/db/storage/rocks/rocks_collection_catalog_entry.h
@@ -42,7 +42,7 @@ namespace mongo {
virtual ~RocksCollectionCatalogEntry(){}
- virtual CollectionOptions getCollectionOptions() const;
+ virtual CollectionOptions getCollectionOptions(OperationContext* txn) const;
// ------- indexes ----------
diff --git a/src/mongo/db/storage/rocks/rocks_record_store.cpp b/src/mongo/db/storage/rocks/rocks_record_store.cpp
index 06ae8bf6242..664e643b760 100644
--- a/src/mongo/db/storage/rocks/rocks_record_store.cpp
+++ b/src/mongo/db/storage/rocks/rocks_record_store.cpp
@@ -180,7 +180,8 @@ namespace mongo {
return Status::OK();
}
- RecordIterator* RocksRecordStore::getIterator( const DiskLoc& start,
+ RecordIterator* RocksRecordStore::getIterator( OperationContext* txn,
+ const DiskLoc& start,
bool tailable,
const CollectionScanParams::Direction& dir
) const {
@@ -191,11 +192,11 @@ namespace mongo {
}
- RecordIterator* RocksRecordStore::getIteratorForRepair() const {
- return getIterator();
+ RecordIterator* RocksRecordStore::getIteratorForRepair(OperationContext* txn) const {
+ return getIterator(txn);
}
- std::vector<RecordIterator*> RocksRecordStore::getManyIterators() const {
+ std::vector<RecordIterator*> RocksRecordStore::getManyIterators(OperationContext* txn) const {
invariant( false );
}
@@ -218,7 +219,9 @@ namespace mongo {
return Status::OK();
}
- void RocksRecordStore::appendCustomStats( BSONObjBuilder* result, double scale ) const {
+ void RocksRecordStore::appendCustomStats( OperationContext* txn,
+ BSONObjBuilder* result,
+ double scale ) const {
string statsString;
bool valid = _db->GetProperty( _columnFamily, "rocksdb.stats", &statsString );
invariant( valid );
diff --git a/src/mongo/db/storage/rocks/rocks_record_store.h b/src/mongo/db/storage/rocks/rocks_record_store.h
index 492e8079fc6..eeaf8df9882 100644
--- a/src/mongo/db/storage/rocks/rocks_record_store.h
+++ b/src/mongo/db/storage/rocks/rocks_record_store.h
@@ -90,15 +90,16 @@ namespace mongo {
const char* damangeSource,
const mutablebson::DamageVector& damages );
- virtual RecordIterator* getIterator( const DiskLoc& start = DiskLoc(),
+ virtual RecordIterator* getIterator( OperationContext* txn,
+ const DiskLoc& start = DiskLoc(),
bool tailable = false,
const CollectionScanParams::Direction& dir =
CollectionScanParams::FORWARD
) const;
- virtual RecordIterator* getIteratorForRepair() const;
+ virtual RecordIterator* getIteratorForRepair( OperationContext* txn ) const;
- virtual std::vector<RecordIterator*> getManyIterators() const;
+ virtual std::vector<RecordIterator*> getManyIterators( OperationContext* txn ) const;
virtual Status truncate( OperationContext* txn );
@@ -114,7 +115,9 @@ namespace mongo {
ValidateAdaptor* adaptor,
ValidateResults* results, BSONObjBuilder* output ) const;
- virtual void appendCustomStats( BSONObjBuilder* result, double scale ) const;
+ virtual void appendCustomStats( OperationContext* txn,
+ BSONObjBuilder* result,
+ double scale ) const;
virtual Status touch( OperationContext* txn, BSONObjBuilder* output ) const;
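
The record-store read API follows the same rule; the canonical iteration loop, as it now appears at call sites throughout this commit ('rs' is the caller's RecordStore*):

    scoped_ptr<RecordIterator> it( rs->getIterator( txn ) );
    while ( !it->isEOF() ) {
        DiskLoc loc = it->getNext();
        BSONObj entry = it->dataFor( loc ).toBson(); // read under 'txn'
    }
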
diff --git a/src/mongo/db/storage/rocks/rocks_record_store_test.cpp b/src/mongo/db/storage/rocks/rocks_record_store_test.cpp
index 119aacb565e..4c003da75f9 100644
--- a/src/mongo/db/storage/rocks/rocks_record_store_test.cpp
+++ b/src/mongo/db/storage/rocks/rocks_record_store_test.cpp
@@ -330,8 +330,9 @@ namespace mongo {
}
{
+ MyOperationContext opCtx( db.get() );
BSONObjBuilder b;
- rs.appendCustomStats( &b, 1 );
+ rs.appendCustomStats( &opCtx, &b, 1 );
BSONObj obj = b.obj();
ASSERT( obj["stats"].String().find( "WAL" ) != string::npos );
}
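
Tests adapt by materializing a context before any read; a hedged sketch of the pattern in the hunk above (MyOperationContext is the test fixture's context type):

    MyOperationContext opCtx( db.get() );
    BSONObjBuilder b;
    rs.appendCustomStats( &opCtx, &b, 1 );
    ASSERT( b.obj().hasField( "stats" ) );
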
diff --git a/src/mongo/db/structure/SConscript b/src/mongo/db/structure/SConscript
index 360876db22a..f6163c4aaf4 100644
--- a/src/mongo/db/structure/SConscript
+++ b/src/mongo/db/structure/SConscript
@@ -10,6 +10,7 @@ env.Library(
LIBDEPS= [
'$BUILD_DIR/mongo/bson',
'$BUILD_DIR/mongo/db/commands/server_status_core',
+ '$BUILD_DIR/mongo/db/concurrency/lock_mgr',
'$BUILD_DIR/mongo/foundation',
'$BUILD_DIR/mongo/mongocommon',
]
diff --git a/src/mongo/db/structure/btree/btree_interface.cpp b/src/mongo/db/structure/btree/btree_interface.cpp
index 46e3997f84f..ef5efccf832 100644
--- a/src/mongo/db/structure/btree/btree_interface.cpp
+++ b/src/mongo/db/structure/btree/btree_interface.cpp
@@ -98,12 +98,14 @@ namespace mongo {
return _btree->unindex(txn, key, loc);
}
- virtual void fullValidate(long long *numKeysOut) {
- *numKeysOut = _btree->fullValidate(NULL, false, false, 0);
+ virtual void fullValidate(OperationContext* txn, long long *numKeysOut) {
+ *numKeysOut = _btree->fullValidate(txn, NULL, false, false, 0);
}
- virtual Status dupKeyCheck(const BSONObj& key, const DiskLoc& loc) {
- return _btree->dupKeyCheck(key, loc);
+ virtual Status dupKeyCheck(OperationContext* txn,
+ const BSONObj& key,
+ const DiskLoc& loc) {
+ return _btree->dupKeyCheck(txn, key, loc);
}
virtual bool isEmpty() {
@@ -116,8 +118,11 @@ namespace mongo {
class Cursor : public BtreeInterface::Cursor {
public:
- Cursor(const BtreeLogic<OnDiskFormat>* btree, int direction)
- : _btree(btree),
+ Cursor(OperationContext* txn,
+ const BtreeLogic<OnDiskFormat>* btree,
+ int direction)
+ : _txn(txn),
+ _btree(btree),
_direction(direction),
                  _bucket(btree->getHead()), // XXX this shouldn't be necessary, but is.
_ofs(0) {
@@ -142,7 +147,7 @@ namespace mongo {
}
virtual bool locate(const BSONObj& key, const DiskLoc& loc) {
- return _btree->locate(key, loc, _direction, &_ofs, &_bucket);
+ return _btree->locate(_txn, key, loc, _direction, &_ofs, &_bucket);
}
virtual void customLocate(const BSONObj& keyBegin,
@@ -151,7 +156,8 @@ namespace mongo {
const vector<const BSONElement*>& keyEnd,
const vector<bool>& keyEndInclusive) {
- _btree->customLocate(&_bucket,
+ _btree->customLocate(_txn,
+ &_bucket,
&_ofs,
keyBegin,
keyBeginLen,
@@ -167,7 +173,8 @@ namespace mongo {
const vector<const BSONElement*>& keyEnd,
const vector<bool>& keyEndInclusive) {
- _btree->advanceTo(&_bucket,
+ _btree->advanceTo(_txn,
+ &_bucket,
&_ofs,
keyBegin,
keyBeginLen,
@@ -186,7 +193,7 @@ namespace mongo {
}
virtual void advance() {
- _btree->advance(&_bucket, &_ofs, _direction);
+ _btree->advance(_txn, &_bucket, &_ofs, _direction);
}
virtual void savePosition() {
@@ -198,7 +205,8 @@ namespace mongo {
virtual void restorePosition() {
if (!_bucket.isNull()) {
- _btree->restorePosition(_savedKey,
+ _btree->restorePosition(_txn,
+ _savedKey,
_savedLoc,
_direction,
&_bucket,
@@ -207,6 +215,7 @@ namespace mongo {
}
private:
+ OperationContext* _txn; // not owned
const BtreeLogic<OnDiskFormat>* const _btree;
const int _direction;
@@ -218,8 +227,8 @@ namespace mongo {
DiskLoc _savedLoc;
};
- virtual Cursor* newCursor(int direction) const {
- return new Cursor(_btree.get(), direction);
+ virtual Cursor* newCursor(OperationContext* txn, int direction) const {
+ return new Cursor(txn, _btree.get(), direction);
}
virtual Status initAsEmpty(OperationContext* txn) {
diff --git a/src/mongo/db/structure/btree/btree_interface.h b/src/mongo/db/structure/btree/btree_interface.h
index 92339c1fe63..845a02d467a 100644
--- a/src/mongo/db/structure/btree/btree_interface.h
+++ b/src/mongo/db/structure/btree/btree_interface.h
@@ -107,14 +107,16 @@ namespace mongo {
const DiskLoc& loc) = 0;
// TODO: Hide this by exposing an update method?
- virtual Status dupKeyCheck(const BSONObj& key, const DiskLoc& loc) = 0;
+ virtual Status dupKeyCheck(OperationContext* txn,
+ const BSONObj& key,
+ const DiskLoc& loc) = 0;
//
// Information about the tree
//
// TODO: expose full set of args for testing?
- virtual void fullValidate(long long* numKeysOut) = 0;
+ virtual void fullValidate(OperationContext* txn, long long* numKeysOut) = 0;
virtual bool isEmpty() = 0;
@@ -187,7 +189,7 @@ namespace mongo {
/**
* Caller takes ownership. BtreeInterface must outlive all Cursors it produces.
*/
- virtual Cursor* newCursor(int direction) const = 0;
+ virtual Cursor* newCursor(OperationContext* txn, int direction) const = 0;
//
// Index creation
diff --git a/src/mongo/db/structure/btree/btree_logic.cpp b/src/mongo/db/structure/btree/btree_logic.cpp
index 88b6f32b8f4..8d05a65ab0c 100644
--- a/src/mongo/db/structure/btree/btree_logic.cpp
+++ b/src/mongo/db/structure/btree/btree_logic.cpp
@@ -566,10 +566,8 @@ namespace mongo {
memcpy(bucket->data + ofs, temp + ofs, dataUsed);
bucket->emptySize = tdz - dataUsed - bucket->n * sizeof(KeyHeaderType);
- {
- int foo = bucket->emptySize;
- invariant( foo >= 0 );
- }
+ int foo = bucket->emptySize;
+ invariant( foo >= 0 );
setPacked(bucket);
assertValid(_indexName, bucket, _ordering);
}
@@ -667,7 +665,8 @@ namespace mongo {
}
template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::customLocate(DiskLoc* locInOut,
+ void BtreeLogic<BtreeLayout>::customLocate(OperationContext* txn,
+ DiskLoc* locInOut,
int* keyOfsInOut,
const BSONObj& keyBegin,
int keyBeginLen,
@@ -677,7 +676,8 @@ namespace mongo {
int direction) const {
pair<DiskLoc, int> unused;
- customLocate(locInOut,
+ customLocate(txn,
+ locInOut,
keyOfsInOut,
keyBegin,
keyBeginLen,
@@ -687,27 +687,32 @@ namespace mongo {
direction,
unused);
- skipUnusedKeys(locInOut, keyOfsInOut, direction);
+ skipUnusedKeys(txn, locInOut, keyOfsInOut, direction);
}
template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::advance(DiskLoc* bucketLocInOut,
+ void BtreeLogic<BtreeLayout>::advance(OperationContext* txn,
+ DiskLoc* bucketLocInOut,
int* posInOut,
int direction) const {
- *bucketLocInOut = advance(*bucketLocInOut, posInOut, direction);
- skipUnusedKeys(bucketLocInOut, posInOut, direction);
+ *bucketLocInOut = advance(txn, *bucketLocInOut, posInOut, direction);
+ skipUnusedKeys(txn, bucketLocInOut, posInOut, direction);
}
template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::skipUnusedKeys(DiskLoc* loc, int* pos, int direction) const {
+ void BtreeLogic<BtreeLayout>::skipUnusedKeys(OperationContext* txn,
+ DiskLoc* loc,
+ int* pos,
+ int direction) const {
while (!loc->isNull() && !keyIsUsed(*loc, *pos)) {
- *loc = advance(*loc, pos, direction);
+ *loc = advance(txn, *loc, pos, direction);
}
}
template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::advanceTo(DiskLoc* thisLocInOut,
+ void BtreeLogic<BtreeLayout>::advanceTo(OperationContext* txn,
+ DiskLoc* thisLocInOut,
int* keyOfsInOut,
const BSONObj &keyBegin,
int keyBeginLen,
@@ -716,7 +721,8 @@ namespace mongo {
const vector<bool>& keyEndInclusive,
int direction) const {
- advanceToImpl(thisLocInOut,
+ advanceToImpl(txn,
+ thisLocInOut,
keyOfsInOut,
keyBegin,
keyBeginLen,
@@ -725,7 +731,7 @@ namespace mongo {
keyEndInclusive,
direction);
- skipUnusedKeys(thisLocInOut, keyOfsInOut, direction);
+ skipUnusedKeys(txn, thisLocInOut, keyOfsInOut, direction);
}
/**
@@ -738,7 +744,8 @@ namespace mongo {
* and reverse implementations would be more efficient
*/
template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::advanceToImpl(DiskLoc* thisLocInOut,
+ void BtreeLogic<BtreeLayout>::advanceToImpl(OperationContext* txn,
+ DiskLoc* thisLocInOut,
int* keyOfsInOut,
const BSONObj &keyBegin,
int keyBeginLen,
@@ -831,7 +838,8 @@ namespace mongo {
}
}
- customLocate(thisLocInOut,
+ customLocate(txn,
+ thisLocInOut,
keyOfsInOut,
keyBegin,
keyBeginLen,
@@ -843,7 +851,8 @@ namespace mongo {
}
template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::customLocate(DiskLoc* locInOut,
+ void BtreeLogic<BtreeLayout>::customLocate(OperationContext* txn,
+ DiskLoc* locInOut,
int* keyOfsInOut,
const BSONObj& keyBegin,
int keyBeginLen,
@@ -1071,28 +1080,31 @@ namespace mongo {
}
template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::exists(const KeyDataType& key) const {
+ bool BtreeLogic<BtreeLayout>::exists(OperationContext* txn, const KeyDataType& key) const {
int position = 0;
// Find the DiskLoc
bool found;
- DiskLoc bucket = _locate(getRootLoc(), key, &position, &found, minDiskLoc, 1);
+
+ DiskLoc bucket = _locate(txn, getRootLoc(), key, &position, &found, minDiskLoc, 1);
while (!bucket.isNull()) {
FullKey fullKey = getFullKey(getBucket(bucket), position);
if (fullKey.header.isUsed()) {
return fullKey.data.woEqual(key);
}
- bucket = advance(bucket, &position, 1);
+ bucket = advance(txn, bucket, &position, 1);
}
return false;
}
template <class BtreeLayout>
- Status BtreeLogic<BtreeLayout>::dupKeyCheck(const BSONObj& key, const DiskLoc& loc) const {
+ Status BtreeLogic<BtreeLayout>::dupKeyCheck(OperationContext* txn,
+ const BSONObj& key,
+ const DiskLoc& loc) const {
KeyDataOwnedType theKey(key);
- if (!wouldCreateDup(theKey, loc)) {
+ if (!wouldCreateDup(txn, theKey, loc)) {
return Status::OK();
}
@@ -1100,11 +1112,13 @@ namespace mongo {
}
template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::wouldCreateDup(const KeyDataType& key,
+ bool BtreeLogic<BtreeLayout>::wouldCreateDup(OperationContext* txn,
+ const KeyDataType& key,
const DiskLoc self) const {
int position;
bool found;
- DiskLoc posLoc = _locate(getRootLoc(), key, &position, &found, minDiskLoc, 1);
+
+ DiskLoc posLoc = _locate(txn, getRootLoc(), key, &position, &found, minDiskLoc, 1);
while (!posLoc.isNull()) {
FullKey fullKey = getFullKey(getBucket(posLoc), position);
@@ -1117,7 +1131,7 @@ namespace mongo {
break;
}
- posLoc = advance(posLoc, &position, 1);
+ posLoc = advance(txn, posLoc, &position, 1);
}
return false;
}
@@ -1146,7 +1160,8 @@ namespace mongo {
* note result might be an Unused location!
*/
template <class BtreeLayout>
- Status BtreeLogic<BtreeLayout>::_find(BucketType* bucket,
+ Status BtreeLogic<BtreeLayout>::_find(OperationContext* txn,
+ BucketType* bucket,
const KeyDataType& key,
const DiskLoc& recordLoc,
bool errorIfDup,
@@ -1180,8 +1195,8 @@ namespace mongo {
// This is expensive and we only want to do it once(? -- when would
// it happen twice).
dupsChecked = true;
- if (exists(key)) {
- if (wouldCreateDup(key, genericRecordLoc)) {
+ if (exists(txn, key)) {
+ if (wouldCreateDup(txn, key, genericRecordLoc)) {
return Status(ErrorCodes::DuplicateKey, dupKeyError(key), 11000);
}
else {
@@ -1259,7 +1274,7 @@ namespace mongo {
_bucketDeletion->aboutToDeleteBucket(bucketLoc);
BucketType* p = getBucket(bucket->parent);
- int parentIdx = indexInParent(bucket, bucketLoc);
+ int parentIdx = indexInParent(txn, bucket, bucketLoc);
*txn->recoveryUnit()->writing(&childLocForPos(p, parentIdx)) = DiskLoc();
deallocBucket(txn, bucket, bucketLoc);
}
@@ -1274,7 +1289,8 @@ namespace mongo {
}
template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::restorePosition(const BSONObj& savedKey,
+ void BtreeLogic<BtreeLayout>::restorePosition(OperationContext* txn,
+ const BSONObj& savedKey,
const DiskLoc& savedLoc,
int direction,
DiskLoc* bucketLocInOut,
@@ -1285,7 +1301,7 @@ namespace mongo {
// thing should be kept BTree-internal. This'll go away with finer grained locking: we
// can hold on to a bucket for as long as we need it.
if (-1 == *keyOffsetInOut) {
- locate(savedKey, savedLoc, direction, keyOffsetInOut, bucketLocInOut);
+ locate(txn, savedKey, savedLoc, direction, keyOffsetInOut, bucketLocInOut);
return;
}
@@ -1296,19 +1312,19 @@ namespace mongo {
invariant(BtreeLayout::INVALID_N_SENTINEL != bucket->n);
if (_keyIsAt(savedKey, savedLoc, bucket, *keyOffsetInOut)) {
- skipUnusedKeys(bucketLocInOut, keyOffsetInOut, direction);
+ skipUnusedKeys(txn, bucketLocInOut, keyOffsetInOut, direction);
return;
}
if (*keyOffsetInOut > 0) {
(*keyOffsetInOut)--;
if (_keyIsAt(savedKey, savedLoc, bucket, *keyOffsetInOut)) {
- skipUnusedKeys(bucketLocInOut, keyOffsetInOut, direction);
+ skipUnusedKeys(txn, bucketLocInOut, keyOffsetInOut, direction);
return;
}
}
- locate(savedKey, savedLoc, direction, keyOffsetInOut, bucketLocInOut);
+ locate(txn, savedKey, savedLoc, direction, keyOffsetInOut, bucketLocInOut);
}
template <class BtreeLayout>
@@ -1337,7 +1353,6 @@ namespace mongo {
int p) {
invariant(bucket->n > 0);
DiskLoc left = childLocForPos(bucket, p);
-
if (bucket->n == 1) {
if (left.isNull() && bucket->nextChild.isNull()) {
_delKeyAtPos(bucket, p);
@@ -1400,7 +1415,7 @@ namespace mongo {
invariant(!lchild.isNull() || !rchild.isNull());
int advanceDirection = lchild.isNull() ? 1 : -1;
int advanceKeyOfs = keypos;
- DiskLoc advanceLoc = advance(bucketLoc, &advanceKeyOfs, advanceDirection);
+ DiskLoc advanceLoc = advance(txn, bucketLoc, &advanceKeyOfs, advanceDirection);
// advanceLoc must be a descentant of thisLoc, because thisLoc has a
// child in the proper direction and all descendants of thisLoc must be
// nonempty because they are not the root.
@@ -1435,7 +1450,7 @@ namespace mongo {
}
else {
BucketType* parentBucket = getBucket(bucket->parent);
- int bucketIndexInParent = indexInParent(bucket, bucketLoc);
+ int bucketIndexInParent = indexInParent(txn, bucket, bucketLoc);
*txn->recoveryUnit()->writing(&childLocForPos(parentBucket, bucketIndexInParent)) =
bucket->nextChild;
}
@@ -1446,9 +1461,10 @@ namespace mongo {
}
template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::canMergeChildren(BucketType* bucket,
- const DiskLoc bucketLoc,
- const int leftIndex) {
+ bool BtreeLogic<BtreeLayout>::canMergeChildren(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc,
+ const int leftIndex) {
invariant(leftIndex >= 0 && leftIndex < bucket->n);
DiskLoc leftNodeLoc = childLocForPos(bucket, leftIndex);
@@ -1477,9 +1493,12 @@ namespace mongo {
* splitPos().
*/
template <class BtreeLayout>
- int BtreeLogic<BtreeLayout>::_rebalancedSeparatorPos(BucketType* bucket, int leftIndex) {
+ int BtreeLogic<BtreeLayout>::_rebalancedSeparatorPos(OperationContext* txn,
+ BucketType* bucket,
+ int leftIndex) {
int split = -1;
int rightSize = 0;
+
const BucketType* l = childForPos(bucket, leftIndex);
const BucketType* r = childForPos(bucket, leftIndex + 1);
@@ -1539,6 +1558,7 @@ namespace mongo {
DiskLoc leftNodeLoc = childLocForPos(bucket, leftIndex);
DiskLoc rightNodeLoc = childLocForPos(bucket, leftIndex + 1);
+
BucketType* l = btreemod(txn, getBucket(leftNodeLoc));
BucketType* r = btreemod(txn, getBucket(rightNodeLoc));
@@ -1578,8 +1598,9 @@ namespace mongo {
}
template <class BtreeLayout>
- int BtreeLogic<BtreeLayout>::indexInParent(BucketType* bucket,
- const DiskLoc bucketLoc) const {
+ int BtreeLogic<BtreeLayout>::indexInParent(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc) const {
invariant(!bucket->parent.isNull());
const BucketType* p = getBucket(bucket->parent);
if (p->nextChild == bucketLoc) {
@@ -1609,7 +1630,7 @@ namespace mongo {
// If we can merge, then we must merge rather than balance to preserve bucket utilization
// constraints.
- if (canMergeChildren(bucket, bucketLoc, leftIndex)) {
+ if (canMergeChildren(txn, bucket, bucketLoc, leftIndex)) {
return false;
}
@@ -1718,7 +1739,7 @@ namespace mongo {
BucketType* r = btreemod(txn, getBucket(rchild));
_packReadyForMod(r, zeropos);
- int split = _rebalancedSeparatorPos(bucket, leftIndex);
+ int split = _rebalancedSeparatorPos(txn, bucket, leftIndex);
// By definition, if we are below the low water mark and cannot merge
// then we must actively balance.
@@ -1744,7 +1765,7 @@ namespace mongo {
}
BucketType* p = getBucket(bucket->parent);
- int parentIdx = indexInParent(bucket, bucketLoc);
+ int parentIdx = indexInParent(txn, bucket, bucketLoc);
// TODO will missing neighbor case be possible long term? Should we try to merge/balance
// somehow in that case if so?
@@ -1782,7 +1803,8 @@ namespace mongo {
int pos;
bool found = false;
KeyDataOwnedType ownedKey(key);
- DiskLoc loc = _locate(getRootLoc(), ownedKey, &pos, &found, recordLoc, 1);
+
+ DiskLoc loc = _locate(txn, getRootLoc(), ownedKey, &pos, &found, recordLoc, 1);
if (found) {
BucketType* bucket = btreemod(txn, getBucket(loc));
delKeyAtPos(txn, bucket, loc, pos);
@@ -1997,6 +2019,8 @@ namespace mongo {
StatusWith<DiskLoc> loc = _recordStore->insertRecord(txn, &docWriter, false);
// XXX: remove this(?) or turn into massert or sanely bubble it back up.
uassertStatusOK(loc.getStatus());
+
+        // This bucket is brand new and referenced by nobody yet, so this lock is probably unneeded.
BucketType* b = btreemod(txn, getBucket(loc.getValue()));
init(b);
return loc.getValue();
@@ -2059,15 +2083,17 @@ namespace mongo {
}
template <class BtreeLayout>
- long long BtreeLogic<BtreeLayout>::fullValidate(long long *unusedCount,
- bool strict,
- bool dumpBuckets,
- unsigned depth) {
- return _fullValidate(getRootLoc(), unusedCount, strict, dumpBuckets, depth);
+ long long BtreeLogic<BtreeLayout>::fullValidate(OperationContext* txn,
+ long long *unusedCount,
+ bool strict,
+ bool dumpBuckets,
+ unsigned depth) {
+ return _fullValidate(txn, getRootLoc(), unusedCount, strict, dumpBuckets, depth);
}
template <class BtreeLayout>
- long long BtreeLogic<BtreeLayout>::_fullValidate(const DiskLoc bucketLoc,
+ long long BtreeLogic<BtreeLayout>::_fullValidate(OperationContext* txn,
+ const DiskLoc bucketLoc,
long long *unusedCount,
bool strict,
bool dumpBuckets,
@@ -2103,7 +2129,7 @@ namespace mongo {
wassert(b->parent == bucketLoc);
}
- keyCount += _fullValidate(left, unusedCount, strict, dumpBuckets, depth + 1);
+ keyCount += _fullValidate(txn, left, unusedCount, strict, dumpBuckets, depth + 1);
}
}
@@ -2116,7 +2142,7 @@ namespace mongo {
wassert(b->parent == bucketLoc);
}
- keyCount += _fullValidate(bucket->nextChild, unusedCount, strict, dumpBuckets, depth + 1);
+ keyCount += _fullValidate(txn, bucket->nextChild, unusedCount, strict, dumpBuckets, depth + 1);
}
return keyCount;
@@ -2230,7 +2256,7 @@ namespace mongo {
int pos;
bool found;
- Status findStatus = _find(bucket, key, recordLoc, !dupsAllowed, &pos, &found);
+ Status findStatus = _find(txn, bucket, key, recordLoc, !dupsAllowed, &pos, &found);
if (!findStatus.isOK()) {
return findStatus;
}
@@ -2269,9 +2295,10 @@ namespace mongo {
}
template <class BtreeLayout>
- DiskLoc BtreeLogic<BtreeLayout>::advance(const DiskLoc& bucketLoc,
- int* posInOut,
- int direction) const {
+ DiskLoc BtreeLogic<BtreeLayout>::advance(OperationContext* txn,
+ const DiskLoc& bucketLoc,
+ int* posInOut,
+ int direction) const {
BucketType* bucket = getBucket(bucketLoc);
if (*posInOut < 0 || *posInOut >= bucket->n ) {
@@ -2343,7 +2370,8 @@ namespace mongo {
}
template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::locate(const BSONObj& key,
+ bool BtreeLogic<BtreeLayout>::locate(OperationContext* txn,
+ const BSONObj& key,
const DiskLoc& recordLoc,
const int direction,
int* posOut,
@@ -2355,28 +2383,33 @@ namespace mongo {
bool found = false;
KeyDataOwnedType owned(key);
- *bucketLocOut = _locate(getRootLoc(), owned, posOut, &found, recordLoc, direction);
+ *bucketLocOut = _locate(txn, getRootLoc(), owned, posOut, &found, recordLoc, direction);
if (!found) {
return false;
}
- skipUnusedKeys(bucketLocOut, posOut, direction);
+ skipUnusedKeys(txn, bucketLocOut, posOut, direction);
return found;
}
+    /**
+     * Recursively walks down the btree looking for a match on 'key' and 'recordLoc'.
+     * The caller should already hold a lock on 'bucketLoc'.
+     */
template <class BtreeLayout>
- DiskLoc BtreeLogic<BtreeLayout>::_locate(const DiskLoc& bucketLoc,
- const KeyDataType& key,
- int* posOut,
- bool* foundOut,
- const DiskLoc& recordLoc,
- const int direction) const {
+ DiskLoc BtreeLogic<BtreeLayout>::_locate(OperationContext* txn,
+ const DiskLoc& bucketLoc,
+ const KeyDataType& key,
+ int* posOut,
+ bool* foundOut,
+ const DiskLoc& recordLoc,
+ const int direction) const {
int position;
BucketType* bucket = getBucket(bucketLoc);
// XXX: owned to not owned conversion(?)
- _find(bucket, key, recordLoc, false, &position, foundOut);
+ _find(txn, bucket, key, recordLoc, false, &position, foundOut);
// Look in our current bucket.
if (*foundOut) {
@@ -2388,7 +2421,7 @@ namespace mongo {
DiskLoc childLoc = childLocForPos(bucket, position);
if (!childLoc.isNull()) {
- DiskLoc inChild = _locate(childLoc, key, posOut, foundOut, recordLoc, direction);
+ DiskLoc inChild = _locate(txn, childLoc, key, posOut, foundOut, recordLoc, direction);
if (!inChild.isNull()) {
return inChild;
}
@@ -2455,7 +2488,8 @@ namespace mongo {
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::BucketType*
BtreeLogic<BtreeLayout>::childForPos(BucketType* bucket, int pos) const {
- return getBucket(childLocForPos(bucket, pos));
+ DiskLoc loc = childLocForPos(bucket, pos);
+ return getBucket(loc);
}
template <class BtreeLayout>
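
Seen from outside BtreeLogic, the public read API is now fully context-threaded; a hypothetical caller pairing locate() with advance(), per the declarations in btree_logic.h below:

    int pos;
    DiskLoc bucketLoc;
    if (btree->locate(txn, key, recordLoc, 1, &pos, &bucketLoc)) {
        // the same 'txn' drives each subsequent step through the tree
        btree->advance(txn, &bucketLoc, &pos, 1);
    }
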
diff --git a/src/mongo/db/structure/btree/btree_logic.h b/src/mongo/db/structure/btree/btree_logic.h
index 157040e4891..b7a31f80f52 100644
--- a/src/mongo/db/structure/btree/btree_logic.h
+++ b/src/mongo/db/structure/btree/btree_logic.h
@@ -137,7 +137,9 @@ namespace mongo {
*/
Builder* newBuilder(OperationContext* txn, bool dupsAllowed);
- Status dupKeyCheck(const BSONObj& key, const DiskLoc& loc) const;
+ Status dupKeyCheck(OperationContext* txn,
+ const BSONObj& key,
+ const DiskLoc& loc) const;
Status insert(OperationContext* txn,
const BSONObj& rawKey,
@@ -152,15 +154,19 @@ namespace mongo {
* bucketLocOut would contain the bucket containing key which is before or after the
* searched one (dependent on the direction).
*/
- bool locate(const BSONObj& key,
+ bool locate(OperationContext* txn,
+ const BSONObj& key,
const DiskLoc& recordLoc,
const int direction,
int* posOut,
DiskLoc* bucketLocOut) const;
- void advance(DiskLoc* bucketLocInOut, int* posInOut, int direction) const;
+ void advance(OperationContext* txn,
+ DiskLoc* bucketLocInOut,
+ int* posInOut,
+ int direction) const;
- bool exists(const KeyDataType& key) const;
+ bool exists(OperationContext* txn, const KeyDataType& key) const;
bool unindex(OperationContext* txn,
const BSONObj& key,
@@ -168,7 +174,8 @@ namespace mongo {
bool isEmpty() const;
- long long fullValidate(long long *unusedCount,
+ long long fullValidate(OperationContext*,
+ long long *unusedCount,
bool strict,
bool dumpBuckets,
unsigned depth);
@@ -185,7 +192,8 @@ namespace mongo {
// Composite key navigation methods
//
- void customLocate(DiskLoc* locInOut,
+ void customLocate(OperationContext* txn,
+ DiskLoc* locInOut,
int* keyOfsInOut,
const BSONObj& keyBegin,
int keyBeginLen,
@@ -194,7 +202,8 @@ namespace mongo {
const vector<bool>& keyEndInclusive,
int direction) const;
- void advanceTo(DiskLoc* thisLocInOut,
+ void advanceTo(OperationContext*,
+ DiskLoc* thisLocInOut,
int* keyOfsInOut,
const BSONObj &keyBegin,
int keyBeginLen,
@@ -203,7 +212,8 @@ namespace mongo {
const vector<bool>& keyEndInclusive,
int direction) const;
- void restorePosition(const BSONObj& savedKey,
+ void restorePosition(OperationContext* txn,
+ const BSONObj& savedKey,
const DiskLoc& savedLoc,
int direction,
DiskLoc* bucketInOut,
@@ -330,7 +340,8 @@ namespace mongo {
void _pack(OperationContext* txn, BucketType* bucket, const DiskLoc thisLoc, int &refPos);
- void customLocate(DiskLoc* locInOut,
+ void customLocate(OperationContext* txn,
+ DiskLoc* locInOut,
int* keyOfsInOut,
const BSONObj& keyBegin,
int keyBeginLen,
@@ -340,12 +351,13 @@ namespace mongo {
int direction,
pair<DiskLoc, int>& bestParent) const;
- Status _find(BucketType* bucket,
- const KeyDataType& key,
- const DiskLoc& recordLoc,
- bool errorIfDup,
- int* keyPositionOut,
- bool* foundOut) const;
+ Status _find(OperationContext* txn,
+ BucketType* bucket,
+ const KeyDataType& key,
+ const DiskLoc& recordLoc,
+ bool errorIfDup,
+ int* keyPositionOut,
+ bool* foundOut) const;
bool customFind(int low,
int high,
@@ -360,7 +372,8 @@ namespace mongo {
int* keyOfsInOut,
pair<DiskLoc, int>& bestParent) const;
- void advanceToImpl(DiskLoc* thisLocInOut,
+ void advanceToImpl(OperationContext* txn,
+ DiskLoc* thisLocInOut,
int* keyOfsInOut,
const BSONObj &keyBegin,
int keyBeginLen,
@@ -369,35 +382,48 @@ namespace mongo {
const vector<bool>& keyEndInclusive,
int direction) const;
- bool wouldCreateDup(const KeyDataType& key, const DiskLoc self) const;
+ bool wouldCreateDup(OperationContext* txn,
+ const KeyDataType& key,
+ const DiskLoc self) const;
bool keyIsUsed(const DiskLoc& loc, const int& pos) const;
- void skipUnusedKeys(DiskLoc* loc, int* pos, int direction) const;
+ void skipUnusedKeys(OperationContext* txn,
+ DiskLoc* loc,
+ int* pos,
+ int direction) const;
- DiskLoc advance(const DiskLoc& bucketLoc, int* posInOut, int direction) const;
+ DiskLoc advance(OperationContext* txn,
+ const DiskLoc& bucketLoc,
+ int* posInOut,
+ int direction) const;
- DiskLoc _locate(const DiskLoc& bucketLoc,
- const KeyDataType& key,
- int* posOut,
- bool* foundOut,
- const DiskLoc& recordLoc,
- const int direction) const;
+ DiskLoc _locate(OperationContext* txn,
+ const DiskLoc& bucketLoc,
+ const KeyDataType& key,
+ int* posOut,
+ bool* foundOut,
+ const DiskLoc& recordLoc,
+ const int direction) const;
- long long _fullValidate(const DiskLoc bucketLoc,
- long long *unusedCount,
- bool strict,
- bool dumpBuckets,
- unsigned depth);
+ long long _fullValidate(OperationContext* txn,
+ const DiskLoc bucketLoc,
+ long long *unusedCount,
+ bool strict,
+ bool dumpBuckets,
+ unsigned depth);
DiskLoc _addBucket(OperationContext* txn);
- bool canMergeChildren(BucketType* bucket,
+ bool canMergeChildren(OperationContext* txn,
+ BucketType* bucket,
const DiskLoc bucketLoc,
const int leftIndex);
// has to look in children of 'bucket' and requires record store
- int _rebalancedSeparatorPos(BucketType* bucket, int leftIndex);
+ int _rebalancedSeparatorPos(OperationContext* txn,
+ BucketType* bucket,
+ int leftIndex);
void _packReadyForMod(BucketType* bucket, int &refPos);
@@ -466,7 +492,7 @@ namespace mongo {
void doBalanceRightToLeft(OperationContext* txn,
BucketType* bucket,
- const DiskLoc bucketLoc,
+ const DiskLoc thisLoc,
int leftIndex,
int split,
BucketType* l,
@@ -479,7 +505,9 @@ namespace mongo {
const DiskLoc bucketLoc,
int leftIndex);
- int indexInParent(BucketType* bucket, const DiskLoc bucketLoc) const;
+ int indexInParent(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc) const;
void doMergeChildren(OperationContext* txn,
BucketType* bucket,
diff --git a/src/mongo/db/structure/btree/btree_logic_test.cpp b/src/mongo/db/structure/btree/btree_logic_test.cpp
index deabd340184..3cfa48a3a25 100644
--- a/src/mongo/db/structure/btree/btree_logic_test.cpp
+++ b/src/mongo/db/structure/btree/btree_logic_test.cpp
@@ -63,7 +63,8 @@ namespace mongo {
protected:
void checkValidNumKeys(int nKeys) {
- ASSERT_EQUALS(nKeys, _helper.btree.fullValidate(NULL, true, true, 0));
+ OperationContextNoop txn;
+ ASSERT_EQUALS(nKeys, _helper.btree.fullValidate(&txn, NULL, true, true, 0));
}
void insert(const BSONObj &key, const DiskLoc dl) {
@@ -83,8 +84,9 @@ namespace mongo {
int direction) {
int pos;
DiskLoc loc;
+ OperationContextNoop txn;
ASSERT_EQUALS(expectedFound,
- _helper.btree.locate(key, _helper.dummyDiskLoc, direction, &pos, &loc));
+ _helper.btree.locate(&txn, key, _helper.dummyDiskLoc, direction, &pos, &loc));
ASSERT_EQUALS(expectedLocation, loc);
ASSERT_EQUALS(expectedPos, pos);
}
@@ -128,8 +130,8 @@ namespace mongo {
int bucketRebalancedSeparatorPos(const DiskLoc bucketLoc, int leftIndex) {
BucketType* bucket = _helper.btree.getBucket(bucketLoc);
-
- return _helper.btree._rebalancedSeparatorPos(bucket, leftIndex);
+ OperationContextNoop txn;
+ return _helper.btree._rebalancedSeparatorPos(&txn, bucket, leftIndex);
}
FullKey getKey(const DiskLoc bucketLoc, int pos) const {
@@ -328,21 +330,21 @@ namespace mongo {
// 'E' is the split point and should be in the head; the rest should be ~50/50
const BSONObj splitPoint = simpleKey('E', 800);
- this->_helper.btree.locate(splitPoint, this->_helper.dummyDiskLoc, 1, &pos, &loc);
+ this->_helper.btree.locate(&txn, splitPoint, this->_helper.dummyDiskLoc, 1, &pos, &loc);
ASSERT_EQUALS(this->_helper.headManager.getHead(), loc);
ASSERT_EQUALS(0, pos);
// Find the one before 'E'
int largePos;
DiskLoc largeLoc;
- this->_helper.btree.locate(splitPoint, this->_helper.dummyDiskLoc, 1, &largePos, &largeLoc);
- this->_helper.btree.advance(&largeLoc, &largePos, -1);
+ this->_helper.btree.locate(&txn, splitPoint, this->_helper.dummyDiskLoc, 1, &largePos, &largeLoc);
+ this->_helper.btree.advance(&txn, &largeLoc, &largePos, -1);
// Find the one after 'E'
int smallPos;
DiskLoc smallLoc;
- this->_helper.btree.locate(splitPoint, this->_helper.dummyDiskLoc, 1, &smallPos, &smallLoc);
- this->_helper.btree.advance(&smallLoc, &smallPos, 1);
+ this->_helper.btree.locate(&txn, splitPoint, this->_helper.dummyDiskLoc, 1, &smallPos, &smallLoc);
+ this->_helper.btree.advance(&txn, &smallLoc, &smallPos, 1);
ASSERT_NOT_EQUALS(smallLoc, largeLoc);
ASSERT_NOT_EQUALS(smallLoc, loc);
@@ -378,23 +380,23 @@ namespace mongo {
// 'H' is the maximum 'large' interval key; 90% should be < 'H' and 10% larger
const BSONObj splitPoint = simpleKey('H', 800);
- this->_helper.btree.locate(splitPoint, this->_helper.dummyDiskLoc, 1, &pos, &loc);
+ this->_helper.btree.locate(&txn, splitPoint, this->_helper.dummyDiskLoc, 1, &pos, &loc);
ASSERT_EQUALS(this->_helper.headManager.getHead(), loc);
ASSERT_EQUALS(0, pos);
// Find the one before 'H'
int largePos;
DiskLoc largeLoc;
- this->_helper.btree.locate(
+ this->_helper.btree.locate(&txn,
splitPoint, this->_helper.dummyDiskLoc, 1, &largePos, &largeLoc);
- this->_helper.btree.advance(&largeLoc, &largePos, -1);
+ this->_helper.btree.advance(&txn, &largeLoc, &largePos, -1);
// Find the one after 'H'
int smallPos;
DiskLoc smallLoc;
- this->_helper.btree.locate(
+ this->_helper.btree.locate(&txn,
splitPoint, this->_helper.dummyDiskLoc, 1, &smallPos, &smallLoc);
- this->_helper.btree.advance(&smallLoc, &smallPos, 1);
+ this->_helper.btree.advance(&txn, &smallLoc, &smallPos, 1);
ASSERT_NOT_EQUALS(smallLoc, largeLoc);
ASSERT_NOT_EQUALS(smallLoc, loc);
@@ -441,7 +443,7 @@ namespace mongo {
ASSERT_EQUALS(1, this->_helper.recordStore.numRecords() - 1);
long long unusedCount = 0;
- ASSERT_EQUALS(expectedCount, this->_helper.btree.fullValidate(&unusedCount, true, true, 0));
+ ASSERT_EQUALS(expectedCount, this->_helper.btree.fullValidate(&txn, &unusedCount, true, true, 0));
ASSERT_EQUALS(0, unusedCount);
}
@@ -497,7 +499,7 @@ namespace mongo {
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords() - 1);
long long unusedCount = 0;
- ASSERT_EQUALS(17, this->_helper.btree.fullValidate(&unusedCount, true, true, 0));
+ ASSERT_EQUALS(17, this->_helper.btree.fullValidate(&txn, &unusedCount, true, true, 0));
ASSERT_EQUALS(0, unusedCount);
}
};
@@ -510,7 +512,7 @@ namespace mongo {
ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
builder.makeTree("{d:{b:{a:null},bb:null,_:{c:null}},_:{f:{e:null},_:{g:null}}}");
- ASSERT_EQUALS(8, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords());
@@ -518,7 +520,7 @@ namespace mongo {
const BSONObj k = BSON("" << "bb");
verify(this->unindex(k));
- ASSERT_EQUALS(7, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(6, this->_helper.recordStore.numRecords());
@@ -535,7 +537,7 @@ namespace mongo {
ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
builder.makeTree("{d:{b:{a:null},bb:null,cc:{c:null}},_:{f:{e:null},h:{g:null}}}");
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords());
@@ -543,7 +545,7 @@ namespace mongo {
const BSONObj k = BSON("" << "bb");
verify(this->unindex(k));
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(6, this->_helper.recordStore.numRecords());
@@ -564,7 +566,7 @@ namespace mongo {
builder.makeTree("{d:{b:{a:null},c:null}}");
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
@@ -572,7 +574,7 @@ namespace mongo {
const BSONObj k = BSON("" << "c");
verify(this->unindex(k));
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
@@ -590,7 +592,7 @@ namespace mongo {
builder.makeTree("{d:{b:{a:null},bb:null,cc:{c:null}},i:{f:{e:null},h:{g:null}}}");
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords());
@@ -598,7 +600,7 @@ namespace mongo {
const BSONObj k = BSON("" << "bb");
verify(this->unindex(k));
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// Child does not currently replace parent in this case. Also, the tree
// has 6 buckets + 1 for the this->_helper.dummyDiskLoc.
@@ -617,7 +619,7 @@ namespace mongo {
builder.makeTree("{d:{b:{a:null},cc:{c:null}},i:{f:{e:null},ff:null,h:{g:null}}}");
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords());
@@ -625,7 +627,7 @@ namespace mongo {
const BSONObj k = BSON("" << "ff");
verify(this->unindex(k));
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// Child does not currently replace parent in this case. Also, the tree
// has 6 buckets + 1 for the this->_helper.dummyDiskLoc.
@@ -646,7 +648,7 @@ namespace mongo {
"dd:null,"
"_:{f:{e:null},h:{g:null}}}");
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords());
@@ -654,7 +656,7 @@ namespace mongo {
const BSONObj k = BSON("" << "bb");
verify(this->unindex(k));
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords());
@@ -674,7 +676,7 @@ namespace mongo {
builder.makeTree("{c:{b:{a:null}},d:null,_:{f:{e:null},g:null}}");
- ASSERT_EQUALS(7, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(6, this->_helper.recordStore.numRecords());
@@ -682,7 +684,7 @@ namespace mongo {
const BSONObj k = BSON("" << "g");
verify(this->unindex(k));
- ASSERT_EQUALS(6, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(6, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(6, this->_helper.recordStore.numRecords());
@@ -700,7 +702,7 @@ namespace mongo {
builder.makeTree("{c:{b:{a:null}},f:{e:{d:null},ee:null},_:{h:{g:null}}}");
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords());
@@ -708,7 +710,7 @@ namespace mongo {
const BSONObj k = BSON("" << "ee");
verify(this->unindex(k));
- ASSERT_EQUALS(8, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords());
@@ -726,7 +728,7 @@ namespace mongo {
builder.makeTree("{c:{b:{a:null}},f:{e:{d:null},ee:null},ff:null,_:{h:{g:null}}}");
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords());
@@ -734,7 +736,7 @@ namespace mongo {
const BSONObj k = BSON("" << "ee");
verify(this->unindex(k));
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords());
@@ -752,7 +754,7 @@ namespace mongo {
builder.makeTree("{c:{b:{a:null}},cc:null,f:{e:{d:null},ee:null},_:{h:{g:null}}}");
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords());
@@ -760,7 +762,7 @@ namespace mongo {
const BSONObj k = BSON("" << "ee");
verify(this->unindex(k));
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords());
@@ -778,7 +780,7 @@ namespace mongo {
builder.makeTree("{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},j:{i:null}}");
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords());
@@ -786,7 +788,7 @@ namespace mongo {
const BSONObj k = BSON("" << "c");
verify(this->unindex(k));
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
@@ -805,7 +807,7 @@ namespace mongo {
builder.makeTree("{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},_:{i:null}}");
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords());
@@ -813,7 +815,7 @@ namespace mongo {
const BSONObj k = BSON("" << "c");
verify(this->unindex(k));
- ASSERT_EQUALS(8, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
@@ -831,7 +833,7 @@ namespace mongo {
builder.makeTree("{h:{e:{b:{a:null},c:null,d:null},_:{f:null}},_:{i:null}}");
- ASSERT_EQUALS(8, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords());
@@ -839,7 +841,7 @@ namespace mongo {
const BSONObj k = BSON("" << "c");
verify(this->unindex(k));
- ASSERT_EQUALS(7, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
@@ -897,7 +899,7 @@ namespace mongo {
const char *keys = delKeys();
for (const char *i = keys; *i; ++i) {
long long unused = 0;
- ASSERT_EQUALS(_count, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(_count, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
ASSERT_EQUALS(0, unused);
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
@@ -910,7 +912,7 @@ namespace mongo {
}
long long unused = 0;
- ASSERT_EQUALS(_count, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(_count, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
ASSERT_EQUALS(0, unused);
validate();
@@ -1136,7 +1138,7 @@ namespace mongo {
"b:{$20:null,$30:null,$40:null,$50:null,a:null},"
"_:{c:null}}");
- ASSERT_EQUALS(14, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(14, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
@@ -1144,7 +1146,7 @@ namespace mongo {
const BSONObj k = BSON("" << bigNumString(0x40, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
@@ -1166,7 +1168,7 @@ namespace mongo {
"b:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null},"
"_:{c:null}}");
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
@@ -1174,7 +1176,7 @@ namespace mongo {
const BSONObj k = BSON("" << bigNumString(0x3, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
@@ -1197,7 +1199,7 @@ namespace mongo {
"b:{$30:null,$40:{$35:null},$50:{$45:null}},"
"_:{c:null}}");
- ASSERT_EQUALS(23, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(23, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 14 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(15, this->_helper.recordStore.numRecords());
@@ -1205,7 +1207,7 @@ namespace mongo {
const BSONObj k = BSON("" << bigNumString(0x30, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(22, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(22, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 14 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(15, this->_helper.recordStore.numRecords());
@@ -1231,7 +1233,7 @@ namespace mongo {
"$90:{$85:null},$100:{$95:null}},"
"_:{c:null}}");
- ASSERT_EQUALS(25, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(25, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 15 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(16, this->_helper.recordStore.numRecords());
@@ -1239,7 +1241,7 @@ namespace mongo {
const BSONObj k = BSON("" << bigNumString(0x5, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(24, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(24, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 15 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(16, this->_helper.recordStore.numRecords());
@@ -1262,7 +1264,7 @@ namespace mongo {
builder.makeTree("{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},"
"_:{$20:null,$30:null,$40:null,$50:null,a:null}}");
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
@@ -1270,7 +1272,7 @@ namespace mongo {
const BSONObj k = BSON("" << bigNumString(0x40, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
@@ -1342,7 +1344,7 @@ namespace mongo {
builder.makeTree("{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},"
"_:{$20:null,$30:null,$40:null,$50:null,a:null}}");
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
@@ -1353,7 +1355,7 @@ namespace mongo {
const BSONObj k = BSON("" << bigNumString(0x40, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
@@ -1376,7 +1378,7 @@ namespace mongo {
"$200:null,$300:null,$400:null,$500:null,$600:null,"
"$700:null,$800:null,$900:null,_:{c:null}}");
- ASSERT_EQUALS(22, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(22, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
@@ -1384,7 +1386,7 @@ namespace mongo {
const BSONObj k = BSON("" << bigNumString(0x3, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(21, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(21, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords());
@@ -1559,7 +1561,7 @@ namespace mongo {
"$20:{$11:null,$12:null,$13:null,$14:null},"
"_:{$30:null}}");
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
@@ -1567,7 +1569,7 @@ namespace mongo {
const BSONObj k = BSON("" << bigNumString(0x12, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
@@ -1589,7 +1591,7 @@ namespace mongo {
"$20:{$11:null,$12:null,$13:null,$14:null},"
"_:{$31:null,$32:null,$33:null,$34:null,$35:null,$36:null}}");
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
@@ -1597,7 +1599,7 @@ namespace mongo {
const BSONObj k = BSON("" << bigNumString(0x12, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
@@ -1619,7 +1621,7 @@ namespace mongo {
"_:{$20:null,$30:null,$40:null,$50:null,"
"$60:null,$70:null,$80:null,$90:null}}");
- ASSERT_EQUALS(15, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(15, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(6, this->_helper.recordStore.numRecords());
@@ -1627,7 +1629,7 @@ namespace mongo {
const BSONObj k = BSON("" << bigNumString(0x7, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(14, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(14, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
@@ -1647,7 +1649,7 @@ namespace mongo {
builder.makeTree("{b:{a:null}}");
- ASSERT_EQUALS(2, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords());
@@ -1655,7 +1657,7 @@ namespace mongo {
const BSONObj k = BSON("" << "a");
ASSERT(this->unindex(k));
- ASSERT_EQUALS(1, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(1, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(2, this->_helper.recordStore.numRecords());
@@ -1673,7 +1675,7 @@ namespace mongo {
builder.makeTree("{a:null,c:{b:null},d:null}");
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords());
@@ -1681,7 +1683,7 @@ namespace mongo {
const BSONObj k = BSON("" << "b");
ASSERT(this->unindex(k));
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(NULL, true, true, 0));
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, NULL, true, true, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(2, this->_helper.recordStore.numRecords());
@@ -1700,7 +1702,7 @@ namespace mongo {
builder.makeTree("{a:null,c:{b:null},d:null}");
long long unused = 0;
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords());
@@ -1709,7 +1711,7 @@ namespace mongo {
const BSONObj k = BSON("" << "c");
ASSERT(this->unindex(k));
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(2, this->_helper.recordStore.numRecords());
@@ -1733,7 +1735,7 @@ namespace mongo {
this->markKeyUnused(prevChildBucket, 0);
long long unused = 0;
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords());
@@ -1743,7 +1745,7 @@ namespace mongo {
ASSERT(this->unindex(k));
unused = 0;
- ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(2, this->_helper.recordStore.numRecords());
@@ -1764,7 +1766,7 @@ namespace mongo {
builder.makeTree("{a:null,_:{b:null}}");
long long unused = 0;
- ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords());
@@ -1774,7 +1776,7 @@ namespace mongo {
ASSERT(this->unindex(k));
unused = 0;
- ASSERT_EQUALS(1, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(1, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(2, this->_helper.recordStore.numRecords());
@@ -1794,7 +1796,7 @@ namespace mongo {
builder.makeTree("{a:null,y:{d:{c:{b:null}},_:{e:null}},z:null}");
long long unused = 0;
- ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(6, this->_helper.recordStore.numRecords());
@@ -1804,7 +1806,7 @@ namespace mongo {
ASSERT(this->unindex(k));
unused = 0;
- ASSERT_EQUALS(6, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(6, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
@@ -1824,7 +1826,7 @@ namespace mongo {
builder.makeTree("{a:null,_:{e:{c:null},_:{f:null}}}");
long long unused = 0;
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
@@ -1834,7 +1836,7 @@ namespace mongo {
ASSERT(this->unindex(k));
unused = 0;
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords());
@@ -1854,7 +1856,7 @@ namespace mongo {
builder.makeTree("{a:null,d:{c:{b:null}},e:null}");
long long unused = 0;
- ASSERT_EQUALS(5, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(5, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
@@ -1863,7 +1865,7 @@ namespace mongo {
const BSONObj k = BSON("" << "d");
ASSERT(this->unindex(k));
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
@@ -1886,7 +1888,7 @@ namespace mongo {
builder.makeTree("{a:null,_:{c:null,_:{d:null}}}");
long long unused = 0;
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
@@ -1895,7 +1897,7 @@ namespace mongo {
const BSONObj k = BSON("" << "a");
ASSERT(this->unindex(k));
- ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords());
@@ -1919,7 +1921,7 @@ namespace mongo {
"$40:null,$50:null,$60:null,$70:null,$80:null,$90:null,$100:null}");
long long unused = 0;
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
@@ -1928,7 +1930,7 @@ namespace mongo {
const BSONObj k = BSON("" << bigNumString(0x30, 0x10));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
@@ -1951,7 +1953,7 @@ namespace mongo {
"$80:null,$90:null,$100$10:{$95:{$93:null},_:{$97:null}}}");
long long unused = 0;
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
@@ -1960,7 +1962,7 @@ namespace mongo {
const BSONObj k = BSON("" << bigNumString(0x100, 0x10));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&unused, true, true, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, &unused, true, true, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords());
@@ -2044,14 +2046,14 @@ namespace mongo {
}
long long unused = 0;
- ASSERT_EQUALS( 0, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, bt()->fullValidate(&txn, dl(), order(), &unused, true ) );
for ( long long i = 50000; i < 50100; ++i ) {
insert( i );
}
long long unused2 = 0;
- ASSERT_EQUALS( 100, bt()->fullValidate( dl(), order(), &unused2, true ) );
+ ASSERT_EQUALS( 100, bt()->fullValidate(&txn, dl(), order(), &unused2, true ) );
// log() << "old unused: " << unused << ", new unused: " << unused2 << endl;
//
diff --git a/src/mongo/db/structure/btree/btree_test_help.cpp b/src/mongo/db/structure/btree/btree_test_help.cpp
index deaca1285ab..a2deff60d27 100644
--- a/src/mongo/db/structure/btree/btree_test_help.cpp
+++ b/src/mongo/db/structure/btree/btree_test_help.cpp
@@ -174,7 +174,8 @@ namespace mongo {
bool ArtificialTreeBuilder<OnDiskFormat>::isPresent(const BSONObj &key, int direction) const {
int pos;
DiskLoc loc;
- return _helper->btree.locate(key, _helper->dummyDiskLoc, direction, &pos, &loc);
+ OperationContextNoop txn;
+ return _helper->btree.locate(&txn, key, _helper->dummyDiskLoc, direction, &pos, &loc);
}
// Static
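The test updates here and in btree_logic_test.cpp above all use one idiom: construct an OperationContextNoop on the stack and hand its address to every call whose signature now requires a context. A compilable sketch of that idiom (the function under test is hypothetical):

    // Sketch: a no-op context satisfies the new signatures in unit tests.
    struct OperationContext { virtual ~OperationContext() {} };
    struct OperationContextNoop : public OperationContext { };  // no locking, no recovery unit

    // Hypothetical stand-in for BtreeLogic::locate() and friends.
    bool locate(OperationContext* txn, int key) { (void)txn; return key == 7; }

    int main() {
        OperationContextNoop txn;        // stack-allocated, as in the tests
        return locate(&txn, 7) ? 0 : 1;  // address passed wherever txn is required
    }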
diff --git a/src/mongo/db/structure/catalog/namespace_details_collection_entry.cpp b/src/mongo/db/structure/catalog/namespace_details_collection_entry.cpp
index 018d6a735c3..16bfb2cc5cc 100644
--- a/src/mongo/db/structure/catalog/namespace_details_collection_entry.cpp
+++ b/src/mongo/db/structure/catalog/namespace_details_collection_entry.cpp
@@ -47,8 +47,8 @@ namespace mongo {
_db( db ) {
}
- CollectionOptions NamespaceDetailsCollectionCatalogEntry::getCollectionOptions() const {
- return _db->getCollectionOptions( ns().ns() );
+ CollectionOptions NamespaceDetailsCollectionCatalogEntry::getCollectionOptions(OperationContext* txn) const {
+ return _db->getCollectionOptions( txn, ns().ns() );
}
int NamespaceDetailsCollectionCatalogEntry::getTotalIndexCount() const {
diff --git a/src/mongo/db/structure/catalog/namespace_details_collection_entry.h b/src/mongo/db/structure/catalog/namespace_details_collection_entry.h
index e034abf047a..78a5b96f181 100644
--- a/src/mongo/db/structure/catalog/namespace_details_collection_entry.h
+++ b/src/mongo/db/structure/catalog/namespace_details_collection_entry.h
@@ -52,7 +52,7 @@ namespace mongo {
virtual ~NamespaceDetailsCollectionCatalogEntry(){}
- virtual CollectionOptions getCollectionOptions() const;
+ virtual CollectionOptions getCollectionOptions(OperationContext* txn) const;
virtual int getTotalIndexCount() const;
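The catalog-entry change above is the virtual-interface variant of the same refactor: the pure-virtual declaration and its override gain the parameter together, and the override forwards the context to the layer below. A compilable sketch with illustrative names (not the real catalog types):

    #include <string>

    struct OperationContext { };
    struct CollectionOptions { };

    struct Db {
        CollectionOptions getCollectionOptions(OperationContext*, const std::string&) const {
            return CollectionOptions();
        }
    };

    struct CatalogEntry {                 // stand-in for the catalog-entry interface
        virtual ~CatalogEntry() {}
        virtual CollectionOptions getCollectionOptions(OperationContext* txn) const = 0;
    };

    struct Entry : public CatalogEntry {  // stand-in for the NamespaceDetails entry
        Db* _db;
        std::string _ns;
        // Forwards the context; adds no behavior of its own yet.
        virtual CollectionOptions getCollectionOptions(OperationContext* txn) const {
            return _db->getCollectionOptions(txn, _ns);
        }
    };

    int main() {
        Db db;
        Entry e;
        e._db = &db;
        e._ns = "test.coll";
        OperationContext txn;
        e.getCollectionOptions(&txn);     // context flows interface -> impl -> Db
        return 0;
    }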
diff --git a/src/mongo/db/structure/catalog/namespace_details_rsv1_metadata.cpp b/src/mongo/db/structure/catalog/namespace_details_rsv1_metadata.cpp
index 88625f0104b..4b03af168c8 100644
--- a/src/mongo/db/structure/catalog/namespace_details_rsv1_metadata.cpp
+++ b/src/mongo/db/structure/catalog/namespace_details_rsv1_metadata.cpp
@@ -101,7 +101,7 @@ namespace mongo {
}
}
- const DiskLoc& NamespaceDetailsRSV1MetaData::firstExtent() const {
+ const DiskLoc& NamespaceDetailsRSV1MetaData::firstExtent( OperationContext* txn ) const {
return _details->firstExtent;
}
@@ -109,7 +109,7 @@ namespace mongo {
*txn->recoveryUnit()->writing( &_details->firstExtent ) = loc;
}
- const DiskLoc& NamespaceDetailsRSV1MetaData::lastExtent() const {
+ const DiskLoc& NamespaceDetailsRSV1MetaData::lastExtent( OperationContext* txn ) const {
return _details->lastExtent;
}
@@ -156,7 +156,7 @@ namespace mongo {
return true;
}
- int NamespaceDetailsRSV1MetaData::lastExtentSize() const {
+ int NamespaceDetailsRSV1MetaData::lastExtentSize( OperationContext* txn ) const {
return _details->lastExtentSize;
}
@@ -191,7 +191,8 @@ namespace mongo {
if ( !_namespaceRecordStore )
return;
- scoped_ptr<RecordIterator> iterator( _namespaceRecordStore->getIterator( DiskLoc(),
+ scoped_ptr<RecordIterator> iterator( _namespaceRecordStore->getIterator( txn,
+ DiskLoc(),
false,
CollectionScanParams::FORWARD ) );
while ( !iterator->isEOF() ) {
diff --git a/src/mongo/db/structure/catalog/namespace_details_rsv1_metadata.h b/src/mongo/db/structure/catalog/namespace_details_rsv1_metadata.h
index d4460de8bf2..7e8ab9af7a7 100644
--- a/src/mongo/db/structure/catalog/namespace_details_rsv1_metadata.h
+++ b/src/mongo/db/structure/catalog/namespace_details_rsv1_metadata.h
@@ -77,10 +77,10 @@ namespace mongo {
const DiskLoc& loc );
virtual void orphanDeletedList(OperationContext* txn);
- virtual const DiskLoc& firstExtent() const;
+ virtual const DiskLoc& firstExtent( OperationContext* txn ) const;
virtual void setFirstExtent( OperationContext* txn, const DiskLoc& loc );
- virtual const DiskLoc& lastExtent() const;
+ virtual const DiskLoc& lastExtent( OperationContext* txn ) const;
virtual void setLastExtent( OperationContext* txn, const DiskLoc& loc );
virtual bool isCapped() const;
@@ -91,7 +91,7 @@ namespace mongo {
virtual bool clearUserFlag( OperationContext* txn, int flag );
virtual bool replaceUserFlags( OperationContext* txn, int flags );
- virtual int lastExtentSize() const;
+ virtual int lastExtentSize( OperationContext* txn ) const;
virtual void setLastExtentSize( OperationContext* txn, int newMax );
virtual long long maxCappedDocs() const;
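In the RSV1 metadata getters above, the added txn parameter is accepted but not yet read; the bodies still return the raw _details fields. A small sketch of that shape (placeholder types), assuming the parameter is reserved for later lock assertions:

    struct OperationContext { };
    struct DiskLoc { };
    struct Details { DiskLoc firstExtent; };

    struct Metadata {                     // stand-in for NamespaceDetailsRSV1MetaData
        Details* _details;
        // txn is intentionally unused for now; a later change can assert
        // here that the caller holds the appropriate document read lock.
        const DiskLoc& firstExtent(OperationContext* txn) const {
            (void)txn;
            return _details->firstExtent;
        }
    };

    int main() {
        Details d;
        Metadata m;
        m._details = &d;
        OperationContext txn;
        m.firstExtent(&txn);
        return 0;
    }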
diff --git a/src/mongo/db/structure/record_store.h b/src/mongo/db/structure/record_store.h
index c3805976959..1d6ca0098b0 100644
--- a/src/mongo/db/structure/record_store.h
+++ b/src/mongo/db/structure/record_store.h
@@ -46,6 +46,7 @@ namespace mongo {
class MAdvise;
class NamespaceDetails;
class OperationContext;
+ class Record;
class RecordStoreCompactAdaptor;
class RecordStore;
@@ -133,7 +134,9 @@ namespace mongo {
* @param extraInfo - optional more debug info
* @param level - optional, level of debug info to put in (higher is more)
*/
- virtual int64_t storageSize( BSONObjBuilder* extraInfo = NULL, int infoLevel = 0 ) const = 0;
+ virtual int64_t storageSize( OperationContext* txn,
+ BSONObjBuilder* extraInfo = NULL,
+ int infoLevel = 0 ) const = 0;
// CRUD related
@@ -170,9 +173,10 @@ namespace mongo {
/**
* returned iterator owned by caller
* canonical to get all would be
- * getIterator( DiskLoc(), false, CollectionScanParams::FORWARD )
+ * getIterator( txn, DiskLoc(), false, CollectionScanParams::FORWARD )
*/
- virtual RecordIterator* getIterator( const DiskLoc& start = DiskLoc(),
+ virtual RecordIterator* getIterator( OperationContext* txn,
+ const DiskLoc& start = DiskLoc(),
bool tailable = false,
const CollectionScanParams::Direction& dir =
CollectionScanParams::FORWARD
@@ -183,13 +187,13 @@ namespace mongo {
* damaged records. The iterator might return every record in the store if all of them
* are reachable and not corrupted.
*/
- virtual RecordIterator* getIteratorForRepair() const = 0;
+ virtual RecordIterator* getIteratorForRepair( OperationContext* txn ) const = 0;
/**
* Returns many iterators that partition the RecordStore into many disjoint sets. Iterating
* all returned iterators is equivalent to iterating the full store.
*/
- virtual std::vector<RecordIterator*> getManyIterators() const = 0;
+ virtual std::vector<RecordIterator*> getManyIterators( OperationContext* txn ) const = 0;
// higher level
@@ -233,7 +237,9 @@ namespace mongo {
* @param scaleSize - amount by which to scale size metrics
* appends any custom stats from the RecordStore or other unique stats
*/
- virtual void appendCustomStats( BSONObjBuilder* result, double scale ) const = 0;
+ virtual void appendCustomStats( OperationContext* txn,
+ BSONObjBuilder* result,
+ double scale ) const = 0;
/**
* Load all data into cache.
diff --git a/src/mongo/db/structure/record_store_heap.cpp b/src/mongo/db/structure/record_store_heap.cpp
index bbaa86ebea1..789b8e2fedd 100644
--- a/src/mongo/db/structure/record_store_heap.cpp
+++ b/src/mongo/db/structure/record_store_heap.cpp
@@ -29,6 +29,7 @@
* it in the license file.
*/
+#include "mongo/db/operation_context_noop.h"
#include "mongo/db/structure/record_store_heap.h"
namespace mongo {
@@ -217,29 +218,30 @@ namespace mongo {
return Status::OK();
}
- RecordIterator* HeapRecordStore::getIterator(const DiskLoc& start,
+ RecordIterator* HeapRecordStore::getIterator(OperationContext* txn,
+ const DiskLoc& start,
bool tailable,
const CollectionScanParams::Direction& dir) const {
if (tailable)
invariant(_isCapped && dir == CollectionScanParams::FORWARD);
if (dir == CollectionScanParams::FORWARD) {
- return new HeapRecordIterator(_records, *this, start, tailable);
+ return new HeapRecordIterator(txn, _records, *this, start, tailable);
}
else {
- return new HeapRecordIterator(_records, *this, start);
+ return new HeapRecordIterator(txn, _records, *this, start);
}
}
- RecordIterator* HeapRecordStore::getIteratorForRepair() const {
+ RecordIterator* HeapRecordStore::getIteratorForRepair(OperationContext* txn) const {
// TODO: maybe make this different from HeapRecordIterator
- return new HeapRecordIterator(_records, *this);
+ return new HeapRecordIterator(txn, _records, *this);
}
- std::vector<RecordIterator*> HeapRecordStore::getManyIterators() const {
+ std::vector<RecordIterator*> HeapRecordStore::getManyIterators(OperationContext* txn) const {
std::vector<RecordIterator*> out;
// TODO: maybe find a way to return multiple iterators.
- out.push_back(new HeapRecordIterator(_records, *this));
+ out.push_back(new HeapRecordIterator(txn, _records, *this));
return out;
}
@@ -297,7 +299,9 @@ namespace mongo {
}
- void HeapRecordStore::appendCustomStats( BSONObjBuilder* result, double scale ) const {
+ void HeapRecordStore::appendCustomStats( OperationContext* txn,
+ BSONObjBuilder* result,
+ double scale ) const {
result->append( "note", "HeapRecordStore has no custom stats yet" );
}
@@ -319,7 +323,9 @@ namespace mongo {
invariant(!"increaseStorageSize not yet implemented");
}
- int64_t HeapRecordStore::storageSize(BSONObjBuilder* extraInfo, int infoLevel) const {
+ int64_t HeapRecordStore::storageSize(OperationContext* txn,
+ BSONObjBuilder* extraInfo,
+ int infoLevel) const {
// Note: not making use of extraInfo or infoLevel since we don't have extents
const int64_t recordOverhead = numRecords() * HeapRecord::HeaderSize;
return _dataSize + recordOverhead;
@@ -337,11 +343,13 @@ namespace mongo {
// Forward Iterator
//
- HeapRecordIterator::HeapRecordIterator(const HeapRecordStore::Records& records,
+ HeapRecordIterator::HeapRecordIterator(OperationContext* txn,
+ const HeapRecordStore::Records& records,
const HeapRecordStore& rs,
DiskLoc start,
bool tailable)
- : _tailable(tailable),
+ : _txn(txn),
+ _tailable(tailable),
_lastLoc(minDiskLoc),
_killedByInvalidate(false),
_records(records),
@@ -425,10 +433,12 @@ namespace mongo {
// Reverse Iterator
//
- HeapRecordReverseIterator::HeapRecordReverseIterator(const HeapRecordStore::Records& records,
+ HeapRecordReverseIterator::HeapRecordReverseIterator(OperationContext* txn,
+ const HeapRecordStore::Records& records,
const HeapRecordStore& rs,
DiskLoc start)
- : _killedByInvalidate(false),
+ : _txn(txn),
+ _killedByInvalidate(false),
_records(records),
_rs(rs) {
if (start.isNull()) {
diff --git a/src/mongo/db/structure/record_store_heap.h b/src/mongo/db/structure/record_store_heap.h
index 0ea43cc8d76..59959c4c968 100644
--- a/src/mongo/db/structure/record_store_heap.h
+++ b/src/mongo/db/structure/record_store_heap.h
@@ -79,12 +79,14 @@ namespace mongo {
const char* damangeSource,
const mutablebson::DamageVector& damages );
- virtual RecordIterator* getIterator( const DiskLoc& start, bool tailable,
+ virtual RecordIterator* getIterator( OperationContext* txn,
+ const DiskLoc& start,
+ bool tailable,
const CollectionScanParams::Direction& dir) const;
- virtual RecordIterator* getIteratorForRepair() const;
+ virtual RecordIterator* getIteratorForRepair( OperationContext* txn ) const;
- virtual std::vector<RecordIterator*> getManyIterators() const;
+ virtual std::vector<RecordIterator*> getManyIterators( OperationContext* txn ) const;
virtual Status truncate( OperationContext* txn );
@@ -102,7 +104,9 @@ namespace mongo {
ValidateAdaptor* adaptor,
ValidateResults* results, BSONObjBuilder* output ) const;
- virtual void appendCustomStats( BSONObjBuilder* result, double scale ) const;
+ virtual void appendCustomStats( OperationContext* txn,
+ BSONObjBuilder* result,
+ double scale ) const;
virtual Status touch( OperationContext* txn, BSONObjBuilder* output ) const;
@@ -112,7 +116,9 @@ namespace mongo {
virtual void increaseStorageSize( OperationContext* txn, int size, bool enforceQuota );
- virtual int64_t storageSize(BSONObjBuilder* extraInfo = NULL, int infoLevel = 0) const;
+ virtual int64_t storageSize( OperationContext* txn,
+ BSONObjBuilder* extraInfo = NULL,
+ int infoLevel = 0) const;
virtual long long dataSize() const { return _dataSize; }
@@ -170,7 +176,8 @@ namespace mongo {
class HeapRecordIterator : public RecordIterator {
public:
- HeapRecordIterator(const HeapRecordStore::Records& records,
+ HeapRecordIterator(OperationContext* txn,
+ const HeapRecordStore::Records& records,
const HeapRecordStore& rs,
DiskLoc start = DiskLoc(),
bool tailable = false);
@@ -190,6 +197,7 @@ namespace mongo {
virtual RecordData dataFor( const DiskLoc& loc ) const;
private:
+ OperationContext* _txn; // not owned
HeapRecordStore::Records::const_iterator _it;
bool _tailable;
DiskLoc _lastLoc; // only for restarting tailable
@@ -201,7 +209,8 @@ namespace mongo {
class HeapRecordReverseIterator : public RecordIterator {
public:
- HeapRecordReverseIterator(const HeapRecordStore::Records& records,
+ HeapRecordReverseIterator(OperationContext* txn,
+ const HeapRecordStore::Records& records,
const HeapRecordStore& rs,
DiskLoc start = DiskLoc());
@@ -220,6 +229,7 @@ namespace mongo {
virtual RecordData dataFor( const DiskLoc& loc ) const;
private:
+ OperationContext* _txn; // not owned
HeapRecordStore::Records::const_reverse_iterator _it;
bool _killedByInvalidate;
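Both heap iterators above store the context as a raw pointer marked "not owned": the iterator borrows the caller's OperationContext and must not outlive the operation that created it. A compilable sketch of that ownership convention (simplified container instead of the real record map):

    #include <map>

    struct OperationContext { };

    class Iterator {                     // borrows, never owns, the context
    public:
        Iterator(OperationContext* txn, const std::map<int, int>& records)
            : _txn(txn),                 // caller keeps ownership and lifetime
              _it(records.begin()),
              _end(records.end()) {}

        bool isEOF() const { return _it == _end; }
        void advance() { ++_it; }

    private:
        OperationContext* _txn;          // not owned; must outlive this iterator
        std::map<int, int>::const_iterator _it;
        std::map<int, int>::const_iterator _end;
    };

    int main() {
        std::map<int, int> records;
        records[1] = 10;
        OperationContext txn;            // outlives the iterator below
        Iterator it(&txn, records);
        while (!it.isEOF()) it.advance();
        return 0;
    }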
diff --git a/src/mongo/db/structure/record_store_v1_base.cpp b/src/mongo/db/structure/record_store_v1_base.cpp
index f929b0ef984..784a391e204 100644
--- a/src/mongo/db/structure/record_store_v1_base.cpp
+++ b/src/mongo/db/structure/record_store_v1_base.cpp
@@ -1,7 +1,7 @@
// record_store_v1_base.cpp
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -31,6 +31,7 @@
#include "mongo/db/structure/record_store_v1_base.h"
#include "mongo/db/catalog/collection.h"
+#include "mongo/db/concurrency/lock_mgr.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/storage/mmap_v1/extent.h"
#include "mongo/db/storage/mmap_v1/extent_manager.h"
@@ -71,13 +72,16 @@ namespace mongo {
}
- int64_t RecordStoreV1Base::storageSize( BSONObjBuilder* extraInfo, int level ) const {
+ int64_t RecordStoreV1Base::storageSize( OperationContext* txn,
+ BSONObjBuilder* extraInfo,
+ int level ) const {
BSONArrayBuilder extentInfo;
int64_t total = 0;
int n = 0;
- DiskLoc cur = _details->firstExtent();
+ DiskLoc cur = _details->firstExtent(txn);
+
while ( !cur.isNull() ) {
Extent* e = _extentManager->getExtent( cur );
@@ -87,8 +91,7 @@ namespace mongo {
if ( extraInfo && level > 0 ) {
extentInfo.append( BSON( "len" << e->length << "loc: " << e->myLoc.toBSONObj() ) );
}
-
- cur = e->xnext;
+ cur = e->xnext;
}
if ( extraInfo ) {
@@ -118,27 +121,28 @@ namespace mongo {
return reinterpret_cast<DeletedRecord*>( recordFor( loc ) );
}
- Extent* RecordStoreV1Base::_getExtent( const DiskLoc& loc ) const {
+ Extent* RecordStoreV1Base::_getExtent( OperationContext* txn, const DiskLoc& loc ) const {
return _extentManager->getExtent( loc );
}
- DiskLoc RecordStoreV1Base::_getExtentLocForRecord( const DiskLoc& loc ) const {
+ DiskLoc RecordStoreV1Base::_getExtentLocForRecord( OperationContext* txn, const DiskLoc& loc ) const {
return _extentManager->extentLocForV1( loc );
}
- DiskLoc RecordStoreV1Base::getNextRecord( const DiskLoc& loc ) const {
- DiskLoc next = getNextRecordInExtent( loc );
- if ( !next.isNull() )
+ DiskLoc RecordStoreV1Base::getNextRecord( OperationContext* txn, const DiskLoc& loc ) const {
+ DiskLoc next = getNextRecordInExtent( txn, loc );
+ if ( !next.isNull() ) {
return next;
+ }
// now traverse extents
- Extent* e = _getExtent( _getExtentLocForRecord(loc) );
+ Extent* e = _getExtent( txn, _getExtentLocForRecord(txn, loc) );
while ( 1 ) {
if ( e->xnext.isNull() )
return DiskLoc(); // end of collection
- e = _getExtent( e->xnext );
+ e = _getExtent( txn, e->xnext );
if ( !e->firstRecord.isNull() )
break;
// entire extent could be empty, keep looking
@@ -146,18 +150,19 @@ namespace mongo {
return e->firstRecord;
}
- DiskLoc RecordStoreV1Base::getPrevRecord( const DiskLoc& loc ) const {
- DiskLoc prev = getPrevRecordInExtent( loc );
- if ( !prev.isNull() )
+ DiskLoc RecordStoreV1Base::getPrevRecord( OperationContext* txn, const DiskLoc& loc ) const {
+ DiskLoc prev = getPrevRecordInExtent( txn, loc );
+ if ( !prev.isNull() ) {
return prev;
+ }
// now traverse extents
- Extent *e = _getExtent(_getExtentLocForRecord(loc));
+ Extent *e = _getExtent(txn, _getExtentLocForRecord(txn, loc));
while ( 1 ) {
if ( e->xprev.isNull() )
return DiskLoc(); // end of collection
- e = _getExtent( e->xprev );
+ e = _getExtent( txn, e->xprev );
if ( !e->firstRecord.isNull() )
break;
// entire extent could be empty, keep looking
@@ -167,7 +172,8 @@ namespace mongo {
}
DiskLoc RecordStoreV1Base::_findFirstSpot( OperationContext* txn,
- const DiskLoc& extDiskLoc, Extent* e ) {
+ const DiskLoc& extDiskLoc,
+ Extent* e ) {
DiskLoc emptyLoc = extDiskLoc;
emptyLoc.inc( Extent::HeaderSize() );
int delRecLength = e->length - Extent::HeaderSize();
@@ -188,25 +194,26 @@ namespace mongo {
}
- DiskLoc RecordStoreV1Base::getNextRecordInExtent( const DiskLoc& loc ) const {
+ DiskLoc RecordStoreV1Base::getNextRecordInExtent( OperationContext* txn, const DiskLoc& loc ) const {
int nextOffset = recordFor( loc )->nextOfs();
if ( nextOffset == DiskLoc::NullOfs )
return DiskLoc();
fassert( 17441, abs(nextOffset) >= 8 ); // defensive
- return DiskLoc( loc.a(), nextOffset );
+ DiskLoc result( loc.a(), nextOffset );
+ return result;
}
- DiskLoc RecordStoreV1Base::getPrevRecordInExtent( const DiskLoc& loc ) const {
+ DiskLoc RecordStoreV1Base::getPrevRecordInExtent( OperationContext* txn, const DiskLoc& loc ) const {
int prevOffset = recordFor( loc )->prevOfs();
if ( prevOffset == DiskLoc::NullOfs )
return DiskLoc();
fassert( 17442, abs(prevOffset) >= 8 ); // defensive
- return DiskLoc( loc.a(), prevOffset );
-
+ DiskLoc result( loc.a(), prevOffset );
+ return result;
}
@@ -356,13 +363,13 @@ namespace mongo {
/* remove ourself from the record next/prev chain */
{
if ( todelete->prevOfs() != DiskLoc::NullOfs ) {
- DiskLoc prev = getPrevRecordInExtent( dl );
+ DiskLoc prev = getPrevRecordInExtent( txn, dl );
Record* prevRecord = recordFor( prev );
txn->recoveryUnit()->writingInt( prevRecord->nextOfs() ) = todelete->nextOfs();
}
if ( todelete->nextOfs() != DiskLoc::NullOfs ) {
- DiskLoc next = getNextRecord( dl );
+ DiskLoc next = getNextRecord( txn, dl );
Record* nextRecord = recordFor( next );
txn->recoveryUnit()->writingInt( nextRecord->prevOfs() ) = todelete->prevOfs();
}
@@ -370,7 +377,8 @@ namespace mongo {
/* remove ourself from extent pointers */
{
- Extent *e = _getExtent( todelete->myExtentLoc(dl) );
+ DiskLoc extentLoc = todelete->myExtentLoc(dl);
+ Extent *e = _getExtent( txn, extentLoc );
if ( e->firstRecord == dl ) {
txn->recoveryUnit()->writing(&e->firstRecord);
if ( todelete->nextOfs() == DiskLoc::NullOfs )
@@ -410,15 +418,16 @@ namespace mongo {
}
- RecordIterator* RecordStoreV1Base::getIteratorForRepair() const {
- return new RecordStoreV1RepairIterator(this);
+ RecordIterator* RecordStoreV1Base::getIteratorForRepair(OperationContext* txn) const {
+ return new RecordStoreV1RepairIterator(txn, this);
}
void RecordStoreV1Base::_addRecordToRecListInExtent(OperationContext* txn,
Record *r,
DiskLoc loc) {
dassert( recordFor(loc) == r );
- Extent *e = _getExtent( _getExtentLocForRecord( loc ) );
+ DiskLoc extentLoc = _getExtentLocForRecord( txn, loc );
+ Extent *e = _getExtent( txn, extentLoc );
if ( e->lastRecord.isNull() ) {
*txn->recoveryUnit()->writing(&e->firstRecord) = loc;
*txn->recoveryUnit()->writing(&e->lastRecord) = loc;
@@ -440,7 +449,6 @@ namespace mongo {
isCapped(),
size,
enforceQuota );
-
Extent *e = _extentManager->getExtent( eloc );
invariant( e );
@@ -453,8 +461,8 @@ namespace mongo {
DiskLoc emptyLoc = _findFirstSpot( txn, eloc, e );
- if ( _details->lastExtent().isNull() ) {
- invariant( _details->firstExtent().isNull() );
+ if ( _details->lastExtent(txn).isNull() ) {
+ invariant( _details->firstExtent(txn).isNull() );
_details->setFirstExtent( txn, eloc );
_details->setLastExtent( txn, eloc );
_details->setCapExtent( txn, eloc );
@@ -462,9 +470,9 @@ namespace mongo {
invariant( e->xnext.isNull() );
}
else {
- invariant( !_details->firstExtent().isNull() );
- *txn->recoveryUnit()->writing(&e->xprev) = _details->lastExtent();
- *txn->recoveryUnit()->writing(&_extentManager->getExtent(_details->lastExtent())->xnext) = eloc;
+ invariant( !_details->firstExtent(txn).isNull() );
+ *txn->recoveryUnit()->writing(&e->xprev) = _details->lastExtent(txn);
+ *txn->recoveryUnit()->writing(&_extentManager->getExtent(_details->lastExtent(txn))->xnext) = eloc;
_details->setLastExtent( txn, eloc );
}
@@ -494,36 +502,37 @@ namespace mongo {
output->appendNumber("datasize", _details->dataSize());
output->appendNumber("nrecords", _details->numRecords());
- output->appendNumber("lastExtentSize", _details->lastExtentSize());
+ output->appendNumber("lastExtentSize", _details->lastExtentSize(txn));
output->appendNumber("padding", _details->paddingFactor());
- if ( _details->firstExtent().isNull() )
+ if ( _details->firstExtent(txn).isNull() )
output->append( "firstExtent", "null" );
else
output->append( "firstExtent",
- str::stream() << _details->firstExtent().toString()
+ str::stream() << _details->firstExtent(txn).toString()
<< " ns:"
- << _getExtent( _details->firstExtent() )->nsDiagnostic.toString());
- if ( _details->lastExtent().isNull() )
+ << _getExtent( txn, _details->firstExtent(txn) )->nsDiagnostic.toString());
+ if ( _details->lastExtent(txn).isNull() )
output->append( "lastExtent", "null" );
else
- output->append( "lastExtent", str::stream() << _details->lastExtent().toString()
+ output->append( "lastExtent", str::stream() << _details->lastExtent(txn).toString()
<< " ns:"
- << _getExtent( _details->lastExtent() )->nsDiagnostic.toString());
+ << _getExtent( txn, _details->lastExtent(txn) )->nsDiagnostic.toString());
// 22222222222222222222222222
{ // validate extent basics
BSONArrayBuilder extentData;
int extentCount = 0;
+ DiskLoc extentDiskLoc;
try {
- if ( !_details->firstExtent().isNull() ) {
- _getExtent( _details->firstExtent() )->assertOk();
- _getExtent( _details->lastExtent() )->assertOk();
+ if ( !_details->firstExtent(txn).isNull() ) {
+ _getExtent( txn, _details->firstExtent(txn) )->assertOk();
+ _getExtent( txn, _details->lastExtent(txn) )->assertOk();
}
- DiskLoc extentDiskLoc = _details->firstExtent();
+ extentDiskLoc = _details->firstExtent(txn);
while (!extentDiskLoc.isNull()) {
- Extent* thisExtent = _getExtent( extentDiskLoc );
+ Extent* thisExtent = _getExtent( txn, extentDiskLoc );
if (full) {
extentData << thisExtent->dump();
}
@@ -531,18 +540,19 @@ namespace mongo {
results->valid = false;
}
DiskLoc nextDiskLoc = thisExtent->xnext;
+
if (extentCount > 0 && !nextDiskLoc.isNull()
- && _getExtent( nextDiskLoc )->xprev != extentDiskLoc) {
+ && _getExtent( txn, nextDiskLoc )->xprev != extentDiskLoc) {
StringBuilder sb;
- sb << "'xprev' pointer " << _getExtent( nextDiskLoc )->xprev.toString()
+ sb << "'xprev' pointer " << _getExtent( txn, nextDiskLoc )->xprev.toString()
<< " in extent " << nextDiskLoc.toString()
<< " does not point to extent " << extentDiskLoc.toString();
results->errors.push_back( sb.str() );
results->valid = false;
}
- if (nextDiskLoc.isNull() && extentDiskLoc != _details->lastExtent()) {
+ if (nextDiskLoc.isNull() && extentDiskLoc != _details->lastExtent(txn)) {
StringBuilder sb;
- sb << "'lastExtent' pointer " << _details->lastExtent().toString()
+ sb << "'lastExtent' pointer " << _details->lastExtent(txn).toString()
<< " does not point to last extent in list " << extentDiskLoc.toString();
results->errors.push_back( sb.str() );
results->valid = false;
@@ -571,31 +581,33 @@ namespace mongo {
// 333333333333333333333333333
bool testingLastExtent = false;
try {
- if (_details->firstExtent().isNull()) {
+ DiskLoc firstExtentLoc = _details->firstExtent(txn);
+ if (firstExtentLoc.isNull()) {
// this is ok
}
else {
- output->append("firstExtentDetails", _getExtent(_details->firstExtent())->dump());
- if (!_getExtent(_details->firstExtent())->xprev.isNull()) {
+ output->append("firstExtentDetails", _getExtent(txn, firstExtentLoc)->dump());
+ if (!_getExtent(txn, firstExtentLoc)->xprev.isNull()) {
StringBuilder sb;
- sb << "'xprev' pointer in 'firstExtent' " << _details->firstExtent().toString()
- << " is " << _getExtent(_details->firstExtent())->xprev.toString()
+ sb << "'xprev' pointer in 'firstExtent' " << _details->firstExtent(txn).toString()
+ << " is " << _getExtent(txn, firstExtentLoc)->xprev.toString()
<< ", should be null";
results->errors.push_back( sb.str() );
results->valid = false;
}
}
testingLastExtent = true;
- if (_details->lastExtent().isNull()) {
+ DiskLoc lastExtentLoc = _details->lastExtent(txn);
+ if (lastExtentLoc.isNull()) {
// this is ok
}
else {
- if (_details->firstExtent() != _details->lastExtent()) {
- output->append("lastExtentDetails", _getExtent(_details->lastExtent())->dump());
- if (!_getExtent(_details->lastExtent())->xnext.isNull()) {
+ if (firstExtentLoc != lastExtentLoc) {
+ output->append("lastExtentDetails", _getExtent(txn, lastExtentLoc)->dump());
+ if (!_getExtent(txn, lastExtentLoc)->xnext.isNull()) {
StringBuilder sb;
- sb << "'xnext' pointer in 'lastExtent' " << _details->lastExtent().toString()
- << " is " << _getExtent(_details->lastExtent())->xnext.toString()
+ sb << "'xnext' pointer in 'lastExtent' " << lastExtentLoc.toString()
+ << " is " << _getExtent(txn, lastExtentLoc)->xnext.toString()
<< ", should be null";
results->errors.push_back( sb.str() );
results->valid = false;
@@ -626,7 +638,8 @@ namespace mongo {
int outOfOrder = 0;
DiskLoc cl_last;
- scoped_ptr<RecordIterator> iterator( getIterator( DiskLoc(),
+ scoped_ptr<RecordIterator> iterator( getIterator( txn,
+ DiskLoc(),
false,
CollectionScanParams::FORWARD ) );
DiskLoc cl;
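The validation hunks above are the standard consistency checks for a doubly linked list of extents: every extent's xprev must point back at its predecessor, and the extent whose xnext is null must be the one recorded as lastExtent. A self-contained sketch of those two checks, using invented index-based links (-1 as null) in place of DiskLoc:

    #include <string>
    #include <vector>

    // Sketch of the two extent-chain checks above; all types are invented
    // stand-ins, with int indices playing the role of DiskLoc links.
    struct Extent { int xprev = -1; int xnext = -1; };

    bool validateChain(const std::vector<Extent>& extents,
                       int firstExtent, int lastExtent,
                       std::vector<std::string>* errors) {
        bool valid = true;
        int prev = -1;
        for (int e = firstExtent; e != -1; e = extents[e].xnext) {
            if (extents[e].xprev != prev) {  // 'xprev' must point to the predecessor
                errors->push_back("bad xprev in extent " + std::to_string(e));
                valid = false;
            }
            if (extents[e].xnext == -1 && e != lastExtent) {  // chain must end at lastExtent
                errors->push_back("'lastExtent' does not point to last extent in list");
                valid = false;
            }
            prev = e;
        }
        return valid;
    }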
@@ -771,8 +784,10 @@ namespace mongo {
return Status::OK();
}
- void RecordStoreV1Base::appendCustomStats( BSONObjBuilder* result, double scale ) const {
- result->append( "lastExtentSize", _details->lastExtentSize() / scale );
+ void RecordStoreV1Base::appendCustomStats( OperationContext* txn,
+ BSONObjBuilder* result,
+ double scale ) const {
+ result->append( "lastExtentSize", _details->lastExtentSize(txn) / scale );
result->append( "paddingFactor", _details->paddingFactor() );
result->append( "userFlags", _details->userFlags() );
@@ -793,21 +808,21 @@ namespace mongo {
Status RecordStoreV1Base::touch( OperationContext* txn, BSONObjBuilder* output ) const {
Timer t;
- // Note: when this class has document level locking, we'll need a lock to get extents
- // and then ideally only hold the collection lock from above while doing actual touching.
-
std::vector<touch_location> ranges;
{
- Extent* ext = _getExtent( _details->firstExtent() );
+ DiskLoc nextLoc = _details->firstExtent(txn);
+ Extent* ext = _getExtent( txn, nextLoc );
while ( ext ) {
touch_location tl;
tl.root = reinterpret_cast<const char*>(ext);
tl.length = ext->length;
ranges.push_back(tl);
- if ( ext->xnext.isNull() )
+
+ nextLoc = ext->xnext;
+ if ( nextLoc.isNull() )
ext = NULL;
else
- ext = _getExtent( ext->xnext );
+ ext = _getExtent( txn, nextLoc );
}
}
@@ -859,8 +874,9 @@ namespace mongo {
}
void RecordStoreV1Base::IntraExtentIterator::invalidate(const DiskLoc& dl) {
- if (dl == _curr)
+ if (dl == _curr) {
getNext();
+ }
}
/* @return the size for an allocated record quantized to 1/16th of the BucketSize
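The quantization rule in the comment above is easiest to see with numbers. One plausible reading (an assumption, not taken from this patch) is that the requested size is rounded up to the next multiple of BucketSize/16; a sketch with a hypothetical helper:

    #include <cassert>

    // Hypothetical helper, not the patch's implementation: round allocSize
    // up to the next multiple of bucketSize/16.
    int quantizeToBucketSixteenth(int allocSize, int bucketSize) {
        const int step = bucketSize / 16;               // quantization granularity
        return ((allocSize + step - 1) / step) * step;  // round up to a multiple of step
    }

    int main() {
        // With a 1024-byte bucket the step is 64 bytes: 100 -> 128, 64 -> 64.
        assert(quantizeToBucketSixteenth(100, 1024) == 128);
        assert(quantizeToBucketSixteenth(64, 1024) == 64);
        return 0;
    }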
diff --git a/src/mongo/db/structure/record_store_v1_base.h b/src/mongo/db/structure/record_store_v1_base.h
index 3d393e355ee..7265528762a 100644
--- a/src/mongo/db/structure/record_store_v1_base.h
+++ b/src/mongo/db/structure/record_store_v1_base.h
@@ -1,7 +1,7 @@
// record_store_v1_base.h
/**
-* Copyright (C) 2013 10gen Inc.
+* Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -72,10 +72,10 @@ namespace mongo {
const DiskLoc& loc ) = 0;
virtual void orphanDeletedList(OperationContext* txn) = 0;
- virtual const DiskLoc& firstExtent() const = 0;
+ virtual const DiskLoc& firstExtent( OperationContext* txn ) const = 0;
virtual void setFirstExtent( OperationContext* txn, const DiskLoc& loc ) = 0;
- virtual const DiskLoc& lastExtent() const = 0;
+ virtual const DiskLoc& lastExtent( OperationContext* txn ) const = 0;
virtual void setLastExtent( OperationContext* txn, const DiskLoc& loc ) = 0;
virtual bool isCapped() const = 0;
@@ -86,7 +86,7 @@ namespace mongo {
virtual bool clearUserFlag( OperationContext* txn, int flag ) = 0;
virtual bool replaceUserFlags( OperationContext* txn, int flags ) = 0;
- virtual int lastExtentSize() const = 0;
+ virtual int lastExtentSize( OperationContext* txn) const = 0;
virtual void setLastExtentSize( OperationContext* txn, int newMax ) = 0;
virtual long long maxCappedDocs() const = 0;
@@ -127,7 +127,9 @@ namespace mongo {
virtual long long dataSize() const { return _details->dataSize(); }
virtual long long numRecords() const { return _details->numRecords(); }
- virtual int64_t storageSize( BSONObjBuilder* extraInfo = NULL, int level = 0 ) const;
+ virtual int64_t storageSize( OperationContext* txn,
+ BSONObjBuilder* extraInfo = NULL,
+ int level = 0 ) const;
virtual RecordData dataFor( const DiskLoc& loc ) const;
@@ -155,7 +157,7 @@ namespace mongo {
const char* damangeSource,
const mutablebson::DamageVector& damages );
- virtual RecordIterator* getIteratorForRepair() const;
+ virtual RecordIterator* getIteratorForRepair( OperationContext* txn ) const;
void increaseStorageSize( OperationContext* txn, int size, bool enforceQuota );
@@ -164,7 +166,9 @@ namespace mongo {
ValidateAdaptor* adaptor,
ValidateResults* results, BSONObjBuilder* output ) const;
- virtual void appendCustomStats( BSONObjBuilder* result, double scale ) const;
+ virtual void appendCustomStats( OperationContext* txn,
+ BSONObjBuilder* result,
+ double scale ) const;
virtual Status touch( OperationContext* txn, BSONObjBuilder* output ) const;
@@ -177,13 +181,13 @@ namespace mongo {
*/
int getRecordAllocationSize( int minRecordSize ) const;
- DiskLoc getExtentLocForRecord( const DiskLoc& loc ) const;
+ DiskLoc getExtentLocForRecord( OperationContext* txn, const DiskLoc& loc ) const;
- DiskLoc getNextRecord( const DiskLoc& loc ) const;
- DiskLoc getPrevRecord( const DiskLoc& loc ) const;
+ DiskLoc getNextRecord( OperationContext* txn, const DiskLoc& loc ) const;
+ DiskLoc getPrevRecord( OperationContext* txn, const DiskLoc& loc ) const;
- DiskLoc getNextRecordInExtent( const DiskLoc& loc ) const;
- DiskLoc getPrevRecordInExtent( const DiskLoc& loc ) const;
+ DiskLoc getNextRecordInExtent( OperationContext* txn, const DiskLoc& loc ) const;
+ DiskLoc getPrevRecordInExtent( OperationContext* txn, const DiskLoc& loc ) const;
/* @return the size for an allocated record quantized to 1/16th of the BucketSize.
@param allocSize requested size to allocate
@@ -221,15 +225,15 @@ namespace mongo {
virtual DeletedRecord* drec( const DiskLoc& loc ) const;
// just a wrapper for _extentManager->getExtent( loc );
- Extent* _getExtent( const DiskLoc& loc ) const;
+ Extent* _getExtent( OperationContext* txn, const DiskLoc& loc ) const;
- DiskLoc _getExtentLocForRecord( const DiskLoc& loc ) const;
+ DiskLoc _getExtentLocForRecord( OperationContext* txn, const DiskLoc& loc ) const;
- DiskLoc _getNextRecord( const DiskLoc& loc ) const;
- DiskLoc _getPrevRecord( const DiskLoc& loc ) const;
+ DiskLoc _getNextRecord( OperationContext* txn, const DiskLoc& loc ) const;
+ DiskLoc _getPrevRecord( OperationContext* txn, const DiskLoc& loc ) const;
- DiskLoc _getNextRecordInExtent( const DiskLoc& loc ) const;
- DiskLoc _getPrevRecordInExtent( const DiskLoc& loc ) const;
+ DiskLoc _getNextRecordInExtent( OperationContext* txn, const DiskLoc& loc ) const;
+ DiskLoc _getPrevRecordInExtent( OperationContext* txn, const DiskLoc& loc ) const;
/**
* finds the first suitable DiskLoc for data
@@ -268,14 +272,17 @@ namespace mongo {
*/
class RecordStoreV1Base::IntraExtentIterator : public RecordIterator {
public:
- IntraExtentIterator(DiskLoc start, const RecordStoreV1Base* rs, bool forward = true)
- : _curr(start), _rs(rs), _forward(forward) {}
+ IntraExtentIterator(OperationContext* txn,
+ DiskLoc start,
+ const RecordStoreV1Base* rs,
+ bool forward = true)
+ : _txn(txn), _curr(start), _rs(rs), _forward(forward) {}
virtual bool isEOF() { return _curr.isNull(); }
virtual DiskLoc curr() { return _curr; }
        virtual DiskLoc getNext();
virtual void invalidate(const DiskLoc& dl);
@@ -287,6 +294,7 @@ namespace mongo {
private:
virtual const Record* recordFor( const DiskLoc& loc ) const { return _rs->recordFor(loc); }
+ OperationContext* _txn;
DiskLoc _curr;
const RecordStoreV1Base* _rs;
bool _forward;
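Every header change in this file follows the same mechanical pattern: read-path methods gain a leading OperationContext* supplied by the caller, and iterators stash a non-owning copy of that pointer so later getNext() calls can forward it. A minimal self-contained sketch of the pattern, with invented stand-in types rather than MongoDB's:

    // All names below are illustrative stand-ins, not MongoDB's types.
    struct OperationContext {};  // transactional context; owned by the caller
    struct DiskLoc { int ofs = -1; bool isNull() const { return ofs < 0; } };

    struct Store {
        // Before: DiskLoc getNextRecord(const DiskLoc&) const;
        // After:  the context is threaded through every read.
        DiskLoc getNextRecord(OperationContext* txn, const DiskLoc& loc) const {
            (void)txn;  // would be consulted for document-level read locking
            return DiskLoc{loc.ofs + 1};
        }
    };

    class Iterator {
    public:
        Iterator(OperationContext* txn, const Store* store, DiskLoc start)
            : _txn(txn), _store(store), _curr(start) {}
        DiskLoc getNext() {
            DiskLoc out = _curr;
            _curr = _store->getNextRecord(_txn, _curr);  // forward the stored context
            return out;
        }
    private:
        OperationContext* _txn;  // not owned; must not outlive the caller's context
        const Store* _store;
        DiskLoc _curr;
    };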
diff --git a/src/mongo/db/structure/record_store_v1_capped.cpp b/src/mongo/db/structure/record_store_v1_capped.cpp
index 57657a1f3b2..fbc6c2e8186 100644
--- a/src/mongo/db/structure/record_store_v1_capped.cpp
+++ b/src/mongo/db/structure/record_store_v1_capped.cpp
@@ -67,7 +67,7 @@ namespace mongo {
: RecordStoreV1Base( ns, details, em, isSystemIndexes ),
_deleteCallback( collection ) {
- DiskLoc extentLoc = details->firstExtent();
+ DiskLoc extentLoc = details->firstExtent(txn);
while ( !extentLoc.isNull() ) {
_extentAdvice.push_back( _extentManager->cacheHint( extentLoc,
ExtentManager::Sequential ) );
@@ -94,11 +94,11 @@ namespace mongo {
// the extent check is a way to try and improve performance
// since we have to iterate all the extents (for now) to get
// storage size
- if ( lenToAlloc > storageSize() ) {
+ if ( lenToAlloc > storageSize(txn) ) {
return StatusWith<DiskLoc>( ErrorCodes::BadValue,
mongoutils::str::stream()
<< "document is larger than capped size "
- << lenToAlloc << " > " << storageSize(),
+ << lenToAlloc << " > " << storageSize(txn),
16328 );
}
@@ -136,7 +136,7 @@ namespace mongo {
if ( !_details->capFirstNewRecord().isValid() ) {
advanceCapExtent( txn, _ns );
- if ( _details->capExtent() != _details->firstExtent() )
+ if ( _details->capExtent() != _details->firstExtent(txn) )
_details->setCapFirstNewRecord( txn, DiskLoc().setInvalid() );
// else signal done with first iteration through extents.
continue;
@@ -155,7 +155,7 @@ namespace mongo {
firstEmptyExtent = _details->capExtent();
advanceCapExtent( txn, _ns );
if ( firstEmptyExtent == _details->capExtent() ) {
- _maybeComplain( lenToAlloc );
+ _maybeComplain( txn, lenToAlloc );
return StatusWith<DiskLoc>( ErrorCodes::InternalError,
"no space in capped collection" );
}
@@ -220,7 +220,7 @@ namespace mongo {
setListOfAllDeletedRecords( txn, DiskLoc() );
// preserve firstExtent/lastExtent
- _details->setCapExtent( txn, _details->firstExtent() );
+ _details->setCapExtent( txn, _details->firstExtent(txn) );
_details->setStats( txn, 0, 0 );
// preserve lastExtentSize
// nIndexes preserve 0
@@ -234,7 +234,7 @@ namespace mongo {
// Reset all existing extents and recreate the deleted list.
Extent* ext;
- for( DiskLoc extLoc = _details->firstExtent();
+ for( DiskLoc extLoc = _details->firstExtent(txn);
!extLoc.isNull();
extLoc = ext->xnext ) {
ext = _extentManager->getExtent(extLoc);
@@ -340,7 +340,7 @@ namespace mongo {
// NOTE cappedLastDelRecLastExtent() set to DiskLoc() in above
// Last, in case we're killed before getting here
- _details->setCapExtent( txn, _details->firstExtent() );
+ _details->setCapExtent( txn, _details->firstExtent(txn) );
}
}
@@ -369,7 +369,7 @@ namespace mongo {
void CappedRecordStoreV1::advanceCapExtent( OperationContext* txn, const StringData& ns ) {
// We want cappedLastDelRecLastExtent() to be the last DeletedRecord of the prev cap extent
// (or DiskLoc() if new capExtent == firstExtent)
- if ( _details->capExtent() == _details->lastExtent() )
+ if ( _details->capExtent() == _details->lastExtent(txn) )
setLastDelRecLastExtent( txn, DiskLoc() );
else {
DiskLoc i = cappedFirstDeletedInCurExtent();
@@ -378,7 +378,7 @@ namespace mongo {
}
_details->setCapExtent( txn,
- theCapExtent()->xnext.isNull() ? _details->firstExtent()
+ theCapExtent()->xnext.isNull() ? _details->firstExtent(txn)
: theCapExtent()->xnext );
/* this isn't true if a collection has been renamed...that is ok just used for diagnostics */
@@ -415,7 +415,7 @@ namespace mongo {
}
void CappedRecordStoreV1::cappedTruncateLastDelUpdate(OperationContext* txn) {
- if ( _details->capExtent() == _details->firstExtent() ) {
+ if ( _details->capExtent() == _details->firstExtent(txn) ) {
// Only one extent of the collection is in use, so there
// is no deleted record in a previous extent, so nullify
// cappedLastDelRecLastExtent().
@@ -522,8 +522,8 @@ namespace mongo {
DiskLoc newCapExtent = _details->capExtent();
do {
// Find the previous extent, looping if necessary.
- newCapExtent = ( newCapExtent == _details->firstExtent() ) ?
- _details->lastExtent() :
+ newCapExtent = ( newCapExtent == _details->firstExtent(txn) ) ?
+ _details->lastExtent(txn) :
_extentManager->getExtent(newCapExtent)->xprev;
_extentManager->getExtent(newCapExtent)->assertOk();
}
@@ -586,23 +586,26 @@ namespace mongo {
}
}
- RecordIterator* CappedRecordStoreV1::getIterator( const DiskLoc& start, bool tailable,
+ RecordIterator* CappedRecordStoreV1::getIterator( OperationContext* txn,
+ const DiskLoc& start,
+ bool tailable,
const CollectionScanParams::Direction& dir) const {
- return new CappedRecordStoreV1Iterator( this, start, tailable, dir );
+ return new CappedRecordStoreV1Iterator( txn, this, start, tailable, dir );
}
- vector<RecordIterator*> CappedRecordStoreV1::getManyIterators() const {
+ vector<RecordIterator*> CappedRecordStoreV1::getManyIterators( OperationContext* txn ) const {
OwnedPointerVector<RecordIterator> iterators;
if (!_details->capLooped()) {
// if we haven't looped yet, just spit out all extents (same as non-capped impl)
const Extent* ext;
- for (DiskLoc extLoc = details()->firstExtent(); !extLoc.isNull(); extLoc = ext->xnext) {
- ext = _getExtent(extLoc);
+ for (DiskLoc extLoc = details()->firstExtent(txn); !extLoc.isNull(); extLoc = ext->xnext) {
+ ext = _getExtent(txn, extLoc);
if (ext->firstRecord.isNull())
continue;
- iterators.push_back(new RecordStoreV1Base::IntraExtentIterator(ext->firstRecord,
+ iterators.push_back(new RecordStoreV1Base::IntraExtentIterator(txn,
+ ext->firstRecord,
this));
}
}
@@ -616,28 +619,32 @@ namespace mongo {
// First do the "old" portion of capExtent if there is any
DiskLoc extLoc = capExtent;
{
- const Extent* ext = _getExtent(extLoc);
+ const Extent* ext = _getExtent(txn, extLoc);
if (ext->firstRecord != details()->capFirstNewRecord()) {
// this means there is old data in capExtent
- iterators.push_back(new RecordStoreV1Base::IntraExtentIterator(ext->firstRecord,
+ iterators.push_back(new RecordStoreV1Base::IntraExtentIterator(txn,
+ ext->firstRecord,
this));
}
- extLoc = ext->xnext.isNull() ? details()->firstExtent() : ext->xnext;
+ extLoc = ext->xnext.isNull() ? details()->firstExtent(txn) : ext->xnext;
}
// Next handle all the other extents
while (extLoc != capExtent) {
- const Extent* ext = _getExtent(extLoc);
- iterators.push_back(new RecordStoreV1Base::IntraExtentIterator(ext->firstRecord,
+ const Extent* ext = _getExtent(txn, extLoc);
+ iterators.push_back(new RecordStoreV1Base::IntraExtentIterator(txn,
+ ext->firstRecord,
this));
- extLoc = ext->xnext.isNull() ? details()->firstExtent() : ext->xnext;
+ extLoc = ext->xnext.isNull() ? details()->firstExtent(txn) : ext->xnext;
}
// Finally handle the "new" data in the capExtent
iterators.push_back(
- new RecordStoreV1Base::IntraExtentIterator(details()->capFirstNewRecord(), this));
+ new RecordStoreV1Base::IntraExtentIterator(txn,
+ details()->capFirstNewRecord(),
+ this));
}
return iterators.release();
@@ -650,13 +657,13 @@ namespace mongo {
invariant(false);
}
- void CappedRecordStoreV1::_maybeComplain( int len ) const {
+ void CappedRecordStoreV1::_maybeComplain( OperationContext* txn, int len ) const {
RARELY {
std::stringstream buf;
buf << "couldn't make room for record len: " << len << " in capped ns " << _ns << '\n';
buf << "numRecords: " << numRecords() << '\n';
int i = 0;
- for ( DiskLoc e = _details->firstExtent();
+ for ( DiskLoc e = _details->firstExtent(txn);
!e.isNull();
e = _extentManager->getExtent( e )->xnext, ++i ) {
buf << " Extent " << i;
@@ -676,12 +683,13 @@ namespace mongo {
warning() << buf.str();
// assume it is unusually large record; if not, something is broken
- fassert( 17438, len * 5 > _details->lastExtentSize() );
+ fassert( 17438, len * 5 > _details->lastExtentSize(txn) );
}
}
- DiskLoc CappedRecordStoreV1::firstRecord( const DiskLoc &startExtent ) const {
- for (DiskLoc i = startExtent.isNull() ? _details->firstExtent() : startExtent;
+ DiskLoc CappedRecordStoreV1::firstRecord( OperationContext* txn,
+ const DiskLoc &startExtent ) const {
+ for (DiskLoc i = startExtent.isNull() ? _details->firstExtent(txn) : startExtent;
!i.isNull();
i = _extentManager->getExtent( i )->xnext ) {
@@ -693,8 +701,9 @@ namespace mongo {
return DiskLoc();
}
- DiskLoc CappedRecordStoreV1::lastRecord( const DiskLoc &startExtent ) const {
- for (DiskLoc i = startExtent.isNull() ? _details->lastExtent() : startExtent;
+ DiskLoc CappedRecordStoreV1::lastRecord( OperationContext* txn,
+ const DiskLoc &startExtent ) const {
+ for (DiskLoc i = startExtent.isNull() ? _details->lastExtent(txn) : startExtent;
!i.isNull();
i = _extentManager->getExtent( i )->xprev ) {
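The getManyIterators() hunks above visit a looped capped collection in insertion order: the old tail of capExtent first, then every other extent, wrapping from the end of the extent list back to firstExtent, and finally the new records at the front of capExtent. The middle part, walking every non-cap extent exactly once with wrap-around, reduces to one successor function; a sketch over invented index-based links:

    #include <vector>

    // Sketch of the wrap-around extent walk; ints stand in for DiskLoc and
    // xnext[e] == -1 plays the role of a null xnext link.
    std::vector<int> cappedExtentOrder(const std::vector<int>& xnext,
                                       int firstExtent, int capExtent) {
        auto successor = [&](int e) {
            // mirrors: extLoc = ext->xnext.isNull() ? firstExtent : ext->xnext
            return xnext[e] == -1 ? firstExtent : xnext[e];
        };
        std::vector<int> order;
        int e = successor(capExtent);  // skip past capExtent's old portion
        while (e != capExtent) {       // every other extent, exactly once
            order.push_back(e);
            e = successor(e);
        }
        order.push_back(capExtent);    // capExtent's new data comes last
        return order;
    }

With xnext = {1, 2, -1}, firstExtent = 0 and capExtent = 1, this returns {2, 0, 1}: the non-cap extents in wrapped order, then capExtent.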
diff --git a/src/mongo/db/structure/record_store_v1_capped.h b/src/mongo/db/structure/record_store_v1_capped.h
index dcc19cf0c08..02f28727f30 100644
--- a/src/mongo/db/structure/record_store_v1_capped.h
+++ b/src/mongo/db/structure/record_store_v1_capped.h
@@ -62,10 +62,11 @@ namespace mongo {
*/
virtual void temp_cappedTruncateAfter( OperationContext* txn, DiskLoc end, bool inclusive );
- virtual RecordIterator* getIterator( const DiskLoc& start, bool tailable,
+ virtual RecordIterator* getIterator( OperationContext* txn,
+ const DiskLoc& start, bool tailable,
const CollectionScanParams::Direction& dir) const;
- virtual std::vector<RecordIterator*> getManyIterators() const;
+ virtual std::vector<RecordIterator*> getManyIterators( OperationContext* txn ) const;
virtual bool compactSupported() const { return false; }
@@ -75,9 +76,11 @@ namespace mongo {
CompactStats* stats );
// Start from firstExtent by default.
- DiskLoc firstRecord( const DiskLoc &startExtent = DiskLoc() ) const;
+ DiskLoc firstRecord( OperationContext* txn,
+ const DiskLoc &startExtent = DiskLoc() ) const;
// Start from lastExtent by default.
- DiskLoc lastRecord( const DiskLoc &startExtent = DiskLoc() ) const;
+ DiskLoc lastRecord( OperationContext* txn,
+ const DiskLoc &startExtent = DiskLoc() ) const;
protected:
@@ -121,7 +124,7 @@ namespace mongo {
DiskLoc end,
bool inclusive);
- void _maybeComplain( int len ) const;
+ void _maybeComplain( OperationContext* txn, int len ) const;
// -- end copy from cap.cpp --
diff --git a/src/mongo/db/structure/record_store_v1_capped_iterator.cpp b/src/mongo/db/structure/record_store_v1_capped_iterator.cpp
index 12e6f945f20..c10557d6547 100644
--- a/src/mongo/db/structure/record_store_v1_capped_iterator.cpp
+++ b/src/mongo/db/structure/record_store_v1_capped_iterator.cpp
@@ -39,10 +39,11 @@ namespace mongo {
//
// Capped collection traversal
//
- CappedRecordStoreV1Iterator::CappedRecordStoreV1Iterator( const CappedRecordStoreV1* collection,
+ CappedRecordStoreV1Iterator::CappedRecordStoreV1Iterator( OperationContext* txn,
+ const CappedRecordStoreV1* collection,
const DiskLoc& start, bool tailable,
const CollectionScanParams::Direction& dir)
- : _recordStore(collection), _curr(start), _tailable(tailable),
+ : _txn(txn), _recordStore(collection), _curr(start), _tailable(tailable),
_direction(dir), _killedByInvalidate(false) {
if (_curr.isNull()) {
@@ -55,7 +56,7 @@ namespace mongo {
// Going forwards.
if (!nsd->capLooped()) {
// If our capped collection doesn't loop around, the first record is easy.
- _curr = collection->firstRecord();
+ _curr = collection->firstRecord(_txn);
}
else {
// Our capped collection has "looped" around.
@@ -72,7 +73,7 @@ namespace mongo {
// Going backwards
if (!nsd->capLooped()) {
// Start at the end.
- _curr = collection->lastRecord();
+ _curr = collection->lastRecord(_txn);
}
else {
_curr = _getExtent( nsd->capExtent() )->lastRecord;
@@ -205,7 +206,7 @@ namespace mongo {
if (!next.isNull()) {
return next;
}
- return _recordStore->firstRecord();
+ return _recordStore->firstRecord(_txn);
}
DiskLoc CappedRecordStoreV1Iterator::prevLoop(const DiskLoc& curr) {
@@ -214,7 +215,7 @@ namespace mongo {
if (!prev.isNull()) {
return prev;
}
- return _recordStore->lastRecord();
+ return _recordStore->lastRecord(_txn);
}
RecordData CappedRecordStoreV1Iterator::dataFor( const DiskLoc& loc ) const {
@@ -226,11 +227,11 @@ namespace mongo {
}
DiskLoc CappedRecordStoreV1Iterator::_getNextRecord( const DiskLoc& loc ) {
- return _recordStore->getNextRecord( loc );
+ return _recordStore->getNextRecord( _txn, loc );
}
DiskLoc CappedRecordStoreV1Iterator::_getPrevRecord( const DiskLoc& loc ) {
- return _recordStore->getPrevRecord( loc );
+ return _recordStore->getPrevRecord( _txn, loc );
}
} // namespace mongo
diff --git a/src/mongo/db/structure/record_store_v1_capped_iterator.h b/src/mongo/db/structure/record_store_v1_capped_iterator.h
index 041aef81c05..c63d265ec2e 100644
--- a/src/mongo/db/structure/record_store_v1_capped_iterator.h
+++ b/src/mongo/db/structure/record_store_v1_capped_iterator.h
@@ -47,7 +47,8 @@ namespace mongo {
*/
class CappedRecordStoreV1Iterator : public RecordIterator {
public:
- CappedRecordStoreV1Iterator( const CappedRecordStoreV1* collection,
+ CappedRecordStoreV1Iterator( OperationContext* txn,
+ const CappedRecordStoreV1* collection,
const DiskLoc& start,
bool tailable,
const CollectionScanParams::Direction& dir );
@@ -76,6 +77,9 @@ namespace mongo {
DiskLoc _getNextRecord( const DiskLoc& loc );
DiskLoc _getPrevRecord( const DiskLoc& loc );
+        // Transactional context for read locks. Not owned by us.
+ OperationContext* _txn;
+
// The collection we're iterating over.
const CappedRecordStoreV1* _recordStore;
diff --git a/src/mongo/db/structure/record_store_v1_capped_test.cpp b/src/mongo/db/structure/record_store_v1_capped_test.cpp
index 1ab2c5fae07..6ec414ff87d 100644
--- a/src/mongo/db/structure/record_store_v1_capped_test.cpp
+++ b/src/mongo/db/structure/record_store_v1_capped_test.cpp
@@ -70,7 +70,7 @@ namespace {
{
BSONObjBuilder b;
- int64_t storageSize = rs.storageSize( &b );
+ int64_t storageSize = rs.storageSize( &txn, &b );
BSONObj obj = b.obj();
ASSERT_EQUALS( 1, obj["numExtents"].numberInt() );
ASSERT_EQUALS( storageSize, em.quantizeExtentSize( 1024 ) );
@@ -127,7 +127,7 @@ namespace {
{DiskLoc(0, 1100), 900},
{}
};
- assertStateV1RS(recs, drecs, &em, md);
+ assertStateV1RS(&txn, recs, drecs, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc().setInvalid()); // unlooped
}
@@ -173,7 +173,7 @@ namespace {
{DiskLoc(0, 1500), 50}, // gap at end of extent
{}
};
- assertStateV1RS(recs, drecs, &em, md);
+ assertStateV1RS(&txn, recs, drecs, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
@@ -219,7 +219,7 @@ namespace {
{DiskLoc(0, 1500), 50}, // gap at end of extent
{}
};
- assertStateV1RS(recs, drecs, &em, md);
+ assertStateV1RS(&txn, recs, drecs, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
@@ -268,7 +268,7 @@ namespace {
{DiskLoc(0, 1500), 123}, // gap at end of extent
{}
};
- assertStateV1RS(recs, drecs, &em, md);
+ assertStateV1RS(&txn, recs, drecs, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
@@ -315,7 +315,7 @@ namespace {
{DiskLoc(0, 1600), 24}, // gap at end of extent
{}
};
- assertStateV1RS(recs, drecs, &em, md);
+ assertStateV1RS(&txn, recs, drecs, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
@@ -362,7 +362,7 @@ namespace {
{DiskLoc(1, 1100), 900},
{}
};
- assertStateV1RS(recs, drecs, &em, md);
+ assertStateV1RS(&txn, recs, drecs, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(1, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc().setInvalid()); // unlooped
}
@@ -413,7 +413,7 @@ namespace {
{DiskLoc(1, 1900), 100},
{}
};
- assertStateV1RS(recs, drecs, &em, md);
+ assertStateV1RS(&txn, recs, drecs, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(1, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(1, 1000));
}
@@ -468,7 +468,7 @@ namespace {
{DiskLoc(0, 1920), 80},
{}
};
- assertStateV1RS(recs, drecs, &em, md);
+ assertStateV1RS(&txn, recs, drecs, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
@@ -550,7 +550,7 @@ namespace {
{DiskLoc(0, 1628), 84},
{}
};
- assertStateV1RS(recs, drecs, &em, md);
+ assertStateV1RS(&txn, recs, drecs, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
diff --git a/src/mongo/db/structure/record_store_v1_repair_iterator.cpp b/src/mongo/db/structure/record_store_v1_repair_iterator.cpp
index 373f8760bf7..aaa96fa2db8 100644
--- a/src/mongo/db/structure/record_store_v1_repair_iterator.cpp
+++ b/src/mongo/db/structure/record_store_v1_repair_iterator.cpp
@@ -35,8 +35,9 @@
namespace mongo {
- RecordStoreV1RepairIterator::RecordStoreV1RepairIterator(const RecordStoreV1Base* recordStore)
- : _recordStore(recordStore), _stage(FORWARD_SCAN) {
+ RecordStoreV1RepairIterator::RecordStoreV1RepairIterator(OperationContext* txn,
+ const RecordStoreV1Base* recordStore)
+ : _txn(txn), _recordStore(recordStore), _stage(FORWARD_SCAN) {
// Position the iterator at the first record
//
@@ -73,10 +74,10 @@ namespace mongo {
else {
switch (_stage) {
case FORWARD_SCAN:
- _currRecord = _recordStore->getNextRecordInExtent(_currRecord);
+ _currRecord = _recordStore->getNextRecordInExtent(_txn, _currRecord);
break;
case BACKWARD_SCAN:
- _currRecord = _recordStore->getPrevRecordInExtent(_currRecord);
+ _currRecord = _recordStore->getPrevRecordInExtent(_txn, _currRecord);
break;
default:
invariant(!"This should never be reached.");
@@ -113,10 +114,10 @@ namespace mongo {
if (_currExtent.isNull()) {
switch (_stage) {
case FORWARD_SCAN:
- _currExtent = _recordStore->details()->firstExtent();
+ _currExtent = _recordStore->details()->firstExtent(_txn);
break;
case BACKWARD_SCAN:
- _currExtent = _recordStore->details()->lastExtent();
+ _currExtent = _recordStore->details()->lastExtent(_txn);
break;
default:
invariant(DONE == _stage);
diff --git a/src/mongo/db/structure/record_store_v1_repair_iterator.h b/src/mongo/db/structure/record_store_v1_repair_iterator.h
index 01170617b78..a0319982fda 100644
--- a/src/mongo/db/structure/record_store_v1_repair_iterator.h
+++ b/src/mongo/db/structure/record_store_v1_repair_iterator.h
@@ -42,7 +42,8 @@ namespace mongo {
*/
class RecordStoreV1RepairIterator : public RecordIterator {
public:
- RecordStoreV1RepairIterator(const RecordStoreV1Base* recordStore);
+ RecordStoreV1RepairIterator(OperationContext* txn,
+ const RecordStoreV1Base* recordStore);
virtual ~RecordStoreV1RepairIterator() { }
virtual bool isEOF();
@@ -68,6 +69,9 @@ namespace mongo {
*/
bool _advanceToNextValidExtent();
+        // Transactional context for read locks. Not owned by us.
+ OperationContext* _txn;
+
// Reference to the owning RecordStore. The store must not be deleted while there are
// active iterators on it.
//
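The repair iterator above salvages what it can from a possibly corrupt record chain by scanning each extent forward and then, as a fallback, backward (the FORWARD_SCAN / BACKWARD_SCAN stages in the hunks). A toy sketch of that two-stage walk, with invented types and an invented corruption convention; a real salvage pass would also deduplicate records seen by both stages:

    #include <vector>

    // Toy two-stage salvage scan. Links are ints; any negative link (null or
    // corrupt) ends the current stage. Not the patch's actual policy.
    enum Stage { FORWARD_SCAN, BACKWARD_SCAN, DONE };

    std::vector<int> salvageScan(const std::vector<int>& next,
                                 const std::vector<int>& prev,
                                 int first, int last) {
        std::vector<int> seen;
        Stage stage = FORWARD_SCAN;
        int curr = first;
        while (stage != DONE) {
            if (curr < 0) {                   // hit a null or corrupt link
                if (stage == FORWARD_SCAN) {  // retry from the other end
                    stage = BACKWARD_SCAN;
                    curr = last;
                } else {
                    stage = DONE;
                }
                continue;
            }
            seen.push_back(curr);
            curr = (stage == FORWARD_SCAN) ? next[curr] : prev[curr];
        }
        return seen;
    }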
diff --git a/src/mongo/db/structure/record_store_v1_simple.cpp b/src/mongo/db/structure/record_store_v1_simple.cpp
index 7e99a1b7640..21dd0d7363e 100644
--- a/src/mongo/db/structure/record_store_v1_simple.cpp
+++ b/src/mongo/db/structure/record_store_v1_simple.cpp
@@ -1,7 +1,7 @@
// record_store_v1_simple.cpp
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -96,6 +96,7 @@ namespace mongo {
int bestmatchlen = INT_MAX; // sentinel meaning we haven't found a record big enough
int b = bucket(lenToAlloc);
DiskLoc cur = _details->deletedListEntry(b);
+
int extra = 5; // look for a better fit, a little.
int chain = 0;
while ( 1 ) {
@@ -220,9 +221,7 @@ namespace mongo {
newDelW->nextDeleted().Null();
addDeletedRec( txn, newDelLoc );
-
return loc;
-
}
StatusWith<DiskLoc> SimpleRecordStoreV1::allocRecord( OperationContext* txn,
@@ -236,7 +235,7 @@ namespace mongo {
increaseStorageSize( txn,
_extentManager->followupSize( lengthWithHeaders,
- _details->lastExtentSize()),
+ _details->lastExtentSize(txn)),
enforceQuota );
loc = _allocFromExistingExtents( txn, lengthWithHeaders );
@@ -247,14 +246,14 @@ namespace mongo {
log() << "warning: alloc() failed after allocating new extent. "
<< "lengthWithHeaders: " << lengthWithHeaders << " last extent size:"
- << _details->lastExtentSize() << "; trying again";
+ << _details->lastExtentSize(txn) << "; trying again";
- for ( int z = 0; z < 10 && lengthWithHeaders > _details->lastExtentSize(); z++ ) {
+ for ( int z = 0; z < 10 && lengthWithHeaders > _details->lastExtentSize(txn); z++ ) {
log() << "try #" << z << endl;
increaseStorageSize( txn,
_extentManager->followupSize( lengthWithHeaders,
- _details->lastExtentSize()),
+ _details->lastExtentSize(txn)),
enforceQuota );
loc = _allocFromExistingExtents( txn, lengthWithHeaders );
@@ -280,20 +279,22 @@ namespace mongo {
_details->setDeletedListEntry(txn, b, dloc);
}
- RecordIterator* SimpleRecordStoreV1::getIterator( const DiskLoc& start, bool tailable,
+ RecordIterator* SimpleRecordStoreV1::getIterator( OperationContext* txn,
+ const DiskLoc& start,
+ bool tailable,
const CollectionScanParams::Direction& dir) const {
- return new SimpleRecordStoreV1Iterator( this, start, dir );
+ return new SimpleRecordStoreV1Iterator( txn, this, start, dir );
}
- vector<RecordIterator*> SimpleRecordStoreV1::getManyIterators() const {
+ vector<RecordIterator*> SimpleRecordStoreV1::getManyIterators( OperationContext* txn ) const {
OwnedPointerVector<RecordIterator> iterators;
const Extent* ext;
- for (DiskLoc extLoc = details()->firstExtent(); !extLoc.isNull(); extLoc = ext->xnext) {
- ext = _getExtent(extLoc);
+ for (DiskLoc extLoc = details()->firstExtent(txn); !extLoc.isNull(); extLoc = ext->xnext) {
+ ext = _getExtent(txn, extLoc);
if (ext->firstRecord.isNull())
continue;
-
- iterators.push_back(new RecordStoreV1Base::IntraExtentIterator(ext->firstRecord, this));
+ iterators.push_back(
+ new RecordStoreV1Base::IntraExtentIterator(txn, ext->firstRecord, this));
}
return iterators.release();
@@ -370,7 +371,7 @@ namespace mongo {
while( 1 ) {
Record *recOld = recordFor(L);
RecordData oldData = recOld->toRecordData();
- L = getNextRecordInExtent(L);
+ L = getNextRecordInExtent(txn, L);
if ( compactOptions->validateDocuments && !adaptor->isDataValid( oldData ) ) {
// object is corrupt!
@@ -432,8 +433,8 @@ namespace mongo {
}
} // if !L.isNull()
- invariant( _details->firstExtent() == diskloc );
- invariant( _details->lastExtent() != diskloc );
+ invariant( _details->firstExtent(txn) == diskloc );
+ invariant( _details->lastExtent(txn) != diskloc );
DiskLoc newFirst = e->xnext;
_details->setFirstExtent( txn, newFirst );
*txn->recoveryUnit()->writing(&_extentManager->getExtent( newFirst )->xprev) = DiskLoc();
@@ -462,7 +463,7 @@ namespace mongo {
txn->recoveryUnit()->commitIfNeeded();
list<DiskLoc> extents;
- for( DiskLoc extLocation = _details->firstExtent();
+ for( DiskLoc extLocation = _details->firstExtent(txn);
!extLocation.isNull();
extLocation = _extentManager->getExtent( extLocation )->xnext ) {
extents.push_back( extLocation );
@@ -476,7 +477,7 @@ namespace mongo {
_details->setLastExtentSize( txn, 0 );
// create a new extent so new records go there
- increaseStorageSize( txn, _details->lastExtentSize(), true );
+ increaseStorageSize( txn, _details->lastExtentSize(txn), true );
// reset data size and record counts to 0 for this namespace
// as we're about to tally them up again for each new extent
@@ -492,8 +493,8 @@ namespace mongo {
pm.hit();
}
- invariant( _extentManager->getExtent( _details->firstExtent() )->xprev.isNull() );
- invariant( _extentManager->getExtent( _details->lastExtent() )->xnext.isNull() );
+ invariant( _extentManager->getExtent( _details->firstExtent(txn) )->xprev.isNull() );
+ invariant( _extentManager->getExtent( _details->lastExtent(txn) )->xnext.isNull() );
// indexes will do their own progress meter
pm.finished();
diff --git a/src/mongo/db/structure/record_store_v1_simple.h b/src/mongo/db/structure/record_store_v1_simple.h
index b6b11c9366d..786be3c8056 100644
--- a/src/mongo/db/structure/record_store_v1_simple.h
+++ b/src/mongo/db/structure/record_store_v1_simple.h
@@ -1,7 +1,7 @@
// record_store_v1_simple.h
/**
-* Copyright (C) 2013 10gen Inc.
+* Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -50,10 +50,10 @@ namespace mongo {
const char* name() const { return "SimpleRecordStoreV1"; }
- virtual RecordIterator* getIterator( const DiskLoc& start, bool tailable,
+ virtual RecordIterator* getIterator( OperationContext* txn, const DiskLoc& start, bool tailable,
const CollectionScanParams::Direction& dir) const;
- virtual std::vector<RecordIterator*> getManyIterators() const;
+ virtual std::vector<RecordIterator*> getManyIterators(OperationContext* txn) const;
virtual Status truncate(OperationContext* txn);
diff --git a/src/mongo/db/structure/record_store_v1_simple_iterator.cpp b/src/mongo/db/structure/record_store_v1_simple_iterator.cpp
index 19163be6aeb..675042d3948 100644
--- a/src/mongo/db/structure/record_store_v1_simple_iterator.cpp
+++ b/src/mongo/db/structure/record_store_v1_simple_iterator.cpp
@@ -39,23 +39,24 @@ namespace mongo {
// Regular / non-capped collection traversal
//
- SimpleRecordStoreV1Iterator::SimpleRecordStoreV1Iterator(const SimpleRecordStoreV1* collection,
+ SimpleRecordStoreV1Iterator::SimpleRecordStoreV1Iterator(OperationContext* txn,
+ const SimpleRecordStoreV1* collection,
const DiskLoc& start,
const CollectionScanParams::Direction& dir)
- : _curr(start), _recordStore(collection), _direction(dir) {
+ : _txn(txn), _curr(start), _recordStore(collection), _direction(dir) {
if (_curr.isNull()) {
const ExtentManager* em = _recordStore->_extentManager;
- if ( _recordStore->details()->firstExtent().isNull() ) {
+ if ( _recordStore->details()->firstExtent(txn).isNull() ) {
// nothing in the collection
- verify( _recordStore->details()->lastExtent().isNull() );
+ verify( _recordStore->details()->lastExtent(txn).isNull() );
}
else if (CollectionScanParams::FORWARD == _direction) {
// Find a non-empty extent and start with the first record in it.
- Extent* e = em->getExtent( _recordStore->details()->firstExtent() );
+ Extent* e = em->getExtent( _recordStore->details()->firstExtent(txn) );
while (e->firstRecord.isNull() && !e->xnext.isNull()) {
e = em->getExtent( e->xnext );
@@ -68,7 +69,7 @@ namespace mongo {
else {
// Walk backwards, skipping empty extents, and use the last record in the first
// non-empty extent we see.
- Extent* e = em->getExtent( _recordStore->details()->lastExtent() );
+ Extent* e = em->getExtent( _recordStore->details()->lastExtent(txn) );
// TODO ELABORATE
// Does one of e->lastRecord.isNull(), e.firstRecord.isNull() imply the other?
@@ -95,10 +96,10 @@ namespace mongo {
// Move to the next thing.
if (!isEOF()) {
if (CollectionScanParams::FORWARD == _direction) {
- _curr = _recordStore->getNextRecord( _curr );
+ _curr = _recordStore->getNextRecord( _txn, _curr );
}
else {
- _curr = _recordStore->getPrevRecord( _curr );
+ _curr = _recordStore->getPrevRecord( _txn, _curr );
}
}
diff --git a/src/mongo/db/structure/record_store_v1_simple_iterator.h b/src/mongo/db/structure/record_store_v1_simple_iterator.h
index 33746f7e655..7688b15be85 100644
--- a/src/mongo/db/structure/record_store_v1_simple_iterator.h
+++ b/src/mongo/db/structure/record_store_v1_simple_iterator.h
@@ -42,7 +42,8 @@ namespace mongo {
*/
class SimpleRecordStoreV1Iterator : public RecordIterator {
public:
- SimpleRecordStoreV1Iterator( const SimpleRecordStoreV1* records,
+ SimpleRecordStoreV1Iterator( OperationContext* txn,
+ const SimpleRecordStoreV1* records,
const DiskLoc& start,
const CollectionScanParams::Direction& dir );
virtual ~SimpleRecordStoreV1Iterator() { }
@@ -58,6 +59,9 @@ namespace mongo {
virtual RecordData dataFor( const DiskLoc& loc ) const;
private:
+        // Transactional context forwarded by getNext(). Not owned by us.
+ OperationContext* _txn;
+
// The result returned on the next call to getNext().
DiskLoc _curr;
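The ownership rule in that comment matters for lifetimes: the iterator borrows the caller's OperationContext, so the iterator must not outlive it, and a plain raw pointer (rather than any owning smart pointer) is the right member type. A hypothetical call-site sketch, reusing the Store/Iterator stand-ins sketched earlier in this section:

    // Hypothetical call site; Store and Iterator are the invented stand-ins
    // from the earlier sketch. The context outlives the iterator by scoping.
    void scanAll(OperationContext* txn, const Store* store) {
        Iterator it(txn, store, DiskLoc{0});  // borrow txn, never own it
        for (int i = 0; i < 3; ++i) {
            it.getNext();                     // forwards txn on every read
        }
    }                                         // iterator dies first; txn survives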
diff --git a/src/mongo/db/structure/record_store_v1_simple_test.cpp b/src/mongo/db/structure/record_store_v1_simple_test.cpp
index 31659f89030..4788e46b1cf 100644
--- a/src/mongo/db/structure/record_store_v1_simple_test.cpp
+++ b/src/mongo/db/structure/record_store_v1_simple_test.cpp
@@ -231,7 +231,7 @@ namespace {
LocAndSize drecs[] = {
{}
};
- assertStateV1RS(recs, drecs, &em, md);
+ assertStateV1RS(&txn, recs, drecs, &em, md);
}
}
@@ -262,7 +262,7 @@ namespace {
LocAndSize drecs[] = {
{}
};
- assertStateV1RS(recs, drecs, &em, md);
+ assertStateV1RS(&txn, recs, drecs, &em, md);
}
}
@@ -296,7 +296,7 @@ namespace {
LocAndSize drecs[] = {
{}
};
- assertStateV1RS(recs, drecs, &em, md);
+ assertStateV1RS(&txn, recs, drecs, &em, md);
}
}
@@ -334,7 +334,7 @@ namespace {
{DiskLoc(0, 1320), 24},
{}
};
- assertStateV1RS(recs, drecs, &em, md);
+ assertStateV1RS(&txn, recs, drecs, &em, md);
}
}
@@ -372,7 +372,7 @@ namespace {
LocAndSize drecs[] = {
{}
};
- assertStateV1RS(recs, drecs, &em, md);
+ assertStateV1RS(&txn, recs, drecs, &em, md);
}
}
@@ -497,7 +497,7 @@ namespace {
{DiskLoc(1, 1000), 1000},
{}
};
- assertStateV1RS(recs, drecs, &em, md);
+ assertStateV1RS(&txn, recs, drecs, &em, md);
}
}
@@ -550,7 +550,7 @@ namespace {
{DiskLoc(0, 1700), 999},
{}
};
- assertStateV1RS(recs, drecs, &em, md);
+ assertStateV1RS(&txn, recs, drecs, &em, md);
}
}
@@ -657,7 +657,7 @@ namespace {
{DiskLoc(0, 4100), 75},
{}
};
- assertStateV1RS(recs, drecs, &em, md);
+ assertStateV1RS(&txn, recs, drecs, &em, md);
}
}
@@ -769,7 +769,7 @@ namespace {
{DiskLoc(0, 9000), 140},
{}
};
- assertStateV1RS(recs, drecs, &em, md);
+ assertStateV1RS(&txn, recs, drecs, &em, md);
}
}
}
diff --git a/src/mongo/db/structure/record_store_v1_test_help.cpp b/src/mongo/db/structure/record_store_v1_test_help.cpp
index 3b4767e9dad..fb25786d54d 100644
--- a/src/mongo/db/structure/record_store_v1_test_help.cpp
+++ b/src/mongo/db/structure/record_store_v1_test_help.cpp
@@ -121,7 +121,7 @@ namespace mongo {
invariant( false );
}
- const DiskLoc& DummyRecordStoreV1MetaData::firstExtent() const {
+ const DiskLoc& DummyRecordStoreV1MetaData::firstExtent(OperationContext* txn) const {
return _firstExtent;
}
@@ -130,7 +130,7 @@ namespace mongo {
_firstExtent = loc;
}
- const DiskLoc& DummyRecordStoreV1MetaData::lastExtent() const {
+ const DiskLoc& DummyRecordStoreV1MetaData::lastExtent(OperationContext* txn) const {
return _lastExtent;
}
@@ -171,7 +171,7 @@ namespace mongo {
}
- int DummyRecordStoreV1MetaData::lastExtentSize() const {
+ int DummyRecordStoreV1MetaData::lastExtentSize(OperationContext* txn) const {
return _lastExtentSize;
}
@@ -302,9 +302,11 @@ namespace {
}
}
- void printRecList(const ExtentManager* em, const RecordStoreV1MetaData* md) {
+ void printRecList(OperationContext* txn,
+ const ExtentManager* em,
+ const RecordStoreV1MetaData* md) {
log() << " *** BEGIN ACTUAL RECORD LIST *** ";
- DiskLoc extLoc = md->firstExtent();
+ DiskLoc extLoc = md->firstExtent(txn);
std::set<DiskLoc> seenLocs;
while (!extLoc.isNull()) {
Extent* ext = em->getExtent(extLoc, true);
@@ -371,7 +373,7 @@ namespace {
// Need to start with a blank slate
invariant(em->numFiles() == 0);
- invariant(md->firstExtent().isNull());
+ invariant(md->firstExtent(txn).isNull());
// pre-allocate extents (even extents that aren't part of this RS)
{
@@ -410,7 +412,7 @@ namespace {
if (records && !records[0].loc.isNull()) {
int recIdx = 0;
- DiskLoc extLoc = md->firstExtent();
+ DiskLoc extLoc = md->firstExtent(txn);
while (!extLoc.isNull()) {
Extent* ext = em->getExtent(extLoc);
int prevOfs = DiskLoc::NullOfs;
@@ -492,10 +494,11 @@ namespace {
}
// Make sure we set everything up as requested.
- assertStateV1RS(records, drecs, em, md);
+ assertStateV1RS(txn, records, drecs, em, md);
}
- void assertStateV1RS(const LocAndSize* records,
+ void assertStateV1RS(OperationContext* txn,
+ const LocAndSize* records,
const LocAndSize* drecs,
const ExtentManager* em,
const DummyRecordStoreV1MetaData* md) {
@@ -508,7 +511,7 @@ namespace {
int recIdx = 0;
- DiskLoc extLoc = md->firstExtent();
+ DiskLoc extLoc = md->firstExtent(txn);
while (!extLoc.isNull()) { // for each Extent
Extent* ext = em->getExtent(extLoc, true);
int expectedPrevOfs = DiskLoc::NullOfs;
@@ -534,7 +537,7 @@ namespace {
}
if (ext->xnext.isNull()) {
- ASSERT_EQUALS(md->lastExtent(), extLoc);
+ ASSERT_EQUALS(md->lastExtent(txn), extLoc);
}
extLoc = ext->xnext;
@@ -557,7 +560,7 @@ namespace {
// the first drec in the capExtent. If the capExtent is the first Extent,
// it should be Null.
- if (md->capExtent() == md->firstExtent()) {
+ if (md->capExtent() == md->firstExtent(txn)) {
ASSERT_EQUALS(actualLoc, DiskLoc());
}
else {
@@ -597,7 +600,7 @@ namespace {
}
catch (...) {
// If a test fails, provide extra info to make debugging easier
- printRecList(em, md);
+ printRecList(txn, em, md);
printDRecList(em, md);
throw;
}
diff --git a/src/mongo/db/structure/record_store_v1_test_help.h b/src/mongo/db/structure/record_store_v1_test_help.h
index ed65e3d0a40..75c62205b72 100644
--- a/src/mongo/db/structure/record_store_v1_test_help.h
+++ b/src/mongo/db/structure/record_store_v1_test_help.h
@@ -65,10 +65,10 @@ namespace mongo {
const DiskLoc& loc );
virtual void orphanDeletedList(OperationContext* txn);
- virtual const DiskLoc& firstExtent() const;
+ virtual const DiskLoc& firstExtent( OperationContext* txn ) const;
virtual void setFirstExtent( OperationContext* txn, const DiskLoc& loc );
- virtual const DiskLoc& lastExtent() const;
+ virtual const DiskLoc& lastExtent( OperationContext* txn ) const;
virtual void setLastExtent( OperationContext* txn, const DiskLoc& loc );
virtual bool isCapped() const;
@@ -80,7 +80,7 @@ namespace mongo {
virtual bool replaceUserFlags( OperationContext* txn, int flags );
- virtual int lastExtentSize() const;
+ virtual int lastExtentSize( OperationContext* txn ) const;
virtual void setLastExtentSize( OperationContext* txn, int newMax );
virtual long long maxCappedDocs() const;
@@ -189,7 +189,8 @@ namespace mongo {
* List of LocAndSize are terminated by a Null DiskLoc. Passing a NULL pointer means don't check
* that list.
*/
- void assertStateV1RS(const LocAndSize* records,
+ void assertStateV1RS(OperationContext* txn,
+ const LocAndSize* records,
const LocAndSize* drecs,
const ExtentManager* em,
const DummyRecordStoreV1MetaData* md);
diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp
index 6972e8d7262..1c37fa6db4c 100644
--- a/src/mongo/dbtests/documentsourcetests.cpp
+++ b/src/mongo/dbtests/documentsourcetests.cpp
@@ -1,7 +1,7 @@
// documentsourcetests.cpp : Unit tests for DocumentSource classes.
/**
- * Copyright (C) 2012 10gen Inc.
+ * Copyright (C) 2012-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -178,7 +178,7 @@ namespace DocumentSourceTests {
CanonicalQuery* cq;
uassertStatusOK(CanonicalQuery::canonicalize(ns, /*query=*/BSONObj(), &cq));
Runner* runnerBare;
- uassertStatusOK(getRunner(ctx.ctx().db()->getCollection(&_opCtx, ns), cq, &runnerBare));
+ uassertStatusOK(getRunner(&_opCtx, ctx.ctx().db()->getCollection(&_opCtx, ns), cq, &runnerBare));
_runner.reset(runnerBare);
_runner->saveState();
diff --git a/src/mongo/dbtests/jstests.cpp b/src/mongo/dbtests/jstests.cpp
index a5f8ff997b6..605e171e2c9 100644
--- a/src/mongo/dbtests/jstests.cpp
+++ b/src/mongo/dbtests/jstests.cpp
@@ -2,7 +2,7 @@
//
/**
- * Copyright (C) 2009 10gen Inc.
+ * Copyright (C) 2009-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -2010,7 +2010,6 @@ namespace JSTests {
OperationContextImpl txn;
DBDirectClient client(&txn);
-
client.update( "test.system.js" , query.obj() , update.obj() , true /* upsert */ );
scoped_ptr<Scope> s( globalScriptEngine->newScope() );
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index 4309b9c2356..698678be8ec 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -2,7 +2,7 @@
//
/**
- * Copyright (C) 2008 10gen Inc.
+ * Copyright (C) 2008-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -363,14 +363,16 @@ namespace NamespaceTests {
DiskLoc last, first;
{
- auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns(),
+ auto_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
+ ns(),
collection(),
InternalPlanner::BACKWARD));
runner->getNext(NULL, &last);
ASSERT( !last.isNull() );
}
{
- auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns(),
+ auto_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
+ ns(),
collection(),
InternalPlanner::FORWARD));
runner->getNext(NULL, &first);
@@ -383,14 +385,16 @@ namespace NamespaceTests {
{
DiskLoc loc;
- auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns(),
+ auto_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
+ ns(),
collection(),
InternalPlanner::FORWARD));
runner->getNext(NULL, &loc);
ASSERT( first == loc);
}
{
- auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns(),
+ auto_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
+ ns(),
collection(),
InternalPlanner::BACKWARD));
DiskLoc loc;
diff --git a/src/mongo/dbtests/oplogstarttests.cpp b/src/mongo/dbtests/oplogstarttests.cpp
index 4f4f88c1ccc..0c6b5c0f20b 100644
--- a/src/mongo/dbtests/oplogstarttests.cpp
+++ b/src/mongo/dbtests/oplogstarttests.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 MongoDB Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -77,7 +77,7 @@ namespace OplogStartTests {
ASSERT(s.isOK());
_cq.reset(cq);
_oplogws.reset(new WorkingSet());
- _stage.reset(new OplogStart(collection(), _cq->root(), _oplogws.get()));
+ _stage.reset(new OplogStart(&_txn, collection(), _cq->root(), _oplogws.get()));
}
void assertWorkingSetMemberHasId(WorkingSetID id, int expectedId) {
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index 7b8f01add03..4908eb180ef 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -114,7 +114,7 @@ namespace PlanRankingTests {
// Put each solution from the planner into the MPR.
for (size_t i = 0; i < solutions.size(); ++i) {
PlanStage* root;
- ASSERT(StageBuilder::build(collection, *solutions[i], ws, &root));
+ ASSERT(StageBuilder::build(&_txn, collection, *solutions[i], ws, &root));
// Takes ownership of all arguments.
_mps->addPlan(solutions[i], root, ws);
}
diff --git a/src/mongo/dbtests/query_multi_plan_runner.cpp b/src/mongo/dbtests/query_multi_plan_runner.cpp
index 81982df6768..e9256246757 100644
--- a/src/mongo/dbtests/query_multi_plan_runner.cpp
+++ b/src/mongo/dbtests/query_multi_plan_runner.cpp
@@ -118,7 +118,7 @@ namespace QueryMultiPlanRunner {
const Collection* coll = ctx.ctx().db()->getCollection(&_txn, ns());
auto_ptr<WorkingSet> sharedWs(new WorkingSet());
- IndexScan* ix = new IndexScan(ixparams, sharedWs.get(), NULL);
+ IndexScan* ix = new IndexScan(&_txn, ixparams, sharedWs.get(), NULL);
auto_ptr<PlanStage> firstRoot(new FetchStage(sharedWs.get(), ix, NULL, coll));
// Plan 1: CollScan with matcher.
@@ -132,7 +132,7 @@ namespace QueryMultiPlanRunner {
verify(swme.isOK());
auto_ptr<MatchExpression> filter(swme.getValue());
// Make the stage.
- auto_ptr<PlanStage> secondRoot(new CollectionScan(csparams, sharedWs.get(),
+ auto_ptr<PlanStage> secondRoot(new CollectionScan(&_txn, csparams, sharedWs.get(),
filter.get()));
// Hand the plans off to the runner.
diff --git a/src/mongo/dbtests/query_single_solution_runner.cpp b/src/mongo/dbtests/query_single_solution_runner.cpp
index 91596c5cf49..4f4ddd09570 100644
--- a/src/mongo/dbtests/query_single_solution_runner.cpp
+++ b/src/mongo/dbtests/query_single_solution_runner.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 MongoDB Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -91,7 +91,7 @@ namespace QuerySingleSolutionRunner {
verify(swme.isOK());
auto_ptr<MatchExpression> filter(swme.getValue());
// Make the stage.
- auto_ptr<PlanStage> root(new CollectionScan(csparams, ws.get(), filter.release()));
+ auto_ptr<PlanStage> root(new CollectionScan(&_txn, csparams, ws.get(), filter.release()));
CanonicalQuery* cq;
verify(CanonicalQuery::canonicalize(ns(), filterObj, &cq).isOK());
@@ -133,7 +133,7 @@ namespace QuerySingleSolutionRunner {
const Collection* coll = context.db()->getCollection(&_txn, ns());
auto_ptr<WorkingSet> ws(new WorkingSet());
- IndexScan* ix = new IndexScan(ixparams, ws.get(), NULL);
+ IndexScan* ix = new IndexScan(&_txn, ixparams, ws.get(), NULL);
auto_ptr<PlanStage> root(new FetchStage(ws.get(), ix, NULL, coll));
CanonicalQuery* cq;
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index 75b144c7e45..a7b77876237 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -72,8 +72,8 @@ namespace QueryStageAnd {
}
void getLocs(set<DiskLoc>* out, Collection* coll) {
- RecordIterator* it = coll->getIterator(DiskLoc(), false,
- CollectionScanParams::FORWARD);
+ RecordIterator* it = coll->getIterator(&_txn, DiskLoc(), false,
+ CollectionScanParams::FORWARD);
while (!it->isEOF()) {
DiskLoc nextLoc = it->getNext();
out->insert(nextLoc);
@@ -154,7 +154,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSONObj();
params.bounds.endKeyInclusive = true;
params.direction = -1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -162,7 +162,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSONObj();
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// ah reads the first child into its hash table.
// ah should read foo=20, foo=19, ..., foo=0 in that order.
@@ -258,12 +258,12 @@ namespace QueryStageAnd {
params.bounds.endKey = BSONObj();
params.bounds.endKeyInclusive = true;
params.direction = -1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// Bar <= 19 (descending)
params.descriptor = getIndex(BSON("bar" << 1), coll);
params.bounds.startKey = BSON("" << 19);
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// First call to work reads the first result from the children.
// The first result is for the first scan over foo is {foo: 20, bar: 20, baz: 20}.
@@ -344,7 +344,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSONObj();
params.bounds.endKeyInclusive = true;
params.direction = -1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -352,7 +352,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSONObj();
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
ctx.commit();
// foo == bar == baz, and foo<=20, bar>=10, so our values are:
@@ -399,7 +399,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSONObj();
params.bounds.endKeyInclusive = true;
params.direction = -1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -407,7 +407,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSONObj();
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
ctx.commit();
// Stage execution should fail.
@@ -452,7 +452,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSONObj();
params.bounds.endKeyInclusive = true;
params.direction = -1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1 << "big" << 1), coll);
@@ -460,7 +460,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSONObj();
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
ctx.commit();
// foo == bar == baz, and foo<=20, bar>=10, so our values are:
@@ -500,7 +500,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSONObj();
params.bounds.endKeyInclusive = true;
params.direction = -1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -508,7 +508,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSONObj();
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// 5 <= baz <= 15
params.descriptor = getIndex(BSON("baz" << 1), coll);
@@ -516,7 +516,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSON("" << 15);
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
ctx.commit();
// foo == bar == baz, and foo<=20, bar>=10, 5<=baz<=15, so our values are:
@@ -567,7 +567,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSONObj();
params.bounds.endKeyInclusive = true;
params.direction = -1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1 << "big" << 1), coll);
@@ -575,7 +575,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSONObj();
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// 5 <= baz <= 15
params.descriptor = getIndex(BSON("baz" << 1), coll);
@@ -583,7 +583,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSON("" << 15);
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
ctx.commit();
// Stage execution should fail.
@@ -621,7 +621,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSONObj();
params.bounds.endKeyInclusive = true;
params.direction = -1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// Bar == 5. Index scan should be eof.
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -629,7 +629,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSON("" << 5);
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
int count = 0;
int works = 0;
@@ -682,7 +682,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSONObj();
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// Bar <= 100
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -693,7 +693,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSON("" << "");
params.bounds.endKeyInclusive = false;
params.direction = -1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
ctx.commit();
ASSERT_EQUALS(0, countResults(ah.get()));
@@ -734,7 +734,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSONObj();
params.bounds.endKeyInclusive = true;
params.direction = -1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// Bar >= 95
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -742,7 +742,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSONObj();
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
ctx.commit();
// Bar == 97
@@ -787,11 +787,11 @@ namespace QueryStageAnd {
params.bounds.endKey = BSON("" << 1);
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// Scan over bar == 1
params.descriptor = getIndex(BSON("bar" << 1), coll);
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// Get the set of disklocs in our collection to use later.
set<DiskLoc> data;
@@ -914,15 +914,15 @@ namespace QueryStageAnd {
params.bounds.endKey = BSON("" << 1);
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// bar == 1
params.descriptor = getIndex(BSON("bar" << 1), coll);
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// baz == 1
params.descriptor = getIndex(BSON("baz" << 1), coll);
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
ctx.commit();
ASSERT_EQUALS(50, countResults(ah.get()));
@@ -960,7 +960,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSON("" << 7);
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// Bar == 20, not EOF.
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -968,7 +968,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSON("" << 20);
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
ctx.commit();
ASSERT_EQUALS(0, countResults(ah.get()));
@@ -1009,7 +1009,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSON("" << 7);
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// bar == 20.
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -1017,7 +1017,7 @@ namespace QueryStageAnd {
params.bounds.endKey = BSON("" << 20);
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
ctx.commit();
ASSERT_EQUALS(0, countResults(ah.get()));
@@ -1058,11 +1058,11 @@ namespace QueryStageAnd {
params.bounds.endKey = BSON("" << 1);
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// bar == 1
params.descriptor = getIndex(BSON("bar" << 1), coll);
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
ctx.commit();
// Filter drops everything.
@@ -1100,13 +1100,13 @@ namespace QueryStageAnd {
params.bounds.endKey = BSON("" << 1);
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
// Intersect with 7 <= bar < 10000
params.descriptor = getIndex(BSON("bar" << 1), coll);
params.bounds.startKey = BSON("" << 7);
params.bounds.endKey = BSON("" << 10000);
- ah->addChild(new IndexScan(params, &ws, NULL));
+ ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
WorkingSetID lastId = WorkingSet::INVALID_ID;
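
Every hunk in this file makes the same mechanical change: the test's IndexScan stages now receive the fixture's OperationContext as their first constructor argument, without doing any locking themselves. A minimal sketch of the resulting call pattern, assuming the fixture members used above (_txn, ws, coll, and the getIndex helper):

    // Sketch only; the parameter setup mirrors the hunks above.
    IndexScanParams params;
    params.descriptor = getIndex(BSON("foo" << 1), coll);  // test helper, as above
    params.bounds.startKey = BSON("" << 20);
    params.bounds.endKey = BSONObj();
    params.bounds.endKeyInclusive = true;
    params.direction = -1;
    // OperationContext* is now the first argument; the stage acquires no locks.
    ah->addChild(new IndexScan(&_txn, params, &ws, NULL));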
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 06088bf425a..415a651babc 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -352,7 +352,7 @@ namespace QueryStageCollectionScan {
// Make a scan and have the runner own it.
WorkingSet* ws = new WorkingSet();
- PlanStage* ps = new CollectionScan(params, ws, filterExpr.get());
+ PlanStage* ps = new CollectionScan(&_txn, params, ws, filterExpr.get());
PlanExecutor runner(ws, ps, params.collection);
// Use the runner to count the number of objects scanned.
@@ -371,7 +371,7 @@ namespace QueryStageCollectionScan {
params.direction = direction;
params.tailable = false;
- scoped_ptr<CollectionScan> scan(new CollectionScan(params, &ws, NULL));
+ scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = scan->work(&id);
@@ -457,7 +457,7 @@ namespace QueryStageCollectionScan {
// Make a scan and have the runner own it.
WorkingSet* ws = new WorkingSet();
- PlanStage* ps = new CollectionScan(params, ws, NULL);
+ PlanStage* ps = new CollectionScan(&_txn, params, ws, NULL);
PlanExecutor runner(ws, ps, params.collection);
int count = 0;
@@ -486,7 +486,7 @@ namespace QueryStageCollectionScan {
params.tailable = false;
WorkingSet* ws = new WorkingSet();
- PlanStage* ps = new CollectionScan(params, ws, NULL);
+ PlanStage* ps = new CollectionScan(&_txn, params, ws, NULL);
PlanExecutor runner(ws, ps, params.collection);
int count = 0;
@@ -522,7 +522,7 @@ namespace QueryStageCollectionScan {
params.tailable = false;
WorkingSet ws;
- scoped_ptr<CollectionScan> scan(new CollectionScan(params, &ws, NULL));
+ scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
int count = 0;
while (count < 10) {
@@ -584,7 +584,7 @@ namespace QueryStageCollectionScan {
params.tailable = false;
WorkingSet ws;
- scoped_ptr<CollectionScan> scan(new CollectionScan(params, &ws, NULL));
+ scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
int count = 0;
while (count < 10) {
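
The CollectionScan constructor follows the same pattern. A hedged sketch reusing the names visible in these hunks (_txn, params, ws; the collection pointer is assumed to be set up as in the surrounding tests):

    // Sketch of the updated CollectionScan construction used throughout this file.
    CollectionScanParams params;
    params.collection = coll;                         // assumed, as in the setup above
    params.direction = CollectionScanParams::FORWARD;
    params.tailable = false;
    WorkingSet ws;
    scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));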
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index b4a86309568..6a7a039008e 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -129,7 +129,7 @@ namespace QueryStageCount {
params.endKeyInclusive = true;
WorkingSet ws;
- Count count(params, &ws);
+ Count count(&_txn, params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(2, numCounted);
@@ -162,7 +162,7 @@ namespace QueryStageCount {
params.endKeyInclusive = true;
WorkingSet ws;
- Count count(params, &ws);
+ Count count(&_txn, params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(5, numCounted);
@@ -195,7 +195,7 @@ namespace QueryStageCount {
params.endKeyInclusive = false;
WorkingSet ws;
- Count count(params, &ws);
+ Count count(&_txn, params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(3, numCounted);
@@ -224,7 +224,7 @@ namespace QueryStageCount {
params.endKeyInclusive = false;
WorkingSet ws;
- Count count(params, &ws);
+ Count count(&_txn, params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(0, numCounted);
@@ -254,7 +254,7 @@ namespace QueryStageCount {
params.endKeyInclusive = false;
WorkingSet ws;
- Count count(params, &ws);
+ Count count(&_txn, params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(0, numCounted);
@@ -285,7 +285,7 @@ namespace QueryStageCount {
params.endKeyInclusive = true;
WorkingSet ws;
- Count count(params, &ws);
+ Count count(&_txn, params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(0, numCounted);
@@ -317,7 +317,7 @@ namespace QueryStageCount {
params.endKeyInclusive = true;
WorkingSet ws;
- Count count(params, &ws);
+ Count count(&_txn, params, &ws);
WorkingSetID wsid;
int numCounted = 0;
@@ -369,7 +369,7 @@ namespace QueryStageCount {
params.endKeyInclusive = true;
WorkingSet ws;
- Count count(params, &ws);
+ Count count(&_txn, params, &ws);
WorkingSetID wsid;
int numCounted = 0;
@@ -424,7 +424,7 @@ namespace QueryStageCount {
params.endKeyInclusive = true;
WorkingSet ws;
- Count count(params, &ws);
+ Count count(&_txn, params, &ws);
WorkingSetID wsid;
int numCounted = 0;
@@ -482,7 +482,7 @@ namespace QueryStageCount {
params.endKeyInclusive = true;
WorkingSet ws;
- Count count(params, &ws);
+ Count count(&_txn, params, &ws);
WorkingSetID wsid;
int numCounted = 0;
@@ -541,7 +541,7 @@ namespace QueryStageCount {
params.endKeyInclusive = true;
WorkingSet ws;
- Count count(params, &ws);
+ Count count(&_txn, params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(7, numCounted);
@@ -575,7 +575,7 @@ namespace QueryStageCount {
params.endKeyInclusive = true; // yes?
WorkingSet ws;
- Count count(params, &ws);
+ Count count(&_txn, params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(9, numCounted);
@@ -606,7 +606,7 @@ namespace QueryStageCount {
params.endKeyInclusive = true;
WorkingSet ws;
- Count count(params, &ws);
+ Count count(&_txn, params, &ws);
WorkingSetID wsid;
int numCounted = 0;
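
The Count stage changes identically: the OperationContext is prepended to the constructor while the params and WorkingSet arguments stay put. A minimal sketch, with params configured as in the hunks above and runCount the test helper already used in this file:

    // Sketch: Count now receives the OperationContext first.
    WorkingSet ws;
    Count count(&_txn, params, &ws);
    int numCounted = runCount(&count);
    ASSERT_EQUALS(expected, numCounted);              // 'expected' is illustrative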
diff --git a/src/mongo/dbtests/query_stage_distinct.cpp b/src/mongo/dbtests/query_stage_distinct.cpp
index ac61fc55fce..ccf03b58bf3 100644
--- a/src/mongo/dbtests/query_stage_distinct.cpp
+++ b/src/mongo/dbtests/query_stage_distinct.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -145,7 +145,7 @@ namespace QueryStageDistinct {
params.bounds.fields.push_back(oil);
WorkingSet ws;
- DistinctScan* distinct = new DistinctScan(params, &ws);
+ DistinctScan* distinct = new DistinctScan(&_txn, params, &ws);
WorkingSetID wsid;
// Get our first result.
@@ -210,7 +210,7 @@ namespace QueryStageDistinct {
params.bounds.fields.push_back(oil);
WorkingSet ws;
- DistinctScan* distinct = new DistinctScan(params, &ws);
+ DistinctScan* distinct = new DistinctScan(&_txn, params, &ws);
// We should see each number in the range [1, 6] exactly once.
std::set<int> seen;
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index bc95554164a..e758afe8518 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -57,8 +57,8 @@ namespace QueryStageFetch {
}
void getLocs(set<DiskLoc>* out, Collection* coll) {
- RecordIterator* it = coll->getIterator(DiskLoc(), false,
- CollectionScanParams::FORWARD);
+ RecordIterator* it = coll->getIterator(&_txn, DiskLoc(), false,
+ CollectionScanParams::FORWARD);
while (!it->isEOF()) {
DiskLoc nextLoc = it->getNext();
out->insert(nextLoc);
diff --git a/src/mongo/dbtests/query_stage_keep.cpp b/src/mongo/dbtests/query_stage_keep.cpp
index eca675d6966..9eced015261 100644
--- a/src/mongo/dbtests/query_stage_keep.cpp
+++ b/src/mongo/dbtests/query_stage_keep.cpp
@@ -61,8 +61,8 @@ namespace QueryStageKeep {
}
void getLocs(set<DiskLoc>* out, Collection* coll) {
- RecordIterator* it = coll->getIterator(DiskLoc(), false,
- CollectionScanParams::FORWARD);
+ RecordIterator* it = coll->getIterator(&_txn, DiskLoc(), false,
+ CollectionScanParams::FORWARD);
while (!it->isEOF()) {
DiskLoc nextLoc = it->getNext();
out->insert(nextLoc);
@@ -135,7 +135,7 @@ namespace QueryStageKeep {
params.direction = CollectionScanParams::FORWARD;
params.tailable = false;
params.start = DiskLoc();
- CollectionScan* cs = new CollectionScan(params, &ws, NULL);
+ CollectionScan* cs = new CollectionScan(&_txn, params, &ws, NULL);
// Create a KeepMutations stage to merge in the 10 flagged objects.
// Takes ownership of 'cs'
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 2aa83912515..0a1492c4b0d 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -74,8 +74,10 @@ namespace QueryStageMergeSortTests {
}
void getLocs(set<DiskLoc>* out, Collection* coll) {
- RecordIterator* it = coll->getIterator(DiskLoc(), false,
- CollectionScanParams::FORWARD);
+ RecordIterator* it = coll->getIterator(&_txn,
+ DiskLoc(),
+ false,
+ CollectionScanParams::FORWARD);
while (!it->isEOF()) {
DiskLoc nextLoc = it->getNext();
out->insert(nextLoc);
@@ -145,11 +147,11 @@ namespace QueryStageMergeSortTests {
params.bounds.endKey = objWithMaxKey(1);
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ms->addChild(new IndexScan(params, ws, NULL));
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
// b:1
params.descriptor = getIndex(secondIndex, coll);
- ms->addChild(new IndexScan(params, ws, NULL));
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
ctx.commit();
// Must fetch if we want to easily pull out an obj.
@@ -209,11 +211,11 @@ namespace QueryStageMergeSortTests {
params.bounds.endKey = objWithMaxKey(1);
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ms->addChild(new IndexScan(params, ws, NULL));
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
// b:1
params.descriptor = getIndex(secondIndex, coll);
- ms->addChild(new IndexScan(params, ws, NULL));
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
ctx.commit();
PlanExecutor runner(ws, new FetchStage(ws, ms, NULL, coll), coll);
@@ -272,11 +274,11 @@ namespace QueryStageMergeSortTests {
params.bounds.endKey = objWithMaxKey(1);
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ms->addChild(new IndexScan(params, ws, NULL));
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
// b:1
params.descriptor = getIndex(secondIndex, coll);
- ms->addChild(new IndexScan(params, ws, NULL));
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
ctx.commit();
PlanExecutor runner(ws, new FetchStage(ws, ms, NULL, coll), coll);
@@ -338,11 +340,11 @@ namespace QueryStageMergeSortTests {
params.bounds.endKeyInclusive = true;
// This is the direction along the index.
params.direction = 1;
- ms->addChild(new IndexScan(params, ws, NULL));
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
// b:1
params.descriptor = getIndex(secondIndex, coll);
- ms->addChild(new IndexScan(params, ws, NULL));
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
ctx.commit();
PlanExecutor runner(ws, new FetchStage(ws, ms, NULL, coll), coll);
@@ -401,13 +403,13 @@ namespace QueryStageMergeSortTests {
params.bounds.endKey = objWithMaxKey(1);
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ms->addChild(new IndexScan(params, ws, NULL));
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
// b:51 (EOF)
params.descriptor = getIndex(secondIndex, coll);
params.bounds.startKey = BSON("" << 51 << "" << MinKey);
params.bounds.endKey = BSON("" << 51 << "" << MaxKey);
- ms->addChild(new IndexScan(params, ws, NULL));
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
ctx.commit();
PlanExecutor runner(ws, new FetchStage(ws, ms, NULL, coll), coll);
@@ -459,7 +461,7 @@ namespace QueryStageMergeSortTests {
BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
addIndex(indexSpec);
params.descriptor = getIndex(indexSpec, coll);
- ms->addChild(new IndexScan(params, ws, NULL));
+ ms->addChild(new IndexScan(&_txn, params, ws, NULL));
}
ctx.commit();
@@ -514,7 +516,7 @@ namespace QueryStageMergeSortTests {
BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
addIndex(indexSpec);
params.descriptor = getIndex(indexSpec, coll);
- ms->addChild(new IndexScan(params, &ws, NULL));
+ ms->addChild(new IndexScan(&_txn, params, &ws, NULL));
}
set<DiskLoc> locs;
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index 60838f5df77..81e500cb238 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -66,8 +66,8 @@ namespace QueryStageSortTests {
}
void getLocs(set<DiskLoc>* out, Collection* coll) {
- RecordIterator* it = coll->getIterator(DiskLoc(), false,
- CollectionScanParams::FORWARD);
+ RecordIterator* it = coll->getIterator(&_txn, DiskLoc(), false,
+ CollectionScanParams::FORWARD);
while (!it->isEOF()) {
DiskLoc nextLoc = it->getNext();
out->insert(nextLoc);
diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp
index 91640817f74..24b0ef3db1e 100644
--- a/src/mongo/dbtests/query_stage_tests.cpp
+++ b/src/mongo/dbtests/query_stage_tests.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -83,7 +83,7 @@ namespace QueryStageTests {
WorkingSet* ws = new WorkingSet();
PlanExecutor runner(ws,
- new IndexScan(params, ws, filterExpr.get()),
+ new IndexScan(&_txn, params, ws, filterExpr.get()),
ctx.ctx().db()->getCollection(&_txn, ns()));
int count = 0;
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index 4694c942f73..4580b5682d2 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -2,7 +2,7 @@
//
/**
- * Copyright (C) 2009 10gen Inc.
+ * Copyright (C) 2009-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -133,8 +133,8 @@ namespace ReplTests {
}
int count = 0;
- RecordIterator* it = coll->getIterator( DiskLoc(), false,
- CollectionScanParams::FORWARD );
+ RecordIterator* it = coll->getIterator( &_txn, DiskLoc(), false,
+ CollectionScanParams::FORWARD );
for ( ; !it->isEOF(); it->getNext() ) {
++count;
}
@@ -154,8 +154,8 @@ namespace ReplTests {
}
int count = 0;
- RecordIterator* it = coll->getIterator( DiskLoc(), false,
- CollectionScanParams::FORWARD );
+ RecordIterator* it = coll->getIterator( &txn, DiskLoc(), false,
+ CollectionScanParams::FORWARD );
for ( ; !it->isEOF(); it->getNext() ) {
++count;
}
@@ -173,8 +173,8 @@ namespace ReplTests {
Database* db = ctx.db();
Collection* coll = db->getCollection( &txn, cllNS() );
- RecordIterator* it = coll->getIterator( DiskLoc(), false,
- CollectionScanParams::FORWARD );
+ RecordIterator* it = coll->getIterator( &txn, DiskLoc(), false,
+ CollectionScanParams::FORWARD );
while ( !it->isEOF() ) {
DiskLoc currLoc = it->getNext();
ops.push_back(coll->docFor(currLoc));
@@ -208,8 +208,8 @@ namespace ReplTests {
coll = db->createCollection( &txn, ns );
}
- RecordIterator* it = coll->getIterator( DiskLoc(), false,
- CollectionScanParams::FORWARD );
+ RecordIterator* it = coll->getIterator( &txn, DiskLoc(), false,
+ CollectionScanParams::FORWARD );
::mongo::log() << "all for " << ns << endl;
while ( !it->isEOF() ) {
DiskLoc currLoc = it->getNext();
@@ -231,8 +231,8 @@ namespace ReplTests {
}
vector< DiskLoc > toDelete;
- RecordIterator* it = coll->getIterator( DiskLoc(), false,
- CollectionScanParams::FORWARD );
+ RecordIterator* it = coll->getIterator( &txn, DiskLoc(), false,
+ CollectionScanParams::FORWARD );
while ( !it->isEOF() ) {
toDelete.push_back( it->getNext() );
}
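
Collection::getIterator picks up the OperationContext the same way across all of these fixtures; only the first argument is new, while the start location, the boolean flag, and the scan direction are unchanged. A sketch of the iterator idiom these hunks repeat, assuming the txn and coll variables already in scope above:

    // Sketch of the updated iterator idiom from these test fixtures.
    RecordIterator* it = coll->getIterator( &txn, DiskLoc(), false,
                                            CollectionScanParams::FORWARD );
    while ( !it->isEOF() ) {
        DiskLoc nextLoc = it->getNext();
        out->insert( nextLoc );                       // 'out' as in getLocs above
    }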
diff --git a/src/mongo/dbtests/runner_registry.cpp b/src/mongo/dbtests/runner_registry.cpp
index a6c2b4dfac6..970027d9450 100644
--- a/src/mongo/dbtests/runner_registry.cpp
+++ b/src/mongo/dbtests/runner_registry.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -75,7 +75,7 @@ namespace RunnerRegistry {
params.collection = collection();
params.direction = CollectionScanParams::FORWARD;
params.tailable = false;
- auto_ptr<CollectionScan> scan(new CollectionScan(params, ws.get(), NULL));
+ auto_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, ws.get(), NULL));
// Create a runner to hold it
CanonicalQuery* cq;
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 2d4cc1eacca..2fb6aa8be8f 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -1,7 +1,7 @@
// d_migrate.cpp
/**
-* Copyright (C) 2008 10gen Inc.
+* Copyright (C) 2008-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -417,7 +417,7 @@ namespace mongo {
BSONObj min = Helpers::toKeyFormat( kp.extendRangeBound( _min, false ) );
BSONObj max = Helpers::toKeyFormat( kp.extendRangeBound( _max, false ) );
- auto_ptr<Runner> runner(InternalPlanner::indexScan(collection, idx, min, max, false));
+ auto_ptr<Runner> runner(InternalPlanner::indexScan(txn, collection, idx, min, max, false));
// use the average object size to estimate how many objects a full chunk would carry
// do that while traversing the chunk's range using the sharding index, below
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index 7dde165002c..3233cfefd88 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -1,7 +1,7 @@
// @file d_split.cpp
/**
-* Copyright (C) 2008 10gen Inc.
+* Copyright (C) 2008-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -146,7 +146,7 @@ namespace mongo {
max = Helpers::toKeyFormat( kp.extendRangeBound( max, false ) );
}
- auto_ptr<Runner> runner(InternalPlanner::indexScan(collection, idx, min, max,
+ auto_ptr<Runner> runner(InternalPlanner::indexScan(txn, collection, idx, min, max,
false, InternalPlanner::FORWARD));
// Find the 'missingField' value used to represent a missing document field in a key of
@@ -377,7 +377,7 @@ namespace mongo {
long long currCount = 0;
long long numChunks = 0;
- auto_ptr<Runner> runner(InternalPlanner::indexScan(collection, idx, min, max,
+ auto_ptr<Runner> runner(InternalPlanner::indexScan(txn, collection, idx, min, max,
false, InternalPlanner::FORWARD));
BSONObj currKey;
@@ -435,7 +435,7 @@ namespace mongo {
currCount = 0;
log() << "splitVector doing another cycle because of force, keyCount now: " << keyCount << endl;
- runner.reset(InternalPlanner::indexScan(collection, idx, min, max,
+ runner.reset(InternalPlanner::indexScan(txn, collection, idx, min, max,
false, InternalPlanner::FORWARD));
state = runner->getNext(&currKey, NULL);
@@ -879,7 +879,7 @@ namespace mongo {
BSONObj newmin = Helpers::toKeyFormat( kp.extendRangeBound( chunk.min, false) );
BSONObj newmax = Helpers::toKeyFormat( kp.extendRangeBound( chunk.max, false) );
- auto_ptr<Runner> runner(InternalPlanner::indexScan(collection, idx,
+ auto_ptr<Runner> runner(InternalPlanner::indexScan(txn, collection, idx,
newmin, newmax, false));
// check if exactly one document found
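
In the sharding commands the change surfaces one layer up, at the InternalPlanner level: the caller's OperationContext becomes the first parameter of indexScan. A sketch matching the call sites above (txn, collection, idx, min, and max all as already in scope):

    // Sketch of the updated InternalPlanner::indexScan call.
    auto_ptr<Runner> runner(InternalPlanner::indexScan(txn, collection, idx,
                                                       min, max, false,
                                                       InternalPlanner::FORWARD));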
diff --git a/src/mongo/tools/dump.cpp b/src/mongo/tools/dump.cpp
index ba3ff655092..0b6eafb2e58 100644
--- a/src/mongo/tools/dump.cpp
+++ b/src/mongo/tools/dump.cpp
@@ -1,5 +1,5 @@
/**
-* Copyright (C) 2008 10gen Inc.
+* Copyright (C) 2008-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -292,8 +292,8 @@ public:
return _repairByName(toolGlobalParams.db);
}
- void _repairExtents(Collection* coll, Writer& writer) {
- scoped_ptr<RecordIterator> iter(coll->getRecordStore()->getIteratorForRepair());
+ void _repairExtents(OperationContext* opCtx, Collection* coll, Writer& writer) {
+ scoped_ptr<RecordIterator> iter(coll->getRecordStore()->getIteratorForRepair(opCtx));
for (DiskLoc currLoc = iter->getNext(); !currLoc.isNull(); currLoc = iter->getNext()) {
if (logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1))) {
@@ -359,7 +359,7 @@ public:
Writer w( f , &m );
try {
- _repairExtents(collection, w);
+ _repairExtents(opCtx, collection, w);
}
catch ( DBException& e ){
toolError() << "Repair scan failed: " << e.toString() << std::endl;
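
dump.cpp threads the context one level further: _repairExtents now accepts the OperationContext and hands it to the record store's repair iterator. A condensed sketch of the shape of the change, with the record-writing body elided:

    // Sketch of the threaded-context repair path in dump.cpp.
    void _repairExtents(OperationContext* opCtx, Collection* coll, Writer& writer) {
        scoped_ptr<RecordIterator> iter(
            coll->getRecordStore()->getIteratorForRepair(opCtx));
        for (DiskLoc loc = iter->getNext(); !loc.isNull(); loc = iter->getNext()) {
            // ... write the record at 'loc', as in the hunk above ...
        }
    }
    // Call site, as updated above: _repairExtents(opCtx, collection, w);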