summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorMathias Stearn <mathias@10gen.com>2014-05-29 15:36:28 -0400
committerMathias Stearn <mathias@10gen.com>2014-05-30 12:10:16 -0400
commita78d754b67040c19714bc4696dd7feb5ce10d412 (patch)
treeaaf640483eff976ff22fdaceb9ebfa78f7659af2 /src
parent4edbe14669b7804180d8b58549e257ceb679bb1d (diff)
downloadmongo-a78d754b67040c19714bc4696dd7feb5ce10d412.tar.gz
SERVER-13641 Plumb OperationContext through to getCollection and all Helpers
Diffstat (limited to 'src')
-rw-r--r--src/mongo/db/auth/auth_index_d.cpp8
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_d.cpp4
-rw-r--r--src/mongo/db/catalog/collection_cursor_cache.cpp5
-rw-r--r--src/mongo/db/catalog/collection_cursor_cache.h1
-rw-r--r--src/mongo/db/catalog/database.cpp28
-rw-r--r--src/mongo/db/catalog/database.h12
-rw-r--r--src/mongo/db/catalog/index_catalog.cpp7
-rw-r--r--src/mongo/db/cloner.cpp4
-rw-r--r--src/mongo/db/commands.cpp3
-rw-r--r--src/mongo/db/commands.h3
-rw-r--r--src/mongo/db/commands/collection_to_capped.cpp13
-rw-r--r--src/mongo/db/commands/compact.cpp9
-rw-r--r--src/mongo/db/commands/create_indexes.cpp2
-rw-r--r--src/mongo/db/commands/dbhash.cpp6
-rw-r--r--src/mongo/db/commands/dbhash.h2
-rw-r--r--src/mongo/db/commands/distinct.cpp2
-rw-r--r--src/mongo/db/commands/drop_indexes.cpp18
-rw-r--r--src/mongo/db/commands/find_and_modify.cpp4
-rw-r--r--src/mongo/db/commands/geonear.cpp2
-rw-r--r--src/mongo/db/commands/group.cpp7
-rw-r--r--src/mongo/db/commands/index_filter_commands.cpp10
-rw-r--r--src/mongo/db/commands/mr.cpp20
-rw-r--r--src/mongo/db/commands/parallel_collection_scan.cpp2
-rw-r--r--src/mongo/db/commands/pipeline_command.cpp2
-rw-r--r--src/mongo/db/commands/plan_cache_commands.cpp10
-rw-r--r--src/mongo/db/commands/rename_collection.cpp17
-rw-r--r--src/mongo/db/commands/test_commands.cpp13
-rw-r--r--src/mongo/db/commands/touch.cpp2
-rw-r--r--src/mongo/db/commands/validate.cpp2
-rw-r--r--src/mongo/db/commands/write_commands/batch_executor.cpp2
-rw-r--r--src/mongo/db/db.cpp8
-rw-r--r--src/mongo/db/dbcommands.cpp36
-rw-r--r--src/mongo/db/dbhelpers.cpp72
-rw-r--r--src/mongo/db/dbhelpers.h36
-rw-r--r--src/mongo/db/exec/stagedebug_cmd.cpp2
-rw-r--r--src/mongo/db/fts/fts_command_mongod.cpp2
-rw-r--r--src/mongo/db/geo/haystack.cpp2
-rw-r--r--src/mongo/db/index_builder.cpp4
-rw-r--r--src/mongo/db/index_rebuilder.cpp2
-rw-r--r--src/mongo/db/instance.cpp11
-rw-r--r--src/mongo/db/ops/count.cpp8
-rw-r--r--src/mongo/db/ops/count.h8
-rw-r--r--src/mongo/db/ops/delete_executor.cpp2
-rw-r--r--src/mongo/db/ops/update.cpp4
-rw-r--r--src/mongo/db/pdfile.cpp2
-rw-r--r--src/mongo/db/pipeline/pipeline_d.cpp2
-rw-r--r--src/mongo/db/prefetch.cpp4
-rw-r--r--src/mongo/db/query/new_find.cpp6
-rw-r--r--src/mongo/db/range_deleter_db_env.cpp2
-rw-r--r--src/mongo/db/repair_database.cpp2
-rw-r--r--src/mongo/db/repl/health.cpp4
-rw-r--r--src/mongo/db/repl/master_slave.cpp24
-rw-r--r--src/mongo/db/repl/master_slave.h2
-rw-r--r--src/mongo/db/repl/oplog.cpp6
-rw-r--r--src/mongo/db/repl/repl_set.h2
-rw-r--r--src/mongo/db/repl/repl_set_impl.cpp9
-rw-r--r--src/mongo/db/repl/repl_set_impl.h2
-rw-r--r--src/mongo/db/repl/repl_settings.cpp6
-rw-r--r--src/mongo/db/repl/replset_web_handler.cpp6
-rw-r--r--src/mongo/db/repl/rs_initialsync.cpp2
-rw-r--r--src/mongo/db/repl/rs_initiate.cpp2
-rw-r--r--src/mongo/db/repl/rs_rollback.cpp13
-rw-r--r--src/mongo/db/repl/sync.cpp8
-rw-r--r--src/mongo/db/repl/sync.h3
-rw-r--r--src/mongo/db/repl/sync_source_feedback.cpp2
-rw-r--r--src/mongo/db/ttl.cpp2
-rw-r--r--src/mongo/dbtests/clienttests.cpp2
-rw-r--r--src/mongo/dbtests/counttests.cpp31
-rw-r--r--src/mongo/dbtests/dbhelper_tests.cpp2
-rw-r--r--src/mongo/dbtests/documentsourcetests.cpp2
-rw-r--r--src/mongo/dbtests/indexupdatetests.cpp2
-rw-r--r--src/mongo/dbtests/oplogstarttests.cpp10
-rw-r--r--src/mongo/dbtests/pdfiletests.cpp2
-rw-r--r--src/mongo/dbtests/plan_ranking.cpp2
-rw-r--r--src/mongo/dbtests/query_multi_plan_runner.cpp14
-rw-r--r--src/mongo/dbtests/query_single_solution_runner.cpp25
-rw-r--r--src/mongo/dbtests/query_stage_and.cpp32
-rw-r--r--src/mongo/dbtests/query_stage_collscan.cpp12
-rw-r--r--src/mongo/dbtests/query_stage_count.cpp2
-rw-r--r--src/mongo/dbtests/query_stage_distinct.cpp2
-rw-r--r--src/mongo/dbtests/query_stage_fetch.cpp4
-rw-r--r--src/mongo/dbtests/query_stage_keep.cpp2
-rw-r--r--src/mongo/dbtests/query_stage_merge_sort.cpp14
-rw-r--r--src/mongo/dbtests/query_stage_sort.cpp10
-rw-r--r--src/mongo/dbtests/query_stage_tests.cpp4
-rw-r--r--src/mongo/dbtests/querytests.cpp52
-rw-r--r--src/mongo/dbtests/replsettests.cpp24
-rw-r--r--src/mongo/dbtests/repltests.cpp19
-rw-r--r--src/mongo/dbtests/runner_registry.cpp9
-rw-r--r--src/mongo/s/d_logic.h4
-rw-r--r--src/mongo/s/d_migrate.cpp38
-rw-r--r--src/mongo/s/d_split.cpp6
-rw-r--r--src/mongo/tools/dump.cpp9
93 files changed, 441 insertions, 422 deletions
diff --git a/src/mongo/db/auth/auth_index_d.cpp b/src/mongo/db/auth/auth_index_d.cpp
index b8ed201226e..07f692149fe 100644
--- a/src/mongo/db/auth/auth_index_d.cpp
+++ b/src/mongo/db/auth/auth_index_d.cpp
@@ -84,16 +84,16 @@ namespace {
NamespaceString systemUsers(dbname, "system.users");
// Make sure the old unique index from v2.4 on system.users doesn't exist.
- OperationContextImpl txn;
- Client::WriteContext wctx(&txn, systemUsers);
- Collection* collection = wctx.ctx().db()->getCollection(NamespaceString(systemUsers));
+ Client::WriteContext wctx(txn, systemUsers);
+ Collection* collection = wctx.ctx().db()->getCollection(txn,
+ NamespaceString(systemUsers));
if (!collection) {
return;
}
IndexCatalog* indexCatalog = collection->getIndexCatalog();
IndexDescriptor* oldIndex = NULL;
while ((oldIndex = indexCatalog->findIndexByKeyPattern(v1SystemUsersKeyPattern))) {
- indexCatalog->dropIndex(&txn, oldIndex);
+ indexCatalog->dropIndex(txn, oldIndex);
}
}
}
diff --git a/src/mongo/db/auth/authz_manager_external_state_d.cpp b/src/mongo/db/auth/authz_manager_external_state_d.cpp
index 852f6d96b71..0f80745769d 100644
--- a/src/mongo/db/auth/authz_manager_external_state_d.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_d.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/dbhelpers.h"
#include "mongo/db/instance.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/operation_context_impl.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/mongoutils/str.h"
@@ -112,7 +113,8 @@ namespace mongo {
Client::ReadContext ctx(txn, collectionName.ns());
BSONObj found;
- if (Helpers::findOne(ctx.ctx().db()->getCollection(collectionName),
+ if (Helpers::findOne(txn,
+ ctx.ctx().db()->getCollection(txn, collectionName),
query,
found)) {
*result = found.getOwned();
diff --git a/src/mongo/db/catalog/collection_cursor_cache.cpp b/src/mongo/db/catalog/collection_cursor_cache.cpp
index 766bb3cd4a0..dea303ed0e7 100644
--- a/src/mongo/db/catalog/collection_cursor_cache.cpp
+++ b/src/mongo/db/catalog/collection_cursor_cache.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/client.h"
+#include "mongo/db/operation_context_impl.h"
#include "mongo/db/query/runner.h"
#include "mongo/platform/random.h"
#include "mongo/util/startup_test.h"
@@ -191,7 +192,7 @@ namespace mongo {
if ( !db )
return false;
Client::Context context( ns, db );
- Collection* collection = db->getCollection( ns );
+ Collection* collection = db->getCollection( txn, ns );
if ( !collection ) {
if ( checkAuth )
audit::logKillCursorsAuthzCheck( currentClient.get(),
@@ -221,7 +222,7 @@ namespace mongo {
if ( !db )
continue;
Client::Context context( ns, db );
- Collection* collection = db->getCollection( ns );
+ Collection* collection = db->getCollection( txn, ns );
if ( collection == NULL ) {
continue;
}
diff --git a/src/mongo/db/catalog/collection_cursor_cache.h b/src/mongo/db/catalog/collection_cursor_cache.h
index d08800d4d7b..c1f3647b021 100644
--- a/src/mongo/db/catalog/collection_cursor_cache.h
+++ b/src/mongo/db/catalog/collection_cursor_cache.h
@@ -39,6 +39,7 @@
namespace mongo {
+ class OperationContext;
class PseudoRandom;
class Runner;
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index 969eda34e85..756b0871c75 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -233,7 +233,8 @@ namespace mongo {
return true;
}
- long long Database::getIndexSizeForCollection(Collection* coll,
+ long long Database::getIndexSizeForCollection(OperationContext* opCtx,
+ Collection* coll,
BSONObjBuilder* details,
int scale ) {
if ( !coll )
@@ -247,7 +248,9 @@ namespace mongo {
while ( ii.more() ) {
IndexDescriptor* d = ii.next();
string indNS = d->indexNamespace();
- Collection* indColl = getCollection( indNS ); // XXX
+
+ // XXX creating a Collection for an index which isn't a Collection
+ Collection* indColl = getCollection( opCtx, indNS );
if ( ! indColl ) {
log() << "error: have index descriptor [" << indNS
<< "] but no entry in the index collection." << endl;
@@ -262,7 +265,7 @@ namespace mongo {
return totalSize;
}
- void Database::getStats( BSONObjBuilder* output, double scale ) {
+ void Database::getStats( OperationContext* opCtx, BSONObjBuilder* output, double scale ) {
bool empty = isEmpty() || getExtentManager()->numFiles() == 0;
list<string> collections;
@@ -280,7 +283,7 @@ namespace mongo {
for (list<string>::const_iterator it = collections.begin(); it != collections.end(); ++it) {
const string ns = *it;
- Collection* collection = getCollection( ns );
+ Collection* collection = getCollection( opCtx, ns );
if ( !collection )
continue;
@@ -293,7 +296,7 @@ namespace mongo {
numExtents += temp.obj()["numExtents"].numberInt(); // XXX
indexes += collection->getIndexCatalog()->numIndexesTotal();
- indexSize += getIndexSizeForCollection(collection);
+ indexSize += getIndexSizeForCollection(opCtx, collection);
}
output->append ( "db" , _name );
@@ -428,11 +431,6 @@ namespace mongo {
_collections.erase( it );
}
- Collection* Database::getCollection( const StringData& ns ) {
- OperationContextImpl txn; // TODO remove once we require reads to have transactions
- return getCollection(&txn, ns);
- }
-
Collection* Database::getCollection( OperationContext* txn, const StringData& ns ) {
invariant( _name == nsToDatabaseSubstring( ns ) );
@@ -489,7 +487,7 @@ namespace mongo {
// move index namespaces
BSONObj oldIndexSpec;
- while (Helpers::findOne(systemIndexCollection, BSON("ns" << fromNS), oldIndexSpec)) {
+ while (Helpers::findOne(txn, systemIndexCollection, BSON("ns" << fromNS), oldIndexSpec)) {
oldIndexSpec = oldIndexSpec.getOwned();
BSONObj newIndexSpec;
@@ -597,7 +595,7 @@ namespace mongo {
{
BSONObj oldSpec;
- if ( !Helpers::findOne( getCollection( txn, _namespacesName ),
+ if ( !Helpers::findOne( txn, getCollection( txn, _namespacesName ),
BSON( "name" << fromNS ),
oldSpec ) )
return Status( ErrorCodes::InternalError, "can't find system.namespaces entry" );
@@ -624,10 +622,6 @@ namespace mongo {
return Status::OK();
}
- Collection* Database::getOrCreateCollection( const StringData& ns ) {
- OperationContextImpl txn; // TODO remove once we require reads to have transactions
- return getOrCreateCollection(&txn, ns);
- }
Collection* Database::getOrCreateCollection(OperationContext* txn, const StringData& ns) {
Collection* c = getCollection( txn, ns );
if ( !c ) {
@@ -641,7 +635,7 @@ namespace mongo {
const CollectionOptions& options,
bool allocateDefaultSpace,
bool createIdIndex ) {
- massert( 17399, "collection already exists", getCollection( ns ) == NULL );
+ massert( 17399, "collection already exists", getCollection( txn, ns ) == NULL );
massertNamespaceNotIndex( ns, "createCollection" );
if ( serverGlobalParams.configsvr &&
diff --git a/src/mongo/db/catalog/database.h b/src/mongo/db/catalog/database.h
index e3a0b37f28c..a54c8bd8364 100644
--- a/src/mongo/db/catalog/database.h
+++ b/src/mongo/db/catalog/database.h
@@ -105,9 +105,10 @@ namespace mongo {
int getProfilingLevel() const { return _profile; }
const char* getProfilingNS() const { return _profileName.c_str(); }
- void getStats( BSONObjBuilder* output, double scale = 1 );
+ void getStats( OperationContext* opCtx, BSONObjBuilder* output, double scale = 1 );
- long long getIndexSizeForCollection( Collection* collections,
+ long long getIndexSizeForCollection( OperationContext* opCtx,
+ Collection* collections,
BSONObjBuilder* details = NULL,
int scale = 1 );
@@ -127,20 +128,13 @@ namespace mongo {
/**
* @param ns - this is fully qualified, which is maybe not ideal ???
- * The methods without a transaction are deprecated.
- * TODO remove deprecated method once we require reads to have Transaction objects.
*/
- Collection* getCollection( const StringData& ns );
-
- Collection* getCollection( const NamespaceString& ns ) { return getCollection( ns.ns() ); }
-
Collection* getCollection( OperationContext* txn, const StringData& ns );
Collection* getCollection( OperationContext* txn, const NamespaceString& ns ) {
return getCollection( txn, ns.ns() );
}
- Collection* getOrCreateCollection( const StringData& ns );
Collection* getOrCreateCollection( OperationContext* txn, const StringData& ns );
Status renameCollection( OperationContext* txn,
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index acef0995881..68559fc67ef 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -227,8 +227,9 @@ namespace mongo {
fassert(16737, dfh->versionMinor == PDFILE_VERSION_MINOR_22_AND_OLDER);
- auto_ptr<Runner> runner( InternalPlanner::collectionScan( db->_indexesName,
- db->getCollection( db->_indexesName ) ) );
+ auto_ptr<Runner> runner(
+ InternalPlanner::collectionScan(db->_indexesName,
+ db->getCollection(txn, db->_indexesName)));
BSONObj index;
Runner::RunnerState state;
@@ -689,7 +690,7 @@ namespace mongo {
long long numSystemIndexesEntries = 0;
{
Collection* systemIndexes =
- _collection->_database->getCollection( _collection->_database->_indexesName );
+ _collection->_database->getCollection( txn, _collection->_database->_indexesName );
if ( systemIndexes ) {
EqualityMatchExpression expr;
BSONObj nsBSON = BSON( "ns" << _collection->ns() );
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 519a1f6b9e8..bf6d55cb3be 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -111,7 +111,7 @@ namespace mongo {
Collection* collection = NULL;
if ( isindex == false ) {
- collection = context.db()->getCollection( to_collection );
+ collection = context.db()->getCollection( txn, to_collection );
if ( !collection ) {
massert( 17321,
str::stream()
@@ -491,7 +491,7 @@ namespace mongo {
bool old = inDBRepair;
try {
inDBRepair = true;
- Collection* c = context.db()->getCollection( to_name );
+ Collection* c = context.db()->getCollection( txn, to_name );
if ( c )
c->getIndexCatalog()->ensureHaveIdIndex(txn);
inDBRepair = old;
diff --git a/src/mongo/db/commands.cpp b/src/mongo/db/commands.cpp
index 524a7f916de..ed487950309 100644
--- a/src/mongo/db/commands.cpp
+++ b/src/mongo/db/commands.cpp
@@ -196,7 +196,8 @@ namespace mongo {
help << "no help defined";
}
- std::vector<BSONObj> Command::stopIndexBuilds(Database* db,
+ std::vector<BSONObj> Command::stopIndexBuilds(OperationContext* opCtx,
+ Database* db,
const BSONObj& cmdObj) {
return std::vector<BSONObj>();
}
diff --git a/src/mongo/db/commands.h b/src/mongo/db/commands.h
index 4446020a94c..485deeb61e4 100644
--- a/src/mongo/db/commands.h
+++ b/src/mongo/db/commands.h
@@ -198,7 +198,8 @@ namespace mutablebson {
public:
// Stops all index builds required to run this command and returns index builds killed.
- virtual std::vector<BSONObj> stopIndexBuilds(Database* db,
+ virtual std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
+ Database* db,
const BSONObj& cmdObj);
static const std::map<std::string,Command*>* commandsByBestName() { return _commandsByBestName; }
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 0186a17b643..ceaaebefdcf 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -56,7 +56,7 @@ namespace mongo {
return Status( ErrorCodes::NamespaceNotFound,
str::stream() << "source collection " << fromNs << " does not exist" );
- if ( db->getCollection( toNs ) )
+ if ( db->getCollection( txn, toNs ) )
return Status( ErrorCodes::NamespaceExists, "to collection already exists" );
// create new collection
@@ -182,14 +182,15 @@ namespace mongo {
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- virtual std::vector<BSONObj> stopIndexBuilds(Database* db,
+ virtual std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
+ Database* db,
const BSONObj& cmdObj) {
std::string collName = cmdObj.firstElement().valuestr();
std::string ns = db->name() + "." + collName;
IndexCatalog::IndexKillCriteria criteria;
criteria.ns = ns;
- Collection* coll = db->getCollection(ns);
+ Collection* coll = db->getCollection(opCtx, ns);
if (coll) {
return IndexBuilder::killMatchingIndexBuilds(coll, criteria);
}
@@ -204,7 +205,7 @@ namespace mongo {
Database* db = ctx.db();
- stopIndexBuilds(db, jsobj);
+ stopIndexBuilds(txn, db, jsobj);
BackgroundOperation::assertNoBgOpInProgForDb(dbname.c_str());
string shortSource = jsobj.getStringField( "convertToCapped" );
@@ -219,7 +220,7 @@ namespace mongo {
string shortTmpName = str::stream() << "tmp.convertToCapped." << shortSource;
string longTmpName = str::stream() << dbname << "." << shortTmpName;
- if ( db->getCollection( longTmpName ) ) {
+ if ( db->getCollection( txn, longTmpName ) ) {
Status status = db->dropCollection( txn, longTmpName );
if ( !status.isOK() )
return appendCommandStatus( result, status );
@@ -230,7 +231,7 @@ namespace mongo {
if ( !status.isOK() )
return appendCommandStatus( result, status );
- verify( db->getCollection( longTmpName ) );
+ verify( db->getCollection( txn, longTmpName ) );
status = db->dropCollection( txn, longSource );
if ( !status.isOK() )
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index 1002ec82ffd..d5e400e7c6b 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -71,14 +71,15 @@ namespace mongo {
}
CompactCmd() : Command("compact") { }
- virtual std::vector<BSONObj> stopIndexBuilds(Database* db,
+ virtual std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
+ Database* db,
const BSONObj& cmdObj) {
std::string coll = cmdObj.firstElement().valuestr();
std::string ns = db->name() + "." + coll;
IndexCatalog::IndexKillCriteria criteria;
criteria.ns = ns;
- return IndexBuilder::killMatchingIndexBuilds(db->getCollection(ns), criteria);
+ return IndexBuilder::killMatchingIndexBuilds(db->getCollection(opCtx, ns), criteria);
}
virtual bool run(OperationContext* txn, const string& db, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
@@ -144,7 +145,7 @@ namespace mongo {
BackgroundOperation::assertNoBgOpInProgForNs(ns.ns());
Client::Context ctx(ns);
- Collection* collection = ctx.db()->getCollection(ns.ns());
+ Collection* collection = ctx.db()->getCollection(txn, ns.ns());
if( ! collection ) {
errmsg = "namespace does not exist";
return false;
@@ -157,7 +158,7 @@ namespace mongo {
log() << "compact " << ns << " begin, options: " << compactOptions.toString();
- std::vector<BSONObj> indexesInProg = stopIndexBuilds(ctx.db(), cmdObj);
+ std::vector<BSONObj> indexesInProg = stopIndexBuilds(txn, ctx.db(), cmdObj);
StatusWith<CompactStats> status = collection->compact( txn, &compactOptions );
if ( !status.isOK() )
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 841f9522ace..089bf040731 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -133,7 +133,7 @@ namespace mongo {
// lock for common calls. We only take write lock if needed.
// Note: createIndexes command does not currently respect shard versioning.
Client::ReadContext readContext(txn, ns, false /* doVersion */);
- const Collection* collection = readContext.ctx().db()->getCollection( ns.ns() );
+ const Collection* collection = readContext.ctx().db()->getCollection(txn, ns.ns());
if ( collection ) {
for ( size_t i = 0; i < specs.size(); i++ ) {
BSONObj spec = specs[i];
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 3899e7f522d..9b4a2a385aa 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -62,7 +62,7 @@ namespace mongo {
out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
}
- string DBHashCmd::hashCollection( Database* db, const string& fullCollectionName, bool* fromCache ) {
+ string DBHashCmd::hashCollection( OperationContext* opCtx, Database* db, const string& fullCollectionName, bool* fromCache ) {
scoped_ptr<scoped_lock> cachedHashedLock;
@@ -76,7 +76,7 @@ namespace mongo {
}
*fromCache = false;
- Collection* collection = db->getCollection( fullCollectionName );
+ Collection* collection = db->getCollection( opCtx, fullCollectionName );
if ( !collection )
return "";
@@ -176,7 +176,7 @@ namespace mongo {
continue;
bool fromCache = false;
- string hash = hashCollection( db, fullCollectionName, &fromCache );
+ string hash = hashCollection( txn, db, fullCollectionName, &fromCache );
bb.append( shortCollectionName, hash );
diff --git a/src/mongo/db/commands/dbhash.h b/src/mongo/db/commands/dbhash.h
index 71885f17697..383c7fb9d80 100644
--- a/src/mongo/db/commands/dbhash.h
+++ b/src/mongo/db/commands/dbhash.h
@@ -54,7 +54,7 @@ namespace mongo {
bool isCachable( const StringData& ns ) const;
- std::string hashCollection( Database* db, const std::string& fullCollectionName, bool* fromCache );
+ std::string hashCollection( OperationContext* opCtx, Database* db, const std::string& fullCollectionName, bool* fromCache );
std::map<std::string,std::string> _cachedHashed;
mutex _cachedHashedMutex;
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 4c3a6bb5955..4f05700f65c 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -92,7 +92,7 @@ namespace mongo {
Client::ReadContext ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection( ns );
+ Collection* collection = ctx.ctx().db()->getCollection( txn, ns );
if (!collection) {
result.appendArray( "values" , BSONObj() );
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index c68aede5bc3..330e856e7db 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -61,10 +61,11 @@ namespace mongo {
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- virtual std::vector<BSONObj> stopIndexBuilds(Database* db,
+ virtual std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
+ Database* db,
const BSONObj& cmdObj) {
std::string toDeleteNs = db->name() + "." + cmdObj.firstElement().valuestr();
- Collection* collection = db->getCollection(toDeleteNs);
+ Collection* collection = db->getCollection(opCtx, toDeleteNs);
IndexCatalog::IndexKillCriteria criteria;
// Get index name to drop
@@ -113,13 +114,13 @@ namespace mongo {
Client::Context ctx(toDeleteNs);
Database* db = ctx.db();
- Collection* collection = db->getCollection( toDeleteNs );
+ Collection* collection = db->getCollection( txn, toDeleteNs );
if ( ! collection ) {
errmsg = "ns not found";
return false;
}
- stopIndexBuilds(db, jsobj);
+ stopIndexBuilds(txn, db, jsobj);
IndexCatalog* indexCatalog = collection->getIndexCatalog();
anObjBuilder.appendNumber("nIndexesWas", indexCatalog->numIndexesTotal() );
@@ -204,12 +205,13 @@ namespace mongo {
}
CmdReIndex() : Command("reIndex") { }
- virtual std::vector<BSONObj> stopIndexBuilds(Database* db,
+ virtual std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
+ Database* db,
const BSONObj& cmdObj) {
std::string ns = db->name() + '.' + cmdObj["reIndex"].valuestrsafe();
IndexCatalog::IndexKillCriteria criteria;
criteria.ns = ns;
- return IndexBuilder::killMatchingIndexBuilds(db->getCollection(ns), criteria);
+ return IndexBuilder::killMatchingIndexBuilds(db->getCollection(opCtx, ns), criteria);
}
bool run(OperationContext* txn, const string& dbname , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
@@ -223,7 +225,7 @@ namespace mongo {
Lock::DBWrite dbXLock(txn->lockState(), dbname);
Client::Context ctx(toDeleteNs);
- Collection* collection = ctx.db()->getCollection( toDeleteNs );
+ Collection* collection = ctx.db()->getCollection( txn, toDeleteNs );
if ( !collection ) {
errmsg = "ns not found";
@@ -232,7 +234,7 @@ namespace mongo {
BackgroundOperation::assertNoBgOpInProgForNs( toDeleteNs );
- std::vector<BSONObj> indexesInProg = stopIndexBuilds(ctx.db(), jsobj);
+ std::vector<BSONObj> indexesInProg = stopIndexBuilds(txn, ctx.db(), jsobj);
list<BSONObj> all;
auto_ptr<DBClientCursor> i = db.query( dbname + ".system.indexes" , BSON( "ns" << toDeleteNs ) , 0 , 0 , 0 , QueryOption_SlaveOk );
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 37d777b2da3..fe283804073 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -255,7 +255,7 @@ namespace mongo {
UpdateResult res = mongo::update(txn, cx.db(), request, &cc().curop()->debug());
if ( !collection ) {
// collection created by an upsert
- collection = cx.db()->getCollection( ns );
+ collection = cx.db()->getCollection( txn, ns );
}
LOG(3) << "update result: " << res ;
@@ -272,7 +272,7 @@ namespace mongo {
}
LOG(3) << "using modified query to return the new doc: " << queryModified;
- if ( ! Helpers::findOne( collection, queryModified, doc ) ) {
+ if ( ! Helpers::findOne( txn, collection, queryModified, doc ) ) {
errmsg = str::stream() << "can't find object after modification "
<< " ns: " << ns
<< " queryModified: " << queryModified
diff --git a/src/mongo/db/commands/geonear.cpp b/src/mongo/db/commands/geonear.cpp
index ebbdd6efd69..42a956bf862 100644
--- a/src/mongo/db/commands/geonear.cpp
+++ b/src/mongo/db/commands/geonear.cpp
@@ -84,7 +84,7 @@ namespace mongo {
return false;
}
- Collection* collection = db->getCollection( ns );
+ Collection* collection = db->getCollection( txn, ns );
if ( !collection ) {
errmsg = "can't find ns";
return false;
diff --git a/src/mongo/db/commands/group.cpp b/src/mongo/db/commands/group.cpp
index 94920e4d61e..b65f4f85fcd 100644
--- a/src/mongo/db/commands/group.cpp
+++ b/src/mongo/db/commands/group.cpp
@@ -87,7 +87,8 @@ namespace mongo {
return obj.extractFields( keyPattern , true ).getOwned();
}
- bool group( Database* db,
+ bool group( OperationContext* txn,
+ Database* db,
const std::string& ns,
const BSONObj& query,
BSONObj keyPattern,
@@ -131,7 +132,7 @@ namespace mongo {
double keysize = keyPattern.objsize() * 3;
double keynum = 1;
- Collection* collection = db->getCollection( ns );
+ Collection* collection = db->getCollection( txn, ns );
const WhereCallbackReal whereCallback(StringData(db->name()));
@@ -256,7 +257,7 @@ namespace mongo {
const string ns = parseNs(dbname, jsobj);
Client::ReadContext ctx(txn, ns);
- return group( ctx.ctx().db() , ns , q ,
+ return group( txn, ctx.ctx().db() , ns , q ,
key , keyf , reduce._asCode() , reduce.type() != CodeWScope ? 0 : reduce.codeWScopeScopeDataUnsafe() ,
initial.embeddedObject() , finalize ,
errmsg , result );
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index efc92edd3a3..b07d6086c35 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -65,12 +65,12 @@ namespace {
/**
* Retrieves a collection's query settings and plan cache from the database.
*/
- Status getQuerySettingsAndPlanCache(Database* db, const string& ns,
+ Status getQuerySettingsAndPlanCache(OperationContext* txn, Database* db, const string& ns,
QuerySettings** querySettingsOut,
PlanCache** planCacheOut) {
invariant(db);
- Collection* collection = db->getCollection(ns);
+ Collection* collection = db->getCollection(txn, ns);
if (NULL == collection) {
return Status(ErrorCodes::BadValue, "no such collection");
}
@@ -169,7 +169,7 @@ namespace mongo {
Client::Context& ctx = readCtx.ctx();
QuerySettings* querySettings;
PlanCache* unused;
- Status status = getQuerySettingsAndPlanCache(ctx.db(), ns, &querySettings, &unused);
+ Status status = getQuerySettingsAndPlanCache(txn, ctx.db(), ns, &querySettings, &unused);
if (!status.isOK()) {
// No collection - return empty array of filters.
BSONArrayBuilder hintsBuilder(bob->subarrayStart("filters"));
@@ -231,7 +231,7 @@ namespace mongo {
Client::Context& ctx = readCtx.ctx();
QuerySettings* querySettings;
PlanCache* planCache;
- Status status = getQuerySettingsAndPlanCache(ctx.db(), ns, &querySettings, &planCache);
+ Status status = getQuerySettingsAndPlanCache(txn, ctx.db(), ns, &querySettings, &planCache);
if (!status.isOK()) {
// No collection - do nothing.
return Status::OK();
@@ -322,7 +322,7 @@ namespace mongo {
Client::Context& ctx = readCtx.ctx();
QuerySettings* querySettings;
PlanCache* planCache;
- Status status = getQuerySettingsAndPlanCache(ctx.db(), ns, &querySettings, &planCache);
+ Status status = getQuerySettingsAndPlanCache(txn, ctx.db(), ns, &querySettings, &planCache);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 97c5d1fb9b5..53368a50cf7 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -366,7 +366,7 @@ namespace mongo {
// copy indexes into temporary storage
Client::WriteContext finalCtx(_txn, _config.outputOptions.finalNamespace);
Collection* finalColl =
- finalCtx.ctx().db()->getCollection( _config.outputOptions.finalNamespace );
+ finalCtx.ctx().db()->getCollection(_txn, _config.outputOptions.finalNamespace);
if ( finalColl ) {
IndexCatalog::IndexIterator ii =
finalColl->getIndexCatalog()->getIndexIterator( true );
@@ -584,8 +584,10 @@ namespace mongo {
bool found;
{
Client::Context tx( _config.outputOptions.finalNamespace );
- Collection* coll = tx.db()->getCollection( _config.outputOptions.finalNamespace );
- found = Helpers::findOne(coll,
+ Collection* coll =
+ tx.db()->getCollection(_txn, _config.outputOptions.finalNamespace);
+ found = Helpers::findOne(_txn,
+ coll,
temp["_id"].wrap(),
old,
true);
@@ -620,7 +622,7 @@ namespace mongo {
verify( _onDisk );
Client::WriteContext ctx(_txn, ns );
- Collection* coll = ctx.ctx().db()->getCollection( ns );
+ Collection* coll = ctx.ctx().db()->getCollection( _txn, ns );
if ( !coll )
uasserted(13630, str::stream() << "attempted to insert into nonexistent" <<
" collection during a mr operation." <<
@@ -646,7 +648,7 @@ namespace mongo {
verify( _onDisk );
Client::WriteContext ctx(_txn, _config.incLong );
- Collection* coll = ctx.ctx().db()->getCollection( _config.incLong );
+ Collection* coll = ctx.ctx().db()->getCollection( _txn, _config.incLong );
if ( !coll )
uasserted(13631, str::stream() << "attempted to insert into nonexistent"
" collection during a mr operation." <<
@@ -922,7 +924,7 @@ namespace mongo {
{
Client::WriteContext incCtx(_txn, _config.incLong );
- Collection* incColl = incCtx.ctx().db()->getCollection( _config.incLong );
+ Collection* incColl = incCtx.ctx().db()->getCollection( _txn, _config.incLong );
bool foundIndex = false;
IndexCatalog::IndexIterator ii =
@@ -961,7 +963,7 @@ namespace mongo {
whereCallback).isOK());
Runner* rawRunner;
- verify(getRunner(ctx->ctx().db()->getCollection(_config.incLong),
+ verify(getRunner(ctx->ctx().db()->getCollection(_txn, _config.incLong),
cq, &rawRunner, QueryPlannerParams::NO_TABLE_SCAN).isOK());
auto_ptr<Runner> runner(rawRunner);
@@ -1217,7 +1219,7 @@ namespace mongo {
auto_ptr<RangePreserver> rangePreserver;
{
Client::ReadContext ctx(txn, config.ns);
- Collection* collection = ctx.ctx().db()->getCollection( config.ns );
+ Collection* collection = ctx.ctx().db()->getCollection( txn, config.ns );
if ( collection )
rangePreserver.reset(new RangePreserver(collection));
@@ -1299,7 +1301,7 @@ namespace mongo {
}
Runner* rawRunner;
- if (!getRunner(ctx->db()->getCollection( config.ns), cq, &rawRunner).isOK()) {
+ if (!getRunner(ctx->db()->getCollection(txn, config.ns), cq, &rawRunner).isOK()) {
uasserted(17239, "Can't get runner for query " + config.filter.toString());
return 0;
}
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index 593912cf0d4..4f41daa71f1 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -168,7 +168,7 @@ namespace mongo {
Client::ReadContext ctx(txn, ns.ns());
Database* db = ctx.ctx().db();
- Collection* collection = db->getCollection( ns );
+ Collection* collection = db->getCollection( txn, ns );
if ( !collection )
return appendCommandStatus( result,
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index a43d77eeda0..4e40b49287c 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -319,7 +319,7 @@ namespace {
// on ShardFilterStage for more details.
Client::ReadContext ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection(ns);
+ Collection* collection = ctx.ctx().db()->getCollection(txn, ns);
// This does mongod-specific stuff like creating the input Runner and adding to the
// front of the pipeline if needed.
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index a8d32641646..cd572562f58 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -62,10 +62,10 @@ namespace {
/**
* Retrieves a collection's plan cache from the database.
*/
- Status getPlanCache(Database* db, const string& ns, PlanCache** planCacheOut) {
+ Status getPlanCache(OperationContext* txn, Database* db, const string& ns, PlanCache** planCacheOut) {
invariant(db);
- Collection* collection = db->getCollection(ns);
+ Collection* collection = db->getCollection(txn, ns);
if (NULL == collection) {
return Status(ErrorCodes::BadValue, "no such collection");
}
@@ -215,7 +215,7 @@ namespace mongo {
Client::ReadContext readCtx(txn, ns);
Client::Context& ctx = readCtx.ctx();
PlanCache* planCache;
- Status status = getPlanCache(ctx.db(), ns, &planCache);
+ Status status = getPlanCache(txn, ctx.db(), ns, &planCache);
if (!status.isOK()) {
// No collection - return results with empty shapes array.
BSONArrayBuilder arrayBuilder(bob->subarrayStart("shapes"));
@@ -263,7 +263,7 @@ namespace mongo {
Client::ReadContext readCtx(txn, ns);
Client::Context& ctx = readCtx.ctx();
PlanCache* planCache;
- Status status = getPlanCache(ctx.db(), ns, &planCache);
+ Status status = getPlanCache(txn, ctx.db(), ns, &planCache);
if (!status.isOK()) {
// No collection - nothing to do. Return OK status.
return Status::OK();
@@ -334,7 +334,7 @@ namespace mongo {
Client::ReadContext readCtx(txn, ns);
Client::Context& ctx = readCtx.ctx();
PlanCache* planCache;
- Status status = getPlanCache(ctx.db(), ns, &planCache);
+ Status status = getPlanCache(txn, ctx.db(), ns, &planCache);
if (!status.isOK()) {
// No collection - return empty plans array.
BSONArrayBuilder plansBuilder(bob->subarrayStart("plans"));
diff --git a/src/mongo/db/commands/rename_collection.cpp b/src/mongo/db/commands/rename_collection.cpp
index 851b783aa99..1b9e61339b9 100644
--- a/src/mongo/db/commands/rename_collection.cpp
+++ b/src/mongo/db/commands/rename_collection.cpp
@@ -63,7 +63,8 @@ namespace mongo {
help << " example: { renameCollection: foo.a, to: bar.b }";
}
- virtual std::vector<BSONObj> stopIndexBuilds(Database* db,
+ virtual std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
+ Database* db,
const BSONObj& cmdObj) {
string source = cmdObj.getStringField( name.c_str() );
string target = cmdObj.getStringField( "to" );
@@ -71,7 +72,7 @@ namespace mongo {
IndexCatalog::IndexKillCriteria criteria;
criteria.ns = source;
std::vector<BSONObj> prelim =
- IndexBuilder::killMatchingIndexBuilds(db->getCollection(source), criteria);
+ IndexBuilder::killMatchingIndexBuilds(db->getCollection(opCtx, source), criteria);
std::vector<BSONObj> indexes;
@@ -139,7 +140,7 @@ namespace mongo {
{
Client::Context srcCtx( source );
- Collection* sourceColl = srcCtx.db()->getCollection( source );
+ Collection* sourceColl = srcCtx.db()->getCollection( txn, source );
if ( !sourceColl ) {
errmsg = "source namespace does not exist";
@@ -172,7 +173,7 @@ namespace mongo {
{
- indexesInProg = stopIndexBuilds( srcCtx.db(), cmdObj );
+ indexesInProg = stopIndexBuilds( txn, srcCtx.db(), cmdObj );
capped = sourceColl->isCapped();
if ( capped ) {
size = sourceColl->getRecordStore()->storageSize();
@@ -185,7 +186,7 @@ namespace mongo {
// Check if the target namespace exists and if dropTarget is true.
// If target exists and dropTarget is not true, return false.
- if ( ctx.db()->getCollection( target ) ) {
+ if ( ctx.db()->getCollection( txn, target ) ) {
if ( !cmdObj["dropTarget"].trueValue() ) {
errmsg = "target namespace exists";
return false;
@@ -245,7 +246,7 @@ namespace mongo {
{
Client::Context srcCtx( source );
- sourceColl = srcCtx.db()->getCollection( source );
+ sourceColl = srcCtx.db()->getCollection( txn, source );
sourceIt.reset( sourceColl->getIterator( DiskLoc(), false, CollectionScanParams::FORWARD ) );
}
@@ -260,7 +261,7 @@ namespace mongo {
{
Client::Context ctx( target );
if ( !targetColl )
- targetColl = ctx.db()->getCollection( target );
+ targetColl = ctx.db()->getCollection( txn, target );
// No logOp necessary because the entire renameCollection command is one logOp.
Status s = targetColl->insertDocument( txn, o, true ).getStatus();
if ( !s.isOK() ) {
@@ -314,7 +315,7 @@ namespace mongo {
{
Client::Context ctx( target );
if ( !targetColl )
- targetColl = ctx.db()->getCollection( target );
+ targetColl = ctx.db()->getCollection( txn, target );
for ( vector<BSONObj>::iterator it = copiedIndexes.begin();
it != copiedIndexes.end(); ++it ) {
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 7265270beef..f3ba8efe67c 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -65,7 +65,7 @@ namespace mongo {
Lock::DBWrite lk(txn->lockState(), ns);
Client::Context ctx( ns );
Database* db = ctx.db();
- Collection* collection = db->getCollection( ns );
+ Collection* collection = db->getCollection( txn, ns );
if ( !collection ) {
collection = db->createCollection( txn, ns );
if ( !collection ) {
@@ -141,7 +141,7 @@ namespace mongo {
bool inc = cmdObj.getBoolField( "inc" ); // inclusive range?
Client::WriteContext ctx(txn, nss.ns() );
- Collection* collection = ctx.ctx().db()->getCollection( nss.ns() );
+ Collection* collection = ctx.ctx().db()->getCollection( txn, nss.ns() );
massert( 13417, "captrunc collection not found or empty", collection);
boost::scoped_ptr<Runner> runner(InternalPlanner::collectionScan(nss.ns(),
@@ -170,14 +170,15 @@ namespace mongo {
const BSONObj& cmdObj,
std::vector<Privilege>* out) {}
- virtual std::vector<BSONObj> stopIndexBuilds(Database* db,
+ virtual std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
+ Database* db,
const BSONObj& cmdObj) {
std::string coll = cmdObj[ "emptycapped" ].valuestrsafe();
std::string ns = db->name() + '.' + coll;
IndexCatalog::IndexKillCriteria criteria;
criteria.ns = ns;
- return IndexBuilder::killMatchingIndexBuilds(db->getCollection(ns), criteria);
+ return IndexBuilder::killMatchingIndexBuilds(db->getCollection(opCtx, ns), criteria);
}
virtual bool run(OperationContext* txn, const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
@@ -187,10 +188,10 @@ namespace mongo {
Client::WriteContext ctx(txn, nss.ns() );
Database* db = ctx.ctx().db();
- Collection* collection = db->getCollection( nss.ns() );
+ Collection* collection = db->getCollection( txn, nss.ns() );
massert( 13429, "emptycapped no such collection", collection );
- std::vector<BSONObj> indexes = stopIndexBuilds(db, cmdObj);
+ std::vector<BSONObj> indexes = stopIndexBuilds(txn, db, cmdObj);
Status status = collection->truncate(txn);
if ( !status.isOK() )
diff --git a/src/mongo/db/commands/touch.cpp b/src/mongo/db/commands/touch.cpp
index ec2fc972659..e7f2c7eeef2 100644
--- a/src/mongo/db/commands/touch.cpp
+++ b/src/mongo/db/commands/touch.cpp
@@ -107,7 +107,7 @@ namespace mongo {
Client::ReadContext context(txn, nss.ns());
Database* db = context.ctx().db();
- Collection* collection = db->getCollection( nss.ns() );
+ Collection* collection = db->getCollection( txn, nss.ns() );
if ( !collection ) {
errmsg = "collection not found";
return false;
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index 9b539a8b954..7e69e6315c4 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -83,7 +83,7 @@ namespace mongo {
return false;
}
- Collection* collection = db->getCollection( ns );
+ Collection* collection = db->getCollection( txn, ns );
if ( !collection ) {
errmsg = "collection not found";
return false;
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index b147446a93d..71d5d4538bd 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -917,7 +917,7 @@ namespace mongo {
false /* don't check version */));
Database* database = _context->db();
dassert(database);
- _collection = database->getCollection(request->getTargetingNS());
+ _collection = database->getCollection(txn, request->getTargetingNS());
if (!_collection) {
// Implicitly create if it doesn't exist
_collection = database->createCollection(txn, request->getTargetingNS());
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 93408e92a23..c4fb3221ff5 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -320,7 +320,7 @@ namespace mongo {
fassert( 17401, repairDatabase( &txn, dbName ) );
}
- void checkForIdIndexes( Database* db ) {
+ void checkForIdIndexes( OperationContext* txn, Database* db ) {
if ( db->name() == "local") {
// we do not need an _id index on anything in the local database
@@ -337,7 +337,7 @@ namespace mongo {
if ( ns.isSystem() )
continue;
- Collection* coll = db->getCollection( collectionName );
+ Collection* coll = db->getCollection( txn, collectionName );
if ( !coll )
continue;
@@ -373,7 +373,7 @@ namespace mongo {
if (repl::replSettings.usingReplSets()) {
// we only care about the _id index if we are in a replset
- checkForIdIndexes(ctx.db());
+ checkForIdIndexes(&txn, ctx.db());
}
if (shouldClearNonLocalTmpCollections || dbName == "local")
@@ -412,7 +412,7 @@ namespace mongo {
}
else {
const string systemIndexes = ctx.db()->name() + ".system.indexes";
- Collection* coll = ctx.db()->getCollection( systemIndexes );
+ Collection* coll = ctx.db()->getCollection( &txn, systemIndexes );
auto_ptr<Runner> runner(InternalPlanner::collectionScan(systemIndexes,coll));
BSONObj index;
Runner::RunnerState state;
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index e751dbaeebb..a202667d7d4 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -164,7 +164,8 @@ namespace mongo {
virtual bool isWriteCommandForConfigServer() const { return true; }
- virtual std::vector<BSONObj> stopIndexBuilds(Database* db,
+ virtual std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
+ Database* db,
const BSONObj& cmdObj) {
invariant(db);
std::list<std::string> collections;
@@ -179,7 +180,7 @@ namespace mongo {
IndexCatalog::IndexKillCriteria criteria;
criteria.ns = ns;
std::vector<BSONObj> killedIndexes =
- IndexBuilder::killMatchingIndexBuilds(db->getCollection(ns), criteria);
+ IndexBuilder::killMatchingIndexBuilds(db->getCollection(opCtx, ns), criteria);
allKilledIndexes.insert(allKilledIndexes.end(),
killedIndexes.begin(),
killedIndexes.end());
@@ -211,7 +212,7 @@ namespace mongo {
log() << "dropDatabase " << dbname << " starting" << endl;
- stopIndexBuilds(context.db(), cmdObj);
+ stopIndexBuilds(txn, context.db(), cmdObj);
dropDatabase(txn, context.db());
log() << "dropDatabase " << dbname << " finished";
@@ -250,7 +251,8 @@ namespace mongo {
}
- virtual std::vector<BSONObj> stopIndexBuilds(Database* db,
+ virtual std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
+ Database* db,
const BSONObj& cmdObj) {
invariant(db);
std::list<std::string> collections;
@@ -265,7 +267,7 @@ namespace mongo {
IndexCatalog::IndexKillCriteria criteria;
criteria.ns = ns;
std::vector<BSONObj> killedIndexes =
- IndexBuilder::killMatchingIndexBuilds(db->getCollection(ns), criteria);
+ IndexBuilder::killMatchingIndexBuilds(db->getCollection(opCtx, ns), criteria);
allKilledIndexes.insert(allKilledIndexes.end(),
killedIndexes.begin(),
killedIndexes.end());
@@ -286,7 +288,7 @@ namespace mongo {
Client::Context context( dbname );
log() << "repairDatabase " << dbname;
- std::vector<BSONObj> indexesInProg = stopIndexBuilds(context.db(), cmdObj);
+ std::vector<BSONObj> indexesInProg = stopIndexBuilds(txn, context.db(), cmdObj);
e = cmdObj.getField( "preserveClonedFilesOnFailure" );
bool preserveClonedFilesOnFailure = e.isBoolean() && e.boolean();
@@ -437,13 +439,14 @@ namespace mongo {
virtual bool isWriteCommandForConfigServer() const { return true; }
- virtual std::vector<BSONObj> stopIndexBuilds(Database* db,
+ virtual std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
+ Database* db,
const BSONObj& cmdObj) {
std::string nsToDrop = db->name() + '.' + cmdObj.firstElement().valuestr();
IndexCatalog::IndexKillCriteria criteria;
criteria.ns = nsToDrop;
- return IndexBuilder::killMatchingIndexBuilds(db->getCollection(nsToDrop), criteria);
+ return IndexBuilder::killMatchingIndexBuilds(db->getCollection(opCtx, nsToDrop), criteria);
}
virtual bool run(OperationContext* txn, const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
@@ -470,7 +473,7 @@ namespace mongo {
int numIndexes = coll->getIndexCatalog()->numIndexesTotal();
- stopIndexBuilds(db, cmdObj);
+ stopIndexBuilds(txn, db, cmdObj);
result.append( "ns", nsToDrop );
result.append( "nIndexesWas", numIndexes );
@@ -532,7 +535,7 @@ namespace mongo {
string err;
int errCode;
- long long n = runCount(ns, cmdObj, err, errCode);
+ long long n = runCount(txn, ns, cmdObj, err, errCode);
long long retVal = n;
bool ok = true;
@@ -813,7 +816,7 @@ namespace mongo {
// Check shard version at startup.
// This will throw before we've done any work if shard version is outdated
Client::ReadContext ctx(txn, ns);
- Collection* coll = ctx.ctx().db()->getCollection(ns);
+ Collection* coll = ctx.ctx().db()->getCollection(txn, ns);
CanonicalQuery* cq;
if (!CanonicalQuery::canonicalize(ns, query, sort, BSONObj(), &cq).isOK()) {
@@ -922,7 +925,7 @@ namespace mongo {
Client::ReadContext ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection( ns );
+ Collection* collection = ctx.ctx().db()->getCollection( txn, ns );
if ( !collection || collection->numRecords() == 0 ) {
result.appendNumber( "size" , 0 );
@@ -1039,7 +1042,7 @@ namespace mongo {
const string ns = dbname + "." + jsobj.firstElement().valuestr();
Client::ReadContext cx(txn, ns);
Database* db = cx.ctx().db();
- Collection* collection = db->getCollection( ns );
+ Collection* collection = db->getCollection( txn, ns );
if ( !collection ) {
errmsg = "Collection [" + ns + "] not found.";
return false;
@@ -1078,7 +1081,8 @@ namespace mongo {
collection->getRecordStore()->appendCustomStats( &result, scale );
BSONObjBuilder indexSizes;
- result.appendNumber( "totalIndexSize" , db->getIndexSizeForCollection(collection,
+ result.appendNumber( "totalIndexSize" , db->getIndexSizeForCollection(txn,
+ collection,
&indexSizes,
scale) / scale );
result.append("indexSizes", indexSizes.obj());
@@ -1117,7 +1121,7 @@ namespace mongo {
Lock::DBWrite dbXLock(txn->lockState(), dbname);
Client::Context ctx( ns );
- Collection* coll = ctx.db()->getCollection( ns );
+ Collection* coll = ctx.db()->getCollection( txn, ns );
if ( !coll ) {
errmsg = "ns does not exist";
return false;
@@ -1247,7 +1251,7 @@ namespace mongo {
Client::ReadContext ctx(txn, ns);
Database* d = ctx.ctx().db();
- d->getStats( &result, scale );
+ d->getStats( txn, &result, scale );
return true;
}
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 1a2e0c4652f..f36ea3c745a 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -81,11 +81,12 @@ namespace mongo {
/* fetch a single object from collection ns that matches query
set your db SavedContext first
*/
- bool Helpers::findOne(Collection* collection,
+ bool Helpers::findOne(OperationContext* txn,
+ Collection* collection,
const BSONObj &query,
BSONObj& result,
bool requireIndex) {
- DiskLoc loc = findOne( collection, query, requireIndex );
+ DiskLoc loc = findOne( txn, collection, query, requireIndex );
if ( loc.isNull() )
return false;
result = collection->docFor(loc);
@@ -95,7 +96,10 @@ namespace mongo {
/* fetch a single object from collection ns that matches query
set your db SavedContext first
*/
- DiskLoc Helpers::findOne(Collection* collection, const BSONObj &query, bool requireIndex) {
+ DiskLoc Helpers::findOne(OperationContext* txn,
+ Collection* collection,
+ const BSONObj &query,
+ bool requireIndex) {
if ( !collection )
return DiskLoc();
@@ -119,12 +123,17 @@ namespace mongo {
return DiskLoc();
}
- bool Helpers::findById(Database* database, const char *ns, BSONObj query, BSONObj& result ,
- bool* nsFound , bool* indexFound ) {
+ bool Helpers::findById(OperationContext* txn,
+ Database* database,
+ const char *ns,
+ BSONObj query,
+ BSONObj& result,
+ bool* nsFound,
+ bool* indexFound) {
Lock::assertAtLeastReadLocked(ns);
invariant( database );
- Collection* collection = database->getCollection( ns );
+ Collection* collection = database->getCollection( txn, ns );
if ( !collection ) {
return false;
}
@@ -152,7 +161,9 @@ namespace mongo {
return true;
}
- DiskLoc Helpers::findById(Collection* collection, const BSONObj& idquery) {
+ DiskLoc Helpers::findById(OperationContext* txn,
+ Collection* collection,
+ const BSONObj& idquery) {
verify(collection);
IndexCatalog* catalog = collection->getIndexCatalog();
const IndexDescriptor* desc = catalog->findIdIndex();
@@ -163,57 +174,24 @@ namespace mongo {
return accessMethod->findSingle( idquery["_id"].wrap() );
}
- vector<BSONObj> Helpers::findAll( const string& ns , const BSONObj& query ) {
- Lock::assertAtLeastReadLocked(ns);
- Client::Context ctx(ns);
-
- CanonicalQuery* cq;
- const NamespaceString nss(ns);
- const WhereCallbackReal whereCallback(nss.db());
-
- uassert(17236, "Could not canonicalize " + query.toString(),
- CanonicalQuery::canonicalize(ns, query, &cq, whereCallback).isOK());
-
- Runner* rawRunner;
- uassert(17237, "Could not get runner for query " + query.toString(),
- getRunner(ctx.db()->getCollection( ns ), cq, &rawRunner).isOK());
-
- vector<BSONObj> all;
-
- auto_ptr<Runner> runner(rawRunner);
- Runner::RunnerState state;
- BSONObj obj;
- while (Runner::RUNNER_ADVANCED == (state = runner->getNext(&obj, NULL))) {
- all.push_back(obj);
- }
-
- return all;
- }
-
- bool Helpers::isEmpty(const char *ns) {
- Client::Context context(ns, storageGlobalParams.dbpath);
- auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns,
- context.db()->getCollection(ns)));
- return Runner::RUNNER_EOF == runner->getNext(NULL, NULL);
- }
-
/* Get the first object from a collection. Generally only useful if the collection
only ever has a single object -- which is a "singleton collection.
Returns: true if object exists.
*/
- bool Helpers::getSingleton(const char *ns, BSONObj& result) {
+ bool Helpers::getSingleton(OperationContext* txn, const char *ns, BSONObj& result) {
Client::Context context(ns);
auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns,
- context.db()->getCollection(ns)));
+ context.db()->getCollection(txn,
+ ns)));
Runner::RunnerState state = runner->getNext(&result, NULL);
context.getClient()->curop()->done();
return Runner::RUNNER_ADVANCED == state;
}
- bool Helpers::getLast(const char *ns, BSONObj& result) {
+ bool Helpers::getLast(OperationContext* txn, const char *ns, BSONObj& result) {
Client::Context ctx(ns);
- Collection* coll = ctx.db()->getCollection( ns );
+ Collection* coll = ctx.db()->getCollection( txn, ns );
auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns,
coll,
InternalPlanner::BACKWARD));
@@ -303,7 +281,7 @@ namespace mongo {
BSONObj* indexPattern ) {
Client::ReadContext context(txn, ns);
- Collection* collection = context.ctx().db()->getCollection( ns );
+ Collection* collection = context.ctx().db()->getCollection( txn, ns );
if ( !collection )
return false;
@@ -492,7 +470,7 @@ namespace mongo {
*numDocs = 0;
Client::ReadContext ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection( ns );
+ Collection* collection = ctx.ctx().db()->getCollection( txn, ns );
if ( !collection ) return Status( ErrorCodes::NamespaceNotFound, ns );
// Require single key
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index 2cba18345d5..be0ca859248 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -79,29 +79,30 @@ namespace mongo {
@return true if object found
*/
- static bool findOne(Collection* collection,
+ static bool findOne(OperationContext* txn,
+ Collection* collection,
const BSONObj &query,
BSONObj& result,
bool requireIndex = false);
- static DiskLoc findOne(Collection* collection, const BSONObj &query, bool requireIndex);
-
- /**
- * have to be locked already
- */
- static std::vector<BSONObj> findAll( const std::string& ns , const BSONObj& query );
+ static DiskLoc findOne(OperationContext* txn,
+ Collection* collection,
+ const BSONObj &query,
+ bool requireIndex);
/**
* @param foundIndex if passed in will be set to 1 if ns and index found
* @return true if object found
*/
- static bool findById(Database* db, const char *ns, BSONObj query, BSONObj& result,
+ static bool findById(OperationContext* txn,
+ Database* db, const char *ns, BSONObj query, BSONObj& result,
bool* nsFound = 0, bool* indexFound = 0 );
/* TODO: should this move into Collection?
* uasserts if no _id index.
* @return null loc if not found */
- static DiskLoc findById(Collection* collection, const BSONObj& query);
+ static DiskLoc findById(OperationContext* txn,
+ Collection* collection, const BSONObj& query);
/** Get/put the first (or last) object from a collection. Generally only useful if the collection
only ever has a single object -- which is a "singleton collection".
@@ -110,11 +111,17 @@ namespace mongo {
@return true if object exists.
*/
- static bool getSingleton(const char *ns, BSONObj& result);
+ static bool getSingleton(OperationContext* txn, const char *ns, BSONObj& result);
static void putSingleton(OperationContext* txn, const char *ns, BSONObj obj);
static void putSingletonGod(OperationContext* txn, const char *ns, BSONObj obj, bool logTheOp);
- static bool getFirst(const char *ns, BSONObj& result) { return getSingleton(ns, result); }
- static bool getLast(const char *ns, BSONObj& result); // get last object int he collection; e.g. {$natural : -1}
+ static bool getFirst(OperationContext* txn, const char *ns, BSONObj& result) {
+ return getSingleton(txn, ns, result);
+ }
+
+ /**
+ * get last object in the collection; e.g. {$natural : -1}
+ */
+ static bool getLast(OperationContext* txn, const char *ns, BSONObj& result);
/**
* you have to lock
@@ -126,11 +133,6 @@ namespace mongo {
const BSONObj& o,
bool fromMigrate = false );
- /** You do not need to set the database before calling.
- @return true if collection is empty.
- */
- static bool isEmpty(const char *ns);
-
// TODO: this should be somewhere else probably
/* Takes object o, and returns a new object with the
* same field elements but the names stripped out.
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index ca10066c960..c2f6698414d 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -120,7 +120,7 @@ namespace mongo {
// Make sure the collection is valid.
Database* db = ctx.ctx().db();
- Collection* collection = db->getCollection(db->name() + '.' + collName);
+ Collection* collection = db->getCollection(txn, db->name() + '.' + collName);
uassert(17446, "Couldn't find the collection " + collName, NULL != collection);
// Pull out the plan
diff --git a/src/mongo/db/fts/fts_command_mongod.cpp b/src/mongo/db/fts/fts_command_mongod.cpp
index c422d9d8863..0a26ddc6240 100644
--- a/src/mongo/db/fts/fts_command_mongod.cpp
+++ b/src/mongo/db/fts/fts_command_mongod.cpp
@@ -112,7 +112,7 @@ namespace mongo {
}
Runner* rawRunner;
- Status getRunnerStatus = getRunner(ctx.ctx().db()->getCollection(ns), cq, &rawRunner);
+ Status getRunnerStatus = getRunner(ctx.ctx().db()->getCollection(txn, ns), cq, &rawRunner);
if (!getRunnerStatus.isOK()) {
errmsg = getRunnerStatus.reason();
return false;
diff --git a/src/mongo/db/geo/haystack.cpp b/src/mongo/db/geo/haystack.cpp
index 644c2ba60d2..db88a10a35c 100644
--- a/src/mongo/db/geo/haystack.cpp
+++ b/src/mongo/db/geo/haystack.cpp
@@ -79,7 +79,7 @@ namespace mongo {
return false;
}
- Collection* collection = db->getCollection( ns );
+ Collection* collection = db->getCollection( txn, ns );
if ( !collection ) {
errmsg = "can't find ns";
return false;
diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp
index 1e97c3da193..c3b3af8eb5a 100644
--- a/src/mongo/db/index_builder.cpp
+++ b/src/mongo/db/index_builder.cpp
@@ -79,9 +79,9 @@ namespace mongo {
Status IndexBuilder::build(OperationContext* txn, Database* db) const {
const string ns = _index["ns"].String();
- Collection* c = db->getCollection( ns );
+ Collection* c = db->getCollection( txn, ns );
if ( !c ) {
- c = db->getOrCreateCollection( ns );
+ c = db->getOrCreateCollection( txn, ns );
verify(c);
}
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index b9c89ab8b04..e7a74014be3 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -96,7 +96,7 @@ namespace mongo {
// for this namespace.
Client::WriteContext ctx(&txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection( ns );
+ Collection* collection = ctx.ctx().db()->getCollection( &txn, ns );
if ( collection == NULL )
continue;
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index 3e2566bde51..eabcc0ec0c4 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -793,7 +793,7 @@ namespace mongo {
string targetNS = js["ns"].String();
uassertStatusOK( userAllowedWriteNS( targetNS ) );
- Collection* collection = ctx.db()->getCollection( targetNS );
+ Collection* collection = ctx.db()->getCollection( txn, targetNS );
if ( !collection ) {
// implicitly create
collection = ctx.db()->createCollection( txn, targetNS );
@@ -822,7 +822,7 @@ namespace mongo {
if ( !fixed.getValue().isEmpty() )
js = fixed.getValue();
- Collection* collection = ctx.db()->getCollection( ns );
+ Collection* collection = ctx.db()->getCollection( txn, ns );
if ( !collection ) {
collection = ctx.db()->createCollection( txn, ns );
verify( collection );
@@ -937,7 +937,7 @@ namespace mongo {
{
Lock::DBRead lk(txn->lockState(), repl::rsoplog);
BSONObj o;
- if( Helpers::getFirst(repl::rsoplog, o) )
+ if( Helpers::getFirst(txn, repl::rsoplog, o) )
return true;
}
}
@@ -1018,11 +1018,10 @@ namespace {
skip = 0;
}
- OperationContextImpl txn;
- Lock::DBRead lk(txn.lockState(), ns);
+ Lock::DBRead lk(_txn->lockState(), ns);
string errmsg;
int errCode;
- long long res = runCount( ns, _countCmd( ns , query , options , limit , skip ) , errmsg, errCode );
+ long long res = runCount( _txn, ns, _countCmd( ns , query , options , limit , skip ) , errmsg, errCode );
if ( res == -1 ) {
// namespace doesn't exist
return 0;
diff --git a/src/mongo/db/ops/count.cpp b/src/mongo/db/ops/count.cpp
index 5ae6f8ed9c0..a602a1c4478 100644
--- a/src/mongo/db/ops/count.cpp
+++ b/src/mongo/db/ops/count.cpp
@@ -66,10 +66,14 @@ namespace mongo {
return num;
}
- long long runCount( const string& ns, const BSONObj &cmd, string &err, int &errCode ) {
+ long long runCount(OperationContext* txn,
+ const string& ns,
+ const BSONObj &cmd,
+ string &err,
+ int &errCode) {
// Lock 'ns'.
Client::Context cx(ns);
- Collection* collection = cx.db()->getCollection(ns);
+ Collection* collection = cx.db()->getCollection(txn, ns);
if (NULL == collection) {
err = "ns missing";
diff --git a/src/mongo/db/ops/count.h b/src/mongo/db/ops/count.h
index 94384529d6c..8040efb6028 100644
--- a/src/mongo/db/ops/count.h
+++ b/src/mongo/db/ops/count.h
@@ -32,6 +32,8 @@
namespace mongo {
+ class OperationContext;
+
/**
* 'ns' is the namespace we're counting on.
*
@@ -40,6 +42,10 @@ namespace mongo {
* @return -1 on ns does not exist error and other errors, 0 on other errors, otherwise the
* match count.
*/
- long long runCount(const std::string& ns, const BSONObj& cmd, std::string& err, int& errCode );
+ long long runCount(OperationContext* txn,
+ const std::string& ns,
+ const BSONObj& cmd,
+ std::string& err,
+ int& errCode);
} // namespace mongo
diff --git a/src/mongo/db/ops/delete_executor.cpp b/src/mongo/db/ops/delete_executor.cpp
index 1a0d3aa19aa..d891fb97e26 100644
--- a/src/mongo/db/ops/delete_executor.cpp
+++ b/src/mongo/db/ops/delete_executor.cpp
@@ -99,7 +99,7 @@ namespace mongo {
}
}
- Collection* collection = db->getCollection(ns.ns());
+ Collection* collection = db->getCollection(txn, ns.ns());
if (NULL == collection) {
return 0;
}
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 9a1fd4e7f35..b47c8235fad 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -439,7 +439,7 @@ namespace mongo {
const NamespaceString& nsString = request.getNamespaceString();
UpdateLifecycle* lifecycle = request.getLifecycle();
- Collection* collection = db->getCollection(nsString.ns());
+ Collection* collection = db->getCollection(txn, nsString.ns());
validateUpdate(nsString.ns().c_str(), request.getUpdates(), request.getQuery());
@@ -765,7 +765,7 @@ namespace mongo {
// Only create the collection if the doc will be inserted.
if (!collection) {
- collection = db->getCollection(request.getNamespaceString().ns());
+ collection = db->getCollection(txn, request.getNamespaceString().ns());
if (!collection) {
collection = db->createCollection(txn, request.getNamespaceString().ns());
}
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
index 8624ab2ee42..182493e802a 100644
--- a/src/mongo/db/pdfile.cpp
+++ b/src/mongo/db/pdfile.cpp
@@ -111,7 +111,7 @@ namespace mongo {
return Status( ErrorCodes::InvalidNamespace,
str::stream() << "invalid ns: " << ns );
- Collection* collection = db->getCollection( ns );
+ Collection* collection = db->getCollection( txn, ns );
if ( collection )
return Status( ErrorCodes::NamespaceExists,
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index 4409b899c4c..b790fe4b4d3 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -65,7 +65,7 @@ namespace {
bool isCapped(const NamespaceString& ns) {
Client::ReadContext ctx(_ctx->opCtx, ns.ns());
- Collection* collection = ctx.ctx().db()->getCollection(ns);
+ Collection* collection = ctx.ctx().db()->getCollection(_ctx->opCtx, ns);
return collection && collection->isCapped();
}
diff --git a/src/mongo/db/prefetch.cpp b/src/mongo/db/prefetch.cpp
index 36ad06ef964..7e463a592f6 100644
--- a/src/mongo/db/prefetch.cpp
+++ b/src/mongo/db/prefetch.cpp
@@ -79,7 +79,7 @@ namespace mongo {
BSONObj obj = op.getObjectField(opField);
const char *ns = op.getStringField("ns");
- Collection* collection = db->getCollection( ns );
+ Collection* collection = db->getCollection( txn, ns );
if ( !collection )
return;
@@ -187,7 +187,7 @@ namespace mongo {
// we can probably use Client::Context here instead of ReadContext as we
// have locked higher up the call stack already
Client::ReadContext ctx(txn, ns);
- if( Helpers::findById(ctx.ctx().db(), ns, builder.done(), result) ) {
+ if( Helpers::findById(txn, ctx.ctx().db(), ns, builder.done(), result) ) {
// do we want to use Record::touch() here? it's pretty similar.
volatile char _dummy_char = '\0';
// Touch the first word on every page in order to fault it into memory
diff --git a/src/mongo/db/query/new_find.cpp b/src/mongo/db/query/new_find.cpp
index 7ed222b9f06..7834d7902e5 100644
--- a/src/mongo/db/query/new_find.cpp
+++ b/src/mongo/db/query/new_find.cpp
@@ -153,7 +153,7 @@ namespace mongo {
// This is a read lock.
scoped_ptr<Client::ReadContext> ctx(new Client::ReadContext(txn, ns));
- Collection* collection = ctx->ctx().db()->getCollection(ns);
+ Collection* collection = ctx->ctx().db()->getCollection(txn, ns);
uassert( 17356, "collection dropped between getMore calls", collection );
QLOG() << "Running getMore, cursorid: " << cursorid << endl;
@@ -460,7 +460,7 @@ namespace mongo {
// where-specific parsing code assumes we have a lock and creates execution machinery that
// requires it.
Client::ReadContext ctx(txn, q.ns);
- Collection* collection = ctx.ctx().db()->getCollection( ns );
+ Collection* collection = ctx.ctx().db()->getCollection( txn, ns );
// Parse the qm into a CanonicalQuery.
CanonicalQuery* cq;
@@ -675,7 +675,7 @@ namespace mongo {
// If we're tailing a capped collection, we don't bother saving the cursor if the
// collection is empty. Otherwise, the semantics of the tailable cursor is that the
// client will keep trying to read from it. So we'll keep it around.
- Collection* collection = ctx.ctx().db()->getCollection(cq->ns());
+ Collection* collection = ctx.ctx().db()->getCollection(txn, cq->ns());
if (collection && collection->numRecords() != 0 && pq.getNumToReturn() != 1) {
saveClientCursor = true;
}
diff --git a/src/mongo/db/range_deleter_db_env.cpp b/src/mongo/db/range_deleter_db_env.cpp
index 39e0c81a465..0088b66ffe3 100644
--- a/src/mongo/db/range_deleter_db_env.cpp
+++ b/src/mongo/db/range_deleter_db_env.cpp
@@ -160,7 +160,7 @@ namespace mongo {
const StringData& ns,
std::set<CursorId>* openCursors) {
Client::ReadContext ctx(txn, ns.toString());
- Collection* collection = ctx.ctx().db()->getCollection( ns );
+ Collection* collection = ctx.ctx().db()->getCollection( txn, ns );
if ( !collection )
return;
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index 52ecec48a95..aac1c1b3eca 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -326,7 +326,7 @@ namespace mongo {
{
string ns = dbName + ".system.namespaces";
Client::Context ctx( ns );
- Collection* coll = originalDatabase->getCollection( ns );
+ Collection* coll = originalDatabase->getCollection( txn, ns );
if ( coll ) {
scoped_ptr<RecordIterator> it( coll->getIterator( DiskLoc(),
false,
diff --git a/src/mongo/db/repl/health.cpp b/src/mongo/db/repl/health.cpp
index aed8a4baa9b..cfb1bd780de 100644
--- a/src/mongo/db/repl/health.cpp
+++ b/src/mongo/db/repl/health.cpp
@@ -217,7 +217,7 @@ namespace repl {
}
}
- void ReplSetImpl::_summarizeAsHtml(stringstream& s) const {
+ void ReplSetImpl::_summarizeAsHtml(OperationContext* txn, stringstream& s) const {
s << table(0, false);
s << tr("Set name:", _name);
s << tr("Majority up:", elect.aMajoritySeemsToBeUp()?"yes":"no" );
@@ -252,7 +252,7 @@ namespace repl {
readlocktry lk(/*"local.replset.minvalid", */300);
if( lk.got() ) {
BSONObj mv;
- if( Helpers::getSingleton("local.replset.minvalid", mv) ) {
+ if( Helpers::getSingleton(txn, "local.replset.minvalid", mv) ) {
myMinValid = "minvalid:" + mv["ts"]._opTime().toString();
}
}
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index ff2ff4921f5..93681b32d28 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -165,7 +165,7 @@ namespace repl {
OperationContextImpl txn;
Client::WriteContext ctx(&txn, "local");
// local.me is an identifier for a server for getLastError w:2+
- if (!Helpers::getSingleton("local.me", _me) ||
+ if (!Helpers::getSingleton(&txn, "local.me", _me) ||
!_me.hasField("host") ||
_me["host"].String() != myname) {
@@ -231,7 +231,7 @@ namespace repl {
/* we reuse our existing objects so that we can keep our existing connection
and cursor in effect.
*/
- void ReplSource::loadAll(SourceVector &v) {
+ void ReplSource::loadAll(OperationContext* txn, SourceVector &v) {
const char* localSources = "local.sources";
Client::Context ctx(localSources);
SourceVector old = v;
@@ -242,8 +242,9 @@ namespace repl {
// check that no items are in sources other than that
// add if missing
int n = 0;
- auto_ptr<Runner> runner(InternalPlanner::collectionScan(localSources,
- ctx.db()->getCollection(localSources)));
+ auto_ptr<Runner> runner(
+ InternalPlanner::collectionScan(localSources,
+ ctx.db()->getCollection(txn, localSources)));
BSONObj obj;
Runner::RunnerState state;
while (Runner::RUNNER_ADVANCED == (state = runner->getNext(&obj, NULL))) {
@@ -285,8 +286,9 @@ namespace repl {
}
}
- auto_ptr<Runner> runner(InternalPlanner::collectionScan(localSources,
- ctx.db()->getCollection(localSources)));
+ auto_ptr<Runner> runner(
+ InternalPlanner::collectionScan(localSources,
+ ctx.db()->getCollection(txn, localSources)));
BSONObj obj;
Runner::RunnerState state;
while (Runner::RUNNER_ADVANCED == (state = runner->getNext(&obj, NULL))) {
@@ -318,7 +320,7 @@ namespace repl {
if ( !replAllDead )
return;
SourceVector sources;
- ReplSource::loadAll(sources);
+ ReplSource::loadAll(txn, sources);
for( SourceVector::iterator i = sources.begin(); i != sources.end(); ++i ) {
log() << requester << " forcing resync from " << (*i)->hostName << endl;
(*i)->forceResync( txn, requester );
@@ -1020,10 +1022,11 @@ namespace repl {
1 = special sentinel indicating adaptive sleep recommended
*/
int _replMain(ReplSource::SourceVector& sources, int& nApplied) {
+ OperationContextImpl txn;
{
ReplInfo r("replMain load sources");
Lock::GlobalWrite lk;
- ReplSource::loadAll(sources);
+ ReplSource::loadAll(&txn, sources);
replSettings.fastsync = false; // only need this param for initial reset
}
@@ -1245,6 +1248,7 @@ namespace repl {
c = &cc();
}
+ OperationContextImpl txn; // XXX
Lock::GlobalRead lk;
for( unsigned i = a; i <= b; i++ ) {
const BSONObj& op = v[i];
@@ -1267,7 +1271,7 @@ namespace repl {
b.append(_id);
BSONObj result;
Client::Context ctx( ns );
- if( Helpers::findById(ctx.db(), ns, b.done(), result) )
+ if( Helpers::findById(&txn, ctx.db(), ns, b.done(), result) )
_dummy_z += result.objsize(); // touch
}
}
@@ -1301,7 +1305,7 @@ namespace repl {
b.append(_id);
BSONObj result;
Client::ReadContext ctx(txn, ns );
- if( Helpers::findById(ctx.ctx().db(), ns, b.done(), result) )
+ if( Helpers::findById(txn, ctx.ctx().db(), ns, b.done(), result) )
_dummy_z += result.objsize(); // touch
}
}
diff --git a/src/mongo/db/repl/master_slave.h b/src/mongo/db/repl/master_slave.h
index ca21d180111..15445f68ede 100644
--- a/src/mongo/db/repl/master_slave.h
+++ b/src/mongo/db/repl/master_slave.h
@@ -138,7 +138,7 @@ namespace repl {
int nClonedThisPass;
typedef std::vector< shared_ptr< ReplSource > > SourceVector;
- static void loadAll(SourceVector&);
+ static void loadAll(OperationContext* txn, SourceVector&);
explicit ReplSource(BSONObj);
/* -1 = error */
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 474416dd250..1c40bbdfa6f 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -422,7 +422,7 @@ namespace repl {
_logOp(txn, opstr, ns, 0, obj, patt, b, fromMigrate);
}
- logOpForSharding(opstr, ns, obj, patt, fromMigrate);
+ logOpForSharding(txn, opstr, ns, obj, patt, fromMigrate);
logOpForDbHash(ns);
getGlobalAuthorizationManager()->logOp(opstr, ns, obj, patt, b);
@@ -672,9 +672,9 @@ namespace repl {
// thus this is not ideal.
else {
if (collection == NULL ||
- (indexCatalog->haveIdIndex() && Helpers::findById(collection, updateCriteria).isNull()) ||
+ (indexCatalog->haveIdIndex() && Helpers::findById(txn, collection, updateCriteria).isNull()) ||
// capped collections won't have an _id index
- (!indexCatalog->haveIdIndex() && Helpers::findOne(collection, updateCriteria, false).isNull())) {
+ (!indexCatalog->haveIdIndex() && Helpers::findOne(txn, collection, updateCriteria, false).isNull())) {
failedUpdate = true;
log() << "replication couldn't find doc: " << op.toString() << endl;
}
diff --git a/src/mongo/db/repl/repl_set.h b/src/mongo/db/repl/repl_set.h
index 914f13751cf..981e2ee708a 100644
--- a/src/mongo/db/repl/repl_set.h
+++ b/src/mongo/db/repl/repl_set.h
@@ -63,7 +63,7 @@ namespace repl {
string name() const { return ReplSetImpl::name(); }
virtual const ReplSetConfig& config() { return ReplSetImpl::config(); }
void getOplogDiagsAsHtml(unsigned server_id, stringstream& ss) const { _getOplogDiagsAsHtml(server_id,ss); }
- void summarizeAsHtml(stringstream& ss) const { _summarizeAsHtml(ss); }
+ void summarizeAsHtml(OperationContext* txn, stringstream& ss) const { _summarizeAsHtml(txn, ss); }
void summarizeStatus(BSONObjBuilder& b) const { _summarizeStatus(b); }
void fillIsMaster(BSONObjBuilder& b) { _fillIsMaster(b); }
threadpool::ThreadPool& getPrefetchPool() { return ReplSetImpl::getPrefetchPool(); }
diff --git a/src/mongo/db/repl/repl_set_impl.cpp b/src/mongo/db/repl/repl_set_impl.cpp
index 5274e461f07..5d7a37d8806 100644
--- a/src/mongo/db/repl/repl_set_impl.cpp
+++ b/src/mongo/db/repl/repl_set_impl.cpp
@@ -438,7 +438,7 @@ namespace {
OperationContextImpl txn; // XXX?
Lock::DBRead lk(txn.lockState(), rsoplog);
BSONObj o;
- if (Helpers::getLast(rsoplog, o)) {
+ if (Helpers::getLast(&txn, rsoplog, o)) {
lastH = o["h"].numberLong();
lastOpTimeWritten = o["ts"]._opTime();
uassert(13290, "bad replSet oplog entry?", quiet || !lastOpTimeWritten.isNull());
@@ -449,7 +449,8 @@ namespace {
OperationContextImpl txn; // XXX?
Lock::DBRead lk(txn.lockState(), rsoplog);
BSONObj o;
- uassert(17347, "Problem reading earliest entry from oplog", Helpers::getFirst(rsoplog, o));
+ uassert(17347, "Problem reading earliest entry from oplog",
+ Helpers::getFirst(&txn, rsoplog, o));
return o["ts"]._opTime();
}
@@ -876,7 +877,7 @@ namespace {
OperationContextImpl txn; // XXX?
Lock::DBRead lk (txn.lockState(), "local");
BSONObj mv;
- if (Helpers::getSingleton("local.replset.minvalid", mv)) {
+ if (Helpers::getSingleton(&txn, "local.replset.minvalid", mv)) {
return mv[_initialSyncFlagString].trueValue();
}
return false;
@@ -897,7 +898,7 @@ namespace {
OperationContextImpl txn; // XXX?
Lock::DBRead lk(txn.lockState(), "local.replset.minvalid");
BSONObj mv;
- if (Helpers::getSingleton("local.replset.minvalid", mv)) {
+ if (Helpers::getSingleton(&txn, "local.replset.minvalid", mv)) {
return mv["ts"]._opTime();
}
return OpTime();
diff --git a/src/mongo/db/repl/repl_set_impl.h b/src/mongo/db/repl/repl_set_impl.h
index 1fc7529dd2d..b5b6254826d 100644
--- a/src/mongo/db/repl/repl_set_impl.h
+++ b/src/mongo/db/repl/repl_set_impl.h
@@ -198,7 +198,7 @@ namespace repl {
MemberState state() const { return box.getState(); }
void _fatal();
void _getOplogDiagsAsHtml(unsigned server_id, stringstream& ss) const;
- void _summarizeAsHtml(stringstream&) const;
+ void _summarizeAsHtml(OperationContext* txn, stringstream&) const;
void _summarizeStatus(BSONObjBuilder&) const; // for replSetGetStatus command
/* call afer constructing to start - returns fairly quickly after launching its threads */
diff --git a/src/mongo/db/repl/repl_settings.cpp b/src/mongo/db/repl/repl_settings.cpp
index 48dad4218cc..65db46755ab 100644
--- a/src/mongo/db/repl/repl_settings.cpp
+++ b/src/mongo/db/repl/repl_settings.cpp
@@ -90,8 +90,10 @@ namespace repl {
{
const char* localSources = "local.sources";
Client::ReadContext ctx(txn, localSources);
- auto_ptr<Runner> runner(InternalPlanner::collectionScan(localSources,
- ctx.ctx().db()->getCollection(localSources)));
+ auto_ptr<Runner> runner(
+ InternalPlanner::collectionScan(localSources,
+ ctx.ctx().db()->getCollection(txn,
+ localSources)));
BSONObj obj;
Runner::RunnerState state;
while (Runner::RUNNER_ADVANCED == (state = runner->getNext(&obj, NULL))) {
diff --git a/src/mongo/db/repl/replset_web_handler.cpp b/src/mongo/db/repl/replset_web_handler.cpp
index f9d271a1e09..8c71fa2748b 100644
--- a/src/mongo/db/repl/replset_web_handler.cpp
+++ b/src/mongo/db/repl/replset_web_handler.cpp
@@ -59,7 +59,7 @@ namespace repl {
responseMsg = _replSetOplog(params);
}
else
- responseMsg = _replSet();
+ responseMsg = _replSet(txn);
responseCode = 200;
}
@@ -93,7 +93,7 @@ namespace repl {
}
/* /_replSet show replica set status in html format */
- string _replSet() {
+ string _replSet(OperationContext* txn) {
stringstream s;
s << start("Replica Set Status " + prettyHostName());
s << p( a("/", "back", "Home") + " | " +
@@ -112,7 +112,7 @@ namespace repl {
}
else {
try {
- theReplSet->summarizeAsHtml(s);
+ theReplSet->summarizeAsHtml(txn, s);
}
catch(...) { s << "error summarizing replset status\n"; }
}
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index 32d9d7e6e3c..be6c8aaf75a 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -133,7 +133,7 @@ namespace repl {
OperationContextImpl txn;
Client::WriteContext ctx(&txn, rsoplog);
- Collection* collection = ctx.ctx().db()->getCollection(rsoplog);
+ Collection* collection = ctx.ctx().db()->getCollection(&txn, rsoplog);
// temp
if( collection->numRecords() == 0 )
diff --git a/src/mongo/db/repl/rs_initiate.cpp b/src/mongo/db/repl/rs_initiate.cpp
index 23ec6697a57..e50d2a80568 100644
--- a/src/mongo/db/repl/rs_initiate.cpp
+++ b/src/mongo/db/repl/rs_initiate.cpp
@@ -207,7 +207,7 @@ namespace repl {
it is ok if the initiating member has *other* data than that.
*/
BSONObj o;
- if( Helpers::getFirst(rsoplog, o) ) {
+ if( Helpers::getFirst(txn, rsoplog, o) ) {
errmsg = rsoplog + string(" is not empty on the initiating member. cannot initiate.");
return false;
}
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index b8bc672753f..efdcee062a2 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -231,12 +231,13 @@ namespace repl {
int getRBID(DBClientConnection*);
static void syncRollbackFindCommonPoint(DBClientConnection* them, FixUpInfo& fixUpInfo) {
+ OperationContextImpl txn; // XXX
verify(Lock::isLocked());
Client::Context ctx(rsoplog);
boost::scoped_ptr<Runner> runner(
InternalPlanner::collectionScan(rsoplog,
- ctx.db()->getCollection(rsoplog),
+ ctx.db()->getCollection(&txn, rsoplog),
InternalPlanner::BACKWARD));
BSONObj ourObj;
@@ -484,7 +485,7 @@ namespace repl {
sethbmsg("rollback 4.7");
Client::Context ctx(rsoplog);
- Collection* oplogCollection = ctx.db()->getCollection(rsoplog);
+ Collection* oplogCollection = ctx.db()->getCollection(&txn, rsoplog);
uassert(13423,
str::stream() << "replSet error in rollback can't find " << rsoplog,
oplogCollection);
@@ -516,7 +517,7 @@ namespace repl {
// Add the doc to our rollback file
BSONObj obj;
- bool found = Helpers::findOne(ctx.db()->getCollection(doc.ns), pattern, obj, false);
+ bool found = Helpers::findOne(&txn, ctx.db()->getCollection(&txn, doc.ns), pattern, obj, false);
if (found) {
removeSaver->goingToDelete(obj);
}
@@ -529,7 +530,7 @@ namespace repl {
// TODO 1.6 : can't delete from a capped collection. need to handle that here.
deletes++;
- Collection* collection = ctx.db()->getCollection(doc.ns);
+ Collection* collection = ctx.db()->getCollection(&txn, doc.ns);
if (collection) {
if (collection->isCapped()) {
// can't delete from a capped collection - so we truncate instead. if
@@ -538,7 +539,7 @@ namespace repl {
// TODO: IIRC cappedTruncateAfter does not handle completely empty.
// this will crazy slow if no _id index.
long long start = Listener::getElapsedTimeMillis();
- DiskLoc loc = Helpers::findOne(collection, pattern, false);
+ DiskLoc loc = Helpers::findOne(&txn, collection, pattern, false);
if (Listener::getElapsedTimeMillis() - start > 200)
log() << "replSet warning roll back slow no _id index for "
<< doc.ns << " perhaps?" << rsLog;
@@ -657,7 +658,7 @@ namespace repl {
OperationContextImpl txn;
Lock::DBRead lk(txn.lockState(), "local.replset.minvalid");
BSONObj mv;
- if (Helpers::getSingleton("local.replset.minvalid", mv)) {
+ if (Helpers::getSingleton(&txn, "local.replset.minvalid", mv)) {
OpTime minvalid = mv["ts"]._opTime();
if (minvalid > lastOpTimeWritten) {
log() << "replSet need to rollback, but in inconsistent state";
diff --git a/src/mongo/db/repl/sync.cpp b/src/mongo/db/repl/sync.cpp
index 888edbeb99a..8ca15ed9386 100644
--- a/src/mongo/db/repl/sync.cpp
+++ b/src/mongo/db/repl/sync.cpp
@@ -48,12 +48,12 @@ namespace repl {
hn = hostname;
}
- BSONObj Sync::getMissingDoc(Database* db, const BSONObj& o) {
+ BSONObj Sync::getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) {
OplogReader missingObjReader; // why are we using OplogReader to run a non-oplog query?
const char *ns = o.getStringField("ns");
// capped collections
- Collection* collection = db->getCollection(ns);
+ Collection* collection = db->getCollection(txn, ns);
if ( collection && collection->isCapped() ) {
log() << "replication missing doc, but this is okay for a capped collection (" << ns << ")" << endl;
return BSONObj();
@@ -115,7 +115,7 @@ namespace repl {
// we don't have the object yet, which is possible on initial sync. get it.
log() << "replication info adding missing object" << endl; // rare enough we can log
- BSONObj missingObj = getMissingDoc(ctx.db(), o);
+ BSONObj missingObj = getMissingDoc(&txn, ctx.db(), o);
if( missingObj.isEmpty() ) {
log() << "replication missing object not found on source. presumably deleted later in oplog" << endl;
@@ -125,7 +125,7 @@ namespace repl {
return false;
}
else {
- Collection* collection = ctx.db()->getOrCreateCollection( ns );
+ Collection* collection = ctx.db()->getOrCreateCollection( &txn, ns );
verify( collection ); // should never happen
StatusWith<DiskLoc> result = collection->insertDocument( &txn, missingObj, true );
uassert(15917,
diff --git a/src/mongo/db/repl/sync.h b/src/mongo/db/repl/sync.h
index e86997cad6c..67cb5e63a60 100644
--- a/src/mongo/db/repl/sync.h
+++ b/src/mongo/db/repl/sync.h
@@ -34,6 +34,7 @@
namespace mongo {
class Database;
+ class OperationContext;
namespace repl {
@@ -43,7 +44,7 @@ namespace repl {
public:
Sync(const std::string& hostname) : hn(hostname) {}
virtual ~Sync() {}
- virtual BSONObj getMissingDoc(Database* db, const BSONObj& o);
+ virtual BSONObj getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o);
/**
* If applyOperation_inlock should be called again after an update fails.
diff --git a/src/mongo/db/repl/sync_source_feedback.cpp b/src/mongo/db/repl/sync_source_feedback.cpp
index d30eb517898..774748288b5 100644
--- a/src/mongo/db/repl/sync_source_feedback.cpp
+++ b/src/mongo/db/repl/sync_source_feedback.cpp
@@ -70,7 +70,7 @@ namespace repl {
Client::WriteContext ctx(&txn, "local");
// local.me is an identifier for a server for getLastError w:2+
- if (!Helpers::getSingleton("local.me", _me) ||
+ if (!Helpers::getSingleton(&txn, "local.me", _me) ||
!_me.hasField("host") ||
_me["host"].String() != myname) {
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index c497c8ed7e4..cb861e48ec5 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -117,7 +117,7 @@ namespace mongo {
OperationContextImpl txn;
Client::WriteContext ctx(&txn, ns );
- Collection* collection = ctx.ctx().db()->getCollection( ns );
+ Collection* collection = ctx.ctx().db()->getCollection( &txn, ns );
if ( !collection ) {
// collection was dropped
continue;
diff --git a/src/mongo/dbtests/clienttests.cpp b/src/mongo/dbtests/clienttests.cpp
index 194cc048046..2affdf56b85 100644
--- a/src/mongo/dbtests/clienttests.cpp
+++ b/src/mongo/dbtests/clienttests.cpp
@@ -128,7 +128,7 @@ namespace ClientTests {
db.insert(ns(), BSON("x" << 1 << "y" << 2));
db.insert(ns(), BSON("x" << 2 << "y" << 2));
- Collection* collection = ctx.ctx().db()->getCollection( ns() );
+ Collection* collection = ctx.ctx().db()->getCollection( &txn, ns() );
ASSERT( collection );
IndexCatalog* indexCatalog = collection->getIndexCatalog();
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index 729443e2835..7ece0d6e0ee 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -41,18 +41,10 @@
namespace CountTests {
class Base {
- OperationContextImpl _txn;
- Lock::DBWrite lk;
-
- Client::Context _context;
-
- Database* _database;
- Collection* _collection;
-
public:
Base() : lk(_txn.lockState(), ns()), _context( ns() ) {
_database = _context.db();
- _collection = _database->getCollection( ns() );
+ _collection = _database->getCollection( &_txn, ns() );
if ( _collection ) {
_database->dropCollection( &_txn, ns() );
}
@@ -100,6 +92,17 @@ namespace CountTests {
static BSONObj countCommand( const BSONObj &query ) {
return BSON( "query" << query );
}
+
+ OperationContextImpl _txn;
+
+ private:
+ Lock::DBWrite lk;
+
+ Client::Context _context;
+
+ Database* _database;
+ Collection* _collection;
+
};
class Basic : public Base {
@@ -109,7 +112,7 @@ namespace CountTests {
BSONObj cmd = fromjson( "{\"query\":{}}" );
string err;
int errCode;
- ASSERT_EQUALS( 1, runCount( ns(), cmd, err, errCode ) );
+ ASSERT_EQUALS( 1, runCount( &_txn, ns(), cmd, err, errCode ) );
}
};
@@ -122,7 +125,7 @@ namespace CountTests {
BSONObj cmd = fromjson( "{\"query\":{\"a\":\"b\"}}" );
string err;
int errCode;
- ASSERT_EQUALS( 2, runCount( ns(), cmd, err, errCode ) );
+ ASSERT_EQUALS( 2, runCount( &_txn, ns(), cmd, err, errCode ) );
}
};
@@ -134,7 +137,7 @@ namespace CountTests {
BSONObj cmd = fromjson( "{\"query\":{},\"fields\":{\"a\":1}}" );
string err;
int errCode;
- ASSERT_EQUALS( 2, runCount( ns(), cmd, err, errCode ) );
+ ASSERT_EQUALS( 2, runCount( &_txn, ns(), cmd, err, errCode ) );
}
};
@@ -147,7 +150,7 @@ namespace CountTests {
BSONObj cmd = fromjson( "{\"query\":{\"a\":\"b\"},\"fields\":{\"a\":1}}" );
string err;
int errCode;
- ASSERT_EQUALS( 1, runCount( ns(), cmd, err, errCode ) );
+ ASSERT_EQUALS( 1, runCount( &_txn, ns(), cmd, err, errCode ) );
}
};
@@ -159,7 +162,7 @@ namespace CountTests {
BSONObj cmd = fromjson( "{\"query\":{\"a\":/^b/}}" );
string err;
int errCode;
- ASSERT_EQUALS( 1, runCount( ns(), cmd, err, errCode ) );
+ ASSERT_EQUALS( 1, runCount( &_txn, ns(), cmd, err, errCode ) );
}
};
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index 0cee60de170..3c74b2af526 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -151,7 +151,7 @@ namespace mongo {
ASSERT_LESS_THAN( estSizeBytes, maxSizeBytes );
Database* db = dbHolder().get(nsToDatabase(range.ns), storageGlobalParams.dbpath);
- const Collection* collection = db->getCollection(ns);
+ const Collection* collection = db->getCollection(&txn, ns);
// Make sure all the disklocs actually correspond to the right info
for ( set<DiskLoc>::const_iterator it = locs.begin(); it != locs.end(); ++it ) {
diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp
index 652958c2efe..a6425e1748c 100644
--- a/src/mongo/dbtests/documentsourcetests.cpp
+++ b/src/mongo/dbtests/documentsourcetests.cpp
@@ -170,7 +170,7 @@ namespace DocumentSourceTests {
CanonicalQuery* cq;
uassertStatusOK(CanonicalQuery::canonicalize(ns, /*query=*/BSONObj(), &cq));
Runner* runnerBare;
- uassertStatusOK(getRunner(ctx.ctx().db()->getCollection(ns), cq, &runnerBare));
+ uassertStatusOK(getRunner(ctx.ctx().db()->getCollection(&_opCtx, ns), cq, &runnerBare));
_runner.reset(runnerBare);
_runner->saveState();
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 792c1071d3e..755c996c5c0 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -63,7 +63,7 @@ namespace IndexUpdateTests {
killCurrentOp.reset();
}
Collection* collection() {
- return _ctx.ctx().db()->getCollection( _ns );
+ return _ctx.ctx().db()->getCollection( &_txn, _ns );
}
protected:
// QUERY_MIGRATION
diff --git a/src/mongo/dbtests/oplogstarttests.cpp b/src/mongo/dbtests/oplogstarttests.cpp
index ab4acc8b90f..d04b7d19c6e 100644
--- a/src/mongo/dbtests/oplogstarttests.cpp
+++ b/src/mongo/dbtests/oplogstarttests.cpp
@@ -38,12 +38,11 @@ namespace OplogStartTests {
class Base {
public:
Base() : _context(ns()) {
- OperationContextImpl txn;
- Collection* c = _context.db()->getCollection(&txn, ns());
+ Collection* c = _context.db()->getCollection(&_txn, ns());
if (!c) {
- c = _context.db()->createCollection(&txn, ns());
+ c = _context.db()->createCollection(&_txn, ns());
}
- c->getIndexCatalog()->ensureHaveIdIndex(&txn);
+ c->getIndexCatalog()->ensureHaveIdIndex(&_txn);
}
~Base() {
@@ -62,7 +61,7 @@ namespace OplogStartTests {
}
Collection* collection() {
- return _context.db()->getCollection( ns() );
+ return _context.db()->getCollection( &_txn, ns() );
}
DBDirectClient *client() const { return &_client; }
@@ -89,6 +88,7 @@ namespace OplogStartTests {
scoped_ptr<OplogStart> _stage;
private:
+ OperationContextImpl _txn;
Lock::GlobalWrite lk;
Client::Context _context;
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index 5ef98e0b08b..e62bce62067 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -60,7 +60,7 @@ namespace PdfileTests {
return "unittests.pdfiletests.Insert";
}
Collection* collection() {
- return _context.db()->getCollection( ns() );
+ return _context.db()->getCollection( &_txn, ns() );
}
Lock::GlobalWrite lk_;
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index 02939707de3..fbf8a507b6c 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -88,7 +88,7 @@ namespace PlanRankingTests {
*/
QuerySolution* pickBestPlan(CanonicalQuery* cq) {
Client::ReadContext ctx(&_txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection(ns);
+ Collection* collection = ctx.ctx().db()->getCollection(&_txn, ns);
QueryPlannerParams plannerParams;
fillOutPlannerParams(collection, cq, &plannerParams);
diff --git a/src/mongo/dbtests/query_multi_plan_runner.cpp b/src/mongo/dbtests/query_multi_plan_runner.cpp
index 99d1f61d172..30393e396f4 100644
--- a/src/mongo/dbtests/query_multi_plan_runner.cpp
+++ b/src/mongo/dbtests/query_multi_plan_runner.cpp
@@ -67,8 +67,8 @@ namespace QueryMultiPlanRunner {
_client.ensureIndex(ns(), obj);
}
- IndexDescriptor* getIndex(Database* db, const BSONObj& obj) {
- const Collection* collection = db->getCollection( ns() );
+ IndexDescriptor* getIndex(OperationContext* txn, Database* db, const BSONObj& obj) {
+ const Collection* collection = db->getCollection( txn, ns() );
return collection->getIndexCatalog()->findIndexByKeyPattern(obj);
}
@@ -107,14 +107,14 @@ namespace QueryMultiPlanRunner {
// Every call to work() returns something so this should clearly win (by current scoring
// at least).
IndexScanParams ixparams;
- ixparams.descriptor = getIndex(ctx.ctx().db(), BSON("foo" << 1));
+ ixparams.descriptor = getIndex(&txn, ctx.ctx().db(), BSON("foo" << 1));
ixparams.bounds.isSimpleRange = true;
ixparams.bounds.startKey = BSON("" << 7);
ixparams.bounds.endKey = BSON("" << 7);
ixparams.bounds.endKeyInclusive = true;
ixparams.direction = 1;
- const Collection* coll = ctx.ctx().db()->getCollection(ns());
+ const Collection* coll = ctx.ctx().db()->getCollection(&txn, ns());
auto_ptr<WorkingSet> sharedWs(new WorkingSet());
IndexScan* ix = new IndexScan(ixparams, sharedWs.get(), NULL);
@@ -122,7 +122,7 @@ namespace QueryMultiPlanRunner {
// Plan 1: CollScan with matcher.
CollectionScanParams csparams;
- csparams.collection = ctx.ctx().db()->getCollection( ns() );
+ csparams.collection = ctx.ctx().db()->getCollection( &txn, ns() );
csparams.direction = CollectionScanParams::FORWARD;
// Make the filter.
@@ -139,7 +139,7 @@ namespace QueryMultiPlanRunner {
verify(CanonicalQuery::canonicalize(ns(), BSON("foo" << 7), &cq).isOK());
verify(NULL != cq);
- MultiPlanStage* mps = new MultiPlanStage(ctx.ctx().db()->getCollection(ns()),cq);
+ MultiPlanStage* mps = new MultiPlanStage(ctx.ctx().db()->getCollection(&txn, ns()),cq);
mps->addPlan(createQuerySolution(), firstRoot.release(), sharedWs.get());
mps->addPlan(createQuerySolution(), secondRoot.release(), sharedWs.get());
@@ -149,7 +149,7 @@ namespace QueryMultiPlanRunner {
ASSERT_EQUALS(0, mps->bestPlanIdx());
SingleSolutionRunner sr(
- ctx.ctx().db()->getCollection(ns()),
+ ctx.ctx().db()->getCollection(&txn, ns()),
cq,
mps->bestSolution(),
mps,
diff --git a/src/mongo/dbtests/query_single_solution_runner.cpp b/src/mongo/dbtests/query_single_solution_runner.cpp
index 3e2e1330323..246cf66df1a 100644
--- a/src/mongo/dbtests/query_single_solution_runner.cpp
+++ b/src/mongo/dbtests/query_single_solution_runner.cpp
@@ -36,10 +36,9 @@
#include "mongo/db/instance.h"
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
+#include "mongo/db/operation_context_impl.h"
#include "mongo/db/query/query_solution.h"
#include "mongo/db/query/single_solution_runner.h"
-#include "mongo/db/catalog/collection.h"
-#include "mongo/db/operation_context_impl.h"
#include "mongo/dbtests/dbtests.h"
namespace QuerySingleSolutionRunner {
@@ -82,7 +81,7 @@ namespace QuerySingleSolutionRunner {
SingleSolutionRunner* makeCollScanRunner(Client::Context& ctx,
BSONObj& filterObj) {
CollectionScanParams csparams;
- csparams.collection = ctx.db()->getCollection( ns() );
+ csparams.collection = ctx.db()->getCollection( &_txn, ns() );
csparams.direction = CollectionScanParams::FORWARD;
auto_ptr<WorkingSet> ws(new WorkingSet());
// Parse the filter.
@@ -97,7 +96,7 @@ namespace QuerySingleSolutionRunner {
verify(NULL != cq);
// Hand the plan off to the single solution runner.
- SingleSolutionRunner* ssr = new SingleSolutionRunner(ctx.db()->getCollection(ns()),
+ SingleSolutionRunner* ssr = new SingleSolutionRunner(ctx.db()->getCollection(&_txn, ns()),
cq,
new QuerySolution(),
root.release(),
@@ -129,7 +128,7 @@ namespace QuerySingleSolutionRunner {
ixparams.bounds.endKeyInclusive = true;
ixparams.direction = 1;
- const Collection* coll = context.db()->getCollection(ns());
+ const Collection* coll = context.db()->getCollection(&_txn, ns());
auto_ptr<WorkingSet> ws(new WorkingSet());
IndexScan* ix = new IndexScan(ixparams, ws.get(), NULL);
@@ -149,7 +148,7 @@ namespace QuerySingleSolutionRunner {
size_t numCursors() {
Client::ReadContext ctx(&_txn, ns() );
- Collection* collection = ctx.ctx().db()->getCollection( ns() );
+ Collection* collection = ctx.ctx().db()->getCollection( &_txn, ns() );
if ( !collection )
return 0;
return collection->cursorCache()->numCursors();
@@ -157,13 +156,13 @@ namespace QuerySingleSolutionRunner {
void registerRunner( Runner* runner ) {
Client::ReadContext ctx(&_txn, ns());
- Collection* collection = ctx.ctx().db()->getOrCreateCollection( ns() );
+ Collection* collection = ctx.ctx().db()->getOrCreateCollection( &_txn, ns() );
return collection->cursorCache()->registerRunner( runner );
}
void deregisterRunner( Runner* runner ) {
Client::ReadContext ctx(&_txn, ns());
- Collection* collection = ctx.ctx().db()->getOrCreateCollection( ns() );
+ Collection* collection = ctx.ctx().db()->getOrCreateCollection( &_txn, ns() );
return collection->cursorCache()->deregisterRunner( runner );
}
@@ -172,7 +171,7 @@ namespace QuerySingleSolutionRunner {
private:
IndexDescriptor* getIndex(Database* db, const BSONObj& obj) {
- Collection* collection = db->getCollection( ns() );
+ Collection* collection = db->getCollection( &_txn, ns() );
return collection->getIndexCatalog()->findIndexByKeyPattern(obj);
}
@@ -348,13 +347,13 @@ namespace QuerySingleSolutionRunner {
SingleSolutionRunner* ssr = makeCollScanRunner(ctx.ctx(),filterObj);
// Make a client cursor from the runner.
- new ClientCursor(ctx.ctx().db()->getCollection(ns()),
+ new ClientCursor(ctx.ctx().db()->getCollection(&_txn, ns()),
ssr, 0, BSONObj());
// There should be one cursor before invalidation,
// and zero cursors after invalidation.
ASSERT_EQUALS(1U, numCursors());
- ctx.ctx().db()->getCollection( ns() )->cursorCache()->invalidateAll(false);
+ ctx.ctx().db()->getCollection( &_txn, ns() )->cursorCache()->invalidateAll(false);
ASSERT_EQUALS(0U, numCursors());
}
};
@@ -369,7 +368,7 @@ namespace QuerySingleSolutionRunner {
Client::WriteContext ctx(&_txn, ns());
insert(BSON("a" << 1 << "b" << 1));
- Collection* collection = ctx.ctx().db()->getCollection(ns());
+ Collection* collection = ctx.ctx().db()->getCollection(&_txn, ns());
BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
SingleSolutionRunner* ssr = makeCollScanRunner(ctx.ctx(),filterObj);
@@ -410,7 +409,7 @@ namespace QuerySingleSolutionRunner {
{
Client::ReadContext ctx(&_txn, ns());
- Collection* collection = ctx.ctx().db()->getCollection(ns());
+ Collection* collection = ctx.ctx().db()->getCollection(&_txn, ns());
BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
SingleSolutionRunner* ssr = makeCollScanRunner(ctx.ctx(),filterObj);
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index f1d33ff7fb8..529476b94b1 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -130,7 +130,7 @@ namespace QueryStageAnd {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -232,7 +232,7 @@ namespace QueryStageAnd {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -318,7 +318,7 @@ namespace QueryStageAnd {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -367,7 +367,7 @@ namespace QueryStageAnd {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -419,7 +419,7 @@ namespace QueryStageAnd {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -470,7 +470,7 @@ namespace QueryStageAnd {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -531,7 +531,7 @@ namespace QueryStageAnd {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -590,7 +590,7 @@ namespace QueryStageAnd {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -649,7 +649,7 @@ namespace QueryStageAnd {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -697,7 +697,7 @@ namespace QueryStageAnd {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -753,7 +753,7 @@ namespace QueryStageAnd {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -870,7 +870,7 @@ namespace QueryStageAnd {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -923,7 +923,7 @@ namespace QueryStageAnd {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -968,7 +968,7 @@ namespace QueryStageAnd {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -1016,7 +1016,7 @@ namespace QueryStageAnd {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -1061,7 +1061,7 @@ namespace QueryStageAnd {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index fc30e82ad99..f1f6e33e9ed 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -162,7 +162,7 @@ namespace QueryStageCollectionScan {
Database* db() { return _context.db(); }
ExtentManager* extentManager() { return db()->getExtentManager(); }
- Collection* collection() { return db()->getCollection( ns() ); }
+ Collection* collection() { return db()->getCollection( &_txn, ns() ); }
NamespaceDetails *nsd() { return collection()->detailsWritable(); }
protected:
@@ -338,7 +338,7 @@ namespace QueryStageCollectionScan {
// Configure the scan.
CollectionScanParams params;
- params.collection = ctx.ctx().db()->getCollection( ns() );
+ params.collection = ctx.ctx().db()->getCollection( &_txn, ns() );
params.direction = direction;
params.tailable = false;
@@ -448,7 +448,7 @@ namespace QueryStageCollectionScan {
// Configure the scan.
CollectionScanParams params;
- params.collection = ctx.ctx().db()->getCollection( ns() );
+ params.collection = ctx.ctx().db()->getCollection( &_txn, ns() );
params.direction = CollectionScanParams::FORWARD;
params.tailable = false;
@@ -478,7 +478,7 @@ namespace QueryStageCollectionScan {
Client::ReadContext ctx(&_txn, ns());
CollectionScanParams params;
- params.collection = ctx.ctx().db()->getCollection( ns() );
+ params.collection = ctx.ctx().db()->getCollection( &_txn, ns() );
params.direction = CollectionScanParams::BACKWARD;
params.tailable = false;
@@ -506,7 +506,7 @@ namespace QueryStageCollectionScan {
void run() {
Client::WriteContext ctx(&_txn, ns());
- Collection* coll = ctx.ctx().db()->getCollection( ns() );
+ Collection* coll = ctx.ctx().db()->getCollection( &_txn, ns() );
// Get the DiskLocs that would be returned by an in-order scan.
vector<DiskLoc> locs;
@@ -567,7 +567,7 @@ namespace QueryStageCollectionScan {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Collection* coll = ctx.ctx().db()->getCollection(ns());
+ Collection* coll = ctx.ctx().db()->getCollection(&_txn, ns());
// Get the DiskLocs that would be returned by an in-order scan.
vector<DiskLoc> locs;
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 0d0dcbc0673..ac0e7e80705 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -87,7 +87,7 @@ namespace QueryStageCount {
}
IndexDescriptor* getIndex(Database* db, const BSONObj& obj) {
- Collection* collection = db->getCollection(ns());
+ Collection* collection = db->getCollection(&_txn, ns());
return collection->getIndexCatalog()->findIndexByKeyPattern(obj);
}
diff --git a/src/mongo/dbtests/query_stage_distinct.cpp b/src/mongo/dbtests/query_stage_distinct.cpp
index ecbbbaf5561..c8799f73fcb 100644
--- a/src/mongo/dbtests/query_stage_distinct.cpp
+++ b/src/mongo/dbtests/query_stage_distinct.cpp
@@ -65,7 +65,7 @@ namespace QueryStageDistinct {
IndexDescriptor* getIndex(const BSONObj& obj) {
Client::ReadContext ctx(&_txn, ns());
- Collection* collection = ctx.ctx().db()->getCollection( ns() );
+ Collection* collection = ctx.ctx().db()->getCollection( &_txn, ns() );
return collection->getIndexCatalog()->findIndexByKeyPattern( obj );
}
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index b99a95b1d87..5a97b5eb254 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -91,7 +91,7 @@ namespace QueryStageFetch {
Client::WriteContext ctx(&txn, ns());
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&txn, ns());
}
@@ -150,7 +150,7 @@ namespace QueryStageFetch {
Client::WriteContext ctx(&txn, ns());
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&txn, ns());
}
diff --git a/src/mongo/dbtests/query_stage_keep.cpp b/src/mongo/dbtests/query_stage_keep.cpp
index 5ea488a37f2..8df8f1a5f28 100644
--- a/src/mongo/dbtests/query_stage_keep.cpp
+++ b/src/mongo/dbtests/query_stage_keep.cpp
@@ -108,7 +108,7 @@ namespace QueryStageKeep {
Client::WriteContext ctx(&txn, ns());
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&txn, ns());
}
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 43319528bb2..ff52f9c6ffc 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -111,7 +111,7 @@ namespace QueryStageMergeSortTests {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -175,7 +175,7 @@ namespace QueryStageMergeSortTests {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -238,7 +238,7 @@ namespace QueryStageMergeSortTests {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -302,7 +302,7 @@ namespace QueryStageMergeSortTests {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -367,7 +367,7 @@ namespace QueryStageMergeSortTests {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -430,7 +430,7 @@ namespace QueryStageMergeSortTests {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -483,7 +483,7 @@ namespace QueryStageMergeSortTests {
Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index 2440f67948f..1d9b4806db3 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -183,7 +183,7 @@ namespace QueryStageSortTests {
Client::WriteContext ctx(&txn, ns());
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&txn, ns());
}
@@ -203,7 +203,7 @@ namespace QueryStageSortTests {
Client::WriteContext ctx(&txn, ns());
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&txn, ns());
}
@@ -232,7 +232,7 @@ namespace QueryStageSortTests {
Client::WriteContext ctx(&txn, ns());
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&txn, ns());
}
@@ -252,7 +252,7 @@ namespace QueryStageSortTests {
Client::WriteContext ctx(&txn, ns());
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&txn, ns());
}
@@ -343,7 +343,7 @@ namespace QueryStageSortTests {
Client::WriteContext ctx(&txn, ns());
Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&txn, ns());
}
diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp
index 7e0b0f20c6e..23525ca265d 100644
--- a/src/mongo/dbtests/query_stage_tests.cpp
+++ b/src/mongo/dbtests/query_stage_tests.cpp
@@ -81,7 +81,7 @@ namespace QueryStageTests {
WorkingSet* ws = new WorkingSet();
PlanExecutor runner(ws,
new IndexScan(params, ws, filterExpr.get()),
- ctx.ctx().db()->getCollection(ns()));
+ ctx.ctx().db()->getCollection(&_txn, ns()));
int count = 0;
for (DiskLoc dl; Runner::RUNNER_ADVANCED == runner.getNext(NULL, &dl); ) {
@@ -103,7 +103,7 @@ namespace QueryStageTests {
IndexDescriptor* getIndex(const BSONObj& obj) {
Client::ReadContext ctx(&_txn, ns());
- Collection* collection = ctx.ctx().db()->getCollection( ns() );
+ Collection* collection = ctx.ctx().db()->getCollection( &_txn, ns() );
return collection->getIndexCatalog()->findIndexByKeyPattern( obj );
}
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 0e73c01e3b5..2fe15ba0741 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -62,7 +62,7 @@ namespace QueryTests {
public:
Base() : _context( ns() ) {
_database = _context.db();
- _collection = _database->getCollection( ns() );
+ _collection = _database->getCollection( &_txn, ns() );
if ( _collection ) {
_database->dropCollection( &_txn, ns() );
}
@@ -118,10 +118,10 @@ namespace QueryTests {
BSONObj query = fromjson( "{$or:[{b:2},{c:3}]}" );
BSONObj ret;
// Check findOne() returning object.
- ASSERT( Helpers::findOne( _collection, query, ret, true ) );
+ ASSERT( Helpers::findOne( &_txn, _collection, query, ret, true ) );
ASSERT_EQUALS( string( "b" ), ret.firstElement().fieldName() );
// Cross check with findOne() returning location.
- ASSERT_EQUALS(ret, _collection->docFor(Helpers::findOne(_collection, query, true)));
+ ASSERT_EQUALS(ret, _collection->docFor(Helpers::findOne(&_txn, _collection, query, true)));
}
};
@@ -133,20 +133,20 @@ namespace QueryTests {
BSONObj ret;
// Check findOne() returning object, allowing unindexed scan.
- ASSERT( Helpers::findOne( _collection, query, ret, false ) );
+ ASSERT( Helpers::findOne( &_txn, _collection, query, ret, false ) );
// Check findOne() returning location, allowing unindexed scan.
- ASSERT_EQUALS(ret, _collection->docFor(Helpers::findOne(_collection, query, false)));
+ ASSERT_EQUALS(ret, _collection->docFor(Helpers::findOne(&_txn, _collection, query, false)));
// Check findOne() returning object, requiring indexed scan without index.
- ASSERT_THROWS( Helpers::findOne( _collection, query, ret, true ), MsgAssertionException );
+ ASSERT_THROWS( Helpers::findOne( &_txn, _collection, query, ret, true ), MsgAssertionException );
// Check findOne() returning location, requiring indexed scan without index.
- ASSERT_THROWS( Helpers::findOne( _collection, query, true ), MsgAssertionException );
+ ASSERT_THROWS( Helpers::findOne( &_txn, _collection, query, true ), MsgAssertionException );
addIndex( BSON( "b" << 1 ) );
// Check findOne() returning object, requiring indexed scan with index.
- ASSERT( Helpers::findOne( _collection, query, ret, true ) );
+ ASSERT( Helpers::findOne( &_txn, _collection, query, ret, true ) );
// Check findOne() returning location, requiring indexed scan with index.
- ASSERT_EQUALS(ret, _collection->docFor(Helpers::findOne(_collection, query, true)));
+ ASSERT_EQUALS(ret, _collection->docFor(Helpers::findOne(&_txn, _collection, query, true)));
}
};
@@ -159,7 +159,7 @@ namespace QueryTests {
Client::Context ctx( "unittests.querytests" );
Database* db = ctx.db();
- if ( db->getCollection( ns() ) ) {
+ if ( db->getCollection( &_txn, ns() ) ) {
_collection = NULL;
db->dropCollection( &_txn, ns() );
}
@@ -174,9 +174,9 @@ namespace QueryTests {
insert( BSONObj() );
BSONObj query;
BSONObj ret;
- ASSERT( Helpers::findOne( _collection, query, ret, false ) );
+ ASSERT( Helpers::findOne( &_txn, _collection, query, ret, false ) );
ASSERT( ret.isEmpty() );
- ASSERT_EQUALS(ret, _collection->docFor(Helpers::findOne(_collection, query, false)));
+ ASSERT_EQUALS(ret, _collection->docFor(Helpers::findOne(&_txn, _collection, query, false)));
}
};
@@ -244,7 +244,7 @@ namespace QueryTests {
// Check internal server handoff to getmore.
Lock::DBWrite lk(_txn.lockState(), ns);
Client::Context ctx( ns );
- ClientCursorPin clientCursor( ctx.db()->getCollection(ns), cursorId );
+ ClientCursorPin clientCursor( ctx.db()->getCollection(&_txn, ns), cursorId );
// pq doesn't exist if it's a runner inside of the clientcursor.
// ASSERT( clientCursor.c()->pq );
// ASSERT_EQUALS( 2, clientCursor.c()->pq->getNumToReturn() );
@@ -301,7 +301,7 @@ namespace QueryTests {
// Check that the cursor has been removed.
{
Client::ReadContext ctx(&_txn, ns);
- ASSERT( 0 == ctx.ctx().db()->getCollection( ns )->cursorCache()->numCursors() );
+ ASSERT(0 == ctx.ctx().db()->getCollection(&_txn, ns)->cursorCache()->numCursors());
}
ASSERT_FALSE(CollectionCursorCache::eraseCursorGlobal(&_txn, cursorId));
@@ -351,8 +351,8 @@ namespace QueryTests {
// Check that the cursor still exists
{
Client::ReadContext ctx(&_txn, ns);
- ASSERT( 1 == ctx.ctx().db()->getCollection( ns )->cursorCache()->numCursors() );
- ASSERT( ctx.ctx().db()->getCollection( ns )->cursorCache()->find( cursorId, false ) );
+ ASSERT( 1 == ctx.ctx().db()->getCollection( &_txn, ns )->cursorCache()->numCursors() );
+ ASSERT( ctx.ctx().db()->getCollection( &_txn, ns )->cursorCache()->find( cursorId, false ) );
}
// Check that the cursor can be iterated until all documents are returned.
@@ -612,7 +612,7 @@ namespace QueryTests {
ASSERT_EQUALS( two, c->next()["ts"].Date() );
long long cursorId = c->getCursorId();
- ClientCursorPin clientCursor( ctx.db()->getCollection( ns ), cursorId );
+ ClientCursorPin clientCursor( ctx.db()->getCollection( &_txn, ns ), cursorId );
ASSERT_EQUALS( three.millis, clientCursor.c()->getSlaveReadTill().asDate() );
}
};
@@ -1138,7 +1138,7 @@ namespace QueryTests {
size_t numCursorsOpen() {
Client::ReadContext ctx(&_txn, _ns);
- Collection* collection = ctx.ctx().db()->getCollection( _ns );
+ Collection* collection = ctx.ctx().db()->getCollection( &_txn, _ns );
if ( !collection )
return 0;
return collection->cursorCache()->numCursors();
@@ -1237,14 +1237,14 @@ namespace QueryTests {
ASSERT_EQUALS( 50 , count() );
BSONObj res;
- ASSERT( Helpers::findOne( ctx.ctx().db()->getCollection( ns() ),
+ ASSERT( Helpers::findOne( &_txn, ctx.ctx().db()->getCollection( &_txn, ns() ),
BSON( "_id" << 20 ) , res , true ) );
ASSERT_EQUALS( 40 , res["x"].numberInt() );
- ASSERT( Helpers::findById( ctx.ctx().db(), ns() , BSON( "_id" << 20 ) , res ) );
+ ASSERT( Helpers::findById( &_txn, ctx.ctx().db(), ns() , BSON( "_id" << 20 ) , res ) );
ASSERT_EQUALS( 40 , res["x"].numberInt() );
- ASSERT( ! Helpers::findById( ctx.ctx().db(), ns() , BSON( "_id" << 200 ) , res ) );
+ ASSERT( ! Helpers::findById( &_txn, ctx.ctx().db(), ns() , BSON( "_id" << 200 ) , res ) );
long long slow;
long long fast;
@@ -1254,7 +1254,7 @@ namespace QueryTests {
{
Timer t;
for ( int i=0; i<n; i++ ) {
- ASSERT( Helpers::findOne( ctx.ctx().db()->getCollection(ns()),
+ ASSERT( Helpers::findOne( &_txn, ctx.ctx().db()->getCollection(&_txn, ns()),
BSON( "_id" << 20 ), res, true ) );
}
slow = t.micros();
@@ -1262,7 +1262,7 @@ namespace QueryTests {
{
Timer t;
for ( int i=0; i<n; i++ ) {
- ASSERT( Helpers::findById( ctx.ctx().db(), ns() , BSON( "_id" << 20 ) , res ) );
+ ASSERT( Helpers::findById( &_txn, ctx.ctx().db(), ns() , BSON( "_id" << 20 ) , res ) );
}
fast = t.micros();
}
@@ -1290,7 +1290,7 @@ namespace QueryTests {
BSONObj res;
for ( int i=0; i<1000; i++ ) {
- bool found = Helpers::findById( ctx.ctx().db(), ns() , BSON( "_id" << i ) , res );
+ bool found = Helpers::findById( &_txn, ctx.ctx().db(), ns() , BSON( "_id" << i ) , res );
ASSERT_EQUALS( i % 2 , int(found) );
}
@@ -1463,7 +1463,7 @@ namespace QueryTests {
ClientCursor *clientCursor = 0;
{
Client::ReadContext ctx(&_txn, ns());
- ClientCursorPin clientCursorPointer( ctx.ctx().db()->getCollection( ns() ),
+ ClientCursorPin clientCursorPointer( ctx.ctx().db()->getCollection( &_txn, ns() ),
cursorId );
clientCursor = clientCursorPointer.c();
// clientCursorPointer destructor unpins the cursor.
@@ -1501,7 +1501,7 @@ namespace QueryTests {
{
Client::WriteContext ctx(&_txn, ns() );
- ClientCursorPin pinCursor( ctx.ctx().db()->getCollection( ns() ), cursorId );
+ ClientCursorPin pinCursor( ctx.ctx().db()->getCollection( &_txn, ns() ), cursorId );
ASSERT_THROWS(CollectionCursorCache::eraseCursorGlobal(&_txn, cursorId),
MsgAssertionException);
diff --git a/src/mongo/dbtests/replsettests.cpp b/src/mongo/dbtests/replsettests.cpp
index deb82f41e17..d7c1c11b7f9 100644
--- a/src/mongo/dbtests/replsettests.cpp
+++ b/src/mongo/dbtests/replsettests.cpp
@@ -152,7 +152,7 @@ namespace ReplSetTests {
Client::Context ctx(ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(ns());
+ Collection* coll = db->getCollection(&txn, ns());
if (!coll) {
coll = db->createCollection(&txn, ns());
}
@@ -180,7 +180,7 @@ namespace ReplSetTests {
Database* db = c.ctx().db();
- if ( db->getCollection( ns() ) == NULL ) {
+ if ( db->getCollection( &txn, ns() ) == NULL ) {
return;
}
@@ -391,35 +391,36 @@ namespace ReplSetTests {
verify(apply(b.obj()));
}
- void insert() {
+ void insert(OperationContext* txn) {
Client::Context ctx(cappedNs());
- OperationContextImpl txn;
Database* db = ctx.db();
- Collection* coll = db->getCollection(&txn, cappedNs());
+ Collection* coll = db->getCollection(txn, cappedNs());
if (!coll) {
- coll = db->createCollection(&txn, cappedNs());
+ coll = db->createCollection(txn, cappedNs());
}
BSONObj o = BSON(GENOID << "x" << 456);
- DiskLoc loc = coll->insertDocument(&txn, o, true).getValue();
+ DiskLoc loc = coll->insertDocument(txn, o, true).getValue();
verify(!loc.isNull());
}
public:
virtual ~CappedUpdate() {}
void run() {
+ OperationContextImpl txn;
+
// RARELY shoud be once/128x
for (int i=0; i<150; i++) {
- insert();
+ insert(&txn);
updateSucceed();
}
- DBDirectClient client;
+ DBDirectClient client(&txn);
int count = (int) client.count(cappedNs(), BSONObj());
verify(count > 1);
// check _id index created
Client::Context ctx(cappedNs());
- Collection* collection = ctx.db()->getCollection( cappedNs() );
+ Collection* collection = ctx.db()->getCollection( &txn, cappedNs() );
verify(collection->getIndexCatalog()->findIdIndex());
}
};
@@ -438,6 +439,7 @@ namespace ReplSetTests {
public:
virtual ~CappedInsert() {}
void run() {
+ OperationContextImpl txn;
// This will succeed, but not insert anything because they are changed to upserts
for (int i=0; i<150; i++) {
insertSucceed();
@@ -446,7 +448,7 @@ namespace ReplSetTests {
// this changed in 2.1.2
// we now have indexes on capped collections
Client::Context ctx(cappedNs());
- Collection* collection = ctx.db()->getCollection( cappedNs() );
+ Collection* collection = ctx.db()->getCollection( &txn, cappedNs() );
verify(collection->getIndexCatalog()->findIdIndex());
}
};
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index e76ebd3c6eb..906d4990766 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -57,7 +57,6 @@ namespace ReplTests {
class Base {
Lock::GlobalWrite lk;
Client::Context _context;
- mutable OperationContextImpl _txn;
public:
Base() : _context( ns() ) {
oldRepl();
@@ -66,7 +65,7 @@ namespace ReplTests {
replSettings.master = true;
createOplog();
- Collection* c = _context.db()->getCollection( ns() );
+ Collection* c = _context.db()->getCollection( &_txn, ns() );
if ( ! c ) {
c = _context.db()->createCollection( &_txn, ns() );
}
@@ -136,7 +135,7 @@ namespace ReplTests {
Client::Context ctx( cllNS() );
OperationContextImpl txn;
Database* db = ctx.db();
- Collection* coll = db->getCollection( cllNS() );
+ Collection* coll = db->getCollection( &txn, cllNS() );
if ( !coll ) {
coll = db->createCollection( &txn, cllNS() );
}
@@ -157,7 +156,7 @@ namespace ReplTests {
{
Client::Context ctx( cllNS() );
Database* db = ctx.db();
- Collection* coll = db->getCollection( cllNS() );
+ Collection* coll = db->getCollection( &txn, cllNS() );
RecordIterator* it = coll->getIterator( DiskLoc(), false,
CollectionScanParams::FORWARD );
@@ -186,7 +185,7 @@ namespace ReplTests {
Client::Context ctx( ns );
OperationContextImpl txn;
Database* db = ctx.db();
- Collection* coll = db->getCollection( ns );
+ Collection* coll = db->getCollection( &txn, ns );
if ( !coll ) {
coll = db->createCollection( &txn, ns );
}
@@ -206,7 +205,7 @@ namespace ReplTests {
Client::Context ctx( ns );
OperationContextImpl txn;
Database* db = ctx.db();
- Collection* coll = db->getCollection( ns );
+ Collection* coll = db->getCollection( &txn, ns );
if ( !coll ) {
coll = db->createCollection( &txn, ns );
}
@@ -227,7 +226,7 @@ namespace ReplTests {
Client::Context ctx( ns() );
OperationContextImpl txn;
Database* db = ctx.db();
- Collection* coll = db->getCollection( ns() );
+ Collection* coll = db->getCollection( &txn, ns() );
if ( !coll ) {
coll = db->createCollection( &txn, ns() );
}
@@ -255,6 +254,8 @@ namespace ReplTests {
Database* db() {
return _context.db();
}
+
+ mutable OperationContextImpl _txn;
private:
static DBDirectClient client_;
};
@@ -1395,7 +1396,7 @@ namespace ReplTests {
bool returnEmpty;
SyncTest() : Sync(""), returnEmpty(false) {}
virtual ~SyncTest() {}
- virtual BSONObj getMissingDoc(Database* db, const BSONObj& o) {
+ virtual BSONObj getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) {
if (returnEmpty) {
BSONObj o;
return o;
@@ -1413,7 +1414,7 @@ namespace ReplTests {
// this should fail because we can't connect
try {
Sync badSource("localhost:123");
- badSource.getMissingDoc(db(), o);
+ badSource.getMissingDoc(&_txn, db(), o);
}
catch (DBException&) {
threw = true;
diff --git a/src/mongo/dbtests/runner_registry.cpp b/src/mongo/dbtests/runner_registry.cpp
index b29088e36d8..8f197a5c552 100644
--- a/src/mongo/dbtests/runner_registry.cpp
+++ b/src/mongo/dbtests/runner_registry.cpp
@@ -74,23 +74,24 @@ namespace RunnerRegistry {
CanonicalQuery* cq;
ASSERT(CanonicalQuery::canonicalize(ns(), BSONObj(), &cq).isOK());
// Owns all args
- auto_ptr<Runner> run(new SingleSolutionRunner(_ctx->ctx().db()->getCollection( ns() ),
+ auto_ptr<Runner> run(new SingleSolutionRunner(_ctx->ctx().db()->getCollection( &_opCtx,
+ ns() ),
cq, NULL, scan.release(), ws.release()));
return run.release();
}
void registerRunner( Runner* runner ) {
- _ctx->ctx().db()->getOrCreateCollection( ns() )->cursorCache()->registerRunner( runner );
+ _ctx->ctx().db()->getOrCreateCollection( &_opCtx, ns() )->cursorCache()->registerRunner( runner );
}
void deregisterRunner( Runner* runner ) {
- _ctx->ctx().db()->getOrCreateCollection( ns() )->cursorCache()->deregisterRunner( runner );
+ _ctx->ctx().db()->getOrCreateCollection( &_opCtx, ns() )->cursorCache()->deregisterRunner( runner );
}
int N() { return 50; }
Collection* collection() {
- return _ctx->ctx().db()->getCollection( ns() );
+ return _ctx->ctx().db()->getCollection( &_opCtx, ns() );
}
static const char* ns() { return "unittests.RunnerRegistryDiskLocInvalidation"; }
diff --git a/src/mongo/s/d_logic.h b/src/mongo/s/d_logic.h
index 668969517da..d23bc6f9c2e 100644
--- a/src/mongo/s/d_logic.h
+++ b/src/mongo/s/d_logic.h
@@ -42,6 +42,7 @@ namespace mongo {
class Database;
class DiskLoc;
+ class OperationContext;
// --------------
// --- global state ---
@@ -370,7 +371,8 @@ namespace mongo {
* if it's relevant. The entries saved here are later transferred to the receiving side of
* the migration. A relevant entry is an insertion, a deletion, or an update.
*/
- void logOpForSharding( const char * opstr,
+ void logOpForSharding( OperationContext* txn,
+ const char * opstr,
const char * ns,
const BSONObj& obj,
BSONObj * patt,
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index b041b4d4e5a..fbb06b79590 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -267,7 +267,8 @@ namespace mongo {
_inCriticalSectionCV.notify_all();
}
- void logOp(const char* opstr,
+ void logOp(OperationContext* txn,
+ const char* opstr,
const char* ns,
const BSONObj& obj,
BSONObj* patt,
@@ -321,7 +322,7 @@ namespace mongo {
case 'u':
Client::Context ctx( _ns );
- if ( ! Helpers::findById( ctx.db(), _ns.c_str(), ide.wrap(), it ) ) {
+ if ( ! Helpers::findById( txn, ctx.db(), _ns.c_str(), ide.wrap(), it ) ) {
warning() << "logOpForSharding couldn't find: " << ide << " even though should have" << migrateLog;
return;
}
@@ -336,7 +337,7 @@ namespace mongo {
_memoryUsed += ide.size() + 5;
}
- void xfer( Database* db, list<BSONObj> * l , BSONObjBuilder& b , const char * name , long long& size , bool explode ) {
+ void xfer( OperationContext* txn, Database* db, list<BSONObj> * l , BSONObjBuilder& b , const char * name , long long& size , bool explode ) {
const long long maxSize = 1024 * 1024;
if ( l->size() == 0 || size > maxSize )
@@ -350,7 +351,7 @@ namespace mongo {
BSONObj t = *i;
if ( explode ) {
BSONObj it;
- if ( Helpers::findById( db , _ns.c_str() , t, it ) ) {
+ if ( Helpers::findById( txn, db , _ns.c_str() , t, it ) ) {
arr.append( it );
size += it.objsize();
}
@@ -380,8 +381,8 @@ namespace mongo {
{
Client::ReadContext cx(txn, _ns);
- xfer( cx.ctx().db(), &_deleted, b, "deleted", size, false );
- xfer( cx.ctx().db(), &_reload, b, "reload", size, true );
+ xfer( txn, cx.ctx().db(), &_deleted, b, "deleted", size, false );
+ xfer( txn, cx.ctx().db(), &_reload, b, "reload", size, true );
}
b.append( "size" , size );
@@ -401,7 +402,7 @@ namespace mongo {
string& errmsg,
BSONObjBuilder& result ) {
Client::ReadContext ctx(txn, _ns);
- Collection* collection = ctx.ctx().db()->getCollection( _ns );
+ Collection* collection = ctx.ctx().db()->getCollection( txn, _ns );
if ( !collection ) {
errmsg = "ns not found, should be impossible";
return false;
@@ -491,7 +492,7 @@ namespace mongo {
int allocSize;
{
Client::ReadContext ctx(txn, _ns);
- Collection* collection = ctx.ctx().db()->getCollection( _ns );
+ Collection* collection = ctx.ctx().db()->getCollection( txn, _ns );
verify( collection );
scoped_spinlock lk( _trackerLocks );
allocSize =
@@ -504,7 +505,7 @@ namespace mongo {
bool filledBuffer = false;
Client::ReadContext ctx(txn, _ns);
- Collection* collection = ctx.ctx().db()->getCollection( _ns );
+ Collection* collection = ctx.ctx().db()->getCollection( txn, _ns );
scoped_spinlock lk( _trackerLocks );
set<DiskLoc>::iterator i = _cloneLocs.begin();
@@ -629,7 +630,7 @@ namespace mongo {
if ( !_collection )
return;
Client::ReadContext ctx(_txn, _ns);
- Collection* collection = ctx.ctx().db()->getCollection( _ns );
+ Collection* collection = ctx.ctx().db()->getCollection( _txn, _ns );
invariant( _collection == collection );
_collection->cursorCache()->deregisterRunner( this );
}
@@ -699,12 +700,13 @@ namespace mongo {
bool _isAnotherMigrationActive;
};
- void logOpForSharding(const char * opstr,
+ void logOpForSharding(OperationContext* txn,
+ const char * opstr,
const char * ns,
const BSONObj& obj,
BSONObj * patt,
bool notInActiveChunk) {
- migrateFromStatus.logOp(opstr, ns, obj, patt, notInActiveChunk);
+ migrateFromStatus.logOp(txn, opstr, ns, obj, patt, notInActiveChunk);
}
class TransferModsCommand : public ChunkCommandHelper {
@@ -1628,7 +1630,7 @@ namespace mongo {
Client::WriteContext ctx(txn, ns );
// Only copy if ns doesn't already exist
Database* db = ctx.ctx().db();
- Collection* collection = db->getCollection( ns );
+ Collection* collection = db->getCollection( txn, ns );
if ( !collection ) {
string system_namespaces = nsToDatabase(ns) + ".system.namespaces";
@@ -1759,7 +1761,7 @@ namespace mongo {
Client::WriteContext cx(txn, ns );
BSONObj localDoc;
- if ( willOverrideLocalId( cx.ctx().db(), o, &localDoc ) ) {
+ if ( willOverrideLocalId( txn, cx.ctx().db(), o, &localDoc ) ) {
string errMsg =
str::stream() << "cannot migrate chunk, local document "
<< localDoc
@@ -1957,7 +1959,7 @@ namespace mongo {
// do not apply deletes if they do not belong to the chunk being migrated
BSONObj fullObj;
- if ( Helpers::findById( cx.ctx().db(), ns.c_str(), id, fullObj ) ) {
+ if ( Helpers::findById( txn, cx.ctx().db(), ns.c_str(), id, fullObj ) ) {
if ( ! isInRange( fullObj , min , max , shardKeyPattern ) ) {
log() << "not applying out of range deletion: " << fullObj << migrateLog;
@@ -1991,7 +1993,7 @@ namespace mongo {
BSONObj it = i.next().Obj();
BSONObj localDoc;
- if ( willOverrideLocalId( cx.ctx().db(), it, &localDoc ) ) {
+ if ( willOverrideLocalId( txn, cx.ctx().db(), it, &localDoc ) ) {
string errMsg =
str::stream() << "cannot migrate chunk, local document "
<< localDoc
@@ -2021,10 +2023,10 @@ namespace mongo {
* Must be in WriteContext to avoid races and DBHelper errors.
* TODO: Could optimize this check out if sharding on _id.
*/
- bool willOverrideLocalId( Database* db, BSONObj remoteDoc, BSONObj* localDoc ) {
+ bool willOverrideLocalId( OperationContext* txn, Database* db, BSONObj remoteDoc, BSONObj* localDoc ) {
*localDoc = BSONObj();
- if ( Helpers::findById( db, ns.c_str(), remoteDoc, *localDoc ) ) {
+ if ( Helpers::findById( txn, db, ns.c_str(), remoteDoc, *localDoc ) ) {
return !isInRange( *localDoc , min , max , shardKeyPattern );
}
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index 7bb1d4d82d7..f45585293c7 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -120,7 +120,7 @@ namespace mongo {
}
Client::ReadContext ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection( ns );
+ Collection* collection = ctx.ctx().db()->getCollection( txn, ns );
if ( !collection ) {
errmsg = "ns not found";
return false;
@@ -276,7 +276,7 @@ namespace mongo {
{
// Get the size estimate for this namespace
Client::ReadContext ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection( ns );
+ Collection* collection = ctx.ctx().db()->getCollection( txn, ns );
if ( !collection ) {
errmsg = "ns not found";
return false;
@@ -859,7 +859,7 @@ namespace mongo {
for (int i=1; i >= 0 ; i--){ // high chunk more likely to have only one obj
Client::ReadContext ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection( ns );
+ Collection* collection = ctx.ctx().db()->getCollection( txn, ns );
verify( collection );
// Allow multiKey based on the invariant that shard keys must be
diff --git a/src/mongo/tools/dump.cpp b/src/mongo/tools/dump.cpp
index f699e76585e..83382b6c1ff 100644
--- a/src/mongo/tools/dump.cpp
+++ b/src/mongo/tools/dump.cpp
@@ -301,8 +301,11 @@ public:
* NOTE: The "outfile" parameter passed in should actually represent a directory, but it is
* called "outfile" because we append the filename and use it as our output file.
*/
- void _repair( Database* db , string ns , boost::filesystem::path outfile ){
- Collection* collection = db->getCollection( ns );
+ void _repair(OperationContext* opCtx,
+ Database* db,
+ string ns,
+ boost::filesystem::path outfile) {
+ Collection* collection = db->getCollection(opCtx, ns);
toolInfoLog() << "nrecords: " << collection->numRecords()
<< " datasize: " << collection->dataSize()
<< std::endl;
@@ -363,7 +366,7 @@ public:
LogIndentLevel lil1;
try {
- _repair( db , ns , root );
+ _repair( &txn, db , ns , root );
}
catch ( DBException& e ){
toolError() << "ERROR recovering: " << ns << " " << e.toString() << std::endl;