author    Geert Bosch <geert@mongodb.com>    2014-10-07 12:13:21 -0400
committer Geert Bosch <geert@mongodb.com>    2014-10-09 18:21:06 -0400
commit    5ea4221f81e7af9272e4e687d516bb439ef29498 (patch)
tree      8bfee151ed5a434935681e17a9f6e26a8c080a33 /src
parent    ff10cf8c748997338f51de2de61520e17518758c (diff)
download  mongo-5ea4221f81e7af9272e4e687d516bb439ef29498.tar.gz
SERVER-14668: Collection level locking for update path
Temporarily disable collection locking for MMAPv1.
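The patch replaces the old exclusive Lock::DBWrite on the update path with a database-level intent lock (MODE_IX) plus a collection-level lock, so concurrent updates to different collections of the same database no longer serialize. The snippet below is a standalone illustration of the standard intent-lock compatibility rules behind that change; it is not part of the patch and its names are hypothetical.

#include <cassert>

enum Mode { IS, IX, S, X };

// Standard multigranularity lock compatibility: rows are the mode already
// held on a resource, columns the mode being requested.
bool compatible(Mode held, Mode requested) {
    static const bool matrix[4][4] = {
        //             IS     IX     S      X
        /* IS */     { true,  true,  true,  false },
        /* IX */     { true,  true,  false, false },
        /* S  */     { true,  false, true,  false },
        /* X  */     { false, false, false, false },
    };
    return matrix[held][requested];
}

int main() {
    // Two writers on different collections both take the database in IX,
    // which is self-compatible, then X (or IX) on their own collection.
    assert(compatible(IX, IX));   // database level: writers no longer serialize
    assert(!compatible(X, X));    // collection level: same collection still does
    assert(!compatible(S, IX));   // a database-wide reader still excludes writers
    return 0;
}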
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/client.cpp                                        | 11
-rw-r--r--  src/mongo/db/client.h                                          | 14
-rw-r--r--  src/mongo/db/cloner.cpp                                        | 12
-rw-r--r--  src/mongo/db/commands/apply_ops.cpp                            | 26
-rw-r--r--  src/mongo/db/commands/clone.cpp                                | 2
-rw-r--r--  src/mongo/db/commands/collection_to_capped.cpp                 | 2
-rw-r--r--  src/mongo/db/commands/compact.cpp                              | 4
-rw-r--r--  src/mongo/db/commands/copydb.cpp                               | 15
-rw-r--r--  src/mongo/db/commands/cpuprofile.cpp                           | 4
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp                       | 2
-rw-r--r--  src/mongo/db/commands/drop_indexes.cpp                         | 4
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp                      | 18
-rw-r--r--  src/mongo/db/commands/mr.cpp                                   | 35
-rw-r--r--  src/mongo/db/commands/test_commands.cpp                        | 8
-rw-r--r--  src/mongo/db/commands/write_commands/batch_executor.cpp        | 18
-rw-r--r--  src/mongo/db/commands/write_commands/write_commands.cpp        | 15
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.cpp                     | 20
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.h                       | 19
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_test.cpp                | 4
-rw-r--r--  src/mongo/db/concurrency/lock_mgr_new.cpp                      | 5
-rw-r--r--  src/mongo/db/dbcommands.cpp                                    | 10
-rw-r--r--  src/mongo/db/dbhelpers.cpp                                     | 2
-rw-r--r--  src/mongo/db/exec/stagedebug_cmd.cpp                           | 2
-rw-r--r--  src/mongo/db/index_builder.cpp                                 | 5
-rw-r--r--  src/mongo/db/index_rebuilder.cpp                               | 2
-rw-r--r--  src/mongo/db/instance.cpp                                      | 35
-rw-r--r--  src/mongo/db/introspect.cpp                                    | 9
-rw-r--r--  src/mongo/db/ops/update_executor.cpp                           | 44
-rw-r--r--  src/mongo/db/repl/master_slave.cpp                             | 5
-rw-r--r--  src/mongo/db/repl/minvalid.cpp                                 | 6
-rw-r--r--  src/mongo/db/repl/oplog.cpp                                    | 6
-rw-r--r--  src/mongo/db/repl/repl_coordinator_external_state_impl.cpp    | 6
-rw-r--r--  src/mongo/db/repl/rs_initialsync.cpp                           | 7
-rw-r--r--  src/mongo/db/repl/sync_source_feedback.cpp                     | 6
-rw-r--r--  src/mongo/db/repl/sync_tail.cpp                                | 6
-rw-r--r--  src/mongo/db/ttl.cpp                                           | 2
-rw-r--r--  src/mongo/dbtests/clienttests.cpp                              | 2
-rw-r--r--  src/mongo/dbtests/counttests.cpp                               | 8
-rw-r--r--  src/mongo/dbtests/dbhelper_tests.cpp                           | 2
-rw-r--r--  src/mongo/dbtests/documentsourcetests.cpp                      | 3
-rw-r--r--  src/mongo/dbtests/indexcatalogtests.cpp                        | 2
-rw-r--r--  src/mongo/dbtests/indexupdatetests.cpp                         | 6
-rw-r--r--  src/mongo/dbtests/namespacetests.cpp                           | 4
-rw-r--r--  src/mongo/dbtests/query_plan_executor.cpp                      | 12
-rw-r--r--  src/mongo/dbtests/query_stage_and.cpp                          | 80
-rw-r--r--  src/mongo/dbtests/query_stage_collscan.cpp                     | 4
-rw-r--r--  src/mongo/dbtests/query_stage_count.cpp                        | 14
-rw-r--r--  src/mongo/dbtests/query_stage_delete.cpp                       | 2
-rw-r--r--  src/mongo/dbtests/query_stage_subplan.cpp                      | 2
-rw-r--r--  src/mongo/dbtests/querytests.cpp                               | 45
-rw-r--r--  src/mongo/dbtests/threadedtests.cpp                            | 8
-rw-r--r--  src/mongo/s/d_merge.cpp                                        | 2
-rw-r--r--  src/mongo/s/d_migrate.cpp                                      | 10
-rw-r--r--  src/mongo/s/d_split.cpp                                        | 2
-rw-r--r--  src/mongo/s/d_state.cpp                                        | 4
55 files changed, 324 insertions, 279 deletions
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index d2f58534349..917003e4916 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -257,12 +257,13 @@ namespace mongo {
}
- Client::WriteContext::WriteContext(
- OperationContext* opCtx, const std::string& ns, bool doVersion)
- : _lk(opCtx->lockState(), ns),
+ Client::WriteContext::WriteContext(OperationContext* opCtx, const std::string& ns)
+ : _txn(opCtx),
+ _nss(ns),
+ _dblk(opCtx->lockState(), _nss.db(), newlm::MODE_IX),
+ _collk(opCtx->lockState(), ns, newlm::MODE_IX),
_wunit(opCtx),
- _c(opCtx, ns, doVersion) {
- }
+ _c(opCtx, ns) { }
void Client::WriteContext::commit() {
_wunit.commit();
diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h
index a3eb0cee79e..99957f6359e 100644
--- a/src/mongo/db/client.h
+++ b/src/mongo/db/client.h
@@ -36,6 +36,7 @@
#pragma once
+#include "mongo/db/catalog/database.h"
#include "mongo/db/client_basic.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/lasterror.h"
@@ -180,15 +181,24 @@ namespace mongo {
class WriteContext : boost::noncopyable {
public:
- WriteContext(OperationContext* opCtx, const std::string& ns, bool doVersion = true);
+ WriteContext(OperationContext* opCtx, const std::string& ns);
/** Commit any writes done so far in this context. */
void commit();
+ Database* db() const { return _c.db(); }
+
+ Collection* getCollection() const {
+ return _c.db()->getCollection(_txn, _nss.ns());
+ }
+
Context& ctx() { return _c; }
private:
- Lock::DBWrite _lk;
+ OperationContext* _txn;
+ NamespaceString _nss;
+ Lock::DBLock _dblk;
+ Lock::CollectionLock _collk;
WriteUnitOfWork _wunit;
Context _c;
};
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index eb75db0a55c..109c0598711 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -306,14 +306,14 @@ namespace mongo {
bool logForRepl) {
const NamespaceString nss(ns);
- Lock::DBWrite dbWrite(txn->lockState(), nss.db());
+ const string dbname = nss.db().toString();
- const string dbName = nss.db().toString();
+ Lock::DBLock dbWrite(txn->lockState(), dbname, newlm::MODE_X);
- Database* db = dbHolder().openDb(txn, dbName);
+ Database* db = dbHolder().openDb(txn, dbname);
// config
- string temp = dbName + ".system.namespaces";
+ string temp = dbname + ".system.namespaces";
BSONObj config = _conn->findOne(temp , BSON("name" << ns));
if (config["options"].isABSONObj()) {
WriteUnitOfWork wunit(txn);
@@ -326,7 +326,7 @@ namespace mongo {
}
// main data
- copy(txn, dbName,
+ copy(txn, dbname,
nss, nss,
logForRepl, false, true, mayYield, mayBeInterrupted,
Query(query).snapshot());
@@ -337,7 +337,7 @@ namespace mongo {
}
// indexes
- copyIndexes(txn, dbName,
+ copyIndexes(txn, dbname,
NamespaceString(ns), NamespaceString(ns),
logForRepl, false, true, mayYield,
mayBeInterrupted);
diff --git a/src/mongo/db/commands/apply_ops.cpp b/src/mongo/db/commands/apply_ops.cpp
index c0a1f398e56..cc760412120 100644
--- a/src/mongo/db/commands/apply_ops.cpp
+++ b/src/mongo/db/commands/apply_ops.cpp
@@ -126,26 +126,26 @@ namespace mongo {
string ns = temp["ns"].String();
- // Run operations under a nested lock as a hack to prevent them from yielding.
+ // Run operations under a nested lock as a hack to prevent yielding.
//
- // The list of operations is supposed to be applied atomically; yielding would break
- // atomicity by allowing an interruption or a shutdown to occur after only some
- // operations are applied. We are already locked globally at this point, so taking
- // a DBWrite on the namespace creates a nested lock, and yields are disallowed for
- // operations that hold a nested lock.
+ // The list of operations is supposed to be applied atomically; yielding
+ // would break atomicity by allowing an interruption or a shutdown to occur
+ // after only some operations are applied. We are already locked globally
+ // at this point, so taking a DBLock on the namespace creates a nested lock,
+ // and yields are disallowed for operations that hold a nested lock.
//
- // We do not have a wrapping WriteUnitOfWork so it is possible for a journal commit
- // to happen with a subset of ops applied.
+ // We do not have a wrapping WriteUnitOfWork so it is possible for a journal
+ // commit to happen with a subset of ops applied.
// TODO figure out what to do about this.
- Lock::DBWrite lk(txn->lockState(), ns);
+ Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
invariant(txn->lockState()->isRecursive());
Client::Context ctx(txn, ns);
bool failed = repl::applyOperation_inlock(txn,
- ctx.db(),
- temp,
- false,
- alwaysUpsert);
+ ctx.db(),
+ temp,
+ false,
+ alwaysUpsert);
ab.append(!failed);
if ( failed )
errors++;
diff --git a/src/mongo/db/commands/clone.cpp b/src/mongo/db/commands/clone.cpp
index a25deba9e32..0b73001e559 100644
--- a/src/mongo/db/commands/clone.cpp
+++ b/src/mongo/db/commands/clone.cpp
@@ -115,7 +115,7 @@ namespace mongo {
set<string> clonedColls;
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
Cloner cloner;
bool rval = cloner.go(txn, dbname, from, opts, &clonedColls, errmsg);
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 2a4f60cee31..24439e82b9b 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -161,7 +161,7 @@ namespace mongo {
return false;
}
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
Client::Context ctx(txn, dbname);
Status status = cloneCollectionAsCapped( txn, ctx.db(), from, to, size, temp, true );
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index 5ed0ac16aca..8ffac0bff38 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -97,7 +97,7 @@ namespace mongo {
return false;
}
- NamespaceString ns(db,coll);
+ NamespaceString ns(db, coll);
if ( !ns.isNormal() ) {
errmsg = "bad namespace name";
return false;
@@ -144,7 +144,7 @@ namespace mongo {
compactOptions.validateDocuments = cmdObj["validate"].trueValue();
- Lock::DBWrite lk(txn->lockState(), ns.ns());
+ Lock::DBLock lk(txn->lockState(), db, newlm::MODE_X);
BackgroundOperation::assertNoBgOpInProgForNs(ns.ns());
Client::Context ctx(txn, ns);
diff --git a/src/mongo/db/commands/copydb.cpp b/src/mongo/db/commands/copydb.cpp
index 208d792c59e..58f733e02b0 100644
--- a/src/mongo/db/commands/copydb.cpp
+++ b/src/mongo/db/commands/copydb.cpp
@@ -182,15 +182,14 @@ namespace mongo {
cloner.setConnection(conn);
}
-
- // SERVER-4328 todo lock just the two db's not everything for the fromself case
- scoped_ptr<Lock::ScopedLock> lk( fromSelf ?
- static_cast<Lock::ScopedLock*>(new Lock::GlobalWrite(txn->lockState())) :
- static_cast<Lock::ScopedLock*>(new Lock::DBWrite(txn->lockState(), todb)));
- if (!cloner.go(txn, todb, fromhost, cloneOptions, NULL, errmsg )) {
- return false;
+ if (fromSelf) {
+ // SERVER-4328 todo lock just the two db's not everything for the fromself case
+ Lock::GlobalWrite lk(txn->lockState());
+ return cloner.go(txn, todb, fromhost, cloneOptions, NULL, errmsg);
}
- return true;
+
+ Lock::DBLock lk (txn->lockState(), todb, newlm::MODE_X);
+ return cloner.go(txn, todb, fromhost, cloneOptions, NULL, errmsg);
}
} cmdCopyDB;
diff --git a/src/mongo/db/commands/cpuprofile.cpp b/src/mongo/db/commands/cpuprofile.cpp
index 884e43d0c29..b1c81ca3584 100644
--- a/src/mongo/db/commands/cpuprofile.cpp
+++ b/src/mongo/db/commands/cpuprofile.cpp
@@ -133,7 +133,7 @@ namespace mongo {
std::string &errmsg,
BSONObjBuilder &result,
bool fromRepl ) {
- Lock::DBWrite dbXLock(txn->lockState(), db);
+ Lock::DBLock dbXLock(txn->lockState(), db, newlm::MODE_X);
// The lock here is just to prevent concurrency, nothing will write.
Client::Context ctx(txn, db);
@@ -152,7 +152,7 @@ namespace mongo {
std::string &errmsg,
BSONObjBuilder &result,
bool fromRepl ) {
- Lock::DBWrite dbXLock(txn->lockState(), db);
+ Lock::DBLock dbXLock(txn->lockState(), db, newlm::MODE_X);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, db);
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 58791dea990..43ff03f26e4 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -134,7 +134,7 @@ namespace mongo {
// now we know we have to create index(es)
// Note: createIndexes command does not currently respect shard versioning.
- Lock::DBWrite lk(txn->lockState(), ns.ns());
+ Lock::DBLock lk(txn->lockState(), ns.db(), newlm::MODE_X);
Client::Context ctx(txn, ns.ns(), false /* doVersion */ );
Database* db = ctx.db();
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 26ea4d8df9c..937039070a4 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -104,7 +104,7 @@ namespace mongo {
CmdDropIndexes() : Command("dropIndexes", false, "deleteIndexes") { }
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& anObjBuilder, bool fromRepl) {
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
WriteUnitOfWork wunit(txn);
bool ok = wrappedRun(txn, dbname, jsobj, errmsg, anObjBuilder);
if (!ok) {
@@ -240,7 +240,7 @@ namespace mongo {
LOG(0) << "CMD: reIndex " << toDeleteNs << endl;
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
Client::Context ctx(txn, toDeleteNs);
Collection* collection = ctx.db()->getCollection( txn, toDeleteNs );
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 4477e62bec0..e8b308bbfdd 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -97,10 +97,7 @@ namespace mongo {
return false;
}
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
- Client::Context ctx(txn, ns);
-
- return runNoDirectClient( txn, ns ,
+ return runNoDirectClient( txn, ns ,
query , fields , update ,
upsert , returnNew , remove ,
result , errmsg );
@@ -137,11 +134,8 @@ namespace mongo {
BSONObjBuilder& result,
string& errmsg) {
- Lock::DBWrite lk(txn->lockState(), ns);
- WriteUnitOfWork wunit(txn);
- Client::Context cx(txn, ns);
-
- Collection* collection = cx.db()->getCollection( txn, ns );
+ Client::WriteContext cx(txn, ns);
+ Collection* collection = cx.getCollection();
const WhereCallbackReal whereCallback = WhereCallbackReal(txn, StringData(ns));
@@ -265,7 +259,7 @@ namespace mongo {
if ( !collection ) {
// collection created by an upsert
- collection = cx.db()->getCollection( txn, ns );
+ collection = cx.getCollection();
}
LOG(3) << "update result: " << res ;
@@ -303,7 +297,7 @@ namespace mongo {
}
}
- wunit.commit();
+ cx.commit();
return true;
}
@@ -335,7 +329,7 @@ namespace mongo {
}
}
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, ns);
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index d5613aea554..0e88bc4e2a4 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -332,7 +332,9 @@ namespace mongo {
if (_useIncremental) {
// We don't want to log the deletion of incLong as it isn't replicated. While
// harmless, this would lead to a scary looking warning on the secondaries.
- Lock::DBWrite lk(_txn->lockState(), _config.incLong);
+ Lock::DBLock lk(_txn->lockState(),
+ nsToDatabaseSubstring(_config.incLong),
+ newlm::MODE_X);
if (Database* db = dbHolder().get(_txn, _config.incLong)) {
WriteUnitOfWork wunit(_txn);
db->dropCollection(_txn, _config.incLong);
@@ -356,13 +358,13 @@ namespace mongo {
// Create the inc collection and make sure we have index on "0" key.
// Intentionally not replicating the inc collection to secondaries.
Client::WriteContext incCtx(_txn, _config.incLong);
- Collection* incColl = incCtx.ctx().db()->getCollection( _txn, _config.incLong );
+ Collection* incColl = incCtx.getCollection();
invariant(!incColl);
CollectionOptions options;
options.setNoIdIndex();
options.temp = true;
- incColl = incCtx.ctx().db()->createCollection( _txn, _config.incLong, options );
+ incColl = incCtx.db()->createCollection(_txn, _config.incLong, options);
invariant(incColl);
BSONObj indexSpec = BSON( "key" << BSON( "0" << 1 ) << "ns" << _config.incLong
@@ -381,8 +383,7 @@ namespace mongo {
{
// copy indexes into temporary storage
Client::WriteContext finalCtx(_txn, _config.outputOptions.finalNamespace);
- Collection* finalColl =
- finalCtx.ctx().db()->getCollection(_txn, _config.outputOptions.finalNamespace);
+ Collection* const finalColl = finalCtx.getCollection();
if ( finalColl ) {
IndexCatalog::IndexIterator ii =
finalColl->getIndexCatalog()->getIndexIterator( _txn, true );
@@ -413,14 +414,12 @@ namespace mongo {
uassert(ErrorCodes::NotMaster, "no longer master",
repl::getGlobalReplicationCoordinator()->
canAcceptWritesForDatabase(nsToDatabase(_config.tempNamespace.c_str())));
- Collection* tempColl = tempCtx.ctx().db()->getCollection( _txn, _config.tempNamespace );
+ Collection* tempColl = tempCtx.getCollection();
invariant(!tempColl);
CollectionOptions options;
options.temp = true;
- tempColl = tempCtx.ctx().db()->createCollection(_txn,
- _config.tempNamespace,
- options);
+ tempColl = tempCtx.db()->createCollection(_txn, _config.tempNamespace, options);
// Log the createCollection operation.
BSONObjBuilder b;
@@ -591,9 +590,11 @@ namespace mongo {
op->setMessage("m/r: merge post processing",
"M/R Merge Post Processing Progress",
_safeCount(_db, _config.tempNamespace, BSONObj()));
- auto_ptr<DBClientCursor> cursor = _db.query( _config.tempNamespace , BSONObj() );
- while ( cursor->more() ) {
- Lock::DBWrite lock(_txn->lockState(), _config.outputOptions.finalNamespace);
+ auto_ptr<DBClientCursor> cursor = _db.query(_config.tempNamespace , BSONObj());
+ while (cursor->more()) {
+ Lock::DBLock lock(_txn->lockState(),
+ nsToDatabaseSubstring(_config.outputOptions.finalNamespace),
+ newlm::MODE_X);
WriteUnitOfWork wunit(_txn);
BSONObj o = cursor->nextSafe();
Helpers::upsert( _txn, _config.outputOptions.finalNamespace , o );
@@ -662,7 +663,7 @@ namespace mongo {
uassert(ErrorCodes::NotMaster, "no longer master",
repl::getGlobalReplicationCoordinator()->
canAcceptWritesForDatabase(nsToDatabase(ns.c_str())));
- Collection* coll = getCollectionOrUassert(ctx.ctx().db(), ns);
+ Collection* coll = getCollectionOrUassert(ctx.db(), ns);
class BSONObjBuilder b;
if ( !o.hasField( "_id" ) ) {
@@ -685,7 +686,7 @@ namespace mongo {
verify( _onDisk );
Client::WriteContext ctx(_txn, _config.incLong );
- Collection* coll = getCollectionOrUassert(ctx.ctx().db(), _config.incLong);
+ Collection* coll = getCollectionOrUassert(ctx.db(), _config.incLong);
coll->insertDocument( _txn, o, true );
ctx.commit();
}
@@ -964,7 +965,7 @@ namespace mongo {
{
Client::WriteContext incCtx(_txn, _config.incLong );
- Collection* incColl = getCollectionOrUassert(incCtx.ctx().db(), _config.incLong );
+ Collection* incColl = getCollectionOrUassert(incCtx.db(), _config.incLong );
bool foundIndex = false;
IndexCatalog::IndexIterator ii =
@@ -1110,7 +1111,9 @@ namespace mongo {
if ( ! _onDisk )
return;
- Lock::DBWrite kl(_txn->lockState(), _config.incLong);
+ Lock::DBLock kl(_txn->lockState(),
+ nsToDatabaseSubstring(_config.incLong),
+ newlm::MODE_X);
WriteUnitOfWork wunit(_txn);
for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); i++ ) {
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 316d763babf..9121971efb1 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -64,7 +64,7 @@ namespace mongo {
string ns = dbname + "." + coll;
BSONObj obj = cmdObj[ "obj" ].embeddedObjectUserCheck();
- Lock::DBWrite lk(txn->lockState(), ns);
+ Lock::DBLock lk(txn->lockState(), dbname, newlm::MODE_X);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, ns );
Database* db = ctx.db();
@@ -148,7 +148,7 @@ namespace mongo {
bool inc = cmdObj.getBoolField( "inc" ); // inclusive range?
Client::WriteContext ctx(txn, nss.ns() );
- Collection* collection = ctx.ctx().db()->getCollection( txn, nss.ns() );
+ Collection* collection = ctx.getCollection();
massert( 13417, "captrunc collection not found or empty", collection);
DiskLoc end;
@@ -198,8 +198,8 @@ namespace mongo {
NamespaceString nss( dbname, coll );
Client::WriteContext ctx(txn, nss.ns() );
- Database* db = ctx.ctx().db();
- Collection* collection = db->getCollection( txn, nss.ns() );
+ Database* db = ctx.db();
+ Collection* collection = ctx.getCollection();
massert( 13429, "emptycapped no such collection", collection );
std::vector<BSONObj> indexes = stopIndexBuilds(txn, db, cmdObj);
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index 1e433e82e66..c9063c674cc 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -696,7 +696,7 @@ namespace mongo {
/**
* Gets the lock-holding object. Only valid if hasLock().
*/
- Lock::DBWrite& getLock() { return *_writeLock; }
+ Lock::DBLock& getLock() { return *_writeLock; }
/**
* Gets the target collection for the batch operation. Value is undefined
@@ -721,7 +721,7 @@ namespace mongo {
bool _lockAndCheckImpl(WriteOpResult* result);
// Guard object for the write lock on the target database.
- scoped_ptr<Lock::DBWrite> _writeLock;
+ scoped_ptr<Lock::DBLock> _writeLock;
// Context object on the target database. Must appear after writeLock, so that it is
// destroyed in proper order.
@@ -921,7 +921,9 @@ namespace mongo {
}
invariant(!_context.get());
- _writeLock.reset(new Lock::DBWrite(txn->lockState(), request->getNS()));
+ _writeLock.reset(new Lock::DBLock(txn->lockState(),
+ nsToDatabase(request->getNS()),
+ newlm::MODE_X));
if (!checkIsMasterForDatabase(request->getNS(), result)) {
return false;
}
@@ -1109,10 +1111,11 @@ namespace mongo {
WriteOpResult* result ) {
const NamespaceString nsString(updateItem.getRequest()->getNS());
+ const bool isMulti = updateItem.getUpdate()->getMulti();
UpdateRequest request(txn, nsString);
request.setQuery(updateItem.getUpdate()->getQuery());
request.setUpdates(updateItem.getUpdate()->getUpdateExpr());
- request.setMulti(updateItem.getUpdate()->getMulti());
+ request.setMulti(isMulti);
request.setUpsert(updateItem.getUpdate()->getUpsert());
request.setUpdateOpLog(true);
UpdateLifecycleImpl updateLifecycle(true, request.getNamespaceString());
@@ -1126,7 +1129,10 @@ namespace mongo {
}
///////////////////////////////////////////
- Lock::DBWrite writeLock(txn->lockState(), nsString.ns());
+ Lock::DBLock dbLock(txn->lockState(), nsString.db(), newlm::MODE_IX);
+ Lock::CollectionLock colLock(txn->lockState(),
+ nsString.ns(),
+ isMulti ? newlm::MODE_X : newlm::MODE_IX);
///////////////////////////////////////////
if (!checkShardVersion(txn, &shardingState, *updateItem.getRequest(), result))
@@ -1181,7 +1187,7 @@ namespace mongo {
}
///////////////////////////////////////////
- Lock::DBWrite writeLock(txn->lockState(), nss.ns());
+ Lock::DBLock writeLock(txn->lockState(), nss.db(), newlm::MODE_X);
///////////////////////////////////////////
// Check version once we're locked
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index fb014322e4a..5ec2bd98aa9 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -31,6 +31,7 @@
#include "mongo/base/init.h"
#include "mongo/bson/mutable/document.h"
#include "mongo/bson/mutable/element.h"
+#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/client.h"
#include "mongo/db/commands/write_commands/batch_executor.h"
#include "mongo/db/commands/write_commands/write_commands_common.h"
@@ -208,11 +209,12 @@ namespace mongo {
return prepStatus;
}
- // Explains of write commands are read-only, but we take a write lock so that timing
- // info is more accurate.
- Client::WriteContext ctx( txn, nsString );
+ // Explains of write commands are read-only, but we take an exclusive lock so
+ // that timing info is more accurate.
+ Lock::DBLock dlk(txn->lockState(), nsString.db(), newlm::MODE_X);
+ Client::Context ctx(txn, nsString);
- Status prepInLockStatus = updateExecutor.prepareInLock( ctx.ctx().db() );
+ Status prepInLockStatus = updateExecutor.prepareInLock(ctx.db());
if ( !prepInLockStatus.isOK() ) {
return prepInLockStatus;
}
@@ -244,9 +246,10 @@ namespace mongo {
// Explains of write commands are read-only, but we take a write lock so that timing
// info is more accurate.
- Client::WriteContext ctx( txn, nsString );
+ Lock::DBLock dlk(txn->lockState(), nsString.db(), newlm::MODE_X);
+ Client::Context ctx(txn, nsString);
- Status prepInLockStatus = deleteExecutor.prepareInLock( ctx.ctx().db() );
+ Status prepInLockStatus = deleteExecutor.prepareInLock(ctx.db());
if ( !prepInLockStatus.isOK()) {
return prepInLockStatus;
}
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index 1e33f5246e7..f510513fe5f 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -258,12 +258,12 @@ namespace mongo {
TrackLockAcquireTime a(isRead ? 'r' : 'w');
_lockState->lockGlobal(isRead ? newlm::MODE_IS : newlm::MODE_IX);
-
- if (supportsDocLocking() || isRead) {
+ if (supportsDocLocking()) {
+ // SERVER-14668: Make this branch unconditional when MMAPv1 has coll. locking
_lockState->lock(_id, _mode);
}
else {
- _lockState->lock(_id, newlm::MODE_X);
+ _lockState->lock(_id, isRead ? newlm::MODE_S : newlm::MODE_X);
}
resetTime();
@@ -291,19 +291,19 @@ namespace mongo {
isRead ? newlm::MODE_IS : newlm::MODE_IX));
if (supportsDocLocking()) {
_lockState->lock(_id, mode);
- }
- else {
- _lockState->lock(_id, isRead ? newlm::MODE_S : newlm::MODE_X);
+ // SERVER-14668: add when MMAPv1 ready for collection-level locking
+ // else { _lockState->lock(_id, isRead ? newlm::MODE_S : newlm::MODE_X); }
+ invariant(isRead || !isRead); // artificial use to silence warning.
}
}
Lock::CollectionLock::~CollectionLock() {
- _lockState->unlock(_id);
+ if (supportsDocLocking()) {
+ // SERVER-14668: Make unconditional when MMAPv1 has collection-level locking
+ _lockState->unlock(_id);
+ }
}
- Lock::DBWrite::DBWrite(Locker* lockState, const StringData& dbOrNs) :
- DBLock(lockState, nsToDatabaseSubstring(dbOrNs), newlm::MODE_X) { }
-
Lock::DBRead::DBRead(Locker* lockState, const StringData& dbOrNs) :
DBLock(lockState, nsToDatabaseSubstring(dbOrNs), newlm::MODE_S) { }
diff --git a/src/mongo/db/concurrency/d_concurrency.h b/src/mongo/db/concurrency/d_concurrency.h
index d4f11bb28d4..a55eaa077f3 100644
--- a/src/mongo/db/concurrency/d_concurrency.h
+++ b/src/mongo/db/concurrency/d_concurrency.h
@@ -196,9 +196,10 @@ namespace mongo {
* MODE_S: shared read access to the collection, blocking any writers
* MODE_X: exclusive access to the collection, blocking all other readers and writers
*
- * An appropriate DBLock must already be held before locking a collection.
- * For storage engines that do not support document-level locking, MODE_IS will be
- * upgraded to MODE_S and MODE_IX will be upgraded to MODE_X.
+ * An appropriate DBLock must already be held before locking a collection: it is an error,
+ * checked with a dassert(), to not have a suitable database lock before locking the
+ * collection. For storage engines that do not support document-level locking, MODE_IS
+ * will be upgraded to MODE_S and MODE_IX will be upgraded to MODE_X.
*/
class CollectionLock : boost::noncopyable {
public:
@@ -210,18 +211,6 @@ namespace mongo {
};
/**
- * Exclusive database lock -- DEPRECATED, please transition to DBLock and collection locks
- *
- * Allows exclusive write access to the given database, blocking any other access.
- * Allows further (recursive) acquisition of database locks for this database in any mode.
- * Also acquires the global lock in intent-exclusive (IX) mode.
- */
- class DBWrite : public DBLock {
- public:
- DBWrite(Locker* lockState, const StringData& dbOrNs);
- };
-
- /**
* Shared database lock -- DEPRECATED, please transition to DBLock and collection locks
*
* Allows concurrent read access to the given database, blocking any writers.
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 3391eab52a7..ddc59b17bd7 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -133,10 +133,10 @@ namespace mongo {
ASSERT(ls.getLockMode(resIdDb) == newlm::MODE_S);
}
- TEST(DConcurrency, DBWriteTakesX) {
+ TEST(DConcurrency, DBLockTakesX) {
LockState ls;
- Lock::DBWrite dbWrite(&ls, "db");
+ Lock::DBLock dbWrite(&ls, "db", newlm::MODE_X);
const newlm::ResourceId resIdDb(newlm::RESOURCE_DATABASE, string("db"));
ASSERT(ls.getLockMode(resIdDb) == newlm::MODE_X);
diff --git a/src/mongo/db/concurrency/lock_mgr_new.cpp b/src/mongo/db/concurrency/lock_mgr_new.cpp
index 75465e0cf1d..8bb924c2ed5 100644
--- a/src/mongo/db/concurrency/lock_mgr_new.cpp
+++ b/src/mongo/db/concurrency/lock_mgr_new.cpp
@@ -168,9 +168,8 @@ namespace newlm {
LockManager::LockManager() : _noCheckForLeakedLocksTestOnly(false) {
- // TODO: Generate this based on the # of CPUs. For now, use 1 bucket to make debugging
- // easier.
- _numLockBuckets = 1;
+ // Have more buckets than CPUs to reduce contention on lock and caches
+ _numLockBuckets = 128;
_lockBuckets = new LockBucket[_numLockBuckets];
}
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 9f60fbcae5d..4b010e4ce9b 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -349,7 +349,7 @@ namespace mongo {
// Needs to be locked exclusively, because creates the system.profile collection
// in the local database.
//
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
Client::Context ctx(txn, dbname);
BSONElement e = cmdObj.firstElement();
@@ -404,7 +404,7 @@ namespace mongo {
// This doesn't look like it requires exclusive DB lock, because it uses its own diag
// locking, but originally the lock was set to be WRITE, so preserving the behaviour.
//
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
Client::Context ctx(txn, dbname);
int was = _diaglog.setLevel( cmdObj.firstElement().numberInt() );
@@ -460,7 +460,7 @@ namespace mongo {
return false;
}
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, nsToDrop);
Database* db = ctx.db();
@@ -560,7 +560,7 @@ namespace mongo {
!options["capped"].trueValue() || options["size"].isNumber() ||
options.hasField("$nExtents"));
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, ns);
@@ -951,7 +951,7 @@ namespace mongo {
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
const string ns = dbname + "." + jsobj.firstElement().valuestr();
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, ns );
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index d8de49742fb..68f1998047c 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -364,7 +364,7 @@ namespace mongo {
// Scoping for write lock.
{
Client::WriteContext ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection( txn, ns );
+ Collection* collection = ctx.getCollection();
if ( !collection )
break;
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 04d181c00c0..e2db4f3a120 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -121,7 +121,7 @@ namespace mongo {
// TODO A write lock is currently taken here to accommodate stages that perform writes
// (e.g. DeleteStage). This should be changed to use a read lock for read-only
// execution trees.
- Lock::DBWrite lk(txn->lockState(), dbname);
+ Lock::DBLock lk(txn->lockState(), dbname, newlm::MODE_X);
Client::Context ctx(txn, dbname);
// Make sure the collection is valid.
diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp
index 9e7e7c09312..737cb4d1196 100644
--- a/src/mongo/db/index_builder.cpp
+++ b/src/mongo/db/index_builder.cpp
@@ -70,7 +70,9 @@ namespace mongo {
txn.getCurOp()->reset(HostAndPort(), dbInsert);
NamespaceString ns(_index["ns"].String());
- Client::WriteContext ctx(&txn, ns.getSystemIndexesCollection());
+
+ Lock::DBLock dlk(txn.lockState(), ns.db(), newlm::MODE_X);
+ Client::Context ctx(&txn, ns.getSystemIndexesCollection());
Database* db = dbHolder().get(&txn, ns.db().toString());
@@ -78,7 +80,6 @@ namespace mongo {
if ( !status.isOK() ) {
log() << "IndexBuilder could not build index: " << status.toString();
}
- ctx.commit();
txn.getClient()->shutdown();
}
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index 22d8e055382..fa494c5f220 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -64,7 +64,7 @@ namespace {
// This write lock is held throughout the index building process
// for this namespace.
- Lock::DBWrite lk(txn->lockState(), ns);
+ Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
Client::Context ctx(txn, ns);
Collection* collection = ctx.db()->getCollection(txn, ns);
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index 2ad8a6f94fd..9634249582a 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -621,13 +621,30 @@ namespace mongo {
UpdateExecutor executor(&request, &op.debug());
uassertStatusOK(executor.prepare());
- Lock::DBWrite lk(txn->lockState(), ns.ns());
- Client::Context ctx(txn, ns );
-
- UpdateResult res = executor.execute(ctx.db());
+ {
+ // Tentatively take an intent lock, fix up if we need to create the collection
+ Lock::DBLock dbLock(txn->lockState(), ns.db(), newlm::MODE_IX);
+ Lock::CollectionLock colLock(txn->lockState(), ns.ns(), newlm::MODE_IX);
+ Client::Context ctx(txn, ns);
+
+ // The common case: no implicit collection creation
+ if (!upsert || ctx.db()->getCollection(txn, ns) != NULL) {
+ UpdateResult res = executor.execute(ctx.db());
+
+ // for getlasterror
+ lastError.getSafe()->recordUpdate( res.existing , res.numMatched , res.upserted );
+ return;
+ }
+ }
- // for getlasterror
- lastError.getSafe()->recordUpdate( res.existing , res.numMatched , res.upserted );
+ // This is an upsert into a non-existing database, so need an exclusive lock
+ // to avoid deadlock
+ {
+ Lock::DBLock dbLock(txn->lockState(), ns.db(), newlm::MODE_X);
+ Client::Context ctx(txn, ns);
+ UpdateResult res = executor.execute(ctx.db());
+ lastError.getSafe()->recordUpdate( res.existing , res.numMatched , res.upserted );
+ }
}
void receivedDelete(OperationContext* txn, Message& m, CurOp& op) {
@@ -655,7 +672,8 @@ namespace mongo {
DeleteExecutor executor(&request);
uassertStatusOK(executor.prepare());
- Lock::DBWrite lk(txn->lockState(), ns.ns());
+ Lock::DBLock dbLocklk(txn->lockState(), ns.db(), newlm::MODE_IX);
+ Lock::CollectionLock colLock(txn->lockState(), ns.ns(), newlm::MODE_IX);
Client::Context ctx(txn, ns);
long long n = executor.execute(ctx.db());
@@ -914,7 +932,8 @@ namespace mongo {
uassertStatusOK(status);
}
- Lock::DBWrite lk(txn->lockState(), ns);
+ Lock::DBLock dbLock(txn->lockState(), nsString.db(), newlm::MODE_X);
+ // TODO(SERVER-14668): Use IX coll lock except for system.indexes or non-existing
// CONCURRENCY TODO: is being read locked in big log sufficient here?
// writelock is used to synchronize stepdowns w/ writes
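The receivedUpdate() change above takes intent locks first and only escalates to an exclusive database lock when an upsert must create the collection implicitly. A standalone control-flow sketch of that two-phase pattern (hypothetical names, not MongoDB's API) is:

#include <iostream>
#include <set>
#include <string>

struct FakeDb {
    std::set<std::string> collections;
    bool hasCollection(const std::string& ns) const { return collections.count(ns) != 0; }
    void createCollection(const std::string& ns) { collections.insert(ns); }
};

void applyUpdate(FakeDb& db, const std::string& ns) {
    std::cout << "update applied to " << ns << "\n";
}

void receivedUpdateSketch(FakeDb& db, const std::string& ns, bool upsert) {
    {
        // Phase 1: would hold DBLock(MODE_IX) + CollectionLock(MODE_IX) here.
        if (!upsert || db.hasCollection(ns)) {
            applyUpdate(db, ns);          // common case, no implicit creation needed
            return;
        }
    } // intent locks released at end of scope

    // Phase 2: would reacquire the database in MODE_X here, since creating
    // the collection needs exclusive access and upgrading in place can deadlock.
    if (!db.hasCollection(ns)) {          // re-check after reacquiring the lock
        db.createCollection(ns);
    }
    applyUpdate(db, ns);
}

int main() {
    FakeDb db;
    receivedUpdateSketch(db, "test.c", /*upsert=*/true);  // slow path: creates, then updates
    receivedUpdateSketch(db, "test.c", /*upsert=*/true);  // fast path: collection exists
    return 0;
}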
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index f3172fce6a4..aa6cf2eda1f 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -135,10 +135,11 @@ namespace {
BufBuilder profileBufBuilder(1024);
try {
- // NOTE: It's kind of weird that we lock the op's namespace, but have to for now since
- // we're sometimes inside the lock already
- Lock::DBWrite lk(txn->lockState(), currentOp.getNS() );
- if (dbHolder().get(txn, nsToDatabase(currentOp.getNS())) != NULL) {
+ // NOTE: It's kind of weird that we lock the op's namespace, but have to for now
+ // since we're sometimes inside the lock already
+ const string dbname(nsToDatabase(currentOp.getNS()));
+ Lock::DBLock lk(txn->lockState(), dbname, newlm::MODE_X);
+ if (dbHolder().get(txn, dbname) != NULL) {
// We are ok with the profiling happening in a different WUOW from the actual op.
WriteUnitOfWork wunit(txn);
Client::Context cx(txn, currentOp.getNS(), false);
diff --git a/src/mongo/db/ops/update_executor.cpp b/src/mongo/db/ops/update_executor.cpp
index 77d59ec3729..12bb3e89a73 100644
--- a/src/mongo/db/ops/update_executor.cpp
+++ b/src/mongo/db/ops/update_executor.cpp
@@ -31,6 +31,7 @@
#include "mongo/db/ops/update_executor.h"
#include "mongo/db/catalog/database.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/exec/update.h"
#include "mongo/db/ops/update.h"
#include "mongo/db/ops/update_driver.h"
@@ -40,6 +41,7 @@
#include "mongo/db/query/get_executor.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/util/assert_util.h"
+#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -92,6 +94,8 @@ namespace mongo {
return _exec.get();
}
+ MONGO_FP_DECLARE(implicitCollectionCreationDelay);
+
Status UpdateExecutor::prepareInLock(Database* db) {
// If we have a non-NULL PlanExecutor, then we've already done the in-lock preparation.
if (_exec.get()) {
@@ -107,22 +111,32 @@ namespace mongo {
// The update stage does not create its own collection. As such, if the update is
// an upsert, create the collection that the update stage inserts into beforehand.
- if (_request->isUpsert()) {
- if (!collection) {
- OperationContext* const txn = _request->getOpCtx();
- WriteUnitOfWork wuow(txn);
- invariant(txn->lockState()->isWriteLocked());
- invariant(db->createCollection(txn, nsString.ns()));
-
- if (!_request->isFromReplication()) {
- repl::logOp(txn,
- "c",
- (db->name() + ".$cmd").c_str(),
- BSON("create" << (nsString.coll())));
- }
- wuow.commit();
- collection = db->getCollection(_request->getOpCtx(), nsString.ns());
+ if (!collection && _request->isUpsert()) {
+ OperationContext* const txn = _request->getOpCtx();
+
+ // Upgrade to an exclusive lock. While this may possibly lead to a deadlock,
+ // collection creation is rare and a retry will definitively succeed in this
+ // case. Add a fail point to allow reliably triggering the deadlock situation.
+
+ MONGO_FAIL_POINT_BLOCK(implicitCollectionCreationDelay, data) {
+ LOG(0) << "Sleeping for creation of collection " + nsString.ns();
+ sleepmillis(1000);
+ LOG(0) << "About to upgrade to exclusive lock on " + nsString.ns();
+ }
+
+ Lock::DBLock lk(txn->lockState(), nsString.db(), newlm::MODE_X);
+
+ WriteUnitOfWork wuow(txn);
+ invariant(db->createCollection(txn, nsString.ns()));
+
+ if (!_request->isFromReplication()) {
+ repl::logOp(txn,
+ "c",
+ (db->name() + ".$cmd").c_str(),
+ BSON("create" << (nsString.coll())));
}
+ wuow.commit();
+ collection = db->getCollection(_request->getOpCtx(), nsString.ns());
invariant(collection);
}
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 77a4b09e9d2..123d52bf344 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -168,7 +168,8 @@ namespace repl {
bool exists = Helpers::getSingleton(txn, "local.me", _me);
if (!exists || !_me.hasField("host") || _me["host"].String() != myname) {
- Client::WriteContext ctx(txn, "local");
+ Lock::DBLock dblk(txn->lockState(), "local", newlm::MODE_X);
+ WriteUnitOfWork wunit(txn);
// clean out local.me
Helpers::emptyCollection(txn, "local.me");
@@ -178,7 +179,7 @@ namespace repl {
b.append("host", myname);
_me = b.obj();
Helpers::putSingleton(txn, "local.me", _me);
- ctx.commit();
+ wunit.commit();
}
_me = _me.getOwned();
}
diff --git a/src/mongo/db/repl/minvalid.cpp b/src/mongo/db/repl/minvalid.cpp
index c1d4db3f221..47620adbb06 100644
--- a/src/mongo/db/repl/minvalid.cpp
+++ b/src/mongo/db/repl/minvalid.cpp
@@ -49,14 +49,14 @@ namespace {
} // namespace
void clearInitialSyncFlag(OperationContext* txn) {
- Lock::DBWrite lk(txn->lockState(), "local");
+ Lock::DBLock lk(txn->lockState(), "local", newlm::MODE_X);
WriteUnitOfWork wunit(txn);
Helpers::putSingleton(txn, minvalidNS, BSON("$unset" << initialSyncFlag));
wunit.commit();
}
void setInitialSyncFlag(OperationContext* txn) {
- Lock::DBWrite lk(txn->lockState(), "local");
+ Lock::DBLock lk(txn->lockState(), "local", newlm::MODE_X);
WriteUnitOfWork wunit(txn);
Helpers::putSingleton(txn, minvalidNS, BSON("$set" << initialSyncFlag));
wunit.commit();
@@ -72,7 +72,7 @@ namespace {
}
void setMinValid(OperationContext* ctx, OpTime ts) {
- Lock::DBWrite lk(ctx->lockState(), "local");
+ Lock::DBLock lk(ctx->lockState(), "local", newlm::MODE_X);
WriteUnitOfWork wunit(ctx);
Helpers::putSingleton(ctx, minvalidNS, BSON("$set" << BSON("ts" << ts)));
wunit.commit();
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 47813751bee..12596e0ce63 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -120,7 +120,7 @@ namespace repl {
todo : make _logOpRS() call this so we don't repeat ourself?
*/
OpTime _logOpObjRS(OperationContext* txn, const BSONObj& op) {
- Lock::DBWrite lk(txn->lockState(), "local");
+ Lock::DBLock lk(txn->lockState(), "local", newlm::MODE_X);
// XXX soon this needs to be part of an outer WUOW not its own.
// We can't do this yet due to locking limitations.
WriteUnitOfWork wunit(txn);
@@ -237,7 +237,7 @@ namespace repl {
BSONObj *o2,
bool *bb,
bool fromMigrate ) {
- Lock::DBWrite lk1(txn->lockState(), "local");
+ Lock::DBLock lk1(txn->lockState(), "local", newlm::MODE_X);
WriteUnitOfWork wunit(txn);
if ( strncmp(ns, "local.", 6) == 0 ) {
@@ -321,7 +321,7 @@ namespace repl {
BSONObj *o2,
bool *bb,
bool fromMigrate ) {
- Lock::DBWrite lk(txn->lockState(), "local");
+ Lock::DBLock lk(txn->lockState(), "local", newlm::MODE_X);
WriteUnitOfWork wunit(txn);
static BufBuilder bufbuilder(8*1024); // todo there is likely a mutex on this constructor
diff --git a/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp b/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp
index dcbabb5e5a1..4d359bd023e 100644
--- a/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp
@@ -58,7 +58,9 @@ namespace repl {
namespace {
const char configCollectionName[] = "local.system.replset";
+ const char configDatabaseName[] = "local";
const char meCollectionName[] = "local.me";
+ const char meDatabaseName[] = "local";
const char tsFieldName[] = "ts";
} // namespace
@@ -100,7 +102,7 @@ namespace {
std::string myname = getHostName();
OID myRID;
{
- Lock::DBWrite lock(txn->lockState(), meCollectionName);
+ Lock::DBLock lock(txn->lockState(), meDatabaseName, newlm::MODE_X);
BSONObj me;
// local.me is an identifier for a server for getLastError w:2+
@@ -146,7 +148,7 @@ namespace {
OperationContext* txn,
const BSONObj& config) {
try {
- Lock::DBWrite dbWriteLock(txn->lockState(), configCollectionName);
+ Lock::DBLock dbWriteLock(txn->lockState(), configDatabaseName, newlm::MODE_X);
Helpers::putSingleton(txn, configCollectionName, config);
return Status::OK();
}
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index 6f4e58b14a9..0226547127d 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -84,7 +84,7 @@ namespace {
options.syncIndexes = ! dataPass;
// Make database stable
- Lock::DBWrite dbWrite(txn->lockState(), db);
+ Lock::DBLock dbWrite(txn->lockState(), db, newlm::MODE_X);
if (!cloner.go(txn, db, host, options, NULL, err, &errCode)) {
log() << "initial sync: error while "
@@ -280,7 +280,8 @@ namespace {
log() << "initial sync finishing up";
{
- Client::WriteContext cx(&txn, "local.");
+ AutoGetDb autodb(&txn, "local", newlm::MODE_X);
+ WriteUnitOfWork wunit(&txn);
OpTime lastOpTimeWritten(getGlobalReplicationCoordinator()->getMyLastOptime());
log() << "replSet set minValid=" << lastOpTimeWritten << rsLog;
@@ -291,7 +292,7 @@ namespace {
// Clear the initial sync flag.
clearInitialSyncFlag(&txn);
BackgroundSync::get()->setInitialSyncRequestedFlag(false);
- cx.commit();
+ wunit.commit();
}
// If we just cloned & there were no ops applied, we still want the primary to know where
diff --git a/src/mongo/db/repl/sync_source_feedback.cpp b/src/mongo/db/repl/sync_source_feedback.cpp
index 6e642a70af6..28cb8c12384 100644
--- a/src/mongo/db/repl/sync_source_feedback.cpp
+++ b/src/mongo/db/repl/sync_source_feedback.cpp
@@ -78,7 +78,9 @@ namespace repl {
void SyncSourceFeedback::ensureMe(OperationContext* txn) {
string myname = getHostName();
{
- Client::WriteContext ctx(txn, "local");
+ Lock::DBLock dlk(txn->lockState(), "local", newlm::MODE_X);
+ WriteUnitOfWork wunit(txn);
+ Client::Context ctx(txn, "local");
// local.me is an identifier for a server for getLastError w:2+
if (!Helpers::getSingleton(txn, "local.me", _me) ||
@@ -95,7 +97,7 @@ namespace repl {
_me = b.obj();
Helpers::putSingleton(txn, "local.me", _me);
}
- ctx.commit();
+ wunit.commit();
// _me is used outside of a read lock, so we must copy it out of the mmap
_me = _me.getOwned();
}
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index bd15fa58ccb..683990a5dab 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -124,7 +124,7 @@ namespace repl {
lk.reset(new Lock::GlobalWrite(txn->lockState()));
} else {
// DB level lock for this operation
- lk.reset(new Lock::DBWrite(txn->lockState(), ns));
+ lk.reset(new Lock::DBLock(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X));
}
Client::Context ctx(txn, ns);
@@ -332,7 +332,7 @@ namespace {
BackgroundSync* bgsync = BackgroundSync::get();
if (bgsync->getInitialSyncRequestedFlag()) {
// got a resync command
- Lock::DBWrite lk(txn.lockState(), "local");
+ Lock::DBLock lk(txn.lockState(), "local", newlm::MODE_X);
WriteUnitOfWork wunit(&txn);
Client::Context ctx(&txn, "local");
@@ -500,7 +500,7 @@ namespace {
OpTime lastOpTime;
{
OperationContextImpl txn; // XXX?
- Lock::DBWrite lk(txn.lockState(), "local");
+ Lock::DBLock lk(txn.lockState(), "local", newlm::MODE_X);
WriteUnitOfWork wunit(&txn);
while (!ops->empty()) {
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index 7ea062c0784..9d5ad64b787 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -121,7 +121,7 @@ namespace mongo {
const string ns = idx["ns"].String();
Client::WriteContext ctx(txn, ns );
- Collection* collection = ctx.ctx().db()->getCollection( txn, ns );
+ Collection* collection = ctx.getCollection();
if ( !collection ) {
// collection was dropped
continue;
diff --git a/src/mongo/dbtests/clienttests.cpp b/src/mongo/dbtests/clienttests.cpp
index 039bf9f3e36..01584aed69d 100644
--- a/src/mongo/dbtests/clienttests.cpp
+++ b/src/mongo/dbtests/clienttests.cpp
@@ -142,7 +142,7 @@ namespace ClientTests {
db.insert(ns(), BSON("x" << 1 << "y" << 2));
db.insert(ns(), BSON("x" << 2 << "y" << 2));
- Collection* collection = ctx.ctx().db()->getCollection( &txn, ns() );
+ Collection* collection = ctx.getCollection();
ASSERT( collection );
IndexCatalog* indexCatalog = collection->getIndexCatalog();
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index 31dabc13a97..4d0f87bb811 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -43,9 +43,9 @@ namespace CountTests {
class Base {
public:
- Base() : lk(_txn.lockState(), ns()),
- _context(&_txn, ns()),
- _client(&_txn) {
+ Base() : _lk(_txn.lockState(), nsToDatabaseSubstring(ns()), newlm::MODE_X),
+ _context(&_txn, ns()),
+ _client(&_txn) {
_database = _context.db();
{
WriteUnitOfWork wunit(&_txn);
@@ -104,7 +104,7 @@ namespace CountTests {
OperationContextImpl _txn;
- Lock::DBWrite lk;
+ Lock::DBLock _lk;
Client::Context _context;
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index 3706b5dac48..a49fb91bd9f 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -62,7 +62,7 @@ namespace mongo {
{
// Remove _id range [_min, _max).
- Lock::DBWrite lk(txn.lockState(), ns);
+ Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
WriteUnitOfWork wunit(&txn);
Client::Context ctx(&txn, ns );
diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp
index 1a17bfa7c8c..65ef3f1ee63 100644
--- a/src/mongo/dbtests/documentsourcetests.cpp
+++ b/src/mongo/dbtests/documentsourcetests.cpp
@@ -178,8 +178,7 @@ namespace DocumentSourceTests {
CanonicalQuery* cq;
uassertStatusOK(CanonicalQuery::canonicalize(ns, /*query=*/BSONObj(), &cq));
PlanExecutor* execBare;
- uassertStatusOK(getExecutor(&_opCtx, ctx.ctx().db()->getCollection(&_opCtx, ns),
- cq, &execBare));
+ uassertStatusOK(getExecutor(&_opCtx, ctx.getCollection(), cq, &execBare));
_exec.reset(execBare);
_exec->saveState();
diff --git a/src/mongo/dbtests/indexcatalogtests.cpp b/src/mongo/dbtests/indexcatalogtests.cpp
index d66633f07b7..a18488c542a 100644
--- a/src/mongo/dbtests/indexcatalogtests.cpp
+++ b/src/mongo/dbtests/indexcatalogtests.cpp
@@ -35,7 +35,7 @@ namespace IndexCatalogTests {
OperationContextImpl txn;
Client::WriteContext ctx(&txn, _ns);
- _db = ctx.ctx().db();
+ _db = ctx.db();
_coll = _db->createCollection(&txn, _ns);
_catalog = _coll->getIndexCatalog();
ctx.commit();
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 1e9e3a520b1..28d1d707289 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -65,7 +65,7 @@ namespace IndexUpdateTests {
getGlobalEnvironment()->unsetKillAllOperations();
}
Collection* collection() {
- return _ctx.ctx().db()->getCollection( &_txn, _ns );
+ return _ctx.getCollection();
}
protected:
// QUERY_MIGRATION
@@ -663,7 +663,9 @@ namespace IndexUpdateTests {
memcpy( infoRecord->data(), indexInfo.objdata(), indexInfo.objsize() );
addRecordToRecListInExtent( infoRecord, infoLoc );
- return new IndexCatalog::IndexBuildBlock( _ctx.ctx().db()->getCollection( _ns )->getIndexCatalog(), name, infoLoc );
+ return new IndexCatalog::IndexBuildBlock( _ctx.getCollection()->getIndexCatalog(),
+ name,
+ infoLoc );
}
};
#endif
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index a33f33ef06c..d755d355a82 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -524,7 +524,7 @@ namespace NamespaceTests {
OperationContextImpl txn;
- Lock::DBWrite lk(txn.lockState(), dbName);
+ Lock::DBLock lk(txn.lockState(), dbName, newlm::MODE_X);
bool justCreated;
Database* db = dbHolder().openDb(&txn, dbName, &justCreated);
@@ -567,7 +567,7 @@ namespace NamespaceTests {
OperationContextImpl txn;
- Lock::DBWrite lk(txn.lockState(), dbName);
+ Lock::DBLock lk(txn.lockState(), dbName, newlm::MODE_X);
bool justCreated;
Database* db = dbHolder().openDb(&txn, dbName, &justCreated);
diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp
index 8b77c8f4eb4..6687c4a47f3 100644
--- a/src/mongo/dbtests/query_plan_executor.cpp
+++ b/src/mongo/dbtests/query_plan_executor.cpp
@@ -191,7 +191,7 @@ namespace QueryPlanExecutor {
BSONObj filterObj = fromjson("{_id: {$gt: 0}}");
- Collection* coll = ctx.ctx().db()->getCollection(&_txn, ns());
+ Collection* coll = ctx.getCollection();
scoped_ptr<PlanExecutor> exec(makeCollScanExec(coll, filterObj));
registerExec(exec.get());
@@ -271,7 +271,7 @@ namespace QueryPlanExecutor {
std::auto_ptr<WorkingSet> ws(new WorkingSet());
std::auto_ptr<PipelineProxyStage> proxy(
new PipelineProxyStage(pipeline, innerExec, ws.get()));
- Collection* collection = ctx.ctx().db()->getCollection(&_txn, ns());
+ Collection* collection = ctx.getCollection();
boost::scoped_ptr<PlanExecutor> outerExec(
new PlanExecutor(&_txn, ws.release(), proxy.release(), collection));
@@ -343,7 +343,7 @@ namespace QueryPlanExecutor {
BSONObj filterObj = fromjson("{a: {$gte: 2}}");
- Collection* coll = ctx.ctx().db()->getCollection(&_txn, ns());
+ Collection* coll = ctx.getCollection();
scoped_ptr<PlanExecutor> exec(makeCollScanExec(coll, filterObj));
BSONObj objOut;
@@ -403,7 +403,7 @@ namespace QueryPlanExecutor {
BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
- Collection* coll = ctx.ctx().db()->getCollection(&_txn, ns());
+ Collection* coll = ctx.getCollection();
PlanExecutor* exec = makeCollScanExec(coll,filterObj);
// Make a client cursor from the runner.
@@ -412,7 +412,7 @@ namespace QueryPlanExecutor {
// There should be one cursor before invalidation,
// and zero cursors after invalidation.
ASSERT_EQUALS(1U, numCursors());
- ctx.ctx().db()->getCollection( &_txn, ns() )->cursorCache()->invalidateAll(false);
+ coll->cursorCache()->invalidateAll(false);
ASSERT_EQUALS(0U, numCursors());
ctx.commit();
}
@@ -428,7 +428,7 @@ namespace QueryPlanExecutor {
Client::WriteContext ctx(&_txn, ns());
insert(BSON("a" << 1 << "b" << 1));
- Collection* collection = ctx.ctx().db()->getCollection(&_txn, ns());
+ Collection* collection = ctx.getCollection();
BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
PlanExecutor* exec = makeCollScanExec(collection, filterObj);
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index 171e916b8c2..ddc255bfce0 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -157,8 +157,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -259,8 +259,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -345,8 +345,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -394,8 +394,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -446,8 +446,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -497,8 +497,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -558,8 +558,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -617,8 +617,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -676,8 +676,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -724,8 +724,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -776,8 +776,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -834,8 +834,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -897,8 +897,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -1014,8 +1014,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -1067,8 +1067,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -1112,8 +1112,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -1160,8 +1160,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -1205,8 +1205,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -1267,8 +1267,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
@@ -1320,8 +1320,8 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Database* db = ctx.ctx().db();
- Collection* coll = db->getCollection(&_txn, ns());
+ Database* db = ctx.db();
+ Collection* coll = ctx.getCollection();
if (!coll) {
coll = db->createCollection(&_txn, ns());
}
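
The change repeated across the query_stage_and.cpp hunks above swaps ctx.ctx().db()->getCollection(&_txn, ns()) for the WriteContext accessors ctx.db() and ctx.getCollection(). The standalone sketch below uses hypothetical Database/Collection/WriteContext stand-ins, not the real MongoDB classes, to show the shape of such caching accessors: the context resolves the database and collection once and hands them back directly, instead of forcing each test to chase through nested context objects.

#include <iostream>
#include <map>
#include <string>

struct Collection { std::string ns; };

struct Database {
    std::map<std::string, Collection> collections;
    Collection* getCollection(const std::string& ns) {
        auto it = collections.find(ns);
        return it == collections.end() ? nullptr : &it->second;
    }
    Collection* createCollection(const std::string& ns) {
        return &(collections[ns] = Collection{ns});
    }
};

class WriteContext {
public:
    WriteContext(Database* db, const std::string& ns)
        : _db(db), _coll(db->getCollection(ns)) {}

    Database* db() const { return _db; }                  // replaces ctx.ctx().db()
    Collection* getCollection() const { return _coll; }   // replaces db()->getCollection(ns)

private:
    Database* _db;
    Collection* _coll;
};

int main() {
    Database db;
    db.createCollection("unittests.querystageand");

    WriteContext ctx(&db, "unittests.querystageand");
    std::cout << (ctx.getCollection() ? ctx.getCollection()->ns : "missing") << "\n";
    return 0;
}
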
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 689ce3677e9..9cbf2dc22f7 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -246,7 +246,7 @@ namespace QueryStageCollectionScan {
void run() {
Client::WriteContext ctx(&_txn, ns());
- Collection* coll = ctx.ctx().db()->getCollection( &_txn, ns() );
+ Collection* coll = ctx.getCollection();
// Get the DiskLocs that would be returned by an in-order scan.
vector<DiskLoc> locs;
@@ -308,7 +308,7 @@ namespace QueryStageCollectionScan {
public:
void run() {
Client::WriteContext ctx(&_txn, ns());
- Collection* coll = ctx.ctx().db()->getCollection(&_txn, ns());
+ Collection* coll = ctx.getCollection();
// Get the DiskLocs that would be returned by an in-order scan.
vector<DiskLoc> locs;
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 6c70aa1280a..8e18509342e 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -121,7 +121,7 @@ namespace QueryStageCount {
// Set up the count stage
CountScanParams params;
- params.descriptor = getIndex(ctx.ctx().db(), BSON("a" << 1));
+ params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
verify(params.descriptor);
params.startKey = BSON("a" << 1);
params.startKeyInclusive = true;
@@ -155,7 +155,7 @@ namespace QueryStageCount {
// Set up the count stage
CountScanParams params;
- params.descriptor = getIndex(ctx.ctx().db(), BSON("a" << 1));
+ params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
params.startKey = BSON("" << 3);
params.startKeyInclusive = true;
params.endKey = BSON("" << 7);
@@ -188,7 +188,7 @@ namespace QueryStageCount {
// Set up the count stage
CountScanParams params;
- params.descriptor = getIndex(ctx.ctx().db(), BSON("a" << 1));
+ params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
params.startKey = BSON("" << 3);
params.startKeyInclusive = false;
params.endKey = BSON("" << 7);
@@ -217,7 +217,7 @@ namespace QueryStageCount {
// Set up count, and run
CountScanParams params;
- params.descriptor = getIndex(ctx.ctx().db(), BSON("a" << 1));
+ params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
params.startKey = BSON("" << 2);
params.startKeyInclusive = false;
params.endKey = BSON("" << 3);
@@ -247,7 +247,7 @@ namespace QueryStageCount {
// Set up count, and run
CountScanParams params;
- params.descriptor = getIndex(ctx.ctx().db(), BSON("a" << 1));
+ params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
params.startKey = BSON("" << 2);
params.startKeyInclusive = false;
params.endKey = BSON("" << 3);
@@ -278,7 +278,7 @@ namespace QueryStageCount {
// Set up count, and run
CountScanParams params;
- params.descriptor = getIndex(ctx.ctx().db(), BSON("a" << 1));
+ params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
params.startKey = BSON("" << 2);
params.startKeyInclusive = false;
params.endKey = BSON("" << 3);
@@ -602,7 +602,7 @@ namespace QueryStageCount {
// Set up count stage
CountScanParams params;
- params.descriptor = getIndex(ctx.ctx().db(), BSON("a" << 1));
+ params.descriptor = getIndex(ctx.db(), BSON("a" << 1));
params.startKey = BSON("" << 1);
params.startKeyInclusive = true;
params.endKey = BSON("" << 1);
diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp
index b2e6c9db085..19ce4fe37cd 100644
--- a/src/mongo/dbtests/query_stage_delete.cpp
+++ b/src/mongo/dbtests/query_stage_delete.cpp
@@ -110,7 +110,7 @@ namespace QueryStageDelete {
void run() {
Client::WriteContext ctx(&_txn, ns());
- Collection* coll = ctx.ctx().db()->getCollection(&_txn, ns());
+ Collection* coll = ctx.getCollection();
// Get the DiskLocs that would be returned by an in-order scan.
vector<DiskLoc> locs;
diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp
index 1d72dccca7d..ecc406b1be1 100644
--- a/src/mongo/dbtests/query_stage_subplan.cpp
+++ b/src/mongo/dbtests/query_stage_subplan.cpp
@@ -86,7 +86,7 @@ namespace QueryStageSubplan {
CanonicalQuery* cq;
ASSERT_OK(CanonicalQuery::canonicalize(ns(), query, &cq));
- Collection* collection = ctx.ctx().db()->getCollection(&_txn, ns());
+ Collection* collection = ctx.getCollection();
// Get planner params.
QueryPlannerParams plannerParams;
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index f092eb926a4..02e3bcaebd0 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -250,15 +250,13 @@ namespace QueryTests {
{
// Check internal server handoff to getmore.
- Lock::DBWrite lk(_txn.lockState(), ns);
- WriteUnitOfWork wunit(&_txn);
- Client::Context ctx(&_txn, ns );
- ClientCursorPin clientCursor( ctx.db()->getCollection(&_txn, ns), cursorId );
+ Client::WriteContext ctx(&_txn, ns);
+ ClientCursorPin clientCursor( ctx.getCollection(), cursorId );
// pq doesn't exist if it's a runner inside of the clientcursor.
// ASSERT( clientCursor.c()->pq );
// ASSERT_EQUALS( 2, clientCursor.c()->pq->getNumToReturn() );
ASSERT_EQUALS( 2, clientCursor.c()->pos() );
- wunit.commit();
+ ctx.commit();
}
cursor = _client.getMore( ns, cursorId );
@@ -596,7 +594,7 @@ namespace QueryTests {
}
void run() {
const char *ns = "unittests.querytests.OplogReplaySlaveReadTill";
- Lock::DBWrite lk(_txn.lockState(), ns);
+ Lock::DBLock lk(_txn.lockState(), "unittests", newlm::MODE_X);
WriteUnitOfWork wunit(&_txn);
Client::Context ctx(&_txn, ns );
@@ -1164,14 +1162,15 @@ namespace QueryTests {
}
void run() {
string err;
- Client::WriteContext ctx(&_txn, "unittests" );
+ Client::WriteContext ctx(&_txn, ns());
- // note that extents are always at least 4KB now - so this will get rounded up a bit.
- ASSERT( userCreateNS( &_txn, ctx.ctx().db(), ns(),
- fromjson( "{ capped : true, size : 2000 }" ), false ).isOK() );
- for ( int i=0; i<200; i++ ) {
+ // note that extents are always at least 4KB now - so this will get rounded up
+ // a bit.
+ ASSERT( userCreateNS(&_txn, ctx.db(), ns(),
+ fromjson( "{ capped : true, size : 2000 }" ), false ).isOK() );
+ for (int i = 0; i < 200; i++) {
insertNext();
- ASSERT( count() < 90 );
+ ASSERT(count() < 90);
}
int a = count();
@@ -1214,7 +1213,7 @@ namespace QueryTests {
}
void run() {
- Client::WriteContext ctx(&_txn, "unittests" );
+ Client::WriteContext ctx(&_txn, ns());
for ( int i=0; i<50; i++ ) {
insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
@@ -1224,8 +1223,8 @@ namespace QueryTests {
ASSERT_EQUALS( 50 , count() );
BSONObj res;
- ASSERT( Helpers::findOne( &_txn, ctx.ctx().db()->getCollection( &_txn, ns() ),
- BSON( "_id" << 20 ) , res , true ) );
+ ASSERT( Helpers::findOne(&_txn, ctx.getCollection(),
+ BSON("_id" << 20) , res , true));
ASSERT_EQUALS( 40 , res["x"].numberInt() );
ASSERT( Helpers::findById( &_txn, ctx.ctx().db(), ns() , BSON( "_id" << 20 ) , res ) );
@@ -1241,15 +1240,15 @@ namespace QueryTests {
{
Timer t;
for ( int i=0; i<n; i++ ) {
- ASSERT( Helpers::findOne( &_txn, ctx.ctx().db()->getCollection(&_txn, ns()),
- BSON( "_id" << 20 ), res, true ) );
+ ASSERT( Helpers::findOne(&_txn, ctx.getCollection(),
+ BSON( "_id" << 20 ), res, true ) );
}
slow = t.micros();
}
{
Timer t;
for ( int i=0; i<n; i++ ) {
- ASSERT( Helpers::findById( &_txn, ctx.ctx().db(), ns() , BSON( "_id" << 20 ) , res ) );
+ ASSERT( Helpers::findById(&_txn, ctx.db(), ns() , BSON( "_id" << 20 ) , res ) );
}
fast = t.micros();
}
@@ -1266,7 +1265,7 @@ namespace QueryTests {
}
void run() {
- Client::WriteContext ctx(&_txn, "unittests" );
+ Client::WriteContext ctx(&_txn, ns());
for ( int i=0; i<1000; i++ ) {
insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
@@ -1278,7 +1277,7 @@ namespace QueryTests {
BSONObj res;
for ( int i=0; i<1000; i++ ) {
- bool found = Helpers::findById( &_txn, ctx.ctx().db(), ns() , BSON( "_id" << i ) , res );
+ bool found = Helpers::findById( &_txn, ctx.db(), ns() , BSON( "_id" << i ) , res );
ASSERT_EQUALS( i % 2 , int(found) );
}
@@ -1290,7 +1289,7 @@ namespace QueryTests {
}
void run() {
- Client::WriteContext ctx(&_txn, "unittests" );
+ Client::WriteContext ctx(&_txn, ns());
for ( int i=0; i<1000; i++ ) {
insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
@@ -1406,12 +1405,12 @@ namespace QueryTests {
public:
CollectionInternalBase( const char *nsLeaf ) :
CollectionBase( nsLeaf ),
- _lk(_txn.lockState(), ns() ),
+ _lk(_txn.lockState(), "unittests", newlm::MODE_X),
_ctx(&_txn, ns()) {
}
private:
- Lock::DBWrite _lk;
+ Lock::DBLock _lk;
Client::Context _ctx;
};
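
In the querytests.cpp fixture above, a Lock::DBWrite keyed by the full namespace becomes a Lock::DBLock that names the database and states the mode (newlm::MODE_X) explicitly. The following is a minimal standalone sketch of that idea, not MongoDB's lock manager; LockMode, LockManager, and DBLock here are hypothetical stand-ins built on std::shared_mutex (compile with -std=c++17).

#include <iostream>
#include <shared_mutex>
#include <string>

enum class LockMode { kShared, kExclusive };  // loosely analogous to MODE_S / MODE_X

class LockManager {
public:
    std::shared_mutex& mutexFor(const std::string& db) {
        // One global mutex keeps the sketch small; a real manager would key
        // resources per database (and per collection).
        (void)db;
        return _mutex;
    }
private:
    std::shared_mutex _mutex;
};

// RAII guard: shared for reads, exclusive for writes, released on destruction.
class DBLock {
public:
    DBLock(LockManager* mgr, const std::string& db, LockMode mode)
        : _mutex(mgr->mutexFor(db)), _mode(mode) {
        if (_mode == LockMode::kExclusive)
            _mutex.lock();
        else
            _mutex.lock_shared();
    }
    ~DBLock() {
        if (_mode == LockMode::kExclusive)
            _mutex.unlock();
        else
            _mutex.unlock_shared();
    }
    DBLock(const DBLock&) = delete;
    DBLock& operator=(const DBLock&) = delete;

private:
    std::shared_mutex& _mutex;
    LockMode _mode;
};

int main() {
    LockManager mgr;
    {
        // Mirrors the spirit of Lock::DBLock lk(lockState, "unittests", MODE_X):
        // the caller names the database and the mode, rather than the lock type
        // implying write intent.
        DBLock lk(&mgr, "unittests", LockMode::kExclusive);
        std::cout << "holding unittests in X mode\n";
    }  // lock released here
    return 0;
}
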
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index 47ec740bd30..62ab464fc98 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -187,7 +187,7 @@ namespace ThreadedTests {
Lock::DBRead x(&lockState, "local");
}
{
- Lock::DBWrite x(&lockState, "local");
+ Lock::DBLock x(&lockState, "local", newlm::MODE_X);
// No actual writing here, so no WriteUnitOfWork
if( sometimes ) {
Lock::TempRelease t(&lockState);
@@ -199,11 +199,11 @@ namespace ThreadedTests {
}
{
- Lock::DBWrite x(&lockState, "admin");
+ Lock::DBLock x(&lockState, "admin", newlm::MODE_X);
}
}
else if( q == 3 ) {
- Lock::DBWrite x(&lockState, "foo");
+ Lock::DBLock x(&lockState, "foo", newlm::MODE_X);
Lock::DBRead y(&lockState, "admin");
}
else if( q == 4 ) {
@@ -211,7 +211,7 @@ namespace ThreadedTests {
Lock::DBRead y(&lockState, "admin");
}
else {
- Lock::DBWrite w(&lockState, "foo");
+ Lock::DBLock w(&lockState, "foo", newlm::MODE_X);
{
Lock::TempRelease t(&lockState);
diff --git a/src/mongo/s/d_merge.cpp b/src/mongo/s/d_merge.cpp
index ac2f415a047..bdfcda69b6b 100644
--- a/src/mongo/s/d_merge.cpp
+++ b/src/mongo/s/d_merge.cpp
@@ -293,7 +293,7 @@ namespace mongo {
//
{
- Lock::DBWrite writeLk(txn->lockState(), nss.ns());
+ Lock::DBLock writeLk(txn->lockState(), nss.db(), newlm::MODE_X);
shardingState.mergeChunks(txn, nss.ns(), minKey, maxKey, mergeVersion);
}
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 434dbce863d..d20b8cd2a61 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -1242,7 +1242,7 @@ namespace mongo {
myVersion.incMajor();
{
- Lock::DBWrite lk(txn->lockState(), ns );
+ Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
verify( myVersion > shardingState.getVersion( ns ) );
// bump the metadata's version up and "forget" about the chunk being moved
@@ -1656,7 +1656,7 @@ namespace mongo {
if ( getState() != DONE ) {
// Unprotect the range if needed/possible on unsuccessful TO migration
- Lock::DBWrite lk(txn->lockState(), ns);
+ Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
string errMsg;
if (!shardingState.forgetPending(txn, ns, min, max, epoch, &errMsg)) {
warning() << errMsg << endl;
@@ -1714,7 +1714,7 @@ namespace mongo {
indexSpecs.insert(indexSpecs.begin(), indexes.begin(), indexes.end());
}
- Lock::DBWrite lk(txn->lockState(), ns);
+ Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
Client::Context ctx(txn, ns);
Database* db = ctx.db();
Collection* collection = db->getCollection( txn, ns );
@@ -1799,7 +1799,7 @@ namespace mongo {
{
// Protect the range by noting that we're now starting a migration to it
- Lock::DBWrite lk(txn->lockState(), ns);
+ Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
if (!shardingState.notePending(txn, ns, min, max, epoch, &errmsg)) {
warning() << errmsg << endl;
setState(FAIL);
@@ -2099,7 +2099,7 @@ namespace mongo {
}
}
- Lock::DBWrite lk(txn->lockState(), ns);
+ Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
Client::Context ctx(txn, ns);
if (serverGlobalParams.moveParanoia) {
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index 344e0bd2698..37c535e4eb9 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -787,7 +787,7 @@ namespace mongo {
//
{
- Lock::DBWrite writeLk(txn->lockState(), ns);
+ Lock::DBLock writeLk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
// NOTE: The newShardVersion resulting from this split is higher than any
// other chunk version, so it's also implicitly the newCollVersion
diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp
index 93b3a19b62f..dcaa8c6a074 100644
--- a/src/mongo/s/d_state.cpp
+++ b/src/mongo/s/d_state.cpp
@@ -595,7 +595,7 @@ namespace mongo {
{
// DBLock needed since we're now potentially changing the metadata, and don't want
// reads/writes to be ongoing.
- Lock::DBWrite writeLk(txn->lockState(), ns );
+ Lock::DBLock writeLk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
//
// Get the metadata now that the load has completed
@@ -1298,7 +1298,7 @@ namespace mongo {
}
bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
Client::Context ctx(txn, dbname);
shardingState.appendInfo( result );
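
The sharding hunks above (d_merge.cpp, d_migrate.cpp, d_split.cpp, d_state.cpp) all derive the database name from a full namespace via nsToDatabaseSubstring(ns) before taking the database-level X lock. As an illustration only, the tiny standalone helper below mimics that extraction; databaseOf is a hypothetical name, not MongoDB's function.

#include <iostream>
#include <string>

// Returns the portion of a "db.collection" namespace before the first '.',
// or the whole string if there is no '.' (the namespace is just a database).
std::string databaseOf(const std::string& ns) {
    const std::string::size_type dot = ns.find('.');
    return dot == std::string::npos ? ns : ns.substr(0, dot);
}

int main() {
    std::cout << databaseOf("unittests.querytests.FindById") << "\n";  // prints: unittests
    std::cout << databaseOf("admin") << "\n";                          // prints: admin
    return 0;
}
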