author     Geert Bosch <geert@mongodb.com>  2014-09-25 14:41:01 -0400
committer  Geert Bosch <geert@mongodb.com>  2014-09-25 18:12:34 -0400
commit     962f959a09b63aa0482d7e0c9bad89363d1e1194 (patch)
tree       975a9ff722ec19992c15feb9d3fd1587278afc3c
parent     77b00970997d13d0758c745e5a94fc79982d4401 (diff)
download   mongo-962f959a09b63aa0482d7e0c9bad89363d1e1194.tar.gz
SERVER-14668: Replace uses of DBWrite lock with DBLock
Make the lock mode explicit in preparation for moving some of these locks to intent modes, and lock databases by the proper database name instead of by the full namespace string.
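In short, call sites that previously took an implicitly exclusive write lock keyed by a namespace now take a DBLock on the database with an explicit mode. A minimal before/after sketch of the pattern (the namespace "test.foo" is illustrative only, not taken from this commit):

    // Old API: DBWrite accepted a database name or full namespace and always locked MODE_X.
    Lock::DBWrite lk(txn->lockState(), "test.foo");

    // New API: DBLock takes the database name plus an explicit lock mode.
    Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring("test.foo"), newlm::MODE_X);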
-rw-r--r--  src/mongo/db/client.cpp                                        2
-rw-r--r--  src/mongo/db/client.h                                          2
-rw-r--r--  src/mongo/db/cloner.cpp                                        12
-rw-r--r--  src/mongo/db/commands/apply_ops.cpp                            26
-rw-r--r--  src/mongo/db/commands/clone.cpp                                2
-rw-r--r--  src/mongo/db/commands/collection_to_capped.cpp                 2
-rw-r--r--  src/mongo/db/commands/compact.cpp                              4
-rw-r--r--  src/mongo/db/commands/copydb.cpp                               15
-rw-r--r--  src/mongo/db/commands/cpuprofile.cpp                           4
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp                       2
-rw-r--r--  src/mongo/db/commands/drop_indexes.cpp                         4
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp                      6
-rw-r--r--  src/mongo/db/commands/mr.cpp                                   16
-rw-r--r--  src/mongo/db/commands/test_commands.cpp                        2
-rw-r--r--  src/mongo/db/commands/write_commands/batch_executor.cpp       12
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.cpp                     5
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.h                       19
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_test.cpp                4
-rw-r--r--  src/mongo/db/dbcommands.cpp                                    10
-rw-r--r--  src/mongo/db/exec/stagedebug_cmd.cpp                           2
-rw-r--r--  src/mongo/db/index_rebuilder.cpp                               2
-rw-r--r--  src/mongo/db/instance.cpp                                      6
-rw-r--r--  src/mongo/db/introspect.cpp                                    9
-rw-r--r--  src/mongo/db/repl/minvalid.cpp                                 6
-rw-r--r--  src/mongo/db/repl/oplog.cpp                                    6
-rw-r--r--  src/mongo/db/repl/repl_coordinator_external_state_impl.cpp    6
-rw-r--r--  src/mongo/db/repl/rs_initialsync.cpp                           2
-rw-r--r--  src/mongo/db/repl/sync_tail.cpp                                4
-rw-r--r--  src/mongo/dbtests/counttests.cpp                               5
-rw-r--r--  src/mongo/dbtests/dbhelper_tests.cpp                           2
-rw-r--r--  src/mongo/dbtests/namespacetests.cpp                           4
-rw-r--r--  src/mongo/dbtests/querytests.cpp                               8
-rw-r--r--  src/mongo/dbtests/threadedtests.cpp                            8
-rw-r--r--  src/mongo/s/d_merge.cpp                                        2
-rw-r--r--  src/mongo/s/d_migrate.cpp                                      10
-rw-r--r--  src/mongo/s/d_split.cpp                                        10
-rw-r--r--  src/mongo/s/d_state.cpp                                        4
-rw-r--r--  src/mongo/s/metadata_loader.h                                  4
38 files changed, 123 insertions, 126 deletions
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index 9c927222d67..779f21fe0e9 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -239,7 +239,7 @@ namespace mongo {
Client::WriteContext::WriteContext(
OperationContext* opCtx, const std::string& ns, bool doVersion)
- : _lk(opCtx->lockState(), ns),
+ : _lk(opCtx->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X),
_wunit(opCtx),
_c(opCtx, ns, doVersion) {
}
diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h
index 71778e9f60c..31a1b272f20 100644
--- a/src/mongo/db/client.h
+++ b/src/mongo/db/client.h
@@ -204,7 +204,7 @@ namespace mongo {
Context& ctx() { return _c; }
private:
- Lock::DBWrite _lk;
+ Lock::DBLock _lk;
WriteUnitOfWork _wunit;
Context _c;
};
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 1686cfe2c10..67c76b071e9 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -308,15 +308,15 @@ namespace mongo {
bool logForRepl) {
const NamespaceString nss(ns);
- Lock::DBWrite dbWrite(txn->lockState(), nss.db());
+ const string dbname = nss.db().toString();
- const string dbName = nss.db().toString();
+ Lock::DBLock dbWrite(txn->lockState(), dbname, newlm::MODE_X);
bool unused;
- Database* db = dbHolder().getOrCreate(txn, dbName, unused);
+ Database* db = dbHolder().getOrCreate(txn, dbname, unused);
// config
- string temp = dbName + ".system.namespaces";
+ string temp = dbname + ".system.namespaces";
BSONObj config = _conn->findOne(temp , BSON("name" << ns));
if (config["options"].isABSONObj()) {
WriteUnitOfWork wunit(txn);
@@ -329,7 +329,7 @@ namespace mongo {
}
// main data
- copy(txn, dbName,
+ copy(txn, dbname,
nss, nss,
logForRepl, false, true, mayYield, mayBeInterrupted,
Query(query).snapshot());
@@ -340,7 +340,7 @@ namespace mongo {
}
// indexes
- copyIndexes(txn, dbName,
+ copyIndexes(txn, dbname,
NamespaceString(ns), NamespaceString(ns),
logForRepl, false, true, mayYield,
mayBeInterrupted);
diff --git a/src/mongo/db/commands/apply_ops.cpp b/src/mongo/db/commands/apply_ops.cpp
index c0a1f398e56..cc760412120 100644
--- a/src/mongo/db/commands/apply_ops.cpp
+++ b/src/mongo/db/commands/apply_ops.cpp
@@ -126,26 +126,26 @@ namespace mongo {
string ns = temp["ns"].String();
- // Run operations under a nested lock as a hack to prevent them from yielding.
+ // Run operations under a nested lock as a hack to prevent yielding.
//
- // The list of operations is supposed to be applied atomically; yielding would break
- // atomicity by allowing an interruption or a shutdown to occur after only some
- // operations are applied. We are already locked globally at this point, so taking
- // a DBWrite on the namespace creates a nested lock, and yields are disallowed for
- // operations that hold a nested lock.
+ // The list of operations is supposed to be applied atomically; yielding
+ // would break atomicity by allowing an interruption or a shutdown to occur
+ // after only some operations are applied. We are already locked globally
+ // at this point, so taking a DBLock on the namespace creates a nested lock,
+ // and yields are disallowed for operations that hold a nested lock.
//
- // We do not have a wrapping WriteUnitOfWork so it is possible for a journal commit
- // to happen with a subset of ops applied.
+ // We do not have a wrapping WriteUnitOfWork so it is possible for a journal
+ // commit to happen with a subset of ops applied.
// TODO figure out what to do about this.
- Lock::DBWrite lk(txn->lockState(), ns);
+ Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
invariant(txn->lockState()->isRecursive());
Client::Context ctx(txn, ns);
bool failed = repl::applyOperation_inlock(txn,
- ctx.db(),
- temp,
- false,
- alwaysUpsert);
+ ctx.db(),
+ temp,
+ false,
+ alwaysUpsert);
ab.append(!failed);
if ( failed )
errors++;
diff --git a/src/mongo/db/commands/clone.cpp b/src/mongo/db/commands/clone.cpp
index a25deba9e32..0b73001e559 100644
--- a/src/mongo/db/commands/clone.cpp
+++ b/src/mongo/db/commands/clone.cpp
@@ -115,7 +115,7 @@ namespace mongo {
set<string> clonedColls;
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
Cloner cloner;
bool rval = cloner.go(txn, dbname, from, opts, &clonedColls, errmsg);
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 2a4f60cee31..24439e82b9b 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -161,7 +161,7 @@ namespace mongo {
return false;
}
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
Client::Context ctx(txn, dbname);
Status status = cloneCollectionAsCapped( txn, ctx.db(), from, to, size, temp, true );
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index 5ed0ac16aca..8ffac0bff38 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -97,7 +97,7 @@ namespace mongo {
return false;
}
- NamespaceString ns(db,coll);
+ NamespaceString ns(db, coll);
if ( !ns.isNormal() ) {
errmsg = "bad namespace name";
return false;
@@ -144,7 +144,7 @@ namespace mongo {
compactOptions.validateDocuments = cmdObj["validate"].trueValue();
- Lock::DBWrite lk(txn->lockState(), ns.ns());
+ Lock::DBLock lk(txn->lockState(), db, newlm::MODE_X);
BackgroundOperation::assertNoBgOpInProgForNs(ns.ns());
Client::Context ctx(txn, ns);
diff --git a/src/mongo/db/commands/copydb.cpp b/src/mongo/db/commands/copydb.cpp
index 208d792c59e..58f733e02b0 100644
--- a/src/mongo/db/commands/copydb.cpp
+++ b/src/mongo/db/commands/copydb.cpp
@@ -182,15 +182,14 @@ namespace mongo {
cloner.setConnection(conn);
}
-
- // SERVER-4328 todo lock just the two db's not everything for the fromself case
- scoped_ptr<Lock::ScopedLock> lk( fromSelf ?
- static_cast<Lock::ScopedLock*>(new Lock::GlobalWrite(txn->lockState())) :
- static_cast<Lock::ScopedLock*>(new Lock::DBWrite(txn->lockState(), todb)));
- if (!cloner.go(txn, todb, fromhost, cloneOptions, NULL, errmsg )) {
- return false;
+ if (fromSelf) {
+ // SERVER-4328 todo lock just the two db's not everything for the fromself case
+ Lock::GlobalWrite lk(txn->lockState());
+ return cloner.go(txn, todb, fromhost, cloneOptions, NULL, errmsg);
}
- return true;
+
+ Lock::DBLock lk (txn->lockState(), todb, newlm::MODE_X);
+ return cloner.go(txn, todb, fromhost, cloneOptions, NULL, errmsg);
}
} cmdCopyDB;
diff --git a/src/mongo/db/commands/cpuprofile.cpp b/src/mongo/db/commands/cpuprofile.cpp
index 884e43d0c29..b1c81ca3584 100644
--- a/src/mongo/db/commands/cpuprofile.cpp
+++ b/src/mongo/db/commands/cpuprofile.cpp
@@ -133,7 +133,7 @@ namespace mongo {
std::string &errmsg,
BSONObjBuilder &result,
bool fromRepl ) {
- Lock::DBWrite dbXLock(txn->lockState(), db);
+ Lock::DBLock dbXLock(txn->lockState(), db, newlm::MODE_X);
// The lock here is just to prevent concurrency, nothing will write.
Client::Context ctx(txn, db);
@@ -152,7 +152,7 @@ namespace mongo {
std::string &errmsg,
BSONObjBuilder &result,
bool fromRepl ) {
- Lock::DBWrite dbXLock(txn->lockState(), db);
+ Lock::DBLock dbXLock(txn->lockState(), db, newlm::MODE_X);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, db);
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 58791dea990..2276c15c67d 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -134,7 +134,7 @@ namespace mongo {
// now we know we have to create index(es)
// Note: createIndexes command does not currently respect shard versioning.
- Lock::DBWrite lk(txn->lockState(), ns.ns());
+ Lock::DBLock lk(txn->lockState(), dbname, newlm::MODE_X);
Client::Context ctx(txn, ns.ns(), false /* doVersion */ );
Database* db = ctx.db();
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index ba93d95bc54..122ed26cc9a 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -103,7 +103,7 @@ namespace mongo {
CmdDropIndexes() : Command("dropIndexes", false, "deleteIndexes") { }
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& anObjBuilder, bool fromRepl) {
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
WriteUnitOfWork wunit(txn);
bool ok = wrappedRun(txn, dbname, jsobj, errmsg, anObjBuilder);
if (!ok) {
@@ -239,7 +239,7 @@ namespace mongo {
LOG(0) << "CMD: reIndex " << toDeleteNs << endl;
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
Client::Context ctx(txn, toDeleteNs);
Collection* collection = ctx.db()->getCollection( txn, toDeleteNs );
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 4477e62bec0..0e05feb5a71 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -97,7 +97,7 @@ namespace mongo {
return false;
}
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
Client::Context ctx(txn, ns);
return runNoDirectClient( txn, ns ,
@@ -137,7 +137,7 @@ namespace mongo {
BSONObjBuilder& result,
string& errmsg) {
- Lock::DBWrite lk(txn->lockState(), ns);
+ Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
WriteUnitOfWork wunit(txn);
Client::Context cx(txn, ns);
@@ -335,7 +335,7 @@ namespace mongo {
}
}
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, ns);
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index e58e001a221..81ed57a285d 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -332,7 +332,9 @@ namespace mongo {
if (_useIncremental) {
// We don't want to log the deletion of incLong as it isn't replicated. While
// harmless, this would lead to a scary looking warning on the secondaries.
- Lock::DBWrite lk(_txn->lockState(), _config.incLong);
+ Lock::DBLock lk(_txn->lockState(),
+ nsToDatabaseSubstring(_config.incLong),
+ newlm::MODE_X);
if (Database* db = dbHolder().get(_txn, _config.incLong)) {
WriteUnitOfWork wunit(_txn);
db->dropCollection(_txn, _config.incLong);
@@ -591,9 +593,11 @@ namespace mongo {
op->setMessage("m/r: merge post processing",
"M/R Merge Post Processing Progress",
_safeCount(_db, _config.tempNamespace, BSONObj()));
- auto_ptr<DBClientCursor> cursor = _db.query( _config.tempNamespace , BSONObj() );
- while ( cursor->more() ) {
- Lock::DBWrite lock(_txn->lockState(), _config.outputOptions.finalNamespace);
+ auto_ptr<DBClientCursor> cursor = _db.query(_config.tempNamespace , BSONObj());
+ while (cursor->more()) {
+ Lock::DBLock lock(_txn->lockState(),
+ nsToDatabaseSubstring(_config.outputOptions.finalNamespace),
+ newlm::MODE_X);
WriteUnitOfWork wunit(_txn);
BSONObj o = cursor->nextSafe();
Helpers::upsert( _txn, _config.outputOptions.finalNamespace , o );
@@ -1110,7 +1114,9 @@ namespace mongo {
if ( ! _onDisk )
return;
- Lock::DBWrite kl(_txn->lockState(), _config.incLong);
+ Lock::DBLock kl(_txn->lockState(),
+ nsToDatabaseSubstring(_config.incLong),
+ newlm::MODE_X);
WriteUnitOfWork wunit(_txn);
for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); i++ ) {
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 316d763babf..204be07b75b 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -64,7 +64,7 @@ namespace mongo {
string ns = dbname + "." + coll;
BSONObj obj = cmdObj[ "obj" ].embeddedObjectUserCheck();
- Lock::DBWrite lk(txn->lockState(), ns);
+ Lock::DBLock lk(txn->lockState(), dbname, newlm::MODE_X);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, ns );
Database* db = ctx.db();
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index 8b21578ce89..f5e372359b0 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -696,7 +696,7 @@ namespace mongo {
/**
* Gets the lock-holding object. Only valid if hasLock().
*/
- Lock::DBWrite& getLock() { return *_writeLock; }
+ Lock::DBLock& getLock() { return *_writeLock; }
/**
* Gets the target collection for the batch operation. Value is undefined
@@ -721,7 +721,7 @@ namespace mongo {
bool _lockAndCheckImpl(WriteOpResult* result);
// Guard object for the write lock on the target database.
- scoped_ptr<Lock::DBWrite> _writeLock;
+ scoped_ptr<Lock::DBLock> _writeLock;
// Context object on the target database. Must appear after writeLock, so that it is
// destroyed in proper order.
@@ -919,7 +919,9 @@ namespace mongo {
}
invariant(!_context.get());
- _writeLock.reset(new Lock::DBWrite(txn->lockState(), request->getNS()));
+ _writeLock.reset(new Lock::DBLock(txn->lockState(),
+ nsToDatabase(request->getNS()),
+ newlm::MODE_X));
if (!checkIsMasterForDatabase(request->getNS(), result)) {
return false;
}
@@ -1124,7 +1126,7 @@ namespace mongo {
}
///////////////////////////////////////////
- Lock::DBWrite writeLock(txn->lockState(), nsString.ns());
+ Lock::DBLock writeLock(txn->lockState(), nsString.db(), newlm::MODE_X);
///////////////////////////////////////////
if (!checkShardVersion(txn, &shardingState, *updateItem.getRequest(), result))
@@ -1179,7 +1181,7 @@ namespace mongo {
}
///////////////////////////////////////////
- Lock::DBWrite writeLock(txn->lockState(), nss.ns());
+ Lock::DBLock writeLock(txn->lockState(), nss.db(), newlm::MODE_X);
///////////////////////////////////////////
// Check version once we're locked
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index 15e108a53f2..6701159a420 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -313,7 +313,7 @@ namespace {
if (supportsDocLocking()) {
_lockState->lock(_id, mode);
}
- else if (isRead) {
+ else {
_lockState->lock(_id, isRead ? newlm::MODE_S : newlm::MODE_X);
}
}
@@ -322,9 +322,6 @@ namespace {
_lockState->unlock(_id);
}
- Lock::DBWrite::DBWrite(Locker* lockState, const StringData& dbOrNs) :
- DBLock(lockState, nsToDatabaseSubstring(dbOrNs), newlm::MODE_X) { }
-
Lock::DBRead::DBRead(Locker* lockState, const StringData& dbOrNs) :
DBLock(lockState, nsToDatabaseSubstring(dbOrNs), newlm::MODE_S) { }
diff --git a/src/mongo/db/concurrency/d_concurrency.h b/src/mongo/db/concurrency/d_concurrency.h
index d4f11bb28d4..a55eaa077f3 100644
--- a/src/mongo/db/concurrency/d_concurrency.h
+++ b/src/mongo/db/concurrency/d_concurrency.h
@@ -196,9 +196,10 @@ namespace mongo {
* MODE_S: shared read access to the collection, blocking any writers
* MODE_X: exclusive access to the collection, blocking all other readers and writers
*
- * An appropriate DBLock must already be held before locking a collection.
- * For storage engines that do not support document-level locking, MODE_IS will be
- * upgraded to MODE_S and MODE_IX will be upgraded to MODE_X.
+ * An appropriate DBLock must already be held before locking a collection: it is an error,
+ * checked with a dassert(), to not have a suitable database lock before locking the
+ * collection. For storage engines that do not support document-level locking, MODE_IS
+ * will be upgraded to MODE_S and MODE_IX will be upgraded to MODE_X.
*/
class CollectionLock : boost::noncopyable {
public:
@@ -210,18 +211,6 @@ namespace mongo {
};
/**
- * Exclusive database lock -- DEPRECATED, please transition to DBLock and collection locks
- *
- * Allows exclusive write access to the given database, blocking any other access.
- * Allows further (recursive) acquisition of database locks for this database in any mode.
- * Also acquires the global lock in intent-exclusive (IX) mode.
- */
- class DBWrite : public DBLock {
- public:
- DBWrite(Locker* lockState, const StringData& dbOrNs);
- };
-
- /**
* Shared database lock -- DEPRECATED, please transition to DBLock and collection locks
*
* Allows concurrent read access to the given database, blocking any writers.
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 3391eab52a7..ddc59b17bd7 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -133,10 +133,10 @@ namespace mongo {
ASSERT(ls.getLockMode(resIdDb) == newlm::MODE_S);
}
- TEST(DConcurrency, DBWriteTakesX) {
+ TEST(DConcurrency, DBLockTakesX) {
LockState ls;
- Lock::DBWrite dbWrite(&ls, "db");
+ Lock::DBLock dbWrite(&ls, "db", newlm::MODE_X);
const newlm::ResourceId resIdDb(newlm::RESOURCE_DATABASE, string("db"));
ASSERT(ls.getLockMode(resIdDb) == newlm::MODE_X);
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 2cd9cc893f5..134d0eeb16e 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -349,7 +349,7 @@ namespace mongo {
// Needs to be locked exclusively, because creates the system.profile collection
// in the local database.
//
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, dbname);
@@ -405,7 +405,7 @@ namespace mongo {
// This doesn't look like it requires exclusive DB lock, because it uses its own diag
// locking, but originally the lock was set to be WRITE, so preserving the behaviour.
//
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
Client::Context ctx(txn, dbname);
int was = _diaglog.setLevel( cmdObj.firstElement().numberInt() );
@@ -461,7 +461,7 @@ namespace mongo {
return false;
}
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, nsToDrop);
Database* db = ctx.db();
@@ -561,7 +561,7 @@ namespace mongo {
!options["capped"].trueValue() || options["size"].isNumber() ||
options.hasField("$nExtents"));
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, ns);
@@ -988,7 +988,7 @@ namespace mongo {
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
const string ns = dbname + "." + jsobj.firstElement().valuestr();
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, ns );
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 0f804e890ee..9a86fe82da8 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -121,7 +121,7 @@ namespace mongo {
// TODO A write lock is currently taken here to accommodate stages that perform writes
// (e.g. DeleteStage). This should be changed to use a read lock for read-only
// execution trees.
- Lock::DBWrite lk(txn->lockState(), dbname);
+ Lock::DBLock lk(txn->lockState(), dbname, newlm::MODE_X);
Client::Context ctx(txn, dbname);
// Make sure the collection is valid.
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index cbde00dbf04..8dd345de946 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -64,7 +64,7 @@ namespace {
// This write lock is held throughout the index building process
// for this namespace.
- Lock::DBWrite lk(txn->lockState(), ns);
+ Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
Client::Context ctx(txn, ns);
Collection* collection = ctx.db()->getCollection(txn, ns);
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index dd37aaf5b1e..2d8b189edc3 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -621,7 +621,7 @@ namespace mongo {
UpdateExecutor executor(&request, &op.debug());
uassertStatusOK(executor.prepare());
- Lock::DBWrite lk(txn->lockState(), ns.ns());
+ Lock::DBLock lk(txn->lockState(), ns.db(), newlm::MODE_X);
Client::Context ctx(txn, ns );
UpdateResult res = executor.execute(ctx.db());
@@ -655,7 +655,7 @@ namespace mongo {
DeleteExecutor executor(&request);
uassertStatusOK(executor.prepare());
- Lock::DBWrite lk(txn->lockState(), ns.ns());
+ Lock::DBLock lk(txn->lockState(), ns.db(), newlm::MODE_X);
Client::Context ctx(txn, ns);
long long n = executor.execute(ctx.db());
@@ -914,7 +914,7 @@ namespace mongo {
uassertStatusOK(status);
}
- Lock::DBWrite lk(txn->lockState(), ns);
+ Lock::DBLock lk(txn->lockState(), nsString.db(), newlm::MODE_X);
// CONCURRENCY TODO: is being read locked in big log sufficient here?
// writelock is used to synchronize stepdowns w/ writes
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index c2f11b34e6d..c84a1caafa0 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -135,10 +135,11 @@ namespace {
BufBuilder profileBufBuilder(1024);
try {
- // NOTE: It's kind of weird that we lock the op's namespace, but have to for now since
- // we're sometimes inside the lock already
- Lock::DBWrite lk(txn->lockState(), currentOp.getNS() );
- if (dbHolder().get(txn, nsToDatabase(currentOp.getNS())) != NULL) {
+ // NOTE: It's kind of weird that we lock the op's namespace, but have to for now
+ // since we're sometimes inside the lock already
+ const string dbname(nsToDatabase(currentOp.getNS()));
+ Lock::DBLock lk(txn->lockState(), dbname, newlm::MODE_X);
+ if (dbHolder().get(txn, dbname) != NULL) {
// We are ok with the profiling happening in a different WUOW from the actual op.
WriteUnitOfWork wunit(txn);
Client::Context cx(txn, currentOp.getNS(), false);
diff --git a/src/mongo/db/repl/minvalid.cpp b/src/mongo/db/repl/minvalid.cpp
index f2b0f4c189e..30f1d17c18d 100644
--- a/src/mongo/db/repl/minvalid.cpp
+++ b/src/mongo/db/repl/minvalid.cpp
@@ -49,14 +49,14 @@ namespace {
} // namespace
void clearInitialSyncFlag(OperationContext* txn) {
- Lock::DBWrite lk(txn->lockState(), "local");
+ Lock::DBLock lk(txn->lockState(), "local", newlm::MODE_X);
WriteUnitOfWork wunit(txn);
Helpers::putSingleton(txn, minvalidNS, BSON("$unset" << initialSyncFlag));
wunit.commit();
}
void setInitialSyncFlag(OperationContext* txn) {
- Lock::DBWrite lk(txn->lockState(), "local");
+ Lock::DBLock lk(txn->lockState(), "local", newlm::MODE_X);
WriteUnitOfWork wunit(txn);
Helpers::putSingleton(txn, minvalidNS, BSON("$set" << initialSyncFlag));
wunit.commit();
@@ -73,7 +73,7 @@ namespace {
}
void setMinValid(OperationContext* ctx, OpTime ts) {
- Lock::DBWrite lk(ctx->lockState(), "local");
+ Lock::DBLock lk(ctx->lockState(), "local", newlm::MODE_X);
WriteUnitOfWork wunit(ctx);
Helpers::putSingleton(ctx, minvalidNS, BSON("$set" << BSON("ts" << ts)));
wunit.commit();
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 3f54075013c..f2a9eb30a64 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -120,7 +120,7 @@ namespace repl {
todo : make _logOpRS() call this so we don't repeat ourself?
*/
OpTime _logOpObjRS(OperationContext* txn, const BSONObj& op) {
- Lock::DBWrite lk(txn->lockState(), "local");
+ Lock::DBLock lk(txn->lockState(), "local", newlm::MODE_X);
// XXX soon this needs to be part of an outer WUOW not its own.
// We can't do this yet due to locking limitations.
WriteUnitOfWork wunit(txn);
@@ -244,7 +244,7 @@ namespace repl {
BSONObj *o2,
bool *bb,
bool fromMigrate ) {
- Lock::DBWrite lk1(txn->lockState(), "local");
+ Lock::DBLock lk1(txn->lockState(), "local", newlm::MODE_X);
WriteUnitOfWork wunit(txn);
if ( strncmp(ns, "local.", 6) == 0 ) {
@@ -323,7 +323,7 @@ namespace repl {
BSONObj *o2,
bool *bb,
bool fromMigrate ) {
- Lock::DBWrite lk(txn->lockState(), "local");
+ Lock::DBLock lk(txn->lockState(), "local", newlm::MODE_X);
WriteUnitOfWork wunit(txn);
static BufBuilder bufbuilder(8*1024); // todo there is likely a mutex on this constructor
diff --git a/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp b/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp
index 19eb17c0ebc..dd78ae482b4 100644
--- a/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp
@@ -55,7 +55,9 @@ namespace repl {
namespace {
// TODO: Change this to local.system.replset when we remove disable the hybrid coordinator.
const char configCollectionName[] = "local.new.replset";
+ const char configDatabaseName[] = "local";
const char meCollectionName[] = "local.me";
+ const char meDatabaseName[] = "local";
const char tsFieldName[] = "ts";
const char hashFieldName[] = "h";
} // namespace
@@ -83,7 +85,7 @@ namespace {
std::string myname = getHostName();
OID myRID;
{
- Lock::DBWrite lock(txn->lockState(), meCollectionName);
+ Lock::DBLock lock(txn->lockState(), meDatabaseName, newlm::MODE_X);
BSONObj me;
// local.me is an identifier for a server for getLastError w:2+
@@ -130,7 +132,7 @@ namespace {
OperationContext* txn,
const BSONObj& config) {
try {
- Lock::DBWrite dbWriteLock(txn->lockState(), configCollectionName);
+ Lock::DBLock dbWriteLock(txn->lockState(), configDatabaseName, newlm::MODE_X);
Helpers::putSingleton(txn, configCollectionName, config);
return Status::OK();
}
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index 5ab802640b9..a4d8f77045a 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -121,7 +121,7 @@ namespace repl {
options.syncIndexes = ! dataPass;
// Make database stable
- Lock::DBWrite dbWrite(txn->lockState(), db);
+ Lock::DBLock dbWrite(txn->lockState(), db, newlm::MODE_X);
if (!cloner.go(txn, db, host, options, NULL, err, &errCode)) {
sethbmsg(str::stream() << "initial sync: error while "
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 32c269bfb0a..d87d475056f 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -122,7 +122,7 @@ namespace repl {
lk.reset(new Lock::GlobalWrite(txn->lockState()));
} else {
// DB level lock for this operation
- lk.reset(new Lock::DBWrite(txn->lockState(), ns));
+ lk.reset(new Lock::DBLock(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X));
}
Client::Context ctx(txn, ns);
@@ -482,7 +482,7 @@ namespace {
OpTime lastOpTime;
{
OperationContextImpl txn; // XXX?
- Lock::DBWrite lk(txn.lockState(), "local");
+ Lock::DBLock lk(txn.lockState(), "local", newlm::MODE_X);
WriteUnitOfWork wunit(&txn);
while (!ops->empty()) {
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index 71cb99539d0..fff985c5666 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -43,7 +43,8 @@ namespace CountTests {
class Base {
public:
- Base() : lk(_txn.lockState(), ns()), _wunit(&_txn), _context(&_txn, ns()) {
+ Base() : lk(_txn.lockState(), nsToDatabaseSubstring(ns()), newlm::MODE_X),
+ _wunit(&_txn), _context(&_txn, ns()) {
_database = _context.db();
_collection = _database->getCollection( &_txn, ns() );
if ( _collection ) {
@@ -96,7 +97,7 @@ namespace CountTests {
OperationContextImpl _txn;
private:
- Lock::DBWrite lk;
+ Lock::DBLock lk;
WriteUnitOfWork _wunit;
Client::Context _context;
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index 4801b711a1c..70ab52c878c 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -62,7 +62,7 @@ namespace mongo {
{
// Remove _id range [_min, _max).
- Lock::DBWrite lk(txn.lockState(), ns);
+ Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
WriteUnitOfWork wunit(&txn);
Client::Context ctx(&txn, ns );
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index 72a27859efa..aa6d7828629 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -524,7 +524,7 @@ namespace NamespaceTests {
OperationContextImpl txn;
- Lock::DBWrite lk(txn.lockState(), dbName);
+ Lock::DBLock lk(txn.lockState(), dbName, newlm::MODE_X);
bool justCreated;
Database* db = dbHolder().getOrCreate(&txn, dbName, justCreated);
@@ -567,7 +567,7 @@ namespace NamespaceTests {
OperationContextImpl txn;
- Lock::DBWrite lk(txn.lockState(), dbName);
+ Lock::DBLock lk(txn.lockState(), dbName, newlm::MODE_X);
bool justCreated;
Database* db = dbHolder().getOrCreate(&txn, dbName, justCreated);
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index aaeaf6c6817..598dad993d1 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -239,7 +239,7 @@ namespace QueryTests {
{
// Check internal server handoff to getmore.
- Lock::DBWrite lk(_txn.lockState(), ns);
+ Lock::DBLock lk(_txn.lockState(), "unittests", newlm::MODE_X);
WriteUnitOfWork wunit(&_txn);
Client::Context ctx(&_txn, ns );
ClientCursorPin clientCursor( ctx.db()->getCollection(&_txn, ns), cursorId );
@@ -585,7 +585,7 @@ namespace QueryTests {
}
void run() {
const char *ns = "unittests.querytests.OplogReplaySlaveReadTill";
- Lock::DBWrite lk(_txn.lockState(), ns);
+ Lock::DBLock lk(_txn.lockState(), "unittests", newlm::MODE_X);
WriteUnitOfWork wunit(&_txn);
Client::Context ctx(&_txn, ns );
@@ -1401,7 +1401,7 @@ namespace QueryTests {
public:
CollectionInternalBase( const char *nsLeaf ) :
CollectionBase( nsLeaf ),
- _lk(_txn.lockState(), ns() ),
+ _lk(_txn.lockState(), "unittests", newlm::MODE_X),
_wunit( &_txn ),
_ctx(&_txn, ns()) {
}
@@ -1410,7 +1410,7 @@ namespace QueryTests {
}
private:
- Lock::DBWrite _lk;
+ Lock::DBLock _lk;
WriteUnitOfWork _wunit;
Client::Context _ctx;
};
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index 797d1d72f6d..420c38f3d7f 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -199,7 +199,7 @@ namespace ThreadedTests {
Lock::DBRead x(&lockState, "local");
}
{
- Lock::DBWrite x(&lockState, "local");
+ Lock::DBLock x(&lockState, "local", newlm::MODE_X);
// No actual writing here, so no WriteUnitOfWork
if( sometimes ) {
Lock::TempRelease t(&lockState);
@@ -211,11 +211,11 @@ namespace ThreadedTests {
}
{
- Lock::DBWrite x(&lockState, "admin");
+ Lock::DBLock x(&lockState, "admin", newlm::MODE_X);
}
}
else if( q == 3 ) {
- Lock::DBWrite x(&lockState, "foo");
+ Lock::DBLock x(&lockState, "foo", newlm::MODE_X);
Lock::DBRead y(&lockState, "admin");
}
else if( q == 4 ) {
@@ -223,7 +223,7 @@ namespace ThreadedTests {
Lock::DBRead y(&lockState, "admin");
}
else {
- Lock::DBWrite w(&lockState, "foo");
+ Lock::DBLock w(&lockState, "foo", newlm::MODE_X);
{
Lock::TempRelease t(&lockState);
diff --git a/src/mongo/s/d_merge.cpp b/src/mongo/s/d_merge.cpp
index a314f2161c7..9d1e728ac4f 100644
--- a/src/mongo/s/d_merge.cpp
+++ b/src/mongo/s/d_merge.cpp
@@ -293,7 +293,7 @@ namespace mongo {
//
{
- Lock::DBWrite writeLk(txn->lockState(), nss.ns());
+ Lock::DBLock writeLk(txn->lockState(), nss.db(), newlm::MODE_X);
shardingState.mergeChunks(txn, nss.ns(), minKey, maxKey, mergeVersion);
}
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 7f76724609f..f3d73f2fa49 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -1244,7 +1244,7 @@ namespace mongo {
myVersion.incMajor();
{
- Lock::DBWrite lk(txn->lockState(), ns );
+ Lock::DBLock lk(txn->lockState(), dbname, newlm::MODE_X);
verify( myVersion > shardingState.getVersion( ns ) );
// bump the metadata's version up and "forget" about the chunk being moved
@@ -1658,7 +1658,7 @@ namespace mongo {
if ( getState() != DONE ) {
// Unprotect the range if needed/possible on unsuccessful TO migration
- Lock::DBWrite lk(txn->lockState(), ns);
+ Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
string errMsg;
if (!shardingState.forgetPending(txn, ns, min, max, epoch, &errMsg)) {
warning() << errMsg << endl;
@@ -1719,7 +1719,7 @@ namespace mongo {
}
}
- Lock::DBWrite lk(txn->lockState(), ns);
+ Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
Client::Context ctx(txn, ns);
Database* db = ctx.db();
Collection* collection = db->getCollection( txn, ns );
@@ -1804,7 +1804,7 @@ namespace mongo {
{
// Protect the range by noting that we're now starting a migration to it
- Lock::DBWrite lk(txn->lockState(), ns);
+ Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
if (!shardingState.notePending(txn, ns, min, max, epoch, &errmsg)) {
warning() << errmsg << endl;
setState(FAIL);
@@ -2104,7 +2104,7 @@ namespace mongo {
}
}
- Lock::DBWrite lk(txn->lockState(), ns);
+ Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
Client::Context ctx(txn, ns);
if (serverGlobalParams.moveParanoia) {
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index c5f6fe331a3..e718bf9db19 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -787,14 +787,14 @@ namespace mongo {
//
{
- Lock::DBWrite writeLk(txn->lockState(), ns);
+ Lock::DBLock writeLk(txn->lockState(), dbname, newlm::MODE_X);
- // NOTE: The newShardVersion resulting from this split is higher than any other
- // chunk version, so it's also implicitly the newCollVersion
+ // NOTE: The newShardVersion resulting from this split is higher than any
+ // other chunk version, so it's also implicitly the newCollVersion
ChunkVersion newShardVersion = collVersion;
- // Increment the minor version once, shardingState.splitChunk increments once per
- // split point (resulting in the correct final shard/collection version)
+ // Increment the minor version once, shardingState.splitChunk increments once
+ // per split point (resulting in the correct final shard/collection version)
// TODO: Revisit this interface, it's a bit clunky
newShardVersion.incMinor();
diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp
index 191c10b450a..809053fb56e 100644
--- a/src/mongo/s/d_state.cpp
+++ b/src/mongo/s/d_state.cpp
@@ -594,7 +594,7 @@ namespace mongo {
{
// DBLock needed since we're now potentially changing the metadata, and don't want
// reads/writes to be ongoing.
- Lock::DBWrite writeLk(txn->lockState(), ns );
+ Lock::DBLock writeLk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
//
// Get the metadata now that the load has completed
@@ -1297,7 +1297,7 @@ namespace mongo {
}
bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
- Lock::DBWrite dbXLock(txn->lockState(), dbname);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
Client::Context ctx(txn, dbname);
shardingState.appendInfo( result );
diff --git a/src/mongo/s/metadata_loader.h b/src/mongo/s/metadata_loader.h
index 744a0d0c25a..a058a1e5cfa 100644
--- a/src/mongo/s/metadata_loader.h
+++ b/src/mongo/s/metadata_loader.h
@@ -48,12 +48,12 @@ namespace mongo {
* CollectionMetadata has both persisted and volatile state (for now) - the persisted
* config server chunk state and the volatile pending state which is only tracked locally
* while a server is the primary. This requires a two-step loading process - the persisted
- * chunk state *cannot* be loaded in a DBWrite lock while the pending chunk state *must* be.
+ * chunk state *cannot* be loaded in a DBLock lock while the pending chunk state *must* be.
*
* Example usage:
* beforeMetadata = <get latest local metadata>;
* remoteMetadata = makeCollectionMetadata( beforeMetadata, remoteMetadata );
- * DBWrite lock( ns );
+ * DBLock lock(txn, dbname, newlm::MODE_X);
* afterMetadata = <get latest local metadata>;
* promotePendingChunks( afterMetadata, remoteMetadata );
*