| author | Geert Bosch <geert@mongodb.com> | 2014-10-03 17:25:41 -0400 |
|---|---|---|
| committer | Geert Bosch <geert@mongodb.com> | 2014-10-03 17:25:41 -0400 |
| commit | f2daf8addd4ef391c135a3da2a1472777405ba06 (patch) | |
| tree | 0e53c98e5ca8a317cd90e75ba5c3cff66cae5ad1 | |
| parent | 6d8ef4551dfb1e76b480602e8f1d6224656e5841 (diff) | |
| download | mongo-f2daf8addd4ef391c135a3da2a1472777405ba06.tar.gz | |
Revert "SERVER-14668: Replace uses of DBWrite lock with DBLock"
This reverts commit da599844c97ef6d290c03e073a9bafe41c1a914e.
Conflicts:
src/mongo/db/repl/repl_coordinator_external_state_impl.cpp
src/mongo/dbtests/counttests.cpp
src/mongo/dbtests/querytests.cpp
37 files changed, 122 insertions, 119 deletions
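
The substance of the revert is easiest to see in the d_concurrency.h / d_concurrency.cpp hunks below: the deprecated `Lock::DBWrite` convenience wrapper is restored, and call sites go back to it instead of constructing a `Lock::DBLock` with an explicit `newlm::MODE_X`. A condensed sketch of the restored wrapper follows; it is not a standalone program, and the surrounding lock-manager declarations (`DBLock`, `Locker`, `StringData`, `nsToDatabaseSubstring`, `newlm::MODE_X`) are existing MongoDB internals assumed rather than reproduced here.

```cpp
// Condensed from the d_concurrency.{h,cpp} hunks in this revert.

// d_concurrency.h
// Exclusive database lock -- DEPRECATED, please transition to DBLock and
// collection locks. Accepts either a database name or a full namespace.
class DBWrite : public DBLock {
public:
    DBWrite(Locker* lockState, const StringData& dbOrNs);
};

// d_concurrency.cpp
// The wrapper reduces a full namespace ("db.collection") to its database
// part and takes that database lock in exclusive (MODE_X) mode.
Lock::DBWrite::DBWrite(Locker* lockState, const StringData& dbOrNs)
    : DBLock(lockState, nsToDatabaseSubstring(dbOrNs), newlm::MODE_X) { }
```
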
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp index 812e062ede9..17901be98de 100644 --- a/src/mongo/db/client.cpp +++ b/src/mongo/db/client.cpp @@ -238,7 +238,7 @@ namespace mongo { Client::WriteContext::WriteContext( OperationContext* opCtx, const std::string& ns, bool doVersion) - : _lk(opCtx->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X), + : _lk(opCtx->lockState(), ns), _wunit(opCtx), _c(opCtx, ns, doVersion) { } diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h index 265b8439037..ef78f899154 100644 --- a/src/mongo/db/client.h +++ b/src/mongo/db/client.h @@ -204,7 +204,7 @@ namespace mongo { Context& ctx() { return _c; } private: - Lock::DBLock _lk; + Lock::DBWrite _lk; WriteUnitOfWork _wunit; Context _c; }; diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp index 67c76b071e9..1686cfe2c10 100644 --- a/src/mongo/db/cloner.cpp +++ b/src/mongo/db/cloner.cpp @@ -308,15 +308,15 @@ namespace mongo { bool logForRepl) { const NamespaceString nss(ns); - const string dbname = nss.db().toString(); + Lock::DBWrite dbWrite(txn->lockState(), nss.db()); - Lock::DBLock dbWrite(txn->lockState(), dbname, newlm::MODE_X); + const string dbName = nss.db().toString(); bool unused; - Database* db = dbHolder().getOrCreate(txn, dbname, unused); + Database* db = dbHolder().getOrCreate(txn, dbName, unused); // config - string temp = dbname + ".system.namespaces"; + string temp = dbName + ".system.namespaces"; BSONObj config = _conn->findOne(temp , BSON("name" << ns)); if (config["options"].isABSONObj()) { WriteUnitOfWork wunit(txn); @@ -329,7 +329,7 @@ namespace mongo { } // main data - copy(txn, dbname, + copy(txn, dbName, nss, nss, logForRepl, false, true, mayYield, mayBeInterrupted, Query(query).snapshot()); @@ -340,7 +340,7 @@ namespace mongo { } // indexes - copyIndexes(txn, dbname, + copyIndexes(txn, dbName, NamespaceString(ns), NamespaceString(ns), logForRepl, false, true, mayYield, mayBeInterrupted); diff --git a/src/mongo/db/commands/apply_ops.cpp b/src/mongo/db/commands/apply_ops.cpp index cc760412120..c0a1f398e56 100644 --- a/src/mongo/db/commands/apply_ops.cpp +++ b/src/mongo/db/commands/apply_ops.cpp @@ -126,26 +126,26 @@ namespace mongo { string ns = temp["ns"].String(); - // Run operations under a nested lock as a hack to prevent yielding. + // Run operations under a nested lock as a hack to prevent them from yielding. // - // The list of operations is supposed to be applied atomically; yielding - // would break atomicity by allowing an interruption or a shutdown to occur - // after only some operations are applied. We are already locked globally - // at this point, so taking a DBLock on the namespace creates a nested lock, - // and yields are disallowed for operations that hold a nested lock. + // The list of operations is supposed to be applied atomically; yielding would break + // atomicity by allowing an interruption or a shutdown to occur after only some + // operations are applied. We are already locked globally at this point, so taking + // a DBWrite on the namespace creates a nested lock, and yields are disallowed for + // operations that hold a nested lock. // - // We do not have a wrapping WriteUnitOfWork so it is possible for a journal - // commit to happen with a subset of ops applied. + // We do not have a wrapping WriteUnitOfWork so it is possible for a journal commit + // to happen with a subset of ops applied. // TODO figure out what to do about this. 
- Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X); + Lock::DBWrite lk(txn->lockState(), ns); invariant(txn->lockState()->isRecursive()); Client::Context ctx(txn, ns); bool failed = repl::applyOperation_inlock(txn, - ctx.db(), - temp, - false, - alwaysUpsert); + ctx.db(), + temp, + false, + alwaysUpsert); ab.append(!failed); if ( failed ) errors++; diff --git a/src/mongo/db/commands/clone.cpp b/src/mongo/db/commands/clone.cpp index 0b73001e559..a25deba9e32 100644 --- a/src/mongo/db/commands/clone.cpp +++ b/src/mongo/db/commands/clone.cpp @@ -115,7 +115,7 @@ namespace mongo { set<string> clonedColls; - Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X); + Lock::DBWrite dbXLock(txn->lockState(), dbname); Cloner cloner; bool rval = cloner.go(txn, dbname, from, opts, &clonedColls, errmsg); diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp index 24439e82b9b..2a4f60cee31 100644 --- a/src/mongo/db/commands/collection_to_capped.cpp +++ b/src/mongo/db/commands/collection_to_capped.cpp @@ -161,7 +161,7 @@ namespace mongo { return false; } - Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X); + Lock::DBWrite dbXLock(txn->lockState(), dbname); Client::Context ctx(txn, dbname); Status status = cloneCollectionAsCapped( txn, ctx.db(), from, to, size, temp, true ); diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp index 8ffac0bff38..5ed0ac16aca 100644 --- a/src/mongo/db/commands/compact.cpp +++ b/src/mongo/db/commands/compact.cpp @@ -97,7 +97,7 @@ namespace mongo { return false; } - NamespaceString ns(db, coll); + NamespaceString ns(db,coll); if ( !ns.isNormal() ) { errmsg = "bad namespace name"; return false; @@ -144,7 +144,7 @@ namespace mongo { compactOptions.validateDocuments = cmdObj["validate"].trueValue(); - Lock::DBLock lk(txn->lockState(), db, newlm::MODE_X); + Lock::DBWrite lk(txn->lockState(), ns.ns()); BackgroundOperation::assertNoBgOpInProgForNs(ns.ns()); Client::Context ctx(txn, ns); diff --git a/src/mongo/db/commands/copydb.cpp b/src/mongo/db/commands/copydb.cpp index 58f733e02b0..208d792c59e 100644 --- a/src/mongo/db/commands/copydb.cpp +++ b/src/mongo/db/commands/copydb.cpp @@ -182,14 +182,15 @@ namespace mongo { cloner.setConnection(conn); } - if (fromSelf) { - // SERVER-4328 todo lock just the two db's not everything for the fromself case - Lock::GlobalWrite lk(txn->lockState()); - return cloner.go(txn, todb, fromhost, cloneOptions, NULL, errmsg); - } - Lock::DBLock lk (txn->lockState(), todb, newlm::MODE_X); - return cloner.go(txn, todb, fromhost, cloneOptions, NULL, errmsg); + // SERVER-4328 todo lock just the two db's not everything for the fromself case + scoped_ptr<Lock::ScopedLock> lk( fromSelf ? 
+ static_cast<Lock::ScopedLock*>(new Lock::GlobalWrite(txn->lockState())) : + static_cast<Lock::ScopedLock*>(new Lock::DBWrite(txn->lockState(), todb))); + if (!cloner.go(txn, todb, fromhost, cloneOptions, NULL, errmsg )) { + return false; + } + return true; } } cmdCopyDB; diff --git a/src/mongo/db/commands/cpuprofile.cpp b/src/mongo/db/commands/cpuprofile.cpp index b1c81ca3584..884e43d0c29 100644 --- a/src/mongo/db/commands/cpuprofile.cpp +++ b/src/mongo/db/commands/cpuprofile.cpp @@ -133,7 +133,7 @@ namespace mongo { std::string &errmsg, BSONObjBuilder &result, bool fromRepl ) { - Lock::DBLock dbXLock(txn->lockState(), db, newlm::MODE_X); + Lock::DBWrite dbXLock(txn->lockState(), db); // The lock here is just to prevent concurrency, nothing will write. Client::Context ctx(txn, db); @@ -152,7 +152,7 @@ namespace mongo { std::string &errmsg, BSONObjBuilder &result, bool fromRepl ) { - Lock::DBLock dbXLock(txn->lockState(), db, newlm::MODE_X); + Lock::DBWrite dbXLock(txn->lockState(), db); WriteUnitOfWork wunit(txn); Client::Context ctx(txn, db); diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp index 43ff03f26e4..58791dea990 100644 --- a/src/mongo/db/commands/create_indexes.cpp +++ b/src/mongo/db/commands/create_indexes.cpp @@ -134,7 +134,7 @@ namespace mongo { // now we know we have to create index(es) // Note: createIndexes command does not currently respect shard versioning. - Lock::DBLock lk(txn->lockState(), ns.db(), newlm::MODE_X); + Lock::DBWrite lk(txn->lockState(), ns.ns()); Client::Context ctx(txn, ns.ns(), false /* doVersion */ ); Database* db = ctx.db(); diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp index 122ed26cc9a..ba93d95bc54 100644 --- a/src/mongo/db/commands/drop_indexes.cpp +++ b/src/mongo/db/commands/drop_indexes.cpp @@ -103,7 +103,7 @@ namespace mongo { CmdDropIndexes() : Command("dropIndexes", false, "deleteIndexes") { } bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& anObjBuilder, bool fromRepl) { - Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X); + Lock::DBWrite dbXLock(txn->lockState(), dbname); WriteUnitOfWork wunit(txn); bool ok = wrappedRun(txn, dbname, jsobj, errmsg, anObjBuilder); if (!ok) { @@ -239,7 +239,7 @@ namespace mongo { LOG(0) << "CMD: reIndex " << toDeleteNs << endl; - Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X); + Lock::DBWrite dbXLock(txn->lockState(), dbname); Client::Context ctx(txn, toDeleteNs); Collection* collection = ctx.db()->getCollection( txn, toDeleteNs ); diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp index 0e05feb5a71..4477e62bec0 100644 --- a/src/mongo/db/commands/find_and_modify.cpp +++ b/src/mongo/db/commands/find_and_modify.cpp @@ -97,7 +97,7 @@ namespace mongo { return false; } - Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X); + Lock::DBWrite dbXLock(txn->lockState(), dbname); Client::Context ctx(txn, ns); return runNoDirectClient( txn, ns , @@ -137,7 +137,7 @@ namespace mongo { BSONObjBuilder& result, string& errmsg) { - Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X); + Lock::DBWrite lk(txn->lockState(), ns); WriteUnitOfWork wunit(txn); Client::Context cx(txn, ns); @@ -335,7 +335,7 @@ namespace mongo { } } - Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X); + Lock::DBWrite dbXLock(txn->lockState(), dbname); WriteUnitOfWork wunit(txn); 
Client::Context ctx(txn, ns); diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp index 81ed57a285d..e58e001a221 100644 --- a/src/mongo/db/commands/mr.cpp +++ b/src/mongo/db/commands/mr.cpp @@ -332,9 +332,7 @@ namespace mongo { if (_useIncremental) { // We don't want to log the deletion of incLong as it isn't replicated. While // harmless, this would lead to a scary looking warning on the secondaries. - Lock::DBLock lk(_txn->lockState(), - nsToDatabaseSubstring(_config.incLong), - newlm::MODE_X); + Lock::DBWrite lk(_txn->lockState(), _config.incLong); if (Database* db = dbHolder().get(_txn, _config.incLong)) { WriteUnitOfWork wunit(_txn); db->dropCollection(_txn, _config.incLong); @@ -593,11 +591,9 @@ namespace mongo { op->setMessage("m/r: merge post processing", "M/R Merge Post Processing Progress", _safeCount(_db, _config.tempNamespace, BSONObj())); - auto_ptr<DBClientCursor> cursor = _db.query(_config.tempNamespace , BSONObj()); - while (cursor->more()) { - Lock::DBLock lock(_txn->lockState(), - nsToDatabaseSubstring(_config.outputOptions.finalNamespace), - newlm::MODE_X); + auto_ptr<DBClientCursor> cursor = _db.query( _config.tempNamespace , BSONObj() ); + while ( cursor->more() ) { + Lock::DBWrite lock(_txn->lockState(), _config.outputOptions.finalNamespace); WriteUnitOfWork wunit(_txn); BSONObj o = cursor->nextSafe(); Helpers::upsert( _txn, _config.outputOptions.finalNamespace , o ); @@ -1114,9 +1110,7 @@ namespace mongo { if ( ! _onDisk ) return; - Lock::DBLock kl(_txn->lockState(), - nsToDatabaseSubstring(_config.incLong), - newlm::MODE_X); + Lock::DBWrite kl(_txn->lockState(), _config.incLong); WriteUnitOfWork wunit(_txn); for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); i++ ) { diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp index 204be07b75b..316d763babf 100644 --- a/src/mongo/db/commands/test_commands.cpp +++ b/src/mongo/db/commands/test_commands.cpp @@ -64,7 +64,7 @@ namespace mongo { string ns = dbname + "." + coll; BSONObj obj = cmdObj[ "obj" ].embeddedObjectUserCheck(); - Lock::DBLock lk(txn->lockState(), dbname, newlm::MODE_X); + Lock::DBWrite lk(txn->lockState(), ns); WriteUnitOfWork wunit(txn); Client::Context ctx(txn, ns ); Database* db = ctx.db(); diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp index f5e372359b0..8b21578ce89 100644 --- a/src/mongo/db/commands/write_commands/batch_executor.cpp +++ b/src/mongo/db/commands/write_commands/batch_executor.cpp @@ -696,7 +696,7 @@ namespace mongo { /** * Gets the lock-holding object. Only valid if hasLock(). */ - Lock::DBLock& getLock() { return *_writeLock; } + Lock::DBWrite& getLock() { return *_writeLock; } /** * Gets the target collection for the batch operation. Value is undefined @@ -721,7 +721,7 @@ namespace mongo { bool _lockAndCheckImpl(WriteOpResult* result); // Guard object for the write lock on the target database. - scoped_ptr<Lock::DBLock> _writeLock; + scoped_ptr<Lock::DBWrite> _writeLock; // Context object on the target database. Must appear after writeLock, so that it is // destroyed in proper order. 
@@ -919,9 +919,7 @@ namespace mongo { } invariant(!_context.get()); - _writeLock.reset(new Lock::DBLock(txn->lockState(), - nsToDatabase(request->getNS()), - newlm::MODE_X)); + _writeLock.reset(new Lock::DBWrite(txn->lockState(), request->getNS())); if (!checkIsMasterForDatabase(request->getNS(), result)) { return false; } @@ -1126,7 +1124,7 @@ namespace mongo { } /////////////////////////////////////////// - Lock::DBLock writeLock(txn->lockState(), nsString.db(), newlm::MODE_X); + Lock::DBWrite writeLock(txn->lockState(), nsString.ns()); /////////////////////////////////////////// if (!checkShardVersion(txn, &shardingState, *updateItem.getRequest(), result)) @@ -1181,7 +1179,7 @@ namespace mongo { } /////////////////////////////////////////// - Lock::DBLock writeLock(txn->lockState(), nss.db(), newlm::MODE_X); + Lock::DBWrite writeLock(txn->lockState(), nss.ns()); /////////////////////////////////////////// // Check version once we're locked diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp index dc30d3fe405..bdd78c68f83 100644 --- a/src/mongo/db/concurrency/d_concurrency.cpp +++ b/src/mongo/db/concurrency/d_concurrency.cpp @@ -301,6 +301,9 @@ namespace mongo { _lockState->unlock(_id); } + Lock::DBWrite::DBWrite(Locker* lockState, const StringData& dbOrNs) : + DBLock(lockState, nsToDatabaseSubstring(dbOrNs), newlm::MODE_X) { } + Lock::DBRead::DBRead(Locker* lockState, const StringData& dbOrNs) : DBLock(lockState, nsToDatabaseSubstring(dbOrNs), newlm::MODE_S) { } diff --git a/src/mongo/db/concurrency/d_concurrency.h b/src/mongo/db/concurrency/d_concurrency.h index a55eaa077f3..d4f11bb28d4 100644 --- a/src/mongo/db/concurrency/d_concurrency.h +++ b/src/mongo/db/concurrency/d_concurrency.h @@ -196,10 +196,9 @@ namespace mongo { * MODE_S: shared read access to the collection, blocking any writers * MODE_X: exclusive access to the collection, blocking all other readers and writers * - * An appropriate DBLock must already be held before locking a collection: it is an error, - * checked with a dassert(), to not have a suitable database lock before locking the - * collection. For storage engines that do not support document-level locking, MODE_IS - * will be upgraded to MODE_S and MODE_IX will be upgraded to MODE_X. + * An appropriate DBLock must already be held before locking a collection. + * For storage engines that do not support document-level locking, MODE_IS will be + * upgraded to MODE_S and MODE_IX will be upgraded to MODE_X. */ class CollectionLock : boost::noncopyable { public: @@ -211,6 +210,18 @@ namespace mongo { }; /** + * Exclusive database lock -- DEPRECATED, please transition to DBLock and collection locks + * + * Allows exclusive write access to the given database, blocking any other access. + * Allows further (recursive) acquisition of database locks for this database in any mode. + * Also acquires the global lock in intent-exclusive (IX) mode. + */ + class DBWrite : public DBLock { + public: + DBWrite(Locker* lockState, const StringData& dbOrNs); + }; + + /** * Shared database lock -- DEPRECATED, please transition to DBLock and collection locks * * Allows concurrent read access to the given database, blocking any writers. 
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp index ddc59b17bd7..3391eab52a7 100644 --- a/src/mongo/db/concurrency/d_concurrency_test.cpp +++ b/src/mongo/db/concurrency/d_concurrency_test.cpp @@ -133,10 +133,10 @@ namespace mongo { ASSERT(ls.getLockMode(resIdDb) == newlm::MODE_S); } - TEST(DConcurrency, DBLockTakesX) { + TEST(DConcurrency, DBWriteTakesX) { LockState ls; - Lock::DBLock dbWrite(&ls, "db", newlm::MODE_X); + Lock::DBWrite dbWrite(&ls, "db"); const newlm::ResourceId resIdDb(newlm::RESOURCE_DATABASE, string("db")); ASSERT(ls.getLockMode(resIdDb) == newlm::MODE_X); diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp index 4f559c3c89a..31b0c8a8475 100644 --- a/src/mongo/db/dbcommands.cpp +++ b/src/mongo/db/dbcommands.cpp @@ -349,7 +349,7 @@ namespace mongo { // Needs to be locked exclusively, because creates the system.profile collection // in the local database. // - Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X); + Lock::DBWrite dbXLock(txn->lockState(), dbname); WriteUnitOfWork wunit(txn); Client::Context ctx(txn, dbname); @@ -405,7 +405,7 @@ namespace mongo { // This doesn't look like it requires exclusive DB lock, because it uses its own diag // locking, but originally the lock was set to be WRITE, so preserving the behaviour. // - Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X); + Lock::DBWrite dbXLock(txn->lockState(), dbname); Client::Context ctx(txn, dbname); int was = _diaglog.setLevel( cmdObj.firstElement().numberInt() ); @@ -461,7 +461,7 @@ namespace mongo { return false; } - Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X); + Lock::DBWrite dbXLock(txn->lockState(), dbname); WriteUnitOfWork wunit(txn); Client::Context ctx(txn, nsToDrop); Database* db = ctx.db(); @@ -561,7 +561,7 @@ namespace mongo { !options["capped"].trueValue() || options["size"].isNumber() || options.hasField("$nExtents")); - Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X); + Lock::DBWrite dbXLock(txn->lockState(), dbname); WriteUnitOfWork wunit(txn); Client::Context ctx(txn, ns); @@ -942,7 +942,7 @@ namespace mongo { bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) { const string ns = dbname + "." + jsobj.firstElement().valuestr(); - Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X); + Lock::DBWrite dbXLock(txn->lockState(), dbname); WriteUnitOfWork wunit(txn); Client::Context ctx(txn, ns ); diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp index e2db4f3a120..04d181c00c0 100644 --- a/src/mongo/db/exec/stagedebug_cmd.cpp +++ b/src/mongo/db/exec/stagedebug_cmd.cpp @@ -121,7 +121,7 @@ namespace mongo { // TODO A write lock is currently taken here to accommodate stages that perform writes // (e.g. DeleteStage). This should be changed to use a read lock for read-only // execution trees. - Lock::DBLock lk(txn->lockState(), dbname, newlm::MODE_X); + Lock::DBWrite lk(txn->lockState(), dbname); Client::Context ctx(txn, dbname); // Make sure the collection is valid. diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp index 8dd345de946..cbde00dbf04 100644 --- a/src/mongo/db/index_rebuilder.cpp +++ b/src/mongo/db/index_rebuilder.cpp @@ -64,7 +64,7 @@ namespace { // This write lock is held throughout the index building process // for this namespace. 
- Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X); + Lock::DBWrite lk(txn->lockState(), ns); Client::Context ctx(txn, ns); Collection* collection = ctx.db()->getCollection(txn, ns); diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp index e5ed820ca50..2ad8a6f94fd 100644 --- a/src/mongo/db/instance.cpp +++ b/src/mongo/db/instance.cpp @@ -621,7 +621,7 @@ namespace mongo { UpdateExecutor executor(&request, &op.debug()); uassertStatusOK(executor.prepare()); - Lock::DBLock lk(txn->lockState(), ns.db(), newlm::MODE_X); + Lock::DBWrite lk(txn->lockState(), ns.ns()); Client::Context ctx(txn, ns ); UpdateResult res = executor.execute(ctx.db()); @@ -655,7 +655,7 @@ namespace mongo { DeleteExecutor executor(&request); uassertStatusOK(executor.prepare()); - Lock::DBLock lk(txn->lockState(), ns.db(), newlm::MODE_X); + Lock::DBWrite lk(txn->lockState(), ns.ns()); Client::Context ctx(txn, ns); long long n = executor.execute(ctx.db()); @@ -914,7 +914,7 @@ namespace mongo { uassertStatusOK(status); } - Lock::DBLock lk(txn->lockState(), nsString.db(), newlm::MODE_X); + Lock::DBWrite lk(txn->lockState(), ns); // CONCURRENCY TODO: is being read locked in big log sufficient here? // writelock is used to synchronize stepdowns w/ writes diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp index c84a1caafa0..c2f11b34e6d 100644 --- a/src/mongo/db/introspect.cpp +++ b/src/mongo/db/introspect.cpp @@ -135,11 +135,10 @@ namespace { BufBuilder profileBufBuilder(1024); try { - // NOTE: It's kind of weird that we lock the op's namespace, but have to for now - // since we're sometimes inside the lock already - const string dbname(nsToDatabase(currentOp.getNS())); - Lock::DBLock lk(txn->lockState(), dbname, newlm::MODE_X); - if (dbHolder().get(txn, dbname) != NULL) { + // NOTE: It's kind of weird that we lock the op's namespace, but have to for now since + // we're sometimes inside the lock already + Lock::DBWrite lk(txn->lockState(), currentOp.getNS() ); + if (dbHolder().get(txn, nsToDatabase(currentOp.getNS())) != NULL) { // We are ok with the profiling happening in a different WUOW from the actual op. 
WriteUnitOfWork wunit(txn); Client::Context cx(txn, currentOp.getNS(), false); diff --git a/src/mongo/db/repl/minvalid.cpp b/src/mongo/db/repl/minvalid.cpp index 30f1d17c18d..f2b0f4c189e 100644 --- a/src/mongo/db/repl/minvalid.cpp +++ b/src/mongo/db/repl/minvalid.cpp @@ -49,14 +49,14 @@ namespace { } // namespace void clearInitialSyncFlag(OperationContext* txn) { - Lock::DBLock lk(txn->lockState(), "local", newlm::MODE_X); + Lock::DBWrite lk(txn->lockState(), "local"); WriteUnitOfWork wunit(txn); Helpers::putSingleton(txn, minvalidNS, BSON("$unset" << initialSyncFlag)); wunit.commit(); } void setInitialSyncFlag(OperationContext* txn) { - Lock::DBLock lk(txn->lockState(), "local", newlm::MODE_X); + Lock::DBWrite lk(txn->lockState(), "local"); WriteUnitOfWork wunit(txn); Helpers::putSingleton(txn, minvalidNS, BSON("$set" << initialSyncFlag)); wunit.commit(); @@ -73,7 +73,7 @@ namespace { } void setMinValid(OperationContext* ctx, OpTime ts) { - Lock::DBLock lk(ctx->lockState(), "local", newlm::MODE_X); + Lock::DBWrite lk(ctx->lockState(), "local"); WriteUnitOfWork wunit(ctx); Helpers::putSingleton(ctx, minvalidNS, BSON("$set" << BSON("ts" << ts))); wunit.commit(); diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp index 12596e0ce63..47813751bee 100644 --- a/src/mongo/db/repl/oplog.cpp +++ b/src/mongo/db/repl/oplog.cpp @@ -120,7 +120,7 @@ namespace repl { todo : make _logOpRS() call this so we don't repeat ourself? */ OpTime _logOpObjRS(OperationContext* txn, const BSONObj& op) { - Lock::DBLock lk(txn->lockState(), "local", newlm::MODE_X); + Lock::DBWrite lk(txn->lockState(), "local"); // XXX soon this needs to be part of an outer WUOW not its own. // We can't do this yet due to locking limitations. WriteUnitOfWork wunit(txn); @@ -237,7 +237,7 @@ namespace repl { BSONObj *o2, bool *bb, bool fromMigrate ) { - Lock::DBLock lk1(txn->lockState(), "local", newlm::MODE_X); + Lock::DBWrite lk1(txn->lockState(), "local"); WriteUnitOfWork wunit(txn); if ( strncmp(ns, "local.", 6) == 0 ) { @@ -321,7 +321,7 @@ namespace repl { BSONObj *o2, bool *bb, bool fromMigrate ) { - Lock::DBLock lk(txn->lockState(), "local", newlm::MODE_X); + Lock::DBWrite lk(txn->lockState(), "local"); WriteUnitOfWork wunit(txn); static BufBuilder bufbuilder(8*1024); // todo there is likely a mutex on this constructor diff --git a/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp b/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp index d6775cbf02f..e1d1b52be7b 100644 --- a/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp +++ b/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp @@ -57,9 +57,7 @@ namespace repl { namespace { const char configCollectionName[] = "local.system.replset"; - const char configDatabaseName[] = "local"; const char meCollectionName[] = "local.me"; - const char meDatabaseName[] = "local"; const char tsFieldName[] = "ts"; } // namespace @@ -96,7 +94,7 @@ namespace { std::string myname = getHostName(); OID myRID; { - Lock::DBLock lock(txn->lockState(), meDatabaseName, newlm::MODE_X); + Lock::DBWrite lock(txn->lockState(), meCollectionName); BSONObj me; // local.me is an identifier for a server for getLastError w:2+ @@ -143,7 +141,7 @@ namespace { OperationContext* txn, const BSONObj& config) { try { - Lock::DBLock dbWriteLock(txn->lockState(), configDatabaseName, newlm::MODE_X); + Lock::DBWrite dbWriteLock(txn->lockState(), configCollectionName); Helpers::putSingleton(txn, configCollectionName, config); return Status::OK(); } diff --git 
a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp index 75b0bc6ecf9..74273a40ba2 100644 --- a/src/mongo/db/repl/rs_initialsync.cpp +++ b/src/mongo/db/repl/rs_initialsync.cpp @@ -84,7 +84,7 @@ namespace { options.syncIndexes = ! dataPass; // Make database stable - Lock::DBLock dbWrite(txn->lockState(), db, newlm::MODE_X); + Lock::DBWrite dbWrite(txn->lockState(), db); if (!cloner.go(txn, db, host, options, NULL, err, &errCode)) { log() << "initial sync: error while " diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp index fceafc04b6e..df8c14db1e3 100644 --- a/src/mongo/db/repl/sync_tail.cpp +++ b/src/mongo/db/repl/sync_tail.cpp @@ -124,7 +124,7 @@ namespace repl { lk.reset(new Lock::GlobalWrite(txn->lockState())); } else { // DB level lock for this operation - lk.reset(new Lock::DBLock(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X)); + lk.reset(new Lock::DBWrite(txn->lockState(), ns)); } Client::Context ctx(txn, ns); @@ -330,7 +330,7 @@ namespace { BackgroundSync* bgsync = BackgroundSync::get(); if (bgsync->getInitialSyncRequestedFlag()) { // got a resync command - Lock::DBLock lk(txn.lockState(), "local", newlm::MODE_X); + Lock::DBWrite lk(txn.lockState(), "local"); WriteUnitOfWork wunit(&txn); Client::Context ctx(&txn, "local"); @@ -487,7 +487,7 @@ namespace { OpTime lastOpTime; { OperationContextImpl txn; // XXX? - Lock::DBLock lk(txn.lockState(), "local", newlm::MODE_X); + Lock::DBWrite lk(txn.lockState(), "local"); WriteUnitOfWork wunit(&txn); while (!ops->empty()) { diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp index b643c30b9c5..31dabc13a97 100644 --- a/src/mongo/dbtests/counttests.cpp +++ b/src/mongo/dbtests/counttests.cpp @@ -43,10 +43,9 @@ namespace CountTests { class Base { public: - Base() : _lk(_txn.lockState(), nsToDatabaseSubstring(ns()), newlm::MODE_X), - _context(&_txn, ns()), - _client(&_txn) { - + Base() : lk(_txn.lockState(), ns()), + _context(&_txn, ns()), + _client(&_txn) { _database = _context.db(); { WriteUnitOfWork wunit(&_txn); @@ -105,7 +104,7 @@ namespace CountTests { OperationContextImpl _txn; - Lock::DBLock _lk; + Lock::DBWrite lk; Client::Context _context; diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp index 70ab52c878c..4801b711a1c 100644 --- a/src/mongo/dbtests/dbhelper_tests.cpp +++ b/src/mongo/dbtests/dbhelper_tests.cpp @@ -62,7 +62,7 @@ namespace mongo { { // Remove _id range [_min, _max). 
- Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X); + Lock::DBWrite lk(txn.lockState(), ns); WriteUnitOfWork wunit(&txn); Client::Context ctx(&txn, ns ); diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp index aa6d7828629..72a27859efa 100644 --- a/src/mongo/dbtests/namespacetests.cpp +++ b/src/mongo/dbtests/namespacetests.cpp @@ -524,7 +524,7 @@ namespace NamespaceTests { OperationContextImpl txn; - Lock::DBLock lk(txn.lockState(), dbName, newlm::MODE_X); + Lock::DBWrite lk(txn.lockState(), dbName); bool justCreated; Database* db = dbHolder().getOrCreate(&txn, dbName, justCreated); @@ -567,7 +567,7 @@ namespace NamespaceTests { OperationContextImpl txn; - Lock::DBLock lk(txn.lockState(), dbName, newlm::MODE_X); + Lock::DBWrite lk(txn.lockState(), dbName); bool justCreated; Database* db = dbHolder().getOrCreate(&txn, dbName, justCreated); diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp index 5ea2f66f303..7140f489d73 100644 --- a/src/mongo/dbtests/querytests.cpp +++ b/src/mongo/dbtests/querytests.cpp @@ -250,7 +250,7 @@ namespace QueryTests { { // Check internal server handoff to getmore. - Lock::DBLock lk(_txn.lockState(), "unittests", newlm::MODE_X); + Lock::DBWrite lk(_txn.lockState(), ns); WriteUnitOfWork wunit(&_txn); Client::Context ctx(&_txn, ns ); ClientCursorPin clientCursor( ctx.db()->getCollection(&_txn, ns), cursorId ); @@ -596,7 +596,7 @@ namespace QueryTests { } void run() { const char *ns = "unittests.querytests.OplogReplaySlaveReadTill"; - Lock::DBLock lk(_txn.lockState(), "unittests", newlm::MODE_X); + Lock::DBWrite lk(_txn.lockState(), ns); WriteUnitOfWork wunit(&_txn); Client::Context ctx(&_txn, ns ); @@ -1406,12 +1406,12 @@ namespace QueryTests { public: CollectionInternalBase( const char *nsLeaf ) : CollectionBase( nsLeaf ), - _lk(_txn.lockState(), "unittests", newlm::MODE_X), + _lk(_txn.lockState(), ns() ), _ctx(&_txn, ns()) { } private: - Lock::DBLock _lk; + Lock::DBWrite _lk; Client::Context _ctx; }; diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp index 62ab464fc98..47ec740bd30 100644 --- a/src/mongo/dbtests/threadedtests.cpp +++ b/src/mongo/dbtests/threadedtests.cpp @@ -187,7 +187,7 @@ namespace ThreadedTests { Lock::DBRead x(&lockState, "local"); } { - Lock::DBLock x(&lockState, "local", newlm::MODE_X); + Lock::DBWrite x(&lockState, "local"); // No actual writing here, so no WriteUnitOfWork if( sometimes ) { Lock::TempRelease t(&lockState); @@ -199,11 +199,11 @@ namespace ThreadedTests { } { - Lock::DBLock x(&lockState, "admin", newlm::MODE_X); + Lock::DBWrite x(&lockState, "admin"); } } else if( q == 3 ) { - Lock::DBLock x(&lockState, "foo", newlm::MODE_X); + Lock::DBWrite x(&lockState, "foo"); Lock::DBRead y(&lockState, "admin"); } else if( q == 4 ) { @@ -211,7 +211,7 @@ namespace ThreadedTests { Lock::DBRead y(&lockState, "admin"); } else { - Lock::DBLock w(&lockState, "foo", newlm::MODE_X); + Lock::DBWrite w(&lockState, "foo"); { Lock::TempRelease t(&lockState); diff --git a/src/mongo/s/d_merge.cpp b/src/mongo/s/d_merge.cpp index bdfcda69b6b..ac2f415a047 100644 --- a/src/mongo/s/d_merge.cpp +++ b/src/mongo/s/d_merge.cpp @@ -293,7 +293,7 @@ namespace mongo { // { - Lock::DBLock writeLk(txn->lockState(), nss.db(), newlm::MODE_X); + Lock::DBWrite writeLk(txn->lockState(), nss.ns()); shardingState.mergeChunks(txn, nss.ns(), minKey, maxKey, mergeVersion); } diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp index 
eaf59dea53c..6f8ba1b0bf0 100644 --- a/src/mongo/s/d_migrate.cpp +++ b/src/mongo/s/d_migrate.cpp @@ -1242,7 +1242,7 @@ namespace mongo { myVersion.incMajor(); { - Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X); + Lock::DBWrite lk(txn->lockState(), ns ); verify( myVersion > shardingState.getVersion( ns ) ); // bump the metadata's version up and "forget" about the chunk being moved @@ -1656,7 +1656,7 @@ namespace mongo { if ( getState() != DONE ) { // Unprotect the range if needed/possible on unsuccessful TO migration - Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X); + Lock::DBWrite lk(txn->lockState(), ns); string errMsg; if (!shardingState.forgetPending(txn, ns, min, max, epoch, &errMsg)) { warning() << errMsg << endl; @@ -1714,7 +1714,7 @@ namespace mongo { indexSpecs.insert(indexSpecs.begin(), indexes.begin(), indexes.end()); } - Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X); + Lock::DBWrite lk(txn->lockState(), ns); Client::Context ctx(txn, ns); Database* db = ctx.db(); Collection* collection = db->getCollection( txn, ns ); @@ -1799,7 +1799,7 @@ namespace mongo { { // Protect the range by noting that we're now starting a migration to it - Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X); + Lock::DBWrite lk(txn->lockState(), ns); if (!shardingState.notePending(txn, ns, min, max, epoch, &errmsg)) { warning() << errmsg << endl; setState(FAIL); @@ -2099,7 +2099,7 @@ namespace mongo { } } - Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X); + Lock::DBWrite lk(txn->lockState(), ns); Client::Context ctx(txn, ns); if (serverGlobalParams.moveParanoia) { diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp index e745899eaf3..721678695e5 100644 --- a/src/mongo/s/d_split.cpp +++ b/src/mongo/s/d_split.cpp @@ -787,7 +787,7 @@ namespace mongo { // { - Lock::DBLock writeLk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X); + Lock::DBWrite writeLk(txn->lockState(), ns); // NOTE: The newShardVersion resulting from this split is higher than any // other chunk version, so it's also implicitly the newCollVersion diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp index 71340b94288..24928c69400 100644 --- a/src/mongo/s/d_state.cpp +++ b/src/mongo/s/d_state.cpp @@ -595,7 +595,7 @@ namespace mongo { { // DBLock needed since we're now potentially changing the metadata, and don't want // reads/writes to be ongoing. - Lock::DBLock writeLk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X); + Lock::DBWrite writeLk(txn->lockState(), ns ); // // Get the metadata now that the load has completed @@ -1298,7 +1298,7 @@ namespace mongo { } bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { - Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X); + Lock::DBWrite dbXLock(txn->lockState(), dbname); Client::Context ctx(txn, dbname); shardingState.appendInfo( result ); |
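
At the call sites, the revert applies the same mechanical substitution across all 37 files: an explicit `Lock::DBLock` taken in `newlm::MODE_X` goes back to the `Lock::DBWrite` wrapper, which accepts either a database name or a full namespace. A representative before/after pair is sketched below; it is illustrative only, with `txn` standing for an `OperationContext*` and `ns` for a namespace string, as in the hunks above.

```cpp
// Shape of the change at a typical call site (one form or the other,
// not both in the same scope).

// Before the revert -- the SERVER-14668 form being undone:
Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);

// After the revert -- the restored deprecated wrapper; it acquires the
// same exclusive (MODE_X) database lock internally:
Lock::DBWrite lk(txn->lockState(), ns);
```
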