summaryrefslogtreecommitdiff
path: root/src/mongo/db
diff options
context:
space:
mode:
authorGeert Bosch <geert@mongodb.com>2014-12-10 17:15:45 -0500
committerGeert Bosch <geert@mongodb.com>2014-12-12 18:05:34 -0500
commite067fff4e3f3079d070ec168f32c24db9a51a944 (patch)
tree5ccdf8c4cbae815a7d485a30c4d2d5bfbdf3aaaa /src/mongo/db
parent2a148f717998b77199c3dd3d7d4e4e47eb1141ef (diff)
downloadmongo-e067fff4e3f3079d070ec168f32c24db9a51a944.tar.gz
SERVER-16493: Have Client own Locker to amortize initialization cost
Diffstat (limited to 'src/mongo/db')
-rw-r--r--src/mongo/db/client.cpp24
-rw-r--r--src/mongo/db/client.h13
-rw-r--r--src/mongo/db/concurrency/d_concurrency_test.cpp50
-rw-r--r--src/mongo/db/concurrency/deadlock_detection_test.cpp24
-rw-r--r--src/mongo/db/concurrency/lock_mgr_new_test.cpp94
-rw-r--r--src/mongo/db/concurrency/lock_mgr_test_help.h2
-rw-r--r--src/mongo/db/concurrency/lock_state.cpp18
-rw-r--r--src/mongo/db/concurrency/lock_state.h4
-rw-r--r--src/mongo/db/concurrency/lock_state_test.cpp24
-rw-r--r--src/mongo/db/concurrency/locker.h4
-rw-r--r--src/mongo/db/instance.cpp2
-rw-r--r--src/mongo/db/operation_context.h4
-rw-r--r--src/mongo/db/operation_context_impl.cpp24
-rw-r--r--src/mongo/db/operation_context_impl.h2
-rw-r--r--src/mongo/db/repl/operation_context_repl_mock.cpp9
15 files changed, 164 insertions, 134 deletions
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index f7722b7dbec..1b4efbb5281 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -51,8 +51,10 @@
#include "mongo/db/auth/privilege.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/commands.h"
+#include "mongo/db/concurrency/lock_state.h"
#include "mongo/db/curop.h"
#include "mongo/db/dbwebserver.h"
+#include "mongo/db/global_environment_experiment.h"
#include "mongo/db/instance.h"
#include "mongo/db/json.h"
#include "mongo/db/jsobj.h"
@@ -107,6 +109,17 @@ namespace mongo {
clients.insert(client);
}
+namespace {
+ // Create an appropriate new locker for the storage engine in use. Caller owns.
+ Locker* newLocker() {
+ if (isMMAPV1()) {
+ return new MMAPV1LockerImpl();
+ }
+
+ return new LockerImpl<false>();
+ }
+}
+
Client::Client(const string& desc, AbstractMessagingPort *p)
: ClientBasic(p),
_desc(desc),
@@ -114,6 +127,7 @@ namespace mongo {
_connectionId(p ? p->connectionId() : 0),
_god(0),
_txn(NULL),
+ _locker(newLocker()),
_lastOp(0),
_shutdown(false) {
@@ -335,13 +349,19 @@ namespace mongo {
}
void Client::setOperationContext(OperationContext* txn) {
- // We can only set or unset the OperationContexts, never swap them.
- invariant((txn == NULL) ^ (_txn == NULL));
+ // We can only set the OperationContext once before resetting it.
+ invariant(txn != NULL && _txn == NULL);
boost::unique_lock<SpinLock> uniqueLock(_lock);
_txn = txn;
}
+ void Client::resetOperationContext() {
+ invariant(_txn != NULL);
+ boost::unique_lock<SpinLock> uniqueLock(_lock);
+ _txn = NULL;
+ }
+
string Client::clientAddress(bool includePort) const {
if( _curOp )
return _curOp->getRemoteString(includePort);
diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h
index ff35e4394d1..3703d555ff2 100644
--- a/src/mongo/db/client.h
+++ b/src/mongo/db/client.h
@@ -58,6 +58,7 @@ namespace mongo {
class Client;
class Collection;
class AbstractMessagingPort;
+ class Locker;
TSP_DECLARE(Client, currentClient)
@@ -203,18 +204,22 @@ namespace mongo {
void setLastOp( OpTime op ) { _lastOp = op; }
OpTime getLastOp() const { return _lastOp; }
+ // Return a pointer to the Locker for this client. The Client retains ownership.
+ Locker* getLocker() const { return _locker.get(); }
+
/* report what the last operation was. used by getlasterror */
void appendLastOp( BSONObjBuilder& b ) const;
void reportState(BSONObjBuilder& builder);
- // Ensures stability of the client. When the client is locked, it is safe to access its
- // contents. For example, the OperationContext will not disappear.
+ // Ensures stability of the client's OperationContext. When the client is locked,
+ // the OperationContext will not disappear.
void lock() { _lock.lock(); }
void unlock() { _lock.unlock(); }
// Changes the currently active operation context on this client. There can only be one
// active OperationContext at a time.
void setOperationContext(OperationContext* txn);
+ void resetOperationContext();
const OperationContext* getOperationContext() const { return _txn; }
// TODO(spencer): SERVER-10228 SERVER-14779 Remove this/move it fully into OperationContext.
@@ -251,6 +256,10 @@ namespace mongo {
// Changes, based on what operation is running. Some of this should be in OperationContext.
CurOp* _curOp;
+ // By having Client, rather than the OperationContext, own the Locker, setup cost such as
+ // allocating OS resources can be amortized over multiple operations.
+ boost::scoped_ptr<Locker> const _locker;
+
// Used by replication
OpTime _lastOp;
OID _remoteId; // Only used by master-slave
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index b874a1c1b4d..d6e65360ef4 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -37,19 +37,19 @@
namespace mongo {
TEST(DConcurrency, GlobalRead) {
- MMAPV1LockerImpl ls(1);
+ MMAPV1LockerImpl ls;
Lock::GlobalRead globalRead(&ls);
ASSERT(ls.isR());
}
TEST(DConcurrency, GlobalWrite) {
- MMAPV1LockerImpl ls(1);
+ MMAPV1LockerImpl ls;
Lock::GlobalWrite globalWrite(&ls);
ASSERT(ls.isW());
}
TEST(DConcurrency, GlobalWriteAndGlobalRead) {
- MMAPV1LockerImpl ls(1);
+ MMAPV1LockerImpl ls;
Lock::GlobalWrite globalWrite(&ls);
ASSERT(ls.isW());
@@ -63,71 +63,71 @@ namespace mongo {
}
TEST(DConcurrency, readlocktryTimeout) {
- MMAPV1LockerImpl ls(1);
+ MMAPV1LockerImpl ls;
writelocktry globalWrite(&ls, 0);
ASSERT(globalWrite.got());
{
- MMAPV1LockerImpl lsTry(2);
+ MMAPV1LockerImpl lsTry;
readlocktry lockTry(&lsTry, 1);
ASSERT(!lockTry.got());
}
}
TEST(DConcurrency, writelocktryTimeout) {
- MMAPV1LockerImpl ls(1);
+ MMAPV1LockerImpl ls;
writelocktry globalWrite(&ls, 0);
ASSERT(globalWrite.got());
{
- MMAPV1LockerImpl lsTry(2);
+ MMAPV1LockerImpl lsTry;
writelocktry lockTry(&lsTry, 1);
ASSERT(!lockTry.got());
}
}
TEST(DConcurrency, readlocktryNoTimeoutDueToGlobalLockS) {
- MMAPV1LockerImpl ls(1);
+ MMAPV1LockerImpl ls;
Lock::GlobalRead globalRead(&ls);
- MMAPV1LockerImpl lsTry(2);
+ MMAPV1LockerImpl lsTry;
readlocktry lockTry(&lsTry, 1);
ASSERT(lockTry.got());
}
TEST(DConcurrency, writelocktryTimeoutDueToGlobalLockS) {
- MMAPV1LockerImpl ls(1);
+ MMAPV1LockerImpl ls;
Lock::GlobalRead globalRead(&ls);
- MMAPV1LockerImpl lsTry(2);
+ MMAPV1LockerImpl lsTry;
writelocktry lockTry(&lsTry, 1);
ASSERT(!lockTry.got());
}
TEST(DConcurrency, readlocktryTimeoutDueToGlobalLockX) {
- MMAPV1LockerImpl ls(1);
+ MMAPV1LockerImpl ls;
Lock::GlobalWrite globalWrite(&ls);
- MMAPV1LockerImpl lsTry(2);
+ MMAPV1LockerImpl lsTry;
readlocktry lockTry(&lsTry, 1);
ASSERT(!lockTry.got());
}
TEST(DConcurrency, writelocktryTimeoutDueToGlobalLockX) {
- MMAPV1LockerImpl ls(1);
+ MMAPV1LockerImpl ls;
Lock::GlobalWrite globalWrite(&ls);
- MMAPV1LockerImpl lsTry(2);
+ MMAPV1LockerImpl lsTry;
writelocktry lockTry(&lsTry, 1);
ASSERT(!lockTry.got());
}
TEST(DConcurrency, TempReleaseGlobalWrite) {
- MMAPV1LockerImpl ls(1);
+ MMAPV1LockerImpl ls;
Lock::GlobalWrite globalWrite(&ls);
{
@@ -139,7 +139,7 @@ namespace mongo {
}
TEST(DConcurrency, TempReleaseRecursive) {
- MMAPV1LockerImpl ls(1);
+ MMAPV1LockerImpl ls;
Lock::GlobalWrite globalWrite(&ls);
Lock::DBLock lk(&ls, "SomeDBName", MODE_X);
@@ -153,7 +153,7 @@ namespace mongo {
}
TEST(DConcurrency, DBLockTakesS) {
- MMAPV1LockerImpl ls(1);
+ MMAPV1LockerImpl ls;
Lock::DBLock dbRead(&ls, "db", MODE_S);
@@ -162,7 +162,7 @@ namespace mongo {
}
TEST(DConcurrency, DBLockTakesX) {
- MMAPV1LockerImpl ls(1);
+ MMAPV1LockerImpl ls;
Lock::DBLock dbWrite(&ls, "db", MODE_X);
@@ -171,7 +171,7 @@ namespace mongo {
}
TEST(DConcurrency, MultipleWriteDBLocksOnSameThread) {
- MMAPV1LockerImpl ls(1);
+ MMAPV1LockerImpl ls;
Lock::DBLock r1(&ls, "db1", MODE_X);
Lock::DBLock r2(&ls, "db1", MODE_X);
@@ -180,7 +180,7 @@ namespace mongo {
}
TEST(DConcurrency, MultipleConflictingDBLocksOnSameThread) {
- MMAPV1LockerImpl ls(1);
+ MMAPV1LockerImpl ls;
Lock::DBLock r1(&ls, "db1", MODE_X);
Lock::DBLock r2(&ls, "db1", MODE_S);
@@ -192,7 +192,7 @@ namespace mongo {
TEST(DConcurrency, IsDbLockedForSMode) {
const std::string dbName("db");
- MMAPV1LockerImpl ls(1);
+ MMAPV1LockerImpl ls;
Lock::DBLock dbLock(&ls, dbName, MODE_S);
@@ -205,7 +205,7 @@ namespace mongo {
TEST(DConcurrency, IsDbLockedForXMode) {
const std::string dbName("db");
- MMAPV1LockerImpl ls(1);
+ MMAPV1LockerImpl ls;
Lock::DBLock dbLock(&ls, dbName, MODE_X);
@@ -218,7 +218,7 @@ namespace mongo {
TEST(DConcurrency, IsCollectionLocked_DB_Locked_IS) {
const std::string ns("db1.coll");
- MMAPV1LockerImpl ls(1);
+ MMAPV1LockerImpl ls;
Lock::DBLock dbLock(&ls, "db1", MODE_IS);
@@ -247,7 +247,7 @@ namespace mongo {
TEST(DConcurrency, IsCollectionLocked_DB_Locked_IX) {
const std::string ns("db1.coll");
- MMAPV1LockerImpl ls(1);
+ MMAPV1LockerImpl ls;
Lock::DBLock dbLock(&ls, "db1", MODE_IX);
diff --git a/src/mongo/db/concurrency/deadlock_detection_test.cpp b/src/mongo/db/concurrency/deadlock_detection_test.cpp
index d9c1f0ed69e..526405d9cdb 100644
--- a/src/mongo/db/concurrency/deadlock_detection_test.cpp
+++ b/src/mongo/db/concurrency/deadlock_detection_test.cpp
@@ -34,8 +34,8 @@ namespace mongo {
TEST(Deadlock, NoDeadlock) {
const ResourceId resId(RESOURCE_DATABASE, std::string("A"));
- LockerForTests locker1(0);
- LockerForTests locker2(1);
+ LockerForTests locker1;
+ LockerForTests locker2;
ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resId, MODE_S));
ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resId, MODE_S));
@@ -51,8 +51,8 @@ namespace mongo {
const ResourceId resIdA(RESOURCE_DATABASE, std::string("A"));
const ResourceId resIdB(RESOURCE_DATABASE, std::string("B"));
- LockerForTests locker1(1);
- LockerForTests locker2(2);
+ LockerForTests locker1;
+ LockerForTests locker2;
ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resIdA, MODE_X));
ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resIdB, MODE_X));
@@ -77,8 +77,8 @@ namespace mongo {
TEST(Deadlock, SimpleUpgrade) {
const ResourceId resId(RESOURCE_DATABASE, std::string("A"));
- LockerForTests locker1(1);
- LockerForTests locker2(2);
+ LockerForTests locker1;
+ LockerForTests locker2;
// Both acquire lock in intent mode
ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resId, MODE_IX));
@@ -103,9 +103,9 @@ namespace mongo {
const ResourceId resIdA(RESOURCE_DATABASE, std::string("A"));
const ResourceId resIdB(RESOURCE_DATABASE, std::string("B"));
- LockerForTests locker1(1);
- LockerForTests locker2(2);
- LockerForTests lockerIndirect(3);
+ LockerForTests locker1;
+ LockerForTests locker2;
+ LockerForTests lockerIndirect;
ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resIdA, MODE_X));
ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resIdB, MODE_X));
@@ -138,9 +138,9 @@ namespace mongo {
const ResourceId resIdFlush(RESOURCE_MMAPV1_FLUSH, 1);
const ResourceId resIdDb(RESOURCE_DATABASE, 2);
- LockerForTests flush(1);
- LockerForTests reader(2);
- LockerForTests writer(3);
+ LockerForTests flush;
+ LockerForTests reader;
+ LockerForTests writer;
// This sequence simulates the deadlock which occurs during flush
ASSERT_EQUALS(LOCK_OK, writer.lockBegin(resIdFlush, MODE_IX));
diff --git a/src/mongo/db/concurrency/lock_mgr_new_test.cpp b/src/mongo/db/concurrency/lock_mgr_new_test.cpp
index cbca40450c6..e540321beea 100644
--- a/src/mongo/db/concurrency/lock_mgr_new_test.cpp
+++ b/src/mongo/db/concurrency/lock_mgr_new_test.cpp
@@ -83,7 +83,7 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker(1);
+ MMAPV1LockerImpl locker;
TrackingLockGrantNotification notify;
LockRequest request;
@@ -102,7 +102,7 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker(1);
+ MMAPV1LockerImpl locker;
TrackingLockGrantNotification notify;
LockRequest request[6];
@@ -137,7 +137,7 @@ namespace mongo {
boost::scoped_ptr<MMAPV1LockerImpl> locker[6];
for (int i = 0; i < 6; i++) {
- locker[i].reset(new MMAPV1LockerImpl(i));
+ locker[i].reset(new MMAPV1LockerImpl());
}
TrackingLockGrantNotification notify[6];
@@ -168,7 +168,7 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker(1);
+ MMAPV1LockerImpl locker;
LockRequestCombo request(&locker);
ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_S));
@@ -196,7 +196,7 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker(1);
+ MMAPV1LockerImpl locker;
LockRequestCombo request(&locker);
ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_IS));
@@ -224,7 +224,7 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker(1);
+ MMAPV1LockerImpl locker;
LockRequestCombo request(&locker);
ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_S));
@@ -252,7 +252,7 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker(1);
+ MMAPV1LockerImpl locker;
LockRequestCombo request(&locker);
ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_X));
@@ -280,8 +280,8 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker1(1);
- MMAPV1LockerImpl locker2(2);
+ MMAPV1LockerImpl locker1;
+ MMAPV1LockerImpl locker2;
LockRequestCombo request1(&locker1);
LockRequestCombo request2(&locker2);
@@ -319,7 +319,7 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker(1);
+ MMAPV1LockerImpl locker;
TrackingLockGrantNotification notify;
LockRequest request[6];
@@ -353,10 +353,10 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker1(1);
+ MMAPV1LockerImpl locker1;
TrackingLockGrantNotification notify1;
- MMAPV1LockerImpl locker2(2);
+ MMAPV1LockerImpl locker2;
TrackingLockGrantNotification notify2;
LockRequest request1;
@@ -386,7 +386,7 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker(1);
+ MMAPV1LockerImpl locker;
TrackingLockGrantNotification notify;
LockRequest request[6];
@@ -419,8 +419,8 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker1(1);
- MMAPV1LockerImpl locker2(2);
+ MMAPV1LockerImpl locker1;
+ MMAPV1LockerImpl locker2;
LockRequestCombo request1(&locker1);
LockRequestCombo request2(&locker2);
@@ -454,8 +454,8 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker1(1);
- MMAPV1LockerImpl locker2(2);
+ MMAPV1LockerImpl locker1;
+ MMAPV1LockerImpl locker2;
LockRequestCombo request1(&locker1);
LockRequestCombo request2(&locker2);
@@ -489,7 +489,7 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker(1);
+ MMAPV1LockerImpl locker;
TrackingLockGrantNotification notify;
LockRequest request[3];
@@ -521,11 +521,11 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker1(1);
+ MMAPV1LockerImpl locker1;
LockRequestCombo request1(&locker1);
ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
- MMAPV1LockerImpl locker2(2);
+ MMAPV1LockerImpl locker2;
LockRequestCombo request2(&locker2);
ASSERT(LOCK_OK == lockMgr.lock(resId, &request2, MODE_S));
@@ -542,11 +542,11 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker1(1);
+ MMAPV1LockerImpl locker1;
LockRequestCombo request1(&locker1);
ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_X));
- MMAPV1LockerImpl locker2(2);
+ MMAPV1LockerImpl locker2;
LockRequestCombo request2(&locker2);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request2, MODE_S));
@@ -568,14 +568,14 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl lockerExisting(1);
+ MMAPV1LockerImpl lockerExisting;
TrackingLockGrantNotification notifyExisting;
LockRequest requestExisting;
requestExisting.initNew(&lockerExisting, &notifyExisting);
ASSERT(LOCK_OK == lockMgr.lock(resId, &requestExisting, existingMode));
- MMAPV1LockerImpl lockerNew(1);
+ MMAPV1LockerImpl lockerNew;
TrackingLockGrantNotification notifyNew;
LockRequest requestNew;
requestNew.initNew(&lockerNew, &notifyNew);
@@ -618,19 +618,19 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl lockerX(1);
+ MMAPV1LockerImpl lockerX;
LockRequestCombo requestX(&lockerX);
ASSERT(LOCK_OK == lockMgr.lock(resId, &requestX, MODE_X));
// The subsequent request will block
- MMAPV1LockerImpl lockerLow(2);
+ MMAPV1LockerImpl lockerLow;
LockRequestCombo requestLow(&lockerLow);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestLow, MODE_X));
// This is a "queue jumping request", which will go before locker 2 above
- MMAPV1LockerImpl lockerHi(2);
+ MMAPV1LockerImpl lockerHi;
LockRequestCombo requestHi(&lockerHi);
requestHi.enqueueAtFront = true;
@@ -656,14 +656,14 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_GLOBAL, 0);
- MMAPV1LockerImpl locker1(1);
+ MMAPV1LockerImpl locker1;
LockRequestCombo request1(&locker1);
- MMAPV1LockerImpl locker2(2);
+ MMAPV1LockerImpl locker2;
LockRequestCombo request2(&locker2);
request2.compatibleFirst = true;
- MMAPV1LockerImpl locker3(3);
+ MMAPV1LockerImpl locker3;
LockRequestCombo request3(&locker3);
// Lock all in IS mode
@@ -672,14 +672,14 @@ namespace mongo {
ASSERT(LOCK_OK == lockMgr.lock(resId, &request3, MODE_IS));
// Now an exclusive mode comes, which would block
- MMAPV1LockerImpl lockerX(4);
+ MMAPV1LockerImpl lockerX;
LockRequestCombo requestX(&lockerX);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestX, MODE_X));
// If an S comes, it should be granted, because of request2
{
- MMAPV1LockerImpl lockerS(5);
+ MMAPV1LockerImpl lockerS;
LockRequestCombo requestS(&lockerS);
ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
ASSERT(lockMgr.unlock(&requestS));
@@ -690,7 +690,7 @@ namespace mongo {
// If S comes again, it should be granted, because of request2 still there
{
- MMAPV1LockerImpl lockerS(6);
+ MMAPV1LockerImpl lockerS;
LockRequestCombo requestS(&lockerS);
ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
ASSERT(lockMgr.unlock(&requestS));
@@ -700,7 +700,7 @@ namespace mongo {
ASSERT(lockMgr.unlock(&request2));
{
- MMAPV1LockerImpl lockerS(7);
+ MMAPV1LockerImpl lockerS;
LockRequestCombo requestS(&lockerS);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
ASSERT(lockMgr.unlock(&requestS));
@@ -715,18 +715,18 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_GLOBAL, 0);
- MMAPV1LockerImpl lockerXInitial(1);
+ MMAPV1LockerImpl lockerXInitial;
LockRequestCombo requestXInitial(&lockerXInitial);
ASSERT(LOCK_OK == lockMgr.lock(resId, &requestXInitial, MODE_X));
- MMAPV1LockerImpl locker1(2);
+ MMAPV1LockerImpl locker1;
LockRequestCombo request1(&locker1);
- MMAPV1LockerImpl locker2(3);
+ MMAPV1LockerImpl locker2;
LockRequestCombo request2(&locker2);
request2.compatibleFirst = true;
- MMAPV1LockerImpl locker3(4);
+ MMAPV1LockerImpl locker3;
LockRequestCombo request3(&locker3);
// Lock all in IS mode (should block behind the global lock)
@@ -735,7 +735,7 @@ namespace mongo {
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request3, MODE_IS));
// Now an exclusive mode comes, which would block behind the IS modes
- MMAPV1LockerImpl lockerX(5);
+ MMAPV1LockerImpl lockerX;
LockRequestCombo requestX(&lockerX);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestX, MODE_X));
@@ -747,7 +747,7 @@ namespace mongo {
// If an S comes, it should be granted, because of request2
{
- MMAPV1LockerImpl lockerS(6);
+ MMAPV1LockerImpl lockerS;
LockRequestCombo requestS(&lockerS);
ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
ASSERT(lockMgr.unlock(&requestS));
@@ -758,7 +758,7 @@ namespace mongo {
// If S comes again, it should be granted, because of request2 still there
{
- MMAPV1LockerImpl lockerS(7);
+ MMAPV1LockerImpl lockerS;
LockRequestCombo requestS(&lockerS);
ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
ASSERT(lockMgr.unlock(&requestS));
@@ -768,7 +768,7 @@ namespace mongo {
ASSERT(lockMgr.unlock(&request2));
{
- MMAPV1LockerImpl lockerS(8);
+ MMAPV1LockerImpl lockerS;
LockRequestCombo requestS(&lockerS);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
ASSERT(lockMgr.unlock(&requestS));
@@ -783,22 +783,22 @@ namespace mongo {
LockManager lockMgr;
const ResourceId resId(RESOURCE_GLOBAL, 0);
- MMAPV1LockerImpl lockerSInitial(1);
+ MMAPV1LockerImpl lockerSInitial;
LockRequestCombo requestSInitial(&lockerSInitial);
ASSERT(LOCK_OK == lockMgr.lock(resId, &requestSInitial, MODE_S));
- MMAPV1LockerImpl lockerX(2);
+ MMAPV1LockerImpl lockerX;
LockRequestCombo requestX(&lockerX);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestX, MODE_X));
- MMAPV1LockerImpl lockerPending(3);
+ MMAPV1LockerImpl lockerPending;
LockRequestCombo requestPending(&lockerPending);
requestPending.compatibleFirst = true;
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestPending, MODE_S));
// S1 is not granted yet, so the policy should still be FIFO
{
- MMAPV1LockerImpl lockerS(4);
+ MMAPV1LockerImpl lockerS;
LockRequestCombo requestS(&lockerS);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
ASSERT(lockMgr.unlock(&requestS));
@@ -808,7 +808,7 @@ namespace mongo {
ASSERT(lockMgr.unlock(&requestPending));
{
- MMAPV1LockerImpl lockerS(5);
+ MMAPV1LockerImpl lockerS;
LockRequestCombo requestS(&lockerS);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
ASSERT(lockMgr.unlock(&requestS));
diff --git a/src/mongo/db/concurrency/lock_mgr_test_help.h b/src/mongo/db/concurrency/lock_mgr_test_help.h
index 507e827a352..24d4a6c7844 100644
--- a/src/mongo/db/concurrency/lock_mgr_test_help.h
+++ b/src/mongo/db/concurrency/lock_mgr_test_help.h
@@ -35,7 +35,7 @@ namespace mongo {
class LockerForTests : public LockerImpl<false> {
public:
- explicit LockerForTests(LockerId lockerId) : LockerImpl<false>(lockerId) {
+ explicit LockerForTests() {
lockGlobal(MODE_S);
}
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index aed2fffe15f..435ed15bb2f 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -57,6 +57,9 @@ namespace {
// How often (in millis) to check for deadlock if a lock has not been granted for some time
const unsigned DeadlockTimeoutMs = 100;
+ // Dispenses unique LockerId identifiers
+ AtomicUInt64 idCounter(0);
+
/**
* Used to sort locks by granularity when snapshotting lock state. We must report and reacquire
* locks in the same granularity in which they are acquired (i.e. global, flush, database,
@@ -176,6 +179,13 @@ namespace {
}
template<bool IsForMMAPV1>
+ void LockerImpl<IsForMMAPV1>::assertEmpty() const {
+ invariant(!inAWriteUnitOfWork());
+ invariant(_resourcesToUnlockAtEndOfUnitOfWork.empty());
+ invariant(_requests.empty());
+ }
+
+ template<bool IsForMMAPV1>
void LockerImpl<IsForMMAPV1>::dump() const {
StringBuilder ss;
ss << "lock status: ";
@@ -240,8 +250,8 @@ namespace {
//
template<bool IsForMMAPV1>
- LockerImpl<IsForMMAPV1>::LockerImpl(LockerId id)
- : _id(id),
+ LockerImpl<IsForMMAPV1>::LockerImpl()
+ : _id(idCounter.addAndFetch(1)),
_wuowNestingLevel(0),
_batchWriter(false),
_lockPendingParallelWriter(false) {
@@ -253,9 +263,7 @@ namespace {
// Cannot delete the Locker while there are still outstanding requests, because the
// LockManager may attempt to access deleted memory. Besides it is probably incorrect
// to delete with unaccounted locks anyways.
- invariant(!inAWriteUnitOfWork());
- invariant(_resourcesToUnlockAtEndOfUnitOfWork.empty());
- invariant(_requests.empty());
+ assertEmpty();
}
template<bool IsForMMAPV1>
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index 05c063804e7..357b188fbd5 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -89,7 +89,7 @@ namespace mongo {
* Instantiates a new locker. Each locker is automatically assigned a unique
* identifier for disambiguation; distinct lockers conflict on lock acquisition.
*/
- LockerImpl(LockerId id);
+ LockerImpl();
virtual ~LockerImpl();
@@ -218,6 +218,8 @@ namespace mongo {
virtual bool isWriteLocked() const;
virtual bool isReadLocked() const;
+ virtual void assertEmpty() const;
+
virtual bool hasLockPending() const { return getWaitingResource().isValid() || _lockPendingParallelWriter; }
virtual void setIsBatchWriter(bool newValue) { _batchWriter = newValue; }
diff --git a/src/mongo/db/concurrency/lock_state_test.cpp b/src/mongo/db/concurrency/lock_state_test.cpp
index 1e66abca6b2..d2d11abac3e 100644
--- a/src/mongo/db/concurrency/lock_state_test.cpp
+++ b/src/mongo/db/concurrency/lock_state_test.cpp
@@ -44,7 +44,7 @@ namespace {
TEST(LockerImpl, LockNoConflict) {
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker(1);
+ MMAPV1LockerImpl locker;
locker.lockGlobal(MODE_IX);
ASSERT(LOCK_OK == locker.lock(resId, MODE_X));
@@ -62,7 +62,7 @@ namespace {
TEST(LockerImpl, ReLockNoConflict) {
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker(1);
+ MMAPV1LockerImpl locker;
locker.lockGlobal(MODE_IX);
ASSERT(LOCK_OK == locker.lock(resId, MODE_S));
@@ -80,11 +80,11 @@ namespace {
TEST(LockerImpl, ConflictWithTimeout) {
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker1(1);
+ MMAPV1LockerImpl locker1;
ASSERT(LOCK_OK == locker1.lockGlobal(MODE_IX));
ASSERT(LOCK_OK == locker1.lock(resId, MODE_X));
- MMAPV1LockerImpl locker2(2);
+ MMAPV1LockerImpl locker2;
ASSERT(LOCK_OK == locker2.lockGlobal(MODE_IX));
ASSERT(LOCK_TIMEOUT == locker2.lock(resId, MODE_S, 0));
@@ -99,11 +99,11 @@ namespace {
TEST(LockerImpl, ConflictUpgradeWithTimeout) {
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker1(1);
+ MMAPV1LockerImpl locker1;
ASSERT(LOCK_OK == locker1.lockGlobal(MODE_IS));
ASSERT(LOCK_OK == locker1.lock(resId, MODE_S));
- MMAPV1LockerImpl locker2(2);
+ MMAPV1LockerImpl locker2;
ASSERT(LOCK_OK == locker2.lockGlobal(MODE_IS));
ASSERT(LOCK_OK == locker2.lock(resId, MODE_S));
@@ -115,7 +115,7 @@ namespace {
}
TEST(LockerImpl, ReadTransaction) {
- MMAPV1LockerImpl locker(1);
+ MMAPV1LockerImpl locker;
locker.lockGlobal(MODE_IS);
locker.unlockAll();
@@ -135,7 +135,7 @@ namespace {
TEST(LockerImpl, saveAndRestoreGlobal) {
Locker::LockSnapshot lockInfo;
- MMAPV1LockerImpl locker(1);
+ MMAPV1LockerImpl locker;
// No lock requests made, no locks held.
locker.saveLockStateAndUnlock(&lockInfo);
@@ -162,7 +162,7 @@ namespace {
TEST(LockerImpl, saveAndRestoreGlobalAcquiredTwice) {
Locker::LockSnapshot lockInfo;
- MMAPV1LockerImpl locker(1);
+ MMAPV1LockerImpl locker;
// No lock requests made, no locks held.
locker.saveLockStateAndUnlock(&lockInfo);
@@ -188,7 +188,7 @@ namespace {
TEST(LockerImpl, saveAndRestoreDBAndCollection) {
Locker::LockSnapshot lockInfo;
- MMAPV1LockerImpl locker(1);
+ MMAPV1LockerImpl locker;
const ResourceId resIdDatabase(RESOURCE_DATABASE, std::string("TestDB"));
const ResourceId resIdCollection(RESOURCE_COLLECTION, std::string("TestDB.collection"));
@@ -248,10 +248,10 @@ namespace {
for (int numLockers = 1; numLockers <= 64; numLockers = numLockers * 2) {
std::vector<boost::shared_ptr<LockerForTests> > lockers(numLockers);
for (int i = 0; i < numLockers; i++) {
- lockers[i].reset(new LockerForTests(LockerId(100 + i)));
+ lockers[i].reset(new LockerForTests());
}
- LockerImpl<true> locker(1);
+ LockerImpl<true> locker;
// Do some warm-up loops
for (int i = 0; i < 1000; i++) {
diff --git a/src/mongo/db/concurrency/locker.h b/src/mongo/db/concurrency/locker.h
index 352ab67328c..94caf497c7c 100644
--- a/src/mongo/db/concurrency/locker.h
+++ b/src/mongo/db/concurrency/locker.h
@@ -258,6 +258,10 @@ namespace mongo {
virtual bool isWriteLocked() const = 0;
virtual bool isReadLocked() const = 0;
+ // This asserts we're not in a WriteUnitOfWork, and there are no requests on the Locker,
+ // so it would be safe to call the destructor or reuse the Locker.
+ virtual void assertEmpty() const = 0;
+
/**
* Pending means we are currently trying to get a lock (could be the parallel batch writer
* lock).
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index 51464fa184d..aad9da30096 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -1099,7 +1099,7 @@ namespace {
// operation context, which also instantiates a recovery unit. Also, using the
// lockGlobalBegin/lockGlobalComplete sequence, we avoid taking the flush lock. This will
// all go away if we start acquiring the global/flush lock as part of ScopedTransaction.
- DefaultLockerImpl globalLocker(0);
+ DefaultLockerImpl globalLocker;
LockResult result = globalLocker.lockGlobalBegin(MODE_X);
if (result == LOCK_WAITING) {
result = globalLocker.lockGlobalComplete(UINT_MAX);
diff --git a/src/mongo/db/operation_context.h b/src/mongo/db/operation_context.h
index 70dba10d2fa..59c190733f8 100644
--- a/src/mongo/db/operation_context.h
+++ b/src/mongo/db/operation_context.h
@@ -47,6 +47,10 @@ namespace mongo {
* TODO(HK): clarify what this means. There's one OperationContext for one user operation...
* but is this true for getmore? Also what about things like fsyncunlock / internal
* users / etc.?
+ *
+ * On construction, an OperationContext associates itself with the current client, and only on
+ * destruction it deassociates itself. At any time a client can be associated with at most one
+ * OperationContext.
*/
class OperationContext {
MONGO_DISALLOW_COPYING(OperationContext);
diff --git a/src/mongo/db/operation_context_impl.cpp b/src/mongo/db/operation_context_impl.cpp
index 865f300308e..99f48c78f13 100644
--- a/src/mongo/db/operation_context_impl.cpp
+++ b/src/mongo/db/operation_context_impl.cpp
@@ -44,30 +44,20 @@
#include "mongo/util/fail_point_service.h"
namespace mongo {
-namespace {
- // Dispenses unique OperationContext identifiers
- AtomicUInt64 idCounter(0);
-}
-
- OperationContextImpl::OperationContextImpl() : _client(currentClient.get()) {
- invariant(_client);
+ OperationContextImpl::OperationContextImpl()
+ : _client(currentClient.get()),
+ _locker(_client->getLocker()) {
+ invariant(_locker);
StorageEngine* storageEngine = getGlobalEnvironment()->getGlobalStorageEngine();
invariant(storageEngine);
_recovery.reset(storageEngine->newRecoveryUnit());
-
- if (storageEngine->isMmapV1()) {
- _locker.reset(new MMAPV1LockerImpl(idCounter.addAndFetch(1)));
- }
- else {
- _locker.reset(new LockerImpl<false>(idCounter.addAndFetch(1)));
- }
-
_client->setOperationContext(this);
}
OperationContextImpl::~OperationContextImpl() {
- _client->setOperationContext(NULL);
+ _locker->assertEmpty();
+ _client->resetOperationContext();
}
RecoveryUnit* OperationContextImpl::recoveryUnit() const {
@@ -87,7 +77,7 @@ namespace {
}
Locker* OperationContextImpl::lockState() const {
- return _locker.get();
+ return _locker;
}
ProgressMeter* OperationContextImpl::setMessage(const char * msg,
diff --git a/src/mongo/db/operation_context_impl.h b/src/mongo/db/operation_context_impl.h
index 09bfd614769..306f343731e 100644
--- a/src/mongo/db/operation_context_impl.h
+++ b/src/mongo/db/operation_context_impl.h
@@ -71,8 +71,8 @@ namespace mongo {
private:
std::auto_ptr<RecoveryUnit> _recovery;
- std::auto_ptr<Locker> _locker;
Client* const _client; // cached, not owned
+ Locker* const _locker; // cached, not owned
};
} // namespace mongo
diff --git a/src/mongo/db/repl/operation_context_repl_mock.cpp b/src/mongo/db/repl/operation_context_repl_mock.cpp
index 59239c4117e..b5c44316261 100644
--- a/src/mongo/db/repl/operation_context_repl_mock.cpp
+++ b/src/mongo/db/repl/operation_context_repl_mock.cpp
@@ -36,15 +36,8 @@
namespace mongo {
namespace repl {
-namespace {
- // Dispenses unique OperationContext identifiers
- AtomicUInt64 idCounter(0);
-}
-
OperationContextReplMock::OperationContextReplMock()
- : _lockState(new MMAPV1LockerImpl(idCounter.addAndFetch(1))) {
-
- }
+ : _lockState(new MMAPV1LockerImpl()) { }
OperationContextReplMock::~OperationContextReplMock() {}