author     Geert Bosch <bosch@gnat.com>     2018-07-08 11:26:02 -0400
committer  Geert Bosch <geert@mongodb.com>  2018-07-11 10:11:00 -0400
commit     c7bd92f7ded5501654989eb9b9a80bbf50d3ec3b (patch)
tree       60724a273bc50960b0ed9afee6e24ad8b92e8e1b /src/mongo
parent     80c7c825a44cf99b17e81f4233445c7ab1927706 (diff)
download   mongo-c7bd92f7ded5501654989eb9b9a80bbf50d3ec3b.tar.gz
SERVER-36011 Remove MMAPv1 lockmanager support
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/catalog_raii_test.cpp                      |   2
-rw-r--r--  src/mongo/db/commands/fsync.cpp                         |   3
-rw-r--r--  src/mongo/db/commands/oplog_note.cpp                    |   6
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.cpp              |   3
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.h                |  14
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_bm.cpp           |  15
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_test.cpp         | 182
-rw-r--r--  src/mongo/db/concurrency/deadlock_detection_test.cpp    |  54
-rw-r--r--  src/mongo/db/concurrency/lock_manager.cpp               |   2
-rw-r--r--  src/mongo/db/concurrency/lock_manager_defs.h            |   4
-rw-r--r--  src/mongo/db/concurrency/lock_manager_test.cpp          | 116
-rw-r--r--  src/mongo/db/concurrency/lock_manager_test_help.h       |   2
-rw-r--r--  src/mongo/db/concurrency/lock_state.cpp                 | 314
-rw-r--r--  src/mongo/db/concurrency/lock_state.h                   |  80
-rw-r--r--  src/mongo/db/concurrency/lock_state_test.cpp            |  77
-rw-r--r--  src/mongo/db/concurrency/locker.h                       |  29
-rw-r--r--  src/mongo/db/concurrency/locker_noop.h                  |   8
-rw-r--r--  src/mongo/db/db.cpp                                     |   2
-rw-r--r--  src/mongo/db/repl/noop_writer.cpp                       |   5
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_test.cpp |   2
-rw-r--r--  src/mongo/db/session.cpp                                |   2
-rw-r--r--  src/mongo/db/storage/storage_engine_init.cpp            |   6
22 files changed, 233 insertions(+), 695 deletions(-)
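Most of the diff below is mechanical: the commit collapses the two Locker flavors that existed to support MMAPv1, DefaultLockerImpl and MMAPV1LockerImpl, into a single concrete LockerImpl. A minimal, self-contained sketch of the shape of that change, using invented stand-in types rather than MongoDB's real headers:

#include <memory>

struct Locker {
    virtual ~Locker() = default;
};

// Pre-change shape (sketch): the implementation was parameterized on the
// engine, e.g. template <bool IsForMMAPV1> class LockerImpl, with
// DefaultLockerImpl and MMAPV1LockerImpl as the two aliases seen in the
// '-' lines below.
// Post-change shape: exactly one concrete class.
class LockerImpl : public Locker {};

int main() {
    std::unique_ptr<Locker> lockState = std::make_unique<LockerImpl>();
}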
diff --git a/src/mongo/db/catalog_raii_test.cpp b/src/mongo/db/catalog_raii_test.cpp
index b824b445148..05a0bfeb1cb 100644
--- a/src/mongo/db/catalog_raii_test.cpp
+++ b/src/mongo/db/catalog_raii_test.cpp
@@ -51,7 +51,7 @@ public:
ClientAndCtx makeClientWithLocker(const std::string& clientName) {
auto client = getGlobalServiceContext()->makeClient(clientName);
auto opCtx = client->makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<DefaultLockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
return std::make_pair(std::move(client), std::move(opCtx));
}
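The same one-line substitution recurs in every test file this commit touches. Reduced to a standalone sketch (OperationContext and swapLockState here are simplified stand-ins, not the real declarations), the setup each test performs is:

#include <memory>
#include <utility>

struct Locker {
    virtual ~Locker() = default;
};
class LockerImpl : public Locker {};

struct OperationContext {
    // Replaces the context's lock state, taking ownership of the new locker.
    void swapLockState(std::unique_ptr<Locker> locker) {
        _locker = std::move(locker);
    }
    Locker* lockState() {
        return _locker.get();
    }

private:
    std::unique_ptr<Locker> _locker;
};

int main() {
    OperationContext opCtx;
    opCtx.swapLockState(std::make_unique<LockerImpl>());
    return opCtx.lockState() != nullptr ? 0 : 1;
}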
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index a72454ab115..0187003684e 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -331,9 +331,8 @@ void FSyncLockThread::run() {
try {
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
- Lock::GlobalWrite global(&opCtx); // No WriteUnitOfWork needed
+ Lock::GlobalRead global(&opCtx); // Block any writes in order to flush the files.
- opCtx.lockState()->downgradeGlobalXtoSForMMAPV1();
StorageEngine* storageEngine = getGlobalServiceContext()->getStorageEngine();
try {
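The fsync change swaps an exclusive global lock plus an MMAPv1-specific downgrade for a plain shared (MODE_S) global lock: other readers may proceed, writers are blocked while the files are flushed. The analogous pattern using standard library types only (a sketch, not MongoDB's lock manager):

#include <shared_mutex>

std::shared_mutex globalish;  // stand-in for the global lock resource

void fsyncLockThread() {
    // MODE_S analogue: shared ownership blocks exclusive (write) lockers
    // for as long as it is held, but admits other shared lockers.
    std::shared_lock<std::shared_mutex> global(globalish);
    // ... flush files to disk while writes are blocked ...
}

int main() {
    fsyncLockThread();
}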
diff --git a/src/mongo/db/commands/oplog_note.cpp b/src/mongo/db/commands/oplog_note.cpp
index 09a32cc0f6e..23e9c84f0f7 100644
--- a/src/mongo/db/commands/oplog_note.cpp
+++ b/src/mongo/db/commands/oplog_note.cpp
@@ -53,9 +53,8 @@ namespace mongo {
namespace {
Status _performNoopWrite(OperationContext* opCtx, BSONObj msgObj, StringData note) {
repl::ReplicationCoordinator* const replCoord = repl::ReplicationCoordinator::get(opCtx);
- // Use GlobalLock + lockMMAPV1Flush instead of DBLock to allow return when the lock is not
- // available. It may happen when the primary steps down and a shared global lock is
- // acquired.
+ // Use GlobalLock instead of DBLock to allow return when the lock is not available. It may
+ // happen when the primary steps down and a shared global lock is acquired.
Lock::GlobalLock lock(
opCtx, MODE_IX, Date_t::now() + Milliseconds(1), Lock::InterruptBehavior::kThrow);
@@ -63,7 +62,6 @@ Status _performNoopWrite(OperationContext* opCtx, BSONObj msgObj, StringData not
LOG(1) << "Global lock is not available skipping noopWrite";
return {ErrorCodes::LockFailed, "Global lock is not available"};
}
- opCtx->lockState()->lockMMAPV1Flush();
// Its a proxy for being a primary passing "local" will cause it to return true on secondary
if (!replCoord->canAcceptWritesForDatabase(opCtx, "admin")) {
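The comment above describes a try-with-deadline acquisition: rather than blocking indefinitely against a stepdown that holds the global lock, the caller bounds the wait and turns failure into a status. The same control flow with a std::timed_mutex stand-in (illustrative only; the 1 ms deadline mirrors the snippet above):

#include <chrono>
#include <iostream>
#include <mutex>

std::timed_mutex globalish;  // stand-in for the global lock

int performNoopWrite() {
    // Bounded wait, mirroring Date_t::now() + Milliseconds(1) above.
    if (!globalish.try_lock_for(std::chrono::milliseconds(1))) {
        std::cout << "Global lock is not available, skipping noopWrite\n";
        return 1;  // analogue of returning {ErrorCodes::LockFailed, ...}
    }
    // ... perform the no-op oplog write ...
    globalish.unlock();
    return 0;
}

int main() {
    return performNoopWrite();
}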
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index ca9e9f44691..0bff0d6b4ff 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -219,9 +219,6 @@ Lock::DBLock::DBLock(OperationContext* opCtx, StringData db, LockMode mode, Date
if (!_globalLock.isLocked())
return;
- // Need to acquire the flush lock
- _opCtx->lockState()->lockMMAPV1Flush();
-
// The check for the admin db is to ensure direct writes to auth collections
// are serialized (see SERVER-16092).
if ((_id == resourceIdAdminDB) && !isSharedLockMode(_mode)) {
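With the flush-lock acquisition gone, the only special case left in DBLock construction is the admin-database escalation referenced above: any non-shared request against "admin" is taken in MODE_X so that direct writes to the auth collections serialize (SERVER-16092). A compact model of that rule (invented helper name; the mode enum is abbreviated):

#include <cassert>
#include <string>

enum LockMode { MODE_IS, MODE_IX, MODE_S, MODE_X };

static bool isSharedLockMode(LockMode m) {
    return m == MODE_IS || m == MODE_S;
}

// Hypothetical distillation of the escalation rule in the hunk above.
static LockMode effectiveDbLockMode(const std::string& db, LockMode requested) {
    if (db == "admin" && !isSharedLockMode(requested))
        return MODE_X;  // serialize direct writes to auth collections
    return requested;
}

int main() {
    assert(effectiveDbLockMode("admin", MODE_IX) == MODE_X);   // escalated
    assert(effectiveDbLockMode("admin", MODE_IS) == MODE_IS);  // shared: kept
    assert(effectiveDbLockMode("test", MODE_IX) == MODE_IX);   // other DBs: kept
}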
diff --git a/src/mongo/db/concurrency/d_concurrency.h b/src/mongo/db/concurrency/d_concurrency.h
index 9491969733e..1e453f3a7a6 100644
--- a/src/mongo/db/concurrency/d_concurrency.h
+++ b/src/mongo/db/concurrency/d_concurrency.h
@@ -182,8 +182,6 @@ public:
* in any mode, see LockMode. An outermost GlobalLock, when not in a WriteUnitOfWork, calls
* abandonSnapshot() on destruction. This allows the storage engine to release resources, such
* as snapshots or locks, that it may have acquired during the transaction.
- *
- * NOTE: Does not acquire flush lock.
*/
class GlobalLock {
public:
@@ -266,11 +264,7 @@ public:
explicit GlobalWrite(OperationContext* opCtx)
: GlobalWrite(opCtx, Date_t::max(), InterruptBehavior::kThrow) {}
explicit GlobalWrite(OperationContext* opCtx, Date_t deadline, InterruptBehavior behavior)
- : GlobalLock(opCtx, MODE_X, deadline, behavior) {
- if (isLocked()) {
- opCtx->lockState()->lockMMAPV1Flush();
- }
- }
+ : GlobalLock(opCtx, MODE_X, deadline, behavior) {}
};
/**
@@ -285,11 +279,7 @@ public:
explicit GlobalRead(OperationContext* opCtx)
: GlobalRead(opCtx, Date_t::max(), InterruptBehavior::kThrow) {}
explicit GlobalRead(OperationContext* opCtx, Date_t deadline, InterruptBehavior behavior)
- : GlobalLock(opCtx, MODE_S, deadline, behavior) {
- if (isLocked()) {
- opCtx->lockState()->lockMMAPV1Flush();
- }
- }
+ : GlobalLock(opCtx, MODE_S, deadline, behavior) {}
};
/**
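After this hunk, GlobalWrite and GlobalRead are pure pass-throughs: each forwards to the GlobalLock constructor with MODE_X or MODE_S and does nothing else, since the conditional lockMMAPV1Flush() call was the only body they had. In outline (simplified, no-op stand-in types):

enum LockMode { MODE_S, MODE_X };

struct GlobalLock {
    explicit GlobalLock(LockMode mode) : _mode(mode) {}
    LockMode _mode;
};

// The post-change wrappers reduce to constructor delegation.
struct GlobalWrite : GlobalLock {
    GlobalWrite() : GlobalLock(MODE_X) {}
};
struct GlobalRead : GlobalLock {
    GlobalRead() : GlobalLock(MODE_S) {}
};

int main() {
    GlobalWrite w;  // exclusive; on a real lock manager this blocks readers
    GlobalRead r;   // shared; fine here only because these stand-ins are no-ops
}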
diff --git a/src/mongo/db/concurrency/d_concurrency_bm.cpp b/src/mongo/db/concurrency/d_concurrency_bm.cpp
index 341b4ff7b13..b52acd1a99c 100644
--- a/src/mongo/db/concurrency/d_concurrency_bm.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_bm.cpp
@@ -46,16 +46,15 @@ class DConcurrencyTest : public benchmark::Fixture {
public:
/**
* Returns a vector of Clients of length 'k', each of which has an OperationContext with its
- * lockState set to a DefaultLockerImpl.
+ * lockState set to a LockerImpl.
*/
- template <typename LockerType>
void makeKClientsWithLockers(int k) {
clients.reserve(k);
for (int i = 0; i < k; ++i) {
auto client = getGlobalServiceContext()->makeClient(
str::stream() << "test client for thread " << i);
auto opCtx = client->makeOperationContext();
- opCtx->swapLockState(std::make_unique<LockerType>());
+ opCtx->swapLockState(std::make_unique<LockerImpl>());
clients.emplace_back(std::move(client), std::move(opCtx));
}
}
@@ -63,7 +62,7 @@ public:
protected:
std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
clients;
- std::array<DefaultLockerImpl, kMaxPerfThreads> locker;
+ std::array<LockerImpl, kMaxPerfThreads> locker;
};
BENCHMARK_DEFINE_F(DConcurrencyTest, BM_StdMutex)(benchmark::State& state) {
@@ -94,7 +93,7 @@ BENCHMARK_DEFINE_F(DConcurrencyTest, BM_CollectionIntentSharedLock)(benchmark::S
std::unique_ptr<ForceSupportsDocLocking> supportDocLocking;
if (state.thread_index == 0) {
- makeKClientsWithLockers<DefaultLockerImpl>(state.threads);
+ makeKClientsWithLockers(state.threads);
supportDocLocking = std::make_unique<ForceSupportsDocLocking>(true);
}
@@ -113,7 +112,7 @@ BENCHMARK_DEFINE_F(DConcurrencyTest, BM_CollectionIntentExclusiveLock)(benchmark
std::unique_ptr<ForceSupportsDocLocking> supportDocLocking;
if (state.thread_index == 0) {
- makeKClientsWithLockers<DefaultLockerImpl>(state.threads);
+ makeKClientsWithLockers(state.threads);
supportDocLocking = std::make_unique<ForceSupportsDocLocking>(true);
}
@@ -132,7 +131,7 @@ BENCHMARK_DEFINE_F(DConcurrencyTest, BM_MMAPv1CollectionSharedLock)(benchmark::S
std::unique_ptr<ForceSupportsDocLocking> supportDocLocking;
if (state.thread_index == 0) {
- makeKClientsWithLockers<DefaultLockerImpl>(state.threads);
+ makeKClientsWithLockers(state.threads);
supportDocLocking = std::make_unique<ForceSupportsDocLocking>(false);
}
@@ -151,7 +150,7 @@ BENCHMARK_DEFINE_F(DConcurrencyTest, BM_MMAPv1CollectionExclusiveLock)(benchmark
std::unique_ptr<ForceSupportsDocLocking> supportDocLocking;
if (state.thread_index == 0) {
- makeKClientsWithLockers<DefaultLockerImpl>(state.threads);
+ makeKClientsWithLockers(state.threads);
supportDocLocking = std::make_unique<ForceSupportsDocLocking>(false);
}
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index d54f2896c62..2738a8db2e6 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -83,9 +83,8 @@ class DConcurrencyTestFixture : public ServiceContextMongoDTest {
public:
/**
* Returns a vector of Clients of length 'k', each of which has an OperationContext with its
- * lockState set to a DefaultLockerImpl.
+ * lockState set to a LockerImpl.
*/
- template <typename LockerType>
std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
makeKClientsWithLockers(int k) {
std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
@@ -95,7 +94,7 @@ public:
auto client =
getServiceContext()->makeClient(str::stream() << "test client for thread " << i);
auto opCtx = client->makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<LockerType>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
clients.emplace_back(std::move(client), std::move(opCtx));
}
return clients;
@@ -125,13 +124,13 @@ public:
TEST_F(DConcurrencyTestFixture, WriteConflictRetryInstantiatesOK) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
writeConflictRetry(opCtx.get(), "", "", [] {});
}
TEST_F(DConcurrencyTestFixture, WriteConflictRetryRetriesFunctionOnWriteConflictException) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
auto&& opDebug = CurOp::get(opCtx.get())->debug();
ASSERT_EQUALS(boost::none, opDebug.additiveMetrics.writeConflicts);
ASSERT_EQUALS(100, writeConflictRetry(opCtx.get(), "", "", [&opDebug] {
@@ -145,7 +144,7 @@ TEST_F(DConcurrencyTestFixture, WriteConflictRetryRetriesFunctionOnWriteConflict
TEST_F(DConcurrencyTestFixture, WriteConflictRetryPropagatesNonWriteConflictException) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
ASSERT_THROWS_CODE(writeConflictRetry(opCtx.get(),
"",
"",
@@ -160,7 +159,7 @@ TEST_F(DConcurrencyTestFixture, WriteConflictRetryPropagatesNonWriteConflictExce
TEST_F(DConcurrencyTestFixture,
WriteConflictRetryPropagatesWriteConflictExceptionIfAlreadyInAWriteUnitOfWork) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
Lock::GlobalWrite globalWrite(opCtx.get());
WriteUnitOfWork wuow(opCtx.get());
ASSERT_THROWS(writeConflictRetry(opCtx.get(), "", "", [] { throw WriteConflictException(); }),
@@ -169,9 +168,9 @@ TEST_F(DConcurrencyTestFixture,
TEST_F(DConcurrencyTestFixture, ResourceMutex) {
Lock::ResourceMutex mtx("testMutex");
- DefaultLockerImpl locker1;
- DefaultLockerImpl locker2;
- DefaultLockerImpl locker3;
+ LockerImpl locker1;
+ LockerImpl locker2;
+ LockerImpl locker3;
struct State {
void check(int n) {
@@ -251,21 +250,21 @@ TEST_F(DConcurrencyTestFixture, ResourceMutex) {
TEST_F(DConcurrencyTestFixture, GlobalRead) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
Lock::GlobalRead globalRead(opCtx.get());
ASSERT(opCtx->lockState()->isR());
}
TEST_F(DConcurrencyTestFixture, GlobalWrite) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
Lock::GlobalWrite globalWrite(opCtx.get());
ASSERT(opCtx->lockState()->isW());
}
TEST_F(DConcurrencyTestFixture, GlobalWriteAndGlobalRead) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
auto lockState = opCtx->lockState();
Lock::GlobalWrite globalWrite(opCtx.get());
@@ -282,26 +281,21 @@ TEST_F(DConcurrencyTestFixture, GlobalWriteAndGlobalRead) {
TEST_F(DConcurrencyTestFixture,
GlobalWriteRequiresExplicitDowngradeToIntentWriteModeIfDestroyedWhileHoldingDatabaseLock) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
auto lockState = opCtx->lockState();
const ResourceId globalId(RESOURCE_GLOBAL, ResourceId::SINGLETON_GLOBAL);
- const ResourceId mmapId(RESOURCE_MMAPV1_FLUSH, ResourceId::SINGLETON_MMAPV1_FLUSH);
auto globalWrite = stdx::make_unique<Lock::GlobalWrite>(opCtx.get());
ASSERT(lockState->isW());
ASSERT(MODE_X == lockState->getLockMode(globalId))
<< "unexpected global lock mode " << modeName(lockState->getLockMode(globalId));
- ASSERT(MODE_IX == lockState->getLockMode(mmapId)) << "unexpected MMAPv1 flush lock mode "
- << modeName(lockState->getLockMode(mmapId));
{
Lock::DBLock dbWrite(opCtx.get(), "db", MODE_IX);
ASSERT(lockState->isW());
ASSERT(MODE_X == lockState->getLockMode(globalId))
<< "unexpected global lock mode " << modeName(lockState->getLockMode(globalId));
- ASSERT(MODE_IX == lockState->getLockMode(mmapId))
- << "unexpected MMAPv1 flush lock mode " << modeName(lockState->getLockMode(mmapId));
// If we destroy the GlobalWrite out of order relative to the DBLock, we will leave the
// global lock resource locked in MODE_X. We have to explicitly downgrade this resource to
@@ -314,8 +308,6 @@ TEST_F(DConcurrencyTestFixture,
ASSERT(lockState->isWriteLocked());
ASSERT(MODE_IX == lockState->getLockMode(globalId))
<< "unexpected global lock mode " << modeName(lockState->getLockMode(globalId));
- ASSERT(MODE_IX == lockState->getLockMode(mmapId))
- << "unexpected MMAPv1 flush lock mode " << modeName(lockState->getLockMode(mmapId));
}
@@ -323,33 +315,26 @@ TEST_F(DConcurrencyTestFixture,
ASSERT_FALSE(lockState->isWriteLocked());
ASSERT(MODE_NONE == lockState->getLockMode(globalId))
<< "unexpected global lock mode " << modeName(lockState->getLockMode(globalId));
- ASSERT(MODE_NONE == lockState->getLockMode(mmapId)) << "unexpected MMAPv1 flush lock mode "
- << modeName(lockState->getLockMode(mmapId));
}
TEST_F(DConcurrencyTestFixture,
GlobalWriteRequiresSupportsDowngradeToIntentWriteModeWhileHoldingDatabaseLock) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
auto lockState = opCtx->lockState();
const ResourceId globalId(RESOURCE_GLOBAL, ResourceId::SINGLETON_GLOBAL);
- const ResourceId mmapId(RESOURCE_MMAPV1_FLUSH, ResourceId::SINGLETON_MMAPV1_FLUSH);
auto globalWrite = stdx::make_unique<Lock::GlobalWrite>(opCtx.get());
ASSERT(lockState->isW());
ASSERT(MODE_X == lockState->getLockMode(globalId))
<< "unexpected global lock mode " << modeName(lockState->getLockMode(globalId));
- ASSERT(MODE_IX == lockState->getLockMode(mmapId)) << "unexpected MMAPv1 flush lock mode "
- << modeName(lockState->getLockMode(mmapId));
{
Lock::DBLock dbWrite(opCtx.get(), "db", MODE_IX);
ASSERT(lockState->isW());
ASSERT(MODE_X == lockState->getLockMode(globalId))
<< "unexpected global lock mode " << modeName(lockState->getLockMode(globalId));
- ASSERT(MODE_IX == lockState->getLockMode(mmapId))
- << "unexpected MMAPv1 flush lock mode " << modeName(lockState->getLockMode(mmapId));
// Downgrade global lock resource to MODE_IX to allow other write operations to make
// progress.
@@ -358,8 +343,6 @@ TEST_F(DConcurrencyTestFixture,
ASSERT(lockState->isWriteLocked());
ASSERT(MODE_IX == lockState->getLockMode(globalId))
<< "unexpected global lock mode " << modeName(lockState->getLockMode(globalId));
- ASSERT(MODE_IX == lockState->getLockMode(mmapId))
- << "unexpected MMAPv1 flush lock mode " << modeName(lockState->getLockMode(mmapId));
}
ASSERT_FALSE(lockState->isW());
@@ -370,18 +353,15 @@ TEST_F(DConcurrencyTestFixture,
ASSERT_FALSE(lockState->isWriteLocked());
ASSERT(MODE_NONE == lockState->getLockMode(globalId))
<< "unexpected global lock mode " << modeName(lockState->getLockMode(globalId));
- ASSERT(MODE_NONE == lockState->getLockMode(mmapId)) << "unexpected MMAPv1 flush lock mode "
- << modeName(lockState->getLockMode(mmapId));
}
TEST_F(DConcurrencyTestFixture,
NestedGlobalWriteSupportsDowngradeToIntentWriteModeWhileHoldingDatabaseLock) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
auto lockState = opCtx->lockState();
const ResourceId globalId(RESOURCE_GLOBAL, ResourceId::SINGLETON_GLOBAL);
- const ResourceId mmapId(RESOURCE_MMAPV1_FLUSH, ResourceId::SINGLETON_MMAPV1_FLUSH);
auto outerGlobalWrite = stdx::make_unique<Lock::GlobalWrite>(opCtx.get());
auto innerGlobalWrite = stdx::make_unique<Lock::GlobalWrite>(opCtx.get());
@@ -391,8 +371,6 @@ TEST_F(DConcurrencyTestFixture,
ASSERT(lockState->isW());
ASSERT(MODE_X == lockState->getLockMode(globalId))
<< "unexpected global lock mode " << modeName(lockState->getLockMode(globalId));
- ASSERT(MODE_IX == lockState->getLockMode(mmapId))
- << "unexpected MMAPv1 flush lock mode " << modeName(lockState->getLockMode(mmapId));
// Downgrade global lock resource to MODE_IX to allow other write operations to make
// progress.
@@ -401,8 +379,6 @@ TEST_F(DConcurrencyTestFixture,
ASSERT(lockState->isWriteLocked());
ASSERT(MODE_IX == lockState->getLockMode(globalId))
<< "unexpected global lock mode " << modeName(lockState->getLockMode(globalId));
- ASSERT(MODE_IX == lockState->getLockMode(mmapId))
- << "unexpected MMAPv1 flush lock mode " << modeName(lockState->getLockMode(mmapId));
}
ASSERT_FALSE(lockState->isW());
@@ -413,20 +389,16 @@ TEST_F(DConcurrencyTestFixture,
ASSERT(lockState->isWriteLocked());
ASSERT(MODE_IX == lockState->getLockMode(globalId))
<< "unexpected global lock mode " << modeName(lockState->getLockMode(globalId));
- ASSERT(MODE_IX == lockState->getLockMode(mmapId)) << "unexpected MMAPv1 flush lock mode "
- << modeName(lockState->getLockMode(mmapId));
outerGlobalWrite = {};
ASSERT_FALSE(lockState->isW());
ASSERT_FALSE(lockState->isWriteLocked());
ASSERT(MODE_NONE == lockState->getLockMode(globalId))
<< "unexpected global lock mode " << modeName(lockState->getLockMode(globalId));
- ASSERT(MODE_NONE == lockState->getLockMode(mmapId)) << "unexpected MMAPv1 flush lock mode "
- << modeName(lockState->getLockMode(mmapId));
}
TEST_F(DConcurrencyTestFixture, GlobalLockS_Timeout) {
- auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
+ auto clients = makeKClientsWithLockers(2);
Lock::GlobalLock globalWrite(
clients[0].second.get(), MODE_X, Date_t::now(), Lock::InterruptBehavior::kThrow);
@@ -440,7 +412,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockS_Timeout) {
}
TEST_F(DConcurrencyTestFixture, GlobalLockX_Timeout) {
- auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
+ auto clients = makeKClientsWithLockers(2);
Lock::GlobalLock globalWrite(
clients[0].second.get(), MODE_X, Date_t::now(), Lock::InterruptBehavior::kThrow);
ASSERT(globalWrite.isLocked());
@@ -453,7 +425,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockX_Timeout) {
}
TEST_F(DConcurrencyTestFixture, GlobalLockXSetsGlobalLockTakenOnOperationContext) {
- auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(1);
+ auto clients = makeKClientsWithLockers(1);
auto opCtx = clients[0].second.get();
ASSERT_FALSE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalExclusiveLockTaken());
@@ -465,7 +437,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockXSetsGlobalLockTakenOnOperationContext
}
TEST_F(DConcurrencyTestFixture, GlobalLockIXSetsGlobalLockTakenOnOperationContext) {
- auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(1);
+ auto clients = makeKClientsWithLockers(1);
auto opCtx = clients[0].second.get();
ASSERT_FALSE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalExclusiveLockTaken());
{
@@ -477,7 +449,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockIXSetsGlobalLockTakenOnOperationContex
}
TEST_F(DConcurrencyTestFixture, GlobalLockSDoesNotSetGlobalLockTakenOnOperationContext) {
- auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(1);
+ auto clients = makeKClientsWithLockers(1);
auto opCtx = clients[0].second.get();
ASSERT_FALSE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalExclusiveLockTaken());
{
@@ -488,7 +460,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockSDoesNotSetGlobalLockTakenOnOperationC
}
TEST_F(DConcurrencyTestFixture, GlobalLockISDoesNotSetGlobalLockTakenOnOperationContext) {
- auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(1);
+ auto clients = makeKClientsWithLockers(1);
auto opCtx = clients[0].second.get();
ASSERT_FALSE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalExclusiveLockTaken());
{
@@ -499,7 +471,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockISDoesNotSetGlobalLockTakenOnOperation
}
TEST_F(DConcurrencyTestFixture, DBLockXSetsGlobalLockTakenOnOperationContext) {
- auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(1);
+ auto clients = makeKClientsWithLockers(1);
auto opCtx = clients[0].second.get();
ASSERT_FALSE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalExclusiveLockTaken());
@@ -508,7 +480,7 @@ TEST_F(DConcurrencyTestFixture, DBLockXSetsGlobalLockTakenOnOperationContext) {
}
TEST_F(DConcurrencyTestFixture, DBLockSDoesNotSetGlobalLockTakenOnOperationContext) {
- auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(1);
+ auto clients = makeKClientsWithLockers(1);
auto opCtx = clients[0].second.get();
ASSERT_FALSE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalExclusiveLockTaken());
@@ -517,7 +489,7 @@ TEST_F(DConcurrencyTestFixture, DBLockSDoesNotSetGlobalLockTakenOnOperationConte
}
TEST_F(DConcurrencyTestFixture, GlobalLockXDoesNotSetGlobalLockTakenWhenLockAcquisitionTimesOut) {
- auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
+ auto clients = makeKClientsWithLockers(2);
// Take a global lock so that the next one times out.
Lock::GlobalLock globalWrite0(
@@ -535,7 +507,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockXDoesNotSetGlobalLockTakenWhenLockAcqu
}
TEST_F(DConcurrencyTestFixture, GlobalLockS_NoTimeoutDueToGlobalLockS) {
- auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
+ auto clients = makeKClientsWithLockers(2);
Lock::GlobalRead globalRead(clients[0].second.get());
Lock::GlobalLock globalReadTry(clients[1].second.get(),
@@ -547,7 +519,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockS_NoTimeoutDueToGlobalLockS) {
}
TEST_F(DConcurrencyTestFixture, GlobalLockX_TimeoutDueToGlobalLockS) {
- auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
+ auto clients = makeKClientsWithLockers(2);
Lock::GlobalRead globalRead(clients[0].second.get());
Lock::GlobalLock globalWriteTry(clients[1].second.get(),
@@ -559,7 +531,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockX_TimeoutDueToGlobalLockS) {
}
TEST_F(DConcurrencyTestFixture, GlobalLockS_TimeoutDueToGlobalLockX) {
- auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
+ auto clients = makeKClientsWithLockers(2);
Lock::GlobalWrite globalWrite(clients[0].second.get());
Lock::GlobalLock globalReadTry(clients[1].second.get(),
@@ -571,7 +543,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockS_TimeoutDueToGlobalLockX) {
}
TEST_F(DConcurrencyTestFixture, GlobalLockX_TimeoutDueToGlobalLockX) {
- auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
+ auto clients = makeKClientsWithLockers(2);
Lock::GlobalWrite globalWrite(clients[0].second.get());
Lock::GlobalLock globalWriteTry(clients[1].second.get(),
@@ -584,7 +556,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockX_TimeoutDueToGlobalLockX) {
TEST_F(DConcurrencyTestFixture, TempReleaseGlobalWrite) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
auto lockState = opCtx->lockState();
Lock::GlobalWrite globalWrite(opCtx.get());
@@ -598,7 +570,7 @@ TEST_F(DConcurrencyTestFixture, TempReleaseGlobalWrite) {
TEST_F(DConcurrencyTestFixture, TempReleaseRecursive) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
auto lockState = opCtx->lockState();
Lock::GlobalWrite globalWrite(opCtx.get());
Lock::DBLock lk(opCtx.get(), "SomeDBName", MODE_X);
@@ -613,7 +585,7 @@ TEST_F(DConcurrencyTestFixture, TempReleaseRecursive) {
}
TEST_F(DConcurrencyTestFixture, GlobalLockWaitIsInterruptible) {
- auto clients = makeKClientsWithLockers<DefaultLockerImpl>(2);
+ auto clients = makeKClientsWithLockers(2);
auto opCtx1 = clients[0].second.get();
auto opCtx2 = clients[1].second.get();
@@ -630,7 +602,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockWaitIsInterruptible) {
}
TEST_F(DConcurrencyTestFixture, GlobalLockWaitIsInterruptibleMMAP) {
- auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
+ auto clients = makeKClientsWithLockers(2);
auto opCtx1 = clients[0].second.get();
auto opCtx2 = clients[1].second.get();
@@ -650,7 +622,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockWaitIsInterruptibleMMAP) {
}
TEST_F(DConcurrencyTestFixture, GlobalLockWaitNotInterruptedWithLeaveUnlockedBehavior) {
- auto clients = makeKClientsWithLockers<DefaultLockerImpl>(2);
+ auto clients = makeKClientsWithLockers(2);
auto opCtx1 = clients[0].second.get();
auto opCtx2 = clients[1].second.get();
@@ -673,7 +645,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockWaitNotInterruptedWithLeaveUnlockedBeh
}
TEST_F(DConcurrencyTestFixture, GlobalLockEnqueueOnlyNotInterruptedWithLeaveUnlockedBehavior) {
- auto clients = makeKClientsWithLockers<DefaultLockerImpl>(2);
+ auto clients = makeKClientsWithLockers(2);
auto opCtx1 = clients[0].second.get();
// Kill the operation before acquiring the uncontested lock.
@@ -691,7 +663,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockEnqueueOnlyNotInterruptedWithLeaveUnlo
}
TEST_F(DConcurrencyTestFixture, GlobalLockWaitForLockUntilNotInterruptedWithLeaveUnlockedBehavior) {
- auto clients = makeKClientsWithLockers<DefaultLockerImpl>(2);
+ auto clients = makeKClientsWithLockers(2);
auto opCtx1 = clients[0].second.get();
auto opCtx2 = clients[1].second.get();
@@ -717,7 +689,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockWaitForLockUntilNotInterruptedWithLeav
}
TEST_F(DConcurrencyTestFixture, SetMaxLockTimeoutMillisAndDoNotUsingWithInterruptBehavior) {
- auto clients = makeKClientsWithLockers<DefaultLockerImpl>(2);
+ auto clients = makeKClientsWithLockers(2);
auto opCtx1 = clients[0].second.get();
auto opCtx2 = clients[1].second.get();
@@ -735,7 +707,7 @@ TEST_F(DConcurrencyTestFixture, SetMaxLockTimeoutMillisAndDoNotUsingWithInterrup
}
TEST_F(DConcurrencyTestFixture, SetMaxLockTimeoutMillisAndThrowUsingInterruptBehavior) {
- auto clients = makeKClientsWithLockers<DefaultLockerImpl>(2);
+ auto clients = makeKClientsWithLockers(2);
auto opCtx1 = clients[0].second.get();
auto opCtx2 = clients[1].second.get();
@@ -755,7 +727,7 @@ TEST_F(DConcurrencyTestFixture, SetMaxLockTimeoutMillisAndThrowUsingInterruptBeh
}
TEST_F(DConcurrencyTestFixture, DBLockWaitIsInterruptible) {
- auto clients = makeKClientsWithLockers<DefaultLockerImpl>(2);
+ auto clients = makeKClientsWithLockers(2);
auto opCtx1 = clients[0].second.get();
auto opCtx2 = clients[1].second.get();
@@ -772,7 +744,7 @@ TEST_F(DConcurrencyTestFixture, DBLockWaitIsInterruptible) {
}
TEST_F(DConcurrencyTestFixture, GlobalLockWaitIsNotInterruptibleWithLockGuard) {
- auto clients = makeKClientsWithLockers<DefaultLockerImpl>(2);
+ auto clients = makeKClientsWithLockers(2);
auto opCtx1 = clients[0].second.get();
auto opCtx2 = clients[1].second.get();
@@ -792,7 +764,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockWaitIsNotInterruptibleWithLockGuard) {
}
TEST_F(DConcurrencyTestFixture, DBLockWaitIsNotInterruptibleWithLockGuard) {
- auto clients = makeKClientsWithLockers<DefaultLockerImpl>(2);
+ auto clients = makeKClientsWithLockers(2);
auto opCtx1 = clients[0].second.get();
auto opCtx2 = clients[1].second.get();
@@ -814,7 +786,7 @@ TEST_F(DConcurrencyTestFixture, DBLockWaitIsNotInterruptibleWithLockGuard) {
TEST_F(DConcurrencyTestFixture, DBLockTakesS) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
Lock::DBLock dbRead(opCtx.get(), "db", MODE_S);
const ResourceId resIdDb(RESOURCE_DATABASE, std::string("db"));
@@ -823,7 +795,7 @@ TEST_F(DConcurrencyTestFixture, DBLockTakesS) {
TEST_F(DConcurrencyTestFixture, DBLockTakesX) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
Lock::DBLock dbWrite(opCtx.get(), "db", MODE_X);
const ResourceId resIdDb(RESOURCE_DATABASE, std::string("db"));
@@ -832,7 +804,7 @@ TEST_F(DConcurrencyTestFixture, DBLockTakesX) {
TEST_F(DConcurrencyTestFixture, DBLockTakesISForAdminIS) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
Lock::DBLock dbRead(opCtx.get(), "admin", MODE_IS);
ASSERT(opCtx->lockState()->getLockMode(resourceIdAdminDB) == MODE_IS);
@@ -840,7 +812,7 @@ TEST_F(DConcurrencyTestFixture, DBLockTakesISForAdminIS) {
TEST_F(DConcurrencyTestFixture, DBLockTakesSForAdminS) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
Lock::DBLock dbRead(opCtx.get(), "admin", MODE_S);
ASSERT(opCtx->lockState()->getLockMode(resourceIdAdminDB) == MODE_S);
@@ -848,7 +820,7 @@ TEST_F(DConcurrencyTestFixture, DBLockTakesSForAdminS) {
TEST_F(DConcurrencyTestFixture, DBLockTakesXForAdminIX) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
Lock::DBLock dbWrite(opCtx.get(), "admin", MODE_IX);
ASSERT(opCtx->lockState()->getLockMode(resourceIdAdminDB) == MODE_X);
@@ -856,7 +828,7 @@ TEST_F(DConcurrencyTestFixture, DBLockTakesXForAdminIX) {
TEST_F(DConcurrencyTestFixture, DBLockTakesXForAdminX) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
Lock::DBLock dbWrite(opCtx.get(), "admin", MODE_X);
ASSERT(opCtx->lockState()->getLockMode(resourceIdAdminDB) == MODE_X);
@@ -864,7 +836,7 @@ TEST_F(DConcurrencyTestFixture, DBLockTakesXForAdminX) {
TEST_F(DConcurrencyTestFixture, MultipleWriteDBLocksOnSameThread) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
Lock::DBLock r1(opCtx.get(), "db1", MODE_X);
Lock::DBLock r2(opCtx.get(), "db1", MODE_X);
@@ -873,7 +845,7 @@ TEST_F(DConcurrencyTestFixture, MultipleWriteDBLocksOnSameThread) {
TEST_F(DConcurrencyTestFixture, MultipleConflictingDBLocksOnSameThread) {
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
auto lockState = opCtx->lockState();
Lock::DBLock r1(opCtx.get(), "db1", MODE_X);
Lock::DBLock r2(opCtx.get(), "db1", MODE_S);
@@ -886,7 +858,7 @@ TEST_F(DConcurrencyTestFixture, IsDbLockedForSMode) {
const std::string dbName("db");
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
auto lockState = opCtx->lockState();
Lock::DBLock dbLock(opCtx.get(), dbName, MODE_S);
@@ -900,7 +872,7 @@ TEST_F(DConcurrencyTestFixture, IsDbLockedForXMode) {
const std::string dbName("db");
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
auto lockState = opCtx->lockState();
Lock::DBLock dbLock(opCtx.get(), dbName, MODE_X);
@@ -914,7 +886,7 @@ TEST_F(DConcurrencyTestFixture, IsCollectionLocked_DB_Locked_IS) {
const std::string ns("db1.coll");
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
auto lockState = opCtx->lockState();
Lock::DBLock dbLock(opCtx.get(), "db1", MODE_IS);
@@ -945,7 +917,7 @@ TEST_F(DConcurrencyTestFixture, IsCollectionLocked_DB_Locked_IX) {
const std::string ns("db1.coll");
auto opCtx = makeOperationContext();
- opCtx->swapLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ opCtx->swapLockState(stdx::make_unique<LockerImpl>());
auto lockState = opCtx->lockState();
Lock::DBLock dbLock(opCtx.get(), "db1", MODE_IX);
@@ -976,7 +948,7 @@ TEST_F(DConcurrencyTestFixture, Stress) {
ProgressMeter progressMeter(kNumIterations * kMaxStressThreads);
std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
- clients = makeKClientsWithLockers<DefaultLockerImpl>(kMaxStressThreads);
+ clients = makeKClientsWithLockers(kMaxStressThreads);
AtomicInt32 ready{0};
std::vector<stdx::thread> threads;
@@ -1090,7 +1062,7 @@ TEST_F(DConcurrencyTestFixture, Stress) {
for (auto& thread : threads)
thread.join();
- auto newClients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
+ auto newClients = makeKClientsWithLockers(2);
{ Lock::GlobalWrite w(newClients[0].second.get()); }
{ Lock::GlobalRead r(newClients[1].second.get()); }
}
@@ -1100,7 +1072,7 @@ TEST_F(DConcurrencyTestFixture, StressPartitioned) {
ProgressMeter progressMeter(kNumIterations * kMaxStressThreads);
std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
- clients = makeKClientsWithLockers<DefaultLockerImpl>(kMaxStressThreads);
+ clients = makeKClientsWithLockers(kMaxStressThreads);
AtomicInt32 ready{0};
std::vector<stdx::thread> threads;
@@ -1140,7 +1112,7 @@ TEST_F(DConcurrencyTestFixture, StressPartitioned) {
for (auto& thread : threads)
thread.join();
- auto newClients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
+ auto newClients = makeKClientsWithLockers(2);
{ Lock::GlobalWrite w(newClients[0].second.get()); }
{ Lock::GlobalRead r(newClients[1].second.get()); }
}
@@ -1153,7 +1125,7 @@ TEST_F(DConcurrencyTestFixture, ResourceMutexLabels) {
}
TEST_F(DConcurrencyTestFixture, Throttling) {
- auto clientOpctxPairs = makeKClientsWithLockers<DefaultLockerImpl>(2);
+ auto clientOpctxPairs = makeKClientsWithLockers(2);
auto opctx1 = clientOpctxPairs[0].second.get();
auto opctx2 = clientOpctxPairs[1].second.get();
UseGlobalThrottling throttle(opctx1, 1);
@@ -1187,7 +1159,7 @@ TEST_F(DConcurrencyTestFixture, Throttling) {
}
TEST_F(DConcurrencyTestFixture, NoThrottlingWhenNotAcquiringTickets) {
- auto clientOpctxPairs = makeKClientsWithLockers<DefaultLockerImpl>(2);
+ auto clientOpctxPairs = makeKClientsWithLockers(2);
auto opctx1 = clientOpctxPairs[0].second.get();
auto opctx2 = clientOpctxPairs[1].second.get();
// Limit the locker to 1 ticket at a time.
@@ -1205,7 +1177,7 @@ TEST_F(DConcurrencyTestFixture, NoThrottlingWhenNotAcquiringTickets) {
}
TEST_F(DConcurrencyTestFixture, ReleaseAndReacquireTicket) {
- auto clientOpctxPairs = makeKClientsWithLockers<DefaultLockerImpl>(2);
+ auto clientOpctxPairs = makeKClientsWithLockers(2);
auto opctx1 = clientOpctxPairs[0].second.get();
auto opctx2 = clientOpctxPairs[1].second.get();
// Limit the locker to 1 ticket at a time.
@@ -1238,7 +1210,7 @@ TEST_F(DConcurrencyTestFixture, ReleaseAndReacquireTicket) {
}
TEST_F(DConcurrencyTestFixture, LockerWithReleasedTicketCanBeUnlocked) {
- auto clientOpctxPairs = makeKClientsWithLockers<DefaultLockerImpl>(2);
+ auto clientOpctxPairs = makeKClientsWithLockers(2);
auto opctx1 = clientOpctxPairs[0].second.get();
Lock::GlobalRead R1(opctx1, Date_t::now(), Lock::InterruptBehavior::kThrow);
@@ -1248,7 +1220,7 @@ TEST_F(DConcurrencyTestFixture, LockerWithReleasedTicketCanBeUnlocked) {
}
TEST_F(DConcurrencyTestFixture, TicketAcquireCanBeInterrupted) {
- auto clientOpctxPairs = makeKClientsWithLockers<DefaultLockerImpl>(1);
+ auto clientOpctxPairs = makeKClientsWithLockers(1);
auto opctx1 = clientOpctxPairs[0].second.get();
// Limit the locker to 0 tickets at a time.
UseGlobalThrottling throttle(opctx1, 0);
@@ -1260,7 +1232,7 @@ TEST_F(DConcurrencyTestFixture, TicketAcquireCanBeInterrupted) {
}
TEST_F(DConcurrencyTestFixture, TicketReacquireCanBeInterrupted) {
- auto clientOpctxPairs = makeKClientsWithLockers<DefaultLockerImpl>(2);
+ auto clientOpctxPairs = makeKClientsWithLockers(2);
auto opctx1 = clientOpctxPairs[0].second.get();
auto opctx2 = clientOpctxPairs[1].second.get();
// Limit the locker to 1 ticket at a time.
@@ -1288,7 +1260,7 @@ TEST_F(DConcurrencyTestFixture, TicketReacquireCanBeInterrupted) {
}
TEST_F(DConcurrencyTestFixture, GlobalLockInInterruptedContextThrowsEvenWhenUncontested) {
- auto clients = makeKClientsWithLockers<DefaultLockerImpl>(1);
+ auto clients = makeKClientsWithLockers(1);
auto opCtx = clients[0].second.get();
opCtx->markKilled();
@@ -1301,7 +1273,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockInInterruptedContextThrowsEvenWhenUnco
}
TEST_F(DConcurrencyTestFixture, GlobalLockInInterruptedContextThrowsEvenAcquiringRecursively) {
- auto clients = makeKClientsWithLockers<DefaultLockerImpl>(1);
+ auto clients = makeKClientsWithLockers(1);
auto opCtx = clients[0].second.get();
Lock::GlobalWrite globalWriteLock(opCtx, Date_t::now(), Lock::InterruptBehavior::kThrow);
@@ -1318,7 +1290,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockInInterruptedContextThrowsEvenAcquirin
}
TEST_F(DConcurrencyTestFixture, GlobalLockInInterruptedContextRespectsUninterruptibleGuard) {
- auto clients = makeKClientsWithLockers<DefaultLockerImpl>(1);
+ auto clients = makeKClientsWithLockers(1);
auto opCtx = clients[0].second.get();
opCtx->markKilled();
@@ -1329,7 +1301,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockInInterruptedContextRespectsUninterrup
}
TEST_F(DConcurrencyTestFixture, DBLockInInterruptedContextThrowsEvenWhenUncontested) {
- auto clients = makeKClientsWithLockers<DefaultLockerImpl>(1);
+ auto clients = makeKClientsWithLockers(1);
auto opCtx = clients[0].second.get();
opCtx->markKilled();
@@ -1340,7 +1312,7 @@ TEST_F(DConcurrencyTestFixture, DBLockInInterruptedContextThrowsEvenWhenUncontes
}
TEST_F(DConcurrencyTestFixture, DBLockInInterruptedContextThrowsEvenWhenAcquiringRecursively) {
- auto clients = makeKClientsWithLockers<DefaultLockerImpl>(1);
+ auto clients = makeKClientsWithLockers(1);
auto opCtx = clients[0].second.get();
Lock::DBLock dbWriteLock(opCtx, "db", MODE_X);
@@ -1356,7 +1328,7 @@ TEST_F(DConcurrencyTestFixture, DBLockInInterruptedContextThrowsEvenWhenAcquirin
}
TEST_F(DConcurrencyTestFixture, DBLockInInterruptedContextRespectsUninterruptibleGuard) {
- auto clients = makeKClientsWithLockers<DefaultLockerImpl>(1);
+ auto clients = makeKClientsWithLockers(1);
auto opCtx = clients[0].second.get();
opCtx->markKilled();
@@ -1366,7 +1338,7 @@ TEST_F(DConcurrencyTestFixture, DBLockInInterruptedContextRespectsUninterruptibl
}
TEST_F(DConcurrencyTestFixture, DBLockTimeout) {
- auto clientOpctxPairs = makeKClientsWithLockers<DefaultLockerImpl>(2);
+ auto clientOpctxPairs = makeKClientsWithLockers(2);
auto opctx1 = clientOpctxPairs[0].second.get();
auto opctx2 = clientOpctxPairs[1].second.get();
@@ -1384,7 +1356,7 @@ TEST_F(DConcurrencyTestFixture, DBLockTimeout) {
}
TEST_F(DConcurrencyTestFixture, DBLockTimeoutDueToGlobalLock) {
- auto clientOpctxPairs = makeKClientsWithLockers<DefaultLockerImpl>(2);
+ auto clientOpctxPairs = makeKClientsWithLockers(2);
auto opctx1 = clientOpctxPairs[0].second.get();
auto opctx2 = clientOpctxPairs[1].second.get();
@@ -1401,7 +1373,7 @@ TEST_F(DConcurrencyTestFixture, DBLockTimeoutDueToGlobalLock) {
}
TEST_F(DConcurrencyTestFixture, CollectionLockTimeout) {
- auto clientOpctxPairs = makeKClientsWithLockers<DefaultLockerImpl>(2);
+ auto clientOpctxPairs = makeKClientsWithLockers(2);
auto opctx1 = clientOpctxPairs[0].second.get();
auto opctx2 = clientOpctxPairs[1].second.get();
@@ -1424,7 +1396,7 @@ TEST_F(DConcurrencyTestFixture, CollectionLockTimeout) {
}
TEST_F(DConcurrencyTestFixture, CompatibleFirstWithSXIS) {
- auto clientOpctxPairs = makeKClientsWithLockers<DefaultLockerImpl>(3);
+ auto clientOpctxPairs = makeKClientsWithLockers(3);
auto opctx1 = clientOpctxPairs[0].second.get();
auto opctx2 = clientOpctxPairs[1].second.get();
auto opctx3 = clientOpctxPairs[2].second.get();
@@ -1449,7 +1421,7 @@ TEST_F(DConcurrencyTestFixture, CompatibleFirstWithSXIS) {
TEST_F(DConcurrencyTestFixture, CompatibleFirstWithXSIXIS) {
- auto clientOpctxPairs = makeKClientsWithLockers<DefaultLockerImpl>(4);
+ auto clientOpctxPairs = makeKClientsWithLockers(4);
auto opctx1 = clientOpctxPairs[0].second.get();
auto opctx2 = clientOpctxPairs[1].second.get();
auto opctx3 = clientOpctxPairs[2].second.get();
@@ -1495,7 +1467,7 @@ TEST_F(DConcurrencyTestFixture, CompatibleFirstWithXSIXIS) {
}
TEST_F(DConcurrencyTestFixture, CompatibleFirstWithXSXIXIS) {
- auto clientOpctxPairs = makeKClientsWithLockers<DefaultLockerImpl>(5);
+ auto clientOpctxPairs = makeKClientsWithLockers(5);
auto opctx1 = clientOpctxPairs[0].second.get();
auto opctx2 = clientOpctxPairs[1].second.get();
auto opctx3 = clientOpctxPairs[2].second.get();
@@ -1562,7 +1534,7 @@ TEST_F(DConcurrencyTestFixture, CompatibleFirstStress) {
std::vector<uint64_t> acquisitionCount(numThreads);
std::vector<uint64_t> timeoutCount(numThreads);
std::vector<uint64_t> busyWaitCount(numThreads);
- auto clientOpctxPairs = makeKClientsWithLockers<DefaultLockerImpl>(numThreads);
+ auto clientOpctxPairs = makeKClientsWithLockers(numThreads);
// Do some busy waiting to trigger different timings. The atomic load prevents compilers
// from optimizing the loop away.
@@ -1682,7 +1654,7 @@ public:
}
TEST_F(DConcurrencyTestFixture, TestGlobalLockAbandonsSnapshotWhenNotInWriteUnitOfWork) {
- auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(1);
+ auto clients = makeKClientsWithLockers(1);
auto opCtx = clients[0].second.get();
auto recovUnitOwned = stdx::make_unique<RecoveryUnitMock>();
auto recovUnitBorrowed = recovUnitOwned.get();
@@ -1707,7 +1679,7 @@ TEST_F(DConcurrencyTestFixture, TestGlobalLockAbandonsSnapshotWhenNotInWriteUnit
}
TEST_F(DConcurrencyTestFixture, TestGlobalLockDoesNotAbandonSnapshotWhenInWriteUnitOfWork) {
- auto clients = makeKClientsWithLockers<DefaultLockerImpl>(1);
+ auto clients = makeKClientsWithLockers(1);
auto opCtx = clients[0].second.get();
auto recovUnitOwned = stdx::make_unique<RecoveryUnitMock>();
auto recovUnitBorrowed = recovUnitOwned.get();
diff --git a/src/mongo/db/concurrency/deadlock_detection_test.cpp b/src/mongo/db/concurrency/deadlock_detection_test.cpp
index ead27261e1a..f5e64e23578 100644
--- a/src/mongo/db/concurrency/deadlock_detection_test.cpp
+++ b/src/mongo/db/concurrency/deadlock_detection_test.cpp
@@ -134,58 +134,4 @@ TEST(Deadlock, Indirect) {
locker2.unlock(resIdA);
}
-TEST(Deadlock, IndirectWithUpgrade) {
- const ResourceId resIdFlush(RESOURCE_MMAPV1_FLUSH, 1);
- const ResourceId resIdDb(RESOURCE_DATABASE, 2);
-
- LockerForTests flush(MODE_IX);
- LockerForTests reader(MODE_IS);
- LockerForTests writer(MODE_IX);
-
- // This sequence simulates the deadlock which occurs during flush
- ASSERT_EQUALS(LOCK_OK, writer.lockBegin(nullptr, resIdFlush, MODE_IX));
- ASSERT_EQUALS(LOCK_OK, writer.lockBegin(nullptr, resIdDb, MODE_X));
-
- ASSERT_EQUALS(LOCK_OK, reader.lockBegin(nullptr, resIdFlush, MODE_IS));
-
- // R -> W
- ASSERT_EQUALS(LOCK_WAITING, reader.lockBegin(nullptr, resIdDb, MODE_S));
-
- // R -> W
- // F -> W
- ASSERT_EQUALS(LOCK_WAITING, flush.lockBegin(nullptr, resIdFlush, MODE_S));
-
- // W yields its flush lock, so now f is granted in mode S
- //
- // R -> W
- writer.unlock(resIdFlush);
-
- // Flush thread upgrades S -> X in order to do the remap
- //
- // R -> W
- // F -> R
- ASSERT_EQUALS(LOCK_WAITING, flush.lockBegin(nullptr, resIdFlush, MODE_X));
-
- // W comes back from the commit and tries to re-acquire the flush lock
- //
- // R -> W
- // F -> R
- // W -> F
- ASSERT_EQUALS(LOCK_WAITING, writer.lockBegin(nullptr, resIdFlush, MODE_IX));
-
- // Run deadlock detection from the point of view of each of the involved lockers
- DeadlockDetector wfgF(*getGlobalLockManager(), &flush);
- ASSERT(wfgF.check().hasCycle());
-
- DeadlockDetector wfgR(*getGlobalLockManager(), &reader);
- ASSERT(wfgR.check().hasCycle());
-
- DeadlockDetector wfgW(*getGlobalLockManager(), &writer);
- ASSERT(wfgW.check().hasCycle());
-
- // Cleanup, so that LockerImpl doesn't complain about leaked locks
- flush.unlock(resIdFlush);
- writer.unlock(resIdFlush);
-}
-
} // namespace mongo
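The deleted IndirectWithUpgrade test encoded the one deadlock cycle only MMAPv1 could produce: the reader waits on the writer (R -> W), the flush thread waits on the reader after its S -> X upgrade (F -> R), and the writer re-acquiring the flush lock closes the cycle (W -> F). With RESOURCE_MMAPV1_FLUSH removed, that cycle has no edge to start from. A toy wait-for-graph check over those same three edges (illustrative; not the DeadlockDetector API):

#include <map>
#include <set>
#include <string>

// Each locker waits on at most one holder here, so cycle detection is just
// following edges until we revisit a node or run off the graph.
static bool hasCycle(const std::map<std::string, std::string>& waitsFor) {
    for (const auto& edge : waitsFor) {
        std::set<std::string> seen{edge.first};
        auto it = waitsFor.find(edge.first);
        while (it != waitsFor.end()) {
            if (!seen.insert(it->second).second)
                return true;  // revisited a node: cycle
            it = waitsFor.find(it->second);
        }
    }
    return false;
}

int main() {
    // The three edges from the removed test: R -> W, F -> R, W -> F.
    std::map<std::string, std::string> waitsFor{
        {"reader", "writer"}, {"flush", "reader"}, {"writer", "flush"}};
    return hasCycle(waitsFor) ? 0 : 1;
}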
diff --git a/src/mongo/db/concurrency/lock_manager.cpp b/src/mongo/db/concurrency/lock_manager.cpp
index 82731ddc722..b18007877f8 100644
--- a/src/mongo/db/concurrency/lock_manager.cpp
+++ b/src/mongo/db/concurrency/lock_manager.cpp
@@ -108,7 +108,7 @@ uint64_t hashStringData(StringData str) {
* Maps the resource id to a human-readable string.
*/
static const char* ResourceTypeNames[] = {
- "Invalid", "Global", "MMAPV1Journal", "Database", "Collection", "Metadata", "Mutex"};
+ "Invalid", "Global", "Database", "Collection", "Metadata", "Mutex"};
// Ensure we do not add new types without updating the names array
MONGO_STATIC_ASSERT((sizeof(ResourceTypeNames) / sizeof(ResourceTypeNames[0])) ==
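Dropping "MMAPV1Journal" from ResourceTypeNames only works because the enum entry goes with it (see the lock_manager_defs.h hunk below); the static assert keeps the two in lock-step. The idiom, reduced to standard C++ (ResourceTypesCount is the conventional trailing counter, assumed here):

// Name table and enum must agree in size; the compiler enforces it.
enum ResourceType {
    RESOURCE_INVALID,
    RESOURCE_GLOBAL,
    RESOURCE_DATABASE,
    RESOURCE_COLLECTION,
    RESOURCE_METADATA,
    RESOURCE_MUTEX,
    ResourceTypesCount  // assumed sentinel: count of entries above
};

static const char* ResourceTypeNames[] = {
    "Invalid", "Global", "Database", "Collection", "Metadata", "Mutex"};

static_assert(sizeof(ResourceTypeNames) / sizeof(ResourceTypeNames[0]) ==
                  ResourceTypesCount,
              "ResourceTypeNames out of sync with ResourceType");

int main() {}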
diff --git a/src/mongo/db/concurrency/lock_manager_defs.h b/src/mongo/db/concurrency/lock_manager_defs.h
index a8d45d5275e..906e345de8d 100644
--- a/src/mongo/db/concurrency/lock_manager_defs.h
+++ b/src/mongo/db/concurrency/lock_manager_defs.h
@@ -163,9 +163,6 @@ enum ResourceType {
/** Used for mode changes or global exclusive operations */
RESOURCE_GLOBAL,
- /** Necessary only for the MMAPv1 engine */
- RESOURCE_MMAPV1_FLUSH,
-
/** Generic resources, used for multi-granularity locking, together with RESOURCE_GLOBAL */
RESOURCE_DATABASE,
RESOURCE_COLLECTION,
@@ -200,7 +197,6 @@ public:
SINGLETON_INVALID = 0,
SINGLETON_PARALLEL_BATCH_WRITER_MODE,
SINGLETON_GLOBAL,
- SINGLETON_MMAPV1_FLUSH,
};
ResourceId() : _fullHash(0) {}
diff --git a/src/mongo/db/concurrency/lock_manager_test.cpp b/src/mongo/db/concurrency/lock_manager_test.cpp
index 4fcda52ae89..fb42beb4a1f 100644
--- a/src/mongo/db/concurrency/lock_manager_test.cpp
+++ b/src/mongo/db/concurrency/lock_manager_test.cpp
@@ -84,7 +84,7 @@ TEST(LockManager, Grant) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker;
+ LockerImpl locker;
TrackingLockGrantNotification notify;
LockRequest request;
@@ -103,7 +103,7 @@ TEST(LockManager, GrantMultipleNoConflict) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker;
+ LockerImpl locker;
TrackingLockGrantNotification notify;
LockRequest request[6];
@@ -136,9 +136,9 @@ TEST(LockManager, GrantMultipleFIFOOrder) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- std::unique_ptr<MMAPV1LockerImpl> locker[6];
+ std::unique_ptr<LockerImpl> locker[6];
for (int i = 0; i < 6; i++) {
- locker[i].reset(new MMAPV1LockerImpl());
+ locker[i].reset(new LockerImpl());
}
TrackingLockGrantNotification notify[6];
@@ -169,7 +169,7 @@ TEST(LockManager, GrantRecursive) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker;
+ LockerImpl locker;
LockRequestCombo request(&locker);
ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_S));
@@ -197,7 +197,7 @@ TEST(LockManager, GrantRecursiveCompatibleConvertUp) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker;
+ LockerImpl locker;
LockRequestCombo request(&locker);
ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_IS));
@@ -225,7 +225,7 @@ TEST(LockManager, GrantRecursiveNonCompatibleConvertUp) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker;
+ LockerImpl locker;
LockRequestCombo request(&locker);
ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_S));
@@ -253,7 +253,7 @@ TEST(LockManager, GrantRecursiveNonCompatibleConvertDown) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker;
+ LockerImpl locker;
LockRequestCombo request(&locker);
ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_X));
@@ -281,8 +281,8 @@ TEST(LockManager, Conflict) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker1;
- MMAPV1LockerImpl locker2;
+ LockerImpl locker1;
+ LockerImpl locker2;
LockRequestCombo request1(&locker1);
LockRequestCombo request2(&locker2);
@@ -320,7 +320,7 @@ TEST(LockManager, MultipleConflict) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker;
+ LockerImpl locker;
TrackingLockGrantNotification notify;
LockRequest request[6];
@@ -353,10 +353,10 @@ TEST(LockManager, ConflictCancelWaiting) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker1;
+ LockerImpl locker1;
TrackingLockGrantNotification notify1;
- MMAPV1LockerImpl locker2;
+ LockerImpl locker2;
TrackingLockGrantNotification notify2;
LockRequest request1;
@@ -386,7 +386,7 @@ TEST(LockManager, ConflictCancelMultipleWaiting) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker;
+ LockerImpl locker;
TrackingLockGrantNotification notify;
LockRequest request[6];
@@ -419,8 +419,8 @@ TEST(LockManager, CancelWaitingConversionWeakModes) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker1;
- MMAPV1LockerImpl locker2;
+ LockerImpl locker1;
+ LockerImpl locker2;
LockRequestCombo request1(&locker1);
LockRequestCombo request2(&locker2);
@@ -454,8 +454,8 @@ TEST(LockManager, CancelWaitingConversionStrongModes) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker1;
- MMAPV1LockerImpl locker2;
+ LockerImpl locker1;
+ LockerImpl locker2;
LockRequestCombo request1(&locker1);
LockRequestCombo request2(&locker2);
@@ -489,8 +489,8 @@ TEST(LockManager, ConflictingConversion) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker1;
- MMAPV1LockerImpl locker2;
+ LockerImpl locker1;
+ LockerImpl locker2;
LockRequestCombo request1(&locker1);
LockRequestCombo request2(&locker2);
@@ -524,7 +524,7 @@ TEST(LockManager, ConflictingConversionInTheMiddle) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker;
+ LockerImpl locker;
TrackingLockGrantNotification notify;
LockRequest request[3];
@@ -556,11 +556,11 @@ TEST(LockManager, ConvertUpgrade) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker1;
+ LockerImpl locker1;
LockRequestCombo request1(&locker1);
ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
- MMAPV1LockerImpl locker2;
+ LockerImpl locker2;
LockRequestCombo request2(&locker2);
ASSERT(LOCK_OK == lockMgr.lock(resId, &request2, MODE_S));
@@ -577,11 +577,11 @@ TEST(LockManager, Downgrade) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker1;
+ LockerImpl locker1;
LockRequestCombo request1(&locker1);
ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_X));
- MMAPV1LockerImpl locker2;
+ LockerImpl locker2;
LockRequestCombo request2(&locker2);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request2, MODE_S));
@@ -602,14 +602,14 @@ static void checkConflict(LockMode existingMode, LockMode newMode, bool hasConfl
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl lockerExisting;
+ LockerImpl lockerExisting;
TrackingLockGrantNotification notifyExisting;
LockRequest requestExisting;
requestExisting.initNew(&lockerExisting, &notifyExisting);
ASSERT(LOCK_OK == lockMgr.lock(resId, &requestExisting, existingMode));
- MMAPV1LockerImpl lockerNew;
+ LockerImpl lockerNew;
TrackingLockGrantNotification notifyNew;
LockRequest requestNew;
requestNew.initNew(&lockerNew, &notifyNew);
@@ -651,19 +651,19 @@ TEST(LockManager, EnqueueAtFront) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl lockerX;
+ LockerImpl lockerX;
LockRequestCombo requestX(&lockerX);
ASSERT(LOCK_OK == lockMgr.lock(resId, &requestX, MODE_X));
// The subsequent request will block
- MMAPV1LockerImpl lockerLow;
+ LockerImpl lockerLow;
LockRequestCombo requestLow(&lockerLow);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestLow, MODE_X));
// This is a "queue jumping request", which will go before locker 2 above
- MMAPV1LockerImpl lockerHi;
+ LockerImpl lockerHi;
LockRequestCombo requestHi(&lockerHi);
requestHi.enqueueAtFront = true;
@@ -689,14 +689,14 @@ TEST(LockManager, CompatibleFirstImmediateGrant) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_GLOBAL, 0);
- MMAPV1LockerImpl locker1;
+ LockerImpl locker1;
LockRequestCombo request1(&locker1);
- MMAPV1LockerImpl locker2;
+ LockerImpl locker2;
LockRequestCombo request2(&locker2);
request2.compatibleFirst = true;
- MMAPV1LockerImpl locker3;
+ LockerImpl locker3;
LockRequestCombo request3(&locker3);
// Lock all in IS mode
@@ -705,14 +705,14 @@ TEST(LockManager, CompatibleFirstImmediateGrant) {
ASSERT(LOCK_OK == lockMgr.lock(resId, &request3, MODE_IS));
// Now an exclusive mode comes, which would block
- MMAPV1LockerImpl lockerX;
+ LockerImpl lockerX;
LockRequestCombo requestX(&lockerX);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestX, MODE_X));
// If an S comes, it should be granted, because of request2
{
- MMAPV1LockerImpl lockerS;
+ LockerImpl lockerS;
LockRequestCombo requestS(&lockerS);
ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
ASSERT(lockMgr.unlock(&requestS));
@@ -723,7 +723,7 @@ TEST(LockManager, CompatibleFirstImmediateGrant) {
// If S comes again, it should be granted, because of request2 still there
{
- MMAPV1LockerImpl lockerS;
+ LockerImpl lockerS;
LockRequestCombo requestS(&lockerS);
ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
ASSERT(lockMgr.unlock(&requestS));
@@ -733,7 +733,7 @@ TEST(LockManager, CompatibleFirstImmediateGrant) {
ASSERT(lockMgr.unlock(&request2));
{
- MMAPV1LockerImpl lockerS;
+ LockerImpl lockerS;
LockRequestCombo requestS(&lockerS);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
ASSERT(lockMgr.unlock(&requestS));
@@ -759,17 +759,17 @@ TEST(LockManager, CompatibleFirstGrantAlreadyQueued) {
for (LockMode writerMode : conflictingModes) {
for (UnblockMethod unblockMethod : unblockMethods) {
- MMAPV1LockerImpl locker1;
+ LockerImpl locker1;
LockRequestCombo request1(&locker1);
- MMAPV1LockerImpl locker2;
+ LockerImpl locker2;
LockRequestCombo request2(&locker2);
request2.compatibleFirst = true;
- MMAPV1LockerImpl locker3;
+ LockerImpl locker3;
LockRequestCombo request3(&locker3);
- MMAPV1LockerImpl locker4;
+ LockerImpl locker4;
LockRequestCombo request4(&locker4);
// Hold the lock in X and establish the S IX|X IS queue.
@@ -808,18 +808,18 @@ TEST(LockManager, CompatibleFirstDelayedGrant) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_GLOBAL, 0);
- MMAPV1LockerImpl lockerXInitial;
+ LockerImpl lockerXInitial;
LockRequestCombo requestXInitial(&lockerXInitial);
ASSERT(LOCK_OK == lockMgr.lock(resId, &requestXInitial, MODE_X));
- MMAPV1LockerImpl locker1;
+ LockerImpl locker1;
LockRequestCombo request1(&locker1);
- MMAPV1LockerImpl locker2;
+ LockerImpl locker2;
LockRequestCombo request2(&locker2);
request2.compatibleFirst = true;
- MMAPV1LockerImpl locker3;
+ LockerImpl locker3;
LockRequestCombo request3(&locker3);
// Lock all in IS mode (should block behind the global lock)
@@ -828,7 +828,7 @@ TEST(LockManager, CompatibleFirstDelayedGrant) {
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request3, MODE_IS));
// Now an exclusive mode comes, which would block behind the IS modes
- MMAPV1LockerImpl lockerX;
+ LockerImpl lockerX;
LockRequestCombo requestX(&lockerX);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestX, MODE_X));
@@ -840,7 +840,7 @@ TEST(LockManager, CompatibleFirstDelayedGrant) {
// If an S comes, it should be granted, because of request2
{
- MMAPV1LockerImpl lockerS;
+ LockerImpl lockerS;
LockRequestCombo requestS(&lockerS);
ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
ASSERT(lockMgr.unlock(&requestS));
@@ -851,7 +851,7 @@ TEST(LockManager, CompatibleFirstDelayedGrant) {
// If S comes again, it should be granted, because of request2 still there
{
- MMAPV1LockerImpl lockerS;
+ LockerImpl lockerS;
LockRequestCombo requestS(&lockerS);
ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
ASSERT(lockMgr.unlock(&requestS));
@@ -861,7 +861,7 @@ TEST(LockManager, CompatibleFirstDelayedGrant) {
ASSERT(lockMgr.unlock(&request2));
{
- MMAPV1LockerImpl lockerS;
+ LockerImpl lockerS;
LockRequestCombo requestS(&lockerS);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
ASSERT(lockMgr.unlock(&requestS));
@@ -876,22 +876,22 @@ TEST(LockManager, CompatibleFirstCancelWaiting) {
LockManager lockMgr;
const ResourceId resId(RESOURCE_GLOBAL, 0);
- MMAPV1LockerImpl lockerSInitial;
+ LockerImpl lockerSInitial;
LockRequestCombo requestSInitial(&lockerSInitial);
ASSERT(LOCK_OK == lockMgr.lock(resId, &requestSInitial, MODE_S));
- MMAPV1LockerImpl lockerX;
+ LockerImpl lockerX;
LockRequestCombo requestX(&lockerX);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestX, MODE_X));
- MMAPV1LockerImpl lockerPending;
+ LockerImpl lockerPending;
LockRequestCombo requestPending(&lockerPending);
requestPending.compatibleFirst = true;
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestPending, MODE_S));
// S1 is not granted yet, so the policy should still be FIFO
{
- MMAPV1LockerImpl lockerS;
+ LockerImpl lockerS;
LockRequestCombo requestS(&lockerS);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
ASSERT(lockMgr.unlock(&requestS));
@@ -901,7 +901,7 @@ TEST(LockManager, CompatibleFirstCancelWaiting) {
ASSERT(lockMgr.unlock(&requestPending));
{
- MMAPV1LockerImpl lockerS;
+ LockerImpl lockerS;
LockRequestCombo requestS(&lockerS);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
ASSERT(lockMgr.unlock(&requestS));
@@ -917,21 +917,21 @@ TEST(LockManager, Fairness) {
const ResourceId resId(RESOURCE_GLOBAL, 0);
// Start with some 'regular' intent locks
- MMAPV1LockerImpl lockerIS;
+ LockerImpl lockerIS;
LockRequestCombo requestIS(&lockerIS);
ASSERT(LOCK_OK == lockMgr.lock(resId, &requestIS, MODE_IS));
- MMAPV1LockerImpl lockerIX;
+ LockerImpl lockerIX;
LockRequestCombo requestIX(&lockerIX);
ASSERT(LOCK_OK == lockMgr.lock(resId, &requestIX, MODE_IX));
// Now a conflicting lock comes
- MMAPV1LockerImpl lockerX;
+ LockerImpl lockerX;
LockRequestCombo requestX(&lockerX);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestX, MODE_X));
// Now, whoever comes next should be blocked
- MMAPV1LockerImpl lockerIX1;
+ LockerImpl lockerIX1;
LockRequestCombo requestIX1(&lockerIX1);
ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestIX1, MODE_IX));
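Note on the tests above: they drive the global LockManager directly rather than going through a Locker's own lock()/unlock() API. Each LockRequest is initialized against its own LockerImpl plus a grant-notification object, and the assertions check the synchronous LOCK_OK / LOCK_WAITING result. A minimal sketch of that pattern, reusing the LockRequestCombo test helper seen above:

    LockManager lockMgr;
    const ResourceId resId(RESOURCE_GLOBAL, 0);

    LockerImpl locker;
    LockRequestCombo request(&locker);  // bundles a LockRequest with its notification
    request.compatibleFirst = true;     // allow compatible newcomers to bypass FIFO ordering

    ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_S));
    ASSERT(lockMgr.unlock(&request));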
diff --git a/src/mongo/db/concurrency/lock_manager_test_help.h b/src/mongo/db/concurrency/lock_manager_test_help.h
index d7c70515fab..cfdf04550b5 100644
--- a/src/mongo/db/concurrency/lock_manager_test_help.h
+++ b/src/mongo/db/concurrency/lock_manager_test_help.h
@@ -33,7 +33,7 @@
namespace mongo {
-class LockerForTests : public LockerImpl<false> {
+class LockerForTests : public LockerImpl {
public:
explicit LockerForTests(LockMode globalLockMode) {
lockGlobal(globalLockMode);
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 33e3d9b1a3e..5d649f06996 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -111,12 +111,6 @@ LockManager globalLockManager;
// once. See comments in the header file (begin/endTransaction) for more information.
const ResourceId resourceIdGlobal = ResourceId(RESOURCE_GLOBAL, ResourceId::SINGLETON_GLOBAL);
-// Flush lock. This is only used for the MMAP V1 storage engine and synchronizes journal writes
-// to the shared view and remaps. See the comments in the header for information on how MMAP V1
-// concurrency control works.
-const ResourceId resourceIdMMAPV1Flush =
- ResourceId(RESOURCE_MMAPV1_FLUSH, ResourceId::SINGLETON_MMAPV1_FLUSH);
-
// How often (in millis) to check for deadlock if a lock has not been granted for some time
const Milliseconds DeadlockTimeout = Milliseconds(500);
@@ -128,12 +122,8 @@ PartitionedInstanceWideLockStats globalStats;
} // namespace
-template <bool IsForMMAPV1>
-bool LockerImpl<IsForMMAPV1>::_shouldDelayUnlock(ResourceId resId, LockMode mode) const {
+bool LockerImpl::_shouldDelayUnlock(ResourceId resId, LockMode mode) const {
switch (resId.getType()) {
- // The flush lock must not participate in two-phase locking because we need to temporarily
- // yield it while blocked waiting to acquire other locks.
- case RESOURCE_MMAPV1_FLUSH:
case RESOURCE_MUTEX:
return false;
@@ -161,33 +151,27 @@ bool LockerImpl<IsForMMAPV1>::_shouldDelayUnlock(ResourceId resId, LockMode mode
}
}
-template <bool IsForMMAPV1>
-bool LockerImpl<IsForMMAPV1>::isW() const {
+bool LockerImpl::isW() const {
return getLockMode(resourceIdGlobal) == MODE_X;
}
-template <bool IsForMMAPV1>
-bool LockerImpl<IsForMMAPV1>::isR() const {
+bool LockerImpl::isR() const {
return getLockMode(resourceIdGlobal) == MODE_S;
}
-template <bool IsForMMAPV1>
-bool LockerImpl<IsForMMAPV1>::isLocked() const {
+bool LockerImpl::isLocked() const {
return getLockMode(resourceIdGlobal) != MODE_NONE;
}
-template <bool IsForMMAPV1>
-bool LockerImpl<IsForMMAPV1>::isWriteLocked() const {
+bool LockerImpl::isWriteLocked() const {
return isLockHeldForMode(resourceIdGlobal, MODE_IX);
}
-template <bool IsForMMAPV1>
-bool LockerImpl<IsForMMAPV1>::isReadLocked() const {
+bool LockerImpl::isReadLocked() const {
return isLockHeldForMode(resourceIdGlobal, MODE_IS);
}
-template <bool IsForMMAPV1>
-void LockerImpl<IsForMMAPV1>::dump() const {
+void LockerImpl::dump() const {
StringBuilder ss;
ss << "Locker id " << _id << " status: ";
@@ -257,27 +241,22 @@ void Locker::setGlobalThrottling(class TicketHolder* reading, class TicketHolder
ticketHolders[MODE_IX] = writing;
}
-template <bool IsForMMAPV1>
-LockerImpl<IsForMMAPV1>::LockerImpl()
+LockerImpl::LockerImpl()
: _id(idCounter.addAndFetch(1)), _wuowNestingLevel(0), _threadId(stdx::this_thread::get_id()) {}
-template <bool IsForMMAPV1>
-stdx::thread::id LockerImpl<IsForMMAPV1>::getThreadId() const {
+stdx::thread::id LockerImpl::getThreadId() const {
return _threadId;
}
-template <bool IsForMMAPV1>
-void LockerImpl<IsForMMAPV1>::updateThreadIdToCurrentThread() {
+void LockerImpl::updateThreadIdToCurrentThread() {
_threadId = stdx::this_thread::get_id();
}
-template <bool IsForMMAPV1>
-void LockerImpl<IsForMMAPV1>::unsetThreadId() {
+void LockerImpl::unsetThreadId() {
_threadId = stdx::thread::id(); // Reset to represent a non-executing thread.
}
-template <bool IsForMMAPV1>
-LockerImpl<IsForMMAPV1>::~LockerImpl() {
+LockerImpl::~LockerImpl() {
// Cannot delete the Locker while there are still outstanding requests, because the
// LockManager may attempt to access deleted memory. Besides it is probably incorrect
// to delete with unaccounted locks anyways.
@@ -290,8 +269,7 @@ LockerImpl<IsForMMAPV1>::~LockerImpl() {
_stats.reset();
}
-template <bool IsForMMAPV1>
-Locker::ClientState LockerImpl<IsForMMAPV1>::getClientState() const {
+Locker::ClientState LockerImpl::getClientState() const {
auto state = _clientState.load();
if (state == kActiveReader && hasLockPending())
state = kQueuedReader;
@@ -301,32 +279,23 @@ Locker::ClientState LockerImpl<IsForMMAPV1>::getClientState() const {
return state;
}
-template <bool IsForMMAPV1>
-LockResult LockerImpl<IsForMMAPV1>::lockGlobal(OperationContext* opCtx, LockMode mode) {
+LockResult LockerImpl::lockGlobal(OperationContext* opCtx, LockMode mode) {
LockResult result = _lockGlobalBegin(opCtx, mode, Date_t::max());
if (result == LOCK_WAITING) {
result = lockGlobalComplete(opCtx, Date_t::max());
}
- if (result == LOCK_OK) {
- lockMMAPV1Flush();
- }
-
return result;
}
-template <bool IsForMMAPV1>
-void LockerImpl<IsForMMAPV1>::reacquireTicket(OperationContext* opCtx) {
+void LockerImpl::reacquireTicket(OperationContext* opCtx) {
invariant(_modeForTicket != MODE_NONE);
auto acquireTicketResult = _acquireTicket(opCtx, _modeForTicket, Date_t::max());
invariant(acquireTicketResult == LOCK_OK);
}
-template <bool IsForMMAPV1>
-LockResult LockerImpl<IsForMMAPV1>::_acquireTicket(OperationContext* opCtx,
- LockMode mode,
- Date_t deadline) {
+LockResult LockerImpl::_acquireTicket(OperationContext* opCtx, LockMode mode, Date_t deadline) {
const bool reader = isSharedLockMode(mode);
auto holder = shouldAcquireTicket() ? ticketHolders[mode] : nullptr;
if (holder) {
@@ -345,10 +314,7 @@ LockResult LockerImpl<IsForMMAPV1>::_acquireTicket(OperationContext* opCtx,
return LOCK_OK;
}
-template <bool IsForMMAPV1>
-LockResult LockerImpl<IsForMMAPV1>::_lockGlobalBegin(OperationContext* opCtx,
- LockMode mode,
- Date_t deadline) {
+LockResult LockerImpl::_lockGlobalBegin(OperationContext* opCtx, LockMode mode, Date_t deadline) {
dassert(isLocked() == (_modeForTicket != MODE_NONE));
if (_modeForTicket == MODE_NONE) {
auto acquireTicketResult = _acquireTicket(opCtx, mode, deadline);
@@ -376,52 +342,11 @@ LockResult LockerImpl<IsForMMAPV1>::_lockGlobalBegin(OperationContext* opCtx,
return result;
}
-template <bool IsForMMAPV1>
-LockResult LockerImpl<IsForMMAPV1>::lockGlobalComplete(OperationContext* opCtx, Date_t deadline) {
+LockResult LockerImpl::lockGlobalComplete(OperationContext* opCtx, Date_t deadline) {
return lockComplete(opCtx, resourceIdGlobal, getLockMode(resourceIdGlobal), deadline, false);
}
-template <bool IsForMMAPV1>
-void LockerImpl<IsForMMAPV1>::lockMMAPV1Flush() {
- if (!IsForMMAPV1)
- return;
-
- // The flush lock always has a reference count of 1, because it is dropped at the end of
- // each write unit of work in order to allow the flush thread to run. See the comments in
- // the header for information on how the MMAP V1 journaling system works.
- LockRequest* globalLockRequest = _requests.find(resourceIdGlobal).objAddr();
- if (globalLockRequest->recursiveCount == 1) {
- invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
- }
-
- dassert(getLockMode(resourceIdMMAPV1Flush) == _getModeForMMAPV1FlushLock());
-}
-
-template <bool IsForMMAPV1>
-void LockerImpl<IsForMMAPV1>::downgradeGlobalXtoSForMMAPV1() {
- invariant(!inAWriteUnitOfWork());
-
- LockRequest* globalLockRequest = _requests.find(resourceIdGlobal).objAddr();
- invariant(globalLockRequest->mode == MODE_X);
- invariant(globalLockRequest->recursiveCount == 1);
- invariant(_modeForTicket == MODE_X);
- // Note that this locker will not actually have a ticket (as MODE_X has no TicketHolder) or
- // acquire one now, but at most a single thread can be in this downgraded MODE_S situation,
- // so it's OK.
-
- // Making this call here will record lock downgrades as acquisitions, which is acceptable
- globalStats.recordAcquisition(_id, resourceIdGlobal, MODE_S);
- _stats.recordAcquisition(resourceIdGlobal, MODE_S);
-
- globalLockManager.downgrade(globalLockRequest, MODE_S);
-
- if (IsForMMAPV1) {
- invariant(unlock(resourceIdMMAPV1Flush));
- }
-}
-
-template <bool IsForMMAPV1>
-bool LockerImpl<IsForMMAPV1>::unlockGlobal() {
+bool LockerImpl::unlockGlobal() {
if (!unlock(resourceIdGlobal)) {
return false;
}
@@ -443,17 +368,11 @@ bool LockerImpl<IsForMMAPV1>::unlockGlobal() {
return true;
}
-template <bool IsForMMAPV1>
-void LockerImpl<IsForMMAPV1>::beginWriteUnitOfWork() {
- // Sanity check that write transactions under MMAP V1 have acquired the flush lock, so we
- // don't allow partial changes to be written.
- dassert(!IsForMMAPV1 || isLockHeldForMode(resourceIdMMAPV1Flush, MODE_IX));
-
+void LockerImpl::beginWriteUnitOfWork() {
_wuowNestingLevel++;
}
-template <bool IsForMMAPV1>
-void LockerImpl<IsForMMAPV1>::endWriteUnitOfWork() {
+void LockerImpl::endWriteUnitOfWork() {
invariant(_wuowNestingLevel > 0);
if (--_wuowNestingLevel > 0) {
@@ -475,16 +394,9 @@ void LockerImpl<IsForMMAPV1>::endWriteUnitOfWork() {
}
it.next();
}
-
- // For MMAP V1, we need to yield the flush lock so that the flush thread can run
- if (IsForMMAPV1) {
- invariant(unlock(resourceIdMMAPV1Flush));
- invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
- }
}
-template <bool IsForMMAPV1>
-LockResult LockerImpl<IsForMMAPV1>::lock(
+LockResult LockerImpl::lock(
OperationContext* opCtx, ResourceId resId, LockMode mode, Date_t deadline, bool checkDeadlock) {
const LockResult result = lockBegin(opCtx, resId, mode);
@@ -500,14 +412,12 @@ LockResult LockerImpl<IsForMMAPV1>::lock(
return lockComplete(opCtx, resId, mode, deadline, checkDeadlock);
}
-template <bool IsForMMAPV1>
-void LockerImpl<IsForMMAPV1>::downgrade(ResourceId resId, LockMode newMode) {
+void LockerImpl::downgrade(ResourceId resId, LockMode newMode) {
LockRequestsMap::Iterator it = _requests.find(resId);
globalLockManager.downgrade(it.objAddr(), newMode);
}
-template <bool IsForMMAPV1>
-bool LockerImpl<IsForMMAPV1>::unlock(ResourceId resId) {
+bool LockerImpl::unlock(ResourceId resId) {
LockRequestsMap::Iterator it = _requests.find(resId);
if (inAWriteUnitOfWork() && _shouldDelayUnlock(it.key(), (it->mode))) {
if (!it->unlockPending) {
@@ -527,8 +437,7 @@ bool LockerImpl<IsForMMAPV1>::unlock(ResourceId resId) {
return _unlockImpl(&it);
}
-template <bool IsForMMAPV1>
-LockMode LockerImpl<IsForMMAPV1>::getLockMode(ResourceId resId) const {
+LockMode LockerImpl::getLockMode(ResourceId resId) const {
scoped_spinlock scopedLock(_lock);
const LockRequestsMap::ConstIterator it = _requests.find(resId);
@@ -538,13 +447,11 @@ LockMode LockerImpl<IsForMMAPV1>::getLockMode(ResourceId resId) const {
return it->mode;
}
-template <bool IsForMMAPV1>
-bool LockerImpl<IsForMMAPV1>::isLockHeldForMode(ResourceId resId, LockMode mode) const {
+bool LockerImpl::isLockHeldForMode(ResourceId resId, LockMode mode) const {
return isModeCovered(mode, getLockMode(resId));
}
-template <bool IsForMMAPV1>
-bool LockerImpl<IsForMMAPV1>::isDbLockedForMode(StringData dbName, LockMode mode) const {
+bool LockerImpl::isDbLockedForMode(StringData dbName, LockMode mode) const {
invariant(nsIsDbOnly(dbName));
if (isW())
@@ -556,8 +463,7 @@ bool LockerImpl<IsForMMAPV1>::isDbLockedForMode(StringData dbName, LockMode mode
return isLockHeldForMode(resIdDb, mode);
}
-template <bool IsForMMAPV1>
-bool LockerImpl<IsForMMAPV1>::isCollectionLockedForMode(StringData ns, LockMode mode) const {
+bool LockerImpl::isCollectionLockedForMode(StringData ns, LockMode mode) const {
invariant(nsIsFull(ns));
if (isW())
@@ -592,8 +498,7 @@ bool LockerImpl<IsForMMAPV1>::isCollectionLockedForMode(StringData ns, LockMode
return false;
}
-template <bool IsForMMAPV1>
-ResourceId LockerImpl<IsForMMAPV1>::getWaitingResource() const {
+ResourceId LockerImpl::getWaitingResource() const {
scoped_spinlock scopedLock(_lock);
LockRequestsMap::ConstIterator it = _requests.begin();
@@ -609,8 +514,7 @@ ResourceId LockerImpl<IsForMMAPV1>::getWaitingResource() const {
return ResourceId();
}
-template <bool IsForMMAPV1>
-void LockerImpl<IsForMMAPV1>::getLockerInfo(LockerInfo* lockerInfo) const {
+void LockerImpl::getLockerInfo(LockerInfo* lockerInfo) const {
invariant(lockerInfo);
// Zero-out the contents
@@ -636,15 +540,13 @@ void LockerImpl<IsForMMAPV1>::getLockerInfo(LockerInfo* lockerInfo) const {
lockerInfo->stats.append(_stats);
}
-template <bool IsForMMAPV1>
-boost::optional<Locker::LockerInfo> LockerImpl<IsForMMAPV1>::getLockerInfo() const {
+boost::optional<Locker::LockerInfo> LockerImpl::getLockerInfo() const {
Locker::LockerInfo lockerInfo;
getLockerInfo(&lockerInfo);
return std::move(lockerInfo);
}
-template <bool IsForMMAPV1>
-bool LockerImpl<IsForMMAPV1>::saveLockStateAndUnlock(Locker::LockSnapshot* stateOut) {
+bool LockerImpl::saveLockStateAndUnlock(Locker::LockSnapshot* stateOut) {
// We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
invariant(!inAWriteUnitOfWork());
@@ -682,8 +584,7 @@ bool LockerImpl<IsForMMAPV1>::saveLockStateAndUnlock(Locker::LockSnapshot* state
continue;
// We should never have to save and restore metadata locks.
- invariant((IsForMMAPV1 && (resourceIdMMAPV1Flush == resId)) ||
- RESOURCE_DATABASE == resId.getType() || RESOURCE_COLLECTION == resId.getType() ||
+ invariant(RESOURCE_DATABASE == resId.getType() || RESOURCE_COLLECTION == resId.getType() ||
(RESOURCE_GLOBAL == resId.getType() && isSharedLockMode(it->mode)));
// And, stuff the info into the out parameter.
@@ -703,9 +604,7 @@ bool LockerImpl<IsForMMAPV1>::saveLockStateAndUnlock(Locker::LockSnapshot* state
return true;
}
-template <bool IsForMMAPV1>
-void LockerImpl<IsForMMAPV1>::restoreLockState(OperationContext* opCtx,
- const Locker::LockSnapshot& state) {
+void LockerImpl::restoreLockState(OperationContext* opCtx, const Locker::LockSnapshot& state) {
// We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
invariant(!inAWriteUnitOfWork());
invariant(_modeForTicket == MODE_NONE);
@@ -719,21 +618,12 @@ void LockerImpl<IsForMMAPV1>::restoreLockState(OperationContext* opCtx,
invariant(LOCK_OK == lockGlobal(opCtx, state.globalMode));
for (; it != state.locks.end(); it++) {
- // This is a sanity check that lockGlobal restored the MMAP V1 flush lock in the
- // expected mode.
- if (IsForMMAPV1 && (it->resourceId == resourceIdMMAPV1Flush)) {
- invariant(it->mode == _getModeForMMAPV1FlushLock());
- } else {
- invariant(LOCK_OK == lock(it->resourceId, it->mode));
- }
+ invariant(LOCK_OK == lock(it->resourceId, it->mode));
}
invariant(_modeForTicket != MODE_NONE);
}
-template <bool IsForMMAPV1>
-LockResult LockerImpl<IsForMMAPV1>::lockBegin(OperationContext* opCtx,
- ResourceId resId,
- LockMode mode) {
+LockResult LockerImpl::lockBegin(OperationContext* opCtx, ResourceId resId, LockMode mode) {
dassert(!getWaitingResource().isValid());
LockRequest* request;
@@ -769,7 +659,7 @@ LockResult LockerImpl<IsForMMAPV1>::lockBegin(OperationContext* opCtx,
// Give priority to the full modes for global, parallel batch writer mode,
// and flush lock so we don't stall global operations such as shutdown or flush.
const ResourceType resType = resId.getType();
- if (resType == RESOURCE_GLOBAL || (IsForMMAPV1 && resId == resourceIdMMAPV1Flush)) {
+ if (resType == RESOURCE_GLOBAL) {
if (mode == MODE_S || mode == MODE_X) {
request->enqueueAtFront = true;
request->compatibleFirst = true;
@@ -781,10 +671,6 @@ LockResult LockerImpl<IsForMMAPV1>::lockBegin(OperationContext* opCtx,
const LockRequestsMap::Iterator itGlobal = _requests.find(resourceIdGlobal);
invariant(itGlobal->recursiveCount > 0);
invariant(itGlobal->mode != MODE_NONE);
-
- // Check the MMAP V1 flush lock is held in the appropriate mode
- invariant(!IsForMMAPV1 ||
- isLockHeldForMode(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
};
}
@@ -813,25 +699,8 @@ LockResult LockerImpl<IsForMMAPV1>::lockBegin(OperationContext* opCtx,
return result;
}
-template <bool IsForMMAPV1>
-LockResult LockerImpl<IsForMMAPV1>::lockComplete(
+LockResult LockerImpl::lockComplete(
OperationContext* opCtx, ResourceId resId, LockMode mode, Date_t deadline, bool checkDeadlock) {
- // Under MMAP V1 engine a deadlock can occur if a thread goes to sleep waiting on
- // DB lock, while holding the flush lock, so it has to be released. This is only
- // correct to do if not in a write unit of work.
- const bool yieldFlushLock = IsForMMAPV1 && !inAWriteUnitOfWork() &&
- resId.getType() != RESOURCE_GLOBAL && resId.getType() != RESOURCE_MUTEX &&
- resId != resourceIdMMAPV1Flush;
- if (yieldFlushLock) {
- invariant(unlock(resourceIdMMAPV1Flush));
- }
- auto relockFlushLockGuard = MakeGuard([&] {
- if (yieldFlushLock) {
- // We cannot obey the timeout here, because it is not correct to return from the lock
- // request with the flush lock released.
- invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
- }
- });
LockResult result;
Milliseconds timeout;
@@ -932,14 +801,12 @@ LockResult LockerImpl<IsForMMAPV1>::lockComplete(
return result;
}
-template <bool IsForMMAPV1>
-void LockerImpl<IsForMMAPV1>::releaseTicket() {
+void LockerImpl::releaseTicket() {
invariant(_modeForTicket != MODE_NONE);
_releaseTicket();
}
-template <bool IsForMMAPV1>
-void LockerImpl<IsForMMAPV1>::_releaseTicket() {
+void LockerImpl::_releaseTicket() {
auto holder = shouldAcquireTicket() ? ticketHolders[_modeForTicket] : nullptr;
if (holder) {
holder->release();
@@ -947,8 +814,7 @@ void LockerImpl<IsForMMAPV1>::_releaseTicket() {
_clientState.store(kInactive);
}
-template <bool IsForMMAPV1>
-bool LockerImpl<IsForMMAPV1>::_unlockImpl(LockRequestsMap::Iterator* it) {
+bool LockerImpl::_unlockImpl(LockRequestsMap::Iterator* it) {
if (globalLockManager.unlock(it->objAddr())) {
if (it->key() == resourceIdGlobal) {
invariant(_modeForTicket != MODE_NONE);
@@ -970,26 +836,7 @@ bool LockerImpl<IsForMMAPV1>::_unlockImpl(LockRequestsMap::Iterator* it) {
return false;
}
-template <bool IsForMMAPV1>
-LockMode LockerImpl<IsForMMAPV1>::_getModeForMMAPV1FlushLock() const {
- invariant(IsForMMAPV1);
-
- LockMode mode = getLockMode(resourceIdGlobal);
- switch (mode) {
- case MODE_X:
- case MODE_IX:
- return MODE_IX;
- case MODE_S:
- case MODE_IS:
- return MODE_IS;
- default:
- MONGO_UNREACHABLE;
- return MODE_NONE;
- }
-}
-
-template <bool IsForMMAPV1>
-bool LockerImpl<IsForMMAPV1>::isGlobalLockedRecursively() {
+bool LockerImpl::isGlobalLockedRecursively() {
auto globalLockRequest = _requests.find(resourceIdGlobal);
return !globalLockRequest.finished() && globalLockRequest->recursiveCount > 1;
}
@@ -998,77 +845,6 @@ bool LockerImpl<IsForMMAPV1>::isGlobalLockedRecursively() {
// Auto classes
//
-AutoYieldFlushLockForMMAPV1Commit::AutoYieldFlushLockForMMAPV1Commit(Locker* locker)
- : _locker(static_cast<MMAPV1LockerImpl*>(locker)) {
- // Explicit yielding of the flush lock should happen only at global synchronization points
- // such as database drop. There should not be any active writes at these points.
- invariant(!_locker->inAWriteUnitOfWork());
-
- if (isMMAPV1()) {
- invariant(_locker->unlock(resourceIdMMAPV1Flush));
- }
-}
-
-AutoYieldFlushLockForMMAPV1Commit::~AutoYieldFlushLockForMMAPV1Commit() {
- if (isMMAPV1()) {
- invariant(LOCK_OK ==
- _locker->lock(resourceIdMMAPV1Flush, _locker->_getModeForMMAPV1FlushLock()));
- }
-}
-
-
-AutoAcquireFlushLockForMMAPV1Commit::AutoAcquireFlushLockForMMAPV1Commit(Locker* locker)
- : _locker(locker), _released(false) {
- // The journal thread acquiring the journal lock in S-mode opens opportunity for deadlock
- // involving operations which do not acquire and release the Oplog collection's X lock
- // inside a WUOW (see SERVER-17416 for the sequence of events), therefore acquire it with
- // check for deadlock and back-off if one is encountered.
- //
- // This exposes theoretical chance that we might starve the journaling system, but given
- // that these deadlocks happen extremely rarely and are usually due to incorrect locking
- // policy, and we have the deadlock counters as part of the locking statistics, this is a
- // reasonable handling.
- //
- // In the worst case, if we are to starve the journaling system, the server will shut down
- // due to too much uncommitted in-memory journal, but won't have corruption.
-
- while (true) {
- LockResult result = _locker->lock(resourceIdMMAPV1Flush, MODE_S, Date_t::max(), true);
- if (result == LOCK_OK) {
- break;
- }
-
- invariant(result == LOCK_DEADLOCK);
-
- warning() << "Delayed journaling in order to avoid deadlock during MMAP V1 journal "
- << "lock acquisition. See the previous messages for information on the "
- << "involved threads.";
- }
-}
-
-void AutoAcquireFlushLockForMMAPV1Commit::upgradeFlushLockToExclusive() {
- // This should not be able to deadlock, since we already hold the S journal lock, which
- // means all writers are kicked out. Readers always yield the journal lock if they block
- // waiting on any other lock.
- invariant(LOCK_OK == _locker->lock(resourceIdMMAPV1Flush, MODE_X, Date_t::max(), false));
-
- // Lock bumps the recursive count. Drop it back down so that the destructor doesn't
- // complain.
- invariant(!_locker->unlock(resourceIdMMAPV1Flush));
-}
-
-void AutoAcquireFlushLockForMMAPV1Commit::release() {
- if (!_released) {
- invariant(_locker->unlock(resourceIdMMAPV1Flush));
- _released = true;
- }
-}
-
-AutoAcquireFlushLockForMMAPV1Commit::~AutoAcquireFlushLockForMMAPV1Commit() {
- release();
-}
-
-
namespace {
/**
* Periodically purges unused lock buckets. The first time the lock is used again after
@@ -1105,12 +881,6 @@ void resetGlobalLockStats() {
globalStats.reset();
}
-
-// Ensures that there are two instances compiled for LockerImpl for the two values of the
-// template argument.
-template class LockerImpl<true>;
-template class LockerImpl<false>;
-
// Definition for the hardcoded localdb and oplog collection info
const ResourceId resourceIdLocalDB = ResourceId(RESOURCE_DATABASE, StringData("local"));
const ResourceId resourceIdOplog = ResourceId(RESOURCE_COLLECTION, StringData("local.oplog.rs"));
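With the flush-lock carve-outs gone, _shouldDelayUnlock() is all that remains of the two-phase-locking decision: inside a WriteUnitOfWork, unlock() of a two-phase resource only marks the request unlockPending, and the actual release happens in endWriteUnitOfWork(). A sketch of the observable behavior, assuming unlock() reports false while the release is deferred (the two-phase-locking tests in lock_state_test.cpp below assert exactly this), with resId standing for a collection resource:

    LockerImpl locker;
    locker.lockGlobal(MODE_IX);
    invariant(LOCK_OK == locker.lock(resId, MODE_X));

    locker.beginWriteUnitOfWork();
    invariant(!locker.unlock(resId));                    // deferred: marked unlockPending
    invariant(locker.isLockHeldForMode(resId, MODE_X));  // still held inside the WUOW
    locker.endWriteUnitOfWork();                         // pending unlocks released here

    invariant(locker.getLockMode(resId) == MODE_NONE);
    locker.unlockGlobal();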
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index c0e152d73b2..119d7f26da6 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -89,10 +89,7 @@ private:
*
* All instances reference a single global lock manager.
*
- * @param IsForMMAPV1 Whether to compile-in the flush lock functionality, which is specific to
- * the way the MMAP V1 (legacy) storage engine does commit concurrency control.
*/
-template <bool IsForMMAPV1>
class LockerImpl : public Locker {
public:
/**
@@ -144,9 +141,7 @@ public:
virtual LockResult lockGlobalComplete(Date_t deadline) {
return lockGlobalComplete(nullptr, deadline);
}
- virtual void lockMMAPV1Flush();
- virtual void downgradeGlobalXtoSForMMAPV1();
virtual bool unlockGlobal();
virtual void beginWriteUnitOfWork();
@@ -257,8 +252,6 @@ public:
}
private:
- friend class AutoYieldFlushLockForMMAPV1Commit;
-
typedef FastMapNoAlloc<ResourceId, LockRequest> LockRequestsMap;
/**
@@ -274,13 +267,6 @@ private:
bool _unlockImpl(LockRequestsMap::Iterator* it);
/**
- * MMAP V1 locking code yields and re-acquires the flush lock occasionally in order to
- * allow the flush thread to proceed. This call returns in what mode the flush lock should be
- * acquired. It is based on the type of the operation (IS for readers, IX for writers).
- */
- LockMode _getModeForMMAPV1FlushLock() const;
-
- /**
* Whether we should use two phase locking. Returns true if the particular lock's release should
* be delayed until the end of the operation.
*
@@ -371,72 +357,6 @@ public:
}
};
-typedef LockerImpl<false> DefaultLockerImpl;
-typedef LockerImpl<true> MMAPV1LockerImpl;
-
-
-/**
- * At global synchronization points, such as drop database we are running under a global
- * exclusive lock and without an active write unit of work, doing changes which require global
- * commit. This utility allows the flush lock to be temporarily dropped so the flush thread
- * could run in such circumstances. Should not be used where write units of work are used,
- * because these have different mechanism of yielding the flush lock.
- */
-class AutoYieldFlushLockForMMAPV1Commit {
-public:
- AutoYieldFlushLockForMMAPV1Commit(Locker* locker);
- ~AutoYieldFlushLockForMMAPV1Commit();
-
-private:
- MMAPV1LockerImpl* const _locker;
-};
-
-
-/**
- * This explains how the MMAP V1 durability system is implemented.
- *
- * Every server operation (OperationContext), must call Locker::lockGlobal as the first lock
- * action (it is illegal to acquire any other locks without calling this first). This action
- * acquires the global and flush locks in the appropriate modes (IS for read operations, IX
- * for write operations). Having the flush lock in one of these modes indicates to the flush
- * thread that there is an active reader or writer.
- *
- * Whenever the flush thread (dur.cpp) activates, it goes through the following steps:
- *
- * Acquire the flush lock in S mode using AutoAcquireFlushLockForMMAPV1Commit. This waits until
- * all current write activity on the system completes and does not allow any new operations to
- * start.
- *
- * Once the S lock is granted, the flush thread writes the journal entries to disk (it is
- * guaranteed that there will not be any modifications) and applies them to the shared view.
- *
- * After that, it upgrades the S lock to X and remaps the private view.
- *
- * NOTE: There should be only one usage of this class and this should be in dur.cpp
- */
-class AutoAcquireFlushLockForMMAPV1Commit {
-public:
- AutoAcquireFlushLockForMMAPV1Commit(Locker* locker);
- ~AutoAcquireFlushLockForMMAPV1Commit();
-
- /**
- * We need the exclusive lock in order to do the shared view remap.
- */
- void upgradeFlushLockToExclusive();
-
- /**
- * Allows the acquired flush lock to be prematurely released. This is helpful for the case
- * where we know that we won't be doing a remap after gathering the write intents, so the
- * rest can be done outside of flush lock.
- */
- void release();
-
-private:
- Locker* const _locker;
- bool _released;
-};
-
-
/**
* Retrieves the global lock manager instance.
*/
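With the IsForMMAPV1 template parameter and the DefaultLockerImpl/MMAPV1LockerImpl typedefs removed, there is a single locker type left to construct, and the remaining hunks in this commit converge on one of two installation patterns. A sketch of both, as they appear in storage_engine_init.cpp and session.cpp below:

    // Install a fresh locker on a new OperationContext:
    opCtx->setLockState(stdx::make_unique<LockerImpl>());

    // Or swap one in, taking ownership of the locker that was there before:
    auto previous = opCtx->swapLockState(stdx::make_unique<LockerImpl>());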
diff --git a/src/mongo/db/concurrency/lock_state_test.cpp b/src/mongo/db/concurrency/lock_state_test.cpp
index 4aa0888f51e..0bd74aeb209 100644
--- a/src/mongo/db/concurrency/lock_state_test.cpp
+++ b/src/mongo/db/concurrency/lock_state_test.cpp
@@ -46,7 +46,7 @@ namespace mongo {
TEST(LockerImpl, LockNoConflict) {
const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
- MMAPV1LockerImpl locker;
+ LockerImpl locker;
locker.lockGlobal(MODE_IX);
ASSERT(LOCK_OK == locker.lock(resId, MODE_X));
@@ -64,7 +64,7 @@ TEST(LockerImpl, LockNoConflict) {
TEST(LockerImpl, ReLockNoConflict) {
const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
- MMAPV1LockerImpl locker;
+ LockerImpl locker;
locker.lockGlobal(MODE_IX);
ASSERT(LOCK_OK == locker.lock(resId, MODE_S));
@@ -82,11 +82,11 @@ TEST(LockerImpl, ReLockNoConflict) {
TEST(LockerImpl, ConflictWithTimeout) {
const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
- DefaultLockerImpl locker1;
+ LockerImpl locker1;
ASSERT(LOCK_OK == locker1.lockGlobal(MODE_IX));
ASSERT(LOCK_OK == locker1.lock(resId, MODE_X));
- DefaultLockerImpl locker2;
+ LockerImpl locker2;
ASSERT(LOCK_OK == locker2.lockGlobal(MODE_IX));
ASSERT(LOCK_TIMEOUT == locker2.lock(resId, MODE_S, Date_t::now()));
@@ -101,11 +101,11 @@ TEST(LockerImpl, ConflictWithTimeout) {
TEST(LockerImpl, ConflictUpgradeWithTimeout) {
const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
- DefaultLockerImpl locker1;
+ LockerImpl locker1;
ASSERT(LOCK_OK == locker1.lockGlobal(MODE_IS));
ASSERT(LOCK_OK == locker1.lock(resId, MODE_S));
- DefaultLockerImpl locker2;
+ LockerImpl locker2;
ASSERT(LOCK_OK == locker2.lockGlobal(MODE_IS));
ASSERT(LOCK_OK == locker2.lock(resId, MODE_S));
@@ -118,7 +118,7 @@ TEST(LockerImpl, ConflictUpgradeWithTimeout) {
TEST(LockerImpl, ReadTransaction) {
- DefaultLockerImpl locker;
+ LockerImpl locker;
locker.lockGlobal(MODE_IS);
locker.unlockGlobal();
@@ -133,12 +133,12 @@ TEST(LockerImpl, ReadTransaction) {
}
/**
- * Test that saveMMAPV1LockerImpl works by examining the output.
+ * Test that saveLockerImpl works by examining the output.
*/
TEST(LockerImpl, saveAndRestoreGlobal) {
Locker::LockSnapshot lockInfo;
- DefaultLockerImpl locker;
+ LockerImpl locker;
// No lock requests made, no locks held.
locker.saveLockStateAndUnlock(&lockInfo);
@@ -165,7 +165,7 @@ TEST(LockerImpl, saveAndRestoreGlobal) {
TEST(LockerImpl, saveAndRestoreGlobalAcquiredTwice) {
Locker::LockSnapshot lockInfo;
- DefaultLockerImpl locker;
+ LockerImpl locker;
// No lock requests made, no locks held.
locker.saveLockStateAndUnlock(&lockInfo);
@@ -186,12 +186,12 @@ TEST(LockerImpl, saveAndRestoreGlobalAcquiredTwice) {
}
/**
- * Tests that restoreMMAPV1LockerImpl works by locking a db and collection and saving + restoring.
+ * Tests that restoreLockerImpl works by locking a db and collection and saving + restoring.
*/
TEST(LockerImpl, saveAndRestoreDBAndCollection) {
Locker::LockSnapshot lockInfo;
- DefaultLockerImpl locker;
+ LockerImpl locker;
const ResourceId resIdDatabase(RESOURCE_DATABASE, "TestDB"_sd);
const ResourceId resIdCollection(RESOURCE_COLLECTION, "TestDB.collection"_sd);
@@ -219,7 +219,7 @@ TEST(LockerImpl, saveAndRestoreDBAndCollection) {
TEST(LockerImpl, DefaultLocker) {
const ResourceId resId(RESOURCE_DATABASE, "TestDB"_sd);
- DefaultLockerImpl locker;
+ LockerImpl locker;
ASSERT_EQUALS(LOCK_OK, locker.lockGlobal(MODE_IX));
ASSERT_EQUALS(LOCK_OK, locker.lock(resId, MODE_X));
@@ -234,32 +234,13 @@ TEST(LockerImpl, DefaultLocker) {
ASSERT(locker.unlockGlobal());
}
-TEST(LockerImpl, MMAPV1Locker) {
- const ResourceId resId(RESOURCE_DATABASE, "TestDB"_sd);
-
- MMAPV1LockerImpl locker;
- ASSERT_EQUALS(LOCK_OK, locker.lockGlobal(MODE_IX));
- ASSERT_EQUALS(LOCK_OK, locker.lock(resId, MODE_X));
-
- // Make sure the flush lock IS held
- Locker::LockerInfo info;
- locker.getLockerInfo(&info);
- ASSERT(!info.waitingResource.isValid());
- ASSERT_EQUALS(3U, info.locks.size());
- ASSERT_EQUALS(RESOURCE_GLOBAL, info.locks[0].resourceId.getType());
- ASSERT_EQUALS(RESOURCE_MMAPV1_FLUSH, info.locks[1].resourceId.getType());
- ASSERT_EQUALS(resId, info.locks[2].resourceId);
-
- ASSERT(locker.unlockGlobal());
-}
-
TEST(LockerImpl, CanceledDeadlockUnblocks) {
const ResourceId db1(RESOURCE_DATABASE, "db1"_sd);
const ResourceId db2(RESOURCE_DATABASE, "db2"_sd);
- DefaultLockerImpl locker1;
- DefaultLockerImpl locker2;
- DefaultLockerImpl locker3;
+ LockerImpl locker1;
+ LockerImpl locker2;
+ LockerImpl locker3;
ASSERT(LOCK_OK == locker1.lockGlobal(MODE_IX));
ASSERT(LOCK_OK == locker1.lock(db1, MODE_S));
@@ -315,7 +296,7 @@ TEST(LockerImpl, SharedLocksShouldTwoPhaseLockIsTrue) {
const ResourceId resId3(RESOURCE_COLLECTION, "TestDB.collection3"_sd);
const ResourceId resId4(RESOURCE_COLLECTION, "TestDB.collection4"_sd);
- DefaultLockerImpl locker;
+ LockerImpl locker;
locker.setSharedLocksShouldTwoPhaseLock(true);
ASSERT_EQ(LOCK_OK, locker.lockGlobal(MODE_IS));
@@ -363,7 +344,7 @@ TEST(LockerImpl, ModeIXAndXLockParticipatesInTwoPhaseLocking) {
const ResourceId resId3(RESOURCE_COLLECTION, "TestDB.collection3"_sd);
const ResourceId resId4(RESOURCE_COLLECTION, "TestDB.collection4"_sd);
- DefaultLockerImpl locker;
+ LockerImpl locker;
ASSERT_EQ(LOCK_OK, locker.lockGlobal(MODE_IX));
ASSERT_EQ(locker.getLockMode(globalResId), MODE_IX);
@@ -402,8 +383,8 @@ TEST(LockerImpl, OverrideLockRequestTimeout) {
const ResourceId resIdFirstDB(RESOURCE_DATABASE, "FirstDB"_sd);
const ResourceId resIdSecondDB(RESOURCE_DATABASE, "SecondDB"_sd);
- DefaultLockerImpl locker1;
- DefaultLockerImpl locker2;
+ LockerImpl locker1;
+ LockerImpl locker2;
// Set up locker2 to override lock requests' provided timeout if greater than 1000 milliseconds.
locker2.setMaxLockTimeout(Milliseconds(1000));
@@ -437,8 +418,8 @@ TEST(LockerImpl, DoNotWaitForLockAcquisition) {
const ResourceId resIdFirstDB(RESOURCE_DATABASE, "FirstDB"_sd);
const ResourceId resIdSecondDB(RESOURCE_DATABASE, "SecondDB"_sd);
- DefaultLockerImpl locker1;
- DefaultLockerImpl locker2;
+ LockerImpl locker1;
+ LockerImpl locker2;
// Set up locker2 to immediately return if a lock is unavailable, regardless of supplied
// deadlines in the lock request.
@@ -491,7 +472,7 @@ TEST(LockerImpl, GetLockerInfoShouldReportHeldLocks) {
const ResourceId collectionId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
// Take an exclusive lock on the collection.
- DefaultLockerImpl locker;
+ LockerImpl locker;
ASSERT_EQ(LOCK_OK, locker.lockGlobal(MODE_IX));
ASSERT_EQ(LOCK_OK, locker.lock(dbId, MODE_IX));
ASSERT_EQ(LOCK_OK, locker.lock(collectionId, MODE_X));
@@ -516,13 +497,13 @@ TEST(LockerImpl, GetLockerInfoShouldReportPendingLocks) {
const ResourceId collectionId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
// Take an exclusive lock on the collection.
- DefaultLockerImpl successfulLocker;
+ LockerImpl successfulLocker;
ASSERT_EQ(LOCK_OK, successfulLocker.lockGlobal(MODE_IX));
ASSERT_EQ(LOCK_OK, successfulLocker.lock(dbId, MODE_IX));
ASSERT_EQ(LOCK_OK, successfulLocker.lock(collectionId, MODE_X));
// Now attempt to get conflicting locks.
- DefaultLockerImpl conflictingLocker;
+ LockerImpl conflictingLocker;
ASSERT_EQ(LOCK_OK, conflictingLocker.lockGlobal(MODE_IS));
ASSERT_EQ(LOCK_OK, conflictingLocker.lock(dbId, MODE_IS));
ASSERT_EQ(LOCK_WAITING, conflictingLocker.lockBegin(nullptr, collectionId, MODE_IS));
@@ -558,7 +539,7 @@ TEST(LockerImpl, GetLockerInfoShouldReportPendingLocks) {
TEST(LockerImpl, ReaquireLockPendingUnlock) {
const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
- DefaultLockerImpl locker;
+ LockerImpl locker;
locker.lockGlobal(MODE_IS);
ASSERT_EQ(LOCK_OK, locker.lock(resId, MODE_X));
@@ -586,7 +567,7 @@ TEST(LockerImpl, ReaquireLockPendingUnlock) {
TEST(LockerImpl, AcquireLockPendingUnlockWithCoveredMode) {
const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
- DefaultLockerImpl locker;
+ LockerImpl locker;
locker.lockGlobal(MODE_IS);
ASSERT_EQ(LOCK_OK, locker.lock(resId, MODE_X));
@@ -614,7 +595,7 @@ TEST(LockerImpl, AcquireLockPendingUnlockWithCoveredMode) {
TEST(LockerImpl, ConvertLockPendingUnlock) {
const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
- DefaultLockerImpl locker;
+ LockerImpl locker;
locker.lockGlobal(MODE_IS);
ASSERT_EQ(LOCK_OK, locker.lock(resId, MODE_IX));
@@ -644,7 +625,7 @@ TEST(LockerImpl, ConvertLockPendingUnlock) {
TEST(LockerImpl, ConvertLockPendingUnlockAndUnlock) {
const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
- DefaultLockerImpl locker;
+ LockerImpl locker;
locker.lockGlobal(MODE_IS);
ASSERT_EQ(LOCK_OK, locker.lock(resId, MODE_IX));
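The save/restore tests above exercise the yield path this commit simplifies: saveLockStateAndUnlock() no longer has a flush-lock special case, and restoreLockState() reacquires the global lock first and then every saved resource unconditionally. A sketch of the round trip, following saveAndRestoreDBAndCollection (resIdDatabase and resIdCollection as declared in that test; opCtx standing in for the operation's context):

    Locker::LockSnapshot snapshot;
    LockerImpl locker;

    locker.lockGlobal(MODE_IX);
    invariant(LOCK_OK == locker.lock(resIdDatabase, MODE_IX));
    invariant(LOCK_OK == locker.lock(resIdCollection, MODE_X));

    locker.saveLockStateAndUnlock(&snapshot);  // all locks released, modes captured
    invariant(!locker.isLocked());

    locker.restoreLockState(opCtx, snapshot);  // global lock first, then each resource
    invariant(locker.getLockMode(resIdCollection) == MODE_X);
    locker.unlockGlobal();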
diff --git a/src/mongo/db/concurrency/locker.h b/src/mongo/db/concurrency/locker.h
index 816d35831f1..9523bc46e92 100644
--- a/src/mongo/db/concurrency/locker.h
+++ b/src/mongo/db/concurrency/locker.h
@@ -151,9 +151,8 @@ public:
* @param mode Mode in which the global lock should be acquired. Also indicates the intent
* of the operation.
*
- * @return LOCK_OK, if the global lock (and the flush lock, for the MMAP V1 engine) were
- * acquired within the specified time bound. Otherwise, the respective failure
- * code and neither lock will be acquired.
+ * @return LOCK_OK, if the global lock was acquired within the specified time bound. Otherwise,
+ * the failure code and no lock will be acquired.
*/
virtual LockResult lockGlobal(OperationContext* opCtx, LockMode mode) = 0;
virtual LockResult lockGlobal(LockMode mode) = 0;
@@ -161,10 +160,10 @@ public:
/**
* Requests the global lock to be acquired in the specified mode.
*
- * See the comments for lockBegin/Complete for more information on the semantics.
- * The deadline indicates the absolute time point when this lock acquisition will time out, if
- * not yet granted. The lockGlobalBegin
- * method has a deadline for use with the TicketHolder, if there is one.
+ * See the comments for lockBegin/Complete for more information on the semantics. The deadline
+ * indicates the absolute time point when this lock acquisition will time out, if not yet
+ * granted. The lockGlobalBegin method has a deadline for use with the TicketHolder, if there
+ * is one.
*/
virtual LockResult lockGlobalBegin(OperationContext* opCtx, LockMode mode, Date_t deadline) = 0;
virtual LockResult lockGlobalBegin(LockMode mode, Date_t deadline) = 0;
@@ -177,12 +176,6 @@ public:
virtual LockResult lockGlobalComplete(Date_t deadline) = 0;
/**
- * This method is used only in the MMAP V1 storage engine, otherwise it is a no-op. See the
- * comments in the implementation for more details on how MMAP V1 journaling works.
- */
- virtual void lockMMAPV1Flush() = 0;
-
- /**
* Decrements the reference count on the global lock. If the reference count on the
* global lock hits zero, the transaction is over, and unlockGlobal unlocks all other locks
* except for RESOURCE_MUTEX locks.
@@ -196,16 +189,6 @@ public:
virtual bool unlockGlobal() = 0;
/**
- * This is only necessary for the MMAP V1 engine and in particular, the fsyncLock command
- * which needs to first acquire the global lock in X-mode for truncating the journal and
- * then downgrade to S before it blocks.
- *
- * The downgrade is necessary in order to be nice and not block readers while under
- * fsyncLock.
- */
- virtual void downgradeGlobalXtoSForMMAPV1() = 0;
-
- /**
* beginWriteUnitOfWork/endWriteUnitOfWork are called at the start and end of WriteUnitOfWorks.
* They can be used to implement two-phase locking. Each call to begin should be matched with an
* eventual call to end.
diff --git a/src/mongo/db/concurrency/locker_noop.h b/src/mongo/db/concurrency/locker_noop.h
index 2c3176aeb75..51d8e3dd2f3 100644
--- a/src/mongo/db/concurrency/locker_noop.h
+++ b/src/mongo/db/concurrency/locker_noop.h
@@ -105,18 +105,10 @@ public:
MONGO_UNREACHABLE;
}
- virtual void lockMMAPV1Flush() {
- MONGO_UNREACHABLE;
- }
-
virtual bool unlockGlobal() {
MONGO_UNREACHABLE;
}
- virtual void downgradeGlobalXtoSForMMAPV1() {
- MONGO_UNREACHABLE;
- }
-
virtual void beginWriteUnitOfWork() {}
virtual void endWriteUnitOfWork() {}
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 851175c1a8f..279dfd50f41 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -949,7 +949,7 @@ void shutdownTask() {
// For a Windows service, dbexit does not call exit(), so we must leak the lock outside
// of this function to prevent any operations from running that need a lock.
//
- DefaultLockerImpl* globalLocker = new DefaultLockerImpl();
+ LockerImpl* globalLocker = new LockerImpl();
LockResult result = globalLocker->lockGlobalBegin(MODE_X, Date_t::max());
if (result == LOCK_WAITING) {
result = globalLocker->lockGlobalComplete(Date_t::max());
diff --git a/src/mongo/db/repl/noop_writer.cpp b/src/mongo/db/repl/noop_writer.cpp
index 27a24c011ec..2016050eb0d 100644
--- a/src/mongo/db/repl/noop_writer.cpp
+++ b/src/mongo/db/repl/noop_writer.cpp
@@ -141,15 +141,14 @@ void NoopWriter::stopWritingPeriodicNoops() {
}
void NoopWriter::_writeNoop(OperationContext* opCtx) {
- // Use GlobalLock + lockMMAPV1Flush instead of DBLock to allow return when the lock is not
- // available. It may happen when the primary steps down and a shared global lock is acquired.
+ // Use GlobalLock instead of DBLock to allow return when the lock is not available. It may
+ // happen when the primary steps down and a shared global lock is acquired.
Lock::GlobalLock lock(
opCtx, MODE_IX, Date_t::now() + Milliseconds(1), Lock::InterruptBehavior::kLeaveUnlocked);
if (!lock.isLocked()) {
LOG(1) << "Global lock is not available skipping noopWrite";
return;
}
- opCtx->lockState()->lockMMAPV1Flush();
auto replCoord = ReplicationCoordinator::get(opCtx);
// It's a proxy for being a primary
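The bail-out behavior is unchanged: a GlobalLock taken with a one-millisecond deadline and InterruptBehavior::kLeaveUnlocked reports isLocked() == false instead of blocking when the lock is unavailable, for instance while a stepdown holds a conflicting global lock. What is gone is only the extra flush-lock call. The guard pattern, as a sketch:

    Lock::GlobalLock lock(
        opCtx, MODE_IX, Date_t::now() + Milliseconds(1), Lock::InterruptBehavior::kLeaveUnlocked);
    if (!lock.isLocked()) {
        return;  // skip this interval rather than stall the noop writer
    }
    // ... safe to perform the noop write here ...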
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index 430f58a78e5..7af5b422e8a 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -1816,7 +1816,7 @@ TEST_F(StepDownTest,
// locker to test this, or otherwise stepDown will be granted the lock automatically.
Lock::GlobalWrite lk(opCtx.get());
ASSERT_TRUE(opCtx->lockState()->isW());
- auto locker = opCtx.get()->swapLockState(stdx::make_unique<DefaultLockerImpl>());
+ auto locker = opCtx.get()->swapLockState(stdx::make_unique<LockerImpl>());
Status status =
getReplCoord()->stepDown(opCtx.get(), false, Milliseconds(0), Milliseconds(1000));
diff --git a/src/mongo/db/session.cpp b/src/mongo/db/session.cpp
index 46cd4d4d798..9871262b989 100644
--- a/src/mongo/db/session.cpp
+++ b/src/mongo/db/session.cpp
@@ -629,7 +629,7 @@ Session::TxnResources::TxnResources(OperationContext* opCtx) {
_ruState = opCtx->getWriteUnitOfWork()->release();
opCtx->setWriteUnitOfWork(nullptr);
- _locker = opCtx->swapLockState(stdx::make_unique<DefaultLockerImpl>());
+ _locker = opCtx->swapLockState(stdx::make_unique<LockerImpl>());
_locker->releaseTicket();
_locker->unsetThreadId();
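Session::TxnResources stashes a transaction's lock state by swapping a fresh LockerImpl onto the OperationContext and keeping the old locker aside; the stashed locker releases its concurrency ticket and drops its thread binding so it can sit idle until whichever thread unstashes the transaction adopts it (via updateThreadIdToCurrentThread() and reacquireTicket(), per the lock_state.cpp hunks above). The stash step, annotated as a sketch:

    // The opCtx continues with a fresh locker; the transaction keeps the old one.
    _locker = opCtx->swapLockState(stdx::make_unique<LockerImpl>());
    _locker->releaseTicket();   // don't hold a ticket while the transaction is idle
    _locker->unsetThreadId();   // no longer bound to an executing thread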
diff --git a/src/mongo/db/storage/storage_engine_init.cpp b/src/mongo/db/storage/storage_engine_init.cpp
index c22e6915e52..4805b8105ee 100644
--- a/src/mongo/db/storage/storage_engine_init.cpp
+++ b/src/mongo/db/storage/storage_engine_init.cpp
@@ -329,11 +329,7 @@ public:
if (!storageEngine) {
return;
}
- if (storageEngine->isMmapV1()) {
- opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
- } else {
- opCtx->setLockState(stdx::make_unique<DefaultLockerImpl>());
- }
+ opCtx->setLockState(stdx::make_unique<LockerImpl>());
opCtx->setRecoveryUnit(storageEngine->newRecoveryUnit(),
WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork);
}