author     Xiangyu Yao <xiangyu.yao@mongodb.com>    2018-02-05 12:20:57 -0500
committer  Xiangyu Yao <xiangyu.yao@mongodb.com>    2018-02-09 15:54:48 -0500
commit     9b062114129533e1220dca19085b3a872702a28c (patch)
tree       60ebc13409d020395a94bf256f6498ab9833dd79 /src/mongo/db
parent     d95018ef806aa2195a66e804a8cd7da4c4249176 (diff)
download   mongo-9b062114129533e1220dca19085b3a872702a28c.tar.gz
SERVER-32682 Change GlobalLock timeout type to Milliseconds
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/catalog/coll_mod.cpp                                      4
-rw-r--r--  src/mongo/db/commands/fsync.cpp                                        2
-rw-r--r--  src/mongo/db/commands/list_databases.cpp                               2
-rw-r--r--  src/mongo/db/commands/oplog_note.cpp                                   2
-rw-r--r--  src/mongo/db/commands/restart_catalog_command.cpp                      2
-rw-r--r--  src/mongo/db/commands/set_feature_compatibility_version_command.cpp    4
-rw-r--r--  src/mongo/db/commands/snapshot_management.cpp                          4
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.cpp                            14
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.h                              12
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_test.cpp                       84
-rw-r--r--  src/mongo/db/repl/do_txn.cpp                                           2
-rw-r--r--  src/mongo/db/repl/master_slave.cpp                                     2
-rw-r--r--  src/mongo/db/repl/noop_writer.cpp                                      2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp                     6
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp      24
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp           4
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_test.cpp                4
-rw-r--r--  src/mongo/db/repl/rollback_impl.cpp                                    2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp           2
19 files changed, 89 insertions(+), 89 deletions(-)
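
The change summarized above replaces the bare unsigned millisecond timeout on Lock::GlobalLock (and the GlobalWrite/GlobalRead wrappers) with the typed Milliseconds duration. For illustration only, not part of the patch, a minimal before/after sketch assuming an OperationContext* opCtx is in scope:

    // Before: the timeout was a bare unsigned count, with UINT_MAX meaning "wait forever".
    Lock::GlobalLock oldStyle(opCtx, MODE_IS, UINT_MAX);

    // After: the timeout is a typed duration; Milliseconds::max() means "wait forever",
    // and small values such as Milliseconds(1) express a bounded try-lock.
    Lock::GlobalLock newStyle(opCtx, MODE_IS, Milliseconds::max());
    if (newStyle.isLocked()) {
        // safe to read storage-engine-wide state here
    }
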
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index ee1de8b7627..326c0b927d5 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -618,7 +618,7 @@ void updateUUIDSchemaVersion(OperationContext* opCtx, bool upgrade) {
std::vector<std::string> dbNames;
StorageEngine* storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
{
- Lock::GlobalLock lk(opCtx, MODE_IS, UINT_MAX);
+ Lock::GlobalLock lk(opCtx, MODE_IS, Milliseconds::max());
storageEngine->listDatabases(&dbNames);
}
@@ -661,7 +661,7 @@ Status updateUUIDSchemaVersionNonReplicated(OperationContext* opCtx, bool upgrad
std::vector<std::string> dbNames;
StorageEngine* storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
{
- Lock::GlobalLock lk(opCtx, MODE_IS, UINT_MAX);
+ Lock::GlobalLock lk(opCtx, MODE_IS, Milliseconds::max());
storageEngine->listDatabases(&dbNames);
}
for (auto it = dbNames.begin(); it != dbNames.end(); ++it) {
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index cd14a040ccd..f595f12465d 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -147,7 +147,7 @@ public:
}
// Take a global IS lock to ensure the storage engine is not shutdown
- Lock::GlobalLock global(opCtx, MODE_IS, UINT_MAX);
+ Lock::GlobalLock global(opCtx, MODE_IS, Milliseconds::max());
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
result.append("numFiles", storageEngine->flushAllFiles(opCtx, sync));
return true;
diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp
index a2112a5eeaa..64686136371 100644
--- a/src/mongo/db/commands/list_databases.cpp
+++ b/src/mongo/db/commands/list_databases.cpp
@@ -114,7 +114,7 @@ public:
vector<string> dbNames;
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
{
- Lock::GlobalLock lk(opCtx, MODE_IS, UINT_MAX);
+ Lock::GlobalLock lk(opCtx, MODE_IS, Milliseconds::max());
storageEngine->listDatabases(&dbNames);
}
diff --git a/src/mongo/db/commands/oplog_note.cpp b/src/mongo/db/commands/oplog_note.cpp
index 408e73fe0d7..5467ecf6723 100644
--- a/src/mongo/db/commands/oplog_note.cpp
+++ b/src/mongo/db/commands/oplog_note.cpp
@@ -57,7 +57,7 @@ Status _performNoopWrite(OperationContext* opCtx, BSONObj msgObj, StringData not
// Use GlobalLock + lockMMAPV1Flush instead of DBLock to allow return when the lock is not
// available. It may happen when the primary steps down and a shared global lock is
// acquired.
- Lock::GlobalLock lock(opCtx, MODE_IX, 1);
+ Lock::GlobalLock lock(opCtx, MODE_IX, Milliseconds(1));
if (!lock.isLocked()) {
LOG(1) << "Global lock is not available skipping noopWrite";
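
As context for the one-millisecond timeout above (illustration, not part of the diff), the call amounts to a bounded try-lock so the noop write can bail out instead of queueing behind a stepdown. A sketch with the new signature, assuming opCtx and the server's LOG macro:

    // Try the global IX lock, but give up after roughly 1ms rather than blocking
    // behind a conflicting global lock held while the primary steps down.
    Lock::GlobalLock lock(opCtx, MODE_IX, Milliseconds(1));
    if (!lock.isLocked()) {
        LOG(1) << "Global lock is not available, skipping noop write";
        return;  // bail out and let the caller handle the busy lock
    }
    // Lock held: proceed with the noop write.
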
diff --git a/src/mongo/db/commands/restart_catalog_command.cpp b/src/mongo/db/commands/restart_catalog_command.cpp
index bca80a8b856..33872a9cd1c 100644
--- a/src/mongo/db/commands/restart_catalog_command.cpp
+++ b/src/mongo/db/commands/restart_catalog_command.cpp
@@ -84,7 +84,7 @@ public:
const std::string& db,
const BSONObj& cmdObj,
BSONObjBuilder& result) final {
- Lock::GlobalLock global(opCtx, MODE_X, UINT_MAX);
+ Lock::GlobalLock global(opCtx, MODE_X, Milliseconds::max());
log() << "Closing database catalog";
catalog::closeCatalog(opCtx);
diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
index 6383df1c6ce..cba959b67a9 100644
--- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
+++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
@@ -249,7 +249,7 @@ public:
// Otherwise createCollection may determine not to add a UUID before the FCV
// change, but then actually create the collection after the update below
// identifies all of the databases to update with UUIDs.
- Lock::GlobalLock lk(opCtx, MODE_S, UINT_MAX);
+ Lock::GlobalLock lk(opCtx, MODE_S, Milliseconds::max());
}
// First put UUIDs in the storage layer metadata. UUIDs will be generated for
@@ -330,7 +330,7 @@ public:
// Otherwise createCollection may determine to add a UUID before the FCV change, but
// then actually create the collection after the update below identifies all of the
// databases from which to remove UUIDs.
- Lock::GlobalLock lk(opCtx, MODE_S, UINT_MAX);
+ Lock::GlobalLock lk(opCtx, MODE_S, Milliseconds::max());
}
// Fail after updating the FCV document but before removing UUIDs.
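
For context (illustration only, not part of the patch): the scoped MODE_S acquisitions above act purely as a barrier; briefly holding the shared global lock guarantees that any createCollection which started under the old FCV has finished before the UUID updates run. A sketch of that barrier with the new timeout type:

    {
        // Wait without bound until concurrent IX/X holders drain, then release
        // immediately when the scope closes; no work is done under the lock.
        Lock::GlobalLock barrier(opCtx, MODE_S, Milliseconds::max());
    }
    // From here on, no collection creation begun under the previous FCV is still in flight.
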
diff --git a/src/mongo/db/commands/snapshot_management.cpp b/src/mongo/db/commands/snapshot_management.cpp
index 97a0ce50a34..e17fc65f37c 100644
--- a/src/mongo/db/commands/snapshot_management.cpp
+++ b/src/mongo/db/commands/snapshot_management.cpp
@@ -76,7 +76,7 @@ public:
{ErrorCodes::CommandNotSupported, ""});
}
- Lock::GlobalLock lk(opCtx, MODE_IX, UINT_MAX);
+ Lock::GlobalLock lk(opCtx, MODE_IX, Milliseconds::max());
auto status = snapshotManager->prepareForCreateSnapshot(opCtx);
if (status.isOK()) {
@@ -124,7 +124,7 @@ public:
{ErrorCodes::CommandNotSupported, ""});
}
- Lock::GlobalLock lk(opCtx, MODE_IX, UINT_MAX);
+ Lock::GlobalLock lk(opCtx, MODE_IX, Milliseconds::max());
auto timestamp = Timestamp(cmdObj.firstElement().Long());
snapshotManager->setCommittedSnapshot(timestamp);
return true;
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index 7ff9c8b861f..5eda7623996 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -136,14 +136,14 @@ bool Lock::ResourceMutex::isAtLeastReadLocked(Locker* locker) {
return locker->isLockHeldForMode(_rid, MODE_IS);
}
-Lock::GlobalLock::GlobalLock(OperationContext* opCtx, LockMode lockMode, unsigned timeoutMs)
+Lock::GlobalLock::GlobalLock(OperationContext* opCtx, LockMode lockMode, Milliseconds timeoutMs)
: GlobalLock(opCtx, lockMode, timeoutMs, EnqueueOnly()) {
waitForLock(timeoutMs);
}
Lock::GlobalLock::GlobalLock(OperationContext* opCtx,
LockMode lockMode,
- unsigned timeoutMs,
+ Milliseconds timeoutMs,
EnqueueOnly enqueueOnly)
: _opCtx(opCtx),
_result(LOCK_INVALID),
@@ -161,17 +161,17 @@ Lock::GlobalLock::GlobalLock(GlobalLock&& otherLock)
otherLock._result = LOCK_INVALID;
}
-void Lock::GlobalLock::_enqueue(LockMode lockMode, unsigned timeoutMs) {
+void Lock::GlobalLock::_enqueue(LockMode lockMode, Milliseconds timeoutMs) {
if (_opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()) {
_pbwm.lock(MODE_IS);
}
- _result = _opCtx->lockState()->lockGlobalBegin(lockMode, Milliseconds(timeoutMs));
+ _result = _opCtx->lockState()->lockGlobalBegin(lockMode, timeoutMs);
}
-void Lock::GlobalLock::waitForLock(unsigned timeoutMs) {
+void Lock::GlobalLock::waitForLock(Milliseconds timeoutMs) {
if (_result == LOCK_WAITING) {
- _result = _opCtx->lockState()->lockGlobalComplete(Milliseconds(timeoutMs));
+ _result = _opCtx->lockState()->lockGlobalComplete(timeoutMs);
}
if (_result != LOCK_OK && _opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()) {
@@ -194,7 +194,7 @@ Lock::DBLock::DBLock(OperationContext* opCtx, StringData db, LockMode mode)
: _id(RESOURCE_DATABASE, db),
_opCtx(opCtx),
_mode(mode),
- _globalLock(opCtx, isSharedLockMode(_mode) ? MODE_IS : MODE_IX, UINT_MAX) {
+ _globalLock(opCtx, isSharedLockMode(_mode) ? MODE_IS : MODE_IX, Milliseconds::max()) {
massert(28539, "need a valid database name", !db.empty() && nsIsDbOnly(db));
// Need to acquire the flush lock
diff --git a/src/mongo/db/concurrency/d_concurrency.h b/src/mongo/db/concurrency/d_concurrency.h
index 3bcad53b749..0d942cc7705 100644
--- a/src/mongo/db/concurrency/d_concurrency.h
+++ b/src/mongo/db/concurrency/d_concurrency.h
@@ -181,7 +181,7 @@ public:
public:
class EnqueueOnly {};
- GlobalLock(OperationContext* opCtx, LockMode lockMode, unsigned timeoutMs);
+ GlobalLock(OperationContext* opCtx, LockMode lockMode, Milliseconds timeoutMs);
GlobalLock(GlobalLock&&);
/**
@@ -193,7 +193,7 @@ public:
*/
GlobalLock(OperationContext* opCtx,
LockMode lockMode,
- unsigned timeoutMs,
+ Milliseconds timeoutMs,
EnqueueOnly enqueueOnly);
~GlobalLock() {
@@ -209,14 +209,14 @@ public:
* Waits for lock to be granted. Sets that the global lock was taken on the
* GlobalLockAcquisitionTracker.
*/
- void waitForLock(unsigned timeoutMs);
+ void waitForLock(Milliseconds timeoutMs);
bool isLocked() const {
return _result == LOCK_OK;
}
private:
- void _enqueue(LockMode lockMode, unsigned timeoutMs);
+ void _enqueue(LockMode lockMode, Milliseconds timeoutMs);
void _unlock();
OperationContext* const _opCtx;
@@ -234,7 +234,7 @@ public:
*/
class GlobalWrite : public GlobalLock {
public:
- explicit GlobalWrite(OperationContext* opCtx, unsigned timeoutMs = UINT_MAX)
+ explicit GlobalWrite(OperationContext* opCtx, Milliseconds timeoutMs = Milliseconds::max())
: GlobalLock(opCtx, MODE_X, timeoutMs) {
if (isLocked()) {
opCtx->lockState()->lockMMAPV1Flush();
@@ -251,7 +251,7 @@ public:
*/
class GlobalRead : public GlobalLock {
public:
- explicit GlobalRead(OperationContext* opCtx, unsigned timeoutMs = UINT_MAX)
+ explicit GlobalRead(OperationContext* opCtx, Milliseconds timeoutMs = Milliseconds::max())
: GlobalLock(opCtx, MODE_S, timeoutMs) {
if (isLocked()) {
opCtx->lockState()->lockMMAPV1Flush();
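
Illustration only, not part of the patch: with the Milliseconds::max() defaults above, callers that previously relied on UINT_MAX keep the block-indefinitely behaviour, while bounded acquisitions now spell out the unit. A minimal sketch, assuming two operation contexts opCtx1 and opCtx2 on separate clients:

    Lock::GlobalWrite w(opCtx1);                     // default Milliseconds::max(): waits indefinitely
    Lock::GlobalRead rTry(opCtx2, Milliseconds(1));  // bounded wait; conflicts with the MODE_X lock above
    if (!rTry.isLocked()) {
        // timed out after about a millisecond instead of blocking the thread
    }
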
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 2b62a387ce7..2d437ac5628 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -468,19 +468,19 @@ TEST_F(DConcurrencyTestFixture,
TEST_F(DConcurrencyTestFixture, GlobalLockS_Timeout) {
auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
- Lock::GlobalLock globalWrite(clients[0].second.get(), MODE_X, 0);
+ Lock::GlobalLock globalWrite(clients[0].second.get(), MODE_X, Milliseconds(0));
ASSERT(globalWrite.isLocked());
- Lock::GlobalLock globalReadTry(clients[1].second.get(), MODE_S, 1);
+ Lock::GlobalLock globalReadTry(clients[1].second.get(), MODE_S, Milliseconds(1));
ASSERT(!globalReadTry.isLocked());
}
TEST_F(DConcurrencyTestFixture, GlobalLockX_Timeout) {
auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
- Lock::GlobalLock globalWrite(clients[0].second.get(), MODE_X, 0);
+ Lock::GlobalLock globalWrite(clients[0].second.get(), MODE_X, Milliseconds(0));
ASSERT(globalWrite.isLocked());
- Lock::GlobalLock globalWriteTry(clients[1].second.get(), MODE_X, 1);
+ Lock::GlobalLock globalWriteTry(clients[1].second.get(), MODE_X, Milliseconds(1));
ASSERT(!globalWriteTry.isLocked());
}
@@ -490,7 +490,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockXSetsGlobalLockTakenOnOperationContext
ASSERT_FALSE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalExclusiveLockTaken());
{
- Lock::GlobalLock globalWrite(opCtx, MODE_X, 0);
+ Lock::GlobalLock globalWrite(opCtx, MODE_X, Milliseconds(0));
ASSERT(globalWrite.isLocked());
}
ASSERT_TRUE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalExclusiveLockTaken());
@@ -501,7 +501,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockIXSetsGlobalLockTakenOnOperationContex
auto opCtx = clients[0].second.get();
ASSERT_FALSE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalExclusiveLockTaken());
{
- Lock::GlobalLock globalWrite(opCtx, MODE_IX, 0);
+ Lock::GlobalLock globalWrite(opCtx, MODE_IX, Milliseconds(0));
ASSERT(globalWrite.isLocked());
}
ASSERT_TRUE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalExclusiveLockTaken());
@@ -512,7 +512,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockSDoesNotSetGlobalLockTakenOnOperationC
auto opCtx = clients[0].second.get();
ASSERT_FALSE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalExclusiveLockTaken());
{
- Lock::GlobalLock globalRead(opCtx, MODE_S, 0);
+ Lock::GlobalLock globalRead(opCtx, MODE_S, Milliseconds(0));
ASSERT(globalRead.isLocked());
}
ASSERT_FALSE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalExclusiveLockTaken());
@@ -523,7 +523,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockISDoesNotSetGlobalLockTakenOnOperation
auto opCtx = clients[0].second.get();
ASSERT_FALSE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalExclusiveLockTaken());
{
- Lock::GlobalLock globalRead(opCtx, MODE_IS, 0);
+ Lock::GlobalLock globalRead(opCtx, MODE_IS, Milliseconds(0));
ASSERT(globalRead.isLocked());
}
ASSERT_FALSE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalExclusiveLockTaken());
@@ -551,13 +551,13 @@ TEST_F(DConcurrencyTestFixture, GlobalLockXDoesNotSetGlobalLockTakenWhenLockAcqu
auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
// Take a global lock so that the next one times out.
- Lock::GlobalLock globalWrite0(clients[0].second.get(), MODE_X, 0);
+ Lock::GlobalLock globalWrite0(clients[0].second.get(), MODE_X, Milliseconds(0));
ASSERT(globalWrite0.isLocked());
auto opCtx = clients[1].second.get();
ASSERT_FALSE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalExclusiveLockTaken());
{
- Lock::GlobalLock globalWrite1(opCtx, MODE_X, 1);
+ Lock::GlobalLock globalWrite1(opCtx, MODE_X, Milliseconds(1));
ASSERT_FALSE(globalWrite1.isLocked());
}
ASSERT_FALSE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalExclusiveLockTaken());
@@ -567,7 +567,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockS_NoTimeoutDueToGlobalLockS) {
auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
Lock::GlobalRead globalRead(clients[0].second.get());
- Lock::GlobalLock globalReadTry(clients[1].second.get(), MODE_S, 1);
+ Lock::GlobalLock globalReadTry(clients[1].second.get(), MODE_S, Milliseconds(1));
ASSERT(globalReadTry.isLocked());
}
@@ -576,7 +576,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockX_TimeoutDueToGlobalLockS) {
auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
Lock::GlobalRead globalRead(clients[0].second.get());
- Lock::GlobalLock globalWriteTry(clients[1].second.get(), MODE_X, 1);
+ Lock::GlobalLock globalWriteTry(clients[1].second.get(), MODE_X, Milliseconds(1));
ASSERT(!globalWriteTry.isLocked());
}
@@ -585,7 +585,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockS_TimeoutDueToGlobalLockX) {
auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
Lock::GlobalWrite globalWrite(clients[0].second.get());
- Lock::GlobalLock globalReadTry(clients[1].second.get(), MODE_S, 1);
+ Lock::GlobalLock globalReadTry(clients[1].second.get(), MODE_S, Milliseconds(1));
ASSERT(!globalReadTry.isLocked());
}
@@ -594,7 +594,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockX_TimeoutDueToGlobalLockX) {
auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
Lock::GlobalWrite globalWrite(clients[0].second.get());
- Lock::GlobalLock globalWriteTry(clients[1].second.get(), MODE_X, 1);
+ Lock::GlobalLock globalWriteTry(clients[1].second.get(), MODE_X, Milliseconds(1));
ASSERT(!globalWriteTry.isLocked());
}
@@ -978,11 +978,11 @@ TEST_F(DConcurrencyTestFixture, Throttling) {
bool overlongWait;
int tries = 0;
const int maxTries = 15;
- const int timeoutMillis = 42;
+ const Milliseconds timeoutMillis = Milliseconds(42);
do {
// Test that throttling will correctly handle timeouts.
- Lock::GlobalRead R1(opctx1, 0);
+ Lock::GlobalRead R1(opctx1, Milliseconds(0));
ASSERT(R1.isLocked());
Date_t t1 = Date_t::now();
@@ -993,7 +993,7 @@ TEST_F(DConcurrencyTestFixture, Throttling) {
Date_t t2 = Date_t::now();
// Test that the timeout did result in at least the requested wait.
- ASSERT_GTE(t2 - t1, Milliseconds(timeoutMillis));
+ ASSERT_GTE(t2 - t1, timeoutMillis);
// Timeouts should be reasonably immediate. In maxTries attempts at least one test should be
// able to complete within a second, as the theoretical test duration is less than 50 ms.
@@ -1011,14 +1011,14 @@ TEST_F(DConcurrencyTestFixture, CompatibleFirstWithSXIS) {
// Build a queue of MODE_S <- MODE_X <- MODE_IS, with MODE_S granted.
Lock::GlobalRead lockS(opctx1);
ASSERT(lockS.isLocked());
- Lock::GlobalLock lockX(opctx2, MODE_X, UINT_MAX, Lock::GlobalLock::EnqueueOnly());
+ Lock::GlobalLock lockX(opctx2, MODE_X, Milliseconds::max(), Lock::GlobalLock::EnqueueOnly());
ASSERT(!lockX.isLocked());
// A MODE_IS should be granted due to compatibleFirst policy.
- Lock::GlobalLock lockIS(opctx3, MODE_IS, 0);
+ Lock::GlobalLock lockIS(opctx3, MODE_IS, Milliseconds(0));
ASSERT(lockIS.isLocked());
- lockX.waitForLock(0);
+ lockX.waitForLock(Milliseconds(0));
ASSERT(!lockX.isLocked());
}
@@ -1035,25 +1035,25 @@ TEST_F(DConcurrencyTestFixture, CompatibleFirstWithXSIXIS) {
lockX.emplace(opctx1);
ASSERT(lockX->isLocked());
boost::optional<Lock::GlobalLock> lockS;
- lockS.emplace(opctx2, MODE_S, UINT_MAX, Lock::GlobalLock::EnqueueOnly());
+ lockS.emplace(opctx2, MODE_S, Milliseconds::max(), Lock::GlobalLock::EnqueueOnly());
ASSERT(!lockS->isLocked());
- Lock::GlobalLock lockIX(opctx3, MODE_IX, UINT_MAX, Lock::GlobalLock::EnqueueOnly());
+ Lock::GlobalLock lockIX(opctx3, MODE_IX, Milliseconds::max(), Lock::GlobalLock::EnqueueOnly());
ASSERT(!lockIX.isLocked());
- Lock::GlobalLock lockIS(opctx4, MODE_IS, UINT_MAX, Lock::GlobalLock::EnqueueOnly());
+ Lock::GlobalLock lockIS(opctx4, MODE_IS, Milliseconds::max(), Lock::GlobalLock::EnqueueOnly());
ASSERT(!lockIS.isLocked());
// Now release the MODE_X and ensure that MODE_S will switch policy to compatibleFirst
lockX.reset();
- lockS->waitForLock(0);
+ lockS->waitForLock(Milliseconds(0));
ASSERT(lockS->isLocked());
ASSERT(!lockIX.isLocked());
- lockIS.waitForLock(0);
+ lockIS.waitForLock(Milliseconds(0));
ASSERT(lockIS.isLocked());
// Now release the MODE_S and ensure that MODE_IX gets locked.
lockS.reset();
- lockIX.waitForLock(0);
+ lockIX.waitForLock(Milliseconds(0));
ASSERT(lockIX.isLocked());
}
@@ -1072,32 +1072,32 @@ TEST_F(DConcurrencyTestFixture, CompatibleFirstWithXSXIXIS) {
ASSERT(lockXgranted->isLocked());
boost::optional<Lock::GlobalLock> lockX;
- lockX.emplace(opctx3, MODE_X, UINT_MAX, Lock::GlobalLock::EnqueueOnly());
+ lockX.emplace(opctx3, MODE_X, Milliseconds::max(), Lock::GlobalLock::EnqueueOnly());
ASSERT(!lockX->isLocked());
// Now request MODE_S: it will be first in the pending list due to EnqueueAtFront policy.
boost::optional<Lock::GlobalLock> lockS;
- lockS.emplace(opctx2, MODE_S, UINT_MAX, Lock::GlobalLock::EnqueueOnly());
+ lockS.emplace(opctx2, MODE_S, Milliseconds::max(), Lock::GlobalLock::EnqueueOnly());
ASSERT(!lockS->isLocked());
- Lock::GlobalLock lockIX(opctx4, MODE_IX, UINT_MAX, Lock::GlobalLock::EnqueueOnly());
+ Lock::GlobalLock lockIX(opctx4, MODE_IX, Milliseconds::max(), Lock::GlobalLock::EnqueueOnly());
ASSERT(!lockIX.isLocked());
- Lock::GlobalLock lockIS(opctx5, MODE_IS, UINT_MAX, Lock::GlobalLock::EnqueueOnly());
+ Lock::GlobalLock lockIS(opctx5, MODE_IS, Milliseconds::max(), Lock::GlobalLock::EnqueueOnly());
ASSERT(!lockIS.isLocked());
// Now release the granted MODE_X and ensure that MODE_S will switch policy to compatibleFirst,
// not locking the MODE_X or MODE_IX, but instead granting the final MODE_IS.
lockXgranted.reset();
- lockS->waitForLock(0);
+ lockS->waitForLock(Milliseconds(0));
ASSERT(lockS->isLocked());
- lockX->waitForLock(0);
+ lockX->waitForLock(Milliseconds(0));
ASSERT(!lockX->isLocked());
- lockIX.waitForLock(0);
+ lockIX.waitForLock(Milliseconds(0));
ASSERT(!lockIX.isLocked());
- lockIS.waitForLock(0);
+ lockIS.waitForLock(Milliseconds(0));
ASSERT(lockIS.isLocked());
}
@@ -1131,7 +1131,7 @@ TEST_F(DConcurrencyTestFixture, CompatibleFirstStress) {
OperationContext* opCtx = clientOpctxPairs[0].second.get();
for (int iters = 0; (t.micros() < endTime); iters++) {
busyWait(0, iters % 20);
- Lock::GlobalRead readLock(opCtx, iters % 2);
+ Lock::GlobalRead readLock(opCtx, Milliseconds(iters % 2));
if (!readLock.isLocked()) {
timeoutCount[0]++;
continue;
@@ -1163,27 +1163,27 @@ TEST_F(DConcurrencyTestFixture, CompatibleFirstStress) {
auto interval = readOnlyInterval.load();
lock.emplace(opCtx,
iters % 20 ? MODE_IS : MODE_S,
- 0,
+ Milliseconds(0),
Lock::GlobalLock::EnqueueOnly());
// If thread 0 is holding the MODE_S lock while we tried to acquire a
// MODE_IS or MODE_S lock, the CompatibleFirst policy guarantees success.
auto newInterval = readOnlyInterval.load();
invariant(!interval || interval != newInterval || lock->isLocked());
- lock->waitForLock(0);
+ lock->waitForLock(Milliseconds(0));
break;
}
case 5:
busyWait(threadId, iters % 150);
- lock.emplace(opCtx, MODE_X, iters % 2);
+ lock.emplace(opCtx, MODE_X, Milliseconds(iters % 2));
busyWait(threadId, iters % 10);
break;
case 6:
- lock.emplace(opCtx, iters % 25 ? MODE_IX : MODE_S, iters % 2);
+ lock.emplace(opCtx, iters % 25 ? MODE_IX : MODE_S, Milliseconds(iters % 2));
busyWait(threadId, iters % 100);
break;
case 7:
busyWait(threadId, iters % 100);
- lock.emplace(opCtx, iters % 20 ? MODE_IS : MODE_X, 0);
+ lock.emplace(opCtx, iters % 20 ? MODE_IS : MODE_X, Milliseconds(0));
break;
default:
MONGO_UNREACHABLE;
@@ -1294,12 +1294,12 @@ TEST_F(DConcurrencyTestFixture, TestGlobalLockAbandonSnapshot) {
OperationContext::RecoveryUnitState::kActiveUnitOfWork);
{
- Lock::GlobalLock gw1(opCtx, MODE_IS, 0);
+ Lock::GlobalLock gw1(opCtx, MODE_IS, Milliseconds(0));
ASSERT(gw1.isLocked());
ASSERT(recovUnitBorrowed->activeTransaction);
{
- Lock::GlobalLock gw2(opCtx, MODE_S, 0);
+ Lock::GlobalLock gw2(opCtx, MODE_S, Milliseconds(0));
ASSERT(gw2.isLocked());
ASSERT(recovUnitBorrowed->activeTransaction);
}
diff --git a/src/mongo/db/repl/do_txn.cpp b/src/mongo/db/repl/do_txn.cpp
index fd27dfc4bef..333fbd4ce8a 100644
--- a/src/mongo/db/repl/do_txn.cpp
+++ b/src/mongo/db/repl/do_txn.cpp
@@ -304,7 +304,7 @@ Status doTxn(OperationContext* opCtx,
auto hasPrecondition = _hasPrecondition(doTxnCmd);
// Acquire global lock in IX mode so that the replication state check will remain valid.
- Lock::GlobalLock globalLock(opCtx, MODE_IX, UINT_MAX);
+ Lock::GlobalLock globalLock(opCtx, MODE_IX, Milliseconds::max());
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
bool userInitiatedWritesAndNotPrimary =
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 8f771add152..0d1fe5e4847 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -1301,7 +1301,7 @@ static void replMasterThread() {
OperationContext& opCtx = *opCtxPtr;
AuthorizationSession::get(opCtx.getClient())->grantInternalAuthorization();
- Lock::GlobalWrite globalWrite(&opCtx, 1);
+ Lock::GlobalWrite globalWrite(&opCtx, Milliseconds(1));
if (globalWrite.isLocked()) {
toSleep = 10;
diff --git a/src/mongo/db/repl/noop_writer.cpp b/src/mongo/db/repl/noop_writer.cpp
index 508e3bb7eee..98335441278 100644
--- a/src/mongo/db/repl/noop_writer.cpp
+++ b/src/mongo/db/repl/noop_writer.cpp
@@ -142,7 +142,7 @@ void NoopWriter::stopWritingPeriodicNoops() {
void NoopWriter::_writeNoop(OperationContext* opCtx) {
// Use GlobalLock + lockMMAPV1Flush instead of DBLock to allow return when the lock is not
// available. It may happen when the primary steps down and a shared global lock is acquired.
- Lock::GlobalLock lock(opCtx, MODE_IX, 1);
+ Lock::GlobalLock lock(opCtx, MODE_IX, Milliseconds(1));
if (!lock.isLocked()) {
LOG(1) << "Global lock is not available skipping noopWrite";
return;
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 18485cc0338..3a0a725bdd3 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -1588,14 +1588,14 @@ Status ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
}
auto globalLock = stdx::make_unique<Lock::GlobalLock>(
- opCtx, MODE_X, durationCount<Milliseconds>(stepdownTime), Lock::GlobalLock::EnqueueOnly());
+ opCtx, MODE_X, stepdownTime, Lock::GlobalLock::EnqueueOnly());
// We've requested the global exclusive lock which will stop new operations from coming in,
// but existing operations could take a long time to finish, so kill all user operations
// to help us get the global lock faster.
_externalState->killAllUserOperations(opCtx);
- globalLock->waitForLock(durationCount<Milliseconds>(stepdownTime));
+ globalLock->waitForLock(stepdownTime);
if (!globalLock->isLocked()) {
return {ErrorCodes::ExceededTimeLimit,
"Could not acquire the global shared lock within the amount of time "
@@ -1695,7 +1695,7 @@ Status ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
// failed stepdown attempt, we might as well spend whatever time we need to acquire
// it
// now.
- globalLock.reset(new Lock::GlobalLock(opCtx, MODE_X, UINT_MAX));
+ globalLock.reset(new Lock::GlobalLock(opCtx, MODE_X, Milliseconds::max()));
invariant(globalLock->isLocked());
lk.lock();
});
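
For reference (a condensed sketch, not part of the diff): the stepdown path above keeps its enqueue-then-wait shape and now passes the caller's Milliseconds budget straight through. Here killUserOps stands in for _externalState->killAllUserOperations, and stepdownTime for the caller-supplied Milliseconds value:

    // Enqueue the global MODE_X request without blocking yet; new operations queue behind it.
    Lock::GlobalLock globalLock(opCtx, MODE_X, stepdownTime, Lock::GlobalLock::EnqueueOnly());

    // Kill existing user operations so they release their locks sooner.
    killUserOps(opCtx);

    // Then wait, bounded by the same stepdown budget.
    globalLock.waitForLock(stepdownTime);
    if (!globalLock.isLocked()) {
        // exceeded the time limit: surface ErrorCodes::ExceededTimeLimit to the caller
    }
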
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
index 66094a548f2..5b6ef9f0712 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
@@ -2261,7 +2261,7 @@ TEST_F(PrimaryCatchUpTest, PrimaryDoesNotNeedToCatchUp) {
ASSERT_EQ(1, countLogLinesContaining("Caught up to the latest optime known via heartbeats"));
auto opCtx = makeOperationContext();
signalDrainComplete(opCtx.get());
- Lock::GlobalLock lock(opCtx.get(), MODE_IX, UINT_MAX);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, Milliseconds::max());
ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
@@ -2284,7 +2284,7 @@ TEST_F(PrimaryCatchUpTest, CatchupSucceeds) {
ASSERT_EQUALS(1, countLogLinesContaining("Caught up to the latest known optime successfully"));
auto opCtx = makeOperationContext();
signalDrainComplete(opCtx.get());
- Lock::GlobalLock lock(opCtx.get(), MODE_IX, UINT_MAX);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, Milliseconds::max());
ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
@@ -2304,7 +2304,7 @@ TEST_F(PrimaryCatchUpTest, CatchupTimeout) {
ASSERT_EQUALS(1, countLogLinesContaining("Catchup timed out"));
auto opCtx = makeOperationContext();
signalDrainComplete(opCtx.get());
- Lock::GlobalLock lock(opCtx.get(), MODE_IX, UINT_MAX);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, Milliseconds::max());
ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
@@ -2329,7 +2329,7 @@ TEST_F(PrimaryCatchUpTest, CannotSeeAllNodes) {
ASSERT_EQ(1, countLogLinesContaining("Caught up to the latest optime known via heartbeats"));
auto opCtx = makeOperationContext();
signalDrainComplete(opCtx.get());
- Lock::GlobalLock lock(opCtx.get(), MODE_IX, UINT_MAX);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, Milliseconds::max());
ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
@@ -2354,7 +2354,7 @@ TEST_F(PrimaryCatchUpTest, HeartbeatTimeout) {
ASSERT_EQ(1, countLogLinesContaining("Caught up to the latest optime known via heartbeats"));
auto opCtx = makeOperationContext();
signalDrainComplete(opCtx.get());
- Lock::GlobalLock lock(opCtx.get(), MODE_IX, UINT_MAX);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, Milliseconds::max());
ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
@@ -2377,7 +2377,7 @@ TEST_F(PrimaryCatchUpTest, PrimaryStepsDownBeforeHeartbeatRefreshing) {
ASSERT_EQUALS(0, countLogLinesContaining("Caught up to the latest"));
ASSERT_EQUALS(0, countLogLinesContaining("Catchup timed out"));
auto opCtx = makeOperationContext();
- Lock::GlobalLock lock(opCtx.get(), MODE_IX, UINT_MAX);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, Milliseconds::max());
ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
@@ -2406,7 +2406,7 @@ TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringCatchUp) {
ASSERT_EQUALS(0, countLogLinesContaining("Caught up to the latest"));
ASSERT_EQUALS(0, countLogLinesContaining("Catchup timed out"));
auto opCtx = makeOperationContext();
- Lock::GlobalLock lock(opCtx.get(), MODE_IX, UINT_MAX);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, Milliseconds::max());
ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
@@ -2449,11 +2449,11 @@ TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringDrainMode) {
ASSERT(replCoord->getApplierState() == ApplierState::Draining);
auto opCtx = makeOperationContext();
{
- Lock::GlobalLock lock(opCtx.get(), MODE_IX, UINT_MAX);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, Milliseconds::max());
ASSERT_FALSE(replCoord->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
signalDrainComplete(opCtx.get());
- Lock::GlobalLock lock(opCtx.get(), MODE_IX, UINT_MAX);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, Milliseconds::max());
ASSERT(replCoord->getApplierState() == ApplierState::Stopped);
ASSERT_TRUE(replCoord->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
@@ -2517,7 +2517,7 @@ TEST_F(PrimaryCatchUpTest, FreshestNodeBecomesAvailableLater) {
ASSERT_EQ(1, countLogLinesContaining("Caught up to the latest"));
auto opCtx = makeOperationContext();
signalDrainComplete(opCtx.get());
- Lock::GlobalLock lock(opCtx.get(), MODE_IX, UINT_MAX);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, Milliseconds::max());
ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
@@ -2562,7 +2562,7 @@ TEST_F(PrimaryCatchUpTest, InfiniteTimeoutAndAbort) {
ASSERT_EQUALS(0, countLogLinesContaining("Catchup timed out"));
auto opCtx = makeOperationContext();
signalDrainComplete(opCtx.get());
- Lock::GlobalLock lock(opCtx.get(), MODE_IX, UINT_MAX);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, Milliseconds::max());
ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
@@ -2576,7 +2576,7 @@ TEST_F(PrimaryCatchUpTest, ZeroTimeout) {
ASSERT_EQUALS(1, countLogLinesContaining("Skipping primary catchup"));
auto opCtx = makeOperationContext();
signalDrainComplete(opCtx.get());
- Lock::GlobalLock lock(opCtx.get(), MODE_IX, UINT_MAX);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, Milliseconds::max());
ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 45b5c7cf466..541554b7448 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -393,9 +393,9 @@ void ReplicationCoordinatorImpl::_stepDownFinish(
auto opCtx = cc().makeOperationContext();
Lock::GlobalLock globalExclusiveLock{
- opCtx.get(), MODE_X, UINT_MAX, Lock::GlobalLock::EnqueueOnly()};
+ opCtx.get(), MODE_X, Milliseconds::max(), Lock::GlobalLock::EnqueueOnly()};
_externalState->killAllUserOperations(opCtx.get());
- globalExclusiveLock.waitForLock(UINT_MAX);
+ globalExclusiveLock.waitForLock(Milliseconds::max());
invariant(globalExclusiveLock.isLocked());
stdx::unique_lock<stdx::mutex> lk(_mutex);
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index 42955b0a207..7f5447186fd 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -2118,7 +2118,7 @@ TEST_F(StepDownTest, InterruptingStepDownCommandRestoresWriteAvailability) {
// This is the important check, that we didn't accidentally step back up when aborting the
// stepdown command attempt.
const auto opCtx = makeOperationContext();
- Lock::GlobalLock lock(opCtx.get(), MODE_IX, UINT_MAX);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, Milliseconds::max());
ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "admin"));
}
@@ -2174,7 +2174,7 @@ TEST_F(StepDownTest, InterruptingAfterUnconditionalStepdownDoesNotRestoreWriteAv
// This is the important check, that we didn't accidentally step back up when aborting the
// stepdown command attempt.
- Lock::GlobalLock lock(opCtx.get(), MODE_IX, UINT_MAX);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, Milliseconds::max());
ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "admin"));
}
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index 63dfe97b02b..3d1dc4ee667 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -198,7 +198,7 @@ Status RollbackImpl::_awaitBgIndexCompletion(OperationContext* opCtx) {
StorageEngine* storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
std::vector<std::string> dbs;
{
- Lock::GlobalLock lk(opCtx, MODE_IS, UINT_MAX);
+ Lock::GlobalLock lk(opCtx, MODE_IS, Milliseconds::max());
storageEngine->listDatabases(&dbs);
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp
index 1aaee94527d..28a6b23748e 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp
@@ -57,7 +57,7 @@ bool WiredTigerServerStatusSection::includeByDefault() const {
BSONObj WiredTigerServerStatusSection::generateSection(OperationContext* opCtx,
const BSONElement& configElement) const {
- Lock::GlobalLock lk(opCtx, LockMode::MODE_IS, UINT_MAX);
+ Lock::GlobalLock lk(opCtx, LockMode::MODE_IS, Milliseconds::max());
// The session does not open a transaction here as one is not needed and opening one would
// mean that execution could become blocked when a new transaction cannot be allocated