42 files changed, 166 insertions, 198 deletions
diff --git a/src/mongo/client/replica_set_monitor.cpp b/src/mongo/client/replica_set_monitor.cpp
index 6ad0d0b2231..d8fb071c6c6 100644
--- a/src/mongo/client/replica_set_monitor.cpp
+++ b/src/mongo/client/replica_set_monitor.cpp
@@ -133,7 +133,7 @@ protected:
         // using it.
         if (!inShutdown() && !StaticObserver::_destroyingStatics) {
             stdx::unique_lock<stdx::mutex> sl(_monitorMutex);
-            _stopRequestedCV.wait_for(sl, stdx::chrono::seconds(10));
+            _stopRequestedCV.wait_for(sl, Seconds(10));
         }
 
         while (!inShutdown() && !StaticObserver::_destroyingStatics) {
@@ -157,7 +157,7 @@ protected:
                     break;
                 }
 
-                _stopRequestedCV.wait_for(sl, stdx::chrono::seconds(10));
+                _stopRequestedCV.wait_for(sl, Seconds(10));
             }
         }
diff --git a/src/mongo/db/collection_index_usage_tracker_test.cpp b/src/mongo/db/collection_index_usage_tracker_test.cpp
index 5ce0988452c..b82583d5daf 100644
--- a/src/mongo/db/collection_index_usage_tracker_test.cpp
+++ b/src/mongo/db/collection_index_usage_tracker_test.cpp
@@ -141,7 +141,7 @@ TEST_F(CollectionIndexUsageTrackerTest, DateTimeAfterDeregister) {
     ASSERT(statsMap.find("foo") == statsMap.end());
 
     // Increment clock source so that a new index registration has different start time.
-    getClockSource()->advance(stdx::chrono::milliseconds(1));
+    getClockSource()->advance(Milliseconds(1));
 
     getTracker()->registerIndex("foo", BSON("foo" << 1));
     statsMap = getTracker()->getUsageStats();
diff --git a/src/mongo/db/query/query_yield.cpp b/src/mongo/db/query/query_yield.cpp
index a4fbd5cca12..893184fb14f 100644
--- a/src/mongo/db/query/query_yield.cpp
+++ b/src/mongo/db/query/query_yield.cpp
@@ -78,7 +78,7 @@ void QueryYield::yieldAllLocks(OperationContext* txn,
         const BSONObj& data = customWait.getData();
         BSONElement customWaitNS = data["namespace"];
         if (!customWaitNS || planExecNS == customWaitNS.str()) {
-            sleepFor(stdx::chrono::milliseconds(data["waitForMillis"].numberInt()));
+            sleepFor(Milliseconds(data["waitForMillis"].numberInt()));
         }
     }
diff --git a/src/mongo/db/range_deleter.cpp b/src/mongo/db/range_deleter.cpp
index 22172c0bf35..2d9e9c04bd5 100644
--- a/src/mongo/db/range_deleter.cpp
+++ b/src/mongo/db/range_deleter.cpp
@@ -83,8 +83,8 @@ static void logCursorsWaiting(RangeDeleteEntry* entry) {
     // We always log the first cursors waiting message (so we have cursor ids in the logs).
    // After 15 minutes (the cursor timeout period), we start logging additional messages at
    // a 1 minute interval.
-    static const auto kLogCursorsThreshold = stdx::chrono::minutes{15};
-    static const auto kLogCursorsInterval = stdx::chrono::minutes{1};
+    static const auto kLogCursorsThreshold = Minutes{15};
+    static const auto kLogCursorsInterval = Minutes{1};
 
     Date_t currentTime = jsTime();
     Milliseconds elapsedMillisSinceQueued{0};
@@ -423,8 +423,7 @@ void RangeDeleter::doWork() {
         {
             stdx::unique_lock<stdx::mutex> sl(_queueMutex);
             while (_taskQueue.empty()) {
-                _taskQueueNotEmptyCV.wait_for(sl,
-                                              stdx::chrono::milliseconds(kNotEmptyTimeoutMillis));
+                _taskQueueNotEmptyCV.wait_for(sl, Milliseconds(kNotEmptyTimeoutMillis));
 
                 if (stopRequested()) {
                     log() << "stopping range deleter worker" << endl;
diff --git a/src/mongo/db/repl/read_concern_response.cpp b/src/mongo/db/repl/read_concern_response.cpp
index 0b4ffdac470..2bcf68653c0 100644
--- a/src/mongo/db/repl/read_concern_response.cpp
+++ b/src/mongo/db/repl/read_concern_response.cpp
@@ -42,16 +42,14 @@ namespace repl {
 const string ReadConcernResponse::kWaitedMSFieldName("waitedMS");
 
 ReadConcernResponse::ReadConcernResponse(Status status)
-    : ReadConcernResponse(status, stdx::chrono::milliseconds(0), false) {}
+    : ReadConcernResponse(status, Milliseconds(0), false) {}
 
 ReadConcernResponse::ReadConcernResponse() : ReadConcernResponse(Status::OK()) {}
 
-ReadConcernResponse::ReadConcernResponse(Status status, stdx::chrono::milliseconds duration)
+ReadConcernResponse::ReadConcernResponse(Status status, Milliseconds duration)
     : ReadConcernResponse(status, duration, true) {}
 
-ReadConcernResponse::ReadConcernResponse(Status status,
-                                         stdx::chrono::milliseconds duration,
-                                         bool waited)
+ReadConcernResponse::ReadConcernResponse(Status status, Milliseconds duration, bool waited)
     : _waited(waited), _duration(duration), _status(status) {}
 
 void ReadConcernResponse::appendInfo(BSONObjBuilder* builder) {
@@ -66,7 +64,7 @@ bool ReadConcernResponse::didWait() const {
     return _waited;
 }
 
-stdx::chrono::milliseconds ReadConcernResponse::getDuration() const {
+Milliseconds ReadConcernResponse::getDuration() const {
     return _duration;
 }
diff --git a/src/mongo/db/repl/read_concern_response.h b/src/mongo/db/repl/read_concern_response.h
index 38773b7e1ac..e16fd61bd56 100644
--- a/src/mongo/db/repl/read_concern_response.h
+++ b/src/mongo/db/repl/read_concern_response.h
@@ -31,7 +31,7 @@
 #include <string>
 
 #include "mongo/base/status.h"
-#include "mongo/stdx/chrono.h"
+#include "mongo/util/time_support.h"
 
 namespace mongo {
 
@@ -56,7 +56,7 @@ public:
     /**
      * Constructs a response with wait set to true along with the given parameters.
     */
-    ReadConcernResponse(Status status, stdx::chrono::milliseconds duration);
+    ReadConcernResponse(Status status, Milliseconds duration);
 
     /**
      * Appends to the builder the timeout and duration info if didWait() is true.
@@ -70,7 +70,7 @@ public:
     * Returns the duration waited for the ReadConcern to be satisfied.
     * Returns 0 if didWait is false.
    */
-    stdx::chrono::milliseconds getDuration() const;
+    Milliseconds getDuration() const;
 
     /**
     * Returns more details about an error if it occurred.
@@ -78,10 +78,10 @@ public:
     Status getStatus() const;
 
 private:
-    ReadConcernResponse(Status status, stdx::chrono::milliseconds duration, bool waited);
+    ReadConcernResponse(Status status, Milliseconds duration, bool waited);
 
     bool _waited;
-    stdx::chrono::milliseconds _duration = stdx::chrono::milliseconds(0);
+    Milliseconds _duration = Milliseconds(0);
     Status _status;
 };
diff --git a/src/mongo/db/repl/read_concern_response_test.cpp b/src/mongo/db/repl/read_concern_response_test.cpp
index 4035e47af2b..5cc05ef6868 100644
--- a/src/mongo/db/repl/read_concern_response_test.cpp
+++ b/src/mongo/db/repl/read_concern_response_test.cpp
@@ -61,8 +61,7 @@ TEST(ReadAfterResponse, WithStatus) {
 }
 
 TEST(ReadAfterResponse, WaitedWithDuration) {
-    ReadConcernResponse response(Status(ErrorCodes::InternalError, "test"),
-                                 stdx::chrono::milliseconds(7));
+    ReadConcernResponse response(Status(ErrorCodes::InternalError, "test"), Milliseconds(7));
 
     ASSERT_TRUE(response.didWait());
     ASSERT_EQUALS(Milliseconds(7), response.getDuration());
diff --git a/src/mongo/rpc/metadata/sharding_metadata_test.cpp b/src/mongo/rpc/metadata/sharding_metadata_test.cpp
index cf76750ab94..90050f6781d 100644
--- a/src/mongo/rpc/metadata/sharding_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/sharding_metadata_test.cpp
@@ -46,7 +46,7 @@ ShardingMetadata checkParse(const BSONObj& metadata) {
 }
 
 const auto kElectionId = OID{"541b1a00e8a23afa832b218e"};
-const auto kLastOpTime = repl::OpTime(Timestamp(stdx::chrono::seconds{1337}, 800u), 4);
+const auto kLastOpTime = repl::OpTime(Timestamp(Seconds{1337}, 800u), 4);
 
 TEST(ShardingMetadata, ReadFromMetadata) {
     {
diff --git a/src/mongo/s/catalog/catalog_manager.h b/src/mongo/s/catalog/catalog_manager.h
index f3fe5ca2f21..72cfdc51b8a 100644
--- a/src/mongo/s/catalog/catalog_manager.h
+++ b/src/mongo/s/catalog/catalog_manager.h
@@ -446,7 +446,7 @@ public:
         OperationContext* txn,
         StringData name,
         StringData whyMessage,
-        stdx::chrono::milliseconds waitFor = DistLockManager::kSingleLockAttemptTimeout) = 0;
+        Milliseconds waitFor = DistLockManager::kSingleLockAttemptTimeout) = 0;
 
 protected:
     CatalogManager() = default;
diff --git a/src/mongo/s/catalog/catalog_manager_mock.cpp b/src/mongo/s/catalog/catalog_manager_mock.cpp
index 49f5e3213cb..5e09bc1a7b7 100644
--- a/src/mongo/s/catalog/catalog_manager_mock.cpp
+++ b/src/mongo/s/catalog/catalog_manager_mock.cpp
@@ -144,11 +144,10 @@ StatusWith<repl::OpTimeWith<std::vector<ShardType>>> CatalogManagerMock::getAllS
     return {ErrorCodes::InternalError, "Method not implemented"};
 }
 
-StatusWith<DistLockManager::ScopedDistLock> CatalogManagerMock::distLock(
-    OperationContext* txn,
-    StringData name,
-    StringData whyMessage,
-    stdx::chrono::milliseconds waitFor) {
+StatusWith<DistLockManager::ScopedDistLock> CatalogManagerMock::distLock(OperationContext* txn,
+                                                                         StringData name,
+                                                                         StringData whyMessage,
+                                                                         Milliseconds waitFor) {
     return {ErrorCodes::InternalError, "Method not implemented"};
 }
diff --git a/src/mongo/s/catalog/catalog_manager_mock.h b/src/mongo/s/catalog/catalog_manager_mock.h
index 48a5ba066d8..51a43f7a1b5 100644
--- a/src/mongo/s/catalog/catalog_manager_mock.h
+++ b/src/mongo/s/catalog/catalog_manager_mock.h
@@ -156,11 +156,10 @@ public:
 
     DistLockManager* getDistLockManager() override;
 
-    StatusWith<DistLockManager::ScopedDistLock> distLock(
-        OperationContext* txn,
-        StringData name,
-        StringData whyMessage,
-        stdx::chrono::milliseconds waitFor) override;
+    StatusWith<DistLockManager::ScopedDistLock> distLock(OperationContext* txn,
+                                                         StringData name,
+                                                         StringData whyMessage,
+                                                         Milliseconds waitFor) override;
 
     Status initConfigVersion(OperationContext* txn) override;
diff --git a/src/mongo/s/catalog/dist_lock_manager.cpp b/src/mongo/s/catalog/dist_lock_manager.cpp
index 7c59ea07f0c..732e2fc664a 100644
--- a/src/mongo/s/catalog/dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/dist_lock_manager.cpp
@@ -36,9 +36,9 @@
 
 namespace mongo {
 
-const stdx::chrono::seconds DistLockManager::kDefaultLockTimeout(20);
-const stdx::chrono::milliseconds DistLockManager::kSingleLockAttemptTimeout(0);
-const stdx::chrono::milliseconds DistLockManager::kDefaultLockRetryInterval(500);
+const Seconds DistLockManager::kDefaultLockTimeout(20);
+const Milliseconds DistLockManager::kSingleLockAttemptTimeout(0);
+const Milliseconds DistLockManager::kDefaultLockRetryInterval(500);
 
 DistLockManager::ScopedDistLock::ScopedDistLock(OperationContext* txn,
                                                 DistLockHandle lockHandle,
diff --git a/src/mongo/s/catalog/dist_lock_manager.h b/src/mongo/s/catalog/dist_lock_manager.h
index 3b60c4a09bd..f97b186e2de 100644
--- a/src/mongo/s/catalog/dist_lock_manager.h
+++ b/src/mongo/s/catalog/dist_lock_manager.h
@@ -62,15 +62,15 @@ class StatusWith;
 class DistLockManager {
 public:
     // Default timeout which will be used if one is not passed to the lock method.
-    static const stdx::chrono::seconds kDefaultLockTimeout;
+    static const Seconds kDefaultLockTimeout;
 
     // Timeout value, which specifies that if the lock is not available immediately, no attempt
     // should be made to wait for it to become free.
-    static const stdx::chrono::milliseconds kSingleLockAttemptTimeout;
+    static const Milliseconds kSingleLockAttemptTimeout;
 
    // If timeout is passed to the lock call, what is the default frequency with which the lock will
    // be checked for availability.
-    static const stdx::chrono::milliseconds kDefaultLockRetryInterval;
+    static const Milliseconds kDefaultLockRetryInterval;
 
     /**
      * RAII type for distributed lock. Not meant to be shared across multiple threads.
@@ -134,8 +134,8 @@ public:
         OperationContext* txn,
         StringData name,
         StringData whyMessage,
-        stdx::chrono::milliseconds waitFor = kDefaultLockTimeout,
-        stdx::chrono::milliseconds lockTryInterval = kDefaultLockRetryInterval) = 0;
+        Milliseconds waitFor = kDefaultLockTimeout,
+        Milliseconds lockTryInterval = kDefaultLockRetryInterval) = 0;
 
     /**
      * Same behavior as lock(...) above, except takes a specific lock session ID "lockSessionID"
@@ -150,8 +150,8 @@ public:
         StringData name,
         StringData whyMessage,
         const OID lockSessionID,
-        stdx::chrono::milliseconds waitFor = kDefaultLockTimeout,
-        stdx::chrono::milliseconds lockTryInterval = kDefaultLockRetryInterval) = 0;
+        Milliseconds waitFor = kDefaultLockTimeout,
+        Milliseconds lockTryInterval = kDefaultLockRetryInterval) = 0;
 
     /**
      * Makes a best-effort attempt to unlock all locks owned by the given processID.
diff --git a/src/mongo/s/catalog/dist_lock_manager_mock.cpp b/src/mongo/s/catalog/dist_lock_manager_mock.cpp
index 28ac22cb411..7c2c58e83ed 100644
--- a/src/mongo/s/catalog/dist_lock_manager_mock.cpp
+++ b/src/mongo/s/catalog/dist_lock_manager_mock.cpp
@@ -35,18 +35,17 @@
 #include <algorithm>
 
 #include "mongo/util/mongoutils/str.h"
+#include "mongo/util/time_support.h"
 #include "mongo/unittest/unittest.h"
 
 namespace mongo {
 
-using stdx::chrono::milliseconds;
-
 namespace {
 
 void NoLockFuncSet(StringData name,
                    StringData whyMessage,
-                   milliseconds waitFor,
-                   milliseconds lockTryInterval) {
+                   Milliseconds waitFor,
+                   Milliseconds lockTryInterval) {
     FAIL(str::stream() << "Lock not expected to be called. "
                        << "Name: " << name << ", whyMessage: " << whyMessage
                        << ", waitFor: " << waitFor << ", lockTryInterval: " << lockTryInterval);
@@ -71,8 +70,8 @@ StatusWith<DistLockManager::ScopedDistLock> DistLockManagerMock::lock(
     OperationContext* txn,
     StringData name,
     StringData whyMessage,
-    milliseconds waitFor,
-    milliseconds lockTryInterval) {
+    Milliseconds waitFor,
+    Milliseconds lockTryInterval) {
     return lockWithSessionID(
         txn, name, whyMessage, DistLockHandle::gen(), waitFor, lockTryInterval);
 }
@@ -82,8 +81,8 @@ StatusWith<DistLockManager::ScopedDistLock> DistLockManagerMock::lockWithSession
     StringData name,
     StringData whyMessage,
     const OID lockSessionID,
-    milliseconds waitFor,
-    milliseconds lockTryInterval) {
+    Milliseconds waitFor,
+    Milliseconds lockTryInterval) {
     _lockChecker(name, whyMessage, waitFor, lockTryInterval);
     _lockChecker = NoLockFuncSet;
diff --git a/src/mongo/s/catalog/dist_lock_manager_mock.h b/src/mongo/s/catalog/dist_lock_manager_mock.h
index 21ddef99f93..5d32acda6b3 100644
--- a/src/mongo/s/catalog/dist_lock_manager_mock.h
+++ b/src/mongo/s/catalog/dist_lock_manager_mock.h
@@ -47,27 +47,26 @@ public:
 
     virtual std::string getProcessID() override;
 
-    virtual StatusWith<DistLockManager::ScopedDistLock> lock(
-        OperationContext* txn,
-        StringData name,
-        StringData whyMessage,
-        stdx::chrono::milliseconds waitFor,
-        stdx::chrono::milliseconds lockTryInterval) override;
+    virtual StatusWith<DistLockManager::ScopedDistLock> lock(OperationContext* txn,
+                                                             StringData name,
+                                                             StringData whyMessage,
+                                                             Milliseconds waitFor,
+                                                             Milliseconds lockTryInterval) override;
 
     virtual StatusWith<DistLockManager::ScopedDistLock> lockWithSessionID(
         OperationContext* txn,
         StringData name,
         StringData whyMessage,
         const OID lockSessionID,
-        stdx::chrono::milliseconds waitFor,
-        stdx::chrono::milliseconds lockTryInterval) override;
+        Milliseconds waitFor,
+        Milliseconds lockTryInterval) override;
 
     virtual void unlockAll(OperationContext* txn, const std::string& processID) override;
 
     using LockFunc = stdx::function<void(StringData name,
                                          StringData whyMessage,
-                                         stdx::chrono::milliseconds waitFor,
-                                         stdx::chrono::milliseconds lockTryInterval)>;
+                                         Milliseconds waitFor,
+                                         Milliseconds lockTryInterval)>;
 
     void expectLock(LockFunc checkerFunc, Status lockStatus);
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
index 73d85a7e5db..9025a22b001 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
@@ -760,10 +760,7 @@ Status CatalogManagerReplicaSet::_log(OperationContext* txn,
 }
 
 StatusWith<DistLockManager::ScopedDistLock> CatalogManagerReplicaSet::distLock(
-    OperationContext* txn,
-    StringData name,
-    StringData whyMessage,
-    stdx::chrono::milliseconds waitFor) {
+    OperationContext* txn, StringData name, StringData whyMessage, Milliseconds waitFor) {
     return getDistLockManager()->lock(txn, name, whyMessage, waitFor);
 }
 
@@ -1114,10 +1111,10 @@ Status CatalogManagerReplicaSet::dropCollection(OperationContext* txn, const Nam
     LOG(1) << "dropCollection " << ns << " started";
 
     // Lock the collection globally so that split/migrate cannot run
-    stdx::chrono::seconds waitFor(DistLockManager::kDefaultLockTimeout);
+    Seconds waitFor(DistLockManager::kDefaultLockTimeout);
     MONGO_FAIL_POINT_BLOCK(setDropCollDistLockWait, customWait) {
         const BSONObj& data = customWait.getData();
-        waitFor = stdx::chrono::seconds(data["waitForSecs"].numberInt());
+        waitFor = Seconds(data["waitForSecs"].numberInt());
     }
 
     auto scopedDistLock = getDistLockManager()->lock(txn, ns.ns(), "drop", waitFor);
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set.h b/src/mongo/s/catalog/replset/catalog_manager_replica_set.h
index a3cbf6f9227..b7251dc2a37 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set.h
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set.h
@@ -88,11 +88,10 @@ public:
                const std::string& ns,
                const BSONObj& detail) override;
 
-    StatusWith<DistLockManager::ScopedDistLock> distLock(
-        OperationContext* txn,
-        StringData name,
-        StringData whyMessage,
-        stdx::chrono::milliseconds waitFor) override;
+    StatusWith<DistLockManager::ScopedDistLock> distLock(OperationContext* txn,
+                                                         StringData name,
+                                                         StringData whyMessage,
+                                                         Milliseconds waitFor) override;
 
     Status shardCollection(OperationContext* txn,
                            const std::string& ns,
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_append_db_stats_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_append_db_stats_test.cpp
index bf1b8e9d5a7..6542a14a5e6 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_append_db_stats_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_append_db_stats_test.cpp
@@ -39,6 +39,7 @@
 #include "mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h"
 #include "mongo/stdx/future.h"
 #include "mongo/util/log.h"
+#include "mongo/util/time_support.h"
 
 namespace mongo {
 namespace {
@@ -46,7 +47,6 @@ namespace {
 using executor::NetworkInterfaceMock;
 using executor::RemoteCommandRequest;
 using executor::RemoteCommandResponse;
-using stdx::chrono::milliseconds;
 
 using CatalogManagerReplSetAppendDbStatsTest = CatalogManagerReplSetTestFixture;
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_drop_coll_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_drop_coll_test.cpp
index abdb370f180..51bf688cf74 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_drop_coll_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_drop_coll_test.cpp
@@ -44,7 +44,7 @@
 #include "mongo/s/chunk_version.h"
 #include "mongo/s/client/shard_registry.h"
 #include "mongo/s/write_ops/batched_update_request.h"
-#include "mongo/stdx/chrono.h"
+#include "mongo/util/time_support.h"
 
 namespace mongo {
 namespace {
@@ -53,7 +53,6 @@ using executor::RemoteCommandRequest;
 using executor::RemoteCommandResponse;
 using std::string;
 using std::vector;
-using stdx::chrono::milliseconds;
 using unittest::assertGet;
 
 class DropColl2ShardTest : public CatalogManagerReplSetTestFixture {
@@ -65,7 +64,7 @@ public:
         configTargeter()->setFindHostReturnValue(_configHost);
 
         distLock()->expectLock(
-            [this](StringData name, StringData whyMessage, milliseconds, milliseconds) {
+            [this](StringData name, StringData whyMessage, Milliseconds, Milliseconds) {
                 ASSERT_EQUALS(_dropNS.ns(), name);
                 ASSERT_EQUALS("drop", whyMessage);
             },
@@ -263,7 +262,7 @@ TEST_F(DropColl2ShardTest, ConfigTargeterError) {
 }
 
 TEST_F(DropColl2ShardTest, DistLockBusy) {
-    distLock()->expectLock([](StringData, StringData, milliseconds, milliseconds) {},
+    distLock()->expectLock([](StringData, StringData, Milliseconds, Milliseconds) {},
                            {ErrorCodes::LockBusy, "test lock taken"});
 
     auto future = launchAsync([this] {
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_log_change_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_log_change_test.cpp
index ee82dce9bd8..ed2a1b7e868 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_log_change_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_log_change_test.cpp
@@ -55,7 +55,7 @@ using executor::TaskExecutor;
 using stdx::async;
 using unittest::assertGet;
 
-const stdx::chrono::seconds kFutureTimeout{5};
+const Seconds kFutureTimeout{5};
 const HostAndPort configHost{"TestHost1"};
 
 class InfoLoggingTest : public CatalogManagerReplSetTestFixture {
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_remove_shard_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_remove_shard_test.cpp
index eed5dfb3c98..c359b3f2874 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_remove_shard_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_remove_shard_test.cpp
@@ -62,7 +62,7 @@ using std::string;
 using std::vector;
 using unittest::assertGet;
 
-static const stdx::chrono::seconds kFutureTimeout{5};
+static const Seconds kFutureTimeout{5};
 
 const BSONObj kReplSecondaryOkMetadata{[] {
     BSONObjBuilder o;
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp
index 8fc2cf1f984..46a8d19ece5 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp
@@ -56,9 +56,9 @@
 #include "mongo/s/shard_key_pattern.h"
 #include "mongo/s/write_ops/batched_command_request.h"
 #include "mongo/s/write_ops/batched_command_response.h"
-#include "mongo/stdx/chrono.h"
 #include "mongo/stdx/future.h"
 #include "mongo/util/log.h"
+#include "mongo/util/time_support.h"
 
 namespace mongo {
 namespace {
@@ -70,7 +70,6 @@ using executor::TaskExecutor;
 using std::set;
 using std::string;
 using std::vector;
-using stdx::chrono::milliseconds;
 using unittest::assertGet;
 
 const BSONObj kReplMetadata();
@@ -225,8 +224,8 @@ TEST_F(ShardCollectionTest, distLockFails) {
     distLock()->expectLock(
        [](StringData name,
           StringData whyMessage,
-          milliseconds waitFor,
-          milliseconds lockTryInterval) {
+          Milliseconds waitFor,
+          Milliseconds lockTryInterval) {
            ASSERT_EQUALS("test.foo", name);
            ASSERT_EQUALS("shardCollection", whyMessage);
        },
@@ -258,8 +257,8 @@ TEST_F(ShardCollectionTest, anotherMongosSharding) {
     distLock()->expectLock(
        [&](StringData name,
            StringData whyMessage,
-           milliseconds waitFor,
-           milliseconds lockTryInterval) {
+           Milliseconds waitFor,
+           Milliseconds lockTryInterval) {
            ASSERT_EQUALS(ns, name);
            ASSERT_EQUALS("shardCollection", whyMessage);
        },
@@ -317,8 +316,8 @@ TEST_F(ShardCollectionTest, noInitialChunksOrData) {
     distLock()->expectLock(
        [&](StringData name,
            StringData whyMessage,
-           milliseconds waitFor,
-           milliseconds lockTryInterval) {
+           Milliseconds waitFor,
+           Milliseconds lockTryInterval) {
            ASSERT_EQUALS(ns, name);
            ASSERT_EQUALS("shardCollection", whyMessage);
        },
@@ -487,8 +486,8 @@ TEST_F(ShardCollectionTest, withInitialChunks) {
     distLock()->expectLock(
        [&](StringData name,
            StringData whyMessage,
-           milliseconds waitFor,
-           milliseconds lockTryInterval) {
+           Milliseconds waitFor,
+           Milliseconds lockTryInterval) {
            ASSERT_EQUALS(ns, name);
            ASSERT_EQUALS("shardCollection", whyMessage);
        },
@@ -642,8 +641,8 @@ TEST_F(ShardCollectionTest, withInitialData) {
     distLock()->expectLock(
        [&](StringData name,
            StringData whyMessage,
-           milliseconds waitFor,
-           milliseconds lockTryInterval) {
+           Milliseconds waitFor,
+           Milliseconds lockTryInterval) {
            ASSERT_EQUALS(ns, name);
            ASSERT_EQUALS("shardCollection", whyMessage);
        },
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
index 8f36b7908e5..8ed21d44952 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
@@ -55,9 +55,9 @@
 #include "mongo/s/write_ops/batched_command_response.h"
 #include "mongo/s/write_ops/batched_insert_request.h"
 #include "mongo/s/write_ops/batched_update_request.h"
-#include "mongo/stdx/chrono.h"
 #include "mongo/stdx/future.h"
 #include "mongo/util/log.h"
+#include "mongo/util/time_support.h"
 
 namespace mongo {
 namespace {
@@ -70,7 +70,6 @@ using rpc::ReplSetMetadata;
 using repl::OpTime;
 using std::string;
 using std::vector;
-using stdx::chrono::milliseconds;
 using unittest::assertGet;
 
 using CatalogManagerReplSetTest = CatalogManagerReplSetTestFixture;
@@ -706,8 +705,8 @@ TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandRewriteWriteConce
     distLock()->expectLock(
        [](StringData name,
           StringData whyMessage,
-          milliseconds waitFor,
-          milliseconds lockTryInterval) {
+          Milliseconds waitFor,
+          Milliseconds lockTryInterval) {
            ASSERT_EQUALS("authorizationData", name);
            ASSERT_EQUALS("dropUser", whyMessage);
        },
@@ -1540,8 +1539,8 @@ TEST_F(CatalogManagerReplSetTest, createDatabaseSuccess) {
     distLock()->expectLock([dbname](StringData name,
                                     StringData whyMessage,
-                                    stdx::chrono::milliseconds waitFor,
-                                    stdx::chrono::milliseconds lockTryInterval) {},
+                                    Milliseconds waitFor,
+                                    Milliseconds lockTryInterval) {},
                            Status::OK());
 
@@ -1639,8 +1638,8 @@ TEST_F(CatalogManagerReplSetTest, createDatabaseDistLockHeld) {
     distLock()->expectLock(
        [dbname](StringData name,
                 StringData whyMessage,
-                milliseconds waitFor,
-                milliseconds lockTryInterval) {
+                Milliseconds waitFor,
+                Milliseconds lockTryInterval) {
            ASSERT_EQUALS(dbname, name);
            ASSERT_EQUALS("createDatabase", whyMessage);
        },
@@ -1658,8 +1657,8 @@ TEST_F(CatalogManagerReplSetTest, createDatabaseDBExists) {
     distLock()->expectLock([dbname](StringData name,
                                     StringData whyMessage,
-                                    stdx::chrono::milliseconds waitFor,
-                                    stdx::chrono::milliseconds lockTryInterval) {},
+                                    Milliseconds waitFor,
+                                    Milliseconds lockTryInterval) {},
                            Status::OK());
 
@@ -1697,8 +1696,8 @@ TEST_F(CatalogManagerReplSetTest, createDatabaseDBExistsDifferentCase) {
     distLock()->expectLock([dbname](StringData name,
                                     StringData whyMessage,
-                                    stdx::chrono::milliseconds waitFor,
-                                    stdx::chrono::milliseconds lockTryInterval) {},
+                                    Milliseconds waitFor,
+                                    Milliseconds lockTryInterval) {},
                            Status::OK());
 
@@ -1735,8 +1734,8 @@ TEST_F(CatalogManagerReplSetTest, createDatabaseNoShards) {
     distLock()->expectLock([dbname](StringData name,
                                     StringData whyMessage,
-                                    stdx::chrono::milliseconds waitFor,
-                                    stdx::chrono::milliseconds lockTryInterval) {},
+                                    Milliseconds waitFor,
+                                    Milliseconds lockTryInterval) {},
                            Status::OK());
 
@@ -1827,8 +1826,8 @@ TEST_F(CatalogManagerReplSetTest, createDatabaseDuplicateKeyOnInsert) {
     distLock()->expectLock([dbname](StringData name,
                                     StringData whyMessage,
-                                    stdx::chrono::milliseconds waitFor,
-                                    stdx::chrono::milliseconds lockTryInterval) {},
+                                    Milliseconds waitFor,
+                                    Milliseconds lockTryInterval) {},
                            Status::OK());
 
@@ -1934,15 +1933,10 @@ TEST_F(CatalogManagerReplSetTest, EnableShardingNoDBExists) {
         shardRegistry()->getShard(operationContext(), "shard0")->getTargeter());
     shardTargeter->setFindHostReturnValue(HostAndPort("shard0:12"));
 
-    distLock()->expectLock(
-        [](StringData name,
-           StringData whyMessage,
-           stdx::chrono::milliseconds,
-           stdx::chrono::milliseconds) {
-            ASSERT_EQ("test", name);
-            ASSERT_FALSE(whyMessage.empty());
-        },
-        Status::OK());
+    distLock()->expectLock([](StringData name, StringData whyMessage, Milliseconds, Milliseconds) {
+        ASSERT_EQ("test", name);
+        ASSERT_FALSE(whyMessage.empty());
+    }, Status::OK());
 
     auto future = launchAsync([this] {
         auto status = catalogManager()->enableSharding(operationContext(), "test");
@@ -2022,9 +2016,8 @@ TEST_F(CatalogManagerReplSetTest, EnableShardingNoDBExists) {
 TEST_F(CatalogManagerReplSetTest, EnableShardingLockBusy) {
     configTargeter()->setFindHostReturnValue(HostAndPort("config:123"));
 
-    distLock()->expectLock(
-        [](StringData, StringData, stdx::chrono::milliseconds, stdx::chrono::milliseconds) {},
-        {ErrorCodes::LockBusy, "lock taken"});
+    distLock()->expectLock([](StringData, StringData, Milliseconds, Milliseconds) {},
+                           {ErrorCodes::LockBusy, "lock taken"});
 
     auto status = catalogManager()->enableSharding(operationContext(), "test");
     ASSERT_EQ(ErrorCodes::LockBusy, status.code());
@@ -2040,9 +2033,7 @@ TEST_F(CatalogManagerReplSetTest, EnableShardingDBExistsWithDifferentCase) {
     setupShards(vector<ShardType>{shard});
 
-    distLock()->expectLock(
-        [](StringData, StringData, stdx::chrono::milliseconds, stdx::chrono::milliseconds) {},
-        Status::OK());
+    distLock()->expectLock([](StringData, StringData, Milliseconds, Milliseconds) {}, Status::OK());
 
     auto future = launchAsync([this] {
         auto status = catalogManager()->enableSharding(operationContext(), "test");
@@ -2069,9 +2060,7 @@ TEST_F(CatalogManagerReplSetTest, EnableShardingDBExists) {
     setupShards(vector<ShardType>{shard});
 
-    distLock()->expectLock(
-        [](StringData, StringData, stdx::chrono::milliseconds, stdx::chrono::milliseconds) {},
-        Status::OK());
+    distLock()->expectLock([](StringData, StringData, Milliseconds, Milliseconds) {}, Status::OK());
 
     auto future = launchAsync([this] {
         auto status = catalogManager()->enableSharding(operationContext(), "test");
@@ -2127,9 +2116,7 @@ TEST_F(CatalogManagerReplSetTest, EnableShardingFailsWhenTheDatabaseIsAlreadySha
     setupShards(vector<ShardType>{shard});
 
-    distLock()->expectLock(
-        [](StringData, StringData, stdx::chrono::milliseconds, stdx::chrono::milliseconds) {},
-        Status::OK());
+    distLock()->expectLock([](StringData, StringData, Milliseconds, Milliseconds) {}, Status::OK());
 
     auto future = launchAsync([this] {
         auto status = catalogManager()->enableSharding(operationContext(), "test");
@@ -2155,9 +2142,7 @@ TEST_F(CatalogManagerReplSetTest, EnableShardingDBExistsInvalidFormat) {
     setupShards(vector<ShardType>{shard});
 
-    distLock()->expectLock(
-        [](StringData, StringData, stdx::chrono::milliseconds, stdx::chrono::milliseconds) {},
-        Status::OK());
+    distLock()->expectLock([](StringData, StringData, Milliseconds, Milliseconds) {}, Status::OK());
 
     auto future = launchAsync([this] {
         auto status = catalogManager()->enableSharding(operationContext(), "test");
@@ -2177,9 +2162,7 @@ TEST_F(CatalogManagerReplSetTest, EnableShardingDBExistsInvalidFormat) {
 TEST_F(CatalogManagerReplSetTest, EnableShardingNoDBExistsNoShards) {
     configTargeter()->setFindHostReturnValue(HostAndPort("config:123"));
 
-    distLock()->expectLock(
-        [](StringData, StringData, stdx::chrono::milliseconds, stdx::chrono::milliseconds) {},
-        Status::OK());
+    distLock()->expectLock([](StringData, StringData, Milliseconds, Milliseconds) {}, Status::OK());
 
     auto future = launchAsync([this] {
         auto status = catalogManager()->enableSharding(operationContext(), "test");
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_write_retry_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_write_retry_test.cpp
index bccc5e3cce5..27554868a9b 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_write_retry_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_write_retry_test.cpp
@@ -54,7 +54,6 @@
 #include "mongo/s/grid.h"
 #include "mongo/s/write_ops/batched_command_request.h"
 #include "mongo/s/write_ops/batched_command_response.h"
-#include "mongo/stdx/chrono.h"
 #include "mongo/stdx/future.h"
 #include "mongo/stdx/memory.h"
 #include "mongo/util/log.h"
@@ -69,7 +68,6 @@ using executor::TaskExecutor;
 using std::set;
 using std::string;
 using std::vector;
-using stdx::chrono::milliseconds;
 using unittest::assertGet;
 
 using InsertRetryTest = CatalogManagerReplSetTestFixture;
diff --git a/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp b/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp
index 3bae6ce8a09..35c9be42c9b 100644
--- a/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp
+++ b/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp
@@ -74,7 +74,7 @@ using repl::ReadConcernArgs;
 namespace {
 
 const HostAndPort dummyHost("dummy", 123);
-static const stdx::chrono::seconds kFutureTimeout{5};
+static const Seconds kFutureTimeout{5};
 
 /**
  * Sets up the mocked out objects for testing the replica-set backed catalog manager.
diff --git a/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp b/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp
index 31fb999cdbf..a7b693fe5c4 100644
--- a/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp
@@ -48,6 +48,7 @@
 #include "mongo/util/log.h"
 #include "mongo/util/mongoutils/str.h"
 #include "mongo/util/timer.h"
+#include "mongo/util/time_support.h"
 
 namespace mongo {
 
@@ -55,8 +56,6 @@ MONGO_FP_DECLARE(setDistLockTimeout);
 
 using std::string;
 using std::unique_ptr;
-using stdx::chrono::milliseconds;
-using stdx::chrono::duration_cast;
 
 namespace {
 
@@ -71,8 +70,8 @@ const Minutes ReplSetDistLockManager::kDistLockExpirationTime{15};
 ReplSetDistLockManager::ReplSetDistLockManager(ServiceContext* globalContext,
                                                StringData processID,
                                                unique_ptr<DistLockCatalog> catalog,
-                                               milliseconds pingInterval,
-                                               milliseconds lockExpiration)
+                                               Milliseconds pingInterval,
+                                               Milliseconds lockExpiration)
     : _serviceContext(globalContext),
       _processID(processID.toString()),
       _catalog(std::move(catalog)),
@@ -133,7 +132,7 @@ void ReplSetDistLockManager::doTask() {
             warning() << "pinging failed for distributed lock pinger" << causedBy(pingStatus);
         }
 
-        const milliseconds elapsed(elapsedSincelastPing.millis());
+        const Milliseconds elapsed(elapsedSincelastPing.millis());
         if (elapsed > 10 * _pingInterval) {
             warning() << "Lock pinger for proc: " << _processID << " was inactive for "
                       << elapsed << " ms";
@@ -171,7 +170,7 @@ void ReplSetDistLockManager::doTask() {
 
 StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* txn,
                                                        LocksType lockDoc,
-                                                       const milliseconds& lockExpiration) {
+                                                       const Milliseconds& lockExpiration) {
     const auto& processID = lockDoc.getProcess();
     auto pingStatus = _catalog->getPing(txn, processID);
 
@@ -199,7 +198,7 @@ StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* txn,
     // Be conservative when determining that lock expiration has elapsed by
     // taking into account the roundtrip delay of trying to get the local
     // time from the config server.
-    milliseconds delay(timer.millis() / 2);  // Assuming symmetrical delay.
+    Milliseconds delay(timer.millis() / 2);  // Assuming symmetrical delay.
     const auto& serverInfo = serverInfoStatus.getValue();
@@ -249,7 +248,7 @@ StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* txn,
         return false;
     }
 
-    milliseconds elapsedSinceLastPing(configServerLocalTime - pingInfo->configLocalTime);
+    Milliseconds elapsedSinceLastPing(configServerLocalTime - pingInfo->configLocalTime);
     if (elapsedSinceLastPing >= lockExpiration) {
         LOG(0) << "forcing lock '" << lockDoc.getName() << "' because elapsed time "
                << elapsedSinceLastPing << " >= takeover time " << lockExpiration;
@@ -266,8 +265,8 @@ StatusWith<DistLockManager::ScopedDistLock> ReplSetDistLockManager::lock(
     OperationContext* txn,
     StringData name,
     StringData whyMessage,
-    milliseconds waitFor,
-    milliseconds lockTryInterval) {
+    Milliseconds waitFor,
+    Milliseconds lockTryInterval) {
     return lockWithSessionID(txn, name, whyMessage, OID::gen(), waitFor, lockTryInterval);
 }
 
@@ -276,8 +275,8 @@ StatusWith<DistLockManager::ScopedDistLock> ReplSetDistLockManager::lockWithSess
     StringData name,
     StringData whyMessage,
     const OID lockSessionID,
-    milliseconds waitFor,
-    milliseconds lockTryInterval) {
+    Milliseconds waitFor,
+    Milliseconds lockTryInterval) {
     Timer timer(_serviceContext->getTickSource());
     Timer msgTimer(_serviceContext->getTickSource());
 
@@ -291,13 +290,13 @@ StatusWith<DistLockManager::ScopedDistLock> ReplSetDistLockManager::lockWithSess
     // the lock is currently taken, we will back off and try the acquisition again, repeating this
     // until the lockTryInterval has been reached. If a network error occurs at each lock
     // acquisition attempt, the lock acquisition will be retried immediately.
-    while (waitFor <= milliseconds::zero() || milliseconds(timer.millis()) < waitFor) {
+    while (waitFor <= Milliseconds::zero() || Milliseconds(timer.millis()) < waitFor) {
         const string who = str::stream() << _processID << ":" << getThreadName();
 
         auto lockExpiration = _lockExpiration;
         MONGO_FAIL_POINT_BLOCK(setDistLockTimeout, customTimeout) {
             const BSONObj& data = customTimeout.getData();
-            lockExpiration = stdx::chrono::milliseconds(data["timeoutMs"].numberInt());
+            lockExpiration = Milliseconds(data["timeoutMs"].numberInt());
         }
 
         LOG(1) << "trying to acquire new distributed lock for " << name
@@ -399,7 +398,7 @@ StatusWith<DistLockManager::ScopedDistLock> ReplSetDistLockManager::lockWithSess
 
         LOG(1) << "distributed lock '" << name << "' was not acquired.";
 
-        if (waitFor == milliseconds::zero()) {
+        if (waitFor == Milliseconds::zero()) {
             break;
         }
 
@@ -415,8 +414,8 @@ StatusWith<DistLockManager::ScopedDistLock> ReplSetDistLockManager::lockWithSess
         // busy, so reset the retries counter)
         networkErrorRetries = 0;
 
-        const milliseconds timeRemaining =
-            std::max(milliseconds::zero(), waitFor - milliseconds(timer.millis()));
+        const Milliseconds timeRemaining =
+            std::max(Milliseconds::zero(), waitFor - Milliseconds(timer.millis()));
         sleepFor(std::min(lockTryInterval, timeRemaining));
     }
diff --git a/src/mongo/s/catalog/replset/replset_dist_lock_manager.h b/src/mongo/s/catalog/replset/replset_dist_lock_manager.h
index 2f876f8e693..635b2ab343d 100644
--- a/src/mongo/s/catalog/replset/replset_dist_lock_manager.h
+++ b/src/mongo/s/catalog/replset/replset_dist_lock_manager.h
@@ -58,8 +58,8 @@ public:
     ReplSetDistLockManager(ServiceContext* globalContext,
                            StringData processID,
                            std::unique_ptr<DistLockCatalog> catalog,
-                           stdx::chrono::milliseconds pingInterval,
-                           stdx::chrono::milliseconds lockExpiration);
+                           Milliseconds pingInterval,
+                           Milliseconds lockExpiration);
 
     virtual ~ReplSetDistLockManager();
 
@@ -68,20 +68,18 @@ public:
 
     virtual std::string getProcessID() override;
 
-    virtual StatusWith<DistLockManager::ScopedDistLock> lock(
-        OperationContext* txn,
-        StringData name,
-        StringData whyMessage,
-        stdx::chrono::milliseconds waitFor,
-        stdx::chrono::milliseconds lockTryInterval) override;
-
-    virtual StatusWith<ScopedDistLock> lockWithSessionID(
-        OperationContext* txn,
-        StringData name,
-        StringData whyMessage,
-        const OID lockSessionID,
-        stdx::chrono::milliseconds waitFor,
-        stdx::chrono::milliseconds lockTryInterval) override;
+    virtual StatusWith<DistLockManager::ScopedDistLock> lock(OperationContext* txn,
+                                                             StringData name,
+                                                             StringData whyMessage,
+                                                             Milliseconds waitFor,
+                                                             Milliseconds lockTryInterval) override;
+
+    virtual StatusWith<ScopedDistLock> lockWithSessionID(OperationContext* txn,
+                                                         StringData name,
+                                                         StringData whyMessage,
+                                                         const OID lockSessionID,
+                                                         Milliseconds waitFor,
+                                                         Milliseconds lockTryInterval) override;
 
     virtual void unlockAll(OperationContext* txn, const std::string& processID) override;
 
@@ -112,7 +110,7 @@ private:
      */
     StatusWith<bool> isLockExpired(OperationContext* txn,
                                    const LocksType lockDoc,
-                                   const stdx::chrono::milliseconds& lockExpiration);
+                                   const Milliseconds& lockExpiration);
 
     //
     // All member variables are labeled with one of the following codes indicating the
@@ -126,10 +124,10 @@ private:
 
     ServiceContext* const _serviceContext;  // (F)
 
-    const std::string _processID;                     // (I)
-    const std::unique_ptr<DistLockCatalog> _catalog;  // (I)
-    const stdx::chrono::milliseconds _pingInterval;   // (I)
-    const stdx::chrono::milliseconds _lockExpiration; // (I)
+    const std::string _processID;                     // (I)
+    const std::unique_ptr<DistLockCatalog> _catalog;  // (I)
+    const Milliseconds _pingInterval;                 // (I)
+    const Milliseconds _lockExpiration;               // (I)
 
     stdx::mutex _mutex;
     std::unique_ptr<stdx::thread> _execThread;  // (S)
diff --git a/src/mongo/s/client/sharding_network_connection_hook.cpp b/src/mongo/s/client/sharding_network_connection_hook.cpp
index 6970de929c3..10b687c2e20 100644
--- a/src/mongo/s/client/sharding_network_connection_hook.cpp
+++ b/src/mongo/s/client/sharding_network_connection_hook.cpp
@@ -126,7 +126,7 @@ ShardingNetworkConnectionHook::makeRequest(const HostAndPort& remoteHost) {
     executor::RemoteCommandRequest request;
     request.dbname = "admin";
     request.target = remoteHost;
-    request.timeout = stdx::chrono::seconds{30};
+    request.timeout = Seconds{30};
     request.cmdObj = ssv.toBSON();
 
     return {request};
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index c8b2611da29..5cc29ea502f 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -67,7 +67,7 @@ namespace {
 
 AtomicUInt32 JOB_NUMBER;
 
-const stdx::chrono::milliseconds kNoDistLockTimeout(-1);
+const Milliseconds kNoDistLockTimeout(-1);
 
 /**
  * Generates a unique name for the temporary M/R output collection.
diff --git a/src/mongo/s/query/cluster_cursor_cleanup_job.cpp b/src/mongo/s/query/cluster_cursor_cleanup_job.cpp
index 714076365c9..cdc7f2f0972 100644
--- a/src/mongo/s/query/cluster_cursor_cleanup_job.cpp
+++ b/src/mongo/s/query/cluster_cursor_cleanup_job.cpp
@@ -44,7 +44,7 @@ namespace {
 // Period of time after which mortal cursors are killed for inactivity. Configurable with server
 // parameter "cursorTimeoutMillis".
 std::atomic<long long> cursorTimeoutMillis(  // NOLINT
-    durationCount<stdx::chrono::milliseconds>(stdx::chrono::minutes(10)));
+    durationCount<Milliseconds>(Minutes(10)));
 
 ExportedServerParameter<long long, ServerParameterType::kStartupAndRuntime>
     cursorTimeoutMillisConfig(ServerParameterSet::getGlobal(),
@@ -65,10 +65,9 @@ void ClusterCursorCleanupJob::run() {
     invariant(manager);
 
     while (!inShutdown()) {
-        manager->killMortalCursorsInactiveSince(Date_t::now() -
-                                                stdx::chrono::milliseconds(cursorTimeoutMillis));
+        manager->killMortalCursorsInactiveSince(Date_t::now() - Milliseconds(cursorTimeoutMillis));
         manager->reapZombieCursors();
-        sleepFor(stdx::chrono::seconds(4));
+        sleepFor(Seconds(4));
     }
 }
diff --git a/src/mongo/s/query/cluster_cursor_manager_test.cpp b/src/mongo/s/query/cluster_cursor_manager_test.cpp
index 8d8efb5d54a..1c32aa33ec2 100644
--- a/src/mongo/s/query/cluster_cursor_manager_test.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager_test.cpp
@@ -245,7 +245,7 @@ TEST_F(ClusterCursorManagerTest, CheckOutCursorUpdateActiveTime) {
                                     ClusterCursorManager::CursorType::NamespaceNotSharded,
                                     ClusterCursorManager::CursorLifetime::Mortal));
     Date_t cursorRegistrationTime = getClockSource()->now();
-    getClockSource()->advance(stdx::chrono::milliseconds(1));
+    getClockSource()->advance(Milliseconds(1));
     auto checkedOutCursor = getManager()->checkOutCursor(nss, cursorId);
     ASSERT_OK(checkedOutCursor.getStatus());
     checkedOutCursor.getValue().returnCursor(ClusterCursorManager::CursorState::NotExhausted);
@@ -340,7 +340,7 @@ TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceBasic) {
 // Test that killing all mortal expired cursors does not kill a cursor that is unexpired.
 TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceSkipUnexpired) {
     Date_t timeBeforeCursorCreation = getClockSource()->now();
-    getClockSource()->advance(stdx::chrono::milliseconds(1));
+    getClockSource()->advance(Milliseconds(1));
     getManager()->registerCursor(allocateMockCursor(),
                                  nss,
                                  ClusterCursorManager::CursorType::NamespaceNotSharded,
@@ -377,7 +377,7 @@ TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceMultipleCursors)
                                      nss,
                                      ClusterCursorManager::CursorType::NamespaceNotSharded,
                                      ClusterCursorManager::CursorLifetime::Mortal);
-        getClockSource()->advance(stdx::chrono::milliseconds(1));
+        getClockSource()->advance(Milliseconds(1));
     }
     getManager()->killMortalCursorsInactiveSince(cutoff);
     for (size_t i = 0; i < numCursors; ++i) {
diff --git a/src/mongo/s/sharding_test_fixture.cpp b/src/mongo/s/sharding_test_fixture.cpp
index acbb5b7d13e..d2287936993 100644
--- a/src/mongo/s/sharding_test_fixture.cpp
+++ b/src/mongo/s/sharding_test_fixture.cpp
@@ -83,7 +83,7 @@ ShardingTestFixture::ShardingTestFixture() = default;
 
 ShardingTestFixture::~ShardingTestFixture() = default;
 
-const stdx::chrono::seconds ShardingTestFixture::kFutureTimeout{5};
+const Seconds ShardingTestFixture::kFutureTimeout{5};
 
 void ShardingTestFixture::setUp() {
     _service = stdx::make_unique<ServiceContextNoop>();
diff --git a/src/mongo/s/sharding_test_fixture.h b/src/mongo/s/sharding_test_fixture.h
index 85284e48f44..6758cd345fe 100644
--- a/src/mongo/s/sharding_test_fixture.h
+++ b/src/mongo/s/sharding_test_fixture.h
@@ -67,7 +67,7 @@ public:
     ~ShardingTestFixture();
 
 protected:
-    static const stdx::chrono::seconds kFutureTimeout;
+    static const Seconds kFutureTimeout;
 
     template <typename Lambda>
     executor::NetworkTestEnv::FutureHandle<typename std::result_of<Lambda()>::type> launchAsync(
diff --git a/src/mongo/scripting/mozjs/PosixNSPR.cpp b/src/mongo/scripting/mozjs/PosixNSPR.cpp
index b6c36d760d3..9800990ac04 100644
--- a/src/mongo/scripting/mozjs/PosixNSPR.cpp
+++ b/src/mongo/scripting/mozjs/PosixNSPR.cpp
@@ -28,6 +28,7 @@
 #include "mongo/stdx/thread.h"
 #include "mongo/util/concurrency/thread_name.h"
 #include "mongo/util/concurrency/threadlocal.h"
+#include "mongo/util/time_support.h"
 
 class nspr::Thread {
     mongo::stdx::thread thread_;
@@ -275,7 +276,7 @@ PRStatus PR_WaitCondVar(PRCondVar* cvar, uint32_t timeout) {
     mongo::stdx::unique_lock<mongo::stdx::mutex> lk(cvar->lock()->mutex(),
                                                     mongo::stdx::adopt_lock_t());
 
-    cvar->cond().wait_for(lk, mongo::stdx::chrono::microseconds(timeout));
+    cvar->cond().wait_for(lk, mongo::Microseconds(timeout));
 
     lk.release();
     return PR_SUCCESS;
diff --git a/src/mongo/util/background.cpp b/src/mongo/util/background.cpp
index c7bf42d6fd4..11eab0e06d1 100644
--- a/src/mongo/util/background.cpp
+++ b/src/mongo/util/background.cpp
@@ -200,7 +200,7 @@ Status BackgroundJob::cancel() {
 bool BackgroundJob::wait(unsigned msTimeOut) {
     verify(!_selfDelete);  // you cannot call wait on a self-deleting job
 
-    const auto deadline = stdx::chrono::system_clock::now() + stdx::chrono::milliseconds(msTimeOut);
+    const auto deadline = stdx::chrono::system_clock::now() + Milliseconds(msTimeOut);
     stdx::unique_lock<stdx::mutex> l(_status->mutex);
     while (_status->state != Done) {
         if (msTimeOut) {
@@ -304,7 +304,7 @@ Status PeriodicTaskRunner::stop(int gracePeriodMillis) {
 
 void PeriodicTaskRunner::run() {
     // Use a shorter cycle time in debug mode to help catch race conditions.
-    const stdx::chrono::seconds waitTime(kDebugBuild ? 5 : 60);
+    const Seconds waitTime(kDebugBuild ? 5 : 60);
 
     stdx::unique_lock<stdx::mutex> lock(_mutex);
     while (!_shutdownRequested) {
diff --git a/src/mongo/util/clock_source_mock.cpp b/src/mongo/util/clock_source_mock.cpp
index a85b3045a0f..f2257bf2604 100644
--- a/src/mongo/util/clock_source_mock.cpp
+++ b/src/mongo/util/clock_source_mock.cpp
@@ -41,7 +41,7 @@ Date_t ClockSourceMock::now() {
     return _now;
 }
 
-void ClockSourceMock::advance(stdx::chrono::milliseconds ms) {
+void ClockSourceMock::advance(Milliseconds ms) {
     _now += ms;
 }
diff --git a/src/mongo/util/clock_source_mock.h b/src/mongo/util/clock_source_mock.h
index 2ee372da48c..83af9cabb28 100644
--- a/src/mongo/util/clock_source_mock.h
+++ b/src/mongo/util/clock_source_mock.h
@@ -49,7 +49,7 @@ public:
     /**
      * Advances the current time by the given value.
     */
-    void advance(stdx::chrono::milliseconds ms);
+    void advance(Milliseconds ms);
 
     /**
     * Resets the current time to the given value.
diff --git a/src/mongo/util/duration.h b/src/mongo/util/duration.h
index ccde7fd57a3..ba4f15ec44e 100644
--- a/src/mongo/util/duration.h
+++ b/src/mongo/util/duration.h
@@ -27,6 +27,7 @@
 
 #pragma once
 
+#include <chrono>
 #include <cstdint>
 #include <iosfwd>
 #include <limits>
@@ -215,6 +216,10 @@ public:
                       "precision ones");
     }
 
+    constexpr operator stdx::chrono::duration<int64_t, period>() const {
+        return stdx::chrono::duration<int64_t, period>{_count};
+    }
+
     /**
     * Returns the number of periods represented by this duration.
    */
diff --git a/src/mongo/util/net/ssl_expiration.cpp b/src/mongo/util/net/ssl_expiration.cpp
index 110568e272f..98a8b51d8ad 100644
--- a/src/mongo/util/net/ssl_expiration.cpp
+++ b/src/mongo/util/net/ssl_expiration.cpp
@@ -36,7 +36,7 @@
 
 namespace mongo {
 
-static const auto oneDay = stdx::chrono::hours(24);
+static const auto oneDay = Hours(24);
 
 CertificateExpirationMonitor::CertificateExpirationMonitor(Date_t date)
     : _certExpiration(date), _lastCheckTime(Date_t::now()) {}
@@ -66,8 +66,7 @@ void CertificateExpirationMonitor::taskDoWork() {
     if (remainingValidDuration <= 30 * oneDay) {
         // The certificate will expire in the next 30 days.
         warning() << "Server certificate will expire on " << dateToISOStringUTC(_certExpiration)
-                  << " in " << durationCount<stdx::chrono::hours>(remainingValidDuration) / 24
-                  << " days.";
+                  << " in " << durationCount<Hours>(remainingValidDuration) / 24 << " days.";
     }
 }
diff --git a/src/mongo/util/ntservice.cpp b/src/mongo/util/ntservice.cpp
index 3bec1d69bf2..571614a14c9 100644
--- a/src/mongo/util/ntservice.cpp
+++ b/src/mongo/util/ntservice.cpp
@@ -544,7 +544,7 @@ static void serviceStop() {
     // so it doesn't even need a name.
     stdx::thread(std::move(exitCleanlyTask)).detach();
 
-    const auto timeout = stdx::chrono::milliseconds(kStopWaitHintMillis / 2);
+    const auto timeout = Milliseconds(kStopWaitHintMillis / 2);
 
     // We periodically check if we are done exiting by polling at half of each wait interval
     while (exitedCleanly.wait_for(timeout) != stdx::future_status::ready) {
diff --git a/src/mongo/util/tick_source_mock.cpp b/src/mongo/util/tick_source_mock.cpp
index a64c4f146b3..c5a5596ee94 100644
--- a/src/mongo/util/tick_source_mock.cpp
+++ b/src/mongo/util/tick_source_mock.cpp
@@ -44,7 +44,7 @@ TickSource::Tick TickSourceMock::getTicksPerSecond() {
     return kTicksPerSecond;
 }
 
-void TickSourceMock::advance(const stdx::chrono::milliseconds& ms) {
+void TickSourceMock::advance(const Milliseconds& ms) {
     _currentTicks += ms.count();
 }
diff --git a/src/mongo/util/tick_source_mock.h b/src/mongo/util/tick_source_mock.h
index 9600d8ce7c7..066bd3c9dcc 100644
--- a/src/mongo/util/tick_source_mock.h
+++ b/src/mongo/util/tick_source_mock.h
@@ -28,8 +28,8 @@
 
 #pragma once
 
-#include "mongo/stdx/chrono.h"
 #include "mongo/util/tick_source.h"
+#include "mongo/util/time_support.h"
 
 namespace mongo {
 
@@ -46,7 +46,7 @@ public:
     /**
     * Advance the ticks by the given amount of milliseconds.
    */
-    void advance(const stdx::chrono::milliseconds& ms);
+    void advance(const Milliseconds& ms);
 
     /**
    * Resets the tick count to the give value.
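
Editor's note: most of this patch is a mechanical rename from stdx::chrono types to the mongo duration aliases; the piece that makes it work is the conversion operator added in src/mongo/util/duration.h, which lets a mongo Duration convert implicitly to the equivalent stdx::chrono::duration wherever one is expected. The standalone sketch below illustrates that pattern only; it uses plain std::chrono and a hypothetical Duration stand-in (Duration, Milliseconds, Seconds, sleepFor here are illustrative names, not MongoDB source).

// Minimal sketch: a Duration-like wrapper with an implicit conversion to the
// matching std::chrono::duration, mirroring the operator added to duration.h above.
#include <chrono>
#include <cstdint>
#include <iostream>
#include <ratio>
#include <thread>

template <typename Period>
class Duration {
public:
    constexpr explicit Duration(int64_t count) : _count(count) {}

    // Implicit conversion to the equivalent chrono duration, so chrono-based
    // interfaces keep accepting the wrapper without changing their signatures.
    constexpr operator std::chrono::duration<int64_t, Period>() const {
        return std::chrono::duration<int64_t, Period>{_count};
    }

    constexpr int64_t count() const {
        return _count;
    }

private:
    int64_t _count;
};

using Milliseconds = Duration<std::milli>;
using Seconds = Duration<std::ratio<1>>;

// A chrono-typed API with a concrete (non-template) parameter: the wrapper
// converts implicitly at the call site, so callers can write Milliseconds(50).
void sleepFor(std::chrono::duration<int64_t, std::milli> d) {
    std::this_thread::sleep_for(d);
}

int main() {
    sleepFor(Milliseconds(50));  // implicit Duration -> chrono conversion
    std::chrono::duration<int64_t, std::ratio<1>> s = Seconds(2);  // same conversion via initialization
    std::cout << "converted Seconds(2) holds " << s.count() << " s\n";
    return 0;
}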