author     Gregory Wlodarek <gregory.wlodarek@mongodb.com>    2018-11-01 18:51:15 -0400
committer  Gregory Wlodarek <gregory.wlodarek@mongodb.com>    2018-11-08 18:35:00 -0500
commit     aff889b39301472bd2143967938a3dba468dfc63 (patch)
tree       2ee4fcec926d96a674690e390d5b916b4b9c7a3b
parent     4fb38d9c10123321dada6fe1be477f9cb99732a7 (diff)
download   mongo-aff889b39301472bd2143967938a3dba468dfc63.tar.gz
SERVER-36473 Make a dedicated RAII class to manage Client lifetime
-rw-r--r--  src/mongo/db/SConscript | 14
-rw-r--r--  src/mongo/db/client.cpp | 20
-rw-r--r--  src/mongo/db/client.h | 38
-rw-r--r--  src/mongo/db/clientcursor.cpp | 3
-rw-r--r--  src/mongo/db/commands/dbcheck.cpp | 3
-rw-r--r--  src/mongo/db/commands/fsync.cpp | 3
-rw-r--r--  src/mongo/db/index_builder.cpp | 3
-rw-r--r--  src/mongo/db/repl/initial_syncer_test.cpp | 5
-rw-r--r--  src/mongo/db/s/balancer/migration_manager.cpp | 3
-rw-r--r--  src/mongo/db/s/balancer/migration_manager_test.cpp | 24
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp | 78
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_create_collection_test.cpp | 4
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp | 3
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_drop_coll_test.cpp | 3
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp | 3
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp | 9
-rw-r--r--  src/mongo/db/s/implicit_create_collection_test.cpp | 15
-rw-r--r--  src/mongo/db/service_context_test_fixture.cpp | 8
-rw-r--r--  src/mongo/db/service_context_test_fixture.h | 3
-rw-r--r--  src/mongo/db/session_catalog_test.cpp | 18
-rw-r--r--  src/mongo/db/storage/SConscript | 2
-rw-r--r--  src/mongo/db/storage/kv/kv_engine_timestamps_test.cpp | 2
-rw-r--r--  src/mongo/db/storage/test_harness_helper.cpp | 11
-rw-r--r--  src/mongo/db/storage/test_harness_helper.h | 9
-rw-r--r--  src/mongo/db/storage/wiredtiger/SConscript | 1
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp | 8
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp | 39
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp | 3
-rw-r--r--  src/mongo/db/thread_client_test.cpp | 92
-rw-r--r--  src/mongo/db/ttl.cpp | 3
-rw-r--r--  src/mongo/embedded/embedded.cpp | 52
-rw-r--r--  src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp | 3
-rw-r--r--  src/mongo/s/query/cluster_cursor_cleanup_job.cpp | 3
33 files changed, 259 insertions(+), 229 deletions(-)
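
In outline, the commit replaces the manual Client::initThread() / ON_BLOCK_EXIT(Client::destroy()) pairing with a ThreadClient guard whose destructor releases the thread-local Client. A minimal before/after sketch of the pattern (names taken from this diff; the thread body and "workerThread" name are illustrative):

    // Before: two calls that must be kept paired by hand.
    void run() {
        Client::initThread("workerThread");
        ON_BLOCK_EXIT([] { Client::destroy(); });
        // ... thread body ...
    }

    // After: one RAII guard owns the thread-local Client.
    void run() {
        ThreadClient tc("workerThread", getGlobalServiceContext());
        // ... thread body; the Client is released when tc goes out of scope ...
    }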
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index 2737954e8f0..28f2d3c1a43 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -525,6 +525,20 @@ env.Library(
],
)
+env.CppUnitTest(
+ target='thread_client_test',
+ source='thread_client_test.cpp',
+ LIBDEPS=[
+ 'common',
+ ],
+ LIBDEPS_PRIVATE=[
+ 'service_context_test_fixture',
+ '$BUILD_DIR/mongo/transport/transport_layer_common',
+ '$BUILD_DIR/mongo/transport/transport_layer_mock',
+ '$BUILD_DIR/mongo/unittest/unittest',
+ ],
+)
+
env.Library(
target='lasterror',
source=[
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index 1ac7d548bd2..da08647395b 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -88,11 +88,6 @@ void Client::initThread(StringData desc,
currentClient = service->makeClient(fullDesc, std::move(session));
}
-void Client::destroy() {
- invariant(haveClient());
- currentClient.reset(nullptr);
-}
-
namespace {
int64_t generateSeed(const std::string& desc) {
size_t seed = 0;
@@ -169,4 +164,19 @@ void Client::setCurrent(ServiceContext::UniqueClient client) {
currentClient = std::move(client);
}
+ThreadClient::ThreadClient(ServiceContext* serviceContext)
+ : ThreadClient(getThreadName(), serviceContext, nullptr) {}
+
+ThreadClient::ThreadClient(StringData desc,
+ ServiceContext* serviceContext,
+ transport::SessionHandle session) {
+ invariant(!currentClient);
+ Client::initThread(desc, serviceContext, std::move(session));
+}
+
+ThreadClient::~ThreadClient() {
+ invariant(currentClient);
+ currentClient.reset(nullptr);
+}
+
} // namespace mongo
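
As the constructor and destructor above show, ThreadClient invariants that no Client is bound to the thread on entry and that one is still bound on exit. A sketch of the misuse this catches (hypothetical function, for illustration only):

    void badRun(ServiceContext* sc) {
        ThreadClient tc("outer", sc);
        // ThreadClient inner("inner", sc);  // would trip invariant(!currentClient)
        // Client::releaseCurrent();         // would trip invariant(currentClient) in ~ThreadClient
    }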
diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h
index 0393adf46f3..75eabd72f09 100644
--- a/src/mongo/db/client.h
+++ b/src/mongo/db/client.h
@@ -55,6 +55,7 @@ namespace mongo {
class Collection;
class OperationContext;
+class ThreadClient;
typedef long long ConnectionId;
@@ -78,8 +79,8 @@ public:
/**
* Moves client into the thread_local for this thread. After this call, Client::getCurrent
- * and cc() will return client.get(). The client will be destroyed with the thread exits
- * or Client::destroy() is called.
+ * and cc() will return client.get(). The client will be destroyed when the thread exits
+ * or the ThreadClient RAII helper goes out of scope.
*/
static void setCurrent(ServiceContext::UniqueClient client);
@@ -139,15 +140,6 @@ public:
*/
static void initThreadIfNotAlready();
- /**
- * Destroys the Client object stored in TLS for the current thread. The current thread must have
- * a Client.
- *
- * If destroy() is not called explicitly, then the Client stored in TLS will be destroyed upon
- * exit of the current thread.
- */
- static void destroy();
-
std::string clientAddress(bool includePort = false) const;
const std::string& desc() const {
return _desc;
@@ -219,6 +211,7 @@ public:
private:
friend class ServiceContext;
+ friend class ThreadClient;
explicit Client(std::string desc,
ServiceContext* serviceContext,
transport::SessionHandle session);
@@ -245,6 +238,29 @@ private:
};
/**
+ * RAII-style helper that manages the lifetime of a Client.
+ * Instantiates a Client on the current thread, which remains bound to the thread for as long as
+ * the ThreadClient instance is in scope.
+ *
+ * Swapping out the Client managed by a ThreadClient via AlternativeClientRegion is permitted, so
+ * long as the AlternativeClientRegion does not outlive the ThreadClient.
+ *
+ * Calling Client::releaseCurrent() on a Client managed by a ThreadClient is not permitted and
+ * will trip an invariant when the ThreadClient goes out of scope.
+ */
+class ThreadClient {
+public:
+ explicit ThreadClient(ServiceContext* serviceContext);
+ explicit ThreadClient(StringData desc,
+ ServiceContext* serviceContext,
+ transport::SessionHandle session = nullptr);
+ ~ThreadClient();
+ ThreadClient(const ThreadClient&) = delete;
+ ThreadClient(ThreadClient&&) = delete;
+ void operator=(const ThreadClient&) = delete;
+};
+
+/**
* Utility class to temporarily swap which client is bound to the running thread.
*
* Use this class to bind a client to the current thread for the duration of the
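
Per the comment block above, the managed Client may be swapped out temporarily; a sketch of the permitted pattern (hypothetical names, mirroring the TestAlternativeClientRegion test added later in this diff):

    void withAltClient(ServiceContext* sc) {
        ThreadClient tc("main", sc);
        auto swapClient = sc->makeClient("swapClient");
        {
            AlternativeClientRegion altRegion(swapClient);
            // runs with swapClient bound to this thread
        }  // altRegion must not outlive tc
        // tc's Client is bound to the thread again here
    }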
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
index 68d8d790d59..8c489cb3b8e 100644
--- a/src/mongo/db/clientcursor.cpp
+++ b/src/mongo/db/clientcursor.cpp
@@ -286,8 +286,7 @@ public:
}
void run() {
- Client::initThread("clientcursormon");
- ON_BLOCK_EXIT([] { Client::destroy(); });
+ ThreadClient tc("clientcursormon", getGlobalServiceContext());
while (!globalInShutdownDeprecated()) {
{
const ServiceContext::UniqueOperationContext opCtx = cc().makeOperationContext();
diff --git a/src/mongo/db/commands/dbcheck.cpp b/src/mongo/db/commands/dbcheck.cpp
index 44dcddc3ff6..fa6e107ba00 100644
--- a/src/mongo/db/commands/dbcheck.cpp
+++ b/src/mongo/db/commands/dbcheck.cpp
@@ -193,8 +193,7 @@ protected:
virtual void run() override {
// Every dbCheck runs in its own client.
- Client::initThread(name());
- ON_BLOCK_EXIT([] { Client::destroy(); });
+ ThreadClient tc(name(), getGlobalServiceContext());
for (const auto& coll : *_run) {
try {
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index 2043779e567..51ad036348c 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -332,8 +332,7 @@ private:
SimpleMutex filesLockedFsync;
void FSyncLockThread::run() {
- Client::initThread("fsyncLockWorker");
- ON_BLOCK_EXIT([] { Client::destroy(); });
+ ThreadClient tc("fsyncLockWorker", getGlobalServiceContext());
stdx::lock_guard<SimpleMutex> lkf(filesLockedFsync);
stdx::unique_lock<stdx::mutex> lk(fsyncCmd.lockStateMutex);
diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp
index a95b20674f7..dde059c7254 100644
--- a/src/mongo/db/index_builder.cpp
+++ b/src/mongo/db/index_builder.cpp
@@ -120,8 +120,7 @@ std::string IndexBuilder::name() const {
}
void IndexBuilder::run() {
- Client::initThread(name().c_str());
- ON_BLOCK_EXIT([] { Client::destroy(); });
+ ThreadClient tc(name(), getGlobalServiceContext());
LOG(2) << "IndexBuilder building index " << _index;
auto opCtx = cc().makeOperationContext();
diff --git a/src/mongo/db/repl/initial_syncer_test.cpp b/src/mongo/db/repl/initial_syncer_test.cpp
index 8e27e960931..6bc02d4d15a 100644
--- a/src/mongo/db/repl/initial_syncer_test.cpp
+++ b/src/mongo/db/repl/initial_syncer_test.cpp
@@ -120,7 +120,7 @@ class InitialSyncerTest : public executor::ThreadPoolExecutorTest,
public SyncSourceSelector,
public ScopedGlobalServiceContextForTest {
public:
- InitialSyncerTest() {}
+ InitialSyncerTest() : _threadClient(getGlobalServiceContext()) {}
executor::ThreadPoolMock::Options makeThreadPoolMockOptions() const override;
@@ -331,7 +331,6 @@ protected:
_mockServer = stdx::make_unique<MockRemoteDBServer>(_target.toString());
_options1.uuid = UUID::gen();
- Client::initThreadIfNotAlready();
reset();
launchExecutorThread();
@@ -435,7 +434,6 @@ protected:
_dbWorkThreadPool.reset();
_replicationProcess.reset();
_storageInterface.reset();
- Client::destroy();
}
/**
@@ -481,6 +479,7 @@ protected:
private:
DataReplicatorExternalStateMock* _externalState;
std::unique_ptr<InitialSyncer> _initialSyncer;
+ ThreadClient _threadClient;
bool _executorThreadShutdownComplete = false;
};
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index 80b227adfbf..4af83add8e2 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -521,8 +521,7 @@ void MigrationManager::_schedule(WithLock lock,
executor->scheduleRemoteCommand(
remoteRequest,
[this, itMigration](const executor::TaskExecutor::RemoteCommandCallbackArgs& args) {
- Client::initThread(getThreadName());
- ON_BLOCK_EXIT([&] { Client::destroy(); });
+ ThreadClient tc(getThreadName(), getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
stdx::lock_guard<stdx::mutex> lock(_mutex);
diff --git a/src/mongo/db/s/balancer/migration_manager_test.cpp b/src/mongo/db/s/balancer/migration_manager_test.cpp
index c9d218f4b07..c9efb7923d3 100644
--- a/src/mongo/db/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/db/s/balancer/migration_manager_test.cpp
@@ -311,8 +311,7 @@ TEST_F(MigrationManagerTest, OneCollectionTwoMigrations) {
const std::vector<MigrateInfo> migrationRequests{{kShardId1, chunk1}, {kShardId3, chunk2}};
auto future = launchAsync([this, migrationRequests] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
// Scheduling the moveChunk commands requires finding a host to which to send the command.
@@ -374,8 +373,7 @@ TEST_F(MigrationManagerTest, TwoCollectionsTwoMigrationsEach) {
{kShardId3, chunk2coll2}};
auto future = launchAsync([this, migrationRequests] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
// Scheduling the moveChunk commands requires finding a host to which to send the command.
@@ -429,8 +427,7 @@ TEST_F(MigrationManagerTest, SourceShardNotFound) {
const std::vector<MigrateInfo> migrationRequests{{kShardId1, chunk1}, {kShardId3, chunk2}};
auto future = launchAsync([this, chunk1, chunk2, migrationRequests] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
// Scheduling a moveChunk command requires finding a host to which to send the command. Set
@@ -476,8 +473,7 @@ TEST_F(MigrationManagerTest, JumboChunkResponseBackwardsCompatibility) {
const std::vector<MigrateInfo> migrationRequests{{kShardId1, chunk1}};
auto future = launchAsync([this, chunk1, migrationRequests] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
// Scheduling a moveChunk command requires finding a host to which to send the command. Set
@@ -515,8 +511,7 @@ TEST_F(MigrationManagerTest, InterruptMigration) {
setUpChunk(collName, kKeyPattern.globalMin(), kKeyPattern.globalMax(), kShardId0, version);
auto future = launchAsync([&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
// Scheduling a moveChunk command requires finding a host to which to send the command. Set
@@ -604,8 +599,7 @@ TEST_F(MigrationManagerTest, RestartMigrationManager) {
_migrationManager->finishRecovery(operationContext(), 0, kDefaultSecondaryThrottle);
auto future = launchAsync([&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
// Scheduling a moveChunk command requires finding a host to which to send the command. Set
@@ -659,8 +653,7 @@ TEST_F(MigrationManagerTest, MigrationRecovery) {
_migrationManager->startRecoveryAndAcquireDistLocks(operationContext());
auto future = launchAsync([this] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
// Scheduling the moveChunk commands requires finding hosts to which to send the commands.
@@ -761,8 +754,7 @@ TEST_F(MigrationManagerTest, RemoteCallErrorConversionToOperationFailed) {
setUpChunk(collName, BSON(kPattern << 49), kKeyPattern.globalMax(), kShardId2, version);
auto future = launchAsync([&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
// Scheduling the moveChunk commands requires finding a host to which to send the command.
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
index 7004fb1ebc3..73095f7aed4 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
@@ -407,8 +407,7 @@ TEST_F(AddShardTest, StandaloneBasicSuccess) {
operationContext()->setWriteConcern(ShardingCatalogClient::kMajorityWriteConcern);
auto future = launchAsync([this, expectedShardName] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto shardName =
assertGet(ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
@@ -492,8 +491,7 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
"TestDB2", ShardId(expectedShardName), false, databaseVersion::makeNew());
auto future = launchAsync([this, &expectedShardName, &shardTarget] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto shardName = assertGet(
ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), nullptr, ConnectionString(shardTarget), 100));
@@ -584,8 +582,7 @@ TEST_F(AddShardTest, UnreachableHost) {
std::string expectedShardName = "StandaloneShard";
auto future = launchAsync([this, &expectedShardName, &shardTarget] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto status =
ShardingCatalogManager::get(operationContext())
->addShard(
@@ -612,8 +609,7 @@ TEST_F(AddShardTest, AddMongosAsShard) {
std::string expectedShardName = "StandaloneShard";
auto future = launchAsync([this, &expectedShardName, &shardTarget] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto status =
ShardingCatalogManager::get(operationContext())
->addShard(
@@ -640,8 +636,7 @@ TEST_F(AddShardTest, AddReplicaSetShardAsStandalone) {
std::string expectedShardName = "Standalone";
auto future = launchAsync([this, expectedShardName, shardTarget] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto status =
ShardingCatalogManager::get(operationContext())
->addShard(
@@ -673,8 +668,7 @@ TEST_F(AddShardTest, AddStandaloneHostShardAsReplicaSet) {
std::string expectedShardName = "StandaloneShard";
auto future = launchAsync([this, expectedShardName, connString] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto status = ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
@@ -702,8 +696,7 @@ TEST_F(AddShardTest, ReplicaSetMistmatchedReplicaSetName) {
std::string expectedShardName = "StandaloneShard";
auto future = launchAsync([this, expectedShardName, connString] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto status = ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
@@ -733,8 +726,7 @@ TEST_F(AddShardTest, ShardIsCSRSConfigServer) {
std::string expectedShardName = "StandaloneShard";
auto future = launchAsync([this, expectedShardName, connString] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto status = ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
@@ -767,8 +759,7 @@ TEST_F(AddShardTest, ReplicaSetMissingHostsProvidedInSeedList) {
std::string expectedShardName = "StandaloneShard";
auto future = launchAsync([this, expectedShardName, connString] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto status = ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
@@ -803,8 +794,7 @@ TEST_F(AddShardTest, AddShardWithNameConfigFails) {
std::string expectedShardName = "config";
auto future = launchAsync([this, expectedShardName, connString] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto status = ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::BadValue, status);
@@ -850,8 +840,7 @@ TEST_F(AddShardTest, ShardContainsExistingDatabase) {
auto future = launchAsync([this, expectedShardName, connString] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto status = ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
@@ -899,8 +888,7 @@ TEST_F(AddShardTest, SuccessfullyAddReplicaSet) {
"shardDB", ShardId(expectedShardName), false, databaseVersion::makeNew());
auto future = launchAsync([this, &expectedShardName, &connString] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto shardName = assertGet(ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), nullptr, connString, 100));
ASSERT_EQUALS(expectedShardName, shardName);
@@ -966,8 +954,7 @@ TEST_F(AddShardTest, ReplicaSetExtraHostsDiscovered) {
"shardDB", ShardId(expectedShardName), false, databaseVersion::makeNew());
auto future = launchAsync([this, &expectedShardName, &seedString] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto shardName = assertGet(ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), nullptr, seedString, 100));
ASSERT_EQUALS(expectedShardName, shardName);
@@ -1044,8 +1031,7 @@ TEST_F(AddShardTest, AddShardSucceedsEvenIfAddingDBsFromNewShardFails) {
ON_BLOCK_EXIT([&] { failPoint->setMode(FailPoint::off); });
auto future = launchAsync([this, &expectedShardName, &shardTarget] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto shardName = assertGet(
ShardingCatalogManager::get(operationContext())
->addShard(
@@ -1136,8 +1122,7 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
// Adding the same standalone host with a different shard name should fail.
std::string differentName = "anotherShardName";
auto future1 = launchAsync([&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
ASSERT_EQUALS(ErrorCodes::IllegalOperation,
ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
@@ -1152,8 +1137,7 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
// Adding the same standalone host with a different maxSize should fail.
auto future2 = launchAsync([&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
ASSERT_EQUALS(ErrorCodes::IllegalOperation,
ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
@@ -1168,8 +1152,7 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
// can't change the sharded cluster's notion of the shard from standalone to replica set just
// by calling addShard.
auto future3 = launchAsync([&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
ASSERT_EQUALS(ErrorCodes::IllegalOperation,
ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
@@ -1184,8 +1167,7 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
// Adding the same standalone host with the same options should succeed.
auto future4 = launchAsync([&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto shardName = assertGet(ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
&existingShardName,
@@ -1201,8 +1183,7 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
// Adding the same standalone host with the same options (without explicitly specifying the
// shard name) should succeed.
auto future5 = launchAsync([&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto shardName = assertGet(ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
nullptr,
@@ -1245,8 +1226,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
// Adding the same connection string with a different shard name should fail.
std::string differentName = "anotherShardName";
auto future1 = launchAsync([&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
ASSERT_EQUALS(
ErrorCodes::IllegalOperation,
ShardingCatalogManager::get(operationContext())
@@ -1260,8 +1240,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
// Adding the same connection string with a different maxSize should fail.
auto future2 = launchAsync([&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
ASSERT_EQUALS(
ErrorCodes::IllegalOperation,
ShardingCatalogManager::get(operationContext())
@@ -1279,8 +1258,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
// the sharded cluster's notion of the shard from replica set to standalone just by calling
// addShard.
auto future3 = launchAsync([&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
ASSERT_EQUALS(ErrorCodes::IllegalOperation,
ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
@@ -1298,8 +1276,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
// change the replica set name the sharded cluster knows for it just by calling addShard again.
std::string differentSetName = "differentSet";
auto future4 = launchAsync([&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
ASSERT_EQUALS(ErrorCodes::IllegalOperation,
ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
@@ -1315,8 +1292,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
// Adding the same host with the same options should succeed.
auto future5 = launchAsync([&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto shardName = assertGet(ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
&existingShardName,
@@ -1329,8 +1305,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
// Adding the same host with the same options (without explicitly specifying the shard name)
// should succeed.
auto future6 = launchAsync([&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto shardName = assertGet(
ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), nullptr, connString, existingShard.getMaxSizeMB()));
@@ -1354,8 +1329,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
targeterFactory()->addTargeterToReturn(otherHostConnString, std::move(otherHostTargeter));
}
auto future7 = launchAsync([&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto shardName = assertGet(ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
nullptr,
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_create_collection_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_create_collection_test.cpp
index 43d378f2a3a..79f1d38c7c0 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_create_collection_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_create_collection_test.cpp
@@ -167,9 +167,7 @@ TEST_F(CreateCollectionTest, BaseCase) {
requestOptions.cappedSize = 256;
auto future = launchAsync([this, &testNS, &requestOptions] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
-
- Client::initThreadIfNotAlready("BaseCaseTest");
+ ThreadClient tc("BaseCaseTest", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
ShardingCatalogManager::get(opCtx.get())
->createCollection(opCtx.get(), testNS, requestOptions);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp
index 159fc584373..c2db2628ce5 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp
@@ -100,8 +100,7 @@ TEST_F(CreateDatabaseTest, createDatabaseSuccess) {
// Now actually start the createDatabase work.
auto future = launchAsync([this, dbname] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
ShardingCatalogManager::get(opCtx.get())->createDatabase(opCtx.get(), dbname);
});
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_drop_coll_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_drop_coll_test.cpp
index 699b83c44e3..ffc56af57cb 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_drop_coll_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_drop_coll_test.cpp
@@ -165,8 +165,7 @@ public:
}
Status doDrop() {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
return ShardingCatalogManager::get(opCtx.get())->dropCollection(opCtx.get(), dropNS());
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
index 83ed91c88cc..1ab2be440f7 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
@@ -79,8 +79,7 @@ TEST_F(EnableShardingTest, noDBExists) {
shardTargeter->setFindHostReturnValue(HostAndPort("shard0:12"));
auto future = launchAsync([&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
ShardingCatalogManager::get(opCtx.get())->enableSharding(opCtx.get(), "db1");
});
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp
index 62b24c06268..f286d42e89b 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp
@@ -172,8 +172,7 @@ TEST_F(ShardCollectionTest, noInitialChunksOrData) {
// Now start actually sharding the collection.
auto future = launchAsync([&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
ShardingCatalogManager::get(operationContext())
->shardCollection(opCtx.get(),
@@ -295,8 +294,7 @@ TEST_F(ShardCollectionTest, withInitialChunks) {
// TODO: can we mock the ShardRegistry to return these?
set<ShardId> shards{shard0.getName(), shard1.getName(), shard2.getName()};
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
ShardingCatalogManager::get(operationContext())
->shardCollection(opCtx.get(),
@@ -391,8 +389,7 @@ TEST_F(ShardCollectionTest, withInitialData) {
// Now start actually sharding the collection.
auto future = launchAsync([&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
ShardingCatalogManager::get(operationContext())
->shardCollection(opCtx.get(),
diff --git a/src/mongo/db/s/implicit_create_collection_test.cpp b/src/mongo/db/s/implicit_create_collection_test.cpp
index 0c1a08e7b72..fa9d140ed6b 100644
--- a/src/mongo/db/s/implicit_create_collection_test.cpp
+++ b/src/mongo/db/s/implicit_create_collection_test.cpp
@@ -70,8 +70,7 @@ public:
TEST_F(ImplicitCreateTest, NormalCreate) {
const NamespaceString kNs("test.user");
auto future = launchAsync([this, &kNs] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
ASSERT_OK(onCannotImplicitlyCreateCollection(opCtx.get(), kNs));
});
@@ -84,8 +83,7 @@ TEST_F(ImplicitCreateTest, NormalCreate) {
TEST_F(ImplicitCreateTest, CanCallOnCannotImplicitAgainAfterError) {
const NamespaceString kNs("test.user");
auto future = launchAsync([this, &kNs] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
auto status = onCannotImplicitlyCreateCollection(opCtx.get(), kNs);
ASSERT_EQ(ErrorCodes::FailPointEnabled, status);
@@ -100,8 +98,7 @@ TEST_F(ImplicitCreateTest, CanCallOnCannotImplicitAgainAfterError) {
// Retry, but this time config server will return success
future = launchAsync([this, &kNs] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
ASSERT_OK(onCannotImplicitlyCreateCollection(opCtx.get(), kNs));
});
@@ -114,8 +111,7 @@ TEST_F(ImplicitCreateTest, CanCallOnCannotImplicitAgainAfterError) {
TEST_F(ImplicitCreateTest, ShouldNotCallConfigCreateIfCollectionExists) {
const NamespaceString kNs("test.user");
auto future = launchAsync([this, &kNs] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
auto status = onCannotImplicitlyCreateCollection(opCtx.get(), kNs);
ASSERT_EQ(ErrorCodes::FailPointEnabled, status);
@@ -135,8 +131,7 @@ TEST_F(ImplicitCreateTest, ShouldNotCallConfigCreateIfCollectionExists) {
// Retry, but this time config server will return success
future = launchAsync([this, &kNs] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = cc().makeOperationContext();
ASSERT_OK(onCannotImplicitlyCreateCollection(opCtx.get(), kNs));
});
diff --git a/src/mongo/db/service_context_test_fixture.cpp b/src/mongo/db/service_context_test_fixture.cpp
index 46d92935bb8..e79c8ab7ea3 100644
--- a/src/mongo/db/service_context_test_fixture.cpp
+++ b/src/mongo/db/service_context_test_fixture.cpp
@@ -55,13 +55,7 @@ ServiceContext* ScopedGlobalServiceContextForTest::getServiceContext() {
return getGlobalServiceContext();
}
-ServiceContextTest::ServiceContextTest() {
- Client::initThread(getThreadName());
-}
-
-ServiceContextTest::~ServiceContextTest() {
- Client::destroy();
-}
+ServiceContextTest::ServiceContextTest() : _threadClient(getGlobalServiceContext()) {}
Client* ServiceContextTest::getClient() {
return Client::getCurrent();
diff --git a/src/mongo/db/service_context_test_fixture.h b/src/mongo/db/service_context_test_fixture.h
index 404a6ba316c..a81962478fd 100644
--- a/src/mongo/db/service_context_test_fixture.h
+++ b/src/mongo/db/service_context_test_fixture.h
@@ -66,7 +66,8 @@ public:
protected:
ServiceContextTest();
- virtual ~ServiceContextTest();
+ virtual ~ServiceContextTest() = default;
+ ThreadClient _threadClient;
};
} // namespace mongo
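
With the fixture holding a ThreadClient member, any test deriving from ServiceContextTest gets a Client bound for the duration of each test without per-test setup; a minimal sketch (hypothetical test name):

    class MyServiceTest : public ServiceContextTest {};

    TEST_F(MyServiceTest, ClientIsBound) {
        ASSERT_TRUE(haveClient());  // bound by the fixture's _threadClient member
    }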
diff --git a/src/mongo/db/session_catalog_test.cpp b/src/mongo/db/session_catalog_test.cpp
index c5e4a52b7c6..1f8312b6e69 100644
--- a/src/mongo/db/session_catalog_test.cpp
+++ b/src/mongo/db/session_catalog_test.cpp
@@ -123,8 +123,7 @@ TEST_F(SessionCatalogTestWithDefaultOpCtx, GetOrCreateSessionAfterCheckOutSessio
ocs.emplace(_opCtx, true);
stdx::async(stdx::launch::async, [&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto sideOpCtx = Client::getCurrent()->makeOperationContext();
auto scopedSession =
SessionCatalog::get(sideOpCtx.get())->getOrCreateSession(sideOpCtx.get(), lsid);
@@ -136,8 +135,7 @@ TEST_F(SessionCatalogTestWithDefaultOpCtx, GetOrCreateSessionAfterCheckOutSessio
ocs.reset();
stdx::async(stdx::launch::async, [&] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto sideOpCtx = Client::getCurrent()->makeOperationContext();
auto scopedSession =
SessionCatalog::get(sideOpCtx.get())->getOrCreateSession(sideOpCtx.get(), lsid);
@@ -234,8 +232,7 @@ TEST_F(SessionCatalogTest, KillSessionWhenSessionIsNotCheckedOut) {
// Schedule a separate "regular operation" thread, which will block on checking-out the session,
// which we will use to confirm that session kill completion actually unblocks check-out
auto future = stdx::async(stdx::launch::async, [lsid] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto sideOpCtx = Client::getCurrent()->makeOperationContext();
sideOpCtx->setLogicalSessionId(lsid);
@@ -279,8 +276,7 @@ TEST_F(SessionCatalogTest, KillSessionWhenSessionIsCheckedOut) {
// Make sure that the checkOutForKill call will wait for the owning operation context to
// check the session back in
auto future = stdx::async(stdx::launch::async, [lsid] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto sideOpCtx = Client::getCurrent()->makeOperationContext();
sideOpCtx->setLogicalSessionId(lsid);
sideOpCtx->setDeadlineAfterNowBy(Milliseconds(10), ErrorCodes::MaxTimeMSExpired);
@@ -306,8 +302,7 @@ TEST_F(SessionCatalogTest, KillSessionWhenSessionIsCheckedOut) {
// Schedule a separate "regular operation" thread, which will block on checking-out the session,
// which we will use to confirm that session kill completion actually unblocks check-out
auto future = stdx::async(stdx::launch::async, [lsid] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
auto sideOpCtx = Client::getCurrent()->makeOperationContext();
sideOpCtx->setLogicalSessionId(lsid);
@@ -387,8 +382,7 @@ TEST_F(SessionCatalogTestWithDefaultOpCtx, KillSessionsThroughScanSessions) {
for (const auto& lsid : lsids) {
futures.emplace_back(
stdx::async(stdx::launch::async, [lsid, &firstUseOfTheSessionReachedBarrier] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready();
+ ThreadClient tc(getGlobalServiceContext());
{
auto sideOpCtx = Client::getCurrent()->makeOperationContext();
diff --git a/src/mongo/db/storage/SConscript b/src/mongo/db/storage/SConscript
index 170e7082139..ad1295dfb0a 100644
--- a/src/mongo/db/storage/SConscript
+++ b/src/mongo/db/storage/SConscript
@@ -116,8 +116,8 @@ env.Library(
LIBDEPS=[
'$BUILD_DIR/mongo/db/storage/storage_options',
'$BUILD_DIR/mongo/db/service_context',
+ '$BUILD_DIR/mongo/db/service_context_test_fixture',
],
-
)
env.Library(
diff --git a/src/mongo/db/storage/kv/kv_engine_timestamps_test.cpp b/src/mongo/db/storage/kv/kv_engine_timestamps_test.cpp
index fb935460769..9cf5bec545f 100644
--- a/src/mongo/db/storage/kv/kv_engine_timestamps_test.cpp
+++ b/src/mongo/db/storage/kv/kv_engine_timestamps_test.cpp
@@ -48,7 +48,7 @@
namespace mongo {
namespace {
-class SnapshotManagerTests : public ServiceContextTest {
+class SnapshotManagerTests : public unittest::Test, public ScopedGlobalServiceContextForTest {
public:
/**
* Usable as an OperationContext* but owns both the Client and the OperationContext.
diff --git a/src/mongo/db/storage/test_harness_helper.cpp b/src/mongo/db/storage/test_harness_helper.cpp
index aabdfdd7315..2f06d66f359 100644
--- a/src/mongo/db/storage/test_harness_helper.cpp
+++ b/src/mongo/db/storage/test_harness_helper.cpp
@@ -40,16 +40,7 @@ namespace {
stdx::function<std::unique_ptr<HarnessHelper>()> basicHarnessFactory;
} // namespace
-
-HarnessHelper::HarnessHelper() {
- setGlobalServiceContext(ServiceContext::make());
- Client::initThread(getThreadName());
-}
-
-HarnessHelper::~HarnessHelper() {
- Client::destroy();
- setGlobalServiceContext({});
-}
+HarnessHelper::HarnessHelper() : _threadClient(getGlobalServiceContext()) {}
void registerHarnessHelperFactory(stdx::function<std::unique_ptr<HarnessHelper>()> factory) {
basicHarnessFactory = std::move(factory);
diff --git a/src/mongo/db/storage/test_harness_helper.h b/src/mongo/db/storage/test_harness_helper.h
index c5c5216b4b6..fe0c2fa0a8f 100644
--- a/src/mongo/db/storage/test_harness_helper.h
+++ b/src/mongo/db/storage/test_harness_helper.h
@@ -38,6 +38,7 @@
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/record_id.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/service_context_test_fixture.h"
#include "mongo/db/storage/sorted_data_interface.h"
#include "mongo/stdx/functional.h"
#include "mongo/stdx/memory.h"
@@ -53,10 +54,9 @@ namespace mongo {
* HarnessHelper implementation. The newRecoveryUnit() implementation dictates what RecoveryUnit
* implementation the OperationContext has.
*/
-class HarnessHelper {
+class HarnessHelper : public ScopedGlobalServiceContextForTest {
public:
- virtual ~HarnessHelper();
-
+ virtual ~HarnessHelper() = default;
explicit HarnessHelper();
virtual ServiceContext::UniqueOperationContext newOperationContext(Client* const client) {
@@ -83,6 +83,9 @@ public:
}
virtual std::unique_ptr<RecoveryUnit> newRecoveryUnit() = 0;
+
+protected:
+ ThreadClient _threadClient;
};
namespace harness_helper_detail {
diff --git a/src/mongo/db/storage/wiredtiger/SConscript b/src/mongo/db/storage/wiredtiger/SConscript
index 5f66816a7f3..8d3d406d57d 100644
--- a/src/mongo/db/storage/wiredtiger/SConscript
+++ b/src/mongo/db/storage/wiredtiger/SConscript
@@ -255,6 +255,7 @@ if wiredtiger:
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/db/repl/repl_coordinator_interface',
'$BUILD_DIR/mongo/db/repl/replmocks',
+ '$BUILD_DIR/mongo/db/service_context_test_fixture',
],
)
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index cb390080cf2..a8187e89699 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -179,9 +179,7 @@ public:
}
virtual void run() {
- Client::initThread(name().c_str());
- ON_BLOCK_EXIT([] { Client::destroy(); });
-
+ ThreadClient tc(name(), getGlobalServiceContext());
LOG(1) << "starting " << name() << " thread";
while (!_shuttingDown.load()) {
@@ -227,9 +225,7 @@ public:
}
virtual void run() {
- Client::initThread(name().c_str());
- ON_BLOCK_EXIT([] { Client::destroy(); });
-
+ ThreadClient tc(name(), getGlobalServiceContext());
LOG(1) << "starting " << name() << " thread";
while (!_shuttingDown.load()) {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
index b8664e9725b..61942d6a0f5 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/repl/repl_settings.h"
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/service_context_test_fixture.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h"
@@ -51,12 +52,11 @@
namespace mongo {
namespace {
-class WiredTigerKVHarnessHelper : public KVHarnessHelper {
+class WiredTigerKVHarnessHelper : public KVHarnessHelper, public ScopedGlobalServiceContextForTest {
public:
WiredTigerKVHarnessHelper(bool forRepair = false)
: _dbpath("wt-kv-harness"), _forRepair(forRepair) {
- if (!hasGlobalServiceContext())
- setGlobalServiceContext(ServiceContext::make());
+ invariant(hasGlobalServiceContext());
_engine.reset(makeEngine());
repl::ReplicationCoordinator::set(
getGlobalServiceContext(),
@@ -64,11 +64,6 @@ public:
getGlobalServiceContext(), repl::ReplSettings())));
}
- virtual ~WiredTigerKVHarnessHelper() {
- _engine.reset(nullptr);
- // Cannot cleanup the global service context here, the test still have clients remaining.
- }
-
virtual KVEngine* restartEngine() override {
_engine.reset(nullptr);
_engine.reset(makeEngine());
@@ -102,39 +97,23 @@ private:
bool _forRepair;
};
-class WiredTigerKVEngineTest : public unittest::Test {
+class WiredTigerKVEngineTest : public unittest::Test, public ScopedGlobalServiceContextForTest {
public:
- void setUp() override {
- setGlobalServiceContext(ServiceContext::make());
- Client::initThread(getThreadName());
-
- _helper = makeHelper();
- _engine = _helper->getWiredTigerKVEngine();
- }
-
- void tearDown() override {
- _helper.reset(nullptr);
- Client::destroy();
- setGlobalServiceContext({});
- }
+ WiredTigerKVEngineTest(bool repair = false)
+ : _helper(repair), _engine(_helper.getWiredTigerKVEngine()) {}
std::unique_ptr<OperationContext> makeOperationContext() {
return std::make_unique<OperationContextNoop>(_engine->newRecoveryUnit());
}
protected:
- virtual std::unique_ptr<WiredTigerKVHarnessHelper> makeHelper() {
- return std::make_unique<WiredTigerKVHarnessHelper>();
- }
-
- std::unique_ptr<WiredTigerKVHarnessHelper> _helper;
+ WiredTigerKVHarnessHelper _helper;
WiredTigerKVEngine* _engine;
};
class WiredTigerKVEngineRepairTest : public WiredTigerKVEngineTest {
- virtual std::unique_ptr<WiredTigerKVHarnessHelper> makeHelper() override {
- return std::make_unique<WiredTigerKVHarnessHelper>(true /* repair */);
- }
+public:
+ WiredTigerKVEngineRepairTest() : WiredTigerKVEngineTest(true /* repair */) {}
};
TEST_F(WiredTigerKVEngineRepairTest, OrphanedDataFilesCanBeRecovered) {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
index fdc45e0c54f..375417f2e40 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
@@ -128,8 +128,7 @@ public:
}
virtual void run() {
- Client::initThread(_name.c_str());
- ON_BLOCK_EXIT([] { Client::destroy(); });
+ ThreadClient tc(_name, getGlobalServiceContext());
while (!globalInShutdownDeprecated()) {
if (!_deleteExcessDocuments()) {
diff --git a/src/mongo/db/thread_client_test.cpp b/src/mongo/db/thread_client_test.cpp
new file mode 100644
index 00000000000..e32e94d49d6
--- /dev/null
+++ b/src/mongo/db/thread_client_test.cpp
@@ -0,0 +1,92 @@
+
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/client.h"
+#include "mongo/db/service_context_test_fixture.h"
+#include "mongo/transport/transport_layer_mock.h"
+#include "mongo/unittest/unittest.h"
+#include "mongo/util/assert_util.h"
+
+namespace mongo {
+namespace {
+
+class ThreadClientTest : public unittest::Test, public ScopedGlobalServiceContextForTest {};
+
+TEST_F(ThreadClientTest, TestNoAssignment) {
+ ASSERT_FALSE(haveClient());
+ { ThreadClient tc(getThreadName(), getGlobalServiceContext()); }
+ ASSERT_FALSE(haveClient());
+}
+
+TEST_F(ThreadClientTest, TestAssignment) {
+ ASSERT_FALSE(haveClient());
+ ThreadClient threadClient(getThreadName(), getGlobalServiceContext());
+ ASSERT_TRUE(haveClient());
+}
+
+TEST_F(ThreadClientTest, TestDifferentArgs) {
+ {
+ ASSERT_FALSE(haveClient());
+ ThreadClient tc(getGlobalServiceContext());
+ ASSERT_TRUE(haveClient());
+ }
+ {
+ ASSERT_FALSE(haveClient());
+ ThreadClient tc("Test", getGlobalServiceContext());
+ ASSERT_TRUE(haveClient());
+ }
+ {
+ ASSERT_FALSE(haveClient());
+ transport::TransportLayerMock mock;
+ transport::SessionHandle handle = mock.createSession();
+ ThreadClient tc(getThreadName(), getGlobalServiceContext(), handle);
+ ASSERT_TRUE(haveClient());
+ }
+ {
+ ASSERT_FALSE(haveClient());
+ ServiceContext sc;
+ ThreadClient tc("Test", &sc, nullptr);
+ ASSERT_TRUE(haveClient());
+ }
+}
+
+TEST_F(ThreadClientTest, TestAlternativeClientRegion) {
+ ASSERT_FALSE(haveClient());
+ ThreadClient threadClient(getThreadName(), getGlobalServiceContext());
+
+ ServiceContext::UniqueClient swapClient = getGlobalServiceContext()->makeClient("swapClient");
+ { AlternativeClientRegion altRegion(swapClient); }
+
+ ASSERT_TRUE(haveClient());
+}
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index 8ade3873fb1..99a1ef15a9c 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -91,8 +91,7 @@ public:
static std::string secondsExpireField;
virtual void run() {
- Client::initThread(name().c_str());
- ON_BLOCK_EXIT([] { Client::destroy(); });
+ ThreadClient tc(name(), getGlobalServiceContext());
AuthorizationSession::get(cc())->grantInternalAuthorization();
while (!globalInShutdownDeprecated()) {
diff --git a/src/mongo/embedded/embedded.cpp b/src/mongo/embedded/embedded.cpp
index 7589bb22a44..5c54a5609a3 100644
--- a/src/mongo/embedded/embedded.cpp
+++ b/src/mongo/embedded/embedded.cpp
@@ -139,41 +139,37 @@ using std::endl;
void shutdown(ServiceContext* srvContext) {
- Client::initThreadIfNotAlready();
- auto const client = Client::getCurrent();
- auto const serviceContext = client->getServiceContext();
- invariant(srvContext == serviceContext);
-
- serviceContext->setKillAllOperations();
-
- // We should always be able to acquire the global lock at shutdown.
- // Close all open databases, shutdown storage engine and run all deinitializers.
- auto shutdownOpCtx = serviceContext->makeOperationContext(client);
{
- UninterruptibleLockGuard noInterrupt(shutdownOpCtx->lockState());
- Lock::GlobalLock lk(shutdownOpCtx.get(), MODE_X);
- DatabaseHolder::getDatabaseHolder().closeAll(shutdownOpCtx.get(), "shutdown");
+ ThreadClient tc(srvContext);
+ auto const client = Client::getCurrent();
+ auto const serviceContext = client->getServiceContext();
- LogicalSessionCache::set(serviceContext, nullptr);
+ serviceContext->setKillAllOperations();
- // Shut down the background periodic task runner
- if (auto runner = serviceContext->getPeriodicRunner()) {
- runner->shutdown();
- }
+ // We should always be able to acquire the global lock at shutdown.
+ // Close all open databases, shutdown storage engine and run all deinitializers.
+ auto shutdownOpCtx = serviceContext->makeOperationContext(client);
+ {
+ UninterruptibleLockGuard noInterrupt(shutdownOpCtx->lockState());
+ Lock::GlobalLock lk(shutdownOpCtx.get(), MODE_X);
+ DatabaseHolder::getDatabaseHolder().closeAll(shutdownOpCtx.get(), "shutdown");
- // Global storage engine may not be started in all cases before we exit
- if (serviceContext->getStorageEngine()) {
- shutdownGlobalStorageEngineCleanly(serviceContext);
- }
+ LogicalSessionCache::set(serviceContext, nullptr);
- Status status = mongo::runGlobalDeinitializers();
- uassertStatusOKWithContext(status, "Global deinitilization failed");
- }
- shutdownOpCtx.reset();
+ // Shut down the background periodic task runner
+ if (auto runner = serviceContext->getPeriodicRunner()) {
+ runner->shutdown();
+ }
- if (Client::getCurrent())
- Client::destroy();
+ // Global storage engine may not be started in all cases before we exit
+ if (serviceContext->getStorageEngine()) {
+ shutdownGlobalStorageEngineCleanly(serviceContext);
+ }
+ Status status = mongo::runGlobalDeinitializers();
+ uassertStatusOKWithContext(status, "Global deinitilization failed");
+ }
+ }
setGlobalServiceContext(nullptr);
log(LogComponent::kControl) << "now exiting";
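
The restructuring above scopes the ThreadClient so that the thread-local Client is destroyed before setGlobalServiceContext(nullptr) runs; a reduced sketch of the ordering (illustrative only):

    void shutdownSketch(ServiceContext* srvContext) {
        {
            ThreadClient tc(srvContext);
            // ... shutdown work that requires a Client ...
        }  // Client released here, while the ServiceContext is still alive
        setGlobalServiceContext(nullptr);  // safe: no thread-local Client remains
    }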
diff --git a/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp b/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp
index e0c9db25298..f09b53fa36a 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp
@@ -92,8 +92,7 @@ protected:
auto launchOnSeparateThread(std::function<void(OperationContext*)> func) {
auto const serviceContext = getServiceContext();
return launchAsync([serviceContext, func] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
+ ThreadClient tc("Test", getGlobalServiceContext());
auto opCtx = Client::getCurrent()->makeOperationContext();
func(opCtx.get());
});
diff --git a/src/mongo/s/query/cluster_cursor_cleanup_job.cpp b/src/mongo/s/query/cluster_cursor_cleanup_job.cpp
index ec5f65796b2..c1a76fe98ab 100644
--- a/src/mongo/s/query/cluster_cursor_cleanup_job.cpp
+++ b/src/mongo/s/query/cluster_cursor_cleanup_job.cpp
@@ -50,8 +50,7 @@ std::string ClusterCursorCleanupJob::name() const {
}
void ClusterCursorCleanupJob::run() {
- Client::initThread(name().c_str());
- ON_BLOCK_EXIT([] { Client::destroy(); });
+ ThreadClient tc(name(), getGlobalServiceContext());
auto* const client = Client::getCurrent();
auto* const manager = Grid::get(client->getServiceContext())->getCursorManager();