summaryrefslogtreecommitdiff
path: root/src/mongo/s
diff options
context:
space:
mode:
authorKaloian Manassiev <kaloian.manassiev@mongodb.com>2018-01-17 17:40:35 -0500
committerKaloian Manassiev <kaloian.manassiev@mongodb.com>2018-03-07 14:01:39 -0500
commit083647f38662195653b87b6a79ae1183d269f910 (patch)
tree1cc2a3b7a036232da1a3c1bc96bc5c39d9d90551 /src/mongo/s
parent4d2ca242fb7b9a28d1123831db99664eb0db3e23 (diff)
downloadmongo-083647f38662195653b87b6a79ae1183d269f910.tar.gz
SERVER-29908 Move OpObserver callbacks out of CollectionShardingState
Diffstat (limited to 'src/mongo/s')
-rw-r--r--src/mongo/s/SConscript42
-rw-r--r--src/mongo/s/balancer_configuration_test.cpp2
-rw-r--r--src/mongo/s/catalog/SConscript5
-rw-r--r--src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp380
-rw-r--r--src/mongo/s/catalog/replset_dist_lock_manager_test.cpp158
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client_impl.cpp6
-rw-r--r--src/mongo/s/catalog/sharding_catalog_log_change_test.cpp2
-rw-r--r--src/mongo/s/catalog/sharding_catalog_test.cpp2
-rw-r--r--src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp2
-rw-r--r--src/mongo/s/catalog_cache_test_fixture.h2
-rw-r--r--src/mongo/s/chunk_manager_index_bounds_test.cpp2
-rw-r--r--src/mongo/s/client/SConscript4
-rw-r--r--src/mongo/s/client/shard_connection_test.cpp44
-rw-r--r--src/mongo/s/client/sharding_connection_hook.cpp3
-rw-r--r--src/mongo/s/cluster_identity_loader_test.cpp10
-rw-r--r--src/mongo/s/cluster_last_error_info_test.cpp2
-rw-r--r--src/mongo/s/commands/SConscript1
-rw-r--r--src/mongo/s/config_server_test_fixture.cpp4
-rw-r--r--src/mongo/s/config_server_test_fixture.h2
-rw-r--r--src/mongo/s/query/SConscript4
-rw-r--r--src/mongo/s/query/async_results_merger_test.cpp2
-rw-r--r--src/mongo/s/query/establish_cursors_test.cpp2
-rw-r--r--src/mongo/s/shard_server_test_fixture.cpp15
-rw-r--r--src/mongo/s/shard_server_test_fixture.h14
-rw-r--r--src/mongo/s/sharding_mongod_test_fixture.cpp38
-rw-r--r--src/mongo/s/sharding_mongod_test_fixture.h50
-rw-r--r--src/mongo/s/sharding_router_test_fixture.cpp (renamed from src/mongo/s/sharding_test_fixture.cpp)55
-rw-r--r--src/mongo/s/sharding_router_test_fixture.h (renamed from src/mongo/s/sharding_test_fixture.h)32
-rw-r--r--src/mongo/s/sharding_test_fixture_common.cpp41
-rw-r--r--src/mongo/s/sharding_test_fixture_common.h79
-rw-r--r--src/mongo/s/write_ops/SConscript7
-rw-r--r--src/mongo/s/write_ops/batch_write_exec_test.cpp2
32 files changed, 476 insertions, 538 deletions
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 7339848dac1..49ccc44ba4d 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -148,18 +148,28 @@ env.Library(
)
env.Library(
- target='sharding_test_fixture',
+ target='sharding_test_fixture_common',
source=[
- 'sharding_test_fixture.cpp',
+ 'sharding_test_fixture_common.cpp',
],
LIBDEPS=[
'$BUILD_DIR/mongo/client/remote_command_targeter_mock',
- '$BUILD_DIR/mongo/db/auth/authorization_manager_mock_init',
+ '$BUILD_DIR/mongo/executor/network_interface_mock',
+ '$BUILD_DIR/mongo/executor/network_test_env',
+ '$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture',
+ '$BUILD_DIR/mongo/unittest/unittest',
+ ],
+)
+
+env.Library(
+ target='sharding_router_test_fixture',
+ source=[
+ 'sharding_router_test_fixture.cpp',
+ ],
+ LIBDEPS=[
'$BUILD_DIR/mongo/db/query/collation/collator_factory_mock',
'$BUILD_DIR/mongo/db/service_context_noop_init',
- '$BUILD_DIR/mongo/executor/network_test_env',
'$BUILD_DIR/mongo/executor/task_executor_pool',
- '$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture',
'$BUILD_DIR/mongo/s/catalog/dist_lock_manager_mock',
'$BUILD_DIR/mongo/s/catalog/sharding_catalog_client_impl',
'$BUILD_DIR/mongo/s/coreshard',
@@ -167,6 +177,7 @@ env.Library(
'$BUILD_DIR/mongo/util/clock_source_mock',
'sharding_egress_metadata_hook_for_mongos',
'sharding_task_executor',
+ 'sharding_test_fixture_common',
],
)
@@ -176,20 +187,11 @@ env.Library(
'sharding_mongod_test_fixture.cpp',
],
LIBDEPS=[
- '$BUILD_DIR/mongo/client/remote_command_targeter_mock',
- '$BUILD_DIR/mongo/db/namespace_string',
'$BUILD_DIR/mongo/db/op_observer_d',
'$BUILD_DIR/mongo/db/repl/drop_pending_collection_reaper',
'$BUILD_DIR/mongo/db/repl/replmocks',
'$BUILD_DIR/mongo/db/service_context_d_test_fixture',
- '$BUILD_DIR/mongo/executor/network_test_env',
- '$BUILD_DIR/mongo/executor/task_executor_pool',
- '$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture',
- '$BUILD_DIR/mongo/rpc/metadata',
- '$BUILD_DIR/mongo/s/catalog/dist_lock_manager_mock',
- '$BUILD_DIR/mongo/s/catalog/sharding_catalog_client_impl',
- '$BUILD_DIR/mongo/s/coreshard',
- '$BUILD_DIR/mongo/util/clock_source_mock',
+ 'sharding_test_fixture_common',
],
)
@@ -223,7 +225,7 @@ env.CppUnitTest(
],
LIBDEPS=[
'cluster_last_error_info',
- 'sharding_test_fixture',
+ 'sharding_router_test_fixture',
],
)
@@ -324,8 +326,6 @@ env.Library(
'sharding_egress_metadata_hook.cpp',
],
LIBDEPS=[
- '$BUILD_DIR/mongo/db/audit',
- '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface',
'$BUILD_DIR/mongo/util/concurrency/thread_pool',
'grid',
],
@@ -342,8 +342,8 @@ env.CppUnitTest(
],
LIBDEPS=[
'$BUILD_DIR/mongo/db/query/query_test_service_context',
- '$BUILD_DIR/mongo/s/sharding_test_fixture',
'coreshard',
+ 'sharding_router_test_fixture',
]
)
@@ -385,7 +385,7 @@ env.CppUnitTest(
],
LIBDEPS=[
'coreshard',
- 'sharding_test_fixture',
+ 'sharding_router_test_fixture',
]
)
@@ -396,7 +396,7 @@ env.CppUnitTest(
],
LIBDEPS=[
'coreshard',
- 'sharding_test_fixture',
+ 'sharding_router_test_fixture',
]
)
diff --git a/src/mongo/s/balancer_configuration_test.cpp b/src/mongo/s/balancer_configuration_test.cpp
index da21e716f18..9e12adc6e7c 100644
--- a/src/mongo/s/balancer_configuration_test.cpp
+++ b/src/mongo/s/balancer_configuration_test.cpp
@@ -40,7 +40,7 @@
#include "mongo/rpc/metadata/tracking_metadata.h"
#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/net/hostandport.h"
diff --git a/src/mongo/s/catalog/SConscript b/src/mongo/s/catalog/SConscript
index 9960139fb73..e0b1f9b936a 100644
--- a/src/mongo/s/catalog/SConscript
+++ b/src/mongo/s/catalog/SConscript
@@ -46,7 +46,6 @@ env.Library(
'$BUILD_DIR/mongo/s/catalog/dist_lock_catalog_interface',
'$BUILD_DIR/mongo/s/catalog/dist_lock_manager',
'$BUILD_DIR/mongo/s/client/sharding_client',
- '$BUILD_DIR/mongo/s/coreshard',
'$BUILD_DIR/mongo/util/fail_point'
],
)
@@ -129,7 +128,7 @@ env.CppUnitTest(
LIBDEPS=[
'$BUILD_DIR/mongo/s/catalog/dist_lock_catalog_mock',
'$BUILD_DIR/mongo/s/catalog/sharding_catalog_client_mock',
- '$BUILD_DIR/mongo/s/sharding_mongod_test_fixture',
+ '$BUILD_DIR/mongo/s/shard_server_test_fixture',
'dist_lock_catalog_impl',
'replset_dist_lock_manager',
]
@@ -143,6 +142,6 @@ env.CppUnitTest(
'sharding_catalog_write_retry_test.cpp',
],
LIBDEPS=[
- '$BUILD_DIR/mongo/s/sharding_test_fixture',
+ '$BUILD_DIR/mongo/s/sharding_router_test_fixture',
]
)
diff --git a/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp b/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp
index e584944658d..fd66f088346 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp
@@ -30,20 +30,13 @@
#include <utility>
-#include "mongo/base/status.h"
-#include "mongo/base/status_with.h"
#include "mongo/bson/json.h"
-#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/commands.h"
-#include "mongo/db/jsobj.h"
-#include "mongo/db/operation_context_noop.h"
#include "mongo/db/query/find_and_modify_request.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/network_test_env.h"
-#include "mongo/executor/task_executor_pool.h"
-#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/s/catalog/dist_lock_catalog_impl.h"
#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/s/catalog/sharding_catalog_client_mock.h"
@@ -51,50 +44,31 @@
#include "mongo/s/catalog/type_locks.h"
#include "mongo/s/client/shard_factory.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/client/shard_remote.h"
#include "mongo/s/grid.h"
-#include "mongo/s/sharding_mongod_test_fixture.h"
+#include "mongo/s/shard_server_test_fixture.h"
#include "mongo/s/write_ops/batched_command_request.h"
-#include "mongo/stdx/future.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/thread.h"
#include "mongo/util/time_support.h"
namespace mongo {
+namespace {
-using std::vector;
using executor::NetworkInterfaceMock;
using executor::NetworkTestEnv;
using executor::RemoteCommandRequest;
using executor::RemoteCommandResponse;
using repl::ReadConcernArgs;
-namespace {
-
const HostAndPort dummyHost("dummy", 123);
/**
- * Sets up the mocked out objects for testing the replica-set backed catalog manager.
+ * Sets up the mocked out objects for testing the replica-set backed catalog manager
+ *
+ * NOTE: Even though the dist lock manager only runs on the config server, this test is using the
+ * ShardServerTestFixture and emulating the network due to legacy reasons.
*/
-class DistLockCatalogFixture : public ShardingMongodTestFixture {
-public:
- std::shared_ptr<RemoteCommandTargeterMock> configTargeter() {
- return RemoteCommandTargeterMock::get(shardRegistry()->getConfigShard()->getTargeter());
- }
-
+class DistLockCatalogTest : public ShardServerTestFixture {
protected:
- void setUp() override {
- ShardingMongodTestFixture::setUp();
-
- // Initialize sharding components as a shard server.
- serverGlobalParams.clusterRole = ClusterRole::ShardServer;
- uassertStatusOK(initializeGlobalShardingStateForMongodForTest(ConnectionString(dummyHost)));
-
- // Set the findHost() return value on the mock targeter so that later calls to the
- // targeter's findHost() return the appropriate value.
- configTargeter()->setFindHostReturnValue(dummyHost);
- }
-
std::unique_ptr<DistLockCatalog> makeDistLockCatalog() override {
return stdx::make_unique<DistLockCatalogImpl>();
}
@@ -108,6 +82,20 @@ protected:
std::unique_ptr<DistLockManager> distLockManager) override {
return stdx::make_unique<ShardingCatalogClientMock>(std::move(distLockManager));
}
+
+ std::shared_ptr<RemoteCommandTargeterMock> configTargeter() {
+ return RemoteCommandTargeterMock::get(shardRegistry()->getConfigShard()->getTargeter());
+ }
+
+ auto launchOnSeparateThread(std::function<void(OperationContext*)> func) {
+ auto const serviceContext = getServiceContext();
+ return launchAsync([serviceContext, func] {
+ ON_BLOCK_EXIT([&] { Client::destroy(); });
+ Client::initThreadIfNotAlready("Test");
+ auto opCtx = Client::getCurrent()->makeOperationContext();
+ func(opCtx.get());
+ });
+ }
};
void checkReadConcern(const BSONObj& findCmd) {
@@ -116,10 +104,10 @@ void checkReadConcern(const BSONObj& findCmd) {
ASSERT(repl::ReadConcernLevel::kMajorityReadConcern == readConcernArgs.getLevel());
}
-TEST_F(DistLockCatalogFixture, BasicPing) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, BasicPing) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
Date_t ping(dateFromISOString("2014-03-11T09:17:18.098Z").getValue());
- auto status = distLockCatalog()->ping(operationContext(), "abcd", ping);
+ auto status = distLockCatalog()->ping(opCtx, "abcd", ping);
ASSERT_OK(status);
});
@@ -154,13 +142,13 @@ TEST_F(DistLockCatalogFixture, BasicPing) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, PingTargetError) {
+TEST_F(DistLockCatalogTest, PingTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
auto status = distLockCatalog()->ping(operationContext(), "abcd", Date_t::now());
ASSERT_NOT_OK(status);
}
-TEST_F(DistLockCatalogFixture, PingRunCmdError) {
+TEST_F(DistLockCatalogTest, PingRunCmdError) {
shutdownExecutorPool();
auto status = distLockCatalog()->ping(operationContext(), "abcd", Date_t::now());
@@ -168,9 +156,9 @@ TEST_F(DistLockCatalogFixture, PingRunCmdError) {
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogFixture, PingCommandError) {
- auto future = launchAsync([this] {
- auto status = distLockCatalog()->ping(operationContext(), "abcd", Date_t::now());
+TEST_F(DistLockCatalogTest, PingCommandError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
+ auto status = distLockCatalog()->ping(opCtx, "abcd", Date_t::now());
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -186,9 +174,9 @@ TEST_F(DistLockCatalogFixture, PingCommandError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, PingWriteError) {
- auto future = launchAsync([this] {
- auto status = distLockCatalog()->ping(operationContext(), "abcd", Date_t::now());
+TEST_F(DistLockCatalogTest, PingWriteError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
+ auto status = distLockCatalog()->ping(opCtx, "abcd", Date_t::now());
ASSERT_EQUALS(ErrorCodes::Unauthorized, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -204,9 +192,9 @@ TEST_F(DistLockCatalogFixture, PingWriteError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, PingWriteConcernError) {
- auto future = launchAsync([this] {
- auto status = distLockCatalog()->ping(operationContext(), "abcd", Date_t::now());
+TEST_F(DistLockCatalogTest, PingWriteConcernError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
+ auto status = distLockCatalog()->ping(opCtx, "abcd", Date_t::now());
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -225,9 +213,9 @@ TEST_F(DistLockCatalogFixture, PingWriteConcernError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, PingUnsupportedWriteConcernResponse) {
- auto future = launchAsync([this] {
- auto status = distLockCatalog()->ping(operationContext(), "abcd", Date_t::now());
+TEST_F(DistLockCatalogTest, PingUnsupportedWriteConcernResponse) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
+ auto status = distLockCatalog()->ping(opCtx, "abcd", Date_t::now());
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -247,9 +235,9 @@ TEST_F(DistLockCatalogFixture, PingUnsupportedWriteConcernResponse) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, PingUnsupportedResponseFormat) {
- auto future = launchAsync([this] {
- auto status = distLockCatalog()->ping(operationContext(), "abcd", Date_t::now());
+TEST_F(DistLockCatalogTest, PingUnsupportedResponseFormat) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
+ auto status = distLockCatalog()->ping(opCtx, "abcd", Date_t::now());
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
});
@@ -261,14 +249,13 @@ TEST_F(DistLockCatalogFixture, PingUnsupportedResponseFormat) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockNoOp) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockNoOp) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
OID myID("555f80be366c194b13fb0372");
Date_t now(dateFromISOString("2015-05-22T19:17:18.098Z").getValue());
- auto resultStatus =
- distLockCatalog()
- ->grabLock(operationContext(), "test", myID, "me", "mongos", now, "because")
- .getStatus();
+ auto resultStatus = distLockCatalog()
+ ->grabLock(opCtx, "test", myID, "me", "mongos", now, "because")
+ .getStatus();
ASSERT_EQUALS(ErrorCodes::LockStateChangeFailed, resultStatus.code());
});
@@ -304,12 +291,12 @@ TEST_F(DistLockCatalogFixture, GrabLockNoOp) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockWithNewDoc) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockWithNewDoc) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
OID myID("555f80be366c194b13fb0372");
Date_t now(dateFromISOString("2015-05-22T19:17:18.098Z").getValue());
- auto resultStatus = distLockCatalog()->grabLock(
- operationContext(), "test", myID, "me", "mongos", now, "because");
+ auto resultStatus =
+ distLockCatalog()->grabLock(opCtx, "test", myID, "me", "mongos", now, "because");
ASSERT_OK(resultStatus.getStatus());
const auto& lockDoc = resultStatus.getValue();
@@ -368,12 +355,11 @@ TEST_F(DistLockCatalogFixture, GrabLockWithNewDoc) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockWithBadLockDoc) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockWithBadLockDoc) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
Date_t now(dateFromISOString("2015-05-22T19:17:18.098Z").getValue());
- auto resultStatus = distLockCatalog()
- ->grabLock(operationContext(), "test", OID(), "", "", now, "")
- .getStatus();
+ auto resultStatus =
+ distLockCatalog()->grabLock(opCtx, "test", OID(), "", "", now, "").getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, resultStatus.code());
});
@@ -402,7 +388,7 @@ TEST_F(DistLockCatalogFixture, GrabLockWithBadLockDoc) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockTargetError) {
+TEST_F(DistLockCatalogTest, GrabLockTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
auto status = distLockCatalog()
->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
@@ -410,7 +396,7 @@ TEST_F(DistLockCatalogFixture, GrabLockTargetError) {
ASSERT_NOT_OK(status);
}
-TEST_F(DistLockCatalogFixture, GrabLockRunCmdError) {
+TEST_F(DistLockCatalogTest, GrabLockRunCmdError) {
shutdownExecutorPool();
auto status = distLockCatalog()
@@ -420,10 +406,10 @@ TEST_F(DistLockCatalogFixture, GrabLockRunCmdError) {
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogFixture, GrabLockCommandError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockCommandError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()
- ->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
+ ->grabLock(opCtx, "", OID::gen(), "", "", Date_t::now(), "")
.getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -440,10 +426,10 @@ TEST_F(DistLockCatalogFixture, GrabLockCommandError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockDupKeyError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockDupKeyError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()
- ->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
+ ->grabLock(opCtx, "", OID::gen(), "", "", Date_t::now(), "")
.getStatus();
ASSERT_EQUALS(ErrorCodes::LockStateChangeFailed, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -460,10 +446,10 @@ TEST_F(DistLockCatalogFixture, GrabLockDupKeyError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockWriteError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockWriteError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()
- ->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
+ ->grabLock(opCtx, "", OID::gen(), "", "", Date_t::now(), "")
.getStatus();
ASSERT_EQUALS(ErrorCodes::Unauthorized, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -480,8 +466,8 @@ TEST_F(DistLockCatalogFixture, GrabLockWriteError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockWriteConcernError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockWriteConcernError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()
->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
.getStatus();
@@ -503,8 +489,8 @@ TEST_F(DistLockCatalogFixture, GrabLockWriteConcernError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockWriteConcernErrorBadType) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockWriteConcernErrorBadType) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()
->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
.getStatus();
@@ -524,8 +510,8 @@ TEST_F(DistLockCatalogFixture, GrabLockWriteConcernErrorBadType) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockResponseMissingValueField) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockResponseMissingValueField) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()
->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
.getStatus();
@@ -542,8 +528,8 @@ TEST_F(DistLockCatalogFixture, GrabLockResponseMissingValueField) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockUnsupportedWriteConcernResponse) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockUnsupportedWriteConcernResponse) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()
->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
.getStatus();
@@ -566,8 +552,8 @@ TEST_F(DistLockCatalogFixture, GrabLockUnsupportedWriteConcernResponse) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockUnsupportedResponseFormat) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockUnsupportedResponseFormat) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()
->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
.getStatus();
@@ -582,8 +568,8 @@ TEST_F(DistLockCatalogFixture, GrabLockUnsupportedResponseFormat) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, OvertakeLockNoOp) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, OvertakeLockNoOp) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
OID myID("555f80be366c194b13fb0372");
OID currentOwner("555f99712c99a78c5b083358");
Date_t now(dateFromISOString("2015-05-22T19:17:18.098Z").getValue());
@@ -631,8 +617,8 @@ TEST_F(DistLockCatalogFixture, OvertakeLockNoOp) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, OvertakeLockWithNewDoc) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, OvertakeLockWithNewDoc) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
OID myID("555f80be366c194b13fb0372");
OID currentOwner("555f99712c99a78c5b083358");
Date_t now(dateFromISOString("2015-05-22T19:17:18.098Z").getValue());
@@ -700,8 +686,8 @@ TEST_F(DistLockCatalogFixture, OvertakeLockWithNewDoc) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, OvertakeLockWithBadLockDoc) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, OvertakeLockWithBadLockDoc) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
Date_t now(dateFromISOString("2015-05-22T19:17:18.098Z").getValue());
auto resultStatus =
distLockCatalog()
@@ -735,7 +721,7 @@ TEST_F(DistLockCatalogFixture, OvertakeLockWithBadLockDoc) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, OvertakeLockTargetError) {
+TEST_F(DistLockCatalogTest, OvertakeLockTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
auto status =
distLockCatalog()
@@ -744,7 +730,7 @@ TEST_F(DistLockCatalogFixture, OvertakeLockTargetError) {
ASSERT_NOT_OK(status);
}
-TEST_F(DistLockCatalogFixture, OvertakeLockRunCmdError) {
+TEST_F(DistLockCatalogTest, OvertakeLockRunCmdError) {
shutdownExecutorPool();
auto status =
@@ -755,8 +741,8 @@ TEST_F(DistLockCatalogFixture, OvertakeLockRunCmdError) {
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogFixture, OvertakeLockCommandError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, OvertakeLockCommandError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status =
distLockCatalog()
->overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
@@ -776,8 +762,8 @@ TEST_F(DistLockCatalogFixture, OvertakeLockCommandError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, OvertakeLockWriteError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, OvertakeLockWriteError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status =
distLockCatalog()
->overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
@@ -797,8 +783,8 @@ TEST_F(DistLockCatalogFixture, OvertakeLockWriteError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, OvertakeLockWriteConcernError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, OvertakeLockWriteConcernError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status =
distLockCatalog()
->overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
@@ -821,8 +807,8 @@ TEST_F(DistLockCatalogFixture, OvertakeLockWriteConcernError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, OvertakeLockUnsupportedWriteConcernResponse) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, OvertakeLockUnsupportedWriteConcernResponse) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status =
distLockCatalog()
->overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
@@ -846,8 +832,8 @@ TEST_F(DistLockCatalogFixture, OvertakeLockUnsupportedWriteConcernResponse) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, OvertakeLockUnsupportedResponseFormat) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, OvertakeLockUnsupportedResponseFormat) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status =
distLockCatalog()
->overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
@@ -863,8 +849,8 @@ TEST_F(DistLockCatalogFixture, OvertakeLockUnsupportedResponseFormat) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, BasicUnlock) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, BasicUnlock) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status =
distLockCatalog()->unlock(operationContext(), OID("555f99712c99a78c5b083358"));
ASSERT_OK(status);
@@ -897,8 +883,8 @@ TEST_F(DistLockCatalogFixture, BasicUnlock) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, BasicUnlockWithName) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, BasicUnlockWithName) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlock(
operationContext(), OID("555f99712c99a78c5b083358"), "TestDB.TestColl");
ASSERT_OK(status);
@@ -931,8 +917,8 @@ TEST_F(DistLockCatalogFixture, BasicUnlockWithName) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, UnlockWithNoNewDoc) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, UnlockWithNoNewDoc) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status =
distLockCatalog()->unlock(operationContext(), OID("555f99712c99a78c5b083358"));
ASSERT_OK(status);
@@ -961,8 +947,8 @@ TEST_F(DistLockCatalogFixture, UnlockWithNoNewDoc) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, UnlockWithNameWithNoNewDoc) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, UnlockWithNameWithNoNewDoc) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlock(
operationContext(), OID("555f99712c99a78c5b083358"), "TestDB.TestColl");
ASSERT_OK(status);
@@ -991,13 +977,13 @@ TEST_F(DistLockCatalogFixture, UnlockWithNameWithNoNewDoc) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, UnlockTargetError) {
+TEST_F(DistLockCatalogTest, UnlockTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
auto status = distLockCatalog()->unlock(operationContext(), OID());
ASSERT_NOT_OK(status);
}
-TEST_F(DistLockCatalogFixture, UnlockRunCmdError) {
+TEST_F(DistLockCatalogTest, UnlockRunCmdError) {
shutdownExecutorPool();
auto status = distLockCatalog()->unlock(operationContext(), OID());
@@ -1005,8 +991,8 @@ TEST_F(DistLockCatalogFixture, UnlockRunCmdError) {
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogFixture, UnlockCommandError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, UnlockCommandError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlock(operationContext(), OID());
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1023,8 +1009,8 @@ TEST_F(DistLockCatalogFixture, UnlockCommandError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, UnlockWriteError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, UnlockWriteError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlock(operationContext(), OID());
ASSERT_EQUALS(ErrorCodes::Unauthorized, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1041,8 +1027,8 @@ TEST_F(DistLockCatalogFixture, UnlockWriteError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, UnlockWriteConcernError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, UnlockWriteConcernError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlock(operationContext(), OID());
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1074,8 +1060,8 @@ TEST_F(DistLockCatalogFixture, UnlockWriteConcernError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, UnlockUnsupportedWriteConcernResponse) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, UnlockUnsupportedWriteConcernResponse) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlock(operationContext(), OID());
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1096,8 +1082,8 @@ TEST_F(DistLockCatalogFixture, UnlockUnsupportedWriteConcernResponse) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, UnlockUnsupportedResponseFormat) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, UnlockUnsupportedResponseFormat) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlock(operationContext(), OID());
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
});
@@ -1110,8 +1096,8 @@ TEST_F(DistLockCatalogFixture, UnlockUnsupportedResponseFormat) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, BasicUnlockAll) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, BasicUnlockAll) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlockAll(operationContext(), "processID");
ASSERT_OK(status);
});
@@ -1144,8 +1130,8 @@ TEST_F(DistLockCatalogFixture, BasicUnlockAll) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, UnlockAllWriteFailed) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, UnlockAllWriteFailed) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlockAll(operationContext(), "processID");
ASSERT_EQUALS(ErrorCodes::IllegalOperation, status);
});
@@ -1158,8 +1144,8 @@ TEST_F(DistLockCatalogFixture, UnlockAllWriteFailed) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, UnlockAllNetworkError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, UnlockAllNetworkError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlockAll(operationContext(), "processID");
ASSERT_EQUALS(ErrorCodes::NetworkTimeout, status);
});
@@ -1173,8 +1159,8 @@ TEST_F(DistLockCatalogFixture, UnlockAllNetworkError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, BasicGetServerInfo) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, BasicGetServerInfo) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
Date_t localTime(dateFromISOString("2015-05-26T13:06:27.293Z").getValue());
OID electionID("555fa85d4d8640862a0fc79b");
auto resultStatus = distLockCatalog()->getServerInfo(operationContext());
@@ -1202,13 +1188,13 @@ TEST_F(DistLockCatalogFixture, BasicGetServerInfo) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetServerTargetError) {
+TEST_F(DistLockCatalogTest, GetServerTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
ASSERT_NOT_OK(status);
}
-TEST_F(DistLockCatalogFixture, GetServerRunCmdError) {
+TEST_F(DistLockCatalogTest, GetServerRunCmdError) {
shutdownExecutorPool();
auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
@@ -1216,8 +1202,8 @@ TEST_F(DistLockCatalogFixture, GetServerRunCmdError) {
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogFixture, GetServerCommandError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetServerCommandError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1234,8 +1220,8 @@ TEST_F(DistLockCatalogFixture, GetServerCommandError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetServerBadElectionId) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetServerBadElectionId) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1255,8 +1241,8 @@ TEST_F(DistLockCatalogFixture, GetServerBadElectionId) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetServerBadLocalTime) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetServerBadLocalTime) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1276,8 +1262,8 @@ TEST_F(DistLockCatalogFixture, GetServerBadLocalTime) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetServerNoGLEStats) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetServerNoGLEStats) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1293,8 +1279,8 @@ TEST_F(DistLockCatalogFixture, GetServerNoGLEStats) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetServerNoElectionId) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetServerNoElectionId) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::NotMaster, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1314,8 +1300,8 @@ TEST_F(DistLockCatalogFixture, GetServerNoElectionId) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetServerInvalidReplSubsectionShouldFail) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetServerInvalidReplSubsectionShouldFail) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1334,8 +1320,8 @@ TEST_F(DistLockCatalogFixture, GetServerInvalidReplSubsectionShouldFail) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetServerNoElectionIdButMasterShouldFail) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetServerNoElectionIdButMasterShouldFail) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_NOT_EQUALS(std::string::npos, status.reason().find("me:1234"));
@@ -1355,8 +1341,8 @@ TEST_F(DistLockCatalogFixture, GetServerNoElectionIdButMasterShouldFail) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, BasicStopPing) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, BasicStopPing) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->stopPing(operationContext(), "test");
ASSERT_OK(status);
});
@@ -1387,13 +1373,13 @@ TEST_F(DistLockCatalogFixture, BasicStopPing) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, StopPingTargetError) {
+TEST_F(DistLockCatalogTest, StopPingTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
auto status = distLockCatalog()->stopPing(operationContext(), "");
ASSERT_NOT_OK(status);
}
-TEST_F(DistLockCatalogFixture, StopPingRunCmdError) {
+TEST_F(DistLockCatalogTest, StopPingRunCmdError) {
shutdownExecutorPool();
auto status = distLockCatalog()->stopPing(operationContext(), "");
@@ -1401,8 +1387,8 @@ TEST_F(DistLockCatalogFixture, StopPingRunCmdError) {
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogFixture, StopPingCommandError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, StopPingCommandError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->stopPing(operationContext(), "");
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1419,8 +1405,8 @@ TEST_F(DistLockCatalogFixture, StopPingCommandError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, StopPingWriteError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, StopPingWriteError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->stopPing(operationContext(), "");
ASSERT_EQUALS(ErrorCodes::Unauthorized, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1437,8 +1423,8 @@ TEST_F(DistLockCatalogFixture, StopPingWriteError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, StopPingWriteConcernError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, StopPingWriteConcernError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->stopPing(operationContext(), "");
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1458,8 +1444,8 @@ TEST_F(DistLockCatalogFixture, StopPingWriteConcernError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, StopPingUnsupportedWriteConcernResponse) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, StopPingUnsupportedWriteConcernResponse) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->stopPing(operationContext(), "");
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1480,8 +1466,8 @@ TEST_F(DistLockCatalogFixture, StopPingUnsupportedWriteConcernResponse) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, StopPingUnsupportedResponseFormat) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, StopPingUnsupportedResponseFormat) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->stopPing(operationContext(), "");
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
});
@@ -1494,8 +1480,8 @@ TEST_F(DistLockCatalogFixture, StopPingUnsupportedResponseFormat) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, BasicGetPing) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, BasicGetPing) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
Date_t ping(dateFromISOString("2015-05-26T13:06:27.293Z").getValue());
auto resultStatus = distLockCatalog()->getPing(operationContext(), "test");
ASSERT_OK(resultStatus.getStatus());
@@ -1531,13 +1517,13 @@ TEST_F(DistLockCatalogFixture, BasicGetPing) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetPingTargetError) {
+TEST_F(DistLockCatalogTest, GetPingTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
auto status = distLockCatalog()->getPing(operationContext(), "").getStatus();
ASSERT_EQUALS(ErrorCodes::InternalError, status.code());
}
-TEST_F(DistLockCatalogFixture, GetPingRunCmdError) {
+TEST_F(DistLockCatalogTest, GetPingRunCmdError) {
shutdownExecutorPool();
auto status = distLockCatalog()->getPing(operationContext(), "").getStatus();
@@ -1545,28 +1531,28 @@ TEST_F(DistLockCatalogFixture, GetPingRunCmdError) {
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogFixture, GetPingNotFound) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetPingNotFound) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getPing(operationContext(), "").getStatus();
ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, status.code());
ASSERT_FALSE(status.reason().empty());
});
- onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<std::vector<BSONObj>> {
return std::vector<BSONObj>();
});
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetPingUnsupportedFormat) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetPingUnsupportedFormat) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getPing(operationContext(), "test").getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
});
- onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<std::vector<BSONObj>> {
// return non-date type for ping.
BSONObj pingDoc(fromjson(R"({
_id: "test",
@@ -1582,8 +1568,8 @@ TEST_F(DistLockCatalogFixture, GetPingUnsupportedFormat) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, BasicGetLockByTS) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, BasicGetLockByTS) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
OID ts("555f99712c99a78c5b083358");
auto resultStatus = distLockCatalog()->getLockByTS(operationContext(), ts);
ASSERT_OK(resultStatus.getStatus());
@@ -1593,7 +1579,7 @@ TEST_F(DistLockCatalogFixture, BasicGetLockByTS) {
ASSERT_EQUALS(ts, lockDoc.getLockID());
});
- onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<std::vector<BSONObj>> {
ASSERT_EQUALS(dummyHost, request.target);
ASSERT_EQUALS("config", request.dbname);
@@ -1617,41 +1603,41 @@ TEST_F(DistLockCatalogFixture, BasicGetLockByTS) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetLockByTSTargetError) {
+TEST_F(DistLockCatalogTest, GetLockByTSTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
auto status = distLockCatalog()->getLockByTS(operationContext(), OID()).getStatus();
ASSERT_EQUALS(ErrorCodes::InternalError, status.code());
}
-TEST_F(DistLockCatalogFixture, GetLockByTSRunCmdError) {
+TEST_F(DistLockCatalogTest, GetLockByTSRunCmdError) {
shutdownExecutorPool();
auto status = distLockCatalog()->getLockByTS(operationContext(), OID()).getStatus();
ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, status.code());
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogFixture, GetLockByTSNotFound) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetLockByTSNotFound) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getLockByTS(operationContext(), OID()).getStatus();
ASSERT_EQUALS(ErrorCodes::LockNotFound, status.code());
ASSERT_FALSE(status.reason().empty());
});
- onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<std::vector<BSONObj>> {
return std::vector<BSONObj>();
});
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetLockByTSUnsupportedFormat) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetLockByTSUnsupportedFormat) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getLockByTS(operationContext(), OID()).getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
});
- onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<std::vector<BSONObj>> {
// return invalid non-numeric type for state.
BSONObj lockDoc(fromjson(R"({
_id: "test",
@@ -1667,8 +1653,8 @@ TEST_F(DistLockCatalogFixture, GetLockByTSUnsupportedFormat) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, BasicGetLockByName) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, BasicGetLockByName) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
OID ts("555f99712c99a78c5b083358");
auto resultStatus = distLockCatalog()->getLockByName(operationContext(), "abc");
ASSERT_OK(resultStatus.getStatus());
@@ -1704,13 +1690,13 @@ TEST_F(DistLockCatalogFixture, BasicGetLockByName) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetLockByNameTargetError) {
+TEST_F(DistLockCatalogTest, GetLockByNameTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
auto status = distLockCatalog()->getLockByName(operationContext(), "x").getStatus();
ASSERT_EQUALS(ErrorCodes::InternalError, status.code());
}
-TEST_F(DistLockCatalogFixture, GetLockByNameRunCmdError) {
+TEST_F(DistLockCatalogTest, GetLockByNameRunCmdError) {
shutdownExecutorPool();
auto status = distLockCatalog()->getLockByName(operationContext(), "x").getStatus();
@@ -1718,28 +1704,28 @@ TEST_F(DistLockCatalogFixture, GetLockByNameRunCmdError) {
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogFixture, GetLockByNameNotFound) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetLockByNameNotFound) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getLockByName(operationContext(), "x").getStatus();
ASSERT_EQUALS(ErrorCodes::LockNotFound, status.code());
ASSERT_FALSE(status.reason().empty());
});
- onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<std::vector<BSONObj>> {
return std::vector<BSONObj>();
});
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetLockByNameUnsupportedFormat) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetLockByNameUnsupportedFormat) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getLockByName(operationContext(), "x").getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
});
- onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<std::vector<BSONObj>> {
// Return non-numeric type for state.
BSONObj lockDoc(fromjson(R"({
_id: "x",
@@ -1755,5 +1741,5 @@ TEST_F(DistLockCatalogFixture, GetLockByNameUnsupportedFormat) {
future.timed_get(kFutureTimeout);
}
-} // unnamed namespace
+} // namespace
} // namespace mongo
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp b/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
index 40bb685867f..9ec67b40930 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
+++ b/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
@@ -29,35 +29,19 @@
#include "mongo/platform/basic.h"
#include <boost/optional.hpp>
-#include <boost/optional/optional_io.hpp>
#include <map>
#include <string>
-#include <type_traits>
#include <vector>
-#include "mongo/base/status.h"
-#include "mongo/base/status_with.h"
#include "mongo/bson/json.h"
-#include "mongo/bson/util/builder.h"
-#include "mongo/client/remote_command_targeter.h"
-#include "mongo/client/remote_command_targeter_factory_mock.h"
-#include "mongo/db/jsobj.h"
-#include "mongo/db/operation_context_noop.h"
-#include "mongo/db/server_options.h"
-#include "mongo/db/service_context_noop.h"
-#include "mongo/executor/task_executor.h"
-#include "mongo/executor/task_executor_pool.h"
#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/dist_lock_catalog_mock.h"
#include "mongo/s/catalog/replset_dist_lock_manager.h"
#include "mongo/s/catalog/sharding_catalog_client_mock.h"
#include "mongo/s/catalog/type_lockpings.h"
#include "mongo/s/catalog/type_locks.h"
-#include "mongo/s/client/shard_factory.h"
-#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/client/shard_remote.h"
#include "mongo/s/grid.h"
-#include "mongo/s/sharding_mongod_test_fixture.h"
+#include "mongo/s/shard_server_test_fixture.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/memory.h"
#include "mongo/stdx/mutex.h"
@@ -67,18 +51,13 @@
#include "mongo/util/time_support.h"
/**
- * Tests for ReplSetDistLockManager. Note that unlock and ping operations are executed on a
- * separate thread. And since this thread cannot capture the assertion exceptions, all the
- * assertion calls should be performed on the main thread.
+ * Tests for ReplSetDistLockManager. Note that unlock and ping operations are executed on a separate
+ * thread. And since this thread cannot capture the assertion exceptions, all the assertion calls
+ * should be performed on the main thread.
*/
-
namespace mongo {
namespace {
-using std::map;
-using std::string;
-using std::vector;
-
// Max duration to wait to satisfy test invariant before joining with main test thread.
const Seconds kJoinTimeout(30);
const Milliseconds kPingInterval(2);
@@ -88,49 +67,19 @@ const Seconds kLockExpiration(10);
* Basic fixture for ReplSetDistLockManager that starts it up before the test begins
* and shuts it down when a test finishes.
*/
-class ReplSetDistLockManagerFixture : public ShardingMongodTestFixture {
-public:
- /**
- * Returns the mocked catalog used by the lock manager being tested.
- */
- DistLockCatalogMock* getMockCatalog() {
- auto distLockCatalogMock = dynamic_cast<DistLockCatalogMock*>(distLockCatalog());
- invariant(distLockCatalogMock);
- return distLockCatalogMock;
- }
-
- /**
- * Get the process id that was initialized with the lock manager being tested.
- */
- string getProcessID() const {
- return _processID;
- }
-
+class ReplSetDistLockManagerFixture : public ShardServerTestFixture {
protected:
- virtual std::unique_ptr<TickSource> makeTickSource() {
- return stdx::make_unique<SystemTickSource>();
- }
-
void setUp() override {
- ShardingMongodTestFixture::setUp();
+ ShardServerTestFixture::setUp();
getServiceContext()->setTickSource(makeTickSource());
-
- // Initialize sharding components as a shard server.
- serverGlobalParams.clusterRole = ClusterRole::ShardServer;
- ConnectionString configCS = ConnectionString::forReplicaSet(
- "configReplSet", std::vector<HostAndPort>{HostAndPort{"config"}});
- uassertStatusOK(initializeGlobalShardingStateForMongodForTest(configCS));
}
void tearDown() override {
// Don't care about what shutDown passes to stopPing here.
getMockCatalog()->expectStopPing([](StringData) {}, Status::OK());
- ShardingMongodTestFixture::tearDown();
- }
- std::unique_ptr<DistLockCatalog> makeDistLockCatalog() override {
- return stdx::make_unique<DistLockCatalogMock>();
+ ShardServerTestFixture::tearDown();
}
std::unique_ptr<DistLockManager> makeDistLockManager(
@@ -144,20 +93,40 @@ protected:
}
std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) {
+ std::unique_ptr<DistLockManager> distLockManager) override {
return stdx::make_unique<ShardingCatalogClientMock>(std::move(distLockManager));
}
- std::unique_ptr<BalancerConfiguration> makeBalancerConfiguration() {
+ std::unique_ptr<BalancerConfiguration> makeBalancerConfiguration() override {
return stdx::make_unique<BalancerConfiguration>();
}
+ virtual std::unique_ptr<TickSource> makeTickSource() {
+ return stdx::make_unique<SystemTickSource>();
+ }
+
+ /**
+ * Returns the mocked catalog used by the lock manager being tested.
+ */
+ DistLockCatalogMock* getMockCatalog() {
+ auto distLockCatalogMock = dynamic_cast<DistLockCatalogMock*>(distLockCatalog());
+ invariant(distLockCatalogMock);
+ return distLockCatalogMock;
+ }
+
+ /**
+ * Get the process id that was initialized with the lock manager being tested.
+ */
+ std::string getProcessID() const {
+ return _processID;
+ }
+
private:
- string _processID = "test";
+ std::string _processID = "test";
};
class RSDistLockMgrWithMockTickSource : public ReplSetDistLockManagerFixture {
-public:
+protected:
/**
* Override the way the fixture gets the tick source to install to use a mock tick source.
*/
@@ -169,29 +138,27 @@ public:
* Returns the mock tick source.
*/
TickSourceMock* getMockTickSource() {
- return dynamic_cast<TickSourceMock*>(getGlobalServiceContext()->getTickSource());
+ return dynamic_cast<TickSourceMock*>(getServiceContext()->getTickSource());
}
};
std::string mapToString(const std::map<OID, int>& map) {
StringBuilder str;
-
for (const auto& entry : map) {
str << "(" << entry.first.toString() << ": " << entry.second << ")";
}
return str.str();
-};
+}
std::string vectorToString(const std::vector<OID>& list) {
StringBuilder str;
-
for (const auto& entry : list) {
str << "(" << entry.toString() << ")";
}
return str.str();
-};
+}
/**
* Test scenario:
@@ -200,9 +167,9 @@ std::string vectorToString(const std::vector<OID>& list) {
* 3. Check lock id used in lock and unlock are the same.
*/
TEST_F(ReplSetDistLockManagerFixture, BasicLockLifeCycle) {
- string lockName("test");
+ std::string lockName("test");
Date_t now(Date_t::now());
- string whyMsg("because");
+ std::string whyMsg("because");
LocksType retLockDoc;
retLockDoc.setName(lockName);
@@ -262,11 +229,11 @@ TEST_F(ReplSetDistLockManagerFixture, BasicLockLifeCycle) {
* 4. Check lock id used in lock and unlock are the same.
*/
TEST_F(RSDistLockMgrWithMockTickSource, LockSuccessAfterRetry) {
- string lockName("test");
- string me("me");
+ std::string lockName("test");
+ std::string me("me");
boost::optional<OID> lastTS;
Date_t lastTime(Date_t::now());
- string whyMsg("because");
+ std::string whyMsg("because");
int retryAttempt = 0;
const int kMaxRetryAttempt = 3;
@@ -297,7 +264,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockSuccessAfterRetry) {
ASSERT_EQUALS(lockName, lockID);
// Lock session ID should be the same after first attempt.
if (lastTS) {
- ASSERT_EQUALS(lastTS, lockSessionID);
+ ASSERT_EQUALS(*lastTS, lockSessionID);
}
ASSERT_EQUALS(getProcessID(), processId);
ASSERT_GREATER_THAN_OR_EQUALS(time, lastTime);
@@ -319,7 +286,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockSuccessAfterRetry) {
ASSERT_EQUALS(lockName, lockID);
// Lock session ID should be the same after first attempt.
if (lastTS) {
- ASSERT_EQUALS(lastTS, lockSessionID);
+ ASSERT_EQUALS(*lastTS, lockSessionID);
}
ASSERT_TRUE(lockSessionID.isSet());
ASSERT_EQUALS(getProcessID(), processId);
@@ -385,7 +352,8 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockSuccessAfterRetry) {
}
ASSERT_EQUALS(1, unlockCallCount);
- ASSERT_EQUALS(lastTS, unlockSessionIDPassed);
+ ASSERT(lastTS);
+ ASSERT_EQUALS(*lastTS, unlockSessionIDPassed);
}
/**
@@ -396,11 +364,11 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockSuccessAfterRetry) {
* 4. Make sure that unlock is called to cleanup the last lock attempted that error out.
*/
TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
- string lockName("test");
- string me("me");
+ std::string lockName("test");
+ std::string me("me");
boost::optional<OID> lastTS;
Date_t lastTime(Date_t::now());
- string whyMsg("because");
+ std::string whyMsg("because");
int retryAttempt = 0;
const int kMaxRetryAttempt = 3;
@@ -416,7 +384,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
ASSERT_EQUALS(lockName, lockID);
// Lock session ID should be the same after first attempt.
if (lastTS) {
- ASSERT_EQUALS(lastTS, lockSessionID);
+ ASSERT_EQUALS(*lastTS, lockSessionID);
}
ASSERT_EQUALS(getProcessID(), processId);
ASSERT_GREATER_THAN_OR_EQUALS(time, lastTime);
@@ -438,7 +406,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
ASSERT_EQUALS(lockName, lockID);
// Lock session ID should be the same after first attempt.
if (lastTS) {
- ASSERT_EQUALS(lastTS, lockSessionID);
+ ASSERT_EQUALS(*lastTS, lockSessionID);
}
lastTS = lockSessionID;
ASSERT_TRUE(lockSessionID.isSet());
@@ -498,7 +466,8 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
ASSERT_FALSE(didTimeout);
ASSERT_EQUALS(1, unlockCallCount);
- ASSERT_EQUALS(lastTS, unlockSessionIDPassed);
+ ASSERT(lastTS);
+ ASSERT_EQUALS(*lastTS, unlockSessionIDPassed);
}
TEST_F(ReplSetDistLockManagerFixture, LockBusyNoRetry) {
@@ -526,11 +495,11 @@ TEST_F(ReplSetDistLockManagerFixture, LockBusyNoRetry) {
* 5. Implicitly check that unlock is not called (default setting of mock catalog).
*/
TEST_F(RSDistLockMgrWithMockTickSource, LockRetryTimeout) {
- string lockName("test");
- string me("me");
+ std::string lockName("test");
+ std::string me("me");
boost::optional<OID> lastTS;
Date_t lastTime(Date_t::now());
- string whyMsg("because");
+ std::string whyMsg("because");
int retryAttempt = 0;
@@ -544,7 +513,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockRetryTimeout) {
ASSERT_EQUALS(lockName, lockID);
// Lock session ID should be the same after first attempt.
if (lastTS) {
- ASSERT_EQUALS(lastTS, lockSessionID);
+ ASSERT_EQUALS(*lastTS, lockSessionID);
}
ASSERT_EQUALS(getProcessID(), processId);
ASSERT_GREATER_THAN_OR_EQUALS(time, lastTime);
@@ -578,10 +547,10 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockRetryTimeout) {
* 4. Check that lockSessionID used on all unlock is the same as the one used to grab lock.
*/
TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
- string lockName("test");
- string me("me");
+ std::string lockName("test");
+ std::string me("me");
OID lastTS;
- string whyMsg("because");
+ std::string whyMsg("because");
getMockCatalog()->expectGrabLock(
[this, &lockName, &lastTS, &me, &whyMsg](StringData lockID,
@@ -654,7 +623,7 @@ TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
TEST_F(ReplSetDistLockManagerFixture, LockPinging) {
stdx::mutex testMutex;
stdx::condition_variable ping3TimesCV;
- vector<string> processIDList;
+ std::vector<std::string> processIDList;
getMockCatalog()->expectPing(
[&testMutex, &ping3TimesCV, &processIDList](StringData processIDArg, Date_t ping) {
@@ -705,7 +674,7 @@ TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
stdx::mutex unlockMutex;
stdx::condition_variable unlockCV;
const unsigned int kUnlockErrorCount = 3;
- vector<OID> lockSessionIDPassed;
+ std::vector<OID> lockSessionIDPassed;
getMockCatalog()->expectUnLock(
[this, &unlockMutex, &unlockCV, &kUnlockErrorCount, &lockSessionIDPassed](
@@ -784,9 +753,8 @@ TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
stdx::mutex testMutex;
stdx::condition_variable unlockCV;
-
- vector<OID> lockSessionIDPassed;
- map<OID, int> unlockIDMap; // id -> count
+ std::vector<OID> lockSessionIDPassed;
+ std::map<OID, int> unlockIDMap; // id -> count
/**
* Returns true if all values in the map are greater than 2.
@@ -2079,9 +2047,9 @@ TEST_F(RSDistLockMgrWithMockTickSource, CanOvertakeIfNoPingDocument) {
}
TEST_F(ReplSetDistLockManagerFixture, TryLockWithLocalWriteConcernBusy) {
- string lockName("test");
+ std::string lockName("test");
Date_t now(Date_t::now());
- string whyMsg("because");
+ std::string whyMsg("because");
LocksType retLockDoc;
retLockDoc.setName(lockName);
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 9b26d5903bd..92a8ece2ff5 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -35,14 +35,10 @@
#include <iomanip>
#include <pcrecpp.h>
-#include "mongo/base/status.h"
-#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/bson/util/bson_extract.h"
#include "mongo/client/read_preference.h"
#include "mongo/client/remote_command_targeter.h"
-#include "mongo/client/replica_set_monitor.h"
-#include "mongo/db/audit.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/namespace_string.h"
@@ -51,7 +47,6 @@
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/executor/network_interface.h"
-#include "mongo/executor/task_executor.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/s/catalog/config_server_version.h"
@@ -64,7 +59,6 @@
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog/type_tags.h"
#include "mongo/s/client/shard.h"
-#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
#include "mongo/s/request_types/set_shard_version_request.h"
#include "mongo/s/shard_key_pattern.h"
diff --git a/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp b/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp
index 9e7f354c9c0..429a11ec39b 100644
--- a/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp
@@ -38,7 +38,7 @@
#include "mongo/executor/task_executor.h"
#include "mongo/s/catalog/sharding_catalog_client_impl.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/stdx/chrono.h"
#include "mongo/stdx/future.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/s/catalog/sharding_catalog_test.cpp b/src/mongo/s/catalog/sharding_catalog_test.cpp
index 9f1c784a9f0..707e6500931 100644
--- a/src/mongo/s/catalog/sharding_catalog_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_test.cpp
@@ -52,7 +52,7 @@
#include "mongo/s/catalog/type_tags.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/s/versioning.h"
#include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/stdx/future.h"
diff --git a/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp b/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
index be76d387b1a..e15217ade8e 100644
--- a/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
@@ -51,7 +51,7 @@
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/stdx/future.h"
#include "mongo/stdx/memory.h"
diff --git a/src/mongo/s/catalog_cache_test_fixture.h b/src/mongo/s/catalog_cache_test_fixture.h
index 439541424c5..244930a664d 100644
--- a/src/mongo/s/catalog_cache_test_fixture.h
+++ b/src/mongo/s/catalog_cache_test_fixture.h
@@ -31,7 +31,7 @@
#include <vector>
#include "mongo/db/namespace_string.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/stdx/memory.h"
namespace mongo {
diff --git a/src/mongo/s/chunk_manager_index_bounds_test.cpp b/src/mongo/s/chunk_manager_index_bounds_test.cpp
index 9ffc73d0a1c..31f8182db7a 100644
--- a/src/mongo/s/chunk_manager_index_bounds_test.cpp
+++ b/src/mongo/s/chunk_manager_index_bounds_test.cpp
@@ -37,7 +37,7 @@
#include "mongo/db/query/canonical_query.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/shard_key_pattern.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/s/client/SConscript b/src/mongo/s/client/SConscript
index ff5fd98ba3b..325ccc23842 100644
--- a/src/mongo/s/client/SConscript
+++ b/src/mongo/s/client/SConscript
@@ -25,7 +25,7 @@ env.CppUnitTest(
'shard_remote_test.cpp',
],
LIBDEPS=[
- '$BUILD_DIR/mongo/s/sharding_test_fixture',
+ '$BUILD_DIR/mongo/s/sharding_router_test_fixture',
'sharding_client',
],
)
@@ -49,7 +49,7 @@ env.CppUnitTest(
],
LIBDEPS=[
'$BUILD_DIR/mongo/s/coreshard',
- '$BUILD_DIR/mongo/s/sharding_test_fixture',
+ '$BUILD_DIR/mongo/s/sharding_router_test_fixture',
]
)
diff --git a/src/mongo/s/client/shard_connection_test.cpp b/src/mongo/s/client/shard_connection_test.cpp
index dc734b82640..2b6ef24a4c9 100644
--- a/src/mongo/s/client/shard_connection_test.cpp
+++ b/src/mongo/s/client/shard_connection_test.cpp
@@ -27,11 +27,9 @@
#include "mongo/platform/basic.h"
-#include <cstdint>
#include <vector>
#include "mongo/db/client.h"
-#include "mongo/db/service_context.h"
#include "mongo/dbtests/mock/mock_conn_registry.h"
#include "mongo/dbtests/mock/mock_dbclient_connection.h"
#include "mongo/s/client/shard_connection.h"
@@ -47,20 +45,12 @@
namespace mongo {
namespace {
-using std::string;
-using std::vector;
+const std::string TARGET_HOST = "$dummy:27017";
-const string TARGET_HOST = "$dummy:27017";
-
-/**
- * Warning: cannot run in parallel
- */
-class ShardConnFixture : public mongo::unittest::Test {
+class ShardConnFixture : public unittest::Test {
public:
void setUp() {
- if (!haveClient()) {
- Client::initThread("ShardConnFixture", getGlobalServiceContext(), NULL);
- }
+ Client::initThreadIfNotAlready("ShardConnFixture");
_maxPoolSizePerHost = mongo::shardConnectionPool.getMaxPoolSize();
mongo::ConnectionString::setConnectionHook(
@@ -107,38 +97,32 @@ protected:
void checkNewConns(void (*checkFunc)(uint64_t, uint64_t),
uint64_t arg2,
size_t newConnsToCreate) {
- vector<ShardConnection*> newConnList;
+ std::vector<std::unique_ptr<ShardConnection>> newConnList;
for (size_t x = 0; x < newConnsToCreate; x++) {
- ShardConnection* newConn =
- new ShardConnection(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
+ auto newConn = std::make_unique<ShardConnection>(
+ ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
checkFunc(newConn->get()->getSockCreationMicroSec(), arg2);
- newConnList.push_back(newConn);
+ newConnList.emplace_back(std::move(newConn));
}
const uint64_t oldCreationTime = mongo::curTimeMicros64();
- for (vector<ShardConnection*>::iterator iter = newConnList.begin();
- iter != newConnList.end();
- ++iter) {
- (*iter)->done();
- delete *iter;
+ for (auto& conn : newConnList) {
+ conn->done();
}
newConnList.clear();
// Check that connections created after the purge was put back to the pool.
for (size_t x = 0; x < newConnsToCreate; x++) {
- ShardConnection* newConn =
- new ShardConnection(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
+ auto newConn = std::make_unique<ShardConnection>(
+ ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
ASSERT_LESS_THAN(newConn->get()->getSockCreationMicroSec(), oldCreationTime);
- newConnList.push_back(newConn);
+ newConnList.emplace_back(std::move(newConn));
}
- for (vector<ShardConnection*>::iterator iter = newConnList.begin();
- iter != newConnList.end();
- ++iter) {
- (*iter)->done();
- delete *iter;
+ for (auto& conn : newConnList) {
+ conn->done();
}
}
diff --git a/src/mongo/s/client/sharding_connection_hook.cpp b/src/mongo/s/client/sharding_connection_hook.cpp
index 94e19f2ee4e..8306d0a00f3 100644
--- a/src/mongo/s/client/sharding_connection_hook.cpp
+++ b/src/mongo/s/client/sharding_connection_hook.cpp
@@ -35,9 +35,6 @@
#include <string>
#include "mongo/bson/util/bson_extract.h"
-#include "mongo/db/audit.h"
-#include "mongo/db/auth/authorization_manager_global.h"
-#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/internal_user_auth.h"
#include "mongo/db/client.h"
#include "mongo/rpc/get_status_from_command_result.h"
diff --git a/src/mongo/s/cluster_identity_loader_test.cpp b/src/mongo/s/cluster_identity_loader_test.cpp
index 213fec88d7e..98817150d6b 100644
--- a/src/mongo/s/cluster_identity_loader_test.cpp
+++ b/src/mongo/s/cluster_identity_loader_test.cpp
@@ -35,6 +35,7 @@
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/commands.h"
#include "mongo/db/query/query_request.h"
+#include "mongo/db/service_context_noop.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/task_executor.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
@@ -44,7 +45,7 @@
#include "mongo/s/catalog/type_config_version.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/cluster_identity_loader.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/stdx/future.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -72,6 +73,13 @@ public:
configTargeter()->setFindHostReturnValue(configHost);
}
+ void tearDown() override {
+ ShardingTestFixture::tearDown();
+
+ // Reset the global service context so that the cluster identity gets cleared
+ setGlobalServiceContext(std::make_unique<ServiceContextNoop>());
+ }
+
void expectConfigVersionLoad(StatusWith<OID> result) {
onFindCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQUALS(configHost, request.target);
diff --git a/src/mongo/s/cluster_last_error_info_test.cpp b/src/mongo/s/cluster_last_error_info_test.cpp
index b7a06a58ea1..b9903192581 100644
--- a/src/mongo/s/cluster_last_error_info_test.cpp
+++ b/src/mongo/s/cluster_last_error_info_test.cpp
@@ -36,7 +36,7 @@
#include "mongo/executor/network_interface_mock.h"
#include "mongo/rpc/metadata/sharding_metadata.h"
#include "mongo/s/cluster_last_error_info.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/stdx/future.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/s/commands/SConscript b/src/mongo/s/commands/SConscript
index 9dc7ae2124a..7d53bbe70eb 100644
--- a/src/mongo/s/commands/SConscript
+++ b/src/mongo/s/commands/SConscript
@@ -13,7 +13,6 @@ env.Library(
],
LIBDEPS=[
'$BUILD_DIR/mongo/db/commands',
- '$BUILD_DIR/mongo/s/client/sharding_client',
'$BUILD_DIR/mongo/db/commands/killcursors_common',
'$BUILD_DIR/mongo/db/commands/current_op_common',
'$BUILD_DIR/mongo/s/async_requests_sender',
diff --git a/src/mongo/s/config_server_test_fixture.cpp b/src/mongo/s/config_server_test_fixture.cpp
index 4df783c1055..6df09647b8f 100644
--- a/src/mongo/s/config_server_test_fixture.cpp
+++ b/src/mongo/s/config_server_test_fixture.cpp
@@ -162,10 +162,6 @@ std::unique_ptr<ShardingCatalogClient> ConfigServerTestFixture::makeShardingCata
return stdx::make_unique<ShardingCatalogClientImpl>(std::move(distLockManager));
}
-std::unique_ptr<CatalogCache> ConfigServerTestFixture::makeCatalogCache() {
- return stdx::make_unique<CatalogCache>(CatalogCacheLoader::get(getServiceContext()));
-}
-
std::unique_ptr<BalancerConfiguration> ConfigServerTestFixture::makeBalancerConfiguration() {
return stdx::make_unique<BalancerConfiguration>();
}
diff --git a/src/mongo/s/config_server_test_fixture.h b/src/mongo/s/config_server_test_fixture.h
index cfbe682aa5e..b2e2bed6a6d 100644
--- a/src/mongo/s/config_server_test_fixture.h
+++ b/src/mongo/s/config_server_test_fixture.h
@@ -163,8 +163,6 @@ protected:
std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
std::unique_ptr<DistLockManager> distLockManager) override;
- std::unique_ptr<CatalogCache> makeCatalogCache() override;
-
std::unique_ptr<ClusterCursorManager> makeClusterCursorManager() override;
std::unique_ptr<BalancerConfiguration> makeBalancerConfiguration() override;
diff --git a/src/mongo/s/query/SConscript b/src/mongo/s/query/SConscript
index 0c90f644c5e..4ff2e432ae6 100644
--- a/src/mongo/s/query/SConscript
+++ b/src/mongo/s/query/SConscript
@@ -102,7 +102,7 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/db/query/query_request',
'$BUILD_DIR/mongo/db/service_context_noop_init',
'$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture',
- '$BUILD_DIR/mongo/s/sharding_test_fixture',
+ '$BUILD_DIR/mongo/s/sharding_router_test_fixture',
],
)
@@ -116,7 +116,7 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/db/auth/authorization_manager_mock_init',
'$BUILD_DIR/mongo/db/query/query_request',
'$BUILD_DIR/mongo/db/service_context_noop_init',
- '$BUILD_DIR/mongo/s/sharding_test_fixture',
+ '$BUILD_DIR/mongo/s/sharding_router_test_fixture',
],
)
env.Library(
diff --git a/src/mongo/s/query/async_results_merger_test.cpp b/src/mongo/s/query/async_results_merger_test.cpp
index 9106a3f1941..4c0e32cba51 100644
--- a/src/mongo/s/query/async_results_merger_test.cpp
+++ b/src/mongo/s/query/async_results_merger_test.cpp
@@ -41,7 +41,7 @@
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/s/query/establish_cursors_test.cpp b/src/mongo/s/query/establish_cursors_test.cpp
index c7437b3eb7e..b36a531b2a2 100644
--- a/src/mongo/s/query/establish_cursors_test.cpp
+++ b/src/mongo/s/query/establish_cursors_test.cpp
@@ -35,7 +35,7 @@
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/query/establish_cursors.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/s/shard_server_test_fixture.cpp b/src/mongo/s/shard_server_test_fixture.cpp
index e246e4b621a..36fca36f3a1 100644
--- a/src/mongo/s/shard_server_test_fixture.cpp
+++ b/src/mongo/s/shard_server_test_fixture.cpp
@@ -42,7 +42,6 @@
#include "mongo/stdx/memory.h"
namespace mongo {
-
namespace {
const HostAndPort kConfigHostAndPort("dummy", 123);
@@ -57,16 +56,6 @@ std::shared_ptr<RemoteCommandTargeterMock> ShardServerTestFixture::configTargete
return RemoteCommandTargeterMock::get(shardRegistry()->getConfigShard()->getTargeter());
}
-void ShardServerTestFixture::expectFindOnConfigSendErrorCode(ErrorCodes::Error code) {
- onCommand([&, code](const executor::RemoteCommandRequest& request) {
- ASSERT_EQ(request.target, kConfigHostAndPort);
- ASSERT_EQ(request.dbname, "config");
- BSONObjBuilder responseBuilder;
- CommandHelpers::appendCommandStatus(responseBuilder, Status(code, ""));
- return responseBuilder.obj();
- });
-}
-
void ShardServerTestFixture::setUp() {
ShardingMongodTestFixture::setUp();
@@ -109,8 +98,4 @@ std::unique_ptr<ShardingCatalogClient> ShardServerTestFixture::makeShardingCatal
return stdx::make_unique<ShardingCatalogClientImpl>(std::move(distLockManager));
}
-std::unique_ptr<CatalogCache> ShardServerTestFixture::makeCatalogCache() {
- return stdx::make_unique<CatalogCache>(CatalogCacheLoader::get(getServiceContext()));
-}
-
} // namespace mongo
diff --git a/src/mongo/s/shard_server_test_fixture.h b/src/mongo/s/shard_server_test_fixture.h
index 6fe244964aa..4ca620f2792 100644
--- a/src/mongo/s/shard_server_test_fixture.h
+++ b/src/mongo/s/shard_server_test_fixture.h
@@ -36,9 +36,9 @@ namespace mongo {
class RemoteCommandTargeterMock;
/**
- * Test fixture for shard components, as opposed to config or mongos components.
- * Has a mock network and ephemeral storage engine provided by ShardingMongodTestFixture,
- * additionally sets up mock dist lock catalog and manager with a real catalog client.
+ * Test fixture for shard components, as opposed to config or mongos components. Provides a mock
+ * network and ephemeral storage engine via ShardingMongodTestFixture. Additionally sets up mock
+ * dist lock catalog and manager with a real catalog client.
*/
class ShardServerTestFixture : public ShardingMongodTestFixture {
public:
@@ -56,13 +56,7 @@ public:
*/
std::shared_ptr<RemoteCommandTargeterMock> configTargeterMock();
- void expectFindOnConfigSendErrorCode(ErrorCodes::Error code);
-
protected:
- /**
- * Sets up a ClusterRole::ShardServer replica set with a real catalog client and mock dist lock
- * catalog and manager.
- */
void setUp() override;
void tearDown() override;
@@ -83,8 +77,6 @@ protected:
*/
std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
std::unique_ptr<DistLockManager> distLockManager) override;
-
- std::unique_ptr<CatalogCache> makeCatalogCache() override;
};
} // namespace mongo
diff --git a/src/mongo/s/sharding_mongod_test_fixture.cpp b/src/mongo/s/sharding_mongod_test_fixture.cpp
index 83215c99999..cbf5099a2df 100644
--- a/src/mongo/s/sharding_mongod_test_fixture.cpp
+++ b/src/mongo/s/sharding_mongod_test_fixture.cpp
@@ -36,6 +36,7 @@
#include "mongo/base/status_with.h"
#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/client/remote_command_targeter_mock.h"
+#include "mongo/client/replica_set_monitor.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
@@ -49,11 +50,11 @@
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/repl/repl_settings.h"
#include "mongo/db/repl/replication_consistency_markers_mock.h"
-#include "mongo/db/repl/replication_coordinator.h"
-#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/repl/replication_process.h"
#include "mongo/db/repl/replication_recovery_mock.h"
#include "mongo/db/repl/storage_interface_mock.h"
+#include "mongo/db/s/config_server_op_observer.h"
+#include "mongo/db/s/shard_server_op_observer.h"
#include "mongo/db/service_context_noop.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/task_executor_pool.h"
@@ -90,16 +91,10 @@ using repl::ReplicationCoordinatorMock;
using repl::ReplSettings;
using unittest::assertGet;
-using std::string;
-using std::vector;
-using unittest::assertGet;
-
ShardingMongodTestFixture::ShardingMongodTestFixture() = default;
ShardingMongodTestFixture::~ShardingMongodTestFixture() = default;
-const Seconds ShardingMongodTestFixture::kFutureTimeout{5};
-
void ShardingMongodTestFixture::setUp() {
ServiceContextMongoDTest::setUp();
@@ -119,10 +114,9 @@ void ShardingMongodTestFixture::setUp() {
serversBob.append(BSON("host" << _servers[i].toString() << "_id" << static_cast<int>(i)));
}
repl::ReplSetConfig replSetConfig;
- replSetConfig
- .initialize(BSON("_id" << _setName << "protocolVersion" << 1 << "version" << 3 << "members"
- << serversBob.arr()))
- .transitional_ignore();
+ ASSERT_OK(replSetConfig.initialize(
+ BSON("_id" << _setName << "protocolVersion" << 1 << "version" << 3 << "members"
+ << serversBob.arr())));
replCoordPtr->setGetConfigReturnValue(replSetConfig);
repl::ReplicationCoordinator::set(service, std::move(replCoordPtr));
@@ -137,15 +131,16 @@ void ShardingMongodTestFixture::setUp() {
storagePtr.get(),
stdx::make_unique<repl::ReplicationConsistencyMarkersMock>(),
stdx::make_unique<repl::ReplicationRecoveryMock>()));
- repl::ReplicationProcess::get(_opCtx.get())
- ->initializeRollbackID(_opCtx.get())
- .transitional_ignore();
+
+ ASSERT_OK(repl::ReplicationProcess::get(_opCtx.get())->initializeRollbackID(_opCtx.get()));
repl::StorageInterface::set(service, std::move(storagePtr));
auto makeOpObserver = [&] {
auto opObserver = stdx::make_unique<OpObserverRegistry>();
opObserver->addObserver(stdx::make_unique<OpObserverImpl>());
+ opObserver->addObserver(stdx::make_unique<ConfigServerOpObserver>());
+ opObserver->addObserver(stdx::make_unique<ShardServerOpObserver>());
return opObserver;
};
service->setOpObserver(makeOpObserver());
@@ -250,10 +245,6 @@ std::unique_ptr<ShardingCatalogClient> ShardingMongodTestFixture::makeShardingCa
return nullptr;
}
-std::unique_ptr<CatalogCache> ShardingMongodTestFixture::makeCatalogCache() {
- return nullptr;
-}
-
std::unique_ptr<ClusterCursorManager> ShardingMongodTestFixture::makeClusterCursorManager() {
return nullptr;
}
@@ -283,7 +274,7 @@ Status ShardingMongodTestFixture::initializeGlobalShardingStateForMongodForTest(
auto const grid = Grid::get(operationContext());
grid->init(makeShardingCatalogClient(std::move(distLockManagerPtr)),
- makeCatalogCache(),
+ stdx::make_unique<CatalogCache>(CatalogCacheLoader::get(getServiceContext())),
makeShardRegistry(configConnStr),
makeClusterCursorManager(),
makeBalancerConfiguration(),
@@ -302,7 +293,7 @@ Status ShardingMongodTestFixture::initializeGlobalShardingStateForMongodForTest(
}
void ShardingMongodTestFixture::tearDown() {
- // Only shut down components that were actually initialized and not already shut down.
+ ReplicaSetMonitor::cleanup();
if (Grid::get(operationContext())->getExecutorPool() && !_executorPoolShutDown) {
Grid::get(operationContext())->getExecutorPool()->shutdownAndJoin();
@@ -351,11 +342,6 @@ void ShardingMongodTestFixture::shutdownExecutorPool() {
_executorPoolShutDown = true;
}
-executor::NetworkInterfaceMock* ShardingMongodTestFixture::network() const {
- invariant(_mockNetwork);
- return _mockNetwork;
-}
-
executor::TaskExecutor* ShardingMongodTestFixture::executor() const {
invariant(Grid::get(operationContext())->getExecutorPool());
return Grid::get(operationContext())->getExecutorPool()->getFixedExecutor();
diff --git a/src/mongo/s/sharding_mongod_test_fixture.h b/src/mongo/s/sharding_mongod_test_fixture.h
index aca1a3f22c0..25f549f0128 100644
--- a/src/mongo/s/sharding_mongod_test_fixture.h
+++ b/src/mongo/s/sharding_mongod_test_fixture.h
@@ -28,13 +28,9 @@
#pragma once
-#include <utility>
-
-#include "mongo/db/service_context.h"
+#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/service_context_d_test_fixture.h"
-#include "mongo/executor/network_test_env.h"
-#include "mongo/s/grid.h"
-#include "mongo/unittest/unittest.h"
+#include "mongo/s/sharding_test_fixture_common.h"
namespace mongo {
@@ -42,12 +38,9 @@ class CatalogCacheLoader;
class ConnectionString;
class DistLockCatalog;
class DistLockManager;
-class NamespaceString;
class RemoteCommandTargeterFactoryMock;
-class ShardRegistry;
namespace repl {
-class ReplicationCoordinatorMock;
class ReplSettings;
} // namespace repl
@@ -60,19 +53,12 @@ class ReplSettings;
* components (including a NetworkInterface/TaskExecutor subsystem backed by the NetworkTestEnv),
* but allows subclasses to replace any component with its real implementation, a mock, or nullptr.
*/
-class ShardingMongodTestFixture : public ServiceContextMongoDTest {
+class ShardingMongodTestFixture : public ServiceContextMongoDTest,
+ public ShardingTestFixtureCommon {
public:
ShardingMongodTestFixture();
~ShardingMongodTestFixture();
- static const Seconds kFutureTimeout;
-
- template <typename Lambda>
- executor::NetworkTestEnv::FutureHandle<typename std::result_of<Lambda()>::type> launchAsync(
- Lambda&& func) const {
- return _networkTestEnv->launchAsync(std::forward<Lambda>(func));
- }
-
/**
* Initializes sharding components according to the cluster role in
* serverGlobalParams.clusterRole and puts the components on the Grid, mimicking the
@@ -103,7 +89,6 @@ public:
// if they have been initialized.
executor::TaskExecutor* executor() const;
- executor::NetworkInterfaceMock* network() const;
repl::ReplicationCoordinatorMock* replicationCoordinator() const;
@@ -151,13 +136,6 @@ protected:
*/
void tearDown() override;
- // Allow subclasses to modify this node's hostname and port, set name, and replica set members.
-
- const HostAndPort _host{"node1:12345"};
- const std::string _setName = "mySet";
- const std::vector<HostAndPort> _servers{
- _host, HostAndPort("node2:12345"), HostAndPort("node3:12345")};
-
// Methods for creating and returning sharding components. Some of these methods have been
// implemented to return the real implementation of the component as the default, while others
// return a mock or nullptr. Subclasses can override any of these methods to create and
@@ -211,11 +189,6 @@ protected:
/**
* Base class returns nullptr.
*/
- virtual std::unique_ptr<CatalogCache> makeCatalogCache();
-
- /**
- * Base class returns nullptr.
- */
virtual std::unique_ptr<ClusterCursorManager> makeClusterCursorManager();
/**
@@ -224,16 +197,14 @@ protected:
virtual std::unique_ptr<BalancerConfiguration> makeBalancerConfiguration();
private:
+ const HostAndPort _host{"node1:12345"};
+ const std::string _setName = "mySet";
+ const std::vector<HostAndPort> _servers{
+ _host, HostAndPort("node2:12345"), HostAndPort("node3:12345")};
+
ServiceContext::UniqueClient _client;
ServiceContext::UniqueOperationContext _opCtx;
- // Since a NetworkInterface is a private member of a TaskExecutor, we store a raw pointer to the
- // fixed TaskExecutor's NetworkInterface here.
- // TODO(esha): Currently, some fine-grained synchronization of the network and task executor is
- // is outside of NetworkTestEnv's capabilities. If all control of the network is done through
- // _networkTestEnv, storing this raw pointer is not necessary.
- executor::NetworkInterfaceMock* _mockNetwork = nullptr;
-
// Since the RemoteCommandTargeterFactory is currently a private member of ShardFactory, we
// store a raw pointer to it here.
RemoteCommandTargeterFactoryMock* _targeterFactory = nullptr;
@@ -248,9 +219,6 @@ private:
repl::ReplicationCoordinatorMock* _replCoord = nullptr;
- // Allows for processing tasks through the NetworkInterfaceMock/ThreadPoolMock subsystem.
- std::unique_ptr<executor::NetworkTestEnv> _networkTestEnv;
-
// Records if a component has been shut down, so that it is only shut down once.
bool _executorPoolShutDown = false;
};
diff --git a/src/mongo/s/sharding_test_fixture.cpp b/src/mongo/s/sharding_router_test_fixture.cpp
index b9495d65e7a..05dc4a44069 100644
--- a/src/mongo/s/sharding_test_fixture.cpp
+++ b/src/mongo/s/sharding_router_test_fixture.cpp
@@ -28,12 +28,11 @@
#include "mongo/platform/basic.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include <algorithm>
#include <vector>
-#include "mongo/base/status_with.h"
#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/client/remote_command_targeter_mock.h"
@@ -69,7 +68,6 @@
#include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/stdx/memory.h"
#include "mongo/transport/mock_session.h"
-#include "mongo/transport/transport_layer.h"
#include "mongo/transport/transport_layer_mock.h"
#include "mongo/util/clock_source_mock.h"
#include "mongo/util/tick_source_mock.h"
@@ -83,49 +81,44 @@ using executor::RemoteCommandResponse;
using executor::ShardingTaskExecutor;
using unittest::assertGet;
-using std::string;
-using std::vector;
-using unittest::assertGet;
-
namespace {
+
std::unique_ptr<ShardingTaskExecutor> makeShardingTestExecutor(
std::unique_ptr<NetworkInterfaceMock> net) {
auto testExecutor = makeThreadPoolTestExecutor(std::move(net));
return stdx::make_unique<ShardingTaskExecutor>(std::move(testExecutor));
}
-}
+
+} // namespace
ShardingTestFixture::ShardingTestFixture() = default;
ShardingTestFixture::~ShardingTestFixture() = default;
-const Seconds ShardingTestFixture::kFutureTimeout{5};
-
void ShardingTestFixture::setUp() {
+ auto const service = serviceContext();
+
+ // Configure the service context
+ service->setFastClockSource(stdx::make_unique<ClockSourceMock>());
+ service->setPreciseClockSource(stdx::make_unique<ClockSourceMock>());
+ service->setTickSource(stdx::make_unique<TickSourceMock>());
+
{
- auto service = stdx::make_unique<ServiceContextNoop>();
- service->setFastClockSource(stdx::make_unique<ClockSourceMock>());
- service->setPreciseClockSource(stdx::make_unique<ClockSourceMock>());
- service->setTickSource(stdx::make_unique<TickSourceMock>());
auto tlMock = stdx::make_unique<transport::TransportLayerMock>();
_transportLayer = tlMock.get();
+ ASSERT_OK(_transportLayer->start());
service->setTransportLayer(std::move(tlMock));
- _transportLayer->start().transitional_ignore();
-
- // Set the newly created service context to be the current global context so that tests,
- // which invoke code still referencing getGlobalServiceContext will work properly.
- setGlobalServiceContext(std::move(service));
}
- CollatorFactoryInterface::set(serviceContext(), stdx::make_unique<CollatorFactoryMock>());
+ CollatorFactoryInterface::set(service, stdx::make_unique<CollatorFactoryMock>());
_transportSession = transport::MockSession::create(_transportLayer);
- _client = serviceContext()->makeClient("ShardingTestFixture", _transportSession);
+ _client = service->makeClient("ShardingTestFixture", _transportSession);
_opCtx = _client->makeOperationContext();
// Set up executor pool used for most operations.
auto fixedNet = stdx::make_unique<executor::NetworkInterfaceMock>();
fixedNet->setEgressMetadataHook(
- stdx::make_unique<rpc::ShardingEgressMetadataHookForMongos>(serviceContext()));
+ stdx::make_unique<rpc::ShardingEgressMetadataHookForMongos>(service));
_mockNetwork = fixedNet.get();
auto fixedExec = makeShardingTestExecutor(std::move(fixedNet));
_networkTestEnv = stdx::make_unique<NetworkTestEnv>(fixedExec.get(), _mockNetwork);
@@ -133,7 +126,7 @@ void ShardingTestFixture::setUp() {
auto netForPool = stdx::make_unique<executor::NetworkInterfaceMock>();
netForPool->setEgressMetadataHook(
- stdx::make_unique<rpc::ShardingEgressMetadataHookForMongos>(serviceContext()));
+ stdx::make_unique<rpc::ShardingEgressMetadataHookForMongos>(service));
auto _mockNetworkForPool = netForPool.get();
auto execForPool = makeShardingTestExecutor(std::move(netForPool));
_networkTestEnvForPool =
@@ -186,15 +179,15 @@ void ShardingTestFixture::setUp() {
auto shardRegistry(stdx::make_unique<ShardRegistry>(std::move(shardFactory), configCS));
executorPool->startup();
- CatalogCacheLoader::set(serviceContext(), stdx::make_unique<ConfigServerCatalogCacheLoader>());
+ CatalogCacheLoader::set(service, stdx::make_unique<ConfigServerCatalogCacheLoader>());
// For now initialize the global grid object. All sharding objects will be accessible from there
// until we get rid of it.
Grid::get(operationContext())
->init(std::move(catalogClient),
- stdx::make_unique<CatalogCache>(CatalogCacheLoader::get(serviceContext())),
+ stdx::make_unique<CatalogCache>(CatalogCacheLoader::get(service)),
std::move(shardRegistry),
- stdx::make_unique<ClusterCursorManager>(serviceContext()->getPreciseClockSource()),
+ stdx::make_unique<ClusterCursorManager>(service->getPreciseClockSource()),
stdx::make_unique<BalancerConfiguration>(),
std::move(executorPool),
_mockNetwork);
@@ -241,12 +234,6 @@ RemoteCommandTargeterMock* ShardingTestFixture::configTargeter() const {
return _configTargeter;
}
-executor::NetworkInterfaceMock* ShardingTestFixture::network() const {
- invariant(_mockNetwork);
-
- return _mockNetwork;
-}
-
executor::TaskExecutor* ShardingTestFixture::executor() const {
invariant(_executor);
@@ -316,7 +303,7 @@ void ShardingTestFixture::expectGetShards(const std::vector<ShardType>& shards)
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
- vector<BSONObj> shardsToReturn;
+ std::vector<BSONObj> shardsToReturn;
std::transform(shards.begin(),
shards.end(),
@@ -498,7 +485,7 @@ void ShardingTestFixture::expectCount(const HostAndPort& configHost,
const StatusWith<long long>& response) {
onCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQUALS(configHost, request.target);
- string cmdName = request.cmdObj.firstElement().fieldName();
+ const std::string cmdName(request.cmdObj.firstElement().fieldName());
ASSERT_EQUALS("count", cmdName);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQUALS(expectedNs.toString(), nss.toString());
diff --git a/src/mongo/s/sharding_test_fixture.h b/src/mongo/s/sharding_router_test_fixture.h
index 49b28d46efa..bc17b831764 100644
--- a/src/mongo/s/sharding_test_fixture.h
+++ b/src/mongo/s/sharding_router_test_fixture.h
@@ -28,12 +28,7 @@
#pragma once
-#include <utility>
-
-#include "mongo/db/service_context.h"
-#include "mongo/executor/network_test_env.h"
-#include "mongo/transport/session.h"
-#include "mongo/unittest/unittest.h"
+#include "mongo/s/sharding_test_fixture_common.h"
namespace mongo {
@@ -44,42 +39,25 @@ class ShardingCatalogClientImpl;
struct ChunkVersion;
class CollectionType;
class DistLockManagerMock;
-class NamespaceString;
-class ShardFactoryMock;
class RemoteCommandTargeterFactoryMock;
class RemoteCommandTargeterMock;
class ShardRegistry;
class ShardType;
-template <typename T>
-class StatusWith;
-
-namespace executor {
-class NetworkInterfaceMock;
-class TaskExecutor;
-} // namespace executor
namespace transport {
class TransportLayerMock;
-} // namepsace transport
+} // namespace transport
/**
* Sets up the mocked out objects for testing the replica-set backed catalog manager and catalog
* client.
*/
-class ShardingTestFixture : public mongo::unittest::Test {
+class ShardingTestFixture : public unittest::Test, public ShardingTestFixtureCommon {
public:
ShardingTestFixture();
~ShardingTestFixture();
protected:
- static const Seconds kFutureTimeout;
-
- template <typename Lambda>
- executor::NetworkTestEnv::FutureHandle<typename std::result_of<Lambda()>::type> launchAsync(
- Lambda&& func) const {
- return _networkTestEnv->launchAsync(std::forward<Lambda>(func));
- }
-
ShardingCatalogClient* catalogClient() const;
/**
@@ -93,8 +71,6 @@ protected:
RemoteCommandTargeterMock* configTargeter() const;
- executor::NetworkInterfaceMock* network() const;
-
executor::TaskExecutor* executor() const;
DistLockManagerMock* distLock() const;
@@ -226,9 +202,7 @@ private:
RemoteCommandTargeterMock* _configTargeter;
// For the Grid's fixed executor.
- executor::NetworkInterfaceMock* _mockNetwork;
executor::TaskExecutor* _executor;
- std::unique_ptr<executor::NetworkTestEnv> _networkTestEnv;
// For the Grid's arbitrary executor in its executorPool.
std::unique_ptr<executor::NetworkTestEnv> _networkTestEnvForPool;
diff --git a/src/mongo/s/sharding_test_fixture_common.cpp b/src/mongo/s/sharding_test_fixture_common.cpp
new file mode 100644
index 00000000000..ebff5f0aceb
--- /dev/null
+++ b/src/mongo/s/sharding_test_fixture_common.cpp
@@ -0,0 +1,41 @@
+/**
+ * Copyright (C) 2018 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/s/sharding_test_fixture_common.h"
+
+namespace mongo {
+
+constexpr Seconds ShardingTestFixtureCommon::kFutureTimeout;
+
+ShardingTestFixtureCommon::ShardingTestFixtureCommon() = default;
+
+ShardingTestFixtureCommon::~ShardingTestFixtureCommon() = default;
+
+} // namespace mongo
diff --git a/src/mongo/s/sharding_test_fixture_common.h b/src/mongo/s/sharding_test_fixture_common.h
new file mode 100644
index 00000000000..88d9ebaa845
--- /dev/null
+++ b/src/mongo/s/sharding_test_fixture_common.h
@@ -0,0 +1,79 @@
+/**
+ * Copyright (C) 2018 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/operation_context.h"
+#include "mongo/db/service_context.h"
+#include "mongo/executor/network_test_env.h"
+#include "mongo/s/grid.h"
+#include "mongo/transport/session.h"
+#include "mongo/unittest/unittest.h"
+
+namespace mongo {
+
+namespace executor {
+class NetworkInterfaceMock;
+class TaskExecutor;
+} // namespace executor
+
+/**
+ * Contains common functionality and tools that apply to both mongos and mongod unit-tests.
+ */
+class ShardingTestFixtureCommon {
+public:
+ static constexpr Seconds kFutureTimeout{5};
+
+ ShardingTestFixtureCommon();
+ ~ShardingTestFixtureCommon();
+
+ template <typename Lambda>
+ executor::NetworkTestEnv::FutureHandle<typename std::result_of<Lambda()>::type> launchAsync(
+ Lambda&& func) const {
+ return _networkTestEnv->launchAsync(std::forward<Lambda>(func));
+ }
+
+ executor::NetworkInterfaceMock* network() const {
+ invariant(_mockNetwork);
+ return _mockNetwork;
+ }
+
+protected:
+ // Since a NetworkInterface is a private member of a TaskExecutor, we store a raw pointer to the
+ // fixed TaskExecutor's NetworkInterface here.
+ //
+ // TODO(Esha): Currently, some fine-grained synchronization of the network and task executor is
+ // outside of NetworkTestEnv's capabilities. If all control of the network is done through
+ // _networkTestEnv, storing this raw pointer is not necessary.
+ executor::NetworkInterfaceMock* _mockNetwork{nullptr};
+
+ // Allows for processing tasks through the NetworkInterfaceMock/ThreadPoolMock subsystem
+ std::unique_ptr<executor::NetworkTestEnv> _networkTestEnv;
+};
+
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/SConscript b/src/mongo/s/write_ops/SConscript
index 89401a022d6..0bfc32d72c6 100644
--- a/src/mongo/s/write_ops/SConscript
+++ b/src/mongo/s/write_ops/SConscript
@@ -31,11 +31,8 @@ env.Library(
'write_op.cpp',
],
LIBDEPS=[
- '$BUILD_DIR/mongo/client/connection_string',
'$BUILD_DIR/mongo/s/async_requests_sender',
- '$BUILD_DIR/mongo/s/client/sharding_client',
'$BUILD_DIR/mongo/s/commands/shared_cluster_commands',
- '$BUILD_DIR/mongo/s/coreshard',
'batch_write_types',
],
)
@@ -70,8 +67,8 @@ env.CppUnitTest(
'write_op_test.cpp',
],
LIBDEPS=[
- '$BUILD_DIR/mongo/db/service_context',
- '$BUILD_DIR/mongo/s/sharding_test_fixture',
+ '$BUILD_DIR/mongo/db/auth/authorization_manager_mock_init',
+ '$BUILD_DIR/mongo/s/sharding_router_test_fixture',
'cluster_write_op',
]
)
diff --git a/src/mongo/s/write_ops/batch_write_exec_test.cpp b/src/mongo/s/write_ops/batch_write_exec_test.cpp
index 045785754a0..e39bc0bc930 100644
--- a/src/mongo/s/write_ops/batch_write_exec_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec_test.cpp
@@ -33,7 +33,7 @@
#include "mongo/db/logical_session_id.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/s/write_ops/batch_write_exec.h"
#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h"