author     Matthew Russotto <matthew.russotto@mongodb.com>   2020-09-15 14:48:09 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-09-15 22:22:28 +0000
commit     3ea1720a10ebac984a3d117cab721058bb7f170e
tree       183977be7f614a448be5a361d31cb7c8799bbd9a
parent     fbeae95f75fa02164810e89811cf2b56c0b7170a
download   mongo-3ea1720a10ebac984a3d117cab721058bb7f170e.tar.gz
SERVER-48804 Connection initiation in MigrationServiceInstance
-rw-r--r--  src/mongo/db/repl/SConscript                                     4
-rw-r--r--  src/mongo/db/repl/tenant_migration_recipient_service.cpp       129
-rw-r--r--  src/mongo/db/repl/tenant_migration_recipient_service.h          46
-rw-r--r--  src/mongo/db/repl/tenant_migration_recipient_service_test.cpp  239
4 files changed, 407 insertions(+), 11 deletions(-)
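
The diff below wires donor-connection setup into the recipient service's run()
chain, gated by three new fail points. As a reading aid, here is a condensed
sketch of the test pattern this commit adds; it uses only names that appear in
tenant_migration_recipient_service_test.cpp further down (`_service` is the
test fixture's PrimaryOnlyService pointer):

    // Condensed from the connection tests below; not additional code in the commit.
    FailPointEnableBlock fp("stopAfterConnectingTenantMigrationRecipientInstance");
    MockReplicaSet replSet("donorSet", 3, true /* hasPrimary */, true /* dollarPrefixHosts */);
    TenantMigrationRecipientDocument stateDoc(
        UUID::gen(),
        replSet.getConnectionString(),
        "tenantA",
        ReadPreferenceSetting(ReadPreference::PrimaryOnly));
    auto instance = TenantMigrationRecipientService::Instance::getOrCreate(
        _service, stateDoc.toBSON());
    // run() converts the fail point's deliberate early stop into Status::OK().
    ASSERT_OK(instance->getCompletionFuture().getNoThrow());
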
diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript
index 0ef88b0a410..dd28cc7cf14 100644
--- a/src/mongo/db/repl/SConscript
+++ b/src/mongo/db/repl/SConscript
@@ -1283,11 +1283,13 @@ env.Library(
'tenant_migration_recipient_service.cpp',
],
LIBDEPS=[
+ '$BUILD_DIR/mongo/client/read_preference',
'primary_only_service',
'tenant_migration_recipient_utils',
'wait_for_majority_service',
],
LIBDEPS_PRIVATE=[
+ '$BUILD_DIR/mongo/client/clientdriver_network',
'tenant_migration_state_machine_idl',
]
)
@@ -1461,6 +1463,7 @@ env.CppUnitTest(
LIBDEPS=[
'$BUILD_DIR/mongo/base',
'$BUILD_DIR/mongo/bson/mutable/mutable_bson',
+ '$BUILD_DIR/mongo/client/replica_set_monitor_protocol_test_util',
'$BUILD_DIR/mongo/db/auth/authmocks',
'$BUILD_DIR/mongo/db/auth/authorization_manager_global',
'$BUILD_DIR/mongo/db/catalog_raii',
@@ -1702,6 +1705,7 @@ env.Library(
'replica_set_aware_service',
],
LIBDEPS_PRIVATE=[
+ '$BUILD_DIR/mongo/db/db_raii',
'$BUILD_DIR/mongo/db/dbdirectclient',
'$BUILD_DIR/mongo/db/logical_time_metadata_hook',
'$BUILD_DIR/mongo/executor/connection_pool_executor',
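
On the LIBDEPS versus LIBDEPS_PRIVATE split in the first hunk: the new header
holds a ReadPreferenceSetting by value, which is presumably why read_preference
is a public dependency, while DBClientConnection is only forward-declared in
the header and fully used in the .cpp, letting clientdriver_network stay
private. A minimal sketch of that forward-declaration pattern (illustrative
only, not code from this commit):

    // holder.h -- the header exposes only a pointer to the incomplete type.
    #include <memory>
    class DBClientConnection;  // forward declaration; no client headers needed here
    class Holder {
    public:
        Holder();
        ~Holder();  // defined in the .cpp, where DBClientConnection is complete
    private:
        std::unique_ptr<DBClientConnection> _client;
    };
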
diff --git a/src/mongo/db/repl/tenant_migration_recipient_service.cpp b/src/mongo/db/repl/tenant_migration_recipient_service.cpp
index 02b0f7be1ab..9d2461b8360 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_service.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_service.cpp
@@ -31,7 +31,12 @@
#include "mongo/platform/basic.h"
+#include "mongo/base/checked_cast.h"
+#include "mongo/client/dbclient_connection.h"
+#include "mongo/client/replica_set_monitor.h"
+#include "mongo/client/replica_set_monitor_manager.h"
#include "mongo/db/client.h"
+#include "mongo/db/commands/test_commands_enabled.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/repl_client_info.h"
@@ -48,6 +53,9 @@ namespace repl {
// Fails before waiting for the state doc to be majority replicated.
MONGO_FAIL_POINT_DEFINE(failWhilePersistingTenantMigrationRecipientInstanceStateDoc);
+MONGO_FAIL_POINT_DEFINE(stopAfterPersistingTenantMigrationRecipientInstanceStateDoc);
+MONGO_FAIL_POINT_DEFINE(stopAfterConnectingTenantMigrationRecipientInstance);
+MONGO_FAIL_POINT_DEFINE(setTenantMigrationRecipientInstanceHostTimeout);
TenantMigrationRecipientService::TenantMigrationRecipientService(ServiceContext* serviceContext)
: PrimaryOnlyService(serviceContext) {}
@@ -71,13 +79,110 @@ std::shared_ptr<PrimaryOnlyService::Instance> TenantMigrationRecipientService::c
}
TenantMigrationRecipientService::Instance::Instance(BSONObj stateDoc)
- : PrimaryOnlyService::TypedInstance<Instance>() {
- _stateDoc = TenantMigrationRecipientDocument::parse(IDLParserErrorContext("recipientStateDoc"),
- stateDoc);
+ : PrimaryOnlyService::TypedInstance<Instance>(),
+ _stateDoc(TenantMigrationRecipientDocument::parse(IDLParserErrorContext("recipientStateDoc"),
+ stateDoc)),
+ _tenantId(_stateDoc.getDatabasePrefix().toString()),
+ _migrationUuid(_stateDoc.getId()),
+ _donorConnectionString(_stateDoc.getDonorConnectionString().toString()),
+ _readPreference(_stateDoc.getReadPreference()) {}
+
+std::unique_ptr<DBClientConnection> TenantMigrationRecipientService::Instance::_connectAndAuth(
+ const HostAndPort& serverAddress, StringData applicationName, BSONObj authParams) {
+ std::string errMsg;
+ auto clientBase = ConnectionString(serverAddress).connect(applicationName, errMsg);
+ if (!clientBase) {
+ LOGV2_ERROR(4880400,
+ "Failed to connect to migration donor",
+ "tenantId"_attr = getTenantId(),
+ "migrationId"_attr = getMigrationUUID(),
+ "serverAddress"_attr = serverAddress,
+ "applicationName"_attr = applicationName,
+ "error"_attr = errMsg);
+ uasserted(ErrorCodes::HostNotFound, errMsg);
+ }
+ // ConnectionString::connect() returns a std::unique_ptr<DBClientBase>, but the
+ // object it points to is always a DBClientConnection, so the checked_cast is safe.
+ std::unique_ptr<DBClientConnection> client(
+ checked_cast<DBClientConnection*>(clientBase.release()));
+ if (!authParams.isEmpty()) {
+ client->auth(authParams);
+ } else {
+ // Tenant migration in production should always require auth.
+ uassert(4880405, "No auth data provided to tenant migration", getTestCommandsEnabled());
+ }
+
+ return client;
}
+SemiFuture<void> TenantMigrationRecipientService::Instance::_createAndConnectClients() {
+ LOGV2_DEBUG(4880401,
+ 1,
+ "Recipient migration service connecting clients",
+ "tenantId"_attr = getTenantId(),
+ "migrationId"_attr = getMigrationUUID(),
+ "connectionString"_attr = _donorConnectionString,
+ "readPreference"_attr = _readPreference,
+ "authParams"_attr = redact(_authParams));
+ auto connectionStringWithStatus = ConnectionString::parse(_donorConnectionString);
+ if (!connectionStringWithStatus.isOK()) {
+ LOGV2_ERROR(4880403,
+ "Failed to parse connection string",
+ "tenantId"_attr = getTenantId(),
+ "migrationId"_attr = getMigrationUUID(),
+ "connectionString"_attr = _donorConnectionString,
+ "error"_attr = connectionStringWithStatus.getStatus());
+
+ return SemiFuture<void>::makeReady(connectionStringWithStatus.getStatus());
+ }
+ auto connectionString = std::move(connectionStringWithStatus.getValue());
+ const auto& servers = connectionString.getServers();
+ stdx::lock_guard lk(_mutex);
+ _donorReplicaSetMonitor = ReplicaSetMonitor::createIfNeeded(
+ connectionString.getSetName(), std::set<HostAndPort>(servers.begin(), servers.end()));
+ Milliseconds findHostTimeout = ReplicaSetMonitorInterface::kDefaultFindHostTimeout;
+ setTenantMigrationRecipientInstanceHostTimeout.execute([&](const BSONObj& data) {
+ findHostTimeout = Milliseconds(data["findHostTimeoutMillis"].safeNumberLong());
+ });
+ return _donorReplicaSetMonitor->getHostOrRefresh(_readPreference, findHostTimeout)
+ .thenRunOn(**_scopedExecutor)
+ .then([this](const HostAndPort& serverAddress) {
+ stdx::lock_guard lk(_mutex);
+ auto applicationName =
+ "TenantMigrationRecipient_" + getTenantId() + "_" + getMigrationUUID().toString();
+ _client = _connectAndAuth(serverAddress, applicationName, _authParams);
+
+ applicationName += "_fetcher";
+ _oplogFetcherClient = _connectAndAuth(serverAddress, applicationName, _authParams);
+ })
+ .onError([this](const Status& status) {
+ LOGV2_ERROR(4880404,
+ "Connecting to donor failed",
+ "tenantId"_attr = getTenantId(),
+ "migrationId"_attr = getMigrationUUID(),
+ "error"_attr = status);
+
+ // Make sure we don't end up with a partially initialized set of connections.
+ stdx::lock_guard lk(_mutex);
+ _client = nullptr;
+ _oplogFetcherClient = nullptr;
+ return status;
+ })
+ .semi();
+}
+
+namespace {
+constexpr std::int32_t stopFailPointErrorCode = 4880402;
+void stopOnFailPoint(FailPoint* fp) {
+ uassert(stopFailPointErrorCode,
+ "Skipping remaining processing due to fail point",
+ MONGO_likely(!fp->shouldFail()));
+}
+} // namespace
+
SemiFuture<void> TenantMigrationRecipientService::Instance::run(
std::shared_ptr<executor::ScopedTaskExecutor> executor) noexcept {
+ _scopedExecutor = executor;
return ExecutorFuture(**executor)
.then([this]() -> SharedSemiFuture<void> {
auto uniqueOpCtx = cc().makeOperationContext();
@@ -128,23 +233,33 @@ SemiFuture<void> TenantMigrationRecipientService::Instance::run(
.waitUntilMajority(lastOpAfterRun);
})
.then([this] {
+ stopOnFailPoint(&stopAfterPersistingTenantMigrationRecipientInstanceStateDoc);
+ return _createAndConnectClients();
+ })
+ .then([this] {
+ stopOnFailPoint(&stopAfterConnectingTenantMigrationRecipientInstance);
// TODO SERVER-48808: Run cloners in MigrationServiceInstance
// TODO SERVER-48811: Oplog fetching in MigrationServiceInstance
})
.onCompletion([this](Status status) {
LOGV2(4878501,
"Tenant Recipient data sync completed.",
+ "tenantId"_attr = getTenantId(),
"migrationId"_attr = getMigrationUUID(),
- "dbPrefix"_attr = _stateDoc.getDatabasePrefix(),
- "status"_attr = status);
+ "error"_attr = status);
+ if (status.code() == stopFailPointErrorCode)
+ return Status::OK();
return status;
})
.semi();
}
const UUID& TenantMigrationRecipientService::Instance::getMigrationUUID() const {
- stdx::lock_guard lk(_mutex);
- return _stateDoc.getId();
+ return _migrationUuid;
+}
+
+const std::string& TenantMigrationRecipientService::Instance::getTenantId() const {
+ return _tenantId;
}
bool TenantMigrationRecipientService::Instance::isMarkedForGarbageCollect() const {
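
The heart of the .cpp change is _createAndConnectClients(): parse the donor
connection string, ensure a ReplicaSetMonitor for the donor set, resolve a host
that satisfies the stored read preference, then open two authenticated
connections. A distilled sketch of that flow, with locking, logging, and the
fail-point timeout override elided and parse failure condensed to a throw (a
reading aid, not separate code):

    auto connStr = uassertStatusOK(ConnectionString::parse(_donorConnectionString));
    const auto& servers = connStr.getServers();
    _donorReplicaSetMonitor = ReplicaSetMonitor::createIfNeeded(
        connStr.getSetName(), std::set<HostAndPort>(servers.begin(), servers.end()));
    return _donorReplicaSetMonitor
        ->getHostOrRefresh(_readPreference,
                           ReplicaSetMonitorInterface::kDefaultFindHostTimeout)
        .thenRunOn(**_scopedExecutor)
        .then([this](const HostAndPort& host) {
            // One connection for the cloners and general commands, and a second,
            // separate connection reserved for the exhaust-based oplog fetcher.
            auto appName = "TenantMigrationRecipient_" + getTenantId() + "_" +
                getMigrationUUID().toString();
            _client = _connectAndAuth(host, appName, _authParams);
            _oplogFetcherClient = _connectAndAuth(host, appName + "_fetcher", _authParams);
        })
        .semi();
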
diff --git a/src/mongo/db/repl/tenant_migration_recipient_service.h b/src/mongo/db/repl/tenant_migration_recipient_service.h
index c57fdb1a062..91cc62bf07d 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_service.h
+++ b/src/mongo/db/repl/tenant_migration_recipient_service.h
@@ -37,7 +37,9 @@
namespace mongo {
+class DBClientConnection;
class OperationContext;
+class ReplicaSetMonitor;
class ServiceContext;
namespace repl {
@@ -79,15 +81,57 @@ public:
const UUID& getMigrationUUID() const;
/*
+ * Returns the tenant id (database prefix).
+ */
+ const std::string& getTenantId() const;
+
+ /*
* Returns true if the instance state doc is marked for garbage collect.
*/
bool isMarkedForGarbageCollect() const;
+
private:
- // Protects below data members.
+ friend class TenantMigrationRecipientServiceTest;
+
+ /**
+ * Creates a client, connects it to the donor, and authenticates it if authParams is
+ * non-empty. Throws a user assertion on failure.
+ */
+ std::unique_ptr<DBClientConnection> _connectAndAuth(const HostAndPort& serverAddress,
+ StringData applicationName,
+ BSONObj authParams);
+
+ /**
+ * Creates and connects both the oplog fetcher client and the client used for other
+ * operations.
+ */
+ SemiFuture<void> _createAndConnectClients();
+
+ std::shared_ptr<executor::ScopedTaskExecutor> _scopedExecutor;
+
+ // Protects below non-const data members.
mutable Mutex _mutex = MONGO_MAKE_LATCH("TenantMigrationRecipientService::_mutex");
TenantMigrationRecipientDocument _stateDoc;
+
+ // This data is provided in the initial state doc and never changes. We keep copies to
+ // avoid having to obtain the mutex to access them.
+ const std::string _tenantId;
+ const UUID _migrationUuid;
+ const std::string _donorConnectionString;
+ const ReadPreferenceSetting _readPreference;
+ // TODO(SERVER-50670): Populate authParams
+ const BSONObj _authParams;
+
+ std::shared_ptr<ReplicaSetMonitor> _donorReplicaSetMonitor;
+
+ // Because the cloners and oplog fetcher use exhaust, we need a separate connection for
+ // each. The '_client' will be used for the cloners and other operations such as fetching
+ // optimes while the '_oplogFetcherClient' will be reserved for the oplog fetcher only.
+ std::unique_ptr<DBClientConnection> _client;
+ std::unique_ptr<DBClientConnection> _oplogFetcherClient;
};
};
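
The header also captures a small concurrency design point: fields copied out of
the initial state doc are const and set once in the constructor, so their
getters (getMigrationUUID(), getTenantId()) no longer need to take _mutex. A
self-contained illustration of the pattern (illustrative, not mongo code):

    #include <mutex>
    #include <string>

    class Example {
    public:
        explicit Example(std::string tenantId) : _tenantId(std::move(tenantId)) {}

        // Safe without locking: _tenantId is const and fixed at construction.
        const std::string& tenantId() const { return _tenantId; }

        void setState(std::string s) {
            std::lock_guard<std::mutex> lk(_mutex);  // mutable state still needs the lock
            _state = std::move(s);
        }

    private:
        const std::string _tenantId;  // never changes; no mutex required
        std::mutex _mutex;            // protects _state only
        std::string _state;
    };
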
diff --git a/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp b/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
index 8e9994bd16a..27e7d77f9c8 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
@@ -29,6 +29,9 @@
#include <memory>
+#include "mongo/client/connpool.h"
+#include "mongo/client/replica_set_monitor.h"
+#include "mongo/client/replica_set_monitor_protocol_test_util.h"
#include "mongo/db/client.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/op_observer_impl.h"
@@ -41,23 +44,29 @@
#include "mongo/db/repl/tenant_migration_state_machine_gen.h"
#include "mongo/db/repl/wait_for_majority_service.h"
#include "mongo/db/service_context_d_test_fixture.h"
+#include "mongo/dbtests/mock/mock_conn_registry.h"
+#include "mongo/dbtests/mock/mock_replica_set.h"
#include "mongo/executor/network_interface.h"
#include "mongo/executor/network_interface_factory.h"
#include "mongo/executor/thread_pool_task_executor.h"
#include "mongo/rpc/metadata/egress_metadata_hook_list.h"
+#include "mongo/unittest/log_test.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/fail_point.h"
#include "mongo/util/future.h"
-using namespace mongo;
-using namespace mongo::repl;
+namespace mongo {
+namespace repl {
class TenantMigrationRecipientServiceTest : public ServiceContextMongoDTest {
public:
void setUp() override {
ServiceContextMongoDTest::setUp();
auto serviceContext = getServiceContext();
+ // Only the ReplicaSetMonitor scanning protocol supports mock connections.
+ ReplicaSetMonitorProtocolTestUtil::setRSMProtocol(ReplicaSetMonitorProtocol::kScanning);
+ ConnectionString::setConnectionHook(mongo::MockConnRegistry::get()->getConnStrHook());
WaitForMajorityService::get(getServiceContext()).setUp(getServiceContext());
@@ -95,6 +104,10 @@ public:
_registry->onShutdown();
_service = nullptr;
+ // Clearing the connection pool is necessary when doing tests which use the
+ // ReplicaSetMonitor. See src/mongo/dbtests/mock/mock_replica_set.h for details.
+ ScopedDbConnection::clearPool();
+ ReplicaSetMonitorProtocolTestUtil::resetRSMProtocol();
ServiceContextMongoDTest::tearDown();
}
@@ -123,10 +136,26 @@ protected:
PrimaryOnlyServiceRegistry* _registry;
PrimaryOnlyService* _service;
long long _term = 0;
+
+ // Accessors to the Instance class's private members, for use by the tests below.
+ DBClientConnection* getClient(const TenantMigrationRecipientService::Instance* instance) const {
+ return instance->_client.get();
+ }
+
+ DBClientConnection* getOplogFetcherClient(
+ const TenantMigrationRecipientService::Instance* instance) const {
+ return instance->_oplogFetcherClient.get();
+ }
+
+private:
+ unittest::MinimumLoggedSeverityGuard _replicationSeverityGuard{
+ logv2::LogComponent::kReplication, logv2::LogSeverity::Debug(1)};
};
TEST_F(TenantMigrationRecipientServiceTest, BasicTenantMigrationRecipientServiceInstanceCreation) {
+ FailPointEnableBlock fp("stopAfterPersistingTenantMigrationRecipientInstanceStateDoc");
+
const UUID migrationUUID = UUID::gen();
TenantMigrationRecipientDocument TenantMigrationRecipientInstance(
@@ -166,4 +195,208 @@ TEST_F(TenantMigrationRecipientServiceTest, InstanceReportsErrorOnFailureWhilePe
// Should be able to see the instance task failure error.
auto status = instance->getCompletionFuture().getNoThrow();
ASSERT_EQ(ErrorCodes::NotWritablePrimary, status.code());
-}
\ No newline at end of file
+}
+
+TEST_F(TenantMigrationRecipientServiceTest, TenantMigrationRecipientConnection_Primary) {
+ FailPointEnableBlock fp("stopAfterConnectingTenantMigrationRecipientInstance");
+
+ const UUID migrationUUID = UUID::gen();
+
+ MockReplicaSet replSet("donorSet", 3, true /* hasPrimary */, true /* dollarPrefixHosts */);
+
+ TenantMigrationRecipientDocument TenantMigrationRecipientInstance(
+ migrationUUID,
+ replSet.getConnectionString(),
+ "tenantA",
+ ReadPreferenceSetting(ReadPreference::PrimaryOnly));
+
+ // Create and start the instance.
+ auto instance = TenantMigrationRecipientService::Instance::getOrCreate(
+ _service, TenantMigrationRecipientInstance.toBSON());
+ ASSERT(instance.get());
+
+ // Wait for task completion success.
+ ASSERT_OK(instance->getCompletionFuture().getNoThrow());
+
+ auto* client = getClient(instance.get());
+ auto* oplogFetcherClient = getOplogFetcherClient(instance.get());
+ // Both clients should be populated.
+ ASSERT(client);
+ ASSERT(oplogFetcherClient);
+
+ // Clients should be distinct.
+ ASSERT(client != oplogFetcherClient);
+
+ // Clients should be connected to primary.
+ auto primary = replSet.getHosts()[0].toString();
+ ASSERT_EQ(primary, client->getServerAddress());
+ ASSERT(client->isStillConnected());
+ ASSERT_EQ(primary, oplogFetcherClient->getServerAddress());
+ ASSERT(oplogFetcherClient->isStillConnected());
+}
+
+TEST_F(TenantMigrationRecipientServiceTest, TenantMigrationRecipientConnection_Secondary) {
+ FailPointEnableBlock fp("stopAfterConnectingTenantMigrationRecipientInstance");
+
+ const UUID migrationUUID = UUID::gen();
+
+ MockReplicaSet replSet("donorSet", 2, true /* hasPrimary */, true /* dollarPrefixHosts */);
+
+ TenantMigrationRecipientDocument TenantMigrationRecipientInstance(
+ migrationUUID,
+ replSet.getConnectionString(),
+ "tenantA",
+ ReadPreferenceSetting(ReadPreference::SecondaryOnly));
+
+ // Create and start the instance.
+ auto instance = TenantMigrationRecipientService::Instance::getOrCreate(
+ _service, TenantMigrationRecipientInstance.toBSON());
+ ASSERT(instance.get());
+
+ // Wait for task completion success.
+ ASSERT_OK(instance->getCompletionFuture().getNoThrow());
+
+ auto* client = getClient(instance.get());
+ auto* oplogFetcherClient = getOplogFetcherClient(instance.get());
+ // Both clients should be populated.
+ ASSERT(client);
+ ASSERT(oplogFetcherClient);
+
+ // Clients should be distinct.
+ ASSERT(client != oplogFetcherClient);
+
+ // Clients should be connected to secondary.
+ auto secondary = replSet.getHosts()[1].toString();
+ ASSERT_EQ(secondary, client->getServerAddress());
+ ASSERT(client->isStillConnected());
+ ASSERT_EQ(secondary, oplogFetcherClient->getServerAddress());
+ ASSERT(oplogFetcherClient->isStillConnected());
+}
+
+TEST_F(TenantMigrationRecipientServiceTest, TenantMigrationRecipientConnection_PrimaryFails) {
+ FailPointEnableBlock fp("stopAfterConnectingTenantMigrationRecipientInstance");
+ FailPointEnableBlock timeoutFp("setTenantMigrationRecipientInstanceHostTimeout",
+ BSON("findHostTimeoutMillis" << 100));
+
+ const UUID migrationUUID = UUID::gen();
+
+ MockReplicaSet replSet("donorSet", 3, true /* hasPrimary */, true /* dollarPrefixHosts */);
+ // Primary is unavailable.
+ replSet.kill(replSet.getHosts()[0].toString());
+
+ TenantMigrationRecipientDocument initialStateDocument(
+ migrationUUID,
+ replSet.getConnectionString(),
+ "tenantA",
+ ReadPreferenceSetting(ReadPreference::PrimaryOnly));
+
+ // Create and start the instance.
+ auto instance = TenantMigrationRecipientService::Instance::getOrCreate(
+ _service, initialStateDocument.toBSON());
+ ASSERT(instance.get());
+
+ // Keep scanning the replica set while waiting for task completion. Scanning would
+ // normally happen automatically, but automatic scans do not work with mock replica sets.
+ while (!instance->getCompletionFuture().isReady()) {
+ auto monitor = ReplicaSetMonitor::get(replSet.getSetName());
+ // Monitor may not have been created yet.
+ if (monitor) {
+ monitor->runScanForMockReplicaSet();
+ }
+ mongo::sleepmillis(50);
+ }
+ // Wait for task completion failure.
+ ASSERT_EQUALS(ErrorCodes::FailedToSatisfyReadPreference,
+ instance->getCompletionFuture().getNoThrow().code());
+
+ auto* client = getClient(instance.get());
+ auto* oplogFetcherClient = getOplogFetcherClient(instance.get());
+ // Neither client should be populated.
+ ASSERT_FALSE(client);
+ ASSERT_FALSE(oplogFetcherClient);
+}
+
+TEST_F(TenantMigrationRecipientServiceTest, TenantMigrationRecipientConnection_PrimaryFailsOver) {
+ FailPointEnableBlock fp("stopAfterConnectingTenantMigrationRecipientInstance");
+
+ const UUID migrationUUID = UUID::gen();
+
+ MockReplicaSet replSet("donorSet", 2, true /* hasPrimary */, true /* dollarPrefixHosts */);
+
+ // Primary is unavailable.
+ replSet.kill(replSet.getHosts()[0].toString());
+
+ TenantMigrationRecipientDocument initialStateDocument(
+ migrationUUID,
+ replSet.getConnectionString(),
+ "tenantA",
+ ReadPreferenceSetting(ReadPreference::PrimaryPreferred));
+
+ // Create and start the instance.
+ auto instance = TenantMigrationRecipientService::Instance::getOrCreate(
+ _service, initialStateDocument.toBSON());
+ ASSERT(instance.get());
+
+ // Wait for task completion success.
+ ASSERT_OK(instance->getCompletionFuture().getNoThrow());
+
+ auto* client = getClient(instance.get());
+ auto* oplogFetcherClient = getOplogFetcherClient(instance.get());
+ // Both clients should be populated.
+ ASSERT(client);
+ ASSERT(oplogFetcherClient);
+
+ // Clients should be distinct.
+ ASSERT(client != oplogFetcherClient);
+
+ // Clients should be connected to secondary.
+ auto secondary = replSet.getHosts()[1].toString();
+ ASSERT_EQ(secondary, client->getServerAddress());
+ ASSERT(client->isStillConnected());
+ ASSERT_EQ(secondary, oplogFetcherClient->getServerAddress());
+ ASSERT(oplogFetcherClient->isStillConnected());
+}
+
+TEST_F(TenantMigrationRecipientServiceTest, TenantMigrationRecipientConnection_BadConnectString) {
+ FailPointEnableBlock fp("stopAfterConnectingTenantMigrationRecipientInstance");
+
+ const UUID migrationUUID = UUID::gen();
+
+ TenantMigrationRecipientDocument initialStateDocument(
+ migrationUUID,
+ "broken,connect,string,no,set,name",
+ "tenantA",
+ ReadPreferenceSetting(ReadPreference::PrimaryOnly));
+
+ // Create and start the instance.
+ auto instance = TenantMigrationRecipientService::Instance::getOrCreate(
+ _service, initialStateDocument.toBSON());
+ ASSERT(instance.get());
+
+ // Wait for task completion failure.
+ ASSERT_EQUALS(ErrorCodes::FailedToParse, instance->getCompletionFuture().getNoThrow().code());
+}
+
+TEST_F(TenantMigrationRecipientServiceTest,
+ TenantMigrationRecipientConnection_NonSetConnectString) {
+ FailPointEnableBlock fp("stopAfterConnectingTenantMigrationRecipientInstance");
+
+ const UUID migrationUUID = UUID::gen();
+
+ TenantMigrationRecipientDocument initialStateDocument(
+ migrationUUID,
+ "localhost:12345",
+ "tenantA",
+ ReadPreferenceSetting(ReadPreference::PrimaryOnly));
+
+ // Create and start the instance.
+ auto instance = TenantMigrationRecipientService::Instance::getOrCreate(
+ _service, initialStateDocument.toBSON());
+ ASSERT(instance.get());
+
+ // Wait for task completion failure.
+ ASSERT_EQUALS(ErrorCodes::FailedToParse, instance->getCompletionFuture().getNoThrow().code());
+}
+
+} // namespace repl
+} // namespace mongo
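
The four connection tests above repeat the same block of client assertions. A
hypothetical fixture helper, not part of this commit, shows how they could be
folded together (getClient and getOplogFetcherClient are the fixture accessors
added in this diff):

    // Hypothetical fixture member; illustrative refactoring only.
    void assertClientsConnectedTo(const TenantMigrationRecipientService::Instance* instance,
                                  const std::string& expectedHost) {
        auto* client = getClient(instance);
        auto* oplogFetcherClient = getOplogFetcherClient(instance);
        ASSERT(client);                        // both clients populated
        ASSERT(oplogFetcherClient);
        ASSERT(client != oplogFetcherClient);  // distinct, since both sides use exhaust
        ASSERT_EQ(expectedHost, client->getServerAddress());
        ASSERT(client->isStillConnected());
        ASSERT_EQ(expectedHost, oplogFetcherClient->getServerAddress());
        ASSERT(oplogFetcherClient->isStillConnected());
    }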