Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/serverless/shard_split_donor_service.cpp      | 19
-rw-r--r--  src/mongo/db/serverless/shard_split_donor_service_test.cpp | 34
-rw-r--r--  src/mongo/db/serverless/shard_split_utils.cpp              | 61
-rw-r--r--  src/mongo/db/serverless/shard_split_utils.h                |  6
-rw-r--r--  src/mongo/db/serverless/shard_split_utils_test.cpp         | 75
5 files changed, 192 insertions(+), 3 deletions(-)
diff --git a/src/mongo/db/serverless/shard_split_donor_service.cpp b/src/mongo/db/serverless/shard_split_donor_service.cpp
index 5c78aceaceb..d240f9c5496 100644
--- a/src/mongo/db/serverless/shard_split_donor_service.cpp
+++ b/src/mongo/db/serverless/shard_split_donor_service.cpp
@@ -55,6 +55,7 @@ namespace mongo {
namespace {
+MONGO_FAIL_POINT_DEFINE(pauseShardSplitBeforeBlocking);
MONGO_FAIL_POINT_DEFINE(pauseShardSplitAfterBlocking);
MONGO_FAIL_POINT_DEFINE(skipShardSplitWaitForSplitAcceptance);
MONGO_FAIL_POINT_DEFINE(pauseShardSplitBeforeRecipientCleanup);
@@ -358,6 +359,23 @@ SemiFuture<void> ShardSplitDonorService::DonorStateMachine::run(
return _completionPromise.getFuture().semi();
}
+ auto isConfigValidWithStatus = [&]() {
+ auto replCoord = repl::ReplicationCoordinator::get(cc().getServiceContext());
+ invariant(replCoord);
+ stdx::lock_guard<Latch> lg(_mutex);
+ return serverless::validateRecipientNodesForShardSplit(_stateDoc, replCoord->getConfig());
+ }();
+ if (!isConfigValidWithStatus.isOK()) {
+ LOGV2_ERROR(6395900,
+ "Failed to validate recipient nodes for shard split",
+ "id"_attr = _migrationId,
+ "error"_attr = isConfigValidWithStatus.reason());
+ _decisionPromise.emplaceValue(DurableState{ShardSplitDonorStateEnum::kCommitted});
+ _completionPromise.setFrom(
+ _decisionPromise.getFuture().semi().ignoreValue().unsafeToInlineFuture());
+ return _completionPromise.getFuture().semi();
+ }
+
_initiateTimeout(executor, abortToken);
LOGV2(6086506,
@@ -895,5 +913,4 @@ ExecutorFuture<void> ShardSplitDonorService::DonorStateMachine::_cleanRecipientS
.on(**executor, token)
.ignoreValue();
}
-
} // namespace mongo
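
Note: the validation added above runs inside an immediately invoked lambda so that _mutex is held only while the state document is read; the returned Status is inspected after the lock is released. A minimal standalone sketch of that idiom, using std::mutex in place of MongoDB's Latch and hypothetical names throughout:

    #include <mutex>
    #include <string>

    // Simplified stand-in for mongo::Status (illustrative only).
    struct Status {
        bool ok;
        std::string reason;
        bool isOK() const { return ok; }
    };

    std::mutex stateMutex;  // plays the role of _mutex

    Status validateLockedState() {
        // Pretend to read the protected state document and validate it.
        return Status{false, "recipient set name matches the local set name"};
    }

    int main() {
        // Immediately invoked lambda: the lock_guard lives only for the call,
        // so the lock is already released when the result is examined below.
        auto validationStatus = [&] {
            std::lock_guard<std::mutex> lg(stateMutex);
            return validateLockedState();
        }();

        return validationStatus.isOK() ? 0 : 1;
    }
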
diff --git a/src/mongo/db/serverless/shard_split_donor_service_test.cpp b/src/mongo/db/serverless/shard_split_donor_service_test.cpp
index a937dc6ec77..5cb62f8b513 100644
--- a/src/mongo/db/serverless/shard_split_donor_service_test.cpp
+++ b/src/mongo/db/serverless/shard_split_donor_service_test.cpp
@@ -303,11 +303,13 @@ TEST_F(ShardSplitDonorServiceTest, ShardSplitDonorServiceTimeout) {
// Abort scenario: abortSplit called before startSplit.
TEST_F(ShardSplitDonorServiceTest, CreateInstanceInAbortState) {
auto opCtx = makeOperationContext();
+ auto serviceContext = getServiceContext();
test::shard_split::ScopedTenantAccessBlocker scopedTenants(_tenantIds, opCtx.get());
+ test::shard_split::reconfigToAddRecipientNodes(
+ serviceContext, _recipientTagName, _replSet.getHosts(), _recipientSet.getHosts());
- auto stateDocument = ShardSplitDonorDocument::parse(
- {"donor.document"}, BSON("_id" << _uuid << "tenantIds" << _tenantIds));
+ auto stateDocument = defaultStateDocument();
stateDocument.setState(ShardSplitDonorStateEnum::kAborted);
auto serviceInstance = ShardSplitDonorService::DonorStateMachine::getOrCreate(
@@ -429,6 +431,34 @@ TEST_F(ShardSplitDonorServiceTest, DeleteStateDocMarkedGarbageCollectable) {
ErrorCodes::NoMatchingDocument);
}
+TEST_F(ShardSplitDonorServiceTest, AbortDueToRecipientNodesValidation) {
+ auto opCtx = makeOperationContext();
+ auto serviceContext = getServiceContext();
+ test::shard_split::ScopedTenantAccessBlocker scopedTenants(_tenantIds, opCtx.get());
+
+ // Match recipientSetName to the replSetName to fail validation and abort the shard split.
+ test::shard_split::reconfigToAddRecipientNodes(
+ serviceContext, _recipientTagName, _replSet.getHosts(), _recipientSet.getHosts());
+
+ auto stateDocument = defaultStateDocument();
+ stateDocument.setRecipientSetName("donor"_sd);
+
+ // Create and start the instance.
+ auto serviceInstance = ShardSplitDonorService::DonorStateMachine::getOrCreate(
+ opCtx.get(), _service, stateDocument.toBSON());
+ ASSERT(serviceInstance.get());
+ ASSERT_EQ(_uuid, serviceInstance->getId());
+
+ auto decisionFuture = serviceInstance->decisionFuture();
+
+ auto result = decisionFuture.get();
+
+ ASSERT_EQ(result.state, mongo::ShardSplitDonorStateEnum::kCommitted);
+
+ ASSERT_OK(serviceInstance->completionFuture().getNoThrow());
+ ASSERT_FALSE(serviceInstance->isGarbageCollectable());
+}
+
class SplitReplicaSetObserverTest : public ServiceContextTest {
public:
void setUp() override {
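
Note: the new test above drives the state machine purely through futures; it obtains decisionFuture(), blocks on get(), and asserts on the resolved state. A rough standalone model of that flow, using std::async in place of the PrimaryOnlyService executor (names are illustrative):

    #include <cassert>
    #include <future>

    enum class State { kCommitted, kAborted };

    int main() {
        // The service resolves its decision promise asynchronously; the test
        // simply blocks on the corresponding future and asserts the outcome.
        std::future<State> decisionFuture =
            std::async(std::launch::async, [] { return State::kCommitted; });

        State result = decisionFuture.get();  // blocks until a decision is made
        assert(result == State::kCommitted);
        return 0;
    }
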
diff --git a/src/mongo/db/serverless/shard_split_utils.cpp b/src/mongo/db/serverless/shard_split_utils.cpp
index 16e9b7f5a59..00910164810 100644
--- a/src/mongo/db/serverless/shard_split_utils.cpp
+++ b/src/mongo/db/serverless/shard_split_utils.cpp
@@ -39,6 +39,9 @@
namespace mongo {
namespace serverless {
+
+constexpr size_t kMinimumRequiredRecipientNodes = 3;
+
std::vector<repl::MemberConfig> getRecipientMembers(const repl::ReplSetConfig& config,
const StringData& recipientTagName) {
std::vector<repl::MemberConfig> result;
@@ -238,6 +241,64 @@ bool shouldRemoveStateDocumentOnRecipient(OperationContext* opCtx,
stateDocument.getState() == ShardSplitDonorStateEnum::kBlocking;
}
+Status validateRecipientNodesForShardSplit(const ShardSplitDonorDocument& stateDocument,
+ const repl::ReplSetConfig& localConfig) {
+ if (stateDocument.getState() == ShardSplitDonorStateEnum::kAborted) {
+ return Status::OK();
+ }
+
+ auto recipientSetName = stateDocument.getRecipientSetName();
+ auto recipientTagName = stateDocument.getRecipientTagName();
+ uassert(6395901, "Missing recipientTagName when validating recipient nodes.", recipientTagName);
+ uassert(6395902, "Missing recipientSetName when validating recipient nodes.", recipientSetName);
+
+ if (*recipientSetName == localConfig.getReplSetName()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Recipient set name '" << *recipientSetName << "' and local set name '"
+ << localConfig.getReplSetName() << "' must be different.");
+ }
+
+ auto recipientNodes = getRecipientMembers(localConfig, *recipientTagName);
+ if (recipientNodes.size() < kMinimumRequiredRecipientNodes) {
+ return Status(ErrorCodes::InvalidReplicaSetConfig,
+ str::stream() << "Local set config has " << recipientNodes.size()
+ << " recipient nodes; at least "
+ << kMinimumRequiredRecipientNodes << " are required.");
+ }
+
+ stdx::unordered_set<std::string> uniqueTagValues;
+ const auto& tagConfig = localConfig.getTagConfig();
+ for (const auto& member : recipientNodes) {
+ for (repl::MemberConfig::TagIterator it = member.tagsBegin(); it != member.tagsEnd();
+ ++it) {
+ if (tagConfig.getTagKey(*it) == *recipientTagName) {
+ auto tagValue = tagConfig.getTagValue(*it);
+ if (!uniqueTagValues.insert(tagValue).second) {
+ return Status(ErrorCodes::InvalidOptions,
+ str::stream() << "Local member '" << member.getId().toString()
+ << "' does not have a unique value for the tag '"
+ << *recipientTagName << "'. Current value is '"
+ << tagValue << "'.");
+ }
+ }
+ }
+ }
+
+ bool allRecipientNodesNonVoting =
+ std::none_of(recipientNodes.cbegin(), recipientNodes.cend(), [&](const auto& member) {
+ return member.isVoter() || member.getPriority() != 0;
+ });
+
+ if (!allRecipientNodesNonVoting) {
+ return Status(ErrorCodes::InvalidOptions,
+ str::stream() << "Local members tagged with '" << *recipientTagName
+ << "' must be non-voting and with a priority set to 0.");
+ }
+
+ return Status::OK();
+}
+
RecipientAcceptSplitListener::RecipientAcceptSplitListener(
const ConnectionString& recipientConnectionString)
: _numberOfRecipient(recipientConnectionString.getServers().size()),
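
Note: the uniqueness check in validateRecipientNodesForShardSplit relies on std::unordered_set::insert returning a pair whose second member is false when the value was already present. A self-contained sketch of that duplicate-detection idiom, with plain strings standing in for the tag-config lookups:

    #include <iostream>
    #include <string>
    #include <unordered_set>
    #include <vector>

    int main() {
        // Tag values collected from the recipient-tagged members (illustrative data).
        std::vector<std::string> recipientTagValues{"one", "two", "one"};

        std::unordered_set<std::string> uniqueTagValues;
        for (const auto& value : recipientTagValues) {
            // insert(...).second is false when the value was already present.
            if (!uniqueTagValues.insert(value).second) {
                std::cerr << "duplicate recipient tag value: " << value << '\n';
                return 1;  // the real function returns ErrorCodes::InvalidOptions here
            }
        }
        std::cout << "all recipient tag values are unique\n";
        return 0;
    }
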
diff --git a/src/mongo/db/serverless/shard_split_utils.h b/src/mongo/db/serverless/shard_split_utils.h
index 19978fba1da..e896f9f97cf 100644
--- a/src/mongo/db/serverless/shard_split_utils.h
+++ b/src/mongo/db/serverless/shard_split_utils.h
@@ -118,6 +118,12 @@ bool shouldRemoveStateDocumentOnRecipient(OperationContext* opCtx,
const ShardSplitDonorDocument& stateDocument);
/**
+ * Returns Status::OK() if validation of the recipient nodes in the local config succeeds;
+ * otherwise returns an error status describing the cause of the failure.
+ */
+Status validateRecipientNodesForShardSplit(const ShardSplitDonorDocument& stateDocument,
+ const repl::ReplSetConfig& localConfig);
+/**
* Listener that receives heartbeat events and fulfills a future once it sees the expected number
* of nodes in the recipient replica set to monitor.
*/
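
Note: one rule the declared validator enforces (see the .cpp hunk above) is that every recipient-tagged member is non-voting with priority 0, checked via std::none_of. A standalone model of that predicate, using a hypothetical simplified Member struct rather than repl::MemberConfig:

    #include <algorithm>
    #include <vector>

    // Hypothetical simplified member; repl::MemberConfig carries much more state.
    struct Member {
        int votes = 0;
        double priority = 0.0;
        bool isVoter() const { return votes > 0; }
    };

    bool allRecipientNodesNonVoting(const std::vector<Member>& recipientNodes) {
        // True only when no recipient member votes or has a nonzero priority.
        return std::none_of(recipientNodes.cbegin(), recipientNodes.cend(), [](const Member& m) {
            return m.isVoter() || m.priority != 0;
        });
    }

    int main() {
        std::vector<Member> valid{{0, 0.0}, {0, 0.0}, {0, 0.0}};
        std::vector<Member> invalid{{1, 0.0}, {0, 0.0}, {0, 0.0}};
        return (allRecipientNodesNonVoting(valid) && !allRecipientNodesNonVoting(invalid)) ? 0 : 1;
    }
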
diff --git a/src/mongo/db/serverless/shard_split_utils_test.cpp b/src/mongo/db/serverless/shard_split_utils_test.cpp
index 98d0c80b3aa..736e6b90f88 100644
--- a/src/mongo/db/serverless/shard_split_utils_test.cpp
+++ b/src/mongo/db/serverless/shard_split_utils_test.cpp
@@ -173,6 +173,81 @@ TEST(MakeSplitConfig, SplitConfigAssertionsTest) {
AssertionException,
6201802 /*no donor members created*/);
}
+
+TEST(MakeSplitConfig, RecipientConfigValidationTest) {
+ std::vector<std::string> tenantIds = {"tenant1", "tenantAB"};
+ std::string recipientSetName{"recipientSetName"};
+ const std::string recipientTagName{"recipient"};
+ const std::string donorConfigSetName{"rs0"};
+ const std::string recipientConfigSetName{"newSet"};
+
+ auto statedoc = ShardSplitDonorDocument::parse(
+ {"donor.document"},
+ BSON("_id" << UUID::gen() << "tenantIds" << tenantIds << "recipientTagName"
+ << recipientTagName << "recipientSetName" << recipientSetName));
+
+ auto makeConfig = [&](auto setName, bool shouldVote, bool uniqueTagValue) {
+ auto vote = shouldVote ? 1 : 0;
+ return ReplSetConfig::parse(BSON(
+ "_id"
+ << setName << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(
+ BSON("_id" << 0 << "host"
+ << "localhost:20001"
+ << "priority" << 0 << "votes" << vote << "tags"
+ << BSON(recipientTagName
+ << (uniqueTagValue ? UUID::gen().toString() : "") + "one"))
+ << BSON("_id" << 1 << "host"
+ << "localhost:20002"
+ << "priority" << 0 << "votes" << vote << "tags"
+ << BSON(recipientTagName
+ << (uniqueTagValue ? UUID::gen().toString() : "") + "one"))
+ << BSON(
+ "_id" << 2 << "host"
+ << "localhost:20003"
+ << "priority" << 0 << "votes" << vote << "tags"
+ << BSON(recipientTagName
+ << (uniqueTagValue ? UUID::gen().toString() : "") + "one")))
+ << "settings" << BSON("electionTimeoutMillis" << 1000)));
+ };
+
+ // Test we fail because recipientSetName == localConfig.getReplSetName().
+ ReplSetConfig config = makeConfig(recipientSetName, false, true);
+ ASSERT_EQ(serverless::validateRecipientNodesForShardSplit(statedoc, config).code(),
+ ErrorCodes::BadValue);
+
+ // Test we fail here with insufficient recipient member nodes.
+ config = ReplSetConfig::parse(
+ BSON("_id" << donorConfigSetName << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:20001"
+ << "priority" << 0 << "votes" << 0 << "tags"
+ << BSON(recipientTagName << "one"))
+ << BSON("_id" << 1 << "host"
+ << "localhost:20002"
+ << "priority" << 0 << "votes" << 0 << "tags"
+ << BSON(recipientTagName << "one")))
+ << "settings" << BSON("electionTimeoutMillis" << 1000)));
+ ASSERT_EQ(serverless::validateRecipientNodesForShardSplit(statedoc, config).code(),
+ ErrorCodes::InvalidReplicaSetConfig);
+
+ // Test we fail since recipient tags don't have unique values.
+ config = makeConfig(donorConfigSetName, false, false);
+ ASSERT_EQ(serverless::validateRecipientNodesForShardSplit(statedoc, config).code(),
+ ErrorCodes::InvalidOptions);
+
+ // Test we fail since recipient nodes must be non-voting.
+ config = makeConfig(donorConfigSetName, true, true);
+ ASSERT_EQ(serverless::validateRecipientNodesForShardSplit(statedoc, config).code(),
+ ErrorCodes::InvalidOptions);
+
+ config = makeConfig(donorConfigSetName, false, true);
+ ASSERT_OK(serverless::validateRecipientNodesForShardSplit(statedoc, config));
+}
+
} // namespace
} // namespace repl
} // namespace mongo
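
Note: the makeConfig helper in the last test is a small parameterized factory: each knob (setName, shouldVote, uniqueTagValue) targets exactly one validation rule, so every failure case flips a single parameter. A plain-struct sketch of that pattern, with hypothetical types rather than ReplSetConfig:

    #include <string>
    #include <vector>

    struct Member {
        std::string host;
        int votes;
        std::string recipientTagValue;
    };

    struct Config {
        std::string setName;
        std::vector<Member> members;
    };

    // One factory, three knobs; each knob exercises a single validation branch.
    Config makeConfig(const std::string& setName, bool shouldVote, bool uniqueTagValue) {
        Config config{setName, {}};
        for (int i = 1; i <= 3; ++i) {
            config.members.push_back({"localhost:2000" + std::to_string(i),
                                      shouldVote ? 1 : 0,
                                      (uniqueTagValue ? std::to_string(i) : "") + "one"});
        }
        return config;
    }

    int main() {
        auto failsSetNameCheck = makeConfig("recipientSetName", false, true);  // BadValue
        auto failsVotingCheck = makeConfig("rs0", true, true);                 // InvalidOptions
        auto failsUniqueTagCheck = makeConfig("rs0", false, false);            // InvalidOptions
        auto validConfig = makeConfig("rs0", false, true);                     // OK
        (void)failsSetNameCheck;
        (void)failsVotingCheck;
        (void)failsUniqueTagCheck;
        (void)validConfig;
        return 0;
    }
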