Diffstat (limited to 'src')
 src/mongo/db/s/migration_chunk_cloner_source.h                |  3
 src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp       | 43
 src/mongo/db/s/migration_chunk_cloner_source_legacy.h         |  2
 src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp  |  8
 src/mongo/db/s/migration_destination_manager.cpp              | 85
 src/mongo/db/s/migration_destination_manager.h                |  3
 src/mongo/db/s/migration_source_manager.cpp                   | 78
 src/mongo/db/s/migration_util.cpp                             |  1
 src/mongo/db/s/move_chunk_command.cpp                         |  3
 src/mongo/db/s/start_chunk_clone_request.cpp                  | 30
 src/mongo/db/s/start_chunk_clone_request.h                    | 23
 src/mongo/util/uuid.h                                         |  1
 12 files changed, 195 insertions(+), 85 deletions(-)
diff --git a/src/mongo/db/s/migration_chunk_cloner_source.h b/src/mongo/db/s/migration_chunk_cloner_source.h
index c871a3e08a8..f59efc0f16b 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source.h
+++ b/src/mongo/db/s/migration_chunk_cloner_source.h
@@ -37,6 +37,7 @@ class BSONObj;
class OperationContext;
class Status;
class Timestamp;
+class UUID;
namespace repl {
class OpTime;
@@ -71,7 +72,7 @@ public:
* NOTE: Must be called without any locks and must succeed, before any other methods are called
* (except for cancelClone and [insert/update/delete]Op).
*/
- virtual Status startClone(OperationContext* opCtx) = 0;
+ virtual Status startClone(OperationContext* opCtx, const UUID& migrationId) = 0;
/**
* Blocking method, which uses some custom selected logic for deciding whether it is appropriate
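For context, the shape of the revised cloner interface after this change, as a minimal self-contained sketch (the `OperationContext`, `Status`, and `UUID` types below are simplified stand-ins, not the real mongo classes):

```cpp
#include <cstdint>

struct OperationContext {};                     // stand-in for mongo::OperationContext
struct Status { bool ok = true; };              // stand-in for mongo::Status
struct UUID { std::uint64_t hi = 0, lo = 0; };  // stand-in for mongo::UUID

class MigrationChunkClonerSource {
public:
    virtual ~MigrationChunkClonerSource() = default;

    // The donor now threads the migration's UUID through startClone so both
    // sides of the migration can tag their state with the same identifier.
    virtual Status startClone(OperationContext* opCtx, const UUID& migrationId) = 0;
};
```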
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index f8920c66800..5ba3a2068e7 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -236,7 +236,8 @@ MigrationChunkClonerSourceLegacy::~MigrationChunkClonerSourceLegacy() {
invariant(_state == kDone);
}
-Status MigrationChunkClonerSourceLegacy::startClone(OperationContext* opCtx) {
+Status MigrationChunkClonerSourceLegacy::startClone(OperationContext* opCtx,
+ const UUID& migrationId) {
invariant(_state == kNew);
invariant(!opCtx->lockState()->isLocked());
@@ -276,19 +277,33 @@ Status MigrationChunkClonerSourceLegacy::startClone(OperationContext* opCtx) {
// Tell the recipient shard to start cloning
BSONObjBuilder cmdBuilder;
- StartChunkCloneRequest::appendAsCommand(&cmdBuilder,
- _args.getNss(),
- // TODO (SERVER-44161): Replace with UUID provided by
- // migration donor.
- UUID::gen(),
- _sessionId,
- _donorConnStr,
- _args.getFromShardId(),
- _args.getToShardId(),
- _args.getMinKey(),
- _args.getMaxKey(),
- _shardKeyPattern.toBSON(),
- _args.getSecondaryThrottle());
+
+ auto fcvVersion = serverGlobalParams.featureCompatibility.getVersion();
+ if (fcvVersion == ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ StartChunkCloneRequest::appendAsCommand(&cmdBuilder,
+ _args.getNss(),
+ migrationId,
+ _sessionId,
+ _donorConnStr,
+ _args.getFromShardId(),
+ _args.getToShardId(),
+ _args.getMinKey(),
+ _args.getMaxKey(),
+ _shardKeyPattern.toBSON(),
+ _args.getSecondaryThrottle());
+ } else {
+ // TODO (SERVER-44787): Remove this overload after 4.4 is released.
+ StartChunkCloneRequest::appendAsCommand(&cmdBuilder,
+ _args.getNss(),
+ _sessionId,
+ _donorConnStr,
+ _args.getFromShardId(),
+ _args.getToShardId(),
+ _args.getMinKey(),
+ _args.getMaxKey(),
+ _shardKeyPattern.toBSON(),
+ _args.getSecondaryThrottle());
+ }
auto startChunkCloneResponseStatus = _callRecipient(cmdBuilder.obj());
if (!startChunkCloneResponseStatus.isOK()) {
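The gist of the FCV gate above, as a simplified sketch (a `std::map` stands in for `BSONObjBuilder`, and the exact set of fields here is an illustrative assumption rather than the full wire format):

```cpp
#include <map>
#include <string>

enum class FCV { kFullyUpgradedTo44, kUpgradingOrDowngraded };

using CommandBuilder = std::map<std::string, std::string>;  // stand-in for BSONObjBuilder

void appendStartCloneCommand(CommandBuilder* cmd,
                             FCV fcv,
                             const std::string& nss,
                             const std::string& sessionId,
                             const std::string& migrationId) {
    (*cmd)["_recvChunkStart"] = nss;
    (*cmd)["sessionId"] = sessionId;
    // Only a donor that is fully upgraded to 4.4 sends the migration UUID; an
    // older recipient binary would not know what to do with the extra field.
    if (fcv == FCV::kFullyUpgradedTo44) {
        (*cmd)["uuid"] = migrationId;
    }
}
```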
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
index 2653f401ef1..a7236c136ce 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
@@ -90,7 +90,7 @@ public:
HostAndPort recipientHost);
~MigrationChunkClonerSourceLegacy();
- Status startClone(OperationContext* opCtx) override;
+ Status startClone(OperationContext* opCtx, const UUID& migrationId) override;
Status awaitUntilCriticalSectionIsAppropriate(OperationContext* opCtx,
Milliseconds maxTimeToWait) override;
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
index c117aa5b26a..89ca8742cfc 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
@@ -214,7 +214,7 @@ TEST_F(MigrationChunkClonerSourceLegacyTest, CorrectDocumentsFetched) {
onCommand([&](const RemoteCommandRequest& request) { return BSON("ok" << true); });
});
- ASSERT_OK(cloner.startClone(operationContext()));
+ ASSERT_OK(cloner.startClone(operationContext(), UUID::gen()));
futureStartClone.default_timed_get();
}
@@ -312,7 +312,7 @@ TEST_F(MigrationChunkClonerSourceLegacyTest, CollectionNotFound) {
kDonorConnStr,
kRecipientConnStr.getServers()[0]);
- ASSERT_NOT_OK(cloner.startClone(operationContext()));
+ ASSERT_NOT_OK(cloner.startClone(operationContext(), UUID::gen()));
cloner.cancelClone(operationContext());
}
@@ -325,7 +325,7 @@ TEST_F(MigrationChunkClonerSourceLegacyTest, ShardKeyIndexNotFound) {
kDonorConnStr,
kRecipientConnStr.getServers()[0]);
- ASSERT_NOT_OK(cloner.startClone(operationContext()));
+ ASSERT_NOT_OK(cloner.startClone(operationContext(), UUID::gen()));
cloner.cancelClone(operationContext());
}
@@ -351,7 +351,7 @@ TEST_F(MigrationChunkClonerSourceLegacyTest, FailedToEngageRecipientShard) {
});
});
- auto startCloneStatus = cloner.startClone(operationContext());
+ auto startCloneStatus = cloner.startClone(operationContext(), UUID::gen());
ASSERT_EQ(ErrorCodes::NetworkTimeout, startCloneStatus.code());
futureStartClone.default_timed_get();
}
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index ac34640554c..d33e5fbcbc9 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -213,7 +213,7 @@ MONGO_FAIL_POINT_DEFINE(migrateThreadHangAtStep4);
MONGO_FAIL_POINT_DEFINE(migrateThreadHangAtStep5);
MONGO_FAIL_POINT_DEFINE(migrateThreadHangAtStep6);
-MONGO_FAIL_POINT_DEFINE(failMigrationLeaveOrphans);
+MONGO_FAIL_POINT_DEFINE(failMigrationOnRecipient);
MONGO_FAIL_POINT_DEFINE(failMigrationReceivedOutOfRangeOperation);
} // namespace
@@ -341,10 +341,24 @@ Status MigrationDestinationManager::start(OperationContext* opCtx,
return Status(ErrorCodes::ConflictingOperationInProgress,
"Can't receive chunk while FCV is upgrading/downgrading");
+ // Note: It is expected that the FCV cannot change while the node is donating or receiving a
+ // chunk. This is guaranteed by the setFCV command serializing with donating and receiving
+ // chunks via the ActiveMigrationsRegistry.
+ _useFCV44Protocol =
+ fcvVersion == ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44;
+
_state = READY;
_stateChangedCV.notify_all();
_errmsg = "";
+ if (_useFCV44Protocol) {
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ "Missing migrationId in FCV 4.4",
+ cloneRequest.hasMigrationId());
+
+ _migrationId = cloneRequest.getMigrationId();
+ }
+
_nss = nss;
_fromShard = cloneRequest.getFromShardId();
_fromShardConnString =
@@ -748,8 +762,10 @@ void MigrationDestinationManager::_migrateThread() {
_setStateFail(str::stream() << "migrate failed: " << redact(exceptionToStatus()));
}
- if (getState() != DONE && !MONGO_unlikely(failMigrationLeaveOrphans.shouldFail())) {
- _forgetPending(opCtx.get(), ChunkRange(_min, _max));
+ if (!_useFCV44Protocol) {
+ if (getState() != DONE) {
+ _forgetPending(opCtx.get(), ChunkRange(_min, _max));
+ }
}
stdx::lock_guard<Latch> lk(_mutex);
@@ -799,39 +815,46 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* opCtx) {
{
const ChunkRange range(_min, _max);
- while (migrationutil::checkForConflictingDeletions(opCtx, range, collectionUuid)) {
- LOG(0) << "Migration paused because range overlaps with a "
- "range that is scheduled for deletion: collection: "
- << _nss.ns() << " range: " << redact(range.toString());
+ // 2. Ensure any data which might have been left orphaned in the range being moved has been
+ // deleted.
+ if (_useFCV44Protocol) {
+ while (migrationutil::checkForConflictingDeletions(opCtx, range, collectionUuid)) {
+ LOG(0) << "Migration paused because range overlaps with a "
+ "range that is scheduled for deletion: collection: "
+ << _nss.ns() << " range: " << redact(range.toString());
- auto status = CollectionShardingRuntime::waitForClean(opCtx, _nss, _epoch, range);
+ auto status = CollectionShardingRuntime::waitForClean(opCtx, _nss, _epoch, range);
- if (!status.isOK()) {
- _setStateFail(redact(status.reason()));
- return;
- }
+ if (!status.isOK()) {
+ _setStateFail(redact(status.reason()));
+ return;
+ }
- opCtx->sleepFor(Milliseconds(1000));
- }
+ opCtx->sleepFor(Milliseconds(1000));
+ }
- // TODO(SERVER-44163): Delete this block after the MigrationCoordinator has been integrated
- // into the source. It will be replaced by the checkForOverlapping call.
+ RangeDeletionTask recipientDeletionTask(
+ _migrationId, _nss, collectionUuid, _fromShard, range, CleanWhenEnum::kNow);
+ recipientDeletionTask.setPending(true);
- // 2. Synchronously delete any data which might have been left orphaned in the range
- // being moved, and wait for completion
+ migrationutil::persistRangeDeletionTaskLocally(opCtx, recipientDeletionTask);
+ } else {
+ // Synchronously delete any data which might have been left orphaned in the range
+ // being moved, and wait for completion
- auto notification = _notePending(opCtx, range);
- // Wait for the range deletion to report back
- if (!notification.waitStatus(opCtx).isOK()) {
- _setStateFail(redact(notification.waitStatus(opCtx).reason()));
- return;
- }
+ auto notification = _notePending(opCtx, range);
+ // Wait for the range deletion to report back
+ if (!notification.waitStatus(opCtx).isOK()) {
+ _setStateFail(redact(notification.waitStatus(opCtx).reason()));
+ return;
+ }
- // Wait for any other, overlapping queued deletions to drain
- auto status = CollectionShardingRuntime::waitForClean(opCtx, _nss, _epoch, range);
- if (!status.isOK()) {
- _setStateFail(redact(status.reason()));
- return;
+ // Wait for any other, overlapping queued deletions to drain
+ auto status = CollectionShardingRuntime::waitForClean(opCtx, _nss, _epoch, range);
+ if (!status.isOK()) {
+ _setStateFail(redact(status.reason()));
+ return;
+ }
}
timing.done(2);
@@ -934,9 +957,9 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* opCtx) {
timing.done(3);
migrateThreadHangAtStep3.pauseWhileSet();
- if (MONGO_unlikely(failMigrationLeaveOrphans.shouldFail())) {
+ if (MONGO_unlikely(failMigrationOnRecipient.shouldFail())) {
_setStateFail(str::stream() << "failing migration after cloning " << _numCloned
- << " docs due to failMigrationLeaveOrphans failpoint");
+ << " docs due to failMigrationOnRecipient failpoint");
return;
}
}
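Roughly what the new recipient-side bookkeeping amounts to, sketched with plain C++ containers (the real code persists a `RangeDeletionTask` document via `migrationutil::persistRangeDeletionTaskLocally`; the struct fields and helper below are simplified assumptions):

```cpp
#include <string>
#include <vector>

struct RangeDeletionTask {
    std::string migrationId;  // ties the task to this particular migration
    std::string nss;          // namespace being migrated
    std::string donorShard;
    std::string range;        // [min, max) of the chunk being received
    bool pending = true;      // not actionable until the migration outcome is known
};

std::vector<RangeDeletionTask> localRangeDeletions;  // stand-in for the local task store

void persistPendingRecipientTask(const RangeDeletionTask& task) {
    // Instead of synchronously deleting orphaned data up front, the recipient
    // records a pending deletion task; if the migration ultimately fails, the
    // task can later be made actionable so the partially cloned range gets
    // cleaned up by the range deletion machinery.
    localRangeDeletions.push_back(task);
}
```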
diff --git a/src/mongo/db/s/migration_destination_manager.h b/src/mongo/db/s/migration_destination_manager.h
index eff5aae8eb5..41841c96abe 100644
--- a/src/mongo/db/s/migration_destination_manager.h
+++ b/src/mongo/db/s/migration_destination_manager.h
@@ -188,6 +188,9 @@ private:
stdx::thread _migrateThreadHandle;
+ bool _useFCV44Protocol{false};
+
+ UUID _migrationId;
NamespaceString _nss;
ConnectionString _fromShardConnString;
ShardId _fromShard;
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 0644506b536..dd131735106 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -251,6 +251,8 @@ Status MigrationSourceManager::startClone() {
auto replCoord = repl::ReplicationCoordinator::get(_opCtx);
auto replEnabled = replCoord->isReplEnabled();
+ UUID migrationId = UUID::gen();
+
{
const auto metadata = _getCurrentMetadataAndCheckEpoch();
@@ -283,10 +285,6 @@ Status MigrationSourceManager::startClone() {
invariant(nullptr == std::exchange(msmForCsr(csr), this));
if (_useFCV44Protocol) {
- // TODO (SERVER-45175): Unify the migration UUID used by the MigrationCoordinator and
- // MigrationChunkClonerSourceLegacy
- UUID migrationId = UUID::gen();
-
// TODO (SERVER-xxx): Allow re-using the same session (though different transaction
// number) across migrations.
auto lsid = makeLogicalSessionId(_opCtx);
@@ -320,7 +318,7 @@ Status MigrationSourceManager::startClone() {
_coordinator->startMigration(_opCtx, _args.getWaitForDelete());
}
- Status startCloneStatus = _cloneDriver->startClone(_opCtx);
+ Status startCloneStatus = _cloneDriver->startClone(_opCtx, migrationId);
if (!startCloneStatus.isOK()) {
return startCloneStatus;
}
@@ -648,16 +646,9 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig() {
const ChunkRange range(_args.getMinKey(), _args.getMaxKey());
- auto notification = [&] {
- auto const whenToClean = _args.getWaitForDelete() ? CollectionShardingRuntime::kNow
- : CollectionShardingRuntime::kDelayed;
- UninterruptibleLockGuard noInterrupt(_opCtx->lockState());
- AutoGetCollection autoColl(_opCtx, getNss(), MODE_IS);
- return CollectionShardingRuntime::get(_opCtx, getNss())->cleanUpRange(range, whenToClean);
- }();
-
if (!MONGO_unlikely(doNotRefreshRecipientAfterCommit.shouldFail())) {
- // Best-effort make the recipient refresh its routing table to the new collection version.
+ // Best-effort make the recipient refresh its routing table to the new collection
+ // version.
refreshRecipientRoutingTable(_opCtx,
getNss(),
_args.getToShardId(),
@@ -669,24 +660,51 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig() {
<< "Moved chunks successfully but failed to clean up " << getNss().ns() << " range "
<< redact(range.toString()) << " due to: ";
- if (_args.getWaitForDelete()) {
- log() << "Waiting for cleanup of " << getNss().ns() << " range "
- << redact(range.toString());
- auto deleteStatus = notification.waitStatus(_opCtx);
- if (!deleteStatus.isOK()) {
- return {ErrorCodes::OrphanedRangeCleanUpFailed,
- orphanedRangeCleanUpErrMsg + redact(deleteStatus)};
- }
- return Status::OK();
- }
+ if (_useFCV44Protocol) {
+ if (_args.getWaitForDelete()) {
+ log() << "Waiting for cleanup of " << getNss().ns() << " range "
+ << redact(range.toString());
- if (notification.ready() && !notification.waitStatus(_opCtx).isOK()) {
- return {ErrorCodes::OrphanedRangeCleanUpFailed,
- orphanedRangeCleanUpErrMsg + redact(notification.waitStatus(_opCtx))};
+ auto deleteStatus =
+ CollectionShardingRuntime::waitForClean(_opCtx, getNss(), _collectionEpoch, range);
+
+ if (!deleteStatus.isOK()) {
+ return {ErrorCodes::OrphanedRangeCleanUpFailed,
+ orphanedRangeCleanUpErrMsg + redact(deleteStatus)};
+ }
+ }
} else {
- log() << "Leaving cleanup of " << getNss().ns() << " range " << redact(range.toString())
- << " to complete in background";
- notification.abandon();
+ auto notification = [&] {
+ auto const whenToClean = _args.getWaitForDelete() ? CollectionShardingRuntime::kNow
+ : CollectionShardingRuntime::kDelayed;
+ UninterruptibleLockGuard noInterrupt(_opCtx->lockState());
+ AutoGetCollection autoColl(_opCtx, getNss(), MODE_IS);
+ return CollectionShardingRuntime::get(_opCtx, getNss())
+ ->cleanUpRange(range, whenToClean);
+ }();
+
+ if (_args.getWaitForDelete()) {
+ log() << "Waiting for cleanup of " << getNss().ns() << " range "
+ << redact(range.toString());
+
+ auto deleteStatus = notification.waitStatus(_opCtx);
+
+ if (!deleteStatus.isOK()) {
+ return {ErrorCodes::OrphanedRangeCleanUpFailed,
+ orphanedRangeCleanUpErrMsg + redact(deleteStatus)};
+ }
+
+ return Status::OK();
+ }
+
+ if (notification.ready() && !notification.waitStatus(_opCtx).isOK()) {
+ return {ErrorCodes::OrphanedRangeCleanUpFailed,
+ orphanedRangeCleanUpErrMsg + redact(notification.waitStatus(_opCtx))};
+ } else {
+ log() << "Leaving cleanup of " << getNss().ns() << " range " << redact(range.toString())
+ << " to complete in background";
+ notification.abandon();
+ }
}
return Status::OK();
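Condensed, the post-commit cleanup decision on the donor now branches as in the sketch below (the helper functions are hypothetical stand-ins for `CollectionShardingRuntime::waitForClean` and `cleanUpRange`):

```cpp
#include <iostream>

bool waitForRangeClean() { return true; }                      // stand-in for waitForClean
bool scheduleRangeCleanup(bool /*cleanNow*/) { return true; }  // stand-in for cleanUpRange

bool finishMigrationCleanup(bool useFCV44Protocol, bool waitForDelete) {
    if (useFCV44Protocol) {
        // Under the 4.4 protocol the MigrationCoordinator already owns the
        // range deletion task, so the donor only blocks here when the caller
        // explicitly asked to wait for the delete.
        return waitForDelete ? waitForRangeClean() : true;
    }

    // Legacy path: schedule the deletion locally, then either wait for it to
    // finish or leave it to complete in the background.
    const bool scheduled = scheduleRangeCleanup(/*cleanNow=*/waitForDelete);
    if (waitForDelete) {
        return scheduled && waitForRangeClean();
    }
    std::cout << "Leaving range cleanup to complete in background\n";
    return true;
}
```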
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index c6bee0d5377..7994dce7251 100644
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -348,6 +348,7 @@ void persistAbortDecision(OperationContext* opCtx, const UUID& migrationId) {
QUERY(MigrationCoordinatorDocument::kIdFieldName << migrationId),
BSON("$set" << BSON(MigrationCoordinatorDocument::kDecisionFieldName << "aborted")));
}
+
void deleteRangeDeletionTaskOnRecipient(OperationContext* opCtx,
const ShardId& recipientId,
const UUID& migrationId,
diff --git a/src/mongo/db/s/move_chunk_command.cpp b/src/mongo/db/s/move_chunk_command.cpp
index 1fa019e7ebd..68abc0bbb00 100644
--- a/src/mongo/db/s/move_chunk_command.cpp
+++ b/src/mongo/db/s/move_chunk_command.cpp
@@ -72,7 +72,7 @@ const WriteConcernOptions kMajorityWriteConcern(WriteConcernOptions::kMajority,
// writeConcernMajorityJournalDefault is set to true
// in the ReplSetConfig.
WriteConcernOptions::SyncMode::UNSET,
- -1);
+ WriteConcernOptions::kWriteConcernTimeoutSharding);
// Tests can pause and resume moveChunk's progress at each step by enabling/disabling each failpoint
MONGO_FAIL_POINT_DEFINE(moveChunkHangAtStep1);
@@ -174,6 +174,7 @@ public:
writeConcernResult.wTimedOut = false;
Status majorityStatus = waitForWriteConcern(
opCtx, replClient.getLastOp(), kMajorityWriteConcern, &writeConcernResult);
+
if (!majorityStatus.isOK()) {
if (!writeConcernResult.wTimedOut) {
uassertStatusOK(majorityStatus);
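The write-concern change swaps the `-1` sentinel (effectively "wait forever") for the named sharding timeout constant. A simplified stand-in for `WriteConcernOptions`, with an assumed timeout value for illustration, is sketched below:

```cpp
#include <chrono>
#include <string>

struct WriteConcernOptions {  // simplified stand-in, not the real mongo class
    // Assumed value; the real constant is defined by mongo::WriteConcernOptions
    // and bounds how long sharding operations wait for majority
    // acknowledgement instead of hanging indefinitely.
    static constexpr std::chrono::seconds kWriteConcernTimeoutSharding{60};

    std::string wMode = "majority";
    std::chrono::seconds wTimeout = kWriteConcernTimeoutSharding;
};

const WriteConcernOptions kMajorityWriteConcern{};  // used for moveChunk's final wait
```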
diff --git a/src/mongo/db/s/start_chunk_clone_request.cpp b/src/mongo/db/s/start_chunk_clone_request.cpp
index ad350452346..30d2813a428 100644
--- a/src/mongo/db/s/start_chunk_clone_request.cpp
+++ b/src/mongo/db/s/start_chunk_clone_request.cpp
@@ -77,7 +77,8 @@ StatusWith<StartChunkCloneRequest> StartChunkCloneRequest::createFromCommand(Nam
// TODO (SERVER-44787): Remove this FCV check after 4.4 is released.
if (serverGlobalParams.featureCompatibility.getVersion() ==
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
- request._migrationId = UUID::parse(obj);
+ if (obj.getField("uuid"))
+ request._migrationId = UUID::parse(obj);
}
{
@@ -187,4 +188,31 @@ void StartChunkCloneRequest::appendAsCommand(
secondaryThrottle.append(builder);
}
+// TODO (SERVER-44787): Remove this overload after 4.4 is released.
+void StartChunkCloneRequest::appendAsCommand(
+ BSONObjBuilder* builder,
+ const NamespaceString& nss,
+ const MigrationSessionId& sessionId,
+ const ConnectionString& fromShardConnectionString,
+ const ShardId& fromShardId,
+ const ShardId& toShardId,
+ const BSONObj& chunkMinKey,
+ const BSONObj& chunkMaxKey,
+ const BSONObj& shardKeyPattern,
+ const MigrationSecondaryThrottleOptions& secondaryThrottle) {
+ invariant(builder->asTempObj().isEmpty());
+ invariant(nss.isValid());
+ invariant(fromShardConnectionString.isValid());
+
+ builder->append(kRecvChunkStart, nss.ns());
+ sessionId.append(builder);
+ builder->append(kFromShardConnectionString, fromShardConnectionString.toString());
+ builder->append(kFromShardId, fromShardId.toString());
+ builder->append(kToShardId, toShardId.toString());
+ builder->append(kChunkMinKey, chunkMinKey);
+ builder->append(kChunkMaxKey, chunkMaxKey);
+ builder->append(kShardKeyPattern, shardKeyPattern);
+ secondaryThrottle.append(builder);
+}
+
} // namespace mongo
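The defensive parse above, which reads the migration UUID only when the field is actually present, can be sketched with `std::optional` (the BSON object is mocked with a map; the `"uuid"` field name matches the command field used earlier in this diff):

```cpp
#include <map>
#include <optional>
#include <string>

using BSONLike = std::map<std::string, std::string>;  // stand-in for BSONObj

std::optional<std::string> parseMigrationId(const BSONLike& obj, bool fcvIsFullyUpgradedTo44) {
    // Even at FCV 4.4 the field may be missing, e.g. when the donor is still
    // running an older binary mid-upgrade, so treat it as optional.
    if (!fcvIsFullyUpgradedTo44) {
        return std::nullopt;
    }
    const auto it = obj.find("uuid");
    if (it == obj.end()) {
        return std::nullopt;
    }
    return it->second;
}
```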
diff --git a/src/mongo/db/s/start_chunk_clone_request.h b/src/mongo/db/s/start_chunk_clone_request.h
index 055ba59a459..826c6e94371 100644
--- a/src/mongo/db/s/start_chunk_clone_request.h
+++ b/src/mongo/db/s/start_chunk_clone_request.h
@@ -71,6 +71,18 @@ public:
const BSONObj& shardKeyPattern,
const MigrationSecondaryThrottleOptions& secondaryThrottle);
+ // TODO (SERVER-44787): Remove this overload after 4.4 is released.
+ static void appendAsCommand(BSONObjBuilder* builder,
+ const NamespaceString& nss,
+ const MigrationSessionId& sessionId,
+ const ConnectionString& fromShardConnectionString,
+ const ShardId& fromShardId,
+ const ShardId& toShardId,
+ const BSONObj& chunkMinKey,
+ const BSONObj& chunkMaxKey,
+ const BSONObj& shardKeyPattern,
+ const MigrationSecondaryThrottleOptions& secondaryThrottle);
+
const NamespaceString& getNss() const {
return _nss;
}
@@ -83,9 +95,16 @@ public:
return _fromShardCS;
}
+ // TODO (SERVER-44787): Remove this function after 4.4 is released.
+ // Use this check so that getMigrationId() is never called in a cluster that's not fully
+ // upgraded to 4.4.
+ bool hasMigrationId() const {
+ return _migrationId.is_initialized();
+ }
+
const UUID& getMigrationId() const {
- // getMigrationId() should never be called in a cluster that's not fully upgraded to 4.4.
- // TODO (SERVER-44787): Remove this invariant after 4.4 is released.
+ // TODO (SERVER-44787): change _migrationId to non-optional and remove invariant after 4.4
+ // is released.
invariant(_migrationId);
return *_migrationId;
}
diff --git a/src/mongo/util/uuid.h b/src/mongo/util/uuid.h
index 5f7e27d35b9..b90b4d29d30 100644
--- a/src/mongo/util/uuid.h
+++ b/src/mongo/util/uuid.h
@@ -77,6 +77,7 @@ class UUID {
friend class LogicalSessionIdToClient;
friend class LogicalSessionFromClient;
friend class MigrationCoordinatorDocument;
+ friend class MigrationDestinationManager;
friend class RangeDeletionTask;
friend class ResolvedKeyId;
friend class repl::CollectionInfo;
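The new `friend` declaration exists (assuming `mongo::UUID` keeps its default constructor private, as the surrounding friend list suggests) so that `MigrationDestinationManager` can hold a `UUID _migrationId` member that is default-constructed first and assigned only once a migration starts. A minimal illustration of that pattern:

```cpp
class UUID {
public:
    static UUID gen() { return UUID(); }  // placeholder for random generation

private:
    UUID() = default;  // default construction is deliberately restricted

    // Friends may default-construct a UUID, e.g. to hold a member that is
    // assigned a real value later.
    friend class MigrationDestinationManager;
};

class MigrationDestinationManager {
    UUID _migrationId;  // default-constructed; overwritten from the clone request
};
```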