author     Billy Donahue <BillyDonahue@users.noreply.github.com>   2022-07-27 18:17:24 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>        2022-07-27 19:38:08 +0000
commit     958ad9abfc80861d3f43f44da694e83464b01e1d (patch)
tree       ca14e7097c1cb8ab20dfad7fa6888511f0226650 /src/mongo/db/s
parent     f8a1ac19be6279e7ace012dafa8cfcaa028d49e1 (diff)
SERVER-68246 rewrite calls to boost::optional get and is_initialized
Diffstat (limited to 'src/mongo/db/s')
57 files changed, 197 insertions(+), 194 deletions(-)
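Every hunk below applies the same mechanical rename: `boost::optional<T>::get()` becomes `value()` and `is_initialized()` becomes `has_value()`, the spellings Boost shares with `std::optional`. A minimal standalone sketch of the before/after pattern, assuming a Boost recent enough to provide `has_value()`/`value()` (1.68+); the `describe` helper is hypothetical and not code from this commit:

```cpp
#include <boost/optional.hpp>
#include <iostream>
#include <string>

// Hypothetical helper illustrating the rename this commit applies throughout.
std::string describe(const boost::optional<std::string>& nss) {
    // Before: if (nss.is_initialized()) return nss.get();
    // After, matching the std::optional vocabulary:
    if (nss.has_value()) {
        return nss.value();  // throws boost::bad_optional_access when empty
    }
    return "<none>";
}

int main() {
    boost::optional<std::string> some{"config.chunks"};
    boost::optional<std::string> none = boost::none;
    std::cout << describe(some) << '\n' << describe(none) << '\n';
}
```

Beyond consistency with `std::optional`, the rename makes the failure mode explicit: `get()` on an empty `boost::optional` is undefined behavior (an assertion in debug builds), whereas `value()` throws `boost::bad_optional_access`.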
diff --git a/src/mongo/db/s/active_migrations_registry.cpp b/src/mongo/db/s/active_migrations_registry.cpp
index 61646702239..c8c0343eda0 100644
--- a/src/mongo/db/s/active_migrations_registry.cpp
+++ b/src/mongo/db/s/active_migrations_registry.cpp
@@ -231,8 +231,8 @@ BSONObj ActiveMigrationsRegistry::getActiveMigrationStatusReport(OperationContex
     // desireable for reporting, and then diagnosing, migrations that are stuck.
     if (nss) {
         // Lock the collection so nothing changes while we're getting the migration report.
-        AutoGetCollection autoColl(opCtx, nss.get(), MODE_IS);
-        auto csr = CollectionShardingRuntime::get(opCtx, nss.get());
+        AutoGetCollection autoColl(opCtx, nss.value(), MODE_IS);
+        auto csr = CollectionShardingRuntime::get(opCtx, nss.value());
         auto csrLock = CollectionShardingRuntime::CSRLock::lockShared(opCtx, csr);
         if (auto msm = MigrationSourceManager::get(csr, csrLock)) {
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index 6822de95f8a..0db96d41a27 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -119,7 +119,7 @@ public:
     BSONObj toBSON() const {
         BSONObjBuilder builder;
         builder.append("executionTimeMillis", _executionTimer.millis());
-        builder.append("errorOccurred", _errMsg.is_initialized());
+        builder.append("errorOccurred", _errMsg.has_value());
         if (_errMsg) {
             builder.append("errmsg", *_errMsg);
@@ -551,7 +551,7 @@ void Balancer::_consumeActionStreamLoop() {
         _newInfoOnStreamingActions.store(false);
         auto nextAction = selectedStream->getNextStreamingAction(opCtx.get());
-        if ((streamDrained = !nextAction.is_initialized())) {
+        if ((streamDrained = !nextAction.has_value())) {
             continue;
         }
diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
index 1503fa49b9e..e1899a66eac 100644
--- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
@@ -813,7 +813,7 @@ private:
             _abort(DefragmentationPhaseEnum::kMergeAndMeasureChunks);
             return;
         }
-        const uint64_t estimatedChunkSize = chunk.getEstimatedSizeBytes().get();
+        const uint64_t estimatedChunkSize = chunk.getEstimatedSizeBytes().value();
         _collectionChunks.emplace_back(chunk.getRange(), chunk.getShard(), estimatedChunkSize);
     }
@@ -1189,7 +1189,7 @@ public:
         // with no estimated size.
         for (const auto& chunk : collectionChunks) {
             auto chunkSize = chunk.getEstimatedSizeBytes();
-            if (!chunkSize || (uint64_t)chunkSize.get() > maxChunkSizeBytes) {
+            if (!chunkSize || (uint64_t)chunkSize.value() > maxChunkSizeBytes) {
                 pendingActionsByShards[chunk.getShard()].rangesToFindSplitPoints.emplace_back(
                     chunk.getMin(), chunk.getMax());
             }
@@ -1668,10 +1668,10 @@ void BalancerDefragmentationPolicyImpl::_initializeCollectionState(WithLock,
         return;
     }
     auto phaseToBuild = coll.getDefragmentationPhase()
-        ? coll.getDefragmentationPhase().get()
+        ? coll.getDefragmentationPhase().value()
        : DefragmentationPhaseEnum::kMergeAndMeasureChunks;
-    auto collectionPhase = _transitionPhases(
-        opCtx, coll, phaseToBuild, !coll.getDefragmentationPhase().is_initialized());
+    auto collectionPhase =
+        _transitionPhases(opCtx, coll, phaseToBuild, !coll.getDefragmentationPhase().has_value());
     while (collectionPhase && collectionPhase->isComplete() &&
            MONGO_likely(!skipDefragmentationPhaseTransition.shouldFail())) {
         collectionPhase = _transitionPhases(opCtx, coll, collectionPhase->getNextPhase());
diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp b/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp
index 1512beb054c..ee56d9f32ea 100644
--- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp
@@ -254,7 +254,7 @@ TEST_F(BalancerDefragmentationPolicyTest,
     setDefaultClusterStats();
     _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
-    ASSERT_TRUE(nextAction.is_initialized());
+    ASSERT_TRUE(nextAction.has_value());
     DataSizeInfo dataSizeAction = stdx::get<DataSizeInfo>(*nextAction);
     auto resp = StatusWith(DataSizeResponse(2000, 4));
@@ -354,7 +354,7 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneUserCancellationBeginsPhas
     // Defragmentation should transition to phase 3
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     verifyExpectedDefragmentationPhaseOndisk(DefragmentationPhaseEnum::kSplitChunks);
-    ASSERT_TRUE(nextAction.is_initialized());
+    ASSERT_TRUE(nextAction.has_value());
     auto splitVectorAction = stdx::get<AutoSplitVectorInfo>(*nextAction);
 }
@@ -373,7 +373,7 @@ TEST_F(BalancerDefragmentationPolicyTest, TestNonRetriableErrorRebuildsCurrentPh
     ASSERT_TRUE(_defragmentationPolicy.isDefragmentingCollection(coll.getUuid()));
     verifyExpectedDefragmentationPhaseOndisk(DefragmentationPhaseEnum::kMergeAndMeasureChunks);
     // 2. The action returned by the stream should be now an actionable DataSizeCommand...
-    ASSERT_TRUE(nextAction.is_initialized());
+    ASSERT_TRUE(nextAction.has_value());
     DataSizeInfo dataSizeAction = stdx::get<DataSizeInfo>(*nextAction);
     // 3. with the expected content
     ASSERT_EQ(coll.getNss(), dataSizeAction.nss);
@@ -409,8 +409,8 @@ TEST_F(BalancerDefragmentationPolicyTest,
     // Phase 1 should restart.
     nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     nextAction2 = _defragmentationPolicy.getNextStreamingAction(operationContext());
-    ASSERT_TRUE(nextAction.is_initialized());
-    ASSERT_TRUE(nextAction2.is_initialized());
+    ASSERT_TRUE(nextAction.has_value());
+    ASSERT_TRUE(nextAction2.has_value());
     DataSizeInfo dataSizeAction = stdx::get<DataSizeInfo>(*nextAction);
     DataSizeInfo dataSizeAction2 = stdx::get<DataSizeInfo>(*nextAction2);
 }
@@ -470,7 +470,7 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneAcknowledgeSuccessfulMerge
     ASSERT_TRUE(nextAction == boost::none);
     _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
     nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
-    ASSERT_TRUE(nextAction.is_initialized());
+    ASSERT_TRUE(nextAction.has_value());
     MergeInfo mergeInfoAction = stdx::get<MergeInfo>(*nextAction);
     ASSERT_BSONOBJ_EQ(mergeInfoAction.chunkRange.getMin(), kKeyAtMin);
     ASSERT_BSONOBJ_EQ(mergeInfoAction.chunkRange.getMax(), kKeyAtMax);
@@ -478,7 +478,7 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneAcknowledgeSuccessfulMerge
     ASSERT_TRUE(nextAction == boost::none);
     _defragmentationPolicy.applyActionResult(operationContext(), mergeInfoAction, Status::OK());
     nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
-    ASSERT_TRUE(nextAction.is_initialized());
+    ASSERT_TRUE(nextAction.has_value());
     DataSizeInfo dataSizeAction = stdx::get<DataSizeInfo>(*nextAction);
     ASSERT_EQ(mergeInfoAction.nss, dataSizeAction.nss);
     ASSERT_BSONOBJ_EQ(mergeInfoAction.chunkRange.getMin(), dataSizeAction.chunkRange.getMin());
@@ -514,9 +514,9 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneAllConsecutive) {
     _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
     // Test
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
-    ASSERT_TRUE(nextAction.is_initialized());
+    ASSERT_TRUE(nextAction.has_value());
     auto nextAction2 = _defragmentationPolicy.getNextStreamingAction(operationContext());
-    ASSERT_TRUE(nextAction2.is_initialized());
+    ASSERT_TRUE(nextAction2.has_value());
     // Verify the content of the received merge actions
     // (Note: there is no guarantee on the order provided by the stream)
     MergeInfo mergeAction = stdx::get<MergeInfo>(*nextAction);
@@ -533,7 +533,7 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneAllConsecutive) {
         ASSERT_BSONOBJ_EQ(mergeAction.chunkRange.getMax(), kKeyAtMax);
     }
     auto nextAction3 = _defragmentationPolicy.getNextStreamingAction(operationContext());
-    ASSERT_FALSE(nextAction3.is_initialized());
+    ASSERT_FALSE(nextAction3.has_value());
 }
 TEST_F(BalancerDefragmentationPolicyTest, PhaseOneNotConsecutive) {
@@ -554,11 +554,11 @@ TEST_F(BalancerDefragmentationPolicyTest, PhaseOneNotConsecutive) {
     _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
     // Three actions (in an unspecified order) should be immediately available.
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
-    ASSERT_TRUE(nextAction.is_initialized());
+    ASSERT_TRUE(nextAction.has_value());
     auto nextAction2 = _defragmentationPolicy.getNextStreamingAction(operationContext());
-    ASSERT_TRUE(nextAction2.is_initialized());
+    ASSERT_TRUE(nextAction2.has_value());
     auto nextAction3 = _defragmentationPolicy.getNextStreamingAction(operationContext());
-    ASSERT_TRUE(nextAction3.is_initialized());
+    ASSERT_TRUE(nextAction3.has_value());
     // Verify their content of the received merge actions
     uint8_t timesLowerRangeMergeFound = 0;
     uint8_t timesUpperRangeMergeFound = 0;
@@ -595,7 +595,7 @@ TEST_F(BalancerDefragmentationPolicyTest, PhaseOneNotConsecutive) {
     ASSERT_EQ(1, timesMiddleRangeDataSizeFound);
     auto nextAction4 = _defragmentationPolicy.getNextStreamingAction(operationContext());
-    ASSERT_FALSE(nextAction4.is_initialized());
+    ASSERT_FALSE(nextAction4.has_value());
 }
 // Phase 2 tests.
@@ -615,7 +615,7 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseTwoMissingDataSizeRestartsPha
     _defragmentationPolicy.selectChunksToMove(operationContext(), &usedShards);
     ASSERT_EQ(0, pendingMigrations.size());
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
-    ASSERT_TRUE(nextAction.is_initialized());
+    ASSERT_TRUE(nextAction.has_value());
     auto dataSizeAction = stdx::get<DataSizeInfo>(*nextAction);
 }
@@ -660,7 +660,7 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseTwoChunkCanBeMovedAndMergedWi
     _defragmentationPolicy.applyActionResult(operationContext(), moveAction, Status::OK());
     nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
-    ASSERT_TRUE(nextAction.is_initialized());
+    ASSERT_TRUE(nextAction.has_value());
     usedShards.clear();
     pendingMigrations = _defragmentationPolicy.selectChunksToMove(operationContext(), &usedShards);
     ASSERT_TRUE(pendingMigrations.empty());
@@ -777,7 +777,7 @@ TEST_F(BalancerDefragmentationPolicyTest, SingleLargeChunkCausesAutoSplitAndSpli
     // The new action returned by the stream should be an actionable AutoSplitVector command...
     nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
-    ASSERT_TRUE(nextAction.is_initialized());
+    ASSERT_TRUE(nextAction.has_value());
     AutoSplitVectorInfo splitVectorAction = stdx::get<AutoSplitVectorInfo>(*nextAction);
     // with the expected content
     ASSERT_EQ(coll.getNss(), splitVectorAction.nss);
@@ -797,7 +797,7 @@ TEST_F(BalancerDefragmentationPolicyTest, CollectionMaxChunkSizeIsUsedForPhase3)
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     // The action returned by the stream should be now an actionable AutoSplitVector command...
-    ASSERT_TRUE(nextAction.is_initialized());
+    ASSERT_TRUE(nextAction.has_value());
     AutoSplitVectorInfo splitVectorAction = stdx::get<AutoSplitVectorInfo>(*nextAction);
     // with the expected content
     ASSERT_EQ(coll.getNss(), splitVectorAction.nss);
diff --git a/src/mongo/db/s/balancer/cluster_chunks_resize_policy_impl.cpp b/src/mongo/db/s/balancer/cluster_chunks_resize_policy_impl.cpp
index 4ea6fa8b63f..e29e39a0f41 100644
--- a/src/mongo/db/s/balancer/cluster_chunks_resize_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/cluster_chunks_resize_policy_impl.cpp
@@ -213,7 +213,7 @@ SharedSemiFuture<void> ClusterChunksResizePolicyImpl::activate(OperationContext*
           "maxChunkSizeBytes"_attr = defaultMaxChunksSizeBytes);
     stdx::lock_guard<Latch> lk(_stateMutex);
-    if (!_activeRequestPromise.is_initialized()) {
+    if (!_activeRequestPromise.has_value()) {
         invariant(!_unprocessedCollections && _collectionsBeingProcessed.empty());
         _defaultMaxChunksSizeBytes = defaultMaxChunksSizeBytes;
         invariant(_defaultMaxChunksSizeBytes > 0);
@@ -236,13 +236,13 @@ SharedSemiFuture<void> ClusterChunksResizePolicyImpl::activate(OperationContext*
 bool ClusterChunksResizePolicyImpl::isActive() {
     stdx::lock_guard<Latch> lk(_stateMutex);
-    return _activeRequestPromise.is_initialized();
+    return _activeRequestPromise.has_value();
 }
 void ClusterChunksResizePolicyImpl::stop() {
     {
         stdx::lock_guard<Latch> lk(_stateMutex);
-        if (_activeRequestPromise.is_initialized()) {
+        if (_activeRequestPromise.has_value()) {
             _collectionsBeingProcessed.clear();
             _unprocessedCollections = nullptr;
             _activeRequestPromise->setFrom(
@@ -261,7 +261,7 @@ StringData ClusterChunksResizePolicyImpl::getName() const {
 boost::optional<DefragmentationAction> ClusterChunksResizePolicyImpl::getNextStreamingAction(
     OperationContext* opCtx) {
     stdx::lock_guard<Latch> lk(_stateMutex);
-    if (!_activeRequestPromise.is_initialized()) {
+    if (!_activeRequestPromise.has_value()) {
         return boost::none;
     }
@@ -296,7 +296,7 @@ boost::optional<DefragmentationAction> ClusterChunksResizePolicyImpl::getNextStr
         }
         auto nextAction = collState.popNextAction(opCtx);
-        if (nextAction.is_initialized()) {
+        if (nextAction.has_value()) {
             return nextAction;
         }
@@ -378,7 +378,7 @@ void ClusterChunksResizePolicyImpl::applyActionResult(OperationContext* opCtx,
     });
     stdx::lock_guard<Latch> lk(_stateMutex);
-    if (!_activeRequestPromise.is_initialized()) {
+    if (!_activeRequestPromise.has_value()) {
         return;
     }
diff --git a/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp b/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp
index 94b6e874cbf..d08b453d4b5 100644
--- a/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp
+++ b/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp
@@ -78,13 +78,13 @@ protected:
                       bool markAsAlreadyProcessed = false,
                       boost::optional<int64_t> maxChunkSizeBytes = boost::none) {
         setupCollection(nss, shardKeyPattern, chunkList);
-        if (markAsAlreadyProcessed || maxChunkSizeBytes.is_initialized()) {
+        if (markAsAlreadyProcessed || maxChunkSizeBytes.has_value()) {
            BSONObjBuilder updateQueryBuilder;
            BSONObjBuilder setObj(updateQueryBuilder.subobjStart("$set"));
            if (markAsAlreadyProcessed) {
                setObj.append(CollectionType::kChunksAlreadySplitForDowngradeFieldName, true);
            }
-           if (maxChunkSizeBytes.is_initialized()) {
+           if (maxChunkSizeBytes.has_value()) {
                setObj.append(CollectionType::kMaxChunkSizeBytesFieldName, *maxChunkSizeBytes);
            }
            setObj.done();
@@ -126,7 +126,7 @@ TEST_F(ClusterChunksResizePolicyTest, ResizeAClusterWithNoChunksEndsImmediately)
     // evaluated/updated.
     auto nextAction = _clusterChunksResizePolicy.getNextStreamingAction(_opCtx);
-    ASSERT_FALSE(nextAction.is_initialized());
+    ASSERT_FALSE(nextAction.has_value());
     ASSERT_TRUE(completionFuture.isReady());
     ASSERT_FALSE(_clusterChunksResizePolicy.isActive());
 }
@@ -234,7 +234,7 @@ TEST_F(ClusterChunksResizePolicyTest, ThePolicyGeneratesNoActionAfterReceivingAn
     nextAction = _clusterChunksResizePolicy.getNextStreamingAction(_opCtx);
-    ASSERT_FALSE(nextAction.is_initialized());
+    ASSERT_FALSE(nextAction.has_value());
     // The process of the chunk is completed; being the only entry in config.chunks, the process of
     // the whole cluster should also be complete
     ASSERT_TRUE(completionFuture.isReady());
@@ -329,7 +329,7 @@ TEST_F(ClusterChunksResizePolicyTest,
         _opCtx, *nextAction, Status(ErrorCodes::OperationFailed, "Testing nonRetriable error"));
     nextAction = _clusterChunksResizePolicy.getNextStreamingAction(_opCtx);
-    ASSERT_TRUE(nextAction.is_initialized());
+    ASSERT_TRUE(nextAction.has_value());
     auto reissuedSplitVectorAction = stdx::get<AutoSplitVectorInfo>(*nextAction);
     ASSERT_BSONOBJ_EQ(originalSplitVectorAction.keyPattern, reissuedSplitVectorAction.keyPattern);
@@ -364,7 +364,7 @@ TEST_F(ClusterChunksResizePolicyTest, ThePolicyCompletesWhenAllActionsAreAcknowl
     auto noAction = _clusterChunksResizePolicy.getNextStreamingAction(_opCtx);
     ASSERT_TRUE(_clusterChunksResizePolicy.isActive());
     ASSERT_FALSE(completionFuture.isReady());
-    ASSERT_FALSE(noAction.is_initialized());
+    ASSERT_FALSE(noAction.has_value());
     // As splitVectors are acknowledged, splitChunk Actions are generated
     StatusWith<AutoSplitVectorResponse> splitVectorResult1 =
@@ -397,7 +397,7 @@ TEST_F(ClusterChunksResizePolicyTest, ThePolicyCompletesWhenAllActionsAreAcknowl
     ASSERT_EQ(1, numFullyProcessedCollections);
     auto nextAction = _clusterChunksResizePolicy.getNextStreamingAction(_opCtx);
-    ASSERT_FALSE(nextAction.is_initialized());
+    ASSERT_FALSE(nextAction.has_value());
     ASSERT_FALSE(_clusterChunksResizePolicy.isActive());
     ASSERT_TRUE(completionFuture.isReady());
@@ -419,7 +419,7 @@ TEST_F(ClusterChunksResizePolicyTest, CollectionsMarkedAsAlreadyProcessedGetIgno
     ASSERT_FALSE(completionFuture.isReady());
     auto nextAction = _clusterChunksResizePolicy.getNextStreamingAction(_opCtx);
-    ASSERT_FALSE(nextAction.is_initialized());
+    ASSERT_FALSE(nextAction.has_value());
     ASSERT_TRUE(completionFuture.isReady());
     ASSERT_FALSE(_clusterChunksResizePolicy.isActive());
 }
diff --git a/src/mongo/db/s/balancer/type_migration.cpp b/src/mongo/db/s/balancer/type_migration.cpp
index 4f1a4ac71b1..4da7deb522f 100644
--- a/src/mongo/db/s/balancer/type_migration.cpp
+++ b/src/mongo/db/s/balancer/type_migration.cpp
@@ -178,10 +178,10 @@ BSONObj MigrationType::toBSON() const {
     builder.append(waitForDelete.name(), _waitForDelete);
     builder.append(forceJumbo.name(), _forceJumbo);
-    if (_maxChunkSizeBytes.is_initialized()) {
+    if (_maxChunkSizeBytes.has_value()) {
         builder.appendNumber(maxChunkSizeBytes.name(),
                              static_cast<long long>(*_maxChunkSizeBytes));
     }
-    if (_secondaryThrottle.is_initialized()) {
+    if (_secondaryThrottle.has_value()) {
         _secondaryThrottle->append(&builder);
     }
     return builder.obj();
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index 87a773ed7fd..8107bb3485d 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -71,7 +71,7 @@ boost::optional<ShardKeyPattern> CollectionMetadata::getReshardingKeyIfShouldFor
     // Used a switch statement so that the compiler warns anyone who modifies the coordinator
     // states enum.
-    switch (reshardingFields.get().getState()) {
+    switch (reshardingFields.value().getState()) {
         case CoordinatorStateEnum::kUnused:
         case CoordinatorStateEnum::kInitializing:
         case CoordinatorStateEnum::kBlockingWrites:
diff --git a/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp b/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp
index 69c67d89dcb..6700e4c99b1 100644
--- a/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp
+++ b/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp
@@ -90,12 +90,12 @@ void doRenameOperation(const CompactStructuredEncryptionDataState& state,
         *skipCompact = true;
         return;
     } else if (hasEcocRenameNow) {
-        if (ecocRenameUuid.get() != state.getEcocRenameUuid().value()) {
+        if (ecocRenameUuid.value() != state.getEcocRenameUuid().value()) {
             LOGV2_DEBUG(6517002,
                         1,
                         "Skipping compaction due to mismatched collection uuid",
                         "ecocRenameNss"_attr = ecocRenameNss,
-                        "uuid"_attr = ecocRenameUuid.get(),
+                        "uuid"_attr = ecocRenameUuid.value(),
                         "expectedUUID"_attr = state.getEcocRenameUuid().value());
             *skipCompact = true;
         }
@@ -119,14 +119,14 @@ void doRenameOperation(const CompactStructuredEncryptionDataState& state,
                     "ecocNss"_attr = ecocNss);
         *skipCompact = true;
         return;
-    } else if (ecocUuid.get() != state.getEcocUuid().value()) {
+    } else if (ecocUuid.value() != state.getEcocUuid().value()) {
         // The generation of the collection to be compacted is different than the one which was
         // requested.
         LOGV2_DEBUG(6350491,
                     1,
                     "Skipping rename of mismatched collection uuid",
                     "ecocNss"_attr = ecocNss,
-                    "uuid"_attr = ecocUuid.get(),
+                    "uuid"_attr = ecocUuid.value(),
                     "expectedUUID"_attr = state.getEcocUuid().value());
         *skipCompact = true;
         return;
@@ -135,7 +135,7 @@ void doRenameOperation(const CompactStructuredEncryptionDataState& state,
     LOGV2(6517004,
           "Renaming the encrypted compaction collection",
           "ecocNss"_attr = ecocNss,
-          "ecocUuid"_attr = ecocUuid.get(),
+          "ecocUuid"_attr = ecocUuid.value(),
           "ecocRenameNss"_attr = ecocRenameNss);
     // Otherwise, perform the rename so long as the target namespace does not exist.
diff --git a/src/mongo/db/s/config/configsvr_collmod_command.cpp b/src/mongo/db/s/config/configsvr_collmod_command.cpp
index 6d224756002..1b81a326878 100644
--- a/src/mongo/db/s/config/configsvr_collmod_command.cpp
+++ b/src/mongo/db/s/config/configsvr_collmod_command.cpp
@@ -89,8 +89,8 @@ public:
                 repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern);
             const auto& collMod = request().getCollModRequest();
-            if (collMod.getTimeseries() && collMod.getTimeseries().get().getGranularity()) {
-                auto granularity = collMod.getTimeseries().get().getGranularity().get();
+            if (collMod.getTimeseries() && collMod.getTimeseries().value().getGranularity()) {
+                auto granularity = collMod.getTimeseries().value().getGranularity().value();
                 ShardingCatalogManager::get(opCtx)->updateTimeSeriesGranularity(
                     opCtx, ns(), granularity);
             }
diff --git a/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp b/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp
index 1a094c7db5f..2d4c84b7e8e 100644
--- a/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp
+++ b/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp
@@ -111,7 +111,7 @@ public:
                     !request().getUnique().get_value_or(false));
             if (request().getCollation()) {
-                auto& collation = request().getCollation().get();
+                auto& collation = request().getCollation().value();
                 auto collator =
                     uassertStatusOK(CollatorFactoryInterface::get(opCtx->getServiceContext())
                                         ->makeFromBSON(collation));
@@ -161,7 +161,7 @@ public:
                 // etc.
                 opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
                 reshardCollectionJoinedExistingOperation.pauseWhileSet(opCtx);
-                existingInstance.get()->getCoordinatorDocWrittenFuture().get(opCtx);
+                existingInstance.value()->getCoordinatorDocWrittenFuture().get(opCtx);
                 return existingInstance;
             }
@@ -227,7 +227,7 @@ public:
             if (instance) {
                 // There is work to be done in order to have the collection's shard key match the
                 // requested shard key. Wait until the work is complete.
-                instance.get()->getCompletionFuture().get(opCtx);
+                instance.value()->getCompletionFuture().get(opCtx);
             }
             repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
         }
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index 3739fbaf277..b589ccad80d 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -152,7 +152,7 @@ BSONObj commitOrAbortTransaction(OperationContext* opCtx,
     newOpCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
     AuthorizationSession::get(newOpCtx.get()->getClient())
         ->grantInternalAuthorization(newOpCtx.get()->getClient());
-    newOpCtx.get()->setLogicalSessionId(opCtx->getLogicalSessionId().get());
+    newOpCtx.get()->setLogicalSessionId(opCtx->getLogicalSessionId().value());
     newOpCtx.get()->setTxnNumber(txnNumber);
     BSONObjBuilder bob;
@@ -639,7 +639,7 @@ void ShardingCatalogManager::insertConfigDocuments(OperationContext* opCtx,
     }());
     if (txnNumber) {
-        writeToConfigDocumentInTxn(opCtx, nss, request, txnNumber.get());
+        writeToConfigDocumentInTxn(opCtx, nss, request, txnNumber.value());
     } else {
         uassertStatusOK(
             getStatusFromWriteCommandReply(executeConfigRequest(opCtx, nss, request)));
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 78b231c38ab..78af77db426 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -776,7 +776,7 @@ void ShardingCatalogManager::_mergeChunksInTransaction(
                     mergedChunk.setEstimatedSizeBytes(boost::none);
                     mergedChunk.setHistory(
-                        {ChunkHistory(validAfter.get(), mergedChunk.getShard())});
+                        {ChunkHistory(validAfter.value(), mergedChunk.getShard())});
                     entry.setU(write_ops::UpdateModification::parseFromClassicUpdate(
                         mergedChunk.toConfigBSON()));
@@ -1153,7 +1153,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
     int entriesDeleted = 0;
     while (newHistory.size() > 1 &&
            newHistory.back().getValidAfter().getSecs() + windowInSeconds <
-               validAfter.get().getSecs()) {
+               validAfter.value().getSecs()) {
         newHistory.pop_back();
         ++entriesDeleted;
     }
@@ -1167,16 +1167,16 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
         LOGV2_DEBUG(4778500, 1, "Deleted old chunk history entries", attrs);
     }
-    if (!newHistory.empty() && newHistory.front().getValidAfter() >= validAfter.get()) {
+    if (!newHistory.empty() && newHistory.front().getValidAfter() >= validAfter.value()) {
         return {ErrorCodes::IncompatibleShardingMetadata,
                 str::stream() << "The chunk history for chunk with namespace " << nss.ns()
                               << " and min key " << migratedChunk.getMin()
                               << " is corrupted. The last validAfter "
                               << newHistory.back().getValidAfter().toString()
                               << " is greater or equal to the new validAfter "
-                              << validAfter.get().toString()};
+                              << validAfter.value().toString()};
     }
-    newHistory.emplace(newHistory.begin(), ChunkHistory(validAfter.get(), toShard));
+    newHistory.emplace(newHistory.begin(), ChunkHistory(validAfter.value(), toShard));
     newMigratedChunk->setHistory(std::move(newHistory));
     std::shared_ptr<std::vector<ChunkType>> newSplitChunks =
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index fb045aae478..dfd843674ed 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -508,7 +508,7 @@ void ShardingCatalogManager::configureCollectionBalancing(
         updatedFields++;
     }
     if (defragmentCollection) {
-        bool doDefragmentation = defragmentCollection.get();
+        bool doDefragmentation = defragmentCollection.value();
         if (doDefragmentation) {
             setBuilder.append(CollectionType::kDefragmentCollectionFieldName,
                               doDefragmentation);
@@ -518,7 +518,7 @@ void ShardingCatalogManager::configureCollectionBalancing(
         }
     }
     if (enableAutoSplitter) {
-        bool doSplit = enableAutoSplitter.get();
+        bool doSplit = enableAutoSplitter.value();
         setBuilder.append(CollectionType::kNoAutoSplitFieldName, !doSplit);
         updatedFields++;
     }
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
index b54338947b1..7e191f0525a 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
@@ -155,13 +155,13 @@ TEST_F(RemoveShardTest, RemoveShardAnotherShardDraining) {
     auto result = ShardingCatalogManager::get(operationContext())
                       ->removeShard(operationContext(), shard1.getName());
     ASSERT_EQUALS(RemoveShardProgress::STARTED, result.status);
-    ASSERT_EQUALS(false, result.remainingCounts.is_initialized());
+    ASSERT_EQUALS(false, result.remainingCounts.has_value());
     ASSERT_TRUE(isDraining(shard1.getName()));
     auto result2 = ShardingCatalogManager::get(operationContext())
                        ->removeShard(operationContext(), shard2.getName());
     ASSERT_EQUALS(RemoveShardProgress::STARTED, result2.status);
-    ASSERT_EQUALS(false, result2.remainingCounts.is_initialized());
+    ASSERT_EQUALS(false, result2.remainingCounts.has_value());
     ASSERT_TRUE(isDraining(shard2.getName()));
 }
@@ -200,7 +200,7 @@ TEST_F(RemoveShardTest, RemoveShardStartDraining) {
     auto result = ShardingCatalogManager::get(operationContext())
                       ->removeShard(operationContext(), shard1.getName());
     ASSERT_EQUALS(RemoveShardProgress::STARTED, result.status);
-    ASSERT_EQUALS(false, result.remainingCounts.is_initialized());
+    ASSERT_EQUALS(false, result.remainingCounts.has_value());
     ASSERT_TRUE(isDraining(shard1.getName()));
 }
@@ -245,13 +245,13 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingChunksRemaining) {
     auto startedResult = ShardingCatalogManager::get(operationContext())
                             ->removeShard(operationContext(), shard1.getName());
     ASSERT_EQUALS(RemoveShardProgress::STARTED, startedResult.status);
-    ASSERT_EQUALS(false, startedResult.remainingCounts.is_initialized());
+    ASSERT_EQUALS(false, startedResult.remainingCounts.has_value());
     ASSERT_TRUE(isDraining(shard1.getName()));
     auto ongoingResult = ShardingCatalogManager::get(operationContext())
                             ->removeShard(operationContext(), shard1.getName());
     ASSERT_EQUALS(RemoveShardProgress::ONGOING, ongoingResult.status);
-    ASSERT_EQUALS(true, ongoingResult.remainingCounts.is_initialized());
+    ASSERT_EQUALS(true, ongoingResult.remainingCounts.has_value());
     ASSERT_EQUALS(3, ongoingResult.remainingCounts->totalChunks);
     ASSERT_EQUALS(1, ongoingResult.remainingCounts->jumboChunks);
     ASSERT_EQUALS(1, ongoingResult.remainingCounts->databases);
@@ -278,13 +278,13 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingDatabasesRemaining) {
     auto startedResult = ShardingCatalogManager::get(operationContext())
                             ->removeShard(operationContext(), shard1.getName());
     ASSERT_EQUALS(RemoveShardProgress::STARTED, startedResult.status);
-    ASSERT_EQUALS(false, startedResult.remainingCounts.is_initialized());
+    ASSERT_EQUALS(false, startedResult.remainingCounts.has_value());
     ASSERT_TRUE(isDraining(shard1.getName()));
     auto ongoingResult = ShardingCatalogManager::get(operationContext())
                             ->removeShard(operationContext(), shard1.getName());
     ASSERT_EQUALS(RemoveShardProgress::ONGOING, ongoingResult.status);
-    ASSERT_EQUALS(true, ongoingResult.remainingCounts.is_initialized());
+    ASSERT_EQUALS(true, ongoingResult.remainingCounts.has_value());
     ASSERT_EQUALS(0, ongoingResult.remainingCounts->totalChunks);
     ASSERT_EQUALS(0, ongoingResult.remainingCounts->jumboChunks);
     ASSERT_EQUALS(1, ongoingResult.remainingCounts->databases);
@@ -332,13 +332,13 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
     auto startedResult = ShardingCatalogManager::get(operationContext())
                             ->removeShard(operationContext(), shard1.getName());
     ASSERT_EQUALS(RemoveShardProgress::STARTED, startedResult.status);
-    ASSERT_EQUALS(false, startedResult.remainingCounts.is_initialized());
+    ASSERT_EQUALS(false, startedResult.remainingCounts.has_value());
     ASSERT_TRUE(isDraining(shard1.getName()));
     auto ongoingResult = ShardingCatalogManager::get(operationContext())
                             ->removeShard(operationContext(), shard1.getName());
     ASSERT_EQUALS(RemoveShardProgress::ONGOING, ongoingResult.status);
-    ASSERT_EQUALS(true, ongoingResult.remainingCounts.is_initialized());
+    ASSERT_EQUALS(true, ongoingResult.remainingCounts.has_value());
     ASSERT_EQUALS(3, ongoingResult.remainingCounts->totalChunks);
     ASSERT_EQUALS(0, ongoingResult.remainingCounts->jumboChunks);
     ASSERT_EQUALS(0, ongoingResult.remainingCounts->databases);
@@ -356,7 +356,7 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
     auto completedResult = ShardingCatalogManager::get(operationContext())
                               ->removeShard(operationContext(), shard1.getName());
     ASSERT_EQUALS(RemoveShardProgress::COMPLETED, completedResult.status);
-    ASSERT_EQUALS(false, startedResult.remainingCounts.is_initialized());
+    ASSERT_EQUALS(false, startedResult.remainingCounts.has_value());
     // Now make sure that the shard no longer exists on config.
     auto response = assertGet(shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp
index 0d5be679227..95c9557c7f8 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp
@@ -110,8 +110,8 @@ TEST_F(CreateFirstChunksTest, Split_Disallowed_With_Both_SplitPoints_And_Zones)
     ASSERT_THROWS_CODE(
         InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
                                                           kShardKeyPattern,
-                                                          request.getNumInitialChunks().get(),
-                                                          request.getPresplitHashedZones().get(),
+                                                          request.getNumInitialChunks().value(),
+                                                          request.getPresplitHashedZones().value(),
                                                           request.getInitialSplitPoints(),
                                                           tags,
                                                           2 /* numShards */,
@@ -122,8 +122,8 @@ TEST_F(CreateFirstChunksTest, Split_Disallowed_With_Both_SplitPoints_And_Zones)
     ASSERT_THROWS_CODE(
         InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
                                                           kShardKeyPattern,
-                                                          request.getNumInitialChunks().get(),
-                                                          request.getPresplitHashedZones().get(),
+                                                          request.getNumInitialChunks().value(),
+                                                          request.getPresplitHashedZones().value(),
                                                           request.getInitialSplitPoints(),
                                                           tags,
                                                           2 /* numShards */,
@@ -164,8 +164,8 @@ TEST_F(CreateFirstChunksTest, NonEmptyCollection_SplitPoints_FromSplitVector_Man
     auto optimization =
         InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
                                                           kShardKeyPattern,
-                                                          request.getNumInitialChunks().get(),
-                                                          request.getPresplitHashedZones().get(),
+                                                          request.getNumInitialChunks().value(),
+                                                          request.getPresplitHashedZones().value(),
                                                           request.getInitialSplitPoints(),
                                                           {}, /* tags */
                                                           3 /* numShards */,
@@ -214,8 +214,8 @@ TEST_F(CreateFirstChunksTest, NonEmptyCollection_SplitPoints_FromClient_ManyChun
     auto optimization =
         InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
                                                           kShardKeyPattern,
-                                                          request.getNumInitialChunks().get(),
-                                                          request.getPresplitHashedZones().get(),
+                                                          request.getNumInitialChunks().value(),
+                                                          request.getPresplitHashedZones().value(),
                                                           request.getInitialSplitPoints(),
                                                           zones,
                                                           3 /* numShards */,
@@ -250,8 +250,8 @@ TEST_F(CreateFirstChunksTest, NonEmptyCollection_WithZones_OneChunkToPrimary) {
     auto optimization =
         InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
                                                           kShardKeyPattern,
-                                                          request.getNumInitialChunks().get(),
-                                                          request.getPresplitHashedZones().get(),
+                                                          request.getNumInitialChunks().value(),
+                                                          request.getPresplitHashedZones().value(),
                                                           request.getInitialSplitPoints(),
                                                           zones,
                                                           3 /* numShards */,
@@ -296,8 +296,8 @@ TEST_F(CreateFirstChunksTest, EmptyCollection_SplitPoints_FromClient_ManyChunksD
     auto optimization =
         InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
                                                           kShardKeyPattern,
-                                                          request.getNumInitialChunks().get(),
-                                                          request.getPresplitHashedZones().get(),
+                                                          request.getNumInitialChunks().value(),
+                                                          request.getPresplitHashedZones().value(),
                                                           request.getInitialSplitPoints(),
                                                           zones,
                                                           3 /* numShards */,
@@ -346,8 +346,8 @@ TEST_F(CreateFirstChunksTest, EmptyCollection_NoSplitPoints_OneChunkToPrimary) {
     auto optimization =
         InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
                                                           kShardKeyPattern,
-                                                          request.getNumInitialChunks().get(),
-                                                          request.getPresplitHashedZones().get(),
+                                                          request.getNumInitialChunks().value(),
+                                                          request.getPresplitHashedZones().value(),
                                                           request.getInitialSplitPoints(),
                                                           zones,
                                                           3 /* numShards */,
@@ -382,8 +382,8 @@ TEST_F(CreateFirstChunksTest, EmptyCollection_WithZones_ManyChunksOnFirstZoneSha
     auto optimization =
         InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
                                                           kShardKeyPattern,
-                                                          request.getNumInitialChunks().get(),
-                                                          request.getPresplitHashedZones().get(),
+                                                          request.getNumInitialChunks().value(),
+                                                          request.getPresplitHashedZones().value(),
                                                           request.getInitialSplitPoints(),
                                                           zones,
                                                           3 /* numShards */,
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index c02b9e38ad0..333ea221c12 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -439,7 +439,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
                           << " The CWWC on the shard is (" << cwwcOnShard << ")."};
     }
-    auto cwwcOnConfig = cachedCWWC.get().toBSON();
+    auto cwwcOnConfig = cachedCWWC.value().toBSON();
     BSONObjComparator comparator(
         BSONObj(), BSONObjComparator::FieldNamesMode::kConsider, nullptr);
     if (comparator.compare(cwwcOnShard, cwwcOnConfig) != 0) {
diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp
index aa622b0bd03..58f11c66e02 100644
--- a/src/mongo/db/s/create_collection_coordinator.cpp
+++ b/src/mongo/db/s/create_collection_coordinator.cpp
@@ -617,7 +617,7 @@ void CreateCollectionCoordinator::_createCollectionAndIndexes(OperationContext*
     // We need to implicitly create a timeseries view and underlying bucket collection.
     if (_collectionEmpty && _request.getTimeseries()) {
         const auto viewName = nss().getTimeseriesViewNamespace();
-        auto createCmd = makeCreateCommand(viewName, collation, _request.getTimeseries().get());
+        auto createCmd = makeCreateCommand(viewName, collation, _request.getTimeseries().value());
         BSONObj createRes;
         DBDirectClient localClient(opCtx);
diff --git a/src/mongo/db/s/drop_collection_coordinator.cpp b/src/mongo/db/s/drop_collection_coordinator.cpp
index 0262090f6af..70cf4e2ea34 100644
--- a/src/mongo/db/s/drop_collection_coordinator.cpp
+++ b/src/mongo/db/s/drop_collection_coordinator.cpp
@@ -139,7 +139,7 @@ ExecutorFuture<void> DropCollectionCoordinator::_runImpl(
             if (collIsSharded) {
                 invariant(_doc.getCollInfo());
-                const auto& coll = _doc.getCollInfo().get();
+                const auto& coll = _doc.getCollInfo().value();
                 sharding_ddl_util::removeCollAndChunksMetadataFromConfig(
                     opCtx, coll, ShardingCatalogClient::kMajorityWriteConcern);
             }
diff --git a/src/mongo/db/s/drop_database_coordinator.cpp b/src/mongo/db/s/drop_database_coordinator.cpp
index a6676e133b4..72d8ab50647 100644
--- a/src/mongo/db/s/drop_database_coordinator.cpp
+++ b/src/mongo/db/s/drop_database_coordinator.cpp
@@ -215,7 +215,7 @@ ExecutorFuture<void> DropDatabaseCoordinator::_runImpl(
             }
             if (_doc.getCollInfo()) {
-                const auto& coll = _doc.getCollInfo().get();
+                const auto& coll = _doc.getCollInfo().value();
                 LOGV2_DEBUG(5494504,
                             2,
                             "Completing collection drop from previous primary",
diff --git a/src/mongo/db/s/forwardable_operation_metadata.cpp b/src/mongo/db/s/forwardable_operation_metadata.cpp
index 51c06f80347..458484deb7f 100644
--- a/src/mongo/db/s/forwardable_operation_metadata.cpp
+++ b/src/mongo/db/s/forwardable_operation_metadata.cpp
@@ -57,11 +57,11 @@ void ForwardableOperationMetadata::setOn(OperationContext* opCtx) const {
     Client* client = opCtx->getClient();
     if (const auto& comment = getComment()) {
         stdx::lock_guard<Client> lk(*client);
-        opCtx->setComment(comment.get());
+        opCtx->setComment(comment.value());
     }
     if (const auto& optAuthMetadata = getImpersonatedUserMetadata()) {
-        const auto& authMetadata = optAuthMetadata.get();
+        const auto& authMetadata = optAuthMetadata.value();
         const auto& users = authMetadata.getUsers();
         if (!users.empty() || !authMetadata.getRoles().empty()) {
             fassert(ErrorCodes::InternalError, users.size() == 1);
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index 1f8755fc12a..79de7a46fd2 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -69,7 +69,7 @@ bool metadataOverlapsRange(const boost::optional<CollectionMetadata>& metadata,
     if (!metadata) {
         return false;
     }
-    return metadataOverlapsRange(metadata.get(), range);
+    return metadataOverlapsRange(metadata.value(), range);
 }
 }  // namespace
@@ -105,7 +105,7 @@ public:
     // boost::none
     const CollectionMetadata& get() {
         invariant(_metadataTracker->metadata);
-        return _metadataTracker->metadata.get();
+        return _metadataTracker->metadata.value();
     }
 private:
@@ -178,7 +178,7 @@ void MetadataManager::setFilteringMetadata(CollectionMetadata remoteMetadata) {
     invariant(!_metadata.empty());
     // The active metadata should always be available (not boost::none)
    invariant(_metadata.back()->metadata);
-    const auto& activeMetadata = _metadata.back()->metadata.get();
+    const auto& activeMetadata = _metadata.back()->metadata.value();
     const auto remoteCollVersion = remoteMetadata.getCollVersion();
     const auto activeCollVersion = activeMetadata.getCollVersion();
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index c751ee64f89..8d6b5050f1e 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -227,8 +227,8 @@ void LogTransactionOperationsForShardingHandler::commit(boost::optional<Timestam
             continue;
         }
-        auto const& minKey = cloner->_args.getMin().get();
-        auto const& maxKey = cloner->_args.getMax().get();
+        auto const& minKey = cloner->_args.getMin().value();
+        auto const& maxKey = cloner->_args.getMax().value();
         auto const& shardKeyPattern = cloner->_shardKeyPattern;
         if (!isInRange(documentKey, minKey, maxKey, shardKeyPattern)) {
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
index 8be0acd90df..376f3d35880 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
@@ -316,7 +316,7 @@ public:
             WriteConcernOptions majorityWC{WriteConcernOptions::kMajority,
                                            WriteConcernOptions::SyncMode::UNSET,
                                            WriteConcernOptions::kNoTimeout};
-            uassertStatusOK(waitForWriteConcern(opCtx, opTime.get(), majorityWC, &wcResult));
+            uassertStatusOK(waitForWriteConcern(opCtx, opTime.value(), majorityWC, &wcResult));
             auto rollbackIdAtMigrationInit = [&]() {
                 AutoGetActiveCloner autoCloner(opCtx, migrationSessionId, false);
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 58fe7659d21..d9be4951278 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -369,7 +369,7 @@ bool MigrationDestinationManager::isActive() const {
 }
 bool MigrationDestinationManager::_isActive(WithLock) const {
-    return _sessionId.is_initialized();
+    return _sessionId.has_value();
 }
 void MigrationDestinationManager::report(BSONObjBuilder& b,
@@ -389,7 +389,7 @@ void MigrationDestinationManager::report(BSONObjBuilder& b,
     }
     stdx::lock_guard<Latch> sl(_mutex);
-    b.appendBool("active", _sessionId.is_initialized());
+    b.appendBool("active", _sessionId.has_value());
     if (_sessionId) {
         b.append("sessionId", _sessionId->toString());
@@ -1372,7 +1372,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx,
                 }
                 migrationutil::persistUpdatedNumOrphans(
-                    opCtx, _migrationId.get(), *_collectionUuid, batchNumCloned);
+                    opCtx, _migrationId.value(), *_collectionUuid, batchNumCloned);
                 {
                     stdx::lock_guard<Latch> statsLock(_mutex);
@@ -1821,7 +1821,7 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx, const
     if (changeInOrphans != 0) {
         migrationutil::persistUpdatedNumOrphans(
-            opCtx, _migrationId.get(), *_collectionUuid, changeInOrphans);
+            opCtx, _migrationId.value(), *_collectionUuid, changeInOrphans);
     }
     return didAnything;
 }
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 08998b7ca32..dd5c1b73c83 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -253,7 +253,7 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
             shardVersion.majorVersion() > 0);
     // Compute the max bound in case only `min` is set (moveRange)
-    if (!_args.getMax().is_initialized()) {
+    if (!_args.getMax().has_value()) {
         // TODO SERVER-64926 do not assume min always present
         const auto& min = *_args.getMin();
diff --git a/src/mongo/db/s/rename_collection_coordinator.cpp b/src/mongo/db/s/rename_collection_coordinator.cpp
index 11a00d34f04..00c8bb6a0b9 100644
--- a/src/mongo/db/s/rename_collection_coordinator.cpp
+++ b/src/mongo/db/s/rename_collection_coordinator.cpp
@@ -295,7 +295,7 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
                 // - Locally drop the target collection
                 // - Locally rename source to target
                 ShardsvrRenameCollectionParticipant renameCollParticipantRequest(
-                    fromNss, _doc.getSourceUUID().get());
+                    fromNss, _doc.getSourceUUID().value());
                 renameCollParticipantRequest.setDbName(fromNss.db());
                 renameCollParticipantRequest.setTargetUUID(_doc.getTargetUUID());
                 renameCollParticipantRequest.setRenameCollectionRequest(_request);
@@ -367,7 +367,7 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
                 // On participant shards:
                 // - Unblock CRUD on participants for both source and destination collections
                 ShardsvrRenameCollectionUnblockParticipant unblockParticipantRequest(
-                    fromNss, _doc.getSourceUUID().get());
+                    fromNss, _doc.getSourceUUID().value());
                 unblockParticipantRequest.setDbName(fromNss.db());
                 unblockParticipantRequest.setRenameCollectionRequest(_request);
                 auto const cmdObj = CommandHelpers::appendMajorityWriteConcern(
diff --git a/src/mongo/db/s/rename_collection_participant_service.cpp b/src/mongo/db/s/rename_collection_participant_service.cpp
index 6fc4a2bed2f..ee8aebded3e 100644
--- a/src/mongo/db/s/rename_collection_participant_service.cpp
+++ b/src/mongo/db/s/rename_collection_participant_service.cpp
@@ -169,7 +169,7 @@ boost::optional<BSONObj> RenameParticipantInstance::reportForCurrentOp(
     BSONObjBuilder cmdBob;
     if (const auto& optComment = _doc.getForwardableOpMetadata().getComment()) {
-        cmdBob.append(optComment.get().firstElement());
+        cmdBob.append(optComment.value().firstElement());
     }
     BSONObjBuilder bob;
     bob.append("type", "op");
diff --git a/src/mongo/db/s/reshard_collection_coordinator.cpp b/src/mongo/db/s/reshard_collection_coordinator.cpp
index afb1a0f7ab4..28872ee8268 100644
--- a/src/mongo/db/s/reshard_collection_coordinator.cpp
+++ b/src/mongo/db/s/reshard_collection_coordinator.cpp
@@ -65,10 +65,10 @@ void notifyChangeStreamsOnReshardCollectionComplete(OperationContext* opCtx,
     cmdBuilder.append("unique", doc.getUnique().get_value_or(false));
     if (doc.getNumInitialChunks()) {
-        cmdBuilder.append("numInitialChunks", doc.getNumInitialChunks().get());
+        cmdBuilder.append("numInitialChunks", doc.getNumInitialChunks().value());
     }
     if (doc.getCollation()) {
-        cmdBuilder.append("collation", doc.getCollation().get());
+        cmdBuilder.append("collation", doc.getCollation().value());
     }
     if (doc.getZones()) {
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.cpp b/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.cpp
index 61eb1a620c4..0d517aed6b1 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.cpp
@@ -169,11 +169,11 @@ CoordinatorCommitMonitor::queryRemainingOperationTimeForRecipients() const {
         const auto remainingTime = extractOperationRemainingTime(shardResponse.data);
         // A recipient shard does not report the remaining operation time when there is no data
         // to copy and no oplog entry to apply.
-        if (remainingTime && remainingTime.get() < minRemainingTime) {
-            minRemainingTime = remainingTime.get();
+        if (remainingTime && remainingTime.value() < minRemainingTime) {
+            minRemainingTime = remainingTime.value();
         }
-        if (remainingTime && remainingTime.get() > maxRemainingTime) {
-            maxRemainingTime = remainingTime.get();
+        if (remainingTime && remainingTime.value() > maxRemainingTime) {
+            maxRemainingTime = remainingTime.value();
         }
     }
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_observer.cpp b/src/mongo/db/s/resharding/resharding_coordinator_observer.cpp
index da457d8eab3..6899a54e4f7 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_observer.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_observer.cpp
@@ -162,7 +162,7 @@ void ReshardingCoordinatorObserver::onReshardingParticipantTransition(
     const ReshardingCoordinatorDocument& updatedStateDoc) {
     stdx::lock_guard<Latch> lk(_mutex);
     if (auto abortReason = getAbortReasonIfExists(updatedStateDoc)) {
-        _onAbortOrStepdown(lk, abortReason.get());
+        _onAbortOrStepdown(lk, abortReason.value());
         // Don't exit early since the coordinator waits for all participants to report state
         // 'done'.
     }
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
index 181024995d2..a787cea9775 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
@@ -355,13 +355,14 @@ BSONObj createReshardingFieldsUpdateForOriginalNss(
             BSONObj setFields =
                 BSON("uuid" << coordinatorDoc.getReshardingUUID() << "key"
                             << coordinatorDoc.getReshardingKey().toBSON() << "lastmodEpoch"
-                            << newCollectionEpoch.get() << "lastmod"
+                            << newCollectionEpoch.value() << "lastmod"
                             << opCtx->getServiceContext()->getPreciseClockSource()->now()
                             << "reshardingFields.state"
                             << CoordinatorState_serializer(coordinatorDoc.getState()).toString()
                             << "reshardingFields.recipientFields" << recipientFields.toBSON());
             if (newCollectionTimestamp.has_value()) {
-                setFields = setFields.addFields(BSON("timestamp" << newCollectionTimestamp.get()));
+                setFields =
+                    setFields.addFields(BSON("timestamp" << newCollectionTimestamp.value()));
             }
             return BSON("$set" << setFields);
@@ -441,7 +442,7 @@ void writeToConfigCollectionsForTempNss(OperationContext* opCtx,
         case CoordinatorStateEnum::kPreparingToDonate: {
             // Insert new entry for the temporary nss into config.collections
             auto collType = resharding::createTempReshardingCollectionType(
-                opCtx, coordinatorDoc, chunkVersion.get(), collation.get());
+                opCtx, coordinatorDoc, chunkVersion.value(), collation.value());
             return BatchedCommandRequest::buildInsertOp(
                 CollectionType::ConfigNS, std::vector<BSONObj>{collType.toBSON()});
         }
@@ -465,11 +466,11 @@ void writeToConfigCollectionsForTempNss(OperationContext* opCtx,
                          "reshardingFields.state"
                          << CoordinatorState_serializer(nextState).toString()
                          << "reshardingFields.recipientFields.approxDocumentsToCopy"
-                         << coordinatorDoc.getApproxDocumentsToCopy().get()
+                         << coordinatorDoc.getApproxDocumentsToCopy().value()
                          << "reshardingFields.recipientFields.approxBytesToCopy"
-                         << coordinatorDoc.getApproxBytesToCopy().get()
+                         << coordinatorDoc.getApproxBytesToCopy().value()
                          << "reshardingFields.recipientFields.cloneTimestamp"
-                         << coordinatorDoc.getCloneTimestamp().get()
+                         << coordinatorDoc.getCloneTimestamp().value()
                          << "reshardingFields.recipientFields.donorShards"
                          << donorShardsBuilder.arr() << "lastmod"
                          << opCtx->getServiceContext()->getPreciseClockSource()->now())),
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
index a849cc5ca87..f7e4aaf9f0e 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
@@ -351,7 +351,7 @@ public:
                                 std::move(uuid),
                                 shardKey);
         if (reshardingFields)
-            collType.setReshardingFields(std::move(reshardingFields.get()));
+            collType.setReshardingFields(std::move(reshardingFields.value()));
         if (coordinatorDoc.getState() == CoordinatorStateEnum::kDone ||
             coordinatorDoc.getState() == CoordinatorStateEnum::kAborting) {
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
index a5236d91c5b..40e6a2296e6 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
@@ -128,7 +128,7 @@ protected:
                                 std::move(uuid),
                                 shardKey);
         if (reshardingFields)
-            collType.setReshardingFields(std::move(reshardingFields.get()));
+            collType.setReshardingFields(std::move(reshardingFields.value()));
         if (coordinatorDoc.getState() == CoordinatorStateEnum::kDone ||
             coordinatorDoc.getState() == CoordinatorStateEnum::kAborting) {
@@ -262,8 +262,8 @@ protected:
         ASSERT(coordinatorDoc.getActive());
         if (expectedCoordinatorDoc.getCloneTimestamp()) {
             ASSERT(coordinatorDoc.getCloneTimestamp());
-            ASSERT_EQUALS(coordinatorDoc.getCloneTimestamp().get(),
-                          expectedCoordinatorDoc.getCloneTimestamp().get());
+            ASSERT_EQUALS(coordinatorDoc.getCloneTimestamp().value(),
+                          expectedCoordinatorDoc.getCloneTimestamp().value());
         } else {
             ASSERT(!coordinatorDoc.getCloneTimestamp());
         }
@@ -271,8 +271,8 @@ protected:
         // Confirm the (non)existence of the CoordinatorDocument abortReason.
         if (expectedCoordinatorDoc.getAbortReason()) {
             ASSERT(coordinatorDoc.getAbortReason());
-            ASSERT_BSONOBJ_EQ(coordinatorDoc.getAbortReason().get(),
-                              expectedCoordinatorDoc.getAbortReason().get());
+            ASSERT_BSONOBJ_EQ(coordinatorDoc.getAbortReason().value(),
+                              expectedCoordinatorDoc.getAbortReason().value());
         } else {
             ASSERT(!coordinatorDoc.getAbortReason());
         }
@@ -297,8 +297,8 @@ protected:
             ASSERT(onDiskIt != onDiskDonorShards.end());
             if (it->getMutableState().getMinFetchTimestamp()) {
                 ASSERT(onDiskIt->getMutableState().getMinFetchTimestamp());
-                ASSERT_EQUALS(onDiskIt->getMutableState().getMinFetchTimestamp().get(),
-                              it->getMutableState().getMinFetchTimestamp().get());
+                ASSERT_EQUALS(onDiskIt->getMutableState().getMinFetchTimestamp().value(),
+                              it->getMutableState().getMinFetchTimestamp().value());
             } else {
                 ASSERT(!onDiskIt->getMutableState().getMinFetchTimestamp());
             }
@@ -346,7 +346,7 @@ protected:
             return;
         ASSERT(onDiskEntry.getReshardingFields());
-        auto onDiskReshardingFields = onDiskEntry.getReshardingFields().get();
+        auto onDiskReshardingFields = onDiskEntry.getReshardingFields().value();
         ASSERT(onDiskReshardingFields.getReshardingUUID() ==
                expectedReshardingFields->getReshardingUUID());
         ASSERT(onDiskReshardingFields.getState() == expectedReshardingFields->getState());
@@ -396,10 +396,10 @@ protected:
         ASSERT_EQUALS(onDiskEntry.getAllowMigrations(), expectedCollType->getAllowMigrations());
-        auto expectedReshardingFields = expectedCollType->getReshardingFields().get();
+        auto expectedReshardingFields = expectedCollType->getReshardingFields().value();
         ASSERT(onDiskEntry.getReshardingFields());
-        auto onDiskReshardingFields = onDiskEntry.getReshardingFields().get();
+        auto onDiskReshardingFields = onDiskEntry.getReshardingFields().value();
         ASSERT_EQUALS(onDiskReshardingFields.getReshardingUUID(),
                       expectedReshardingFields.getReshardingUUID());
         ASSERT(onDiskReshardingFields.getState() == expectedReshardingFields.getState());
@@ -410,8 +410,9 @@ protected:
         if (expectedReshardingFields.getRecipientFields()->getCloneTimestamp()) {
             ASSERT(onDiskReshardingFields.getRecipientFields()->getCloneTimestamp());
-            ASSERT_EQUALS(onDiskReshardingFields.getRecipientFields()->getCloneTimestamp().get(),
-                          expectedReshardingFields.getRecipientFields()->getCloneTimestamp().get());
+            ASSERT_EQUALS(
+                onDiskReshardingFields.getRecipientFields()->getCloneTimestamp().value(),
+                expectedReshardingFields.getRecipientFields()->getCloneTimestamp().value());
         } else {
             ASSERT(!onDiskReshardingFields.getRecipientFields()->getCloneTimestamp());
         }
diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
index e5bd8defdbd..7068918b875 100644
--- a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
@@ -405,7 +405,7 @@ TEST_F(ReshardingDonorRecipientCommonTest, CreateDonorServiceInstance) {
     ASSERT(donorStateMachine != boost::none);
-    donorStateMachine.get()->interrupt({ErrorCodes::InternalError, "Shut down for test"});
+    donorStateMachine.value()->interrupt({ErrorCodes::InternalError, "Shut down for test"});
 }
 TEST_F(ReshardingDonorRecipientCommonTest, CreateRecipientServiceInstance) {
@@ -432,7 +432,7 @@ TEST_F(ReshardingDonorRecipientCommonTest, CreateRecipientServiceInstance) {
     ASSERT(recipientStateMachine != boost::none);
-    recipientStateMachine.get()->interrupt({ErrorCodes::InternalError, "Shut down for test"});
+    recipientStateMachine.value()->interrupt({ErrorCodes::InternalError, "Shut down for test"});
 }
 TEST_F(ReshardingDonorRecipientCommonTest,
diff --git a/src/mongo/db/s/resharding/resharding_donor_service.cpp b/src/mongo/db/s/resharding/resharding_donor_service.cpp
index ac4f30d216c..367c594ab7b 100644
--- a/src/mongo/db/s/resharding/resharding_donor_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_service.cpp
@@ -220,7 +220,7 @@ ReshardingDonorService::DonorStateMachine::DonorStateMachine(
       _metadata{donorDoc.getCommonReshardingMetadata()},
       _recipientShardIds{donorDoc.getRecipientShards()},
       _donorCtx{donorDoc.getMutableState()},
-      _donorMetricsToRestore{donorDoc.getMetrics() ? donorDoc.getMetrics().get()
+      _donorMetricsToRestore{donorDoc.getMetrics() ? donorDoc.getMetrics().value()
                                                    : ReshardingDonorMetrics()},
       _externalState{std::move(externalState)},
      _markKilledExecutor(std::make_shared<ThreadPool>([] {
@@ -512,7 +512,7 @@ boost::optional<BSONObj> ReshardingDonorService::DonorStateMachine::reportForCur
 void ReshardingDonorService::DonorStateMachine::onReshardingFieldsChanges(
     OperationContext* opCtx, const TypeCollectionReshardingFields& reshardingFields) {
     if (reshardingFields.getState() == CoordinatorStateEnum::kAborting) {
-        abort(reshardingFields.getUserCanceled().get());
+        abort(reshardingFields.getUserCanceled().value());
         return;
     }
diff --git a/src/mongo/db/s/resharding/resharding_donor_service_test.cpp b/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
index d4f5046e340..cfe93c7e8cf 100644
--- a/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
@@ -772,7 +772,7 @@ TEST_F(ReshardingDonorServiceTest, RestoreMetricsOnKBlockingWrites) {
         donor
             ->reportForCurrentOp(MongoProcessInterface::CurrentOpConnectionsMode::kExcludeIdle,
                                  MongoProcessInterface::CurrentOpSessionsMode::kExcludeIdle)
-            .get();
+            .value();
     ASSERT_EQ(currOp.getStringField("donorState"),
               DonorState_serializer(DonorStateEnum::kBlockingWrites));
     ASSERT_GTE(currOp.getField("totalOperationTimeElapsedSecs").Long(), opTimeDurationSecs);
diff --git a/src/mongo/db/s/resharding/resharding_metrics.cpp b/src/mongo/db/s/resharding/resharding_metrics.cpp
index 7595559831e..69d2e899d84 100644
--- a/src/mongo/db/s/resharding/resharding_metrics.cpp
+++ b/src/mongo/db/s/resharding/resharding_metrics.cpp
@@ -62,7 +62,7 @@ BSONObj createOriginalCommand(const NamespaceString& nss, BSONObj shardKey) {
 Date_t readStartTime(const CommonReshardingMetadata& metadata, ClockSource* fallbackSource) {
     if (const auto& startTime = metadata.getStartTime()) {
-        return startTime.get();
+        return startTime.value();
     } else {
         return fallbackSource->now();
     }
diff --git a/src/mongo/db/s/resharding/resharding_op_observer.cpp b/src/mongo/db/s/resharding/resharding_op_observer.cpp
index 3441f7c1eea..d6321abb360 100644
--- a/src/mongo/db/s/resharding/resharding_op_observer.cpp
+++ b/src/mongo/db/s/resharding/resharding_op_observer.cpp
@@ -53,8 +53,7 @@ std::shared_ptr<ReshardingCoordinatorObserver> getReshardingCoordinatorObserver(
     auto instance =
         ReshardingCoordinatorService::ReshardingCoordinator::lookup(opCtx, service, reshardingId);
-    iassert(
-        5400001, "ReshardingCoordinatorService instance does not exist", instance.is_initialized());
+    iassert(5400001, "ReshardingCoordinatorService instance does not exist", instance.has_value());
     return (*instance)->getObserver();
 }
@@ -62,7 +61,7 @@ std::shared_ptr<ReshardingCoordinatorObserver> getReshardingCoordinatorObserver(
 boost::optional<Timestamp> parseNewMinFetchTimestampValue(const BSONObj& obj) {
     auto doc = ReshardingDonorDocument::parse(IDLParserContext("Resharding"), obj);
     if (doc.getMutableState().getState() == DonorStateEnum::kDonatingInitialData) {
-        return doc.getMutableState().getMinFetchTimestamp().get();
+        return doc.getMutableState().getMinFetchTimestamp().value();
     } else {
         return boost::none;
     }
@@ -114,8 +113,8 @@ boost::optional<Timestamp> _calculatePin(OperationContext* opCtx) {
     Timestamp ret = Timestamp::max();
     auto cursor = collection->getCursor(opCtx);
     for (auto doc = cursor->next(); doc; doc = cursor->next()) {
-        if (auto fetchTs = parseNewMinFetchTimestampValue(doc.get().data.toBson()); fetchTs) {
-            ret = std::min(ret, fetchTs.get());
+        if (auto fetchTs = parseNewMinFetchTimestampValue(doc.value().data.toBson()); fetchTs) {
+            ret = std::min(ret, fetchTs.value());
         }
     }
@@ -136,7 +135,7 @@ void _doPin(OperationContext* opCtx) {
     }
     StatusWith<Timestamp> res = storageEngine->pinOldestTimestamp(
-        opCtx, ReshardingHistoryHook::kName.toString(), pin.get(), false);
+        opCtx, ReshardingHistoryHook::kName.toString(), pin.value(), false);
     if (!res.isOK()) {
         if (replCoord->getReplicationMode() != repl::ReplicationCoordinator::Mode::modeReplSet) {
             // The pin has failed, but we're in standalone mode. Ignore the error.
@@ -155,7 +154,7 @@ void _doPin(OperationContext* opCtx) {
             // is the most robust path forward. Ignore this case.
             LOGV2_WARNING(5384104,
                           "This node is unable to pin history for resharding",
-                          "requestedTs"_attr = pin.get());
+                          "requestedTs"_attr = pin.value());
         } else {
             // For recovery cases we also ignore the error. The expected scenario is the pin
             // request is no longer needed, but the write to delete the pin was rolled
@@ -164,7 +163,7 @@ void _doPin(OperationContext* opCtx) {
             // consequence to observing this error. Ignore this case.
             LOGV2(5384103,
                   "The requested pin was unavailable, but should also be unnecessary",
-                  "requestedTs"_attr = pin.get());
+                  "requestedTs"_attr = pin.value());
         }
     }
 }
diff --git a/src/mongo/db/s/resharding/resharding_oplog_application.cpp b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
index 9a643ef819e..d0214736e61 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_application.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
@@ -333,7 +333,7 @@ void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCt
     BSONObj oField = op.getObject();
     BSONObj o2Field;
     if (op.getObject2())
-        o2Field = op.getObject2().get();
+        o2Field = op.getObject2().value();
     // If the 'o2' field does not have an _id, the oplog entry is corrupted.
     auto idField = o2Field["_id"];
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
index 4e6c32ac1e2..268147678c3 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
@@ -275,7 +275,7 @@ public:
     }
     const ChunkManager& chunkManager() {
-        return _cm.get();
+        return _cm.value();
     }
     const std::vector<NamespaceString>& stashCollections() {
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service.cpp b/src/mongo/db/s/resharding/resharding_recipient_service.cpp
index 9a63fe0daea..5879e241f54 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service.cpp
@@ -520,7 +520,7 @@ boost::optional<BSONObj> ReshardingRecipientService::RecipientStateMachine::repo
 void ReshardingRecipientService::RecipientStateMachine::onReshardingFieldsChanges(
     OperationContext* opCtx, const TypeCollectionReshardingFields& reshardingFields) {
     if (reshardingFields.getState() == CoordinatorStateEnum::kAborting) {
-        abort(reshardingFields.getUserCanceled().get());
+        abort(reshardingFields.getUserCanceled().value());
         return;
     }
@@ -662,7 +662,7 @@ void ReshardingRecipientService::RecipientStateMachine::_ensureDataReplicationSt
             _recipientService->getInstanceCleanupExecutor(),
             abortToken,
             factory,
-            txnCloneTime.get())
+            txnCloneTime.value())
             .share();
     stdx::lock_guard lk(_mutex);
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
index d054ae355ab..b9809f93d89 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
@@ -869,7 +869,7 @@ TEST_F(ReshardingRecipientServiceTest, RestoreMetricsAfterStepUp) {
             ->reportForCurrentOp(
                 MongoProcessInterface::CurrentOpConnectionsMode::kExcludeIdle,
                 MongoProcessInterface::CurrentOpSessionsMode::kExcludeIdle)
-            .get();
+            .value();
     ASSERT_EQ(currOp.getField("documentsCopied").numberLong(), 1L);
     ASSERT_EQ(currOp.getField("bytesCopied").numberLong(), (long)reshardedDoc.objsize());
@@ -880,7 +880,7 @@ TEST_F(ReshardingRecipientServiceTest, RestoreMetricsAfterStepUp) {
             ->reportForCurrentOp(
                 MongoProcessInterface::CurrentOpConnectionsMode::kExcludeIdle,
                 MongoProcessInterface::CurrentOpSessionsMode::kExcludeIdle)
-            .get();
+            .value();
     ASSERT_EQ(currOp.getField("documentsCopied").numberLong(), 1L);
     ASSERT_EQ(currOp.getField("bytesCopied").numberLong(), (long)reshardedDoc.objsize());
diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
index 223da448f54..0582a78037c 100644
--- a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
@@ -262,9 +262,9 @@ protected:
         auto bsonOplog = client.findOne(std::move(findCmd));
         ASSERT(!bsonOplog.isEmpty());
         auto oplogEntry = repl::MutableOplogEntry::parse(bsonOplog).getValue();
-        ASSERT_EQ(oplogEntry.getTxnNumber().get(), txnNum);
+        ASSERT_EQ(oplogEntry.getTxnNumber().value(), txnNum);
         ASSERT_BSONOBJ_EQ(oplogEntry.getObject(), BSON("$sessionMigrateInfo" << 1));
-        ASSERT_BSONOBJ_EQ(oplogEntry.getObject2().get(), BSON("$incompleteOplogHistory" << 1));
+        ASSERT_BSONOBJ_EQ(oplogEntry.getObject2().value(), BSON("$incompleteOplogHistory" << 1));
         ASSERT(oplogEntry.getOpType() == repl::OpTypeEnum::kNoop);
         auto
bsonTxn = @@ -355,8 +355,8 @@ protected: std::shared_ptr<executor::ThreadPoolTaskExecutor> cleanupExecutor, boost::optional<CancellationToken> customCancelToken = boost::none) { // Allows callers to control the cancellation of the cloner's run() function when specified. - auto cancelToken = customCancelToken.is_initialized() - ? customCancelToken.get() + auto cancelToken = customCancelToken.has_value() + ? customCancelToken.value() : operationContext()->getCancellationToken(); auto cancelableOpCtxExecutor = std::make_shared<ThreadPool>([] { diff --git a/src/mongo/db/s/resharding/resharding_util.cpp b/src/mongo/db/s/resharding/resharding_util.cpp index 4ff2c9e3643..f7d20c6d813 100644 --- a/src/mongo/db/s/resharding/resharding_util.cpp +++ b/src/mongo/db/s/resharding/resharding_util.cpp @@ -175,7 +175,7 @@ void checkForHolesAndOverlapsInChunks(std::vector<ReshardedChunk>& chunks, if (prevMax) { uassert(ErrorCodes::BadValue, "Chunk ranges must be contiguous", - SimpleBSONObjComparator::kInstance.evaluate(prevMax.get() == chunk.getMin())); + SimpleBSONObjComparator::kInstance.evaluate(prevMax.value() == chunk.getMin())); } prevMax = boost::optional<BSONObj>(chunk.getMax()); } @@ -202,7 +202,7 @@ Timestamp getHighestMinFetchTimestamp(const std::vector<DonorShardEntry>& donorS uassert(4957300, "All donors must have a minFetchTimestamp, but donor {} does not."_format( StringData{donor.getId()}), - donorFetchTimestamp.is_initialized()); + donorFetchTimestamp.has_value()); if (maxMinFetchTimestamp < donorFetchTimestamp.value()) { maxMinFetchTimestamp = donorFetchTimestamp.value(); } @@ -221,7 +221,7 @@ void checkForOverlappingZones(std::vector<ReshardingZoneType>& zones) { if (prevMax) { uassert(ErrorCodes::BadValue, "Zone ranges must not overlap", - SimpleBSONObjComparator::kInstance.evaluate(prevMax.get() <= zone.getMin())); + SimpleBSONObjComparator::kInstance.evaluate(prevMax.value() <= zone.getMin())); } prevMax = boost::optional<BSONObj>(zone.getMax()); } diff --git a/src/mongo/db/s/session_catalog_migration_destination.cpp b/src/mongo/db/s/session_catalog_migration_destination.cpp index d7511dee872..8c9a1b8cb32 100644 --- a/src/mongo/db/s/session_catalog_migration_destination.cpp +++ b/src/mongo/db/s/session_catalog_migration_destination.cpp @@ -118,7 +118,7 @@ void setPrePostImageTs(const ProcessOplogResult& lastResult, repl::MutableOplogE // the appropriate no-op. This code on the destination patches up the CRUD operation oplog entry // to look like the classic format. if (entry->getNeedsRetryImage()) { - switch (entry->getNeedsRetryImage().get()) { + switch (entry->getNeedsRetryImage().value()) { case repl::RetryImageEnum::kPreImage: entry->setPreImageOpTime({repl::OpTime()}); break; diff --git a/src/mongo/db/s/session_catalog_migration_source.cpp b/src/mongo/db/s/session_catalog_migration_source.cpp index afa613481c6..ec7513d7a84 100644 --- a/src/mongo/db/s/session_catalog_migration_source.cpp +++ b/src/mongo/db/s/session_catalog_migration_source.cpp @@ -420,7 +420,7 @@ bool SessionCatalogMigrationSource::shouldSkipOplogEntry(const mongo::repl::Oplo // prevent a multi-statement transaction from being retried as a retryable write. 
return false; } - auto shardKey = shardKeyPattern.extractShardKeyFromOplogEntry(object2.get()); + auto shardKey = shardKeyPattern.extractShardKeyFromOplogEntry(object2.value()); return !chunkRange.containsKey(shardKey); } @@ -506,7 +506,7 @@ bool SessionCatalogMigrationSource::_handleWriteHistory(WithLock lk, OperationCo // Skipping an entry here will also result in the pre/post images to also not be // sent in the migration as they're handled by 'fetchPrePostImageOplog' below. - if (shouldSkipOplogEntry(nextOplog.get(), _keyPattern, _chunkRange)) { + if (shouldSkipOplogEntry(nextOplog.value(), _keyPattern, _chunkRange)) { continue; } @@ -770,8 +770,8 @@ boost::optional<repl::OplogEntry> SessionCatalogMigrationSource::SessionOplogIte // Otherwise, skip the record by returning boost::none. auto result = [&]() -> boost::optional<repl::OplogEntry> { if (!_record.getState() || - _record.getState().get() == DurableTxnStateEnum::kCommitted || - _record.getState().get() == DurableTxnStateEnum::kPrepared) { + _record.getState().value() == DurableTxnStateEnum::kCommitted || + _record.getState().value() == DurableTxnStateEnum::kPrepared) { return makeSentinelOplogEntry( _record.getSessionId(), _record.getTxnNum(), diff --git a/src/mongo/db/s/session_catalog_migration_source_test.cpp b/src/mongo/db/s/session_catalog_migration_source_test.cpp index d723c069c2a..62bba20660b 100644 --- a/src/mongo/db/s/session_catalog_migration_source_test.cpp +++ b/src/mongo/db/s/session_catalog_migration_source_test.cpp @@ -181,7 +181,7 @@ repl::OplogEntry makeRewrittenOplogInSession(repl::OpTime opTime, *original.getTxnNumber(), original.getStatementIds(), // statement ids original.getPrevWriteOpTimeInTransaction() - .get()); // optime of previous write within same transaction + .value()); // optime of previous write within same transaction }; repl::DurableReplOperation makeDurableReplOp( @@ -722,8 +722,8 @@ TEST_F(SessionCatalogMigrationSourceTest, ForgeImageEntriesWhenFetchingEntriesWi // Check that the key fields are what we expect. The destination will overwrite any unneeded // fields when it processes the incoming entries. 
ASSERT_BSONOBJ_EQ(preImage, nextOplogResult.oplog->getObject()); - ASSERT_EQUALS(txnNumber, nextOplogResult.oplog->getTxnNumber().get()); - ASSERT_EQUALS(sessionId, nextOplogResult.oplog->getSessionId().get()); + ASSERT_EQUALS(txnNumber, nextOplogResult.oplog->getTxnNumber().value()); + ASSERT_EQUALS(sessionId, nextOplogResult.oplog->getSessionId().value()); ASSERT_EQUALS("n", repl::OpType_serializer(nextOplogResult.oplog->getOpType())); ASSERT_EQ(entry.getStatementIds().size(), nextOplogResult.oplog->getStatementIds().size()); for (size_t i = 0; i < entry.getStatementIds().size(); i++) { diff --git a/src/mongo/db/s/shard_key_index_util.cpp b/src/mongo/db/s/shard_key_index_util.cpp index 1cdd4f99008..7050abedb5e 100644 --- a/src/mongo/db/s/shard_key_index_util.cpp +++ b/src/mongo/db/s/shard_key_index_util.cpp @@ -186,7 +186,7 @@ bool isLastShardKeyIndex(OperationContext* opCtx, const BSONObj& shardKey) { return !_findShardKeyPrefixedIndex( opCtx, collection, indexCatalog, indexName, shardKey, false /* requireSingleKey */) - .is_initialized(); + .has_value(); } boost::optional<ShardKeyIndex> findShardKeyPrefixedIndex(OperationContext* opCtx, diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp index a111b9bf592..f3c2a80f4a0 100644 --- a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp +++ b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp @@ -212,7 +212,7 @@ ShardServerCatalogCacheLoaderTest::setUpChunkLoaderWithFiveChunks() { ASSERT_EQUALS(collAndChunksRes.epoch, collectionType.getEpoch()); ASSERT_EQUALS(collAndChunksRes.changedChunks.size(), 5UL); - ASSERT(!collAndChunksRes.timeseriesFields.is_initialized()); + ASSERT(!collAndChunksRes.timeseriesFields.has_value()); for (unsigned int i = 0; i < collAndChunksRes.changedChunks.size(); ++i) { ASSERT_BSONOBJ_EQ(collAndChunksRes.changedChunks[i].toShardBSON(), chunks[i].toShardBSON()); } @@ -454,7 +454,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, TimeseriesFieldsAreProperlyPropagatedO _remoteLoaderMock->setChunkRefreshReturnValue(chunks); auto collAndChunksRes = _shardLoader->getChunksSince(kNss, ChunkVersion::UNSHARDED()).get(); - ASSERT(collAndChunksRes.timeseriesFields.is_initialized()); + ASSERT(collAndChunksRes.timeseriesFields.has_value()); ASSERT(collAndChunksRes.timeseriesFields->getGranularity() == BucketGranularityEnum::Seconds); } @@ -475,7 +475,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, TimeseriesFieldsAreProperlyPropagatedO _remoteLoaderMock->setChunkRefreshReturnValue(std::vector{lastChunk}); auto collAndChunksRes = _shardLoader->getChunksSince(kNss, maxLoaderVersion).get(); - ASSERT(collAndChunksRes.timeseriesFields.is_initialized()); + ASSERT(collAndChunksRes.timeseriesFields.has_value()); ASSERT(collAndChunksRes.timeseriesFields->getGranularity() == BucketGranularityEnum::Hours); } } diff --git a/src/mongo/db/s/sharding_ddl_coordinator.cpp b/src/mongo/db/s/sharding_ddl_coordinator.cpp index 7d880424df3..75475adfd78 100644 --- a/src/mongo/db/s/sharding_ddl_coordinator.cpp +++ b/src/mongo/db/s/sharding_ddl_coordinator.cpp @@ -312,7 +312,7 @@ SemiFuture<void> ShardingDDLCoordinator::run(std::shared_ptr<executor::ScopedTas }) .then([this, executor, token, anchor = shared_from_this()] { if (const auto bucketNss = metadata().getBucketNss()) { - return _acquireLockAsync(executor, token, bucketNss.get().ns()); + return _acquireLockAsync(executor, token, bucketNss.value().ns()); } return ExecutorFuture<void>(**executor); }) diff --git 
a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp index 97c4118f101..9f1b2e26dd2 100644 --- a/src/mongo/db/s/sharding_initialization_mongod.cpp +++ b/src/mongo/db/s/sharding_initialization_mongod.cpp @@ -166,7 +166,7 @@ private: return; } updateState->updateInProgress = true; - update = updateState->nextUpdateToSend.get(); + update = updateState->nextUpdateToSend.value(); updateState->nextUpdateToSend = boost::none; } diff --git a/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp b/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp index 16d75a2bfb9..28e893755ef 100644 --- a/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp +++ b/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp @@ -193,8 +193,9 @@ public: if (optRenameCollectionParticipant) { uassert(ErrorCodes::CommandFailed, "Provided UUID does not match", - optRenameCollectionParticipant.get()->sourceUUID() == req.getSourceUUID()); - optRenameCollectionParticipant.get()->getUnblockCrudFuture().get(opCtx); + optRenameCollectionParticipant.value()->sourceUUID() == + req.getSourceUUID()); + optRenameCollectionParticipant.value()->getUnblockCrudFuture().get(opCtx); } // Since no write that generated a retryable write oplog entry with this sessionId diff --git a/src/mongo/db/s/split_vector.cpp b/src/mongo/db/s/split_vector.cpp index 2bc0b8507de..de43200ad8b 100644 --- a/src/mongo/db/s/split_vector.cpp +++ b/src/mongo/db/s/split_vector.cpp @@ -124,12 +124,12 @@ std::vector<BSONObj> splitVector(OperationContext* opCtx, } // We need a maximum size for the chunk. - if (!maxChunkSizeBytes || maxChunkSizeBytes.get() <= 0) { + if (!maxChunkSizeBytes || maxChunkSizeBytes.value() <= 0) { uasserted(ErrorCodes::InvalidOptions, "need to specify the desired max chunk size"); } // If there's not enough data for more than one chunk, no point continuing. - if (dataSize < maxChunkSizeBytes.get() || recCount == 0) { + if (dataSize < maxChunkSizeBytes.value() || recCount == 0) { std::vector<BSONObj> emptyVector; return emptyVector; } @@ -146,18 +146,18 @@ std::vector<BSONObj> splitVector(OperationContext* opCtx, // maxChunkObjects, if provided. const long long avgRecSize = dataSize / recCount; - long long keyCount = maxChunkSizeBytes.get() / (2 * avgRecSize); + long long keyCount = maxChunkSizeBytes.value() / (2 * avgRecSize); - if (maxChunkObjects.get() && (maxChunkObjects.get() < keyCount)) { + if (maxChunkObjects.value() && (maxChunkObjects.value() < keyCount)) { LOGV2(22108, "Limiting the number of documents per chunk to {maxChunkObjects} based " "on the maxChunkObjects parameter for split vector command (compared to maximum " "possible: {maxPossibleDocumentsPerChunk})", "Limiting the number of documents per chunk for split vector command based on " "the maxChunksObject parameter", - "maxChunkObjects"_attr = maxChunkObjects.get(), + "maxChunkObjects"_attr = maxChunkObjects.value(), "maxPossibleDocumentsPerChunk"_attr = keyCount); - keyCount = maxChunkObjects.get(); + keyCount = maxChunkObjects.value(); } // @@ -280,7 +280,8 @@ std::vector<BSONObj> splitVector(OperationContext* opCtx, } // Stop if we have enough split points. 
- if (maxSplitPoints && maxSplitPoints.get() && (numChunks >= maxSplitPoints.get())) { + if (maxSplitPoints && maxSplitPoints.value() && + (numChunks >= maxSplitPoints.value())) { LOGV2(22111, "Max number of requested split points reached ({numSplitPoints}) before " "the end of chunk {namespace} {minKey} -->> {maxKey}", diff --git a/src/mongo/db/s/transaction_coordinator_factory_mongod.cpp b/src/mongo/db/s/transaction_coordinator_factory_mongod.cpp index a22b2bc8019..90a8caccc2b 100644 --- a/src/mongo/db/s/transaction_coordinator_factory_mongod.cpp +++ b/src/mongo/db/s/transaction_coordinator_factory_mongod.cpp @@ -41,7 +41,7 @@ namespace { void createTransactionCoordinatorImpl(OperationContext* opCtx, TxnNumber clientTxnNumber, boost::optional<TxnRetryCounter> clientTxnRetryCounter) { - auto clientLsid = opCtx->getLogicalSessionId().get(); + auto clientLsid = opCtx->getLogicalSessionId().value(); auto clockSource = opCtx->getServiceContext()->getFastClockSource(); // If this shard has been selected as the coordinator, set up the coordinator state diff --git a/src/mongo/db/s/type_lockpings.cpp b/src/mongo/db/s/type_lockpings.cpp index 345b9aa9806..60b4721aad2 100644 --- a/src/mongo/db/s/type_lockpings.cpp +++ b/src/mongo/db/s/type_lockpings.cpp @@ -66,11 +66,11 @@ StatusWith<LockpingsType> LockpingsType::fromBSON(const BSONObj& source) { } Status LockpingsType::validate() const { - if (!_process.is_initialized() || _process->empty()) { + if (!_process.has_value() || _process->empty()) { return {ErrorCodes::NoSuchKey, str::stream() << "missing " << process.name() << " field"}; } - if (!_ping.is_initialized()) { + if (!_ping.has_value()) { return {ErrorCodes::NoSuchKey, str::stream() << "missing " << ping.name() << " field"}; } diff --git a/src/mongo/db/s/type_locks.cpp b/src/mongo/db/s/type_locks.cpp index 59a9b6e44c2..4571e4a6635 100644 --- a/src/mongo/db/s/type_locks.cpp +++ b/src/mongo/db/s/type_locks.cpp @@ -106,11 +106,11 @@ StatusWith<LocksType> LocksType::fromBSON(const BSONObj& source) { } Status LocksType::validate() const { - if (!_name.is_initialized() || _name->empty()) { + if (!_name.has_value() || _name->empty()) { return {ErrorCodes::NoSuchKey, str::stream() << "missing " << name.name() << " field"}; } - if (!_state.is_initialized()) { + if (!_state.has_value()) { return {ErrorCodes::NoSuchKey, str::stream() << "missing " << state.name() << " field"}; } @@ -124,21 +124,21 @@ Status LocksType::validate() const { // if the lock is locked, check the remaining fields if (lockState != State::UNLOCKED) { - if (!_process.is_initialized() || _process->empty()) { + if (!_process.has_value() || _process->empty()) { return {ErrorCodes::NoSuchKey, str::stream() << "missing " << process.name() << " field"}; } - if (!_lockID.is_initialized()) { + if (!_lockID.has_value()) { return {ErrorCodes::NoSuchKey, str::stream() << "missing " << lockID.name() << " field"}; } - if (!_who.is_initialized() || _who->empty()) { + if (!_who.has_value() || _who->empty()) { return {ErrorCodes::NoSuchKey, str::stream() << "missing " << who.name() << " field"}; } - if (!_why.is_initialized() || _why->empty()) { + if (!_why.has_value() || _why->empty()) { return {ErrorCodes::NoSuchKey, str::stream() << "missing " << why.name() << " field"}; } } |
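The whole commit applies one mechanical rewrite to boost::optional call sites: is_initialized() becomes has_value(), and get() becomes value(). The spellings are not merely cosmetic: get() guards an empty optional only with BOOST_ASSERT, so an empty access in a release build is undefined behavior, whereas value() always checks and throws boost::bad_optional_access, matching the std::optional vocabulary. The following is a minimal standalone sketch of that difference; it is not code from the MongoDB tree, and it assumes Boost 1.68 or newer, where boost::optional provides has_value().

// sketch.cpp - contrast the old and new boost::optional accessors.
// Hypothetical example, not part of the commit; assumes Boost >= 1.68.
#include <boost/optional.hpp>
#include <boost/optional/bad_optional_access.hpp>
#include <iostream>

int main() {
    boost::optional<int> empty;  // default-constructed: holds no value

    // Old spelling: is_initialized() reports presence. Its companion get()
    // checks emptiness only via BOOST_ASSERT, i.e. UB in release builds.
    std::cout << std::boolalpha << empty.is_initialized() << '\n';  // false

    // New spellings, aligned with std::optional: has_value() and value().
    std::cout << empty.has_value() << '\n';  // false
    try {
        int x = empty.value();  // always checked: throws when empty
        (void)x;
    } catch (const boost::bad_optional_access& e) {
        std::cout << "caught: " << e.what() << '\n';  // deterministic failure
    }
    return 0;
}

On the non-empty path both accessors return a reference to the contained value, so the rewritten call sites behave identically except in how an empty optional fails.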