From eda533b781990de451847a8bff5d22176fb9ea4b Mon Sep 17 00:00:00 2001
From: Allison Easton
Date: Fri, 26 Aug 2022 08:53:28 +0000
Subject: SERVER-68891 StaleConfigInfo should use shard version

---
 src/mongo/db/catalog_raii.cpp                      | 12 +++---
 src/mongo/db/db_raii.cpp                           |  2 +-
 src/mongo/db/exec/batched_delete_stage.cpp         |  4 +-
 src/mongo/db/exec/delete_stage.cpp                 |  8 ++--
 src/mongo/db/exec/update_stage.cpp                 |  8 ++--
 .../db/pipeline/dispatch_shard_pipeline_test.cpp   | 22 ++++++-----
 .../db/pipeline/document_source_graph_lookup.cpp   |  2 +-
 src/mongo/db/pipeline/document_source_lookup.cpp   |  2 +-
 .../shardsvr_process_interface.cpp                 |  7 +++-
 src/mongo/db/pipeline/sharded_union_test.cpp       |  8 ++--
 src/mongo/db/s/balancer/balancer.cpp               |  2 +-
 .../db/s/chunk_operation_precondition_checks.cpp   | 38 +++++++++---------
 src/mongo/db/s/collection_sharding_runtime.cpp     | 15 +++----
 ...gration_destination_manager_legacy_commands.cpp |  2 +-
 .../db/s/shard_filtering_metadata_refresh.cpp      |  4 +-
 src/mongo/db/s/shard_filtering_metadata_refresh.h  |  5 ++-
 src/mongo/db/s/shardsvr_merge_chunks_command.cpp   | 10 ++++-
 src/mongo/db/s/split_chunk.cpp                     | 20 ++++++----
 src/mongo/s/append_raw_responses_test.cpp          | 20 +++++-----
 src/mongo/s/catalog_cache.cpp                      |  2 +-
 src/mongo/s/catalog_cache.h                        |  3 +-
 src/mongo/s/catalog_cache_test.cpp                 |  7 ++--
 src/mongo/s/stale_exception.cpp                    | 22 ++++-------
 src/mongo/s/stale_exception.h                      | 10 ++---
 src/mongo/s/stale_exception_test.cpp               |  6 +--
 src/mongo/s/stale_shard_version_helpers_test.cpp   |  9 +++--
 src/mongo/s/transaction_router_test.cpp            |  2 +-
 src/mongo/s/write_ops/batch_write_exec_test.cpp    | 46 +++++++++++-----------
 src/mongo/s/write_ops/batch_write_op_test.cpp      | 14 +++----
 src/mongo/s/write_ops/batched_command_request.cpp  |  2 +-
 .../s/write_ops/batched_command_response_test.cpp  |  4 +-
 src/mongo/s/write_ops/write_op_test.cpp            | 10 ++---
 32 files changed, 173 insertions(+), 155 deletions(-)

diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp
index 4ba0dd3e650..30e9224024a 100644
--- a/src/mongo/db/catalog_raii.cpp
+++ b/src/mongo/db/catalog_raii.cpp
@@ -298,20 +298,20 @@ AutoGetCollection::AutoGetCollection(
         uassert(StaleConfigInfo(_resolvedNss,
                                 *receivedShardVersion,
-                                ChunkVersion::UNSHARDED() /* wantedVersion */,
+                                ShardVersion::UNSHARDED() /* wantedVersion */,
                                 ShardingState::get(opCtx)->shardId()),
                 str::stream() << "Namespace " << _resolvedNss << " is a view therefore the shard "
                               << "version attached to the request must be unset or UNSHARDED",
-                !receivedShardVersion || *receivedShardVersion == ChunkVersion::UNSHARDED());
+                !receivedShardVersion || *receivedShardVersion == ShardVersion::UNSHARDED());

         return;
     }

     // There is neither a collection nor a view for the namespace, so if we reached to this point
     // there are the following possibilities depending on the received shard version:
-    // 1. ChunkVersion::UNSHARDED: The request comes from a router and the operation entails the
+    // 1. ShardVersion::UNSHARDED: The request comes from a router and the operation entails the
     //    implicit creation of an unsharded collection. We can continue.
-    // 2. ChunkVersion::IGNORED: The request comes from a router that broadcasted the same to all
+    // 2. ShardVersion::IGNORED: The request comes from a router that broadcasted the same to all
     //    shards, but this shard doesn't own any chunks for the collection. We can continue.
     // 3. boost::none: The request comes from client directly connected to the shard. We can
     //    continue.
@@ -325,8 +325,8 @@ AutoGetCollection::AutoGetCollection(
                             ShardingState::get(opCtx)->shardId()),
             str::stream() << "No metadata for namespace " << _resolvedNss << " therefore the shard "
                           << "version attached to the request must be unset, UNSHARDED or IGNORED",
-            !receivedShardVersion || *receivedShardVersion == ChunkVersion::UNSHARDED() ||
-                *receivedShardVersion == ChunkVersion::IGNORED());
+            !receivedShardVersion || *receivedShardVersion == ShardVersion::UNSHARDED() ||
+                *receivedShardVersion == ShardVersion::IGNORED());
 }

 Collection* AutoGetCollection::getWritableCollection(OperationContext* opCtx) {
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index eb7d3c2e3d6..1ae023eccbd 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -853,7 +853,7 @@ AutoGetCollectionForReadCommandLockFree::AutoGetCollectionForReadCommandLockFree
     while (_autoCollForReadCommandBase->getCollection() &&
            _autoCollForReadCommandBase->getCollection().isSharded() && receivedShardVersion &&
-           receivedShardVersion.value() == ChunkVersion::UNSHARDED()) {
+           receivedShardVersion.value() == ShardVersion::UNSHARDED()) {
         reachedAutoGetLockFreeShardConsistencyRetry.executeIf(
             [&](auto&) { reachedAutoGetLockFreeShardConsistencyRetry.pauseWhileSet(opCtx); },
             [&](const BSONObj& data) {
diff --git a/src/mongo/db/exec/batched_delete_stage.cpp b/src/mongo/db/exec/batched_delete_stage.cpp
index dbb919fedba..63242ad5206 100644
--- a/src/mongo/db/exec/batched_delete_stage.cpp
+++ b/src/mongo/db/exec/batched_delete_stage.cpp
@@ -276,8 +276,8 @@ PlanStage::StageState BatchedDeleteStage::_deleteBatch(WorkingSetID* out) {
             return ret;
         }
     } catch (const ExceptionFor<ErrorCodes::StaleConfig>& ex) {
-        if (ex->getVersionReceived() == ChunkVersion::IGNORED() && ex->getCriticalSectionSignal()) {
-            // If ChunkVersion is IGNORED and we encountered a critical section, then yield, wait
+        if (ex->getVersionReceived() == ShardVersion::IGNORED() && ex->getCriticalSectionSignal()) {
+            // If ShardVersion is IGNORED and we encountered a critical section, then yield, wait
             // for critical section to finish and then we'll resume the write from the point we had
             // left. We do this to prevent large multi-writes from repeatedly failing due to
             // StaleConfig and exhausting the mongos retry attempts.
diff --git a/src/mongo/db/exec/delete_stage.cpp b/src/mongo/db/exec/delete_stage.cpp
index e992e7aa51f..57368624775 100644
--- a/src/mongo/db/exec/delete_stage.cpp
+++ b/src/mongo/db/exec/delete_stage.cpp
@@ -217,9 +217,9 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
             writeToOrphan = true;
         }
     } catch (const ExceptionFor<ErrorCodes::StaleConfig>& ex) {
-        if (ex->getVersionReceived() == ChunkVersion::IGNORED() &&
+        if (ex->getVersionReceived() == ShardVersion::IGNORED() &&
             ex->getCriticalSectionSignal()) {
-            // If ChunkVersion is IGNORED and we encountered a critical section, then yield,
+            // If ShardVersion is IGNORED and we encountered a critical section, then yield,
             // wait for the critical section to finish and then we'll resume the write from the
             // point we had left. We do this to prevent large multi-writes from repeatedly
             // failing due to StaleConfig and exhausting the mongos retry attempts.
@@ -289,9 +289,9 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
             return ret;
         }
     } catch (const ExceptionFor<ErrorCodes::StaleConfig>& ex) {
-        if (ex->getVersionReceived() == ChunkVersion::IGNORED() &&
+        if (ex->getVersionReceived() == ShardVersion::IGNORED() &&
             ex->getCriticalSectionSignal()) {
-            // If ChunkVersion is IGNORED and we encountered a critical section, then yield,
+            // If ShardVersion is IGNORED and we encountered a critical section, then yield,
             // wait for the critical section to finish and then we'll resume the write from the
             // point we had left. We do this to prevent large multi-writes from repeatedly
             // failing due to StaleConfig and exhausting the mongos retry attempts.
diff --git a/src/mongo/db/exec/update_stage.cpp b/src/mongo/db/exec/update_stage.cpp
index 64af7f13ddb..0874f060fcf 100644
--- a/src/mongo/db/exec/update_stage.cpp
+++ b/src/mongo/db/exec/update_stage.cpp
@@ -497,9 +497,9 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
             writeToOrphan = true;
         }
     } catch (const ExceptionFor<ErrorCodes::StaleConfig>& ex) {
-        if (ex->getVersionReceived() == ChunkVersion::IGNORED() &&
+        if (ex->getVersionReceived() == ShardVersion::IGNORED() &&
             ex->getCriticalSectionSignal()) {
-            // If ChunkVersion is IGNORED and we encountered a critical section, then yield,
+            // If ShardVersion is IGNORED and we encountered a critical section, then yield,
             // wait for critical section to finish and then we'll resume the write from the
             // point we had left. We do this to prevent large multi-writes from repeatedly
             // failing due to StaleConfig and exhausting the mongos retry attempts.
@@ -560,9 +560,9 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
             return updateRet;
         }
     } catch (const ExceptionFor<ErrorCodes::StaleConfig>& ex) {
-        if (ex->getVersionReceived() == ChunkVersion::IGNORED() &&
+        if (ex->getVersionReceived() == ShardVersion::IGNORED() &&
             ex->getCriticalSectionSignal()) {
-            // If ChunkVersion is IGNORED and we encountered a critical section, then yield,
+            // If ShardVersion is IGNORED and we encountered a critical section, then yield,
             // wait for critical section to finish and then we'll resume the write from the
             // point we had left. We do this to prevent large multi-writes from repeatedly
             // failing due to StaleConfig and exhausting the mongos retry attempts.
diff --git a/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp b/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
index effa178fc32..26eb3002a80 100644
--- a/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
+++ b/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
@@ -174,11 +174,12 @@ TEST_F(DispatchShardPipelineTest, DispatchShardPipelineDoesNotRetryOnStaleConfig
     onCommand([&](const executor::RemoteCommandRequest& request) {
         OID epoch{OID::gen()};
         Timestamp timestamp{1, 0};
-        return createErrorCursorResponse({StaleConfigInfo(kTestAggregateNss,
-                                                          ChunkVersion({epoch, timestamp}, {1, 0}),
-                                                          boost::none,
-                                                          ShardId{"0"}),
-                                          "Mock error: shard version mismatch"});
+        return createErrorCursorResponse(
+            {StaleConfigInfo(kTestAggregateNss,
+                             ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0})),
+                             boost::none,
+                             ShardId{"0"}),
+             "Mock error: shard version mismatch"});
     });
     future.default_timed_get();
 }
@@ -217,11 +218,12 @@ TEST_F(DispatchShardPipelineTest, WrappedDispatchDoesRetryOnStaleConfigError) {
     // Mock out one error response, then expect a refresh of the sharding catalog for that
     // namespace, then mock out a successful response.
     onCommand([&](const executor::RemoteCommandRequest& request) {
-        return createErrorCursorResponse({StaleConfigInfo(kTestAggregateNss,
-                                                          ChunkVersion({epoch, timestamp}, {2, 0}),
-                                                          boost::none,
-                                                          ShardId{"0"}),
-                                          "Mock error: shard version mismatch"});
+        return createErrorCursorResponse(
+            {StaleConfigInfo(kTestAggregateNss,
+                             ShardVersion(ChunkVersion({epoch, timestamp}, {2, 0})),
+                             boost::none,
+                             ShardId{"0"}),
+             "Mock error: shard version mismatch"});
     });

     // Mock the expected config server queries.
diff --git a/src/mongo/db/pipeline/document_source_graph_lookup.cpp b/src/mongo/db/pipeline/document_source_graph_lookup.cpp
index 26537b44cfd..a02c02e10fd 100644
--- a/src/mongo/db/pipeline/document_source_graph_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_graph_lookup.cpp
@@ -494,7 +494,7 @@ void DocumentSourceGraphLookUp::performSearch() {
         // throw a custom exception.
         if (auto staleInfo = ex.extraInfo<StaleConfigInfo>();
             staleInfo && staleInfo->getVersionWanted() &&
-            staleInfo->getVersionWanted() != ChunkVersion::UNSHARDED()) {
+            staleInfo->getVersionWanted() != ShardVersion::UNSHARDED()) {
             uassert(3904801,
                     "Cannot run $graphLookup with a sharded foreign collection in a transaction",
                     foreignShardedGraphLookupAllowed());
diff --git a/src/mongo/db/pipeline/document_source_lookup.cpp b/src/mongo/db/pipeline/document_source_lookup.cpp
index 633b7a2f01c..ae3e227a214 100644
--- a/src/mongo/db/pipeline/document_source_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_lookup.cpp
@@ -441,7 +441,7 @@ DocumentSource::GetNextResult DocumentSourceLookUp::doGetNext() {
         // throw a custom exception.
         if (auto staleInfo = ex.extraInfo<StaleConfigInfo>();
             staleInfo && staleInfo->getVersionWanted() &&
-            staleInfo->getVersionWanted() != ChunkVersion::UNSHARDED()) {
+            staleInfo->getVersionWanted() != ShardVersion::UNSHARDED()) {
             uassert(3904800,
                     "Cannot run $lookup with a sharded foreign collection in a transaction",
                     foreignShardedLookupAllowed());
diff --git a/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp b/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp
index 837248fe720..b771a9c1d66 100644
--- a/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp
+++ b/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp
@@ -76,8 +76,13 @@ void ShardServerProcessInterface::checkRoutingInfoEpochOrThrow(
     // Mark the cache entry routingInfo for the 'nss' and 'shardId' if the entry is staler than
     // 'targetCollectionVersion'.
+    const ShardVersion ignoreIndexVersion{
+        targetCollectionVersion,
+        CollectionIndexes{CollectionGeneration{targetCollectionVersion.epoch(),
+                                               targetCollectionVersion.getTimestamp()},
+                          boost::none}};
     catalogCache->invalidateShardOrEntireCollectionEntryForShardedCollection(
-        nss, targetCollectionVersion, shardId);
+        nss, ignoreIndexVersion, shardId);
     const auto routingInfo =
         uassertStatusOK(catalogCache->getCollectionRoutingInfo(expCtx->opCtx, nss));
diff --git a/src/mongo/db/pipeline/sharded_union_test.cpp b/src/mongo/db/pipeline/sharded_union_test.cpp
index a8d15b8dbbe..2b136a89dc2 100644
--- a/src/mongo/db/pipeline/sharded_union_test.cpp
+++ b/src/mongo/db/pipeline/sharded_union_test.cpp
@@ -163,7 +163,7 @@ TEST_F(ShardedUnionTest, RetriesSubPipelineOnStaleConfigError) {
         Timestamp timestamp{1, 0};
         return createErrorCursorResponse(
             Status{StaleConfigInfo(kTestAggregateNss,
-                                   ChunkVersion({epoch, timestamp}, {1, 0}),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0})),
                                    boost::none,
                                    ShardId{"0"}),
                    "Mock error: shard version mismatch"});
@@ -248,7 +248,7 @@ TEST_F(ShardedUnionTest, CorrectlySplitsSubPipelineIfRefreshedDistributionRequir
         Timestamp timestamp{1, 0};
         return createErrorCursorResponse(
             Status{StaleConfigInfo(kTestAggregateNss,
-                                   ChunkVersion({epoch, timestamp}, {1, 0}),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0})),
                                    boost::none,
                                    ShardId{"0"}),
                    "Mock error: shard version mismatch"});
@@ -341,7 +341,7 @@ TEST_F(ShardedUnionTest, AvoidsSplittingSubPipelineIfRefreshedDistributionDoesNo
     onCommand([&](const executor::RemoteCommandRequest& request) {
         return createErrorCursorResponse(
             Status{StaleConfigInfo(kTestAggregateNss,
-                                   ChunkVersion({epoch, timestamp}, {1, 0}),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0})),
                                    boost::none,
                                    ShardId{"0"}),
                    "Mock error: shard version mismatch"});
@@ -349,7 +349,7 @@ TEST_F(ShardedUnionTest, AvoidsSplittingSubPipelineIfRefreshedDistributionDoesNo
     onCommand([&](const executor::RemoteCommandRequest& request) {
         return createErrorCursorResponse(
             Status{StaleConfigInfo(kTestAggregateNss,
-                                   ChunkVersion({epoch, timestamp}, {1, 0}),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0})),
                                    boost::none,
                                    ShardId{"0"}),
                    "Mock error: shard version mismatch"});
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index ff763612920..a51ab3c2145 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -990,7 +990,7 @@ Status Balancer::_splitChunksIfNeeded(OperationContext* opCtx) {
                 cm.getShardKeyPattern(),
                 splitInfo.collectionVersion.epoch(),
                 splitInfo.collectionVersion.getTimestamp(),
-                ChunkVersion::IGNORED() /*shardVersion*/,
+                ShardVersion::IGNORED() /*shardVersion*/,
                 ChunkRange(splitInfo.minKey, splitInfo.maxKey),
                 splitInfo.splitKeys);
             if (!splitStatus.isOK()) {
diff --git a/src/mongo/db/s/chunk_operation_precondition_checks.cpp b/src/mongo/db/s/chunk_operation_precondition_checks.cpp
index da8f5d890a3..483e6080e47 100644
--- a/src/mongo/db/s/chunk_operation_precondition_checks.cpp
+++ b/src/mongo/db/s/chunk_operation_precondition_checks.cpp
@@ -45,7 +45,7 @@ CollectionMetadata checkCollectionIdentity(OperationContext* opCtx,
     auto optMetadata = csr->getCurrentMetadataIfKnown();
     uassert(StaleConfigInfo(nss,
-                            ChunkVersion::IGNORED() /* receivedVersion */,
+                            ShardVersion::IGNORED() /* receivedVersion */,
                             boost::none /* wantedVersion */,
                             shardId),
             str::stream() << "Collection " << nss.ns() << " needs to be recovered",
@@ -54,8 +54,8 @@ CollectionMetadata checkCollectionIdentity(OperationContext* opCtx,
     auto metadata = *optMetadata;
     uassert(StaleConfigInfo(nss,
-                            ChunkVersion::IGNORED() /* receivedVersion */,
-                            ChunkVersion::UNSHARDED() /* wantedVersion */,
+                            ShardVersion::IGNORED() /* receivedVersion */,
+                            ShardVersion::UNSHARDED() /* wantedVersion */,
                             shardId),
             str::stream() << "Collection " << nss.ns() << " is not sharded",
             metadata.isSharded());
@@ -67,8 +67,8 @@ CollectionMetadata checkCollectionIdentity(OperationContext* opCtx,
     const auto shardVersion = metadata.getShardVersion();
     uassert(StaleConfigInfo(nss,
-                            ChunkVersion::IGNORED() /* receivedVersion */,
-                            shardVersion /* wantedVersion */,
+                            ShardVersion::IGNORED() /* receivedVersion */,
+                            ShardVersion(shardVersion) /* wantedVersion */,
                             shardId),
             str::stream() << "Collection " << nss.ns()
                           << " has changed since operation was sent (sent epoch: " << expectedEpoch
@@ -77,8 +77,8 @@ CollectionMetadata checkCollectionIdentity(OperationContext* opCtx,
             (!expectedTimestamp || expectedTimestamp == shardVersion.getTimestamp()));

     uassert(StaleConfigInfo(nss,
-                            ChunkVersion::IGNORED() /* receivedVersion */,
-                            shardVersion /* wantedVersion */,
+                            ShardVersion::IGNORED() /* receivedVersion */,
+                            ShardVersion(shardVersion) /* wantedVersion */,
                             shardId),
             str::stream() << "Shard does not contain any chunks for collection.",
             shardVersion.majorVersion() > 0);
@@ -95,8 +95,8 @@ void checkShardKeyPattern(OperationContext* opCtx,
     const auto shardVersion = metadata.getShardVersion();

     uassert(StaleConfigInfo(nss,
-                            ChunkVersion::IGNORED() /* receivedVersion */,
-                            shardVersion /* wantedVersion */,
+                            ShardVersion::IGNORED() /* receivedVersion */,
+                            ShardVersion(shardVersion) /* wantedVersion */,
                             shardId),
             str::stream() << "The range " << chunkRange.toString()
                           << " is not valid for collection " << nss.ns() << " with key pattern "
@@ -113,8 +113,8 @@ void checkChunkMatchesRange(OperationContext* opCtx,
     ChunkType existingChunk;
     uassert(StaleConfigInfo(nss,
-                            ChunkVersion::IGNORED() /* receivedVersion */,
-                            shardVersion /* wantedVersion */,
+                            ShardVersion::IGNORED() /* receivedVersion */,
+                            ShardVersion(shardVersion) /* wantedVersion */,
                             shardId),
             str::stream() << "Range with bounds " << chunkRange.toString()
                           << " is not owned by this shard.",
@@ -122,8 +122,8 @@ void checkChunkMatchesRange(OperationContext* opCtx,
             existingChunk.getMin().woCompare(chunkRange.getMin()) == 0);

     uassert(StaleConfigInfo(nss,
-                            ChunkVersion::IGNORED() /* receivedVersion */,
-                            shardVersion /* wantedVersion */,
+                            ShardVersion::IGNORED() /* receivedVersion */,
+                            ShardVersion(shardVersion) /* wantedVersion */,
                             shardId),
             str::stream() << "Chunk bounds " << chunkRange.toString() << " do not exist.",
             existingChunk.getRange() == chunkRange);
@@ -138,8 +138,8 @@ void checkRangeWithinChunk(OperationContext* opCtx,
     ChunkType existingChunk;
     uassert(StaleConfigInfo(nss,
-                            ChunkVersion::IGNORED() /* receivedVersion */,
-                            shardVersion /* wantedVersion */,
+                            ShardVersion::IGNORED() /* receivedVersion */,
+                            ShardVersion(shardVersion) /* wantedVersion */,
                             shardId),
             str::stream() << "Range with bounds " << chunkRange.toString()
                           << " is not contained within a chunk owned by this shard.",
@@ -158,8 +158,8 @@ void checkRangeOwnership(OperationContext* opCtx,
     BSONObj minKey = chunkRange.getMin();
     do {
         uassert(StaleConfigInfo(nss,
-                                ChunkVersion::IGNORED() /* receivedVersion */,
-                                shardVersion /* wantedVersion */,
+                                ShardVersion::IGNORED() /* receivedVersion */,
+                                ShardVersion(shardVersion) /* wantedVersion */,
                                 shardId),
                 str::stream() << "Range with bounds "
                              << chunkRange.toString() << " is not owned by this shard.",
@@ -169,8 +169,8 @@ void checkRangeOwnership(OperationContext* opCtx,
    } while (existingChunk.getMax().woCompare(chunkRange.getMax()) < 0);
     uassert(
         StaleConfigInfo(nss,
-                        ChunkVersion::IGNORED() /* receivedVersion */,
-                        shardVersion /* wantedVersion */,
+                        ShardVersion::IGNORED() /* receivedVersion */,
+                        ShardVersion(shardVersion) /* wantedVersion */,
                         shardId),
         str::stream() << "Shard does not contain a sequence of chunks that exactly fills the range "
                       << chunkRange.toString(),
diff --git a/src/mongo/db/s/collection_sharding_runtime.cpp b/src/mongo/db/s/collection_sharding_runtime.cpp
index 15f6bc03fb0..df80079487c 100644
--- a/src/mongo/db/s/collection_sharding_runtime.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime.cpp
@@ -144,8 +144,8 @@ ScopedCollectionDescription CollectionShardingRuntime::getCollectionDescription(
     const auto receivedShardVersion{oss.getShardVersion(_nss)};
     uassert(
         StaleConfigInfo(_nss,
-                        receivedShardVersion ? (ChunkVersion)*receivedShardVersion
-                                             : ChunkVersion::IGNORED(),
+                        receivedShardVersion ? *receivedShardVersion : ShardVersion::IGNORED(),
                         boost::none /* wantedVersion */,
                         ShardingState::get(_serviceContext)->shardId()),
         str::stream() << "sharding status of collection " << _nss.ns()
@@ -355,7 +354,7 @@ CollectionShardingRuntime::_getMetadataWithVersionCheckAt(
     // Assume that the received shard version was IGNORED if the current operation wasn't versioned
     const auto& receivedShardVersion =
-        optReceivedShardVersion ? (ChunkVersion)*optReceivedShardVersion : ChunkVersion::IGNORED();
+        optReceivedShardVersion ? *optReceivedShardVersion : ShardVersion::IGNORED();

     auto csrLock = CSRLock::lockShared(opCtx, this);
@@ -388,10 +387,12 @@ CollectionShardingRuntime::_getMetadataWithVersionCheckAt(

     const auto& currentMetadata = optCurrentMetadata->get();

-    auto wantedShardVersion = currentMetadata.getShardVersion();
+    const auto wantedPlacementVersion = currentMetadata.getShardVersion();
+    const auto wantedShardVersion = ShardVersion(wantedPlacementVersion);
+    const ChunkVersion receivedPlacementVersion = receivedShardVersion;

     if (wantedShardVersion.isWriteCompatibleWith(receivedShardVersion) ||
-        ChunkVersion::isIgnoredVersion(receivedShardVersion))
+        receivedShardVersion == ShardVersion::IGNORED())
         return optCurrentMetadata;

     StaleConfigInfo sci(
@@ -401,13 +402,13 @@ CollectionShardingRuntime::_getMetadataWithVersionCheckAt(
             str::stream() << "timestamp mismatch detected for " << _nss.ns(),
             wantedShardVersion.isSameCollection(receivedShardVersion));

-    if (!wantedShardVersion.isSet() && receivedShardVersion.isSet()) {
+    if (!wantedPlacementVersion.isSet() && receivedPlacementVersion.isSet()) {
         uasserted(std::move(sci),
                   str::stream() << "this shard no longer contains chunks for " << _nss.ns() << ", "
                                 << "the collection may have been dropped");
     }

-    if (wantedShardVersion.isSet() && !receivedShardVersion.isSet()) {
+    if (wantedPlacementVersion.isSet() && !receivedPlacementVersion.isSet()) {
         uasserted(std::move(sci),
                   str::stream() << "this shard contains chunks for " << _nss.ns() << ", "
                                 << "but the client expects unsharded collection");
diff --git a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
index 726d96d7465..6e820d5bef0 100644
--- a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
@@ -133,7 +133,7 @@ public:
             auto const optMetadata =
                 CollectionShardingRuntime::get(opCtx, nss)->getCurrentMetadataIfKnown();
             uassert(StaleConfigInfo(nss,
-                                    ChunkVersion::IGNORED() /* receivedVersion */,
+                                    ShardVersion::IGNORED() /* receivedVersion */,
                                     boost::none /* wantedVersion */,
                                     shardId,
                                     boost::none),
diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
index 0895d38bd84..5debd090ab2 100644
--- a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
+++ b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
@@ -242,7 +242,7 @@ SharedSemiFuture<void> recoverRefreshShardVersion(ServiceContext* serviceContext

 void onShardVersionMismatch(OperationContext* opCtx,
                             const NamespaceString& nss,
-                            boost::optional<ChunkVersion> shardVersionReceived) {
+                            boost::optional<ShardVersion> shardVersionReceived) {
     invariant(!opCtx->lockState()->isLocked());
     invariant(!opCtx->getClient()->isInDirectClient());
     invariant(ShardingState::get(opCtx)->canAcceptShardedCommands());
@@ -326,7 +326,7 @@ void onShardVersionMismatch(OperationContext* opCtx,

 Status onShardVersionMismatchNoExcept(OperationContext* opCtx,
                                       const NamespaceString& nss,
-                                      boost::optional<ChunkVersion> shardVersionReceived) noexcept {
+                                      boost::optional<ShardVersion> shardVersionReceived) noexcept {
     try {
         onShardVersionMismatch(opCtx, nss, shardVersionReceived);
         return Status::OK();
diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.h b/src/mongo/db/s/shard_filtering_metadata_refresh.h
index 955eb3ccdee..c0373d6e103 100644
--- a/src/mongo/db/s/shard_filtering_metadata_refresh.h
+++ b/src/mongo/db/s/shard_filtering_metadata_refresh.h
@@ -32,6 +32,7 @@
 #include "mongo/db/namespace_string.h"
 #include "mongo/db/s/collection_metadata.h"
 #include "mongo/s/database_version.h"
+#include "mongo/s/shard_version.h"

 namespace mongo {
@@ -56,11 +57,11 @@ class OperationContext;
  */
 Status onShardVersionMismatchNoExcept(OperationContext* opCtx,
                                       const NamespaceString& nss,
-                                      boost::optional<ChunkVersion> shardVersionReceived) noexcept;
+                                      boost::optional<ShardVersion> shardVersionReceived) noexcept;

 void onShardVersionMismatch(OperationContext* opCtx,
                             const NamespaceString& nss,
-                            boost::optional<ChunkVersion> shardVersionReceived);
+                            boost::optional<ShardVersion> shardVersionReceived);

 /**
  * Unconditionally get the shard's filtering metadata from the config server on the calling thread.
diff --git a/src/mongo/db/s/shardsvr_merge_chunks_command.cpp b/src/mongo/db/s/shardsvr_merge_chunks_command.cpp
index 7c844d3444f..69a4610a838 100644
--- a/src/mongo/db/s/shardsvr_merge_chunks_command.cpp
+++ b/src/mongo/db/s/shardsvr_merge_chunks_command.cpp
@@ -106,10 +106,16 @@ void mergeChunks(OperationContext* opCtx,
     auto cmdResponse = commitMergeOnConfigServer(
         opCtx, nss, expectedEpoch, expectedTimestamp, chunkRange, metadataBeforeMerge);

-    auto shardVersionReceived = [&]() -> boost::optional<ChunkVersion> {
+    auto shardVersionReceived = [&]() -> boost::optional<ShardVersion> {
         // Old versions might not have the shardVersion field
         if (cmdResponse.response[ChunkVersion::kChunkVersionField]) {
-            return ChunkVersion::parse(cmdResponse.response[ChunkVersion::kChunkVersionField]);
+            ChunkVersion placementVersion =
+                ChunkVersion::parse(cmdResponse.response[ChunkVersion::kChunkVersionField]);
+            return ShardVersion(
+                placementVersion,
+                CollectionIndexes{
+                    CollectionGeneration{placementVersion.epoch(), placementVersion.getTimestamp()},
+                    boost::none});
         }
         return boost::none;
     }();
diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp
index f76ae40c9fc..3ee8a2e808f 100644
--- a/src/mongo/db/s/split_chunk.cpp
+++ b/src/mongo/db/s/split_chunk.cpp
@@ -109,21 +109,21 @@ bool checkMetadataForSuccessfulSplitChunk(OperationContext* opCtx,
     ShardId shardId = ShardingState::get(opCtx)->shardId();
     uassert(StaleConfigInfo(nss,
-                            ChunkVersion::IGNORED() /* receivedVersion */,
+                            ShardVersion::IGNORED() /* receivedVersion */,
                             boost::none /* wantedVersion */,
                             shardId),
             str::stream() << "Collection " << nss.ns() << " needs to be recovered",
             metadataAfterSplit);
     uassert(StaleConfigInfo(nss,
-                            ChunkVersion::IGNORED() /* receivedVersion */,
-                            ChunkVersion::UNSHARDED() /* wantedVersion */,
+                            ShardVersion::IGNORED() /* receivedVersion */,
+                            ShardVersion::UNSHARDED() /* wantedVersion */,
                             shardId),
             str::stream() << "Collection " << nss.ns() << " is not sharded",
             metadataAfterSplit->isSharded());
     const auto epoch = metadataAfterSplit->getShardVersion().epoch();
     uassert(StaleConfigInfo(nss,
-                            ChunkVersion::IGNORED() /* receivedVersion */,
-                            metadataAfterSplit->getShardVersion() /* wantedVersion */,
+                            ShardVersion::IGNORED() /* receivedVersion */,
+                            ShardVersion(metadataAfterSplit->getShardVersion()) /* wantedVersion */,
                             shardId),
             str::stream() << "Collection " << nss.ns() << " changed since split start",
             epoch == expectedEpoch &&
@@ -252,10 +252,16 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(
     const Shard::CommandResponse& cmdResponse = cmdResponseStatus.getValue();

-    boost::optional<ChunkVersion> shardVersionReceived = [&]() -> boost::optional<ChunkVersion> {
+    boost::optional<ShardVersion> shardVersionReceived = [&]() -> boost::optional<ShardVersion> {
         // old versions might not have the shardVersion field
         if (cmdResponse.response[ChunkVersion::kChunkVersionField]) {
-            return ChunkVersion::parse(cmdResponse.response[ChunkVersion::kChunkVersionField]);
+            ChunkVersion placementVersion =
+                ChunkVersion::parse(cmdResponse.response[ChunkVersion::kChunkVersionField]);
+            return ShardVersion(
+                placementVersion,
+                CollectionIndexes{
+                    CollectionGeneration{placementVersion.epoch(), placementVersion.getTimestamp()},
+                    boost::none});
         }
         return boost::none;
     }();
diff --git a/src/mongo/s/append_raw_responses_test.cpp b/src/mongo/s/append_raw_responses_test.cpp
index 1bcfb9c8bc8..99c7387724e 100644
--- a/src/mongo/s/append_raw_responses_test.cpp
+++ b/src/mongo/s/append_raw_responses_test.cpp
@@ -192,16 +192,16 @@ protected:
     const std::vector<ShardId> kShardIdList{kShard1, kShard2, kShard3, kShard4, kShard5};
-    const Status kStaleConfigErrorStatus{[] {
-        OID epoch{OID::gen()};
-        Timestamp timestamp{1, 0};
-        return StaleConfigInfo(
-            NamespaceString("Foo.Bar"),
-            ChunkVersion({epoch, timestamp}, {1, 0}),
-            boost::none,
-            ShardId{"dummy"});
-    }(),
-                                         "dummy"};
+    const Status kStaleConfigErrorStatus{
+        [] {
+            OID epoch{OID::gen()};
+            Timestamp timestamp{1, 0};
+            return StaleConfigInfo(NamespaceString("Foo.Bar"),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0})),
+                                   boost::none,
+                                   ShardId{"dummy"});
+        }(),
+        "dummy"};

 private:
     static void _assertShardIdsMatch(const std::set<ShardId>& expectedShardIds,
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index 0c5823bc7c5..66025644cd7 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -452,7 +452,7 @@ void CatalogCache::setOperationShouldBlockBehindCatalogCacheRefresh(OperationCon

 void CatalogCache::invalidateShardOrEntireCollectionEntryForShardedCollection(
     const NamespaceString& nss,
-    const boost::optional<ChunkVersion>& wantedVersion,
+    const boost::optional<ShardVersion>& wantedVersion,
     const ShardId& shardId) {
     _stats.countStaleConfigErrors.addAndFetch(1);
diff --git a/src/mongo/s/catalog_cache.h b/src/mongo/s/catalog_cache.h
index 48c781d27ad..f7f9fec91dd 100644
--- a/src/mongo/s/catalog_cache.h
+++ b/src/mongo/s/catalog_cache.h
@@ -35,6 +35,7 @@
 #include "mongo/s/catalog/type_database_gen.h"
 #include "mongo/s/catalog_cache_loader.h"
 #include "mongo/s/chunk_manager.h"
+#include "mongo/s/shard_version.h"
 #include "mongo/s/type_collection_common_types_gen.h"
 #include "mongo/util/concurrency/thread_pool.h"
 #include "mongo/util/read_through_cache.h"
@@ -227,7 +228,7 @@ public:
      */
     void invalidateShardOrEntireCollectionEntryForShardedCollection(
         const NamespaceString& nss,
-        const boost::optional<ChunkVersion>& wantedVersion,
+        const boost::optional<ShardVersion>& wantedVersion,
         const ShardId& shardId);

     /**
diff --git a/src/mongo/s/catalog_cache_test.cpp b/src/mongo/s/catalog_cache_test.cpp
index 41faae8b072..c68dc4b0053 100644
--- a/src/mongo/s/catalog_cache_test.cpp
+++ b/src/mongo/s/catalog_cache_test.cpp
@@ -259,7 +259,8 @@ TEST_F(CatalogCacheTest, OnStaleDatabaseVersionNoVersion) {

 TEST_F(CatalogCacheTest, OnStaleShardVersionWithSameVersion) {
     const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1));
-    const auto cachedCollVersion = ChunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
+    const auto cachedCollVersion =
+        ShardVersion(ChunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0}));

     loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)});
     loadCollection(cachedCollVersion);
@@ -284,8 +285,8 @@ TEST_F(CatalogCacheTest, OnStaleShardVersionWithNoVersion) {
 TEST_F(CatalogCacheTest, OnStaleShardVersionWithGraterVersion) {
     const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1));
     const auto cachedCollVersion = ChunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
-    const auto wantedCollVersion =
-        ChunkVersion({cachedCollVersion.epoch(), cachedCollVersion.getTimestamp()}, {2, 0});
+    const auto wantedCollVersion = ShardVersion(
+        ChunkVersion({cachedCollVersion.epoch(), cachedCollVersion.getTimestamp()}, {2, 0}));

     loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)});
     loadCollection(cachedCollVersion);
diff --git a/src/mongo/s/stale_exception.cpp b/src/mongo/s/stale_exception.cpp
index e15e54753f9..c9e66b5ee14 100644
--- a/src/mongo/s/stale_exception.cpp
+++ b/src/mongo/s/stale_exception.cpp
@@ -44,12 +44,9 @@ MONGO_INIT_REGISTER_ERROR_EXTRA_INFO(StaleDbRoutingVersion);
 void StaleConfigInfo::serialize(BSONObjBuilder* bob) const {
     bob->append("ns", _nss.ns());
-    ShardVersion receivedShardVersion(_received);
-    receivedShardVersion.serialize("vReceived", bob);
-    if (_wanted) {
-        ShardVersion wantedShardVersion(*_wanted);
-        wantedShardVersion.serialize("vWanted", bob);
-    }
+    _received.serialize("vReceived", bob);
+    if (_wanted)
+        _wanted->serialize("vWanted", bob);

     invariant(_shardId != "");
     bob->append("shardId", _shardId.toString());
@@ -59,16 +56,13 @@ std::shared_ptr<const ErrorExtraInfo> StaleConfigInfo::parse(const BSONObj& obj)
     auto shardId = obj["shardId"].String();
     uassert(ErrorCodes::NoSuchKey, "The shardId field is missing", !shardId.empty());

-    const ChunkVersion& receivedVersion = ShardVersion::parse(obj["vReceived"]);
-
     return std::make_shared<StaleConfigInfo>(NamespaceString(obj["ns"].String()),
-                                             receivedVersion,
+                                             ShardVersion::parse(obj["vReceived"]),
                                              [&] {
-                                                 if (auto vWantedElem = obj["vWanted"]) {
-                                                     const ChunkVersion& wantedVersion =
-                                                         ShardVersion::parse(vWantedElem);
-                                                     return boost::make_optional(wantedVersion);
-                                                 }
-                                                 return boost::optional<ChunkVersion>();
+                                                 if (auto vWantedElem = obj["vWanted"])
+                                                     return boost::make_optional(
+                                                         ShardVersion::parse(vWantedElem));
+                                                 return boost::optional<ShardVersion>();
                                              }(),
                                              ShardId(std::move(shardId)));
 }
diff --git a/src/mongo/s/stale_exception.h b/src/mongo/s/stale_exception.h
index 5a19f6db95c..da34725af68 100644
--- a/src/mongo/s/stale_exception.h
+++ b/src/mongo/s/stale_exception.h
@@ -31,8 +31,8 @@

 #include "mongo/db/namespace_string.h"
 #include "mongo/db/shard_id.h"
-#include "mongo/s/chunk_version.h"
 #include "mongo/s/database_version.h"
+#include "mongo/s/shard_version.h"
 #include "mongo/util/concurrency/notification.h"

 namespace mongo {
@@ -43,8 +43,8 @@ public:
     enum class OperationType { kRead, kWrite };

     StaleConfigInfo(NamespaceString nss,
-                    ChunkVersion received,
-                    boost::optional<ChunkVersion> wanted,
+                    ShardVersion received,
+                    boost::optional<ShardVersion> wanted,
                     ShardId shardId,
                     boost::optional<std::shared_ptr<Notification<void>>> criticalSectionSignal = boost::none,
                     boost::optional<OperationType> duringOperationType = boost::none)
@@ -84,8 +84,8 @@ public:

 protected:
     NamespaceString _nss;
-    ChunkVersion _received;
-    boost::optional<ChunkVersion> _wanted;
+    ShardVersion _received;
+    boost::optional<ShardVersion> _wanted;
     ShardId _shardId;

     // The following fields are not serialized and therefore do not get propagated to the router.
diff --git a/src/mongo/s/stale_exception_test.cpp b/src/mongo/s/stale_exception_test.cpp
index 67d857a0ca5..1e92988516d 100644
--- a/src/mongo/s/stale_exception_test.cpp
+++ b/src/mongo/s/stale_exception_test.cpp
@@ -38,7 +38,7 @@ const NamespaceString kNss("test.nss");
 TEST(StaleExceptionTest, StaleConfigInfoSerializationTest) {
     const ShardId kShardId("SHARD_ID");

-    StaleConfigInfo info(kNss, ChunkVersion::UNSHARDED(), ChunkVersion::UNSHARDED(), kShardId);
+    StaleConfigInfo info(kNss, ShardVersion::UNSHARDED(), ShardVersion::UNSHARDED(), kShardId);

     // Serialize
     BSONObjBuilder bob;
@@ -49,8 +49,8 @@ TEST(StaleExceptionTest, StaleConfigInfoSerializationTest) {
         std::static_pointer_cast<const StaleConfigInfo>(StaleConfigInfo::parse(bob.obj()));

     ASSERT_EQUALS(deserializedInfo->getNss(), kNss);
-    ASSERT_EQUALS(deserializedInfo->getVersionReceived(), ChunkVersion::UNSHARDED());
-    ASSERT_EQUALS(*deserializedInfo->getVersionWanted(), ChunkVersion::UNSHARDED());
+    ASSERT_EQUALS(deserializedInfo->getVersionReceived(), ShardVersion::UNSHARDED());
+    ASSERT_EQUALS(*deserializedInfo->getVersionWanted(), ShardVersion::UNSHARDED());
     ASSERT_EQUALS(deserializedInfo->getShardId(), kShardId);
 }
diff --git a/src/mongo/s/stale_shard_version_helpers_test.cpp b/src/mongo/s/stale_shard_version_helpers_test.cpp
index 0acedd12eae..030a3d21213 100644
--- a/src/mongo/s/stale_shard_version_helpers_test.cpp
+++ b/src/mongo/s/stale_shard_version_helpers_test.cpp
@@ -93,10 +93,11 @@ TEST_F(AsyncShardVersionRetry, LimitedStaleErrorsShouldReturnCorrectValue) {
     auto future = shardVersionRetry(
         service(), nss(), catalogCache, desc(), getExecutor(), token, [&](OperationContext*) {
             if (++tries < 5) {
-                uassert(StaleConfigInfo(nss(),
-                                        ChunkVersion({OID::gen(), Timestamp(1, 0)}, {5, 23}),
-                                        ChunkVersion({OID::gen(), Timestamp(1, 0)}, {6, 99}),
-                                        ShardId("sB")),
+                uassert(StaleConfigInfo(
+                            nss(),
+                            ShardVersion(ChunkVersion({OID::gen(), Timestamp(1, 0)}, {5, 23})),
+                            ShardVersion(ChunkVersion({OID::gen(), Timestamp(1, 0)}, {6, 99})),
+                            ShardId("sB")),
                         "testX",
                         false);
             }
diff --git a/src/mongo/s/transaction_router_test.cpp b/src/mongo/s/transaction_router_test.cpp
index bee7dd7e673..d6f1d93d133 100644
--- a/src/mongo/s/transaction_router_test.cpp
+++ b/src/mongo/s/transaction_router_test.cpp
@@ -114,7 +114,7 @@ protected:
     const NamespaceString kViewNss = NamespaceString("test.foo");

     const Status kStaleConfigStatus = {
-        StaleConfigInfo(kViewNss, ChunkVersion::UNSHARDED(), boost::none, shard1),
+        StaleConfigInfo(kViewNss, ShardVersion::UNSHARDED(), boost::none, shard1),
         "The metadata for the collection is not loaded"};

     void setUp() override {
diff --git a/src/mongo/s/write_ops/batch_write_exec_test.cpp b/src/mongo/s/write_ops/batch_write_exec_test.cpp
index 52fe7c41763..e8aa07b6f30 100644
--- a/src/mongo/s/write_ops/batch_write_exec_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec_test.cpp
@@ -89,13 +89,13 @@ BSONObj expectInsertsReturnStaleVersionErrorsBase(const NamespaceString& nss,

     // Report a stale version error for each write in the batch.
     int i = 0;
     for (itInserted = inserted.begin(); itInserted != inserted.end(); ++itInserted) {
-        staleResponse.addToErrDetails(
-            write_ops::WriteError(i,
-                                  Status(StaleConfigInfo(nss,
-                                                         ChunkVersion({epoch, timestamp}, {1, 0}),
-                                                         ChunkVersion({epoch, timestamp}, {2, 0}),
-                                                         ShardId(kShardName1)),
-                                         "Stale error")));
+        staleResponse.addToErrDetails(write_ops::WriteError(
+            i,
+            Status(StaleConfigInfo(nss,
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0})),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {2, 0})),
+                                   ShardId(kShardName1)),
+                   "Stale error")));
         ++i;
     }
@@ -731,8 +731,8 @@ TEST_F(BatchWriteExecTest, StaleShardVersionReturnedFromBatchWithSingleMultiWrit
         response.addToErrDetails(write_ops::WriteError(
             0,
             Status(StaleConfigInfo(nss,
-                                   ChunkVersion({epoch, timestamp}, {101, 200}),
-                                   ChunkVersion({epoch, timestamp}, {105, 200}),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
                                    ShardId(kShardName2)),
                    "Stale error")));
         return response.toBSON();
@@ -831,15 +831,15 @@ TEST_F(BatchWriteExecTest,
         response.addToErrDetails(write_ops::WriteError(
             0,
             Status(StaleConfigInfo(nss,
-                                   ChunkVersion({epoch, timestamp}, {101, 200}),
-                                   ChunkVersion({epoch, timestamp}, {105, 200}),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
                                    ShardId(kShardName2)),
                    "Stale error")));
         response.addToErrDetails(write_ops::WriteError(
             1,
             Status(StaleConfigInfo(nss,
-                                   ChunkVersion({epoch, timestamp}, {101, 200}),
-                                   ChunkVersion({epoch, timestamp}, {105, 200}),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
                                    ShardId(kShardName2)),
                    "Stale error")));
         return response.toBSON();
@@ -927,8 +927,8 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1Firs) {
         response.addToErrDetails(write_ops::WriteError(
             1,
             Status(StaleConfigInfo(nss,
-                                   ChunkVersion({epoch, timestamp}, {101, 200}),
-                                   ChunkVersion({epoch, timestamp}, {105, 200}),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
                                    ShardId(kShardName2)),
                    "Stale error")));
         return response.toBSON();
@@ -943,8 +943,8 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1Firs) {
         response.addToErrDetails(write_ops::WriteError(
             0,
             Status(StaleConfigInfo(nss,
-                                   ChunkVersion({epoch, timestamp}, {101, 200}),
-                                   ChunkVersion({epoch, timestamp}, {105, 200}),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
                                    ShardId(kShardName2)),
                    "Stale error")));
         return response.toBSON();
@@ -1043,8 +1043,8 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1FirstOK
         response.addToErrDetails(write_ops::WriteError(
             1,
             Status(StaleConfigInfo(nss,
-                                   ChunkVersion({epoch, timestamp}, {101, 200}),
-                                   ChunkVersion({epoch, timestamp}, {105, 200}),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
                                    ShardId(kShardName2)),
                    "Stale error")));
         return response.toBSON();
@@ -1059,8 +1059,8 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1FirstOK
         response.addToErrDetails(write_ops::WriteError(
             1,
             Status(StaleConfigInfo(nss,
-                                   ChunkVersion({epoch, timestamp}, {101, 200}),
-                                   ChunkVersion({epoch, timestamp}, {105, 200}),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
                                    ShardId(kShardName2)),
                    "Stale error")));
         return response.toBSON();
@@ -1162,8 +1162,8 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromWriteWithShard1SSVShard2OK)
         response.addToErrDetails(write_ops::WriteError(
             0,
             Status(StaleConfigInfo(nss,
-                                   ChunkVersion({epoch, timestamp}, {101, 200}),
-                                   ChunkVersion({epoch, timestamp}, {105, 200}),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
+                                   ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
                                    ShardId(kShardName2)),
                    "Stale error")));
diff --git a/src/mongo/s/write_ops/batch_write_op_test.cpp b/src/mongo/s/write_ops/batch_write_op_test.cpp
index eb07226621f..3681cd77b10 100644
--- a/src/mongo/s/write_ops/batch_write_op_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_op_test.cpp
@@ -284,13 +284,13 @@ TEST_F(BatchWriteOpTest, SingleStaleError) {
     buildResponse(0, &response);
     OID epoch{OID::gen()};
     Timestamp timestamp{1, 0};
-    response.addToErrDetails(
-        write_ops::WriteError(0,
-                              Status{StaleConfigInfo(nss,
-                                                     ChunkVersion({epoch, timestamp}, {101, 200}),
-                                                     ChunkVersion({epoch, timestamp}, {105, 200}),
-                                                     ShardId("shard")),
-                                     "mock stale error"}));
+    response.addToErrDetails(write_ops::WriteError(
+        0,
+        Status{StaleConfigInfo(nss,
+                               ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
+                               ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
+                               ShardId("shard")),
+               "mock stale error"}));

     // First stale response comes back, we should retry
     batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr);
diff --git a/src/mongo/s/write_ops/batched_command_request.cpp b/src/mongo/s/write_ops/batched_command_request.cpp
index 65017292f3c..e2a29e72751 100644
--- a/src/mongo/s/write_ops/batched_command_request.cpp
+++ b/src/mongo/s/write_ops/batched_command_request.cpp
@@ -47,7 +47,7 @@ BatchedCommandRequest constructBatchedCommandRequest(const OpMsgRequest& request
     auto shardVersionField = request.body[ShardVersion::kShardVersionField];
     if (!shardVersionField.eoo()) {
         auto shardVersion = ShardVersion::parse(shardVersionField);
-        if (shardVersion == ChunkVersion::UNSHARDED()) {
+        if (shardVersion == ShardVersion::UNSHARDED()) {
             batchRequest.setDbVersion(DatabaseVersion(request.body));
         }
         batchRequest.setShardVersion(shardVersion);
diff --git a/src/mongo/s/write_ops/batched_command_response_test.cpp b/src/mongo/s/write_ops/batched_command_response_test.cpp
index 4d7acf32c22..c36f734a193 100644
--- a/src/mongo/s/write_ops/batched_command_response_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_response_test.cpp
@@ -71,8 +71,8 @@ TEST(BatchedCommandResponseTest, StaleConfigInfo) {
     OID epoch = OID::gen();

     StaleConfigInfo staleInfo(NamespaceString("TestDB.TestColl"),
-                              ChunkVersion({epoch, Timestamp(100, 0)}, {1, 0}),
-                              ChunkVersion({epoch, Timestamp(100, 0)}, {2, 0}),
+                              ShardVersion(ChunkVersion({epoch, Timestamp(100, 0)}, {1, 0})),
+                              ShardVersion(ChunkVersion({epoch, Timestamp(100, 0)}, {2, 0})),
                               ShardId("TestShard"));
     BSONObjBuilder builder(BSON("index" << 0 << "code" << ErrorCodes::StaleConfig << "errmsg"
                                         << "StaleConfig error"));
diff --git a/src/mongo/s/write_ops/write_op_test.cpp b/src/mongo/s/write_ops/write_op_test.cpp
index 2d179b6593f..31476906b9b 100644
--- a/src/mongo/s/write_ops/write_op_test.cpp
+++ b/src/mongo/s/write_ops/write_op_test.cpp
@@ -228,8 +228,8 @@ TEST_F(WriteOpTest, TargetMultiAllShardsAndErrorSingleChildOp) {
     write_ops::WriteError retryableError(
         0,
         {StaleConfigInfo(kNss,
-                         ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}),
-                         ChunkVersion({OID(), Timestamp(1, 1)}, {11, 0}),
+                         ShardVersion(ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0})),
+                         ShardVersion(ChunkVersion({OID(), Timestamp(1, 1)}, {11, 0})),
                          ShardId("shardA")),
          "simulate ssv error for test"});
     writeOp.noteWriteError(*targeted[0], retryableError);
@@ -329,7 +329,7 @@ TEST_F(WriteOpTest, RetrySingleOp) {
     // Stale exception
     write_ops::WriteError error(
         0,
-        {StaleConfigInfo(kNss, ChunkVersion::IGNORED(), boost::none, ShardId("shard")),
+        {StaleConfigInfo(kNss, ShardVersion::IGNORED(), boost::none, ShardId("shard")),
          "some message"});
     writeOp.noteWriteError(*targeted.front(), error);
@@ -422,8 +422,8 @@ TEST_F(WriteOpTransactionTest, TargetMultiAllShardsAndErrorSingleChildOp) {
     write_ops::WriteError retryableError(
         0,
         {StaleConfigInfo(kNss,
-                         ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}),
-                         ChunkVersion({OID(), Timestamp(1, 1)}, {11, 0}),
+                         ShardVersion(ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0})),
+                         ShardVersion(ChunkVersion({OID(), Timestamp(1, 1)}, {11, 0})),
                          ShardId("shardA")),
          "simulate ssv error for test"});
     writeOp.noteWriteError(*targeted[0], retryableError);
--
cgit v1.2.1
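The recurring pattern in this patch is mechanical: every ChunkVersion handed to StaleConfigInfo is wrapped in a ShardVersion, sometimes together with a CollectionIndexes built from the placement version's epoch and timestamp. A minimal compilable sketch of that relationship follows. The types below are simplified stand-ins for illustration only, not the real definitions from src/mongo/s/shard_version.h; field names and layouts here are assumptions.

#include <iostream>
#include <optional>
#include <string>
#include <utility>

// Stand-in for ChunkVersion: a placement version identified by a collection
// generation (epoch + timestamp) plus a major/minor chunk counter.
struct ChunkVersion {
    std::string epoch;  // stands in for the OID epoch
    unsigned long timestamp = 0;
    unsigned major = 0;
    unsigned minor = 0;
};

// Stand-in for CollectionIndexes: index metadata tied to the same generation.
struct CollectionIndexes {
    std::string epoch;
    unsigned long timestamp = 0;
    std::optional<unsigned long> indexVersion;  // boost::none in the patch
};

// Stand-in for ShardVersion: after SERVER-68891, StaleConfigInfo carries this
// wrapper (placement version + optional index information) instead of a bare
// ChunkVersion.
struct ShardVersion {
    ChunkVersion placement;
    std::optional<CollectionIndexes> indexes;

    // Mirrors the one-argument wrapping seen throughout the tests, e.g.
    // ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0})).
    explicit ShardVersion(ChunkVersion cv) : placement(std::move(cv)) {}

    // Mirrors the two-argument construction used by
    // shardsvr_merge_chunks_command.cpp and split_chunk.cpp.
    ShardVersion(ChunkVersion cv, CollectionIndexes ci)
        : placement(std::move(cv)), indexes(std::move(ci)) {}
};

int main() {
    // Pattern from the merge/split commands: derive the generation from the
    // placement version and leave the index version unset.
    ChunkVersion placementVersion{"epoch-A", 1, 2, 0};
    ShardVersion withIndexes{
        placementVersion,
        CollectionIndexes{placementVersion.epoch, placementVersion.timestamp, std::nullopt}};

    // Pattern from the tests: wrap a bare placement version.
    ShardVersion wrapped{ChunkVersion{"epoch-A", 1, 1, 0}};

    std::cout << withIndexes.placement.major << " " << wrapped.placement.major << "\n";
    return 0;
}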