author     Allison Easton <allison.easton@mongodb.com>  2022-09-01 11:02:51 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-09-01 11:32:04 +0000
commit     ce3443291cd663ae5375941f380d4bc52bc88a85 (patch)
tree       d6ffa445b5cdefeecbbf85cb8a5cc022810de781
parent     5f11658224585f0399d57afa2fd8886165f854bf (diff)
SERVER-69033 Remove ChunkVersion only constructor from ShardVersion
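The pattern applied throughout this diff: every former ShardVersion(chunkVersion) call site now passes an explicit CollectionIndexes built from the same collection generation, with boost::none as the index version. Below is a minimal compilable sketch of the shape of that change; the types here are simplified stand-ins, not the real ChunkVersion, CollectionIndexes, and ShardVersion from src/mongo/s/, which carry considerably more state.

// Hedged sketch (C++17) of the call-site migration this commit performs.
#include <optional>

struct CollectionGeneration {
    long epoch = 0;      // stand-in for the real OID epoch
    long timestamp = 0;  // stand-in for the real Timestamp
};

struct ChunkVersion : CollectionGeneration {
    int major = 0;
    int minor = 0;
};

struct CollectionIndexes : CollectionGeneration {
    std::optional<long> indexVersion;  // boost::none in the real code
};

struct ShardVersion {
    // After this commit, the two-argument form is the only constructor left.
    ShardVersion(ChunkVersion placement, CollectionIndexes indexes)
        : placement(placement), indexes(indexes) {}

    ChunkVersion placement;
    CollectionIndexes indexes;
};

int main() {
    ChunkVersion placementVersion;  // e.g. cm.getVersion(shardId) in the diff

    // Before: ShardVersion sv(placementVersion);   // one-arg form, now removed
    // After: the caller states explicitly that no index version is attached.
    ShardVersion sv(placementVersion,
                    CollectionIndexes{{placementVersion.epoch, placementVersion.timestamp},
                                      std::nullopt});
    (void)sv;
    return 0;
}

Making the second argument explicit keeps the absence of index information visible at each call site, presumably so that real index-version data can later be threaded through these spots one by one.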
-rw-r--r--  src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp  6
-rw-r--r--  src/mongo/db/pipeline/document_source_merge.cpp  4
-rw-r--r--  src/mongo/db/pipeline/process_interface/common_process_interface.cpp  4
-rw-r--r--  src/mongo/db/pipeline/sharded_agg_helpers.cpp  15
-rw-r--r--  src/mongo/db/pipeline/sharded_union_test.cpp  12
-rw-r--r--  src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp  15
-rw-r--r--  src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp  5
-rw-r--r--  src/mongo/db/s/chunk_operation_precondition_checks.cpp  38
-rw-r--r--  src/mongo/db/s/collection_metadata_filtering_test.cpp  9
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime.cpp  3
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime_test.cpp  36
-rw-r--r--  src/mongo/db/s/create_collection_coordinator.cpp  4
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp  4
-rw-r--r--  src/mongo/db/s/op_observer_sharding_test.cpp  9
-rw-r--r--  src/mongo/db/s/operation_sharding_state_test.cpp  15
-rw-r--r--  src/mongo/db/s/rename_collection_coordinator.cpp  48
-rw-r--r--  src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp  3
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp  31
-rw-r--r--  src/mongo/db/s/shard_key_util.cpp  8
-rw-r--r--  src/mongo/db/s/sharding_ddl_util.cpp  3
-rw-r--r--  src/mongo/db/s/sharding_write_router_bm.cpp  4
-rw-r--r--  src/mongo/db/s/split_chunk.cpp  16
-rw-r--r--  src/mongo/s/append_raw_responses_test.cpp  3
-rw-r--r--  src/mongo/s/catalog_cache_test.cpp  7
-rw-r--r--  src/mongo/s/chunk_manager.cpp  6
-rw-r--r--  src/mongo/s/chunk_manager.h  5
-rw-r--r--  src/mongo/s/chunk_manager_targeter.cpp  24
-rw-r--r--  src/mongo/s/cluster_commands_helpers.cpp  18
-rw-r--r--  src/mongo/s/cluster_ddl.cpp  2
-rw-r--r--  src/mongo/s/commands/cluster_analyze_shard_key_cmd.cpp  7
-rw-r--r--  src/mongo/s/commands/cluster_create_cmd.cpp  2
-rw-r--r--  src/mongo/s/commands/cluster_find_and_modify_cmd.cpp  41
-rw-r--r--  src/mongo/s/commands/cluster_split_cmd.cpp  14
-rw-r--r--  src/mongo/s/commands/cluster_split_vector_cmd.cpp  2
-rw-r--r--  src/mongo/s/query/cluster_aggregation_planner.cpp  4
-rw-r--r--  src/mongo/s/query/cluster_find.cpp  3
-rw-r--r--  src/mongo/s/router.cpp  3
-rw-r--r--  src/mongo/s/shard_version.h  5
-rw-r--r--  src/mongo/s/stale_shard_version_helpers_test.cpp  13
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec_test.cpp  377
-rw-r--r--  src/mongo/s/write_ops/batch_write_op_test.cpp  102
-rw-r--r--  src/mongo/s/write_ops/batched_command_request.cpp  2
-rw-r--r--  src/mongo/s/write_ops/batched_command_request.h  6
-rw-r--r--  src/mongo/s/write_ops/batched_command_request_test.cpp  4
-rw-r--r--  src/mongo/s/write_ops/batched_command_response_test.cpp  14
-rw-r--r--  src/mongo/s/write_ops/write_op.cpp  2
-rw-r--r--  src/mongo/s/write_ops/write_op_test.cpp  83
47 files changed, 644 insertions, 397 deletions
diff --git a/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp b/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
index 26eb3002a80..79b666c1956 100644
--- a/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
+++ b/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
@@ -176,7 +176,8 @@ TEST_F(DispatchShardPipelineTest, DispatchShardPipelineDoesNotRetryOnStaleConfig
Timestamp timestamp{1, 0};
return createErrorCursorResponse(
{StaleConfigInfo(kTestAggregateNss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
boost::none,
ShardId{"0"}),
"Mock error: shard version mismatch"});
@@ -220,7 +221,8 @@ TEST_F(DispatchShardPipelineTest, WrappedDispatchDoesRetryOnStaleConfigError) {
onCommand([&](const executor::RemoteCommandRequest& request) {
return createErrorCursorResponse(
{StaleConfigInfo(kTestAggregateNss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {2, 0})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {2, 0}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
boost::none,
ShardId{"0"}),
"Mock error: shard version mismatch"});
diff --git a/src/mongo/db/pipeline/document_source_merge.cpp b/src/mongo/db/pipeline/document_source_merge.cpp
index 63e2aa38bde..ed9c14b3bed 100644
--- a/src/mongo/db/pipeline/document_source_merge.cpp
+++ b/src/mongo/db/pipeline/document_source_merge.cpp
@@ -555,7 +555,9 @@ Value DocumentSourceMerge::serialize(boost::optional<ExplainOptions::Verbosity>
return mergeOnFields;
}());
spec.setTargetCollectionVersion(
- _targetCollectionVersion ? boost::make_optional(ShardVersion(*_targetCollectionVersion))
+ _targetCollectionVersion ? boost::make_optional(ShardVersion(
+ *_targetCollectionVersion,
+ CollectionIndexes(*_targetCollectionVersion, boost::none)))
: boost::none);
return Value(Document{{getSourceName(), spec.toBSON()}});
}
diff --git a/src/mongo/db/pipeline/process_interface/common_process_interface.cpp b/src/mongo/db/pipeline/process_interface/common_process_interface.cpp
index 8e56f390b13..3aa6d48c136 100644
--- a/src/mongo/db/pipeline/process_interface/common_process_interface.cpp
+++ b/src/mongo/db/pipeline/process_interface/common_process_interface.cpp
@@ -198,7 +198,9 @@ boost::optional<ShardVersion> CommonProcessInterface::refreshAndGetCollectionVer
->catalogCache()
->getCollectionRoutingInfoWithRefresh(expCtx->opCtx, nss));
- return cm.isSharded() ? boost::make_optional(ShardVersion(cm.getVersion())) : boost::none;
+ return cm.isSharded() ? boost::make_optional(ShardVersion(
+ cm.getVersion(), CollectionIndexes(cm.getVersion(), boost::none)))
+ : boost::none;
}
std::vector<FieldPath> CommonProcessInterface::_shardKeyToDocumentKeyFields(
diff --git a/src/mongo/db/pipeline/sharded_agg_helpers.cpp b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
index 13ddc42253d..c836400728b 100644
--- a/src/mongo/db/pipeline/sharded_agg_helpers.cpp
+++ b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
@@ -206,14 +206,17 @@ std::vector<RemoteCursor> establishShardCursors(OperationContext* opCtx,
// The collection is sharded. Use the routing table to decide which shards to target
// based on the query and collation, and build versioned requests for them.
for (const auto& shardId : shardIds) {
- auto versionedCmdObj = appendShardVersion(cmdObj, cm->getVersion(shardId));
+ ChunkVersion placementVersion = cm->getVersion(shardId);
+ auto versionedCmdObj = appendShardVersion(
+ cmdObj,
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none)));
requests.emplace_back(shardId, std::move(versionedCmdObj));
}
} else {
// The collection is unsharded. Target only the primary shard for the database.
// Don't append shard version info when contacting the config servers.
const auto cmdObjWithShardVersion = cm->dbPrimary() != ShardId::kConfigServerId
- ? appendShardVersion(cmdObj, ChunkVersion::UNSHARDED())
+ ? appendShardVersion(cmdObj, ShardVersion::UNSHARDED())
: cmdObj;
requests.emplace_back(cm->dbPrimary(),
appendDbVersionIfPresent(cmdObjWithShardVersion, cm->dbVersion()));
@@ -809,14 +812,16 @@ std::unique_ptr<Pipeline, PipelineDeleter> runPipelineDirectlyOnSingleShard(
auto versionedCmdObj = [&] {
if (cm.isSharded()) {
- return appendShardVersion(aggregation_request_helper::serializeToCommandObj(request),
- cm.getVersion(shardId));
+ ChunkVersion placementVersion = cm.getVersion(shardId);
+ return appendShardVersion(
+ aggregation_request_helper::serializeToCommandObj(request),
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none)));
} else {
// The collection is unsharded. Don't append shard version info when contacting the
// config servers.
const auto cmdObjWithShardVersion = (shardId != ShardId::kConfigServerId)
? appendShardVersion(aggregation_request_helper::serializeToCommandObj(request),
- ChunkVersion::UNSHARDED())
+ ShardVersion::UNSHARDED())
: aggregation_request_helper::serializeToCommandObj(request);
return appendDbVersionIfPresent(std::move(cmdObjWithShardVersion), cm.dbVersion());
}
diff --git a/src/mongo/db/pipeline/sharded_union_test.cpp b/src/mongo/db/pipeline/sharded_union_test.cpp
index 2b136a89dc2..54f4b091fa2 100644
--- a/src/mongo/db/pipeline/sharded_union_test.cpp
+++ b/src/mongo/db/pipeline/sharded_union_test.cpp
@@ -163,7 +163,8 @@ TEST_F(ShardedUnionTest, RetriesSubPipelineOnStaleConfigError) {
Timestamp timestamp{1, 0};
return createErrorCursorResponse(
Status{StaleConfigInfo(kTestAggregateNss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
boost::none,
ShardId{"0"}),
"Mock error: shard version mismatch"});
@@ -248,7 +249,8 @@ TEST_F(ShardedUnionTest, CorrectlySplitsSubPipelineIfRefreshedDistributionRequir
Timestamp timestamp{1, 0};
return createErrorCursorResponse(
Status{StaleConfigInfo(kTestAggregateNss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
boost::none,
ShardId{"0"}),
"Mock error: shard version mismatch"});
@@ -341,7 +343,8 @@ TEST_F(ShardedUnionTest, AvoidsSplittingSubPipelineIfRefreshedDistributionDoesNo
onCommand([&](const executor::RemoteCommandRequest& request) {
return createErrorCursorResponse(
Status{StaleConfigInfo(kTestAggregateNss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
boost::none,
ShardId{"0"}),
"Mock error: shard version mismatch"});
@@ -349,7 +352,8 @@ TEST_F(ShardedUnionTest, AvoidsSplittingSubPipelineIfRefreshedDistributionDoesNo
onCommand([&](const executor::RemoteCommandRequest& request) {
return createErrorCursorResponse(
Status{StaleConfigInfo(kTestAggregateNss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
boost::none,
ShardId{"0"}),
"Mock error: shard version mismatch"});
diff --git a/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp b/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp
index 8b8cfcad412..05514d0a4c9 100644
--- a/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp
@@ -301,13 +301,14 @@ TEST_F(BalancerCommandsSchedulerTest, SuccessfulRequestChunkDataSizeCommand) {
_scheduler.start(operationContext(), getMigrationRecoveryDefaultValues());
ChunkType chunk = makeChunk(0, kShardId0);
- auto futureResponse = _scheduler.requestDataSize(operationContext(),
- kNss,
- chunk.getShard(),
- chunk.getRange(),
- chunk.getVersion(),
- KeyPattern(BSON("x" << 1)),
- false /* issuedByRemoteUser */);
+ auto futureResponse = _scheduler.requestDataSize(
+ operationContext(),
+ kNss,
+ chunk.getShard(),
+ chunk.getRange(),
+ ShardVersion(chunk.getVersion(), CollectionIndexes(chunk.getVersion(), boost::none)),
+ KeyPattern(BSON("x" << 1)),
+ false /* issuedByRemoteUser */);
auto swReceivedDataSize = futureResponse.getNoThrow();
ASSERT_OK(swReceivedDataSize.getStatus());
auto receivedDataSize = swReceivedDataSize.getValue();
diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
index e1899a66eac..20e9e141ba9 100644
--- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
@@ -61,11 +61,12 @@ const std::string kProgress("progress");
const std::string kNoPhase("none");
const std::string kRemainingChunksToProcess("remainingChunksToProcess");
-ChunkVersion getShardVersion(OperationContext* opCtx,
+ShardVersion getShardVersion(OperationContext* opCtx,
const ShardId& shardId,
const NamespaceString& nss) {
auto cm = Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfo(opCtx, nss);
- return cm.getVersion(shardId);
+ const auto placementVersion = cm.getVersion(shardId);
+ return ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none));
}
std::vector<ChunkType> getCollectionChunks(OperationContext* opCtx, const CollectionType& coll) {
diff --git a/src/mongo/db/s/chunk_operation_precondition_checks.cpp b/src/mongo/db/s/chunk_operation_precondition_checks.cpp
index 483e6080e47..4ca7250b988 100644
--- a/src/mongo/db/s/chunk_operation_precondition_checks.cpp
+++ b/src/mongo/db/s/chunk_operation_precondition_checks.cpp
@@ -64,11 +64,13 @@ CollectionMetadata checkCollectionIdentity(OperationContext* opCtx,
"The collection was not found locally even though it is marked as sharded.",
collection);
- const auto shardVersion = metadata.getShardVersion();
+ const auto placementVersion = metadata.getShardVersion();
+ const auto shardVersion =
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none));
uassert(StaleConfigInfo(nss,
ShardVersion::IGNORED() /* receivedVersion */,
- ShardVersion(shardVersion) /* wantedVersion */,
+ shardVersion /* wantedVersion */,
shardId),
str::stream() << "Collection " << nss.ns()
<< " has changed since operation was sent (sent epoch: " << expectedEpoch
@@ -78,10 +80,10 @@ CollectionMetadata checkCollectionIdentity(OperationContext* opCtx,
uassert(StaleConfigInfo(nss,
ShardVersion::IGNORED() /* receivedVersion */,
- ShardVersion(shardVersion) /* wantedVersion */,
+ shardVersion /* wantedVersion */,
shardId),
str::stream() << "Shard does not contain any chunks for collection.",
- shardVersion.majorVersion() > 0);
+ placementVersion.majorVersion() > 0);
return metadata;
}
@@ -92,11 +94,13 @@ void checkShardKeyPattern(OperationContext* opCtx,
const ChunkRange& chunkRange) {
const auto shardId = ShardingState::get(opCtx)->shardId();
const auto& keyPattern = metadata.getKeyPattern();
- const auto shardVersion = metadata.getShardVersion();
+ const auto placementVersion = metadata.getShardVersion();
+ const auto shardVersion =
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none));
uassert(StaleConfigInfo(nss,
ShardVersion::IGNORED() /* receivedVersion */,
- ShardVersion(shardVersion) /* wantedVersion */,
+ shardVersion /* wantedVersion */,
shardId),
str::stream() << "The range " << chunkRange.toString()
<< " is not valid for collection " << nss.ns() << " with key pattern "
@@ -109,12 +113,14 @@ void checkChunkMatchesRange(OperationContext* opCtx,
const CollectionMetadata& metadata,
const ChunkRange& chunkRange) {
const auto shardId = ShardingState::get(opCtx)->shardId();
- const auto shardVersion = metadata.getShardVersion();
+ const auto placementVersion = metadata.getShardVersion();
+ const auto shardVersion =
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none));
ChunkType existingChunk;
uassert(StaleConfigInfo(nss,
ShardVersion::IGNORED() /* receivedVersion */,
- ShardVersion(shardVersion) /* wantedVersion */,
+ shardVersion /* wantedVersion */,
shardId),
str::stream() << "Range with bounds " << chunkRange.toString()
<< " is not owned by this shard.",
@@ -123,7 +129,7 @@ void checkChunkMatchesRange(OperationContext* opCtx,
uassert(StaleConfigInfo(nss,
ShardVersion::IGNORED() /* receivedVersion */,
- ShardVersion(shardVersion) /* wantedVersion */,
+ shardVersion /* wantedVersion */,
shardId),
str::stream() << "Chunk bounds " << chunkRange.toString() << " do not exist.",
existingChunk.getRange() == chunkRange);
@@ -134,12 +140,14 @@ void checkRangeWithinChunk(OperationContext* opCtx,
const CollectionMetadata& metadata,
const ChunkRange& chunkRange) {
const auto shardId = ShardingState::get(opCtx)->shardId();
- const auto shardVersion = metadata.getShardVersion();
+ const auto placementVersion = metadata.getShardVersion();
+ const auto shardVersion =
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none));
ChunkType existingChunk;
uassert(StaleConfigInfo(nss,
ShardVersion::IGNORED() /* receivedVersion */,
- ShardVersion(shardVersion) /* wantedVersion */,
+ shardVersion /* wantedVersion */,
shardId),
str::stream() << "Range with bounds " << chunkRange.toString()
<< " is not contained within a chunk owned by this shard.",
@@ -152,14 +160,16 @@ void checkRangeOwnership(OperationContext* opCtx,
const CollectionMetadata& metadata,
const ChunkRange& chunkRange) {
const auto shardId = ShardingState::get(opCtx)->shardId();
- const auto shardVersion = metadata.getShardVersion();
+ const auto placementVersion = metadata.getShardVersion();
+ const auto shardVersion =
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none));
ChunkType existingChunk;
BSONObj minKey = chunkRange.getMin();
do {
uassert(StaleConfigInfo(nss,
ShardVersion::IGNORED() /* receivedVersion */,
- ShardVersion(shardVersion) /* wantedVersion */,
+ shardVersion /* wantedVersion */,
shardId),
str::stream() << "Range with bounds " << chunkRange.toString()
<< " is not owned by this shard.",
@@ -170,7 +180,7 @@ void checkRangeOwnership(OperationContext* opCtx,
uassert(
StaleConfigInfo(nss,
ShardVersion::IGNORED() /* receivedVersion */,
- ShardVersion(shardVersion) /* wantedVersion */,
+ shardVersion /* wantedVersion */,
shardId),
str::stream() << "Shard does not contain a sequence of chunks that exactly fills the range "
<< chunkRange.toString(),
diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp
index 497bcba54cf..cb3f647bbbb 100644
--- a/src/mongo/db/s/collection_metadata_filtering_test.cpp
+++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp
@@ -151,7 +151,8 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsInTheFuture) {
ScopedSetShardRole scopedSetShardRole{
operationContext(),
kNss,
- ShardVersion(metadata.getShardVersion()) /* shardVersion */,
+ ShardVersion(metadata.getShardVersion(),
+ CollectionIndexes(metadata.getShardVersion(), boost::none)) /* shardVersion */,
boost::none /* databaseVersion */};
auto* const css = CollectionShardingState::get(operationContext(), kNss);
testFilterFn(css->getOwnershipFilter(
@@ -180,7 +181,8 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsInThePast) {
ScopedSetShardRole scopedSetShardRole{
operationContext(),
kNss,
- ShardVersion(metadata.getShardVersion()) /* shardVersion */,
+ ShardVersion(metadata.getShardVersion(),
+ CollectionIndexes(metadata.getShardVersion(), boost::none)) /* shardVersion */,
boost::none /* databaseVersion */};
auto* const css = CollectionShardingState::get(operationContext(), kNss);
testFilterFn(css->getOwnershipFilter(
@@ -217,7 +219,8 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsTooFarInThePastThrowsStal
ScopedSetShardRole scopedSetShardRole{
operationContext(),
kNss,
- ShardVersion(metadata.getShardVersion()) /* shardVersion */,
+ ShardVersion(metadata.getShardVersion(),
+ CollectionIndexes(metadata.getShardVersion(), boost::none)) /* shardVersion */,
boost::none /* databaseVersion */};
auto* const css = CollectionShardingState::get(operationContext(), kNss);
testFilterFn(css->getOwnershipFilter(
diff --git a/src/mongo/db/s/collection_sharding_runtime.cpp b/src/mongo/db/s/collection_sharding_runtime.cpp
index df80079487c..a01b9528cc3 100644
--- a/src/mongo/db/s/collection_sharding_runtime.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime.cpp
@@ -388,7 +388,8 @@ CollectionShardingRuntime::_getMetadataWithVersionCheckAt(
const auto& currentMetadata = optCurrentMetadata->get();
const auto wantedPlacementVersion = currentMetadata.getShardVersion();
- const auto wantedShardVersion = ShardVersion(wantedPlacementVersion);
+ const auto wantedShardVersion = ShardVersion(
+ wantedPlacementVersion, CollectionIndexes(wantedPlacementVersion, boost::none));
const ChunkVersion receivedPlacementVersion = receivedShardVersion;
if (wantedShardVersion.isWriteCompatibleWith(receivedShardVersion) ||
diff --git a/src/mongo/db/s/collection_sharding_runtime_test.cpp b/src/mongo/db/s/collection_sharding_runtime_test.cpp
index 32a32d3f377..c38f1ccb354 100644
--- a/src/mongo/db/s/collection_sharding_runtime_test.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime_test.cpp
@@ -90,10 +90,12 @@ TEST_F(CollectionShardingRuntimeTest,
CollectionShardingRuntime csr(getServiceContext(), kTestNss, executor());
ASSERT_FALSE(csr.getCollectionDescription(opCtx).isSharded());
auto metadata = makeShardedMetadata(opCtx);
- ScopedSetShardRole scopedSetShardRole{opCtx,
- kTestNss,
- ShardVersion(metadata.getShardVersion()),
- boost::none /* databaseVersion */};
+ ScopedSetShardRole scopedSetShardRole{
+ opCtx,
+ kTestNss,
+ ShardVersion(metadata.getShardVersion(),
+ CollectionIndexes(metadata.getShardVersion(), boost::none)),
+ boost::none /* databaseVersion */};
ASSERT_THROWS_CODE(csr.getCollectionDescription(opCtx), DBException, ErrorCodes::StaleConfig);
}
@@ -111,10 +113,12 @@ TEST_F(CollectionShardingRuntimeTest,
OperationContext* opCtx = operationContext();
auto metadata = makeShardedMetadata(opCtx);
csr.setFilteringMetadata(opCtx, metadata);
- ScopedSetShardRole scopedSetShardRole{opCtx,
- kTestNss,
- ShardVersion(metadata.getShardVersion()),
- boost::none /* databaseVersion */};
+ ScopedSetShardRole scopedSetShardRole{
+ opCtx,
+ kTestNss,
+ ShardVersion(metadata.getShardVersion(),
+ CollectionIndexes(metadata.getShardVersion(), boost::none)),
+ boost::none /* databaseVersion */};
ASSERT_TRUE(csr.getCollectionDescription(opCtx).isSharded());
}
@@ -177,10 +181,12 @@ TEST_F(CollectionShardingRuntimeTest,
OperationContext* opCtx = operationContext();
auto metadata = makeShardedMetadata(opCtx);
csr.setFilteringMetadata(opCtx, metadata);
- ScopedSetShardRole scopedSetShardRole{opCtx,
- kTestNss,
- ShardVersion(metadata.getShardVersion()),
- boost::none /* databaseVersion */};
+ ScopedSetShardRole scopedSetShardRole{
+ opCtx,
+ kTestNss,
+ ShardVersion(metadata.getShardVersion(),
+ CollectionIndexes(metadata.getShardVersion(), boost::none)),
+ boost::none /* databaseVersion */};
ASSERT_EQ(csr.getNumMetadataManagerChanges_forTest(), 1);
// Set it again with a different metadata object (UUID is generated randomly in
@@ -220,11 +226,13 @@ TEST_F(CollectionShardingRuntimeTest, ReturnUnshardedMetadataInServerlessMode) {
ASSERT_FALSE(csr.getCollectionDescription(opCtx).isSharded());
// Enable sharding state and set shard version on the OSS for logical session nss.
+ CollectionGeneration gen{OID::gen(), Timestamp(1, 1)};
ScopedSetShardRole scopedSetShardRole2{
opCtx,
NamespaceString::kLogicalSessionsNamespace,
- ShardVersion(ChunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0})), /* shardVersion */
- boost::none /* databaseVersion */
+ ShardVersion(ChunkVersion(gen, {1, 0}),
+ CollectionIndexes(gen, boost::none)), /* shardVersion */
+ boost::none /* databaseVersion */
};
CollectionShardingRuntime csrLogicalSession(
diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp
index 43247243663..b312b54c8bb 100644
--- a/src/mongo/db/s/create_collection_coordinator.cpp
+++ b/src/mongo/db/s/create_collection_coordinator.cpp
@@ -840,7 +840,9 @@ void CreateCollectionCoordinator::_commit(OperationContext* opCtx) {
"numInitialChunks"_attr = _initialChunks->chunks.size(),
"initialCollectionVersion"_attr = _initialChunks->collVersion());
- auto result = CreateCollectionResponse(_initialChunks->chunks.back().getVersion());
+ const auto placementVersion = _initialChunks->chunks.back().getVersion();
+ auto result = CreateCollectionResponse(
+ {placementVersion, CollectionIndexes(placementVersion, boost::none)});
result.setCollectionUUID(_collectionUUID);
_result = std::move(result);
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 415a0fb1042..f74337c7b8e 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -786,7 +786,9 @@ MigrationDestinationManager::IndexesAndIdIndex MigrationDestinationManager::getC
auto cmd = nssOrUUID.nss() ? BSON("listIndexes" << nssOrUUID.nss()->coll())
: BSON("listIndexes" << *nssOrUUID.uuid());
if (cm) {
- cmd = appendShardVersion(cmd, cm->getVersion(fromShardId));
+ ChunkVersion placementVersion = cm->getVersion(fromShardId);
+ cmd = appendShardVersion(
+ cmd, ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none)));
}
if (afterClusterTime) {
cmd = cmd.addFields(makeLocalReadConcernWithAfterClusterTime(*afterClusterTime));
diff --git a/src/mongo/db/s/op_observer_sharding_test.cpp b/src/mongo/db/s/op_observer_sharding_test.cpp
index 628a62ae180..fefe6006dbc 100644
--- a/src/mongo/db/s/op_observer_sharding_test.cpp
+++ b/src/mongo/db/s/op_observer_sharding_test.cpp
@@ -109,7 +109,8 @@ TEST_F(DocumentKeyStateTest, MakeDocumentKeyStateShardedWithoutIdInShardKey) {
ScopedSetShardRole scopedSetShardRole{
operationContext(),
kTestNss,
- ShardVersion(metadata.getShardVersion()) /* shardVersion */,
+ ShardVersion(metadata.getShardVersion(),
+ CollectionIndexes(metadata.getShardVersion(), boost::none)) /* shardVersion */,
boost::none /* databaseVersion */};
// The order of fields in `doc` deliberately does not match the shard key
@@ -137,7 +138,8 @@ TEST_F(DocumentKeyStateTest, MakeDocumentKeyStateShardedWithIdInShardKey) {
ScopedSetShardRole scopedSetShardRole{
operationContext(),
kTestNss,
- ShardVersion(metadata.getShardVersion()) /* shardVersion */,
+ ShardVersion(metadata.getShardVersion(),
+ CollectionIndexes(metadata.getShardVersion(), boost::none)) /* shardVersion */,
boost::none /* databaseVersion */};
// The order of fields in `doc` deliberately does not match the shard key
@@ -165,7 +167,8 @@ TEST_F(DocumentKeyStateTest, MakeDocumentKeyStateShardedWithIdHashInShardKey) {
ScopedSetShardRole scopedSetShardRole{
operationContext(),
kTestNss,
- ShardVersion(metadata.getShardVersion()) /* shardVersion */,
+ ShardVersion(metadata.getShardVersion(),
+ CollectionIndexes(metadata.getShardVersion(), boost::none)) /* shardVersion */,
boost::none /* databaseVersion */};
auto doc = BSON("key2" << true << "_id"
diff --git a/src/mongo/db/s/operation_sharding_state_test.cpp b/src/mongo/db/s/operation_sharding_state_test.cpp
index 837a65c64dc..28275a27007 100644
--- a/src/mongo/db/s/operation_sharding_state_test.cpp
+++ b/src/mongo/db/s/operation_sharding_state_test.cpp
@@ -47,7 +47,8 @@ TEST_F(OperationShardingStateTest, ScopedSetShardRoleDbVersion) {
}
TEST_F(OperationShardingStateTest, ScopedSetShardRoleShardVersion) {
- ShardVersion shardVersion(ChunkVersion({OID::gen(), Timestamp(1, 0)}, {1, 0}));
+ CollectionGeneration gen(OID::gen(), Timestamp(1, 0));
+ ShardVersion shardVersion({gen, {1, 0}}, {gen, boost::none});
ScopedSetShardRole scopedSetShardRole(operationContext(), kNss, shardVersion, boost::none);
auto& oss = OperationShardingState::get(operationContext());
@@ -58,13 +59,15 @@ TEST_F(OperationShardingStateTest, ScopedSetShardRoleChangeShardVersionSameNames
auto& oss = OperationShardingState::get(operationContext());
{
- ShardVersion shardVersion1(ChunkVersion({OID::gen(), Timestamp(10, 0)}, {1, 0}));
+ CollectionGeneration gen1(OID::gen(), Timestamp(10, 0));
+ ShardVersion shardVersion1({gen1, {1, 0}}, {gen1, boost::none});
ScopedSetShardRole scopedSetShardRole1(
operationContext(), kNss, shardVersion1, boost::none);
ASSERT_EQ(shardVersion1, *oss.getShardVersion(kNss));
}
{
- ShardVersion shardVersion2(ChunkVersion({OID::gen(), Timestamp(20, 0)}, {1, 0}));
+ CollectionGeneration gen2(OID::gen(), Timestamp(20, 0));
+ ShardVersion shardVersion2({gen2, {1, 0}}, {gen2, boost::none});
ScopedSetShardRole scopedSetShardRole2(
operationContext(), kNss, shardVersion2, boost::none);
ASSERT_EQ(shardVersion2, *oss.getShardVersion(kNss));
@@ -72,8 +75,10 @@ TEST_F(OperationShardingStateTest, ScopedSetShardRoleChangeShardVersionSameNames
}
TEST_F(OperationShardingStateTest, ScopedSetShardRoleRecursiveShardVersionDifferentNamespaces) {
- ShardVersion shardVersion1(ChunkVersion({OID::gen(), Timestamp(10, 0)}, {1, 0}));
- ShardVersion shardVersion2(ChunkVersion({OID::gen(), Timestamp(20, 0)}, {1, 0}));
+ CollectionGeneration gen1(OID::gen(), Timestamp(10, 0));
+ CollectionGeneration gen2(OID::gen(), Timestamp(20, 0));
+ ShardVersion shardVersion1({gen1, {1, 0}}, {gen1, boost::none});
+ ShardVersion shardVersion2({gen2, {1, 0}}, {gen2, boost::none});
ScopedSetShardRole scopedSetShardRole1(operationContext(), kNss, shardVersion1, boost::none);
ScopedSetShardRole scopedSetShardRole2(
diff --git a/src/mongo/db/s/rename_collection_coordinator.cpp b/src/mongo/db/s/rename_collection_coordinator.cpp
index 62dd66dceb3..e1dc8fea867 100644
--- a/src/mongo/db/s/rename_collection_coordinator.cpp
+++ b/src/mongo/db/s/rename_collection_coordinator.cpp
@@ -380,29 +380,31 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
sharding_ddl_util::sendAuthenticatedCommandToShards(
opCtx, fromNss.db(), cmdObj.addFields(osi.toBSON()), participants, **executor);
}))
- .then(_executePhase(Phase::kSetResponse,
- [this, anchor = shared_from_this()] {
- auto opCtxHolder = cc().makeOperationContext();
- auto* opCtx = opCtxHolder.get();
- getForwardableOpMetadata().setOn(opCtx);
-
- // Retrieve the new collection version
- const auto catalog = Grid::get(opCtx)->catalogCache();
- const auto cm =
- uassertStatusOK(catalog->getCollectionRoutingInfoWithRefresh(
- opCtx, _request.getTo()));
- _response = RenameCollectionResponse(
- cm.isSharded() ? cm.getVersion() : ChunkVersion::UNSHARDED());
-
- ShardingLogging::get(opCtx)->logChange(
- opCtx,
- "renameCollection.end",
- nss().ns(),
- BSON("source" << nss().toString() << "destination"
- << _request.getTo().toString()),
- ShardingCatalogClient::kMajorityWriteConcern);
- LOGV2(5460504, "Collection renamed", "namespace"_attr = nss());
- }))
+ .then(_executePhase(
+ Phase::kSetResponse,
+ [this, anchor = shared_from_this()] {
+ auto opCtxHolder = cc().makeOperationContext();
+ auto* opCtx = opCtxHolder.get();
+ getForwardableOpMetadata().setOn(opCtx);
+
+ // Retrieve the new collection version
+ const auto catalog = Grid::get(opCtx)->catalogCache();
+ const auto cm = uassertStatusOK(
+ catalog->getCollectionRoutingInfoWithRefresh(opCtx, _request.getTo()));
+ _response = RenameCollectionResponse(
+ cm.isSharded() ? ShardVersion(cm.getVersion(),
+ CollectionIndexes(cm.getVersion(), boost::none))
+ : ShardVersion::UNSHARDED());
+
+ ShardingLogging::get(opCtx)->logChange(
+ opCtx,
+ "renameCollection.end",
+ nss().ns(),
+ BSON("source" << nss().toString() << "destination"
+ << _request.getTo().toString()),
+ ShardingCatalogClient::kMajorityWriteConcern);
+ LOGV2(5460504, "Collection renamed", "namespace"_attr = nss());
+ }))
.onError([this, anchor = shared_from_this()](const Status& status) {
if (!status.isA<ErrorCategory::NotPrimaryError>() &&
!status.isA<ErrorCategory::ShutdownError>()) {
diff --git a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
index 706fa9300c7..2b309d7a4d9 100644
--- a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
@@ -197,7 +197,8 @@ protected:
ReshardingEnv env(CollectionCatalog::get(opCtx)->lookupUUIDByNSS(opCtx, kNss).value());
env.destShard = kShardList[1].getName();
- env.version = ShardVersion(ChunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0}));
+ CollectionGeneration gen(OID::gen(), Timestamp(1, 1));
+ env.version = ShardVersion(ChunkVersion(gen, {1, 0}), CollectionIndexes(gen, boost::none));
env.tempNss =
NamespaceString(kNss.db(),
fmt::format("{}{}",
diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
index 67e89ec8188..88f81e4326c 100644
--- a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
@@ -271,7 +271,9 @@ protected:
ScopedSetShardRole scopedSetShardRole{
opCtx,
sourceNss,
- ShardVersion(metadata.getShardVersion()) /* shardVersion */,
+ ShardVersion(
+ metadata.getShardVersion(),
+ CollectionIndexes(metadata.getShardVersion(), boost::none)) /* shardVersion */,
boost::none /* databaseVersion */};
auto csr = CollectionShardingRuntime::get(opCtx, sourceNss);
@@ -353,7 +355,8 @@ TEST_F(ReshardingDonorRecipientCommonInternalsTest, ConstructDonorDocumentFromRe
ScopedSetShardRole scopedSetShardRole{
opCtx,
kOriginalNss,
- ShardVersion(metadata.getShardVersion()) /* shardVersion */,
+ ShardVersion(metadata.getShardVersion(),
+ CollectionIndexes(metadata.getShardVersion(), boost::none)) /* shardVersion */,
boost::none /* databaseVersion */};
auto reshardingFields =
@@ -373,7 +376,8 @@ TEST_F(ReshardingDonorRecipientCommonInternalsTest,
ScopedSetShardRole scopedSetShardRole{
opCtx,
kTemporaryReshardingNss,
- ShardVersion(metadata.getShardVersion()) /* shardVersion */,
+ ShardVersion(metadata.getShardVersion(),
+ CollectionIndexes(metadata.getShardVersion(), boost::none)) /* shardVersion */,
boost::none /* databaseVersion */};
auto reshardingFields =
@@ -392,7 +396,8 @@ TEST_F(ReshardingDonorRecipientCommonTest, CreateDonorServiceInstance) {
ScopedSetShardRole scopedSetShardRole{
opCtx,
kOriginalNss,
- ShardVersion(metadata.getShardVersion()) /* shardVersion */,
+ ShardVersion(metadata.getShardVersion(),
+ CollectionIndexes(metadata.getShardVersion(), boost::none)) /* shardVersion */,
boost::none /* databaseVersion */};
auto reshardingFields =
@@ -419,7 +424,8 @@ TEST_F(ReshardingDonorRecipientCommonTest, CreateRecipientServiceInstance) {
ScopedSetShardRole scopedSetShardRole{
opCtx,
kTemporaryReshardingNss,
- ShardVersion(metadata.getShardVersion()) /* shardVersion */,
+ ShardVersion(metadata.getShardVersion(),
+ CollectionIndexes(metadata.getShardVersion(), boost::none)) /* shardVersion */,
boost::none /* databaseVersion */};
auto reshardingFields =
@@ -447,7 +453,8 @@ TEST_F(ReshardingDonorRecipientCommonTest,
ScopedSetShardRole scopedSetShardRole{
opCtx,
kOriginalNss,
- ShardVersion(metadata.getShardVersion()) /* shardVersion */,
+ ShardVersion(metadata.getShardVersion(),
+ CollectionIndexes(metadata.getShardVersion(), boost::none)) /* shardVersion */,
boost::none /* databaseVersion */};
auto reshardingFields =
@@ -469,7 +476,8 @@ TEST_F(ReshardingDonorRecipientCommonTest,
ScopedSetShardRole scopedSetShardRole{
opCtx,
kTemporaryReshardingNss,
- ShardVersion(metadata.getShardVersion()) /* shardVersion */,
+ ShardVersion(metadata.getShardVersion(),
+ CollectionIndexes(metadata.getShardVersion(), boost::none)) /* shardVersion */,
boost::none /* databaseVersion */};
auto reshardingFields =
@@ -497,7 +505,8 @@ TEST_F(ReshardingDonorRecipientCommonTest, ProcessDonorFieldsWhenShardDoesntOwnA
ScopedSetShardRole scopedSetShardRole{
opCtx,
kOriginalNss,
- ShardVersion(metadata.getShardVersion()) /* shardVersion */,
+ ShardVersion(metadata.getShardVersion(),
+ CollectionIndexes(metadata.getShardVersion(), boost::none)) /* shardVersion */,
boost::none /* databaseVersion */};
auto reshardingFields =
@@ -522,7 +531,8 @@ TEST_F(ReshardingDonorRecipientCommonTest, ProcessRecipientFieldsWhenShardDoesnt
ScopedSetShardRole scopedSetShardRole{
opCtx,
kTemporaryReshardingNss,
- ShardVersion(metadata.getShardVersion()) /* shardVersion */,
+ ShardVersion(metadata.getShardVersion(),
+ CollectionIndexes(metadata.getShardVersion(), boost::none)) /* shardVersion */,
boost::none /* databaseVersion */};
auto reshardingFields =
@@ -548,7 +558,8 @@ TEST_F(ReshardingDonorRecipientCommonTest, ProcessReshardingFieldsWithoutDonorOr
ScopedSetShardRole scopedSetShardRole{
opCtx,
kTemporaryReshardingNss,
- ShardVersion(metadata.getShardVersion()) /* shardVersion */,
+ ShardVersion(metadata.getShardVersion(),
+ CollectionIndexes(metadata.getShardVersion(), boost::none)) /* shardVersion */,
boost::none /* databaseVersion */};
auto reshardingFields =
diff --git a/src/mongo/db/s/shard_key_util.cpp b/src/mongo/db/s/shard_key_util.cpp
index 2bacf4b94c5..95283ba0049 100644
--- a/src/mongo/db/s/shard_key_util.cpp
+++ b/src/mongo/db/s/shard_key_util.cpp
@@ -321,11 +321,14 @@ ValidationBehaviorsRefineShardKey::ValidationBehaviorsRefineShardKey(OperationCo
std::vector<BSONObj> ValidationBehaviorsRefineShardKey::loadIndexes(
const NamespaceString& nss) const {
+ ChunkVersion placementVersion = _cm.getVersion(_indexShard->getId());
auto indexesRes = _indexShard->runExhaustiveCursorCommand(
_opCtx,
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
nss.db().toString(),
- appendShardVersion(BSON("listIndexes" << nss.coll()), _cm.getVersion(_indexShard->getId())),
+ appendShardVersion(
+ BSON("listIndexes" << nss.coll()),
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none))),
Milliseconds(-1));
if (indexesRes.getStatus().code() != ErrorCodes::NamespaceNotFound) {
return uassertStatusOK(indexesRes).docs;
@@ -335,13 +338,14 @@ std::vector<BSONObj> ValidationBehaviorsRefineShardKey::loadIndexes(
void ValidationBehaviorsRefineShardKey::verifyUsefulNonMultiKeyIndex(
const NamespaceString& nss, const BSONObj& proposedKey) const {
+ ChunkVersion placementVersion = _cm.getVersion(_indexShard->getId());
auto checkShardingIndexRes = uassertStatusOK(_indexShard->runCommand(
_opCtx,
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
"admin",
appendShardVersion(
BSON(kCheckShardingIndexCmdName << nss.ns() << kKeyPatternField << proposedKey),
- _cm.getVersion(_indexShard->getId())),
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none))),
Shard::RetryPolicy::kIdempotent));
if (checkShardingIndexRes.commandStatus == ErrorCodes::UnknownError) {
// CheckShardingIndex returns UnknownError if a compatible shard key index cannot be found,
diff --git a/src/mongo/db/s/sharding_ddl_util.cpp b/src/mongo/db/s/sharding_ddl_util.cpp
index b01a264077d..626e9e7624d 100644
--- a/src/mongo/db/s/sharding_ddl_util.cpp
+++ b/src/mongo/db/s/sharding_ddl_util.cpp
@@ -522,7 +522,8 @@ boost::optional<CreateCollectionResponse> checkIfCollectionAlreadySharded(
SimpleBSONObjComparator::kInstance.evaluate(defaultCollator == collation) &&
cm.isUnique() == unique);
- CreateCollectionResponse response(cm.getVersion());
+ CreateCollectionResponse response(
+ {cm.getVersion(), CollectionIndexes(cm.getVersion(), boost::none)});
response.setCollectionUUID(cm.getUUID());
return response;
}
diff --git a/src/mongo/db/s/sharding_write_router_bm.cpp b/src/mongo/db/s/sharding_write_router_bm.cpp
index ce28ff604c5..08bf6c52a54 100644
--- a/src/mongo/db/s/sharding_write_router_bm.cpp
+++ b/src/mongo/db/s/sharding_write_router_bm.cpp
@@ -147,10 +147,12 @@ std::unique_ptr<CatalogCacheMock> createCatalogCacheMock(OperationContext* opCtx
opCtx->getServiceContext(),
std::make_unique<CollectionShardingStateFactoryShard>(opCtx->getServiceContext()));
+ const ChunkVersion placementVersion = chunkManager.getVersion(originatorShard);
OperationShardingState::setShardRole(
opCtx,
kNss,
- ShardVersion(chunkManager.getVersion(originatorShard)) /* shardVersion */,
+ ShardVersion(placementVersion,
+ CollectionIndexes(placementVersion, boost::none)) /* shardVersion */,
boost::none /* databaseVersion */);
// Configuring the filtering metadata such that calls to getCollectionDescription return what we
diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp
index 3ee8a2e808f..5cdec99ab46 100644
--- a/src/mongo/db/s/split_chunk.cpp
+++ b/src/mongo/db/s/split_chunk.cpp
@@ -120,15 +120,17 @@ bool checkMetadataForSuccessfulSplitChunk(OperationContext* opCtx,
shardId),
str::stream() << "Collection " << nss.ns() << " is not sharded",
metadataAfterSplit->isSharded());
- const auto epoch = metadataAfterSplit->getShardVersion().epoch();
- uassert(StaleConfigInfo(nss,
- ShardVersion::IGNORED() /* receivedVersion */,
- ShardVersion(metadataAfterSplit->getShardVersion()) /* wantedVersion */,
- shardId),
+ const auto placementVersion = metadataAfterSplit->getShardVersion();
+ const auto epoch = placementVersion.epoch();
+ uassert(StaleConfigInfo(
+ nss,
+ ShardVersion::IGNORED() /* receivedVersion */,
+ ShardVersion(placementVersion,
+ CollectionIndexes(placementVersion, boost::none)) /* wantedVersion */,
+ shardId),
str::stream() << "Collection " << nss.ns() << " changed since split start",
epoch == expectedEpoch &&
- (!expectedTimestamp ||
- metadataAfterSplit->getShardVersion().getTimestamp() == expectedTimestamp));
+ (!expectedTimestamp || placementVersion.getTimestamp() == expectedTimestamp));
ChunkType nextChunk;
for (auto it = splitPoints.begin(); it != splitPoints.end(); ++it) {
diff --git a/src/mongo/s/append_raw_responses_test.cpp b/src/mongo/s/append_raw_responses_test.cpp
index 99c7387724e..547d4e37c52 100644
--- a/src/mongo/s/append_raw_responses_test.cpp
+++ b/src/mongo/s/append_raw_responses_test.cpp
@@ -197,7 +197,8 @@ protected:
OID epoch{OID::gen()};
Timestamp timestamp{1, 0};
return StaleConfigInfo(NamespaceString("Foo.Bar"),
- ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
boost::none,
ShardId{"dummy"});
}(),
diff --git a/src/mongo/s/catalog_cache_test.cpp b/src/mongo/s/catalog_cache_test.cpp
index c68dc4b0053..d13273f005c 100644
--- a/src/mongo/s/catalog_cache_test.cpp
+++ b/src/mongo/s/catalog_cache_test.cpp
@@ -259,8 +259,9 @@ TEST_F(CatalogCacheTest, OnStaleDatabaseVersionNoVersion) {
TEST_F(CatalogCacheTest, OnStaleShardVersionWithSameVersion) {
const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1));
+ const CollectionGeneration gen(OID::gen(), Timestamp(1, 1));
const auto cachedCollVersion =
- ShardVersion(ChunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0}));
+ ShardVersion(ChunkVersion(gen, {1, 0}), CollectionIndexes(gen, boost::none));
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)});
loadCollection(cachedCollVersion);
@@ -285,8 +286,8 @@ TEST_F(CatalogCacheTest, OnStaleShardVersionWithNoVersion) {
TEST_F(CatalogCacheTest, OnStaleShardVersionWithGraterVersion) {
const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1));
const auto cachedCollVersion = ChunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
- const auto wantedCollVersion = ShardVersion(
- ChunkVersion({cachedCollVersion.epoch(), cachedCollVersion.getTimestamp()}, {2, 0}));
+ const auto wantedCollVersion = ShardVersion(ChunkVersion(cachedCollVersion, {2, 0}),
+ CollectionIndexes(cachedCollVersion, boost::none));
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)});
loadCollection(cachedCollVersion);
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 31de7493b57..f44c58c8760 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -930,15 +930,15 @@ bool ComparableChunkVersion::operator<(const ComparableChunkVersion& other) cons
}
ShardEndpoint::ShardEndpoint(const ShardId& shardName,
- boost::optional<ChunkVersion> shardVersion,
+ boost::optional<ShardVersion> shardVersion,
boost::optional<DatabaseVersion> dbVersion)
: shardName(shardName),
shardVersion(std::move(shardVersion)),
databaseVersion(std::move(dbVersion)) {
if (databaseVersion)
- invariant(shardVersion && *shardVersion == ChunkVersion::UNSHARDED());
+ invariant(shardVersion && *shardVersion == ShardVersion::UNSHARDED());
else if (shardVersion)
- invariant(*shardVersion != ChunkVersion::UNSHARDED());
+ invariant(*shardVersion != ShardVersion::UNSHARDED());
else
invariant(shardName == ShardId::kConfigServerId);
}
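The invariant block above is unchanged by this commit apart from the ChunkVersion → ShardVersion type swap, and it admits exactly three legal endpoint states. A self-contained sketch of that logic follows; assert stands in for mongo's invariant(), a plain string for ShardId, and checkEndpoint is a hypothetical name, not a function in the codebase.

// Hedged sketch (C++17) of the three states ShardEndpoint's constructor allows.
#include <cassert>
#include <optional>
#include <string>

struct ShardVersion {
    bool isUnsharded = false;  // models comparison against ShardVersion::UNSHARDED()
};
struct DatabaseVersion {};

void checkEndpoint(const std::string& shardName,
                   const std::optional<ShardVersion>& shardVersion,
                   const std::optional<DatabaseVersion>& dbVersion) {
    if (dbVersion) {
        // Database-versioned (unsharded collection): the shard version must be
        // present and must be the UNSHARDED sentinel.
        assert(shardVersion && shardVersion->isUnsharded);
    } else if (shardVersion) {
        // Collection-versioned (sharded collection): a real shard version.
        assert(!shardVersion->isUnsharded);
    } else {
        // No version at all is only legal when targeting the config server.
        assert(shardName == "config");
    }
}

int main() {
    checkEndpoint("shard0", ShardVersion{false}, std::nullopt);      // sharded
    checkEndpoint("shard0", ShardVersion{true}, DatabaseVersion{});  // unsharded
    checkEndpoint("config", std::nullopt, std::nullopt);             // config server
    return 0;
}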
diff --git a/src/mongo/s/chunk_manager.h b/src/mongo/s/chunk_manager.h
index e8f1b1c860d..595ad25cfff 100644
--- a/src/mongo/s/chunk_manager.h
+++ b/src/mongo/s/chunk_manager.h
@@ -40,6 +40,7 @@
#include "mongo/s/database_version.h"
#include "mongo/s/resharding/type_collection_fields_gen.h"
#include "mongo/s/shard_key_pattern.h"
+#include "mongo/s/shard_version.h"
#include "mongo/s/type_collection_common_types_gen.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/ticketholder.h"
@@ -471,12 +472,12 @@ using RoutingTableHistoryValueHandle = RoutingTableHistoryCache::ValueHandle;
*/
struct ShardEndpoint {
ShardEndpoint(const ShardId& shardName,
- boost::optional<ChunkVersion> shardVersion,
+ boost::optional<ShardVersion> shardVersion,
boost::optional<DatabaseVersion> dbVersion);
ShardId shardName;
- boost::optional<ChunkVersion> shardVersion;
+ boost::optional<ShardVersion> shardVersion;
boost::optional<DatabaseVersion> databaseVersion;
};
diff --git a/src/mongo/s/chunk_manager_targeter.cpp b/src/mongo/s/chunk_manager_targeter.cpp
index 5d964f74e51..aee5f0da601 100644
--- a/src/mongo/s/chunk_manager_targeter.cpp
+++ b/src/mongo/s/chunk_manager_targeter.cpp
@@ -362,7 +362,7 @@ ShardEndpoint ChunkManagerTargeter::targetInsert(OperationContext* opCtx,
// in commands
return ShardEndpoint(
_cm.dbPrimary(),
- _nss.isOnInternalDb() ? boost::optional<ChunkVersion>() : ChunkVersion::UNSHARDED(),
+ _nss.isOnInternalDb() ? boost::optional<ShardVersion>() : ShardVersion::UNSHARDED(),
_nss.isOnInternalDb() ? boost::optional<DatabaseVersion>() : _cm.dbVersion());
}
@@ -390,7 +390,7 @@ std::vector<ShardEndpoint> ChunkManagerTargeter::targetUpdate(OperationContext*
// shardVersion in commands
return std::vector{ShardEndpoint(
_cm.dbPrimary(),
- _nss.isOnInternalDb() ? boost::optional<ChunkVersion>() : ChunkVersion::UNSHARDED(),
+ _nss.isOnInternalDb() ? boost::optional<ShardVersion>() : ShardVersion::UNSHARDED(),
_nss.isOnInternalDb() ? boost::optional<DatabaseVersion>() : _cm.dbVersion())};
}
@@ -581,7 +581,7 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::_targetQuery(
// shardVersion in commands
return std::vector{ShardEndpoint(
_cm.dbPrimary(),
- _nss.isOnInternalDb() ? boost::optional<ChunkVersion>() : ChunkVersion::UNSHARDED(),
+ _nss.isOnInternalDb() ? boost::optional<ShardVersion>() : ShardVersion::UNSHARDED(),
_nss.isOnInternalDb() ? boost::optional<DatabaseVersion>() : _cm.dbVersion())};
}
@@ -594,7 +594,11 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::_targetQuery(
std::vector<ShardEndpoint> endpoints;
for (auto&& shardId : shardIds) {
- endpoints.emplace_back(std::move(shardId), _cm.getVersion(shardId), boost::none);
+ const auto placementVersion = _cm.getVersion(shardId);
+ endpoints.emplace_back(
+ std::move(shardId),
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none)),
+ boost::none);
}
return endpoints;
@@ -604,7 +608,11 @@ StatusWith<ShardEndpoint> ChunkManagerTargeter::_targetShardKey(const BSONObj& s
const BSONObj& collation) const {
try {
auto chunk = _cm.findIntersectingChunk(shardKey, collation);
- return ShardEndpoint(chunk.getShardId(), _cm.getVersion(chunk.getShardId()), boost::none);
+ const auto placementVersion = _cm.getVersion(chunk.getShardId());
+ return ShardEndpoint(
+ chunk.getShardId(),
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none)),
+ boost::none);
} catch (const DBException& ex) {
return ex.toStatus();
}
@@ -620,7 +628,11 @@ std::vector<ShardEndpoint> ChunkManagerTargeter::targetAllShards(OperationContex
std::vector<ShardEndpoint> endpoints;
for (auto&& shardId : shardIds) {
- endpoints.emplace_back(std::move(shardId), _cm.getVersion(shardId), boost::none);
+ const auto placementVersion = _cm.getVersion(shardId);
+ endpoints.emplace_back(
+ std::move(shardId),
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none)),
+ boost::none);
}
return endpoints;
diff --git a/src/mongo/s/cluster_commands_helpers.cpp b/src/mongo/s/cluster_commands_helpers.cpp
index 6f3a52b7b1a..663e51cad92 100644
--- a/src/mongo/s/cluster_commands_helpers.cpp
+++ b/src/mongo/s/cluster_commands_helpers.cpp
@@ -154,7 +154,7 @@ std::vector<AsyncRequestsSender::Request> buildVersionedRequestsForTargetedShard
// Attach shardVersion "UNSHARDED", unless targeting the config server.
const auto cmdObjWithShardVersion = (primaryShardId != ShardId::kConfigServerId)
- ? appendShardVersion(cmdToSend, ChunkVersion::UNSHARDED())
+ ? appendShardVersion(cmdToSend, ShardVersion::UNSHARDED())
: cmdToSend;
return std::vector<AsyncRequestsSender::Request>{AsyncRequestsSender::Request(
@@ -177,7 +177,12 @@ std::vector<AsyncRequestsSender::Request> buildVersionedRequestsForTargetedShard
for (const ShardId& shardId : shardIds) {
if (shardsToSkip.find(shardId) == shardsToSkip.end()) {
- requests.emplace_back(shardId, appendShardVersion(cmdToSend, cm.getVersion(shardId)));
+ ChunkVersion placementVersion = cm.getVersion(shardId);
+ requests.emplace_back(
+ shardId,
+ appendShardVersion(cmdToSend,
+ ShardVersion(placementVersion,
+ CollectionIndexes(placementVersion, boost::none))));
}
}
@@ -438,7 +443,7 @@ AsyncRequestsSender::Response executeCommandAgainstDatabasePrimary(
Shard::RetryPolicy retryPolicy) {
// Attach shardVersion "UNSHARDED", unless targeting the config server.
const auto cmdObjWithShardVersion = (dbInfo->getPrimary() != ShardId::kConfigServerId)
- ? appendShardVersion(cmdObj, ChunkVersion::UNSHARDED())
+ ? appendShardVersion(cmdObj, ShardVersion::UNSHARDED())
: cmdObj;
auto responses =
@@ -699,14 +704,17 @@ StatusWith<Shard::QueryResponse> loadIndexesFromAuthoritativeShard(OperationCont
// For a sharded collection we must load indexes from a shard with chunks. For
// consistency with cluster listIndexes, load from the shard that owns the minKey chunk.
const auto minKeyShardId = cm.getMinKeyShardIdWithSimpleCollation();
+ ChunkVersion placementVersion = cm.getVersion(minKeyShardId);
return {
uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, minKeyShardId)),
- appendShardVersion(cmdNoVersion, cm.getVersion(minKeyShardId))};
+ appendShardVersion(cmdNoVersion,
+ ShardVersion(placementVersion,
+ CollectionIndexes(placementVersion, boost::none)))};
} else {
// For an unsharded collection, the primary shard will have correct indexes. We attach
// unsharded shard version to detect if the collection has become sharded.
const auto cmdObjWithShardVersion = (cm.dbPrimary() != ShardId::kConfigServerId)
- ? appendShardVersion(cmdNoVersion, ChunkVersion::UNSHARDED())
+ ? appendShardVersion(cmdNoVersion, ShardVersion::UNSHARDED())
: cmdNoVersion;
return {
uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, cm.dbPrimary())),
diff --git a/src/mongo/s/cluster_ddl.cpp b/src/mongo/s/cluster_ddl.cpp
index 99d186fef90..1c731656906 100644
--- a/src/mongo/s/cluster_ddl.cpp
+++ b/src/mongo/s/cluster_ddl.cpp
@@ -45,7 +45,7 @@ namespace {
std::vector<AsyncRequestsSender::Request> buildUnshardedRequestsForAllShards(
OperationContext* opCtx, std::vector<ShardId> shardIds, const BSONObj& cmdObj) {
auto cmdToSend = cmdObj;
- appendShardVersion(cmdToSend, ChunkVersion::UNSHARDED());
+ appendShardVersion(cmdToSend, ShardVersion::UNSHARDED());
std::vector<AsyncRequestsSender::Request> requests;
for (auto&& shardId : shardIds)
diff --git a/src/mongo/s/commands/cluster_analyze_shard_key_cmd.cpp b/src/mongo/s/commands/cluster_analyze_shard_key_cmd.cpp
index 70b5192d6c1..0b8be3b008f 100644
--- a/src/mongo/s/commands/cluster_analyze_shard_key_cmd.cpp
+++ b/src/mongo/s/commands/cluster_analyze_shard_key_cmd.cpp
@@ -79,9 +79,12 @@ public:
auto cmdObj = CommandHelpers::filterCommandRequestForPassthrough(request().toBSON({}));
if (cm.isSharded()) {
- cmdObj = appendShardVersion(cmdObj, cm.getVersion(shardId));
+ cmdObj = appendShardVersion(
+ cmdObj,
+ ShardVersion(cm.getVersion(shardId),
+ CollectionIndexes(cm.getVersion(shardId), boost::none)));
} else {
- cmdObj = appendShardVersion(cmdObj, ChunkVersion::UNSHARDED());
+ cmdObj = appendShardVersion(cmdObj, ShardVersion::UNSHARDED());
cmdObj = appendDbVersionIfPresent(cmdObj, dbInfo->getVersion());
}
diff --git a/src/mongo/s/commands/cluster_create_cmd.cpp b/src/mongo/s/commands/cluster_create_cmd.cpp
index 35ce98ad22a..f73ce1356a0 100644
--- a/src/mongo/s/commands/cluster_create_cmd.cpp
+++ b/src/mongo/s/commands/cluster_create_cmd.cpp
@@ -141,7 +141,7 @@ public:
// parallel.
// If the DB primary is hosted by the config server, apply the original metadata.
if (dbInfo->getPrimary() != ShardId::kConfigServerId) {
- cmdToSend = appendShardVersion(cmdToSend, ChunkVersion::UNSHARDED());
+ cmdToSend = appendShardVersion(cmdToSend, ShardVersion::UNSHARDED());
}
cmdToSend = appendDbVersionIfPresent(cmdToSend, dbInfo);
diff --git a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
index d01690787ee..136c30a72f7 100644
--- a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
@@ -419,17 +419,19 @@ public:
BSONObjBuilder bob;
if (cm.isSharded()) {
- _runCommand(opCtx,
- shard->getId(),
- cm.getVersion(shard->getId()),
- boost::none,
- nss,
- applyReadWriteConcern(opCtx, false, false, explainCmd),
- &bob);
+ ChunkVersion placementVersion = cm.getVersion(shard->getId());
+ _runCommand(
+ opCtx,
+ shard->getId(),
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none)),
+ boost::none,
+ nss,
+ applyReadWriteConcern(opCtx, false, false, explainCmd),
+ &bob);
} else {
_runCommand(opCtx,
shard->getId(),
- boost::make_optional(!cm.dbVersion().isFixed(), ChunkVersion::UNSHARDED()),
+ boost::make_optional(!cm.dbVersion().isFixed(), ShardVersion::UNSHARDED()),
cm.dbVersion(),
nss,
applyReadWriteConcern(opCtx, false, false, explainCmd),
@@ -496,18 +498,19 @@ public:
// This means that we always assume that a findAndModify request using _id is targetable
// to a single shard.
auto chunk = cm.findIntersectingChunk(shardKey, collation, true);
-
- _runCommand(opCtx,
- chunk.getShardId(),
- cm.getVersion(chunk.getShardId()),
- boost::none,
- nss,
- applyReadWriteConcern(opCtx, this, cmdObjForShard),
- &result);
+ ChunkVersion placementVersion = cm.getVersion(chunk.getShardId());
+ _runCommand(
+ opCtx,
+ chunk.getShardId(),
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none)),
+ boost::none,
+ nss,
+ applyReadWriteConcern(opCtx, this, cmdObjForShard),
+ &result);
} else {
_runCommand(opCtx,
cm.dbPrimary(),
- boost::make_optional(!cm.dbVersion().isFixed(), ChunkVersion::UNSHARDED()),
+ boost::make_optional(!cm.dbVersion().isFixed(), ShardVersion::UNSHARDED()),
cm.dbVersion(),
nss,
applyReadWriteConcern(opCtx, this, cmdObjForShard),
@@ -531,7 +534,7 @@ private:
static void _runCommand(OperationContext* opCtx,
const ShardId& shardId,
- const boost::optional<ChunkVersion>& shardVersion,
+ const boost::optional<ShardVersion>& shardVersion,
const boost::optional<DatabaseVersion>& dbVersion,
const NamespaceString& nss,
const BSONObj& cmdObj,
@@ -640,7 +643,7 @@ private:
static void _handleWouldChangeOwningShardErrorRetryableWriteLegacy(
OperationContext* opCtx,
const ShardId& shardId,
- const boost::optional<ChunkVersion>& shardVersion,
+ const boost::optional<ShardVersion>& shardVersion,
const boost::optional<DatabaseVersion>& dbVersion,
const NamespaceString& nss,
const BSONObj& cmdObj,
diff --git a/src/mongo/s/commands/cluster_split_cmd.cpp b/src/mongo/s/commands/cluster_split_cmd.cpp
index 6bfabdb5b76..2b8cf4a8171 100644
--- a/src/mongo/s/commands/cluster_split_cmd.cpp
+++ b/src/mongo/s/commands/cluster_split_cmd.cpp
@@ -249,14 +249,16 @@ public:
// specified in the split command through the "middle" parameter, choose "middle" as the
// splitPoint. Otherwise use the splitVector command with 'force' to ask the shard for the
// middle of the chunk.
+ const auto placementVersion = cm.getVersion(chunk->getShardId());
const BSONObj splitPoint = !middle.isEmpty()
? middle
- : selectMedianKey(opCtx,
- chunk->getShardId(),
- nss,
- cm.getShardKeyPattern(),
- cm.getVersion(chunk->getShardId()),
- ChunkRange(chunk->getMin(), chunk->getMax()));
+ : selectMedianKey(
+ opCtx,
+ chunk->getShardId(),
+ nss,
+ cm.getShardKeyPattern(),
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none)),
+ ChunkRange(chunk->getMin(), chunk->getMax()));
LOGV2(22758,
"Splitting chunk {chunkRange} in {namespace} on shard {shardId} at key {splitPoint}",
diff --git a/src/mongo/s/commands/cluster_split_vector_cmd.cpp b/src/mongo/s/commands/cluster_split_vector_cmd.cpp
index 4b0e66418b5..54eb9b6a1fa 100644
--- a/src/mongo/s/commands/cluster_split_vector_cmd.cpp
+++ b/src/mongo/s/commands/cluster_split_vector_cmd.cpp
@@ -88,7 +88,7 @@ public:
BSONObj filteredCmdObj(applyReadWriteConcern(
opCtx, this, CommandHelpers::filterCommandRequestForPassthrough(cmdObj)));
BSONObj filteredCmdObjWithVersion(
- appendShardVersion(filteredCmdObj, ChunkVersion::UNSHARDED()));
+ appendShardVersion(filteredCmdObj, ShardVersion::UNSHARDED()));
auto shard =
uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, cm.dbPrimary()));
diff --git a/src/mongo/s/query/cluster_aggregation_planner.cpp b/src/mongo/s/query/cluster_aggregation_planner.cpp
index eb49ea8e422..87a75bd8850 100644
--- a/src/mongo/s/query/cluster_aggregation_planner.cpp
+++ b/src/mongo/s/query/cluster_aggregation_planner.cpp
@@ -154,7 +154,7 @@ BSONObj createCommandForMergingShard(Document serializedCommand,
// Attach the IGNORED chunk version to the command. On the shard, this will skip the actual
// version check but will nonetheless mark the operation as versioned.
- auto mergeCmdObj = appendShardVersion(mergeCmd.freeze().toBson(), ChunkVersion::IGNORED());
+ auto mergeCmdObj = appendShardVersion(mergeCmd.freeze().toBson(), ShardVersion::IGNORED());
// Attach the read and write concerns if needed, and return the final command object.
return applyReadWriteConcern(mergeCtx->opCtx,
@@ -801,7 +801,7 @@ Status runPipelineOnSpecificShardOnly(const boost::intrusive_ptr<ExpressionConte
overrideBatchSize);
if (!forPerShardCursor && shardId != ShardId::kConfigServerId) {
- cmdObj = appendShardVersion(std::move(cmdObj), ChunkVersion::UNSHARDED());
+ cmdObj = appendShardVersion(std::move(cmdObj), ShardVersion::UNSHARDED());
}
if (!forPerShardCursor) {
// Unless this is a per shard cursor, we need to send shard version info.
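[Editor's note] An aside on the two sentinels this file now attaches: IGNORED() marks the operation as versioned while skipping the shard-side placement check (per the comment in createCommandForMergingShard above), whereas UNSHARDED() asserts the namespace has no routing table. A minimal sketch of both attachments, not part of the commit; cmdObj stands in for a fully built command object, and appendShardVersion is the helper already used in these hunks:

    // Illustrative sketch only; cmdObj is a placeholder for a prepared command.
    BSONObj mergeCmdObj = appendShardVersion(cmdObj, ShardVersion::IGNORED());       // versioned, check skipped
    BSONObj unshardedCmdObj = appendShardVersion(cmdObj, ShardVersion::UNSHARDED()); // unsharded namespace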
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index bb55dc6ccc6..6ccadf2e927 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -186,7 +186,8 @@ std::vector<std::pair<ShardId, BSONObj>> constructRequestsForShards(
findCommandToForward->serialize(BSONObj(), &cmdBuilder);
if (cm.isSharded()) {
- ShardVersion(cm.getVersion(shardId))
+ const auto placementVersion = cm.getVersion(shardId);
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none))
.serialize(ShardVersion::kShardVersionField, &cmdBuilder);
} else if (!query.nss().isOnInternalDb()) {
ShardVersion::UNSHARDED().serialize(ShardVersion::kShardVersionField, &cmdBuilder);
diff --git a/src/mongo/s/router.cpp b/src/mongo/s/router.cpp
index ed4b474e8f1..52032f16ba4 100644
--- a/src/mongo/s/router.cpp
+++ b/src/mongo/s/router.cpp
@@ -115,7 +115,8 @@ void CollectionRouter::appendCRUDRoutingTokenToCommand(const ShardId& shardId,
dbVersion.serialize(&dbvBuilder);
}
}
- ShardVersion(chunkVersion).serialize(ShardVersion::kShardVersionField, builder);
+ ShardVersion(chunkVersion, CollectionIndexes(chunkVersion, boost::none))
+ .serialize(ShardVersion::kShardVersionField, builder);
}
ChunkManager CollectionRouter::_getRoutingInfo(OperationContext* opCtx) const {
diff --git a/src/mongo/s/shard_version.h b/src/mongo/s/shard_version.h
index 3062be5b9b7..fbe33785d4e 100644
--- a/src/mongo/s/shard_version.h
+++ b/src/mongo/s/shard_version.h
@@ -50,11 +50,6 @@ public:
ShardVersion(ChunkVersion chunkVersion, CollectionIndexes indexVersion);
- ShardVersion(ChunkVersion chunkVersion)
- : CollectionGeneration(chunkVersion.epoch(), chunkVersion.getTimestamp()),
- ChunkVersion(chunkVersion),
- CollectionIndexes({chunkVersion.epoch(), chunkVersion.getTimestamp()}, boost::none) {}
-
ShardVersion() : ShardVersion(ChunkVersion(), CollectionIndexes()) {}
static ShardVersion IGNORED() {
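[Editor's note] With the one-argument overload deleted in this hunk, the two-argument constructor is the only remaining way to build a ShardVersion, which is why every call site above now spells out both components. A minimal sketch of that surviving constructor, following the CollectionGeneration pattern the updated tests use; the concrete numbers are placeholders:

    // Illustrative sketch only: one generation (epoch + timestamp) feeds both
    // the placement component and the index component of the ShardVersion.
    const CollectionGeneration gen(OID::gen(), Timestamp(1, 0));
    const ShardVersion sv(ChunkVersion(gen, {1, 0}),            // placement: major 1, minor 0
                          CollectionIndexes(gen, boost::none)); // no index version yet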
diff --git a/src/mongo/s/stale_shard_version_helpers_test.cpp b/src/mongo/s/stale_shard_version_helpers_test.cpp
index 030a3d21213..7485dc87148 100644
--- a/src/mongo/s/stale_shard_version_helpers_test.cpp
+++ b/src/mongo/s/stale_shard_version_helpers_test.cpp
@@ -93,11 +93,14 @@ TEST_F(AsyncShardVersionRetry, LimitedStaleErrorsShouldReturnCorrectValue) {
auto future = shardVersionRetry(
service(), nss(), catalogCache, desc(), getExecutor(), token, [&](OperationContext*) {
if (++tries < 5) {
- uassert(StaleConfigInfo(
- nss(),
- ShardVersion(ChunkVersion({OID::gen(), Timestamp(1, 0)}, {5, 23})),
- ShardVersion(ChunkVersion({OID::gen(), Timestamp(1, 0)}, {6, 99})),
- ShardId("sB")),
+ const CollectionGeneration gen1(OID::gen(), Timestamp(1, 0));
+ const CollectionGeneration gen2(OID::gen(), Timestamp(1, 0));
+ uassert(StaleConfigInfo(nss(),
+ ShardVersion(ChunkVersion(gen1, {5, 23}),
+ CollectionIndexes(gen1, boost::none)),
+ ShardVersion(ChunkVersion(gen2, {6, 99}),
+ CollectionIndexes(gen2, boost::none)),
+ ShardId("sB")),
"testX",
false);
}
diff --git a/src/mongo/s/write_ops/batch_write_exec_test.cpp b/src/mongo/s/write_ops/batch_write_exec_test.cpp
index e8aa07b6f30..9c01ab18b4a 100644
--- a/src/mongo/s/write_ops/batch_write_exec_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec_test.cpp
@@ -92,8 +92,10 @@ BSONObj expectInsertsReturnStaleVersionErrorsBase(const NamespaceString& nss,
staleResponse.addToErrDetails(write_ops::WriteError(
i,
Status(StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {2, 0})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {2, 0}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId(kShardName1)),
"Stale error")));
++i;
@@ -330,10 +332,12 @@ public:
const NamespaceString nss{"foo.bar"};
+ const CollectionGeneration gen{OID::gen(), Timestamp(1, 1)};
MockNSTargeter singleShardNSTargeter{
nss,
{MockRange(ShardEndpoint(kShardName1,
- ChunkVersion({OID::gen(), Timestamp(1, 1)}, {100, 200}),
+ ShardVersion(ChunkVersion(gen, {100, 200}),
+ CollectionIndexes(gen, boost::none)),
boost::none),
BSON("x" << MINKEY),
BSON("x" << MAXKEY))}};
@@ -404,21 +408,28 @@ TEST_F(BatchWriteExecTest, SingleUpdateTargetsShardWithLet) {
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
- return std::vector{ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ return std::vector{
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(
- ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- BSON("x" << MINKEY),
- BSON("x" << 0)),
- MockRange(
- ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
- BSON("x" << 0),
- BSON("x" << MAXKEY))});
+ {MockRange(ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << MINKEY),
+ BSON("x" << 0)),
+ MockRange(ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << 0),
+ BSON("x" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
@@ -491,22 +502,29 @@ TEST_F(BatchWriteExecTest, SingleDeleteTargetsShardWithLet) {
std::vector<ShardEndpoint> targetDelete(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{ShardEndpoint(
- kShardName2, ChunkVersion({epoch, Timestamp(1, 1)}, {101, 200}), boost::none)};
+ kShardName2,
+ ShardVersion(ChunkVersion({epoch, Timestamp(1, 1)}, {101, 200}),
+ CollectionIndexes({epoch, Timestamp(1, 1)}, boost::none)),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(ShardEndpoint(kShardName1,
- ChunkVersion({epoch, Timestamp(1, 1)}, {100, 200}),
- boost::none),
- BSON("x" << MINKEY),
- BSON("x" << 0)),
- MockRange(ShardEndpoint(kShardName2,
- ChunkVersion({epoch, Timestamp(1, 1)}, {101, 200}),
- boost::none),
- BSON("x" << 0),
- BSON("x" << MAXKEY))});
+ {MockRange(
+ ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, Timestamp(1, 1)}, {100, 200}),
+ CollectionIndexes({epoch, Timestamp(1, 1)}, boost::none)),
+ boost::none),
+ BSON("x" << MINKEY),
+ BSON("x" << 0)),
+ MockRange(
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, Timestamp(1, 1)}, {101, 200}),
+ CollectionIndexes({epoch, Timestamp(1, 1)}, boost::none)),
+ boost::none),
+ BSON("x" << 0),
+ BSON("x" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
@@ -685,23 +703,31 @@ TEST_F(BatchWriteExecTest, StaleShardVersionReturnedFromBatchWithSingleMultiWrit
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(
- kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(
- ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- BSON("x" << MINKEY),
- BSON("x" << 0)),
- MockRange(
- ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
- BSON("x" << 0),
- BSON("x" << MAXKEY))});
+ {MockRange(ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << MINKEY),
+ BSON("x" << 0)),
+ MockRange(ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << 0),
+ BSON("x" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
@@ -731,8 +757,10 @@ TEST_F(BatchWriteExecTest, StaleShardVersionReturnedFromBatchWithSingleMultiWrit
response.addToErrDetails(write_ops::WriteError(
0,
Status(StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId(kShardName2)),
"Stale error")));
return response.toBSON();
@@ -785,23 +813,31 @@ TEST_F(BatchWriteExecTest,
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(
- kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(
- ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- BSON("sk" << MINKEY),
- BSON("sk" << 10)),
- MockRange(
- ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
- BSON("sk" << 10),
- BSON("sk" << MAXKEY))});
+ {MockRange(ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("sk" << MINKEY),
+ BSON("sk" << 10)),
+ MockRange(ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("sk" << 10),
+ BSON("sk" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
@@ -831,15 +867,19 @@ TEST_F(BatchWriteExecTest,
response.addToErrDetails(write_ops::WriteError(
0,
Status(StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId(kShardName2)),
"Stale error")));
response.addToErrDetails(write_ops::WriteError(
1,
Status(StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId(kShardName2)),
"Stale error")));
return response.toBSON();
@@ -891,23 +931,31 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1Firs) {
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(
- kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(
- ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- BSON("sk" << MINKEY),
- BSON("sk" << 10)),
- MockRange(
- ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
- BSON("sk" << 10),
- BSON("sk" << MAXKEY))});
+ {MockRange(ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("sk" << MINKEY),
+ BSON("sk" << 10)),
+ MockRange(ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("sk" << 10),
+ BSON("sk" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
@@ -927,8 +975,10 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1Firs) {
response.addToErrDetails(write_ops::WriteError(
1,
Status(StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId(kShardName2)),
"Stale error")));
return response.toBSON();
@@ -943,8 +993,10 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1Firs) {
response.addToErrDetails(write_ops::WriteError(
0,
Status(StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId(kShardName2)),
"Stale error")));
return response.toBSON();
@@ -1007,23 +1059,31 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1FirstOK
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(
- kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(
- ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- BSON("sk" << MINKEY),
- BSON("sk" << 10)),
- MockRange(
- ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
- BSON("sk" << 10),
- BSON("sk" << MAXKEY))});
+ {MockRange(ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("sk" << MINKEY),
+ BSON("sk" << 10)),
+ MockRange(ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("sk" << 10),
+ BSON("sk" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
@@ -1043,8 +1103,10 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1FirstOK
response.addToErrDetails(write_ops::WriteError(
1,
Status(StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId(kShardName2)),
"Stale error")));
return response.toBSON();
@@ -1059,8 +1121,10 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1FirstOK
response.addToErrDetails(write_ops::WriteError(
1,
Status(StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId(kShardName2)),
"Stale error")));
return response.toBSON();
@@ -1119,13 +1183,20 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromWriteWithShard1SSVShard2OK)
const BatchItemRef& itemRef) const override {
if (targetAll) {
return std::vector{
- ShardEndpoint(
- kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
} else {
- return std::vector{ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ return std::vector{
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
}
}
@@ -1134,14 +1205,18 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromWriteWithShard1SSVShard2OK)
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(
- ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- BSON("sk" << MINKEY),
- BSON("sk" << 10)),
- MockRange(
- ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
- BSON("sk" << 10),
- BSON("sk" << MAXKEY))});
+ {MockRange(ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("sk" << MINKEY),
+ BSON("sk" << 10)),
+ MockRange(ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("sk" << 10),
+ BSON("sk" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
@@ -1162,8 +1237,10 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromWriteWithShard1SSVShard2OK)
response.addToErrDetails(write_ops::WriteError(
0,
Status(StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId(kShardName2)),
"Stale error")));
@@ -1882,23 +1959,31 @@ TEST_F(BatchWriteExecTargeterErrorTest, TargetedFailedAndErrorResponse) {
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(
- kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(
- ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- BSON("x" << MINKEY),
- BSON("x" << 0)),
- MockRange(
- ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
- BSON("x" << 0),
- BSON("x" << MAXKEY))});
+ {MockRange(ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << MINKEY),
+ BSON("x" << 0)),
+ MockRange(ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << 0),
+ BSON("x" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
@@ -2020,23 +2105,31 @@ TEST_F(BatchWriteExecTransactionTargeterErrorTest, TargetedFailedAndErrorRespons
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(
- kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(
- ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- BSON("x" << MINKEY),
- BSON("x" << 0)),
- MockRange(
- ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
- BSON("x" << 0),
- BSON("x" << MAXKEY))});
+ {MockRange(ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << MINKEY),
+ BSON("x" << 0)),
+ MockRange(ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << 0),
+ BSON("x" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
@@ -2166,23 +2259,31 @@ TEST_F(BatchWriteExecTransactionMultiShardTest, TargetedSucceededAndErrorRespons
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(
- kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(
- ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- BSON("x" << MINKEY),
- BSON("x" << 0)),
- MockRange(
- ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
- BSON("x" << 0),
- BSON("x" << MAXKEY))});
+ {MockRange(ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << MINKEY),
+ BSON("x" << 0)),
+ MockRange(ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << 0),
+ BSON("x" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
diff --git a/src/mongo/s/write_ops/batch_write_op_test.cpp b/src/mongo/s/write_ops/batch_write_op_test.cpp
index 3681cd77b10..da0b55eea60 100644
--- a/src/mongo/s/write_ops/batch_write_op_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_op_test.cpp
@@ -119,7 +119,7 @@ using BatchWriteOpTest = WriteOpTestFixture;
TEST_F(BatchWriteOpTest, SingleOp) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
@@ -151,7 +151,7 @@ TEST_F(BatchWriteOpTest, SingleOp) {
TEST_F(BatchWriteOpTest, SingleError) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
@@ -189,7 +189,7 @@ TEST_F(BatchWriteOpTest, SingleError) {
TEST_F(BatchWriteOpTest, SingleTargetError) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterHalfRange(nss, endpoint);
@@ -223,7 +223,7 @@ TEST_F(BatchWriteOpTest, SingleTargetError) {
// concern error if one occurs.
TEST_F(BatchWriteOpTest, SingleWriteConcernErrorOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
@@ -265,7 +265,7 @@ TEST_F(BatchWriteOpTest, SingleWriteConcernErrorOrdered) {
// Single-op stale version test. We should retry the same batch until we're not stale.
TEST_F(BatchWriteOpTest, SingleStaleError) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
@@ -287,8 +287,10 @@ TEST_F(BatchWriteOpTest, SingleStaleError) {
response.addToErrDetails(write_ops::WriteError(
0,
Status{StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId("shard")),
"mock stale error"}));
@@ -326,7 +328,7 @@ TEST_F(BatchWriteOpTest, SingleStaleError) {
// Multi-op targeting test (ordered)
TEST_F(BatchWriteOpTest, MultiOpSameShardOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
@@ -362,7 +364,7 @@ TEST_F(BatchWriteOpTest, MultiOpSameShardOrdered) {
// Multi-op targeting test (unordered)
TEST_F(BatchWriteOpTest, MultiOpSameShardUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
@@ -404,8 +406,8 @@ TEST_F(BatchWriteOpTest, MultiOpSameShardUnordered) {
// (one to each shard, one-by-one)
TEST_F(BatchWriteOpTest, MultiOpTwoShardsOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -460,7 +462,7 @@ void verifyTargetedBatches(std::map<ShardId, size_t> expected,
for (auto it = targeted.begin(); it != targeted.end(); ++it) {
ASSERT_EQUALS(expected[it->second->getEndpoint().shardName],
it->second->getWrites().size());
- ASSERT_EQUALS(ChunkVersion::IGNORED(), *it->second->getEndpoint().shardVersion);
+ ASSERT_EQUALS(ShardVersion::IGNORED(), *it->second->getEndpoint().shardVersion);
expected.erase(expected.find(it->second->getEndpoint().shardName));
}
ASSERT(expected.empty());
@@ -470,8 +472,8 @@ void verifyTargetedBatches(std::map<ShardId, size_t> expected,
// to each shard).
TEST_F(BatchWriteOpTest, MultiOpTwoShardsUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -515,8 +517,8 @@ TEST_F(BatchWriteOpTest, MultiOpTwoShardsUnordered) {
// two batches to each shard (two for each delete op).
TEST_F(BatchWriteOpTest, MultiOpTwoShardsEachOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -570,8 +572,8 @@ TEST_F(BatchWriteOpTest, MultiOpTwoShardsEachOrdered) {
// of two batches to each shard (containing writes for both ops).
TEST_F(BatchWriteOpTest, MultiOpTwoShardsEachUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -617,8 +619,8 @@ TEST_F(BatchWriteOpTest, MultiOpTwoShardsEachUnordered) {
// ops should be batched together.
TEST_F(BatchWriteOpTest, MultiOpOneOrTwoShardsOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -711,8 +713,8 @@ TEST_F(BatchWriteOpTest, MultiOpOneOrTwoShardsOrdered) {
// shards. Should batch all the ops together into two batches of four ops for each shard.
TEST_F(BatchWriteOpTest, MultiOpOneOrTwoShardsUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -764,8 +766,8 @@ TEST_F(BatchWriteOpTest, MultiOpOneOrTwoShardsUnordered) {
// one shard. There should be one set of two batches to each shard and an error reported.
TEST_F(BatchWriteOpTest, MultiOpSingleShardErrorUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -824,8 +826,8 @@ TEST_F(BatchWriteOpTest, MultiOpSingleShardErrorUnordered) {
// on each shard. There should be one set of two batches to each shard and two errors reported.
TEST_F(BatchWriteOpTest, MultiOpTwoShardErrorsUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -881,8 +883,8 @@ TEST_F(BatchWriteOpTest, MultiOpTwoShardErrorsUnordered) {
// shard. There should be one set of two batches to each shard and an error reported.
TEST_F(BatchWriteOpTest, MultiOpPartialSingleShardErrorUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -943,8 +945,8 @@ TEST_F(BatchWriteOpTest, MultiOpPartialSingleShardErrorUnordered) {
// should not get run.
TEST_F(BatchWriteOpTest, MultiOpPartialSingleShardErrorOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -1004,7 +1006,7 @@ TEST_F(BatchWriteOpTest, MultiOpPartialSingleShardErrorOrdered) {
// the error if ordered : false.
TEST_F(BatchWriteOpTest, MultiOpErrorAndWriteConcernErrorUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
@@ -1047,8 +1049,8 @@ TEST_F(BatchWriteOpTest, MultiOpErrorAndWriteConcernErrorUnordered) {
// ordered and we also have an error
TEST_F(BatchWriteOpTest, SingleOpErrorAndWriteConcernErrorOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -1102,7 +1104,7 @@ TEST_F(BatchWriteOpTest, SingleOpErrorAndWriteConcernErrorOrdered) {
// Targeting failure on second op in batch op (ordered)
TEST_F(BatchWriteOpTest, MultiOpFailedTargetOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterHalfRange(nss, endpoint);
@@ -1156,7 +1158,7 @@ TEST_F(BatchWriteOpTest, MultiOpFailedTargetOrdered) {
// Targeting failure on second op in batch op (unordered)
TEST_F(BatchWriteOpTest, MultiOpFailedTargetUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterHalfRange(nss, endpoint);
@@ -1209,8 +1211,8 @@ TEST_F(BatchWriteOpTest, MultiOpFailedTargetUnordered) {
// into write errors for first affected write.
TEST_F(BatchWriteOpTest, MultiOpFailedBatchOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -1256,8 +1258,8 @@ TEST_F(BatchWriteOpTest, MultiOpFailedBatchOrdered) {
// into write errors for all affected writes.
TEST_F(BatchWriteOpTest, MultiOpFailedBatchUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -1312,8 +1314,8 @@ TEST_F(BatchWriteOpTest, MultiOpFailedBatchUnordered) {
// write.
TEST_F(BatchWriteOpTest, MultiOpAbortOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -1355,8 +1357,8 @@ TEST_F(BatchWriteOpTest, MultiOpAbortOrdered) {
// writes.
TEST_F(BatchWriteOpTest, MultiOpAbortUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -1395,8 +1397,8 @@ TEST_F(BatchWriteOpTest, MultiOpAbortUnordered) {
// Multi-op targeting test where each op goes to both shards and both return a write concern error
TEST_F(BatchWriteOpTest, MultiOpTwoWCErrors) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -1437,7 +1439,7 @@ TEST_F(BatchWriteOpTest, MultiOpTwoWCErrors) {
TEST_F(BatchWriteOpTest, AttachingStmtIds) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
const std::vector<StmtId> stmtIds{1, 2, 3};
@@ -1527,7 +1529,7 @@ using BatchWriteOpLimitTests = WriteOpTestFixture;
// Big single operation test - should go through
TEST_F(BatchWriteOpLimitTests, OneBigDoc) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
@@ -1562,7 +1564,7 @@ TEST_F(BatchWriteOpLimitTests, OneBigDoc) {
// Big doc with smaller additional doc - should go through as two batches
TEST_F(BatchWriteOpLimitTests, OneBigOneSmall) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
@@ -1630,7 +1632,7 @@ private:
TEST_F(BatchWriteOpTransactionTest, ThrowTargetingErrorsInTransaction_Delete) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterHalfRange(nss, endpoint);
@@ -1659,7 +1661,7 @@ TEST_F(BatchWriteOpTransactionTest, ThrowTargetingErrorsInTransaction_Delete) {
TEST_F(BatchWriteOpTransactionTest, ThrowTargetingErrorsInTransaction_Update) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterHalfRange(nss, endpoint);
diff --git a/src/mongo/s/write_ops/batched_command_request.cpp b/src/mongo/s/write_ops/batched_command_request.cpp
index e2a29e72751..3542a606169 100644
--- a/src/mongo/s/write_ops/batched_command_request.cpp
+++ b/src/mongo/s/write_ops/batched_command_request.cpp
@@ -200,7 +200,7 @@ void BatchedCommandRequest::setWriteCommandRequestBase(
void BatchedCommandRequest::serialize(BSONObjBuilder* builder) const {
_visit([&](auto&& op) { op.serialize({}, builder); });
if (_shardVersion) {
- ShardVersion(*_shardVersion).serialize(ShardVersion::kShardVersionField, builder);
+ _shardVersion->serialize(ShardVersion::kShardVersionField, builder);
}
if (_dbVersion) {
diff --git a/src/mongo/s/write_ops/batched_command_request.h b/src/mongo/s/write_ops/batched_command_request.h
index e0b13566c3d..185cafbbcb8 100644
--- a/src/mongo/s/write_ops/batched_command_request.h
+++ b/src/mongo/s/write_ops/batched_command_request.h
@@ -112,7 +112,7 @@ public:
bool isVerboseWC() const;
- void setShardVersion(ChunkVersion shardVersion) {
+ void setShardVersion(ShardVersion shardVersion) {
_shardVersion = std::move(shardVersion);
}
@@ -120,7 +120,7 @@ public:
return _shardVersion.is_initialized();
}
- const ChunkVersion& getShardVersion() const {
+ const ShardVersion& getShardVersion() const {
invariant(_shardVersion);
return *_shardVersion;
}
@@ -226,7 +226,7 @@ private:
std::unique_ptr<write_ops::UpdateCommandRequest> _updateReq;
std::unique_ptr<write_ops::DeleteCommandRequest> _deleteReq;
- boost::optional<ChunkVersion> _shardVersion;
+ boost::optional<ShardVersion> _shardVersion;
boost::optional<DatabaseVersion> _dbVersion;
boost::optional<BSONObj> _writeConcern;
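[Editor's note] With the member now a boost::optional&lt;ShardVersion&gt;, the setter/getter pair round-trips the full shard version instead of a bare ChunkVersion. A hedged sketch of that flow, mirroring the construction in the test file below; the namespace and version numbers are placeholders, not part of the commit:

    // Illustrative sketch only.
    const OID epoch = OID::gen();
    const Timestamp timestamp(1, 1);
    const CollectionGeneration gen(epoch, timestamp);
    BatchedCommandRequest insertRequest([&] {
        write_ops::InsertCommandRequest insertOp(NamespaceString("TestDB.test"));
        return insertOp;
    }());
    insertRequest.setShardVersion(
        ShardVersion(ChunkVersion(gen, {1, 2}), CollectionIndexes(gen, boost::none)));
    invariant(insertRequest.hasShardVersion());
    const ShardVersion& sv = insertRequest.getShardVersion();  // full ShardVersion back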
diff --git a/src/mongo/s/write_ops/batched_command_request_test.cpp b/src/mongo/s/write_ops/batched_command_request_test.cpp
index 0ba795e44a5..990c67e76c3 100644
--- a/src/mongo/s/write_ops/batched_command_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_request_test.cpp
@@ -72,7 +72,9 @@ TEST(BatchedCommandRequest, InsertWithShardVersion) {
ASSERT_EQ("TestDB.test", insertRequest.getInsertRequest().getNamespace().ns());
ASSERT(insertRequest.hasShardVersion());
- ASSERT_EQ(ChunkVersion({epoch, timestamp}, {1, 2}).toString(),
+ ASSERT_EQ(ShardVersion(ChunkVersion({epoch, timestamp}, {1, 2}),
+ CollectionIndexes({epoch, timestamp}, boost::none))
+ .toString(),
insertRequest.getShardVersion().toString());
}
}
diff --git a/src/mongo/s/write_ops/batched_command_response_test.cpp b/src/mongo/s/write_ops/batched_command_response_test.cpp
index 757961a5348..3b2670e6828 100644
--- a/src/mongo/s/write_ops/batched_command_response_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_response_test.cpp
@@ -70,10 +70,13 @@ TEST(BatchedCommandResponseTest, Basic) {
TEST(BatchedCommandResponseTest, StaleConfigInfo) {
OID epoch = OID::gen();
- StaleConfigInfo staleInfo(NamespaceString("TestDB.TestColl"),
- ShardVersion(ChunkVersion({epoch, Timestamp(100, 0)}, {1, 0})),
- ShardVersion(ChunkVersion({epoch, Timestamp(100, 0)}, {2, 0})),
- ShardId("TestShard"));
+ StaleConfigInfo staleInfo(
+ NamespaceString("TestDB.TestColl"),
+ ShardVersion(ChunkVersion({epoch, Timestamp(100, 0)}, {1, 0}),
+ CollectionIndexes({epoch, Timestamp(100, 0)}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, Timestamp(100, 0)}, {2, 0}),
+ CollectionIndexes({epoch, Timestamp(100, 0)}, boost::none)),
+ ShardId("TestShard"));
BSONObjBuilder builder(BSON("index" << 0 << "code" << ErrorCodes::StaleConfig << "errmsg"
<< "StaleConfig error"));
staleInfo.serialize(&builder);
@@ -156,7 +159,8 @@ TEST(BatchedCommandResponseTest, TooManyBigErrors) {
}
TEST(BatchedCommandResponseTest, CompatibilityFromWriteErrorToBatchCommandResponse) {
- ShardVersion versionReceived(ChunkVersion({OID::gen(), Timestamp(2, 0)}, {1, 0}));
+ CollectionGeneration gen(OID::gen(), Timestamp(2, 0));
+ ShardVersion versionReceived(ChunkVersion(gen, {1, 0}), CollectionIndexes(gen, boost::none));
write_ops::UpdateCommandReply reply;
reply.getWriteCommandReplyBase().setN(1);
diff --git a/src/mongo/s/write_ops/write_op.cpp b/src/mongo/s/write_ops/write_op.cpp
index 372d5e04cd8..e1c49bd96b8 100644
--- a/src/mongo/s/write_ops/write_op.cpp
+++ b/src/mongo/s/write_ops/write_op.cpp
@@ -127,7 +127,7 @@ void WriteOp::targetWrites(OperationContext* opCtx,
// Outside of a transaction, multiple endpoints currently imply no versioning, since we
// can't retry half a regular multi-write.
if (endpoints.size() > 1u && !inTransaction) {
- endpoint.shardVersion = ChunkVersion::IGNORED();
+ endpoint.shardVersion = ShardVersion::IGNORED();
}
targetedWrites->push_back(std::make_unique<TargetedWrite>(std::move(endpoint), ref));
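[Editor's note] This is the branch the IGNORED endpoints in the write-op tests below exercise: once a non-transactional multi-write targets more than one endpoint, each endpoint's version degrades to the sentinel so a partial retry cannot be version-checked. A one-line sketch of the resulting endpoint, as the tests construct it:

    // Illustrative sketch only: an endpoint whose shard-side version check is skipped.
    ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);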
diff --git a/src/mongo/s/write_ops/write_op_test.cpp b/src/mongo/s/write_ops/write_op_test.cpp
index 31476906b9b..d297e0cf7f2 100644
--- a/src/mongo/s/write_ops/write_op_test.cpp
+++ b/src/mongo/s/write_ops/write_op_test.cpp
@@ -91,7 +91,7 @@ TEST_F(WriteOpTest, BasicError) {
}
TEST_F(WriteOpTest, TargetSingle) {
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
BatchedCommandRequest request([&] {
write_ops::InsertCommandRequest insertOp(kNss);
@@ -118,12 +118,19 @@ TEST_F(WriteOpTest, TargetSingle) {
// Multi-write targeting test where our query goes to one shard
TEST_F(WriteOpTest, TargetMultiOneShard) {
+ CollectionGeneration gen(OID(), Timestamp(1, 1));
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
+ ShardId("shardA"),
+ ShardVersion(ChunkVersion(gen, {10, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
+ ShardId("shardB"),
+ ShardVersion(ChunkVersion(gen, {20, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
ShardEndpoint endpointC(
- ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
+ ShardId("shardB"),
+ ShardVersion(ChunkVersion(gen, {20, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -153,12 +160,19 @@ TEST_F(WriteOpTest, TargetMultiOneShard) {
// Multi-write targeting test where our write goes to more than one shard
TEST_F(WriteOpTest, TargetMultiAllShards) {
+ CollectionGeneration gen(OID(), Timestamp(1, 1));
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
+ ShardId("shardA"),
+ ShardVersion(ChunkVersion(gen, {10, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
+ ShardId("shardB"),
+ ShardVersion(ChunkVersion(gen, {20, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
ShardEndpoint endpointC(
- ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
+ ShardId("shardB"),
+ ShardVersion(ChunkVersion(gen, {20, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -195,10 +209,15 @@ TEST_F(WriteOpTest, TargetMultiAllShards) {
}
TEST_F(WriteOpTest, TargetMultiAllShardsAndErrorSingleChildOp) {
+ CollectionGeneration gen(OID(), Timestamp(1, 1));
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
+ ShardId("shardA"),
+ ShardVersion(ChunkVersion(gen, {10, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
+ ShardId("shardB"),
+ ShardVersion(ChunkVersion(gen, {20, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -227,10 +246,11 @@ TEST_F(WriteOpTest, TargetMultiAllShardsAndErrorSingleChildOp) {
// Simulate retryable error.
write_ops::WriteError retryableError(
0,
- {StaleConfigInfo(kNss,
- ShardVersion(ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0})),
- ShardVersion(ChunkVersion({OID(), Timestamp(1, 1)}, {11, 0})),
- ShardId("shardA")),
+ {StaleConfigInfo(
+ kNss,
+ ShardVersion(ChunkVersion(gen, {10, 0}), CollectionIndexes(gen, boost::none)),
+ ShardVersion(ChunkVersion(gen, {11, 0}), CollectionIndexes(gen, boost::none)),
+ ShardId("shardA")),
"simulate ssv error for test"});
writeOp.noteWriteError(*targeted[0], retryableError);
@@ -245,7 +265,7 @@ TEST_F(WriteOpTest, TargetMultiAllShardsAndErrorSingleChildOp) {
// Single error after targeting test
TEST_F(WriteOpTest, ErrorSingle) {
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
BatchedCommandRequest request([&] {
write_ops::InsertCommandRequest insertOp(kNss);
@@ -274,7 +294,7 @@ TEST_F(WriteOpTest, ErrorSingle) {
// Cancel single targeting test
TEST_F(WriteOpTest, CancelSingle) {
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
BatchedCommandRequest request([&] {
write_ops::InsertCommandRequest insertOp(kNss);
@@ -305,7 +325,7 @@ TEST_F(WriteOpTest, CancelSingle) {
// Retry single targeting test
TEST_F(WriteOpTest, RetrySingleOp) {
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
BatchedCommandRequest request([&] {
write_ops::InsertCommandRequest insertOp(kNss);
@@ -345,12 +365,19 @@ private:
};
TEST_F(WriteOpTransactionTest, TargetMultiDoesNotTargetAllShards) {
+ CollectionGeneration gen(OID(), Timestamp(1, 1));
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
+ ShardId("shardA"),
+ ShardVersion(ChunkVersion(gen, {10, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
+ ShardId("shardB"),
+ ShardVersion(ChunkVersion(gen, {20, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
ShardEndpoint endpointC(
- ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
+ ShardId("shardB"),
+ ShardVersion(ChunkVersion(gen, {20, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -385,10 +412,15 @@ TEST_F(WriteOpTransactionTest, TargetMultiDoesNotTargetAllShards) {
}
TEST_F(WriteOpTransactionTest, TargetMultiAllShardsAndErrorSingleChildOp) {
+ CollectionGeneration gen(OID(), Timestamp(1, 1));
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
+ ShardId("shardA"),
+ ShardVersion(ChunkVersion(gen, {10, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
+ ShardId("shardB"),
+ ShardVersion(ChunkVersion(gen, {20, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -421,10 +453,11 @@ TEST_F(WriteOpTransactionTest, TargetMultiAllShardsAndErrorSingleChildOp) {
// Simulate retryable error.
write_ops::WriteError retryableError(
0,
- {StaleConfigInfo(kNss,
- ShardVersion(ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0})),
- ShardVersion(ChunkVersion({OID(), Timestamp(1, 1)}, {11, 0})),
- ShardId("shardA")),
+ {StaleConfigInfo(
+ kNss,
+ ShardVersion(ChunkVersion(gen, {10, 0}), CollectionIndexes(gen, boost::none)),
+ ShardVersion(ChunkVersion(gen, {11, 0}), CollectionIndexes(gen, boost::none)),
+ ShardId("shardA")),
"simulate ssv error for test"});
writeOp.noteWriteError(*targeted[0], retryableError);