summary | refs | log | tree | commit | diff
path: root/src/mongo/s
diff options
context:
space:
mode:
authorAllison Easton <allison.easton@mongodb.com>2022-09-01 11:02:51 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2022-09-01 11:32:04 +0000
commitce3443291cd663ae5375941f380d4bc52bc88a85 (patch)
treed6ffa445b5cdefeecbbf85cb8a5cc022810de781 /src/mongo/s
parent5f11658224585f0399d57afa2fd8886165f854bf (diff)
downloadmongo-ce3443291cd663ae5375941f380d4bc52bc88a85.tar.gz
SERVER-69033 Remove ChunkVersion only constructor from ShardVersion
Diffstat (limited to 'src/mongo/s')
-rw-r--r--src/mongo/s/append_raw_responses_test.cpp3
-rw-r--r--src/mongo/s/catalog_cache_test.cpp7
-rw-r--r--src/mongo/s/chunk_manager.cpp6
-rw-r--r--src/mongo/s/chunk_manager.h5
-rw-r--r--src/mongo/s/chunk_manager_targeter.cpp24
-rw-r--r--src/mongo/s/cluster_commands_helpers.cpp18
-rw-r--r--src/mongo/s/cluster_ddl.cpp2
-rw-r--r--src/mongo/s/commands/cluster_analyze_shard_key_cmd.cpp7
-rw-r--r--src/mongo/s/commands/cluster_create_cmd.cpp2
-rw-r--r--src/mongo/s/commands/cluster_find_and_modify_cmd.cpp41
-rw-r--r--src/mongo/s/commands/cluster_split_cmd.cpp14
-rw-r--r--src/mongo/s/commands/cluster_split_vector_cmd.cpp2
-rw-r--r--src/mongo/s/query/cluster_aggregation_planner.cpp4
-rw-r--r--src/mongo/s/query/cluster_find.cpp3
-rw-r--r--src/mongo/s/router.cpp3
-rw-r--r--src/mongo/s/shard_version.h5
-rw-r--r--src/mongo/s/stale_shard_version_helpers_test.cpp13
-rw-r--r--src/mongo/s/write_ops/batch_write_exec_test.cpp377
-rw-r--r--src/mongo/s/write_ops/batch_write_op_test.cpp102
-rw-r--r--src/mongo/s/write_ops/batched_command_request.cpp2
-rw-r--r--src/mongo/s/write_ops/batched_command_request.h6
-rw-r--r--src/mongo/s/write_ops/batched_command_request_test.cpp4
-rw-r--r--src/mongo/s/write_ops/batched_command_response_test.cpp14
-rw-r--r--src/mongo/s/write_ops/write_op.cpp2
-rw-r--r--src/mongo/s/write_ops/write_op_test.cpp83
25 files changed, 461 insertions, 288 deletions
diff --git a/src/mongo/s/append_raw_responses_test.cpp b/src/mongo/s/append_raw_responses_test.cpp
index 99c7387724e..547d4e37c52 100644
--- a/src/mongo/s/append_raw_responses_test.cpp
+++ b/src/mongo/s/append_raw_responses_test.cpp
@@ -197,7 +197,8 @@ protected:
OID epoch{OID::gen()};
Timestamp timestamp{1, 0};
return StaleConfigInfo(NamespaceString("Foo.Bar"),
- ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
boost::none,
ShardId{"dummy"});
}(),
diff --git a/src/mongo/s/catalog_cache_test.cpp b/src/mongo/s/catalog_cache_test.cpp
index c68dc4b0053..d13273f005c 100644
--- a/src/mongo/s/catalog_cache_test.cpp
+++ b/src/mongo/s/catalog_cache_test.cpp
@@ -259,8 +259,9 @@ TEST_F(CatalogCacheTest, OnStaleDatabaseVersionNoVersion) {
TEST_F(CatalogCacheTest, OnStaleShardVersionWithSameVersion) {
const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1));
+ const CollectionGeneration gen(OID::gen(), Timestamp(1, 1));
const auto cachedCollVersion =
- ShardVersion(ChunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0}));
+ ShardVersion(ChunkVersion(gen, {1, 0}), CollectionIndexes(gen, boost::none));
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)});
loadCollection(cachedCollVersion);
@@ -285,8 +286,8 @@ TEST_F(CatalogCacheTest, OnStaleShardVersionWithNoVersion) {
TEST_F(CatalogCacheTest, OnStaleShardVersionWithGraterVersion) {
const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1));
const auto cachedCollVersion = ChunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
- const auto wantedCollVersion = ShardVersion(
- ChunkVersion({cachedCollVersion.epoch(), cachedCollVersion.getTimestamp()}, {2, 0}));
+ const auto wantedCollVersion = ShardVersion(ChunkVersion(cachedCollVersion, {2, 0}),
+ CollectionIndexes(cachedCollVersion, boost::none));
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)});
loadCollection(cachedCollVersion);
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 31de7493b57..f44c58c8760 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -930,15 +930,15 @@ bool ComparableChunkVersion::operator<(const ComparableChunkVersion& other) cons
}
ShardEndpoint::ShardEndpoint(const ShardId& shardName,
- boost::optional<ChunkVersion> shardVersion,
+ boost::optional<ShardVersion> shardVersion,
boost::optional<DatabaseVersion> dbVersion)
: shardName(shardName),
shardVersion(std::move(shardVersion)),
databaseVersion(std::move(dbVersion)) {
if (databaseVersion)
- invariant(shardVersion && *shardVersion == ChunkVersion::UNSHARDED());
+ invariant(shardVersion && *shardVersion == ShardVersion::UNSHARDED());
else if (shardVersion)
- invariant(*shardVersion != ChunkVersion::UNSHARDED());
+ invariant(*shardVersion != ShardVersion::UNSHARDED());
else
invariant(shardName == ShardId::kConfigServerId);
}
diff --git a/src/mongo/s/chunk_manager.h b/src/mongo/s/chunk_manager.h
index e8f1b1c860d..595ad25cfff 100644
--- a/src/mongo/s/chunk_manager.h
+++ b/src/mongo/s/chunk_manager.h
@@ -40,6 +40,7 @@
#include "mongo/s/database_version.h"
#include "mongo/s/resharding/type_collection_fields_gen.h"
#include "mongo/s/shard_key_pattern.h"
+#include "mongo/s/shard_version.h"
#include "mongo/s/type_collection_common_types_gen.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/ticketholder.h"
@@ -471,12 +472,12 @@ using RoutingTableHistoryValueHandle = RoutingTableHistoryCache::ValueHandle;
*/
struct ShardEndpoint {
ShardEndpoint(const ShardId& shardName,
- boost::optional<ChunkVersion> shardVersion,
+ boost::optional<ShardVersion> shardVersion,
boost::optional<DatabaseVersion> dbVersion);
ShardId shardName;
- boost::optional<ChunkVersion> shardVersion;
+ boost::optional<ShardVersion> shardVersion;
boost::optional<DatabaseVersion> databaseVersion;
};
diff --git a/src/mongo/s/chunk_manager_targeter.cpp b/src/mongo/s/chunk_manager_targeter.cpp
index 5d964f74e51..aee5f0da601 100644
--- a/src/mongo/s/chunk_manager_targeter.cpp
+++ b/src/mongo/s/chunk_manager_targeter.cpp
@@ -362,7 +362,7 @@ ShardEndpoint ChunkManagerTargeter::targetInsert(OperationContext* opCtx,
// in commands
return ShardEndpoint(
_cm.dbPrimary(),
- _nss.isOnInternalDb() ? boost::optional<ChunkVersion>() : ChunkVersion::UNSHARDED(),
+ _nss.isOnInternalDb() ? boost::optional<ShardVersion>() : ShardVersion::UNSHARDED(),
_nss.isOnInternalDb() ? boost::optional<DatabaseVersion>() : _cm.dbVersion());
}
@@ -390,7 +390,7 @@ std::vector<ShardEndpoint> ChunkManagerTargeter::targetUpdate(OperationContext*
// shardVersion in commands
return std::vector{ShardEndpoint(
_cm.dbPrimary(),
- _nss.isOnInternalDb() ? boost::optional<ChunkVersion>() : ChunkVersion::UNSHARDED(),
+ _nss.isOnInternalDb() ? boost::optional<ShardVersion>() : ShardVersion::UNSHARDED(),
_nss.isOnInternalDb() ? boost::optional<DatabaseVersion>() : _cm.dbVersion())};
}
@@ -581,7 +581,7 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::_targetQuery(
// shardVersion in commands
return std::vector{ShardEndpoint(
_cm.dbPrimary(),
- _nss.isOnInternalDb() ? boost::optional<ChunkVersion>() : ChunkVersion::UNSHARDED(),
+ _nss.isOnInternalDb() ? boost::optional<ShardVersion>() : ShardVersion::UNSHARDED(),
_nss.isOnInternalDb() ? boost::optional<DatabaseVersion>() : _cm.dbVersion())};
}
@@ -594,7 +594,11 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::_targetQuery(
std::vector<ShardEndpoint> endpoints;
for (auto&& shardId : shardIds) {
- endpoints.emplace_back(std::move(shardId), _cm.getVersion(shardId), boost::none);
+ const auto placementVersion = _cm.getVersion(shardId);
+ endpoints.emplace_back(
+ std::move(shardId),
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none)),
+ boost::none);
}
return endpoints;
@@ -604,7 +608,11 @@ StatusWith<ShardEndpoint> ChunkManagerTargeter::_targetShardKey(const BSONObj& s
const BSONObj& collation) const {
try {
auto chunk = _cm.findIntersectingChunk(shardKey, collation);
- return ShardEndpoint(chunk.getShardId(), _cm.getVersion(chunk.getShardId()), boost::none);
+ const auto placementVersion = _cm.getVersion(chunk.getShardId());
+ return ShardEndpoint(
+ chunk.getShardId(),
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none)),
+ boost::none);
} catch (const DBException& ex) {
return ex.toStatus();
}
@@ -620,7 +628,11 @@ std::vector<ShardEndpoint> ChunkManagerTargeter::targetAllShards(OperationContex
std::vector<ShardEndpoint> endpoints;
for (auto&& shardId : shardIds) {
- endpoints.emplace_back(std::move(shardId), _cm.getVersion(shardId), boost::none);
+ const auto placementVersion = _cm.getVersion(shardId);
+ endpoints.emplace_back(
+ std::move(shardId),
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none)),
+ boost::none);
}
return endpoints;
diff --git a/src/mongo/s/cluster_commands_helpers.cpp b/src/mongo/s/cluster_commands_helpers.cpp
index 6f3a52b7b1a..663e51cad92 100644
--- a/src/mongo/s/cluster_commands_helpers.cpp
+++ b/src/mongo/s/cluster_commands_helpers.cpp
@@ -154,7 +154,7 @@ std::vector<AsyncRequestsSender::Request> buildVersionedRequestsForTargetedShard
// Attach shardVersion "UNSHARDED", unless targeting the config server.
const auto cmdObjWithShardVersion = (primaryShardId != ShardId::kConfigServerId)
- ? appendShardVersion(cmdToSend, ChunkVersion::UNSHARDED())
+ ? appendShardVersion(cmdToSend, ShardVersion::UNSHARDED())
: cmdToSend;
return std::vector<AsyncRequestsSender::Request>{AsyncRequestsSender::Request(
@@ -177,7 +177,12 @@ std::vector<AsyncRequestsSender::Request> buildVersionedRequestsForTargetedShard
for (const ShardId& shardId : shardIds) {
if (shardsToSkip.find(shardId) == shardsToSkip.end()) {
- requests.emplace_back(shardId, appendShardVersion(cmdToSend, cm.getVersion(shardId)));
+ ChunkVersion placementVersion = cm.getVersion(shardId);
+ requests.emplace_back(
+ shardId,
+ appendShardVersion(cmdToSend,
+ ShardVersion(placementVersion,
+ CollectionIndexes(placementVersion, boost::none))));
}
}
@@ -438,7 +443,7 @@ AsyncRequestsSender::Response executeCommandAgainstDatabasePrimary(
Shard::RetryPolicy retryPolicy) {
// Attach shardVersion "UNSHARDED", unless targeting the config server.
const auto cmdObjWithShardVersion = (dbInfo->getPrimary() != ShardId::kConfigServerId)
- ? appendShardVersion(cmdObj, ChunkVersion::UNSHARDED())
+ ? appendShardVersion(cmdObj, ShardVersion::UNSHARDED())
: cmdObj;
auto responses =
@@ -699,14 +704,17 @@ StatusWith<Shard::QueryResponse> loadIndexesFromAuthoritativeShard(OperationCont
// For a sharded collection we must load indexes from a shard with chunks. For
// consistency with cluster listIndexes, load from the shard that owns the minKey chunk.
const auto minKeyShardId = cm.getMinKeyShardIdWithSimpleCollation();
+ ChunkVersion placementVersion = cm.getVersion(minKeyShardId);
return {
uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, minKeyShardId)),
- appendShardVersion(cmdNoVersion, cm.getVersion(minKeyShardId))};
+ appendShardVersion(cmdNoVersion,
+ ShardVersion(placementVersion,
+ CollectionIndexes(placementVersion, boost::none)))};
} else {
// For an unsharded collection, the primary shard will have correct indexes. We attach
// unsharded shard version to detect if the collection has become sharded.
const auto cmdObjWithShardVersion = (cm.dbPrimary() != ShardId::kConfigServerId)
- ? appendShardVersion(cmdNoVersion, ChunkVersion::UNSHARDED())
+ ? appendShardVersion(cmdNoVersion, ShardVersion::UNSHARDED())
: cmdNoVersion;
return {
uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, cm.dbPrimary())),
diff --git a/src/mongo/s/cluster_ddl.cpp b/src/mongo/s/cluster_ddl.cpp
index 99d186fef90..1c731656906 100644
--- a/src/mongo/s/cluster_ddl.cpp
+++ b/src/mongo/s/cluster_ddl.cpp
@@ -45,7 +45,7 @@ namespace {
std::vector<AsyncRequestsSender::Request> buildUnshardedRequestsForAllShards(
OperationContext* opCtx, std::vector<ShardId> shardIds, const BSONObj& cmdObj) {
auto cmdToSend = cmdObj;
- appendShardVersion(cmdToSend, ChunkVersion::UNSHARDED());
+ appendShardVersion(cmdToSend, ShardVersion::UNSHARDED());
std::vector<AsyncRequestsSender::Request> requests;
for (auto&& shardId : shardIds)
diff --git a/src/mongo/s/commands/cluster_analyze_shard_key_cmd.cpp b/src/mongo/s/commands/cluster_analyze_shard_key_cmd.cpp
index 70b5192d6c1..0b8be3b008f 100644
--- a/src/mongo/s/commands/cluster_analyze_shard_key_cmd.cpp
+++ b/src/mongo/s/commands/cluster_analyze_shard_key_cmd.cpp
@@ -79,9 +79,12 @@ public:
auto cmdObj = CommandHelpers::filterCommandRequestForPassthrough(request().toBSON({}));
if (cm.isSharded()) {
- cmdObj = appendShardVersion(cmdObj, cm.getVersion(shardId));
+ cmdObj = appendShardVersion(
+ cmdObj,
+ ShardVersion(cm.getVersion(shardId),
+ CollectionIndexes(cm.getVersion(shardId), boost::none)));
} else {
- cmdObj = appendShardVersion(cmdObj, ChunkVersion::UNSHARDED());
+ cmdObj = appendShardVersion(cmdObj, ShardVersion::UNSHARDED());
cmdObj = appendDbVersionIfPresent(cmdObj, dbInfo->getVersion());
}
diff --git a/src/mongo/s/commands/cluster_create_cmd.cpp b/src/mongo/s/commands/cluster_create_cmd.cpp
index 35ce98ad22a..f73ce1356a0 100644
--- a/src/mongo/s/commands/cluster_create_cmd.cpp
+++ b/src/mongo/s/commands/cluster_create_cmd.cpp
@@ -141,7 +141,7 @@ public:
// parallel.
// If the DB primary is hosted by the config server, apply the original metadata.
if (dbInfo->getPrimary() != ShardId::kConfigServerId) {
- cmdToSend = appendShardVersion(cmdToSend, ChunkVersion::UNSHARDED());
+ cmdToSend = appendShardVersion(cmdToSend, ShardVersion::UNSHARDED());
}
cmdToSend = appendDbVersionIfPresent(cmdToSend, dbInfo);
diff --git a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
index d01690787ee..136c30a72f7 100644
--- a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
@@ -419,17 +419,19 @@ public:
BSONObjBuilder bob;
if (cm.isSharded()) {
- _runCommand(opCtx,
- shard->getId(),
- cm.getVersion(shard->getId()),
- boost::none,
- nss,
- applyReadWriteConcern(opCtx, false, false, explainCmd),
- &bob);
+ ChunkVersion placementVersion = cm.getVersion(shard->getId());
+ _runCommand(
+ opCtx,
+ shard->getId(),
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none)),
+ boost::none,
+ nss,
+ applyReadWriteConcern(opCtx, false, false, explainCmd),
+ &bob);
} else {
_runCommand(opCtx,
shard->getId(),
- boost::make_optional(!cm.dbVersion().isFixed(), ChunkVersion::UNSHARDED()),
+ boost::make_optional(!cm.dbVersion().isFixed(), ShardVersion::UNSHARDED()),
cm.dbVersion(),
nss,
applyReadWriteConcern(opCtx, false, false, explainCmd),
@@ -496,18 +498,19 @@ public:
// This means that we always assume that a findAndModify request using _id is targetable
// to a single shard.
auto chunk = cm.findIntersectingChunk(shardKey, collation, true);
-
- _runCommand(opCtx,
- chunk.getShardId(),
- cm.getVersion(chunk.getShardId()),
- boost::none,
- nss,
- applyReadWriteConcern(opCtx, this, cmdObjForShard),
- &result);
+ ChunkVersion placementVersion = cm.getVersion(chunk.getShardId());
+ _runCommand(
+ opCtx,
+ chunk.getShardId(),
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none)),
+ boost::none,
+ nss,
+ applyReadWriteConcern(opCtx, this, cmdObjForShard),
+ &result);
} else {
_runCommand(opCtx,
cm.dbPrimary(),
- boost::make_optional(!cm.dbVersion().isFixed(), ChunkVersion::UNSHARDED()),
+ boost::make_optional(!cm.dbVersion().isFixed(), ShardVersion::UNSHARDED()),
cm.dbVersion(),
nss,
applyReadWriteConcern(opCtx, this, cmdObjForShard),
@@ -531,7 +534,7 @@ private:
static void _runCommand(OperationContext* opCtx,
const ShardId& shardId,
- const boost::optional<ChunkVersion>& shardVersion,
+ const boost::optional<ShardVersion>& shardVersion,
const boost::optional<DatabaseVersion>& dbVersion,
const NamespaceString& nss,
const BSONObj& cmdObj,
@@ -640,7 +643,7 @@ private:
static void _handleWouldChangeOwningShardErrorRetryableWriteLegacy(
OperationContext* opCtx,
const ShardId& shardId,
- const boost::optional<ChunkVersion>& shardVersion,
+ const boost::optional<ShardVersion>& shardVersion,
const boost::optional<DatabaseVersion>& dbVersion,
const NamespaceString& nss,
const BSONObj& cmdObj,
diff --git a/src/mongo/s/commands/cluster_split_cmd.cpp b/src/mongo/s/commands/cluster_split_cmd.cpp
index 6bfabdb5b76..2b8cf4a8171 100644
--- a/src/mongo/s/commands/cluster_split_cmd.cpp
+++ b/src/mongo/s/commands/cluster_split_cmd.cpp
@@ -249,14 +249,16 @@ public:
// specified in the split command through the "middle" parameter, choose "middle" as the
// splitPoint. Otherwise use the splitVector command with 'force' to ask the shard for the
// middle of the chunk.
+ const auto placementVersion = cm.getVersion(chunk->getShardId());
const BSONObj splitPoint = !middle.isEmpty()
? middle
- : selectMedianKey(opCtx,
- chunk->getShardId(),
- nss,
- cm.getShardKeyPattern(),
- cm.getVersion(chunk->getShardId()),
- ChunkRange(chunk->getMin(), chunk->getMax()));
+ : selectMedianKey(
+ opCtx,
+ chunk->getShardId(),
+ nss,
+ cm.getShardKeyPattern(),
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none)),
+ ChunkRange(chunk->getMin(), chunk->getMax()));
LOGV2(22758,
"Splitting chunk {chunkRange} in {namespace} on shard {shardId} at key {splitPoint}",
diff --git a/src/mongo/s/commands/cluster_split_vector_cmd.cpp b/src/mongo/s/commands/cluster_split_vector_cmd.cpp
index 4b0e66418b5..54eb9b6a1fa 100644
--- a/src/mongo/s/commands/cluster_split_vector_cmd.cpp
+++ b/src/mongo/s/commands/cluster_split_vector_cmd.cpp
@@ -88,7 +88,7 @@ public:
BSONObj filteredCmdObj(applyReadWriteConcern(
opCtx, this, CommandHelpers::filterCommandRequestForPassthrough(cmdObj)));
BSONObj filteredCmdObjWithVersion(
- appendShardVersion(filteredCmdObj, ChunkVersion::UNSHARDED()));
+ appendShardVersion(filteredCmdObj, ShardVersion::UNSHARDED()));
auto shard =
uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, cm.dbPrimary()));
diff --git a/src/mongo/s/query/cluster_aggregation_planner.cpp b/src/mongo/s/query/cluster_aggregation_planner.cpp
index eb49ea8e422..87a75bd8850 100644
--- a/src/mongo/s/query/cluster_aggregation_planner.cpp
+++ b/src/mongo/s/query/cluster_aggregation_planner.cpp
@@ -154,7 +154,7 @@ BSONObj createCommandForMergingShard(Document serializedCommand,
// Attach the IGNORED chunk version to the command. On the shard, this will skip the actual
// version check but will nonetheless mark the operation as versioned.
- auto mergeCmdObj = appendShardVersion(mergeCmd.freeze().toBson(), ChunkVersion::IGNORED());
+ auto mergeCmdObj = appendShardVersion(mergeCmd.freeze().toBson(), ShardVersion::IGNORED());
// Attach the read and write concerns if needed, and return the final command object.
return applyReadWriteConcern(mergeCtx->opCtx,
@@ -801,7 +801,7 @@ Status runPipelineOnSpecificShardOnly(const boost::intrusive_ptr<ExpressionConte
overrideBatchSize);
if (!forPerShardCursor && shardId != ShardId::kConfigServerId) {
- cmdObj = appendShardVersion(std::move(cmdObj), ChunkVersion::UNSHARDED());
+ cmdObj = appendShardVersion(std::move(cmdObj), ShardVersion::UNSHARDED());
}
if (!forPerShardCursor) {
// Unless this is a per shard cursor, we need to send shard version info.
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index bb55dc6ccc6..6ccadf2e927 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -186,7 +186,8 @@ std::vector<std::pair<ShardId, BSONObj>> constructRequestsForShards(
findCommandToForward->serialize(BSONObj(), &cmdBuilder);
if (cm.isSharded()) {
- ShardVersion(cm.getVersion(shardId))
+ const auto placementVersion = cm.getVersion(shardId);
+ ShardVersion(placementVersion, CollectionIndexes(placementVersion, boost::none))
.serialize(ShardVersion::kShardVersionField, &cmdBuilder);
} else if (!query.nss().isOnInternalDb()) {
ShardVersion::UNSHARDED().serialize(ShardVersion::kShardVersionField, &cmdBuilder);
diff --git a/src/mongo/s/router.cpp b/src/mongo/s/router.cpp
index ed4b474e8f1..52032f16ba4 100644
--- a/src/mongo/s/router.cpp
+++ b/src/mongo/s/router.cpp
@@ -115,7 +115,8 @@ void CollectionRouter::appendCRUDRoutingTokenToCommand(const ShardId& shardId,
dbVersion.serialize(&dbvBuilder);
}
}
- ShardVersion(chunkVersion).serialize(ShardVersion::kShardVersionField, builder);
+ ShardVersion(chunkVersion, CollectionIndexes(chunkVersion, boost::none))
+ .serialize(ShardVersion::kShardVersionField, builder);
}
ChunkManager CollectionRouter::_getRoutingInfo(OperationContext* opCtx) const {
diff --git a/src/mongo/s/shard_version.h b/src/mongo/s/shard_version.h
index 3062be5b9b7..fbe33785d4e 100644
--- a/src/mongo/s/shard_version.h
+++ b/src/mongo/s/shard_version.h
@@ -50,11 +50,6 @@ public:
ShardVersion(ChunkVersion chunkVersion, CollectionIndexes indexVersion);
- ShardVersion(ChunkVersion chunkVersion)
- : CollectionGeneration(chunkVersion.epoch(), chunkVersion.getTimestamp()),
- ChunkVersion(chunkVersion),
- CollectionIndexes({chunkVersion.epoch(), chunkVersion.getTimestamp()}, boost::none) {}
-
ShardVersion() : ShardVersion(ChunkVersion(), CollectionIndexes()) {}
static ShardVersion IGNORED() {
diff --git a/src/mongo/s/stale_shard_version_helpers_test.cpp b/src/mongo/s/stale_shard_version_helpers_test.cpp
index 030a3d21213..7485dc87148 100644
--- a/src/mongo/s/stale_shard_version_helpers_test.cpp
+++ b/src/mongo/s/stale_shard_version_helpers_test.cpp
@@ -93,11 +93,14 @@ TEST_F(AsyncShardVersionRetry, LimitedStaleErrorsShouldReturnCorrectValue) {
auto future = shardVersionRetry(
service(), nss(), catalogCache, desc(), getExecutor(), token, [&](OperationContext*) {
if (++tries < 5) {
- uassert(StaleConfigInfo(
- nss(),
- ShardVersion(ChunkVersion({OID::gen(), Timestamp(1, 0)}, {5, 23})),
- ShardVersion(ChunkVersion({OID::gen(), Timestamp(1, 0)}, {6, 99})),
- ShardId("sB")),
+ const CollectionGeneration gen1(OID::gen(), Timestamp(1, 0));
+ const CollectionGeneration gen2(OID::gen(), Timestamp(1, 0));
+ uassert(StaleConfigInfo(nss(),
+ ShardVersion(ChunkVersion(gen1, {5, 23}),
+ CollectionIndexes(gen1, boost::none)),
+ ShardVersion(ChunkVersion(gen2, {6, 99}),
+ CollectionIndexes(gen2, boost::none)),
+ ShardId("sB")),
"testX",
false);
}
diff --git a/src/mongo/s/write_ops/batch_write_exec_test.cpp b/src/mongo/s/write_ops/batch_write_exec_test.cpp
index e8aa07b6f30..9c01ab18b4a 100644
--- a/src/mongo/s/write_ops/batch_write_exec_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec_test.cpp
@@ -92,8 +92,10 @@ BSONObj expectInsertsReturnStaleVersionErrorsBase(const NamespaceString& nss,
staleResponse.addToErrDetails(write_ops::WriteError(
i,
Status(StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {2, 0})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {1, 0}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {2, 0}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId(kShardName1)),
"Stale error")));
++i;
@@ -330,10 +332,12 @@ public:
const NamespaceString nss{"foo.bar"};
+ const CollectionGeneration gen{OID::gen(), Timestamp(1, 1)};
MockNSTargeter singleShardNSTargeter{
nss,
{MockRange(ShardEndpoint(kShardName1,
- ChunkVersion({OID::gen(), Timestamp(1, 1)}, {100, 200}),
+ ShardVersion(ChunkVersion(gen, {100, 200}),
+ CollectionIndexes(gen, boost::none)),
boost::none),
BSON("x" << MINKEY),
BSON("x" << MAXKEY))}};
@@ -404,21 +408,28 @@ TEST_F(BatchWriteExecTest, SingleUpdateTargetsShardWithLet) {
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
- return std::vector{ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ return std::vector{
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(
- ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- BSON("x" << MINKEY),
- BSON("x" << 0)),
- MockRange(
- ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
- BSON("x" << 0),
- BSON("x" << MAXKEY))});
+ {MockRange(ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << MINKEY),
+ BSON("x" << 0)),
+ MockRange(ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << 0),
+ BSON("x" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
@@ -491,22 +502,29 @@ TEST_F(BatchWriteExecTest, SingleDeleteTargetsShardWithLet) {
std::vector<ShardEndpoint> targetDelete(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{ShardEndpoint(
- kShardName2, ChunkVersion({epoch, Timestamp(1, 1)}, {101, 200}), boost::none)};
+ kShardName2,
+ ShardVersion(ChunkVersion({epoch, Timestamp(1, 1)}, {101, 200}),
+ CollectionIndexes({epoch, Timestamp(1, 1)}, boost::none)),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(ShardEndpoint(kShardName1,
- ChunkVersion({epoch, Timestamp(1, 1)}, {100, 200}),
- boost::none),
- BSON("x" << MINKEY),
- BSON("x" << 0)),
- MockRange(ShardEndpoint(kShardName2,
- ChunkVersion({epoch, Timestamp(1, 1)}, {101, 200}),
- boost::none),
- BSON("x" << 0),
- BSON("x" << MAXKEY))});
+ {MockRange(
+ ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, Timestamp(1, 1)}, {100, 200}),
+ CollectionIndexes({epoch, Timestamp(1, 1)}, boost::none)),
+ boost::none),
+ BSON("x" << MINKEY),
+ BSON("x" << 0)),
+ MockRange(
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, Timestamp(1, 1)}, {101, 200}),
+ CollectionIndexes({epoch, Timestamp(1, 1)}, boost::none)),
+ boost::none),
+ BSON("x" << 0),
+ BSON("x" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
@@ -685,23 +703,31 @@ TEST_F(BatchWriteExecTest, StaleShardVersionReturnedFromBatchWithSingleMultiWrit
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(
- kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(
- ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- BSON("x" << MINKEY),
- BSON("x" << 0)),
- MockRange(
- ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
- BSON("x" << 0),
- BSON("x" << MAXKEY))});
+ {MockRange(ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << MINKEY),
+ BSON("x" << 0)),
+ MockRange(ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << 0),
+ BSON("x" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
@@ -731,8 +757,10 @@ TEST_F(BatchWriteExecTest, StaleShardVersionReturnedFromBatchWithSingleMultiWrit
response.addToErrDetails(write_ops::WriteError(
0,
Status(StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId(kShardName2)),
"Stale error")));
return response.toBSON();
@@ -785,23 +813,31 @@ TEST_F(BatchWriteExecTest,
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(
- kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(
- ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- BSON("sk" << MINKEY),
- BSON("sk" << 10)),
- MockRange(
- ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
- BSON("sk" << 10),
- BSON("sk" << MAXKEY))});
+ {MockRange(ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("sk" << MINKEY),
+ BSON("sk" << 10)),
+ MockRange(ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("sk" << 10),
+ BSON("sk" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
@@ -831,15 +867,19 @@ TEST_F(BatchWriteExecTest,
response.addToErrDetails(write_ops::WriteError(
0,
Status(StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId(kShardName2)),
"Stale error")));
response.addToErrDetails(write_ops::WriteError(
1,
Status(StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId(kShardName2)),
"Stale error")));
return response.toBSON();
@@ -891,23 +931,31 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1Firs) {
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(
- kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(
- ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- BSON("sk" << MINKEY),
- BSON("sk" << 10)),
- MockRange(
- ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
- BSON("sk" << 10),
- BSON("sk" << MAXKEY))});
+ {MockRange(ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("sk" << MINKEY),
+ BSON("sk" << 10)),
+ MockRange(ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("sk" << 10),
+ BSON("sk" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
@@ -927,8 +975,10 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1Firs) {
response.addToErrDetails(write_ops::WriteError(
1,
Status(StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId(kShardName2)),
"Stale error")));
return response.toBSON();
@@ -943,8 +993,10 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1Firs) {
response.addToErrDetails(write_ops::WriteError(
0,
Status(StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId(kShardName2)),
"Stale error")));
return response.toBSON();
@@ -1007,23 +1059,31 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1FirstOK
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(
- kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(
- ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- BSON("sk" << MINKEY),
- BSON("sk" << 10)),
- MockRange(
- ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
- BSON("sk" << 10),
- BSON("sk" << MAXKEY))});
+ {MockRange(ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("sk" << MINKEY),
+ BSON("sk" << 10)),
+ MockRange(ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("sk" << 10),
+ BSON("sk" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
@@ -1043,8 +1103,10 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1FirstOK
response.addToErrDetails(write_ops::WriteError(
1,
Status(StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId(kShardName2)),
"Stale error")));
return response.toBSON();
@@ -1059,8 +1121,10 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1FirstOK
response.addToErrDetails(write_ops::WriteError(
1,
Status(StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId(kShardName2)),
"Stale error")));
return response.toBSON();
@@ -1119,13 +1183,20 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromWriteWithShard1SSVShard2OK)
const BatchItemRef& itemRef) const override {
if (targetAll) {
return std::vector{
- ShardEndpoint(
- kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
} else {
- return std::vector{ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ return std::vector{
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
}
}
@@ -1134,14 +1205,18 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromWriteWithShard1SSVShard2OK)
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(
- ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- BSON("sk" << MINKEY),
- BSON("sk" << 10)),
- MockRange(
- ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
- BSON("sk" << 10),
- BSON("sk" << MAXKEY))});
+ {MockRange(ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("sk" << MINKEY),
+ BSON("sk" << 10)),
+ MockRange(ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("sk" << 10),
+ BSON("sk" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
@@ -1162,8 +1237,10 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromWriteWithShard1SSVShard2OK)
response.addToErrDetails(write_ops::WriteError(
0,
Status(StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId(kShardName2)),
"Stale error")));
@@ -1882,23 +1959,31 @@ TEST_F(BatchWriteExecTargeterErrorTest, TargetedFailedAndErrorResponse) {
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(
- kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(
- ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- BSON("x" << MINKEY),
- BSON("x" << 0)),
- MockRange(
- ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
- BSON("x" << 0),
- BSON("x" << MAXKEY))});
+ {MockRange(ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << MINKEY),
+ BSON("x" << 0)),
+ MockRange(ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << 0),
+ BSON("x" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
@@ -2020,23 +2105,31 @@ TEST_F(BatchWriteExecTransactionTargeterErrorTest, TargetedFailedAndErrorRespons
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(
- kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(
- ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- BSON("x" << MINKEY),
- BSON("x" << 0)),
- MockRange(
- ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
- BSON("x" << 0),
- BSON("x" << MAXKEY))});
+ {MockRange(ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << MINKEY),
+ BSON("x" << 0)),
+ MockRange(ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << 0),
+ BSON("x" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
@@ -2166,23 +2259,31 @@ TEST_F(BatchWriteExecTransactionMultiShardTest, TargetedSucceededAndErrorRespons
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(
- kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- ShardEndpoint(
- kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
+ ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(
- ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
- BSON("x" << MINKEY),
- BSON("x" << 0)),
- MockRange(
- ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
- BSON("x" << 0),
- BSON("x" << MAXKEY))});
+ {MockRange(ShardEndpoint(kShardName1,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << MINKEY),
+ BSON("x" << 0)),
+ MockRange(ShardEndpoint(kShardName2,
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ boost::none),
+ BSON("x" << 0),
+ BSON("x" << MAXKEY))});
auto future = launchAsync([&] {
BatchedCommandResponse response;
diff --git a/src/mongo/s/write_ops/batch_write_op_test.cpp b/src/mongo/s/write_ops/batch_write_op_test.cpp
index 3681cd77b10..da0b55eea60 100644
--- a/src/mongo/s/write_ops/batch_write_op_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_op_test.cpp
@@ -119,7 +119,7 @@ using BatchWriteOpTest = WriteOpTestFixture;
TEST_F(BatchWriteOpTest, SingleOp) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
@@ -151,7 +151,7 @@ TEST_F(BatchWriteOpTest, SingleOp) {
TEST_F(BatchWriteOpTest, SingleError) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
@@ -189,7 +189,7 @@ TEST_F(BatchWriteOpTest, SingleError) {
TEST_F(BatchWriteOpTest, SingleTargetError) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterHalfRange(nss, endpoint);
@@ -223,7 +223,7 @@ TEST_F(BatchWriteOpTest, SingleTargetError) {
// concern error if one occurs.
TEST_F(BatchWriteOpTest, SingleWriteConcernErrorOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
@@ -265,7 +265,7 @@ TEST_F(BatchWriteOpTest, SingleWriteConcernErrorOrdered) {
// Single-op stale version test. We should retry the same batch until we're not stale.
TEST_F(BatchWriteOpTest, SingleStaleError) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
@@ -287,8 +287,10 @@ TEST_F(BatchWriteOpTest, SingleStaleError) {
response.addToErrDetails(write_ops::WriteError(
0,
Status{StaleConfigInfo(nss,
- ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200})),
- ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200})),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, timestamp}, {105, 200}),
+ CollectionIndexes({epoch, timestamp}, boost::none)),
ShardId("shard")),
"mock stale error"}));
@@ -326,7 +328,7 @@ TEST_F(BatchWriteOpTest, SingleStaleError) {
// Multi-op targeting test (ordered)
TEST_F(BatchWriteOpTest, MultiOpSameShardOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
@@ -362,7 +364,7 @@ TEST_F(BatchWriteOpTest, MultiOpSameShardOrdered) {
// Multi-op targeting test (unordered)
TEST_F(BatchWriteOpTest, MultiOpSameShardUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
@@ -404,8 +406,8 @@ TEST_F(BatchWriteOpTest, MultiOpSameShardUnordered) {
// (one to each shard, one-by-one)
TEST_F(BatchWriteOpTest, MultiOpTwoShardsOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -460,7 +462,7 @@ void verifyTargetedBatches(std::map<ShardId, size_t> expected,
for (auto it = targeted.begin(); it != targeted.end(); ++it) {
ASSERT_EQUALS(expected[it->second->getEndpoint().shardName],
it->second->getWrites().size());
- ASSERT_EQUALS(ChunkVersion::IGNORED(), *it->second->getEndpoint().shardVersion);
+ ASSERT_EQUALS(ShardVersion::IGNORED(), *it->second->getEndpoint().shardVersion);
expected.erase(expected.find(it->second->getEndpoint().shardName));
}
ASSERT(expected.empty());
@@ -470,8 +472,8 @@ void verifyTargetedBatches(std::map<ShardId, size_t> expected,
// to each shard).
TEST_F(BatchWriteOpTest, MultiOpTwoShardsUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -515,8 +517,8 @@ TEST_F(BatchWriteOpTest, MultiOpTwoShardsUnordered) {
// two batches to each shard (two for each delete op).
TEST_F(BatchWriteOpTest, MultiOpTwoShardsEachOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -570,8 +572,8 @@ TEST_F(BatchWriteOpTest, MultiOpTwoShardsEachOrdered) {
// of two batches to each shard (containing writes for both ops).
TEST_F(BatchWriteOpTest, MultiOpTwoShardsEachUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -617,8 +619,8 @@ TEST_F(BatchWriteOpTest, MultiOpTwoShardsEachUnordered) {
// ops should be batched together.
TEST_F(BatchWriteOpTest, MultiOpOneOrTwoShardsOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -711,8 +713,8 @@ TEST_F(BatchWriteOpTest, MultiOpOneOrTwoShardsOrdered) {
// shards. Should batch all the ops together into two batches of four ops for each shard.
TEST_F(BatchWriteOpTest, MultiOpOneOrTwoShardsUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -764,8 +766,8 @@ TEST_F(BatchWriteOpTest, MultiOpOneOrTwoShardsUnordered) {
// one shard. There should be one set of two batches to each shard and an error reported.
TEST_F(BatchWriteOpTest, MultiOpSingleShardErrorUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -824,8 +826,8 @@ TEST_F(BatchWriteOpTest, MultiOpSingleShardErrorUnordered) {
// on each shard. There should be one set of two batches to each shard and and two errors reported.
TEST_F(BatchWriteOpTest, MultiOpTwoShardErrorsUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -881,8 +883,8 @@ TEST_F(BatchWriteOpTest, MultiOpTwoShardErrorsUnordered) {
// shard. There should be one set of two batches to each shard and an error reported.
TEST_F(BatchWriteOpTest, MultiOpPartialSingleShardErrorUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -943,8 +945,8 @@ TEST_F(BatchWriteOpTest, MultiOpPartialSingleShardErrorUnordered) {
// should not get run.
TEST_F(BatchWriteOpTest, MultiOpPartialSingleShardErrorOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -1004,7 +1006,7 @@ TEST_F(BatchWriteOpTest, MultiOpPartialSingleShardErrorOrdered) {
// the error if ordered : false.
TEST_F(BatchWriteOpTest, MultiOpErrorAndWriteConcernErrorUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
@@ -1047,8 +1049,8 @@ TEST_F(BatchWriteOpTest, MultiOpErrorAndWriteConcernErrorUnordered) {
// ordered and we also have an error
TEST_F(BatchWriteOpTest, SingleOpErrorAndWriteConcernErrorOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -1102,7 +1104,7 @@ TEST_F(BatchWriteOpTest, SingleOpErrorAndWriteConcernErrorOrdered) {
// Targeting failure on second op in batch op (ordered)
TEST_F(BatchWriteOpTest, MultiOpFailedTargetOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterHalfRange(nss, endpoint);
@@ -1156,7 +1158,7 @@ TEST_F(BatchWriteOpTest, MultiOpFailedTargetOrdered) {
// Targeting failure on second op in batch op (unordered)
TEST_F(BatchWriteOpTest, MultiOpFailedTargetUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterHalfRange(nss, endpoint);
@@ -1209,8 +1211,8 @@ TEST_F(BatchWriteOpTest, MultiOpFailedTargetUnordered) {
// into write errors for first affected write.
TEST_F(BatchWriteOpTest, MultiOpFailedBatchOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -1256,8 +1258,8 @@ TEST_F(BatchWriteOpTest, MultiOpFailedBatchOrdered) {
// into write errors for all affected writes.
TEST_F(BatchWriteOpTest, MultiOpFailedBatchUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -1312,8 +1314,8 @@ TEST_F(BatchWriteOpTest, MultiOpFailedBatchUnordered) {
// write.
TEST_F(BatchWriteOpTest, MultiOpAbortOrdered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -1355,8 +1357,8 @@ TEST_F(BatchWriteOpTest, MultiOpAbortOrdered) {
// writes.
TEST_F(BatchWriteOpTest, MultiOpAbortUnordered) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -1395,8 +1397,8 @@ TEST_F(BatchWriteOpTest, MultiOpAbortUnordered) {
// Multi-op targeting test where each op goes to both shards and both return a write concern error
TEST_F(BatchWriteOpTest, MultiOpTwoWCErrors) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointA(ShardId("shardA"), ShardVersion::IGNORED(), boost::none);
+ ShardEndpoint endpointB(ShardId("shardB"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterSplitRange(nss, endpointA, endpointB);
@@ -1437,7 +1439,7 @@ TEST_F(BatchWriteOpTest, MultiOpTwoWCErrors) {
TEST_F(BatchWriteOpTest, AttachingStmtIds) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
const std::vector<StmtId> stmtIds{1, 2, 3};
@@ -1527,7 +1529,7 @@ using BatchWriteOpLimitTests = WriteOpTestFixture;
// Big single operation test - should go through
TEST_F(BatchWriteOpLimitTests, OneBigDoc) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
@@ -1562,7 +1564,7 @@ TEST_F(BatchWriteOpLimitTests, OneBigDoc) {
// Big doc with smaller additional doc - should go through as two batches
TEST_F(BatchWriteOpLimitTests, OneBigOneSmall) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterFullRange(nss, endpoint);
@@ -1630,7 +1632,7 @@ private:
TEST_F(BatchWriteOpTransactionTest, ThrowTargetingErrorsInTransaction_Delete) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterHalfRange(nss, endpoint);
@@ -1659,7 +1661,7 @@ TEST_F(BatchWriteOpTransactionTest, ThrowTargetingErrorsInTransaction_Delete) {
TEST_F(BatchWriteOpTransactionTest, ThrowTargetingErrorsInTransaction_Update) {
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
auto targeter = initTargeterHalfRange(nss, endpoint);
diff --git a/src/mongo/s/write_ops/batched_command_request.cpp b/src/mongo/s/write_ops/batched_command_request.cpp
index e2a29e72751..3542a606169 100644
--- a/src/mongo/s/write_ops/batched_command_request.cpp
+++ b/src/mongo/s/write_ops/batched_command_request.cpp
@@ -200,7 +200,7 @@ void BatchedCommandRequest::setWriteCommandRequestBase(
void BatchedCommandRequest::serialize(BSONObjBuilder* builder) const {
_visit([&](auto&& op) { op.serialize({}, builder); });
if (_shardVersion) {
- ShardVersion(*_shardVersion).serialize(ShardVersion::kShardVersionField, builder);
+ _shardVersion->serialize(ShardVersion::kShardVersionField, builder);
}
if (_dbVersion) {
diff --git a/src/mongo/s/write_ops/batched_command_request.h b/src/mongo/s/write_ops/batched_command_request.h
index e0b13566c3d..185cafbbcb8 100644
--- a/src/mongo/s/write_ops/batched_command_request.h
+++ b/src/mongo/s/write_ops/batched_command_request.h
@@ -112,7 +112,7 @@ public:
bool isVerboseWC() const;
- void setShardVersion(ChunkVersion shardVersion) {
+ void setShardVersion(ShardVersion shardVersion) {
_shardVersion = std::move(shardVersion);
}
@@ -120,7 +120,7 @@ public:
return _shardVersion.is_initialized();
}
- const ChunkVersion& getShardVersion() const {
+ const ShardVersion& getShardVersion() const {
invariant(_shardVersion);
return *_shardVersion;
}
@@ -226,7 +226,7 @@ private:
std::unique_ptr<write_ops::UpdateCommandRequest> _updateReq;
std::unique_ptr<write_ops::DeleteCommandRequest> _deleteReq;
- boost::optional<ChunkVersion> _shardVersion;
+ boost::optional<ShardVersion> _shardVersion;
boost::optional<DatabaseVersion> _dbVersion;
boost::optional<BSONObj> _writeConcern;
diff --git a/src/mongo/s/write_ops/batched_command_request_test.cpp b/src/mongo/s/write_ops/batched_command_request_test.cpp
index 0ba795e44a5..990c67e76c3 100644
--- a/src/mongo/s/write_ops/batched_command_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_request_test.cpp
@@ -72,7 +72,9 @@ TEST(BatchedCommandRequest, InsertWithShardVersion) {
ASSERT_EQ("TestDB.test", insertRequest.getInsertRequest().getNamespace().ns());
ASSERT(insertRequest.hasShardVersion());
- ASSERT_EQ(ChunkVersion({epoch, timestamp}, {1, 2}).toString(),
+ ASSERT_EQ(ShardVersion(ChunkVersion({epoch, timestamp}, {1, 2}),
+ CollectionIndexes({epoch, timestamp}, boost::none))
+ .toString(),
insertRequest.getShardVersion().toString());
}
}
diff --git a/src/mongo/s/write_ops/batched_command_response_test.cpp b/src/mongo/s/write_ops/batched_command_response_test.cpp
index 757961a5348..3b2670e6828 100644
--- a/src/mongo/s/write_ops/batched_command_response_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_response_test.cpp
@@ -70,10 +70,13 @@ TEST(BatchedCommandResponseTest, Basic) {
TEST(BatchedCommandResponseTest, StaleConfigInfo) {
OID epoch = OID::gen();
- StaleConfigInfo staleInfo(NamespaceString("TestDB.TestColl"),
- ShardVersion(ChunkVersion({epoch, Timestamp(100, 0)}, {1, 0})),
- ShardVersion(ChunkVersion({epoch, Timestamp(100, 0)}, {2, 0})),
- ShardId("TestShard"));
+ StaleConfigInfo staleInfo(
+ NamespaceString("TestDB.TestColl"),
+ ShardVersion(ChunkVersion({epoch, Timestamp(100, 0)}, {1, 0}),
+ CollectionIndexes({epoch, Timestamp(100, 0)}, boost::none)),
+ ShardVersion(ChunkVersion({epoch, Timestamp(100, 0)}, {2, 0}),
+ CollectionIndexes({epoch, Timestamp(100, 0)}, boost::none)),
+ ShardId("TestShard"));
BSONObjBuilder builder(BSON("index" << 0 << "code" << ErrorCodes::StaleConfig << "errmsg"
<< "StaleConfig error"));
staleInfo.serialize(&builder);
@@ -156,7 +159,8 @@ TEST(BatchedCommandResponseTest, TooManyBigErrors) {
}
TEST(BatchedCommandResponseTest, CompatibilityFromWriteErrorToBatchCommandResponse) {
- ShardVersion versionReceived(ChunkVersion({OID::gen(), Timestamp(2, 0)}, {1, 0}));
+ CollectionGeneration gen(OID::gen(), Timestamp(2, 0));
+ ShardVersion versionReceived(ChunkVersion(gen, {1, 0}), CollectionIndexes(gen, boost::none));
write_ops::UpdateCommandReply reply;
reply.getWriteCommandReplyBase().setN(1);
diff --git a/src/mongo/s/write_ops/write_op.cpp b/src/mongo/s/write_ops/write_op.cpp
index 372d5e04cd8..e1c49bd96b8 100644
--- a/src/mongo/s/write_ops/write_op.cpp
+++ b/src/mongo/s/write_ops/write_op.cpp
@@ -127,7 +127,7 @@ void WriteOp::targetWrites(OperationContext* opCtx,
// Outside of a transaction, multiple endpoints currently imply no versioning, since we
// can't retry half a regular multi-write.
if (endpoints.size() > 1u && !inTransaction) {
- endpoint.shardVersion = ChunkVersion::IGNORED();
+ endpoint.shardVersion = ShardVersion::IGNORED();
}
targetedWrites->push_back(std::make_unique<TargetedWrite>(std::move(endpoint), ref));
diff --git a/src/mongo/s/write_ops/write_op_test.cpp b/src/mongo/s/write_ops/write_op_test.cpp
index 31476906b9b..d297e0cf7f2 100644
--- a/src/mongo/s/write_ops/write_op_test.cpp
+++ b/src/mongo/s/write_ops/write_op_test.cpp
@@ -91,7 +91,7 @@ TEST_F(WriteOpTest, BasicError) {
}
TEST_F(WriteOpTest, TargetSingle) {
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
BatchedCommandRequest request([&] {
write_ops::InsertCommandRequest insertOp(kNss);
@@ -118,12 +118,19 @@ TEST_F(WriteOpTest, TargetSingle) {
// Multi-write targeting test where our query goes to one shard
TEST_F(WriteOpTest, TargetMultiOneShard) {
+ CollectionGeneration gen(OID(), Timestamp(1, 1));
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
+ ShardId("shardA"),
+ ShardVersion(ChunkVersion(gen, {10, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
+ ShardId("shardB"),
+ ShardVersion(ChunkVersion(gen, {20, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
ShardEndpoint endpointC(
- ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
+ ShardId("shardB"),
+ ShardVersion(ChunkVersion(gen, {20, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -153,12 +160,19 @@ TEST_F(WriteOpTest, TargetMultiOneShard) {
// Multi-write targeting test where our write goes to more than one shard
TEST_F(WriteOpTest, TargetMultiAllShards) {
+ CollectionGeneration gen(OID(), Timestamp(1, 1));
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
+ ShardId("shardA"),
+ ShardVersion(ChunkVersion(gen, {10, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
+ ShardId("shardB"),
+ ShardVersion(ChunkVersion(gen, {20, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
ShardEndpoint endpointC(
- ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
+ ShardId("shardB"),
+ ShardVersion(ChunkVersion(gen, {20, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -195,10 +209,15 @@ TEST_F(WriteOpTest, TargetMultiAllShards) {
}
TEST_F(WriteOpTest, TargetMultiAllShardsAndErrorSingleChildOp) {
+ CollectionGeneration gen(OID(), Timestamp(1, 1));
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
+ ShardId("shardA"),
+ ShardVersion(ChunkVersion(gen, {10, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
+ ShardId("shardB"),
+ ShardVersion(ChunkVersion(gen, {20, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -227,10 +246,11 @@ TEST_F(WriteOpTest, TargetMultiAllShardsAndErrorSingleChildOp) {
// Simulate retryable error.
write_ops::WriteError retryableError(
0,
- {StaleConfigInfo(kNss,
- ShardVersion(ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0})),
- ShardVersion(ChunkVersion({OID(), Timestamp(1, 1)}, {11, 0})),
- ShardId("shardA")),
+ {StaleConfigInfo(
+ kNss,
+ ShardVersion(ChunkVersion(gen, {10, 0}), CollectionIndexes(gen, boost::none)),
+ ShardVersion(ChunkVersion(gen, {11, 0}), CollectionIndexes(gen, boost::none)),
+ ShardId("shardA")),
"simulate ssv error for test"});
writeOp.noteWriteError(*targeted[0], retryableError);
@@ -245,7 +265,7 @@ TEST_F(WriteOpTest, TargetMultiAllShardsAndErrorSingleChildOp) {
// Single error after targeting test
TEST_F(WriteOpTest, ErrorSingle) {
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
BatchedCommandRequest request([&] {
write_ops::InsertCommandRequest insertOp(kNss);
@@ -274,7 +294,7 @@ TEST_F(WriteOpTest, ErrorSingle) {
// Cancel single targeting test
TEST_F(WriteOpTest, CancelSingle) {
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
BatchedCommandRequest request([&] {
write_ops::InsertCommandRequest insertOp(kNss);
@@ -305,7 +325,7 @@ TEST_F(WriteOpTest, CancelSingle) {
// Retry single targeting test
TEST_F(WriteOpTest, RetrySingleOp) {
- ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
+ ShardEndpoint endpoint(ShardId("shard"), ShardVersion::IGNORED(), boost::none);
BatchedCommandRequest request([&] {
write_ops::InsertCommandRequest insertOp(kNss);
@@ -345,12 +365,19 @@ private:
};
TEST_F(WriteOpTransactionTest, TargetMultiDoesNotTargetAllShards) {
+ CollectionGeneration gen(OID(), Timestamp(1, 1));
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
+ ShardId("shardA"),
+ ShardVersion(ChunkVersion(gen, {10, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
+ ShardId("shardB"),
+ ShardVersion(ChunkVersion(gen, {20, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
ShardEndpoint endpointC(
- ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
+ ShardId("shardB"),
+ ShardVersion(ChunkVersion(gen, {20, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -385,10 +412,15 @@ TEST_F(WriteOpTransactionTest, TargetMultiDoesNotTargetAllShards) {
}
TEST_F(WriteOpTransactionTest, TargetMultiAllShardsAndErrorSingleChildOp) {
+ CollectionGeneration gen(OID(), Timestamp(1, 1));
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
+ ShardId("shardA"),
+ ShardVersion(ChunkVersion(gen, {10, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
+ ShardId("shardB"),
+ ShardVersion(ChunkVersion(gen, {20, 0}), CollectionIndexes(gen, boost::none)),
+ boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -421,10 +453,11 @@ TEST_F(WriteOpTransactionTest, TargetMultiAllShardsAndErrorSingleChildOp) {
// Simulate retryable error.
write_ops::WriteError retryableError(
0,
- {StaleConfigInfo(kNss,
- ShardVersion(ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0})),
- ShardVersion(ChunkVersion({OID(), Timestamp(1, 1)}, {11, 0})),
- ShardId("shardA")),
+ {StaleConfigInfo(
+ kNss,
+ ShardVersion(ChunkVersion(gen, {10, 0}), CollectionIndexes(gen, boost::none)),
+ ShardVersion(ChunkVersion(gen, {11, 0}), CollectionIndexes(gen, boost::none)),
+ ShardId("shardA")),
"simulate ssv error for test"});
writeOp.noteWriteError(*targeted[0], retryableError);