summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorSergi Mateo Bellido <sergi.mateo-bellido@mongodb.com>2020-12-03 11:02:49 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2020-12-10 10:14:41 +0000
commit457ad84c771a6cdd78f44593760613f3abf2d24c (patch)
tree1029a0044907c5c5cf2d564fac6dd6682ccdd2a2 /src
parent4e5de13940486910b57c1feb57e58687f778b855 (diff)
downloadmongo-457ad84c771a6cdd78f44593760613f3abf2d24c.tar.gz
SERVER-53093 Add timestamp to ChunkVersion
PART 2: Fixing our tests
Diffstat (limited to 'src')
-rw-r--r--src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp2
-rw-r--r--src/mongo/db/pipeline/process_interface/mongos_process_interface_test.cpp3
-rw-r--r--src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp6
-rw-r--r--src/mongo/db/pipeline/sharded_union_test.cpp6
-rw-r--r--src/mongo/db/s/active_migrations_registry_test.cpp2
-rw-r--r--src/mongo/db/s/active_shard_collection_registry_test.cpp12
-rw-r--r--src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp4
-rw-r--r--src/mongo/db/s/balancer/balancer_policy_test.cpp2
-rw-r--r--src/mongo/db/s/balancer/migration_manager_test.cpp20
-rw-r--r--src/mongo/db/s/balancer/scoped_migration_request_test.cpp2
-rw-r--r--src/mongo/db/s/balancer/type_migration_test.cpp14
-rw-r--r--src/mongo/db/s/collection_metadata_filtering_test.cpp3
-rw-r--r--src/mongo/db/s/collection_metadata_test.cpp3
-rw-r--r--src/mongo/db/s/collection_sharding_runtime_test.cpp33
-rw-r--r--src/mongo/db/s/config/initial_split_policy_test.cpp2
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp134
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp8
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp45
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp82
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp26
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp12
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp28
-rw-r--r--src/mongo/db/s/metadata_manager_test.cpp4
-rw-r--r--src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp5
-rw-r--r--src/mongo/db/s/migration_util_test.cpp12
-rw-r--r--src/mongo/db/s/op_observer_sharding_test.cpp7
-rw-r--r--src/mongo/db/s/range_deletion_util_test.cpp3
-rw-r--r--src/mongo/db/s/resharding/resharding_coordinator_test.cpp12
-rw-r--r--src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp32
-rw-r--r--src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp5
-rw-r--r--src/mongo/db/s/resharding/resharding_recipient_service_test.cpp2
-rw-r--r--src/mongo/db/s/resharding_destined_recipient_test.cpp12
-rw-r--r--src/mongo/db/s/shard_metadata_util_test.cpp31
-rw-r--r--src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp6
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client_test.cpp22
-rw-r--r--src/mongo/s/catalog/type_chunk_test.cpp26
-rw-r--r--src/mongo/s/catalog_cache_refresh_test.cpp47
-rw-r--r--src/mongo/s/catalog_cache_test.cpp21
-rw-r--r--src/mongo/s/catalog_cache_test_fixture.cpp4
-rw-r--r--src/mongo/s/chunk_manager_query_test.cpp3
-rw-r--r--src/mongo/s/chunk_manager_refresh_bm.cpp17
-rw-r--r--src/mongo/s/chunk_map_test.cpp16
-rw-r--r--src/mongo/s/chunk_test.cpp10
-rw-r--r--src/mongo/s/chunk_version_test.cpp51
-rw-r--r--src/mongo/s/comparable_chunk_version_test.cpp90
-rw-r--r--src/mongo/s/query/cluster_exchange_test.cpp2
-rw-r--r--src/mongo/s/query/sharded_agg_test_fixture.h2
-rw-r--r--src/mongo/s/request_types/balance_chunk_request_test.cpp4
-rw-r--r--src/mongo/s/request_types/commit_chunk_migration_request_test.cpp4
-rw-r--r--src/mongo/s/request_types/move_chunk_request_test.cpp6
-rw-r--r--src/mongo/s/request_types/set_shard_version_request_test.cpp14
-rw-r--r--src/mongo/s/routing_table_history_test.cpp102
-rw-r--r--src/mongo/s/write_ops/batch_write_exec_test.cpp254
-rw-r--r--src/mongo/s/write_ops/batched_command_request_test.cpp3
-rw-r--r--src/mongo/s/write_ops/write_op_test.cpp39
55 files changed, 796 insertions, 521 deletions
diff --git a/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp b/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
index 01280714e74..325215b6717 100644
--- a/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
+++ b/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
@@ -221,7 +221,7 @@ TEST_F(DispatchShardPipelineTest, WrappedDispatchDoesRetryOnStaleConfigError) {
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
expectGetCollection(kTestAggregateNss, epoch, uuid, shardKeyPattern);
expectFindSendBSONObjVector(kConfigHostAndPort, [&]() {
- ChunkVersion version(1, 0, epoch);
+ ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
ChunkType chunk1(kTestAggregateNss,
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
diff --git a/src/mongo/db/pipeline/process_interface/mongos_process_interface_test.cpp b/src/mongo/db/pipeline/process_interface/mongos_process_interface_test.cpp
index e2d64f29eb5..dbf4f571c1c 100644
--- a/src/mongo/db/pipeline/process_interface/mongos_process_interface_test.cpp
+++ b/src/mongo/db/pipeline/process_interface/mongos_process_interface_test.cpp
@@ -62,7 +62,8 @@ public:
TEST_F(MongosProcessInterfaceTest, FailsToEnsureFieldsUniqueIfTargetCollectionVersionIsSpecified) {
auto expCtx = getExpCtx();
- auto targetCollectionVersion = boost::make_optional(ChunkVersion(0, 0, OID::gen()));
+ auto targetCollectionVersion =
+ boost::make_optional(ChunkVersion(0, 0, OID::gen(), boost::none /* timestamp */));
auto processInterface = makeProcessInterface();
ASSERT_THROWS_CODE(processInterface->ensureFieldsUniqueOrResolveDocumentKey(
diff --git a/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp b/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp
index 82c36b40ca7..05f5adc6976 100644
--- a/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp
+++ b/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp
@@ -66,7 +66,8 @@ public:
TEST_F(ProcessInterfaceStandaloneTest,
FailsToEnsureFieldsUniqueIfTargetCollectionVersionIsSpecifiedOnMongos) {
auto expCtx = getExpCtx();
- auto targetCollectionVersion = boost::make_optional(ChunkVersion(0, 0, OID::gen()));
+ auto targetCollectionVersion =
+ boost::make_optional(ChunkVersion(0, 0, OID::gen(), boost::none /* timestamp */));
auto processInterface = makeProcessInterface();
// Test that 'targetCollectionVersion' is not accepted if not from mongos.
@@ -88,7 +89,8 @@ TEST_F(ProcessInterfaceStandaloneTest,
TEST_F(ProcessInterfaceStandaloneTest, FailsToEnsureFieldsUniqueIfJoinFieldsAreNotSentFromMongos) {
auto expCtx = getExpCtx();
- auto targetCollectionVersion = boost::make_optional(ChunkVersion(0, 0, OID::gen()));
+ auto targetCollectionVersion =
+ boost::make_optional(ChunkVersion(0, 0, OID::gen(), boost::none /* timestamp */));
auto processInterface = makeProcessInterface();
expCtx->fromMongos = true;
diff --git a/src/mongo/db/pipeline/sharded_union_test.cpp b/src/mongo/db/pipeline/sharded_union_test.cpp
index 5d0744ae0fc..bb994059d03 100644
--- a/src/mongo/db/pipeline/sharded_union_test.cpp
+++ b/src/mongo/db/pipeline/sharded_union_test.cpp
@@ -167,7 +167,7 @@ TEST_F(ShardedUnionTest, RetriesSubPipelineOnStaleConfigError) {
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
expectGetCollection(kTestAggregateNss, epoch, uuid, shardKeyPattern);
expectFindSendBSONObjVector(kConfigHostAndPort, [&]() {
- ChunkVersion version(1, 0, epoch);
+ ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
ChunkType chunk1(kTestAggregateNss,
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
@@ -244,7 +244,7 @@ TEST_F(ShardedUnionTest, CorrectlySplitsSubPipelineIfRefreshedDistributionRequir
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
expectGetCollection(kTestAggregateNss, epoch, uuid, shardKeyPattern);
expectFindSendBSONObjVector(kConfigHostAndPort, [&]() {
- ChunkVersion version(1, 0, epoch);
+ ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
ChunkType chunk1(kTestAggregateNss,
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
@@ -335,7 +335,7 @@ TEST_F(ShardedUnionTest, AvoidsSplittingSubPipelineIfRefreshedDistributionDoesNo
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
expectGetCollection(kTestAggregateNss, epoch, uuid, shardKeyPattern);
expectFindSendBSONObjVector(kConfigHostAndPort, [&]() {
- ChunkVersion version(1, 0, epoch);
+ ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
ChunkType chunk1(kTestAggregateNss,
{shardKeyPattern.getKeyPattern().globalMin(),
diff --git a/src/mongo/db/s/active_migrations_registry_test.cpp b/src/mongo/db/s/active_migrations_registry_test.cpp
index 0f268e23fb0..4d3418fb331 100644
--- a/src/mongo/db/s/active_migrations_registry_test.cpp
+++ b/src/mongo/db/s/active_migrations_registry_test.cpp
@@ -59,7 +59,7 @@ protected:
};
MoveChunkRequest createMoveChunkRequest(const NamespaceString& nss) {
- const ChunkVersion chunkVersion(1, 2, OID::gen());
+ const ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
BSONObjBuilder builder;
MoveChunkRequest::appendAsCommand(
diff --git a/src/mongo/db/s/active_shard_collection_registry_test.cpp b/src/mongo/db/s/active_shard_collection_registry_test.cpp
index dccf0bdf63d..f154d7af00d 100644
--- a/src/mongo/db/s/active_shard_collection_registry_test.cpp
+++ b/src/mongo/db/s/active_shard_collection_registry_test.cpp
@@ -88,7 +88,7 @@ TEST_F(ShardCollectionRegistrationTest, ScopedShardCollectionConstructorAndAssig
ASSERT(originalScopedShardCollection.mustExecute());
// Need to signal the registered shard collection so the destructor doesn't invariant
- CreateCollectionResponse response(ChunkVersion(1, 0, OID::gen()));
+ CreateCollectionResponse response(ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */));
response.setCollectionUUID(UUID::gen());
originalScopedShardCollection.emplaceResponse(response);
}
@@ -119,7 +119,7 @@ TEST_F(ShardCollectionRegistrationTest,
_registry.registerShardCollection(secondShardsvrShardCollectionRequest);
ASSERT_EQ(ErrorCodes::ConflictingOperationInProgress, secondScopedShardCollection.getStatus());
- CreateCollectionResponse response(ChunkVersion(1, 0, OID::gen()));
+ CreateCollectionResponse response(ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */));
response.setCollectionUUID(UUID::gen());
originalScopedShardCollection.emplaceResponse(response);
}
@@ -153,7 +153,7 @@ TEST_F(ShardCollectionRegistrationTest, SecondShardCollectionWithSameOptionsJoin
auto uuid = UUID::gen();
- CreateCollectionResponse response(ChunkVersion(1, 0, OID::gen()));
+ CreateCollectionResponse response(ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */));
response.setCollectionUUID(uuid);
originalScopedShardCollection.emplaceResponse(response);
@@ -220,10 +220,12 @@ TEST_F(ShardCollectionRegistrationTest, TwoShardCollectionsOnDifferentCollection
assertGet(_registry.registerShardCollection(secondShardsvrShardCollectionRequest));
ASSERT(secondScopedShardCollection.mustExecute());
- CreateCollectionResponse responseOriginal(ChunkVersion(1, 0, OID::gen()));
+ CreateCollectionResponse responseOriginal(
+ ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */));
responseOriginal.setCollectionUUID(UUID::gen());
- CreateCollectionResponse responseSecond(ChunkVersion(1, 0, OID::gen()));
+ CreateCollectionResponse responseSecond(
+ ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */));
responseOriginal.setCollectionUUID(UUID::gen());
originalScopedShardCollection.emplaceResponse(responseOriginal);
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
index 5607901f2c2..980fb82cad8 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
@@ -126,7 +126,7 @@ TEST_F(BalancerChunkSelectionTest, TagRangesOverlap) {
operationContext(), ShardType::ConfigNS, kShard1, kMajorityWriteConcern));
// Set up a database and a sharded collection in the metadata.
- ChunkVersion version(2, 0, OID::gen());
+ ChunkVersion version(2, 0, OID::gen(), boost::none /* timestamp */);
setUpDatabase(kDbName, kShardId0);
setUpCollection(kNamespace, version);
@@ -182,7 +182,7 @@ TEST_F(BalancerChunkSelectionTest, TagRangeMaxNotAlignedWithChunkMax) {
kMajorityWriteConcern));
// Set up a database and a sharded collection in the metadata.
- ChunkVersion version(2, 0, OID::gen());
+ ChunkVersion version(2, 0, OID::gen(), boost::none /* timestamp */);
setUpDatabase(kDbName, kShardId0);
setUpCollection(kNamespace, version);
diff --git a/src/mongo/db/s/balancer/balancer_policy_test.cpp b/src/mongo/db/s/balancer/balancer_policy_test.cpp
index cb353dfe6d4..f5b45711037 100644
--- a/src/mongo/db/s/balancer/balancer_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy_test.cpp
@@ -77,7 +77,7 @@ std::pair<ShardStatisticsVector, ShardToChunksMap> generateCluster(
int64_t currentChunk = 0;
- ChunkVersion chunkVersion(1, 0, OID::gen());
+ ChunkVersion chunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
const KeyPattern shardKeyPattern(BSON("x" << 1));
diff --git a/src/mongo/db/s/balancer/migration_manager_test.cpp b/src/mongo/db/s/balancer/migration_manager_test.cpp
index 9cca1178bd4..4948ac24e48 100644
--- a/src/mongo/db/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/db/s/balancer/migration_manager_test.cpp
@@ -117,7 +117,7 @@ TEST_F(MigrationManagerTest, OneCollectionTwoMigrations) {
// Set up the database and collection as sharded in the metadata.
const std::string dbName = "foo";
const NamespaceString collName(dbName, "bar");
- ChunkVersion version(2, 0, OID::gen());
+ ChunkVersion version(2, 0, OID::gen(), boost::none /* timestamp */);
setUpDatabase(dbName, kShardId0);
setUpCollection(collName, version);
@@ -175,8 +175,8 @@ TEST_F(MigrationManagerTest, TwoCollectionsTwoMigrationsEach) {
std::string dbName = "foo";
const NamespaceString collName1(dbName, "bar");
const NamespaceString collName2(dbName, "baz");
- ChunkVersion version1(2, 0, OID::gen());
- ChunkVersion version2(2, 0, OID::gen());
+ ChunkVersion version1(2, 0, OID::gen(), boost::none /* timestamp */);
+ ChunkVersion version2(2, 0, OID::gen(), boost::none /* timestamp */);
setUpDatabase(dbName, kShardId0);
setUpCollection(collName1, version1);
@@ -252,7 +252,7 @@ TEST_F(MigrationManagerTest, SourceShardNotFound) {
// Set up the database and collection as sharded in the metadata.
const std::string dbName = "foo";
const NamespaceString collName(dbName, "bar");
- ChunkVersion version(2, 0, OID::gen());
+ ChunkVersion version(2, 0, OID::gen(), boost::none /* timestamp */);
setUpDatabase(dbName, kShardId0);
setUpCollection(collName, version);
@@ -309,7 +309,7 @@ TEST_F(MigrationManagerTest, JumboChunkResponseBackwardsCompatibility) {
// Set up the database and collection as sharded in the metadata.
const std::string dbName = "foo";
const NamespaceString collName(dbName, "bar");
- ChunkVersion version(2, 0, OID::gen());
+ ChunkVersion version(2, 0, OID::gen(), boost::none /* timestamp */);
setUpDatabase(dbName, kShardId0);
setUpCollection(collName, version);
@@ -354,7 +354,7 @@ TEST_F(MigrationManagerTest, InterruptMigration) {
// Set up the database and collection as sharded in the metadata.
const std::string dbName = "foo";
const NamespaceString collName(dbName, "bar");
- ChunkVersion version(2, 0, OID::gen());
+ ChunkVersion version(2, 0, OID::gen(), boost::none /* timestamp */);
setUpDatabase(dbName, kShardId0);
setUpCollection(collName, version);
@@ -450,7 +450,7 @@ TEST_F(MigrationManagerTest, RestartMigrationManager) {
// Set up the database and collection as sharded in the metadata.
const std::string dbName = "foo";
const NamespaceString collName(dbName, "bar");
- ChunkVersion version(2, 0, OID::gen());
+ ChunkVersion version(2, 0, OID::gen(), boost::none /* timestamp */);
setUpDatabase(dbName, kShardId0);
setUpCollection(collName, version);
@@ -501,7 +501,7 @@ TEST_F(MigrationManagerTest, MigrationRecovery) {
// Set up the database and collection as sharded in the metadata.
const std::string dbName = "foo";
const NamespaceString collName(dbName, "bar");
- ChunkVersion version(1, 0, OID::gen());
+ ChunkVersion version(1, 0, OID::gen(), boost::none /* timestamp */);
setUpDatabase(dbName, kShardId0);
setUpCollection(collName, version);
@@ -556,7 +556,7 @@ TEST_F(MigrationManagerTest, FailMigrationRecovery) {
// Set up the database and collection as sharded in the metadata.
const std::string dbName = "foo";
const NamespaceString collName(dbName, "bar");
- ChunkVersion version(1, 0, OID::gen());
+ ChunkVersion version(1, 0, OID::gen(), boost::none /* timestamp */);
setUpDatabase(dbName, kShardId0);
setUpCollection(collName, version);
@@ -615,7 +615,7 @@ TEST_F(MigrationManagerTest, RemoteCallErrorConversionToOperationFailed) {
// Set up the database and collection as sharded in the metadata.
const std::string dbName = "foo";
const NamespaceString collName(dbName, "bar");
- ChunkVersion version(1, 0, OID::gen());
+ ChunkVersion version(1, 0, OID::gen(), boost::none /* timestamp */);
setUpDatabase(dbName, kShardId0);
setUpCollection(collName, version);
diff --git a/src/mongo/db/s/balancer/scoped_migration_request_test.cpp b/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
index 9f97de3ee27..75442f5c09a 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
@@ -98,7 +98,7 @@ ScopedMigrationRequest ScopedMigrationRequestTest::makeScopedMigrationRequest(
}
MigrateInfo makeMigrateInfo() {
- const ChunkVersion kChunkVersion{1, 2, OID::gen()};
+ const ChunkVersion kChunkVersion{1, 2, OID::gen(), boost::none /* timestamp */};
BSONObjBuilder chunkBuilder;
chunkBuilder.append(ChunkType::ns(), kNs);
diff --git a/src/mongo/db/s/balancer/type_migration_test.cpp b/src/mongo/db/s/balancer/type_migration_test.cpp
index 186cd0172be..13e10d94a9f 100644
--- a/src/mongo/db/s/balancer/type_migration_test.cpp
+++ b/src/mongo/db/s/balancer/type_migration_test.cpp
@@ -48,7 +48,7 @@ const ShardId kToShard("shard0001");
const bool kWaitForDelete{true};
TEST(MigrationTypeTest, ConvertFromMigrationInfo) {
- const ChunkVersion version(1, 2, OID::gen());
+ const ChunkVersion version(1, 2, OID::gen(), boost::none /* timestamp */);
BSONObjBuilder chunkBuilder;
chunkBuilder.append(ChunkType::name(), OID::gen());
@@ -84,7 +84,7 @@ TEST(MigrationTypeTest, ConvertFromMigrationInfo) {
}
TEST(MigrationTypeTest, FromAndToBSON) {
- const ChunkVersion version(1, 2, OID::gen());
+ const ChunkVersion version(1, 2, OID::gen(), boost::none /* timestamp */);
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -104,7 +104,7 @@ TEST(MigrationTypeTest, FromAndToBSON) {
}
TEST(MigrationTypeTest, MissingRequiredNamespaceField) {
- const ChunkVersion version(1, 2, OID::gen());
+ const ChunkVersion version(1, 2, OID::gen(), boost::none /* timestamp */);
BSONObjBuilder builder;
builder.append(MigrationType::min(), kMin);
@@ -121,7 +121,7 @@ TEST(MigrationTypeTest, MissingRequiredNamespaceField) {
}
TEST(MigrationTypeTest, MissingRequiredMinField) {
- const ChunkVersion version(1, 2, OID::gen());
+ const ChunkVersion version(1, 2, OID::gen(), boost::none /* timestamp */);
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -138,7 +138,7 @@ TEST(MigrationTypeTest, MissingRequiredMinField) {
}
TEST(MigrationTypeTest, MissingRequiredMaxField) {
- const ChunkVersion version(1, 2, OID::gen());
+ const ChunkVersion version(1, 2, OID::gen(), boost::none /* timestamp */);
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -155,7 +155,7 @@ TEST(MigrationTypeTest, MissingRequiredMaxField) {
}
TEST(MigrationTypeTest, MissingRequiredFromShardField) {
- const ChunkVersion version(1, 2, OID::gen());
+ const ChunkVersion version(1, 2, OID::gen(), boost::none /* timestamp */);
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -172,7 +172,7 @@ TEST(MigrationTypeTest, MissingRequiredFromShardField) {
}
TEST(MigrationTypeTest, MissingRequiredToShardField) {
- const ChunkVersion version(1, 2, OID::gen());
+ const ChunkVersion version(1, 2, OID::gen(), boost::none /* timestamp */);
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp
index ce95b6b5a46..e0559141e43 100644
--- a/src/mongo/db/s/collection_metadata_filtering_test.cpp
+++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp
@@ -70,10 +70,11 @@ protected:
nullptr,
false,
epoch,
+ boost::none /* timestamp */,
boost::none,
true,
[&] {
- ChunkVersion version(1, 0, epoch);
+ ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
ChunkType chunk1(kNss,
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << -100)},
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index c42989221ef..b2e27794f62 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -59,7 +59,7 @@ CollectionMetadata makeCollectionMetadataImpl(
std::vector<ChunkType> allChunks;
auto nextMinKey = shardKeyPattern.globalMin();
- ChunkVersion version{1, 0, epoch};
+ ChunkVersion version{1, 0, epoch, boost::none /* timestamp */};
for (const auto& myNextChunk : thisShardsChunks) {
if (SimpleBSONObjComparator::kInstance.evaluate(nextMinKey < myNextChunk.first)) {
// Need to add a chunk to the other shard from nextMinKey to myNextChunk.first.
@@ -94,6 +94,7 @@ CollectionMetadata makeCollectionMetadataImpl(
nullptr,
false,
epoch,
+ boost::none /* timestamp */,
reshardingFields,
true,
allChunks)),
diff --git a/src/mongo/db/s/collection_sharding_runtime_test.cpp b/src/mongo/db/s/collection_sharding_runtime_test.cpp
index 9c4ce69c65d..1779b75b60b 100644
--- a/src/mongo/db/s/collection_sharding_runtime_test.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime_test.cpp
@@ -51,21 +51,24 @@ protected:
UUID uuid = UUID::gen()) {
const OID epoch = OID::gen();
auto range = ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY));
- auto chunk =
- ChunkType(kTestNss, std::move(range), ChunkVersion(1, 0, epoch), ShardId("other"));
- ChunkManager cm(
- ShardId("0"),
- DatabaseVersion(UUID::gen()),
- makeStandaloneRoutingTableHistory(RoutingTableHistory::makeNew(kTestNss,
- uuid,
- kShardKeyPattern,
- nullptr,
- false,
- epoch,
- boost::none,
- true,
- {std::move(chunk)})),
- boost::none);
+ auto chunk = ChunkType(kTestNss,
+ std::move(range),
+ ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
+ ShardId("other"));
+ ChunkManager cm(ShardId("0"),
+ DatabaseVersion(UUID::gen()),
+ makeStandaloneRoutingTableHistory(
+ RoutingTableHistory::makeNew(kTestNss,
+ uuid,
+ kShardKeyPattern,
+ nullptr,
+ false,
+ epoch,
+ boost::none /* timestamp */,
+ boost::none,
+ true,
+ {std::move(chunk)})),
+ boost::none);
if (!OperationShardingState::isOperationVersioned(opCtx)) {
const auto version = cm.getVersion(ShardId("0"));
diff --git a/src/mongo/db/s/config/initial_split_policy_test.cpp b/src/mongo/db/s/config/initial_split_policy_test.cpp
index d0866e449d2..b583c6b87c4 100644
--- a/src/mongo/db/s/config/initial_split_policy_test.cpp
+++ b/src/mongo/db/s/config/initial_split_policy_test.cpp
@@ -206,7 +206,7 @@ public:
std::vector<ChunkType> chunks;
for (unsigned long i = 0; i < chunkRanges.size(); ++i) {
- ChunkVersion version(1, 0, OID::gen());
+ ChunkVersion version(1, 0, OID::gen(), boost::none /* timestamp */);
ChunkType chunk(_nss, chunkRanges[i], version, shardIds[i]);
chunk.setHistory({ChunkHistory(timeStamp, shardIds[i])});
chunks.push_back(chunk);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp
index a73d0aa1d0a..9c36446fd15 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp
@@ -138,14 +138,24 @@ protected:
TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
BumpChunkVersionOneChunkPerShard) {
const auto epoch = OID::gen();
- const auto shard0Chunk0 = generateChunkType(
- kNss, ChunkVersion(10, 1, epoch), kShard0.getName(), BSON("a" << 1), BSON("a" << 10));
- const auto shard1Chunk0 = generateChunkType(
- kNss, ChunkVersion(11, 2, epoch), kShard1.getName(), BSON("a" << 11), BSON("a" << 20));
+ const auto shard0Chunk0 =
+ generateChunkType(kNss,
+ ChunkVersion(10, 1, epoch, boost::none /* timestamp */),
+ kShard0.getName(),
+ BSON("a" << 1),
+ BSON("a" << 10));
+ const auto shard1Chunk0 =
+ generateChunkType(kNss,
+ ChunkVersion(11, 2, epoch, boost::none /* timestamp */),
+ kShard1.getName(),
+ BSON("a" << 11),
+ BSON("a" << 20));
const auto collectionVersion = shard1Chunk0.getVersion();
- ChunkVersion targetChunkVersion(
- collectionVersion.majorVersion() + 1, 0, collectionVersion.epoch());
+ ChunkVersion targetChunkVersion(collectionVersion.majorVersion() + 1,
+ 0,
+ collectionVersion.epoch(),
+ collectionVersion.getTimestamp());
setupCollection(kNss, kKeyPattern, {shard0Chunk0, shard1Chunk0});
@@ -165,16 +175,30 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
BumpChunkVersionTwoChunksOnOneShard) {
const auto epoch = OID::gen();
- const auto shard0Chunk0 = generateChunkType(
- kNss, ChunkVersion(10, 1, epoch), kShard0.getName(), BSON("a" << 1), BSON("a" << 10));
- const auto shard0Chunk1 = generateChunkType(
- kNss, ChunkVersion(11, 2, epoch), kShard0.getName(), BSON("a" << 11), BSON("a" << 20));
- const auto shard1Chunk0 = generateChunkType(
- kNss, ChunkVersion(8, 1, epoch), kShard1.getName(), BSON("a" << 21), BSON("a" << 100));
+ const auto shard0Chunk0 =
+ generateChunkType(kNss,
+ ChunkVersion(10, 1, epoch, boost::none /* timestamp */),
+ kShard0.getName(),
+ BSON("a" << 1),
+ BSON("a" << 10));
+ const auto shard0Chunk1 =
+ generateChunkType(kNss,
+ ChunkVersion(11, 2, epoch, boost::none /* timestamp */),
+ kShard0.getName(),
+ BSON("a" << 11),
+ BSON("a" << 20));
+ const auto shard1Chunk0 =
+ generateChunkType(kNss,
+ ChunkVersion(8, 1, epoch, boost::none /* timestamp */),
+ kShard1.getName(),
+ BSON("a" << 21),
+ BSON("a" << 100));
const auto collectionVersion = shard0Chunk1.getVersion();
- ChunkVersion targetChunkVersion(
- collectionVersion.majorVersion() + 1, 0, collectionVersion.epoch());
+ ChunkVersion targetChunkVersion(collectionVersion.majorVersion() + 1,
+ 0,
+ collectionVersion.epoch(),
+ collectionVersion.getTimestamp());
setupCollection(kNss, kKeyPattern, {shard0Chunk0, shard0Chunk1, shard1Chunk0});
@@ -193,18 +217,36 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
BumpChunkVersionTwoChunksOnTwoShards) {
const auto epoch = OID::gen();
- const auto shard0Chunk0 = generateChunkType(
- kNss, ChunkVersion(10, 1, epoch), kShard0.getName(), BSON("a" << 1), BSON("a" << 10));
- const auto shard0Chunk1 = generateChunkType(
- kNss, ChunkVersion(11, 2, epoch), kShard0.getName(), BSON("a" << 11), BSON("a" << 20));
- const auto shard1Chunk0 = generateChunkType(
- kNss, ChunkVersion(8, 1, epoch), kShard1.getName(), BSON("a" << 21), BSON("a" << 100));
- const auto shard1Chunk1 = generateChunkType(
- kNss, ChunkVersion(12, 1, epoch), kShard1.getName(), BSON("a" << 101), BSON("a" << 200));
+ const auto shard0Chunk0 =
+ generateChunkType(kNss,
+ ChunkVersion(10, 1, epoch, boost::none /* timestamp */),
+ kShard0.getName(),
+ BSON("a" << 1),
+ BSON("a" << 10));
+ const auto shard0Chunk1 =
+ generateChunkType(kNss,
+ ChunkVersion(11, 2, epoch, boost::none /* timestamp */),
+ kShard0.getName(),
+ BSON("a" << 11),
+ BSON("a" << 20));
+ const auto shard1Chunk0 =
+ generateChunkType(kNss,
+ ChunkVersion(8, 1, epoch, boost::none /* timestamp */),
+ kShard1.getName(),
+ BSON("a" << 21),
+ BSON("a" << 100));
+ const auto shard1Chunk1 =
+ generateChunkType(kNss,
+ ChunkVersion(12, 1, epoch, boost::none /* timestamp */),
+ kShard1.getName(),
+ BSON("a" << 101),
+ BSON("a" << 200));
const auto collectionVersion = shard1Chunk1.getVersion();
- ChunkVersion targetChunkVersion(
- collectionVersion.majorVersion() + 1, 0, collectionVersion.epoch());
+ ChunkVersion targetChunkVersion(collectionVersion.majorVersion() + 1,
+ 0,
+ collectionVersion.epoch(),
+ collectionVersion.getTimestamp());
setupCollection(kNss, kKeyPattern, {shard0Chunk0, shard0Chunk1, shard1Chunk0, shard1Chunk1});
@@ -223,10 +265,18 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
SucceedsInThePresenceOfTransientTransactionErrors) {
const auto epoch = OID::gen();
- const auto shard0Chunk0 = generateChunkType(
- kNss, ChunkVersion(10, 1, epoch), kShard0.getName(), BSON("a" << 1), BSON("a" << 10));
- const auto shard1Chunk0 = generateChunkType(
- kNss, ChunkVersion(11, 2, epoch), kShard1.getName(), BSON("a" << 11), BSON("a" << 20));
+ const auto shard0Chunk0 =
+ generateChunkType(kNss,
+ ChunkVersion(10, 1, epoch, boost::none /* timestamp */),
+ kShard0.getName(),
+ BSON("a" << 1),
+ BSON("a" << 10));
+ const auto shard1Chunk0 =
+ generateChunkType(kNss,
+ ChunkVersion(11, 2, epoch, boost::none /* timestamp */),
+ kShard1.getName(),
+ BSON("a" << 11),
+ BSON("a" << 20));
const auto initialCollectionVersion = shard1Chunk0.getVersion();
setupCollection(kNss, kKeyPattern, {shard0Chunk0, shard1Chunk0});
@@ -242,8 +292,10 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
}
});
- auto targetChunkVersion = ChunkVersion{
- initialCollectionVersion.majorVersion() + 1, 0, initialCollectionVersion.epoch()};
+ auto targetChunkVersion = ChunkVersion{initialCollectionVersion.majorVersion() + 1,
+ 0,
+ initialCollectionVersion.epoch(),
+ initialCollectionVersion.getTimestamp()};
ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged(
shard0Chunk0, getChunkDoc(operationContext(), shard0Chunk0.getMin()), targetChunkVersion));
@@ -269,8 +321,10 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
}
});
- targetChunkVersion = ChunkVersion{
- initialCollectionVersion.majorVersion() + 2, 0, initialCollectionVersion.epoch()};
+ targetChunkVersion = ChunkVersion{initialCollectionVersion.majorVersion() + 2,
+ 0,
+ initialCollectionVersion.epoch(),
+ initialCollectionVersion.getTimestamp()};
ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged(
shard0Chunk0, getChunkDoc(operationContext(), shard0Chunk0.getMin()), targetChunkVersion));
@@ -284,10 +338,18 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
StopsRetryingOnPermanentServerErrors) {
const auto epoch = OID::gen();
- const auto shard0Chunk0 = generateChunkType(
- kNss, ChunkVersion(10, 1, epoch), kShard0.getName(), BSON("a" << 1), BSON("a" << 10));
- const auto shard1Chunk0 = generateChunkType(
- kNss, ChunkVersion(11, 2, epoch), kShard1.getName(), BSON("a" << 11), BSON("a" << 20));
+ const auto shard0Chunk0 =
+ generateChunkType(kNss,
+ ChunkVersion(10, 1, epoch, boost::none /* timestamp */),
+ kShard0.getName(),
+ BSON("a" << 1),
+ BSON("a" << 10));
+ const auto shard1Chunk0 =
+ generateChunkType(kNss,
+ ChunkVersion(11, 2, epoch, boost::none /* timestamp */),
+ kShard1.getName(),
+ BSON("a" << 11),
+ BSON("a" << 20));
setupCollection(kNss, kKeyPattern, {shard0Chunk0, shard1Chunk0});
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
index 0982c6fe76e..bd428f52954 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
@@ -77,7 +77,7 @@ protected:
ChunkType chunk;
chunk.setName(OID::gen());
chunk.setNS(_namespace);
- chunk.setVersion({12, 7, _epoch});
+ chunk.setVersion({12, 7, _epoch, boost::none /* timestamp */});
chunk.setShard(shard.getName());
chunk.setMin(jumboChunk().getMin());
chunk.setMax(jumboChunk().getMax());
@@ -86,7 +86,7 @@ protected:
ChunkType otherChunk;
otherChunk.setName(OID::gen());
otherChunk.setNS(_namespace);
- otherChunk.setVersion({14, 7, _epoch});
+ otherChunk.setVersion({14, 7, _epoch, boost::none /* timestamp */});
otherChunk.setShard(shard.getName());
otherChunk.setMin(nonJumboChunk().getMin());
otherChunk.setMax(nonJumboChunk().getMax());
@@ -105,7 +105,7 @@ TEST_F(ClearJumboFlagTest, ClearJumboShouldBumpVersion) {
auto chunkDoc = uassertStatusOK(getChunkDoc(operationContext(), jumboChunk().getMin()));
ASSERT_FALSE(chunkDoc.getJumbo());
- ASSERT_EQ(ChunkVersion(15, 0, epoch()), chunkDoc.getVersion());
+ ASSERT_EQ(ChunkVersion(15, 0, epoch(), boost::none /* timestamp */), chunkDoc.getVersion());
}
TEST_F(ClearJumboFlagTest, ClearJumboShouldNotBumpVersionIfChunkNotJumbo) {
@@ -114,7 +114,7 @@ TEST_F(ClearJumboFlagTest, ClearJumboShouldNotBumpVersionIfChunkNotJumbo) {
auto chunkDoc = uassertStatusOK(getChunkDoc(operationContext(), nonJumboChunk().getMin()));
ASSERT_FALSE(chunkDoc.getJumbo());
- ASSERT_EQ(ChunkVersion(14, 7, epoch()), chunkDoc.getVersion());
+ ASSERT_EQ(ChunkVersion(14, 7, epoch(), boost::none /* timestamp */), chunkDoc.getVersion());
}
TEST_F(ClearJumboFlagTest, AssertsOnEpochMismatch) {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
index 754deb1bf86..79a0cd0414d 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
@@ -62,7 +62,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
ChunkType migratedChunk, controlChunk;
{
- ChunkVersion origVersion(12, 7, OID::gen());
+ ChunkVersion origVersion(12, 7, OID::gen(), boost::none /* timestamp */);
migratedChunk.setName(OID::gen());
migratedChunk.setNS(kNamespace);
@@ -100,7 +100,8 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
auto mver = assertGet(ChunkVersion::parseWithField(versions, "shardVersion"));
ASSERT_EQ(ChunkVersion(migratedChunk.getVersion().majorVersion() + 1,
1,
- migratedChunk.getVersion().epoch()),
+ migratedChunk.getVersion().epoch(),
+ migratedChunk.getVersion().getTimestamp()),
mver);
// Verify that a collection version is returned
@@ -140,7 +141,8 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
setupShards({shard0, shard1});
int origMajorVersion = 15;
- auto const origVersion = ChunkVersion(origMajorVersion, 4, OID::gen());
+ auto const origVersion =
+ ChunkVersion(origMajorVersion, 4, OID::gen(), boost::none /* timestamp */);
ChunkType chunk0;
chunk0.setName(OID::gen());
@@ -174,7 +176,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
BSONObj versions = resultBSON.getValue();
auto mver = ChunkVersion::parseWithField(versions, "shardVersion");
ASSERT_OK(mver.getStatus());
- ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch()), mver.getValue());
+ ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch(), origVersion.getTimestamp()), mver.getValue());
// Verify the chunk ended up in the right shard.
auto chunkDoc0 = uassertStatusOK(getChunkDoc(operationContext(), chunkMin));
@@ -197,7 +199,8 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
setupShards({shard0, shard1});
int origMajorVersion = 15;
- auto const origVersion = ChunkVersion(origMajorVersion, 4, OID::gen());
+ auto const origVersion =
+ ChunkVersion(origMajorVersion, 4, OID::gen(), boost::none /* timestamp */);
ChunkType chunk0;
chunk0.setName(OID::gen());
@@ -232,7 +235,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
BSONObj versions = resultBSON.getValue();
auto mver = ChunkVersion::parseWithField(versions, "shardVersion");
ASSERT_OK(mver.getStatus());
- ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch()), mver.getValue());
+ ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch(), origVersion.getTimestamp()), mver.getValue());
// Verify the chunk ended up in the right shard.
auto chunkDoc0 = uassertStatusOK(getChunkDoc(operationContext(), chunkMin));
@@ -256,7 +259,8 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) {
setupShards({shard0, shard1});
int origMajorVersion = 15;
- auto const origVersion = ChunkVersion(origMajorVersion, 4, OID::gen());
+ auto const origVersion =
+ ChunkVersion(origMajorVersion, 4, OID::gen(), boost::none /* timestamp */);
ChunkType chunk0;
chunk0.setName(OID::gen());
@@ -300,7 +304,8 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
setupShards({shard0, shard1});
int origMajorVersion = 12;
- auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
+ auto const origVersion =
+ ChunkVersion(origMajorVersion, 7, OID::gen(), boost::none /* timestamp */);
ChunkType chunk0;
chunk0.setName(OID::gen());
@@ -352,8 +357,10 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
setupShards({shard0, shard1});
int origMajorVersion = 12;
- auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
- auto const otherVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
+ auto const origVersion =
+ ChunkVersion(origMajorVersion, 7, OID::gen(), boost::none /* timestamp */);
+ auto const otherVersion =
+ ChunkVersion(origMajorVersion, 7, OID::gen(), boost::none /* timestamp */);
ChunkType chunk0;
chunk0.setName(OID::gen());
@@ -406,7 +413,8 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
setupShards({shard0, shard1});
int origMajorVersion = 12;
- auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
+ auto const origVersion =
+ ChunkVersion(origMajorVersion, 7, OID::gen(), boost::none /* timestamp */);
ChunkType chunk0;
chunk0.setName(OID::gen());
@@ -458,7 +466,8 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
setupShards({shard0, shard1});
int origMajorVersion = 12;
- auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
+ auto const origVersion =
+ ChunkVersion(origMajorVersion, 7, OID::gen(), boost::none /* timestamp */);
ChunkType chunk0;
chunk0.setName(OID::gen());
@@ -504,7 +513,7 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
BSONObj versions = resultBSON.getValue();
auto mver = ChunkVersion::parseWithField(versions, "shardVersion");
ASSERT_OK(mver.getStatus());
- ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch()), mver.getValue());
+ ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch(), origVersion.getTimestamp()), mver.getValue());
// Verify the chunks ended up in the right shards.
auto chunkDoc0 = uassertStatusOK(getChunkDoc(operationContext(), chunkMin));
@@ -534,7 +543,7 @@ TEST_F(CommitChunkMigrate, RejectMissingChunkVersion) {
setupShards({shard0, shard1});
- ChunkVersion origVersion(12, 7, OID::gen());
+ ChunkVersion origVersion(12, 7, OID::gen(), boost::none /* timestamp */);
// Create migrate chunk with no chunk version set.
ChunkType migratedChunk;
@@ -581,7 +590,7 @@ TEST_F(CommitChunkMigrate, RejectOlderChunkVersion) {
setupShards({shard0, shard1});
auto epoch = OID::gen();
- ChunkVersion origVersion(12, 7, epoch);
+ ChunkVersion origVersion(12, 7, epoch, boost::none /* timestamp */);
ChunkType migratedChunk;
migratedChunk.setName(OID::gen());
@@ -592,7 +601,7 @@ TEST_F(CommitChunkMigrate, RejectOlderChunkVersion) {
migratedChunk.setMin(BSON("a" << 1));
migratedChunk.setMax(BSON("a" << 10));
- ChunkVersion currentChunkVersion(14, 7, epoch);
+ ChunkVersion currentChunkVersion(14, 7, epoch, boost::none /* timestamp */);
ChunkType currentChunk;
currentChunk.setName(OID::gen());
@@ -630,7 +639,7 @@ TEST_F(CommitChunkMigrate, RejectMismatchedEpoch) {
setupShards({shard0, shard1});
- ChunkVersion origVersion(12, 7, OID::gen());
+ ChunkVersion origVersion(12, 7, OID::gen(), boost::none /* timestamp */);
ChunkType migratedChunk;
migratedChunk.setName(OID::gen());
@@ -641,7 +650,7 @@ TEST_F(CommitChunkMigrate, RejectMismatchedEpoch) {
migratedChunk.setMin(BSON("a" << 1));
migratedChunk.setMax(BSON("a" << 10));
- ChunkVersion currentChunkVersion(12, 7, OID::gen());
+ ChunkVersion currentChunkVersion(12, 7, OID::gen(), boost::none /* timestamp */);
ChunkType currentChunk;
currentChunk.setName(OID::gen());
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
index f2f308811d3..5795eb04d72 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
@@ -81,11 +81,12 @@ void assertChunkVersionWasBumpedTo(const ChunkType& chunkTypeBefore,
}
TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunksFoundFoundReturnsSuccess) {
- const auto requestedChunkType = generateChunkType(kNss,
- ChunkVersion(10, 2, OID::gen()),
- ShardId("shard0000"),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(kNss,
+ ChunkVersion(10, 2, OID::gen(), boost::none /* timestamp */),
+ ShardId("shard0000"),
+ BSON("a" << 1),
+ BSON("a" << 10));
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
@@ -95,15 +96,16 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunksFoundFoundReturnsSuccess)
}
TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingEpochFoundReturnsSuccess) {
- const auto requestedChunkType = generateChunkType(kNss,
- ChunkVersion(10, 2, OID::gen()),
- ShardId("shard0000"),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(kNss,
+ ChunkVersion(10, 2, OID::gen(), boost::none /* timestamp */),
+ ShardId("shard0000"),
+ BSON("a" << 1),
+ BSON("a" << 10));
ChunkType existingChunkType = requestedChunkType;
// Epoch is different.
- existingChunkType.setVersion(ChunkVersion(10, 2, OID::gen()));
+ existingChunkType.setVersion(ChunkVersion(10, 2, OID::gen(), boost::none /* timestamp */));
setupCollection(kNss, kKeyPattern, {existingChunkType});
ShardingCatalogManager::get(operationContext())
@@ -117,11 +119,12 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingEpochFoundRetur
}
TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMinKeyFoundReturnsSuccess) {
- const auto requestedChunkType = generateChunkType(kNss,
- ChunkVersion(10, 2, OID::gen()),
- ShardId("shard0000"),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(kNss,
+ ChunkVersion(10, 2, OID::gen(), boost::none /* timestamp */),
+ ShardId("shard0000"),
+ BSON("a" << 1),
+ BSON("a" << 10));
ChunkType existingChunkType = requestedChunkType;
// Min key is different.
@@ -139,11 +142,12 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMinKeyFoundRetu
}
TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMaxKeyFoundReturnsSuccess) {
- const auto requestedChunkType = generateChunkType(kNss,
- ChunkVersion(10, 2, OID::gen()),
- ShardId("shard0000"),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(kNss,
+ ChunkVersion(10, 2, OID::gen(), boost::none /* timestamp */),
+ ShardId("shard0000"),
+ BSON("a" << 1),
+ BSON("a" << 10));
ChunkType existingChunkType = requestedChunkType;
// Max key is different.
@@ -163,12 +167,20 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMaxKeyFoundRetu
TEST_F(EnsureChunkVersionIsGreaterThanTest,
IfChunkMatchingRequestedChunkFoundBumpsChunkVersionAndReturnsSuccess) {
const auto epoch = OID::gen();
- const auto requestedChunkType = generateChunkType(
- kNss, ChunkVersion(10, 2, epoch), ShardId("shard0000"), BSON("a" << 1), BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(kNss,
+ ChunkVersion(10, 2, epoch, boost::none /* timestamp */),
+ ShardId("shard0000"),
+ BSON("a" << 1),
+ BSON("a" << 10));
const auto existingChunkType = requestedChunkType;
- const auto highestChunkType = generateChunkType(
- kNss, ChunkVersion(20, 3, epoch), ShardId("shard0001"), BSON("a" << 11), BSON("a" << 20));
+ const auto highestChunkType =
+ generateChunkType(kNss,
+ ChunkVersion(20, 3, epoch, boost::none /* timestamp */),
+ ShardId("shard0001"),
+ BSON("a" << 11),
+ BSON("a" << 20));
setupCollection(kNss, kKeyPattern, {existingChunkType, highestChunkType});
ShardingCatalogManager::get(operationContext())
@@ -177,21 +189,27 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest,
requestedChunkType.getMax(),
requestedChunkType.getVersion());
- assertChunkVersionWasBumpedTo(
- existingChunkType,
- getChunkDoc(operationContext(), existingChunkType.getMin()),
- ChunkVersion(highestChunkType.getVersion().majorVersion() + 1, 0, epoch));
+ assertChunkVersionWasBumpedTo(existingChunkType,
+ getChunkDoc(operationContext(), existingChunkType.getMin()),
+ ChunkVersion(highestChunkType.getVersion().majorVersion() + 1,
+ 0,
+ epoch,
+ boost::none /* timestamp */));
}
TEST_F(
EnsureChunkVersionIsGreaterThanTest,
IfChunkMatchingRequestedChunkFoundAndHasHigherChunkVersionReturnsSuccessWithoutBumpingChunkVersion) {
const auto epoch = OID::gen();
- const auto requestedChunkType = generateChunkType(
- kNss, ChunkVersion(10, 2, epoch), ShardId("shard0000"), BSON("a" << 1), BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(kNss,
+ ChunkVersion(10, 2, epoch, boost::none /* timestamp */),
+ ShardId("shard0000"),
+ BSON("a" << 1),
+ BSON("a" << 10));
ChunkType existingChunkType = requestedChunkType;
- existingChunkType.setVersion(ChunkVersion(11, 1, epoch));
+ existingChunkType.setVersion(ChunkVersion(11, 1, epoch, boost::none /* timestamp */));
setupCollection(kNss, kKeyPattern, {existingChunkType});
ShardingCatalogManager::get(operationContext())
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
index 269bb6e6253..50f44d531b7 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
@@ -51,7 +51,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
chunk.setName(OID::gen());
chunk.setNS(kNamespace);
- auto origVersion = ChunkVersion(1, 0, OID::gen());
+ auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
chunk.setVersion(origVersion);
chunk.setShard(ShardId("shard0000"));
@@ -90,8 +90,10 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
ASSERT_EQ(collVersion, shardVersion);
// Check for increment on mergedChunk's minor version
- auto expectedShardVersion = ChunkVersion(
- origVersion.majorVersion(), origVersion.minorVersion() + 1, origVersion.epoch());
+ auto expectedShardVersion = ChunkVersion(origVersion.majorVersion(),
+ origVersion.minorVersion() + 1,
+ origVersion.epoch(),
+ origVersion.getTimestamp());
ASSERT_EQ(expectedShardVersion, shardVersion);
auto findResponse = uassertStatusOK(
@@ -126,7 +128,7 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
chunk.setName(OID::gen());
chunk.setNS(kNamespace);
- auto origVersion = ChunkVersion(1, 0, OID::gen());
+ auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
chunk.setVersion(origVersion);
chunk.setShard(ShardId("shard0000"));
@@ -203,7 +205,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
otherChunk.setNS(kNamespace);
auto collEpoch = OID::gen();
- auto origVersion = ChunkVersion(1, 2, collEpoch);
+ auto origVersion = ChunkVersion(1, 2, collEpoch, boost::none /* timestamp */);
chunk.setVersion(origVersion);
chunk.setShard(ShardId("shard0000"));
@@ -225,7 +227,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
// Set up other chunk with competing version
- auto competingVersion = ChunkVersion(2, 1, collEpoch);
+ auto competingVersion = ChunkVersion(2, 1, collEpoch, boost::none /* timestamp */);
otherChunk.setVersion(competingVersion);
otherChunk.setShard(ShardId("shard0000"));
otherChunk.setMin(BSON("a" << 10));
@@ -278,7 +280,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
chunk.setName(OID::gen());
chunk.setNS(kNamespace);
- auto origVersion = ChunkVersion(1, 2, OID::gen());
+ auto origVersion = ChunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
chunk.setVersion(origVersion);
chunk.setShard(ShardId("shard0000"));
@@ -352,7 +354,7 @@ TEST_F(MergeChunkTest, NonExistingNamespace) {
ChunkType chunk;
chunk.setNS(kNamespace);
- auto origVersion = ChunkVersion(1, 0, OID::gen());
+ auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
chunk.setVersion(origVersion);
chunk.setShard(ShardId("shard0000"));
@@ -389,7 +391,7 @@ TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
ChunkType chunk;
chunk.setNS(kNamespace);
- auto origVersion = ChunkVersion(1, 0, OID::gen());
+ auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
chunk.setVersion(origVersion);
chunk.setShard(ShardId("shard0000"));
@@ -427,7 +429,7 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) {
chunk.setName(OID::gen());
chunk.setNS(kNamespace);
- auto origVersion = ChunkVersion(1, 0, OID::gen());
+ auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
chunk.setVersion(origVersion);
chunk.setShard(ShardId("shard0000"));
@@ -492,7 +494,7 @@ TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFails) {
{
std::vector<ChunkType> originalChunks;
- ChunkVersion version = ChunkVersion(1, 0, epoch);
+ ChunkVersion version = ChunkVersion(1, 0, epoch, boost::none /* timestamp */);
ChunkType chunk;
chunk.setName(OID::gen());
@@ -535,7 +537,7 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) {
chunk1.setName(OID::gen());
chunk1.setNS(kNamespace);
- auto origVersion = ChunkVersion(1, 0, OID::gen());
+ auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
chunk1.setVersion(origVersion);
chunk1.setShard(ShardId("shard0000"));
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
index f8f73c0c7c7..ac1de7140e0 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
@@ -201,15 +201,15 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingChunksRemaining) {
auto epoch = OID::gen();
ChunkType chunk1(NamespaceString("testDB.testColl"),
ChunkRange(BSON("_id" << 0), BSON("_id" << 20)),
- ChunkVersion(1, 1, epoch),
+ ChunkVersion(1, 1, epoch, boost::none /* timestamp */),
shard1.getName());
ChunkType chunk2(NamespaceString("testDB.testColl"),
ChunkRange(BSON("_id" << 21), BSON("_id" << 50)),
- ChunkVersion(1, 2, epoch),
+ ChunkVersion(1, 2, epoch, boost::none /* timestamp */),
shard1.getName());
ChunkType chunk3(NamespaceString("testDB.testColl"),
ChunkRange(BSON("_id" << 51), BSON("_id" << 1000)),
- ChunkVersion(1, 3, epoch),
+ ChunkVersion(1, 3, epoch, boost::none /* timestamp */),
shard1.getName());
chunk3.setJumbo(true);
@@ -286,15 +286,15 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
auto epoch = OID::gen();
ChunkType chunk1(NamespaceString("testDB.testColl"),
ChunkRange(BSON("_id" << 0), BSON("_id" << 20)),
- ChunkVersion(1, 1, epoch),
+ ChunkVersion(1, 1, epoch, boost::none /* timestamp */),
shard1.getName());
ChunkType chunk2(NamespaceString("testDB.testColl"),
ChunkRange(BSON("_id" << 21), BSON("_id" << 50)),
- ChunkVersion(1, 2, epoch),
+ ChunkVersion(1, 2, epoch, boost::none /* timestamp */),
shard1.getName());
ChunkType chunk3(NamespaceString("testDB.testColl"),
ChunkRange(BSON("_id" << 51), BSON("_id" << 1000)),
- ChunkVersion(1, 3, epoch),
+ ChunkVersion(1, 3, epoch, boost::none /* timestamp */),
shard1.getName());
std::vector<ChunkType> chunks{chunk1, chunk2, chunk3};
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
index 24281e68389..dfe32cca0ac 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
@@ -49,7 +49,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
chunk.setName(OID::gen());
chunk.setNS(kNamespace);
- auto origVersion = ChunkVersion(1, 0, OID::gen());
+ auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
chunk.setVersion(origVersion);
chunk.setShard(ShardId("shard0000"));
@@ -79,8 +79,10 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
ASSERT_EQ(collVersion, shardVersion);
// Check for increment on mergedChunk's minor version
- auto expectedShardVersion = ChunkVersion(
- origVersion.majorVersion(), origVersion.minorVersion() + 2, origVersion.epoch());
+ auto expectedShardVersion = ChunkVersion(origVersion.majorVersion(),
+ origVersion.minorVersion() + 2,
+ origVersion.epoch(),
+ origVersion.getTimestamp());
ASSERT_EQ(expectedShardVersion, shardVersion);
ASSERT_EQ(shardVersion, collVersion);
@@ -121,7 +123,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
chunk.setName(OID::gen());
chunk.setNS(kNamespace);
- auto origVersion = ChunkVersion(1, 0, OID::gen());
+ auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
chunk.setVersion(origVersion);
chunk.setShard(ShardId("shard0000"));
@@ -202,7 +204,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
auto collEpoch = OID::gen();
// set up first chunk
- auto origVersion = ChunkVersion(1, 2, collEpoch);
+ auto origVersion = ChunkVersion(1, 2, collEpoch, boost::none /* timestamp */);
chunk.setVersion(origVersion);
chunk.setShard(ShardId("shard0000"));
@@ -216,7 +218,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
splitPoints.push_back(chunkSplitPoint);
// set up second chunk (chunk2)
- auto competingVersion = ChunkVersion(2, 1, collEpoch);
+ auto competingVersion = ChunkVersion(2, 1, collEpoch, boost::none /* timestamp */);
chunk2.setVersion(competingVersion);
chunk2.setShard(ShardId("shard0000"));
chunk2.setMin(BSON("a" << 10));
@@ -260,7 +262,7 @@ TEST_F(SplitChunkTest, PreConditionFailErrors) {
chunk.setName(OID::gen());
chunk.setNS(kNamespace);
- auto origVersion = ChunkVersion(1, 0, OID::gen());
+ auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
chunk.setVersion(origVersion);
chunk.setShard(ShardId("shard0000"));
@@ -289,7 +291,7 @@ TEST_F(SplitChunkTest, NonExisingNamespaceErrors) {
ChunkType chunk;
chunk.setNS(kNamespace);
- auto origVersion = ChunkVersion(1, 0, OID::gen());
+ auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
chunk.setVersion(origVersion);
chunk.setShard(ShardId("shard0000"));
@@ -316,7 +318,7 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
ChunkType chunk;
chunk.setNS(kNamespace);
- auto origVersion = ChunkVersion(1, 0, OID::gen());
+ auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
chunk.setVersion(origVersion);
chunk.setShard(ShardId("shard0000"));
@@ -344,7 +346,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) {
chunk.setName(OID::gen());
chunk.setNS(kNamespace);
- auto origVersion = ChunkVersion(1, 0, OID::gen());
+ auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
chunk.setVersion(origVersion);
chunk.setShard(ShardId("shard0000"));
@@ -371,7 +373,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) {
ChunkType chunk;
chunk.setNS(kNamespace);
- auto origVersion = ChunkVersion(1, 0, OID::gen());
+ auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
chunk.setVersion(origVersion);
chunk.setShard(ShardId("shard0000"));
@@ -399,7 +401,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) {
chunk.setName(OID::gen());
chunk.setNS(kNamespace);
- auto origVersion = ChunkVersion(1, 0, OID::gen());
+ auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
chunk.setVersion(origVersion);
chunk.setShard(ShardId("shard0000"));
@@ -426,7 +428,7 @@ TEST_F(SplitChunkTest, SplitPointsWithDollarPrefixShouldFail) {
ChunkType chunk;
chunk.setNS(kNamespace);
- auto origVersion = ChunkVersion(1, 0, OID::gen());
+ auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
chunk.setVersion(origVersion);
chunk.setShard(ShardId("shard0000"));
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index a2391f2631f..106172047ab 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -85,9 +85,11 @@ protected:
nullptr,
false,
epoch,
+ boost::none /* timestamp */,
boost::none,
true,
- {ChunkType{kNss, range, ChunkVersion(1, 0, epoch), kOtherShard}});
+ {ChunkType{
+ kNss, range, ChunkVersion(1, 0, epoch, boost::none /* timestamp */), kOtherShard}});
return CollectionMetadata(ChunkManager(kThisShard,
DatabaseVersion(UUID::gen()),
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
index 40d63f60f3d..80380de60f8 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
@@ -154,11 +154,12 @@ protected:
nullptr,
false,
epoch,
+ boost::none /* timestamp */,
boost::none,
true,
{ChunkType{kNss,
ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)},
- ChunkVersion(1, 0, epoch),
+ ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
ShardId("dummyShardId")}});
AutoGetDb autoDb(operationContext(), kNss.db(), MODE_IX);
@@ -187,7 +188,7 @@ protected:
MoveChunkRequest::appendAsCommand(
&cmdBuilder,
kNss,
- ChunkVersion(1, 0, OID::gen()),
+ ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */),
kConfigConnStr,
kDonorConnStr.getSetName(),
kRecipientConnStr.getSetName(),
diff --git a/src/mongo/db/s/migration_util_test.cpp b/src/mongo/db/s/migration_util_test.cpp
index 6f329a120e4..35838eb6953 100644
--- a/src/mongo/db/s/migration_util_test.cpp
+++ b/src/mongo/db/s/migration_util_test.cpp
@@ -547,7 +547,7 @@ TEST_F(SubmitRangeDeletionTaskTest, SucceedsIfFilteringMetadataUUIDMatchesTaskUU
_mockCatalogCacheLoader->setDatabaseRefreshReturnValue(kDefaultDatabaseType);
_mockCatalogCacheLoader->setCollectionRefreshReturnValue(coll);
_mockCatalogCacheLoader->setChunkRefreshReturnValue(
- makeChangedChunks(ChunkVersion(1, 0, kEpoch)));
+ makeChangedChunks(ChunkVersion(1, 0, kEpoch, boost::none /* timestamp */)));
_mockCatalogClient->setCollections({coll});
forceShardFilteringMetadataRefresh(opCtx, kNss);
@@ -575,7 +575,7 @@ TEST_F(
_mockCatalogCacheLoader->setDatabaseRefreshReturnValue(kDefaultDatabaseType);
_mockCatalogCacheLoader->setCollectionRefreshReturnValue(coll);
_mockCatalogCacheLoader->setChunkRefreshReturnValue(
- makeChangedChunks(ChunkVersion(1, 0, kEpoch)));
+ makeChangedChunks(ChunkVersion(1, 0, kEpoch, boost::none /* timestamp */)));
_mockCatalogClient->setCollections({coll});
// The task should have been submitted successfully.
@@ -607,7 +607,7 @@ TEST_F(SubmitRangeDeletionTaskTest,
auto matchingColl = makeCollectionType(collectionUUID, kEpoch);
_mockCatalogCacheLoader->setCollectionRefreshReturnValue(matchingColl);
_mockCatalogCacheLoader->setChunkRefreshReturnValue(
- makeChangedChunks(ChunkVersion(10, 0, kEpoch)));
+ makeChangedChunks(ChunkVersion(10, 0, kEpoch, boost::none /* timestamp */)));
_mockCatalogClient->setCollections({matchingColl});
// The task should have been submitted successfully.
@@ -627,7 +627,7 @@ TEST_F(SubmitRangeDeletionTaskTest,
_mockCatalogCacheLoader->setDatabaseRefreshReturnValue(kDefaultDatabaseType);
_mockCatalogCacheLoader->setCollectionRefreshReturnValue(staleColl);
_mockCatalogCacheLoader->setChunkRefreshReturnValue(
- makeChangedChunks(ChunkVersion(1, 0, staleEpoch)));
+ makeChangedChunks(ChunkVersion(1, 0, staleEpoch, boost::none /* timestamp */)));
_mockCatalogClient->setCollections({staleColl});
forceShardFilteringMetadataRefresh(opCtx, kNss);
@@ -644,7 +644,7 @@ TEST_F(SubmitRangeDeletionTaskTest,
auto matchingColl = makeCollectionType(collectionUUID, kEpoch);
_mockCatalogCacheLoader->setCollectionRefreshReturnValue(matchingColl);
_mockCatalogCacheLoader->setChunkRefreshReturnValue(
- makeChangedChunks(ChunkVersion(10, 0, kEpoch)));
+ makeChangedChunks(ChunkVersion(10, 0, kEpoch, boost::none /* timestamp */)));
_mockCatalogClient->setCollections({matchingColl});
// The task should have been submitted successfully.
@@ -670,7 +670,7 @@ TEST_F(SubmitRangeDeletionTaskTest,
_mockCatalogCacheLoader->setDatabaseRefreshReturnValue(kDefaultDatabaseType);
_mockCatalogCacheLoader->setCollectionRefreshReturnValue(otherColl);
_mockCatalogCacheLoader->setChunkRefreshReturnValue(
- makeChangedChunks(ChunkVersion(1, 0, otherEpoch)));
+ makeChangedChunks(ChunkVersion(1, 0, otherEpoch, boost::none /* timestamp */)));
_mockCatalogClient->setCollections({otherColl});
// The task should not have been submitted, and the task's entry should have been removed from
diff --git a/src/mongo/db/s/op_observer_sharding_test.cpp b/src/mongo/db/s/op_observer_sharding_test.cpp
index bd4d1c03d7e..2e88a2de050 100644
--- a/src/mongo/db/s/op_observer_sharding_test.cpp
+++ b/src/mongo/db/s/op_observer_sharding_test.cpp
@@ -64,14 +64,17 @@ protected:
static CollectionMetadata makeAMetadata(BSONObj const& keyPattern) {
const OID epoch = OID::gen();
auto range = ChunkRange(BSON("key" << MINKEY), BSON("key" << MAXKEY));
- auto chunk =
- ChunkType(kTestNss, std::move(range), ChunkVersion(1, 0, epoch), ShardId("other"));
+ auto chunk = ChunkType(kTestNss,
+ std::move(range),
+ ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
+ ShardId("other"));
auto rt = RoutingTableHistory::makeNew(kTestNss,
UUID::gen(),
KeyPattern(keyPattern),
nullptr,
false,
epoch,
+ boost::none /* timestamp */,
boost::none,
true,
{std::move(chunk)});
diff --git a/src/mongo/db/s/range_deletion_util_test.cpp b/src/mongo/db/s/range_deletion_util_test.cpp
index a3d2cdc568e..12b5e11546d 100644
--- a/src/mongo/db/s/range_deletion_util_test.cpp
+++ b/src/mongo/db/s/range_deletion_util_test.cpp
@@ -96,11 +96,12 @@ public:
nullptr,
false,
epoch,
+ boost::none /* timestamp */,
boost::none,
true,
{ChunkType{kNss,
ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)},
- ChunkVersion(1, 0, epoch),
+ ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
ShardId("dummyShardId")}});
AutoGetDb autoDb(operationContext(), kNss.db(), MODE_IX);
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
index 0b8eda09f7d..7484e570aac 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
@@ -157,7 +157,7 @@ protected:
_newShardKey.isShardKey(shardKey.toBSON()) ? _newChunkRanges : _oldChunkRanges;
// Create two chunks, one on each shard with the given namespace and epoch
- ChunkVersion version(1, 0, epoch);
+ ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
ChunkType chunk1(nss, chunkRanges[0], version, ShardId("shard0000"));
chunk1.setName(ids[0]);
ChunkType chunk2(nss, chunkRanges[1], version, ShardId("shard0001"));
@@ -201,7 +201,10 @@ protected:
client.insert(CollectionType::ConfigNS.ns(), originalNssCatalogEntry.toBSON());
auto tempNssCatalogEntry = resharding::createTempReshardingCollectionType(
- opCtx, coordinatorDoc, ChunkVersion(1, 1, OID::gen()), BSONObj());
+ opCtx,
+ coordinatorDoc,
+ ChunkVersion(1, 1, OID::gen(), boost::none /* timestamp */),
+ BSONObj());
client.insert(CollectionType::ConfigNS.ns(), tempNssCatalogEntry.toBSON());
return coordinatorDoc;
@@ -440,7 +443,10 @@ protected:
if (expectedCoordinatorDoc.getState() < CoordinatorStateEnum::kCommitted ||
expectedCoordinatorDoc.getState() == CoordinatorStateEnum::kError) {
tempCollType = resharding::createTempReshardingCollectionType(
- opCtx, expectedCoordinatorDoc, ChunkVersion(1, 1, OID::gen()), BSONObj());
+ opCtx,
+ expectedCoordinatorDoc,
+ ChunkVersion(1, 1, OID::gen(), boost::none /* timestamp */),
+ BSONObj());
}
readTemporaryCollectionCatalogEntryAndAssertReshardingFieldsMatchExpected(opCtx,
diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
index fdb30128ea3..3894b722254 100644
--- a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
@@ -94,20 +94,24 @@ protected:
const UUID& uuid,
const OID& epoch) {
auto range = ChunkRange(BSON(shardKey << MINKEY), BSON(shardKey << MAXKEY));
- auto chunk = ChunkType(nss, std::move(range), ChunkVersion(1, 0, epoch), kShardTwo);
- ChunkManager cm(
- kShardOne,
- DatabaseVersion(uuid),
- makeStandaloneRoutingTableHistory(RoutingTableHistory::makeNew(nss,
- uuid,
- shardKeyPattern,
- nullptr,
- false,
- epoch,
- boost::none,
- true,
- {std::move(chunk)})),
- boost::none);
+ auto chunk = ChunkType(nss,
+ std::move(range),
+ ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
+ kShardTwo);
+ ChunkManager cm(kShardOne,
+ DatabaseVersion(uuid),
+ makeStandaloneRoutingTableHistory(
+ RoutingTableHistory::makeNew(nss,
+ uuid,
+ shardKeyPattern,
+ nullptr,
+ false,
+ epoch,
+ boost::none /* timestamp */,
+ boost::none,
+ true,
+ {std::move(chunk)})),
+ boost::none);
if (!OperationShardingState::isOperationVersioned(opCtx)) {
const auto version = cm.getVersion(kShardOne);
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
index 579ba2cfdd5..341e29343f5 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
@@ -148,11 +148,11 @@ public:
std::vector<ChunkType> chunks = {
ChunkType{kCrudNs,
ChunkRange{BSON(kOriginalShardKey << MINKEY), BSON(kOriginalShardKey << 0)},
- ChunkVersion(1, 0, epoch),
+ ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
kOtherShardId},
ChunkType{kCrudNs,
ChunkRange{BSON(kOriginalShardKey << 0), BSON(kOriginalShardKey << MAXKEY)},
- ChunkVersion(1, 0, epoch),
+ ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
_sourceId.getShardId()}};
auto rt = RoutingTableHistory::makeNew(kCrudNs,
@@ -161,6 +161,7 @@ public:
nullptr,
false,
epoch,
+ boost::none /* timestamp */,
boost::none,
false,
chunks);
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
index b85dd36f1d2..beabdd98639 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
@@ -154,7 +154,7 @@ public:
return std::vector<BSONObj>{coll.toBSON()};
}());
expectFindSendBSONObjVector(kConfigHostAndPort, [&]() {
- ChunkVersion version(1, 0, epoch);
+ ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
ChunkType chunk(tempNss,
{skey.getKeyPattern().globalMin(), skey.getKeyPattern().globalMax()},
diff --git a/src/mongo/db/s/resharding_destined_recipient_test.cpp b/src/mongo/db/s/resharding_destined_recipient_test.cpp
index ab9a81c876f..78108531fbb 100644
--- a/src/mongo/db/s/resharding_destined_recipient_test.cpp
+++ b/src/mongo/db/s/resharding_destined_recipient_test.cpp
@@ -157,10 +157,16 @@ public:
protected:
std::vector<ChunkType> createChunks(const OID& epoch, const std::string& shardKey) {
auto range1 = ChunkRange(BSON(shardKey << MINKEY), BSON(shardKey << 5));
- ChunkType chunk1(kNss, range1, ChunkVersion(1, 0, epoch), kShardList[0].getName());
+ ChunkType chunk1(kNss,
+ range1,
+ ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
+ kShardList[0].getName());
auto range2 = ChunkRange(BSON(shardKey << 5), BSON(shardKey << MAXKEY));
- ChunkType chunk2(kNss, range2, ChunkVersion(1, 0, epoch), kShardList[1].getName());
+ ChunkType chunk2(kNss,
+ range2,
+ ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
+ kShardList[1].getName());
return {chunk1, chunk2};
}
@@ -182,7 +188,7 @@ protected:
ReshardingEnv env(CollectionCatalog::get(opCtx)->lookupUUIDByNSS(opCtx, kNss).value());
env.destShard = kShardList[1].getName();
- env.version = ChunkVersion(1, 0, OID::gen());
+ env.version = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
env.dbVersion = DatabaseVersion(UUID::gen());
env.tempNss =
diff --git a/src/mongo/db/s/shard_metadata_util_test.cpp b/src/mongo/db/s/shard_metadata_util_test.cpp
index f3512d077bf..d20ba3be849 100644
--- a/src/mongo/db/s/shard_metadata_util_test.cpp
+++ b/src/mongo/db/s/shard_metadata_util_test.cpp
@@ -102,8 +102,8 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
<< ChunkType::max(maxs[i]) << ChunkType::shard(kShardId.toString())
<< ChunkType::lastmod(Date_t::fromMillisSinceEpoch(maxCollVersion.toLong())));
- chunks.push_back(
- assertGet(ChunkType::fromShardBSON(shardChunk, maxCollVersion.epoch())));
+ chunks.push_back(assertGet(ChunkType::fromShardBSON(
+ shardChunk, maxCollVersion.epoch(), maxCollVersion.getTimestamp())));
}
return chunks;
@@ -148,8 +148,8 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
ASSERT(cursor->more());
BSONObj queryResult = cursor->nextSafe();
- ChunkType foundChunk =
- assertGet(ChunkType::fromShardBSON(queryResult, chunk.getVersion().epoch()));
+ ChunkType foundChunk = assertGet(ChunkType::fromShardBSON(
+ queryResult, chunk.getVersion().epoch(), chunk.getVersion().getTimestamp()));
ASSERT_BSONOBJ_EQ(chunk.getMin(), foundChunk.getMin());
ASSERT_BSONOBJ_EQ(chunk.getMax(), foundChunk.getMax());
ASSERT_EQUALS(chunk.getShard(), foundChunk.getShard());
@@ -160,7 +160,7 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
}
}
- ChunkVersion maxCollVersion{0, 0, OID::gen()};
+ ChunkVersion maxCollVersion{0, 0, OID::gen(), boost::none /* timestamp */};
const KeyPattern keyPattern{BSON("a" << 1)};
const BSONObj defaultCollation{BSON("locale"
<< "fr_CA")};
@@ -214,7 +214,8 @@ TEST_F(ShardMetadataUtilTest, PersistedRefreshSignalStartAndFinish) {
ASSERT_EQUALS(state.epoch, maxCollVersion.epoch());
ASSERT_EQUALS(state.refreshing, true);
- ASSERT_EQUALS(state.lastRefreshedCollectionVersion, ChunkVersion(0, 0, maxCollVersion.epoch()));
+ ASSERT_EQUALS(state.lastRefreshedCollectionVersion,
+ ChunkVersion(0, 0, maxCollVersion.epoch(), maxCollVersion.getTimestamp()));
// Signal refresh finish
ASSERT_OK(unsetPersistedRefreshFlags(operationContext(), kNss, maxCollVersion));
@@ -232,14 +233,15 @@ TEST_F(ShardMetadataUtilTest, WriteAndReadChunks) {
checkChunks(kChunkMetadataNss, chunks);
// read all the chunks
- QueryAndSort allChunkDiff =
- createShardChunkDiffQuery(ChunkVersion(0, 0, maxCollVersion.epoch()));
+ QueryAndSort allChunkDiff = createShardChunkDiffQuery(
+ ChunkVersion(0, 0, maxCollVersion.epoch(), boost::none /* timestamp */));
std::vector<ChunkType> readChunks = assertGet(readShardChunks(operationContext(),
kNss,
allChunkDiff.query,
allChunkDiff.sort,
boost::none,
- maxCollVersion.epoch()));
+ maxCollVersion.epoch(),
+ maxCollVersion.getTimestamp()));
for (auto chunkIt = chunks.begin(), readChunkIt = readChunks.begin();
chunkIt != chunks.end() && readChunkIt != readChunks.end();
++chunkIt, ++readChunkIt) {
@@ -253,7 +255,8 @@ TEST_F(ShardMetadataUtilTest, WriteAndReadChunks) {
oneChunkDiff.query,
oneChunkDiff.sort,
boost::none,
- maxCollVersion.epoch()));
+ maxCollVersion.epoch(),
+ maxCollVersion.getTimestamp()));
ASSERT(readChunks.size() == 1);
ASSERT_BSONOBJ_EQ(chunks.back().toShardBSON(), readChunks.front().toShardBSON());
@@ -283,8 +286,8 @@ TEST_F(ShardMetadataUtilTest, UpdateWithWriteNewChunks) {
}
splitChunkOneBuilder.append(ChunkType::shard(), lastChunk.getShard().toString());
collVersion.appendLegacyWithField(&splitChunkOneBuilder, ChunkType::lastmod());
- ChunkType splitChunkOne =
- assertGet(ChunkType::fromShardBSON(splitChunkOneBuilder.obj(), collVersion.epoch()));
+ ChunkType splitChunkOne = assertGet(ChunkType::fromShardBSON(
+ splitChunkOneBuilder.obj(), collVersion.epoch(), collVersion.getTimestamp()));
newChunks.push_back(splitChunkOne);
collVersion.incMajor(); // chunk split and moved
@@ -297,8 +300,8 @@ TEST_F(ShardMetadataUtilTest, UpdateWithWriteNewChunks) {
splitChunkTwoMovedBuilder.append(ChunkType::max(), lastChunk.getMax());
splitChunkTwoMovedBuilder.append(ChunkType::shard(), "altShard");
collVersion.appendLegacyWithField(&splitChunkTwoMovedBuilder, ChunkType::lastmod());
- ChunkType splitChunkTwoMoved =
- assertGet(ChunkType::fromShardBSON(splitChunkTwoMovedBuilder.obj(), collVersion.epoch()));
+ ChunkType splitChunkTwoMoved = assertGet(ChunkType::fromShardBSON(
+ splitChunkTwoMovedBuilder.obj(), collVersion.epoch(), collVersion.getTimestamp()));
newChunks.push_back(splitChunkTwoMoved);
collVersion.incMinor(); // bump control chunk version
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
index 444093c8078..fbe67ed204f 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
@@ -193,7 +193,7 @@ CollectionType ShardServerCatalogCacheLoaderTest::makeCollectionType(
}
vector<ChunkType> ShardServerCatalogCacheLoaderTest::setUpChunkLoaderWithFiveChunks() {
- ChunkVersion collectionVersion(1, 0, OID::gen());
+ ChunkVersion collectionVersion(1, 0, OID::gen(), boost::none /* timestamp */);
CollectionType collectionType = makeCollectionType(collectionVersion);
vector<ChunkType> chunks = makeFiveChunks(collectionVersion);
@@ -360,7 +360,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindNewEpoch)
// Then refresh again and find that the collection has been dropped and recreated.
- ChunkVersion collVersionWithNewEpoch(1, 0, OID::gen());
+ ChunkVersion collVersionWithNewEpoch(1, 0, OID::gen(), boost::none /* timestamp */);
CollectionType collectionTypeWithNewEpoch = makeCollectionType(collVersionWithNewEpoch);
vector<ChunkType> chunksWithNewEpoch = makeFiveChunks(collVersionWithNewEpoch);
_remoteLoaderMock->setCollectionRefreshReturnValue(collectionTypeWithNewEpoch);
@@ -389,7 +389,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindMixedChun
CollectionType originalCollectionType = makeCollectionType(chunks.back().getVersion());
- ChunkVersion collVersionWithNewEpoch(1, 0, OID::gen());
+ ChunkVersion collVersionWithNewEpoch(1, 0, OID::gen(), boost::none /* timestamp */);
CollectionType collectionTypeWithNewEpoch = makeCollectionType(collVersionWithNewEpoch);
vector<ChunkType> chunksWithNewEpoch = makeFiveChunks(collVersionWithNewEpoch);
vector<ChunkType> mixedChunks;
diff --git a/src/mongo/s/catalog/sharding_catalog_client_test.cpp b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
index 199abd55751..13227fcf77c 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
@@ -366,7 +366,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
chunkA.setNS(kNamespace);
chunkA.setMin(BSON("a" << 1));
chunkA.setMax(BSON("a" << 100));
- chunkA.setVersion({1, 2, oid});
+ chunkA.setVersion({1, 2, oid, boost::none /* timestamp */});
chunkA.setShard(ShardId("shard0000"));
ChunkType chunkB;
@@ -374,10 +374,10 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
chunkB.setNS(kNamespace);
chunkB.setMin(BSON("a" << 100));
chunkB.setMax(BSON("a" << 200));
- chunkB.setVersion({3, 4, oid});
+ chunkB.setVersion({3, 4, oid, boost::none /* timestamp */});
chunkB.setShard(ShardId("shard0001"));
- ChunkVersion queryChunkVersion({1, 2, oid});
+ ChunkVersion queryChunkVersion({1, 2, oid, boost::none /* timestamp */});
const BSONObj chunksQuery(
BSON(ChunkType::ns("TestDB.TestColl")
@@ -442,7 +442,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
TEST_F(ShardingCatalogClientTest, GetChunksForNSNoSortNoLimit) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
- ChunkVersion queryChunkVersion({1, 2, OID::gen()});
+ ChunkVersion queryChunkVersion({1, 2, OID::gen(), boost::none /* timestamp */});
const BSONObj chunksQuery(
BSON(ChunkType::ns("TestDB.TestColl")
@@ -487,7 +487,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSNoSortNoLimit) {
TEST_F(ShardingCatalogClientTest, GetChunksForNSInvalidChunk) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
- ChunkVersion queryChunkVersion({1, 2, OID::gen()});
+ ChunkVersion queryChunkVersion({1, 2, OID::gen(), boost::none /* timestamp */});
const BSONObj chunksQuery(
BSON(ChunkType::ns("TestDB.TestColl")
@@ -511,14 +511,14 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSInvalidChunk) {
chunkA.setNS(kNamespace);
chunkA.setMin(BSON("a" << 1));
chunkA.setMax(BSON("a" << 100));
- chunkA.setVersion({1, 2, OID::gen()});
+ chunkA.setVersion({1, 2, OID::gen(), boost::none /* timestamp */});
chunkA.setShard(ShardId("shard0000"));
ChunkType chunkB;
chunkB.setNS(kNamespace);
chunkB.setMin(BSON("a" << 100));
chunkB.setMax(BSON("a" << 200));
- chunkB.setVersion({3, 4, OID::gen()});
+ chunkB.setVersion({3, 4, OID::gen(), boost::none /* timestamp */});
// Missing shard id
return vector<BSONObj>{chunkA.toConfigBSON(), chunkB.toConfigBSON()};
@@ -1142,7 +1142,7 @@ TEST_F(ShardingCatalogClientTest, ApplyChunkOpsDeprecatedSuccessful) {
<< BSON("precondition2"
<< "second precondition"));
const NamespaceString nss("config.chunks");
- ChunkVersion lastChunkVersion(0, 0, OID());
+ ChunkVersion lastChunkVersion(0, 0, OID(), boost::none /* timestamp */);
auto future = launchAsync([this, updateOps, preCondition, nss, lastChunkVersion] {
auto status =
@@ -1186,7 +1186,7 @@ TEST_F(ShardingCatalogClientTest, ApplyChunkOpsDeprecatedSuccessfulWithCheck) {
<< BSON("precondition2"
<< "second precondition"));
const NamespaceString nss("config.chunks");
- ChunkVersion lastChunkVersion(0, 0, OID());
+ ChunkVersion lastChunkVersion(0, 0, OID(), boost::none /* timestamp */);
auto future = launchAsync([this, updateOps, preCondition, nss, lastChunkVersion] {
auto status =
@@ -1213,7 +1213,7 @@ TEST_F(ShardingCatalogClientTest, ApplyChunkOpsDeprecatedSuccessfulWithCheck) {
chunk.setNS(kNamespace);
chunk.setMin(BSON("a" << 1));
chunk.setMax(BSON("a" << 100));
- chunk.setVersion({1, 2, OID::gen()});
+ chunk.setVersion({1, 2, OID::gen(), boost::none /* timestamp */});
chunk.setShard(ShardId("shard0000"));
return vector<BSONObj>{chunk.toConfigBSON()};
});
@@ -1234,7 +1234,7 @@ TEST_F(ShardingCatalogClientTest, ApplyChunkOpsDeprecatedFailedWithCheck) {
<< BSON("precondition2"
<< "second precondition"));
const NamespaceString nss("config.chunks");
- ChunkVersion lastChunkVersion(0, 0, OID());
+ ChunkVersion lastChunkVersion(0, 0, OID(), boost::none /* timestamp */);
auto future = launchAsync([this, updateOps, preCondition, nss, lastChunkVersion] {
auto status =
diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp
index 42f7b43264d..090e21b50f5 100644
--- a/src/mongo/s/catalog/type_chunk_test.cpp
+++ b/src/mongo/s/catalog/type_chunk_test.cpp
@@ -47,7 +47,7 @@ const BSONObj kMax = BSON("a" << 20);
const ShardId kShard("shard0000");
TEST(ChunkType, MissingConfigRequiredFields) {
- ChunkVersion chunkVersion(1, 2, OID::gen());
+ ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
BSONObj objModNS =
BSON(ChunkType::name(OID::gen())
@@ -82,42 +82,44 @@ TEST(ChunkType, MissingConfigRequiredFields) {
TEST(ChunkType, MissingShardRequiredFields) {
const OID epoch = OID::gen();
- ChunkVersion chunkVersion(1, 2, epoch);
+ ChunkVersion chunkVersion(1, 2, epoch, boost::none /* timestamp */);
const auto lastmod = Timestamp(chunkVersion.toLong());
BSONObj objModMin =
BSON(ChunkType::max(kMax) << ChunkType::shard(kShard.toString()) << "lastmod" << lastmod);
- StatusWith<ChunkType> chunkRes = ChunkType::fromShardBSON(objModMin, epoch);
+ StatusWith<ChunkType> chunkRes =
+ ChunkType::fromShardBSON(objModMin, epoch, boost::none /* timestamp */);
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
ASSERT_STRING_CONTAINS(chunkRes.getStatus().reason(), ChunkType::minShardID.name());
BSONObj objModMax = BSON(ChunkType::minShardID(kMin)
<< ChunkType::shard(kShard.toString()) << "lastmod" << lastmod);
- chunkRes = ChunkType::fromShardBSON(objModMax, epoch);
+ chunkRes = ChunkType::fromShardBSON(objModMax, epoch, boost::none /* timestamp */);
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
ASSERT_STRING_CONTAINS(chunkRes.getStatus().reason(), ChunkType::max.name());
BSONObj objModShard =
BSON(ChunkType::minShardID(kMin) << ChunkType::max(kMax) << "lastmod" << lastmod);
- chunkRes = ChunkType::fromShardBSON(objModShard, epoch);
+ chunkRes = ChunkType::fromShardBSON(objModShard, epoch, boost::none /* timestamp */);
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
ASSERT_STRING_CONTAINS(chunkRes.getStatus().reason(), ChunkType::shard.name());
BSONObj objModLastmod = BSON(ChunkType::minShardID(kMin)
<< ChunkType::max(kMax) << ChunkType::shard(kShard.toString()));
- chunkRes = ChunkType::fromShardBSON(objModLastmod, epoch);
+ chunkRes = ChunkType::fromShardBSON(objModLastmod, epoch, boost::none /* timestamp */);
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
}
TEST(ChunkType, ToFromShardBSON) {
const OID epoch = OID::gen();
- ChunkVersion chunkVersion(1, 2, epoch);
+ ChunkVersion chunkVersion(1, 2, epoch, boost::none /* timestamp */);
auto lastmod = Timestamp(chunkVersion.toLong());
BSONObj obj = BSON(ChunkType::minShardID(kMin)
<< ChunkType::max(kMax) << ChunkType::shard(kShard.toString()) << "lastmod"
<< lastmod);
- ChunkType shardChunk = assertGet(ChunkType::fromShardBSON(obj, epoch));
+ ChunkType shardChunk =
+ assertGet(ChunkType::fromShardBSON(obj, epoch, boost::none /* timestamp */));
ASSERT_BSONOBJ_EQ(obj, shardChunk.toShardBSON());
@@ -128,7 +130,7 @@ TEST(ChunkType, ToFromShardBSON) {
}
TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
- ChunkVersion chunkVersion(1, 2, OID::gen());
+ ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
BSONObj obj =
BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
@@ -140,7 +142,7 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
}
TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
- ChunkVersion chunkVersion(1, 2, OID::gen());
+ ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
BSONObj obj =
BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10))
@@ -152,7 +154,7 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
}
TEST(ChunkType, MinToMaxNotAscending) {
- ChunkVersion chunkVersion(1, 2, OID::gen());
+ ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
BSONObj obj =
BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 20))
@@ -164,7 +166,7 @@ TEST(ChunkType, MinToMaxNotAscending) {
TEST(ChunkType, ToFromConfigBSON) {
const auto chunkID = OID::gen();
- ChunkVersion chunkVersion(1, 2, OID::gen());
+ ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
BSONObj obj =
BSON(ChunkType::name(chunkID)
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10))
diff --git a/src/mongo/s/catalog_cache_refresh_test.cpp b/src/mongo/s/catalog_cache_refresh_test.cpp
index 3ba23ed1085..fed17821f42 100644
--- a/src/mongo/s/catalog_cache_refresh_test.cpp
+++ b/src/mongo/s/catalog_cache_refresh_test.cpp
@@ -103,7 +103,7 @@ TEST_F(CatalogCacheRefreshTest, FullLoad) {
expectGetCollectionWithReshardingFields(epoch, shardKeyPattern, reshardingUUID);
expectFindSendBSONObjVector(kConfigHostAndPort, [&]() {
- ChunkVersion version(1, 0, epoch);
+ ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
ChunkType chunk1(kNss,
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << -100)},
@@ -300,11 +300,12 @@ TEST_F(CatalogCacheRefreshTest, ChunksBSONCorrupted) {
// Return no chunks three times, which is how frequently the catalog cache retries
expectGetCollection(epoch, shardKeyPattern);
expectFindSendBSONObjVector(kConfigHostAndPort, [&] {
- return std::vector<BSONObj>{ChunkType(kNss,
- {shardKeyPattern.getKeyPattern().globalMin(),
- BSON("_id" << 0)},
- ChunkVersion(1, 0, epoch),
- {"0"})
+ return std::vector<BSONObj>{ChunkType(
+ kNss,
+ {shardKeyPattern.getKeyPattern().globalMin(),
+ BSON("_id" << 0)},
+ ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
+ {"0"})
.toConfigBSON(),
BSON("BadValue"
<< "This value should not be in a chunk config document")};
@@ -328,7 +329,7 @@ TEST_F(CatalogCacheRefreshTest, FullLoadMissingChunkWithLowestVersion) {
expectGetDatabase();
const auto incompleteChunks = [&]() {
- ChunkVersion version(1, 0, epoch);
+ ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
// Chunk from (MinKey, -100) is missing (as if someone is dropping the collection
// concurrently) and has the lowest version.
@@ -383,7 +384,7 @@ TEST_F(CatalogCacheRefreshTest, FullLoadMissingChunkWithHighestVersion) {
expectGetDatabase();
const auto incompleteChunks = [&]() {
- ChunkVersion version(1, 0, epoch);
+ ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
// Chunk from (MinKey, -100) is missing (as if someone is dropping the collection
// concurrently) and has the higest version.
@@ -434,13 +435,14 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadMissingChunkWithLowestVersion) {
auto initialRoutingInfo(makeChunkManager(kNss, shardKeyPattern, nullptr, true, {}));
const OID epoch = initialRoutingInfo.getVersion().epoch();
+ const auto timestamp = initialRoutingInfo.getVersion().getTimestamp();
ASSERT_EQ(1, initialRoutingInfo.numChunks());
auto future = scheduleRoutingInfoIncrementalRefresh(kNss);
const auto incompleteChunks = [&]() {
- ChunkVersion version(1, 0, epoch);
+ ChunkVersion version(1, 0, epoch, timestamp);
// Chunk from (MinKey, -100) is missing (as if someone is dropping the collection
// concurrently) and has the lowest version.
@@ -491,13 +493,14 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadMissingChunkWithHighestVersion) {
auto initialRoutingInfo(makeChunkManager(kNss, shardKeyPattern, nullptr, true, {}));
const OID epoch = initialRoutingInfo.getVersion().epoch();
+ const auto timestamp = initialRoutingInfo.getVersion().getTimestamp();
ASSERT_EQ(1, initialRoutingInfo.numChunks());
auto future = scheduleRoutingInfoIncrementalRefresh(kNss);
const auto incompleteChunks = [&]() {
- ChunkVersion version(1, 0, epoch);
+ ChunkVersion version(1, 0, epoch, timestamp);
// Chunk from (MinKey, -100) is missing (as if someone is dropping the collection
// concurrently) and has the higest version.
@@ -560,7 +563,7 @@ TEST_F(CatalogCacheRefreshTest, ChunkEpochChangeDuringIncrementalLoad) {
ChunkType chunk2(kNss,
{BSON("_id" << 0), shardKeyPattern.getKeyPattern().globalMax()},
- ChunkVersion(1, 0, OID::gen()),
+ ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */),
{"1"});
chunk2.setName(OID::gen());
@@ -624,7 +627,7 @@ TEST_F(CatalogCacheRefreshTest, ChunkEpochChangeDuringIncrementalLoadRecoveryAft
// recreated collection.
ChunkType chunk3(kNss,
{BSON("_id" << 100), shardKeyPattern.getKeyPattern().globalMax()},
- ChunkVersion(5, 2, newEpoch),
+ ChunkVersion(5, 2, newEpoch, boost::none /* timestamp */),
{"1"});
chunk3.setName(OID::gen());
@@ -634,7 +637,7 @@ TEST_F(CatalogCacheRefreshTest, ChunkEpochChangeDuringIncrementalLoadRecoveryAft
// On the second retry attempt, return the correct set of chunks from the recreated collection
expectGetCollection(newEpoch, shardKeyPattern);
- ChunkVersion newVersion(5, 0, newEpoch);
+ ChunkVersion newVersion(5, 0, newEpoch, boost::none /* timestamp */);
onFindCommand([&](const RemoteCommandRequest& request) {
// Ensure it is a differential query but starting from version zero (to fetch all the
// chunks) since the incremental refresh above produced a different version
@@ -668,8 +671,10 @@ TEST_F(CatalogCacheRefreshTest, ChunkEpochChangeDuringIncrementalLoadRecoveryAft
ASSERT(cm.isSharded());
ASSERT_EQ(3, cm.numChunks());
ASSERT_EQ(newVersion, cm.getVersion());
- ASSERT_EQ(ChunkVersion(5, 1, newVersion.epoch()), cm.getVersion({"0"}));
- ASSERT_EQ(ChunkVersion(5, 2, newVersion.epoch()), cm.getVersion({"1"}));
+ ASSERT_EQ(ChunkVersion(5, 1, newVersion.epoch(), newVersion.getTimestamp()),
+ cm.getVersion({"0"}));
+ ASSERT_EQ(ChunkVersion(5, 2, newVersion.epoch(), newVersion.getTimestamp()),
+ cm.getVersion({"1"}));
}
TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterCollectionEpochChange) {
@@ -682,7 +687,7 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterCollectionEpochChange) {
auto future = scheduleRoutingInfoIncrementalRefresh(kNss);
- ChunkVersion newVersion(1, 0, OID::gen());
+ ChunkVersion newVersion(1, 0, OID::gen(), boost::none /* timestamp */);
// Return collection with a different epoch
expectGetCollection(newVersion.epoch(), shardKeyPattern);
@@ -715,8 +720,10 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterCollectionEpochChange) {
ASSERT(cm.isSharded());
ASSERT_EQ(2, cm.numChunks());
ASSERT_EQ(newVersion, cm.getVersion());
- ASSERT_EQ(ChunkVersion(1, 0, newVersion.epoch()), cm.getVersion({"0"}));
- ASSERT_EQ(ChunkVersion(1, 1, newVersion.epoch()), cm.getVersion({"1"}));
+ ASSERT_EQ(ChunkVersion(1, 0, newVersion.epoch(), newVersion.getTimestamp()),
+ cm.getVersion({"0"}));
+ ASSERT_EQ(ChunkVersion(1, 1, newVersion.epoch(), newVersion.getTimestamp()),
+ cm.getVersion({"1"}));
}
TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterSplit) {
@@ -759,7 +766,7 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterSplit) {
ASSERT_EQ(2, cm.numChunks());
ASSERT_EQ(version, cm.getVersion());
ASSERT_EQ(version, cm.getVersion({"0"}));
- ASSERT_EQ(ChunkVersion(0, 0, version.epoch()), cm.getVersion({"1"}));
+ ASSERT_EQ(ChunkVersion(0, 0, version.epoch(), version.getTimestamp()), cm.getVersion({"1"}));
}
TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterMoveWithReshardingFieldsAdded) {
@@ -843,7 +850,7 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterMoveLastChunkWithReshardingF
ASSERT(cm.isSharded());
ASSERT_EQ(1, cm.numChunks());
ASSERT_EQ(version, cm.getVersion());
- ASSERT_EQ(ChunkVersion(0, 0, version.epoch()), cm.getVersion({"0"}));
+ ASSERT_EQ(ChunkVersion(0, 0, version.epoch(), version.getTimestamp()), cm.getVersion({"0"}));
ASSERT_EQ(version, cm.getVersion({"1"}));
ASSERT(boost::none == cm.getReshardingFields());
}
diff --git a/src/mongo/s/catalog_cache_test.cpp b/src/mongo/s/catalog_cache_test.cpp
index 73b6e65cafc..fe6811ff341 100644
--- a/src/mongo/s/catalog_cache_test.cpp
+++ b/src/mongo/s/catalog_cache_test.cpp
@@ -234,7 +234,7 @@ TEST_F(CatalogCacheTest, OnStaleDatabaseVersionNoVersion) {
TEST_F(CatalogCacheTest, OnStaleShardVersionWithSameVersion) {
const auto dbVersion = DatabaseVersion(UUID::gen());
- const auto cachedCollVersion = ChunkVersion(1, 0, OID::gen());
+ const auto cachedCollVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], true, dbVersion)});
loadCollection(cachedCollVersion);
@@ -245,7 +245,7 @@ TEST_F(CatalogCacheTest, OnStaleShardVersionWithSameVersion) {
TEST_F(CatalogCacheTest, OnStaleShardVersionWithNoVersion) {
const auto dbVersion = DatabaseVersion(UUID::gen());
- const auto cachedCollVersion = ChunkVersion(1, 0, OID::gen());
+ const auto cachedCollVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], true, dbVersion)});
loadCollection(cachedCollVersion);
@@ -258,8 +258,9 @@ TEST_F(CatalogCacheTest, OnStaleShardVersionWithNoVersion) {
TEST_F(CatalogCacheTest, OnStaleShardVersionWithGraterVersion) {
const auto dbVersion = DatabaseVersion(UUID::gen());
- const auto cachedCollVersion = ChunkVersion(1, 0, OID::gen());
- const auto wantedCollVersion = ChunkVersion(2, 0, cachedCollVersion.epoch());
+ const auto cachedCollVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
+ const auto wantedCollVersion =
+ ChunkVersion(2, 0, cachedCollVersion.epoch(), cachedCollVersion.getTimestamp());
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], true, dbVersion)});
loadCollection(cachedCollVersion);
@@ -271,7 +272,7 @@ TEST_F(CatalogCacheTest, OnStaleShardVersionWithGraterVersion) {
}
TEST_F(CatalogCacheTest, CheckEpochNoDatabase) {
- const auto collVersion = ChunkVersion(1, 0, OID::gen());
+ const auto collVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
ASSERT_THROWS_WITH_CHECK(_catalogCache->checkEpochOrThrow(kNss, collVersion, kShards[0]),
StaleConfigException,
[&](const StaleConfigException& ex) {
@@ -286,7 +287,7 @@ TEST_F(CatalogCacheTest, CheckEpochNoDatabase) {
TEST_F(CatalogCacheTest, CheckEpochNoCollection) {
const auto dbVersion = DatabaseVersion(UUID::gen());
- const auto collVersion = ChunkVersion(1, 0, OID::gen());
+ const auto collVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], true, dbVersion)});
ASSERT_THROWS_WITH_CHECK(_catalogCache->checkEpochOrThrow(kNss, collVersion, kShards[0]),
@@ -303,7 +304,7 @@ TEST_F(CatalogCacheTest, CheckEpochNoCollection) {
TEST_F(CatalogCacheTest, CheckEpochUnshardedCollection) {
const auto dbVersion = DatabaseVersion(UUID::gen());
- const auto collVersion = ChunkVersion(1, 0, OID::gen());
+ const auto collVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], true, dbVersion)});
loadUnshardedCollection(kNss);
@@ -321,8 +322,8 @@ TEST_F(CatalogCacheTest, CheckEpochUnshardedCollection) {
TEST_F(CatalogCacheTest, CheckEpochWithMismatch) {
const auto dbVersion = DatabaseVersion(UUID::gen());
- const auto wantedCollVersion = ChunkVersion(1, 0, OID::gen());
- const auto receivedCollVersion = ChunkVersion(1, 0, OID::gen());
+ const auto wantedCollVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
+ const auto receivedCollVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], true, dbVersion)});
loadCollection(wantedCollVersion);
@@ -343,7 +344,7 @@ TEST_F(CatalogCacheTest, CheckEpochWithMismatch) {
TEST_F(CatalogCacheTest, CheckEpochWithMatch) {
const auto dbVersion = DatabaseVersion(UUID::gen());
- const auto collVersion = ChunkVersion(1, 0, OID::gen());
+ const auto collVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], true, dbVersion)});
loadCollection(collVersion);
diff --git a/src/mongo/s/catalog_cache_test_fixture.cpp b/src/mongo/s/catalog_cache_test_fixture.cpp
index 0f17e230134..0bb9c38ce9c 100644
--- a/src/mongo/s/catalog_cache_test_fixture.cpp
+++ b/src/mongo/s/catalog_cache_test_fixture.cpp
@@ -130,7 +130,7 @@ ChunkManager CatalogCacheTestFixture::makeChunkManager(
bool unique,
const std::vector<BSONObj>& splitPoints,
boost::optional<ReshardingFields> reshardingFields) {
- ChunkVersion version(1, 0, OID::gen());
+ ChunkVersion version(1, 0, OID::gen(), boost::none /* timestamp */);
const BSONObj databaseBSON = [&]() {
DatabaseType db(nss.db().toString(), {"0"}, true, DatabaseVersion(UUID::gen()));
@@ -239,7 +239,7 @@ ChunkManager CatalogCacheTestFixture::loadRoutingTableWithTwoChunksAndTwoShardsI
}
expectGetCollection(nss, epoch, uuid, shardKeyPattern);
expectFindSendBSONObjVector(kConfigHostAndPort, [&]() {
- ChunkVersion version(1, 0, epoch);
+ ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
ChunkType chunk1(
nss, {shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)}, version, {"0"});
diff --git a/src/mongo/s/chunk_manager_query_test.cpp b/src/mongo/s/chunk_manager_query_test.cpp
index f86119f95f4..901da0d311e 100644
--- a/src/mongo/s/chunk_manager_query_test.cpp
+++ b/src/mongo/s/chunk_manager_query_test.cpp
@@ -503,7 +503,7 @@ TEST_F(ChunkManagerQueryTest, SimpleCollationNumbersMultiShard) {
TEST_F(ChunkManagerQueryTest, SnapshotQueryWithMoreShardsThanLatestMetadata) {
const auto epoch = OID::gen();
- ChunkVersion version(1, 0, epoch);
+ ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
ChunkType chunk0(kNss, {BSON("x" << MINKEY), BSON("x" << 0)}, version, ShardId("0"));
chunk0.setName(OID::gen());
@@ -518,6 +518,7 @@ TEST_F(ChunkManagerQueryTest, SnapshotQueryWithMoreShardsThanLatestMetadata) {
nullptr,
false,
epoch,
+ boost::none /* timestamp */,
boost::none,
true,
{chunk0, chunk1});
diff --git a/src/mongo/s/chunk_manager_refresh_bm.cpp b/src/mongo/s/chunk_manager_refresh_bm.cpp
index 28f36dd07fd..bb1d6220257 100644
--- a/src/mongo/s/chunk_manager_refresh_bm.cpp
+++ b/src/mongo/s/chunk_manager_refresh_bm.cpp
@@ -75,12 +75,20 @@ CollectionMetadata makeChunkManagerWithShardSelector(int nShards,
for (uint32_t i = 0; i < nChunks; ++i) {
chunks.emplace_back(kNss,
getRangeForChunk(i, nChunks),
- ChunkVersion{i + 1, 0, collEpoch},
+ ChunkVersion{i + 1, 0, collEpoch, boost::none /* timestamp */},
selectShard(i, nShards, nChunks));
}
- auto rt = RoutingTableHistory::makeNew(
- kNss, UUID::gen(), shardKeyPattern, nullptr, true, collEpoch, boost::none, true, chunks);
+ auto rt = RoutingTableHistory::makeNew(kNss,
+ UUID::gen(),
+ shardKeyPattern,
+ nullptr,
+ true,
+ collEpoch,
+ boost::none /* timestamp */,
+ boost::none,
+ true,
+ chunks);
return CollectionMetadata(ChunkManager(ShardId("Shard0"),
DatabaseVersion(UUID::gen()),
makeStandaloneRoutingTableHistory(std::move(rt)),
@@ -155,7 +163,7 @@ auto BM_FullBuildOfChunkManager(benchmark::State& state, ShardSelectorFn selectS
for (uint32_t i = 0; i < nChunks; ++i) {
chunks.emplace_back(kNss,
getRangeForChunk(i, nChunks),
- ChunkVersion{i + 1, 0, collEpoch},
+ ChunkVersion{i + 1, 0, collEpoch, boost::none /* timestamp */},
selectShard(i, nShards, nChunks));
}
@@ -166,6 +174,7 @@ auto BM_FullBuildOfChunkManager(benchmark::State& state, ShardSelectorFn selectS
nullptr,
true,
collEpoch,
+ boost::none /* timestamp */,
boost::none,
true,
chunks);
diff --git a/src/mongo/s/chunk_map_test.cpp b/src/mongo/s/chunk_map_test.cpp
index 9bc1c70594a..dacfa6013f3 100644
--- a/src/mongo/s/chunk_map_test.cpp
+++ b/src/mongo/s/chunk_map_test.cpp
@@ -53,7 +53,7 @@ private:
TEST_F(ChunkMapTest, TestAddChunk) {
const OID epoch = OID::gen();
- ChunkVersion version{1, 0, epoch};
+ ChunkVersion version{1, 0, epoch, boost::none /* timestamp */};
auto chunk = std::make_shared<ChunkInfo>(
ChunkType{kNss,
@@ -61,7 +61,7 @@ TEST_F(ChunkMapTest, TestAddChunk) {
version,
kThisShard});
- ChunkMap chunkMap{epoch};
+ ChunkMap chunkMap{epoch, boost::none /* timestamp */};
auto newChunkMap = chunkMap.createMerged({chunk});
ASSERT_EQ(newChunkMap.size(), 1);
@@ -69,8 +69,8 @@ TEST_F(ChunkMapTest, TestAddChunk) {
TEST_F(ChunkMapTest, TestEnumerateAllChunks) {
const OID epoch = OID::gen();
- ChunkMap chunkMap{epoch};
- ChunkVersion version{1, 0, epoch};
+ ChunkMap chunkMap{epoch, boost::none /* timestamp */};
+ ChunkVersion version{1, 0, epoch, boost::none /* timestamp */};
auto newChunkMap = chunkMap.createMerged(
{std::make_shared<ChunkInfo>(
@@ -104,8 +104,8 @@ TEST_F(ChunkMapTest, TestEnumerateAllChunks) {
TEST_F(ChunkMapTest, TestIntersectingChunk) {
const OID epoch = OID::gen();
- ChunkMap chunkMap{epoch};
- ChunkVersion version{1, 0, epoch};
+ ChunkMap chunkMap{epoch, boost::none /* timestamp */};
+ ChunkVersion version{1, 0, epoch, boost::none /* timestamp */};
auto newChunkMap = chunkMap.createMerged(
{std::make_shared<ChunkInfo>(
@@ -134,8 +134,8 @@ TEST_F(ChunkMapTest, TestIntersectingChunk) {
TEST_F(ChunkMapTest, TestEnumerateOverlappingChunks) {
const OID epoch = OID::gen();
- ChunkMap chunkMap{epoch};
- ChunkVersion version{1, 0, epoch};
+ ChunkMap chunkMap{epoch, boost::none /* timestamp */};
+ ChunkVersion version{1, 0, epoch, boost::none /* timestamp */};
auto newChunkMap = chunkMap.createMerged(
{std::make_shared<ChunkInfo>(
diff --git a/src/mongo/s/chunk_test.cpp b/src/mongo/s/chunk_test.cpp
index 7a45a033fbd..9bea0bc3b50 100644
--- a/src/mongo/s/chunk_test.cpp
+++ b/src/mongo/s/chunk_test.cpp
@@ -47,7 +47,7 @@ const KeyPattern kShardKeyPattern(BSON("a" << 1));
TEST(ChunkTest, HasMovedSincePinnedTimestamp) {
const OID epoch = OID::gen();
- ChunkVersion version{1, 0, epoch};
+ ChunkVersion version{1, 0, epoch, boost::none /* timestamp */};
ChunkType chunkType(kNss,
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
@@ -63,7 +63,7 @@ TEST(ChunkTest, HasMovedSincePinnedTimestamp) {
TEST(ChunkTest, HasMovedAndReturnedSincePinnedTimestamp) {
const OID epoch = OID::gen();
- ChunkVersion version{1, 0, epoch};
+ ChunkVersion version{1, 0, epoch, boost::none /* timestamp */};
ChunkType chunkType(kNss,
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
@@ -80,7 +80,7 @@ TEST(ChunkTest, HasMovedAndReturnedSincePinnedTimestamp) {
TEST(ChunkTest, HasNotMovedSincePinnedTimestamp) {
const OID epoch = OID::gen();
- ChunkVersion version{1, 0, epoch};
+ ChunkVersion version{1, 0, epoch, boost::none /* timestamp */};
ChunkType chunkType(kNss,
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
@@ -97,7 +97,7 @@ TEST(ChunkTest, HasNotMovedSincePinnedTimestamp) {
TEST(ChunkTest, HasNoHistoryValidForPinnedTimestamp_OneEntry) {
const OID epoch = OID::gen();
- ChunkVersion version{1, 0, epoch};
+ ChunkVersion version{1, 0, epoch, boost::none /* timestamp */};
ChunkType chunkType(kNss,
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
@@ -112,7 +112,7 @@ TEST(ChunkTest, HasNoHistoryValidForPinnedTimestamp_OneEntry) {
TEST(ChunkTest, HasNoHistoryValidForPinnedTimestamp_MoreThanOneEntry) {
const OID epoch = OID::gen();
- ChunkVersion version{1, 0, epoch};
+ ChunkVersion version{1, 0, epoch, boost::none /* timestamp */};
ChunkType chunkType(kNss,
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
diff --git a/src/mongo/s/chunk_version_test.cpp b/src/mongo/s/chunk_version_test.cpp
index 0e1fd283a48..fd26f42d037 100644
--- a/src/mongo/s/chunk_version_test.cpp
+++ b/src/mongo/s/chunk_version_test.cpp
@@ -40,7 +40,7 @@ namespace {
using unittest::assertGet;
TEST(ChunkVersionParsing, ToFromBSONRoundtrip) {
- ChunkVersion version(1, 2, OID::gen());
+ ChunkVersion version(1, 2, OID::gen(), boost::none /* timestamp */);
const auto roundTripVersion = assertGet(ChunkVersion::parseWithField(
[&] {
BSONObjBuilder builder;
@@ -53,7 +53,7 @@ TEST(ChunkVersionParsing, ToFromBSONRoundtrip) {
}
TEST(ChunkVersionParsing, ToFromBSONLegacyRoundtrip) {
- ChunkVersion version(1, 2, OID::gen());
+ ChunkVersion version(1, 2, OID::gen(), boost::none /* timestamp */);
const auto roundTripVersion = assertGet(ChunkVersion::parseLegacyWithField(
[&] {
BSONObjBuilder builder;
@@ -116,29 +116,42 @@ TEST(ChunkVersionParsing, FromBSONLegacyEpochIsOptional) {
TEST(ChunkVersionComparison, EqualityOperators) {
OID epoch = OID::gen();
- ASSERT_EQ(ChunkVersion(3, 1, epoch), ChunkVersion(3, 1, epoch));
- ASSERT_EQ(ChunkVersion(3, 1, OID()), ChunkVersion(3, 1, OID()));
-
- ASSERT_NE(ChunkVersion(3, 1, epoch), ChunkVersion(3, 1, OID()));
- ASSERT_NE(ChunkVersion(3, 1, OID()), ChunkVersion(3, 1, epoch));
- ASSERT_NE(ChunkVersion(4, 2, epoch), ChunkVersion(4, 1, epoch));
+ ASSERT_EQ(ChunkVersion(3, 1, epoch, boost::none /* timestamp */),
+ ChunkVersion(3, 1, epoch, boost::none /* timestamp */));
+ ASSERT_EQ(ChunkVersion(3, 1, OID(), boost::none /* timestamp */),
+ ChunkVersion(3, 1, OID(), boost::none /* timestamp */));
+
+ ASSERT_NE(ChunkVersion(3, 1, epoch, boost::none /* timestamp */),
+ ChunkVersion(3, 1, OID(), boost::none /* timestamp */));
+ ASSERT_NE(ChunkVersion(3, 1, OID(), boost::none /* timestamp */),
+ ChunkVersion(3, 1, epoch, boost::none /* timestamp */));
+ ASSERT_NE(ChunkVersion(4, 2, epoch, boost::none /* timestamp */),
+ ChunkVersion(4, 1, epoch, boost::none /* timestamp */));
}
TEST(ChunkVersionComparison, OlderThan) {
OID epoch = OID::gen();
- ASSERT(ChunkVersion(3, 1, epoch).isOlderThan(ChunkVersion(4, 1, epoch)));
- ASSERT(!ChunkVersion(4, 1, epoch).isOlderThan(ChunkVersion(3, 1, epoch)));
+ ASSERT(ChunkVersion(3, 1, epoch, boost::none /* timestamp */)
+ .isOlderThan(ChunkVersion(4, 1, epoch, boost::none /* timestamp */)));
+ ASSERT(!ChunkVersion(4, 1, epoch, boost::none /* timestamp */)
+ .isOlderThan(ChunkVersion(3, 1, epoch, boost::none /* timestamp */)));
- ASSERT(ChunkVersion(3, 1, epoch).isOlderThan(ChunkVersion(3, 2, epoch)));
- ASSERT(!ChunkVersion(3, 2, epoch).isOlderThan(ChunkVersion(3, 1, epoch)));
+ ASSERT(ChunkVersion(3, 1, epoch, boost::none /* timestamp */)
+ .isOlderThan(ChunkVersion(3, 2, epoch, boost::none /* timestamp */)));
+ ASSERT(!ChunkVersion(3, 2, epoch, boost::none /* timestamp */)
+ .isOlderThan(ChunkVersion(3, 1, epoch, boost::none /* timestamp */)));
- ASSERT(!ChunkVersion(3, 1, epoch).isOlderThan(ChunkVersion(4, 1, OID())));
- ASSERT(!ChunkVersion(4, 1, OID()).isOlderThan(ChunkVersion(3, 1, epoch)));
+ ASSERT(!ChunkVersion(3, 1, epoch, boost::none /* timestamp */)
+ .isOlderThan(ChunkVersion(4, 1, OID(), boost::none /* timestamp */)));
+ ASSERT(!ChunkVersion(4, 1, OID(), boost::none /* timestamp */)
+ .isOlderThan(ChunkVersion(3, 1, epoch, boost::none /* timestamp */)));
- ASSERT(ChunkVersion(3, 2, epoch).isOlderThan(ChunkVersion(4, 1, epoch)));
+ ASSERT(ChunkVersion(3, 2, epoch, boost::none /* timestamp */)
+ .isOlderThan(ChunkVersion(4, 1, epoch, boost::none /* timestamp */)));
- ASSERT(!ChunkVersion(3, 1, epoch).isOlderThan(ChunkVersion(3, 1, epoch)));
+ ASSERT(!ChunkVersion(3, 1, epoch, boost::none /* timestamp */)
+ .isOlderThan(ChunkVersion(3, 1, epoch, boost::none /* timestamp */)));
}
TEST(ChunkVersionConstruction, CreateWithLargeValues) {
@@ -146,7 +159,7 @@ TEST(ChunkVersionConstruction, CreateWithLargeValues) {
const uint32_t majorVersion = 1 << 24;
const auto epoch = OID::gen();
- ChunkVersion version(majorVersion, minorVersion, epoch);
+ ChunkVersion version(majorVersion, minorVersion, epoch, boost::none /* timestamp */);
ASSERT_EQ(majorVersion, version.majorVersion());
ASSERT_EQ(minorVersion, version.minorVersion());
ASSERT_EQ(epoch, version.epoch());
@@ -157,7 +170,7 @@ TEST(ChunkVersionManipulation, ThrowsErrorIfOverflowIsAttemptedForMajorVersion)
const uint32_t majorVersion = std::numeric_limits<uint32_t>::max();
const auto epoch = OID::gen();
- ChunkVersion version(majorVersion, minorVersion, epoch);
+ ChunkVersion version(majorVersion, minorVersion, epoch, boost::none /* timestamp */);
ASSERT_EQ(majorVersion, version.majorVersion());
ASSERT_EQ(minorVersion, version.minorVersion());
ASSERT_EQ(epoch, version.epoch());
@@ -170,7 +183,7 @@ TEST(ChunkVersionManipulation, ThrowsErrorIfOverflowIsAttemptedForMinorVersion)
const uint32_t majorVersion = 0;
const auto epoch = OID::gen();
- ChunkVersion version(majorVersion, minorVersion, epoch);
+ ChunkVersion version(majorVersion, minorVersion, epoch, boost::none /* timestamp */);
ASSERT_EQ(majorVersion, version.majorVersion());
ASSERT_EQ(minorVersion, version.minorVersion());
ASSERT_EQ(epoch, version.epoch());
diff --git a/src/mongo/s/comparable_chunk_version_test.cpp b/src/mongo/s/comparable_chunk_version_test.cpp
index 93c2231d54f..73cb7bc7987 100644
--- a/src/mongo/s/comparable_chunk_version_test.cpp
+++ b/src/mongo/s/comparable_chunk_version_test.cpp
@@ -37,26 +37,26 @@ namespace {
TEST(ComparableChunkVersionTest, VersionsEqual) {
auto epoch = OID::gen();
- const auto version1 =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(1, 0, epoch));
- const auto version2 =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(1, 0, epoch));
+ const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(1, 0, epoch, boost::none /* timestamp */));
+ const auto version2 = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(1, 0, epoch, boost::none /* timestamp */));
ASSERT(version1.getVersion() == version2.getVersion());
ASSERT(version1 == version2);
}
TEST(ComparableChunkVersionTest, VersionsEqualAfterCopy) {
- ChunkVersion chunkVersion(1, 0, OID::gen());
+ ChunkVersion chunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(chunkVersion);
const auto version2 = version1;
ASSERT(version1 == version2);
}
TEST(ComparableChunkVersionTest, CompareVersionDifferentEpochs) {
- const auto version1 =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(2, 0, OID::gen()));
- const auto version2 =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(1, 0, OID::gen()));
+ const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(2, 0, OID::gen(), boost::none /* timestamp */));
+ const auto version2 = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */));
ASSERT(version2 != version1);
ASSERT(version2 > version1);
ASSERT_FALSE(version2 < version1);
@@ -64,12 +64,12 @@ TEST(ComparableChunkVersionTest, CompareVersionDifferentEpochs) {
TEST(ComparableChunkVersionTest, VersionGreaterSameEpochs) {
const auto epoch = OID::gen();
- const auto version1 =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(1, 0, epoch));
- const auto version2 =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(1, 1, epoch));
- const auto version3 =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(2, 0, epoch));
+ const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(1, 0, epoch, boost::none /* timestamp */));
+ const auto version2 = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(1, 1, epoch, boost::none /* timestamp */));
+ const auto version3 = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(2, 0, epoch, boost::none /* timestamp */));
ASSERT(version2 != version1);
ASSERT(version2 > version1);
ASSERT_FALSE(version2 < version1);
@@ -80,12 +80,12 @@ TEST(ComparableChunkVersionTest, VersionGreaterSameEpochs) {
TEST(ComparableChunkVersionTest, VersionLessSameEpoch) {
const auto epoch = OID::gen();
- const auto version1 =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(1, 0, epoch));
- const auto version2 =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(1, 1, epoch));
- const auto version3 =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(2, 0, epoch));
+ const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(1, 0, epoch, boost::none /* timestamp */));
+ const auto version2 = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(1, 1, epoch, boost::none /* timestamp */));
+ const auto version3 = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(2, 0, epoch, boost::none /* timestamp */));
ASSERT(version1 != version2);
ASSERT(version1 < version2);
ASSERT_FALSE(version1 > version2);
@@ -103,8 +103,8 @@ TEST(ComparableChunkVersionTest, DefaultConstructedVersionsAreEqual) {
TEST(ComparableChunkVersionTest, DefaultConstructedVersionIsAlwaysLessThanWithChunksVersion) {
const ComparableChunkVersion defaultVersion{};
- const auto withChunksVersion =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(1, 0, OID::gen()));
+ const auto withChunksVersion = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */));
ASSERT(defaultVersion != withChunksVersion);
ASSERT(defaultVersion < withChunksVersion);
ASSERT_FALSE(defaultVersion > withChunksVersion);
@@ -112,8 +112,8 @@ TEST(ComparableChunkVersionTest, DefaultConstructedVersionIsAlwaysLessThanWithCh
TEST(ComparableChunkVersionTest, DefaultConstructedVersionIsAlwaysLessThanNoChunksVersion) {
const ComparableChunkVersion defaultVersion{};
- const auto noChunksVersion =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(0, 0, OID::gen()));
+ const auto noChunksVersion = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(0, 0, OID::gen(), boost::none /* timestamp */));
ASSERT(defaultVersion != noChunksVersion);
ASSERT(defaultVersion < noChunksVersion);
ASSERT_FALSE(defaultVersion > noChunksVersion);
@@ -130,10 +130,10 @@ TEST(ComparableChunkVersionTest, DefaultConstructedVersionIsAlwaysLessThanUnshar
TEST(ComparableChunkVersionTest, TwoNoChunksVersionsAreTheSame) {
const auto oid = OID::gen();
- const auto version1 =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(0, 0, oid));
- const auto version2 =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(0, 0, oid));
+ const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(0, 0, oid, boost::none /* timestamp */));
+ const auto version2 = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(0, 0, oid, boost::none /* timestamp */));
ASSERT(version1 == version2);
ASSERT_FALSE(version1 < version2);
ASSERT_FALSE(version1 > version2);
@@ -141,17 +141,17 @@ TEST(ComparableChunkVersionTest, TwoNoChunksVersionsAreTheSame) {
TEST(ComparableChunkVersionTest, NoChunksCompareBySequenceNum) {
const auto oid = OID::gen();
- const auto version1 =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(1, 0, oid));
+ const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(1, 0, oid, boost::none /* timestamp */));
- const auto noChunkSV1 =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(0, 0, oid));
+ const auto noChunkSV1 = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(0, 0, oid, boost::none /* timestamp */));
ASSERT(version1 != noChunkSV1);
ASSERT(noChunkSV1 > version1);
- const auto version2 =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(2, 0, oid));
+ const auto version2 = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(2, 0, oid, boost::none /* timestamp */));
ASSERT(version2 != noChunkSV1);
ASSERT(version2 > noChunkSV1);
@@ -160,16 +160,16 @@ TEST(ComparableChunkVersionTest, NoChunksCompareBySequenceNum) {
TEST(ComparableChunkVersionTest, NoChunksGreaterThanUnshardedBySequenceNum) {
const auto unsharded =
ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion::UNSHARDED());
- const auto noChunkSV =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(0, 0, OID::gen()));
+ const auto noChunkSV = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(0, 0, OID::gen(), boost::none /* timestamp */));
ASSERT(noChunkSV != unsharded);
ASSERT(noChunkSV > unsharded);
}
TEST(ComparableChunkVersionTest, UnshardedGreaterThanNoChunksBySequenceNum) {
- const auto noChunkSV =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(0, 0, OID::gen()));
+ const auto noChunkSV = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(0, 0, OID::gen(), boost::none /* timestamp */));
const auto unsharded =
ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion::UNSHARDED());
@@ -178,8 +178,8 @@ TEST(ComparableChunkVersionTest, UnshardedGreaterThanNoChunksBySequenceNum) {
}
TEST(ComparableChunkVersionTest, NoChunksGreaterThanDefault) {
- const auto noChunkSV =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(0, 0, OID::gen()));
+ const auto noChunkSV = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(0, 0, OID::gen(), boost::none /* timestamp */));
const ComparableChunkVersion defaultVersion{};
ASSERT(noChunkSV != defaultVersion);
@@ -189,14 +189,14 @@ TEST(ComparableChunkVersionTest, NoChunksGreaterThanDefault) {
TEST(ComparableChunkVersionTest, CompareForcedRefreshVersionVersusValidChunkVersion) {
auto oid = OID::gen();
const ComparableChunkVersion defaultVersionBeforeForce;
- const auto versionBeforeForce =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(100, 0, oid));
+ const auto versionBeforeForce = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(100, 0, oid, boost::none /* timestamp */));
const auto forcedRefreshVersion =
ComparableChunkVersion::makeComparableChunkVersionForForcedRefresh();
- const auto versionAfterForce =
- ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion(100, 0, oid));
+ const auto versionAfterForce = ComparableChunkVersion::makeComparableChunkVersion(
+ ChunkVersion(100, 0, oid, boost::none /* timestamp */));
const ComparableChunkVersion defaultVersionAfterForce;
ASSERT(defaultVersionBeforeForce != forcedRefreshVersion);
diff --git a/src/mongo/s/query/cluster_exchange_test.cpp b/src/mongo/s/query/cluster_exchange_test.cpp
index bb5ef977d46..22bf973c0cb 100644
--- a/src/mongo/s/query/cluster_exchange_test.cpp
+++ b/src/mongo/s/query/cluster_exchange_test.cpp
@@ -528,7 +528,7 @@ TEST_F(ClusterExchangeTest, CompoundShardKeyThreeShards) {
const std::vector<std::string> xBoundaries = {"a", "g", "m", "r", "u"};
auto chunks = [&]() {
std::vector<ChunkType> chunks;
- ChunkVersion version(1, 0, epoch);
+ ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
chunks.emplace_back(kTestTargetNss,
ChunkRange{BSON("x" << MINKEY << "y" << MINKEY),
BSON("x" << xBoundaries[0] << "y" << MINKEY)},
diff --git a/src/mongo/s/query/sharded_agg_test_fixture.h b/src/mongo/s/query/sharded_agg_test_fixture.h
index ccad5872238..621a7e368a9 100644
--- a/src/mongo/s/query/sharded_agg_test_fixture.h
+++ b/src/mongo/s/query/sharded_agg_test_fixture.h
@@ -79,7 +79,7 @@ public:
std::vector<ChunkType> makeChunks(const NamespaceString& nss,
const OID epoch,
std::vector<std::pair<ChunkRange, ShardId>> chunkInfos) {
- ChunkVersion version(1, 0, epoch);
+ ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
std::vector<ChunkType> chunks;
for (auto&& pair : chunkInfos) {
chunks.emplace_back(nss, pair.first, version, pair.second);
diff --git a/src/mongo/s/request_types/balance_chunk_request_test.cpp b/src/mongo/s/request_types/balance_chunk_request_test.cpp
index df15b79669d..5f4db5b72a3 100644
--- a/src/mongo/s/request_types/balance_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/balance_chunk_request_test.cpp
@@ -43,7 +43,7 @@ namespace {
using unittest::assertGet;
TEST(BalanceChunkRequest, ParseFromConfigCommandNoSecondaryThrottle) {
- const ChunkVersion version(1, 0, OID::gen());
+ const ChunkVersion version(1, 0, OID::gen(), boost::none /* timestamp */);
auto request = assertGet(BalanceChunkRequest::parseFromConfigCommand(
BSON("_configsvrMoveChunk"
<< 1 << "ns"
@@ -65,7 +65,7 @@ TEST(BalanceChunkRequest, ParseFromConfigCommandNoSecondaryThrottle) {
}
TEST(BalanceChunkRequest, ParseFromConfigCommandWithSecondaryThrottle) {
- const ChunkVersion version(1, 0, OID::gen());
+ const ChunkVersion version(1, 0, OID::gen(), boost::none /* timestamp */);
auto request = assertGet(BalanceChunkRequest::parseFromConfigCommand(
BSON("_configsvrMoveChunk"
<< 1 << "ns"
diff --git a/src/mongo/s/request_types/commit_chunk_migration_request_test.cpp b/src/mongo/s/request_types/commit_chunk_migration_request_test.cpp
index 883ecfa2164..fa9582e8ebb 100644
--- a/src/mongo/s/request_types/commit_chunk_migration_request_test.cpp
+++ b/src/mongo/s/request_types/commit_chunk_migration_request_test.cpp
@@ -58,9 +58,9 @@ TEST(CommitChunkMigrationRequest, WithoutControlChunk) {
ChunkType migratedChunk;
migratedChunk.setMin(kKey0);
migratedChunk.setMax(kKey1);
- migratedChunk.setVersion({12, 7, OID::gen()});
+ migratedChunk.setVersion({12, 7, OID::gen(), boost::none /* timestamp */});
- ChunkVersion fromShardCollectionVersion(1, 2, OID::gen());
+ ChunkVersion fromShardCollectionVersion(1, 2, OID::gen(), boost::none /* timestamp */);
Timestamp validAfter{1};
diff --git a/src/mongo/s/request_types/move_chunk_request_test.cpp b/src/mongo/s/request_types/move_chunk_request_test.cpp
index 825257971d2..02b0c001182 100644
--- a/src/mongo/s/request_types/move_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/move_chunk_request_test.cpp
@@ -51,7 +51,7 @@ const int kMaxChunkSizeBytes = 1024;
const bool kWaitForDelete = true;
TEST(MoveChunkRequest, Roundtrip) {
- const ChunkVersion chunkVersion(3, 1, OID::gen());
+ const ChunkVersion chunkVersion(3, 1, OID::gen(), boost::none /* timestamp */);
BSONObjBuilder builder;
MoveChunkRequest::appendAsCommand(
@@ -86,7 +86,7 @@ TEST(MoveChunkRequest, Roundtrip) {
}
TEST(MoveChunkRequest, EqualityOperatorSameValue) {
- const ChunkVersion chunkVersion(3, 1, OID::gen());
+ const ChunkVersion chunkVersion(3, 1, OID::gen(), boost::none /* timestamp */);
BSONObjBuilder builder;
MoveChunkRequest::appendAsCommand(
@@ -112,7 +112,7 @@ TEST(MoveChunkRequest, EqualityOperatorSameValue) {
}
TEST(MoveChunkRequest, EqualityOperatorDifferentValues) {
- const ChunkVersion chunkVersion(3, 1, OID::gen());
+ const ChunkVersion chunkVersion(3, 1, OID::gen(), boost::none /* timestamp */);
BSONObjBuilder builder1;
MoveChunkRequest::appendAsCommand(
diff --git a/src/mongo/s/request_types/set_shard_version_request_test.cpp b/src/mongo/s/request_types/set_shard_version_request_test.cpp
index ce3d2202cb8..7c7bd819da6 100644
--- a/src/mongo/s/request_types/set_shard_version_request_test.cpp
+++ b/src/mongo/s/request_types/set_shard_version_request_test.cpp
@@ -41,7 +41,7 @@ using unittest::assertGet;
namespace {
TEST(SetShardVersionRequest, ParseFull) {
- const ChunkVersion chunkVersion(1, 2, OID::gen());
+ const ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
SetShardVersionRequest request = assertGet(
SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
@@ -58,7 +58,7 @@ TEST(SetShardVersionRequest, ParseFull) {
}
TEST(SetShardVersionRequest, ParseFullWithAuthoritative) {
- const ChunkVersion chunkVersion(1, 2, OID::gen());
+ const ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
SetShardVersionRequest request =
assertGet(SetShardVersionRequest::parseFromBSON(
@@ -76,7 +76,7 @@ TEST(SetShardVersionRequest, ParseFullWithAuthoritative) {
}
TEST(SetShardVersionRequest, ParseFullNoNS) {
- const ChunkVersion chunkVersion(1, 2, OID::gen());
+ const ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
auto ssvStatus =
SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
@@ -88,7 +88,7 @@ TEST(SetShardVersionRequest, ParseFullNoNS) {
}
TEST(SetShardVersionRequest, ParseFullNSContainsDBOnly) {
- const ChunkVersion chunkVersion(1, 2, OID::gen());
+ const ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
auto ssvStatus =
SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
@@ -100,7 +100,7 @@ TEST(SetShardVersionRequest, ParseFullNSContainsDBOnly) {
}
TEST(SetShardVersionRequest, ToSSVCommandFull) {
- const ChunkVersion chunkVersion(1, 2, OID::gen());
+ const ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
SetShardVersionRequest ssv(NamespaceString("db.coll"), chunkVersion, false);
@@ -119,7 +119,7 @@ TEST(SetShardVersionRequest, ToSSVCommandFull) {
}
TEST(SetShardVersionRequest, ToSSVCommandFullAuthoritative) {
- const ChunkVersion chunkVersion(1, 2, OID::gen());
+ const ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
SetShardVersionRequest ssv(NamespaceString("db.coll"), chunkVersion, true);
@@ -138,7 +138,7 @@ TEST(SetShardVersionRequest, ToSSVCommandFullAuthoritative) {
}
TEST(SetShardVersionRequest, ToSSVCommandFullForceRefresh) {
- const ChunkVersion chunkVersion(1, 2, OID::gen());
+ const ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
SetShardVersionRequest ssv(NamespaceString("db.coll"), chunkVersion, false, true);
diff --git a/src/mongo/s/routing_table_history_test.cpp b/src/mongo/s/routing_table_history_test.cpp
index c0139b4cfee..07ac4872891 100644
--- a/src/mongo/s/routing_table_history_test.cpp
+++ b/src/mongo/s/routing_table_history_test.cpp
@@ -152,7 +152,7 @@ public:
void setUp() override {
const OID epoch = OID::gen();
const Timestamp kRouting(100, 0);
- ChunkVersion version{1, 0, epoch};
+ ChunkVersion version{1, 0, epoch, boost::none /* timestamp */};
auto initChunk =
ChunkType{kNss,
@@ -166,6 +166,7 @@ public:
nullptr,
false,
epoch,
+ boost::none /* timestamp */,
boost::none,
true,
{initChunk}));
@@ -325,7 +326,7 @@ TEST_F(RoutingTableHistoryTestThreeInitialChunks,
TEST_F(RoutingTableHistoryTest, TestSplits) {
const OID epoch = OID::gen();
- ChunkVersion version{1, 0, epoch};
+ ChunkVersion version{1, 0, epoch, boost::none /* timestamp */};
auto chunkAll =
ChunkType{kNss,
@@ -339,6 +340,7 @@ TEST_F(RoutingTableHistoryTest, TestSplits) {
nullptr,
false,
epoch,
+ boost::none /* timestamp */,
boost::none,
true,
{chunkAll});
@@ -346,33 +348,33 @@ TEST_F(RoutingTableHistoryTest, TestSplits) {
std::vector<ChunkType> chunks1 = {
ChunkType{kNss,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)},
- ChunkVersion{2, 1, epoch},
+ ChunkVersion{2, 1, epoch, boost::none /* timestamp */},
kThisShard},
ChunkType{kNss,
ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch},
+ ChunkVersion{2, 2, epoch, boost::none /* timestamp */},
kThisShard}};
auto rt1 = rt.makeUpdated(boost::none, true, chunks1);
- auto v1 = ChunkVersion{2, 2, epoch};
+ auto v1 = ChunkVersion{2, 2, epoch, boost::none /* timestamp */};
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
std::vector<ChunkType> chunks2 = {
ChunkType{kNss,
ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch},
+ ChunkVersion{2, 2, epoch, boost::none /* timestamp */},
kThisShard},
ChunkType{kNss,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << -1)},
- ChunkVersion{3, 1, epoch},
+ ChunkVersion{3, 1, epoch, boost::none /* timestamp */},
kThisShard},
ChunkType{kNss,
ChunkRange{BSON("a" << -1), BSON("a" << 0)},
- ChunkVersion{3, 2, epoch},
+ ChunkVersion{3, 2, epoch, boost::none /* timestamp */},
kThisShard}};
auto rt2 = rt1.makeUpdated(boost::none, true, chunks2);
- auto v2 = ChunkVersion{3, 2, epoch};
+ auto v2 = ChunkVersion{3, 2, epoch, boost::none /* timestamp */};
ASSERT_EQ(v2, rt2.getVersion(kThisShard));
}
@@ -382,7 +384,7 @@ TEST_F(RoutingTableHistoryTest, TestReplaceEmptyChunk) {
std::vector<ChunkType> initialChunks = {
ChunkType{kNss,
ChunkRange{getShardKeyPattern().globalMin(), getShardKeyPattern().globalMax()},
- ChunkVersion{1, 0, epoch},
+ ChunkVersion{1, 0, epoch, boost::none /* timestamp */},
kThisShard}};
auto rt = RoutingTableHistory::makeNew(kNss,
@@ -391,6 +393,7 @@ TEST_F(RoutingTableHistoryTest, TestReplaceEmptyChunk) {
nullptr,
false,
epoch,
+ boost::none /* timestamp */,
boost::none,
true,
initialChunks);
@@ -399,15 +402,15 @@ TEST_F(RoutingTableHistoryTest, TestReplaceEmptyChunk) {
std::vector<ChunkType> changedChunks = {
ChunkType{kNss,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)},
- ChunkVersion{2, 1, epoch},
+ ChunkVersion{2, 1, epoch, boost::none /* timestamp */},
kThisShard},
ChunkType{kNss,
ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch},
+ ChunkVersion{2, 2, epoch, boost::none /* timestamp */},
kThisShard}};
auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
- auto v1 = ChunkVersion{2, 2, epoch};
+ auto v1 = ChunkVersion{2, 2, epoch, boost::none /* timestamp */};
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
@@ -431,7 +434,7 @@ TEST_F(RoutingTableHistoryTest, TestUseLatestVersions) {
std::vector<ChunkType> initialChunks = {
ChunkType{kNss,
ChunkRange{getShardKeyPattern().globalMin(), getShardKeyPattern().globalMax()},
- ChunkVersion{1, 0, epoch},
+ ChunkVersion{1, 0, epoch, boost::none /* timestamp */},
kThisShard}};
auto rt = RoutingTableHistory::makeNew(kNss,
@@ -440,6 +443,7 @@ TEST_F(RoutingTableHistoryTest, TestUseLatestVersions) {
nullptr,
false,
epoch,
+ boost::none /* timestamp */,
boost::none,
true,
initialChunks);
@@ -448,19 +452,19 @@ TEST_F(RoutingTableHistoryTest, TestUseLatestVersions) {
std::vector<ChunkType> changedChunks = {
ChunkType{kNss,
ChunkRange{getShardKeyPattern().globalMin(), getShardKeyPattern().globalMax()},
- ChunkVersion{1, 0, epoch},
+ ChunkVersion{1, 0, epoch, boost::none /* timestamp */},
kThisShard},
ChunkType{kNss,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)},
- ChunkVersion{2, 1, epoch},
+ ChunkVersion{2, 1, epoch, boost::none /* timestamp */},
kThisShard},
ChunkType{kNss,
ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch},
+ ChunkVersion{2, 2, epoch, boost::none /* timestamp */},
kThisShard}};
auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
- auto v1 = ChunkVersion{2, 2, epoch};
+ auto v1 = ChunkVersion{2, 2, epoch, boost::none /* timestamp */};
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
}
@@ -471,11 +475,11 @@ TEST_F(RoutingTableHistoryTest, TestOutOfOrderVersion) {
std::vector<ChunkType> initialChunks = {
ChunkType{kNss,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)},
- ChunkVersion{2, 1, epoch},
+ ChunkVersion{2, 1, epoch, boost::none /* timestamp */},
kThisShard},
ChunkType{kNss,
ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch},
+ ChunkVersion{2, 2, epoch, boost::none /* timestamp */},
kThisShard}};
auto rt = RoutingTableHistory::makeNew(kNss,
@@ -484,6 +488,7 @@ TEST_F(RoutingTableHistoryTest, TestOutOfOrderVersion) {
nullptr,
false,
epoch,
+ boost::none /* timestamp */,
boost::none,
true,
initialChunks);
@@ -492,20 +497,20 @@ TEST_F(RoutingTableHistoryTest, TestOutOfOrderVersion) {
std::vector<ChunkType> changedChunks = {
ChunkType{kNss,
ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()},
- ChunkVersion{3, 0, epoch},
+ ChunkVersion{3, 0, epoch, boost::none /* timestamp */},
kThisShard},
ChunkType{kNss,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)},
- ChunkVersion{3, 1, epoch},
+ ChunkVersion{3, 1, epoch, boost::none /* timestamp */},
kThisShard}};
auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
- auto v1 = ChunkVersion{3, 1, epoch};
+ auto v1 = ChunkVersion{3, 1, epoch, boost::none /* timestamp */};
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
auto chunk1 = rt1.findIntersectingChunk(BSON("a" << 0));
- ASSERT_EQ(chunk1->getLastmod(), ChunkVersion(3, 0, epoch));
+ ASSERT_EQ(chunk1->getLastmod(), ChunkVersion(3, 0, epoch, boost::none /* timestamp */));
ASSERT_EQ(chunk1->getMin().woCompare(BSON("a" << 0)), 0);
ASSERT_EQ(chunk1->getMax().woCompare(getShardKeyPattern().globalMax()), 0);
}
@@ -516,15 +521,15 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunks) {
std::vector<ChunkType> initialChunks = {
ChunkType{kNss,
ChunkRange{BSON("a" << 0), BSON("a" << 10)},
- ChunkVersion{2, 0, epoch},
+ ChunkVersion{2, 0, epoch, boost::none /* timestamp */},
kThisShard},
ChunkType{kNss,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)},
- ChunkVersion{2, 1, epoch},
+ ChunkVersion{2, 1, epoch, boost::none /* timestamp */},
kThisShard},
ChunkType{kNss,
ChunkRange{BSON("a" << 10), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch},
+ ChunkVersion{2, 2, epoch, boost::none /* timestamp */},
kThisShard}};
auto rt = RoutingTableHistory::makeNew(kNss,
@@ -534,23 +539,24 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunks) {
false,
epoch,
+ boost::none /* timestamp */,
boost::none,
true,
initialChunks);
ASSERT_EQ(rt.numChunks(), 3);
- ASSERT_EQ(rt.getVersion(), ChunkVersion(2, 2, epoch));
+ ASSERT_EQ(rt.getVersion(), ChunkVersion(2, 2, epoch, boost::none /* timestamp */));
std::vector<ChunkType> changedChunks = {
ChunkType{kNss,
ChunkRange{BSON("a" << 10), getShardKeyPattern().globalMax()},
- ChunkVersion{3, 0, epoch},
+ ChunkVersion{3, 0, epoch, boost::none /* timestamp */},
kThisShard},
ChunkType{kNss,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 10)},
- ChunkVersion{3, 1, epoch},
+ ChunkVersion{3, 1, epoch, boost::none /* timestamp */},
kThisShard}};
auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
- auto v1 = ChunkVersion{3, 1, epoch};
+ auto v1 = ChunkVersion{3, 1, epoch, boost::none /* timestamp */};
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
}
@@ -561,15 +567,15 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunksOrdering) {
std::vector<ChunkType> initialChunks = {
ChunkType{kNss,
ChunkRange{BSON("a" << -10), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 0, epoch},
+ ChunkVersion{2, 0, epoch, boost::none /* timestamp */},
kThisShard},
ChunkType{kNss,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << -500)},
- ChunkVersion{2, 1, epoch},
+ ChunkVersion{2, 1, epoch, boost::none /* timestamp */},
kThisShard},
ChunkType{kNss,
ChunkRange{BSON("a" << -500), BSON("a" << -10)},
- ChunkVersion{2, 2, epoch},
+ ChunkVersion{2, 2, epoch, boost::none /* timestamp */},
kThisShard}};
auto rt = RoutingTableHistory::makeNew(kNss,
@@ -578,29 +584,30 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunksOrdering) {
nullptr,
false,
epoch,
+ boost::none /* timestamp */,
boost::none,
true,
initialChunks);
ASSERT_EQ(rt.numChunks(), 3);
- ASSERT_EQ(rt.getVersion(), ChunkVersion(2, 2, epoch));
+ ASSERT_EQ(rt.getVersion(), ChunkVersion(2, 2, epoch, boost::none /* timestamp */));
std::vector<ChunkType> changedChunks = {
ChunkType{kNss,
ChunkRange{BSON("a" << -500), BSON("a" << -10)},
- ChunkVersion{2, 2, epoch},
+ ChunkVersion{2, 2, epoch, boost::none /* timestamp */},
kThisShard},
ChunkType{kNss,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << -10)},
- ChunkVersion{3, 1, epoch},
+ ChunkVersion{3, 1, epoch, boost::none /* timestamp */},
kThisShard}};
auto rt1 = rt.makeUpdated(boost::none, true, changedChunks);
- auto v1 = ChunkVersion{3, 1, epoch};
+ auto v1 = ChunkVersion{3, 1, epoch, boost::none /* timestamp */};
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
auto chunk1 = rt1.findIntersectingChunk(BSON("a" << -500));
- ASSERT_EQ(chunk1->getLastmod(), ChunkVersion(3, 1, epoch));
+ ASSERT_EQ(chunk1->getLastmod(), ChunkVersion(3, 1, epoch, boost::none /* timestamp */));
ASSERT_EQ(chunk1->getMin().woCompare(getShardKeyPattern().globalMin()), 0);
ASSERT_EQ(chunk1->getMax().woCompare(BSON("a" << -10)), 0);
}
@@ -611,27 +618,27 @@ TEST_F(RoutingTableHistoryTest, TestFlatten) {
std::vector<ChunkType> initialChunks = {
ChunkType{kNss,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 10)},
- ChunkVersion{2, 0, epoch},
+ ChunkVersion{2, 0, epoch, boost::none /* timestamp */},
kThisShard},
ChunkType{kNss,
ChunkRange{BSON("a" << 10), BSON("a" << 20)},
- ChunkVersion{2, 1, epoch},
+ ChunkVersion{2, 1, epoch, boost::none /* timestamp */},
kThisShard},
ChunkType{kNss,
ChunkRange{BSON("a" << 20), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch},
+ ChunkVersion{2, 2, epoch, boost::none /* timestamp */},
kThisShard},
ChunkType{kNss,
ChunkRange{getShardKeyPattern().globalMin(), getShardKeyPattern().globalMax()},
- ChunkVersion{3, 0, epoch},
+ ChunkVersion{3, 0, epoch, boost::none /* timestamp */},
kThisShard},
ChunkType{kNss,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 10)},
- ChunkVersion{4, 0, epoch},
+ ChunkVersion{4, 0, epoch, boost::none /* timestamp */},
kThisShard},
ChunkType{kNss,
ChunkRange{BSON("a" << 10), getShardKeyPattern().globalMax()},
- ChunkVersion{4, 1, epoch},
+ ChunkVersion{4, 1, epoch, boost::none /* timestamp */},
kThisShard},
};
@@ -641,14 +648,15 @@ TEST_F(RoutingTableHistoryTest, TestFlatten) {
nullptr,
false,
epoch,
+ boost::none /* timestamp */,
boost::none,
true,
initialChunks);
ASSERT_EQ(rt.numChunks(), 2);
- ASSERT_EQ(rt.getVersion(), ChunkVersion(4, 1, epoch));
+ ASSERT_EQ(rt.getVersion(), ChunkVersion(4, 1, epoch, boost::none /* timestamp */));
auto chunk1 = rt.findIntersectingChunk(BSON("a" << 0));
- ASSERT_EQ(chunk1->getLastmod(), ChunkVersion(4, 0, epoch));
+ ASSERT_EQ(chunk1->getLastmod(), ChunkVersion(4, 0, epoch, boost::none /* timestamp */));
ASSERT_EQ(chunk1->getMin().woCompare(getShardKeyPattern().globalMin()), 0);
ASSERT_EQ(chunk1->getMax().woCompare(BSON("a" << 10)), 0);
}
diff --git a/src/mongo/s/write_ops/batch_write_exec_test.cpp b/src/mongo/s/write_ops/batch_write_exec_test.cpp
index 0f2fe610ecb..951d9837069 100644
--- a/src/mongo/s/write_ops/batch_write_exec_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec_test.cpp
@@ -92,8 +92,10 @@ BSONObj expectInsertsReturnStaleVersionErrorsBase(const NamespaceString& nss,
WriteErrorDetail* error = new WriteErrorDetail;
error->setStatus({ErrorCodes::StaleShardVersion, ""});
error->setErrInfo([&] {
- StaleConfigInfo sci(
- nss, ChunkVersion(1, 0, epoch), ChunkVersion(2, 0, epoch), ShardId(kShardName1));
+ StaleConfigInfo sci(nss,
+ ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
+ ChunkVersion(2, 0, epoch, boost::none /* timestamp */),
+ ShardId(kShardName1));
BSONObjBuilder builder;
sci.serialize(&builder);
return builder.obj();
@@ -278,7 +280,9 @@ public:
MockNSTargeter singleShardNSTargeter{
nss,
- {MockRange(ShardEndpoint(kShardName1, ChunkVersion(100, 200, OID::gen()), boost::none),
+ {MockRange(ShardEndpoint(kShardName1,
+ ChunkVersion(100, 200, OID::gen(), boost::none /* timestamp */),
+ boost::none),
BSON("x" << MINKEY),
BSON("x" << MAXKEY))}};
};
@@ -348,16 +352,22 @@ TEST_F(BatchWriteExecTest, SingleUpdateTargetsShardWithLet) {
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none)};
+ ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch), boost::none),
+ {MockRange(ShardEndpoint(kShardName1,
+ ChunkVersion(100, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("x" << MINKEY),
BSON("x" << 0)),
- MockRange(ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none),
+ MockRange(ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("x" << 0),
BSON("x" << MAXKEY))});
@@ -432,16 +442,22 @@ TEST_F(BatchWriteExecTest, SingleDeleteTargetsShardWithLet) {
std::vector<ShardEndpoint> targetDelete(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none)};
+ ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch), boost::none),
+ {MockRange(ShardEndpoint(kShardName1,
+ ChunkVersion(100, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("x" << MINKEY),
BSON("x" << 0)),
- MockRange(ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none),
+ MockRange(ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("x" << 0),
BSON("x" << MAXKEY))});
@@ -621,17 +637,25 @@ TEST_F(BatchWriteExecTest, StaleShardVersionReturnedFromBatchWithSingleMultiWrit
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none)};
+ ShardEndpoint(kShardName1,
+ ChunkVersion(100, 200, epoch, boost::none /* timestamp */),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch), boost::none),
+ {MockRange(ShardEndpoint(kShardName1,
+ ChunkVersion(100, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("x" << MINKEY),
BSON("x" << 0)),
- MockRange(ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none),
+ MockRange(ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("x" << 0),
BSON("x" << MAXKEY))});
@@ -665,11 +689,12 @@ TEST_F(BatchWriteExecTest, StaleShardVersionReturnedFromBatchWithSingleMultiWrit
errDetail->setIndex(0);
errDetail->setStatus({ErrorCodes::StaleShardVersion, "Stale shard version"});
errDetail->setErrInfo([&] {
- Status ssvStatus(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch),
- ChunkVersion(105, 200, epoch),
- ShardId(kShardName2)),
- "Stale shard version");
+ Status ssvStatus(
+ StaleConfigInfo(nss,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ ChunkVersion(105, 200, epoch, boost::none /* timestamp */),
+ ShardId(kShardName2)),
+ "Stale shard version");
BSONObjBuilder builder;
ssvStatus.serializeErrorToBSON(&builder);
return builder.obj();
@@ -726,17 +751,25 @@ TEST_F(BatchWriteExecTest,
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none)};
+ ShardEndpoint(kShardName1,
+ ChunkVersion(100, 200, epoch, boost::none /* timestamp */),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch), boost::none),
+ {MockRange(ShardEndpoint(kShardName1,
+ ChunkVersion(100, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("sk" << MINKEY),
BSON("sk" << 10)),
- MockRange(ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none),
+ MockRange(ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("sk" << 10),
BSON("sk" << MAXKEY))});
@@ -770,11 +803,12 @@ TEST_F(BatchWriteExecTest,
errDetail->setIndex(0);
errDetail->setStatus({ErrorCodes::StaleShardVersion, "Stale shard version"});
errDetail->setErrInfo([&] {
- Status ssvStatus(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch),
- ChunkVersion(105, 200, epoch),
- ShardId(kShardName2)),
- "Stale shard version");
+ Status ssvStatus(
+ StaleConfigInfo(nss,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ ChunkVersion(105, 200, epoch, boost::none /* timestamp */),
+ ShardId(kShardName2)),
+ "Stale shard version");
BSONObjBuilder builder;
ssvStatus.serializeErrorToBSON(&builder);
return builder.obj();
@@ -786,11 +820,12 @@ TEST_F(BatchWriteExecTest,
errDetail->setIndex(1);
errDetail->setStatus({ErrorCodes::StaleShardVersion, "Stale shard version"});
errDetail->setErrInfo([&] {
- Status ssvStatus(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch),
- ChunkVersion(105, 200, epoch),
- ShardId(kShardName2)),
- "Stale shard version");
+ Status ssvStatus(
+ StaleConfigInfo(nss,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ ChunkVersion(105, 200, epoch, boost::none /* timestamp */),
+ ShardId(kShardName2)),
+ "Stale shard version");
BSONObjBuilder builder;
ssvStatus.serializeErrorToBSON(&builder);
return builder.obj();
@@ -846,17 +881,25 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1Firs) {
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none)};
+ ShardEndpoint(kShardName1,
+ ChunkVersion(100, 200, epoch, boost::none /* timestamp */),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch), boost::none),
+ {MockRange(ShardEndpoint(kShardName1,
+ ChunkVersion(100, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("sk" << MINKEY),
BSON("sk" << 10)),
- MockRange(ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none),
+ MockRange(ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("sk" << 10),
BSON("sk" << MAXKEY))});
@@ -880,11 +923,12 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1Firs) {
errDetail->setIndex(1);
errDetail->setStatus({ErrorCodes::StaleShardVersion, "Stale shard version"});
errDetail->setErrInfo([&] {
- Status ssvStatus(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch),
- ChunkVersion(105, 200, epoch),
- ShardId(kShardName1)),
- "Stale shard version");
+ Status ssvStatus(
+ StaleConfigInfo(nss,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ ChunkVersion(105, 200, epoch, boost::none /* timestamp */),
+ ShardId(kShardName1)),
+ "Stale shard version");
BSONObjBuilder builder;
ssvStatus.serializeErrorToBSON(&builder);
return builder.obj();
@@ -906,11 +950,12 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1Firs) {
errDetail->setIndex(0);
errDetail->setStatus({ErrorCodes::StaleShardVersion, "Stale shard version"});
errDetail->setErrInfo([&] {
- Status ssvStatus(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch),
- ChunkVersion(105, 200, epoch),
- ShardId(kShardName2)),
- "Stale shard version");
+ Status ssvStatus(
+ StaleConfigInfo(nss,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ ChunkVersion(105, 200, epoch, boost::none /* timestamp */),
+ ShardId(kShardName2)),
+ "Stale shard version");
BSONObjBuilder builder;
ssvStatus.serializeErrorToBSON(&builder);
return builder.obj();
@@ -977,17 +1022,25 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1FirstOK
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none)};
+ ShardEndpoint(kShardName1,
+ ChunkVersion(100, 200, epoch, boost::none /* timestamp */),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch), boost::none),
+ {MockRange(ShardEndpoint(kShardName1,
+ ChunkVersion(100, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("sk" << MINKEY),
BSON("sk" << 10)),
- MockRange(ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none),
+ MockRange(ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("sk" << 10),
BSON("sk" << MAXKEY))});
@@ -1011,11 +1064,12 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1FirstOK
errDetail->setIndex(1);
errDetail->setStatus({ErrorCodes::StaleShardVersion, "Stale shard version"});
errDetail->setErrInfo([&] {
- Status ssvStatus(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch),
- ChunkVersion(105, 200, epoch),
- ShardId(kShardName1)),
- "Stale shard version");
+ Status ssvStatus(
+ StaleConfigInfo(nss,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ ChunkVersion(105, 200, epoch, boost::none /* timestamp */),
+ ShardId(kShardName1)),
+ "Stale shard version");
BSONObjBuilder builder;
ssvStatus.serializeErrorToBSON(&builder);
return builder.obj();
@@ -1037,11 +1091,12 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1FirstOK
errDetail->setIndex(1);
errDetail->setStatus({ErrorCodes::StaleShardVersion, "Stale shard version"});
errDetail->setErrInfo([&] {
- Status ssvStatus(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch),
- ChunkVersion(105, 200, epoch),
- ShardId(kShardName2)),
- "Stale shard version");
+ Status ssvStatus(
+ StaleConfigInfo(nss,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ ChunkVersion(105, 200, epoch, boost::none /* timestamp */),
+ ShardId(kShardName2)),
+ "Stale shard version");
BSONObjBuilder builder;
ssvStatus.serializeErrorToBSON(&builder);
return builder.obj();
@@ -1104,11 +1159,17 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromWriteWithShard1SSVShard2OK)
const BatchItemRef& itemRef) const override {
if (targetAll) {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none)};
+ ShardEndpoint(kShardName1,
+ ChunkVersion(100, 200, epoch, boost::none /* timestamp */),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none)};
} else {
return std::vector{
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none)};
+ ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none)};
}
}
@@ -1117,10 +1178,14 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromWriteWithShard1SSVShard2OK)
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch), boost::none),
+ {MockRange(ShardEndpoint(kShardName1,
+ ChunkVersion(100, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("sk" << MINKEY),
BSON("sk" << 10)),
- MockRange(ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none),
+ MockRange(ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("sk" << 10),
BSON("sk" << MAXKEY))});
@@ -1145,11 +1210,12 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromWriteWithShard1SSVShard2OK)
errDetail->setIndex(0);
errDetail->setStatus({ErrorCodes::StaleShardVersion, "Stale shard version"});
errDetail->setErrInfo([&] {
- Status ssvStatus(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch),
- ChunkVersion(105, 200, epoch),
- ShardId(kShardName1)),
- "Migration happened");
+ Status ssvStatus(
+ StaleConfigInfo(nss,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ ChunkVersion(105, 200, epoch, boost::none /* timestamp */),
+ ShardId(kShardName1)),
+ "Migration happened");
BSONObjBuilder builder;
ssvStatus.serializeErrorToBSON(&builder);
return builder.obj();
@@ -1673,17 +1739,25 @@ TEST_F(BatchWriteExecTargeterErrorTest, TargetedFailedAndErrorResponse) {
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none)};
+ ShardEndpoint(kShardName1,
+ ChunkVersion(100, 200, epoch, boost::none /* timestamp */),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch), boost::none),
+ {MockRange(ShardEndpoint(kShardName1,
+ ChunkVersion(100, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("x" << MINKEY),
BSON("x" << 0)),
- MockRange(ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none),
+ MockRange(ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("x" << 0),
BSON("x" << MAXKEY))});
@@ -1811,17 +1885,25 @@ TEST_F(BatchWriteExecTransactionTargeterErrorTest, TargetedFailedAndErrorRespons
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none)};
+ ShardEndpoint(kShardName1,
+ ChunkVersion(100, 200, epoch, boost::none /* timestamp */),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch), boost::none),
+ {MockRange(ShardEndpoint(kShardName1,
+ ChunkVersion(100, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("x" << MINKEY),
BSON("x" << 0)),
- MockRange(ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none),
+ MockRange(ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("x" << 0),
BSON("x" << MAXKEY))});
@@ -1952,17 +2034,25 @@ TEST_F(BatchWriteExecTransactionMultiShardTest, TargetedSucceededAndErrorRespons
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none)};
+ ShardEndpoint(kShardName1,
+ ChunkVersion(100, 200, epoch, boost::none /* timestamp */),
+ boost::none),
+ ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch), boost::none),
+ {MockRange(ShardEndpoint(kShardName1,
+ ChunkVersion(100, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("x" << MINKEY),
BSON("x" << 0)),
- MockRange(ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch), boost::none),
+ MockRange(ShardEndpoint(kShardName2,
+ ChunkVersion(101, 200, epoch, boost::none /* timestamp */),
+ boost::none),
BSON("x" << 0),
BSON("x" << MAXKEY))});
diff --git a/src/mongo/s/write_ops/batched_command_request_test.cpp b/src/mongo/s/write_ops/batched_command_request_test.cpp
index b438cc2842f..1f200ea272a 100644
--- a/src/mongo/s/write_ops/batched_command_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_request_test.cpp
@@ -73,7 +73,8 @@ TEST(BatchedCommandRequest, InsertWithShardVersion) {
ASSERT_EQ("TestDB.test", insertRequest.getInsertRequest().getNamespace().ns());
ASSERT(insertRequest.hasShardVersion());
- ASSERT_EQ(ChunkVersion(1, 2, epoch).toString(), insertRequest.getShardVersion().toString());
+ ASSERT_EQ(ChunkVersion(1, 2, epoch, boost::none /* timestamp */).toString(),
+ insertRequest.getShardVersion().toString());
}
}
diff --git a/src/mongo/s/write_ops/write_op_test.cpp b/src/mongo/s/write_ops/write_op_test.cpp
index 7e62062cddc..b262e0f4544 100644
--- a/src/mongo/s/write_ops/write_op_test.cpp
+++ b/src/mongo/s/write_ops/write_op_test.cpp
@@ -124,9 +124,12 @@ TEST_F(WriteOpTest, TargetSingle) {
// Multi-write targeting test where our query goes to one shard
TEST_F(WriteOpTest, TargetMultiOneShard) {
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion(10, 0, OID()), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion(20, 0, OID()), boost::none);
- ShardEndpoint endpointC(ShardId("shardB"), ChunkVersion(20, 0, OID()), boost::none);
+ ShardEndpoint endpointA(
+ ShardId("shardA"), ChunkVersion(10, 0, OID(), boost::none /* timestamp */), boost::none);
+ ShardEndpoint endpointB(
+ ShardId("shardB"), ChunkVersion(20, 0, OID(), boost::none /* timestamp */), boost::none);
+ ShardEndpoint endpointC(
+ ShardId("shardB"), ChunkVersion(20, 0, OID(), boost::none /* timestamp */), boost::none);
BatchedCommandRequest request([&] {
write_ops::Delete deleteOp(kNss);
@@ -157,9 +160,12 @@ TEST_F(WriteOpTest, TargetMultiOneShard) {
// Multi-write targeting test where our write goes to more than one shard
TEST_F(WriteOpTest, TargetMultiAllShards) {
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion(10, 0, OID()), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion(20, 0, OID()), boost::none);
- ShardEndpoint endpointC(ShardId("shardB"), ChunkVersion(20, 0, OID()), boost::none);
+ ShardEndpoint endpointA(
+ ShardId("shardA"), ChunkVersion(10, 0, OID(), boost::none /* timestamp */), boost::none);
+ ShardEndpoint endpointB(
+ ShardId("shardB"), ChunkVersion(20, 0, OID(), boost::none /* timestamp */), boost::none);
+ ShardEndpoint endpointC(
+ ShardId("shardB"), ChunkVersion(20, 0, OID(), boost::none /* timestamp */), boost::none);
BatchedCommandRequest request([&] {
write_ops::Delete deleteOp(kNss);
@@ -197,8 +203,10 @@ TEST_F(WriteOpTest, TargetMultiAllShards) {
}
TEST_F(WriteOpTest, TargetMultiAllShardsAndErrorSingleChildOp) {
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion(10, 0, OID()), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion(20, 0, OID()), boost::none);
+ ShardEndpoint endpointA(
+ ShardId("shardA"), ChunkVersion(10, 0, OID(), boost::none /* timestamp */), boost::none);
+ ShardEndpoint endpointB(
+ ShardId("shardB"), ChunkVersion(20, 0, OID(), boost::none /* timestamp */), boost::none);
BatchedCommandRequest request([&] {
write_ops::Delete deleteOp(kNss);
@@ -347,9 +355,12 @@ private:
};
TEST_F(WriteOpTransactionTest, TargetMultiDoesNotTargetAllShards) {
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion(10, 0, OID()), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion(20, 0, OID()), boost::none);
- ShardEndpoint endpointC(ShardId("shardB"), ChunkVersion(20, 0, OID()), boost::none);
+ ShardEndpoint endpointA(
+ ShardId("shardA"), ChunkVersion(10, 0, OID(), boost::none /* timestamp */), boost::none);
+ ShardEndpoint endpointB(
+ ShardId("shardB"), ChunkVersion(20, 0, OID(), boost::none /* timestamp */), boost::none);
+ ShardEndpoint endpointC(
+ ShardId("shardB"), ChunkVersion(20, 0, OID(), boost::none /* timestamp */), boost::none);
BatchedCommandRequest request([&] {
write_ops::Delete deleteOp(kNss);
@@ -385,8 +396,10 @@ TEST_F(WriteOpTransactionTest, TargetMultiDoesNotTargetAllShards) {
}
TEST_F(WriteOpTransactionTest, TargetMultiAllShardsAndErrorSingleChildOp) {
- ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion(10, 0, OID()), boost::none);
- ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion(20, 0, OID()), boost::none);
+ ShardEndpoint endpointA(
+ ShardId("shardA"), ChunkVersion(10, 0, OID(), boost::none /* timestamp */), boost::none);
+ ShardEndpoint endpointB(
+ ShardId("shardB"), ChunkVersion(20, 0, OID(), boost::none /* timestamp */), boost::none);
BatchedCommandRequest request([&] {
write_ops::Delete deleteOp(kNss);