author      Allison Easton <allison.easton@mongodb.com>    2021-09-21 13:39:28 +0000
committer   Evergreen Agent <no-reply@evergreen.mongodb.com>    2021-09-21 14:39:04 +0000
commit      9e1d37df7bf5bb4c8312f155bd671214f75ea296 (patch)
tree        c0895615554be30d482ec9923af19ea75b9745ae /src/mongo/db
parent      bfef41e47abf95ec8f8114552d44df6c58409c9c (diff)
download    mongo-9e1d37df7bf5bb4c8312f155bd671214f75ea296.tar.gz
SERVER-52847 Make timestamp required in CollectionType and ShardCollectionType IDL
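With the timestamp required, the IDL-generated accessors stop returning an optional, so callers no longer need to check or dereference it before use. A declaration-level sketch of the intended effect, assuming typical IDL-generated getter signatures (the generated headers are not part of this diff):

    // Before (sketch): callers had to test the optional before use.
    boost::optional<Timestamp> getTimestamp() const;

    // After (sketch): a timestamp is always present.
    const Timestamp& getTimestamp() const;

This is why the diff below deletes the invariant(coll.getTimestamp()) assertions and threads a concrete Timestamp through every test call site.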
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp | 5
-rw-r--r--  src/mongo/db/pipeline/process_interface/mongos_process_interface_test.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp | 4
-rw-r--r--  src/mongo/db/pipeline/sharded_union_test.cpp | 16
-rw-r--r--  src/mongo/db/s/active_migrations_registry_test.cpp | 2
-rw-r--r--  src/mongo/db/s/balancer/balance_stats_test.cpp | 7
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy_test.cpp | 2
-rw-r--r--  src/mongo/db/s/balancer/scoped_migration_request_test.cpp | 2
-rw-r--r--  src/mongo/db/s/balancer/type_migration_test.cpp | 14
-rw-r--r--  src/mongo/db/s/collection_metadata_filtering_test.cpp | 4
-rw-r--r--  src/mongo/db/s/collection_metadata_test.cpp | 4
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime_test.cpp | 24
-rw-r--r--  src/mongo/db/s/config/config_server_test_fixture.cpp | 21
-rw-r--r--  src/mongo/db/s/config/config_server_test_fixture.h | 4
-rw-r--r--  src/mongo/db/s/config/initial_split_policy_test.cpp | 2
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.h | 2
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp | 17
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp | 50
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp | 18
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp | 32
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp | 13
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp | 16
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp | 14
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp | 29
-rw-r--r--  src/mongo/db/s/metadata_manager_test.cpp | 5
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp | 7
-rw-r--r--  src/mongo/db/s/migration_util_test.cpp | 32
-rw-r--r--  src/mongo/db/s/op_observer_sharding_test.cpp | 8
-rw-r--r--  src/mongo/db/s/range_deletion_util_test.cpp | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_service.cpp | 1
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp | 89
-rw-r--r--  src/mongo/db/s/resharding/resharding_data_replication_test.cpp | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_recipient_common_test.h | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp | 8
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp | 8
-rw-r--r--  src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp | 72
-rw-r--r--  src/mongo/db/s/resharding/resharding_recipient_service_test.cpp | 4
-rw-r--r--  src/mongo/db/s/resharding_destined_recipient_test.cpp | 26
-rw-r--r--  src/mongo/db/s/shard_metadata_util.cpp | 2
-rw-r--r--  src/mongo/db/s/shard_metadata_util.h | 2
-rw-r--r--  src/mongo/db/s/shard_metadata_util_test.cpp | 7
-rw-r--r--  src/mongo/db/s/shard_server_catalog_cache_loader.cpp | 2
-rw-r--r--  src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp | 12
-rw-r--r--  src/mongo/db/s/sharding_ddl_util_test.cpp | 4
-rw-r--r--  src/mongo/db/s/type_shard_collection.cpp | 17
-rw-r--r--  src/mongo/db/s/type_shard_collection.h | 6
-rw-r--r--  src/mongo/db/s/type_shard_collection.idl | 6
-rw-r--r--  src/mongo/db/s/type_shard_collection_test.cpp | 16
49 files changed, 341 insertions, 313 deletions
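The pattern repeated across these 49 files is mechanical: boost::none /* timestamp */ arguments become concrete Timestamp values, boost::optional<Timestamp> parameters become const Timestamp&, and CollectionType construction gains a Timestamp argument. A minimal, self-contained sketch of that call-site migration (the stand-in types here are hypothetical; the real ones live under src/mongo/):

    #include <cassert>
    #include <cstdint>

    struct Timestamp {  // stand-in for mongo::Timestamp
        std::uint32_t secs = 0;
        std::uint32_t inc = 0;
    };

    struct OID {};  // stand-in for mongo::OID

    // Before this commit the last parameter was boost::optional<Timestamp>
    // and call sites passed boost::none; now a concrete value is required.
    struct ChunkVersion {
        ChunkVersion(int major, int minor, OID epoch, Timestamp ts)
            : _major(major), _minor(minor), _epoch(epoch), _ts(ts) {}
        int _major;
        int _minor;
        OID _epoch;
        Timestamp _ts;
    };

    int main() {
        const OID epoch{};
        // Old: ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
        const Timestamp timestamp{1, 1};  // new: always supply a timestamp
        ChunkVersion version(1, 0, epoch, timestamp);
        assert(version._ts.secs == 1);
        return 0;
    }

The same substitution is why the now-redundant invariant(coll.getTimestamp()) checks disappear from sharding_catalog_manager_chunk_operations.cpp and the test fixtures below.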
diff --git a/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp b/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
index 932645f78d1..8c9528e0e09 100644
--- a/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
+++ b/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
@@ -219,9 +219,10 @@ TEST_F(DispatchShardPipelineTest, WrappedDispatchDoesRetryOnStaleConfigError) {
// Mock the expected config server queries.
const OID epoch = OID::gen();
const UUID uuid = UUID::gen();
+ const Timestamp timestamp(1);
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
- ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
+ ChunkVersion version(1, 0, epoch, timestamp);
ChunkType chunk1(
uuid, {shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)}, version, {"0"});
@@ -233,7 +234,7 @@ TEST_F(DispatchShardPipelineTest, WrappedDispatchDoesRetryOnStaleConfigError) {
chunk2.setName(OID::gen());
version.incMinor();
expectCollectionAndChunksAggregation(
- kTestAggregateNss, epoch, uuid, shardKeyPattern, {chunk1, chunk2});
+ kTestAggregateNss, epoch, timestamp, uuid, shardKeyPattern, {chunk1, chunk2});
// That error should be retried, but only the one on that shard.
onCommand([&](const executor::RemoteCommandRequest& request) {
diff --git a/src/mongo/db/pipeline/process_interface/mongos_process_interface_test.cpp b/src/mongo/db/pipeline/process_interface/mongos_process_interface_test.cpp
index dbf4f571c1c..d57abe6b45f 100644
--- a/src/mongo/db/pipeline/process_interface/mongos_process_interface_test.cpp
+++ b/src/mongo/db/pipeline/process_interface/mongos_process_interface_test.cpp
@@ -63,7 +63,7 @@ public:
TEST_F(MongosProcessInterfaceTest, FailsToEnsureFieldsUniqueIfTargetCollectionVersionIsSpecified) {
auto expCtx = getExpCtx();
auto targetCollectionVersion =
- boost::make_optional(ChunkVersion(0, 0, OID::gen(), boost::none /* timestamp */));
+ boost::make_optional(ChunkVersion(0, 0, OID::gen(), Timestamp()));
auto processInterface = makeProcessInterface();
ASSERT_THROWS_CODE(processInterface->ensureFieldsUniqueOrResolveDocumentKey(
diff --git a/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp b/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp
index 05f5adc6976..dd4a80daab5 100644
--- a/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp
+++ b/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp
@@ -67,7 +67,7 @@ TEST_F(ProcessInterfaceStandaloneTest,
FailsToEnsureFieldsUniqueIfTargetCollectionVersionIsSpecifiedOnMongos) {
auto expCtx = getExpCtx();
auto targetCollectionVersion =
- boost::make_optional(ChunkVersion(0, 0, OID::gen(), boost::none /* timestamp */));
+ boost::make_optional(ChunkVersion(0, 0, OID::gen(), Timestamp()));
auto processInterface = makeProcessInterface();
// Test that 'targetCollectionVersion' is not accepted if not from mongos.
@@ -90,7 +90,7 @@ TEST_F(ProcessInterfaceStandaloneTest,
TEST_F(ProcessInterfaceStandaloneTest, FailsToEnsureFieldsUniqueIfJoinFieldsAreNotSentFromMongos) {
auto expCtx = getExpCtx();
auto targetCollectionVersion =
- boost::make_optional(ChunkVersion(0, 0, OID::gen(), boost::none /* timestamp */));
+ boost::make_optional(ChunkVersion(0, 0, OID::gen(), Timestamp()));
auto processInterface = makeProcessInterface();
expCtx->fromMongos = true;
diff --git a/src/mongo/db/pipeline/sharded_union_test.cpp b/src/mongo/db/pipeline/sharded_union_test.cpp
index 395990e5309..e27be5c5477 100644
--- a/src/mongo/db/pipeline/sharded_union_test.cpp
+++ b/src/mongo/db/pipeline/sharded_union_test.cpp
@@ -168,9 +168,10 @@ TEST_F(ShardedUnionTest, RetriesSubPipelineOnStaleConfigError) {
// Mock the expected config server queries.
const OID epoch = OID::gen();
const UUID uuid = UUID::gen();
+ const Timestamp timestamp;
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
- ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
+ ChunkVersion version(1, 0, epoch, timestamp);
ChunkType chunk1(*cm.getUUID(),
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
@@ -187,7 +188,7 @@ TEST_F(ShardedUnionTest, RetriesSubPipelineOnStaleConfigError) {
version.incMinor();
expectCollectionAndChunksAggregation(
- kTestAggregateNss, epoch, uuid, shardKeyPattern, {chunk1, chunk2});
+ kTestAggregateNss, epoch, timestamp, uuid, shardKeyPattern, {chunk1, chunk2});
// That error should be retried, but only the one on that shard.
onCommand([&](const executor::RemoteCommandRequest& request) {
@@ -246,9 +247,10 @@ TEST_F(ShardedUnionTest, CorrectlySplitsSubPipelineIfRefreshedDistributionRequir
// created and moved to the first shard.
const OID epoch = OID::gen();
const UUID uuid = UUID::gen();
+ const Timestamp timestamp;
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
- ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
+ ChunkVersion version(1, 0, epoch, timestamp);
ChunkType chunk1(*cm.getUUID(),
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
@@ -269,7 +271,7 @@ TEST_F(ShardedUnionTest, CorrectlySplitsSubPipelineIfRefreshedDistributionRequir
chunk3.setName(OID::gen());
expectCollectionAndChunksAggregation(
- kTestAggregateNss, epoch, uuid, shardKeyPattern, {chunk1, chunk2, chunk3});
+ kTestAggregateNss, epoch, timestamp, uuid, shardKeyPattern, {chunk1, chunk2, chunk3});
// That error should be retried, this time two shards.
onCommand([&](const executor::RemoteCommandRequest& request) {
@@ -335,8 +337,9 @@ TEST_F(ShardedUnionTest, AvoidsSplittingSubPipelineIfRefreshedDistributionDoesNo
// the same shard.
const OID epoch = OID::gen();
const UUID uuid = UUID::gen();
+ const Timestamp timestamp;
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
- ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
+ ChunkVersion version(1, 0, epoch, timestamp);
ChunkType chunk1(
*cm.getUUID(),
{shardKeyPattern.getKeyPattern().globalMin(), shardKeyPattern.getKeyPattern().globalMax()},
@@ -344,7 +347,8 @@ TEST_F(ShardedUnionTest, AvoidsSplittingSubPipelineIfRefreshedDistributionDoesNo
{shards[0].getName()});
chunk1.setName(OID::gen());
- expectCollectionAndChunksAggregation(kTestAggregateNss, epoch, uuid, shardKeyPattern, {chunk1});
+ expectCollectionAndChunksAggregation(
+ kTestAggregateNss, epoch, timestamp, uuid, shardKeyPattern, {chunk1});
// That error should be retried, this time targeting only one shard.
onCommand([&](const executor::RemoteCommandRequest& request) {
diff --git a/src/mongo/db/s/active_migrations_registry_test.cpp b/src/mongo/db/s/active_migrations_registry_test.cpp
index bfe59908b24..28d0d71f27d 100644
--- a/src/mongo/db/s/active_migrations_registry_test.cpp
+++ b/src/mongo/db/s/active_migrations_registry_test.cpp
@@ -59,7 +59,7 @@ protected:
};
MoveChunkRequest createMoveChunkRequest(const NamespaceString& nss) {
- const ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
+ const ChunkVersion chunkVersion(1, 2, OID::gen(), Timestamp());
BSONObjBuilder builder;
MoveChunkRequest::appendAsCommand(
diff --git a/src/mongo/db/s/balancer/balance_stats_test.cpp b/src/mongo/db/s/balancer/balance_stats_test.cpp
index ed1a430126f..a4c2f2b00bf 100644
--- a/src/mongo/db/s/balancer/balance_stats_test.cpp
+++ b/src/mongo/db/s/balancer/balance_stats_test.cpp
@@ -58,7 +58,7 @@ public:
{}, // collator
false, // unique
_epoch,
- boost::none, // timestamp
+ _timestamp, // timestamp
boost::none, // time series fields
boost::none, // resharding fields
boost::none, // chunk size bytes
@@ -75,9 +75,10 @@ private:
const NamespaceString _nss{"foo.bar"};
const UUID _uuid = UUID::gen();
const OID _epoch{OID::gen()};
+ const Timestamp _timestamp{Timestamp(1, 1)};
const ShardId _shardPrimary{"dummyShardPrimary"};
- const DatabaseVersion _dbVersion{UUID::gen(), Timestamp()};
- ChunkVersion _nextVersion{1, 0, _epoch, boost::none};
+ const DatabaseVersion _dbVersion{UUID::gen(), _timestamp};
+ ChunkVersion _nextVersion{1, 0, _epoch, _timestamp};
};
TEST_F(BalanceStatsTest, SingleChunkNoZones) {
diff --git a/src/mongo/db/s/balancer/balancer_policy_test.cpp b/src/mongo/db/s/balancer/balancer_policy_test.cpp
index 43b340f2906..077e1a731f5 100644
--- a/src/mongo/db/s/balancer/balancer_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy_test.cpp
@@ -77,7 +77,7 @@ std::pair<ShardStatisticsVector, ShardToChunksMap> generateCluster(
int64_t currentChunk = 0;
- ChunkVersion chunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
+ ChunkVersion chunkVersion(1, 0, OID::gen(), Timestamp());
const UUID uuid = UUID::gen();
const KeyPattern shardKeyPattern(BSON("x" << 1));
diff --git a/src/mongo/db/s/balancer/scoped_migration_request_test.cpp b/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
index db4ddcf77f5..44ad31f3820 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
@@ -102,7 +102,7 @@ ScopedMigrationRequest ScopedMigrationRequestTest::makeScopedMigrationRequest(
MigrateInfo ScopedMigrationRequestTest::makeMigrateInfo() {
const auto collUuid = UUID::gen();
- const ChunkVersion kChunkVersion{1, 2, OID::gen(), boost::none /* timestamp */};
+ const ChunkVersion kChunkVersion{1, 2, OID::gen(), Timestamp()};
BSONObjBuilder chunkBuilder;
collUuid.appendToBuilder(&chunkBuilder, ChunkType::collectionUUID.name());
diff --git a/src/mongo/db/s/balancer/type_migration_test.cpp b/src/mongo/db/s/balancer/type_migration_test.cpp
index b8af0eaa339..0b013a750b5 100644
--- a/src/mongo/db/s/balancer/type_migration_test.cpp
+++ b/src/mongo/db/s/balancer/type_migration_test.cpp
@@ -50,7 +50,7 @@ const bool kWaitForDelete{true};
TEST(MigrationTypeTest, ConvertFromMigrationInfo) {
const auto collUuid = UUID::gen();
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(1, 1);
const ChunkVersion version(1, 2, collEpoch, collTimestamp);
BSONObjBuilder chunkBuilder;
@@ -88,7 +88,7 @@ TEST(MigrationTypeTest, ConvertFromMigrationInfo) {
}
TEST(MigrationTypeTest, FromAndToBSON) {
- const ChunkVersion version(1, 2, OID::gen(), boost::none /* timestamp */);
+ const ChunkVersion version(1, 2, OID::gen(), Timestamp());
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -108,7 +108,7 @@ TEST(MigrationTypeTest, FromAndToBSON) {
}
TEST(MigrationTypeTest, MissingRequiredNamespaceField) {
- const ChunkVersion version(1, 2, OID::gen(), boost::none /* timestamp */);
+ const ChunkVersion version(1, 2, OID::gen(), Timestamp());
BSONObjBuilder builder;
builder.append(MigrationType::min(), kMin);
@@ -125,7 +125,7 @@ TEST(MigrationTypeTest, MissingRequiredNamespaceField) {
}
TEST(MigrationTypeTest, MissingRequiredMinField) {
- const ChunkVersion version(1, 2, OID::gen(), boost::none /* timestamp */);
+ const ChunkVersion version(1, 2, OID::gen(), Timestamp());
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -142,7 +142,7 @@ TEST(MigrationTypeTest, MissingRequiredMinField) {
}
TEST(MigrationTypeTest, MissingRequiredMaxField) {
- const ChunkVersion version(1, 2, OID::gen(), boost::none /* timestamp */);
+ const ChunkVersion version(1, 2, OID::gen(), Timestamp());
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -159,7 +159,7 @@ TEST(MigrationTypeTest, MissingRequiredMaxField) {
}
TEST(MigrationTypeTest, MissingRequiredFromShardField) {
- const ChunkVersion version(1, 2, OID::gen(), boost::none /* timestamp */);
+ const ChunkVersion version(1, 2, OID::gen(), Timestamp());
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -176,7 +176,7 @@ TEST(MigrationTypeTest, MissingRequiredFromShardField) {
}
TEST(MigrationTypeTest, MissingRequiredToShardField) {
- const ChunkVersion version(1, 2, OID::gen(), boost::none /* timestamp */);
+ const ChunkVersion version(1, 2, OID::gen(), Timestamp());
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp
index 231808a7b5c..a21b5fd1a28 100644
--- a/src/mongo/db/s/collection_metadata_filtering_test.cpp
+++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp
@@ -73,13 +73,13 @@ protected:
nullptr,
false,
epoch,
- boost::none /* timestamp */,
+ Timestamp(),
timeseriesFields,
boost::none,
boost::none,
true,
[&] {
- ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
+ ChunkVersion version(1, 0, epoch, Timestamp());
ChunkType chunk1(uuid,
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << -100)},
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index 7d01b0fa39f..139bb5b40b5 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -62,7 +62,7 @@ CollectionMetadata makeCollectionMetadataImpl(
std::vector<ChunkType> allChunks;
auto nextMinKey = shardKeyPattern.globalMin();
- ChunkVersion version{1, 0, epoch, boost::none /* timestamp */};
+ ChunkVersion version{1, 0, epoch, timestamp};
for (const auto& myNextChunk : thisShardsChunks) {
if (SimpleBSONObjComparator::kInstance.evaluate(nextMinKey < myNextChunk.first)) {
// Need to add a chunk to the other shard from nextMinKey to myNextChunk.first.
@@ -94,7 +94,7 @@ CollectionMetadata makeCollectionMetadataImpl(
nullptr,
false,
epoch,
- boost::none /* timestamp */,
+ timestamp,
boost::none /* timeseriesFields */,
std::move(reshardingFields),
boost::none /* chunkSizeBytes */,
diff --git a/src/mongo/db/s/collection_sharding_runtime_test.cpp b/src/mongo/db/s/collection_sharding_runtime_test.cpp
index 54bb235b299..d1e5a6ff784 100644
--- a/src/mongo/db/s/collection_sharding_runtime_test.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime_test.cpp
@@ -56,13 +56,12 @@ protected:
static CollectionMetadata makeShardedMetadata(OperationContext* opCtx,
UUID uuid = UUID::gen()) {
const OID epoch = OID::gen();
+ const Timestamp timestamp;
auto range = ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY));
- auto chunk = ChunkType(uuid,
- std::move(range),
- ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
- ShardId("other"));
+ auto chunk = ChunkType(
+ uuid, std::move(range), ChunkVersion(1, 0, epoch, timestamp), ShardId("other"));
ChunkManager cm(ShardId("0"),
- DatabaseVersion(UUID::gen(), Timestamp()),
+ DatabaseVersion(UUID::gen(), timestamp),
makeStandaloneRoutingTableHistory(
RoutingTableHistory::makeNew(kTestNss,
uuid,
@@ -70,7 +69,7 @@ protected:
nullptr,
false,
epoch,
- boost::none /* timestamp */,
+ timestamp,
boost::none /* timeseriesFields */,
boost::none,
boost::none /* chunkSizeBytes */,
@@ -265,8 +264,7 @@ public:
return std::make_unique<StaticCatalogClient>(kShardList);
}
- CollectionType createCollection(const OID& epoch,
- boost::optional<Timestamp> timestamp = boost::none) {
+ CollectionType createCollection(const OID& epoch, const Timestamp& timestamp) {
CollectionType res(kNss, epoch, timestamp, Date_t::now(), kCollUUID);
res.setKeyPattern(BSON(kShardKey << 1));
res.setUnique(false);
@@ -276,7 +274,7 @@ public:
std::vector<ChunkType> createChunks(const OID& epoch,
const UUID& uuid,
- boost::optional<Timestamp> timestamp = boost::none) {
+ const Timestamp& timestamp) {
auto range1 = ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << 5));
ChunkType chunk1(
uuid, range1, ChunkVersion(1, 0, epoch, timestamp), kShardList[0].getName());
@@ -302,8 +300,8 @@ TEST_F(CollectionShardingRuntimeTestWithMockedLoader,
const auto epoch = OID::gen();
const Timestamp timestamp(42);
- const auto coll = createCollection(epoch);
- const auto chunks = createChunks(epoch, coll.getUuid());
+ const auto coll = createCollection(epoch, timestamp);
+ const auto chunks = createChunks(epoch, coll.getUuid(), timestamp);
const auto timestampedColl = createCollection(epoch, timestamp);
const auto timestampedChunks = createChunks(epoch, timestampedColl.getUuid(), timestamp);
@@ -340,8 +338,8 @@ TEST_F(CollectionShardingRuntimeTestWithMockedLoader,
const auto epoch = OID::gen();
const Timestamp timestamp(42);
- const auto coll = createCollection(epoch);
- const auto chunks = createChunks(epoch, coll.getUuid());
+ const auto coll = createCollection(epoch, timestamp);
+ const auto chunks = createChunks(epoch, coll.getUuid(), timestamp);
const auto collVersion = chunks.back().getVersion();
const auto timestampedColl = createCollection(epoch, timestamp);
diff --git a/src/mongo/db/s/config/config_server_test_fixture.cpp b/src/mongo/db/s/config/config_server_test_fixture.cpp
index f6b6566ef99..3144add6236 100644
--- a/src/mongo/db/s/config/config_server_test_fixture.cpp
+++ b/src/mongo/db/s/config/config_server_test_fixture.cpp
@@ -342,12 +342,11 @@ void ConfigServerTestFixture::setupCollection(const NamespaceString& nss,
}
}
-StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(
- OperationContext* opCtx,
- const UUID& uuid,
- const BSONObj& minKey,
- const OID& collEpoch,
- const boost::optional<Timestamp>& collTimestamp) {
+StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(OperationContext* opCtx,
+ const UUID& uuid,
+ const BSONObj& minKey,
+ const OID& collEpoch,
+ const Timestamp& collTimestamp) {
const auto query = BSON(ChunkType::collectionUUID() << uuid << ChunkType::min(minKey));
auto doc = findOneOnConfigCollection(opCtx, ChunkType::ConfigNS, query);
@@ -357,11 +356,10 @@ StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(
return ChunkType::fromConfigBSON(doc.getValue(), collEpoch, collTimestamp);
}
-StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(
- OperationContext* opCtx,
- const BSONObj& minKey,
- const OID& collEpoch,
- const boost::optional<Timestamp>& collTimestamp) {
+StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(OperationContext* opCtx,
+ const BSONObj& minKey,
+ const OID& collEpoch,
+ const Timestamp& collTimestamp) {
auto doc = findOneOnConfigCollection(opCtx, ChunkType::ConfigNS, BSON(ChunkType::min(minKey)));
if (!doc.isOK())
return doc.getStatus();
@@ -378,7 +376,6 @@ StatusWith<ChunkVersion> ConfigServerTestFixture::getCollectionVersion(Operation
const CollectionType coll(collectionDoc.getValue());
- invariant(coll.getTimestamp());
auto chunkDoc =
findOneOnConfigCollection(opCtx,
ChunkType::ConfigNS,
diff --git a/src/mongo/db/s/config/config_server_test_fixture.h b/src/mongo/db/s/config/config_server_test_fixture.h
index 624cf39ab5d..5e00755a652 100644
--- a/src/mongo/db/s/config/config_server_test_fixture.h
+++ b/src/mongo/db/s/config/config_server_test_fixture.h
@@ -114,7 +114,7 @@ protected:
const UUID& uuid,
const BSONObj& minKey,
const OID& collEpoch,
- const boost::optional<Timestamp>& collTimestamp);
+ const Timestamp& collTimestamp);
/**
* Retrieves the chunk document <minKey> from the config server.
@@ -125,7 +125,7 @@ protected:
StatusWith<ChunkType> getChunkDoc(OperationContext* opCtx,
const BSONObj& minKey,
const OID& collEpoch,
- const boost::optional<Timestamp>& collTimestamp);
+ const Timestamp& collTimestamp);
/**
* Returns the collection version.
diff --git a/src/mongo/db/s/config/initial_split_policy_test.cpp b/src/mongo/db/s/config/initial_split_policy_test.cpp
index 0555915be5b..ec871090f73 100644
--- a/src/mongo/db/s/config/initial_split_policy_test.cpp
+++ b/src/mongo/db/s/config/initial_split_policy_test.cpp
@@ -206,7 +206,7 @@ public:
std::vector<ChunkType> chunks;
for (unsigned long i = 0; i < chunkRanges.size(); ++i) {
- ChunkVersion version(1, 0, OID::gen(), boost::none /* timestamp */);
+ ChunkVersion version(1, 0, OID::gen(), Timestamp());
ChunkType chunk(_uuid, chunkRanges[i], version, shardIds[i]);
chunk.setHistory({ChunkHistory(timeStamp, shardIds[i])});
chunks.push_back(chunk);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index 733b0164171..c23f6fc5a54 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -576,7 +576,7 @@ private:
StatusWith<ChunkType> _findChunkOnConfig(OperationContext* opCtx,
const UUID& uuid,
const OID& epoch,
- const boost::optional<Timestamp>& timestamp,
+ const Timestamp& timestamp,
const BSONObj& key);
/**
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp
index ab0a6d3a182..4c49d7e5943 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp
@@ -61,7 +61,8 @@ public:
setupShards({shard});
- CollectionType shardedCollection(shardedNS(), OID::gen(), Date_t::now(), UUID::gen());
+ CollectionType shardedCollection(
+ shardedNS(), OID::gen(), Timestamp(), Date_t::now(), UUID::gen());
shardedCollection.setKeyPattern(BSON("x" << 1));
ASSERT_OK(insertToConfigCollection(
@@ -259,7 +260,7 @@ TEST_F(AssignKeyRangeToZoneTestFixture, RemoveZoneWithDollarPrefixedShardKeysSho
TEST_F(AssignKeyRangeToZoneTestFixture, MinThatIsAShardKeyPrefixShouldConvertToFullShardKey) {
NamespaceString ns("compound.shard");
- CollectionType shardedCollection(ns, OID::gen(), Date_t::now(), UUID::gen());
+ CollectionType shardedCollection(ns, OID::gen(), Timestamp(), Date_t::now(), UUID::gen());
shardedCollection.setKeyPattern(BSON("x" << 1 << "y" << 1));
ASSERT_OK(insertToConfigCollection(
@@ -276,7 +277,7 @@ TEST_F(AssignKeyRangeToZoneTestFixture, MinThatIsAShardKeyPrefixShouldConvertToF
TEST_F(AssignKeyRangeToZoneTestFixture, MaxThatIsAShardKeyPrefixShouldConvertToFullShardKey) {
NamespaceString ns("compound.shard");
- CollectionType shardedCollection(ns, OID::gen(), Date_t::now(), UUID::gen());
+ CollectionType shardedCollection(ns, OID::gen(), Timestamp(), Date_t::now(), UUID::gen());
shardedCollection.setKeyPattern(BSON("x" << 1 << "y" << 1));
ASSERT_OK(insertToConfigCollection(
@@ -328,7 +329,7 @@ TEST_F(AssignKeyRangeToZoneTestFixture, MinMaxThatIsNotAShardKeyPrefixShouldFail
TEST_F(AssignKeyRangeToZoneTestFixture, MinMaxThatIsAShardKeyPrefixShouldSucceed) {
NamespaceString ns("compound.shard");
- CollectionType shardedCollection(ns, OID::gen(), Date_t::now(), UUID::gen());
+ CollectionType shardedCollection(ns, OID::gen(), Timestamp(), Date_t::now(), UUID::gen());
shardedCollection.setKeyPattern(BSON("x" << 1 << "y" << 1));
ASSERT_OK(insertToConfigCollection(
@@ -371,7 +372,7 @@ TEST_F(AssignKeyRangeToZoneTestFixture, TimeseriesCollMustHaveTimeKeyRangeMinKey
const std::string controlTimeField =
timeseries::kControlMinFieldNamePrefix.toString() + timeField;
const TimeseriesOptions timeseriesOptions(timeField.toString());
- CollectionType shardedCollection(ns, OID::gen(), Date_t::now(), UUID::gen());
+ CollectionType shardedCollection(ns, OID::gen(), Timestamp(), Date_t::now(), UUID::gen());
TypeCollectionTimeseriesFields timeseriesFields;
timeseriesFields.setTimeseriesOptions(timeseriesOptions);
shardedCollection.setTimeseriesFields(timeseriesFields);
@@ -522,7 +523,7 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, NewRangeOverlappingInsideExistingShoul
*/
TEST_F(AssignKeyRangeWithOneRangeFixture, NewRangeOverlappingWithDifferentNSShouldSucceed) {
CollectionType shardedCollection(
- NamespaceString("other.coll"), OID::gen(), Date_t::now(), UUID::gen());
+ NamespaceString("other.coll"), OID::gen(), Timestamp(), Date_t::now(), UUID::gen());
shardedCollection.setKeyPattern(BSON("x" << 1));
ASSERT_OK(insertToConfigCollection(
@@ -742,7 +743,7 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveWithInvalidMaxShardKeyShouldFail
TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveWithPartialMinPrefixShouldRemoveRange) {
NamespaceString ns("compound.shard");
- CollectionType shardedCollection(ns, OID::gen(), Date_t::now(), UUID::gen());
+ CollectionType shardedCollection(ns, OID::gen(), Timestamp(), Date_t::now(), UUID::gen());
shardedCollection.setKeyPattern(BSON("x" << 1 << "y" << 1));
ASSERT_OK(insertToConfigCollection(
@@ -765,7 +766,7 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveWithPartialMinPrefixShouldRemove
TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveWithPartialMaxPrefixShouldRemoveRange) {
NamespaceString ns("compound.shard");
- CollectionType shardedCollection(ns, OID::gen(), Date_t::now(), UUID::gen());
+ CollectionType shardedCollection(ns, OID::gen(), Timestamp(), Date_t::now(), UUID::gen());
shardedCollection.setKeyPattern(BSON("x" << 1 << "y" << 1));
ASSERT_OK(insertToConfigCollection(
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index aa0abe9b814..2ac8d4dac46 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -138,18 +138,14 @@ BSONArray buildMergeChunksTransactionPrecond(const std::vector<ChunkType>& chunk
const ChunkVersion& collVersion) {
BSONArrayBuilder preCond;
- const bool collHasTimestamp = (bool)collVersion.getTimestamp();
- invariant(collHasTimestamp);
for (const auto& chunk : chunksToMerge) {
BSONObj query = BSON(ChunkType::min(chunk.getMin())
<< ChunkType::max(chunk.getMax()) << ChunkType::collectionUUID()
<< chunk.getCollectionUUID());
- const auto collectionIdentityMatchCondition = collHasTimestamp
- ? BSON(ChunkType::collectionUUID()
- << chunk.getCollectionUUID() << ChunkType::shard(chunk.getShard().toString()))
- : BSON(ChunkType::epoch(collVersion.epoch())
- << ChunkType::shard(chunk.getShard().toString()));
+ const auto collectionIdentityMatchCondition =
+ BSON(ChunkType::collectionUUID()
+ << chunk.getCollectionUUID() << ChunkType::shard(chunk.getShard().toString()));
BSONObjBuilder b;
b.append("ns", ChunkType::ConfigNS.ns());
@@ -167,13 +163,12 @@ BSONArray buildMergeChunksTransactionPrecond(const std::vector<ChunkType>& chunk
StatusWith<ChunkType> getCurrentChunk(OperationContext* opCtx,
const UUID& uuid,
const OID& epoch,
- const boost::optional<Timestamp>& timestamp,
+ const Timestamp& timestamp,
const ChunkType& requestedChunk) {
uassert(4683300,
"Config server rejecting commitChunkMigration request that does not have a "
"ChunkVersion",
- requestedChunk.isVersionSet() && requestedChunk.getVersion().isSet() &&
- requestedChunk.getVersion().epoch().isSet());
+ requestedChunk.isVersionSet() && requestedChunk.getVersion().isSet());
BSONObj chunkQuery =
BSON(ChunkType::min() << requestedChunk.getMin() << ChunkType::max()
@@ -259,7 +254,7 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
boost::optional<ChunkType> getControlChunkForMigrate(OperationContext* opCtx,
const UUID& uuid,
const OID& epoch,
- const boost::optional<Timestamp>& timestamp,
+ const Timestamp& timestamp,
const ChunkType& migratedChunk,
const ShardId& fromShard) {
auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
@@ -327,7 +322,6 @@ StatusWith<ChunkVersion> getCollectionVersion(OperationContext* opCtx, const Nam
}
const CollectionType coll(findCollResponse.getValue().docs[0]);
- invariant(coll.getTimestamp());
const auto chunksQuery = BSON(ChunkType::collectionUUID << coll.getUuid());
return getMaxChunkVersionFromQueryResponse(
coll,
@@ -345,7 +339,6 @@ ChunkVersion getShardVersion(OperationContext* opCtx,
const CollectionType& coll,
const ShardId& fromShard,
const ChunkVersion& collectionVersion) {
- invariant(coll.getTimestamp());
const auto chunksQuery =
BSON(ChunkType::collectionUUID << coll.getUuid() << ChunkType::shard(fromShard.toString()));
@@ -392,7 +385,6 @@ void bumpCollectionMinorVersion(OperationContext* opCtx,
const CollectionType coll(findCollResponse.docs[0]);
// Find the newest chunk
- invariant(coll.getTimestamp());
const auto findChunkResponse = uassertStatusOK(configShard->exhaustiveFindOnConfig(
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
@@ -457,7 +449,6 @@ std::vector<ShardId> getShardsOwningChunksForCollection(OperationContext* opCtx,
ErrorCodes::NamespaceNotFound, "Collection does not exist", !findCollResponse.docs.empty());
const CollectionType coll(findCollResponse.docs[0]);
- invariant(coll.getTimestamp());
DistinctCommandRequest distinctCmd(ChunkType::ConfigNS, ChunkType::shard.name());
distinctCmd.setQuery(BSON(ChunkType::collectionUUID << coll.getUuid()));
@@ -625,17 +616,15 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkSplit(
BSONObjBuilder b;
b.append("ns", ChunkType::ConfigNS.ns());
- invariant(origChunk.getValue().getVersion().getTimestamp());
BSONObj query = BSON(ChunkType::min() << range.getMin() << ChunkType::max()
<< range.getMax() << ChunkType::collectionUUID
<< origChunk.getValue().getCollectionUUID());
b.append("q", BSON("query" << query << "orderby" << BSON(ChunkType::lastmod() << -1)));
- const auto resultMustMatch = origChunk.getValue().getVersion().getTimestamp()
- ? BSON(ChunkType::collectionUUID()
- << origChunk.getValue().getCollectionUUID() << ChunkType::shard(shardName))
- : BSON(ChunkType::epoch(requestEpoch) << ChunkType::shard(shardName));
+ const auto resultMustMatch =
+ BSON(ChunkType::collectionUUID()
+ << origChunk.getValue().getCollectionUUID() << ChunkType::shard(shardName));
b.append("res", resultMustMatch);
@@ -750,7 +739,6 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunksMerge(
}
const auto shardChunksInRangeQuery = [&]() {
BSONObjBuilder queryBuilder;
- invariant(coll.getTimestamp());
queryBuilder << ChunkType::collectionUUID << coll.getUuid();
queryBuilder << ChunkType::shard(shardId.toString());
queryBuilder << ChunkType::min(BSON("$gte" << chunkRange.getMin()));
@@ -921,7 +909,6 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
"Collection is undergoing changes and chunks cannot be moved",
coll.getAllowMigrations());
- invariant(coll.getTimestamp());
const auto findChunkQuery = BSON(ChunkType::collectionUUID() << coll.getUuid());
auto findResponse = uassertStatusOK(
@@ -1106,12 +1093,11 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
return response.obj();
}
-StatusWith<ChunkType> ShardingCatalogManager::_findChunkOnConfig(
- OperationContext* opCtx,
- const UUID& uuid,
- const OID& epoch,
- const boost::optional<Timestamp>& timestamp,
- const BSONObj& key) {
+StatusWith<ChunkType> ShardingCatalogManager::_findChunkOnConfig(OperationContext* opCtx,
+ const UUID& uuid,
+ const OID& epoch,
+ const Timestamp& timestamp,
+ const BSONObj& key) {
auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
const auto query = BSON(ChunkType::collectionUUID << uuid << ChunkType::min(key));
@@ -1169,7 +1155,6 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx,
!findCollResponse.docs.empty());
const CollectionType coll(findCollResponse.docs[0]);
- invariant(coll.getTimestamp());
BSONObj targetChunkQuery =
BSON(ChunkType::min(chunk.getMin())
<< ChunkType::max(chunk.getMax()) << ChunkType::collectionUUID << coll.getUuid());
@@ -1196,7 +1181,6 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx,
return;
}
- invariant(coll.getTimestamp());
const auto allChunksQuery = BSON(ChunkType::collectionUUID << coll.getUuid());
// Must use local read concern because we will perform subsequent writes.
@@ -1239,7 +1223,6 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx,
currentCollectionVersion.getTimestamp());
- invariant(coll.getTimestamp());
BSONObj chunkQuery(BSON(ChunkType::min(chunk.getMin())
<< ChunkType::max(chunk.getMax()) << ChunkType::collectionUUID
<< coll.getUuid()));
@@ -1307,7 +1290,6 @@ void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(OperationContext* o
dassert(collUuid == coll.getUuid());
}
- invariant(coll.getTimestamp());
const auto requestedChunkQuery =
BSON(ChunkType::min(minKey)
<< ChunkType::max(maxKey) << ChunkType::collectionUUID() << collUuid);
@@ -1365,8 +1347,7 @@ void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(OperationContext* o
// Get the chunk with the current collectionVersion for this epoch.
ChunkType highestChunk;
{
- const auto query = coll.getTimestamp() ? BSON(ChunkType::collectionUUID() << collUuid)
- : BSON(ChunkType::epoch(version.epoch()));
+ const auto query = BSON(ChunkType::collectionUUID() << collUuid);
const auto highestChunksVector =
uassertStatusOK(configShard->exhaustiveFindOnConfig(
opCtx,
@@ -1509,7 +1490,6 @@ void ShardingCatalogManager::splitOrMarkJumbo(OperationContext* opCtx,
!findCollResponse.docs.empty());
const CollectionType coll(findCollResponse.docs[0]);
- invariant(coll.getTimestamp());
const auto chunkQuery = BSON(ChunkType::collectionUUID()
<< coll.getUuid() << ChunkType::min(chunk.getMin()));
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
index 168c53f9b24..ef66e5f5d33 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
@@ -95,15 +95,14 @@ protected:
};
TEST_F(ClearJumboFlagTest, ClearJumboShouldBumpVersion) {
- auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ auto test = [&](const NamespaceString& nss, const Timestamp& collTimestamp) {
const auto collUuid = UUID::gen();
const auto collEpoch = OID::gen();
- makeCollection(nss, collUuid, collEpoch, *collTimestamp);
+ makeCollection(nss, collUuid, collEpoch, collTimestamp);
ShardingCatalogManager::get(operationContext())
->clearJumboFlag(operationContext(), nss, collEpoch, jumboChunk());
- invariant(collTimestamp);
auto chunkDoc = uassertStatusOK(getChunkDoc(
operationContext(), collUuid, jumboChunk().getMin(), collEpoch, collTimestamp));
ASSERT_FALSE(chunkDoc.getJumbo());
@@ -114,15 +113,14 @@ TEST_F(ClearJumboFlagTest, ClearJumboShouldBumpVersion) {
}
TEST_F(ClearJumboFlagTest, ClearJumboShouldNotBumpVersionIfChunkNotJumbo) {
- auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ auto test = [&](const NamespaceString& nss, const Timestamp& collTimestamp) {
const auto collUuid = UUID::gen();
const auto collEpoch = OID::gen();
- makeCollection(nss, collUuid, collEpoch, *collTimestamp);
+ makeCollection(nss, collUuid, collEpoch, collTimestamp);
ShardingCatalogManager::get(operationContext())
->clearJumboFlag(operationContext(), nss, collEpoch, nonJumboChunk());
- invariant(collTimestamp);
auto chunkDoc = uassertStatusOK(getChunkDoc(
operationContext(), collUuid, nonJumboChunk().getMin(), collEpoch, collTimestamp));
ASSERT_FALSE(chunkDoc.getJumbo());
@@ -133,10 +131,10 @@ TEST_F(ClearJumboFlagTest, ClearJumboShouldNotBumpVersionIfChunkNotJumbo) {
}
TEST_F(ClearJumboFlagTest, AssertsOnEpochMismatch) {
- auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ auto test = [&](const NamespaceString& nss, const Timestamp& collTimestamp) {
const auto collUuid = UUID::gen();
const auto collEpoch = OID::gen();
- makeCollection(nss, collUuid, collEpoch, *collTimestamp);
+ makeCollection(nss, collUuid, collEpoch, collTimestamp);
ASSERT_THROWS_CODE(ShardingCatalogManager::get(operationContext())
->clearJumboFlag(operationContext(), nss, OID::gen(), jumboChunk()),
@@ -148,10 +146,10 @@ TEST_F(ClearJumboFlagTest, AssertsOnEpochMismatch) {
}
TEST_F(ClearJumboFlagTest, AssertsIfChunkCantBeFound) {
- auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ auto test = [&](const NamespaceString& nss, const Timestamp& collTimestamp) {
const auto collEpoch = OID::gen();
const auto collUuid = UUID::gen();
- makeCollection(nss, collUuid, collEpoch, *collTimestamp);
+ makeCollection(nss, collUuid, collEpoch, collTimestamp);
ChunkRange imaginaryChunk(BSON("x" << 0), BSON("x" << 10));
ASSERT_THROWS(ShardingCatalogManager::get(operationContext())
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index 591917bfad4..aa25021aa4e 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -134,7 +134,6 @@ boost::optional<UUID> checkCollectionOptions(OperationContext* opCtx,
}
void triggerFireAndForgetShardRefreshes(OperationContext* opCtx, const CollectionType& coll) {
- invariant(coll.getTimestamp());
const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
const auto allShards = uassertStatusOK(Grid::get(opCtx)->catalogClient()->getAllShards(
opCtx, repl::ReadConcernLevel::kLocalReadConcern))
@@ -177,7 +176,6 @@ void triggerFireAndForgetShardRefreshes(OperationContext* opCtx, const Collectio
//
// The chunk updates:
// [{$set: {
-// lastmodEpoch: <new epoch>,
// min: {$arrayToObject: {$concatArrays: [
// {$objectToArray: "$min"},
// {$literal: [{k: <new_sk_suffix_1>, v: MinKey}, ...]},
@@ -225,9 +223,7 @@ void triggerFireAndForgetShardRefreshes(OperationContext* opCtx, const Collectio
// }}
// }}]
std::pair<std::vector<BSONObj>, std::vector<BSONObj>> makeChunkAndTagUpdatesForRefine(
- const BSONObj& newShardKeyFields,
- OID newEpoch,
- const boost::optional<Timestamp>& newTimestamp) {
+ const BSONObj& newShardKeyFields) {
// Make the $literal objects used in the $set below to add new fields to the boundaries of the
// existing chunks and tags that may include "." characters.
//
@@ -273,12 +269,9 @@ std::pair<std::vector<BSONObj>, std::vector<BSONObj>> makeChunkAndTagUpdatesForR
<< "then" << literalMaxObject << "else"
<< literalMinObject))))))));
- // The chunk updates change the min and max fields and unset the jumbo field. If the collection
- // is in the old (pre-5.0 format, it also sets the new epoch).
+ // The chunk updates change the min and max fields and unset the jumbo field.
std::vector<BSONObj> chunkUpdates;
- chunkUpdates.emplace_back(BSON("$set" << (newTimestamp ? extendMinAndMaxModifier.getOwned()
- : extendMinAndMaxModifier.addFields(BSON(
- ChunkType::epoch(newEpoch))))));
+ chunkUpdates.emplace_back(BSON("$set" << extendMinAndMaxModifier.getOwned()));
chunkUpdates.emplace_back(BSON("$unset" << ChunkType::jumbo()));
// The tag updates only change the min and max fields.
@@ -319,12 +312,9 @@ void ShardingCatalogManager::refineCollectionShardKey(OperationContext* opCtx,
collType.setEpoch(newEpoch);
collType.setKeyPattern(newShardKeyPattern.getKeyPattern());
- boost::optional<Timestamp> newTimestamp;
- if (collType.getTimestamp()) {
- auto now = VectorClock::get(opCtx)->getTime();
- newTimestamp = now.clusterTime().asTimestamp();
- collType.setTimestamp(newTimestamp);
- }
+ auto now = VectorClock::get(opCtx)->getTime();
+ Timestamp newTimestamp = now.clusterTime().asTimestamp();
+ collType.setTimestamp(newTimestamp);
auto updateCollectionAndChunksFn = [&](OperationContext* opCtx, TxnNumber txnNumber) {
// Update the config.collections entry for the given namespace.
@@ -345,13 +335,11 @@ void ShardingCatalogManager::refineCollectionShardKey(OperationContext* opCtx,
hangRefineCollectionShardKeyBeforeUpdatingChunks.pauseWhileSet(opCtx);
}
- auto [chunkUpdates, tagUpdates] =
- makeChunkAndTagUpdatesForRefine(newFields, newEpoch, newTimestamp);
+ auto [chunkUpdates, tagUpdates] = makeChunkAndTagUpdatesForRefine(newFields);
- // Update all config.chunks entries for the given namespace by setting (i) their epoch
- // to the newly-generated objectid, (ii) their bounds for each new field in the refined
- // key to MinKey (except for the global max chunk where the max bounds are set to
- // MaxKey), and unsetting (iii) their jumbo field.
+ // Update all config.chunks entries for the given namespace by setting (i) their bounds for
+ // each new field in the refined key to MinKey (except for the global max chunk where the
+ // max bounds are set to MaxKey), and unsetting (ii) their jumbo field.
const auto chunksQuery = BSON(ChunkType::collectionUUID << collType.getUuid());
writeToConfigDocumentInTxn(
opCtx,
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
index 4d511518aca..fa1a1818897 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
@@ -92,13 +92,12 @@ void assertChunkVersionWasBumpedTo(const ChunkType& chunkTypeBefore,
}
TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoCollectionFoundReturnsSuccess) {
- const auto requestedChunkType =
- generateChunkType(_nss,
- _collUuid,
- ChunkVersion(10, 2, OID::gen(), boost::none /* timestamp */),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType = generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion(10, 2, OID::gen(), Timestamp()),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
index 17b1f907751..2d681f93e09 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
@@ -60,7 +60,7 @@ protected:
TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
const auto collEpoch = OID::gen();
- const boost::optional<Timestamp> collTimestamp(42);
+ const Timestamp collTimestamp(42);
const auto collUuid = UUID::gen();
ChunkType chunk;
@@ -141,7 +141,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
const auto collEpoch = OID::gen();
- const boost::optional<Timestamp> collTimestamp(42);
+ const Timestamp collTimestamp(42);
const auto collUuid = UUID::gen();
ChunkType chunk;
chunk.setName(OID::gen());
@@ -214,7 +214,7 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
const auto collEpoch = OID::gen();
- const boost::optional<Timestamp> collTimestamp(42);
+ const Timestamp collTimestamp(42);
const auto collUuid = UUID::gen();
ChunkType chunk, otherChunk;
chunk.setName(OID::gen());
@@ -292,7 +292,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
const auto collEpoch = OID::gen();
- const boost::optional<Timestamp> collTimestamp(42);
+ const Timestamp collTimestamp(42);
const auto collUuid = UUID::gen();
ShardId shardId(_shardName);
ChunkType chunk;
@@ -370,7 +370,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
TEST_F(MergeChunkTest, NonExistingNamespace) {
const auto collEpoch = OID::gen();
const auto collUuidAtRequest = UUID::gen();
- const boost::optional<Timestamp> collTimestamp(42);
+ const Timestamp collTimestamp(42);
ChunkType chunk;
chunk.setCollectionUUID(UUID::gen());
@@ -408,7 +408,7 @@ TEST_F(MergeChunkTest, NonExistingNamespace) {
TEST_F(MergeChunkTest, NonMatchingUUIDsOfChunkAndRequestErrors) {
const auto collEpoch = OID::gen();
- const boost::optional<Timestamp> collTimestamp(42);
+ const Timestamp collTimestamp(42);
const auto collUuid = UUID::gen();
const auto requestUuid = UUID::gen();
ChunkType chunk;
@@ -446,7 +446,7 @@ TEST_F(MergeChunkTest, NonMatchingUUIDsOfChunkAndRequestErrors) {
TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) {
const auto collEpoch = OID::gen();
- const boost::optional<Timestamp> collTimestamp(42);
+ const Timestamp collTimestamp(42);
const auto collUuid = UUID::gen();
// Construct chunk range to be merged
@@ -498,7 +498,7 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) {
TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) {
const auto collEpoch = OID::gen();
- const boost::optional<Timestamp> collTimestamp(42);
+ const Timestamp collTimestamp(42);
const auto collUuid = UUID::gen();
ChunkType chunk1;
chunk1.setName(OID::gen());
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
index 397eca70a61..55af4454e3d 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
@@ -200,17 +200,18 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingChunksRemaining) {
auto epoch = OID::gen();
const auto uuid = UUID::gen();
+ const auto timestamp = Timestamp(1);
ChunkType chunk1(uuid,
ChunkRange(BSON("_id" << 0), BSON("_id" << 20)),
- ChunkVersion(1, 1, epoch, boost::none /* timestamp */),
+ ChunkVersion(1, 1, epoch, timestamp),
shard1.getName());
ChunkType chunk2(uuid,
ChunkRange(BSON("_id" << 21), BSON("_id" << 50)),
- ChunkVersion(1, 2, epoch, boost::none /* timestamp */),
+ ChunkVersion(1, 2, epoch, timestamp),
shard1.getName());
ChunkType chunk3(uuid,
ChunkRange(BSON("_id" << 51), BSON("_id" << 1000)),
- ChunkVersion(1, 3, epoch, boost::none /* timestamp */),
+ ChunkVersion(1, 3, epoch, timestamp),
shard1.getName());
chunk3.setJumbo(true);
@@ -286,17 +287,18 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
auto epoch = OID::gen();
auto uuid = UUID::gen();
+ Timestamp timestamp = Timestamp(1);
ChunkType chunk1(uuid,
ChunkRange(BSON("_id" << 0), BSON("_id" << 20)),
- ChunkVersion(1, 1, epoch, boost::none /* timestamp */),
+ ChunkVersion(1, 1, epoch, timestamp),
shard1.getName());
ChunkType chunk2(uuid,
ChunkRange(BSON("_id" << 21), BSON("_id" << 50)),
- ChunkVersion(1, 2, epoch, boost::none /* timestamp */),
+ ChunkVersion(1, 2, epoch, timestamp),
shard1.getName());
ChunkType chunk3(uuid,
ChunkRange(BSON("_id" << 51), BSON("_id" << 1000)),
- ChunkVersion(1, 3, epoch, boost::none /* timestamp */),
+ ChunkVersion(1, 3, epoch, timestamp),
shard1.getName());
std::vector<ChunkType> chunks{chunk1, chunk2, chunk3};
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
index 525011fd0ee..e7986eb2a27 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
@@ -57,7 +57,7 @@ protected:
};
TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
- auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ auto test = [&](const NamespaceString& nss, const Timestamp& collTimestamp) {
const auto collEpoch = OID::gen();
const auto collUuid = UUID::gen();
@@ -100,9 +100,6 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
ASSERT_EQ(expectedShardVersion, shardVersion);
ASSERT_EQ(shardVersion, collVersion);
- const auto nssOrUuid =
- collTimestamp ? NamespaceStringOrUUID(nss.db().toString(), collUuid) : nss;
-
// First chunkDoc should have range [chunkMin, chunkSplitPoint]
auto chunkDocStatus =
getChunkDoc(operationContext(), collUuid, chunkMin, collEpoch, collTimestamp);
@@ -141,7 +138,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
}
TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
- auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ auto test = [&](const NamespaceString& nss, const Timestamp& collTimestamp) {
const auto collEpoch = OID::gen();
const auto collUuid = UUID::gen();
@@ -174,9 +171,6 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
splitPoints,
"shard0000"));
- const auto nssOrUuid =
- collTimestamp ? NamespaceStringOrUUID(nss.db().toString(), collUuid) : nss;
-
// First chunkDoc should have range [chunkMin, chunkSplitPoint]
auto chunkDocStatus =
getChunkDoc(operationContext(), collUuid, chunkMin, collEpoch, collTimestamp);
@@ -231,7 +225,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
}
TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
- auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ auto test = [&](const NamespaceString& nss, const Timestamp& collTimestamp) {
const auto collEpoch = OID::gen();
const auto collUuid = UUID::gen();
@@ -272,9 +266,6 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
splitPoints,
"shard0000"));
- const auto nssOrUuid =
- collTimestamp ? NamespaceStringOrUUID(nss.db().toString(), collUuid) : nss;
-
// First chunkDoc should have range [chunkMin, chunkSplitPoint]
auto chunkDocStatus =
getChunkDoc(operationContext(), collUuid, chunkMin, collEpoch, collTimestamp);
@@ -304,7 +295,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
}
TEST_F(SplitChunkTest, PreConditionFailErrors) {
- auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ auto test = [&](const NamespaceString& nss, const Timestamp& collTimestamp) {
const auto collEpoch = OID::gen();
ChunkType chunk;
@@ -340,7 +331,7 @@ TEST_F(SplitChunkTest, PreConditionFailErrors) {
}
TEST_F(SplitChunkTest, NonExisingNamespaceErrors) {
- auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ auto test = [&](const NamespaceString& nss, const Timestamp& collTimestamp) {
const auto collEpoch = OID::gen();
ChunkType chunk;
@@ -373,7 +364,7 @@ TEST_F(SplitChunkTest, NonExisingNamespaceErrors) {
}
TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
- auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ auto test = [&](const NamespaceString& nss, const Timestamp& collTimestamp) {
const auto collEpoch = OID::gen();
ChunkType chunk;
@@ -406,7 +397,7 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
}
TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) {
- auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ auto test = [&](const NamespaceString& nss, const Timestamp& collTimestamp) {
const auto collEpoch = OID::gen();
ChunkType chunk;
@@ -440,7 +431,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) {
}
TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) {
- auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ auto test = [&](const NamespaceString& nss, const Timestamp& collTimestamp) {
const auto collEpoch = OID::gen();
ChunkType chunk;
@@ -473,7 +464,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) {
}
TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) {
- auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ auto test = [&](const NamespaceString& nss, const Timestamp& collTimestamp) {
const auto collEpoch = OID::gen();
ChunkType chunk;
@@ -507,7 +498,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) {
}
TEST_F(SplitChunkTest, SplitPointsWithDollarPrefixShouldFail) {
- auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ auto test = [&](const NamespaceString& nss, const Timestamp& collTimestamp) {
const auto collEpoch = OID::gen();
ChunkType chunk;
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 2b2b4c846b7..948821e2f91 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -85,13 +85,12 @@ protected:
nullptr,
false,
epoch,
- boost::none /* timestamp */,
+ Timestamp(),
boost::none /* timeseriesFields */,
boost::none,
boost::none /* chunkSizeBytes */,
true,
- {ChunkType{
- uuid, range, ChunkVersion(1, 0, epoch, boost::none /* timestamp */), kOtherShard}});
+ {ChunkType{uuid, range, ChunkVersion(1, 0, epoch, Timestamp()), kOtherShard}});
return CollectionMetadata(ChunkManager(kThisShard,
DatabaseVersion(UUID::gen(), Timestamp()),
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
index c6591e51e52..73a39a676b2 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
@@ -162,6 +162,7 @@ protected:
[&] {
const OID epoch = OID::gen();
+ const Timestamp timestamp(1);
auto rt = RoutingTableHistory::makeNew(
kNss,
@@ -170,14 +171,14 @@ protected:
nullptr,
false,
epoch,
- boost::none /* timestamp */,
+ timestamp,
boost::none /* timeseriesFields */,
boost::none /* resharding Fields */,
boost::none /* chunkSizeBytes */,
true,
{ChunkType{uuid,
ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)},
- ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
+ ChunkVersion(1, 0, epoch, timestamp),
ShardId("dummyShardId")}});
AutoGetDb autoDb(operationContext(), kNss.db(), MODE_IX);
@@ -206,7 +207,7 @@ protected:
MoveChunkRequest::appendAsCommand(
&cmdBuilder,
kNss,
- ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */),
+ ChunkVersion(1, 0, OID::gen(), Timestamp()),
kDonorConnStr.getSetName(),
kRecipientConnStr.getSetName(),
chunkRange,
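RoutingTableHistory::makeNew follows the same rule, and the timestamp it is given must match the one stamped on the chunks it is built from. A condensed sketch of the call above, keeping the argument order shown in the diff; the KeyPattern argument is inferred from the fixture's kShardKey and is an assumption:

    const OID epoch = OID::gen();
    const Timestamp timestamp(1);
    auto rt = RoutingTableHistory::makeNew(
        kNss,
        uuid,
        KeyPattern(BSON(kShardKey << 1)),  // assumed from the fixture
        nullptr /* defaultCollator */,
        false /* unique */,
        epoch,
        timestamp,  // previously boost::none /* timestamp */
        boost::none /* timeseriesFields */,
        boost::none /* reshardingFields */,
        boost::none /* chunkSizeBytes */,
        true,
        {ChunkType{uuid,
                   ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)},
                   ChunkVersion(1, 0, epoch, timestamp),
                   ShardId("dummyShardId")}});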
diff --git a/src/mongo/db/s/migration_util_test.cpp b/src/mongo/db/s/migration_util_test.cpp
index ccb1ae3bbec..c640421d435 100644
--- a/src/mongo/db/s/migration_util_test.cpp
+++ b/src/mongo/db/s/migration_util_test.cpp
@@ -350,7 +350,7 @@ public:
const ShardKeyPattern kShardKeyPattern = ShardKeyPattern(BSON("_id" << 1));
const UUID kDefaultUUID = UUID::gen();
const OID kEpoch = OID::gen();
- const Timestamp kDefaultTimestamp = Timestamp();
+ const Timestamp kDefaultTimestamp = Timestamp(1);
const DatabaseType kDefaultDatabaseType = DatabaseType(
kNss.db().toString(), ShardId("0"), true, DatabaseVersion(kDefaultUUID, kDefaultTimestamp));
const std::vector<ShardType> kShardList = {ShardType("0", "Host0:12345"),
@@ -440,8 +440,8 @@ public:
return mockCatalogClient;
}
- CollectionType makeCollectionType(UUID uuid, OID epoch) {
- CollectionType coll(kNss, epoch, Date_t::now(), uuid);
+ CollectionType makeCollectionType(UUID uuid, OID epoch, Timestamp timestamp) {
+ CollectionType coll(kNss, epoch, timestamp, Date_t::now(), uuid);
coll.setKeyPattern(kShardKeyPattern.getKeyPattern());
coll.setUnique(true);
return coll;
@@ -572,11 +572,11 @@ TEST_F(SubmitRangeDeletionTaskTest, SucceedsIfFilteringMetadataUUIDMatchesTaskUU
migrationutil::markAsReadyRangeDeletionTaskLocally(opCtx, deletionTask.getId());
// Force a metadata refresh with the task's UUID before the task is submitted.
- auto coll = makeCollectionType(collectionUUID, kEpoch);
+ auto coll = makeCollectionType(collectionUUID, kEpoch, kDefaultTimestamp);
_mockCatalogCacheLoader->setDatabaseRefreshReturnValue(kDefaultDatabaseType);
_mockCatalogCacheLoader->setCollectionRefreshReturnValue(coll);
_mockCatalogCacheLoader->setChunkRefreshReturnValue(
- makeChangedChunks(ChunkVersion(1, 0, kEpoch, boost::none /* timestamp */)));
+ makeChangedChunks(ChunkVersion(1, 0, kEpoch, kDefaultTimestamp)));
_mockCatalogClient->setCollections({coll});
forceShardFilteringMetadataRefresh(opCtx, kNss);
@@ -600,11 +600,11 @@ TEST_F(
migrationutil::markAsReadyRangeDeletionTaskLocally(opCtx, deletionTask.getId());
// Make the refresh triggered by submitting the task return a UUID that matches the task's UUID.
- auto coll = makeCollectionType(collectionUUID, kEpoch);
+ auto coll = makeCollectionType(collectionUUID, kEpoch, kDefaultTimestamp);
_mockCatalogCacheLoader->setDatabaseRefreshReturnValue(kDefaultDatabaseType);
_mockCatalogCacheLoader->setCollectionRefreshReturnValue(coll);
_mockCatalogCacheLoader->setChunkRefreshReturnValue(
- makeChangedChunks(ChunkVersion(1, 0, kEpoch, boost::none /* timestamp */)));
+ makeChangedChunks(ChunkVersion(1, 0, kEpoch, kDefaultTimestamp)));
_mockCatalogClient->setCollections({coll});
// The task should have been submitted successfully.
@@ -633,10 +633,10 @@ TEST_F(SubmitRangeDeletionTaskTest,
migrationutil::markAsReadyRangeDeletionTaskLocally(opCtx, deletionTask.getId());
// Make the refresh triggered by submitting the task return a UUID that matches the task's UUID.
- auto matchingColl = makeCollectionType(collectionUUID, kEpoch);
+ auto matchingColl = makeCollectionType(collectionUUID, kEpoch, kDefaultTimestamp);
_mockCatalogCacheLoader->setCollectionRefreshReturnValue(matchingColl);
_mockCatalogCacheLoader->setChunkRefreshReturnValue(
- makeChangedChunks(ChunkVersion(10, 0, kEpoch, boost::none /* timestamp */)));
+ makeChangedChunks(ChunkVersion(10, 0, kEpoch, kDefaultTimestamp)));
_mockCatalogClient->setCollections({matchingColl});
// The task should have been submitted successfully.
@@ -652,11 +652,12 @@ TEST_F(SubmitRangeDeletionTaskTest,
// stale when the task is submitted.
const auto staleUUID = UUID::gen();
const auto staleEpoch = OID::gen();
- auto staleColl = makeCollectionType(staleUUID, staleEpoch);
+ const auto staleTimestamp = Timestamp(0);
+ auto staleColl = makeCollectionType(staleUUID, staleEpoch, staleTimestamp);
_mockCatalogCacheLoader->setDatabaseRefreshReturnValue(kDefaultDatabaseType);
_mockCatalogCacheLoader->setCollectionRefreshReturnValue(staleColl);
_mockCatalogCacheLoader->setChunkRefreshReturnValue(
- makeChangedChunks(ChunkVersion(1, 0, staleEpoch, boost::none /* timestamp */)));
+ makeChangedChunks(ChunkVersion(1, 0, staleEpoch, staleTimestamp)));
_mockCatalogClient->setCollections({staleColl});
forceShardFilteringMetadataRefresh(opCtx, kNss);
@@ -670,10 +671,10 @@ TEST_F(SubmitRangeDeletionTaskTest,
migrationutil::markAsReadyRangeDeletionTaskLocally(opCtx, deletionTask.getId());
// Make the refresh triggered by submitting the task return a UUID that matches the task's UUID.
- auto matchingColl = makeCollectionType(collectionUUID, kEpoch);
+ auto matchingColl = makeCollectionType(collectionUUID, kEpoch, kDefaultTimestamp);
_mockCatalogCacheLoader->setCollectionRefreshReturnValue(matchingColl);
_mockCatalogCacheLoader->setChunkRefreshReturnValue(
- makeChangedChunks(ChunkVersion(10, 0, kEpoch, boost::none /* timestamp */)));
+ makeChangedChunks(ChunkVersion(10, 0, kEpoch, kDefaultTimestamp)));
_mockCatalogClient->setCollections({matchingColl});
// The task should have been submitted successfully.
@@ -695,11 +696,12 @@ TEST_F(SubmitRangeDeletionTaskTest,
// Make the refresh triggered by submitting the task return an arbitrary UUID.
const auto otherEpoch = OID::gen();
- auto otherColl = makeCollectionType(UUID::gen(), otherEpoch);
+ const auto otherTimestamp = Timestamp(2);
+ auto otherColl = makeCollectionType(UUID::gen(), otherEpoch, otherTimestamp);
_mockCatalogCacheLoader->setDatabaseRefreshReturnValue(kDefaultDatabaseType);
_mockCatalogCacheLoader->setCollectionRefreshReturnValue(otherColl);
_mockCatalogCacheLoader->setChunkRefreshReturnValue(
- makeChangedChunks(ChunkVersion(1, 0, otherEpoch, boost::none /* timestamp */)));
+ makeChangedChunks(ChunkVersion(1, 0, otherEpoch, otherTimestamp)));
_mockCatalogClient->setCollections({otherColl});
// The task should not have been submitted, and the task's entry should have been removed from
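What these updated tests all preserve is agreement between the collection entry and its chunks: the CollectionType handed to the mock loader and the ChunkVersion on the changed chunks must carry the same epoch/timestamp pair, since a differing pair is what refresh logic reads as a drop and recreate. A sketch of both cases, reusing the fixture names from above:

    // A matching refresh reuses the fixture's epoch/timestamp pair:
    auto matchingColl = makeCollectionType(collectionUUID, kEpoch, kDefaultTimestamp);

    // A mismatching refresh replaces *both* halves of the pair, since
    // epoch and timestamp travel together after SERVER-52847:
    const auto otherEpoch = OID::gen();
    const auto otherTimestamp = Timestamp(2);
    auto otherColl = makeCollectionType(UUID::gen(), otherEpoch, otherTimestamp);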
diff --git a/src/mongo/db/s/op_observer_sharding_test.cpp b/src/mongo/db/s/op_observer_sharding_test.cpp
index b0e6de71d6c..2ae932483e2 100644
--- a/src/mongo/db/s/op_observer_sharding_test.cpp
+++ b/src/mongo/db/s/op_observer_sharding_test.cpp
@@ -65,17 +65,15 @@ protected:
const UUID uuid = UUID::gen();
const OID epoch = OID::gen();
auto range = ChunkRange(BSON("key" << MINKEY), BSON("key" << MAXKEY));
- auto chunk = ChunkType(uuid,
- std::move(range),
- ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
- ShardId("other"));
+ auto chunk = ChunkType(
+ uuid, std::move(range), ChunkVersion(1, 0, epoch, Timestamp()), ShardId("other"));
auto rt = RoutingTableHistory::makeNew(kTestNss,
uuid,
KeyPattern(keyPattern),
nullptr,
false,
epoch,
- boost::none /* timestamp */,
+ Timestamp(),
boost::none /* timeseriesFields */,
boost::none,
boost::none /* chunkSizeBytes */,
diff --git a/src/mongo/db/s/range_deletion_util_test.cpp b/src/mongo/db/s/range_deletion_util_test.cpp
index fc5b0d61787..bc87d18e0f8 100644
--- a/src/mongo/db/s/range_deletion_util_test.cpp
+++ b/src/mongo/db/s/range_deletion_util_test.cpp
@@ -105,14 +105,14 @@ public:
nullptr,
false,
epoch,
- boost::none /* timestamp */,
+ Timestamp(),
boost::none /* timeseriesFields */,
boost::none,
boost::none /* chunkSizeBytes */,
true,
{ChunkType{uuid,
ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)},
- ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
+ ChunkVersion(1, 0, epoch, Timestamp()),
ShardId("dummyShardId")}});
AutoGetDb autoDb(operationContext(), kNss.db(), MODE_IX);
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
index ea3d2c37ca6..082699876ee 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
@@ -831,7 +831,6 @@ ReshardingCoordinatorExternalStateImpl::calculateParticipantShardsAndChunks(
if (const auto& chunks = coordinatorDoc.getPresetReshardedChunks()) {
auto version = calculateChunkVersionForInitialChunks(opCtx);
- invariant(version.getTimestamp());
// Use the provided shardIds from presetReshardedChunks to construct the
// recipient list.
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
index d91d0b19836..d7b736119a0 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
@@ -87,7 +87,6 @@ class ExternalStateForTest : public ReshardingCoordinatorExternalState {
// Use the provided shardIds from presetReshardedChunks to construct the
// recipient list.
if (const auto& chunks = coordinatorDoc.getPresetReshardedChunks()) {
- invariant(version.getTimestamp());
for (const auto& reshardedChunk : *chunks) {
initialChunks.emplace_back(
coordinatorDoc.getReshardingUUID(),
@@ -404,13 +403,14 @@ public:
std::vector<ChunkType> makeChunks(const UUID& uuid,
OID epoch,
+ const Timestamp& timestamp,
const ShardKeyPattern& shardKey,
std::vector<OID> ids) {
auto chunkRanges =
_newShardKey.isShardKey(shardKey.toBSON()) ? _newChunkRanges : _oldChunkRanges;
        // Create two chunks, one on each shard, with the given collection UUID, epoch, and timestamp
- ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
+ ChunkVersion version(1, 0, epoch, timestamp);
ChunkType chunk1(uuid, chunkRanges[0], version, ShardId("shard0000"));
chunk1.setName(ids[0]);
version.incMinor();
@@ -423,9 +423,10 @@ public:
// Returns the chunk for the donor shard.
ChunkType makeAndInsertChunksForDonorShard(const UUID& uuid,
OID epoch,
+ const Timestamp& timestamp,
const ShardKeyPattern& shardKey,
std::vector<OID> ids) {
- auto chunks = makeChunks(uuid, epoch, shardKey, ids);
+ auto chunks = makeChunks(uuid, epoch, timestamp, shardKey, ids);
// Only the chunk corresponding to shard0000 is stored as a donor in the coordinator state
// document constructed.
@@ -508,10 +509,12 @@ public:
NamespaceString _originalNss = NamespaceString("db.foo");
UUID _originalUUID = UUID::gen();
OID _originalEpoch = OID::gen();
+ Timestamp _originalTimestamp = Timestamp(1);
NamespaceString _tempNss = NamespaceString("db.system.resharding." + _originalUUID.toString());
UUID _reshardingUUID = UUID::gen();
OID _tempEpoch = OID::gen();
+ Timestamp _tempTimestamp = Timestamp(2);
ShardKeyPattern _oldShardKey = ShardKeyPattern(BSON("oldShardKey" << 1));
ShardKeyPattern _newShardKey = ShardKeyPattern(BSON("newShardKey" << 1));
@@ -545,11 +548,17 @@ TEST_F(ReshardingCoordinatorServiceTest, ReshardingCoordinatorSuccessfullyTransi
auto doc = insertStateAndCatalogEntries(CoordinatorStateEnum::kUnused, _originalEpoch);
auto opCtx = operationContext();
- auto donorChunk = makeAndInsertChunksForDonorShard(
- _originalUUID, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
-
- auto initialChunks =
- makeChunks(_reshardingUUID, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
+ auto donorChunk = makeAndInsertChunksForDonorShard(_originalUUID,
+ _originalEpoch,
+ _originalTimestamp,
+ _oldShardKey,
+ std::vector{OID::gen(), OID::gen()});
+
+ auto initialChunks = makeChunks(_reshardingUUID,
+ _tempEpoch,
+ _tempTimestamp,
+ _newShardKey,
+ std::vector{OID::gen(), OID::gen()});
std::vector<ReshardedChunk> presetReshardedChunks;
for (const auto& chunk : initialChunks) {
@@ -603,11 +612,17 @@ TEST_F(ReshardingCoordinatorServiceTest, ReshardingCoordinatorTransitionsTokDone
auto doc = insertStateAndCatalogEntries(CoordinatorStateEnum::kUnused, _originalEpoch);
auto opCtx = operationContext();
- auto donorChunk = makeAndInsertChunksForDonorShard(
- _originalUUID, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
-
- auto initialChunks =
- makeChunks(_reshardingUUID, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
+ auto donorChunk = makeAndInsertChunksForDonorShard(_originalUUID,
+ _originalEpoch,
+ _originalTimestamp,
+ _oldShardKey,
+ std::vector{OID::gen(), OID::gen()});
+
+ auto initialChunks = makeChunks(_reshardingUUID,
+ _tempEpoch,
+ _tempTimestamp,
+ _newShardKey,
+ std::vector{OID::gen(), OID::gen()});
std::vector<ReshardedChunk> presetReshardedChunks;
for (const auto& chunk : initialChunks) {
@@ -667,11 +682,17 @@ TEST_F(ReshardingCoordinatorServiceTest, StepDownStepUpDuringInitializing) {
doc.setRecipientShards({});
doc.setDonorShards({});
- auto donorChunk = makeAndInsertChunksForDonorShard(
- _originalUUID, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
+ auto donorChunk = makeAndInsertChunksForDonorShard(_originalUUID,
+ _originalEpoch,
+ _originalTimestamp,
+ _oldShardKey,
+ std::vector{OID::gen(), OID::gen()});
- auto initialChunks =
- makeChunks(_reshardingUUID, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
+ auto initialChunks = makeChunks(_reshardingUUID,
+ _tempEpoch,
+ _tempTimestamp,
+ _newShardKey,
+ std::vector{OID::gen(), OID::gen()});
std::vector<ReshardedChunk> presetReshardedChunks;
for (const auto& chunk : initialChunks) {
@@ -727,11 +748,17 @@ TEST_F(ReshardingCoordinatorServiceTest, StepDownStepUpEachTransition) {
auto doc = insertStateAndCatalogEntries(CoordinatorStateEnum::kUnused, _originalEpoch);
auto opCtx = operationContext();
- auto donorChunk = makeAndInsertChunksForDonorShard(
- _originalUUID, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
-
- auto initialChunks =
- makeChunks(_reshardingUUID, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
+ auto donorChunk = makeAndInsertChunksForDonorShard(_originalUUID,
+ _originalEpoch,
+ _originalTimestamp,
+ _oldShardKey,
+ std::vector{OID::gen(), OID::gen()});
+
+ auto initialChunks = makeChunks(_reshardingUUID,
+ _tempEpoch,
+ _tempTimestamp,
+ _newShardKey,
+ std::vector{OID::gen(), OID::gen()});
std::vector<ReshardedChunk> presetReshardedChunks;
for (const auto& chunk : initialChunks) {
@@ -841,7 +868,7 @@ TEST_F(ReshardingCoordinatorServiceTest, StepDownStepUpEachTransition) {
ChunkType::ConfigNS, BSON(ChunkType::collectionUUID() << doc.getReshardingUUID()));
while (chunkCursor->more()) {
auto d = uassertStatusOK(ChunkType::fromConfigBSON(
- chunkCursor->nextSafe().getOwned(), _originalEpoch, boost::none));
+ chunkCursor->nextSafe().getOwned(), _originalEpoch, _originalTimestamp));
foundChunks.push_back(d);
}
ASSERT_EQUALS(foundChunks.size(), initialChunks.size());
@@ -862,11 +889,17 @@ TEST_F(ReshardingCoordinatorServiceTest, StepDownStepUpEachTransition) {
TEST_F(ReshardingCoordinatorServiceTest, ReshardingCoordinatorFailsIfMigrationNotAllowed) {
auto doc = insertStateAndCatalogEntries(CoordinatorStateEnum::kUnused, _originalEpoch);
auto opCtx = operationContext();
- auto donorChunk = makeAndInsertChunksForDonorShard(
- _originalUUID, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
-
- auto initialChunks =
- makeChunks(_reshardingUUID, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
+ auto donorChunk = makeAndInsertChunksForDonorShard(_originalUUID,
+ _originalEpoch,
+ _originalTimestamp,
+ _oldShardKey,
+ std::vector{OID::gen(), OID::gen()});
+
+ auto initialChunks = makeChunks(_reshardingUUID,
+ _tempEpoch,
+ _tempTimestamp,
+ _newShardKey,
+ std::vector{OID::gen(), OID::gen()});
std::vector<ReshardedChunk> presetReshardedChunks;
for (const auto& chunk : initialChunks) {
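For the resharding coordinator tests, the chunk-building helper stamps both chunks with a single epoch/timestamp pair and distinguishes them only by minor version. A condensed sketch of makeChunks(); the second chunk's range and shard are inferred from the fixture's two-shard setup and are assumptions:

    ChunkVersion version(1, 0, epoch, timestamp);
    ChunkType chunk1(uuid, chunkRanges[0], version, ShardId("shard0000"));
    chunk1.setName(ids[0]);
    version.incMinor();  // 1|1, same epoch and timestamp
    ChunkType chunk2(uuid, chunkRanges[1], version, ShardId("shard0001"));
    chunk2.setName(ids[1]);
    return std::vector<ChunkType>{chunk1, chunk2};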
diff --git a/src/mongo/db/s/resharding/resharding_data_replication_test.cpp b/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
index 6cf1d5005d0..b33c3487be9 100644
--- a/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
@@ -76,7 +76,7 @@ public:
std::vector<ChunkType> chunks = {ChunkType{
_sourceUUID,
ChunkRange{BSON(_currentShardKey << MINKEY), BSON(_currentShardKey << MAXKEY)},
- ChunkVersion(100, 0, epoch, boost::none /* timestamp */),
+ ChunkVersion(100, 0, epoch, Timestamp()),
_myDonorId}};
auto rt = RoutingTableHistory::makeNew(_sourceNss,
@@ -85,7 +85,7 @@ public:
std::move(defaultCollator),
false /* unique */,
std::move(epoch),
- boost::none /* timestamp */,
+ Timestamp(),
boost::none /* timeseriesFields */,
boost::none /* reshardingFields */,
boost::none /* chunkSizeBytes */,
diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.h b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.h
index c6202d8a8d7..8ceec20e5bd 100644
--- a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.h
+++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.h
@@ -109,7 +109,7 @@ protected:
const ShardId& shardThatChunkExistsOn) {
auto range = ChunkRange(BSON(shardKey << MINKEY), BSON(shardKey << MAXKEY));
auto chunk = ChunkType(
- uuid, std::move(range), ChunkVersion(1, 0, epoch, boost::none), shardThatChunkExistsOn);
+ uuid, std::move(range), ChunkVersion(1, 0, epoch, timestamp), shardThatChunkExistsOn);
ChunkManager cm(kThisShard.getShardId(),
DatabaseVersion(uuid, timestamp),
makeStandaloneRoutingTableHistory(
@@ -119,7 +119,7 @@ protected:
nullptr,
false,
epoch,
- boost::none,
+ timestamp,
boost::none /* timeseriesFields */,
boost::none,
boost::none /* chunkSizeBytes */,
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
index 036d84d5698..3c88a00b273 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
@@ -170,17 +170,17 @@ public:
kCrudUUID,
ChunkRange{BSON(kOriginalShardKey << MINKEY),
BSON(kOriginalShardKey << -std::numeric_limits<double>::infinity())},
- ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
+ ChunkVersion(1, 0, epoch, Timestamp()),
_sourceId.getShardId()},
ChunkType{
kCrudUUID,
ChunkRange{BSON(kOriginalShardKey << -std::numeric_limits<double>::infinity()),
BSON(kOriginalShardKey << 0)},
- ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
+ ChunkVersion(1, 0, epoch, Timestamp()),
kOtherShardId},
ChunkType{kCrudUUID,
ChunkRange{BSON(kOriginalShardKey << 0), BSON(kOriginalShardKey << MAXKEY)},
- ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
+ ChunkVersion(1, 0, epoch, Timestamp()),
_sourceId.getShardId()}};
auto rt = RoutingTableHistory::makeNew(kCrudNs,
@@ -189,7 +189,7 @@ public:
nullptr,
false,
epoch,
- boost::none /* timestamp */,
+ Timestamp(),
boost::none /* timeseriesFields */,
boost::none,
boost::none /* chunkSizeBytes */,
diff --git a/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
index f89da1f5b2d..c437eb3ec05 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
@@ -295,7 +295,7 @@ private:
std::vector<ChunkType> chunks = {ChunkType{
_sourceUUID,
ChunkRange{BSON(_currentShardKey << MINKEY), BSON(_currentShardKey << MAXKEY)},
- ChunkVersion(100, 0, epoch, boost::none /* timestamp */),
+ ChunkVersion(100, 0, epoch, Timestamp()),
_myDonorId}};
auto rt = RoutingTableHistory::makeNew(_sourceNss,
@@ -304,7 +304,7 @@ private:
nullptr /* defaultCollator */,
false /* unique */,
std::move(epoch),
- boost::none /* timestamp */,
+ Timestamp(),
boost::none /* timeseriesFields */,
boost::none /* reshardingFields */,
boost::none /* chunkSizeBytes */,
diff --git a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
index 2e81a7be471..3a96cd1672e 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
@@ -241,16 +241,16 @@ private:
_sourceUUID,
ChunkRange{BSON(_currentShardKey << MINKEY),
BSON(_currentShardKey << -std::numeric_limits<double>::infinity())},
- ChunkVersion(100, 0, epoch, boost::none /* timestamp */),
+ ChunkVersion(100, 0, epoch, Timestamp()),
_myDonorId},
ChunkType{_sourceUUID,
ChunkRange{BSON(_currentShardKey << -std::numeric_limits<double>::infinity()),
BSON(_currentShardKey << 0)},
- ChunkVersion(100, 1, epoch, boost::none /* timestamp */),
+ ChunkVersion(100, 1, epoch, Timestamp()),
_otherDonorId},
ChunkType{_sourceUUID,
ChunkRange{BSON(_currentShardKey << 0), BSON(_currentShardKey << MAXKEY)},
- ChunkVersion(100, 2, epoch, boost::none /* timestamp */),
+ ChunkVersion(100, 2, epoch, Timestamp()),
_myDonorId}};
auto rt = RoutingTableHistory::makeNew(_sourceNss,
@@ -259,7 +259,7 @@ private:
nullptr /* defaultCollator */,
false /* unique */,
std::move(epoch),
- boost::none /* timestamp */,
+ Timestamp(),
boost::none /* timeseriesFields */,
boost::none /* reshardingFields */,
boost::none /* chunkSizeBytes */,
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp
index ad04e9623dd..4d36d53415e 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp
@@ -58,10 +58,12 @@ public:
const ShardKeyPattern kShardKey = ShardKeyPattern(BSON("oldKey" << 1));
const OID kOrigEpoch = OID::gen();
const UUID kOrigUUID = UUID::gen();
+ const Timestamp kOrigTimestamp = Timestamp(1);
const NamespaceString kOrigNss = NamespaceString("db.foo");
const ShardKeyPattern kReshardingKey = ShardKeyPattern(BSON("newKey" << 1));
const OID kReshardingEpoch = OID::gen();
const UUID kReshardingUUID = UUID::gen();
+ const Timestamp kReshardingTimestamp = Timestamp(2);
const NamespaceString kReshardingNss = NamespaceString(
str::stream() << "db." << NamespaceString::kTemporaryReshardingCollectionPrefix
<< kOrigUUID);
@@ -139,11 +141,12 @@ public:
const ShardKeyPattern& skey,
UUID uuid,
OID epoch,
+ Timestamp timestamp,
const BSONObj& collation = {}) {
auto future = scheduleRoutingInfoForcedRefresh(tempNss);
expectFindSendBSONObjVector(kConfigHostAndPort, [&]() {
- CollectionType coll(tempNss, epoch, Date_t::now(), uuid);
+ CollectionType coll(tempNss, epoch, timestamp, Date_t::now(), uuid);
coll.setKeyPattern(skey.getKeyPattern());
coll.setUnique(false);
coll.setDefaultCollation(collation);
@@ -161,7 +164,7 @@ public:
reshardingFields.setRecipientFields(recipientFields);
coll.setReshardingFields(reshardingFields);
- ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
+ ChunkVersion version(1, 0, epoch, timestamp);
ChunkType chunk(uuid,
{skey.getKeyPattern().globalMin(), skey.getKeyPattern().globalMax()},
@@ -180,13 +183,14 @@ public:
void expectRefreshReturnForOriginalColl(const NamespaceString& origNss,
const ShardKeyPattern& skey,
UUID uuid,
- OID epoch) {
+ OID epoch,
+ Timestamp timestamp) {
expectFindSendBSONObjVector(kConfigHostAndPort, [&]() {
- CollectionType coll(origNss, epoch, Date_t::now(), uuid);
+ CollectionType coll(origNss, epoch, timestamp, Date_t::now(), uuid);
coll.setKeyPattern(skey.getKeyPattern());
coll.setUnique(false);
- ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
+ ChunkVersion version(1, 0, epoch, timestamp);
ChunkType chunk(uuid,
{skey.getKeyPattern().globalMin(), skey.getKeyPattern().globalMax()},
@@ -280,8 +284,12 @@ TEST_F(RecipientServiceExternalStateTest, CreateLocalReshardingCollectionBasic)
}
// Simulate a refresh for the temporary resharding collection.
- loadOneChunkMetadataForTemporaryReshardingColl(
- kReshardingNss, kOrigNss, kReshardingKey, kReshardingUUID, kReshardingEpoch);
+ loadOneChunkMetadataForTemporaryReshardingColl(kReshardingNss,
+ kOrigNss,
+ kReshardingKey,
+ kReshardingUUID,
+ kReshardingEpoch,
+ kReshardingTimestamp);
const std::vector<BSONObj> indexes = {BSON("v" << 2 << "key" << BSON("_id" << 1) << "name"
<< "_id_"),
@@ -291,7 +299,8 @@ TEST_F(RecipientServiceExternalStateTest, CreateLocalReshardingCollectionBasic)
<< "name"
<< "indexOne")};
auto future = launchAsync([&] {
- expectRefreshReturnForOriginalColl(kOrigNss, kShardKey, kOrigUUID, kOrigEpoch);
+ expectRefreshReturnForOriginalColl(
+ kOrigNss, kShardKey, kOrigUUID, kOrigEpoch, kOrigTimestamp);
expectListCollections(
kOrigNss,
kOrigUUID,
@@ -328,8 +337,12 @@ TEST_F(RecipientServiceExternalStateTest,
}
// Simulate a refresh for the temporary resharding collection.
- loadOneChunkMetadataForTemporaryReshardingColl(
- kReshardingNss, kOrigNss, kReshardingKey, kReshardingUUID, kReshardingEpoch);
+ loadOneChunkMetadataForTemporaryReshardingColl(kReshardingNss,
+ kOrigNss,
+ kReshardingKey,
+ kReshardingUUID,
+ kReshardingEpoch,
+ kReshardingTimestamp);
const std::vector<BSONObj> indexes = {BSON("v" << 2 << "key" << BSON("_id" << 1) << "name"
<< "_id_"),
@@ -339,7 +352,8 @@ TEST_F(RecipientServiceExternalStateTest,
<< "name"
<< "indexOne")};
auto future = launchAsync([&] {
- expectRefreshReturnForOriginalColl(kOrigNss, kShardKey, kOrigUUID, kOrigEpoch);
+ expectRefreshReturnForOriginalColl(
+ kOrigNss, kShardKey, kOrigUUID, kOrigEpoch, kOrigTimestamp);
expectStaleDbVersionError(kOrigNss, "listCollections");
expectGetDatabase(kOrigNss, shards[1].getHost());
expectListCollections(
@@ -352,7 +366,8 @@ TEST_F(RecipientServiceExternalStateTest,
HostAndPort(shards[1].getHost()));
expectStaleEpochError(kOrigNss, "listIndexes");
- expectRefreshReturnForOriginalColl(kOrigNss, kShardKey, kOrigUUID, kOrigEpoch);
+ expectRefreshReturnForOriginalColl(
+ kOrigNss, kShardKey, kOrigUUID, kOrigEpoch, kOrigTimestamp);
expectListIndexes(kOrigNss, kOrigUUID, indexes, HostAndPort(shards[0].getHost()));
});
@@ -381,8 +396,12 @@ TEST_F(RecipientServiceExternalStateTest,
}
// Simulate a refresh for the temporary resharding collection.
- loadOneChunkMetadataForTemporaryReshardingColl(
- kReshardingNss, kOrigNss, kReshardingKey, kReshardingUUID, kReshardingEpoch);
+ loadOneChunkMetadataForTemporaryReshardingColl(kReshardingNss,
+ kOrigNss,
+ kReshardingKey,
+ kReshardingUUID,
+ kReshardingEpoch,
+ kReshardingTimestamp);
const std::vector<BSONObj> indexes = {BSON("v" << 2 << "key" << BSON("_id" << 1) << "name"
<< "_id_"),
@@ -407,7 +426,8 @@ TEST_F(RecipientServiceExternalStateTest,
}
auto future = launchAsync([&] {
- expectRefreshReturnForOriginalColl(kOrigNss, kShardKey, kOrigUUID, kOrigEpoch);
+ expectRefreshReturnForOriginalColl(
+ kOrigNss, kShardKey, kOrigUUID, kOrigEpoch, kOrigTimestamp);
expectListCollections(
kOrigNss,
kOrigUUID,
@@ -444,8 +464,12 @@ TEST_F(RecipientServiceExternalStateTest,
}
// Simulate a refresh for the temporary resharding collection.
- loadOneChunkMetadataForTemporaryReshardingColl(
- kReshardingNss, kOrigNss, kReshardingKey, kReshardingUUID, kReshardingEpoch);
+ loadOneChunkMetadataForTemporaryReshardingColl(kReshardingNss,
+ kOrigNss,
+ kReshardingKey,
+ kReshardingUUID,
+ kReshardingEpoch,
+ kReshardingTimestamp);
const std::vector<BSONObj> indexes = {BSON("v" << 2 << "key" << BSON("_id" << 1) << "name"
<< "_id_"),
@@ -472,7 +496,8 @@ TEST_F(RecipientServiceExternalStateTest,
}
auto future = launchAsync([&] {
- expectRefreshReturnForOriginalColl(kOrigNss, kShardKey, kOrigUUID, kOrigEpoch);
+ expectRefreshReturnForOriginalColl(
+ kOrigNss, kShardKey, kOrigUUID, kOrigEpoch, kOrigTimestamp);
expectListCollections(
kOrigNss,
kOrigUUID,
@@ -509,8 +534,12 @@ TEST_F(RecipientServiceExternalStateTest,
}
// Simulate a refresh for the temporary resharding collection.
- loadOneChunkMetadataForTemporaryReshardingColl(
- kReshardingNss, kOrigNss, kReshardingKey, kReshardingUUID, kReshardingEpoch);
+ loadOneChunkMetadataForTemporaryReshardingColl(kReshardingNss,
+ kOrigNss,
+ kReshardingKey,
+ kReshardingUUID,
+ kReshardingEpoch,
+ kReshardingTimestamp);
const std::vector<BSONObj> indexes = {BSON("v" << 2 << "key" << BSON("_id" << 1) << "name"
<< "_id_"),
@@ -527,7 +556,8 @@ TEST_F(RecipientServiceExternalStateTest,
operationContext(), kReshardingNss, optionsAndIndexes);
auto future = launchAsync([&] {
- expectRefreshReturnForOriginalColl(kOrigNss, kShardKey, kOrigUUID, kOrigEpoch);
+ expectRefreshReturnForOriginalColl(
+ kOrigNss, kShardKey, kOrigUUID, kOrigEpoch, kOrigTimestamp);
expectListCollections(
kOrigNss,
kOrigUUID,
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
index a2ee7a5521b..5ec1423036c 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
@@ -80,7 +80,7 @@ public:
std::vector<ChunkType> chunks = {ChunkType{
_sourceUUID,
ChunkRange{BSON(_currentShardKey << MINKEY), BSON(_currentShardKey << MAXKEY)},
- ChunkVersion(100, 0, epoch, boost::none /* timestamp */),
+ ChunkVersion(100, 0, epoch, Timestamp()),
_someDonorId}};
auto rt = RoutingTableHistory::makeNew(_sourceNss,
@@ -89,7 +89,7 @@ public:
nullptr /* defaultCollator */,
false /* unique */,
std::move(epoch),
- boost::none /* timestamp */,
+ Timestamp(),
boost::none /* timeseriesFields */,
boost::none /* reshardingFields */,
boost::none /* chunkSizeBytes */,
diff --git a/src/mongo/db/s/resharding_destined_recipient_test.cpp b/src/mongo/db/s/resharding_destined_recipient_test.cpp
index 6592bb4f35d..676154a6a44 100644
--- a/src/mongo/db/s/resharding_destined_recipient_test.cpp
+++ b/src/mongo/db/s/resharding_destined_recipient_test.cpp
@@ -161,18 +161,15 @@ public:
protected:
std::vector<ChunkType> createChunks(const OID& epoch,
const UUID& uuid,
+ const Timestamp& timestamp,
const std::string& shardKey) {
auto range1 = ChunkRange(BSON(shardKey << MINKEY), BSON(shardKey << 5));
- ChunkType chunk1(uuid,
- range1,
- ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
- kShardList[0].getName());
+ ChunkType chunk1(
+ uuid, range1, ChunkVersion(1, 0, epoch, timestamp), kShardList[0].getName());
auto range2 = ChunkRange(BSON(shardKey << 5), BSON(shardKey << MAXKEY));
- ChunkType chunk2(uuid,
- range2,
- ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
- kShardList[1].getName());
+ ChunkType chunk2(
+ uuid, range2, ChunkVersion(1, 0, epoch, timestamp), kShardList[1].getName());
return {chunk1, chunk2};
}
@@ -201,7 +198,7 @@ protected:
ReshardingEnv env(CollectionCatalog::get(opCtx)->lookupUUIDByNSS(opCtx, kNss).value());
env.destShard = kShardList[1].getName();
- env.version = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
+ env.version = ChunkVersion(1, 0, OID::gen(), Timestamp());
env.tempNss =
NamespaceString(kNss.db(),
fmt::format("{}{}",
@@ -219,7 +216,8 @@ protected:
{ShardId{kShardList[0].getName()}, ShardId{kShardList[1].getName()}}});
reshardingFields.setState(CoordinatorStateEnum::kPreparingToDonate);
- CollectionType coll(kNss, env.version.epoch(), Date_t::now(), UUID::gen());
+ CollectionType coll(
+ kNss, env.version.epoch(), env.version.getTimestamp(), Date_t::now(), UUID::gen());
coll.setKeyPattern(BSON(kShardKey << 1));
coll.setUnique(false);
coll.setAllowMigrations(false);
@@ -229,10 +227,14 @@ protected:
_mockCatalogCacheLoader->setCollectionRefreshValues(
kNss,
coll,
- createChunks(env.version.epoch(), env.sourceUuid, kShardKey),
+ createChunks(
+ env.version.epoch(), env.sourceUuid, env.version.getTimestamp(), kShardKey),
reshardingFields);
_mockCatalogCacheLoader->setCollectionRefreshValues(
- env.tempNss, coll, createChunks(env.version.epoch(), env.sourceUuid, "y"), boost::none);
+ env.tempNss,
+ coll,
+ createChunks(env.version.epoch(), env.sourceUuid, env.version.getTimestamp(), "y"),
+ boost::none);
forceDatabaseRefresh(opCtx, kNss.db());
forceShardFilteringMetadataRefresh(opCtx, kNss);
diff --git a/src/mongo/db/s/shard_metadata_util.cpp b/src/mongo/db/s/shard_metadata_util.cpp
index 4a5adb921ef..6b134bbe591 100644
--- a/src/mongo/db/s/shard_metadata_util.cpp
+++ b/src/mongo/db/s/shard_metadata_util.cpp
@@ -299,7 +299,7 @@ StatusWith<std::vector<ChunkType>> readShardChunks(OperationContext* opCtx,
const BSONObj& sort,
boost::optional<long long> limit,
const OID& epoch,
- const boost::optional<Timestamp>& timestamp) {
+ const Timestamp& timestamp) {
const auto chunksNss = getShardChunksNss(nss, uuid, supportingLongName);
try {
diff --git a/src/mongo/db/s/shard_metadata_util.h b/src/mongo/db/s/shard_metadata_util.h
index b2b25b4c8c0..ce4c0c0cd4d 100644
--- a/src/mongo/db/s/shard_metadata_util.h
+++ b/src/mongo/db/s/shard_metadata_util.h
@@ -175,7 +175,7 @@ StatusWith<std::vector<ChunkType>> readShardChunks(OperationContext* opCtx,
const BSONObj& sort,
boost::optional<long long> limit,
const OID& epoch,
- const boost::optional<Timestamp>& timestamp);
+ const Timestamp& timestamp);
/**
* Takes a vector of 'chunks' and updates the shard's chunks collection for 'nss' or 'uuid'. Any
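This header change is the general shape of the whole patch: a parameter goes from boost::optional<Timestamp> to a plain Timestamp, moving the requirement from the callee's runtime checks into the signature. A hypothetical before/after (processChunks is an illustration, not a function in this patch):

    // Before: every caller may pass boost::none, and the body needs a
    // branch for the absent case.
    Status processChunks(const OID& epoch,
                         const boost::optional<Timestamp>& timestamp);

    // After: producing a Timestamp is the caller's responsibility, and
    // the boost::none branch disappears from the body.
    Status processChunks(const OID& epoch, const Timestamp& timestamp);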
diff --git a/src/mongo/db/s/shard_metadata_util_test.cpp b/src/mongo/db/s/shard_metadata_util_test.cpp
index 814517f17d9..bfd1b812f27 100644
--- a/src/mongo/db/s/shard_metadata_util_test.cpp
+++ b/src/mongo/db/s/shard_metadata_util_test.cpp
@@ -65,6 +65,7 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
ShardCollectionType shardCollectionType(
BSON(ShardCollectionType::kNssFieldName
<< kNss.ns() << ShardCollectionType::kEpochFieldName << maxCollVersion.epoch()
+ << ShardCollectionType::kTimestampFieldName << maxCollVersion.getTimestamp()
<< ShardCollectionType::kUuidFieldName << uuid
<< ShardCollectionType::kKeyPatternFieldName << keyPattern.toBSON()
<< ShardCollectionType::kDefaultCollationFieldName << defaultCollation
@@ -162,7 +163,7 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
}
}
- ChunkVersion maxCollVersion{0, 0, OID::gen(), boost::none /* timestamp */};
+ ChunkVersion maxCollVersion{0, 0, OID::gen(), Timestamp(1, 1)};
const KeyPattern keyPattern{BSON("a" << 1)};
const BSONObj defaultCollation{BSON("locale"
<< "fr_CA")};
@@ -177,6 +178,7 @@ TEST_F(ShardMetadataUtilTest, UpdateAndReadCollectionsEntry) {
ASSERT_EQUALS(updateShardCollectionType.getUuid(), readShardCollectionType.getUuid());
ASSERT_EQUALS(updateShardCollectionType.getNss(), readShardCollectionType.getNss());
ASSERT_EQUALS(updateShardCollectionType.getEpoch(), readShardCollectionType.getEpoch());
+ ASSERT_EQUALS(updateShardCollectionType.getTimestamp(), readShardCollectionType.getTimestamp());
ASSERT_BSONOBJ_EQ(updateShardCollectionType.getKeyPattern().toBSON(),
readShardCollectionType.getKeyPattern().toBSON());
ASSERT_BSONOBJ_EQ(updateShardCollectionType.getDefaultCollation(),
@@ -199,6 +201,7 @@ TEST_F(ShardMetadataUtilTest, PersistedRefreshSignalStartAndFinish) {
ASSERT_EQUALS(shardCollectionsEntry.getUuid(), uuid);
ASSERT_EQUALS(shardCollectionsEntry.getNss().ns(), kNss.ns());
ASSERT_EQUALS(shardCollectionsEntry.getEpoch(), maxCollVersion.epoch());
+ ASSERT_EQUALS(shardCollectionsEntry.getTimestamp(), maxCollVersion.getTimestamp());
ASSERT_BSONOBJ_EQ(shardCollectionsEntry.getKeyPattern().toBSON(), keyPattern.toBSON());
ASSERT_BSONOBJ_EQ(shardCollectionsEntry.getDefaultCollation(), defaultCollation);
ASSERT_EQUALS(shardCollectionsEntry.getUnique(), kUnique);
@@ -237,7 +240,7 @@ TEST_F(ShardMetadataUtilTest, WriteAndReadChunks) {
// read all the chunks
QueryAndSort allChunkDiff = createShardChunkDiffQuery(
- ChunkVersion(0, 0, maxCollVersion.epoch(), boost::none /* timestamp */));
+ ChunkVersion(0, 0, maxCollVersion.epoch(), maxCollVersion.getTimestamp()));
std::vector<ChunkType> readChunks = assertGet(readShardChunks(operationContext(),
kNss,
uuid,
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index 780138ffc0c..677a502fef3 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -387,7 +387,7 @@ void forcePrimaryDatabaseRefreshAndWaitForReplication(OperationContext* opCtx, S
* Does nothing otherwise.
*/
void patchUpChangedChunksIfNeeded(bool mustPatchUpMetadataResults,
- const boost::optional<Timestamp>& timestamp,
+ const Timestamp& timestamp,
std::vector<ChunkType>& changedChunks) {
if (!mustPatchUpMetadataResults)
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
index e9800a39810..80409adc861 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
@@ -203,7 +203,7 @@ CollectionType ShardServerCatalogCacheLoaderTest::makeCollectionType(
std::pair<CollectionType, vector<ChunkType>>
ShardServerCatalogCacheLoaderTest::setUpChunkLoaderWithFiveChunks() {
- ChunkVersion collectionVersion(1, 0, OID::gen(), boost::none /* timestamp */);
+ ChunkVersion collectionVersion(1, 0, OID::gen(), Timestamp());
CollectionType collectionType = makeCollectionType(collectionVersion);
vector<ChunkType> chunks = makeFiveChunks(collectionVersion);
@@ -371,7 +371,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindNewEpoch)
// Then refresh again and find that the collection has been dropped and recreated.
- ChunkVersion collVersionWithNewEpoch(1, 0, OID::gen(), boost::none /* timestamp */);
+ ChunkVersion collVersionWithNewEpoch(1, 0, OID::gen(), Timestamp());
CollectionType collectionTypeWithNewEpoch = makeCollectionType(collVersionWithNewEpoch);
vector<ChunkType> chunksWithNewEpoch = makeFiveChunks(collVersionWithNewEpoch);
_remoteLoaderMock->setCollectionRefreshReturnValue(collectionTypeWithNewEpoch);
@@ -398,7 +398,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindMixedChun
 // Then refresh again and retrieve chunks from the config server that have mixed epochs,
 // as if the chunks read spanned a drop and recreate of the collection.
- ChunkVersion collVersionWithNewEpoch(1, 0, OID::gen(), boost::none /* timestamp */);
+ ChunkVersion collVersionWithNewEpoch(1, 0, OID::gen(), Timestamp());
CollectionType collectionTypeWithNewEpoch = makeCollectionType(collVersionWithNewEpoch);
vector<ChunkType> chunksWithNewEpoch = makeFiveChunks(collVersionWithNewEpoch);
vector<ChunkType> mixedChunks;
@@ -458,7 +458,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindDbMetadat
}
TEST_F(ShardServerCatalogCacheLoaderTest, TimeseriesFieldsAreProperlyPropagatedOnSSCCL) {
- ChunkVersion collectionVersion(1, 0, OID::gen(), boost::none /* timestamp */);
+ ChunkVersion collectionVersion(1, 0, OID::gen(), Timestamp());
CollectionType collectionType = makeCollectionType(collectionVersion);
vector<ChunkType> chunks = makeFiveChunks(collectionVersion);
@@ -500,7 +500,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, TimeseriesFieldsAreProperlyPropagatedO
}
void ShardServerCatalogCacheLoaderTest::refreshCollectionEpochOnRemoteLoader() {
- ChunkVersion collectionVersion(1, 2, OID::gen(), boost::none);
+ ChunkVersion collectionVersion(1, 2, OID::gen(), Timestamp());
CollectionType collectionType = makeCollectionType(collectionVersion);
vector<ChunkType> chunks = makeFiveChunks(collectionVersion);
_remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
@@ -530,7 +530,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, CollAndChunkTasksConsistency) {
}
TEST_F(ShardServerCatalogCacheLoaderTest, SupportingLongNameFieldsAreProperlyPropagatedOnSSCCL) {
- ChunkVersion collectionVersion(1, 0, OID::gen(), boost::none /* timestamp */);
+ ChunkVersion collectionVersion(1, 0, OID::gen(), Timestamp());
CollectionType collectionType = makeCollectionType(collectionVersion);
collectionType.setSupportingLongName(SupportingLongNameStatusEnum::kExplicitlyEnabled);
diff --git a/src/mongo/db/s/sharding_ddl_util_test.cpp b/src/mongo/db/s/sharding_ddl_util_test.cpp
index 9421a389193..e182d0f4431 100644
--- a/src/mongo/db/s/sharding_ddl_util_test.cpp
+++ b/src/mongo/db/s/sharding_ddl_util_test.cpp
@@ -217,7 +217,7 @@ TEST_F(ShardingDDLUtilTest, ShardedRenamePreconditionsAreMet) {
sharding_ddl_util::checkShardedRenamePreconditions(opCtx, kToNss, false /* dropTarget */);
// Initialize a chunk
- ChunkVersion chunkVersion(1, 1, OID::gen(), boost::none);
+ ChunkVersion chunkVersion(1, 1, OID::gen(), Timestamp(2, 1));
ChunkType chunk;
chunk.setName(OID::gen());
chunk.setCollectionUUID(UUID::gen());
@@ -237,7 +237,7 @@ TEST_F(ShardingDDLUtilTest, ShardedRenamePreconditionsTargetCollectionExists) {
auto opCtx = operationContext();
// Initialize a chunk
- ChunkVersion chunkVersion(1, 1, OID::gen(), boost::none);
+ ChunkVersion chunkVersion(1, 1, OID::gen(), Timestamp(2, 1));
ChunkType chunk;
chunk.setName(OID::gen());
chunk.setCollectionUUID(UUID::gen());
diff --git a/src/mongo/db/s/type_shard_collection.cpp b/src/mongo/db/s/type_shard_collection.cpp
index 707032eaeaf..66ab2759fbb 100644
--- a/src/mongo/db/s/type_shard_collection.cpp
+++ b/src/mongo/db/s/type_shard_collection.cpp
@@ -33,21 +33,18 @@
namespace mongo {
-ShardCollectionType::ShardCollectionType(
- NamespaceString nss, OID epoch, UUID uuid, KeyPattern keyPattern, bool unique)
- : ShardCollectionTypeBase(
- std::move(nss), std::move(epoch), std::move(uuid), std::move(keyPattern), unique) {}
-
ShardCollectionType::ShardCollectionType(NamespaceString nss,
OID epoch,
- boost::optional<Timestamp> creationTime,
+ Timestamp creationTime,
UUID uuid,
KeyPattern keyPattern,
bool unique)
- : ShardCollectionTypeBase(
- std::move(nss), std::move(epoch), std::move(uuid), std::move(keyPattern), unique) {
- setTimestamp(std::move(creationTime));
-}
+ : ShardCollectionTypeBase(std::move(nss),
+ std::move(epoch),
+ std::move(creationTime),
+ std::move(uuid),
+ std::move(keyPattern),
+ unique) {}
ShardCollectionType::ShardCollectionType(const BSONObj& obj) {
ShardCollectionTypeBase::parseProtected(IDLParserErrorContext("ShardCollectionType"), obj);
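With the five-argument constructor deleted, every construction site names a creation time explicitly, as the tests further down show. The single remaining constructor takes the timestamp between the epoch and the UUID (kNss and kKeyPattern are the test file's fixtures):

    ShardCollectionType shardCollType(
        kNss, OID::gen(), Timestamp(), UUID::gen(), kKeyPattern, true /* unique */);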
diff --git a/src/mongo/db/s/type_shard_collection.h b/src/mongo/db/s/type_shard_collection.h
index f2d8123655e..f6a86b0b388 100644
--- a/src/mongo/db/s/type_shard_collection.h
+++ b/src/mongo/db/s/type_shard_collection.h
@@ -48,6 +48,7 @@ public:
using ShardCollectionTypeBase::kReshardingFieldsFieldName;
using ShardCollectionTypeBase::kSupportingLongNameFieldName;
using ShardCollectionTypeBase::kTimeseriesFieldsFieldName;
+ using ShardCollectionTypeBase::kTimestampFieldName;
using ShardCollectionTypeBase::kUniqueFieldName;
using ShardCollectionTypeBase::kUuidFieldName;
@@ -80,12 +81,9 @@ public:
using ShardCollectionTypeBase::setUnique;
using ShardCollectionTypeBase::setUuid;
- ShardCollectionType(
- NamespaceString nss, OID epoch, UUID uuid, KeyPattern keyPattern, bool unique);
-
ShardCollectionType(NamespaceString nss,
OID epoch,
- boost::optional<Timestamp> creationTime,
+ Timestamp creationTime,
UUID uuid,
KeyPattern keyPattern,
bool unique);
diff --git a/src/mongo/db/s/type_shard_collection.idl b/src/mongo/db/s/type_shard_collection.idl
index 2bf73f19f7e..fbc5cd6ac8f 100644
--- a/src/mongo/db/s/type_shard_collection.idl
+++ b/src/mongo/db/s/type_shard_collection.idl
@@ -105,11 +105,7 @@ structs:
 collection was created or its shard key was last refined. Because
 timestamps are comparable, they define a total order in time for
 the collection. This field will replace Epoch, which is not
- comparable.
-
- It is optional for parsing purposes, because in versions of MongoDB
- prior to 5.0, this value wasn't being written."
- optional: true
+ comparable."
uuid:
type: uuid
description: "The UUID that will be used to create the local collection on each of
diff --git a/src/mongo/db/s/type_shard_collection_test.cpp b/src/mongo/db/s/type_shard_collection_test.cpp
index 3a62e513af3..bd5ed844a75 100644
--- a/src/mongo/db/s/type_shard_collection_test.cpp
+++ b/src/mongo/db/s/type_shard_collection_test.cpp
@@ -48,6 +48,7 @@ TEST(ShardCollectionType, FromBSONEmptyShardKeyFails) {
ASSERT_THROWS_CODE(
ShardCollectionType(BSON(ShardCollectionType::kNssFieldName
<< kNss.ns() << ShardCollectionType::kEpochFieldName << OID::gen()
+ << ShardCollectionType::kTimestampFieldName << Timestamp()
<< ShardCollectionType::kUuidFieldName << UUID::gen()
<< ShardCollectionType::kKeyPatternFieldName << BSONObj()
<< ShardCollectionType::kUniqueFieldName << true)),
@@ -57,32 +58,39 @@ TEST(ShardCollectionType, FromBSONEmptyShardKeyFails) {
TEST(ShardCollectionType, FromBSONEpochMatchesLastRefreshedCollectionVersionWhenBSONTimestamp) {
OID epoch = OID::gen();
+ Timestamp timestamp(1, 1);
ShardCollectionType shardCollType(
BSON(ShardCollectionType::kNssFieldName
<< kNss.ns() << ShardCollectionType::kEpochFieldName << epoch
+ << ShardCollectionType::kTimestampFieldName << timestamp
<< ShardCollectionType::kUuidFieldName << UUID::gen()
<< ShardCollectionType::kKeyPatternFieldName << kKeyPattern
<< ShardCollectionType::kUniqueFieldName << true
<< ShardCollectionType::kLastRefreshedCollectionVersionFieldName << Timestamp()));
ASSERT_EQ(epoch, shardCollType.getLastRefreshedCollectionVersion()->epoch());
+ ASSERT_EQ(timestamp, shardCollType.getLastRefreshedCollectionVersion()->getTimestamp());
}
TEST(ShardCollectionType, FromBSONEpochMatchesLastRefreshedCollectionVersionWhenDate) {
OID epoch = OID::gen();
+ Timestamp timestamp(1, 1);
ShardCollectionType shardCollType(
BSON(ShardCollectionType::kNssFieldName
<< kNss.ns() << ShardCollectionType::kEpochFieldName << epoch
<< ShardCollectionType::kUuidFieldName << UUID::gen()
+ << ShardCollectionType::kTimestampFieldName << timestamp
<< ShardCollectionType::kKeyPatternFieldName << kKeyPattern
<< ShardCollectionType::kUniqueFieldName << true
<< ShardCollectionType::kLastRefreshedCollectionVersionFieldName << Date_t()));
ASSERT_EQ(epoch, shardCollType.getLastRefreshedCollectionVersion()->epoch());
+ ASSERT_EQ(timestamp, shardCollType.getLastRefreshedCollectionVersion()->getTimestamp());
}
TEST(ShardCollectionType, ToBSONEmptyDefaultCollationNotIncluded) {
- ShardCollectionType shardCollType(kNss, OID::gen(), UUID::gen(), kKeyPattern, true);
+ ShardCollectionType shardCollType(
+ kNss, OID::gen(), Timestamp(), UUID::gen(), kKeyPattern, true);
BSONObj obj = shardCollType.toBSON();
ASSERT_FALSE(obj.hasField(ShardCollectionType::kDefaultCollationFieldName));
@@ -94,7 +102,8 @@ TEST(ShardCollectionType, ToBSONEmptyDefaultCollationNotIncluded) {
}
TEST(ShardCollectionType, ReshardingFieldsIncluded) {
- ShardCollectionType shardCollType(kNss, OID::gen(), UUID::gen(), kKeyPattern, true);
+ ShardCollectionType shardCollType(
+ kNss, OID::gen(), Timestamp(), UUID::gen(), kKeyPattern, true);
TypeCollectionReshardingFields reshardingFields;
const auto reshardingUUID = UUID::gen();
@@ -110,7 +119,8 @@ TEST(ShardCollectionType, ReshardingFieldsIncluded) {
}
TEST(ShardCollectionType, AllowMigrationsFieldBackwardsCompatibility) {
- ShardCollectionType shardCollType(kNss, OID::gen(), UUID::gen(), kKeyPattern, true);
+ ShardCollectionType shardCollType(
+ kNss, OID::gen(), Timestamp(), UUID::gen(), kKeyPattern, true);
shardCollType.setAllowMigrations(false);
ASSERT_EQ(false, shardCollType.toBSON()[ShardCollectionType::kAllowMigrationsFieldName].Bool());