author     Allison Easton <allison.easton@mongodb.com>    2021-09-21 13:39:28 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2021-09-21 14:39:04 +0000
commit     9e1d37df7bf5bb4c8312f155bd671214f75ea296 (patch)
tree       c0895615554be30d482ec9923af19ea75b9745ae /src/mongo/s/catalog
parent     bfef41e47abf95ec8f8114552d44df6c58409c9c (diff)
download   mongo-9e1d37df7bf5bb4c8312f155bd671214f75ea296.tar.gz
SERVER-52847 Make timestamp required in CollectionType and ShardCollectionType IDL
Diffstat (limited to 'src/mongo/s/catalog')
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client.h                2
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.cpp         2
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.h           2
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_mock.cpp         2
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_mock.h           2
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_test.cpp        27
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp    2
-rw-r--r--  src/mongo/s/catalog/type_chunk.cpp                          10
-rw-r--r--  src/mongo/s/catalog/type_chunk.h                             4
-rw-r--r--  src/mongo/s/catalog/type_chunk_test.cpp                     55
-rw-r--r--  src/mongo/s/catalog/type_collection.cpp                     16
-rw-r--r--  src/mongo/s/catalog/type_collection.h                        9
-rw-r--r--  src/mongo/s/catalog/type_collection.idl                      6
-rw-r--r--  src/mongo/s/catalog/type_collection_test.cpp                48
14 files changed, 90 insertions, 97 deletions
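
For orientation, a minimal call-site sketch (not part of the commit) of what the signature change implies: CollectionType is now constructed with a required Timestamp, and interfaces such as getChunks take a plain const Timestamp& rather than a boost::optional<Timestamp>&. The namespace, epoch, and UUID values below are illustrative only.

    // Hypothetical usage sketch, assuming the post-change constructor
    // CollectionType(NamespaceString, OID, Timestamp, Date_t, UUID).
    CollectionType coll(NamespaceString("db.coll"),
                        OID::gen(),
                        Timestamp(1, 1),  // timestamp is now mandatory, no boost::optional
                        Date_t::now(),
                        UUID::gen());
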
diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h
index 0eb7027eb78..60df2570447 100644
--- a/src/mongo/s/catalog/sharding_catalog_client.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -184,7 +184,7 @@ public:
boost::optional<int> limit,
repl::OpTime* opTime,
const OID& epoch,
- const boost::optional<Timestamp>& timestamp,
+ const Timestamp& timestamp,
repl::ReadConcernLevel readConcern,
const boost::optional<BSONObj>& hint = boost::none) = 0;
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index fabe24b7928..80a826bfbc9 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -593,7 +593,7 @@ StatusWith<std::vector<ChunkType>> ShardingCatalogClientImpl::getChunks(
boost::optional<int> limit,
OpTime* opTime,
const OID& epoch,
- const boost::optional<Timestamp>& timestamp,
+ const Timestamp& timestamp,
repl::ReadConcernLevel readConcern,
const boost::optional<BSONObj>& hint) {
invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer ||
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.h b/src/mongo/s/catalog/sharding_catalog_client_impl.h
index b614d3a3839..99c86ef03a1 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.h
@@ -97,7 +97,7 @@ public:
boost::optional<int> limit,
repl::OpTime* opTime,
const OID& epoch,
- const boost::optional<Timestamp>& timestamp,
+ const Timestamp& timestamp,
repl::ReadConcernLevel readConcern,
const boost::optional<BSONObj>& hint = boost::none) override;
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
index e5c24f4bfb6..a4153e80d50 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
@@ -90,7 +90,7 @@ StatusWith<std::vector<ChunkType>> ShardingCatalogClientMock::getChunks(
boost::optional<int> limit,
repl::OpTime* opTime,
const OID& epoch,
- const boost::optional<Timestamp>& timestamp,
+ const Timestamp& timestamp,
repl::ReadConcernLevel readConcern,
const boost::optional<BSONObj>& hint) {
return {ErrorCodes::InternalError, "Method not implemented"};
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.h b/src/mongo/s/catalog/sharding_catalog_client_mock.h
index 794744e30ac..fdab949c024 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.h
@@ -72,7 +72,7 @@ public:
boost::optional<int> limit,
repl::OpTime* opTime,
const OID& epoch,
- const boost::optional<Timestamp>& timestamp,
+ const Timestamp& timestamp,
repl::ReadConcernLevel readConcern,
const boost::optional<BSONObj>& hint) override;
diff --git a/src/mongo/s/catalog/sharding_catalog_client_test.cpp b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
index 35a5371bb89..49ec74a361a 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
@@ -86,7 +86,7 @@ TEST_F(ShardingCatalogClientTest, GetCollectionExisting) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
CollectionType expectedColl(
- NamespaceString("TestDB.TestNS"), OID::gen(), Date_t::now(), UUID::gen());
+ NamespaceString("TestDB.TestNS"), OID::gen(), Timestamp(), Date_t::now(), UUID::gen());
expectedColl.setKeyPattern(BSON("KeyName" << 1));
const OpTime newOpTime(Timestamp(7, 6), 5);
@@ -355,7 +355,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
const auto collUuid = UUID::gen();
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(1, 1);
ChunkType chunkA;
chunkA.setName(OID::gen());
@@ -441,7 +441,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForUUIDNoSortNoLimit) {
const auto collUuid = UUID::gen();
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp();
ChunkVersion queryChunkVersion({1, 2, collEpoch, collTimestamp});
@@ -490,7 +490,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSInvalidChunk) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
const auto collUuid = UUID::gen();
- ChunkVersion queryChunkVersion({1, 2, OID::gen(), boost::none /* timestamp */});
+ ChunkVersion queryChunkVersion({1, 2, OID::gen(), Timestamp()});
const BSONObj chunksQuery(
BSON(ChunkType::collectionUUID()
@@ -516,14 +516,14 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSInvalidChunk) {
chunkA.setCollectionUUID(collUuid);
chunkA.setMin(BSON("a" << 1));
chunkA.setMax(BSON("a" << 100));
- chunkA.setVersion({1, 2, OID::gen(), boost::none /* timestamp */});
+ chunkA.setVersion({1, 2, OID::gen(), Timestamp()});
chunkA.setShard(ShardId("shard0000"));
ChunkType chunkB;
chunkB.setCollectionUUID(collUuid);
chunkB.setMin(BSON("a" << 100));
chunkB.setMax(BSON("a" << 200));
- chunkB.setVersion({3, 4, OID::gen(), boost::none /* timestamp */});
+ chunkB.setVersion({3, 4, OID::gen(), Timestamp()});
// Missing shard id
return vector<BSONObj>{chunkA.toConfigBSON(), chunkB.toConfigBSON()};
@@ -765,13 +765,14 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotWritablePrimar
TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsNoDb) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
- CollectionType coll1(NamespaceString{"test.coll1"}, OID::gen(), network()->now(), UUID::gen());
+ CollectionType coll1(
+ NamespaceString{"test.coll1"}, OID::gen(), Timestamp(), network()->now(), UUID::gen());
coll1.setKeyPattern(KeyPattern{BSON("_id" << 1)});
coll1.setUnique(false);
CollectionType coll2(
- NamespaceString{"anotherdb.coll1"}, OID::gen(), network()->now(), UUID::gen());
+ NamespaceString{"anotherdb.coll1"}, OID::gen(), Timestamp(), network()->now(), UUID::gen());
coll2.setKeyPattern(KeyPattern{BSON("_id" << 1)});
coll2.setUnique(false);
@@ -818,11 +819,13 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsNoDb) {
TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsWithDb) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
- CollectionType coll1(NamespaceString{"test.coll1"}, OID::gen(), network()->now(), UUID::gen());
+ CollectionType coll1(
+ NamespaceString{"test.coll1"}, OID::gen(), Timestamp(), network()->now(), UUID::gen());
coll1.setKeyPattern(KeyPattern{BSON("_id" << 1)});
coll1.setUnique(true);
- CollectionType coll2(NamespaceString{"test.coll2"}, OID::gen(), network()->now(), UUID::gen());
+ CollectionType coll2(
+ NamespaceString{"test.coll2"}, OID::gen(), Timestamp(), network()->now(), UUID::gen());
coll2.setKeyPattern(KeyPattern{BSON("_id" << 1)});
coll2.setUnique(false);
@@ -863,7 +866,7 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsInvalidCollectionType) {
});
CollectionType validColl(
- NamespaceString{"test.coll1"}, OID::gen(), network()->now(), UUID::gen());
+ NamespaceString{"test.coll1"}, OID::gen(), Timestamp(), network()->now(), UUID::gen());
validColl.setKeyPattern(KeyPattern{BSON("_id" << 1)});
validColl.setUnique(true);
@@ -1185,7 +1188,7 @@ TEST_F(ShardingCatalogClientTest, ApplyChunkOpsDeprecatedSuccessfulWithCheck) {
<< "second precondition"));
const NamespaceString nss("config.chunks");
const UUID uuid = UUID::gen();
- ChunkVersion lastChunkVersion(0, 0, OID(), boost::none /* timestamp */);
+ ChunkVersion lastChunkVersion(0, 0, OID(), Timestamp());
auto future = launchAsync([this, updateOps, preCondition, uuid, nss, lastChunkVersion] {
auto status =
diff --git a/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp b/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
index 7ad45617c41..149d7950fb3 100644
--- a/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
@@ -408,7 +408,7 @@ TEST_F(UpdateRetryTest, NotWritablePrimaryOnceSuccessAfterRetry) {
configTargeter()->setFindHostReturnValue(host1);
CollectionType collection(
- NamespaceString("db.coll"), OID::gen(), network()->now(), UUID::gen());
+ NamespaceString("db.coll"), OID::gen(), Timestamp(), network()->now(), UUID::gen());
collection.setKeyPattern(KeyPattern(BSON("_id" << 1)));
BSONObj objToUpdate = BSON("_id" << 1 << "Value"
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index 1235bfb75f0..85615320d18 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -312,7 +312,7 @@ StatusWith<ChunkType> ChunkType::parseFromConfigBSONCommand(const BSONObj& sourc
StatusWith<ChunkType> ChunkType::fromConfigBSON(const BSONObj& source,
const OID& epoch,
- const boost::optional<Timestamp>& timestamp) {
+ const Timestamp& timestamp) {
StatusWith<ChunkType> chunkStatus = parseFromConfigBSONCommand(source);
if (!chunkStatus.isOK()) {
return chunkStatus.getStatus();
@@ -331,10 +331,8 @@ StatusWith<ChunkType> ChunkType::fromConfigBSON(const BSONObj& source,
}
const ChunkVersion& version = chunk.getVersion();
- if (version.epoch() == OID()) {
- chunk.setVersion(
- ChunkVersion(version.majorVersion(), version.minorVersion(), epoch, timestamp));
- }
+ chunk.setVersion(
+ ChunkVersion(version.majorVersion(), version.minorVersion(), epoch, timestamp));
return chunk;
}
@@ -362,7 +360,7 @@ BSONObj ChunkType::toConfigBSON() const {
StatusWith<ChunkType> ChunkType::fromShardBSON(const BSONObj& source,
const OID& epoch,
- const boost::optional<Timestamp>& timestamp) {
+ const Timestamp& timestamp) {
ChunkType chunk;
{
diff --git a/src/mongo/s/catalog/type_chunk.h b/src/mongo/s/catalog/type_chunk.h
index 1d678aecc67..925bb7075fe 100644
--- a/src/mongo/s/catalog/type_chunk.h
+++ b/src/mongo/s/catalog/type_chunk.h
@@ -227,7 +227,7 @@ public:
static StatusWith<ChunkType> parseFromConfigBSONCommand(const BSONObj& source);
static StatusWith<ChunkType> fromConfigBSON(const BSONObj& source,
const OID& epoch,
- const boost::optional<Timestamp>& timestamp);
+ const Timestamp& timestamp);
/**
* Returns the BSON representation of the entry for the config server's config.chunks
@@ -243,7 +243,7 @@ public:
*/
static StatusWith<ChunkType> fromShardBSON(const BSONObj& source,
const OID& epoch,
- const boost::optional<Timestamp>& timestamp);
+ const Timestamp& timestamp);
/**
* Returns the BSON representation of the entry for a shard server's config.chunks.<epoch>
diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp
index b09047edf92..385417734d3 100644
--- a/src/mongo/s/catalog/type_chunk_test.cpp
+++ b/src/mongo/s/catalog/type_chunk_test.cpp
@@ -49,7 +49,7 @@ const ShardId kShard("shard0000");
TEST(ChunkType, MissingConfigRequiredFields) {
const auto collUuid = UUID::gen();
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp();
ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
@@ -86,44 +86,44 @@ TEST(ChunkType, MissingConfigRequiredFields) {
TEST(ChunkType, MissingShardRequiredFields) {
const OID epoch = OID::gen();
- ChunkVersion chunkVersion(1, 2, epoch, boost::none /* timestamp */);
+ const Timestamp timestamp;
+ ChunkVersion chunkVersion(1, 2, epoch, timestamp);
const auto lastmod = Timestamp(chunkVersion.toLong());
BSONObj objModMin =
BSON(ChunkType::max(kMax) << ChunkType::shard(kShard.toString()) << "lastmod" << lastmod);
- StatusWith<ChunkType> chunkRes =
- ChunkType::fromShardBSON(objModMin, epoch, boost::none /* timestamp */);
+ StatusWith<ChunkType> chunkRes = ChunkType::fromShardBSON(objModMin, epoch, timestamp);
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
ASSERT_STRING_CONTAINS(chunkRes.getStatus().reason(), ChunkType::minShardID.name());
BSONObj objModMax = BSON(ChunkType::minShardID(kMin)
<< ChunkType::shard(kShard.toString()) << "lastmod" << lastmod);
- chunkRes = ChunkType::fromShardBSON(objModMax, epoch, boost::none /* timestamp */);
+ chunkRes = ChunkType::fromShardBSON(objModMax, epoch, timestamp);
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
ASSERT_STRING_CONTAINS(chunkRes.getStatus().reason(), ChunkType::max.name());
BSONObj objModShard =
BSON(ChunkType::minShardID(kMin) << ChunkType::max(kMax) << "lastmod" << lastmod);
- chunkRes = ChunkType::fromShardBSON(objModShard, epoch, boost::none /* timestamp */);
+ chunkRes = ChunkType::fromShardBSON(objModShard, epoch, timestamp);
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
ASSERT_STRING_CONTAINS(chunkRes.getStatus().reason(), ChunkType::shard.name());
BSONObj objModLastmod = BSON(ChunkType::minShardID(kMin)
<< ChunkType::max(kMax) << ChunkType::shard(kShard.toString()));
- chunkRes = ChunkType::fromShardBSON(objModLastmod, epoch, boost::none /* timestamp */);
+ chunkRes = ChunkType::fromShardBSON(objModLastmod, epoch, timestamp);
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
}
TEST(ChunkType, ToFromShardBSON) {
const OID epoch = OID::gen();
- ChunkVersion chunkVersion(1, 2, epoch, boost::none /* timestamp */);
+ const Timestamp timestamp;
+ ChunkVersion chunkVersion(1, 2, epoch, timestamp);
auto lastmod = Timestamp(chunkVersion.toLong());
BSONObj obj = BSON(ChunkType::minShardID(kMin)
<< ChunkType::max(kMax) << ChunkType::shard(kShard.toString()) << "lastmod"
<< lastmod);
- ChunkType shardChunk =
- assertGet(ChunkType::fromShardBSON(obj, epoch, boost::none /* timestamp */));
+ ChunkType shardChunk = assertGet(ChunkType::fromShardBSON(obj, epoch, timestamp));
ASSERT_BSONOBJ_EQ(obj, shardChunk.toShardBSON());
@@ -136,14 +136,15 @@ TEST(ChunkType, ToFromShardBSON) {
TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
const auto collUuid = UUID::gen();
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(1);
ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
BSONObj obj = BSON(
ChunkType::name(OID::gen())
<< ChunkType::collectionUUID() << collUuid << ChunkType::min(BSON("a" << 10 << "b" << 10))
<< ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ << "lastmodEpoch" << chunkVersion.epoch() << "lastmodTimestamp"
+ << chunkVersion.getTimestamp() << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj, collEpoch, collTimestamp);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
@@ -152,14 +153,15 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
const auto collUuid = UUID::gen();
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(1);
ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
BSONObj obj =
BSON(ChunkType::name(OID::gen())
<< ChunkType::collectionUUID() << collUuid << ChunkType::min(BSON("a" << 10))
<< ChunkType::max(BSON("b" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ << "lastmodEpoch" << chunkVersion.epoch() << "lastmodTimestamp"
+ << chunkVersion.getTimestamp() << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj, collEpoch, collTimestamp);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
@@ -168,7 +170,7 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
TEST(ChunkType, MinToMaxNotAscending) {
const auto collUuid = UUID::gen();
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(1);
ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
BSONObj obj =
@@ -183,7 +185,7 @@ TEST(ChunkType, MinToMaxNotAscending) {
TEST(ChunkType, ToFromConfigBSON) {
const auto collUuid = UUID::gen();
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(1);
const auto chunkID = OID::gen();
ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
@@ -209,7 +211,7 @@ TEST(ChunkType, ToFromConfigBSON) {
TEST(ChunkType, BadType) {
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(1);
BSONObj obj = BSON(ChunkType::name() << 0);
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj, collEpoch, collTimestamp);
@@ -219,7 +221,7 @@ TEST(ChunkType, BadType) {
TEST(ChunkType, BothNsAndUUID) {
const auto collUuid = UUID::gen();
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(1);
ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
@@ -228,23 +230,24 @@ TEST(ChunkType, BothNsAndUUID) {
<< ChunkType::collectionUUID() << collUuid << ChunkType::collectionUUID()
<< mongo::UUID::gen() << ChunkType::min(BSON("a" << 10 << "b" << 10))
<< ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ << "lastmodEpoch" << chunkVersion.epoch() << "lastmodTimestamp"
+ << chunkVersion.getTimestamp() << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(objModNS, collEpoch, collTimestamp);
ASSERT_TRUE(chunkRes.isOK());
}
TEST(ChunkType, UUIDPresentAndNsMissing) {
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(1);
ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
- BSONObj objModNS =
- BSON(ChunkType::name(OID::gen())
- << ChunkType::collectionUUID() << mongo::UUID::gen()
- << ChunkType::min(BSON("a" << 10 << "b" << 10)) << ChunkType::max(BSON("a" << 20))
- << "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
- << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ BSONObj objModNS = BSON(
+ ChunkType::name(OID::gen())
+ << ChunkType::collectionUUID() << mongo::UUID::gen()
+ << ChunkType::min(BSON("a" << 10 << "b" << 10)) << ChunkType::max(BSON("a" << 20))
+ << "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch" << chunkVersion.epoch()
+ << "lastmodTimestamp" << chunkVersion.getTimestamp() << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(objModNS, collEpoch, collTimestamp);
ASSERT_TRUE(chunkRes.isOK());
}
diff --git a/src/mongo/s/catalog/type_collection.cpp b/src/mongo/s/catalog/type_collection.cpp
index b6e549aa626..816787e1ce4 100644
--- a/src/mongo/s/catalog/type_collection.cpp
+++ b/src/mongo/s/catalog/type_collection.cpp
@@ -43,23 +43,13 @@ namespace mongo {
const NamespaceString CollectionType::ConfigNS("config.collections");
-CollectionType::CollectionType(NamespaceString nss, OID epoch, Date_t updatedAt, UUID uuid)
- : CollectionTypeBase(std::move(nss), std::move(updatedAt)) {
+CollectionType::CollectionType(
+ NamespaceString nss, OID epoch, Timestamp creationTime, Date_t updatedAt, UUID uuid)
+ : CollectionTypeBase(std::move(nss), std::move(updatedAt), std::move(creationTime)) {
setEpoch(std::move(epoch));
setUuid(std::move(uuid));
}
-CollectionType::CollectionType(NamespaceString nss,
- OID epoch,
- boost::optional<Timestamp> creationTime,
- Date_t updatedAt,
- UUID uuid)
- : CollectionTypeBase(std::move(nss), std::move(updatedAt)) {
- setEpoch(std::move(epoch));
- setUuid(std::move(uuid));
- setTimestamp(creationTime);
-}
-
CollectionType::CollectionType(const BSONObj& obj) {
CollectionType::parseProtected(IDLParserErrorContext("CollectionType"), obj);
uassert(ErrorCodes::BadValue,
diff --git a/src/mongo/s/catalog/type_collection.h b/src/mongo/s/catalog/type_collection.h
index 79a4896306a..a903da4d224 100644
--- a/src/mongo/s/catalog/type_collection.h
+++ b/src/mongo/s/catalog/type_collection.h
@@ -113,13 +113,8 @@ public:
// Name of the collections collection in the config server.
static const NamespaceString ConfigNS;
- CollectionType(NamespaceString nss, OID epoch, Date_t updatedAt, UUID uuid);
-
- CollectionType(NamespaceString nss,
- OID epoch,
- boost::optional<Timestamp> creationTime,
- Date_t updatedAt,
- UUID uuid);
+ CollectionType(
+ NamespaceString nss, OID epoch, Timestamp creationTime, Date_t updatedAt, UUID uuid);
explicit CollectionType(const BSONObj& obj);
diff --git a/src/mongo/s/catalog/type_collection.idl b/src/mongo/s/catalog/type_collection.idl
index 1d258b4811f..10612997ec8 100644
--- a/src/mongo/s/catalog/type_collection.idl
+++ b/src/mongo/s/catalog/type_collection.idl
@@ -70,11 +70,7 @@ structs:
collection was created or it's shard key last refined. Because
timestamps are comparable, we are able to define a total order in time
in the collection. This field will replace Epoch, which are not
- comparable.
-
- It is optional for parsing purposes, because in versions of MongoDB
- prior to 5.0, this value wasn't being written."
- optional: true
+ comparable."
uuid:
cpp_name: pre50CompatibleUuid
type: uuid
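
With optional: true removed from the IDL, the timestamp field becomes mandatory when parsing config.collections documents. A minimal, hypothetical sketch of the expected parse-time behavior (ASSERT_THROWS and DBException follow the conventions of the test file below; the exact error code is not asserted here):

    // Hypothetical sketch: a document lacking the timestamp field should now
    // be rejected by the IDL-generated parser instead of parsing successfully.
    ASSERT_THROWS(CollectionType(BSON(CollectionType::kNssFieldName
                                      << "db.coll" << CollectionType::kEpochFieldName << OID::gen()
                                      << CollectionType::kUpdatedAtFieldName
                                      << Date_t::fromMillisSinceEpoch(1)
                                      << CollectionType::kKeyPatternFieldName << BSON("a" << 1)
                                      << CollectionType::kUniqueFieldName << true)),
                  DBException);
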
diff --git a/src/mongo/s/catalog/type_collection_test.cpp b/src/mongo/s/catalog/type_collection_test.cpp
index a96da4c40b5..187112ad361 100644
--- a/src/mongo/s/catalog/type_collection_test.cpp
+++ b/src/mongo/s/catalog/type_collection_test.cpp
@@ -44,8 +44,10 @@ TEST(CollectionType, Empty) {
TEST(CollectionType, Basic) {
const OID oid = OID::gen();
+ const Timestamp timestamp;
CollectionType coll(BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::kEpochFieldName << oid
+ << CollectionType::kTimestampFieldName << timestamp
<< CollectionType::kUpdatedAtFieldName
<< Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
@@ -56,6 +58,7 @@ TEST(CollectionType, Basic) {
ASSERT(coll.getNss() == NamespaceString{"db.coll"});
ASSERT_EQUALS(coll.getEpoch(), oid);
+ ASSERT_EQUALS(coll.getTimestamp(), timestamp);
ASSERT_EQUALS(coll.getUpdatedAt(), Date_t::fromMillisSinceEpoch(1));
ASSERT_BSONOBJ_EQ(coll.getKeyPattern().toBSON(), BSON("a" << 1));
ASSERT_BSONOBJ_EQ(coll.getDefaultCollation(),
@@ -68,26 +71,26 @@ TEST(CollectionType, Basic) {
TEST(CollectionType, AllFieldsPresent) {
const OID oid = OID::gen();
const auto uuid = UUID::gen();
+ const Timestamp timestamp;
const auto reshardingUuid = UUID::gen();
ReshardingFields reshardingFields;
reshardingFields.setReshardingUUID(reshardingUuid);
- CollectionType coll(BSON(CollectionType::kNssFieldName
- << "db.coll" << CollectionType::kEpochFieldName << oid
- << CollectionType::kUpdatedAtFieldName
- << Date_t::fromMillisSinceEpoch(1)
- << CollectionType::kKeyPatternFieldName << BSON("a" << 1)
- << CollectionType::kDefaultCollationFieldName
- << BSON("locale"
- << "fr_CA")
- << CollectionType::kUniqueFieldName << true
- << CollectionType::kUuidFieldName << uuid
- << CollectionType::kReshardingFieldsFieldName
- << reshardingFields.toBSON()));
+ CollectionType coll(BSON(
+ CollectionType::kNssFieldName
+ << "db.coll" << CollectionType::kEpochFieldName << oid
+ << CollectionType::kTimestampFieldName << timestamp << CollectionType::kUpdatedAtFieldName
+ << Date_t::fromMillisSinceEpoch(1) << CollectionType::kKeyPatternFieldName << BSON("a" << 1)
+ << CollectionType::kDefaultCollationFieldName
+ << BSON("locale"
+ << "fr_CA")
+ << CollectionType::kUniqueFieldName << true << CollectionType::kUuidFieldName << uuid
+ << CollectionType::kReshardingFieldsFieldName << reshardingFields.toBSON()));
ASSERT(coll.getNss() == NamespaceString{"db.coll"});
ASSERT_EQUALS(coll.getEpoch(), oid);
+ ASSERT_EQUALS(coll.getTimestamp(), timestamp);
ASSERT_EQUALS(coll.getUpdatedAt(), Date_t::fromMillisSinceEpoch(1));
ASSERT_BSONOBJ_EQ(coll.getKeyPattern().toBSON(), BSON("a" << 1));
ASSERT_BSONOBJ_EQ(coll.getDefaultCollation(),
@@ -102,19 +105,22 @@ TEST(CollectionType, AllFieldsPresent) {
TEST(CollectionType, MissingDefaultCollationParses) {
const OID oid = OID::gen();
- CollectionType coll(BSON(CollectionType::kNssFieldName
- << "db.coll" << CollectionType::kEpochFieldName << oid
- << CollectionType::kUpdatedAtFieldName
- << Date_t::fromMillisSinceEpoch(1)
- << CollectionType::kKeyPatternFieldName << BSON("a" << 1)
- << CollectionType::kUniqueFieldName << true));
+ const Timestamp timestamp;
+ CollectionType coll(BSON(
+ CollectionType::kNssFieldName
+ << "db.coll" << CollectionType::kEpochFieldName << oid
+ << CollectionType::kTimestampFieldName << timestamp << CollectionType::kUpdatedAtFieldName
+ << Date_t::fromMillisSinceEpoch(1) << CollectionType::kKeyPatternFieldName << BSON("a" << 1)
+ << CollectionType::kUniqueFieldName << true));
ASSERT_BSONOBJ_EQ(coll.getDefaultCollation(), BSONObj());
}
TEST(CollectionType, DefaultCollationSerializesCorrectly) {
const OID oid = OID::gen();
+ const Timestamp timestamp;
CollectionType coll(BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::kEpochFieldName << oid
+ << CollectionType::kTimestampFieldName << timestamp
<< CollectionType::kUpdatedAtFieldName
<< Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
@@ -130,7 +136,7 @@ TEST(CollectionType, DefaultCollationSerializesCorrectly) {
TEST(CollectionType, Pre22Format) {
CollectionType coll(BSON("_id"
- << "db.coll"
+ << "db.coll" << CollectionType::kTimestampFieldName << Timestamp()
<< "lastmod" << Date_t::fromMillisSinceEpoch(1) << "dropped" << false
<< "key" << BSON("a" << 1) << "unique" << false));
@@ -145,7 +151,8 @@ TEST(CollectionType, Pre22Format) {
TEST(CollectionType, InvalidNamespace) {
ASSERT_THROWS(CollectionType(BSON(CollectionType::kNssFieldName
<< "foo\\bar.coll" << CollectionType::kEpochFieldName
- << OID::gen() << CollectionType::kUpdatedAtFieldName
+ << OID::gen() << CollectionType::kTimestampFieldName
+ << Timestamp() << CollectionType::kUpdatedAtFieldName
<< Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
<< CollectionType::kUniqueFieldName << true)),
@@ -155,6 +162,7 @@ TEST(CollectionType, InvalidNamespace) {
TEST(CollectionType, BadNamespaceType) {
ASSERT_THROWS(CollectionType(BSON(CollectionType::kNssFieldName
<< 1 << CollectionType::kEpochFieldName << OID::gen()
+ << CollectionType::kTimestampFieldName << Timestamp()
<< CollectionType::kUpdatedAtFieldName
<< Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)