summary | refs | log | tree | commit | diff
path: root/src/mongo/s/catalog
diff options
context:
space:
mode:
author	Sergi Mateo Bellido <sergi.mateo-bellido@mongodb.com>	2021-06-01 06:48:00 +0000
committer	Evergreen Agent <no-reply@evergreen.mongodb.com>	2021-06-14 17:18:34 +0000
commit	e7f91b95941b2e636ba4715c2cea5baf5bc3e2d2 (patch)
tree	6d823474977fe3bb42b0b451b8b922fef5946b10 /src/mongo/s/catalog
parent	389ecf35aa15a97a3bf855518c19a4ad05075acb (diff)
download	mongo-e7f91b95941b2e636ba4715c2cea5baf5bc3e2d2.tar.gz
SERVER-57313 Pass the collection epoch and timestamp when building a ChunkType from a config.chunks BSON
- Pass the epoch and the timestamp to the functions that build ChunkTypes from config.chunks BSON
- Fix our tests

(cherry picked from commit 7b30ab1943ecbb48e8bdbc50bf928eab09f619b5)
Diffstat (limited to 'src/mongo/s/catalog')
-rw-r--r--	src/mongo/s/catalog/sharding_catalog_client.h	5
-rw-r--r--	src/mongo/s/catalog/sharding_catalog_client_impl.cpp	71
-rw-r--r--	src/mongo/s/catalog/sharding_catalog_client_impl.h	2
-rw-r--r--	src/mongo/s/catalog/sharding_catalog_client_mock.cpp	2
-rw-r--r--	src/mongo/s/catalog/sharding_catalog_client_mock.h	2
-rw-r--r--	src/mongo/s/catalog/sharding_catalog_client_test.cpp	26
-rw-r--r--	src/mongo/s/catalog/type_chunk.cpp	4
-rw-r--r--	src/mongo/s/catalog/type_chunk.h	4
-rw-r--r--	src/mongo/s/catalog/type_chunk_test.cpp	60
9 files changed, 125 insertions, 51 deletions
diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h
index 6866f2561ef..f33b89e188b 100644
--- a/src/mongo/s/catalog/sharding_catalog_client.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -165,7 +165,10 @@ public:
* @param optime an out parameter that will contain the opTime of the config server.
* Can be null. Note that chunks can be fetched in multiple batches and each batch
* can have a unique opTime. This opTime will be the one from the last batch.
+ * @param epoch epoch associated to the collection, needed to build the chunks.
+ * @param timestamp timestamp associated to the collection, needed to build the chunks.
* @param readConcern The readConcern to use while querying for chunks.
+
*
* Returns a vector of ChunkTypes, or a !OK status if an error occurs.
*/
@@ -175,6 +178,8 @@ public:
const BSONObj& sort,
boost::optional<int> limit,
repl::OpTime* opTime,
+ const OID& epoch,
+ const boost::optional<Timestamp>& timestamp,
repl::ReadConcernLevel readConcern,
const boost::optional<BSONObj>& hint = boost::none) = 0;
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index d3ea4bcc0e6..1a810b73f39 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -689,6 +689,8 @@ StatusWith<std::vector<ChunkType>> ShardingCatalogClientImpl::getChunks(
const BSONObj& sort,
boost::optional<int> limit,
OpTime* opTime,
+ const OID& epoch,
+ const boost::optional<Timestamp>& timestamp,
repl::ReadConcernLevel readConcern,
const boost::optional<BSONObj>& hint) {
invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer ||
@@ -707,7 +709,7 @@ StatusWith<std::vector<ChunkType>> ShardingCatalogClientImpl::getChunks(
std::vector<ChunkType> chunks;
chunks.reserve(chunkDocsOpTimePair.value.size());
for (const BSONObj& obj : chunkDocsOpTimePair.value) {
- auto chunkRes = ChunkType::fromConfigBSON(obj);
+ auto chunkRes = ChunkType::fromConfigBSON(obj, epoch, timestamp);
if (!chunkRes.isOK()) {
return chunkRes.getStatus().withContext(stream() << "Failed to parse chunk with id "
<< obj[ChunkType::name()]);
@@ -762,34 +764,50 @@ std::pair<CollectionType, std::vector<ChunkType>> ShardingCatalogClientImpl::get
stream() << "Collection " << nss.ns() << " not found",
!aggResult.empty());
- boost::optional<CollectionType> coll;
- std::vector<ChunkType> chunks;
- chunks.reserve(aggResult.size() - 1);
// The aggregation may return the config.collections document anywhere between the
// config.chunks documents.
- for (const auto& elem : aggResult) {
- const auto chunkElem = elem.getField("chunks");
- if (chunkElem) {
- auto chunkRes = uassertStatusOK(ChunkType::fromConfigBSON(chunkElem.Obj()));
- chunks.emplace_back(std::move(chunkRes));
- } else {
- uassert(5520100,
- "Found more than one 'collections' documents in aggregation response",
- !coll);
- coll.emplace(elem);
-
- uassert(ErrorCodes::NamespaceNotFound,
- str::stream() << "Collection " << nss.ns() << " is dropped.",
- !coll->getDropped());
+ // 1st: look for the collection since it is needed to properly build the chunks.
+ boost::optional<CollectionType> coll;
+ {
+ for (const auto& elem : aggResult) {
+ const auto chunkElem = elem.getField("chunks");
+ if (!chunkElem) {
+ coll.emplace(elem);
+ break;
+ }
}
+ uassert(5520101, "'collections' document not found in aggregation response", coll);
+
+ uassert(ErrorCodes::NamespaceNotFound,
+ str::stream() << "Collection " << nss.ns() << " is dropped.",
+ !coll->getDropped());
}
- uassert(5520101, "'collections' document not found in aggregation response", coll);
+ // 2nd: Traverse all the elements and build the chunks.
+ std::vector<ChunkType> chunks;
+ {
+ chunks.reserve(aggResult.size() - 1);
+ bool foundCollection = false;
+ for (const auto& elem : aggResult) {
+ const auto chunkElem = elem.getField("chunks");
+ if (chunkElem) {
+ auto chunkRes = uassertStatusOK(ChunkType::fromConfigBSON(
+ chunkElem.Obj(), coll->getEpoch(), coll->getTimestamp()));
+ chunks.emplace_back(std::move(chunkRes));
+ } else {
+ uassert(5520100,
+ "Found more than one 'collections' documents in aggregation response",
+ !foundCollection);
+ foundCollection = true;
+ }
+ }
+
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ stream() << "No chunks were found for the collection " << nss,
+ !chunks.empty());
+ }
- uassert(ErrorCodes::ConflictingOperationInProgress,
- stream() << "No chunks were found for the collection " << nss,
- !chunks.empty());
return {std::move(*coll), std::move(chunks)};
};
@@ -1017,7 +1035,14 @@ Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* opCt
} else {
query.append(ChunkType::ns(), nsOrUUID.nss()->ns());
}
- auto chunkWithStatus = getChunks(opCtx, query.obj(), BSONObj(), 1, nullptr, readConcern);
+ auto chunkWithStatus = getChunks(opCtx,
+ query.obj(),
+ BSONObj(),
+ 1,
+ nullptr,
+ lastChunkVersion.epoch(),
+ lastChunkVersion.getTimestamp(),
+ readConcern);
if (!chunkWithStatus.isOK()) {
errMsg = str::stream()
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.h b/src/mongo/s/catalog/sharding_catalog_client_impl.h
index b1e399372d4..1526dfd0302 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.h
@@ -91,6 +91,8 @@ public:
const BSONObj& sort,
boost::optional<int> limit,
repl::OpTime* opTime,
+ const OID& epoch,
+ const boost::optional<Timestamp>& timestamp,
repl::ReadConcernLevel readConcern,
const boost::optional<BSONObj>& hint = boost::none) override;
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
index ea9696f30ff..90a1661b481 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
@@ -82,6 +82,8 @@ StatusWith<std::vector<ChunkType>> ShardingCatalogClientMock::getChunks(
const BSONObj& sort,
boost::optional<int> limit,
repl::OpTime* opTime,
+ const OID& epoch,
+ const boost::optional<Timestamp>& timestamp,
repl::ReadConcernLevel readConcern,
const boost::optional<BSONObj>& hint) {
return {ErrorCodes::InternalError, "Method not implemented"};
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.h b/src/mongo/s/catalog/sharding_catalog_client_mock.h
index 2b7ade924be..bc58f1fd84e 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.h
@@ -67,6 +67,8 @@ public:
const BSONObj& sort,
boost::optional<int> limit,
repl::OpTime* opTime,
+ const OID& epoch,
+ const boost::optional<Timestamp>& timestamp,
repl::ReadConcernLevel readConcern,
const boost::optional<BSONObj>& hint) override;
diff --git a/src/mongo/s/catalog/sharding_catalog_client_test.cpp b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
index 8051ad28f35..eaffa9445d9 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
@@ -351,14 +351,15 @@ TEST_F(ShardingCatalogClientTest, GetAllShardsWithInvalidShard) {
TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
- OID oid = OID::gen();
+ const auto collEpoch = OID::gen();
+ const auto collTimestamp = boost::none;
ChunkType chunkA;
chunkA.setName(OID::gen());
chunkA.setNS(kNamespace);
chunkA.setMin(BSON("a" << 1));
chunkA.setMax(BSON("a" << 100));
- chunkA.setVersion({1, 2, oid, boost::none /* timestamp */});
+ chunkA.setVersion({1, 2, collEpoch, collTimestamp});
chunkA.setShard(ShardId("shard0000"));
ChunkType chunkB;
@@ -366,10 +367,10 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
chunkB.setNS(kNamespace);
chunkB.setMin(BSON("a" << 100));
chunkB.setMax(BSON("a" << 200));
- chunkB.setVersion({3, 4, oid, boost::none /* timestamp */});
+ chunkB.setVersion({3, 4, collEpoch, collTimestamp});
chunkB.setShard(ShardId("shard0001"));
- ChunkVersion queryChunkVersion({1, 2, oid, boost::none /* timestamp */});
+ ChunkVersion queryChunkVersion({1, 2, collEpoch, collTimestamp});
const BSONObj chunksQuery(
BSON(ChunkType::ns("TestDB.TestColl")
@@ -378,7 +379,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
const OpTime newOpTime(Timestamp(7, 6), 5);
- auto future = launchAsync([this, &chunksQuery, newOpTime] {
+ auto future = launchAsync([this, &chunksQuery, newOpTime, &collEpoch, &collTimestamp] {
OpTime opTime;
const auto chunks =
@@ -387,6 +388,8 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
BSON(ChunkType::lastmod() << -1),
1,
&opTime,
+ collEpoch,
+ collTimestamp,
repl::ReadConcernLevel::kMajorityReadConcern));
ASSERT_EQ(2U, chunks.size());
ASSERT_EQ(newOpTime, opTime);
@@ -433,20 +436,25 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
TEST_F(ShardingCatalogClientTest, GetChunksForNSNoSortNoLimit) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
- ChunkVersion queryChunkVersion({1, 2, OID::gen(), boost::none /* timestamp */});
+ const auto collEpoch = OID::gen();
+ const auto collTimestamp = boost::none;
+
+ ChunkVersion queryChunkVersion({1, 2, collEpoch, collTimestamp});
const BSONObj chunksQuery(
BSON(ChunkType::ns("TestDB.TestColl")
<< ChunkType::lastmod()
<< BSON("$gte" << static_cast<long long>(queryChunkVersion.toLong()))));
- auto future = launchAsync([this, &chunksQuery] {
+ auto future = launchAsync([this, &chunksQuery, &collEpoch, &collTimestamp] {
const auto chunks =
assertGet(catalogClient()->getChunks(operationContext(),
chunksQuery,
BSONObj(),
boost::none,
nullptr,
+ collEpoch,
+ collTimestamp,
repl::ReadConcernLevel::kMajorityReadConcern));
ASSERT_EQ(0U, chunks.size());
@@ -484,13 +492,15 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSInvalidChunk) {
<< ChunkType::lastmod()
<< BSON("$gte" << static_cast<long long>(queryChunkVersion.toLong()))));
- auto future = launchAsync([this, &chunksQuery] {
+ auto future = launchAsync([this, &chunksQuery, &queryChunkVersion] {
const auto swChunks =
catalogClient()->getChunks(operationContext(),
chunksQuery,
BSONObj(),
boost::none,
nullptr,
+ queryChunkVersion.epoch(),
+ queryChunkVersion.getTimestamp(),
repl::ReadConcernLevel::kMajorityReadConcern);
ASSERT_EQUALS(ErrorCodes::NoSuchKey, swChunks.getStatus());
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index ee72ab44f1b..3798e7d5c32 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -343,7 +343,9 @@ StatusWith<ChunkType> ChunkType::parseFromConfigBSONCommand(const BSONObj& sourc
return chunk;
}
-StatusWith<ChunkType> ChunkType::fromConfigBSON(const BSONObj& source) {
+StatusWith<ChunkType> ChunkType::fromConfigBSON(const BSONObj& source,
+ const OID& epoch,
+ const boost::optional<Timestamp>& timestamp) {
StatusWith<ChunkType> chunkStatus = parseFromConfigBSONCommand(source);
if (!chunkStatus.isOK()) {
return chunkStatus.getStatus();
diff --git a/src/mongo/s/catalog/type_chunk.h b/src/mongo/s/catalog/type_chunk.h
index bff0d350c8c..778d3af77aa 100644
--- a/src/mongo/s/catalog/type_chunk.h
+++ b/src/mongo/s/catalog/type_chunk.h
@@ -233,7 +233,9 @@ public:
* ErrorCodes::NoSuchKey if the '_id' field is missing while 'fromConfigBSON' does.
*/
static StatusWith<ChunkType> parseFromConfigBSONCommand(const BSONObj& source);
- static StatusWith<ChunkType> fromConfigBSON(const BSONObj& source);
+ static StatusWith<ChunkType> fromConfigBSON(const BSONObj& source,
+ const OID& epoch,
+ const boost::optional<Timestamp>& timestamp);
/**
* Returns the BSON representation of the entry for the config server's config.chunks
diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp
index 38de85bba2c..0e2fe0d0eda 100644
--- a/src/mongo/s/catalog/type_chunk_test.cpp
+++ b/src/mongo/s/catalog/type_chunk_test.cpp
@@ -47,21 +47,24 @@ const BSONObj kMax = BSON("a" << 20);
const ShardId kShard("shard0000");
TEST(ChunkType, MissingConfigRequiredFields) {
- ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
+ const auto collEpoch = OID::gen();
+ const auto collTimestamp = boost::none;
+
+ ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
BSONObj objModNS =
BSON(ChunkType::name(OID::gen())
<< ChunkType::min(BSON("a" << 10 << "b" << 10)) << ChunkType::max(BSON("a" << 20))
<< "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
<< chunkVersion.epoch() << ChunkType::shard("shard0001"));
- StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(objModNS);
+ StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(objModNS, collEpoch, collTimestamp);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModKeys =
BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << "lastmod" << Timestamp(chunkVersion.toLong())
<< "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
- chunkRes = ChunkType::fromConfigBSON(objModKeys);
+ chunkRes = ChunkType::fromConfigBSON(objModKeys, collEpoch, collTimestamp);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModShard =
@@ -69,14 +72,14 @@ TEST(ChunkType, MissingConfigRequiredFields) {
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
<< ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
<< "lastmodEpoch" << chunkVersion.epoch());
- chunkRes = ChunkType::fromConfigBSON(objModShard);
+ chunkRes = ChunkType::fromConfigBSON(objModShard, collEpoch, collTimestamp);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModVersion =
BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
<< ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001"));
- chunkRes = ChunkType::fromConfigBSON(objModVersion);
+ chunkRes = ChunkType::fromConfigBSON(objModVersion, collEpoch, collTimestamp);
ASSERT_FALSE(chunkRes.isOK());
}
@@ -130,49 +133,61 @@ TEST(ChunkType, ToFromShardBSON) {
}
TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
- ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
+ const auto collEpoch = OID::gen();
+ const auto collTimestamp = boost::none;
+
+ ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
BSONObj obj =
BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
<< ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
<< "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
- StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
+ StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj, collEpoch, collTimestamp);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
}
TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
- ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
+ const auto collEpoch = OID::gen();
+ const auto collTimestamp = boost::none;
+
+ ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
BSONObj obj =
BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10))
<< ChunkType::max(BSON("b" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
<< "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
- StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
+ StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj, collEpoch, collTimestamp);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
}
TEST(ChunkType, MinToMaxNotAscending) {
- ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
+ const auto collEpoch = OID::gen();
+ const auto collTimestamp = boost::none;
+
+ ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
BSONObj obj =
BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 20))
<< ChunkType::max(BSON("a" << 10)) << "lastmod" << Timestamp(chunkVersion.toLong())
<< "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
- StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
+ StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj, collEpoch, collTimestamp);
ASSERT_EQ(ErrorCodes::FailedToParse, chunkRes.getStatus());
}
TEST(ChunkType, ToFromConfigBSON) {
+ const auto collEpoch = OID::gen();
+ const auto collTimestamp = boost::none;
+
const auto chunkID = OID::gen();
- ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
+ ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
BSONObj obj =
BSON(ChunkType::name(chunkID)
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10))
<< ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001") << "lastmod"
<< Timestamp(chunkVersion.toLong()) << "lastmodEpoch" << chunkVersion.epoch());
- StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
+ StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj, collEpoch, collTimestamp);
ASSERT_OK(chunkRes.getStatus());
ChunkType chunk = chunkRes.getValue();
@@ -189,13 +204,19 @@ TEST(ChunkType, ToFromConfigBSON) {
}
TEST(ChunkType, BadType) {
+ const auto collEpoch = OID::gen();
+ const auto collTimestamp = boost::none;
+
BSONObj obj = BSON(ChunkType::name() << 0);
- StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
+ StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj, collEpoch, collTimestamp);
ASSERT_FALSE(chunkRes.isOK());
}
TEST(ChunkType, BothNsAndUUID) {
- ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
+ const auto collEpoch = OID::gen();
+ const auto collTimestamp = boost::none;
+
+ ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
BSONObj objModNS =
BSON(ChunkType::name(OID::gen())
@@ -203,12 +224,15 @@ TEST(ChunkType, BothNsAndUUID) {
<< ChunkType::min(BSON("a" << 10 << "b" << 10)) << ChunkType::max(BSON("a" << 20))
<< "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
<< chunkVersion.epoch() << ChunkType::shard("shard0001"));
- StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(objModNS);
+ StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(objModNS, collEpoch, collTimestamp);
ASSERT_TRUE(chunkRes.isOK());
}
TEST(ChunkType, UUIDPresentAndNsMissing) {
- ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
+ const auto collEpoch = OID::gen();
+ const auto collTimestamp = boost::none;
+
+ ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
BSONObj objModNS =
BSON(ChunkType::name(OID::gen())
@@ -216,7 +240,7 @@ TEST(ChunkType, UUIDPresentAndNsMissing) {
<< ChunkType::min(BSON("a" << 10 << "b" << 10)) << ChunkType::max(BSON("a" << 20))
<< "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
<< chunkVersion.epoch() << ChunkType::shard("shard0001"));
- StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(objModNS);
+ StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(objModNS, collEpoch, collTimestamp);
ASSERT_TRUE(chunkRes.isOK());
}