summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDianna Hohensee <dianna.hohensee@10gen.com>2017-01-24 15:46:28 -0500
committerDianna Hohensee <dianna.hohensee@10gen.com>2017-01-30 09:23:10 -0500
commit03028591aec8f6e8f08c8c2be2f829772822d7dd (patch)
treeb41905dc8f1ff01571c6b507cba4cce1c32398be
parent5605483c22231697d163005a4d6cf9ff194a179d (diff)
downloadmongo-03028591aec8f6e8f08c8c2be2f829772822d7dd.tar.gz
SERVER-27804 add additional parsers to ChunkType for shard's config.chunks.uuid collections
-rw-r--r--src/mongo/db/s/balancer/balancer_policy.cpp2
-rw-r--r--src/mongo/db/s/balancer/migration_manager_test.cpp2
-rw-r--r--src/mongo/db/s/balancer/scoped_migration_request_test.cpp2
-rw-r--r--src/mongo/db/s/balancer/type_migration_test.cpp2
-rw-r--r--src/mongo/db/s/collection_metadata_test.cpp2
-rw-r--r--src/mongo/db/s/metadata_loader_test.cpp6
-rw-r--r--src/mongo/db/s/sharding_state_test.cpp18
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client_impl.cpp2
-rw-r--r--src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp6
-rw-r--r--src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp14
-rw-r--r--src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp2
-rw-r--r--src/mongo/s/catalog/sharding_catalog_test.cpp43
-rw-r--r--src/mongo/s/catalog/type_chunk.cpp212
-rw-r--r--src/mongo/s/catalog/type_chunk.h106
-rw-r--r--src/mongo/s/catalog/type_chunk_test.cpp130
-rw-r--r--src/mongo/s/chunk_diff_test.cpp2
-rw-r--r--src/mongo/s/chunk_manager.cpp5
-rw-r--r--src/mongo/s/chunk_version.cpp10
-rw-r--r--src/mongo/s/chunk_version.h7
-rw-r--r--src/mongo/s/config_server_test_fixture.cpp5
-rw-r--r--src/mongo/s/request_types/balance_chunk_request_type.cpp6
-rw-r--r--src/mongo/s/request_types/commit_chunk_migration_request_type.cpp4
22 files changed, 399 insertions, 189 deletions
diff --git a/src/mongo/db/s/balancer/balancer_policy.cpp b/src/mongo/db/s/balancer/balancer_policy.cpp
index 206a50257a6..e3237063ac1 100644
--- a/src/mongo/db/s/balancer/balancer_policy.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy.cpp
@@ -185,7 +185,7 @@ void DistributionStatus::report(BSONObjBuilder* builder) const {
BSONArrayBuilder chunkArr(shardEntry.subarrayStart("chunks"));
for (const auto& chunk : shardChunk.second) {
- chunkArr.append(chunk.toBSON());
+ chunkArr.append(chunk.toConfigBSON());
}
chunkArr.doneFast();
diff --git a/src/mongo/db/s/balancer/migration_manager_test.cpp b/src/mongo/db/s/balancer/migration_manager_test.cpp
index bcca90e4e25..aa09c329844 100644
--- a/src/mongo/db/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/db/s/balancer/migration_manager_test.cpp
@@ -210,7 +210,7 @@ ChunkType MigrationManagerTest::setUpChunk(const std::string& collName,
chunk.setShard(shardId);
chunk.setVersion(version);
ASSERT_OK(catalogClient()->insertConfigDocument(
- operationContext(), ChunkType::ConfigNS, chunk.toBSON(), kMajorityWriteConcern));
+ operationContext(), ChunkType::ConfigNS, chunk.toConfigBSON(), kMajorityWriteConcern));
return chunk;
}
diff --git a/src/mongo/db/s/balancer/scoped_migration_request_test.cpp b/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
index b945ea6cb9b..10fb8da0e22 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
@@ -101,7 +101,7 @@ MigrateInfo makeMigrateInfo() {
kChunkVersion.appendForChunk(&chunkBuilder);
chunkBuilder.append(ChunkType::shard(), kFromShard.toString());
- ChunkType chunkType = assertGet(ChunkType::fromBSON(chunkBuilder.obj()));
+ ChunkType chunkType = assertGet(ChunkType::fromConfigBSON(chunkBuilder.obj()));
ASSERT_OK(chunkType.validate());
return MigrateInfo(kToShard, chunkType);
diff --git a/src/mongo/db/s/balancer/type_migration_test.cpp b/src/mongo/db/s/balancer/type_migration_test.cpp
index 56b25d85f2d..8d5d7cd07db 100644
--- a/src/mongo/db/s/balancer/type_migration_test.cpp
+++ b/src/mongo/db/s/balancer/type_migration_test.cpp
@@ -59,7 +59,7 @@ TEST(MigrationTypeTest, ConvertFromMigrationInfo) {
version.appendForChunk(&chunkBuilder);
chunkBuilder.append(ChunkType::shard(), kFromShard.toString());
- ChunkType chunkType = assertGet(ChunkType::fromBSON(chunkBuilder.obj()));
+ ChunkType chunkType = assertGet(ChunkType::fromConfigBSON(chunkBuilder.obj()));
ASSERT_OK(chunkType.validate());
MigrateInfo migrateInfo(kToShard, chunkType);
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index e3c0cc318e8..c75405f33bb 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -72,7 +72,7 @@ protected:
chunkType.setMax(BSON("a" << MAXKEY));
chunkType.setVersion(ChunkVersion(1, 0, epoch));
ASSERT_OK(chunkType.validate());
- std::vector<BSONObj> chunksToSend{chunkType.toBSON()};
+ std::vector<BSONObj> chunksToSend{chunkType.toConfigBSON()};
auto future = launchAsync([this] {
auto status = MetadataLoader::makeCollectionMetadata(operationContext(),
diff --git a/src/mongo/db/s/metadata_loader_test.cpp b/src/mongo/db/s/metadata_loader_test.cpp
index 13a16b313ad..99a4cf065bf 100644
--- a/src/mongo/db/s/metadata_loader_test.cpp
+++ b/src/mongo/db/s/metadata_loader_test.cpp
@@ -110,7 +110,7 @@ protected:
}
ASSERT(chunk.validate().isOK());
- chunksToSend.push_back(chunk.toBSON());
+ chunksToSend.push_back(chunk.toConfigBSON());
}
auto future = launchAsync([this, ns, shardName, metadata] {
@@ -226,7 +226,7 @@ TEST_F(MetadataLoaderFixture, BadChunk) {
});
expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{collType.toBSON()});
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{chunkInfo.toBSON()});
+ expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{chunkInfo.toConfigBSON()});
future.timed_get(kFutureTimeout);
}
@@ -295,7 +295,7 @@ TEST_F(MetadataLoaderFixture, CheckNumChunk) {
});
expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{collType.toBSON()});
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{chunkType.toBSON()});
+ expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{chunkType.toConfigBSON()});
future.timed_get(kFutureTimeout);
}
diff --git a/src/mongo/db/s/sharding_state_test.cpp b/src/mongo/db/s/sharding_state_test.cpp
index b42af89e774..dce1326b0b9 100644
--- a/src/mongo/db/s/sharding_state_test.cpp
+++ b/src/mongo/db/s/sharding_state_test.cpp
@@ -541,7 +541,7 @@ TEST_F(ShardingStateTest, MetadataRefreshShouldUseDiffQuery) {
chunk.setMax(BSON("x" << 10));
chunk.setShard(ShardId(shardName()));
chunk.setVersion(ChunkVersion(2, 0, initEpoch));
- setupCollectionMetadata(nss, initEpoch, std::vector<BSONObj>{chunk.toBSON()});
+ setupCollectionMetadata(nss, initEpoch, std::vector<BSONObj>{chunk.toConfigBSON()});
}
const ChunkVersion newVersion(3, 0, initEpoch);
@@ -573,7 +573,7 @@ TEST_F(ShardingStateTest, MetadataRefreshShouldUseDiffQuery) {
chunk.setMax(BSON("x" << 20));
chunk.setShard(ShardId(shardName()));
chunk.setVersion(ChunkVersion(3, 10, initEpoch));
- return std::vector<BSONObj>{chunk.toBSON()};
+ return std::vector<BSONObj>{chunk.toConfigBSON()};
});
future.timed_get(kFutureTimeout);
@@ -601,7 +601,7 @@ TEST_F(ShardingStateTest, MetadataRefreshShouldUseFullQueryOnEpochMismatch) {
chunk.setMax(BSON("x" << 10));
chunk.setShard(ShardId(shardName()));
chunk.setVersion(ChunkVersion(2, 0, initEpoch));
- setupCollectionMetadata(nss, initEpoch, std::vector<BSONObj>{chunk.toBSON()});
+ setupCollectionMetadata(nss, initEpoch, std::vector<BSONObj>{chunk.toConfigBSON()});
}
@@ -636,7 +636,7 @@ TEST_F(ShardingStateTest, MetadataRefreshShouldUseFullQueryOnEpochMismatch) {
chunk.setMax(BSON("x" << 20));
chunk.setShard(ShardId(shardName()));
chunk.setVersion(ChunkVersion(3, 10, newVersion.epoch()));
- return std::vector<BSONObj>{chunk.toBSON()};
+ return std::vector<BSONObj>{chunk.toConfigBSON()};
});
// Retry the refresh again. Now doing a full reload.
@@ -664,7 +664,7 @@ TEST_F(ShardingStateTest, MetadataRefreshShouldUseFullQueryOnEpochMismatch) {
chunk.setMax(BSON("x" << 20));
chunk.setShard(ShardId(shardName()));
chunk.setVersion(ChunkVersion(3, 10, newVersion.epoch()));
- return std::vector<BSONObj>{chunk.toBSON()};
+ return std::vector<BSONObj>{chunk.toConfigBSON()};
});
future.timed_get(kFutureTimeout);
@@ -689,7 +689,7 @@ TEST_F(ShardingStateTest, FullMetadataOnEpochMismatchShouldStopAfterMaxRetries)
chunk.setMax(BSON("x" << 10));
chunk.setShard(ShardId(shardName()));
chunk.setVersion(ChunkVersion(2, 0, initEpoch));
- setupCollectionMetadata(nss, initEpoch, std::vector<BSONObj>{chunk.toBSON()});
+ setupCollectionMetadata(nss, initEpoch, std::vector<BSONObj>{chunk.toConfigBSON()});
}
@@ -728,7 +728,7 @@ TEST_F(ShardingStateTest, FullMetadataOnEpochMismatchShouldStopAfterMaxRetries)
chunk.setMax(BSON("x" << 20));
chunk.setShard(ShardId(shardName()));
chunk.setVersion(ChunkVersion(3, 10, nextEpoch));
- return std::vector<BSONObj>{chunk.toBSON()};
+ return std::vector<BSONObj>{chunk.toConfigBSON()};
});
lastEpoch = nextEpoch;
@@ -757,7 +757,7 @@ TEST_F(ShardingStateTest, MetadataRefreshShouldBeOkWhenCollectionWasDropped) {
chunk.setMax(BSON("x" << 10));
chunk.setShard(ShardId(shardName()));
chunk.setVersion(ChunkVersion(2, 0, initEpoch));
- setupCollectionMetadata(nss, initEpoch, std::vector<BSONObj>{chunk.toBSON()});
+ setupCollectionMetadata(nss, initEpoch, std::vector<BSONObj>{chunk.toConfigBSON()});
}
const ChunkVersion newVersion(3, 0, initEpoch);
@@ -798,7 +798,7 @@ TEST_F(ShardingStateTest, MetadataRefreshShouldNotRetryOtherTypesOfError) {
chunk.setMax(BSON("x" << 10));
chunk.setShard(ShardId(shardName()));
chunk.setVersion(ChunkVersion(2, 0, initEpoch));
- setupCollectionMetadata(nss, initEpoch, std::vector<BSONObj>{chunk.toBSON()});
+ setupCollectionMetadata(nss, initEpoch, std::vector<BSONObj>{chunk.toConfigBSON()});
}
auto configTargeter =
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 01bb420f49a..0138dd0fb37 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -1095,7 +1095,7 @@ Status ShardingCatalogClientImpl::getChunks(OperationContext* txn,
const auto& chunkDocsOpTimePair = findStatus.getValue();
for (const BSONObj& obj : chunkDocsOpTimePair.value) {
- auto chunkRes = ChunkType::fromBSON(obj);
+ auto chunkRes = ChunkType::fromConfigBSON(obj);
if (!chunkRes.isOK()) {
chunks->clear();
return {chunkRes.getStatus().code(),
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
index e6b7bd78f08..fe3e427f228 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
@@ -88,7 +88,7 @@ BSONArray buildMergeChunksApplyOpsUpdates(const std::vector<ChunkType>& chunksTo
mergedChunk.setVersion(mergeVersion);
// add the new chunk information as the update object
- op.append("o", mergedChunk.toBSON());
+ op.append("o", mergedChunk.toConfigBSON());
// query object
op.append("o2", BSON(ChunkType::name(mergedChunk.getName())));
@@ -174,7 +174,7 @@ Status checkCollectionVersionEpoch(OperationContext* txn,
<< ").");
}
- auto chunkWith = ChunkType::fromBSON(findResponseWith.getValue().docs.front());
+ auto chunkWith = ChunkType::fromConfigBSON(findResponseWith.getValue().docs.front());
if (!chunkWith.isOK()) {
return chunkWith.getStatus();
} else if (chunkWith.getValue().getVersion().epoch() != collectionEpoch) {
@@ -571,7 +571,7 @@ Status ShardingCatalogManagerImpl::commitChunkMerge(OperationContext* txn,
{
BSONArrayBuilder b(logDetail.subarrayStart("merged"));
for (auto chunkToMerge : chunksToMerge) {
- b.append(chunkToMerge.toBSON());
+ b.append(chunkToMerge.toConfigBSON());
}
}
collVersion.addToBSON(logDetail, "prevShardVersion");
diff --git a/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp b/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
index d457bd5de2d..cdb71cfa7cf 100644
--- a/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
@@ -86,7 +86,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
ASSERT_EQ(1u, chunksVector.size());
// MergedChunk should have range [chunkMin, chunkMax]
- auto mergedChunk = uassertStatusOK(ChunkType::fromBSON(chunksVector.front()));
+ auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
@@ -149,7 +149,7 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
ASSERT_EQ(1u, chunksVector.size());
// MergedChunk should have range [chunkMin, chunkMax]
- auto mergedChunk = uassertStatusOK(ChunkType::fromBSON(chunksVector.front()));
+ auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
@@ -216,7 +216,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
ASSERT_EQ(2u, chunksVector.size());
// MergedChunk should have range [chunkMin, chunkMax]
- auto mergedChunk = uassertStatusOK(ChunkType::fromBSON(chunksVector.front()));
+ auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
@@ -279,7 +279,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
ASSERT_EQ(2u, chunksVector.size());
// MergedChunk should have range [chunkMin, chunkMax]
- auto mergedChunk = uassertStatusOK(ChunkType::fromBSON(chunksVector.front()));
+ auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
@@ -290,7 +290,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
}
// OtherChunk should have been left alone
- auto foundOtherChunk = uassertStatusOK(ChunkType::fromBSON(chunksVector.back()));
+ auto foundOtherChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.back()));
ASSERT_BSONOBJ_EQ(otherChunk.getMin(), foundOtherChunk.getMin());
ASSERT_BSONOBJ_EQ(otherChunk.getMax(), foundOtherChunk.getMax());
}
@@ -415,8 +415,8 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedFailsPrecondition) {
ASSERT_EQ(1u, chunksVector.size());
// MergedChunk should have range [chunkMin, chunkMax]
- ChunkType foundChunk = uassertStatusOK(ChunkType::fromBSON(chunksVector.front()));
- ASSERT_BSONOBJ_EQ(mergedChunk.toBSON(), foundChunk.toBSON());
+ ChunkType foundChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
+ ASSERT_BSONOBJ_EQ(mergedChunk.toConfigBSON(), foundChunk.toConfigBSON());
}
TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFails) {
diff --git a/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp b/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
index 920714e4ad3..b27b3743271 100644
--- a/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
@@ -180,7 +180,7 @@ public:
std::transform(chunks.begin(),
chunks.end(),
std::back_inserter(chunksToReturn),
- [](const ChunkType& chunk) { return chunk.toBSON(); });
+ [](const ChunkType& chunk) { return chunk.toConfigBSON(); });
return chunksToReturn;
});
}
diff --git a/src/mongo/s/catalog/sharding_catalog_test.cpp b/src/mongo/s/catalog/sharding_catalog_test.cpp
index 49fc124e9e6..d062d59023e 100644
--- a/src/mongo/s/catalog/sharding_catalog_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_test.cpp
@@ -495,33 +495,34 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
return chunks;
});
- onFindWithMetadataCommand([this, &chunksQuery, chunkA, chunkB, newOpTime](
- const RemoteCommandRequest& request) {
- ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata,
- rpc::TrackingMetadata::removeTrackingData(request.metadata));
+ onFindWithMetadataCommand(
+ [this, &chunksQuery, chunkA, chunkB, newOpTime](const RemoteCommandRequest& request) {
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata,
+ rpc::TrackingMetadata::removeTrackingData(request.metadata));
- const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss.ns(), ChunkType::ConfigNS);
+ const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
+ ASSERT_EQ(nss.ns(), ChunkType::ConfigNS);
- auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
- ASSERT_EQ(query->ns(), ChunkType::ConfigNS);
- ASSERT_BSONOBJ_EQ(query->getFilter(), chunksQuery);
- ASSERT_BSONOBJ_EQ(query->getSort(), BSON(ChunkType::DEPRECATED_lastmod() << -1));
- ASSERT_EQ(query->getLimit().get(), 1);
+ ASSERT_EQ(query->ns(), ChunkType::ConfigNS);
+ ASSERT_BSONOBJ_EQ(query->getFilter(), chunksQuery);
+ ASSERT_BSONOBJ_EQ(query->getSort(), BSON(ChunkType::DEPRECATED_lastmod() << -1));
+ ASSERT_EQ(query->getLimit().get(), 1);
- checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
+ checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
- ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
- BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
+ BSONObjBuilder builder;
+ metadata.writeToMetadata(&builder);
- return std::make_tuple(vector<BSONObj>{chunkA.toBSON(), chunkB.toBSON()}, builder.obj());
- });
+ return std::make_tuple(vector<BSONObj>{chunkA.toConfigBSON(), chunkB.toConfigBSON()},
+ builder.obj());
+ });
const auto& chunks = future.timed_get(kFutureTimeout);
- ASSERT_BSONOBJ_EQ(chunkA.toBSON(), chunks[0].toBSON());
- ASSERT_BSONOBJ_EQ(chunkB.toBSON(), chunks[1].toBSON());
+ ASSERT_BSONOBJ_EQ(chunkA.toConfigBSON(), chunks[0].toConfigBSON());
+ ASSERT_BSONOBJ_EQ(chunkB.toConfigBSON(), chunks[1].toConfigBSON());
}
TEST_F(ShardingCatalogClientTest, GetChunksForNSNoSortNoLimit) {
@@ -610,7 +611,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSInvalidChunk) {
chunkB.setVersion({3, 4, OID::gen()});
// Missing shard id
- return vector<BSONObj>{chunkA.toBSON(), chunkB.toBSON()};
+ return vector<BSONObj>{chunkA.toConfigBSON(), chunkB.toConfigBSON()};
});
future.timed_get(kFutureTimeout);
@@ -1364,7 +1365,7 @@ TEST_F(ShardingCatalogClientTest, ApplyChunkOpsDeprecatedSuccessfulWithCheck) {
chunk.setMax(BSON("a" << 100));
chunk.setVersion({1, 2, oid});
chunk.setShard(ShardId("shard0000"));
- return vector<BSONObj>{chunk.toBSON()};
+ return vector<BSONObj>{chunk.toConfigBSON()};
});
// Now wait for the applyChunkOpsDeprecated call to return
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index e0cd4c1215a..ff6df77d6f5 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -45,6 +45,7 @@ namespace mongo {
const std::string ChunkType::ConfigNS = "config.chunks";
const BSONField<std::string> ChunkType::name("_id");
+const BSONField<BSONObj> ChunkType::minShardID("_id");
const BSONField<std::string> ChunkType::ns("ns");
const BSONField<BSONObj> ChunkType::min("min");
const BSONField<BSONObj> ChunkType::max("max");
@@ -58,6 +59,25 @@ namespace {
const char kMinKey[] = "min";
const char kMaxKey[] = "max";
+/**
+ * Extracts an Object value from 'obj's field 'fieldName'. Sets the result to 'bsonElement'.
+ */
+Status extractObject(const BSONObj& obj, const std::string& fieldName, BSONElement* bsonElement) {
+ Status elementStatus = bsonExtractTypedField(obj, fieldName, Object, bsonElement);
+ if (!elementStatus.isOK()) {
+ return {elementStatus.code(),
+ str::stream() << "The field '" << fieldName << "' cannot be parsed due to "
+ << elementStatus.reason()};
+ }
+
+ if (bsonElement->Obj().isEmpty()) {
+ return {ErrorCodes::BadValue,
+ str::stream() << "The field '" << fieldName << "' cannot be empty"};
+ }
+
+ return Status::OK();
+}
+
} // namespace
ChunkRange::ChunkRange(BSONObj minKey, BSONObj maxKey)
@@ -68,27 +88,17 @@ ChunkRange::ChunkRange(BSONObj minKey, BSONObj maxKey)
StatusWith<ChunkRange> ChunkRange::fromBSON(const BSONObj& obj) {
BSONElement minKey;
{
- Status minKeyStatus = bsonExtractTypedField(obj, kMinKey, Object, &minKey);
+ Status minKeyStatus = extractObject(obj, kMinKey, &minKey);
if (!minKeyStatus.isOK()) {
- return {minKeyStatus.code(),
- str::stream() << "Invalid min key due to " << minKeyStatus.reason()};
- }
-
- if (minKey.Obj().isEmpty()) {
- return {ErrorCodes::BadValue, "The min key cannot be empty"};
+ return minKeyStatus;
}
}
BSONElement maxKey;
{
- Status maxKeyStatus = bsonExtractTypedField(obj, kMaxKey, Object, &maxKey);
+ Status maxKeyStatus = extractObject(obj, kMaxKey, &maxKey);
if (!maxKeyStatus.isOK()) {
- return {maxKeyStatus.code(),
- str::stream() << "Invalid max key due to " << maxKeyStatus.reason()};
- }
-
- if (maxKey.Obj().isEmpty()) {
- return {ErrorCodes::BadValue, "The max key cannot be empty"};
+ return maxKeyStatus;
}
}
@@ -122,7 +132,7 @@ bool ChunkRange::operator!=(const ChunkRange& other) const {
return !(*this == other);
}
-StatusWith<ChunkType> ChunkType::fromBSON(const BSONObj& source) {
+StatusWith<ChunkType> ChunkType::fromConfigBSON(const BSONObj& source) {
ChunkType chunk;
{
@@ -174,6 +184,119 @@ StatusWith<ChunkType> ChunkType::fromBSON(const BSONObj& source) {
return chunk;
}
+BSONObj ChunkType::toConfigBSON() const {
+ BSONObjBuilder builder;
+ if (_ns && _min)
+ builder.append(name.name(), getName());
+ if (_ns)
+ builder.append(ns.name(), getNS());
+ if (_min)
+ builder.append(min.name(), getMin());
+ if (_max)
+ builder.append(max.name(), getMax());
+ if (_shard)
+ builder.append(shard.name(), getShard().toString());
+ if (_version)
+ _version->appendForChunk(&builder);
+ if (_jumbo)
+ builder.append(jumbo.name(), getJumbo());
+
+ return builder.obj();
+}
+
+StatusWith<ChunkType> ChunkType::fromShardBSON(const BSONObj& source, const OID& epoch) {
+ ChunkType chunk;
+
+ {
+ BSONElement minKey;
+ Status minKeyStatus = extractObject(source, minShardID.name(), &minKey);
+ if (!minKeyStatus.isOK()) {
+ return minKeyStatus;
+ }
+
+ BSONElement maxKey;
+ Status maxKeyStatus = extractObject(source, max.name(), &maxKey);
+ if (!maxKeyStatus.isOK()) {
+ return maxKeyStatus;
+ }
+
+ if (SimpleBSONObjComparator::kInstance.evaluate(minKey.Obj() >= maxKey.Obj())) {
+ return {ErrorCodes::FailedToParse,
+ str::stream() << "min: " << minKey.Obj() << " should be less than max: "
+ << maxKey.Obj()};
+ }
+
+ chunk._min = minKey.Obj().getOwned();
+ chunk._max = maxKey.Obj().getOwned();
+ }
+
+ {
+ std::string chunkShard;
+ Status status = bsonExtractStringField(source, shard.name(), &chunkShard);
+ if (!status.isOK())
+ return status;
+ chunk._shard = chunkShard;
+ }
+
+ {
+ auto statusWithChunkVersion = ChunkVersion::parseFromBSONAndSetEpoch(source, epoch);
+ if (!statusWithChunkVersion.isOK()) {
+ return statusWithChunkVersion.getStatus();
+ }
+ chunk._version = std::move(statusWithChunkVersion.getValue());
+ }
+
+ return chunk;
+}
+
+BSONObj ChunkType::toShardBSON() const {
+ BSONObjBuilder builder;
+ invariant(_min);
+ invariant(_max);
+ invariant(_shard);
+ invariant(_version);
+ builder.append(minShardID.name(), getMin());
+ builder.append(max.name(), getMax());
+ builder.append(shard.name(), getShard().toString());
+ builder.appendTimestamp(DEPRECATED_lastmod.name(), _version->toLong());
+ return builder.obj();
+}
+
+std::string ChunkType::getName() const {
+ invariant(_ns);
+ invariant(_min);
+ return genID(*_ns, *_min);
+}
+
+void ChunkType::setNS(const std::string& ns) {
+ invariant(!ns.empty());
+ _ns = ns;
+}
+
+void ChunkType::setMin(const BSONObj& min) {
+ invariant(!min.isEmpty());
+ _min = min;
+}
+
+void ChunkType::setMax(const BSONObj& max) {
+ invariant(!max.isEmpty());
+ _max = max;
+}
+
+void ChunkType::setVersion(const ChunkVersion& version) {
+ invariant(version.isSet());
+ _version = version;
+}
+
+void ChunkType::setShard(const ShardId& shard) {
+ invariant(shard.isValid());
+ _shard = shard;
+}
+
+void ChunkType::setJumbo(bool jumbo) {
+ _jumbo = jumbo;
+}
+
std::string ChunkType::genID(StringData ns, const BSONObj& o) {
StringBuilder buf;
buf << ns << "-";
@@ -237,63 +360,10 @@ Status ChunkType::validate() const {
return Status::OK();
}
-BSONObj ChunkType::toBSON() const {
- BSONObjBuilder builder;
- if (_ns && _min)
- builder.append(name.name(), getName());
- if (_ns)
- builder.append(ns.name(), getNS());
- if (_min)
- builder.append(min.name(), getMin());
- if (_max)
- builder.append(max.name(), getMax());
- if (_shard)
- builder.append(shard.name(), getShard().toString());
- if (_version)
- _version->appendForChunk(&builder);
- if (_jumbo)
- builder.append(jumbo.name(), getJumbo());
-
- return builder.obj();
-}
-
std::string ChunkType::toString() const {
- return toBSON().toString();
-}
-
-std::string ChunkType::getName() const {
- invariant(_ns);
- invariant(_min);
- return genID(*_ns, *_min);
-}
-
-void ChunkType::setNS(const std::string& ns) {
- invariant(!ns.empty());
- _ns = ns;
-}
-
-void ChunkType::setMin(const BSONObj& min) {
- invariant(!min.isEmpty());
- _min = min;
-}
-
-void ChunkType::setMax(const BSONObj& max) {
- invariant(!max.isEmpty());
- _max = max;
-}
-
-void ChunkType::setVersion(const ChunkVersion& version) {
- invariant(version.isSet());
- _version = version;
-}
-
-void ChunkType::setShard(const ShardId& shard) {
- invariant(shard.isValid());
- _shard = shard;
-}
-
-void ChunkType::setJumbo(bool jumbo) {
- _jumbo = jumbo;
+ // toConfigBSON will include all the set fields, whereas toShardBSON includes only a subset and
+ // requires them to be set.
+ return toConfigBSON().toString();
}
} // namespace mongo
diff --git a/src/mongo/s/catalog/type_chunk.h b/src/mongo/s/catalog/type_chunk.h
index de468e21f79..06a26db34be 100644
--- a/src/mongo/s/catalog/type_chunk.h
+++ b/src/mongo/s/catalog/type_chunk.h
@@ -88,17 +88,53 @@ private:
};
/**
- * This class represents the layout and contents of documents contained in the
- * config.chunks collection. All manipulation of documents coming from that
- * collection should be done with this class.
+ * This class represents the layouts and contents of documents contained in the config server's
+ * config.chunks and shard server's config.chunks.uuid collections. All manipulation of documents
+ * coming from these collections should be done with this class. The shard's config.chunks.uuid
+ * collections use the epoch field as the uuid value, and epochs match 1:1 to collection instances
+ * (mmapped in config.collections). Therefore, the shard collections do not need to include epoch or
+ * namespace fields, as these will be known in order to access the collections.
+ *
+ * Expected config server config.chunks collection format:
+ * {
+ * _id : "test.foo-a_MinKey",
+ * ns : "test.foo",
+ * min : {
+ * "a" : { "$minKey" : 1 }
+ * },
+ * max : {
+ * "a" : { "$maxKey" : 1 }
+ * },
+ * shard : "test-rs1",
+ * lastmod : Timestamp(1, 0),
+ * lastmodEpoch : ObjectId("587fc60cef168288439ad6ed"),
+ * jumbo : false // optional field
+ * }
+ *
+ * Expected shard server config.chunks.<epoch> collection format:
+ * {
+ * _id: {
+ * "a" : { "$minKey" : 1 }
+ * }
+ * max : {
+ * "a" : { "$maxKey" : 1 }
+ * }
+ * shard : "test-rs1",
+ * lastmod : Timestamp(1, 0),
+ * }
+ *
+ * Note: it is intended to change the config server's collection schema to mirror the new shard
+ * server's collection schema, but that will be future work when the new schema is stable and there
+ * is time to do the extra work, as well as handle the backwards compatibility issues it poses.
*/
class ChunkType {
public:
// Name of the chunks collection in the config server.
static const std::string ConfigNS;
- // Field names and types in the chunks collection type.
+ // Field names and types in the chunks collections.
static const BSONField<std::string> name;
+ static const BSONField<BSONObj> minShardID;
static const BSONField<std::string> ns;
static const BSONField<BSONObj> min;
static const BSONField<BSONObj> max;
@@ -108,34 +144,38 @@ public:
static const BSONField<OID> DEPRECATED_epoch;
/**
- * Constructs a new ChunkType object from BSON.
+ * Constructs a new ChunkType object from BSON that has the config server's config.chunks
+ * collection format.
+ *
* Also does validation of the contents.
*/
- static StatusWith<ChunkType> fromBSON(const BSONObj& source);
-
- /**
- * Generates chunk id based on the namespace name and the lower bound of the chunk.
- */
- static std::string genID(StringData ns, const BSONObj& min);
+ static StatusWith<ChunkType> fromConfigBSON(const BSONObj& source);
/**
- * Returns OK if all fields have been set. Otherwise returns NoSuchKey
- * and information about the first field that is missing.
+ * Returns the BSON representation of the entry for the config server's config.chunks
+ * collection.
*/
- Status validate() const;
+ BSONObj toConfigBSON() const;
/**
- * Returns the BSON representation of the entry.
+ * Constructs a new ChunkType object from BSON that has a shard server's config.chunks.<epoch>
+ * collection format.
+ *
+ * Also does validation of the contents.
*/
- BSONObj toBSON() const;
+ static StatusWith<ChunkType> fromShardBSON(const BSONObj& source, const OID& epoch);
/**
- * Returns a std::string representation of the current internal state.
+ * Returns the BSON representation of the entry for a shard server's config.chunks.<epoch>
+ * collection.
*/
- std::string toString() const;
+ BSONObj toShardBSON() const;
std::string getName() const;
+ /**
+ * Getters and setters.
+ */
const std::string& getNS() const {
return _ns.get();
}
@@ -173,20 +213,36 @@ public:
}
void setJumbo(bool jumbo);
+ /**
+ * Generates chunk id based on the namespace name and the lower bound of the chunk.
+ */
+ static std::string genID(StringData ns, const BSONObj& min);
+
+ /**
+ * Returns OK if all the mandatory fields have been set. Otherwise returns NoSuchKey and
+ * information about the first field that is missing.
+ */
+ Status validate() const;
+
+ /**
+ * Returns a std::string representation of the current internal state.
+ */
+ std::string toString() const;
+
private:
- // Convention: (M)andatory, (O)ptional, (S)pecial rule.
+ // Convention: (M)andatory, (O)ptional, (S)pecial; (C)onfig, (S)hard.
- // (M) collection this chunk is in
+ // (M)(C) collection this chunk is in
boost::optional<std::string> _ns;
- // (M) first key of the range, inclusive
+ // (M)(C)(S) first key of the range, inclusive
boost::optional<BSONObj> _min;
- // (M) last key of the range, non-inclusive
+ // (M)(C)(S) last key of the range, non-inclusive
boost::optional<BSONObj> _max;
- // (M) version of this chunk
+ // (M)(C)(S) version of this chunk
boost::optional<ChunkVersion> _version;
- // (M) shard this chunk lives in
+ // (M)(C)(S) shard this chunk lives in
boost::optional<ShardId> _shard;
- // (O) too big to move?
+ // (O)(C) too big to move?
boost::optional<bool> _jumbo;
};
diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp
index bcf1470240c..4589e4d4c38 100644
--- a/src/mongo/s/catalog/type_chunk_test.cpp
+++ b/src/mongo/s/catalog/type_chunk_test.cpp
@@ -35,14 +35,17 @@
#include "mongo/unittest/unittest.h"
#include "mongo/util/time_support.h"
+namespace mongo {
namespace {
-using namespace mongo;
-
using std::string;
using unittest::assertGet;
-TEST(ChunkType, MissingRequiredFields) {
+const BSONObj kMin = BSON("a" << 10);
+const BSONObj kMax = BSON("a" << 20);
+const ShardId kShard("shard0000");
+
+TEST(ChunkType, MissingConfigRequiredFields) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj objModNS =
@@ -53,7 +56,7 @@ TEST(ChunkType, MissingRequiredFields) {
<< "lastmodEpoch"
<< chunkVersion.epoch()
<< ChunkType::shard("shard0001"));
- StatusWith<ChunkType> chunkRes = ChunkType::fromBSON(objModNS);
+ StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(objModNS);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModKeys =
@@ -62,7 +65,7 @@ TEST(ChunkType, MissingRequiredFields) {
<< "lastmodEpoch"
<< chunkVersion.epoch()
<< ChunkType::shard("shard0001"));
- chunkRes = ChunkType::fromBSON(objModKeys);
+ chunkRes = ChunkType::fromConfigBSON(objModKeys);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModShard =
@@ -73,10 +76,66 @@ TEST(ChunkType, MissingRequiredFields) {
<< Timestamp(chunkVersion.toLong())
<< "lastmodEpoch"
<< chunkVersion.epoch());
- chunkRes = ChunkType::fromBSON(objModShard);
+ chunkRes = ChunkType::fromConfigBSON(objModShard);
+ ASSERT_FALSE(chunkRes.isOK());
+
+ BSONObj objModVersion =
+ BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
+ << ChunkType::min(BSON("a" << 10 << "b" << 10))
+ << ChunkType::max(BSON("a" << 20))
+ << ChunkType::shard("shard0001"));
+ chunkRes = ChunkType::fromConfigBSON(objModVersion);
ASSERT_FALSE(chunkRes.isOK());
}
+TEST(ChunkType, MissingShardRequiredFields) {
+ const OID epoch = OID::gen();
+ ChunkVersion chunkVersion(1, 2, epoch);
+ const auto lastmod = Timestamp(chunkVersion.toLong());
+
+ BSONObj objModMin =
+ BSON(ChunkType::max(kMax) << ChunkType::shard(kShard.toString()) << "lastmod" << lastmod);
+ StatusWith<ChunkType> chunkRes = ChunkType::fromShardBSON(objModMin, epoch);
+ ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
+ ASSERT_STRING_CONTAINS(chunkRes.getStatus().reason(), ChunkType::minShardID.name());
+
+ BSONObj objModMax = BSON(
+ ChunkType::minShardID(kMin) << ChunkType::shard(kShard.toString()) << "lastmod" << lastmod);
+ chunkRes = ChunkType::fromShardBSON(objModMax, epoch);
+ ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
+ ASSERT_STRING_CONTAINS(chunkRes.getStatus().reason(), ChunkType::max.name());
+
+ BSONObj objModShard =
+ BSON(ChunkType::minShardID(kMin) << ChunkType::max(kMax) << "lastmod" << lastmod);
+ chunkRes = ChunkType::fromShardBSON(objModShard, epoch);
+ ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
+ ASSERT_STRING_CONTAINS(chunkRes.getStatus().reason(), ChunkType::shard.name());
+
+ BSONObj objModLastmod = BSON(
+ ChunkType::minShardID(kMin) << ChunkType::max(kMax) << ChunkType::shard(kShard.toString()));
+ chunkRes = ChunkType::fromShardBSON(objModLastmod, epoch);
+ ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::BadValue);
+}
+
+TEST(ChunkType, ToFromShardBSON) {
+ const OID epoch = OID::gen();
+ ChunkVersion chunkVersion(1, 2, epoch);
+ auto lastmod = Timestamp(chunkVersion.toLong());
+
+ BSONObj obj = BSON(ChunkType::minShardID(kMin) << ChunkType::max(kMax)
+ << ChunkType::shard(kShard.toString())
+ << "lastmod"
+ << lastmod);
+ ChunkType shardChunk = assertGet(ChunkType::fromShardBSON(obj, epoch));
+
+ ASSERT_BSONOBJ_EQ(obj, shardChunk.toShardBSON());
+
+ ASSERT_BSONOBJ_EQ(kMin, shardChunk.getMin());
+ ASSERT_BSONOBJ_EQ(kMax, shardChunk.getMax());
+ ASSERT_EQUALS(kShard, shardChunk.getShard());
+ ASSERT_EQUALS(chunkVersion, shardChunk.getVersion());
+}
+
TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj obj =
@@ -88,7 +147,7 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
<< "lastmodEpoch"
<< chunkVersion.epoch()
<< ChunkType::shard("shard0001"));
- StatusWith<ChunkType> chunkRes = ChunkType::fromBSON(obj);
+ StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
}
@@ -103,12 +162,12 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
<< "lastmodEpoch"
<< chunkVersion.epoch()
<< ChunkType::shard("shard0001"));
- StatusWith<ChunkType> chunkRes = ChunkType::fromBSON(obj);
+ StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
}
-TEST(ChunkType, NotAscending) {
+TEST(ChunkType, MinToMaxNotAscending) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj obj = BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
<< ChunkType::min(BSON("a" << 20))
@@ -118,24 +177,26 @@ TEST(ChunkType, NotAscending) {
<< "lastmodEpoch"
<< chunkVersion.epoch()
<< ChunkType::shard("shard0001"));
- StatusWith<ChunkType> chunkRes = ChunkType::fromBSON(obj);
+ StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
ASSERT_EQ(ErrorCodes::FailedToParse, chunkRes.getStatus());
}
-TEST(ChunkType, CorrectContents) {
+TEST(ChunkType, ToFromConfigBSON) {
ChunkVersion chunkVersion(1, 2, OID::gen());
- BSONObj obj = BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
- << ChunkType::min(BSON("a" << 10))
- << ChunkType::max(BSON("a" << 20))
- << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch()
- << ChunkType::shard("shard0001"));
- StatusWith<ChunkType> chunkRes = ChunkType::fromBSON(obj);
+ BSONObj obj = BSON(ChunkType::name("test.mycol-a_10") << ChunkType::ns("test.mycol")
+ << ChunkType::min(BSON("a" << 10))
+ << ChunkType::max(BSON("a" << 20))
+ << ChunkType::shard("shard0001")
+ << "lastmod"
+ << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch"
+ << chunkVersion.epoch());
+ StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ChunkType chunk = chunkRes.getValue();
+ ASSERT_BSONOBJ_EQ(chunk.toConfigBSON(), obj);
+
ASSERT_EQUALS(chunk.getNS(), "test.mycol");
ASSERT_BSONOBJ_EQ(chunk.getMin(), BSON("a" << 10));
ASSERT_BSONOBJ_EQ(chunk.getMax(), BSON("a" << 20));
@@ -146,18 +207,18 @@ TEST(ChunkType, CorrectContents) {
}
TEST(ChunkType, Pre22Format) {
- ChunkType chunk = assertGet(ChunkType::fromBSON(BSON("_id"
- << "test.mycol-a_MinKey"
- << "lastmod"
- << Date_t::fromMillisSinceEpoch(1)
- << "ns"
- << "test.mycol"
- << "min"
- << BSON("a" << 10)
- << "max"
- << BSON("a" << 20)
- << "shard"
- << "shard0001")));
+ ChunkType chunk = assertGet(ChunkType::fromConfigBSON(BSON("_id"
+ << "test.mycol-a_MinKey"
+ << "lastmod"
+ << Date_t::fromMillisSinceEpoch(1)
+ << "ns"
+ << "test.mycol"
+ << "min"
+ << BSON("a" << 10)
+ << "max"
+ << BSON("a" << 20)
+ << "shard"
+ << "shard0001")));
ASSERT_OK(chunk.validate());
ASSERT_EQUALS(chunk.getNS(), "test.mycol");
@@ -170,7 +231,7 @@ TEST(ChunkType, Pre22Format) {
TEST(ChunkType, BadType) {
BSONObj obj = BSON(ChunkType::name() << 0);
- StatusWith<ChunkType> chunkRes = ChunkType::fromBSON(obj);
+ StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
ASSERT_FALSE(chunkRes.isOK());
}
@@ -190,4 +251,5 @@ TEST(ChunkRange, MinGreaterThanMaxShouldError) {
ASSERT_EQ(ErrorCodes::FailedToParse, parseStatus.getStatus());
}
-} // unnamed namespace
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/chunk_diff_test.cpp b/src/mongo/s/chunk_diff_test.cpp
index 2a83711a24c..fee3d67743e 100644
--- a/src/mongo/s/chunk_diff_test.cpp
+++ b/src/mongo/s/chunk_diff_test.cpp
@@ -95,7 +95,7 @@ public:
void convertBSONArrayToChunkTypes(const vector<BSONObj>& chunksArray,
std::vector<ChunkType>* chunksVector) {
for (const BSONObj& obj : chunksArray) {
- auto chunkTypeRes = ChunkType::fromBSON(obj);
+ auto chunkTypeRes = ChunkType::fromConfigBSON(obj);
ASSERT(chunkTypeRes.isOK());
chunksVector->push_back(chunkTypeRes.getValue());
}
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index bc10eea734f..261141dcc6e 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -455,7 +455,10 @@ Status ChunkManager::createFirstChunks(OperationContext* txn,
chunk.setVersion(version);
Status status = grid.catalogClient(txn)->insertConfigDocument(
- txn, ChunkType::ConfigNS, chunk.toBSON(), ShardingCatalogClient::kMajorityWriteConcern);
+ txn,
+ ChunkType::ConfigNS,
+ chunk.toConfigBSON(),
+ ShardingCatalogClient::kMajorityWriteConcern);
if (!status.isOK()) {
const string errMsg = str::stream() << "Creating first chunks failed: "
<< redact(status.reason());
diff --git a/src/mongo/s/chunk_version.cpp b/src/mongo/s/chunk_version.cpp
index c2c98697bd3..417d021e227 100644
--- a/src/mongo/s/chunk_version.cpp
+++ b/src/mongo/s/chunk_version.cpp
@@ -112,6 +112,16 @@ StatusWith<ChunkVersion> ChunkVersion::parseFromBSONForChunk(const BSONObj& obj)
return chunkVersion;
}
+StatusWith<ChunkVersion> ChunkVersion::parseFromBSONAndSetEpoch(const BSONObj& obj,
+ const OID& epoch) {
+ bool canParse;
+ ChunkVersion chunkVersion = ChunkVersion::fromBSON(obj, kLastmod, &canParse);
+ if (!canParse)
+ return {ErrorCodes::BadValue, "Unable to parse shard version"};
+ chunkVersion._epoch = epoch;
+ return chunkVersion;
+}
+
void ChunkVersion::appendForSetShardVersion(BSONObjBuilder* builder) const {
addToBSON(*builder, kVersion);
}
diff --git a/src/mongo/s/chunk_version.h b/src/mongo/s/chunk_version.h
index 05517ffb609..0503636f146 100644
--- a/src/mongo/s/chunk_version.h
+++ b/src/mongo/s/chunk_version.h
@@ -99,6 +99,13 @@ public:
static StatusWith<ChunkVersion> parseFromBSONForChunk(const BSONObj& obj);
/**
+ * Parses the combined major/minor version from a BSONObj of the form
+ * { ..., lastmod: [ <combined major/minor> ], ... }, which carries no epoch,
+ * and then sets the returned ChunkVersion's epoch field to 'epoch'.
+ */
+ static StatusWith<ChunkVersion> parseFromBSONAndSetEpoch(const BSONObj& obj, const OID& epoch);
+
+ /**
* Indicates a dropped collection. All components are zeroes (OID is zero time, zero
* machineId/inc).
*/
diff --git a/src/mongo/s/config_server_test_fixture.cpp b/src/mongo/s/config_server_test_fixture.cpp
index c6c5b219c42..85a2b6411be 100644
--- a/src/mongo/s/config_server_test_fixture.cpp
+++ b/src/mongo/s/config_server_test_fixture.cpp
@@ -251,7 +251,8 @@ StatusWith<ShardType> ConfigServerTestFixture::getShardDoc(OperationContext* txn
Status ConfigServerTestFixture::setupChunks(const std::vector<ChunkType>& chunks) {
const NamespaceString chunkNS(ChunkType::ConfigNS);
for (const auto& chunk : chunks) {
- auto insertStatus = insertToConfigCollection(operationContext(), chunkNS, chunk.toBSON());
+ auto insertStatus =
+ insertToConfigCollection(operationContext(), chunkNS, chunk.toConfigBSON());
if (!insertStatus.isOK())
return insertStatus;
}
@@ -266,7 +267,7 @@ StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(OperationContext* txn
if (!doc.isOK())
return doc.getStatus();
- return ChunkType::fromBSON(doc.getValue());
+ return ChunkType::fromConfigBSON(doc.getValue());
}
StatusWith<std::vector<BSONObj>> ConfigServerTestFixture::getIndexes(OperationContext* txn,
diff --git a/src/mongo/s/request_types/balance_chunk_request_type.cpp b/src/mongo/s/request_types/balance_chunk_request_type.cpp
index dbf40ddea43..543665db517 100644
--- a/src/mongo/s/request_types/balance_chunk_request_type.cpp
+++ b/src/mongo/s/request_types/balance_chunk_request_type.cpp
@@ -55,7 +55,7 @@ BalanceChunkRequest::BalanceChunkRequest(ChunkType chunk,
: _chunk(std::move(chunk)), _secondaryThrottle(std::move(secondaryThrottle)) {}
StatusWith<BalanceChunkRequest> BalanceChunkRequest::parseFromConfigCommand(const BSONObj& obj) {
- auto chunkStatus = ChunkType::fromBSON(obj);
+ auto chunkStatus = ChunkType::fromConfigBSON(obj);
if (!chunkStatus.isOK()) {
return chunkStatus.getStatus();
}
@@ -132,7 +132,7 @@ BSONObj BalanceChunkRequest::serializeToMoveCommandForConfig(
BSONObjBuilder cmdBuilder;
cmdBuilder.append(kConfigSvrMoveChunk, 1);
- cmdBuilder.appendElements(chunk.toBSON());
+ cmdBuilder.appendElements(chunk.toConfigBSON());
cmdBuilder.append(kToShardId, newShardId.toString());
cmdBuilder.append(kMaxChunkSizeBytes, static_cast<long long>(maxChunkSizeBytes));
{
@@ -152,7 +152,7 @@ BSONObj BalanceChunkRequest::serializeToRebalanceCommandForConfig(const ChunkTyp
BSONObjBuilder cmdBuilder;
cmdBuilder.append(kConfigSvrMoveChunk, 1);
- cmdBuilder.appendElements(chunk.toBSON());
+ cmdBuilder.appendElements(chunk.toConfigBSON());
cmdBuilder.append(WriteConcernOptions::kWriteConcernField,
kMajorityWriteConcernNoTimeout.toBSON());
diff --git a/src/mongo/s/request_types/commit_chunk_migration_request_type.cpp b/src/mongo/s/request_types/commit_chunk_migration_request_type.cpp
index d2998b43f1b..1f1125529cb 100644
--- a/src/mongo/s/request_types/commit_chunk_migration_request_type.cpp
+++ b/src/mongo/s/request_types/commit_chunk_migration_request_type.cpp
@@ -148,11 +148,11 @@ void CommitChunkMigrationRequest::appendAsCommand(BSONObjBuilder* builder,
builder->append(kConfigSvrCommitChunkMigration, nss.ns());
builder->append(kFromShard, fromShard.toString());
builder->append(kToShard, toShard.toString());
- builder->append(kMigratedChunk, migratedChunk.toBSON());
+ builder->append(kMigratedChunk, migratedChunk.toConfigBSON());
fromShardCollectionVersion.appendWithFieldForCommands(builder, kFromShardCollectionVersion);
if (controlChunk) {
- builder->append(kControlChunk, controlChunk->toBSON());
+ builder->append(kControlChunk, controlChunk->toConfigBSON());
}
}