| field | value | date |
|---|---|---|
| author | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2022-01-24 14:56:18 +0100 |
| committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2022-01-24 20:03:34 +0000 |
| commit | a2aff33b1f4fa49efd85b245e7945e720a775530 (patch) | |
| tree | 8dc283bae55d6b8ac2e797ce328d5244eacdd8bd /src/mongo/db | |
| parent | f395e9d34b413781bcfb67c2674378734223466c (diff) | |
| download | mongo-a2aff33b1f4fa49efd85b245e7945e720a775530.tar.gz | |
SERVER-62783 Get rid of ChunkVersion::appendWithField/parseWithField
Diffstat (limited to 'src/mongo/db')
16 files changed, 52 insertions, 59 deletions
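The change is mechanical across the touched files: BSON writing moves from `appendWithField(&builder, field)` to `serializeToBSON(field, &builder)`, and parsing moves from the `StatusWith`-returning `parseWithField(obj, field)` to `fromBSONArrayThrowing(obj[field])`, which throws a `DBException` on malformed input. The sketch below condenses that call-pattern change; it is a minimal illustration based on the hunks that follow, and the `writeShardVersion`/`readShardVersion` helpers are hypothetical, not part of the commit.

```cpp
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/s/chunk_version.h"

namespace mongo {

// Old pattern (removed by this commit):
//   version.appendWithField(&builder, ChunkVersion::kShardVersionField);
//   auto swVersion = ChunkVersion::parseWithField(obj, ChunkVersion::kShardVersionField);

// New pattern: serialize the version into a builder under a field name...
BSONObj writeShardVersion(const ChunkVersion& version) {
    BSONObjBuilder builder;
    version.serializeToBSON(ChunkVersion::kShardVersionField, &builder);
    return builder.obj();
}

// ...and parse the BSON element directly; throws a DBException on bad input
// instead of returning a StatusWith.
ChunkVersion readShardVersion(const BSONObj& obj) {
    return ChunkVersion::fromBSONArrayThrowing(obj[ChunkVersion::kShardVersionField]);
}

}  // namespace mongo
```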
diff --git a/src/mongo/db/query/query_request_helper.h b/src/mongo/db/query/query_request_helper.h
index 4044083d89f..4d3ec6143c8 100644
--- a/src/mongo/db/query/query_request_helper.h
+++ b/src/mongo/db/query/query_request_helper.h
@@ -57,8 +57,6 @@ static constexpr auto kMaxTimeMSOpOnlyField = "maxTimeMSOpOnly";
 // Field names for sorting options.
 static constexpr auto kNaturalSortField = "$natural";
 
-static constexpr auto kShardVersionField = "shardVersion";
-
 /**
  * Assert that collectionName is valid.
  */
diff --git a/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.h b/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.h
index e8bc271bf9b..e81bbc97a83 100644
--- a/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.h
+++ b/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.h
@@ -239,7 +239,7 @@ public:
             .append(kShardName, getTarget().toString())
             .append(kEpoch, _version.epoch());
 
-        _version.appendWithField(&commandBuilder, ChunkVersion::kShardVersionField);
+        _version.serializeToBSON(ChunkVersion::kShardVersionField, &commandBuilder);
 
         return commandBuilder.obj();
     }
@@ -314,7 +314,7 @@ public:
            .append(kMaxValue, _upperBoundKey)
            .append(kEstimatedValue, _estimatedValue);
 
-        _version.appendWithField(&commandBuilder, ChunkVersion::kShardVersionField);
+        _version.serializeToBSON(ChunkVersion::kShardVersionField, &commandBuilder);
 
         return commandBuilder.obj();
     }
diff --git a/src/mongo/db/s/balancer/migration_test_fixture.cpp b/src/mongo/db/s/balancer/migration_test_fixture.cpp
index b970710cbe2..59137d19e7f 100644
--- a/src/mongo/db/s/balancer/migration_test_fixture.cpp
+++ b/src/mongo/db/s/balancer/migration_test_fixture.cpp
@@ -125,7 +125,7 @@ void MigrationTestFixture::setUpMigration(const NamespaceString& ns,
     builder.append(MigrationType::max(), chunk.getMax());
     builder.append(MigrationType::toShard(), toShard.toString());
     builder.append(MigrationType::fromShard(), chunk.getShard().toString());
-    chunk.getVersion().appendWithField(&builder, "chunkVersion");
+    chunk.getVersion().serializeToBSON("chunkVersion", &builder);
     builder.append(MigrationType::forceJumbo(), "doNotForceJumbo");
 
     MigrationType migrationType = assertGet(MigrationType::fromBSON(builder.obj()));
diff --git a/src/mongo/db/s/balancer/type_migration.cpp b/src/mongo/db/s/balancer/type_migration.cpp
index fc1371cef4a..8be64f74323 100644
--- a/src/mongo/db/s/balancer/type_migration.cpp
+++ b/src/mongo/db/s/balancer/type_migration.cpp
@@ -115,11 +115,11 @@ StatusWith<MigrationType> MigrationType::fromBSON(const BSONObj& source) {
         migrationType._fromShard = std::move(migrationFromShard);
     }
 
-    {
-        auto chunkVersionStatus = ChunkVersion::parseWithField(source, chunkVersion.name());
-        if (!chunkVersionStatus.isOK())
-            return chunkVersionStatus.getStatus();
-        migrationType._chunkVersion = chunkVersionStatus.getValue();
+    try {
+        auto chunkVersionStatus = ChunkVersion::fromBSONArrayThrowing(source[chunkVersion.name()]);
+        migrationType._chunkVersion = chunkVersionStatus;
+    } catch (const DBException& ex) {
+        return ex.toStatus();
     }
 
     {
@@ -160,7 +160,7 @@ BSONObj MigrationType::toBSON() const {
     builder.append(fromShard.name(), _fromShard.toString());
     builder.append(toShard.name(), _toShard.toString());
 
-    _chunkVersion.appendWithField(&builder, chunkVersion.name());
+    _chunkVersion.serializeToBSON(chunkVersion.name(), &builder);
 
     builder.append(waitForDelete.name(), _waitForDelete);
     builder.append(forceJumbo.name(), _forceJumbo);
diff --git a/src/mongo/db/s/balancer/type_migration_test.cpp b/src/mongo/db/s/balancer/type_migration_test.cpp
index c779efd4d44..e6bc584bca3 100644
--- a/src/mongo/db/s/balancer/type_migration_test.cpp
+++ b/src/mongo/db/s/balancer/type_migration_test.cpp
@@ -78,7 +78,7 @@ TEST(MigrationTypeTest, ConvertFromMigrationInfo) {
     builder.append(MigrationType::max(), kMax);
     builder.append(MigrationType::fromShard(), kFromShard.toString());
     builder.append(MigrationType::toShard(), kToShard.toString());
-    version.appendWithField(&builder, "chunkVersion");
+    version.serializeToBSON("chunkVersion", &builder);
     builder.append(MigrationType::waitForDelete(), kWaitForDelete);
     builder.append(MigrationType::forceJumbo(),
                    MoveChunkRequest::forceJumboToString(MoveChunkRequest::ForceJumbo::kDoNotForce));
@@ -97,7 +97,7 @@ TEST(MigrationTypeTest, FromAndToBSON) {
     builder.append(MigrationType::max(), kMax);
     builder.append(MigrationType::fromShard(), kFromShard.toString());
     builder.append(MigrationType::toShard(), kToShard.toString());
-    version.appendWithField(&builder, "chunkVersion");
+    version.serializeToBSON("chunkVersion", &builder);
     builder.append(MigrationType::waitForDelete(), kWaitForDelete);
     builder.append(MigrationType::forceJumbo(),
                    MoveChunkRequest::forceJumboToString(MoveChunkRequest::ForceJumbo::kDoNotForce));
@@ -116,7 +116,7 @@ TEST(MigrationTypeTest, MissingRequiredNamespaceField) {
     builder.append(MigrationType::max(), kMax);
     builder.append(MigrationType::fromShard(), kFromShard.toString());
     builder.append(MigrationType::toShard(), kToShard.toString());
-    version.appendWithField(&builder, "chunkVersion");
+    version.serializeToBSON("chunkVersion", &builder);
 
     BSONObj obj = builder.obj();
@@ -133,7 +133,7 @@ TEST(MigrationTypeTest, MissingRequiredMinField) {
     builder.append(MigrationType::max(), kMax);
     builder.append(MigrationType::fromShard(), kFromShard.toString());
     builder.append(MigrationType::toShard(), kToShard.toString());
-    version.appendWithField(&builder, "chunkVersion");
+    version.serializeToBSON("chunkVersion", &builder);
 
     BSONObj obj = builder.obj();
@@ -150,7 +150,7 @@ TEST(MigrationTypeTest, MissingRequiredMaxField) {
     builder.append(MigrationType::min(), kMin);
     builder.append(MigrationType::fromShard(), kFromShard.toString());
     builder.append(MigrationType::toShard(), kToShard.toString());
-    version.appendWithField(&builder, "chunkVersion");
+    version.serializeToBSON("chunkVersion", &builder);
 
     BSONObj obj = builder.obj();
@@ -167,7 +167,7 @@ TEST(MigrationTypeTest, MissingRequiredFromShardField) {
     builder.append(MigrationType::min(), kMin);
     builder.append(MigrationType::max(), kMax);
     builder.append(MigrationType::toShard(), kToShard.toString());
-    version.appendWithField(&builder, "chunkVersion");
+    version.serializeToBSON("chunkVersion", &builder);
 
     BSONObj obj = builder.obj();
@@ -184,7 +184,7 @@ TEST(MigrationTypeTest, MissingRequiredToShardField) {
     builder.append(MigrationType::min(), kMin);
     builder.append(MigrationType::max(), kMax);
     builder.append(MigrationType::fromShard(), kFromShard.toString());
-    version.appendWithField(&builder, "chunkVersion");
+    version.serializeToBSON("chunkVersion", &builder);
 
     BSONObj obj = builder.obj();
@@ -203,9 +203,7 @@ TEST(MigrationTypeTest, MissingRequiredVersionField) {
 
     BSONObj obj = builder.obj();
 
-    StatusWith<MigrationType> migrationType = MigrationType::fromBSON(obj);
-    ASSERT_EQUALS(migrationType.getStatus(), ErrorCodes::NoSuchKey);
-    ASSERT_STRING_CONTAINS(migrationType.getStatus().reason(), "chunkVersion");
+    ASSERT_THROWS(uassertStatusOK(MigrationType::fromBSON(obj)), DBException);
 }
 
 }  // namespace
diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp
index 8a485cd0c86..f87803ec350 100644
--- a/src/mongo/db/s/collection_metadata_filtering_test.cpp
+++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp
@@ -127,7 +127,7 @@ protected:
         const auto version = cm.getVersion(ShardId("0"));
         BSONObjBuilder builder;
-        version.appendWithField(&builder, ChunkVersion::kShardVersionField);
+        version.serializeToBSON(ChunkVersion::kShardVersionField, &builder);
         auto& oss = OperationShardingState::get(operationContext());
         oss.initializeClientRoutingVersionsFromCommand(kNss, builder.obj());
     }
diff --git a/src/mongo/db/s/collection_sharding_runtime_test.cpp b/src/mongo/db/s/collection_sharding_runtime_test.cpp
index 70eaccd5105..00fdf7872bd 100644
--- a/src/mongo/db/s/collection_sharding_runtime_test.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime_test.cpp
@@ -80,7 +80,7 @@ protected:
         if (!OperationShardingState::isOperationVersioned(opCtx)) {
             const auto version = cm.getVersion(ShardId("0"));
             BSONObjBuilder builder;
-            version.appendWithField(&builder, ChunkVersion::kShardVersionField);
+            version.serializeToBSON(ChunkVersion::kShardVersionField, &builder);
             auto& oss = OperationShardingState::get(opCtx);
             oss.initializeClientRoutingVersionsFromCommand(kTestNss, builder.obj());
         }
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 51e0a838d87..6025201b5a8 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -695,8 +695,8 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkSplit(
     }
 
     BSONObjBuilder response;
-    currentMaxVersion.appendWithField(&response, kCollectionVersionField);
-    currentMaxVersion.appendWithField(&response, ChunkVersion::kShardVersionField);
+    currentMaxVersion.serializeToBSON(kCollectionVersionField, &response);
+    currentMaxVersion.serializeToBSON(ChunkVersion::kShardVersionField, &response);
     return response.obj();
 }
@@ -773,10 +773,10 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunksMerge(
                           << chunkRange.toString(),
             chunk.getRange() == chunkRange);
 
     BSONObjBuilder response;
-    swCollVersion.getValue().appendWithField(&response, kCollectionVersionField);
+    swCollVersion.getValue().serializeToBSON(kCollectionVersionField, &response);
     const auto currentShardVersion = getShardVersion(opCtx, coll, shardId, swCollVersion.getValue());
-    currentShardVersion.appendWithField(&response, ChunkVersion::kShardVersionField);
+    currentShardVersion.serializeToBSON(ChunkVersion::kShardVersionField, &response);
     // Makes sure that the last thing we read in getCollectionVersion and getShardVersion gets
     // majority written before to return from this command, otherwise next RoutingInfo cache
     // refresh from the shard may not see those newest information.
@@ -848,8 +848,8 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunksMerge(
         opCtx, "merge", nss.ns(), logDetail.obj(), WriteConcernOptions());
 
     BSONObjBuilder response;
-    mergeVersion.appendWithField(&response, kCollectionVersionField);
-    mergeVersion.appendWithField(&response, ChunkVersion::kShardVersionField);
+    mergeVersion.serializeToBSON(kCollectionVersionField, &response);
+    mergeVersion.serializeToBSON(ChunkVersion::kShardVersionField, &response);
     return response.obj();
 }
@@ -959,10 +959,10 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
     if (currentChunk.getShard() == toShard) {
         // The commit was already done successfully
         BSONObjBuilder response;
-        currentCollectionVersion.appendWithField(&response, kCollectionVersionField);
+        currentCollectionVersion.serializeToBSON(kCollectionVersionField, &response);
         const auto currentShardVersion =
             getShardVersion(opCtx, coll, fromShard, currentCollectionVersion);
-        currentShardVersion.appendWithField(&response, ChunkVersion::kShardVersionField);
+        currentShardVersion.serializeToBSON(ChunkVersion::kShardVersionField, &response);
         // Makes sure that the last thing we read in getCurrentChunk, getShardVersion, and
         // getCollectionVersion gets majority written before to return from this command, otherwise
         // next RoutingInfo cache refresh from the shard may not see those newest information.
@@ -1078,14 +1078,14 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
     BSONObjBuilder response;
     if (!newControlChunk) {
         // We migrated the last chunk from the donor shard.
-        newMigratedChunk.getVersion().appendWithField(&response, kCollectionVersionField);
+        newMigratedChunk.getVersion().serializeToBSON(kCollectionVersionField, &response);
         const ChunkVersion donorShardVersion(
             0, 0, currentCollectionVersion.epoch(), currentCollectionVersion.getTimestamp());
-        donorShardVersion.appendWithField(&response, ChunkVersion::kShardVersionField);
+        donorShardVersion.serializeToBSON(ChunkVersion::kShardVersionField, &response);
     } else {
-        newControlChunk.get().getVersion().appendWithField(&response, kCollectionVersionField);
-        newControlChunk.get().getVersion().appendWithField(&response,
-                                                           ChunkVersion::kShardVersionField);
+        newControlChunk.get().getVersion().serializeToBSON(kCollectionVersionField, &response);
+        newControlChunk.get().getVersion().serializeToBSON(ChunkVersion::kShardVersionField,
+                                                           &response);
     }
     return response.obj();
 }
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
index d6df635f790..07581ebfbed 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
@@ -101,7 +101,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
                                                       validAfter));
 
     // Verify the versions returned match expected values.
-    auto mver = assertGet(ChunkVersion::parseWithField(versions, "shardVersion"));
+    auto mver = ChunkVersion::fromBSONArrayThrowing(versions["shardVersion"]);
     ASSERT_EQ(ChunkVersion(migratedChunk.getVersion().majorVersion() + 1,
                            1,
                            migratedChunk.getVersion().epoch(),
@@ -109,7 +109,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
               mver);
 
     // Verify that a collection version is returned
-    auto cver = assertGet(ChunkVersion::parseWithField(versions, "collectionVersion"));
+    auto cver = ChunkVersion::fromBSONArrayThrowing(versions["collectionVersion"]);
     ASSERT_TRUE(mver.isOlderOrEqualThan(cver));
 
     // Verify the chunks ended up in the right shards.
@@ -182,12 +182,11 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
 
     // Verify the version returned matches expected value.
     BSONObj versions = resultBSON.getValue();
-    auto mver = ChunkVersion::parseWithField(versions, "shardVersion");
-    ASSERT_OK(mver.getStatus());
-    ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch(), origVersion.getTimestamp()), mver.getValue());
+    auto mver = ChunkVersion::fromBSONArrayThrowing(versions["shardVersion"]);
+    ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch(), origVersion.getTimestamp()), mver);
 
     // Verify that a collection version is returned
-    auto cver = assertGet(ChunkVersion::parseWithField(versions, "collectionVersion"));
+    auto cver = ChunkVersion::fromBSONArrayThrowing(versions["collectionVersion"]);
     ASSERT_EQ(ChunkVersion(origMajorVersion + 1, 0, collEpoch, collTimestamp), cver);
 
     // Verify the chunk ended up in the right shard.
@@ -248,9 +247,8 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
 
     // Verify the version returned matches expected value.
     BSONObj versions = resultBSON.getValue();
-    auto mver = ChunkVersion::parseWithField(versions, "shardVersion");
-    ASSERT_OK(mver.getStatus());
-    ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch(), origVersion.getTimestamp()), mver.getValue());
+    auto mver = ChunkVersion::fromBSONArrayThrowing(versions["shardVersion"]);
+    ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch(), origVersion.getTimestamp()), mver);
 
     // Verify the chunk ended up in the right shard.
     auto chunkDoc0 =
@@ -537,9 +535,8 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
 
     // Verify the versions returned match expected values.
     BSONObj versions = resultBSON.getValue();
-    auto mver = ChunkVersion::parseWithField(versions, "shardVersion");
-    ASSERT_OK(mver.getStatus());
-    ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch(), origVersion.getTimestamp()), mver.getValue());
+    auto mver = ChunkVersion::fromBSONArrayThrowing(versions["shardVersion"]);
+    ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch(), origVersion.getTimestamp()), mver);
 
     // Verify the chunks ended up in the right shards.
     auto chunkDoc0 =
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
index 2d681f93e09..907455cf765 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
@@ -96,8 +96,8 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
                        ->commitChunksMerge(
                            operationContext(), _nss1, collUuid, rangeToBeMerged, _shardId, validAfter));
 
-    auto collVersion = assertGet(ChunkVersion::parseWithField(versions, "collectionVersion"));
-    auto shardVersion = assertGet(ChunkVersion::parseWithField(versions, "shardVersion"));
+    auto collVersion = ChunkVersion::fromBSONArrayThrowing(versions["collectionVersion"]);
+    auto shardVersion = ChunkVersion::fromBSONArrayThrowing(versions["shardVersion"]);
 
     ASSERT_TRUE(origVersion.isOlderThan(shardVersion));
     ASSERT_EQ(collVersion, shardVersion);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
index d93ece922eb..16e59054f76 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
@@ -90,8 +90,8 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
                                                splitPoints,
                                                "shard0000",
                                                false /* fromChunkSplitter*/));
 
-    auto collVersion = assertGet(ChunkVersion::parseWithField(versions, "collectionVersion"));
-    auto shardVersion = assertGet(ChunkVersion::parseWithField(versions, "shardVersion"));
+    auto collVersion = ChunkVersion::fromBSONArrayThrowing(versions["collectionVersion"]);
+    auto shardVersion = ChunkVersion::fromBSONArrayThrowing(versions["shardVersion"]);
 
     ASSERT_TRUE(origVersion.isOlderThan(shardVersion));
     ASSERT_EQ(collVersion, shardVersion);
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index 27c9d20b63c..ba3079903f1 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -145,8 +145,8 @@ void mergeChunks(OperationContext* opCtx,
     boost::optional<ChunkVersion> shardVersionReceived = [&]() -> boost::optional<ChunkVersion> {
         // old versions might not have the shardVersion field
         if (cmdResponse.response[ChunkVersion::kShardVersionField]) {
-            return uassertStatusOK(ChunkVersion::parseWithField(cmdResponse.response,
-                                                                ChunkVersion::kShardVersionField));
+            return ChunkVersion::fromBSONArrayThrowing(
+                cmdResponse.response[ChunkVersion::kShardVersionField]);
         }
         return boost::none;
     }();
diff --git a/src/mongo/db/s/op_observer_sharding_test.cpp b/src/mongo/db/s/op_observer_sharding_test.cpp
index 60722206eb2..3a64f193175 100644
--- a/src/mongo/db/s/op_observer_sharding_test.cpp
+++ b/src/mongo/db/s/op_observer_sharding_test.cpp
@@ -49,7 +49,7 @@ void setCollectionFilteringMetadata(OperationContext* opCtx, CollectionMetadata
         ->setFilteringMetadata(opCtx, std::move(metadata));
 
     BSONObjBuilder builder;
-    version.appendWithField(&builder, ChunkVersion::kShardVersionField);
+    version.serializeToBSON(ChunkVersion::kShardVersionField, &builder);
     auto& oss = OperationShardingState::get(opCtx);
     oss.initializeClientRoutingVersionsFromCommand(kTestNss, builder.obj());
 }
diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.h b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.h
index bff5c4c0aae..d212413bcee 100644
--- a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.h
+++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.h
@@ -130,7 +130,7 @@ protected:
         if (!OperationShardingState::isOperationVersioned(opCtx)) {
            const auto version = cm.getVersion(kThisShard.getShardId());
            BSONObjBuilder builder;
-           version.appendWithField(&builder, ChunkVersion::kShardVersionField);
+           version.serializeToBSON(ChunkVersion::kShardVersionField, &builder);
            auto& oss = OperationShardingState::get(opCtx);
            oss.initializeClientRoutingVersionsFromCommand(nss, builder.obj());
diff --git a/src/mongo/db/s/sharding_write_router_bm.cpp b/src/mongo/db/s/sharding_write_router_bm.cpp
index e0eaf066050..8f1b5443a80 100644
--- a/src/mongo/db/s/sharding_write_router_bm.cpp
+++ b/src/mongo/db/s/sharding_write_router_bm.cpp
@@ -157,7 +157,7 @@ std::unique_ptr<CatalogCacheMock> createCatalogCacheMock(OperationContext* opCtx
     BSONObjBuilder builder;
     chunkManager.getVersion(originatorShard)
-        .appendWithField(&builder, ChunkVersion::kShardVersionField);
+        .serializeToBSON(ChunkVersion::kShardVersionField, &builder);
 
     // necessary to set the _shardVersions and _databaseVersions to true. Which is needed to get
    // `getCollectionDescription` to work
     OperationShardingState::get(opCtx).initializeClientRoutingVersionsFromCommand(kNss,
diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp
index 9ea408b54d3..ba1a1fd217f 100644
--- a/src/mongo/db/s/split_chunk.cpp
+++ b/src/mongo/db/s/split_chunk.cpp
@@ -178,8 +178,8 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
     boost::optional<ChunkVersion> shardVersionReceived = [&]() -> boost::optional<ChunkVersion> {
         // old versions might not have the shardVersion field
         if (cmdResponse.response[ChunkVersion::kShardVersionField]) {
-            return uassertStatusOK(ChunkVersion::parseWithField(cmdResponse.response,
-                                                                ChunkVersion::kShardVersionField));
+            return ChunkVersion::fromBSONArrayThrowing(
+                cmdResponse.response[ChunkVersion::kShardVersionField]);
        }
         return boost::none;
     }();
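Where a caller still needs a `Status`-based result, the `MigrationType::fromBSON` hunk above wraps the new throwing parser in a try/catch and converts the exception with `ex.toStatus()`. Below is a condensed sketch of that pattern; the `parseChunkVersionField` helper is hypothetical and simply mirrors the inline block in `type_migration.cpp`.

```cpp
#include "mongo/base/status_with.h"
#include "mongo/base/string_data.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/s/chunk_version.h"
#include "mongo/util/assert_util.h"

namespace mongo {

// Adapts the throwing parser back to a StatusWith, mirroring the try/catch
// that MigrationType::fromBSON now uses for the "chunkVersion" field.
StatusWith<ChunkVersion> parseChunkVersionField(const BSONObj& source, StringData fieldName) {
    try {
        return ChunkVersion::fromBSONArrayThrowing(source[fieldName]);
    } catch (const DBException& ex) {
        return ex.toStatus();
    }
}

}  // namespace mongo
```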