author | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2019-06-27 09:33:33 -0400
committer | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2019-06-30 04:01:25 -0400
commit | ce5e56f2c2c82b0ec65c8c8957326fe96bf1a7e7 (patch)
tree | f9d91011a9bf80093df9667f20f11b87b81ea545
parent | 861013e35652e73fab0b97f534b73fce68a8e00e (diff)
download | mongo-ce5e56f2c2c82b0ec65c8c8957326fe96bf1a7e7.tar.gz
SERVER-26531 Change ConfigServerTestFixture::setupChunks to `void`
... because having it return Status just so all callers can assert that
it's OK is useless.
(cherry picked from commit 4b955e6a1a35b1704a05aa29bd7e6ed42612333c)
15 files changed, 86 insertions(+), 95 deletions(-)
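The mechanical shape of the change is easiest to see in the fixture itself. The sketch below condenses the src/mongo/s/config_server_test_fixture.cpp hunk from the diff that follows: the helper used to propagate a Status from each config-collection insert, and it now asserts on the spot and returns void (setupChunks is reworked the same way).

```cpp
// Before: the Status return existed only so that callers could check it.
Status ConfigServerTestFixture::setupShards(const std::vector<ShardType>& shards) {
    const NamespaceString shardNS(ShardType::ConfigNS);
    for (const auto& shard : shards) {
        auto insertStatus = insertToConfigCollection(operationContext(), shardNS, shard.toBSON());
        if (!insertStatus.isOK()) {
            return insertStatus;
        }
    }
    return Status::OK();
}

// After: a failed insert fails the test right here, so callers have nothing to check.
void ConfigServerTestFixture::setupShards(const std::vector<ShardType>& shards) {
    const NamespaceString shardNS(ShardType::ConfigNS);
    for (const auto& shard : shards) {
        ASSERT_OK(insertToConfigCollection(operationContext(), shardNS, shard.toBSON()));
    }
}
```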
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_to_zone_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_to_zone_test.cpp index b6c6c0a0ce2..89f6015da96 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_to_zone_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_to_zone_test.cpp @@ -50,7 +50,7 @@ TEST_F(AddShardToZoneTest, AddSingleZoneToExistingShardShouldSucceed) { shard.setName("a"); shard.setHost("a:1234"); - setupShards({shard}).transitional_ignore(); + setupShards({shard}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->addShardToZone(operationContext(), shard.getName(), "z")); @@ -69,7 +69,7 @@ TEST_F(AddShardToZoneTest, AddZoneToShardWithSameTagShouldSucceed) { shard.setHost("a:1234"); shard.setTags({"x", "y"}); - setupShards({shard}).transitional_ignore(); + setupShards({shard}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->addShardToZone(operationContext(), shard.getName(), "x")); @@ -90,7 +90,7 @@ TEST_F(AddShardToZoneTest, AddZoneToShardWithNewTagShouldAppend) { shard.setHost("a:1234"); shard.setTags({"x"}); - setupShards({shard}).transitional_ignore(); + setupShards({shard}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->addShardToZone(operationContext(), shard.getName(), "y")); @@ -110,7 +110,7 @@ TEST_F(AddShardToZoneTest, AddSingleZoneToNonExistingShardShouldFail) { shard.setName("a"); shard.setHost("a:1234"); - setupShards({shard}).transitional_ignore(); + setupShards({shard}); auto status = ShardingCatalogManager::get(operationContext()) ->addShardToZone(operationContext(), "b", "z"); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp index da9fdb32e6a..c767d812a56 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp @@ -61,7 +61,7 @@ public: shard.setHost("a:1234"); shard.setTags({zoneName()}); - setupShards({shard}).transitional_ignore(); + setupShards({shard}); CollectionType shardedCollection; shardedCollection.setNs(shardedNS()); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp index 08ded5de55a..d4964336b8e 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp @@ -58,7 +58,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithControlChunk) { shard1.setName("shard1"); shard1.setHost("shard1:12"); - setupShards({shard0, shard1}).transitional_ignore(); + setupShards({shard0, shard1}); ChunkType migratedChunk, controlChunk; { @@ -82,7 +82,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithControlChunk) { controlChunk.setJumbo(true); } - setupChunks({migratedChunk, controlChunk}).transitional_ignore(); + setupChunks({migratedChunk, controlChunk}); Timestamp validAfter{101, 0}; BSONObj versions = assertGet(ShardingCatalogManager::get(operationContext()) @@ -139,7 +139,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) { shard1.setName("shard1"); shard1.setHost("shard1:12"); - setupShards({shard0, shard1}).transitional_ignore(); + setupShards({shard0, shard1}); int origMajorVersion = 15; auto const origVersion = 
ChunkVersion(origMajorVersion, 4, OID::gen()); @@ -156,7 +156,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) { auto chunkMax = BSON("a" << 10); chunk0.setMax(chunkMax); - setupChunks({chunk0}).transitional_ignore(); + setupChunks({chunk0}); Timestamp validAfter{101, 0}; @@ -199,7 +199,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) { shard1.setName("shard1"); shard1.setHost("shard1:12"); - setupShards({shard0, shard1}).transitional_ignore(); + setupShards({shard0, shard1}); int origMajorVersion = 15; auto const origVersion = ChunkVersion(origMajorVersion, 4, OID::gen()); @@ -216,7 +216,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) { auto chunkMax = BSON("a" << 10); chunk0.setMax(chunkMax); - setupChunks({chunk0}).transitional_ignore(); + setupChunks({chunk0}); // Make the time distance between the last history element large enough. Timestamp validAfter{200, 0}; @@ -260,7 +260,7 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) { shard1.setName("shard1"); shard1.setHost("shard1:12"); - setupShards({shard0, shard1}).transitional_ignore(); + setupShards({shard0, shard1}); int origMajorVersion = 15; auto const origVersion = ChunkVersion(origMajorVersion, 4, OID::gen()); @@ -277,7 +277,7 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) { auto chunkMax = BSON("a" << 10); chunk0.setMax(chunkMax); - setupChunks({chunk0}).transitional_ignore(); + setupChunks({chunk0}); // Make the time before the last change to trigger the failure. Timestamp validAfter{99, 0}; @@ -304,7 +304,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) { shard1.setName("shard1"); shard1.setHost("shard1:12"); - setupShards({shard0, shard1}).transitional_ignore(); + setupShards({shard0, shard1}); int origMajorVersion = 12; auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen()); @@ -329,7 +329,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) { auto chunkMaxax = BSON("a" << 20); chunk1.setMax(chunkMaxax); - setupChunks({chunk0, chunk1}).transitional_ignore(); + setupChunks({chunk0, chunk1}); Timestamp validAfter{1}; @@ -355,7 +355,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) { shard1.setName("shard1"); shard1.setHost("shard1:12"); - setupShards({shard0, shard1}).transitional_ignore(); + setupShards({shard0, shard1}); int origMajorVersion = 12; auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen()); @@ -382,7 +382,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) { chunk1.setMax(chunkMaxax); // get version from the control chunk this time - setupChunks({chunk1, chunk0}).transitional_ignore(); + setupChunks({chunk1, chunk0}); Timestamp validAfter{1}; @@ -408,7 +408,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) { shard1.setName("shard1"); shard1.setHost("shard1:12"); - setupShards({shard0, shard1}).transitional_ignore(); + setupShards({shard0, shard1}); int origMajorVersion = 12; auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen()); @@ -433,7 +433,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) { auto chunkMaxax = BSON("a" << 20); chunk1.setMax(chunkMaxax); - setupChunks({chunk1}).transitional_ignore(); + setupChunks({chunk1}); Timestamp validAfter{1}; @@ -459,7 +459,7 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks) shard1.setName("shard1"); shard1.setHost("shard1:12"); - setupShards({shard0, shard1}).transitional_ignore(); + setupShards({shard0, shard1}); int origMajorVersion = 12; auto const 
origVersion = ChunkVersion(origMajorVersion, 7, OID::gen()); @@ -488,7 +488,7 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks) Timestamp ctrlChunkValidAfter = Timestamp(50, 0); chunk1.setHistory({ChunkHistory(ctrlChunkValidAfter, shard1.getName())}); - setupChunks({chunk0, chunk1}).transitional_ignore(); + setupChunks({chunk0, chunk1}); Timestamp validAfter{101, 0}; StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext()) diff --git a/src/mongo/db/s/config/sharding_catalog_manager_create_collection_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_create_collection_test.cpp index 43d378f2a3a..a92890183e9 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_create_collection_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_create_collection_test.cpp @@ -130,7 +130,7 @@ protected: testPrimaryShard.setName("primary"); testPrimaryShard.setHost("b:20"); - uassertStatusOK(setupShards({extraShard, testPrimaryShard})); + setupShards({extraShard, testPrimaryShard}); // Prime the shard registry with information about the existing shards shardRegistry()->reload(operationContext()); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp index 159fc584373..373dfad396e 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp @@ -71,17 +71,17 @@ TEST_F(CreateDatabaseTest, createDatabaseSuccess) { ShardType s0; s0.setName("shard0000"); s0.setHost("ShardHost0:27017"); - ASSERT_OK(setupShards(vector<ShardType>{s0})); + setupShards(vector<ShardType>{s0}); ShardType s1; s1.setName("shard0001"); s1.setHost("ShardHost1:27017"); - ASSERT_OK(setupShards(vector<ShardType>{s1})); + setupShards(vector<ShardType>{s1}); ShardType s2; s2.setName("shard0002"); s2.setHost("ShardHost2:27017"); - ASSERT_OK(setupShards(vector<ShardType>{s2})); + setupShards(vector<ShardType>{s2}); // Prime the shard registry with information about the existing shards shardRegistry()->reload(operationContext()); @@ -160,7 +160,7 @@ TEST_F(CreateDatabaseTest, createDatabaseDBExists) { shard.setName("shard0"); shard.setHost("shard0:12"); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase(dbname, shard.getName(), false); @@ -175,7 +175,7 @@ TEST_F(CreateDatabaseTest, createDatabaseDBExistsDifferentCase) { shard.setName("shard0"); shard.setHost("shard0:12"); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase(dbnameDiffCase, shard.getName(), false); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_drop_coll_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_drop_coll_test.cpp index 699b83c44e3..c413cf893f6 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_drop_coll_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_drop_coll_test.cpp @@ -70,7 +70,7 @@ public: _min = BSON(_shardKey << 0); _max = BSON(_shardKey << 10); - ASSERT_OK(setupShards({_shard1, _shard2})); + setupShards({_shard1, _shard2}); auto shard1Targeter = RemoteCommandTargeterMock::get( uassertStatusOK(shardRegistry()->getShard(operationContext(), _shard1.getName())) diff --git a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp index 
83ed91c88cc..f6f0bc7129f 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp @@ -71,7 +71,7 @@ TEST_F(EnableShardingTest, noDBExists) { shard.setName("shard0"); shard.setHost("shard0:12"); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); auto shardTargeter = RemoteCommandTargeterMock::get( uassertStatusOK(shardRegistry()->getShard(operationContext(), ShardId("shard0"))) @@ -109,7 +109,7 @@ TEST_F(EnableShardingTest, dbExistsWithDifferentCase) { ShardType shard; shard.setName("shard0"); shard.setHost("shard0:12"); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase("Db3", shard.getName(), false); ASSERT_THROWS_CODE( ShardingCatalogManager::get(operationContext())->enableSharding(operationContext(), "db3"), @@ -121,7 +121,7 @@ TEST_F(EnableShardingTest, dbExists) { ShardType shard; shard.setName("shard0"); shard.setHost("shard0:12"); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase("db4", shard.getName(), false); ShardingCatalogManager::get(operationContext())->enableSharding(operationContext(), "db4"); } @@ -130,7 +130,7 @@ TEST_F(EnableShardingTest, succeedsWhenTheDatabaseIsAlreadySharded) { ShardType shard; shard.setName("shard0"); shard.setHost("shard0:12"); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase("db5", shard.getName(), true); ShardingCatalogManager::get(operationContext())->enableSharding(operationContext(), "db5"); } @@ -140,7 +140,7 @@ TEST_F(EnableShardingTest, dbExistsInvalidFormat) { shard.setName("shard0"); shard.setHost("shard0:12"); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); // Set up database with bad type for primary field. 
ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(), diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp index 80f66cc8692..08dc37fccfa 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp @@ -67,7 +67,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) { std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax}; - setupChunks({chunk, chunk2}).transitional_ignore(); + setupChunks({chunk, chunk2}); Timestamp validAfter{100, 0}; @@ -138,7 +138,7 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) { // Record chunk boundaries for passing into commitChunkMerge std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkBound2, chunkMax}; - setupChunks({chunk, chunk2, chunk3}).transitional_ignore(); + setupChunks({chunk, chunk2, chunk3}); Timestamp validAfter{100, 0}; @@ -213,7 +213,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) { otherChunk.setMin(BSON("a" << 10)); otherChunk.setMax(BSON("a" << 20)); - setupChunks({chunk, chunk2, otherChunk}).transitional_ignore(); + setupChunks({chunk, chunk2, otherChunk}); Timestamp validAfter{100, 0}; @@ -284,7 +284,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) { otherChunk.setMin(BSON("a" << 10)); otherChunk.setMax(BSON("a" << 20)); - setupChunks({chunk, chunk2, otherChunk}).transitional_ignore(); + setupChunks({chunk, chunk2, otherChunk}); Timestamp validAfter{1}; @@ -350,7 +350,7 @@ TEST_F(MergeChunkTest, NonExistingNamespace) { // Record chunk boundaries for passing into commitChunkMerge std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax}; - setupChunks({chunk, chunk2}).transitional_ignore(); + setupChunks({chunk, chunk2}); Timestamp validAfter{1}; @@ -387,7 +387,7 @@ TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) { // Record chunk baoundaries for passing into commitChunkMerge std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax}; - setupChunks({chunk, chunk2}).transitional_ignore(); + setupChunks({chunk, chunk2}); Timestamp validAfter{1}; @@ -430,7 +430,7 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedFailsPrecondition) { mergedChunk.setVersion(mergedVersion); mergedChunk.setMax(chunkMax); - setupChunks({mergedChunk}).transitional_ignore(); + setupChunks({mergedChunk}); Timestamp validAfter{1}; @@ -493,7 +493,7 @@ TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFails) { chunk.setVersion(version); originalChunks.push_back(chunk); - setupChunks(originalChunks).transitional_ignore(); + setupChunks(originalChunks); } Timestamp validAfter{1}; @@ -531,7 +531,7 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) { chunk3.setMin(chunkBound2); chunk3.setMax(chunkMax); - ASSERT_OK(setupChunks({chunk1, chunk2, chunk3})); + setupChunks({chunk1, chunk2, chunk3}); // Record chunk boundaries for passing into commitChunkMerge std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound1, chunkBound2, chunkMax}; diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp index fddb7426509..f2fa1956692 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp @@ -51,7 +51,7 @@ 
TEST_F(RemoveShardFromZoneTest, RemoveZoneThatNoLongerExistsShouldNotError) { shard.setName("a"); shard.setHost("a:1234"); - setupShards({shard}).transitional_ignore(); + setupShards({shard}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->removeShardFromZone(operationContext(), shard.getName(), "z")); @@ -73,7 +73,7 @@ TEST_F(RemoveShardFromZoneTest, RemovingZoneThatIsOnlyReferencedByAnotherShardSh shardB.setName("b"); shardB.setHost("b:1234"); - setupShards({shardA, shardB}).transitional_ignore(); + setupShards({shardA, shardB}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->removeShardFromZone(operationContext(), shardB.getName(), "z")); @@ -106,7 +106,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveLastZoneFromShardShouldSucceedWhenNoChunks shardB.setName("b"); shardB.setHost("b:1234"); - setupShards({shardA, shardB}).transitional_ignore(); + setupShards({shardA, shardB}); // Insert a chunk range document referring to a different zone TagsType tagDoc; @@ -147,7 +147,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveLastZoneFromShardShouldFailWhenAChunkRefer shardB.setName("b"); shardB.setHost("b:1234"); - setupShards({shardA, shardB}).transitional_ignore(); + setupShards({shardA, shardB}); TagsType tagDoc; tagDoc.setNS(NamespaceString("test.foo")); @@ -186,7 +186,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveZoneShouldFailIfShardDoesntExist) { shardA.setHost("a:1234"); shardA.setTags({"z"}); - setupShards({shardA}).transitional_ignore(); + setupShards({shardA}); auto status = ShardingCatalogManager::get(operationContext()) ->removeShardFromZone(operationContext(), "b", "z"); @@ -213,7 +213,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveZoneFromShardShouldOnlyRemoveZoneOnSpecifi shardB.setHost("b:1234"); shardB.setTags({"y", "z"}); - setupShards({shardA, shardB}).transitional_ignore(); + setupShards({shardA, shardB}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->removeShardFromZone(operationContext(), shardB.getName(), "z")); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp index 2432d266e75..80f8621cab9 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp @@ -132,7 +132,7 @@ TEST_F(RemoveShardTest, RemoveShardAnotherShardDraining) { shard2.setMaxSizeMB(100); shard2.setState(ShardType::ShardState::kShardAware); - ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2})); + setupShards(std::vector<ShardType>{shard1, shard2}); auto result = assertGet(ShardingCatalogManager::get(operationContext()) ->removeShard(operationContext(), shard1.getName())); @@ -154,7 +154,7 @@ TEST_F(RemoveShardTest, RemoveShardCantRemoveLastShard) { shard1.setMaxSizeMB(100); shard1.setState(ShardType::ShardState::kShardAware); - ASSERT_OK(setupShards(std::vector<ShardType>{shard1})); + setupShards(std::vector<ShardType>{shard1}); ASSERT_EQUALS(ErrorCodes::IllegalOperation, ShardingCatalogManager::get(operationContext()) @@ -175,7 +175,7 @@ TEST_F(RemoveShardTest, RemoveShardStartDraining) { shard2.setMaxSizeMB(100); shard2.setState(ShardType::ShardState::kShardAware); - ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2})); + setupShards(std::vector<ShardType>{shard1, shard2}); auto result = assertGet(ShardingCatalogManager::get(operationContext()) ->removeShard(operationContext(), shard1.getName())); @@ -211,9 +211,9 @@ TEST_F(RemoveShardTest, 
RemoveShardStillDrainingChunksRemaining) { ChunkVersion(1, 3, epoch), shard1.getName()); - ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2})); + setupShards(std::vector<ShardType>{shard1, shard2}); setupDatabase("testDB", shard1.getName(), true); - ASSERT_OK(setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3})); + setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3}); auto startedResult = assertGet(ShardingCatalogManager::get(operationContext()) ->removeShard(operationContext(), shard1.getName())); @@ -240,7 +240,7 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingDatabasesRemaining) { shard2.setMaxSizeMB(100); shard2.setState(ShardType::ShardState::kShardAware); - ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2})); + setupShards(std::vector<ShardType>{shard1, shard2}); setupDatabase("testDB", shard1.getName(), false); auto startedResult = assertGet(ShardingCatalogManager::get(operationContext()) @@ -284,9 +284,9 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) { std::vector<ChunkType> chunks{chunk1, chunk2, chunk3}; - ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2})); + setupShards(std::vector<ShardType>{shard1, shard2}); setupDatabase("testDB", shard2.getName(), false); - ASSERT_OK(setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3})); + setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3}); auto startedResult = assertGet(ShardingCatalogManager::get(operationContext()) ->removeShard(operationContext(), shard1.getName())); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp index 7cbe29a27a7..1e1fba2a1eb 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp @@ -139,7 +139,7 @@ TEST_F(ConfigServerShardCollectionTest, Partially_Written_Chunks_Present) { ShardType shard; shard.setName("shard0"); shard.setHost("shardHost"); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase(kNamespace.db().toString(), shard.getName(), true); @@ -151,7 +151,7 @@ TEST_F(ConfigServerShardCollectionTest, Partially_Written_Chunks_Present) { chunk.setShard(shard.getName()); chunk.setMin(BSON("_id" << 1)); chunk.setMax(BSON("_id" << 5)); - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); ASSERT_THROWS_CODE(ShardingCatalogManager::get(operationContext()) ->shardCollection(operationContext(), @@ -179,7 +179,7 @@ TEST_F(ConfigServerShardCollectionTest, RangeSharding_ForMapReduce_NoInitialSpli targeter->setFindHostReturnValue(shardHost); targeterFactory()->addTargeterToReturn(ConnectionString(shardHost), std::move(targeter)); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase(kNamespace.db().toString(), shard.getName(), true); @@ -248,7 +248,7 @@ TEST_F(ConfigServerShardCollectionTest, RangeSharding_ForMapReduce_WithInitialSp targeter2->setFindHostReturnValue(shard2Host); targeterFactory()->addTargeterToReturn(ConnectionString(shard2Host), std::move(targeter2)); - ASSERT_OK(setupShards(vector<ShardType>{shard0, shard1, shard2})); + setupShards(vector<ShardType>{shard0, shard1, shard2}); setupDatabase(kNamespace.db().toString(), shard0.getName(), true); @@ -316,7 +316,7 @@ TEST_F(ConfigServerShardCollectionTest, RangeSharding_NoInitialSplitPoints_NoSpl targeter->setFindHostReturnValue(shardHost); 
targeterFactory()->addTargeterToReturn(ConnectionString(shardHost), std::move(targeter)); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase(kNamespace.db().toString(), shard.getName(), true); @@ -367,7 +367,7 @@ TEST_F(ConfigServerShardCollectionTest, RangeSharding_NoInitialSplitPoints_WithS targeter->setFindHostReturnValue(shardHost); targeterFactory()->addTargeterToReturn(ConnectionString(shardHost), std::move(targeter)); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase(kNamespace.db().toString(), shard.getName(), true); @@ -440,7 +440,7 @@ TEST_F(ConfigServerShardCollectionTest, RangeSharding_WithInitialSplitPoints_NoS targeter->setFindHostReturnValue(shardHost); targeterFactory()->addTargeterToReturn(ConnectionString(shardHost), std::move(targeter)); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase(kNamespace.db().toString(), shard.getName(), true); @@ -549,7 +549,7 @@ TEST_F(CreateFirstChunksTest, NonEmptyCollection_SplitPoints_FromSplitVector_Man targeter->setFindHostReturnValue(connStr.getServers()[0]); targeterFactory()->addTargeterToReturn(connStr, std::move(targeter)); - ASSERT_OK(setupShards(kShards)); + setupShards(kShards); shardRegistry()->reload(operationContext()); auto future = launchAsync([&] { @@ -588,7 +588,7 @@ TEST_F(CreateFirstChunksTest, NonEmptyCollection_SplitPoints_FromClient_ManyChun targeter->setFindHostReturnValue(connStr.getServers()[0]); targeterFactory()->addTargeterToReturn(connStr, std::move(targeter)); - ASSERT_OK(setupShards(kShards)); + setupShards(kShards); shardRegistry()->reload(operationContext()); auto future = launchAsync([&] { @@ -624,7 +624,7 @@ TEST_F(CreateFirstChunksTest, NonEmptyCollection_WithZones_OneChunkToPrimary) { const std::vector<ShardType> kShards{ShardType("shard0", "rs0/shard0:123", {"TestZone"}), ShardType("shard1", "rs1/shard1:123", {"TestZone"}), ShardType("shard2", "rs2/shard2:123")}; - ASSERT_OK(setupShards(kShards)); + setupShards(kShards); shardRegistry()->reload(operationContext()); std::vector<BSONObj> splitPoints{}; @@ -665,7 +665,7 @@ TEST_F(CreateFirstChunksTest, EmptyCollection_SplitPoints_FromClient_ManyChunksD targeter->setFindHostReturnValue(connStr.getServers()[0]); targeterFactory()->addTargeterToReturn(connStr, std::move(targeter)); - ASSERT_OK(setupShards(kShards)); + setupShards(kShards); shardRegistry()->reload(operationContext()); auto future = launchAsync([&] { @@ -711,7 +711,7 @@ TEST_F(CreateFirstChunksTest, EmptyCollection_NoSplitPoints_OneChunkToPrimary) { targeter->setFindHostReturnValue(connStr.getServers()[0]); targeterFactory()->addTargeterToReturn(connStr, std::move(targeter)); - ASSERT_OK(setupShards(kShards)); + setupShards(kShards); shardRegistry()->reload(operationContext()); auto future = launchAsync([&] { @@ -746,7 +746,7 @@ TEST_F(CreateFirstChunksTest, EmptyCollection_WithZones_ManyChunksOnFirstZoneSha const std::vector<ShardType> kShards{ShardType("shard0", "rs0/shard0:123", {"TestZone"}), ShardType("shard1", "rs1/shard1:123", {"TestZone"}), ShardType("shard2", "rs2/shard2:123")}; - ASSERT_OK(setupShards(kShards)); + setupShards(kShards); shardRegistry()->reload(operationContext()); std::vector<BSONObj> splitPoints{}; diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp index 48e892891c8..1baa43be2d4 100644 --- 
a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp @@ -61,7 +61,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) { auto chunkSplitPoint = BSON("a" << 5); std::vector<BSONObj> splitPoints{chunkSplitPoint}; - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -122,7 +122,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) { auto chunkSplitPoint2 = BSON("a" << 7); std::vector<BSONObj> splitPoints{chunkSplitPoint, chunkSplitPoint2}; - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -206,7 +206,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) { chunk2.setMin(BSON("a" << 10)); chunk2.setMax(BSON("a" << 20)); - ASSERT_OK(setupChunks({chunk, chunk2})); + setupChunks({chunk, chunk2}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -256,7 +256,7 @@ TEST_F(SplitChunkTest, PreConditionFailErrors) { auto chunkSplitPoint = BSON("a" << 5); splitPoints.push_back(chunkSplitPoint); - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); auto splitStatus = ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -283,7 +283,7 @@ TEST_F(SplitChunkTest, NonExisingNamespaceErrors) { std::vector<BSONObj> splitPoints{BSON("a" << 5)}; - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); auto splitStatus = ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -310,7 +310,7 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) { std::vector<BSONObj> splitPoints{BSON("a" << 5)}; - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); auto splitStatus = ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -337,7 +337,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) { std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 4)}; - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); auto splitStatus = ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -364,7 +364,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) { std::vector<BSONObj> splitPoints{BSON("a" << 0), BSON("a" << 5)}; - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); auto splitStatus = ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -391,7 +391,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) { std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 15)}; - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); auto splitStatus = ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -415,7 +415,7 @@ TEST_F(SplitChunkTest, SplitPointsWithDollarPrefixShouldFail) { auto chunkMax = BSON("a" << kMaxBSONKey); chunk.setMin(chunkMin); chunk.setMax(chunkMax); - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); ASSERT_NOT_OK(ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), diff --git a/src/mongo/s/config_server_test_fixture.cpp b/src/mongo/s/config_server_test_fixture.cpp index 5c14845b07c..a1a12ad6469 100644 --- a/src/mongo/s/config_server_test_fixture.cpp +++ 
b/src/mongo/s/config_server_test_fixture.cpp @@ -284,16 +284,11 @@ StatusWith<BSONObj> ConfigServerTestFixture::findOneOnConfigCollection(Operation return findResult.docs.front().getOwned(); } -Status ConfigServerTestFixture::setupShards(const std::vector<ShardType>& shards) { +void ConfigServerTestFixture::setupShards(const std::vector<ShardType>& shards) { const NamespaceString shardNS(ShardType::ConfigNS); for (const auto& shard : shards) { - auto insertStatus = insertToConfigCollection(operationContext(), shardNS, shard.toBSON()); - if (!insertStatus.isOK()) { - return insertStatus; - } + ASSERT_OK(insertToConfigCollection(operationContext(), shardNS, shard.toBSON())); } - - return Status::OK(); } StatusWith<ShardType> ConfigServerTestFixture::getShardDoc(OperationContext* opCtx, @@ -311,16 +306,11 @@ StatusWith<ShardType> ConfigServerTestFixture::getShardDoc(OperationContext* opC return ShardType::fromBSON(doc.getValue()); } -Status ConfigServerTestFixture::setupChunks(const std::vector<ChunkType>& chunks) { +void ConfigServerTestFixture::setupChunks(const std::vector<ChunkType>& chunks) { const NamespaceString chunkNS(ChunkType::ConfigNS); for (const auto& chunk : chunks) { - auto insertStatus = - insertToConfigCollection(operationContext(), chunkNS, chunk.toConfigBSON()); - if (!insertStatus.isOK()) - return insertStatus; + ASSERT_OK(insertToConfigCollection(operationContext(), chunkNS, chunk.toConfigBSON())); } - - return Status::OK(); } StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(OperationContext* opCtx, diff --git a/src/mongo/s/config_server_test_fixture.h b/src/mongo/s/config_server_test_fixture.h index d592e47882a..50ea4eb4eb8 100644 --- a/src/mongo/s/config_server_test_fixture.h +++ b/src/mongo/s/config_server_test_fixture.h @@ -91,7 +91,7 @@ public: /** * Setup the config.shards collection to contain the given shards. */ - Status setupShards(const std::vector<ShardType>& shards); + void setupShards(const std::vector<ShardType>& shards); /** * Retrieves the shard document from the config server. @@ -102,7 +102,7 @@ public: /** * Setup the config.chunks collection to contain the given chunks. */ - Status setupChunks(const std::vector<ChunkType>& chunks); + void setupChunks(const std::vector<ChunkType>& chunks); /** * Retrieves the chunk document from the config server. diff --git a/src/mongo/s/sharding_router_test_fixture.cpp b/src/mongo/s/sharding_router_test_fixture.cpp index 96454df98d4..38bf1b708ef 100644 --- a/src/mongo/s/sharding_router_test_fixture.cpp +++ b/src/mongo/s/sharding_router_test_fixture.cpp @@ -278,6 +278,7 @@ void ShardingTestFixture::addRemoteShards( targeterFactory()->addTargeterToReturn(ConnectionString(std::get<1>(shard)), std::move(targeter)); } + setupShards(shards); } |
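At the call sites the effect is the inverse: the tests no longer wrap the helpers or discard their result. A condensed before/after, taken from the test hunks above (the router fixture's addRemoteShards() likewise gains a plain setupShards(shards) call at the end):

```cpp
// Before: callers either asserted the returned Status...
ASSERT_OK(setupShards({shard}));
ASSERT_OK(setupChunks({chunk}));
// ...or discarded it, letting a failed setup pass silently.
setupShards({shard}).transitional_ignore();

// After: the fixture asserts internally, so every call becomes a plain statement.
setupShards({shard});
setupChunks({chunk});
```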