author:    Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2019-06-27 09:33:33 -0400
committer: Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2019-06-27 10:50:20 -0400
commit:    4b955e6a1a35b1704a05aa29bd7e6ed42612333c (patch)
tree:      e65320a19784f295ded197f4b121b71410abf66a /src/mongo/db/s
parent:    8fd79fe845e3e4717df26abb78c3fb7859f3823e (diff)
download:  mongo-4b955e6a1a35b1704a05aa29bd7e6ed42612333c.tar.gz
SERVER-26531 Change ConfigServerTestFixture::setupChunks to `void`
... because having it return Status just so that every caller can assert
it is OK serves no purpose.
Diffstat (limited to 'src/mongo/db/s')
12 files changed, 79 insertions(+), 79 deletions(-)
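The diff below is mechanical: because the fixture helpers no longer return Status, every call site stops wrapping them in ASSERT_OK(...) and stops discarding the result with .transitional_ignore(). A minimal before/after sketch, with declarations paraphrased from the call sites rather than copied from the fixture header:

```cpp
// Paraphrased ConfigServerTestFixture declarations -- a sketch, not verbatim source.
Status setupShards(const std::vector<ShardType>& shards);   // before: returns Status
Status setupChunks(const std::vector<ChunkType>& chunks);   // before: returns Status
void   setupShards(const std::vector<ShardType>& shards);   // after:  nothing for the caller to check
void   setupChunks(const std::vector<ChunkType>& chunks);   // after

// Typical call-site change repeated throughout the tests below:
setupShards({shard}).transitional_ignore();   // before: Status explicitly discarded
ASSERT_OK(setupChunks({chunk}));              // before: Status asserted by the caller
setupShards({shard});                         // after
setupChunks({chunk});                         // after
```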
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_to_zone_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_to_zone_test.cpp index e8408c2d4c8..cc4eacef560 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_to_zone_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_to_zone_test.cpp @@ -49,7 +49,7 @@ TEST_F(AddShardToZoneTest, AddSingleZoneToExistingShardShouldSucceed) { shard.setName("a"); shard.setHost("a:1234"); - setupShards({shard}).transitional_ignore(); + setupShards({shard}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->addShardToZone(operationContext(), shard.getName(), "z")); @@ -68,7 +68,7 @@ TEST_F(AddShardToZoneTest, AddZoneToShardWithSameTagShouldSucceed) { shard.setHost("a:1234"); shard.setTags({"x", "y"}); - setupShards({shard}).transitional_ignore(); + setupShards({shard}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->addShardToZone(operationContext(), shard.getName(), "x")); @@ -89,7 +89,7 @@ TEST_F(AddShardToZoneTest, AddZoneToShardWithNewTagShouldAppend) { shard.setHost("a:1234"); shard.setTags({"x"}); - setupShards({shard}).transitional_ignore(); + setupShards({shard}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->addShardToZone(operationContext(), shard.getName(), "y")); @@ -109,7 +109,7 @@ TEST_F(AddShardToZoneTest, AddSingleZoneToNonExistingShardShouldFail) { shard.setName("a"); shard.setHost("a:1234"); - setupShards({shard}).transitional_ignore(); + setupShards({shard}); auto status = ShardingCatalogManager::get(operationContext()) ->addShardToZone(operationContext(), "b", "z"); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp index b9a8e59eac1..6eabe85a358 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp @@ -60,7 +60,7 @@ public: shard.setHost("a:1234"); shard.setTags({zoneName()}); - setupShards({shard}).transitional_ignore(); + setupShards({shard}); CollectionType shardedCollection; shardedCollection.setNs(shardedNS()); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp index 9bbad457c9b..7322b3caf08 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp @@ -57,7 +57,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithControlChunk) { shard1.setName("shard1"); shard1.setHost("shard1:12"); - setupShards({shard0, shard1}).transitional_ignore(); + setupShards({shard0, shard1}); ChunkType migratedChunk, controlChunk; { @@ -81,7 +81,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithControlChunk) { controlChunk.setJumbo(true); } - setupChunks({migratedChunk, controlChunk}).transitional_ignore(); + setupChunks({migratedChunk, controlChunk}); Timestamp validAfter{101, 0}; BSONObj versions = assertGet(ShardingCatalogManager::get(operationContext()) @@ -138,7 +138,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) { shard1.setName("shard1"); shard1.setHost("shard1:12"); - setupShards({shard0, shard1}).transitional_ignore(); + setupShards({shard0, shard1}); int origMajorVersion = 15; auto const origVersion = 
ChunkVersion(origMajorVersion, 4, OID::gen()); @@ -155,7 +155,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) { auto chunkMax = BSON("a" << 10); chunk0.setMax(chunkMax); - setupChunks({chunk0}).transitional_ignore(); + setupChunks({chunk0}); Timestamp validAfter{101, 0}; @@ -198,7 +198,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) { shard1.setName("shard1"); shard1.setHost("shard1:12"); - setupShards({shard0, shard1}).transitional_ignore(); + setupShards({shard0, shard1}); int origMajorVersion = 15; auto const origVersion = ChunkVersion(origMajorVersion, 4, OID::gen()); @@ -215,7 +215,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) { auto chunkMax = BSON("a" << 10); chunk0.setMax(chunkMax); - setupChunks({chunk0}).transitional_ignore(); + setupChunks({chunk0}); // Make the time distance between the last history element large enough. Timestamp validAfter{200, 0}; @@ -259,7 +259,7 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) { shard1.setName("shard1"); shard1.setHost("shard1:12"); - setupShards({shard0, shard1}).transitional_ignore(); + setupShards({shard0, shard1}); int origMajorVersion = 15; auto const origVersion = ChunkVersion(origMajorVersion, 4, OID::gen()); @@ -276,7 +276,7 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) { auto chunkMax = BSON("a" << 10); chunk0.setMax(chunkMax); - setupChunks({chunk0}).transitional_ignore(); + setupChunks({chunk0}); // Make the time before the last change to trigger the failure. Timestamp validAfter{99, 0}; @@ -303,7 +303,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) { shard1.setName("shard1"); shard1.setHost("shard1:12"); - setupShards({shard0, shard1}).transitional_ignore(); + setupShards({shard0, shard1}); int origMajorVersion = 12; auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen()); @@ -328,7 +328,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) { auto chunkMaxax = BSON("a" << 20); chunk1.setMax(chunkMaxax); - setupChunks({chunk0, chunk1}).transitional_ignore(); + setupChunks({chunk0, chunk1}); Timestamp validAfter{1}; @@ -354,7 +354,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) { shard1.setName("shard1"); shard1.setHost("shard1:12"); - setupShards({shard0, shard1}).transitional_ignore(); + setupShards({shard0, shard1}); int origMajorVersion = 12; auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen()); @@ -381,7 +381,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) { chunk1.setMax(chunkMaxax); // get version from the control chunk this time - setupChunks({chunk1, chunk0}).transitional_ignore(); + setupChunks({chunk1, chunk0}); Timestamp validAfter{1}; @@ -407,7 +407,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) { shard1.setName("shard1"); shard1.setHost("shard1:12"); - setupShards({shard0, shard1}).transitional_ignore(); + setupShards({shard0, shard1}); int origMajorVersion = 12; auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen()); @@ -432,7 +432,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) { auto chunkMaxax = BSON("a" << 20); chunk1.setMax(chunkMaxax); - setupChunks({chunk1}).transitional_ignore(); + setupChunks({chunk1}); Timestamp validAfter{1}; @@ -458,7 +458,7 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks) shard1.setName("shard1"); shard1.setHost("shard1:12"); - setupShards({shard0, shard1}).transitional_ignore(); + setupShards({shard0, shard1}); int origMajorVersion = 12; auto const 
origVersion = ChunkVersion(origMajorVersion, 7, OID::gen()); @@ -487,7 +487,7 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks) Timestamp ctrlChunkValidAfter = Timestamp(50, 0); chunk1.setHistory({ChunkHistory(ctrlChunkValidAfter, shard1.getName())}); - setupChunks({chunk0, chunk1}).transitional_ignore(); + setupChunks({chunk0, chunk1}); Timestamp validAfter{101, 0}; StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext()) diff --git a/src/mongo/db/s/config/sharding_catalog_manager_create_collection_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_create_collection_test.cpp index ae6a7068e01..7fd56297e96 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_create_collection_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_create_collection_test.cpp @@ -129,7 +129,7 @@ protected: testPrimaryShard.setName("primary"); testPrimaryShard.setHost("b:20"); - uassertStatusOK(setupShards({extraShard, testPrimaryShard})); + setupShards({extraShard, testPrimaryShard}); // Prime the shard registry with information about the existing shards shardRegistry()->reload(operationContext()); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp index 59fe4af9e98..f27765268c5 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp @@ -70,17 +70,17 @@ TEST_F(CreateDatabaseTest, createDatabaseSuccess) { ShardType s0; s0.setName("shard0000"); s0.setHost("ShardHost0:27017"); - ASSERT_OK(setupShards(vector<ShardType>{s0})); + setupShards(vector<ShardType>{s0}); ShardType s1; s1.setName("shard0001"); s1.setHost("ShardHost1:27017"); - ASSERT_OK(setupShards(vector<ShardType>{s1})); + setupShards(vector<ShardType>{s1}); ShardType s2; s2.setName("shard0002"); s2.setHost("ShardHost2:27017"); - ASSERT_OK(setupShards(vector<ShardType>{s2})); + setupShards(vector<ShardType>{s2}); // Prime the shard registry with information about the existing shards shardRegistry()->reload(operationContext()); @@ -158,7 +158,7 @@ TEST_F(CreateDatabaseTest, createDatabaseDBExists) { shard.setName("shard0"); shard.setHost("shard0:12"); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase(dbname, shard.getName(), false); @@ -173,7 +173,7 @@ TEST_F(CreateDatabaseTest, createDatabaseDBExistsDifferentCase) { shard.setName("shard0"); shard.setHost("shard0:12"); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase(dbnameDiffCase, shard.getName(), false); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_drop_coll_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_drop_coll_test.cpp index 43d6a7f4304..3260caba7bc 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_drop_coll_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_drop_coll_test.cpp @@ -69,7 +69,7 @@ public: _min = BSON(_shardKey << 0); _max = BSON(_shardKey << 10); - ASSERT_OK(setupShards({_shard1, _shard2})); + setupShards({_shard1, _shard2}); auto shard1Targeter = RemoteCommandTargeterMock::get( uassertStatusOK(shardRegistry()->getShard(operationContext(), _shard1.getName())) diff --git a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp index 
2dbc596b801..7452b250f14 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp @@ -70,7 +70,7 @@ TEST_F(EnableShardingTest, noDBExists) { shard.setName("shard0"); shard.setHost("shard0:12"); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); auto shardTargeter = RemoteCommandTargeterMock::get( uassertStatusOK(shardRegistry()->getShard(operationContext(), ShardId("shard0"))) @@ -107,7 +107,7 @@ TEST_F(EnableShardingTest, dbExistsWithDifferentCase) { ShardType shard; shard.setName("shard0"); shard.setHost("shard0:12"); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase("Db3", shard.getName(), false); ASSERT_THROWS_CODE( ShardingCatalogManager::get(operationContext())->enableSharding(operationContext(), "db3"), @@ -119,7 +119,7 @@ TEST_F(EnableShardingTest, dbExists) { ShardType shard; shard.setName("shard0"); shard.setHost("shard0:12"); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase("db4", shard.getName(), false); ShardingCatalogManager::get(operationContext())->enableSharding(operationContext(), "db4"); } @@ -128,7 +128,7 @@ TEST_F(EnableShardingTest, succeedsWhenTheDatabaseIsAlreadySharded) { ShardType shard; shard.setName("shard0"); shard.setHost("shard0:12"); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase("db5", shard.getName(), true); ShardingCatalogManager::get(operationContext())->enableSharding(operationContext(), "db5"); } @@ -138,7 +138,7 @@ TEST_F(EnableShardingTest, dbExistsInvalidFormat) { shard.setName("shard0"); shard.setHost("shard0:12"); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); // Set up database with bad type for primary field. 
ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(), diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp index b403eca6f7a..2ba113cc5f0 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp @@ -66,7 +66,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) { std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax}; - setupChunks({chunk, chunk2}).transitional_ignore(); + setupChunks({chunk, chunk2}); Timestamp validAfter{100, 0}; @@ -137,7 +137,7 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) { // Record chunk boundaries for passing into commitChunkMerge std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkBound2, chunkMax}; - setupChunks({chunk, chunk2, chunk3}).transitional_ignore(); + setupChunks({chunk, chunk2, chunk3}); Timestamp validAfter{100, 0}; @@ -212,7 +212,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) { otherChunk.setMin(BSON("a" << 10)); otherChunk.setMax(BSON("a" << 20)); - setupChunks({chunk, chunk2, otherChunk}).transitional_ignore(); + setupChunks({chunk, chunk2, otherChunk}); Timestamp validAfter{100, 0}; @@ -283,7 +283,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) { otherChunk.setMin(BSON("a" << 10)); otherChunk.setMax(BSON("a" << 20)); - setupChunks({chunk, chunk2, otherChunk}).transitional_ignore(); + setupChunks({chunk, chunk2, otherChunk}); Timestamp validAfter{1}; @@ -349,7 +349,7 @@ TEST_F(MergeChunkTest, NonExistingNamespace) { // Record chunk boundaries for passing into commitChunkMerge std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax}; - setupChunks({chunk, chunk2}).transitional_ignore(); + setupChunks({chunk, chunk2}); Timestamp validAfter{1}; @@ -386,7 +386,7 @@ TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) { // Record chunk baoundaries for passing into commitChunkMerge std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax}; - setupChunks({chunk, chunk2}).transitional_ignore(); + setupChunks({chunk, chunk2}); Timestamp validAfter{1}; @@ -429,7 +429,7 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedFailsPrecondition) { mergedChunk.setVersion(mergedVersion); mergedChunk.setMax(chunkMax); - setupChunks({mergedChunk}).transitional_ignore(); + setupChunks({mergedChunk}); Timestamp validAfter{1}; @@ -492,7 +492,7 @@ TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFails) { chunk.setVersion(version); originalChunks.push_back(chunk); - setupChunks(originalChunks).transitional_ignore(); + setupChunks(originalChunks); } Timestamp validAfter{1}; @@ -530,7 +530,7 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) { chunk3.setMin(chunkBound2); chunk3.setMax(chunkMax); - ASSERT_OK(setupChunks({chunk1, chunk2, chunk3})); + setupChunks({chunk1, chunk2, chunk3}); // Record chunk boundaries for passing into commitChunkMerge std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound1, chunkBound2, chunkMax}; diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp index a61f12c4e8a..adcf51cfb7e 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp @@ -50,7 +50,7 @@ 
TEST_F(RemoveShardFromZoneTest, RemoveZoneThatNoLongerExistsShouldNotError) { shard.setName("a"); shard.setHost("a:1234"); - setupShards({shard}).transitional_ignore(); + setupShards({shard}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->removeShardFromZone(operationContext(), shard.getName(), "z")); @@ -72,7 +72,7 @@ TEST_F(RemoveShardFromZoneTest, RemovingZoneThatIsOnlyReferencedByAnotherShardSh shardB.setName("b"); shardB.setHost("b:1234"); - setupShards({shardA, shardB}).transitional_ignore(); + setupShards({shardA, shardB}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->removeShardFromZone(operationContext(), shardB.getName(), "z")); @@ -105,7 +105,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveLastZoneFromShardShouldSucceedWhenNoChunks shardB.setName("b"); shardB.setHost("b:1234"); - setupShards({shardA, shardB}).transitional_ignore(); + setupShards({shardA, shardB}); // Insert a chunk range document referring to a different zone TagsType tagDoc; @@ -146,7 +146,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveLastZoneFromShardShouldFailWhenAChunkRefer shardB.setName("b"); shardB.setHost("b:1234"); - setupShards({shardA, shardB}).transitional_ignore(); + setupShards({shardA, shardB}); TagsType tagDoc; tagDoc.setNS(NamespaceString("test.foo")); @@ -185,7 +185,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveZoneShouldFailIfShardDoesntExist) { shardA.setHost("a:1234"); shardA.setTags({"z"}); - setupShards({shardA}).transitional_ignore(); + setupShards({shardA}); auto status = ShardingCatalogManager::get(operationContext()) ->removeShardFromZone(operationContext(), "b", "z"); @@ -212,7 +212,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveZoneFromShardShouldOnlyRemoveZoneOnSpecifi shardB.setHost("b:1234"); shardB.setTags({"y", "z"}); - setupShards({shardA, shardB}).transitional_ignore(); + setupShards({shardA, shardB}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->removeShardFromZone(operationContext(), shardB.getName(), "z")); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp index 8ebd34de481..cf2df6fbb9c 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp @@ -129,7 +129,7 @@ TEST_F(RemoveShardTest, RemoveShardAnotherShardDraining) { shard2.setMaxSizeMB(100); shard2.setState(ShardType::ShardState::kShardAware); - ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2})); + setupShards(std::vector<ShardType>{shard1, shard2}); auto result = assertGet(ShardingCatalogManager::get(operationContext()) ->removeShard(operationContext(), shard1.getName())); @@ -151,7 +151,7 @@ TEST_F(RemoveShardTest, RemoveShardCantRemoveLastShard) { shard1.setMaxSizeMB(100); shard1.setState(ShardType::ShardState::kShardAware); - ASSERT_OK(setupShards(std::vector<ShardType>{shard1})); + setupShards(std::vector<ShardType>{shard1}); ASSERT_EQUALS(ErrorCodes::IllegalOperation, ShardingCatalogManager::get(operationContext()) @@ -172,7 +172,7 @@ TEST_F(RemoveShardTest, RemoveShardStartDraining) { shard2.setMaxSizeMB(100); shard2.setState(ShardType::ShardState::kShardAware); - ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2})); + setupShards(std::vector<ShardType>{shard1, shard2}); auto result = assertGet(ShardingCatalogManager::get(operationContext()) ->removeShard(operationContext(), shard1.getName())); @@ -208,9 +208,9 @@ TEST_F(RemoveShardTest, 
RemoveShardStillDrainingChunksRemaining) { ChunkVersion(1, 3, epoch), shard1.getName()); - ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2})); + setupShards(std::vector<ShardType>{shard1, shard2}); setupDatabase("testDB", shard1.getName(), true); - ASSERT_OK(setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3})); + setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3}); auto startedResult = assertGet(ShardingCatalogManager::get(operationContext()) ->removeShard(operationContext(), shard1.getName())); @@ -237,7 +237,7 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingDatabasesRemaining) { shard2.setMaxSizeMB(100); shard2.setState(ShardType::ShardState::kShardAware); - ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2})); + setupShards(std::vector<ShardType>{shard1, shard2}); setupDatabase("testDB", shard1.getName(), false); auto startedResult = assertGet(ShardingCatalogManager::get(operationContext()) @@ -281,9 +281,9 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) { std::vector<ChunkType> chunks{chunk1, chunk2, chunk3}; - ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2})); + setupShards(std::vector<ShardType>{shard1, shard2}); setupDatabase("testDB", shard2.getName(), false); - ASSERT_OK(setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3})); + setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3}); auto startedResult = assertGet(ShardingCatalogManager::get(operationContext()) ->removeShard(operationContext(), shard1.getName())); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp index 18419863c2f..76cf80aaa16 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp @@ -138,7 +138,7 @@ TEST_F(ConfigServerShardCollectionTest, Partially_Written_Chunks_Present) { ShardType shard; shard.setName("shard0"); shard.setHost("shardHost"); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase(kNamespace.db().toString(), shard.getName(), true); @@ -150,7 +150,7 @@ TEST_F(ConfigServerShardCollectionTest, Partially_Written_Chunks_Present) { chunk.setShard(shard.getName()); chunk.setMin(BSON("_id" << 1)); chunk.setMax(BSON("_id" << 5)); - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); ASSERT_THROWS_CODE(ShardingCatalogManager::get(operationContext()) ->shardCollection(operationContext(), @@ -178,7 +178,7 @@ TEST_F(ConfigServerShardCollectionTest, RangeSharding_ForMapReduce_NoInitialSpli targeter->setFindHostReturnValue(shardHost); targeterFactory()->addTargeterToReturn(ConnectionString(shardHost), std::move(targeter)); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase(kNamespace.db().toString(), shard.getName(), true); @@ -246,7 +246,7 @@ TEST_F(ConfigServerShardCollectionTest, RangeSharding_ForMapReduce_WithInitialSp targeter2->setFindHostReturnValue(shard2Host); targeterFactory()->addTargeterToReturn(ConnectionString(shard2Host), std::move(targeter2)); - ASSERT_OK(setupShards(vector<ShardType>{shard0, shard1, shard2})); + setupShards(vector<ShardType>{shard0, shard1, shard2}); setupDatabase(kNamespace.db().toString(), shard0.getName(), true); @@ -313,7 +313,7 @@ TEST_F(ConfigServerShardCollectionTest, RangeSharding_NoInitialSplitPoints_NoSpl targeter->setFindHostReturnValue(shardHost); 
targeterFactory()->addTargeterToReturn(ConnectionString(shardHost), std::move(targeter)); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase(kNamespace.db().toString(), shard.getName(), true); @@ -363,7 +363,7 @@ TEST_F(ConfigServerShardCollectionTest, RangeSharding_NoInitialSplitPoints_WithS targeter->setFindHostReturnValue(shardHost); targeterFactory()->addTargeterToReturn(ConnectionString(shardHost), std::move(targeter)); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase(kNamespace.db().toString(), shard.getName(), true); @@ -435,7 +435,7 @@ TEST_F(ConfigServerShardCollectionTest, RangeSharding_WithInitialSplitPoints_NoS targeter->setFindHostReturnValue(shardHost); targeterFactory()->addTargeterToReturn(ConnectionString(shardHost), std::move(targeter)); - ASSERT_OK(setupShards(vector<ShardType>{shard})); + setupShards(vector<ShardType>{shard}); setupDatabase(kNamespace.db().toString(), shard.getName(), true); @@ -543,7 +543,7 @@ TEST_F(CreateFirstChunksTest, NonEmptyCollection_SplitPoints_FromSplitVector_Man targeter->setFindHostReturnValue(connStr.getServers()[0]); targeterFactory()->addTargeterToReturn(connStr, std::move(targeter)); - ASSERT_OK(setupShards(kShards)); + setupShards(kShards); shardRegistry()->reload(operationContext()); auto future = launchAsync([&] { @@ -581,7 +581,7 @@ TEST_F(CreateFirstChunksTest, NonEmptyCollection_SplitPoints_FromClient_ManyChun targeter->setFindHostReturnValue(connStr.getServers()[0]); targeterFactory()->addTargeterToReturn(connStr, std::move(targeter)); - ASSERT_OK(setupShards(kShards)); + setupShards(kShards); shardRegistry()->reload(operationContext()); auto future = launchAsync([&] { @@ -616,7 +616,7 @@ TEST_F(CreateFirstChunksTest, NonEmptyCollection_WithZones_OneChunkToPrimary) { const std::vector<ShardType> kShards{ShardType("shard0", "rs0/shard0:123", {"TestZone"}), ShardType("shard1", "rs1/shard1:123", {"TestZone"}), ShardType("shard2", "rs2/shard2:123")}; - ASSERT_OK(setupShards(kShards)); + setupShards(kShards); shardRegistry()->reload(operationContext()); std::vector<BSONObj> splitPoints{}; @@ -657,7 +657,7 @@ TEST_F(CreateFirstChunksTest, EmptyCollection_SplitPoints_FromClient_ManyChunksD targeter->setFindHostReturnValue(connStr.getServers()[0]); targeterFactory()->addTargeterToReturn(connStr, std::move(targeter)); - ASSERT_OK(setupShards(kShards)); + setupShards(kShards); shardRegistry()->reload(operationContext()); auto future = launchAsync([&] { @@ -702,7 +702,7 @@ TEST_F(CreateFirstChunksTest, EmptyCollection_NoSplitPoints_OneChunkToPrimary) { targeter->setFindHostReturnValue(connStr.getServers()[0]); targeterFactory()->addTargeterToReturn(connStr, std::move(targeter)); - ASSERT_OK(setupShards(kShards)); + setupShards(kShards); shardRegistry()->reload(operationContext()); auto future = launchAsync([&] { @@ -736,7 +736,7 @@ TEST_F(CreateFirstChunksTest, EmptyCollection_WithZones_ManyChunksOnFirstZoneSha const std::vector<ShardType> kShards{ShardType("shard0", "rs0/shard0:123", {"TestZone"}), ShardType("shard1", "rs1/shard1:123", {"TestZone"}), ShardType("shard2", "rs2/shard2:123")}; - ASSERT_OK(setupShards(kShards)); + setupShards(kShards); shardRegistry()->reload(operationContext()); std::vector<BSONObj> splitPoints{}; diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp index 63095d20d45..478a8374407 100644 --- 
a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp @@ -60,7 +60,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) { auto chunkSplitPoint = BSON("a" << 5); std::vector<BSONObj> splitPoints{chunkSplitPoint}; - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -121,7 +121,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) { auto chunkSplitPoint2 = BSON("a" << 7); std::vector<BSONObj> splitPoints{chunkSplitPoint, chunkSplitPoint2}; - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -205,7 +205,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) { chunk2.setMin(BSON("a" << 10)); chunk2.setMax(BSON("a" << 20)); - ASSERT_OK(setupChunks({chunk, chunk2})); + setupChunks({chunk, chunk2}); ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -255,7 +255,7 @@ TEST_F(SplitChunkTest, PreConditionFailErrors) { auto chunkSplitPoint = BSON("a" << 5); splitPoints.push_back(chunkSplitPoint); - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); auto splitStatus = ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -282,7 +282,7 @@ TEST_F(SplitChunkTest, NonExisingNamespaceErrors) { std::vector<BSONObj> splitPoints{BSON("a" << 5)}; - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); auto splitStatus = ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -309,7 +309,7 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) { std::vector<BSONObj> splitPoints{BSON("a" << 5)}; - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); auto splitStatus = ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -336,7 +336,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) { std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 4)}; - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); auto splitStatus = ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -363,7 +363,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) { std::vector<BSONObj> splitPoints{BSON("a" << 0), BSON("a" << 5)}; - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); auto splitStatus = ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -390,7 +390,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) { std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 15)}; - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); auto splitStatus = ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), @@ -414,7 +414,7 @@ TEST_F(SplitChunkTest, SplitPointsWithDollarPrefixShouldFail) { auto chunkMax = BSON("a" << kMaxBSONKey); chunk.setMin(chunkMin); chunk.setMax(chunkMax); - ASSERT_OK(setupChunks({chunk})); + setupChunks({chunk}); ASSERT_NOT_OK(ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), |
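The change only makes sense if the fixture itself fails the test when setup cannot be performed. That fixture change is outside the src/mongo/db/s diffstat shown above, so the following is only a plausible sketch under stated assumptions: the helper name insertToConfigCollection and the serialization call toConfigBSON are assumptions about the fixture, not taken from this diff.

```cpp
// Hypothetical sketch of a void setupChunks(), assuming the fixture asserts
// internally instead of returning a Status for the caller to check.
void ConfigServerTestFixture::setupChunks(const std::vector<ChunkType>& chunks) {
    for (const auto& chunk : chunks) {
        // uassertStatusOK throws (failing the test) if the insert fails, so the
        // ASSERT_OK(...) / .transitional_ignore() at every call site goes away.
        uassertStatusOK(insertToConfigCollection(
            operationContext(), ChunkType::ConfigNS, chunk.toConfigBSON()));
    }
}
```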