From 0ae4abfd8774ea10675ebe6e114b2d3a9a7ebfdd Mon Sep 17 00:00:00 2001
From: Kaloian Manassiev
Date: Thu, 27 Jun 2019 09:33:33 -0400
Subject: SERVER-26531 Change ConfigServerTestFixture::setupChunks to `void`

... because having it return Status just so all callers can assert that
it's OK is useless.

(cherry picked from commit 4b955e6a1a35b1704a05aa29bd7e6ed42612333c)
---
 src/mongo/bson/bsonelement.cpp                       |  1 -
 src/mongo/db/keys_collection_manager_sharding.cpp    | 10 ++--------
 src/mongo/db/keys_collection_manager_sharding.h      | 10 +++-------
 .../sharding_catalog_add_shard_to_zone_test.cpp      |  8 ++++----
 ...harding_catalog_assign_key_range_to_zone_test.cpp |  2 +-
 .../sharding_catalog_commit_chunk_migration_test.cpp | 20 ++++++++++----------
 .../sharding_catalog_create_database_test.cpp        | 10 +++++-----
 .../sharding_catalog_enable_sharding_test.cpp        | 10 +++++-----
 .../s/catalog/sharding_catalog_merge_chunks_test.cpp | 18 +++++++++---------
 .../sharding_catalog_remove_shard_from_zone_test.cpp | 12 ++++++------
 .../s/catalog/sharding_catalog_remove_shard_test.cpp | 16 ++++++++--------
 .../sharding_catalog_shard_collection_test.cpp       | 10 +++++-----
 .../s/catalog/sharding_catalog_split_chunk_test.cpp  | 20 ++++++++++----------
 src/mongo/s/config_server_test_fixture.cpp           | 18 ++++--------------
 src/mongo/s/config_server_test_fixture.h             |  4 ++--
 15 files changed, 74 insertions(+), 95 deletions(-)

diff --git a/src/mongo/bson/bsonelement.cpp b/src/mongo/bson/bsonelement.cpp
index ebc9fac6981..96c5a338116 100644
--- a/src/mongo/bson/bsonelement.cpp
+++ b/src/mongo/bson/bsonelement.cpp
@@ -798,7 +798,6 @@ void BSONElement::toString(
                 s << toHex(data, len) << ")";
             }
         } break;
-
         case bsonTimestamp: {
             // Convert from Milliseconds to Seconds for consistent Timestamp printing.
             auto secs = duration_cast<Seconds>(timestampTime().toDurationSinceEpoch());
diff --git a/src/mongo/db/keys_collection_manager_sharding.cpp b/src/mongo/db/keys_collection_manager_sharding.cpp
index c94be209d2b..e149e14cd67 100644
--- a/src/mongo/db/keys_collection_manager_sharding.cpp
+++ b/src/mongo/db/keys_collection_manager_sharding.cpp
@@ -159,7 +159,7 @@ void KeysCollectionManagerSharding::stopMonitoring() {
 
 void KeysCollectionManagerSharding::enableKeyGenerator(OperationContext* opCtx, bool doEnable) {
     if (doEnable) {
-        _refresher.switchFunc(opCtx, [this](OperationContext* opCtx) {
+        _refresher.setFunc([this](OperationContext* opCtx) {
             KeysCollectionCacheReaderAndUpdater keyGenerator(
                 _purpose, _client.get(), _keyValidForInterval);
             auto keyGenerationStatus = keyGenerator.refresh(opCtx);
@@ -178,8 +178,7 @@ void KeysCollectionManagerSharding::enableKeyGenerator(OperationContext* opCtx,
             return cacheRefreshStatus;
         });
     } else {
-        _refresher.switchFunc(
-            opCtx, [this](OperationContext* opCtx) { return _keysCache.refresh(opCtx); });
+        _refresher.setFunc([this](OperationContext* opCtx) { return _keysCache.refresh(opCtx); });
     }
 }
 
@@ -313,11 +312,6 @@ void KeysCollectionManagerSharding::PeriodicRunner::setFunc(RefreshFunc newRefre
     _refreshNeededCV.notify_all();
 }
 
-void KeysCollectionManagerSharding::PeriodicRunner::switchFunc(OperationContext* opCtx,
-                                                               RefreshFunc newRefreshStrategy) {
-    setFunc(newRefreshStrategy);
-}
-
 void KeysCollectionManagerSharding::PeriodicRunner::start(ServiceContext* service,
                                                           const std::string& threadName,
                                                           Milliseconds refreshInterval) {
diff --git a/src/mongo/db/keys_collection_manager_sharding.h b/src/mongo/db/keys_collection_manager_sharding.h
index d3c4239d4a5..63ff3cdbcc0 100644
--- a/src/mongo/db/keys_collection_manager_sharding.h
+++ b/src/mongo/db/keys_collection_manager_sharding.h
@@ -134,16 +134,12 @@ private:
 
         /**
          * Sets the refresh function to use.
-         * Should only be used to bootstrap this refresher with initial strategy.
+         *
+         * Should only be used to bootstrap this refresher with initial strategy. Also waits to make
+         * sure that the old strategy is not being used and will no longer be used after this call.
          */
         void setFunc(RefreshFunc newRefreshStrategy);
 
-        /**
-         * Switches the current strategy with a new one. This also waits to make sure that the old
-         * strategy is not being used and will no longer be used after this call.
-         */
-        void switchFunc(OperationContext* opCtx, RefreshFunc newRefreshStrategy);
-
         /**
          * Starts the refresh thread.
          */
diff --git a/src/mongo/s/catalog/sharding_catalog_add_shard_to_zone_test.cpp b/src/mongo/s/catalog/sharding_catalog_add_shard_to_zone_test.cpp
index cfef6b9fd21..0dc89b36a4d 100644
--- a/src/mongo/s/catalog/sharding_catalog_add_shard_to_zone_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_add_shard_to_zone_test.cpp
@@ -50,7 +50,7 @@ TEST_F(AddShardToZoneTest, AddSingleZoneToExistingShardShouldSucceed) {
     shard.setName("a");
     shard.setHost("a:1234");
 
-    setupShards({shard}).transitional_ignore();
+    setupShards({shard});
 
     ASSERT_OK(ShardingCatalogManager::get(operationContext())
                   ->addShardToZone(operationContext(), shard.getName(), "z"));
@@ -69,7 +69,7 @@ TEST_F(AddShardToZoneTest, AddZoneToShardWithSameTagShouldSucceed) {
     shard.setHost("a:1234");
     shard.setTags({"x", "y"});
 
-    setupShards({shard}).transitional_ignore();
+    setupShards({shard});
 
     ASSERT_OK(ShardingCatalogManager::get(operationContext())
                   ->addShardToZone(operationContext(), shard.getName(), "x"));
@@ -90,7 +90,7 @@ TEST_F(AddShardToZoneTest, AddZoneToShardWithNewTagShouldAppend) {
     shard.setHost("a:1234");
     shard.setTags({"x"});
 
-    setupShards({shard}).transitional_ignore();
+    setupShards({shard});
 
     ASSERT_OK(ShardingCatalogManager::get(operationContext())
                   ->addShardToZone(operationContext(), shard.getName(), "y"));
@@ -110,7 +110,7 @@ TEST_F(AddShardToZoneTest, AddSingleZoneToNonExistingShardShouldFail) {
     shard.setName("a");
     shard.setHost("a:1234");
 
-    setupShards({shard}).transitional_ignore();
+    setupShards({shard});
 
     auto status = ShardingCatalogManager::get(operationContext())
                       ->addShardToZone(operationContext(), "b", "z");
diff --git a/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp b/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
index 31fdbfea430..5b794937265 100644
--- a/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
@@ -63,7 +63,7 @@ public:
         shard.setHost("a:1234");
        shard.setTags({zoneName()});
 
-        setupShards({shard}).transitional_ignore();
+        setupShards({shard});
 
         CollectionType shardedCollection;
         shardedCollection.setNs(shardedNS());
diff --git a/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp b/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp
index db9bc5f213b..7fe11c44403 100644
--- a/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp
@@ -132,7 +132,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtl) {
     shard1.setName("shard1");
     shard1.setHost("shard1:12");
 
-    setupShards({shard0, shard1}).transitional_ignore();
+    setupShards({shard0, shard1});
 
     int origMajorVersion = 15;
     auto const origVersion = ChunkVersion(origMajorVersion, 4, OID::gen());
@@ -148,7 +148,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtl) {
     auto chunkMax = BSON("a" << 10);
     chunk0.setMax(chunkMax);
 
-    setupChunks({chunk0}).transitional_ignore();
+    setupChunks({chunk0});
 
     StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
                                          ->commitChunkMigration(operationContext(),
@@ -188,7 +188,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
     shard1.setName("shard1");
     shard1.setHost("shard1:12");
 
-    setupShards({shard0, shard1}).transitional_ignore();
+    setupShards({shard0, shard1});
 
     int origMajorVersion = 12;
     auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
@@ -213,7 +213,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
     auto chunkMaxax = BSON("a" << 20);
     chunk1.setMax(chunkMaxax);
 
-    setupChunks({chunk0, chunk1}).transitional_ignore();
+    setupChunks({chunk0, chunk1});
 
     StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
                                          ->commitChunkMigration(operationContext(),
@@ -239,7 +239,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
     shard1.setName("shard1");
     shard1.setHost("shard1:12");
 
-    setupShards({shard0, shard1}).transitional_ignore();
+    setupShards({shard0, shard1});
 
     int origMajorVersion = 12;
     auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
@@ -266,7 +266,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
     chunk1.setMax(chunkMaxax);
 
     // get version from the control chunk this time
-    setupChunks({chunk1, chunk0}).transitional_ignore();
+    setupChunks({chunk1, chunk0});
 
     StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
                                          ->commitChunkMigration(operationContext(),
@@ -292,7 +292,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
     shard1.setName("shard1");
     shard1.setHost("shard1:12");
 
-    setupShards({shard0, shard1}).transitional_ignore();
+    setupShards({shard0, shard1});
 
     int origMajorVersion = 12;
     auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
@@ -317,7 +317,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
     auto chunkMaxax = BSON("a" << 20);
     chunk1.setMax(chunkMaxax);
 
-    setupChunks({chunk1}).transitional_ignore();
+    setupChunks({chunk1});
 
     StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
                                          ->commitChunkMigration(operationContext(),
@@ -343,7 +343,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing1) {
     shard1.setName("shard1");
     shard1.setHost("shard1:12");
 
-    setupShards({shard0, shard1}).transitional_ignore();
+    setupShards({shard0, shard1});
 
     int origMajorVersion = 12;
     auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
@@ -368,7 +368,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing1) {
     auto chunkMaxax = BSON("a" << 20);
     chunk1.setMax(chunkMaxax);
 
-    setupChunks({chunk0}).transitional_ignore();
+    setupChunks({chunk0});
 
     StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
                                          ->commitChunkMigration(operationContext(),
diff --git a/src/mongo/s/catalog/sharding_catalog_create_database_test.cpp b/src/mongo/s/catalog/sharding_catalog_create_database_test.cpp
index 92e43edd0e6..b686779deb3 100644
--- a/src/mongo/s/catalog/sharding_catalog_create_database_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_create_database_test.cpp
@@ -71,17 +71,17 @@ TEST_F(CreateDatabaseTest, createDatabaseSuccess) {
     ShardType s0;
     s0.setName("shard0000");
     s0.setHost("ShardHost0:27017");
-    ASSERT_OK(setupShards(vector<ShardType>{s0}));
+    setupShards(vector<ShardType>{s0});
 
     ShardType s1;
     s1.setName("shard0001");
     s1.setHost("ShardHost1:27017");
-    ASSERT_OK(setupShards(vector<ShardType>{s1}));
+    setupShards(vector<ShardType>{s1});
 
     ShardType s2;
     s2.setName("shard0002");
     s2.setHost("ShardHost2:27017");
-    ASSERT_OK(setupShards(vector<ShardType>{s2}));
+    setupShards(vector<ShardType>{s2});
 
     // Prime the shard registry with information about the existing shards
     shardRegistry()->reload(operationContext());
@@ -160,7 +160,7 @@ TEST_F(CreateDatabaseTest, createDatabaseDBExists) {
     shard.setName("shard0");
     shard.setHost("shard0:12");
 
-    ASSERT_OK(setupShards(vector<ShardType>{shard}));
+    setupShards(vector<ShardType>{shard});
 
     setupDatabase(dbname, shard.getName(), false);
 
@@ -175,7 +175,7 @@ TEST_F(CreateDatabaseTest, createDatabaseDBExistsDifferentCase) {
     shard.setName("shard0");
     shard.setHost("shard0:12");
 
-    ASSERT_OK(setupShards(vector<ShardType>{shard}));
+    setupShards(vector<ShardType>{shard});
 
     setupDatabase(dbnameDiffCase, shard.getName(), false);
 
diff --git a/src/mongo/s/catalog/sharding_catalog_enable_sharding_test.cpp b/src/mongo/s/catalog/sharding_catalog_enable_sharding_test.cpp
index daec9aa1bb4..66cc354b4f5 100644
--- a/src/mongo/s/catalog/sharding_catalog_enable_sharding_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_enable_sharding_test.cpp
@@ -71,7 +71,7 @@ TEST_F(EnableShardingTest, noDBExists) {
     shard.setName("shard0");
     shard.setHost("shard0:12");
 
-    ASSERT_OK(setupShards(vector<ShardType>{shard}));
+    setupShards(vector<ShardType>{shard});
 
     auto shardTargeter = RemoteCommandTargeterMock::get(
         uassertStatusOK(shardRegistry()->getShard(operationContext(), ShardId("shard0")))
            ->getTargeter());
@@ -109,7 +109,7 @@ TEST_F(EnableShardingTest, dbExistsWithDifferentCase) {
     ShardType shard;
     shard.setName("shard0");
     shard.setHost("shard0:12");
-    ASSERT_OK(setupShards(vector<ShardType>{shard}));
+    setupShards(vector<ShardType>{shard});
     setupDatabase("Db3", shard.getName(), false);
     ASSERT_THROWS_CODE(
         ShardingCatalogManager::get(operationContext())->enableSharding(operationContext(), "db3"),
@@ -121,7 +121,7 @@ TEST_F(EnableShardingTest, dbExists) {
     ShardType shard;
     shard.setName("shard0");
     shard.setHost("shard0:12");
-    ASSERT_OK(setupShards(vector<ShardType>{shard}));
+    setupShards(vector<ShardType>{shard});
     setupDatabase("db4", shard.getName(), false);
     ShardingCatalogManager::get(operationContext())->enableSharding(operationContext(), "db4");
 }
@@ -130,7 +130,7 @@ TEST_F(EnableShardingTest, succeedsWhenTheDatabaseIsAlreadySharded) {
     ShardType shard;
     shard.setName("shard0");
     shard.setHost("shard0:12");
-    ASSERT_OK(setupShards(vector<ShardType>{shard}));
+    setupShards(vector<ShardType>{shard});
     setupDatabase("db5", shard.getName(), true);
     ShardingCatalogManager::get(operationContext())->enableSharding(operationContext(), "db5");
 }
@@ -140,7 +140,7 @@ TEST_F(EnableShardingTest, dbExistsInvalidFormat) {
     shard.setName("shard0");
     shard.setHost("shard0:12");
 
-    ASSERT_OK(setupShards(vector<ShardType>{shard}));
+    setupShards(vector<ShardType>{shard});
 
     // Set up database with bad type for primary field.
     ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
diff --git a/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp b/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
index 198511e6bcb..668741de055 100644
--- a/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
@@ -67,7 +67,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
 
     std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
 
-    setupChunks({chunk, chunk2}).transitional_ignore();
+    setupChunks({chunk, chunk2});
 
     ASSERT_OK(
         ShardingCatalogManager::get(operationContext())
@@ -129,7 +129,7 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
     // Record chunk boundaries for passing into commitChunkMerge
     std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkBound2, chunkMax};
 
-    setupChunks({chunk, chunk2, chunk3}).transitional_ignore();
+    setupChunks({chunk, chunk2, chunk3});
 
     ASSERT_OK(
         ShardingCatalogManager::get(operationContext())
@@ -195,7 +195,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
     otherChunk.setMin(BSON("a" << 10));
     otherChunk.setMax(BSON("a" << 20));
 
-    setupChunks({chunk, chunk2, otherChunk}).transitional_ignore();
+    setupChunks({chunk, chunk2, otherChunk});
 
     ASSERT_OK(ShardingCatalogManager::get(operationContext())
                   ->commitChunkMerge(
@@ -256,7 +256,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
     otherChunk.setMin(BSON("a" << 10));
     otherChunk.setMax(BSON("a" << 20));
 
-    setupChunks({chunk, chunk2, otherChunk}).transitional_ignore();
+    setupChunks({chunk, chunk2, otherChunk});
 
     ASSERT_OK(
         ShardingCatalogManager::get(operationContext())
@@ -317,7 +317,7 @@ TEST_F(MergeChunkTest, NonExistingNamespace) {
     // Record chunk boundaries for passing into commitChunkMerge
     std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
 
-    setupChunks({chunk, chunk2}).transitional_ignore();
+    setupChunks({chunk, chunk2});
 
     auto mergeStatus = ShardingCatalogManager::get(operationContext())
                            ->commitChunkMerge(operationContext(),
@@ -351,7 +351,7 @@ TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
     // Record chunk baoundaries for passing into commitChunkMerge
     std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
 
-    setupChunks({chunk, chunk2}).transitional_ignore();
+    setupChunks({chunk, chunk2});
 
     auto mergeStatus =
         ShardingCatalogManager::get(operationContext())
@@ -389,7 +389,7 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedFailsPrecondition) {
     mergedChunk.setVersion(mergedVersion);
     mergedChunk.setMax(chunkMax);
 
-    setupChunks({mergedChunk}).transitional_ignore();
+    setupChunks({mergedChunk});
 
     ASSERT_EQ(
         ErrorCodes::BadValue,
@@ -447,7 +447,7 @@ TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFails) {
         chunk.setVersion(version);
         originalChunks.push_back(chunk);
 
-        setupChunks(originalChunks).transitional_ignore();
+        setupChunks(originalChunks);
     }
 
     ASSERT_EQ(ErrorCodes::InvalidOptions,
@@ -482,7 +482,7 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) {
     chunk3.setMin(chunkBound2);
     chunk3.setMax(chunkMax);
 
-    ASSERT_OK(setupChunks({chunk1, chunk2, chunk3}));
+    setupChunks({chunk1, chunk2, chunk3});
 
     // Record chunk boundaries for passing into commitChunkMerge
     std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound1, chunkBound2, chunkMax};
diff --git a/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp b/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp
index 65f9fb3ecac..aa32f379bcc 100644
--- a/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp
@@ -51,7 +51,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveZoneThatNoLongerExistsShouldNotError) {
     shard.setName("a");
     shard.setHost("a:1234");
 
-    setupShards({shard}).transitional_ignore();
+    setupShards({shard});
 
     ASSERT_OK(ShardingCatalogManager::get(operationContext())
                   ->removeShardFromZone(operationContext(), shard.getName(), "z"));
@@ -73,7 +73,7 @@ TEST_F(RemoveShardFromZoneTest, RemovingZoneThatIsOnlyReferencedByAnotherShardSh
     shardB.setName("b");
     shardB.setHost("b:1234");
 
-    setupShards({shardA, shardB}).transitional_ignore();
+    setupShards({shardA, shardB});
 
     ASSERT_OK(ShardingCatalogManager::get(operationContext())
                   ->removeShardFromZone(operationContext(), shardB.getName(), "z"));
@@ -106,7 +106,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveLastZoneFromShardShouldSucceedWhenNoChunks
     shardB.setName("b");
     shardB.setHost("b:1234");
 
-    setupShards({shardA, shardB}).transitional_ignore();
+    setupShards({shardA, shardB});
 
     // Insert a chunk range document referring to a different zone
     TagsType tagDoc;
@@ -148,7 +148,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveLastZoneFromShardShouldFailWhenAChunkRefer
     shardB.setName("b");
     shardB.setHost("b:1234");
 
-    setupShards({shardA, shardB}).transitional_ignore();
+    setupShards({shardA, shardB});
 
     TagsType tagDoc;
     tagDoc.setNS("test.foo");
@@ -188,7 +188,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveZoneShouldFailIfShardDoesntExist) {
     shardA.setHost("a:1234");
     shardA.setTags({"z"});
 
-    setupShards({shardA}).transitional_ignore();
+    setupShards({shardA});
 
     auto status = ShardingCatalogManager::get(operationContext())
                       ->removeShardFromZone(operationContext(), "b", "z");
@@ -215,7 +215,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveZoneFromShardShouldOnlyRemoveZoneOnSpecifi
     shardB.setHost("b:1234");
     shardB.setTags({"y", "z"});
 
-    setupShards({shardA, shardB}).transitional_ignore();
+    setupShards({shardA, shardB});
 
     ASSERT_OK(ShardingCatalogManager::get(operationContext())
                   ->removeShardFromZone(operationContext(), shardB.getName(), "z"));
diff --git a/src/mongo/s/catalog/sharding_catalog_remove_shard_test.cpp b/src/mongo/s/catalog/sharding_catalog_remove_shard_test.cpp
index 53f9a799540..cd112f8e4df 100644
--- a/src/mongo/s/catalog/sharding_catalog_remove_shard_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_remove_shard_test.cpp
@@ -132,7 +132,7 @@ TEST_F(RemoveShardTest, RemoveShardAnotherShardDraining) {
     shard2.setMaxSizeMB(100);
     shard2.setState(ShardType::ShardState::kShardAware);
 
-    ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2}));
+    setupShards(std::vector<ShardType>{shard1, shard2});
 
     auto result = assertGet(ShardingCatalogManager::get(operationContext())
                                 ->removeShard(operationContext(), shard1.getName()));
@@ -154,7 +154,7 @@ TEST_F(RemoveShardTest, RemoveShardCantRemoveLastShard) {
     shard1.setMaxSizeMB(100);
     shard1.setState(ShardType::ShardState::kShardAware);
 
-    ASSERT_OK(setupShards(std::vector<ShardType>{shard1}));
+    setupShards(std::vector<ShardType>{shard1});
 
     ASSERT_EQUALS(ErrorCodes::IllegalOperation,
                   ShardingCatalogManager::get(operationContext())
@@ -175,7 +175,7 @@ TEST_F(RemoveShardTest, RemoveShardStartDraining) {
     shard2.setMaxSizeMB(100);
     shard2.setState(ShardType::ShardState::kShardAware);
 
-    ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2}));
+    setupShards(std::vector<ShardType>{shard1, shard2});
 
     auto result = assertGet(ShardingCatalogManager::get(operationContext())
                                 ->removeShard(operationContext(), shard1.getName()));
@@ -211,9 +211,9 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingChunksRemaining) {
                          ChunkVersion(1, 3, epoch),
                          shard1.getName());
 
-    ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2}));
+    setupShards(std::vector<ShardType>{shard1, shard2});
     setupDatabase("testDB", shard1.getName(), true);
-    ASSERT_OK(setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3}));
+    setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3});
 
     auto startedResult = assertGet(ShardingCatalogManager::get(operationContext())
                                        ->removeShard(operationContext(), shard1.getName()));
@@ -240,7 +240,7 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingDatabasesRemaining) {
     shard2.setMaxSizeMB(100);
     shard2.setState(ShardType::ShardState::kShardAware);
 
-    ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2}));
+    setupShards(std::vector<ShardType>{shard1, shard2});
     setupDatabase("testDB", shard1.getName(), false);
 
     auto startedResult = assertGet(ShardingCatalogManager::get(operationContext())
@@ -284,9 +284,9 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
 
     std::vector<ChunkType> chunks{chunk1, chunk2, chunk3};
 
-    ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2}));
+    setupShards(std::vector<ShardType>{shard1, shard2});
     setupDatabase("testDB", shard2.getName(), false);
-    ASSERT_OK(setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3}));
+    setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3});
 
     auto startedResult = assertGet(ShardingCatalogManager::get(operationContext())
                                        ->removeShard(operationContext(), shard1.getName()));
diff --git a/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp b/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
index ba49bafc1ae..cb0cd4eca5e 100644
--- a/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
@@ -119,7 +119,7 @@ TEST_F(ShardCollectionTest, anotherMongosSharding) {
     ShardType shard;
     shard.setName("shard0");
     shard.setHost("shardHost");
-    ASSERT_OK(setupShards(vector<ShardType>{shard}));
+    setupShards(vector<ShardType>{shard});
 
     setupDatabase(nss.db().toString(), shard.getName(), true);
 
@@ -131,7 +131,7 @@ TEST_F(ShardCollectionTest, anotherMongosSharding) {
     chunk.setShard(shard.getName());
     chunk.setMin(BSON("_id" << 1));
     chunk.setMax(BSON("_id" << 5));
-    ASSERT_OK(setupChunks({chunk}));
+    setupChunks({chunk});
 
     ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
     BSONObj defaultCollation;
@@ -163,7 +163,7 @@ TEST_F(ShardCollectionTest, noInitialChunksOrData) {
     targeter->setFindHostReturnValue(shardHost);
     targeterFactory()->addTargeterToReturn(ConnectionString(shardHost), std::move(targeter));
 
-    ASSERT_OK(setupShards(vector<ShardType>{shard}));
+    setupShards(vector<ShardType>{shard});
 
     const auto nss = NamespaceString("db1.foo");
 
@@ -235,7 +235,7 @@ TEST_F(ShardCollectionTest, withInitialChunks) {
     targeter2->setFindHostReturnValue(shard2Host);
     targeterFactory()->addTargeterToReturn(ConnectionString(shard2Host), std::move(targeter2));
 
-    ASSERT_OK(setupShards(vector<ShardType>{shard0, shard1, shard2}));
+    setupShards(vector<ShardType>{shard0, shard1, shard2});
 
     const auto nss = NamespaceString("db1.foo");
     string ns = "db1.foo";
@@ -342,7 +342,7 @@ TEST_F(ShardCollectionTest, withInitialData) {
     targeter->setFindHostReturnValue(shardHost);
     targeterFactory()->addTargeterToReturn(ConnectionString(shardHost), std::move(targeter));
 
-    ASSERT_OK(setupShards(vector<ShardType>{shard}));
+    setupShards(vector<ShardType>{shard});
 
     const auto nss = NamespaceString("db1.foo");
     string ns = "db1.foo";
diff --git a/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp b/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
index 40957129ece..88cb5872967 100644
--- a/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
@@ -59,7 +59,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
     auto chunkSplitPoint = BSON("a" << 5);
     std::vector<BSONObj> splitPoints{chunkSplitPoint};
 
-    ASSERT_OK(setupChunks({chunk}));
+    setupChunks({chunk});
 
     ASSERT_OK(ShardingCatalogManager::get(operationContext())
                   ->commitChunkSplit(operationContext(),
@@ -109,7 +109,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
     auto chunkSplitPoint2 = BSON("a" << 7);
     std::vector<BSONObj> splitPoints{chunkSplitPoint, chunkSplitPoint2};
 
-    ASSERT_OK(setupChunks({chunk}));
+    setupChunks({chunk});
 
     ASSERT_OK(ShardingCatalogManager::get(operationContext())
                   ->commitChunkSplit(operationContext(),
@@ -180,7 +180,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
     chunk2.setMin(BSON("a" << 10));
     chunk2.setMax(BSON("a" << 20));
 
-    ASSERT_OK(setupChunks({chunk, chunk2}));
+    setupChunks({chunk, chunk2});
 
     ASSERT_OK(ShardingCatalogManager::get(operationContext())
                   ->commitChunkSplit(operationContext(),
@@ -230,7 +230,7 @@ TEST_F(SplitChunkTest, PreConditionFailErrors) {
     auto chunkSplitPoint = BSON("a" << 5);
     splitPoints.push_back(chunkSplitPoint);
 
-    ASSERT_OK(setupChunks({chunk}));
+    setupChunks({chunk});
 
     auto splitStatus = ShardingCatalogManager::get(operationContext())
                            ->commitChunkSplit(operationContext(),
@@ -257,7 +257,7 @@ TEST_F(SplitChunkTest, NonExisingNamespaceErrors) {
 
     std::vector<BSONObj> splitPoints{BSON("a" << 5)};
 
-    ASSERT_OK(setupChunks({chunk}));
+    setupChunks({chunk});
 
     auto splitStatus = ShardingCatalogManager::get(operationContext())
                            ->commitChunkSplit(operationContext(),
@@ -284,7 +284,7 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
 
     std::vector<BSONObj> splitPoints{BSON("a" << 5)};
 
-    ASSERT_OK(setupChunks({chunk}));
+    setupChunks({chunk});
 
     auto splitStatus = ShardingCatalogManager::get(operationContext())
                            ->commitChunkSplit(operationContext(),
@@ -311,7 +311,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) {
 
     std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 4)};
 
-    ASSERT_OK(setupChunks({chunk}));
+    setupChunks({chunk});
 
     auto splitStatus = ShardingCatalogManager::get(operationContext())
                            ->commitChunkSplit(operationContext(),
@@ -338,7 +338,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) {
 
     std::vector<BSONObj> splitPoints{BSON("a" << 0), BSON("a" << 5)};
 
-    ASSERT_OK(setupChunks({chunk}));
+    setupChunks({chunk});
 
     auto splitStatus = ShardingCatalogManager::get(operationContext())
                            ->commitChunkSplit(operationContext(),
@@ -365,7 +365,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) {
 
     std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 15)};
 
-    ASSERT_OK(setupChunks({chunk}));
+    setupChunks({chunk});
 
     auto splitStatus = ShardingCatalogManager::get(operationContext())
                            ->commitChunkSplit(operationContext(),
@@ -389,7 +389,7 @@ TEST_F(SplitChunkTest, SplitPointsWithDollarPrefixShouldFail) {
     auto chunkMax = BSON("a" << kMaxBSONKey);
     chunk.setMin(chunkMin);
     chunk.setMax(chunkMax);
-    ASSERT_OK(setupChunks({chunk}));
+    setupChunks({chunk});
 
     ASSERT_NOT_OK(ShardingCatalogManager::get(operationContext())
                       ->commitChunkSplit(operationContext(),
diff --git a/src/mongo/s/config_server_test_fixture.cpp b/src/mongo/s/config_server_test_fixture.cpp
index 23520fb283f..aea46c5bbc7 100644
--- a/src/mongo/s/config_server_test_fixture.cpp
+++ b/src/mongo/s/config_server_test_fixture.cpp
@@ -288,16 +288,11 @@ StatusWith<BSONObj> ConfigServerTestFixture::findOneOnConfigCollection(Operation
     return findResult.docs.front().getOwned();
 }
 
-Status ConfigServerTestFixture::setupShards(const std::vector<ShardType>& shards) {
+void ConfigServerTestFixture::setupShards(const std::vector<ShardType>& shards) {
     const NamespaceString shardNS(ShardType::ConfigNS);
     for (const auto& shard : shards) {
-        auto insertStatus = insertToConfigCollection(operationContext(), shardNS, shard.toBSON());
-        if (!insertStatus.isOK()) {
-            return insertStatus;
-        }
+        ASSERT_OK(insertToConfigCollection(operationContext(), shardNS, shard.toBSON()));
     }
-
-    return Status::OK();
 }
 
 StatusWith<ShardType> ConfigServerTestFixture::getShardDoc(OperationContext* opCtx,
@@ -315,16 +310,11 @@ StatusWith<ShardType> ConfigServerTestFixture::getShardDoc(OperationContext* opC
     return ShardType::fromBSON(doc.getValue());
 }
 
-Status ConfigServerTestFixture::setupChunks(const std::vector<ChunkType>& chunks) {
+void ConfigServerTestFixture::setupChunks(const std::vector<ChunkType>& chunks) {
     const NamespaceString chunkNS(ChunkType::ConfigNS);
     for (const auto& chunk : chunks) {
-        auto insertStatus =
-            insertToConfigCollection(operationContext(), chunkNS, chunk.toConfigBSON());
-        if (!insertStatus.isOK())
-            return insertStatus;
+        ASSERT_OK(insertToConfigCollection(operationContext(), chunkNS, chunk.toConfigBSON()));
     }
-
-    return Status::OK();
 }
 
 StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(OperationContext* opCtx,
diff --git a/src/mongo/s/config_server_test_fixture.h b/src/mongo/s/config_server_test_fixture.h
index 3ef0ed17c50..3bad8593d6a 100644
--- a/src/mongo/s/config_server_test_fixture.h
+++ b/src/mongo/s/config_server_test_fixture.h
@@ -91,7 +91,7 @@ public:
     /**
     * Setup the config.shards collection to contain the given shards.
     */
-    Status setupShards(const std::vector<ShardType>& shards);
+    void setupShards(const std::vector<ShardType>& shards);
 
     /**
     * Retrieves the shard document from the config server.
@@ -102,7 +102,7 @@ public:
     /**
    * Setup the config.chunks collection to contain the given chunks.
    */
-    Status setupChunks(const std::vector<ChunkType>& chunks);
+    void setupChunks(const std::vector<ChunkType>& chunks);
 
     /**
    * Retrieves the chunk document from the config server.
-- 
cgit v1.2.1
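
For reference, the shape of this refactoring in isolation, as a minimal
self-contained sketch. The Status, Doc, and insert() names below are
hypothetical stand-ins, not MongoDB types; only the before/after pattern is
taken from the patch.

// before_after_sketch.cpp -- sketch of the setupShards()/setupChunks() change.
#include <cassert>
#include <string>
#include <vector>

struct Status {
    bool ok;
    std::string reason;
};

struct Doc {
    std::string payload;
};

// Stand-in for insertToConfigCollection(); fails on empty payloads.
Status insert(const Doc& d) {
    return d.payload.empty() ? Status{false, "empty doc"} : Status{true, ""};
}

// Before: the helper returns Status, so every caller must remember to check
// it -- the tests wrapped each call in ASSERT_OK(...) or discarded the
// result with .transitional_ignore().
Status setupDocsReturningStatus(const std::vector<Doc>& docs) {
    for (const auto& d : docs) {
        auto s = insert(d);
        if (!s.ok)
            return s;
    }
    return Status{true, ""};
}

// After: the helper asserts internally and returns void, so a failed insert
// stops the test at the offending document and callers shrink to a plain
// setupDocs(docs); call.
void setupDocs(const std::vector<Doc>& docs) {
    for (const auto& d : docs) {
        assert(insert(d).ok);  // the patch uses the unittest ASSERT_OK() here
    }
}

int main() {
    std::vector<Doc> docs{{"a"}, {"b"}};
    assert(setupDocsReturningStatus(docs).ok);  // old style: caller checks
    setupDocs(docs);                            // new style: nothing to check
    return 0;
}

Moving the assertion into the fixture keeps the failure point at the bad
insert while deleting the per-call-site boilerplate, which is exactly what the
patch does to setupShards() and setupChunks().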