author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2019-06-27 09:33:33 -0400
committer Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2019-06-30 10:20:24 -0400
commit    0ae4abfd8774ea10675ebe6e114b2d3a9a7ebfdd (patch)
tree      27e3534e3a98a89d65f9a952ec1c915a1fc45465
parent    f37383b66613831046041e1c0fdfa7ed2fbc2da0 (diff)
download  mongo-0ae4abfd8774ea10675ebe6e114b2d3a9a7ebfdd.tar.gz
SERVER-26531 Change ConfigServerTestFixture::setupChunks to return `void`
... because having it return Status just so all callers can assert that it's OK is useless. (cherry picked from commit 4b955e6a1a35b1704a05aa29bd7e6ed42612333c)
-rw-r--r--  src/mongo/bson/bsonelement.cpp                                          |  1
-rw-r--r--  src/mongo/db/keys_collection_manager_sharding.cpp                       | 10
-rw-r--r--  src/mongo/db/keys_collection_manager_sharding.h                         | 10
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_add_shard_to_zone_test.cpp         |  8
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp  |  2
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp    | 20
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_create_database_test.cpp           | 10
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_enable_sharding_test.cpp           | 10
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp              | 18
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp    | 12
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_remove_shard_test.cpp              | 16
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp          | 10
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp               | 20
-rw-r--r--  src/mongo/s/config_server_test_fixture.cpp                              | 18
-rw-r--r--  src/mongo/s/config_server_test_fixture.h                                |  4
15 files changed, 74 insertions(+), 95 deletions(-)
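
To make the effect of the change concrete, here is a minimal, self-contained sketch (not the real fixture code; Status, ASSERT_OK and insertDocument below are simplified stand-ins for the MongoDB types) contrasting the old Status-returning helper with the new void-returning one that asserts internally:

    // Standalone illustration of the API change: the setup helper now asserts
    // success itself and returns void, so callers no longer wrap it in ASSERT_OK.
    #include <cassert>
    #include <string>
    #include <vector>

    struct Status {
        bool ok;
        std::string reason;
    };

    #define ASSERT_OK(expr) assert((expr).ok)

    static Status insertDocument(const std::string& /*doc*/) {
        return {true, ""};  // pretend the config write always succeeds
    }

    // Old shape: every caller had to wrap the call in ASSERT_OK (or ignore the Status).
    Status setupChunksOld(const std::vector<std::string>& chunks) {
        for (const auto& chunk : chunks) {
            Status s = insertDocument(chunk);
            if (!s.ok)
                return s;
        }
        return {true, ""};
    }

    // New shape: the helper asserts internally and returns void.
    void setupChunksNew(const std::vector<std::string>& chunks) {
        for (const auto& chunk : chunks) {
            ASSERT_OK(insertDocument(chunk));
        }
    }

    int main() {
        ASSERT_OK(setupChunksOld({"chunk0", "chunk1"}));  // old call site
        setupChunksNew({"chunk0", "chunk1"});             // new call site
        return 0;
    }

Accordingly, the test call sites in the diffs below shrink from ASSERT_OK(setupChunks({chunk})) or setupChunks({chunk}).transitional_ignore() to a plain setupChunks({chunk}).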
diff --git a/src/mongo/bson/bsonelement.cpp b/src/mongo/bson/bsonelement.cpp
index ebc9fac6981..96c5a338116 100644
--- a/src/mongo/bson/bsonelement.cpp
+++ b/src/mongo/bson/bsonelement.cpp
@@ -798,7 +798,6 @@ void BSONElement::toString(
s << toHex(data, len) << ")";
}
} break;
-
case bsonTimestamp: {
// Convert from Milliseconds to Seconds for consistent Timestamp printing.
auto secs = duration_cast<Seconds>(timestampTime().toDurationSinceEpoch());
diff --git a/src/mongo/db/keys_collection_manager_sharding.cpp b/src/mongo/db/keys_collection_manager_sharding.cpp
index c94be209d2b..e149e14cd67 100644
--- a/src/mongo/db/keys_collection_manager_sharding.cpp
+++ b/src/mongo/db/keys_collection_manager_sharding.cpp
@@ -159,7 +159,7 @@ void KeysCollectionManagerSharding::stopMonitoring() {
void KeysCollectionManagerSharding::enableKeyGenerator(OperationContext* opCtx, bool doEnable) {
if (doEnable) {
- _refresher.switchFunc(opCtx, [this](OperationContext* opCtx) {
+ _refresher.setFunc([this](OperationContext* opCtx) {
KeysCollectionCacheReaderAndUpdater keyGenerator(
_purpose, _client.get(), _keyValidForInterval);
auto keyGenerationStatus = keyGenerator.refresh(opCtx);
@@ -178,8 +178,7 @@ void KeysCollectionManagerSharding::enableKeyGenerator(OperationContext* opCtx,
return cacheRefreshStatus;
});
} else {
- _refresher.switchFunc(
- opCtx, [this](OperationContext* opCtx) { return _keysCache.refresh(opCtx); });
+ _refresher.setFunc([this](OperationContext* opCtx) { return _keysCache.refresh(opCtx); });
}
}
@@ -313,11 +312,6 @@ void KeysCollectionManagerSharding::PeriodicRunner::setFunc(RefreshFunc newRefre
_refreshNeededCV.notify_all();
}
-void KeysCollectionManagerSharding::PeriodicRunner::switchFunc(OperationContext* opCtx,
- RefreshFunc newRefreshStrategy) {
- setFunc(newRefreshStrategy);
-}
-
void KeysCollectionManagerSharding::PeriodicRunner::start(ServiceContext* service,
const std::string& threadName,
Milliseconds refreshInterval) {
diff --git a/src/mongo/db/keys_collection_manager_sharding.h b/src/mongo/db/keys_collection_manager_sharding.h
index d3c4239d4a5..63ff3cdbcc0 100644
--- a/src/mongo/db/keys_collection_manager_sharding.h
+++ b/src/mongo/db/keys_collection_manager_sharding.h
@@ -134,17 +134,13 @@ private:
/**
* Sets the refresh function to use.
- * Should only be used to bootstrap this refresher with initial strategy.
+ *
+ * Should only be used to bootstrap this refresher with initial strategy. Also waits to make
+ * sure that the old strategy is not being used and will no longer be used after this call.
*/
void setFunc(RefreshFunc newRefreshStrategy);
/**
- * Switches the current strategy with a new one. This also waits to make sure that the old
- * strategy is not being used and will no longer be used after this call.
- */
- void switchFunc(OperationContext* opCtx, RefreshFunc newRefreshStrategy);
-
- /**
* Starts the refresh thread.
*/
void start(ServiceContext* service,
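
For illustration only, here is a hypothetical, much-simplified sketch of the behaviour the merged setFunc comment above describes (the real PeriodicRunner lives in keys_collection_manager_sharding.cpp and differs in detail; the MiniRefresher and runOnce names are invented for this sketch): the strategy is swapped under the refresher's mutex and the refresh thread is notified, so once setFunc returns the old strategy is no longer used.

    #include <condition_variable>
    #include <functional>
    #include <mutex>

    class MiniRefresher {
    public:
        using RefreshFunc = std::function<void()>;

        // Installs or replaces the refresh strategy. Because the background loop runs
        // the current strategy while holding the same mutex, this call blocks until any
        // in-progress invocation of the old strategy has finished.
        void setFunc(RefreshFunc newRefreshStrategy) {
            std::lock_guard<std::mutex> lk(_mutex);
            _doRefresh = std::move(newRefreshStrategy);
            _refreshNeeded = true;
            _refreshNeededCV.notify_all();
        }

        // One iteration of the background refresh loop.
        void runOnce() {
            std::unique_lock<std::mutex> lk(_mutex);
            _refreshNeededCV.wait(lk,
                                  [this] { return _refreshNeeded && static_cast<bool>(_doRefresh); });
            _refreshNeeded = false;
            _doRefresh();  // invoked under the lock in this simplified model
        }

    private:
        std::mutex _mutex;
        std::condition_variable _refreshNeededCV;
        bool _refreshNeeded = false;
        RefreshFunc _doRefresh;
    };

    int main() {
        MiniRefresher refresher;
        refresher.setFunc([] { /* e.g. refresh the keys cache */ });
        refresher.runOnce();
        return 0;
    }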
diff --git a/src/mongo/s/catalog/sharding_catalog_add_shard_to_zone_test.cpp b/src/mongo/s/catalog/sharding_catalog_add_shard_to_zone_test.cpp
index cfef6b9fd21..0dc89b36a4d 100644
--- a/src/mongo/s/catalog/sharding_catalog_add_shard_to_zone_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_add_shard_to_zone_test.cpp
@@ -50,7 +50,7 @@ TEST_F(AddShardToZoneTest, AddSingleZoneToExistingShardShouldSucceed) {
shard.setName("a");
shard.setHost("a:1234");
- setupShards({shard}).transitional_ignore();
+ setupShards({shard});
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->addShardToZone(operationContext(), shard.getName(), "z"));
@@ -69,7 +69,7 @@ TEST_F(AddShardToZoneTest, AddZoneToShardWithSameTagShouldSucceed) {
shard.setHost("a:1234");
shard.setTags({"x", "y"});
- setupShards({shard}).transitional_ignore();
+ setupShards({shard});
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->addShardToZone(operationContext(), shard.getName(), "x"));
@@ -90,7 +90,7 @@ TEST_F(AddShardToZoneTest, AddZoneToShardWithNewTagShouldAppend) {
shard.setHost("a:1234");
shard.setTags({"x"});
- setupShards({shard}).transitional_ignore();
+ setupShards({shard});
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->addShardToZone(operationContext(), shard.getName(), "y"));
@@ -110,7 +110,7 @@ TEST_F(AddShardToZoneTest, AddSingleZoneToNonExistingShardShouldFail) {
shard.setName("a");
shard.setHost("a:1234");
- setupShards({shard}).transitional_ignore();
+ setupShards({shard});
auto status = ShardingCatalogManager::get(operationContext())
->addShardToZone(operationContext(), "b", "z");
diff --git a/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp b/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
index 31fdbfea430..5b794937265 100644
--- a/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
@@ -63,7 +63,7 @@ public:
shard.setHost("a:1234");
shard.setTags({zoneName()});
- setupShards({shard}).transitional_ignore();
+ setupShards({shard});
CollectionType shardedCollection;
shardedCollection.setNs(shardedNS());
diff --git a/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp b/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp
index db9bc5f213b..7fe11c44403 100644
--- a/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp
@@ -132,7 +132,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtl) {
shard1.setName("shard1");
shard1.setHost("shard1:12");
- setupShards({shard0, shard1}).transitional_ignore();
+ setupShards({shard0, shard1});
int origMajorVersion = 15;
auto const origVersion = ChunkVersion(origMajorVersion, 4, OID::gen());
@@ -148,7 +148,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtl) {
auto chunkMax = BSON("a" << 10);
chunk0.setMax(chunkMax);
- setupChunks({chunk0}).transitional_ignore();
+ setupChunks({chunk0});
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
->commitChunkMigration(operationContext(),
@@ -188,7 +188,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
shard1.setName("shard1");
shard1.setHost("shard1:12");
- setupShards({shard0, shard1}).transitional_ignore();
+ setupShards({shard0, shard1});
int origMajorVersion = 12;
auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
@@ -213,7 +213,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
auto chunkMaxax = BSON("a" << 20);
chunk1.setMax(chunkMaxax);
- setupChunks({chunk0, chunk1}).transitional_ignore();
+ setupChunks({chunk0, chunk1});
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
->commitChunkMigration(operationContext(),
@@ -239,7 +239,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
shard1.setName("shard1");
shard1.setHost("shard1:12");
- setupShards({shard0, shard1}).transitional_ignore();
+ setupShards({shard0, shard1});
int origMajorVersion = 12;
auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
@@ -266,7 +266,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
chunk1.setMax(chunkMaxax);
// get version from the control chunk this time
- setupChunks({chunk1, chunk0}).transitional_ignore();
+ setupChunks({chunk1, chunk0});
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
->commitChunkMigration(operationContext(),
@@ -292,7 +292,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
shard1.setName("shard1");
shard1.setHost("shard1:12");
- setupShards({shard0, shard1}).transitional_ignore();
+ setupShards({shard0, shard1});
int origMajorVersion = 12;
auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
@@ -317,7 +317,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
auto chunkMaxax = BSON("a" << 20);
chunk1.setMax(chunkMaxax);
- setupChunks({chunk1}).transitional_ignore();
+ setupChunks({chunk1});
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
->commitChunkMigration(operationContext(),
@@ -343,7 +343,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing1) {
shard1.setName("shard1");
shard1.setHost("shard1:12");
- setupShards({shard0, shard1}).transitional_ignore();
+ setupShards({shard0, shard1});
int origMajorVersion = 12;
auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
@@ -368,7 +368,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing1) {
auto chunkMaxax = BSON("a" << 20);
chunk1.setMax(chunkMaxax);
- setupChunks({chunk0}).transitional_ignore();
+ setupChunks({chunk0});
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
->commitChunkMigration(operationContext(),
diff --git a/src/mongo/s/catalog/sharding_catalog_create_database_test.cpp b/src/mongo/s/catalog/sharding_catalog_create_database_test.cpp
index 92e43edd0e6..b686779deb3 100644
--- a/src/mongo/s/catalog/sharding_catalog_create_database_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_create_database_test.cpp
@@ -71,17 +71,17 @@ TEST_F(CreateDatabaseTest, createDatabaseSuccess) {
ShardType s0;
s0.setName("shard0000");
s0.setHost("ShardHost0:27017");
- ASSERT_OK(setupShards(vector<ShardType>{s0}));
+ setupShards(vector<ShardType>{s0});
ShardType s1;
s1.setName("shard0001");
s1.setHost("ShardHost1:27017");
- ASSERT_OK(setupShards(vector<ShardType>{s1}));
+ setupShards(vector<ShardType>{s1});
ShardType s2;
s2.setName("shard0002");
s2.setHost("ShardHost2:27017");
- ASSERT_OK(setupShards(vector<ShardType>{s2}));
+ setupShards(vector<ShardType>{s2});
// Prime the shard registry with information about the existing shards
shardRegistry()->reload(operationContext());
@@ -160,7 +160,7 @@ TEST_F(CreateDatabaseTest, createDatabaseDBExists) {
shard.setName("shard0");
shard.setHost("shard0:12");
- ASSERT_OK(setupShards(vector<ShardType>{shard}));
+ setupShards(vector<ShardType>{shard});
setupDatabase(dbname, shard.getName(), false);
@@ -175,7 +175,7 @@ TEST_F(CreateDatabaseTest, createDatabaseDBExistsDifferentCase) {
shard.setName("shard0");
shard.setHost("shard0:12");
- ASSERT_OK(setupShards(vector<ShardType>{shard}));
+ setupShards(vector<ShardType>{shard});
setupDatabase(dbnameDiffCase, shard.getName(), false);
diff --git a/src/mongo/s/catalog/sharding_catalog_enable_sharding_test.cpp b/src/mongo/s/catalog/sharding_catalog_enable_sharding_test.cpp
index daec9aa1bb4..66cc354b4f5 100644
--- a/src/mongo/s/catalog/sharding_catalog_enable_sharding_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_enable_sharding_test.cpp
@@ -71,7 +71,7 @@ TEST_F(EnableShardingTest, noDBExists) {
shard.setName("shard0");
shard.setHost("shard0:12");
- ASSERT_OK(setupShards(vector<ShardType>{shard}));
+ setupShards(vector<ShardType>{shard});
auto shardTargeter = RemoteCommandTargeterMock::get(
uassertStatusOK(shardRegistry()->getShard(operationContext(), ShardId("shard0")))
@@ -109,7 +109,7 @@ TEST_F(EnableShardingTest, dbExistsWithDifferentCase) {
ShardType shard;
shard.setName("shard0");
shard.setHost("shard0:12");
- ASSERT_OK(setupShards(vector<ShardType>{shard}));
+ setupShards(vector<ShardType>{shard});
setupDatabase("Db3", shard.getName(), false);
ASSERT_THROWS_CODE(
ShardingCatalogManager::get(operationContext())->enableSharding(operationContext(), "db3"),
@@ -121,7 +121,7 @@ TEST_F(EnableShardingTest, dbExists) {
ShardType shard;
shard.setName("shard0");
shard.setHost("shard0:12");
- ASSERT_OK(setupShards(vector<ShardType>{shard}));
+ setupShards(vector<ShardType>{shard});
setupDatabase("db4", shard.getName(), false);
ShardingCatalogManager::get(operationContext())->enableSharding(operationContext(), "db4");
}
@@ -130,7 +130,7 @@ TEST_F(EnableShardingTest, succeedsWhenTheDatabaseIsAlreadySharded) {
ShardType shard;
shard.setName("shard0");
shard.setHost("shard0:12");
- ASSERT_OK(setupShards(vector<ShardType>{shard}));
+ setupShards(vector<ShardType>{shard});
setupDatabase("db5", shard.getName(), true);
ShardingCatalogManager::get(operationContext())->enableSharding(operationContext(), "db5");
}
@@ -140,7 +140,7 @@ TEST_F(EnableShardingTest, dbExistsInvalidFormat) {
shard.setName("shard0");
shard.setHost("shard0:12");
- ASSERT_OK(setupShards(vector<ShardType>{shard}));
+ setupShards(vector<ShardType>{shard});
// Set up database with bad type for primary field.
ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
diff --git a/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp b/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
index 198511e6bcb..668741de055 100644
--- a/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
@@ -67,7 +67,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
- setupChunks({chunk, chunk2}).transitional_ignore();
+ setupChunks({chunk, chunk2});
ASSERT_OK(
ShardingCatalogManager::get(operationContext())
@@ -129,7 +129,7 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
// Record chunk boundaries for passing into commitChunkMerge
std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkBound2, chunkMax};
- setupChunks({chunk, chunk2, chunk3}).transitional_ignore();
+ setupChunks({chunk, chunk2, chunk3});
ASSERT_OK(
ShardingCatalogManager::get(operationContext())
@@ -195,7 +195,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
otherChunk.setMin(BSON("a" << 10));
otherChunk.setMax(BSON("a" << 20));
- setupChunks({chunk, chunk2, otherChunk}).transitional_ignore();
+ setupChunks({chunk, chunk2, otherChunk});
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->commitChunkMerge(
@@ -256,7 +256,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
otherChunk.setMin(BSON("a" << 10));
otherChunk.setMax(BSON("a" << 20));
- setupChunks({chunk, chunk2, otherChunk}).transitional_ignore();
+ setupChunks({chunk, chunk2, otherChunk});
ASSERT_OK(
ShardingCatalogManager::get(operationContext())
@@ -317,7 +317,7 @@ TEST_F(MergeChunkTest, NonExistingNamespace) {
// Record chunk boundaries for passing into commitChunkMerge
std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
- setupChunks({chunk, chunk2}).transitional_ignore();
+ setupChunks({chunk, chunk2});
auto mergeStatus = ShardingCatalogManager::get(operationContext())
->commitChunkMerge(operationContext(),
@@ -351,7 +351,7 @@ TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
// Record chunk boundaries for passing into commitChunkMerge
std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
- setupChunks({chunk, chunk2}).transitional_ignore();
+ setupChunks({chunk, chunk2});
auto mergeStatus =
ShardingCatalogManager::get(operationContext())
@@ -389,7 +389,7 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedFailsPrecondition) {
mergedChunk.setVersion(mergedVersion);
mergedChunk.setMax(chunkMax);
- setupChunks({mergedChunk}).transitional_ignore();
+ setupChunks({mergedChunk});
ASSERT_EQ(
ErrorCodes::BadValue,
@@ -447,7 +447,7 @@ TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFails) {
chunk.setVersion(version);
originalChunks.push_back(chunk);
- setupChunks(originalChunks).transitional_ignore();
+ setupChunks(originalChunks);
}
ASSERT_EQ(ErrorCodes::InvalidOptions,
@@ -482,7 +482,7 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) {
chunk3.setMin(chunkBound2);
chunk3.setMax(chunkMax);
- ASSERT_OK(setupChunks({chunk1, chunk2, chunk3}));
+ setupChunks({chunk1, chunk2, chunk3});
// Record chunk boundaries for passing into commitChunkMerge
std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound1, chunkBound2, chunkMax};
diff --git a/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp b/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp
index 65f9fb3ecac..aa32f379bcc 100644
--- a/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp
@@ -51,7 +51,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveZoneThatNoLongerExistsShouldNotError) {
shard.setName("a");
shard.setHost("a:1234");
- setupShards({shard}).transitional_ignore();
+ setupShards({shard});
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->removeShardFromZone(operationContext(), shard.getName(), "z"));
@@ -73,7 +73,7 @@ TEST_F(RemoveShardFromZoneTest, RemovingZoneThatIsOnlyReferencedByAnotherShardSh
shardB.setName("b");
shardB.setHost("b:1234");
- setupShards({shardA, shardB}).transitional_ignore();
+ setupShards({shardA, shardB});
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->removeShardFromZone(operationContext(), shardB.getName(), "z"));
@@ -106,7 +106,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveLastZoneFromShardShouldSucceedWhenNoChunks
shardB.setName("b");
shardB.setHost("b:1234");
- setupShards({shardA, shardB}).transitional_ignore();
+ setupShards({shardA, shardB});
// Insert a chunk range document referring to a different zone
TagsType tagDoc;
@@ -148,7 +148,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveLastZoneFromShardShouldFailWhenAChunkRefer
shardB.setName("b");
shardB.setHost("b:1234");
- setupShards({shardA, shardB}).transitional_ignore();
+ setupShards({shardA, shardB});
TagsType tagDoc;
tagDoc.setNS("test.foo");
@@ -188,7 +188,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveZoneShouldFailIfShardDoesntExist) {
shardA.setHost("a:1234");
shardA.setTags({"z"});
- setupShards({shardA}).transitional_ignore();
+ setupShards({shardA});
auto status = ShardingCatalogManager::get(operationContext())
->removeShardFromZone(operationContext(), "b", "z");
@@ -215,7 +215,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveZoneFromShardShouldOnlyRemoveZoneOnSpecifi
shardB.setHost("b:1234");
shardB.setTags({"y", "z"});
- setupShards({shardA, shardB}).transitional_ignore();
+ setupShards({shardA, shardB});
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->removeShardFromZone(operationContext(), shardB.getName(), "z"));
diff --git a/src/mongo/s/catalog/sharding_catalog_remove_shard_test.cpp b/src/mongo/s/catalog/sharding_catalog_remove_shard_test.cpp
index 53f9a799540..cd112f8e4df 100644
--- a/src/mongo/s/catalog/sharding_catalog_remove_shard_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_remove_shard_test.cpp
@@ -132,7 +132,7 @@ TEST_F(RemoveShardTest, RemoveShardAnotherShardDraining) {
shard2.setMaxSizeMB(100);
shard2.setState(ShardType::ShardState::kShardAware);
- ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2}));
+ setupShards(std::vector<ShardType>{shard1, shard2});
auto result = assertGet(ShardingCatalogManager::get(operationContext())
->removeShard(operationContext(), shard1.getName()));
@@ -154,7 +154,7 @@ TEST_F(RemoveShardTest, RemoveShardCantRemoveLastShard) {
shard1.setMaxSizeMB(100);
shard1.setState(ShardType::ShardState::kShardAware);
- ASSERT_OK(setupShards(std::vector<ShardType>{shard1}));
+ setupShards(std::vector<ShardType>{shard1});
ASSERT_EQUALS(ErrorCodes::IllegalOperation,
ShardingCatalogManager::get(operationContext())
@@ -175,7 +175,7 @@ TEST_F(RemoveShardTest, RemoveShardStartDraining) {
shard2.setMaxSizeMB(100);
shard2.setState(ShardType::ShardState::kShardAware);
- ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2}));
+ setupShards(std::vector<ShardType>{shard1, shard2});
auto result = assertGet(ShardingCatalogManager::get(operationContext())
->removeShard(operationContext(), shard1.getName()));
@@ -211,9 +211,9 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingChunksRemaining) {
ChunkVersion(1, 3, epoch),
shard1.getName());
- ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2}));
+ setupShards(std::vector<ShardType>{shard1, shard2});
setupDatabase("testDB", shard1.getName(), true);
- ASSERT_OK(setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3}));
+ setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3});
auto startedResult = assertGet(ShardingCatalogManager::get(operationContext())
->removeShard(operationContext(), shard1.getName()));
@@ -240,7 +240,7 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingDatabasesRemaining) {
shard2.setMaxSizeMB(100);
shard2.setState(ShardType::ShardState::kShardAware);
- ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2}));
+ setupShards(std::vector<ShardType>{shard1, shard2});
setupDatabase("testDB", shard1.getName(), false);
auto startedResult = assertGet(ShardingCatalogManager::get(operationContext())
@@ -284,9 +284,9 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
std::vector<ChunkType> chunks{chunk1, chunk2, chunk3};
- ASSERT_OK(setupShards(std::vector<ShardType>{shard1, shard2}));
+ setupShards(std::vector<ShardType>{shard1, shard2});
setupDatabase("testDB", shard2.getName(), false);
- ASSERT_OK(setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3}));
+ setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3});
auto startedResult = assertGet(ShardingCatalogManager::get(operationContext())
->removeShard(operationContext(), shard1.getName()));
diff --git a/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp b/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
index ba49bafc1ae..cb0cd4eca5e 100644
--- a/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
@@ -119,7 +119,7 @@ TEST_F(ShardCollectionTest, anotherMongosSharding) {
ShardType shard;
shard.setName("shard0");
shard.setHost("shardHost");
- ASSERT_OK(setupShards(vector<ShardType>{shard}));
+ setupShards(vector<ShardType>{shard});
setupDatabase(nss.db().toString(), shard.getName(), true);
@@ -131,7 +131,7 @@ TEST_F(ShardCollectionTest, anotherMongosSharding) {
chunk.setShard(shard.getName());
chunk.setMin(BSON("_id" << 1));
chunk.setMax(BSON("_id" << 5));
- ASSERT_OK(setupChunks({chunk}));
+ setupChunks({chunk});
ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
BSONObj defaultCollation;
@@ -163,7 +163,7 @@ TEST_F(ShardCollectionTest, noInitialChunksOrData) {
targeter->setFindHostReturnValue(shardHost);
targeterFactory()->addTargeterToReturn(ConnectionString(shardHost), std::move(targeter));
- ASSERT_OK(setupShards(vector<ShardType>{shard}));
+ setupShards(vector<ShardType>{shard});
const auto nss = NamespaceString("db1.foo");
@@ -235,7 +235,7 @@ TEST_F(ShardCollectionTest, withInitialChunks) {
targeter2->setFindHostReturnValue(shard2Host);
targeterFactory()->addTargeterToReturn(ConnectionString(shard2Host), std::move(targeter2));
- ASSERT_OK(setupShards(vector<ShardType>{shard0, shard1, shard2}));
+ setupShards(vector<ShardType>{shard0, shard1, shard2});
const auto nss = NamespaceString("db1.foo");
string ns = "db1.foo";
@@ -342,7 +342,7 @@ TEST_F(ShardCollectionTest, withInitialData) {
targeter->setFindHostReturnValue(shardHost);
targeterFactory()->addTargeterToReturn(ConnectionString(shardHost), std::move(targeter));
- ASSERT_OK(setupShards(vector<ShardType>{shard}));
+ setupShards(vector<ShardType>{shard});
const auto nss = NamespaceString("db1.foo");
string ns = "db1.foo";
diff --git a/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp b/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
index 40957129ece..88cb5872967 100644
--- a/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
@@ -59,7 +59,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
auto chunkSplitPoint = BSON("a" << 5);
std::vector<BSONObj> splitPoints{chunkSplitPoint};
- ASSERT_OK(setupChunks({chunk}));
+ setupChunks({chunk});
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
@@ -109,7 +109,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
auto chunkSplitPoint2 = BSON("a" << 7);
std::vector<BSONObj> splitPoints{chunkSplitPoint, chunkSplitPoint2};
- ASSERT_OK(setupChunks({chunk}));
+ setupChunks({chunk});
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
@@ -180,7 +180,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
chunk2.setMin(BSON("a" << 10));
chunk2.setMax(BSON("a" << 20));
- ASSERT_OK(setupChunks({chunk, chunk2}));
+ setupChunks({chunk, chunk2});
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
@@ -230,7 +230,7 @@ TEST_F(SplitChunkTest, PreConditionFailErrors) {
auto chunkSplitPoint = BSON("a" << 5);
splitPoints.push_back(chunkSplitPoint);
- ASSERT_OK(setupChunks({chunk}));
+ setupChunks({chunk});
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
@@ -257,7 +257,7 @@ TEST_F(SplitChunkTest, NonExisingNamespaceErrors) {
std::vector<BSONObj> splitPoints{BSON("a" << 5)};
- ASSERT_OK(setupChunks({chunk}));
+ setupChunks({chunk});
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
@@ -284,7 +284,7 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
std::vector<BSONObj> splitPoints{BSON("a" << 5)};
- ASSERT_OK(setupChunks({chunk}));
+ setupChunks({chunk});
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
@@ -311,7 +311,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) {
std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 4)};
- ASSERT_OK(setupChunks({chunk}));
+ setupChunks({chunk});
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
@@ -338,7 +338,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) {
std::vector<BSONObj> splitPoints{BSON("a" << 0), BSON("a" << 5)};
- ASSERT_OK(setupChunks({chunk}));
+ setupChunks({chunk});
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
@@ -365,7 +365,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) {
std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 15)};
- ASSERT_OK(setupChunks({chunk}));
+ setupChunks({chunk});
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
@@ -389,7 +389,7 @@ TEST_F(SplitChunkTest, SplitPointsWithDollarPrefixShouldFail) {
auto chunkMax = BSON("a" << kMaxBSONKey);
chunk.setMin(chunkMin);
chunk.setMax(chunkMax);
- ASSERT_OK(setupChunks({chunk}));
+ setupChunks({chunk});
ASSERT_NOT_OK(ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
diff --git a/src/mongo/s/config_server_test_fixture.cpp b/src/mongo/s/config_server_test_fixture.cpp
index 23520fb283f..aea46c5bbc7 100644
--- a/src/mongo/s/config_server_test_fixture.cpp
+++ b/src/mongo/s/config_server_test_fixture.cpp
@@ -288,16 +288,11 @@ StatusWith<BSONObj> ConfigServerTestFixture::findOneOnConfigCollection(Operation
return findResult.docs.front().getOwned();
}
-Status ConfigServerTestFixture::setupShards(const std::vector<ShardType>& shards) {
+void ConfigServerTestFixture::setupShards(const std::vector<ShardType>& shards) {
const NamespaceString shardNS(ShardType::ConfigNS);
for (const auto& shard : shards) {
- auto insertStatus = insertToConfigCollection(operationContext(), shardNS, shard.toBSON());
- if (!insertStatus.isOK()) {
- return insertStatus;
- }
+ ASSERT_OK(insertToConfigCollection(operationContext(), shardNS, shard.toBSON()));
}
-
- return Status::OK();
}
StatusWith<ShardType> ConfigServerTestFixture::getShardDoc(OperationContext* opCtx,
@@ -315,16 +310,11 @@ StatusWith<ShardType> ConfigServerTestFixture::getShardDoc(OperationContext* opC
return ShardType::fromBSON(doc.getValue());
}
-Status ConfigServerTestFixture::setupChunks(const std::vector<ChunkType>& chunks) {
+void ConfigServerTestFixture::setupChunks(const std::vector<ChunkType>& chunks) {
const NamespaceString chunkNS(ChunkType::ConfigNS);
for (const auto& chunk : chunks) {
- auto insertStatus =
- insertToConfigCollection(operationContext(), chunkNS, chunk.toConfigBSON());
- if (!insertStatus.isOK())
- return insertStatus;
+ ASSERT_OK(insertToConfigCollection(operationContext(), chunkNS, chunk.toConfigBSON()));
}
-
- return Status::OK();
}
StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(OperationContext* opCtx,
diff --git a/src/mongo/s/config_server_test_fixture.h b/src/mongo/s/config_server_test_fixture.h
index 3ef0ed17c50..3bad8593d6a 100644
--- a/src/mongo/s/config_server_test_fixture.h
+++ b/src/mongo/s/config_server_test_fixture.h
@@ -91,7 +91,7 @@ public:
/**
* Setup the config.shards collection to contain the given shards.
*/
- Status setupShards(const std::vector<ShardType>& shards);
+ void setupShards(const std::vector<ShardType>& shards);
/**
* Retrieves the shard document from the config server.
@@ -102,7 +102,7 @@ public:
/**
* Setup the config.chunks collection to contain the given chunks.
*/
- Status setupChunks(const std::vector<ChunkType>& chunks);
+ void setupChunks(const std::vector<ChunkType>& chunks);
/**
* Retrieves the chunk document from the config server.