author     ADAM David Alan Martin <adam.martin@10gen.com>  2017-06-18 23:22:02 -0400
committer  ADAM David Alan Martin <adam.martin@10gen.com>  2017-06-18 23:46:57 -0400
commit     9abef6f25aadfd04309cb2219068097f93dc961d (patch)
tree       f88c7f183f201813f363d5d68c1a4a76781ca7ef /src/mongo/s
parent     a5f0a84c79b6ce41fef33da920c62be0ecc8f07b (diff)
SERVER-27244 Status usage compile-time facilities.
There are numerous places in the codebase where `mongo::Status` or `mongo::StatusWith< T >` objects are returned and never checked. Many of these are innocuous, but many of them are potentially severe bugs. This change introduces facilities to permit compile-time warning of unchecked `Status` and `StatusWith` usage on clang compilers. It introduces an `ignore` function, which is useful to state that a specific "ignored status" case was intentional. It is not presently an error, in clang builds, to forget to check a `Status` -- this will come in a later commit. This also introduces a `transitional_ignore` function, which allows for easy continual auditing of the codebase for current "whitelisted" unchecked-status instances. All present "ignored status" cases have been marked `transitional_ignore`.
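
As a rough illustration of the technique the message describes -- a self-contained sketch, not the patch's actual implementation; the macro name WARN_UNUSED_RESULT, the toy Status class, and doWork() are assumptions for the example -- clang and GCC can warn when a function's return value is silently dropped, and no-op members such as ignore() and transitional_ignore() give call sites an explicit, greppable way to opt out:

    #include <iostream>
    #include <string>
    #include <utility>

    #if defined(__clang__) || defined(__GNUC__)
    #define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
    #else
    #define WARN_UNUSED_RESULT
    #endif

    // Toy stand-in for mongo::Status; illustrative only.
    class Status {
    public:
        static Status OK() { return Status(true, ""); }
        static Status Error(std::string reason) { return Status(false, std::move(reason)); }

        bool isOK() const { return _ok; }
        const std::string& reason() const { return _reason; }

        // States at the call site that ignoring this Status is intentional and permanent.
        void ignore() const noexcept {}

        // Marks a whitelisted, to-be-audited call site during the transition period.
        void transitional_ignore() const noexcept {}

    private:
        Status(bool ok, std::string reason) : _ok(ok), _reason(std::move(reason)) {}
        bool _ok;
        std::string _reason;
    };

    // The attribute makes clang/gcc warn if a caller drops the result on the floor.
    WARN_UNUSED_RESULT Status doWork();

    Status doWork() { return Status::Error("not implemented"); }

    int main() {
        doWork();                        // warning: ignoring return value
        doWork().ignore();               // intentional ignore, no warning
        doWork().transitional_ignore();  // whitelisted during the audit, no warning
        if (!doWork().isOK()) {
            std::cerr << "work failed: " << doWork().reason() << "\n";
        }
        return 0;
    }

The no-op bodies matter: calling any member function on the returned object counts as using the result, so the compiler stays quiet, while transitional_ignore() remains trivially greppable for the later audit the message promises. The diff below applies the same idea to StatusWith return values via status_with_transitional_ignore().
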
Diffstat (limited to 'src/mongo/s')
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_add_shard_to_zone_test.cpp          8
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp   2
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.cpp                     18
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp     24
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp      12
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_log_change_test.cpp                 33
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp   18
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp   7
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp               16
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp     18
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp                18
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_test.cpp                            20
-rw-r--r--  src/mongo/s/cluster_last_error_info_test.cpp                             26
-rw-r--r--  src/mongo/s/commands/cluster_drop_database_cmd.cpp                       21
-rw-r--r--  src/mongo/s/commands/cluster_move_primary_cmd.cpp                        27
-rw-r--r--  src/mongo/s/mongos_options.cpp                                           8
-rw-r--r--  src/mongo/s/query/async_results_merger.cpp                               8
-rw-r--r--  src/mongo/s/query/cluster_cursor_manager_test.cpp                        120
-rw-r--r--  src/mongo/s/query/establish_cursors.cpp                                  6
-rw-r--r--  src/mongo/s/server.cpp                                                   7
-rw-r--r--  src/mongo/s/shard_key_pattern.cpp                                        2
-rw-r--r--  src/mongo/s/sharding_mongod_test_fixture.cpp                             11
-rw-r--r--  src/mongo/s/sharding_test_fixture.cpp                                    4
-rw-r--r--  src/mongo/s/sharding_uptime_reporter.cpp                                 16
24 files changed, 266 insertions, 184 deletions
diff --git a/src/mongo/s/catalog/sharding_catalog_add_shard_to_zone_test.cpp b/src/mongo/s/catalog/sharding_catalog_add_shard_to_zone_test.cpp
index 9c2257f85d6..cf7e44ba9c1 100644
--- a/src/mongo/s/catalog/sharding_catalog_add_shard_to_zone_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_add_shard_to_zone_test.cpp
@@ -48,7 +48,7 @@ TEST_F(AddShardToZoneTest, AddSingleZoneToExistingShardShouldSucceed) {
shard.setName("a");
shard.setHost("a:1234");
- setupShards({shard});
+ setupShards({shard}).transitional_ignore();
ASSERT_OK(catalogManager()->addShardToZone(operationContext(), shard.getName(), "z"));
auto shardDocStatus = getShardDoc(operationContext(), shard.getName());
@@ -66,7 +66,7 @@ TEST_F(AddShardToZoneTest, AddZoneToShardWithSameTagShouldSucceed) {
shard.setHost("a:1234");
shard.setTags({"x", "y"});
- setupShards({shard});
+ setupShards({shard}).transitional_ignore();
ASSERT_OK(catalogManager()->addShardToZone(operationContext(), shard.getName(), "x"));
@@ -86,7 +86,7 @@ TEST_F(AddShardToZoneTest, AddZoneToShardWithNewTagShouldAppend) {
shard.setHost("a:1234");
shard.setTags({"x"});
- setupShards({shard});
+ setupShards({shard}).transitional_ignore();
ASSERT_OK(catalogManager()->addShardToZone(operationContext(), shard.getName(), "y"));
@@ -105,7 +105,7 @@ TEST_F(AddShardToZoneTest, AddSingleZoneToNonExistingShardShouldFail) {
shard.setName("a");
shard.setHost("a:1234");
- setupShards({shard});
+ setupShards({shard}).transitional_ignore();
auto status = catalogManager()->addShardToZone(operationContext(), "b", "z");
ASSERT_EQ(ErrorCodes::ShardNotFound, status);
diff --git a/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp b/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
index 104a97a3f6a..eff3e270fc4 100644
--- a/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
@@ -59,7 +59,7 @@ public:
shard.setHost("a:1234");
shard.setTags({zoneName()});
- setupShards({shard});
+ setupShards({shard}).transitional_ignore();
CollectionType shardedCollection;
shardedCollection.setNs(shardedNS());
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 27e01623f6b..0c5f16812d5 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -575,7 +575,8 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* opCtx,
"shardCollection.start",
ns,
collectionDetail.obj(),
- ShardingCatalogClientImpl::kMajorityWriteConcern);
+ ShardingCatalogClientImpl::kMajorityWriteConcern)
+ .transitional_ignore();
}
const NamespaceString nss(ns);
@@ -652,7 +653,8 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* opCtx,
"shardCollection.end",
ns,
BSON("version" << collVersion.toString()),
- ShardingCatalogClientImpl::kMajorityWriteConcern);
+ ShardingCatalogClientImpl::kMajorityWriteConcern)
+ .transitional_ignore();
return Status::OK();
}
@@ -715,7 +717,8 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
"removeShard.start",
"",
BSON("shard" << name),
- ShardingCatalogClientImpl::kMajorityWriteConcern);
+ ShardingCatalogClientImpl::kMajorityWriteConcern)
+ .transitional_ignore();
return ShardDrainingStatus::STARTED;
}
@@ -765,7 +768,8 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
"removeShard",
"",
BSON("shard" << name),
- ShardingCatalogClientImpl::kMajorityWriteConcern);
+ ShardingCatalogClientImpl::kMajorityWriteConcern)
+ .transitional_ignore();
return ShardDrainingStatus::COMPLETED;
}
@@ -921,7 +925,8 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* opCtx,
"dropCollection.start",
ns.ns(),
BSONObj(),
- ShardingCatalogClientImpl::kMajorityWriteConcern);
+ ShardingCatalogClientImpl::kMajorityWriteConcern)
+ .transitional_ignore();
auto shardsStatus = getAllShards(opCtx, repl::ReadConcernLevel::kMajorityReadConcern);
if (!shardsStatus.isOK()) {
@@ -1099,7 +1104,8 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* opCtx,
"dropCollection",
ns.ns(),
BSONObj(),
- ShardingCatalogClientImpl::kMajorityWriteConcern);
+ ShardingCatalogClientImpl::kMajorityWriteConcern)
+ .transitional_ignore();
return Status::OK();
}
diff --git a/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp b/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp
index 0413d18d030..81d07a8481c 100644
--- a/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp
@@ -55,7 +55,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandWithCtl) {
shard1.setName("shard1");
shard1.setHost("shard1:12");
- setupShards({shard0, shard1});
+ setupShards({shard0, shard1}).transitional_ignore();
int origMajorVersion = 12;
auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
@@ -80,7 +80,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandWithCtl) {
auto chunkMaxax = BSON("a" << 20);
chunk1.setMax(chunkMaxax);
- setupChunks({chunk0, chunk1});
+ setupChunks({chunk0, chunk1}).transitional_ignore();
// use crefs to verify it will take consts:
ChunkType const& chunk0cref = chunk0;
@@ -129,7 +129,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtl) {
shard1.setName("shard1");
shard1.setHost("shard1:12");
- setupShards({shard0, shard1});
+ setupShards({shard0, shard1}).transitional_ignore();
int origMajorVersion = 15;
auto const origVersion = ChunkVersion(origMajorVersion, 4, OID::gen());
@@ -145,7 +145,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtl) {
auto chunkMax = BSON("a" << 10);
chunk0.setMax(chunkMax);
- setupChunks({chunk0});
+ setupChunks({chunk0}).transitional_ignore();
StatusWith<BSONObj> resultBSON =
catalogManager()->commitChunkMigration(operationContext(),
@@ -185,7 +185,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
shard1.setName("shard1");
shard1.setHost("shard1:12");
- setupShards({shard0, shard1});
+ setupShards({shard0, shard1}).transitional_ignore();
int origMajorVersion = 12;
auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
@@ -210,7 +210,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
auto chunkMaxax = BSON("a" << 20);
chunk1.setMax(chunkMaxax);
- setupChunks({chunk0, chunk1});
+ setupChunks({chunk0, chunk1}).transitional_ignore();
StatusWith<BSONObj> resultBSON =
catalogManager()->commitChunkMigration(operationContext(),
@@ -236,7 +236,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
shard1.setName("shard1");
shard1.setHost("shard1:12");
- setupShards({shard0, shard1});
+ setupShards({shard0, shard1}).transitional_ignore();
int origMajorVersion = 12;
auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
@@ -263,7 +263,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
chunk1.setMax(chunkMaxax);
// get version from the control chunk this time
- setupChunks({chunk1, chunk0});
+ setupChunks({chunk1, chunk0}).transitional_ignore();
StatusWith<BSONObj> resultBSON =
catalogManager()->commitChunkMigration(operationContext(),
@@ -289,7 +289,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
shard1.setName("shard1");
shard1.setHost("shard1:12");
- setupShards({shard0, shard1});
+ setupShards({shard0, shard1}).transitional_ignore();
int origMajorVersion = 12;
auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
@@ -314,7 +314,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
auto chunkMaxax = BSON("a" << 20);
chunk1.setMax(chunkMaxax);
- setupChunks({chunk1});
+ setupChunks({chunk1}).transitional_ignore();
StatusWith<BSONObj> resultBSON =
catalogManager()->commitChunkMigration(operationContext(),
@@ -340,7 +340,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing1) {
shard1.setName("shard1");
shard1.setHost("shard1:12");
- setupShards({shard0, shard1});
+ setupShards({shard0, shard1}).transitional_ignore();
int origMajorVersion = 12;
auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
@@ -365,7 +365,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing1) {
auto chunkMaxax = BSON("a" << 20);
chunk1.setMax(chunkMaxax);
- setupChunks({chunk0});
+ setupChunks({chunk0}).transitional_ignore();
StatusWith<BSONObj> resultBSON =
catalogManager()->commitChunkMigration(operationContext(),
diff --git a/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp b/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
index c43cfe3a8aa..5c6d84f5abc 100644
--- a/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
@@ -346,8 +346,10 @@ TEST_F(ConfigInitializationTest, BuildsNecessaryIndexes) {
}
TEST_F(ConfigInitializationTest, CompatibleIndexAlreadyExists) {
- getConfigShard()->createIndexOnConfig(
- operationContext(), NamespaceString(ShardType::ConfigNS), BSON("host" << 1), true);
+ getConfigShard()
+ ->createIndexOnConfig(
+ operationContext(), NamespaceString(ShardType::ConfigNS), BSON("host" << 1), true)
+ .transitional_ignore();
ASSERT_OK(catalogManager()->initializeConfigDatabaseIfNeeded(operationContext()));
@@ -370,8 +372,10 @@ TEST_F(ConfigInitializationTest, CompatibleIndexAlreadyExists) {
TEST_F(ConfigInitializationTest, IncompatibleIndexAlreadyExists) {
// Make the index non-unique even though its supposed to be unique, make sure initialization
// fails
- getConfigShard()->createIndexOnConfig(
- operationContext(), NamespaceString(ShardType::ConfigNS), BSON("host" << 1), false);
+ getConfigShard()
+ ->createIndexOnConfig(
+ operationContext(), NamespaceString(ShardType::ConfigNS), BSON("host" << 1), false)
+ .transitional_ignore();
ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict,
catalogManager()->initializeConfigDatabaseIfNeeded(operationContext()));
diff --git a/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp b/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp
index 7ddff536bb4..1523f00825a 100644
--- a/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp
@@ -73,8 +73,9 @@ public:
protected:
void noRetryAfterSuccessfulCreate() {
- auto future = launchAsync(
- [this] { log("moved a chunk", "foo.bar", BSON("min" << 3 << "max" << 4)); });
+ auto future = launchAsync([this] {
+ log("moved a chunk", "foo.bar", BSON("min" << 3 << "max" << 4)).transitional_ignore();
+ });
expectConfigCollectionCreate(configHost, getConfigCollName(), _cappedSize, BSON("ok" << 1));
expectConfigCollectionInsert(configHost,
@@ -88,8 +89,10 @@ protected:
future.timed_get(kFutureTimeout);
// Now log another change and confirm that we don't re-attempt to create the collection
- future = launchAsync(
- [this] { log("moved a second chunk", "foo.bar", BSON("min" << 4 << "max" << 5)); });
+ future = launchAsync([this] {
+ log("moved a second chunk", "foo.bar", BSON("min" << 4 << "max" << 5))
+ .transitional_ignore();
+ });
expectConfigCollectionInsert(configHost,
getConfigCollName(),
@@ -103,8 +106,9 @@ protected:
}
void noRetryCreateIfAlreadyExists() {
- auto future = launchAsync(
- [this] { log("moved a chunk", "foo.bar", BSON("min" << 3 << "max" << 4)); });
+ auto future = launchAsync([this] {
+ log("moved a chunk", "foo.bar", BSON("min" << 3 << "max" << 4)).transitional_ignore();
+ });
BSONObjBuilder createResponseBuilder;
Command::appendCommandStatus(createResponseBuilder,
@@ -122,8 +126,10 @@ protected:
future.timed_get(kFutureTimeout);
// Now log another change and confirm that we don't re-attempt to create the collection
- future = launchAsync(
- [this] { log("moved a second chunk", "foo.bar", BSON("min" << 4 << "max" << 5)); });
+ future = launchAsync([this] {
+ log("moved a second chunk", "foo.bar", BSON("min" << 4 << "max" << 5))
+ .transitional_ignore();
+ });
expectConfigCollectionInsert(configHost,
getConfigCollName(),
@@ -137,8 +143,9 @@ protected:
}
void createFailure() {
- auto future = launchAsync(
- [this] { log("moved a chunk", "foo.bar", BSON("min" << 3 << "max" << 4)); });
+ auto future = launchAsync([this] {
+ log("moved a chunk", "foo.bar", BSON("min" << 3 << "max" << 4)).transitional_ignore();
+ });
BSONObjBuilder createResponseBuilder;
Command::appendCommandStatus(createResponseBuilder,
@@ -150,8 +157,10 @@ protected:
future.timed_get(kFutureTimeout);
// Now log another change and confirm that we *do* attempt to create the collection
- future = launchAsync(
- [this] { log("moved a second chunk", "foo.bar", BSON("min" << 4 << "max" << 5)); });
+ future = launchAsync([this] {
+ log("moved a second chunk", "foo.bar", BSON("min" << 4 << "max" << 5))
+ .transitional_ignore();
+ });
expectConfigCollectionCreate(configHost, getConfigCollName(), _cappedSize, BSON("ok" << 1));
expectConfigCollectionInsert(configHost,
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
index 76479cbd5c3..58391f3173e 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
@@ -394,8 +394,10 @@ Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* opCtx,
appendShortVersion(&logDetail.subobjStart("left"), newChunks[0]);
appendShortVersion(&logDetail.subobjStart("right"), newChunks[1]);
- Grid::get(opCtx)->catalogClient(opCtx)->logChange(
- opCtx, "split", ns.ns(), logDetail.obj(), WriteConcernOptions());
+ Grid::get(opCtx)
+ ->catalogClient(opCtx)
+ ->logChange(opCtx, "split", ns.ns(), logDetail.obj(), WriteConcernOptions())
+ .transitional_ignore();
} else {
BSONObj beforeDetailObj = logDetail.obj();
BSONObj firstDetailObj = beforeDetailObj.getOwned();
@@ -408,8 +410,10 @@ Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* opCtx,
chunkDetail.append("of", newChunksSize);
appendShortVersion(&chunkDetail.subobjStart("chunk"), newChunks[i]);
- Grid::get(opCtx)->catalogClient(opCtx)->logChange(
- opCtx, "multi-split", ns.ns(), chunkDetail.obj(), WriteConcernOptions());
+ Grid::get(opCtx)
+ ->catalogClient(opCtx)
+ ->logChange(opCtx, "multi-split", ns.ns(), chunkDetail.obj(), WriteConcernOptions())
+ .transitional_ignore();
}
}
@@ -517,8 +521,10 @@ Status ShardingCatalogManagerImpl::commitChunkMerge(OperationContext* opCtx,
collVersion.addToBSON(logDetail, "prevShardVersion");
mergeVersion.addToBSON(logDetail, "mergedVersion");
- Grid::get(opCtx)->catalogClient(opCtx)->logChange(
- opCtx, "merge", ns.ns(), logDetail.obj(), WriteConcernOptions());
+ Grid::get(opCtx)
+ ->catalogClient(opCtx)
+ ->logChange(opCtx, "merge", ns.ns(), logDetail.obj(), WriteConcernOptions())
+ .transitional_ignore();
return applyOpsStatus;
}
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
index 9d766d11f46..12ffa5c278d 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
@@ -684,8 +684,11 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
shardDetails.append("name", shardType.getName());
shardDetails.append("host", shardConnectionString.toString());
- Grid::get(opCtx)->catalogClient(opCtx)->logChange(
- opCtx, "addShard", "", shardDetails.obj(), ShardingCatalogClient::kMajorityWriteConcern);
+ Grid::get(opCtx)
+ ->catalogClient(opCtx)
+ ->logChange(
+ opCtx, "addShard", "", shardDetails.obj(), ShardingCatalogClient::kMajorityWriteConcern)
+ .transitional_ignore();
// Ensure the added shard is visible to this process.
auto shardRegistry = Grid::get(opCtx)->shardRegistry();
diff --git a/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp b/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
index cdb71cfa7cf..d6c2878ecdf 100644
--- a/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
@@ -63,7 +63,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
- setupChunks({chunk, chunk2});
+ setupChunks({chunk, chunk2}).transitional_ignore();
ASSERT_OK(catalogManager()->commitChunkMerge(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -126,7 +126,7 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
// Record chunk boundaries for passing into commitChunkMerge
std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkBound2, chunkMax};
- setupChunks({chunk, chunk2, chunk3});
+ setupChunks({chunk, chunk2, chunk3}).transitional_ignore();
ASSERT_OK(catalogManager()->commitChunkMerge(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -193,7 +193,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
otherChunk.setMin(BSON("a" << 10));
otherChunk.setMax(BSON("a" << 20));
- setupChunks({chunk, chunk2, otherChunk});
+ setupChunks({chunk, chunk2, otherChunk}).transitional_ignore();
ASSERT_OK(catalogManager()->commitChunkMerge(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -256,7 +256,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
otherChunk.setMin(BSON("a" << 10));
otherChunk.setMax(BSON("a" << 20));
- setupChunks({chunk, chunk2, otherChunk});
+ setupChunks({chunk, chunk2, otherChunk}).transitional_ignore();
ASSERT_OK(catalogManager()->commitChunkMerge(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -318,7 +318,7 @@ TEST_F(MergeChunkTest, NonExistingNamespace) {
// Record chunk boundaries for passing into commitChunkMerge
std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
- setupChunks({chunk, chunk2});
+ setupChunks({chunk, chunk2}).transitional_ignore();
auto mergeStatus = catalogManager()->commitChunkMerge(operationContext(),
NamespaceString("TestDB.NonExistingColl"),
@@ -351,7 +351,7 @@ TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
// Record chunk baoundaries for passing into commitChunkMerge
std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
- setupChunks({chunk, chunk2});
+ setupChunks({chunk, chunk2}).transitional_ignore();
auto mergeStatus = catalogManager()->commitChunkMerge(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -390,7 +390,7 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedFailsPrecondition) {
mergedChunk.setVersion(mergedVersion);
mergedChunk.setMax(chunkMax);
- setupChunks({mergedChunk});
+ setupChunks({mergedChunk}).transitional_ignore();
ASSERT_EQ(ErrorCodes::BadValue,
catalogManager()->commitChunkMerge(operationContext(),
@@ -449,7 +449,7 @@ TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFails) {
chunk.setVersion(version);
originalChunks.push_back(chunk);
- setupChunks(originalChunks);
+ setupChunks(originalChunks).transitional_ignore();
}
ASSERT_EQ(ErrorCodes::InvalidOptions,
diff --git a/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp b/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp
index fa89754211f..8b9b47a284c 100644
--- a/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp
@@ -49,7 +49,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveZoneThatNoLongerExistsShouldNotError) {
shard.setName("a");
shard.setHost("a:1234");
- setupShards({shard});
+ setupShards({shard}).transitional_ignore();
ASSERT_OK(catalogManager()->removeShardFromZone(operationContext(), shard.getName(), "z"));
auto shardDocStatus = getShardDoc(operationContext(), shard.getName());
@@ -70,7 +70,7 @@ TEST_F(RemoveShardFromZoneTest, RemovingZoneThatIsOnlyReferencedByAnotherShardSh
shardB.setName("b");
shardB.setHost("b:1234");
- setupShards({shardA, shardB});
+ setupShards({shardA, shardB}).transitional_ignore();
ASSERT_OK(catalogManager()->removeShardFromZone(operationContext(), shardB.getName(), "z"));
@@ -102,7 +102,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveLastZoneFromShardShouldSucceedWhenNoChunks
shardB.setName("b");
shardB.setHost("b:1234");
- setupShards({shardA, shardB});
+ setupShards({shardA, shardB}).transitional_ignore();
// Insert a chunk range document referring to a different zone
TagsType tagDoc;
@@ -111,7 +111,8 @@ TEST_F(RemoveShardFromZoneTest, RemoveLastZoneFromShardShouldSucceedWhenNoChunks
tagDoc.setMaxKey(BSON("x" << 10));
tagDoc.setTag("y");
insertToConfigCollection(
- operationContext(), NamespaceString(TagsType::ConfigNS), tagDoc.toBSON());
+ operationContext(), NamespaceString(TagsType::ConfigNS), tagDoc.toBSON())
+ .transitional_ignore();
ASSERT_OK(catalogManager()->removeShardFromZone(operationContext(), shardA.getName(), "z"));
@@ -142,7 +143,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveLastZoneFromShardShouldFailWhenAChunkRefer
shardB.setName("b");
shardB.setHost("b:1234");
- setupShards({shardA, shardB});
+ setupShards({shardA, shardB}).transitional_ignore();
TagsType tagDoc;
tagDoc.setNS("test.foo");
@@ -150,7 +151,8 @@ TEST_F(RemoveShardFromZoneTest, RemoveLastZoneFromShardShouldFailWhenAChunkRefer
tagDoc.setMaxKey(BSON("x" << 10));
tagDoc.setTag("z");
insertToConfigCollection(
- operationContext(), NamespaceString(TagsType::ConfigNS), tagDoc.toBSON());
+ operationContext(), NamespaceString(TagsType::ConfigNS), tagDoc.toBSON())
+ .transitional_ignore();
auto status = catalogManager()->removeShardFromZone(operationContext(), shardA.getName(), "z");
ASSERT_EQ(ErrorCodes::ZoneStillInUse, status);
@@ -180,7 +182,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveZoneShouldFailIfShardDoesntExist) {
shardA.setHost("a:1234");
shardA.setTags({"z"});
- setupShards({shardA});
+ setupShards({shardA}).transitional_ignore();
auto status = catalogManager()->removeShardFromZone(operationContext(), "b", "z");
ASSERT_EQ(ErrorCodes::ShardNotFound, status);
@@ -206,7 +208,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveZoneFromShardShouldOnlyRemoveZoneOnSpecifi
shardB.setHost("b:1234");
shardB.setTags({"y", "z"});
- setupShards({shardA, shardB});
+ setupShards({shardA, shardB}).transitional_ignore();
ASSERT_OK(catalogManager()->removeShardFromZone(operationContext(), shardB.getName(), "z"));
diff --git a/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp b/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
index 8373636edad..b3b8b02780b 100644
--- a/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
@@ -55,7 +55,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
auto chunkSplitPoint = BSON("a" << 5);
std::vector<BSONObj> splitPoints{chunkSplitPoint};
- setupChunks({chunk});
+ setupChunks({chunk}).transitional_ignore();
ASSERT_OK(catalogManager()->commitChunkSplit(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -104,7 +104,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
auto chunkSplitPoint2 = BSON("a" << 7);
std::vector<BSONObj> splitPoints{chunkSplitPoint, chunkSplitPoint2};
- setupChunks({chunk});
+ setupChunks({chunk}).transitional_ignore();
ASSERT_OK(catalogManager()->commitChunkSplit(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -174,7 +174,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
chunk2.setMin(BSON("a" << 10));
chunk2.setMax(BSON("a" << 20));
- setupChunks({chunk, chunk2});
+ setupChunks({chunk, chunk2}).transitional_ignore();
ASSERT_OK(catalogManager()->commitChunkSplit(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -223,7 +223,7 @@ TEST_F(SplitChunkTest, PreConditionFailErrors) {
auto chunkSplitPoint = BSON("a" << 5);
splitPoints.push_back(chunkSplitPoint);
- setupChunks({chunk});
+ setupChunks({chunk}).transitional_ignore();
auto splitStatus = catalogManager()->commitChunkSplit(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -249,7 +249,7 @@ TEST_F(SplitChunkTest, NonExisingNamespaceErrors) {
std::vector<BSONObj> splitPoints{BSON("a" << 5)};
- setupChunks({chunk});
+ setupChunks({chunk}).transitional_ignore();
auto splitStatus = catalogManager()->commitChunkSplit(operationContext(),
NamespaceString("TestDB.NonExistingColl"),
@@ -275,7 +275,7 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
std::vector<BSONObj> splitPoints{BSON("a" << 5)};
- setupChunks({chunk});
+ setupChunks({chunk}).transitional_ignore();
auto splitStatus = catalogManager()->commitChunkSplit(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -301,7 +301,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) {
std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 4)};
- setupChunks({chunk});
+ setupChunks({chunk}).transitional_ignore();
auto splitStatus = catalogManager()->commitChunkSplit(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -327,7 +327,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) {
std::vector<BSONObj> splitPoints{BSON("a" << 0), BSON("a" << 5)};
- setupChunks({chunk});
+ setupChunks({chunk}).transitional_ignore();
auto splitStatus = catalogManager()->commitChunkSplit(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -353,7 +353,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) {
std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 15)};
- setupChunks({chunk});
+ setupChunks({chunk}).transitional_ignore();
auto splitStatus = catalogManager()->commitChunkSplit(operationContext(),
NamespaceString("TestDB.TestColl"),
diff --git a/src/mongo/s/catalog/sharding_catalog_test.cpp b/src/mongo/s/catalog/sharding_catalog_test.cpp
index 34d33d0a4c9..23a790ff052 100644
--- a/src/mongo/s/catalog/sharding_catalog_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_test.cpp
@@ -121,7 +121,7 @@ TEST_F(ShardingCatalogClientTest, GetCollectionExisting) {
ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
return std::make_tuple(vector<BSONObj>{expectedColl.toBSON()}, builder.obj());
});
@@ -184,7 +184,7 @@ TEST_F(ShardingCatalogClientTest, GetDatabaseExisting) {
ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
return std::make_tuple(vector<BSONObj>{expectedDb.toBSON()}, builder.obj());
});
@@ -402,7 +402,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
return std::make_tuple(vector<BSONObj>{chunkA.toConfigBSON(), chunkB.toConfigBSON()},
builder.obj());
@@ -817,7 +817,7 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsNoDb) {
ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
return std::make_tuple(vector<BSONObj>{coll1.toBSON(), coll2.toBSON(), coll3.toBSON()},
builder.obj());
@@ -2037,7 +2037,7 @@ TEST_F(ShardingCatalogClientTest, BasicReadAfterOpTime) {
ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
return RemoteCommandResponse(BSON("ok" << 1), builder.obj(), Milliseconds(1));
});
@@ -2073,7 +2073,7 @@ TEST_F(ShardingCatalogClientTest, ReadAfterOpTimeShouldNotGoBack) {
ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
return RemoteCommandResponse(BSON("ok" << 1), builder.obj(), Milliseconds(1));
});
@@ -2102,7 +2102,7 @@ TEST_F(ShardingCatalogClientTest, ReadAfterOpTimeShouldNotGoBack) {
ReplSetMetadata metadata(10, oldOpTime, oldOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
return RemoteCommandResponse(BSON("ok" << 1), builder.obj(), Milliseconds(1));
});
@@ -2127,7 +2127,7 @@ TEST_F(ShardingCatalogClientTest, ReadAfterOpTimeShouldNotGoBack) {
ReplSetMetadata metadata(10, oldOpTime, oldOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
return RemoteCommandResponse(BSON("ok" << 1), builder.obj(), Milliseconds(1));
});
@@ -2153,7 +2153,7 @@ TEST_F(ShardingCatalogClientTest, ReadAfterOpTimeFindThenCmd) {
ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
DatabaseType dbType;
dbType.setName("TestDB");
@@ -2215,7 +2215,7 @@ TEST_F(ShardingCatalogClientTest, ReadAfterOpTimeCmdThenFind) {
ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
return RemoteCommandResponse(BSON("ok" << 1), builder.obj(), Milliseconds(1));
});
diff --git a/src/mongo/s/cluster_last_error_info_test.cpp b/src/mongo/s/cluster_last_error_info_test.cpp
index 49913c27b4e..b7a06a58ea1 100644
--- a/src/mongo/s/cluster_last_error_info_test.cpp
+++ b/src/mongo/s/cluster_last_error_info_test.cpp
@@ -75,15 +75,19 @@ TEST_F(ClusterGetLastErrorTest,
BSON("unusued"
<< "obj"),
operationContext());
- executor()->scheduleRemoteCommand(
- request, [=](const executor::TaskExecutor::RemoteCommandCallbackArgs) -> void {});
+ executor()
+ ->scheduleRemoteCommand(
+ request, [=](const executor::TaskExecutor::RemoteCommandCallbackArgs) -> void {})
+ .status_with_transitional_ignore();
});
// Make the reply contain ShardingMetadata.
repl::OpTime opTime{Timestamp{10, 10}, 10};
onCommandWithMetadata([&](const RemoteCommandRequest& request) {
BSONObjBuilder metadataBob;
- rpc::ShardingMetadata(opTime, OID() /* ignored OID field */).writeToMetadata(&metadataBob);
+ rpc::ShardingMetadata(opTime, OID() /* ignored OID field */)
+ .writeToMetadata(&metadataBob)
+ .transitional_ignore();
return RemoteCommandResponse(BSON("ok" << 1), metadataBob.obj(), Milliseconds(1));
});
@@ -116,15 +120,19 @@ TEST_F(ClusterGetLastErrorTest, ClusterLastErrorInfoNotUpdatedIfNotInitialized)
BSON("unusued"
<< "obj"),
operationContext());
- executor()->scheduleRemoteCommand(
- request, [=](const executor::TaskExecutor::RemoteCommandCallbackArgs) -> void {});
+ executor()
+ ->scheduleRemoteCommand(
+ request, [=](const executor::TaskExecutor::RemoteCommandCallbackArgs) -> void {})
+ .status_with_transitional_ignore();
});
// Make the reply contain ShardingMetadata.
repl::OpTime opTime{Timestamp{10, 10}, 10};
onCommandWithMetadata([&](const RemoteCommandRequest& request) {
BSONObjBuilder metadataBob;
- rpc::ShardingMetadata(opTime, OID() /* ignored OID field */).writeToMetadata(&metadataBob);
+ rpc::ShardingMetadata(opTime, OID() /* ignored OID field */)
+ .writeToMetadata(&metadataBob)
+ .transitional_ignore();
return RemoteCommandResponse(BSON("ok" << 1), metadataBob.obj(), Milliseconds(1));
});
@@ -157,8 +165,10 @@ TEST_F(ClusterGetLastErrorTest, ClusterLastErrorInfoNotUpdatedIfReplyDoesntHaveS
BSON("unusued"
<< "obj"),
operationContext());
- executor()->scheduleRemoteCommand(
- request, [=](const executor::TaskExecutor::RemoteCommandCallbackArgs) -> void {});
+ executor()
+ ->scheduleRemoteCommand(
+ request, [=](const executor::TaskExecutor::RemoteCommandCallbackArgs) -> void {})
+ .status_with_transitional_ignore();
});
// Do not return ShardingMetadata in the reply.
diff --git a/src/mongo/s/commands/cluster_drop_database_cmd.cpp b/src/mongo/s/commands/cluster_drop_database_cmd.cpp
index 3ac6de0af42..8f7d6bb81c8 100644
--- a/src/mongo/s/commands/cluster_drop_database_cmd.cpp
+++ b/src/mongo/s/commands/cluster_drop_database_cmd.cpp
@@ -103,11 +103,13 @@ public:
uassertStatusOK(dbInfoStatus.getStatus());
- catalogClient->logChange(opCtx,
- "dropDatabase.start",
- dbname,
- BSONObj(),
- ShardingCatalogClient::kMajorityWriteConcern);
+ catalogClient
+ ->logChange(opCtx,
+ "dropDatabase.start",
+ dbname,
+ BSONObj(),
+ ShardingCatalogClient::kMajorityWriteConcern)
+ .transitional_ignore();
auto& dbInfo = dbInfoStatus.getValue();
@@ -145,8 +147,13 @@ public:
// Invalidate the database so the next access will do a full reload
catalogCache->purgeDatabase(dbname);
- catalogClient->logChange(
- opCtx, "dropDatabase", dbname, BSONObj(), ShardingCatalogClient::kMajorityWriteConcern);
+ catalogClient
+ ->logChange(opCtx,
+ "dropDatabase",
+ dbname,
+ BSONObj(),
+ ShardingCatalogClient::kMajorityWriteConcern)
+ .transitional_ignore();
result.append("dropped", dbname);
return true;
diff --git a/src/mongo/s/commands/cluster_move_primary_cmd.cpp b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
index 45d619c82df..2e0f977753f 100644
--- a/src/mongo/s/commands/cluster_move_primary_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
@@ -167,12 +167,14 @@ public:
const auto shardedColls = getAllShardedCollectionsForDb(opCtx, dbname);
// Record start in changelog
- catalogClient->logChange(
- opCtx,
- "movePrimary.start",
- dbname,
- _buildMoveLogEntry(dbname, fromShard->toString(), toShard->toString(), shardedColls),
- ShardingCatalogClient::kMajorityWriteConcern);
+ catalogClient
+ ->logChange(opCtx,
+ "movePrimary.start",
+ dbname,
+ _buildMoveLogEntry(
+ dbname, fromShard->toString(), toShard->toString(), shardedColls),
+ ShardingCatalogClient::kMajorityWriteConcern)
+ .transitional_ignore();
ScopedDbConnection toconn(toShard->getConnString());
@@ -292,12 +294,13 @@ public:
result << "primary" << toShard->toString();
// Record finish in changelog
- catalogClient->logChange(
- opCtx,
- "movePrimary",
- dbname,
- _buildMoveLogEntry(dbname, oldPrimary, toShard->toString(), shardedColls),
- ShardingCatalogClient::kMajorityWriteConcern);
+ catalogClient
+ ->logChange(opCtx,
+ "movePrimary",
+ dbname,
+ _buildMoveLogEntry(dbname, oldPrimary, toShard->toString(), shardedColls),
+ ShardingCatalogClient::kMajorityWriteConcern)
+ .transitional_ignore();
return true;
}
diff --git a/src/mongo/s/mongos_options.cpp b/src/mongo/s/mongos_options.cpp
index a98cb444fc3..5163ca6c295 100644
--- a/src/mongo/s/mongos_options.cpp
+++ b/src/mongo/s/mongos_options.cpp
@@ -103,16 +103,16 @@ Status addMongosOptions(moe::OptionSection* options) {
.setSources(moe::SourceAllLegacy);
- options->addSection(general_options);
+ options->addSection(general_options).transitional_ignore();
#if defined(_WIN32)
- options->addSection(windows_scm_options);
+ options->addSection(windows_scm_options).transitional_ignore();
#endif
- options->addSection(sharding_options);
+ options->addSection(sharding_options).transitional_ignore();
#ifdef MONGO_CONFIG_SSL
- options->addSection(ssl_options);
+ options->addSection(ssl_options).transitional_ignore();
#endif
return Status::OK();
diff --git a/src/mongo/s/query/async_results_merger.cpp b/src/mongo/s/query/async_results_merger.cpp
index ced16197e98..037765fd5f9 100644
--- a/src/mongo/s/query/async_results_merger.cpp
+++ b/src/mongo/s/query/async_results_merger.cpp
@@ -525,9 +525,11 @@ void AsyncResultsMerger::scheduleKillCursors_inlock(OperationContext* opCtx) {
executor::RemoteCommandRequest request(
remote.getTargetHost(), _params->nsString.db().toString(), cmdObj, opCtx);
- _executor->scheduleRemoteCommand(
- request,
- stdx::bind(&AsyncResultsMerger::handleKillCursorsResponse, stdx::placeholders::_1));
+ _executor
+ ->scheduleRemoteCommand(request,
+ stdx::bind(&AsyncResultsMerger::handleKillCursorsResponse,
+ stdx::placeholders::_1))
+ .status_with_transitional_ignore();
}
}
}
diff --git a/src/mongo/s/query/cluster_cursor_manager_test.cpp b/src/mongo/s/query/cluster_cursor_manager_test.cpp
index dc2c5460c38..6fc0d25da20 100644
--- a/src/mongo/s/query/cluster_cursor_manager_test.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager_test.cpp
@@ -362,11 +362,13 @@ TEST_F(ClusterCursorManagerTest, KillCursorWrongCursorId) {
// Test that killing all mortal expired cursors correctly kills a mortal expired cursor.
TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceBasic) {
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceNotSharded,
- ClusterCursorManager::CursorLifetime::Mortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceNotSharded,
+ ClusterCursorManager::CursorLifetime::Mortal)
+ .status_with_transitional_ignore();
getManager()->killMortalCursorsInactiveSince(getClockSource()->now());
ASSERT(!isMockCursorKilled(0));
getManager()->reapZombieCursors();
@@ -377,11 +379,13 @@ TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceBasic) {
TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceSkipUnexpired) {
Date_t timeBeforeCursorCreation = getClockSource()->now();
getClockSource()->advance(Milliseconds(1));
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceNotSharded,
- ClusterCursorManager::CursorLifetime::Mortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceNotSharded,
+ ClusterCursorManager::CursorLifetime::Mortal)
+ .status_with_transitional_ignore();
getManager()->killMortalCursorsInactiveSince(timeBeforeCursorCreation);
ASSERT(!isMockCursorKilled(0));
getManager()->reapZombieCursors();
@@ -390,11 +394,13 @@ TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceSkipUnexpired) {
// Test that killing all mortal expired cursors does not kill a cursor that is immortal.
TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceSkipImmortal) {
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceNotSharded,
- ClusterCursorManager::CursorLifetime::Immortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceNotSharded,
+ ClusterCursorManager::CursorLifetime::Immortal)
+ .status_with_transitional_ignore();
getManager()->killMortalCursorsInactiveSince(getClockSource()->now());
ASSERT(!isMockCursorKilled(0));
getManager()->reapZombieCursors();
@@ -432,11 +438,13 @@ TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceMultipleCursors)
if (i < numKilledCursorsExpected) {
cutoff = getClockSource()->now();
}
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceNotSharded,
- ClusterCursorManager::CursorLifetime::Mortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceNotSharded,
+ ClusterCursorManager::CursorLifetime::Mortal)
+ .status_with_transitional_ignore();
getClockSource()->advance(Milliseconds(1));
}
getManager()->killMortalCursorsInactiveSince(cutoff);
@@ -457,11 +465,13 @@ TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceMultipleCursors)
TEST_F(ClusterCursorManagerTest, KillAllCursors) {
const size_t numCursors = 10;
for (size_t i = 0; i < numCursors; ++i) {
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceNotSharded,
- ClusterCursorManager::CursorLifetime::Mortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceNotSharded,
+ ClusterCursorManager::CursorLifetime::Mortal)
+ .status_with_transitional_ignore();
}
getManager()->killAllCursors();
for (size_t i = 0; i < numCursors; ++i) {
@@ -506,11 +516,13 @@ TEST_F(ClusterCursorManagerTest, ReapZombieCursorsSkipPinned) {
// Test that reaping does not call kill() on the underlying ClusterClientCursor for cursors that
// haven't been killed.
TEST_F(ClusterCursorManagerTest, ReapZombieCursorsSkipNonZombies) {
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceNotSharded,
- ClusterCursorManager::CursorLifetime::Mortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceNotSharded,
+ ClusterCursorManager::CursorLifetime::Mortal)
+ .status_with_transitional_ignore();
ASSERT(!isMockCursorKilled(0));
getManager()->reapZombieCursors();
ASSERT(!isMockCursorKilled(0));
@@ -525,21 +537,25 @@ TEST_F(ClusterCursorManagerTest, StatsInitAsZero) {
// Test that registering a sharded cursor updates the corresponding counter in stats().
TEST_F(ClusterCursorManagerTest, StatsRegisterShardedCursor) {
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceSharded,
- ClusterCursorManager::CursorLifetime::Mortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceSharded,
+ ClusterCursorManager::CursorLifetime::Mortal)
+ .status_with_transitional_ignore();
ASSERT_EQ(1U, getManager()->stats().cursorsSharded);
}
// Test that registering a not-sharded cursor updates the corresponding counter in stats().
TEST_F(ClusterCursorManagerTest, StatsRegisterNotShardedCursor) {
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceNotSharded,
- ClusterCursorManager::CursorLifetime::Mortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceNotSharded,
+ ClusterCursorManager::CursorLifetime::Mortal)
+ .status_with_transitional_ignore();
ASSERT_EQ(1U, getManager()->stats().cursorsNotSharded);
}
@@ -560,21 +576,25 @@ TEST_F(ClusterCursorManagerTest, StatsPinCursor) {
TEST_F(ClusterCursorManagerTest, StatsRegisterMultipleCursors) {
const size_t numShardedCursors = 10;
for (size_t i = 0; i < numShardedCursors; ++i) {
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceSharded,
- ClusterCursorManager::CursorLifetime::Mortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceSharded,
+ ClusterCursorManager::CursorLifetime::Mortal)
+ .status_with_transitional_ignore();
ASSERT_EQ(i + 1, getManager()->stats().cursorsSharded);
ASSERT_EQ(0U, getManager()->stats().cursorsNotSharded);
}
const size_t numNotShardedCursors = 10;
for (size_t i = 0; i < numNotShardedCursors; ++i) {
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceNotSharded,
- ClusterCursorManager::CursorLifetime::Mortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceNotSharded,
+ ClusterCursorManager::CursorLifetime::Mortal)
+ .status_with_transitional_ignore();
ASSERT_EQ(numShardedCursors, getManager()->stats().cursorsSharded);
ASSERT_EQ(i + 1, getManager()->stats().cursorsNotSharded);
}
diff --git a/src/mongo/s/query/establish_cursors.cpp b/src/mongo/s/query/establish_cursors.cpp
index 905dec8f3c2..52ce9ed63bb 100644
--- a/src/mongo/s/query/establish_cursors.cpp
+++ b/src/mongo/s/query/establish_cursors.cpp
@@ -151,8 +151,10 @@ StatusWith<std::vector<ClusterClientCursorParams::RemoteCursor>> establishCursor
// We do not process the response to the killCursors request (we make a good-faith
// attempt at cleaning up the cursors, but ignore any returned errors).
- executor->scheduleRemoteCommand(
- request, [](const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData) {});
+ executor
+ ->scheduleRemoteCommand(
+ request, [](const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData) {})
+ .status_with_transitional_ignore();
}
return status;
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index af0147eb685..dfb2eb21aa7 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -318,7 +318,10 @@ static ExitCode runMongosServer() {
return EXIT_SHARDING_ERROR;
}
- Grid::get(opCtx.get())->getBalancerConfiguration()->refreshAndCheck(opCtx.get());
+ Grid::get(opCtx.get())
+ ->getBalancerConfiguration()
+ ->refreshAndCheck(opCtx.get())
+ .transitional_ignore();
}
Status status = getGlobalAuthorizationManager()->initialize(NULL);
@@ -344,7 +347,7 @@ static ExitCode runMongosServer() {
// Set up the periodic runner for background job execution
auto runner = makePeriodicRunner();
- runner->startup();
+ runner->startup().transitional_ignore();
getGlobalServiceContext()->setPeriodicRunner(std::move(runner));
// Set up the logical session cache
diff --git a/src/mongo/s/shard_key_pattern.cpp b/src/mongo/s/shard_key_pattern.cpp
index c71a23f0133..5288efab6a9 100644
--- a/src/mongo/s/shard_key_pattern.cpp
+++ b/src/mongo/s/shard_key_pattern.cpp
@@ -208,7 +208,7 @@ BSONObj ShardKeyPattern::normalizeShardKey(const BSONObj& shardKey) const {
static BSONElement extractKeyElementFromMatchable(const MatchableDocument& matchable,
StringData pathStr) {
ElementPath path;
- path.init(pathStr);
+ path.init(pathStr).transitional_ignore();
path.setTraverseNonleafArrays(false);
path.setTraverseLeafArray(false);
diff --git a/src/mongo/s/sharding_mongod_test_fixture.cpp b/src/mongo/s/sharding_mongod_test_fixture.cpp
index 4a0dccb1670..6ab6b1cb119 100644
--- a/src/mongo/s/sharding_mongod_test_fixture.cpp
+++ b/src/mongo/s/sharding_mongod_test_fixture.cpp
@@ -120,9 +120,10 @@ void ShardingMongodTestFixture::setUp() {
serversBob.append(BSON("host" << _servers[i].toString() << "_id" << static_cast<int>(i)));
}
repl::ReplSetConfig replSetConfig;
- replSetConfig.initialize(BSON("_id" << _setName << "protocolVersion" << 1 << "version" << 3
- << "members"
- << serversBob.arr()));
+ replSetConfig
+ .initialize(BSON("_id" << _setName << "protocolVersion" << 1 << "version" << 3 << "members"
+ << serversBob.arr()))
+ .transitional_ignore();
replCoordPtr->setGetConfigReturnValue(replSetConfig);
repl::ReplicationCoordinator::set(service, std::move(replCoordPtr));
@@ -136,7 +137,9 @@ void ShardingMongodTestFixture::setUp() {
service,
stdx::make_unique<repl::ReplicationProcess>(
storagePtr.get(), stdx::make_unique<repl::ReplicationConsistencyMarkersMock>()));
- repl::ReplicationProcess::get(_opCtx.get())->initializeRollbackID(_opCtx.get());
+ repl::ReplicationProcess::get(_opCtx.get())
+ ->initializeRollbackID(_opCtx.get())
+ .transitional_ignore();
repl::StorageInterface::set(service, std::move(storagePtr));
diff --git a/src/mongo/s/sharding_test_fixture.cpp b/src/mongo/s/sharding_test_fixture.cpp
index 1029febad24..a203d24d518 100644
--- a/src/mongo/s/sharding_test_fixture.cpp
+++ b/src/mongo/s/sharding_test_fixture.cpp
@@ -111,7 +111,7 @@ void ShardingTestFixture::setUp() {
auto tlMock = stdx::make_unique<transport::TransportLayerMock>();
_transportLayer = tlMock.get();
service->setTransportLayer(std::move(tlMock));
- _transportLayer->start();
+ _transportLayer->start().transitional_ignore();
// Set the newly created service context to be the current global context so that tests,
// which invoke code still referencing getGlobalServiceContext will work properly.
@@ -148,7 +148,7 @@ void ShardingTestFixture::setUp() {
std::unique_ptr<ShardingCatalogClientImpl> catalogClient(
stdx::make_unique<ShardingCatalogClientImpl>(std::move(uniqueDistLockManager)));
_catalogClient = catalogClient.get();
- catalogClient->startup();
+ catalogClient->startup().transitional_ignore();
ConnectionString configCS = ConnectionString::forReplicaSet(
"configRS", {HostAndPort{"TestHost1"}, HostAndPort{"TestHost2"}});
diff --git a/src/mongo/s/sharding_uptime_reporter.cpp b/src/mongo/s/sharding_uptime_reporter.cpp
index 8db978218fa..8f668b34f56 100644
--- a/src/mongo/s/sharding_uptime_reporter.cpp
+++ b/src/mongo/s/sharding_uptime_reporter.cpp
@@ -70,13 +70,15 @@ void reportStatus(OperationContext* opCtx,
mType.setMongoVersion(VersionInfoInterface::instance().version().toString());
try {
- Grid::get(opCtx)->catalogClient(opCtx)->updateConfigDocument(
- opCtx,
- MongosType::ConfigNS,
- BSON(MongosType::name(instanceId)),
- BSON("$set" << mType.toBSON()),
- true,
- ShardingCatalogClient::kMajorityWriteConcern);
+ Grid::get(opCtx)
+ ->catalogClient(opCtx)
+ ->updateConfigDocument(opCtx,
+ MongosType::ConfigNS,
+ BSON(MongosType::name(instanceId)),
+ BSON("$set" << mType.toBSON()),
+ true,
+ ShardingCatalogClient::kMajorityWriteConcern)
+ .status_with_transitional_ignore();
} catch (const std::exception& e) {
log() << "Caught exception while reporting uptime: " << e.what();
}