summaryrefslogtreecommitdiff
path: root/src/mongo/db/repl
diff options
context:
space:
mode:
authorADAM David Alan Martin <adam.martin@10gen.com>2017-06-18 23:22:02 -0400
committerADAM David Alan Martin <adam.martin@10gen.com>2017-06-18 23:46:57 -0400
commit9abef6f25aadfd04309cb2219068097f93dc961d (patch)
treef88c7f183f201813f363d5d68c1a4a76781ca7ef /src/mongo/db/repl
parenta5f0a84c79b6ce41fef33da920c62be0ecc8f07b (diff)
downloadmongo-9abef6f25aadfd04309cb2219068097f93dc961d.tar.gz
SERVER-27244 Status usage compile-time facilities.
There are numerous places in the codebase where `mongo::Status` or `mongo::StatusWith< T >` objects are returned and never checked. Many of these are innocuous, but many of them are potentially severe bugs. This change introduces facilities to permit compile-time warning of unchecked `Status` and `StatusWith` usage on clang compilers. It introduces an `ignore` function which is useful to state that a specific "ignored status" case was intentional. It is not presently an error, in clang builds, to forget to check a `Status` -- this will come in a later commit. This also introduces a `transitional_ignore` function, which allows for easy continual auditing of the codebase for current "whitelisted" unchecked-status instances. All present "ignored status" cases have been marked `transitional_ignore`.
Diffstat (limited to 'src/mongo/db/repl')
-rw-r--r--src/mongo/db/repl/bgsync.cpp4
-rw-r--r--src/mongo/db/repl/check_quorum_for_config_change_test.cpp2
-rw-r--r--src/mongo/db/repl/collection_cloner_test.cpp6
-rw-r--r--src/mongo/db/repl/database_cloner_test.cpp3
-rw-r--r--src/mongo/db/repl/databases_cloner_test.cpp3
-rw-r--r--src/mongo/db/repl/drop_pending_collection_reaper_test.cpp2
-rw-r--r--src/mongo/db/repl/elect_cmd_runner_test.cpp2
-rw-r--r--src/mongo/db/repl/freshness_checker_test.cpp24
-rw-r--r--src/mongo/db/repl/initial_syncer.cpp2
-rw-r--r--src/mongo/db/repl/initial_syncer_test.cpp11
-rw-r--r--src/mongo/db/repl/oplog.cpp2
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_impl.cpp4
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.cpp24
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect.cpp14
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp20
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp13
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp26
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp59
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp2
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp40
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_test.cpp74
-rw-r--r--src/mongo/db/repl/reporter.cpp2
-rw-r--r--src/mongo/db/repl/rollback_test_fixture.cpp2
-rw-r--r--src/mongo/db/repl/rs_rollback_test.cpp20
-rw-r--r--src/mongo/db/repl/scatter_gather_test.cpp16
-rw-r--r--src/mongo/db/repl/storage_interface_impl_test.cpp2
-rw-r--r--src/mongo/db/repl/sync_source_resolver.cpp32
-rw-r--r--src/mongo/db/repl/topology_coordinator_impl.cpp4
-rw-r--r--src/mongo/db/repl/topology_coordinator_impl_test.cpp271
-rw-r--r--src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp328
30 files changed, 565 insertions, 449 deletions
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index f09dbc6319b..a73815cbb8f 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -329,7 +329,7 @@ void BackgroundSync::_produce(OperationContext* opCtx) {
log() << "Our newest OpTime : " << lastOpTimeFetched;
log() << "Earliest OpTime available is " << syncSourceResp.earliestOpTimeSeen
<< " from " << syncSourceResp.getSyncSource();
- _replCoord->abortCatchupIfNeeded();
+ _replCoord->abortCatchupIfNeeded().transitional_ignore();
return;
}
@@ -586,7 +586,7 @@ void BackgroundSync::_runRollback(OperationContext* opCtx,
StorageInterface* storageInterface) {
if (_replCoord->getMemberState().primary()) {
warning() << "Rollback situation detected in catch-up mode. Aborting catch-up mode.";
- _replCoord->abortCatchupIfNeeded();
+ _replCoord->abortCatchupIfNeeded().transitional_ignore();
return;
}
diff --git a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
index 230a13f43ae..cdf5e7fda84 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
@@ -442,7 +442,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
rpc::ReplSetMetadata::kNoPrimary,
-1);
BSONObjBuilder metadataBuilder;
- metadata.writeToMetadata(&metadataBuilder);
+ metadata.writeToMetadata(&metadataBuilder).transitional_ignore();
getNet()->scheduleResponse(
noi,
diff --git a/src/mongo/db/repl/collection_cloner_test.cpp b/src/mongo/db/repl/collection_cloner_test.cpp
index 31803378b07..4fb7a7fe208 100644
--- a/src/mongo/db/repl/collection_cloner_test.cpp
+++ b/src/mongo/db/repl/collection_cloner_test.cpp
@@ -94,7 +94,9 @@ void CollectionClonerTest::setUp() {
const CollectionOptions& options,
const BSONObj idIndexSpec,
const std::vector<BSONObj>& secondaryIndexSpecs) {
- (_loader = new CollectionBulkLoaderMock(&collectionStats))->init(secondaryIndexSpecs);
+ (_loader = new CollectionBulkLoaderMock(&collectionStats))
+ ->init(secondaryIndexSpecs)
+ .transitional_ignore();
return StatusWith<std::unique_ptr<CollectionBulkLoader>>(
std::unique_ptr<CollectionBulkLoader>(_loader));
@@ -352,7 +354,7 @@ TEST_F(CollectionClonerTest, DoNotCreateIDIndexIfAutoIndexIdUsed) {
collNss = theNss;
collOptions = theOptions;
collIndexSpecs = theIndexSpecs;
- loader->init(theIndexSpecs);
+ loader->init(theIndexSpecs).transitional_ignore();
return std::unique_ptr<CollectionBulkLoader>(loader);
};
diff --git a/src/mongo/db/repl/database_cloner_test.cpp b/src/mongo/db/repl/database_cloner_test.cpp
index 364e737fba7..593090f9ae0 100644
--- a/src/mongo/db/repl/database_cloner_test.cpp
+++ b/src/mongo/db/repl/database_cloner_test.cpp
@@ -101,7 +101,8 @@ void DatabaseClonerTest::setUp() {
const std::vector<BSONObj>& secondaryIndexSpecs) {
const auto collInfo = &_collections[nss];
(collInfo->loader = new CollectionBulkLoaderMock(&collInfo->stats))
- ->init(secondaryIndexSpecs);
+ ->init(secondaryIndexSpecs)
+ .transitional_ignore();
return StatusWith<std::unique_ptr<CollectionBulkLoader>>(
std::unique_ptr<CollectionBulkLoader>(collInfo->loader));
diff --git a/src/mongo/db/repl/databases_cloner_test.cpp b/src/mongo/db/repl/databases_cloner_test.cpp
index 36020cea9a8..5e7ceacae2b 100644
--- a/src/mongo/db/repl/databases_cloner_test.cpp
+++ b/src/mongo/db/repl/databases_cloner_test.cpp
@@ -176,7 +176,8 @@ protected:
log() << "reusing collection during test which may cause problems, ns:" << nss;
}
(collInfo->loader = new CollectionBulkLoaderMock(&collInfo->stats))
- ->init(secondaryIndexSpecs);
+ ->init(secondaryIndexSpecs)
+ .transitional_ignore();
return StatusWith<std::unique_ptr<CollectionBulkLoader>>(
std::unique_ptr<CollectionBulkLoader>(collInfo->loader));
diff --git a/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp b/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp
index 8bafea199b9..4dcabbcdd09 100644
--- a/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp
+++ b/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp
@@ -132,7 +132,7 @@ TEST_F(DropPendingCollectionReaperTest,
opTime[i] = OpTime({Seconds((i + 1) * 10), 0}, 1LL);
ns[i] = NamespaceString("test", str::stream() << "coll" << i);
dpns[i] = ns[i].makeDropPendingNamespace(opTime[i]);
- _storageInterface->createCollection(opCtx.get(), dpns[i], {});
+ _storageInterface->createCollection(opCtx.get(), dpns[i], {}).transitional_ignore();
}
// Add drop-pending namespaces with drop optimes out of order and check that
diff --git a/src/mongo/db/repl/elect_cmd_runner_test.cpp b/src/mongo/db/repl/elect_cmd_runner_test.cpp
index d9872693f8e..a327208c172 100644
--- a/src/mongo/db/repl/elect_cmd_runner_test.cpp
+++ b/src/mongo/db/repl/elect_cmd_runner_test.cpp
@@ -236,7 +236,7 @@ public:
int selfConfigIndex = 0;
ReplSetConfig config;
- config.initialize(configObj);
+ config.initialize(configObj).transitional_ignore();
std::vector<HostAndPort> hosts;
for (ReplSetConfig::MemberIterator mem = ++config.membersBegin();
diff --git a/src/mongo/db/repl/freshness_checker_test.cpp b/src/mongo/db/repl/freshness_checker_test.cpp
index d9f0b9a8872..f0b86dbd0af 100644
--- a/src/mongo/db/repl/freshness_checker_test.cpp
+++ b/src/mongo/db/repl/freshness_checker_test.cpp
@@ -833,17 +833,19 @@ public:
Timestamp lastOpTimeApplied(100, 0);
ReplSetConfig config;
- config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host0")
- << BSON("_id" << 1 << "host"
- << "host1")
- << BSON("_id" << 2 << "host"
- << "host2"))));
+ config
+ .initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2"))))
+ .transitional_ignore();
std::vector<HostAndPort> hosts;
for (ReplSetConfig::MemberIterator mem = ++config.membersBegin();
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 6ef1ad277d2..54218230581 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -232,7 +232,7 @@ InitialSyncer::InitialSyncer(
InitialSyncer::~InitialSyncer() {
DESTRUCTOR_GUARD({
- shutdown();
+ shutdown().transitional_ignore();
join();
});
}
diff --git a/src/mongo/db/repl/initial_syncer_test.cpp b/src/mongo/db/repl/initial_syncer_test.cpp
index 1a55a5d83dc..8dc0b62f53a 100644
--- a/src/mongo/db/repl/initial_syncer_test.cpp
+++ b/src/mongo/db/repl/initial_syncer_test.cpp
@@ -269,7 +269,8 @@ protected:
log() << "reusing collection during test which may cause problems, ns:" << nss;
}
(collInfo->loader = new CollectionBulkLoaderMock(&collInfo->stats))
- ->init(secondaryIndexSpecs);
+ ->init(secondaryIndexSpecs)
+ .transitional_ignore();
return StatusWith<std::unique_ptr<CollectionBulkLoader>>(
std::unique_ptr<CollectionBulkLoader>(collInfo->loader));
@@ -903,7 +904,7 @@ TEST_F(InitialSyncerTest, InitialSyncerRecreatesOplogAndDropsReplicatedDatabases
auto oldCreateOplogFn = _storageInterface->createOplogFn;
_storageInterface->createOplogFn = [oldCreateOplogFn](OperationContext* opCtx,
const NamespaceString& nss) {
- oldCreateOplogFn(opCtx, nss);
+ oldCreateOplogFn(opCtx, nss).transitional_ignore();
return Status(ErrorCodes::OperationFailed, "oplog creation failed");
};
@@ -1856,7 +1857,7 @@ TEST_F(InitialSyncerTest,
net->blackHole(noi);
}
- initialSyncer->shutdown();
+ initialSyncer->shutdown().transitional_ignore();
executor::NetworkInterfaceMock::InNetworkGuard(net)->runReadyNetworkOperations();
initialSyncer->join();
@@ -2086,7 +2087,7 @@ TEST_F(
OperationContext*, const NamespaceString& nss, const BSONObj& doc) {
insertDocumentNss = nss;
insertDocumentDoc = doc;
- initialSyncer->shutdown();
+ initialSyncer->shutdown().transitional_ignore();
return Status::OK();
};
@@ -3130,7 +3131,7 @@ TEST_F(
const MultiApplier::Operations& ops,
MultiApplier::ApplyOperationFn applyOperation) {
// 'OperationPtr*' is ignored by our overridden _multiInitialSyncApply().
- applyOperation(nullptr);
+ applyOperation(nullptr).transitional_ignore();
return ops.back().getOpTime();
};
bool fetchCountIncremented = false;
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index bf69d4e02bd..79e22f471c3 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -1267,7 +1267,7 @@ void SnapshotThread::run() {
name = replCoord->reserveSnapshotName(nullptr);
// This establishes the view that we will name.
- _manager->prepareForCreateSnapshot(opCtx.get());
+ _manager->prepareForCreateSnapshot(opCtx.get()).transitional_ignore();
}
auto opTimeOfSnapshot = OpTime();
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 961926a1f35..c35fa20b046 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -859,7 +859,7 @@ void ReplicationCoordinatorExternalStateImpl::createSnapshot(OperationContext* o
SnapshotName name) {
auto manager = _service->getGlobalStorageEngine()->getSnapshotManager();
invariant(manager); // This should never be called if there is no SnapshotManager.
- manager->createSnapshot(opCtx, name);
+ manager->createSnapshot(opCtx, name).transitional_ignore();
}
void ReplicationCoordinatorExternalStateImpl::forceSnapshotCreation() {
@@ -959,7 +959,7 @@ void ReplicationCoordinatorExternalStateImpl::onDurable(const JournalListener::T
void ReplicationCoordinatorExternalStateImpl::startNoopWriter(OpTime opTime) {
invariant(_noopWriter);
- _noopWriter->startWritingPeriodicNoops(opTime);
+ _noopWriter->startWritingPeriodicNoops(opTime).transitional_ignore();
}
void ReplicationCoordinatorExternalStateImpl::stopNoopWriter() {
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index bb2c3e1f0da..2d3ba978e54 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -611,7 +611,7 @@ void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* opCtx,
// Clear maint. mode.
while (getMaintenanceMode()) {
- setMaintenanceMode(false);
+ setMaintenanceMode(false).transitional_ignore();
}
if (startCompleted) {
@@ -2208,14 +2208,16 @@ void ReplicationCoordinatorImpl::_finishReplSetReconfig(
// Do not conduct an election during a reconfig, as the node may not be electable post-reconfig.
if (auto electionFinishedEvent = _cancelElectionIfNeeded_inlock()) {
// Wait for the election to complete and the node's Role to be set to follower.
- _replExecutor->onEvent(electionFinishedEvent,
- stdx::bind(&ReplicationCoordinatorImpl::_finishReplSetReconfig,
- this,
- stdx::placeholders::_1,
- newConfig,
- isForceReconfig,
- myIndex,
- finishedEvent));
+ _replExecutor
+ ->onEvent(electionFinishedEvent,
+ stdx::bind(&ReplicationCoordinatorImpl::_finishReplSetReconfig,
+ this,
+ stdx::placeholders::_1,
+ newConfig,
+ isForceReconfig,
+ myIndex,
+ finishedEvent))
+ .status_with_transitional_ignore();
return;
}
@@ -3028,12 +3030,12 @@ void ReplicationCoordinatorImpl::_prepareReplSetMetadata_inlock(const OpTime& la
OpTime lastVisibleOpTime =
std::max(lastOpTimeFromClient, _getCurrentCommittedSnapshotOpTime_inlock());
auto metadata = _topCoord->prepareReplSetMetadata(lastVisibleOpTime);
- metadata.writeToMetadata(builder);
+ metadata.writeToMetadata(builder).transitional_ignore();
}
void ReplicationCoordinatorImpl::_prepareOplogQueryMetadata_inlock(int rbid,
BSONObjBuilder* builder) const {
- _topCoord->prepareOplogQueryMetadata(rbid).writeToMetadata(builder);
+ _topCoord->prepareOplogQueryMetadata(rbid).writeToMetadata(builder).transitional_ignore();
}
bool ReplicationCoordinatorImpl::isV1ElectionProtocol() const {
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
index 322c84246e6..91b1c5dfc15 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
@@ -142,9 +142,10 @@ void ReplicationCoordinatorImpl::_startElectSelf_inlock() {
return;
}
fassert(18681, nextPhaseEvh.getStatus());
- _replExecutor->onEvent(
- nextPhaseEvh.getValue(),
- stdx::bind(&ReplicationCoordinatorImpl::_onFreshnessCheckComplete, this));
+ _replExecutor
+ ->onEvent(nextPhaseEvh.getValue(),
+ stdx::bind(&ReplicationCoordinatorImpl::_onFreshnessCheckComplete, this))
+ .status_with_transitional_ignore();
lossGuard.dismiss();
}
@@ -217,9 +218,10 @@ void ReplicationCoordinatorImpl::_onFreshnessCheckComplete() {
}
fassert(18685, nextPhaseEvh.getStatus());
- _replExecutor->onEvent(
- nextPhaseEvh.getValue(),
- stdx::bind(&ReplicationCoordinatorImpl::_onElectCmdRunnerComplete, this));
+ _replExecutor
+ ->onEvent(nextPhaseEvh.getValue(),
+ stdx::bind(&ReplicationCoordinatorImpl::_onElectCmdRunnerComplete, this))
+ .status_with_transitional_ignore();
lossGuard.dismiss();
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
index 5462732a99e..f09de3c8b80 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
@@ -391,15 +391,17 @@ TEST_F(ReplCoordElectTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
net->enterNetwork();
ReplSetHeartbeatResponse hbResp2;
ReplSetConfig config;
- config.initialize(BSON("_id"
- << "mySet"
- << "version"
- << 3
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))));
+ config
+ .initialize(BSON("_id"
+ << "mySet"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))))
+ .transitional_ignore();
hbResp2.setConfig(config);
hbResp2.setConfigVersion(3);
hbResp2.setSetName("mySet");
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
index d227f78b76b..394878e52ec 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
@@ -149,8 +149,10 @@ void ReplicationCoordinatorImpl::_startElectSelfV1_inlock() {
return;
}
fassert(28685, nextPhaseEvh.getStatus());
- _replExecutor->onEvent(nextPhaseEvh.getValue(),
- stdx::bind(&ReplicationCoordinatorImpl::_onDryRunComplete, this, term));
+ _replExecutor
+ ->onEvent(nextPhaseEvh.getValue(),
+ stdx::bind(&ReplicationCoordinatorImpl::_onDryRunComplete, this, term))
+ .status_with_transitional_ignore();
lossGuard.dismiss();
}
@@ -244,9 +246,10 @@ void ReplicationCoordinatorImpl::_startVoteRequester_inlock(long long newTerm) {
return;
}
fassert(28643, nextPhaseEvh.getStatus());
- _replExecutor->onEvent(
- nextPhaseEvh.getValue(),
- stdx::bind(&ReplicationCoordinatorImpl::_onVoteRequestComplete, this, newTerm));
+ _replExecutor
+ ->onEvent(nextPhaseEvh.getValue(),
+ stdx::bind(&ReplicationCoordinatorImpl::_onVoteRequestComplete, this, newTerm))
+ .status_with_transitional_ignore();
}
void ReplicationCoordinatorImpl::_onVoteRequestComplete(long long originalTerm) {
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
index a1c34534c3f..6d0ade99358 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
@@ -470,17 +470,19 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
net->enterNetwork();
ReplSetHeartbeatResponse hbResp2;
ReplSetConfig config;
- config.initialize(BSON("_id"
- << "mySet"
- << "version"
- << 3
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))
- << "protocolVersion"
- << 1));
+ config
+ .initialize(BSON("_id"
+ << "mySet"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))
+ << "protocolVersion"
+ << 1))
+ .transitional_ignore();
hbResp2.setConfig(config);
hbResp2.setConfigVersion(3);
hbResp2.setSetName("mySet");
@@ -759,7 +761,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringActualElection) {
simulateEnoughHeartbeatsForAllNodesUp();
simulateSuccessfulDryRun();
// update to a future term before the election completes
- getReplCoord()->updateTerm(&opCtx, 1000);
+ getReplCoord()->updateTerm(&opCtx, 1000).transitional_ignore();
NetworkInterfaceMock* net = getNet();
net->enterNetwork();
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 04216de9202..379a18fb231 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -337,8 +337,12 @@ executor::TaskExecutor::EventHandle ReplicationCoordinatorImpl::_stepDownStart()
return finishEvent;
}
- _replExecutor->scheduleWork(stdx::bind(
- &ReplicationCoordinatorImpl::_stepDownFinish, this, stdx::placeholders::_1, finishEvent));
+ _replExecutor
+ ->scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_stepDownFinish,
+ this,
+ stdx::placeholders::_1,
+ finishEvent))
+ .status_with_transitional_ignore();
return finishEvent;
}
@@ -398,17 +402,21 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig_inlock(const ReplSet
<< newConfig.getConfigVersion()
<< " to be processed after election is cancelled.";
- _replExecutor->onEvent(electionFinishedEvent,
- stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigStore,
- this,
- stdx::placeholders::_1,
- newConfig));
+ _replExecutor
+ ->onEvent(electionFinishedEvent,
+ stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigStore,
+ this,
+ stdx::placeholders::_1,
+ newConfig))
+ .status_with_transitional_ignore();
return;
}
- _replExecutor->scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigStore,
- this,
- stdx::placeholders::_1,
- newConfig));
+ _replExecutor
+ ->scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigStore,
+ this,
+ stdx::placeholders::_1,
+ newConfig))
+ .status_with_transitional_ignore();
}
void ReplicationCoordinatorImpl::_heartbeatReconfigStore(
@@ -490,13 +498,14 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
if (MONGO_FAIL_POINT(blockHeartbeatReconfigFinish)) {
LOG_FOR_HEARTBEATS(0) << "blockHeartbeatReconfigFinish fail point enabled. Rescheduling "
"_heartbeatReconfigFinish until fail point is disabled.";
- _replExecutor->scheduleWorkAt(
- _replExecutor->now() + Milliseconds{10},
- stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigFinish,
- this,
- stdx::placeholders::_1,
- newConfig,
- myIndex));
+ _replExecutor
+ ->scheduleWorkAt(_replExecutor->now() + Milliseconds{10},
+ stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigFinish,
+ this,
+ stdx::placeholders::_1,
+ newConfig,
+ myIndex))
+ .status_with_transitional_ignore();
return;
}
@@ -522,12 +531,14 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
<< "Waiting for election to complete before finishing reconfig to version "
<< newConfig.getConfigVersion();
// Wait for the election to complete and the node's Role to be set to follower.
- _replExecutor->onEvent(electionFinishedEvent,
- stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigFinish,
- this,
- stdx::placeholders::_1,
- newConfig,
- myIndex));
+ _replExecutor
+ ->onEvent(electionFinishedEvent,
+ stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigFinish,
+ this,
+ stdx::placeholders::_1,
+ newConfig,
+ myIndex))
+ .status_with_transitional_ignore();
return;
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
index 59eee099c93..9a00ab76f2e 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
@@ -379,7 +379,7 @@ TEST_F(ReplCoordHBV1Test, IgnoreTheContentsOfMetadataWhenItsReplicaSetIdDoesNotM
rpc::ReplSetMetadata metadata(
opTime.getTerm(), opTime, opTime, rsConfig.getConfigVersion(), unexpectedId, 1, -1);
BSONObjBuilder metadataBuilder;
- metadata.writeToMetadata(&metadataBuilder);
+ metadata.writeToMetadata(&metadataBuilder).transitional_ignore();
heartbeatResponse = makeResponseStatus(responseBuilder.obj(), metadataBuilder.obj());
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
index 761359552ed..e6fecc98450 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
@@ -520,15 +520,17 @@ TEST_F(
net->enterNetwork();
ReplSetHeartbeatResponse hbResp2;
ReplSetConfig config;
- config.initialize(BSON("_id"
- << "mySet"
- << "version"
- << 3
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))));
+ config
+ .initialize(BSON("_id"
+ << "mySet"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))))
+ .transitional_ignore();
hbResp2.setConfig(config);
hbResp2.setConfigVersion(3);
hbResp2.setSetName("mySet");
@@ -591,15 +593,17 @@ TEST_F(ReplCoordTest, NodeDoesNotAcceptHeartbeatReconfigWhileInTheMidstOfReconfi
const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
ReplSetHeartbeatResponse hbResp;
ReplSetConfig config;
- config.initialize(BSON("_id"
- << "mySet"
- << "version"
- << 4
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))));
+ config
+ .initialize(BSON("_id"
+ << "mySet"
+ << "version"
+ << 4
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))))
+ .transitional_ignore();
hbResp.setConfig(config);
hbResp.setConfigVersion(4);
hbResp.setSetName("mySet");
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index 2fd396c1218..896cea7bd5c 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -979,10 +979,10 @@ TEST_F(
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
// Majority satisfied but not either custom mode
- getReplCoord()->setLastAppliedOptime_forTest(2, 1, time1);
- getReplCoord()->setLastDurableOptime_forTest(2, 1, time1);
- getReplCoord()->setLastAppliedOptime_forTest(2, 2, time1);
- getReplCoord()->setLastDurableOptime_forTest(2, 2, time1);
+ getReplCoord()->setLastAppliedOptime_forTest(2, 1, time1).transitional_ignore();
+ getReplCoord()->setLastDurableOptime_forTest(2, 1, time1).transitional_ignore();
+ getReplCoord()->setLastAppliedOptime_forTest(2, 2, time1).transitional_ignore();
+ getReplCoord()->setLastDurableOptime_forTest(2, 2, time1).transitional_ignore();
getReplCoord()->createSnapshot(opCtx.get(), time1, SnapshotName(1));
statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, majorityWriteConcern);
@@ -993,8 +993,8 @@ TEST_F(
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
// All modes satisfied
- getReplCoord()->setLastAppliedOptime_forTest(2, 3, time1);
- getReplCoord()->setLastDurableOptime_forTest(2, 3, time1);
+ getReplCoord()->setLastAppliedOptime_forTest(2, 3, time1).transitional_ignore();
+ getReplCoord()->setLastDurableOptime_forTest(2, 3, time1).transitional_ignore();
statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, majorityWriteConcern);
ASSERT_OK(statusAndDur.status);
@@ -1039,8 +1039,8 @@ TEST_F(
// multiDC satisfied but not majority or multiRack
getReplCoord()->setMyLastAppliedOpTime(time2);
getReplCoord()->setMyLastDurableOpTime(time2);
- getReplCoord()->setLastAppliedOptime_forTest(2, 3, time2);
- getReplCoord()->setLastDurableOptime_forTest(2, 3, time2);
+ getReplCoord()->setLastAppliedOptime_forTest(2, 3, time2).transitional_ignore();
+ getReplCoord()->setLastDurableOptime_forTest(2, 3, time2).transitional_ignore();
statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, majorityWriteConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
@@ -2333,7 +2333,7 @@ TEST_F(ReplCoordTest, DoNotAllowMaintenanceModeWhilePrimary) {
// Step down from primary.
- getReplCoord()->updateTerm(opCtx.get(), getReplCoord()->getTerm() + 1);
+ getReplCoord()->updateTerm(opCtx.get(), getReplCoord()->getTerm() + 1).transitional_ignore();
ASSERT_OK(getReplCoord()->waitForMemberState(MemberState::RS_SECONDARY, Seconds(1)));
status = getReplCoord()->setMaintenanceMode(false);
@@ -2369,8 +2369,10 @@ TEST_F(ReplCoordTest, DoNotAllowSettingMaintenanceModeWhileConductingAnElection)
// Step down from primary.
- getReplCoord()->updateTerm(opCtx.get(), getReplCoord()->getTerm() + 1);
- getReplCoord()->waitForMemberState(MemberState::RS_SECONDARY, Milliseconds(10 * 1000));
+ getReplCoord()->updateTerm(opCtx.get(), getReplCoord()->getTerm() + 1).transitional_ignore();
+ getReplCoord()
+ ->waitForMemberState(MemberState::RS_SECONDARY, Milliseconds(10 * 1000))
+ .transitional_ignore();
// Can't modify maintenance mode when running for election (before and after dry run).
ASSERT_EQUALS(TopologyCoordinator::Role::follower, getTopoCoord().getRole());
@@ -3863,7 +3865,7 @@ TEST_F(ReplCoordTest, UpdateLastCommittedOpTimeWhenTheLastCommittedOpTimeIsNewer
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
- getReplCoord()->updateTerm(opCtx.get(), 1);
+ getReplCoord()->updateTerm(opCtx.get(), 1).transitional_ignore();
ASSERT_EQUALS(1, getReplCoord()->getTerm());
OpTime time(Timestamp(10, 0), 1);
@@ -3905,7 +3907,7 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
- getReplCoord()->updateTerm(opCtx.get(), 1);
+ getReplCoord()->updateTerm(opCtx.get(), 1).transitional_ignore();
ASSERT_EQUALS(1, getReplCoord()->getTerm());
// higher term, should change
@@ -3984,7 +3986,7 @@ TEST_F(ReplCoordTest,
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
- getReplCoord()->updateTerm(opCtx.get(), 1);
+ getReplCoord()->updateTerm(opCtx.get(), 1).transitional_ignore();
ASSERT_EQUALS(1, getReplCoord()->getTerm());
auto replCoord = getReplCoord();
@@ -4110,7 +4112,7 @@ TEST_F(ReplCoordTest, TermAndLastCommittedOpTimeUpdatedFromHeartbeatWhenArbiter)
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
- getReplCoord()->updateTerm(opCtx.get(), 1);
+ getReplCoord()->updateTerm(opCtx.get(), 1).transitional_ignore();
ASSERT_EQUALS(1, getReplCoord()->getTerm());
auto replCoord = getReplCoord();
@@ -4330,17 +4332,19 @@ TEST_F(ReplCoordTest,
// Respond to node1's heartbeat command with a config that excludes node1.
ReplSetHeartbeatResponse hbResp;
ReplSetConfig config;
- config.initialize(BSON("_id"
- << "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 3
- << "members"
- << BSON_ARRAY(BSON("host"
- << "node2:12345"
- << "_id"
- << 1))));
+ config
+ .initialize(BSON("_id"
+ << "mySet"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node2:12345"
+ << "_id"
+ << 1))))
+ .transitional_ignore();
hbResp.setConfig(config);
hbResp.setConfigVersion(3);
hbResp.setSetName("mySet");
@@ -4735,9 +4739,11 @@ TEST_F(ReplCoordTest, OnlyForwardSyncProgressForOtherNodesWhenTheNodesAreBelieve
memberIds.insert(memberId);
OpTime appliedOpTime;
OpTime durableOpTime;
- bsonExtractOpTimeField(entry, UpdatePositionArgs::kAppliedOpTimeFieldName, &appliedOpTime);
+ bsonExtractOpTimeField(entry, UpdatePositionArgs::kAppliedOpTimeFieldName, &appliedOpTime)
+ .transitional_ignore();
ASSERT_EQUALS(optime, appliedOpTime);
- bsonExtractOpTimeField(entry, UpdatePositionArgs::kDurableOpTimeFieldName, &durableOpTime);
+ bsonExtractOpTimeField(entry, UpdatePositionArgs::kDurableOpTimeFieldName, &durableOpTime)
+ .transitional_ignore();
ASSERT_EQUALS(optime, durableOpTime);
}
ASSERT_EQUALS(2U, memberIds.size());
@@ -4751,7 +4757,8 @@ TEST_F(ReplCoordTest, OnlyForwardSyncProgressForOtherNodesWhenTheNodesAreBelieve
long long memberId = entry[OldUpdatePositionArgs::kMemberIdFieldName].Number();
memberIds2.insert(memberId);
OpTime entryOpTime;
- bsonExtractOpTimeField(entry, OldUpdatePositionArgs::kOpTimeFieldName, &entryOpTime);
+ bsonExtractOpTimeField(entry, OldUpdatePositionArgs::kOpTimeFieldName, &entryOpTime)
+ .transitional_ignore();
ASSERT_EQUALS(optime, entryOpTime);
}
ASSERT_EQUALS(2U, memberIds2.size());
@@ -4779,9 +4786,11 @@ TEST_F(ReplCoordTest, OnlyForwardSyncProgressForOtherNodesWhenTheNodesAreBelieve
memberIds3.insert(memberId);
OpTime appliedOpTime;
OpTime durableOpTime;
- bsonExtractOpTimeField(entry, UpdatePositionArgs::kAppliedOpTimeFieldName, &appliedOpTime);
+ bsonExtractOpTimeField(entry, UpdatePositionArgs::kAppliedOpTimeFieldName, &appliedOpTime)
+ .transitional_ignore();
ASSERT_EQUALS(optime, appliedOpTime);
- bsonExtractOpTimeField(entry, UpdatePositionArgs::kDurableOpTimeFieldName, &durableOpTime);
+ bsonExtractOpTimeField(entry, UpdatePositionArgs::kDurableOpTimeFieldName, &durableOpTime)
+ .transitional_ignore();
ASSERT_EQUALS(optime, durableOpTime);
}
ASSERT_EQUALS(1U, memberIds3.size());
@@ -4795,7 +4804,8 @@ TEST_F(ReplCoordTest, OnlyForwardSyncProgressForOtherNodesWhenTheNodesAreBelieve
long long memberId = entry[OldUpdatePositionArgs::kMemberIdFieldName].Number();
memberIds4.insert(memberId);
OpTime entryOpTime;
- bsonExtractOpTimeField(entry, OldUpdatePositionArgs::kOpTimeFieldName, &entryOpTime);
+ bsonExtractOpTimeField(entry, OldUpdatePositionArgs::kOpTimeFieldName, &entryOpTime)
+ .transitional_ignore();
ASSERT_EQUALS(optime, entryOpTime);
}
ASSERT_EQUALS(1U, memberIds4.size());
diff --git a/src/mongo/db/repl/reporter.cpp b/src/mongo/db/repl/reporter.cpp
index 64b7dd27e12..b22c180bb7c 100644
--- a/src/mongo/db/repl/reporter.cpp
+++ b/src/mongo/db/repl/reporter.cpp
@@ -110,7 +110,7 @@ Reporter::Reporter(executor::TaskExecutor* executor,
}
Reporter::~Reporter() {
- DESTRUCTOR_GUARD(shutdown(); join(););
+ DESTRUCTOR_GUARD(shutdown(); join().transitional_ignore(););
}
std::string Reporter::toString() const {
diff --git a/src/mongo/db/repl/rollback_test_fixture.cpp b/src/mongo/db/repl/rollback_test_fixture.cpp
index cd77a41ba95..d1e794e015d 100644
--- a/src/mongo/db/repl/rollback_test_fixture.cpp
+++ b/src/mongo/db/repl/rollback_test_fixture.cpp
@@ -82,7 +82,7 @@ void RollbackTest::setUp() {
_opCtx = cc().makeOperationContext();
_replicationProcess->getConsistencyMarkers()->setAppliedThrough(_opCtx.get(), OpTime{});
_replicationProcess->getConsistencyMarkers()->setMinValid(_opCtx.get(), OpTime{});
- _replicationProcess->initializeRollbackID(_opCtx.get());
+ _replicationProcess->initializeRollbackID(_opCtx.get()).transitional_ignore();
_threadPoolExecutorTest.launchExecutorThread();
}
diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp
index 6e3615ceee0..048aa72ab6d 100644
--- a/src/mongo/db/repl/rs_rollback_test.cpp
+++ b/src/mongo/db/repl/rs_rollback_test.cpp
@@ -188,7 +188,8 @@ TEST_F(RSRollbackTest, RemoteGetRollbackIdThrows) {
RollbackSourceLocal(stdx::make_unique<OplogInterfaceMock>()),
{},
_coordinator,
- _replicationProcess.get()),
+ _replicationProcess.get())
+ .transitional_ignore(),
UserException,
ErrorCodes::UnknownError);
}
@@ -211,7 +212,8 @@ TEST_F(RSRollbackTest, RemoteGetRollbackIdDiffersFromRequiredRBID) {
RollbackSourceLocal(stdx::make_unique<OplogInterfaceMock>()),
1,
_coordinator,
- _replicationProcess.get()),
+ _replicationProcess.get())
+ .transitional_ignore(),
UserException,
ErrorCodes::Error(40362));
}
@@ -243,7 +245,7 @@ Collection* _createCollection(OperationContext* opCtx,
mongo::WriteUnitOfWork wuow(opCtx);
auto db = dbHolder().openDb(opCtx, nss.db());
ASSERT_TRUE(db);
- db->dropCollection(opCtx, nss.ns());
+ db->dropCollection(opCtx, nss.ns()).transitional_ignore();
auto coll = db->createCollection(opCtx, nss.ns(), options);
ASSERT_TRUE(coll);
wuow.commit();
@@ -899,7 +901,8 @@ TEST_F(RSRollbackTest, RollbackDropCollectionCommandFailsIfRBIDChangesWhileSynci
rollbackSource,
0,
_coordinator,
- _replicationProcess.get()),
+ _replicationProcess.get())
+ .transitional_ignore(),
DBException,
40365);
ASSERT(rollbackSource.copyCollectionCalled);
@@ -1160,7 +1163,8 @@ TEST(RSRollbackTest, LocalEntryWithoutNsIsFatal) {
const auto validOplogEntry = fromjson("{op: 'i', ns: 'test.t', o: {_id:1, a: 1}}");
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry));
- ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry.removeField("ns")),
+ ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry.removeField("ns"))
+ .transitional_ignore(),
RSFatalException);
}
@@ -1168,7 +1172,8 @@ TEST(RSRollbackTest, LocalEntryWithoutOIsFatal) {
const auto validOplogEntry = fromjson("{op: 'i', ns: 'test.t', o: {_id:1, a: 1}}");
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry));
- ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry.removeField("o")),
+ ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry.removeField("o"))
+ .transitional_ignore(),
RSFatalException);
}
@@ -1177,7 +1182,8 @@ TEST(RSRollbackTest, LocalEntryWithoutO2IsFatal) {
fromjson("{op: 'u', ns: 'test.t', o2: {_id: 1}, o: {_id:1, a: 1}}");
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry));
- ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry.removeField("o2")),
+ ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry.removeField("o2"))
+ .transitional_ignore(),
RSFatalException);
}
diff --git a/src/mongo/db/repl/scatter_gather_test.cpp b/src/mongo/db/repl/scatter_gather_test.cpp
index 294b2c84ed7..3f4d8d4d5cd 100644
--- a/src/mongo/db/repl/scatter_gather_test.cpp
+++ b/src/mongo/db/repl/scatter_gather_test.cpp
@@ -157,7 +157,9 @@ TEST_F(ScatterGatherTest, DeleteAlgorithmAfterItHasCompleted) {
ScatterGatherRunner* sgr = new ScatterGatherRunner(sga, &getExecutor());
bool ranCompletion = false;
StatusWith<executor::TaskExecutor::EventHandle> status = sgr->start();
- getExecutor().onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion));
+ getExecutor()
+ .onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion))
+ .status_with_transitional_ignore();
ASSERT_OK(status.getStatus());
ASSERT_FALSE(ranCompletion);
@@ -245,7 +247,9 @@ TEST_F(ScatterGatherTest, ShutdownExecutorAfterStart) {
ScatterGatherRunner sgr(&sga, &getExecutor());
bool ranCompletion = false;
StatusWith<executor::TaskExecutor::EventHandle> status = sgr.start();
- getExecutor().onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion));
+ getExecutor()
+ .onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion))
+ .status_with_transitional_ignore();
shutdownExecutorThread();
sga.finish();
ASSERT_FALSE(ranCompletion);
@@ -258,7 +262,9 @@ TEST_F(ScatterGatherTest, DoNotProcessMoreThanSufficientResponses) {
ScatterGatherRunner sgr(&sga, &getExecutor());
bool ranCompletion = false;
StatusWith<executor::TaskExecutor::EventHandle> status = sgr.start();
- getExecutor().onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion));
+ getExecutor()
+ .onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion))
+ .status_with_transitional_ignore();
ASSERT_OK(status.getStatus());
ASSERT_FALSE(ranCompletion);
@@ -300,7 +306,9 @@ TEST_F(ScatterGatherTest, DoNotCreateCallbacksIfHasSufficientResponsesReturnsTru
ScatterGatherRunner sgr(&sga, &getExecutor());
bool ranCompletion = false;
StatusWith<executor::TaskExecutor::EventHandle> status = sgr.start();
- getExecutor().onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion));
+ getExecutor()
+ .onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion))
+ .status_with_transitional_ignore();
ASSERT_OK(status.getStatus());
// Wait until callback finishes.
NetworkInterfaceMock* net = getNet();
diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp
index c01a1badff2..57eb9d4d262 100644
--- a/src/mongo/db/repl/storage_interface_impl_test.cpp
+++ b/src/mongo/db/repl/storage_interface_impl_test.cpp
@@ -133,7 +133,7 @@ int64_t getIndexKeyCount(OperationContext* opCtx, IndexCatalog* cat, IndexDescri
auto idx = cat->getIndex(desc);
int64_t numKeys;
ValidateResults fullRes;
- idx->validate(opCtx, &numKeys, &fullRes);
+ idx->validate(opCtx, &numKeys, &fullRes).transitional_ignore();
return numKeys;
}
diff --git a/src/mongo/db/repl/sync_source_resolver.cpp b/src/mongo/db/repl/sync_source_resolver.cpp
index 1218ffa2cc2..77579cc24ad 100644
--- a/src/mongo/db/repl/sync_source_resolver.cpp
+++ b/src/mongo/db/repl/sync_source_resolver.cpp
@@ -256,12 +256,13 @@ void SyncSourceResolver::_firstOplogEntryFetcherCallback(
_finishCallback(Status(ErrorCodes::CallbackCanceled,
str::stream()
<< "sync source resolver shut down while probing candidate: "
- << candidate));
+ << candidate))
+ .transitional_ignore();
return;
}
if (ErrorCodes::CallbackCanceled == queryResult.getStatus()) {
- _finishCallback(queryResult.getStatus());
+ _finishCallback(queryResult.getStatus()).transitional_ignore();
return;
}
@@ -272,14 +273,14 @@ void SyncSourceResolver::_firstOplogEntryFetcherCallback(
<< "' for " << kFetcherErrorBlacklistDuration << " until: " << until;
_syncSourceSelector->blacklistSyncSource(candidate, until);
- _chooseAndProbeNextSyncSource(earliestOpTimeSeen);
+ _chooseAndProbeNextSyncSource(earliestOpTimeSeen).transitional_ignore();
return;
}
const auto& queryResponse = queryResult.getValue();
const auto remoteEarliestOpTime = _parseRemoteEarliestOpTime(candidate, queryResponse);
if (remoteEarliestOpTime.isNull()) {
- _chooseAndProbeNextSyncSource(earliestOpTimeSeen);
+ _chooseAndProbeNextSyncSource(earliestOpTimeSeen).transitional_ignore();
return;
}
@@ -306,7 +307,7 @@ void SyncSourceResolver::_firstOplogEntryFetcherCallback(
earliestOpTimeSeen = remoteEarliestOpTime;
}
- _chooseAndProbeNextSyncSource(earliestOpTimeSeen);
+ _chooseAndProbeNextSyncSource(earliestOpTimeSeen).transitional_ignore();
return;
}
@@ -323,7 +324,7 @@ void SyncSourceResolver::_scheduleRBIDRequest(HostAndPort candidate, OpTime earl
stdx::placeholders::_1));
if (!handle.isOK()) {
- _finishCallback(handle.getStatus());
+ _finishCallback(handle.getStatus()).transitional_ignore();
return;
}
@@ -339,7 +340,7 @@ void SyncSourceResolver::_rbidRequestCallback(
OpTime earliestOpTimeSeen,
const executor::TaskExecutor::RemoteCommandCallbackArgs& rbidReply) {
if (rbidReply.response.status == ErrorCodes::CallbackCanceled) {
- _finishCallback(rbidReply.response.status);
+ _finishCallback(rbidReply.response.status).transitional_ignore();
return;
}
@@ -352,7 +353,7 @@ void SyncSourceResolver::_rbidRequestCallback(
log() << "Blacklisting " << candidate << " due to error: '" << ex << "' for "
<< kFetcherErrorBlacklistDuration << " until: " << until;
_syncSourceSelector->blacklistSyncSource(candidate, until);
- _chooseAndProbeNextSyncSource(earliestOpTimeSeen);
+ _chooseAndProbeNextSyncSource(earliestOpTimeSeen).transitional_ignore();
return;
}
@@ -361,11 +362,11 @@ void SyncSourceResolver::_rbidRequestCallback(
// Unittest requires that this kind of failure be handled specially.
auto status = _scheduleFetcher(_makeRequiredOpTimeFetcher(candidate, earliestOpTimeSeen));
if (!status.isOK()) {
- _finishCallback(status);
+ _finishCallback(status).transitional_ignore();
}
return;
}
- _finishCallback(candidate);
+ _finishCallback(candidate).transitional_ignore();
}
Status SyncSourceResolver::_compareRequiredOpTimeWithQueryResponse(
@@ -405,12 +406,13 @@ void SyncSourceResolver::_requiredOpTimeFetcherCallback(
"required optime "
<< _requiredOpTime.toString()
<< " in candidate's oplog: "
- << candidate));
+ << candidate))
+ .transitional_ignore();
return;
}
if (ErrorCodes::CallbackCanceled == queryResult.getStatus()) {
- _finishCallback(queryResult.getStatus());
+ _finishCallback(queryResult.getStatus()).transitional_ignore();
return;
}
@@ -422,7 +424,7 @@ void SyncSourceResolver::_requiredOpTimeFetcherCallback(
<< " until: " << until << ". required optime: " << _requiredOpTime;
_syncSourceSelector->blacklistSyncSource(candidate, until);
- _chooseAndProbeNextSyncSource(earliestOpTimeSeen);
+ _chooseAndProbeNextSyncSource(earliestOpTimeSeen).transitional_ignore();
return;
}
@@ -439,11 +441,11 @@ void SyncSourceResolver::_requiredOpTimeFetcherCallback(
<< " until: " << until;
_syncSourceSelector->blacklistSyncSource(candidate, until);
- _chooseAndProbeNextSyncSource(earliestOpTimeSeen);
+ _chooseAndProbeNextSyncSource(earliestOpTimeSeen).transitional_ignore();
return;
}
- _finishCallback(candidate);
+ _finishCallback(candidate).transitional_ignore();
}
Status SyncSourceResolver::_chooseAndProbeNextSyncSource(OpTime earliestOpTimeSeen) {
diff --git a/src/mongo/db/repl/topology_coordinator_impl.cpp b/src/mongo/db/repl/topology_coordinator_impl.cpp
index 7492bb4bb40..9dbed691425 100644
--- a/src/mongo/db/repl/topology_coordinator_impl.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl.cpp
@@ -1967,7 +1967,9 @@ StatusWith<BSONObj> TopologyCoordinatorImpl::prepareReplSetUpdatePositionCommand
// Add metadata to command. Old style parsing logic will reject the metadata.
if (commandStyle == ReplicationCoordinator::ReplSetUpdatePositionCommandStyle::kNewStyle) {
- prepareReplSetMetadata(currentCommittedSnapshotOpTime).writeToMetadata(&cmdBuilder);
+ prepareReplSetMetadata(currentCommittedSnapshotOpTime)
+ .writeToMetadata(&cmdBuilder)
+ .transitional_ignore();
}
return cmdBuilder.obj();
}
diff --git a/src/mongo/db/repl/topology_coordinator_impl_test.cpp b/src/mongo/db/repl/topology_coordinator_impl_test.cpp
index b5af086b797..ec66ca1eb5f 100644
--- a/src/mongo/db/repl/topology_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl_test.cpp
@@ -2585,19 +2585,21 @@ TEST_F(HeartbeatResponseHighVerbosityTest, UpdateHeartbeatDataSameConfig) {
// construct a copy of the original config for log message checking later
// see HeartbeatResponseTest for the origin of the original config
ReplSetConfig originalConfig;
- originalConfig.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 5
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017")
- << BSON("_id" << 2 << "host"
- << "host3:27017"))
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)));
+ originalConfig
+ .initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 5
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)))
+ .transitional_ignore();
ReplSetHeartbeatResponse sameConfigResponse;
sameConfigResponse.noteReplSet();
@@ -4757,7 +4759,8 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
<< "priority"
- << 0))));
+ << 0))))
+ .transitional_ignore();
getTopoCoord().updateConfig(cfg, 0, now()++);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -4791,7 +4794,8 @@ TEST_F(TopoCoordTest, NodeDoesNotBecomeCandidateWhenBecomingSecondaryInSingleNod
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
<< "priority"
- << 0))));
+ << 0))))
+ .transitional_ignore();
getTopoCoord().updateConfig(cfg, 0, now()++);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -5081,7 +5085,8 @@ TEST_F(HeartbeatResponseTest, ReconfigBetweenHeartbeatRequestAndRepsonse) {
0);
ReplSetHeartbeatResponse hb;
- hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0);
+ hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0)
+ .transitional_ignore();
hb.setDurableOpTime(lastOpTimeApplied);
hb.setElectionTime(election.getTimestamp());
StatusWith<ReplSetHeartbeatResponse> hbResponse = StatusWith<ReplSetHeartbeatResponse>(hb);
@@ -5124,7 +5129,8 @@ TEST_F(HeartbeatResponseTest, ReconfigNodeRemovedBetweenHeartbeatRequestAndRepso
0);
ReplSetHeartbeatResponse hb;
- hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0);
+ hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0)
+ .transitional_ignore();
hb.setDurableOpTime(lastOpTimeApplied);
hb.setElectionTime(election.getTimestamp());
StatusWith<ReplSetHeartbeatResponse> hbResponse = StatusWith<ReplSetHeartbeatResponse>(hb);
@@ -5561,7 +5567,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5569,17 +5576,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
ASSERT_TRUE(response.getVoteGranted());
ReplSetRequestVotesArgs args2;
- args2.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response2;
// different candidate same term, should be a problem
@@ -5617,7 +5625,8 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5626,19 +5635,20 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// second dry run fine
ReplSetRequestVotesArgs args2;
- args2.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "dryRun"
- << true
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << true
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2);
@@ -5674,7 +5684,8 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5683,19 +5694,20 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
// dry post real, fails
ReplSetRequestVotesArgs args2;
- args2.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2);
@@ -5730,7 +5742,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5764,7 +5777,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
<< "configVersion"
<< 0LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5810,7 +5824,8 @@ TEST_F(TopoCoordTest, ArbiterDoesNotGrantVoteWhenItCanSeeAHealthyPrimaryOfEqualO
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5849,7 +5864,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5885,7 +5901,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().getMyMemberData()->setLastAppliedOpTime({Timestamp(20, 0), 0}, Date_t());
@@ -5918,17 +5935,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -5949,7 +5967,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5977,17 +5996,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -6008,7 +6028,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
<< "configVersion"
<< 0LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -6036,17 +6057,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -6066,7 +6088,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -6094,17 +6117,18 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -6125,7 +6149,8 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -6153,17 +6178,18 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -6184,7 +6210,8 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().getMyMemberData()->setLastAppliedOpTime({Timestamp(20, 0), 0}, Date_t());
diff --git a/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
index 80fc9c3bf52..f4e43bf3e0b 100644
--- a/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
@@ -2044,7 +2044,8 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
<< "priority"
- << 0))));
+ << 0))))
+ .transitional_ignore();
getTopoCoord().updateConfig(cfg, 0, now()++);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -2078,7 +2079,8 @@ TEST_F(TopoCoordTest, NodeDoesNotBecomeCandidateWhenBecomingSecondaryInSingleNod
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
<< "priority"
- << 0))));
+ << 0))))
+ .transitional_ignore();
getTopoCoord().updateConfig(cfg, 0, now()++);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -2442,7 +2444,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2450,17 +2453,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
ASSERT_TRUE(response.getVoteGranted());
ReplSetRequestVotesArgs args2;
- args2.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response2;
// different candidate same term, should be a problem
@@ -2498,7 +2502,8 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2507,19 +2512,20 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// second dry run fine
ReplSetRequestVotesArgs args2;
- args2.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "dryRun"
- << true
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << true
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2);
@@ -2528,19 +2534,20 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// real request fine
ReplSetRequestVotesArgs args3;
- args3.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args3
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response3;
getTopoCoord().processReplSetRequestVotes(args3, &response3);
@@ -2549,19 +2556,20 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// dry post real, fails
ReplSetRequestVotesArgs args4;
- args4.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args4
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response4;
getTopoCoord().processReplSetRequestVotes(args4, &response4);
@@ -2598,7 +2606,8 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2607,19 +2616,20 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
// dry post real, fails
ReplSetRequestVotesArgs args2;
- args2.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2);
@@ -2654,7 +2664,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2688,7 +2699,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
<< "configVersion"
<< 0LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2726,7 +2738,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2762,7 +2775,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().getMyMemberData()->setLastAppliedOpTime({Timestamp(20, 0), 0}, Date_t());
@@ -2795,17 +2809,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -2826,7 +2841,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2854,17 +2870,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -2885,7 +2902,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
<< "configVersion"
<< 0LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2913,17 +2931,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -2943,7 +2962,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2971,17 +2991,18 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -3002,7 +3023,8 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -3030,17 +3052,18 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -3061,7 +3084,8 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().getMyMemberData()->setLastAppliedOpTime({Timestamp(20, 0), 0}, Date_t());
@@ -3552,7 +3576,8 @@ TEST_F(HeartbeatResponseTestV1, ReconfigNodeRemovedBetweenHeartbeatRequestAndRep
0);
ReplSetHeartbeatResponse hb;
- hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0);
+ hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0)
+ .transitional_ignore();
hb.setDurableOpTime(lastOpTimeApplied);
hb.setElectionTime(election.getTimestamp());
StatusWith<ReplSetHeartbeatResponse> hbResponse = StatusWith<ReplSetHeartbeatResponse>(hb);
@@ -3598,7 +3623,8 @@ TEST_F(HeartbeatResponseTestV1, ReconfigBetweenHeartbeatRequestAndRepsonse) {
0);
ReplSetHeartbeatResponse hb;
- hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0);
+ hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0)
+ .transitional_ignore();
hb.setDurableOpTime(lastOpTimeApplied);
hb.setElectionTime(election.getTimestamp());
StatusWith<ReplSetHeartbeatResponse> hbResponse = StatusWith<ReplSetHeartbeatResponse>(hb);
@@ -3900,7 +3926,7 @@ TEST_F(HeartbeatResponseTestV1,
// Freeze node to set stepdown wait.
BSONObjBuilder response;
- getTopoCoord().prepareFreezeResponse(now()++, 20, &response);
+ getTopoCoord().prepareFreezeResponse(now()++, 20, &response).status_with_transitional_ignore();
nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0");
ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
@@ -4659,21 +4685,23 @@ TEST_F(HeartbeatResponseHighVerbosityTestV1, UpdateHeartbeatDataSameConfig) {
// construct a copy of the original config for log message checking later
// see HeartbeatResponseTest for the origin of the original config
ReplSetConfig originalConfig;
- originalConfig.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 5
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017")
- << BSON("_id" << 2 << "host"
- << "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)));
+ originalConfig
+ .initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 5
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))
+ << "protocolVersion"
+ << 1
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)))
+ .transitional_ignore();
ReplSetHeartbeatResponse sameConfigResponse;
sameConfigResponse.noteReplSet();