summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/mongo/db/repl/check_quorum_for_config_change.cpp50
-rw-r--r--src/mongo/db/repl/check_quorum_for_config_change.h13
-rw-r--r--src/mongo/db/repl/check_quorum_for_config_change_test.cpp14
-rw-r--r--src/mongo/db/repl/optime.cpp1
-rw-r--r--src/mongo/db/repl/optime.h2
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.cpp11
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.h2
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp5
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp9
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp8
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_test.cpp24
-rw-r--r--src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp4
12 files changed, 92 insertions, 51 deletions
diff --git a/src/mongo/db/repl/check_quorum_for_config_change.cpp b/src/mongo/db/repl/check_quorum_for_config_change.cpp
index b3652192e40..83489872c63 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change.cpp
@@ -36,6 +36,7 @@
#include "mongo/base/status.h"
#include "mongo/db/repl/repl_set_config.h"
#include "mongo/db/repl/repl_set_heartbeat_args.h"
+#include "mongo/db/repl/repl_set_heartbeat_args_v1.h"
#include "mongo/db/repl/repl_set_heartbeat_response.h"
#include "mongo/db/repl/scatter_gather_algorithm.h"
#include "mongo/db/repl/scatter_gather_runner.h"
@@ -48,9 +49,10 @@ namespace repl {
using executor::RemoteCommandRequest;
-QuorumChecker::QuorumChecker(const ReplSetConfig* rsConfig, int myIndex)
+QuorumChecker::QuorumChecker(const ReplSetConfig* rsConfig, int myIndex, long long term)
: _rsConfig(rsConfig),
_myIndex(myIndex),
+ _term(term),
_numResponses(1), // We "responded" to ourself already.
_numElectable(0),
_vetoStatus(Status::OK()),
@@ -81,14 +83,29 @@ std::vector<RemoteCommandRequest> QuorumChecker::getRequests() const {
return requests;
}
- ReplSetHeartbeatArgs hbArgs;
- hbArgs.setSetName(_rsConfig->getReplSetName());
- hbArgs.setProtocolVersion(1);
- hbArgs.setConfigVersion(_rsConfig->getConfigVersion());
- hbArgs.setCheckEmpty(isInitialConfig);
- hbArgs.setSenderHost(myConfig.getHostAndPort());
- hbArgs.setSenderId(myConfig.getId());
- const BSONObj hbRequest = hbArgs.toBSON();
+ BSONObj hbRequest;
+ if (_term == OpTime::kUninitializedTerm) {
+ ReplSetHeartbeatArgs hbArgs;
+ hbArgs.setSetName(_rsConfig->getReplSetName());
+ hbArgs.setProtocolVersion(1);
+ hbArgs.setConfigVersion(_rsConfig->getConfigVersion());
+ hbArgs.setCheckEmpty(isInitialConfig);
+ hbArgs.setSenderHost(myConfig.getHostAndPort());
+ hbArgs.setSenderId(myConfig.getId());
+ hbRequest = hbArgs.toBSON();
+
+ } else {
+ ReplSetHeartbeatArgsV1 hbArgs;
+ hbArgs.setSetName(_rsConfig->getReplSetName());
+ hbArgs.setConfigVersion(_rsConfig->getConfigVersion());
+ if (isInitialConfig) {
+ hbArgs.setCheckEmpty();
+ }
+ hbArgs.setSenderHost(myConfig.getHostAndPort());
+ hbArgs.setSenderId(myConfig.getId());
+ hbArgs.setTerm(_term);
+ hbRequest = hbArgs.toBSON();
+ }
// Send a bunch of heartbeat requests.
// Schedule an operation when a "sufficient" number of them have completed, and use that
@@ -281,8 +298,9 @@ bool QuorumChecker::hasReceivedSufficientResponses() const {
Status checkQuorumGeneral(executor::TaskExecutor* executor,
const ReplSetConfig& rsConfig,
- const int myIndex) {
- auto checker = std::make_shared<QuorumChecker>(&rsConfig, myIndex);
+ const int myIndex,
+ long long term) {
+ auto checker = std::make_shared<QuorumChecker>(&rsConfig, myIndex, term);
ScatterGatherRunner runner(checker, executor);
Status status = runner.run();
if (!status.isOK()) {
@@ -294,16 +312,18 @@ Status checkQuorumGeneral(executor::TaskExecutor* executor,
Status checkQuorumForInitiate(executor::TaskExecutor* executor,
const ReplSetConfig& rsConfig,
- const int myIndex) {
+ const int myIndex,
+ long long term) {
invariant(rsConfig.getConfigVersion() == 1);
- return checkQuorumGeneral(executor, rsConfig, myIndex);
+ return checkQuorumGeneral(executor, rsConfig, myIndex, term);
}
Status checkQuorumForReconfig(executor::TaskExecutor* executor,
const ReplSetConfig& rsConfig,
- const int myIndex) {
+ const int myIndex,
+ long long term) {
invariant(rsConfig.getConfigVersion() > 1);
- return checkQuorumGeneral(executor, rsConfig, myIndex);
+ return checkQuorumGeneral(executor, rsConfig, myIndex, term);
}
} // namespace repl
diff --git a/src/mongo/db/repl/check_quorum_for_config_change.h b/src/mongo/db/repl/check_quorum_for_config_change.h
index 2bc9a289464..60918c76587 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change.h
+++ b/src/mongo/db/repl/check_quorum_for_config_change.h
@@ -57,7 +57,7 @@ public:
*
* "rsConfig" must stay in scope until QuorumChecker's destructor completes.
*/
- QuorumChecker(const ReplSetConfig* rsConfig, int myIndex);
+ QuorumChecker(const ReplSetConfig* rsConfig, int myIndex, long long term);
virtual ~QuorumChecker();
virtual std::vector<executor::RemoteCommandRequest> getRequests() const;
@@ -91,6 +91,9 @@ private:
// Index of the local node's member configuration in _rsConfig.
const int _myIndex;
+ // The term of this node.
+ const long long _term;
+
// List of voting nodes that have responded affirmatively.
std::vector<HostAndPort> _voters;
@@ -118,6 +121,7 @@ private:
*
* "myIndex" is the index of this node's member configuration in "rsConfig".
* "executor" is the event loop in which to schedule network/asynchronous processing.
+ * "term" is the term of this node.
*
* For purposes of initiate, a quorum is only met if all of the following conditions
* are met:
@@ -128,7 +132,8 @@ private:
*/
Status checkQuorumForInitiate(executor::TaskExecutor* executor,
const ReplSetConfig& rsConfig,
- const int myIndex);
+ const int myIndex,
+ long long term);
/**
* Performs a quorum call to determine if a sufficient number of nodes are up
@@ -136,6 +141,7 @@ Status checkQuorumForInitiate(executor::TaskExecutor* executor,
*
* "myIndex" is the index of this node's member configuration in "rsConfig".
* "executor" is the event loop in which to schedule network/asynchronous processing.
+ * "term" is the term of this node.
*
* For purposes of reconfig, a quorum is only met if all of the following conditions
* are met:
@@ -146,7 +152,8 @@ Status checkQuorumForInitiate(executor::TaskExecutor* executor,
*/
Status checkQuorumForReconfig(executor::TaskExecutor* executor,
const ReplSetConfig& rsConfig,
- const int myIndex);
+ const int myIndex,
+ long long term);
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
index cdf5e7fda84..696e66764e3 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
@@ -34,7 +34,7 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/repl/check_quorum_for_config_change.h"
#include "mongo/db/repl/repl_set_config.h"
-#include "mongo/db/repl/repl_set_heartbeat_args.h"
+#include "mongo/db/repl/repl_set_heartbeat_args_v1.h"
#include "mongo/db/repl/repl_set_heartbeat_response.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
@@ -120,14 +120,14 @@ void CheckQuorumTest::_runQuorumCheck(const ReplSetConfig& config, int myIndex)
class CheckQuorumForInitiate : public CheckQuorumTest {
private:
virtual Status _runQuorumCheckImpl(const ReplSetConfig& config, int myIndex) {
- return checkQuorumForInitiate(&getExecutor(), config, myIndex);
+ return checkQuorumForInitiate(&getExecutor(), config, myIndex, 0);
}
};
class CheckQuorumForReconfig : public CheckQuorumTest {
protected:
virtual Status _runQuorumCheckImpl(const ReplSetConfig& config, int myIndex) {
- return checkQuorumForReconfig(&getExecutor(), config, myIndex);
+ return checkQuorumForReconfig(&getExecutor(), config, myIndex, 0);
}
};
@@ -207,13 +207,15 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSeveralDownNodes) {
const BSONObj makeHeartbeatRequest(const ReplSetConfig& rsConfig, int myConfigIndex) {
const MemberConfig& myConfig = rsConfig.getMemberAt(myConfigIndex);
- ReplSetHeartbeatArgs hbArgs;
+ ReplSetHeartbeatArgsV1 hbArgs;
hbArgs.setSetName(rsConfig.getReplSetName());
- hbArgs.setProtocolVersion(1);
hbArgs.setConfigVersion(rsConfig.getConfigVersion());
- hbArgs.setCheckEmpty(rsConfig.getConfigVersion() == 1);
+ if (rsConfig.getConfigVersion() == 1) {
+ hbArgs.setCheckEmpty();
+ }
hbArgs.setSenderHost(myConfig.getHostAndPort());
hbArgs.setSenderId(myConfig.getId());
+ hbArgs.setTerm(0);
return hbArgs.toBSON();
}
diff --git a/src/mongo/db/repl/optime.cpp b/src/mongo/db/repl/optime.cpp
index d5d0c316ece..9c63e0a7e88 100644
--- a/src/mongo/db/repl/optime.cpp
+++ b/src/mongo/db/repl/optime.cpp
@@ -41,6 +41,7 @@ namespace repl {
const char OpTime::kTimestampFieldName[] = "ts";
const char OpTime::kTermFieldName[] = "t";
+const long long OpTime::kInitialTerm = 0;
// static
OpTime OpTime::max() {
diff --git a/src/mongo/db/repl/optime.h b/src/mongo/db/repl/optime.h
index a1706b33b56..05ee64df110 100644
--- a/src/mongo/db/repl/optime.h
+++ b/src/mongo/db/repl/optime.h
@@ -60,7 +60,7 @@ public:
//
// This is also the initial term for nodes that were recently started up but have not
// yet joined the cluster, all in protocol version 1.
- static const long long kInitialTerm = 0;
+ static const long long kInitialTerm;
/**
* Returns maximum OpTime value.
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 108b844a03d..5d8fee72e3e 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -2252,7 +2252,8 @@ Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* opCt
<< " members parses ok";
if (!args.force) {
- status = checkQuorumForReconfig(_replExecutor.get(), newConfig, myIndex.getValue());
+ status = checkQuorumForReconfig(
+ _replExecutor.get(), newConfig, myIndex.getValue(), _topCoord->getTerm());
if (!status.isOK()) {
error() << "replSetReconfig failed; " << status;
return status;
@@ -2385,7 +2386,13 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCt
log() << "replSetInitiate config object with " << newConfig.getNumMembers()
<< " members parses ok";
- status = checkQuorumForInitiate(_replExecutor.get(), newConfig, myIndex.getValue());
+ // In pv1, the TopologyCoordinator has not set the term yet. It will be set to kInitialTerm if
+ // the initiate succeeds so we pass that here.
+ status = checkQuorumForInitiate(
+ _replExecutor.get(),
+ newConfig,
+ myIndex.getValue(),
+ newConfig.getProtocolVersion() == 1 ? OpTime::kInitialTerm : OpTime::kUninitializedTerm);
if (!status.isOK()) {
error() << "replSetInitiate failed; " << status;
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index 9eb62a9eea3..127050775a5 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -1334,7 +1334,7 @@ private:
int _earliestMemberId = -1; // (M)
// Cached copy of the current config protocol version.
- AtomicInt64 _protVersion; // (S)
+ AtomicInt64 _protVersion{1}; // (S)
// Source of random numbers used in setting election timeouts, etc.
PseudoRandom _random; // (M)
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
index 459ac62c700..c83657a154e 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
@@ -34,6 +34,7 @@
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/repl/repl_set_config.h"
#include "mongo/db/repl/repl_set_heartbeat_args.h"
+#include "mongo/db/repl/repl_set_heartbeat_args_v1.h"
#include "mongo/db/repl/repl_set_heartbeat_response.h"
#include "mongo/db/repl/replication_coordinator_external_state_mock.h"
#include "mongo/db/repl/replication_coordinator_impl.h"
@@ -114,7 +115,7 @@ TEST_F(ReplCoordHBTest, NodeJoinsExistingReplSetWhenReceivingAConfigContainingTh
NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS(HostAndPort("h1", 1), request.target);
- ReplSetHeartbeatArgs hbArgs;
+ ReplSetHeartbeatArgsV1 hbArgs;
ASSERT_OK(hbArgs.initialize(request.cmdObj));
ASSERT_EQUALS("mySet", hbArgs.getSetName());
ASSERT_EQUALS(-2, hbArgs.getConfigVersion());
@@ -180,7 +181,7 @@ TEST_F(ReplCoordHBTest,
NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS(HostAndPort("h1", 1), request.target);
- ReplSetHeartbeatArgs hbArgs;
+ ReplSetHeartbeatArgsV1 hbArgs;
ASSERT_OK(hbArgs.initialize(request.cmdObj));
ASSERT_EQUALS("mySet", hbArgs.getSetName());
ASSERT_EQUALS(-2, hbArgs.getConfigVersion());
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
index d8d040c8c26..6e77d46c084 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
@@ -114,10 +114,11 @@ TEST_F(ReplCoordHBV1Test,
NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS(HostAndPort("h1", 1), request.target);
- ReplSetHeartbeatArgs hbArgs;
+ ReplSetHeartbeatArgsV1 hbArgs;
ASSERT_OK(hbArgs.initialize(request.cmdObj));
ASSERT_EQUALS("mySet", hbArgs.getSetName());
ASSERT_EQUALS(-2, hbArgs.getConfigVersion());
+ ASSERT_EQUALS(OpTime::kInitialTerm, hbArgs.getTerm());
ReplSetHeartbeatResponse hbResp;
hbResp.setSetName("mySet");
hbResp.setState(MemberState::RS_PRIMARY);
@@ -183,10 +184,11 @@ TEST_F(ReplCoordHBV1Test,
NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS(HostAndPort("h1", 1), request.target);
- ReplSetHeartbeatArgs hbArgs;
+ ReplSetHeartbeatArgsV1 hbArgs;
ASSERT_OK(hbArgs.initialize(request.cmdObj));
ASSERT_EQUALS("mySet", hbArgs.getSetName());
ASSERT_EQUALS(-2, hbArgs.getConfigVersion());
+ ASSERT_EQUALS(OpTime::kInitialTerm, hbArgs.getTerm());
ReplSetHeartbeatResponse hbResp;
hbResp.setSetName("mySet");
hbResp.setState(MemberState::RS_PRIMARY);
@@ -252,10 +254,11 @@ TEST_F(ReplCoordHBV1Test,
NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS(HostAndPort("h1", 1), request.target);
- ReplSetHeartbeatArgs hbArgs;
+ ReplSetHeartbeatArgsV1 hbArgs;
ASSERT_OK(hbArgs.initialize(request.cmdObj));
ASSERT_EQUALS("mySet", hbArgs.getSetName());
ASSERT_EQUALS(-2, hbArgs.getConfigVersion());
+ ASSERT_EQUALS(OpTime::kInitialTerm, hbArgs.getTerm());
ReplSetHeartbeatResponse hbResp;
hbResp.setSetName("mySet");
hbResp.setState(MemberState::RS_PRIMARY);
diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
index b042cd5b0ca..417381a4c18 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
@@ -33,7 +33,7 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/repl/repl_set_config.h"
-#include "mongo/db/repl/repl_set_heartbeat_args.h"
+#include "mongo/db/repl/repl_set_heartbeat_args_v1.h"
#include "mongo/db/repl/repl_set_heartbeat_response.h"
#include "mongo/db/repl/replication_coordinator.h" // ReplSetReconfigArgs
#include "mongo/db/repl/replication_coordinator_external_state_mock.h"
@@ -316,7 +316,7 @@ TEST_F(ReplCoordTest,
getNet()->enterNetwork();
const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
- repl::ReplSetHeartbeatArgs hbArgs;
+ repl::ReplSetHeartbeatArgsV1 hbArgs;
ASSERT_OK(hbArgs.initialize(request.cmdObj));
repl::ReplSetHeartbeatResponse hbResp;
hbResp.setSetName("mySet");
@@ -356,7 +356,7 @@ TEST_F(ReplCoordTest, NodeReturnsOutOfDiskSpaceWhenSavingANewConfigFailsDuringRe
stdx::thread reconfigThread(
stdx::bind(doReplSetReconfig, getReplCoord(), &status, opCtx.get()));
- replyToReceivedHeartbeat();
+ replyToReceivedHeartbeatV1();
reconfigThread.join();
ASSERT_EQUALS(ErrorCodes::OutOfDiskSpace, status);
}
@@ -474,7 +474,7 @@ TEST_F(ReplCoordTest, PrimaryNodeAcceptsNewConfigWhenReceivingAReconfigWithAComp
getNet()->enterNetwork();
const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
- repl::ReplSetHeartbeatArgs hbArgs;
+ repl::ReplSetHeartbeatArgsV1 hbArgs;
ASSERT_OK(hbArgs.initialize(request.cmdObj));
repl::ReplSetHeartbeatResponse hbResp;
hbResp.setSetName("mySet");
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index 2f29f836a10..f0887d1078c 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -343,13 +343,13 @@ TEST_F(ReplCoordTest, NodeReturnsNodeNotFoundWhenQuorumCheckFailsWhileInitiating
start(HostAndPort("node1", 12345));
ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
- ReplSetHeartbeatArgs hbArgs;
+ ReplSetHeartbeatArgsV1 hbArgs;
hbArgs.setSetName("mySet");
- hbArgs.setProtocolVersion(1);
hbArgs.setConfigVersion(1);
- hbArgs.setCheckEmpty(true);
+ hbArgs.setCheckEmpty();
hbArgs.setSenderHost(HostAndPort("node1", 12345));
hbArgs.setSenderId(0);
+ hbArgs.setTerm(0);
Status status(ErrorCodes::InternalError, "Not set");
stdx::thread prsiThread(stdx::bind(doReplSetInitiate, getReplCoord(), &status));
@@ -375,13 +375,13 @@ TEST_F(ReplCoordTest, InitiateSucceedsWhenQuorumCheckPasses) {
start(HostAndPort("node1", 12345));
ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
- ReplSetHeartbeatArgs hbArgs;
+ ReplSetHeartbeatArgsV1 hbArgs;
hbArgs.setSetName("mySet");
- hbArgs.setProtocolVersion(1);
hbArgs.setConfigVersion(1);
- hbArgs.setCheckEmpty(true);
+ hbArgs.setCheckEmpty();
hbArgs.setSenderHost(HostAndPort("node1", 12345));
hbArgs.setSenderId(0);
+ hbArgs.setTerm(0);
auto appliedTS = Timestamp(3, 3);
getReplCoord()->setMyLastAppliedOpTime(OpTime(appliedTS, 1));
@@ -3368,7 +3368,7 @@ TEST_F(ReplCoordTest, AwaitReplicationShouldResolveAsNormalDuringAReconfig) {
Status status(ErrorCodes::InternalError, "Not Set");
stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status));
- replyToReceivedHeartbeat();
+ replyToReceivedHeartbeatV1();
reconfigThread.join();
ASSERT_OK(status);
@@ -3458,7 +3458,7 @@ TEST_F(
Status status(ErrorCodes::InternalError, "Not Set");
stdx::thread reconfigThread(stdx::bind(doReplSetReconfigToFewer, getReplCoord(), &status));
- replyToReceivedHeartbeat();
+ replyToReceivedHeartbeatV1();
reconfigThread.join();
ASSERT_OK(status);
@@ -3544,7 +3544,7 @@ TEST_F(ReplCoordTest,
Status status(ErrorCodes::InternalError, "Not Set");
stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status));
- replyToReceivedHeartbeat();
+ replyToReceivedHeartbeatV1();
reconfigThread.join();
ASSERT_OK(status);
@@ -5416,14 +5416,14 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
<< startingOpTime.getTimestamp())))));
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args2, 0));
- ReplSetHeartbeatArgs hbArgs;
+ ReplSetHeartbeatArgsV1 hbArgs;
hbArgs.setSetName("mySet");
- hbArgs.setProtocolVersion(1);
hbArgs.setConfigVersion(2);
hbArgs.setSenderId(1);
hbArgs.setSenderHost(HostAndPort("node2", 12345));
+ hbArgs.setTerm(0);
ReplSetHeartbeatResponse hbResp;
- ASSERT_OK(getReplCoord()->processHeartbeat(hbArgs, &hbResp));
+ ASSERT_OK(getReplCoord()->processHeartbeatV1(hbArgs, &hbResp));
// Confirm that the node relinquishes PRIMARY after only one node is left UP.
const Date_t startDate1 = getNet()->now();
diff --git a/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
index 5eb29adfc98..421b5d8b4ca 100644
--- a/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
@@ -1758,8 +1758,8 @@ TEST_F(TopoCoordTest, HeartbeatFrequencyShouldBeHalfElectionTimeoutWhenArbiter)
0);
HostAndPort target("host2", 27017);
Date_t requestDate = now();
- std::pair<ReplSetHeartbeatArgs, Milliseconds> uppingRequest =
- getTopoCoord().prepareHeartbeatRequest(requestDate, "myset", target);
+ std::pair<ReplSetHeartbeatArgsV1, Milliseconds> uppingRequest =
+ getTopoCoord().prepareHeartbeatRequestV1(requestDate, "myset", target);
auto action = getTopoCoord().processHeartbeatResponse(
requestDate, Milliseconds(0), target, makeStatusWith<ReplSetHeartbeatResponse>());
Date_t expected(now() + Milliseconds(2500));