 src/mongo/db/commands/get_last_error.cpp                             |   6
 src/mongo/db/read_concern.cpp                                        |  18
 src/mongo/db/repl/data_replicator_external_state_impl.cpp            |   3
 src/mongo/db/repl/oplog.cpp                                          |   3
 src/mongo/db/repl/oplog_fetcher.cpp                                  |   7
 src/mongo/db/repl/repl_set_commands.cpp                              |   2
 src/mongo/db/repl/repl_set_heartbeat_response.cpp                    |  13
 src/mongo/db/repl/repl_set_heartbeat_response.h                      |   6
 src/mongo/db/repl/repl_set_heartbeat_response_test.cpp               |  42
 src/mongo/db/repl/replication_coordinator.h                          |   5
 src/mongo/db/repl/replication_coordinator_external_state.h           |   7
 src/mongo/db/repl/replication_coordinator_external_state_impl.cpp    |  21
 src/mongo/db/repl/replication_coordinator_external_state_impl.h      |   2
 src/mongo/db/repl/replication_coordinator_external_state_mock.cpp    |   3
 src/mongo/db/repl/replication_coordinator_external_state_mock.h      |   2
 src/mongo/db/repl/replication_coordinator_impl.cpp                   |  63
 src/mongo/db/repl/replication_coordinator_impl.h                     |   2
 src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp     |   8
 src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp         |  20
 src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp |   8
 src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp     |   8
 src/mongo/db/repl/replication_coordinator_impl_test.cpp              |  25
 src/mongo/db/repl/replication_coordinator_mock.cpp                   |   4
 src/mongo/db/repl/replication_coordinator_mock.h                     |   2
 src/mongo/db/repl/replication_coordinator_test_fixture.cpp           |   6
 src/mongo/db/repl/rollback_impl.cpp                                  |   6
 src/mongo/db/repl/rs_rollback.cpp                                    |   5
 src/mongo/embedded/replication_coordinator_embedded.cpp              |   4
 src/mongo/embedded/replication_coordinator_embedded.h                |   2
 29 files changed, 91 insertions(+), 212 deletions(-)
diff --git a/src/mongo/db/commands/get_last_error.cpp b/src/mongo/db/commands/get_last_error.cpp
index abeb8ed2220..e9ca375d880 100644
--- a/src/mongo/db/commands/get_last_error.cpp
+++ b/src/mongo/db/commands/get_last_error.cpp
@@ -151,11 +151,7 @@ public:
if (replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet) {
const repl::OpTime lastOp = repl::ReplClientInfo::forClient(c).getLastOp();
if (!lastOp.isNull()) {
- if (replCoord->isV1ElectionProtocol()) {
- lastOp.append(&result, "lastOp");
- } else {
- result.append("lastOp", lastOp.getTimestamp());
- }
+ lastOp.append(&result, "lastOp");
}
}
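With protocol version 0 gone, getLastError always reports lastOp in the pv1 shape: a {ts, t} subdocument carrying the election term, rather than the bare timestamp the deleted branch emitted. A minimal standalone sketch of the two shapes, using plain structs instead of MongoDB's OpTime and BSONObjBuilder:

```cpp
// Illustrative only: plain structs stand in for mongo::OpTime / BSON types.
#include <cstdint>
#include <iostream>

struct Timestamp {
    uint32_t secs, inc;
};

struct OpTime {
    Timestamp ts;
    int64_t term;  // pv1 carries the term; pv0 had no meaningful term
};

int main() {
    OpTime lastOp{{1534123456, 5}, 42};
    // pv0 shape (removed): only the timestamp was appended.
    std::cout << "pv0: { lastOp: Timestamp(" << lastOp.ts.secs << ", "
              << lastOp.ts.inc << ") }\n";
    // pv1 shape (now unconditional): OpTime::append writes a subdocument.
    std::cout << "pv1: { lastOp: { ts: Timestamp(" << lastOp.ts.secs << ", "
              << lastOp.ts.inc << "), t: " << lastOp.term << " } }\n";
}
```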
diff --git a/src/mongo/db/read_concern.cpp b/src/mongo/db/read_concern.cpp
index b8215af1610..fef22f8ab1f 100644
--- a/src/mongo/db/read_concern.cpp
+++ b/src/mongo/db/read_concern.cpp
@@ -231,14 +231,6 @@ Status waitForReadConcern(OperationContext* opCtx,
"node needs to be a replica set member to use read concern"};
}
- // Replica sets running pv0 do not support linearizable read concern until further testing
- // is completed (SERVER-27025).
- if (!replCoord->isV1ElectionProtocol()) {
- return {
- ErrorCodes::IncompatibleElectionProtocol,
- "Replica sets running protocol version 0 do not support readConcern: linearizable"};
- }
-
if (readConcernArgs.getArgsOpTime()) {
return {ErrorCodes::FailedToParse,
"afterOpTime not compatible with linearizable read concern"};
@@ -303,11 +295,6 @@ Status waitForReadConcern(OperationContext* opCtx,
return {ErrorCodes::NotAReplicaSet,
"node needs to be a replica set member to use readConcern: snapshot"};
}
-
- if (!replCoord->isV1ElectionProtocol()) {
- return {ErrorCodes::IncompatibleElectionProtocol,
- "Replica sets running protocol version 0 do not support readConcern: snapshot"};
- }
if (speculative) {
session->setSpeculativeTransactionOpTimeToLastApplied(opCtx);
}
@@ -325,11 +312,6 @@ Status waitForReadConcern(OperationContext* opCtx,
readConcernArgs.getLevel() == repl::ReadConcernLevel::kSnapshotReadConcern) &&
!speculative &&
replCoord->getReplicationMode() == repl::ReplicationCoordinator::Mode::modeReplSet) {
- if (!replCoord->isV1ElectionProtocol()) {
- return {ErrorCodes::IncompatibleElectionProtocol,
- str::stream() << "Replica sets running protocol version 0 do not support "
- "majority committed reads"};
- }
const int debugLevel = serverGlobalParams.clusterRole == ClusterRole::ConfigServer ? 1 : 2;
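All three pv0 guards in waitForReadConcern are gone, so linearizable, snapshot, and majority reads are validated the same way on every replica set. A hedged sketch of what remains of the linearizable path, with stand-in Status and ErrorCodes types rather than MongoDB's real headers:

```cpp
#include <string>

enum class ErrorCodes { OK, NotAReplicaSet, FailedToParse };

struct Status {
    ErrorCodes code;
    std::string reason;
    bool isOK() const { return code == ErrorCodes::OK; }
};

// The IncompatibleElectionProtocol branch no longer exists, because every
// replica set runs pv1; only the protocol-independent checks remain.
Status validateLinearizable(bool isReplSetMember, bool hasAfterOpTime) {
    if (!isReplSetMember)
        return {ErrorCodes::NotAReplicaSet,
                "node needs to be a replica set member to use read concern"};
    if (hasAfterOpTime)
        return {ErrorCodes::FailedToParse,
                "afterOpTime not compatible with linearizable read concern"};
    return {ErrorCodes::OK, ""};
}

int main() {
    return validateLinearizable(true, false).isOK() ? 0 : 1;
}
```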
diff --git a/src/mongo/db/repl/data_replicator_external_state_impl.cpp b/src/mongo/db/repl/data_replicator_external_state_impl.cpp
index 25d717f1a2b..8c94f9e3b06 100644
--- a/src/mongo/db/repl/data_replicator_external_state_impl.cpp
+++ b/src/mongo/db/repl/data_replicator_external_state_impl.cpp
@@ -82,9 +82,6 @@ executor::TaskExecutor* DataReplicatorExternalStateImpl::getTaskExecutor() const
}
OpTimeWithTerm DataReplicatorExternalStateImpl::getCurrentTermAndLastCommittedOpTime() {
- if (!_replicationCoordinator->isV1ElectionProtocol()) {
- return {OpTime::kUninitializedTerm, OpTime()};
- }
return {_replicationCoordinator->getTerm(), _replicationCoordinator->getLastCommittedOpTime()};
}
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 42f471bedd4..061285a3227 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -152,8 +152,7 @@ void _getNextOpTimes(OperationContext* opCtx,
long long term = OpTime::kUninitializedTerm;
// Fetch term out of the newOpMutex.
- if (replCoord->getReplicationMode() == ReplicationCoordinator::modeReplSet &&
- replCoord->isV1ElectionProtocol()) {
+ if (replCoord->getReplicationMode() == ReplicationCoordinator::modeReplSet) {
// Current term. If we're not a replset, it remains kUninitializedTerm.
term = replCoord->getTerm();
}
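After this hunk, the term attached to newly allocated optimes depends only on whether the node is in replica-set mode; the extra pv1 check is gone. A simplified stand-in for the term selection in _getNextOpTimes, with plain types replacing the real coordinator:

```cpp
#include <cstdint>
#include <iostream>

constexpr int64_t kUninitializedTerm = -1;  // stand-in for OpTime's constant

struct OpTime {
    uint64_t ts;
    int64_t term;
};

OpTime nextOpTime(uint64_t nextTimestamp, bool isReplSet, int64_t coordinatorTerm) {
    int64_t term = kUninitializedTerm;
    if (isReplSet)  // the isV1ElectionProtocol() conjunct was removed
        term = coordinatorTerm;
    return {nextTimestamp, term};
}

int main() {
    OpTime op = nextOpTime(1000, true, 7);
    std::cout << "{ ts: " << op.ts << ", t: " << op.term << " }\n";
}
```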
diff --git a/src/mongo/db/repl/oplog_fetcher.cpp b/src/mongo/db/repl/oplog_fetcher.cpp
index 28af2d5a834..6545bace758 100644
--- a/src/mongo/db/repl/oplog_fetcher.cpp
+++ b/src/mongo/db/repl/oplog_fetcher.cpp
@@ -102,10 +102,7 @@ BSONObj makeGetMoreCommandObject(const NamespaceString& nss,
/**
* Returns command metadata object suitable for tailing remote oplog.
*/
-BSONObj makeMetadataObject(bool isV1ElectionProtocol) {
- if (!isV1ElectionProtocol)
- return ReadPreferenceSetting::secondaryPreferredMetadata();
-
+BSONObj makeMetadataObject() {
BSONObjBuilder metaBuilder;
metaBuilder << rpc::kReplSetMetadataFieldName << 1;
metaBuilder << rpc::kOplogQueryMetadataFieldName << 1;
@@ -332,7 +329,7 @@ OplogFetcher::OplogFetcher(executor::TaskExecutor* executor,
maxFetcherRestarts,
onShutdownCallbackFn,
"oplog fetcher"),
- _metadataObject(makeMetadataObject(config.getProtocolVersion() == 1LL)),
+ _metadataObject(makeMetadataObject()),
_requiredRBID(requiredRBID),
_requireFresherSyncSource(requireFresherSyncSource),
_dataReplicatorExternalState(dataReplicatorExternalState),
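makeMetadataObject() no longer branches on the protocol: every oplog fetcher asks its sync source for replica-set and oplog-query metadata. The sketch below approximates the resulting document as a JSON string; "$replData" and "$oplogQueryData" are the wire names behind rpc::kReplSetMetadataFieldName and rpc::kOplogQueryMetadataFieldName, and the read-preference field is an assumption carried over from the deleted pv0 branch, which sent only secondaryPreferred metadata.

```cpp
// Approximation of the metadata the fetcher now always attaches; the real
// code builds this with BSONObjBuilder, not a string literal.
#include <iostream>
#include <string>

std::string makeMetadataObject() {
    return R"({ "$replData": 1, "$oplogQueryData": 1, )"
           R"("$readPreference": { "mode": "secondaryPreferred" } })";
}

int main() {
    std::cout << makeMetadataObject() << "\n";
}
```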
diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp
index da433b4a7e1..b01d30f8d4e 100644
--- a/src/mongo/db/repl/repl_set_commands.cpp
+++ b/src/mongo/db/repl/repl_set_commands.cpp
@@ -670,7 +670,7 @@ public:
ReplSetHeartbeatResponse response;
status = ReplicationCoordinator::get(opCtx)->processHeartbeatV1(args, &response);
if (status.isOK())
- response.addToBSON(&result, true);
+ response.addToBSON(&result);
LOG_FOR_HEARTBEATS(2) << "Processed heartbeat from " << cmdObj.getStringField("from")
<< " and generated response, " << response;
diff --git a/src/mongo/db/repl/repl_set_heartbeat_response.cpp b/src/mongo/db/repl/repl_set_heartbeat_response.cpp
index fa08f5d2aa0..040d43e0083 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_response.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_response.cpp
@@ -63,7 +63,7 @@ const std::string kTimestampFieldName = "ts";
} // namespace
-void ReplSetHeartbeatResponse::addToBSON(BSONObjBuilder* builder, bool isProtocolVersionV1) const {
+void ReplSetHeartbeatResponse::addToBSON(BSONObjBuilder* builder) const {
builder->append(kOkFieldName, 1.0);
if (_electionTimeSet) {
builder->appendDate(kElectionTimeFieldName,
@@ -95,18 +95,13 @@ void ReplSetHeartbeatResponse::addToBSON(BSONObjBuilder* builder, bool isProtoco
_durableOpTime.append(builder, kDurableOpTimeFieldName);
}
if (_appliedOpTimeSet) {
- if (isProtocolVersionV1) {
- _appliedOpTime.append(builder, kAppliedOpTimeFieldName);
- } else {
- builder->appendDate(kAppliedOpTimeFieldName,
- Date_t::fromMillisSinceEpoch(_appliedOpTime.getTimestamp().asLL()));
- }
+ _appliedOpTime.append(builder, kAppliedOpTimeFieldName);
}
}
-BSONObj ReplSetHeartbeatResponse::toBSON(bool isProtocolVersionV1) const {
+BSONObj ReplSetHeartbeatResponse::toBSON() const {
BSONObjBuilder builder;
- addToBSON(&builder, isProtocolVersionV1);
+ addToBSON(&builder);
return builder.obj();
}
diff --git a/src/mongo/db/repl/repl_set_heartbeat_response.h b/src/mongo/db/repl/repl_set_heartbeat_response.h
index 5fe978d0d15..102a64ecf1a 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_response.h
+++ b/src/mongo/db/repl/repl_set_heartbeat_response.h
@@ -57,18 +57,18 @@ public:
/**
* Appends all non-default values to "builder".
*/
- void addToBSON(BSONObjBuilder* builder, bool isProtocolVersionV1) const;
+ void addToBSON(BSONObjBuilder* builder) const;
/**
* Returns a BSONObj consisting of all non-default values.
*/
- BSONObj toBSON(bool isProtocolVersionV1) const;
+ BSONObj toBSON() const;
/**
* Returns toBSON().toString()
*/
const std::string toString() const {
- return toBSON(true).toString();
+ return toBSON().toString();
}
const std::string& getReplicaSetName() const {
diff --git a/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp b/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
index 943da9d0246..7be7e94e667 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
@@ -57,7 +57,7 @@ TEST(ReplSetHeartbeatResponse, DefaultConstructThenSlowlyBuildToFullObj) {
ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
ASSERT_EQUALS(-1, hbResponse.getConfigVersion());
- BSONObj hbResponseObj = hbResponse.toBSON(false);
+ BSONObj hbResponseObj = hbResponse.toBSON();
ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
@@ -77,7 +77,7 @@ TEST(ReplSetHeartbeatResponse, DefaultConstructThenSlowlyBuildToFullObj) {
ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
ASSERT_EQUALS(1, hbResponse.getConfigVersion());
- hbResponseObj = hbResponse.toBSON(false);
+ hbResponseObj = hbResponse.toBSON();
ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
ASSERT_EQUALS(1, hbResponseObj["v"].Number());
@@ -99,7 +99,7 @@ TEST(ReplSetHeartbeatResponse, DefaultConstructThenSlowlyBuildToFullObj) {
ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
ASSERT_EQUALS(1, hbResponse.getConfigVersion());
- hbResponseObj = hbResponse.toBSON(false);
+ hbResponseObj = hbResponse.toBSON();
ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
@@ -123,7 +123,7 @@ TEST(ReplSetHeartbeatResponse, DefaultConstructThenSlowlyBuildToFullObj) {
ASSERT_EQUALS(1, hbResponse.getConfigVersion());
ASSERT_EQUALS(Timestamp(10, 0), hbResponse.getElectionTime());
- hbResponseObj = hbResponse.toBSON(false);
+ hbResponseObj = hbResponse.toBSON();
ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
@@ -149,7 +149,7 @@ TEST(ReplSetHeartbeatResponse, DefaultConstructThenSlowlyBuildToFullObj) {
ASSERT_EQUALS(Timestamp(10, 0), hbResponse.getElectionTime());
ASSERT_EQUALS(OpTime(Timestamp(0, 10), 0), hbResponse.getDurableOpTime());
- hbResponseObj = hbResponse.toBSON(false);
+ hbResponseObj = hbResponse.toBSON();
ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
@@ -159,7 +159,7 @@ TEST(ReplSetHeartbeatResponse, DefaultConstructThenSlowlyBuildToFullObj) {
initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
ASSERT_EQUALS(Status::OK(), initializeResult);
- ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON(false).toString());
+ ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON().toString());
// set appliedOpTime
hbResponse.setAppliedOpTime(OpTime(Timestamp(50), 0));
@@ -177,18 +177,18 @@ TEST(ReplSetHeartbeatResponse, DefaultConstructThenSlowlyBuildToFullObj) {
ASSERT_EQUALS(OpTime(Timestamp(0, 10), 0), hbResponse.getDurableOpTime());
ASSERT_EQUALS(OpTime(Timestamp(0, 50), 0), hbResponse.getAppliedOpTime());
- hbResponseObj = hbResponse.toBSON(false);
+ hbResponseObj = hbResponse.toBSON();
ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
ASSERT_EQUALS(1, hbResponseObj["v"].Number());
ASSERT_EQUALS(Timestamp(10, 0), hbResponseObj["electionTime"].timestamp());
- ASSERT_EQUALS(Timestamp(0, 50), hbResponseObj["opTime"].timestamp());
+ ASSERT_EQUALS(Timestamp(0, 50), hbResponseObj["opTime"]["ts"].timestamp());
ASSERT_EQUALS(Timestamp(0, 10), hbResponseObj["durableOpTime"]["ts"].timestamp());
initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
ASSERT_EQUALS(Status::OK(), initializeResult);
- ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON(false).toString());
+ ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON().toString());
// set config
ReplSetConfig config;
@@ -208,19 +208,19 @@ TEST(ReplSetHeartbeatResponse, DefaultConstructThenSlowlyBuildToFullObj) {
ASSERT_EQUALS(OpTime(Timestamp(0, 50), 0), hbResponse.getAppliedOpTime());
ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
- hbResponseObj = hbResponse.toBSON(false);
+ hbResponseObj = hbResponse.toBSON();
ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
ASSERT_EQUALS(1, hbResponseObj["v"].Number());
ASSERT_EQUALS(Timestamp(10, 0), hbResponseObj["electionTime"].timestamp());
- ASSERT_EQUALS(Timestamp(0, 50), hbResponseObj["opTime"].timestamp());
+ ASSERT_EQUALS(Timestamp(0, 50), hbResponseObj["opTime"]["ts"].timestamp());
ASSERT_EQUALS(Timestamp(0, 10), hbResponseObj["durableOpTime"]["ts"].timestamp());
ASSERT_EQUALS(config.toBSON().toString(), hbResponseObj["config"].Obj().toString());
initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
ASSERT_EQUALS(Status::OK(), initializeResult);
- ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON(false).toString());
+ ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON().toString());
// set state
hbResponse.setState(MemberState(MemberState::RS_SECONDARY));
@@ -241,20 +241,20 @@ TEST(ReplSetHeartbeatResponse, DefaultConstructThenSlowlyBuildToFullObj) {
ASSERT_EQUALS(OpTime(Timestamp(0, 50), 0), hbResponse.getAppliedOpTime());
ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
- hbResponseObj = hbResponse.toBSON(false);
+ hbResponseObj = hbResponse.toBSON();
ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
ASSERT_EQUALS(1, hbResponseObj["v"].Number());
ASSERT_EQUALS(Timestamp(10, 0), hbResponseObj["electionTime"].timestamp());
- ASSERT_EQUALS(Timestamp(0, 50), hbResponseObj["opTime"].timestamp());
+ ASSERT_EQUALS(Timestamp(0, 50), hbResponseObj["opTime"]["ts"].timestamp());
ASSERT_EQUALS(Timestamp(0, 10), hbResponseObj["durableOpTime"]["ts"].timestamp());
ASSERT_EQUALS(config.toBSON().toString(), hbResponseObj["config"].Obj().toString());
ASSERT_EQUALS(2, hbResponseObj["state"].numberLong());
initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
ASSERT_EQUALS(Status::OK(), initializeResult);
- ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON(false).toString());
+ ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON().toString());
// set syncingTo
hbResponse.setSyncingTo(HostAndPort("syncTarget"));
@@ -275,13 +275,13 @@ TEST(ReplSetHeartbeatResponse, DefaultConstructThenSlowlyBuildToFullObj) {
ASSERT_EQUALS(OpTime(Timestamp(0, 50), 0), hbResponse.getAppliedOpTime());
ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
- hbResponseObj = hbResponse.toBSON(false);
+ hbResponseObj = hbResponse.toBSON();
ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
ASSERT_EQUALS(1, hbResponseObj["v"].Number());
ASSERT_EQUALS(Timestamp(10, 0), hbResponseObj["electionTime"].timestamp());
- ASSERT_EQUALS(Timestamp(0, 50), hbResponseObj["opTime"].timestamp());
+ ASSERT_EQUALS(Timestamp(0, 50), hbResponseObj["opTime"]["ts"].timestamp());
ASSERT_EQUALS(Timestamp(0, 10), hbResponseObj["durableOpTime"]["ts"].timestamp());
ASSERT_EQUALS(config.toBSON().toString(), hbResponseObj["config"].Obj().toString());
ASSERT_EQUALS(2, hbResponseObj["state"].numberLong());
@@ -289,7 +289,7 @@ TEST(ReplSetHeartbeatResponse, DefaultConstructThenSlowlyBuildToFullObj) {
initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
ASSERT_EQUALS(Status::OK(), initializeResult);
- ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON(false).toString());
+ ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON().toString());
// set hbmsg
hbResponse.setHbMsg("lub dub");
@@ -309,13 +309,13 @@ TEST(ReplSetHeartbeatResponse, DefaultConstructThenSlowlyBuildToFullObj) {
ASSERT_EQUALS(OpTime(Timestamp(0, 50), 0), hbResponse.getAppliedOpTime());
ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
- hbResponseObj = hbResponse.toBSON(false);
+ hbResponseObj = hbResponse.toBSON();
ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
ASSERT_EQUALS("lub dub", hbResponseObj["hbmsg"].String());
ASSERT_EQUALS(1, hbResponseObj["v"].Number());
ASSERT_EQUALS(Timestamp(10, 0), hbResponseObj["electionTime"].timestamp());
- ASSERT_EQUALS(Timestamp(0, 50), hbResponseObj["opTime"].timestamp());
+ ASSERT_EQUALS(Timestamp(0, 50), hbResponseObj["opTime"]["ts"].timestamp());
ASSERT_EQUALS(Timestamp(0, 10), hbResponseObj["durableOpTime"]["ts"].timestamp());
ASSERT_EQUALS(config.toBSON().toString(), hbResponseObj["config"].Obj().toString());
ASSERT_EQUALS(2, hbResponseObj["state"].numberLong());
@@ -323,7 +323,7 @@ TEST(ReplSetHeartbeatResponse, DefaultConstructThenSlowlyBuildToFullObj) {
initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
ASSERT_EQUALS(Status::OK(), initializeResult);
- ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON(false).toString());
+ ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON().toString());
}
TEST(ReplSetHeartbeatResponse, InitializeWrongElectionTimeType) {
diff --git a/src/mongo/db/repl/replication_coordinator.h b/src/mongo/db/repl/replication_coordinator.h
index b2f61698e88..2b75d7485fb 100644
--- a/src/mongo/db/repl/replication_coordinator.h
+++ b/src/mongo/db/repl/replication_coordinator.h
@@ -697,11 +697,6 @@ public:
BSONObjBuilder* builder) const = 0;
/**
- * Returns true if the V1 election protocol is being used and false otherwise.
- */
- virtual bool isV1ElectionProtocol() const = 0;
-
- /**
* Returns whether or not majority write concerns should implicitly journal, if j has not been
* explicitly set.
*/
diff --git a/src/mongo/db/repl/replication_coordinator_external_state.h b/src/mongo/db/repl/replication_coordinator_external_state.h
index e70f68f47a8..0972a377efd 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state.h
@@ -135,13 +135,12 @@ public:
* state. See the call site in ReplicationCoordinatorImpl for details about when and how it is
* called.
*
- * Among other things, this writes a message about our transition to primary to the oplog if
- * isV1 and and returns the optime of that message. If !isV1, returns the optime of the last op
- * in the oplog.
+ * Among other things, this writes a message about our transition to primary to the oplog and
+ * returns the optime of that message.
*
* Throws on errors.
*/
- virtual OpTime onTransitionToPrimary(OperationContext* opCtx, bool isV1ElectionProtocol) = 0;
+ virtual OpTime onTransitionToPrimary(OperationContext* opCtx) = 0;
/**
* Simple wrapper around SyncSourceFeedback::forwardSlaveProgress. Signals to the
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 20d96481ace..91345c4c304 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -483,8 +483,7 @@ void ReplicationCoordinatorExternalStateImpl::onDrainComplete(OperationContext*
}
}
-OpTime ReplicationCoordinatorExternalStateImpl::onTransitionToPrimary(OperationContext* opCtx,
- bool isV1ElectionProtocol) {
+OpTime ReplicationCoordinatorExternalStateImpl::onTransitionToPrimary(OperationContext* opCtx) {
invariant(opCtx->lockState()->isW());
// Clear the appliedThrough marker so on startup we'll use the top of the oplog. This must be
@@ -498,16 +497,14 @@ OpTime ReplicationCoordinatorExternalStateImpl::onTransitionToPrimary(OperationC
_replicationProcess->getConsistencyMarkers()->clearAppliedThrough(
opCtx, lastAppliedOpTime.getTimestamp());
- if (isV1ElectionProtocol) {
- writeConflictRetry(opCtx, "logging transition to primary to oplog", "local.oplog.rs", [&] {
- WriteUnitOfWork wuow(opCtx);
- opCtx->getClient()->getServiceContext()->getOpObserver()->onOpMessage(
- opCtx,
- BSON("msg"
- << "new primary"));
- wuow.commit();
- });
- }
+ writeConflictRetry(opCtx, "logging transition to primary to oplog", "local.oplog.rs", [&] {
+ WriteUnitOfWork wuow(opCtx);
+ opCtx->getClient()->getServiceContext()->getOpObserver()->onOpMessage(
+ opCtx,
+ BSON("msg"
+ << "new primary"));
+ wuow.commit();
+ });
const auto opTimeToReturn = fassert(28665, loadLastOpTime(opCtx));
_shardingOnTransitionToPrimaryHook(opCtx);
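onTransitionToPrimary now always logs the "new primary" no-op oplog message inside writeConflictRetry. A self-contained sketch of that retry pattern, with a stand-in exception type rather than MongoDB's storage API:

```cpp
// Run the work unit in a loop and retry whenever a (stand-in) write-conflict
// exception is thrown, mirroring the shape of mongo's writeConflictRetry.
#include <iostream>
#include <stdexcept>

struct WriteConflictException : std::runtime_error {
    WriteConflictException() : std::runtime_error("write conflict") {}
};

template <typename F>
auto writeConflictRetry(const char* opName, F&& work) {
    for (int attempt = 1;; ++attempt) {
        try {
            return work();
        } catch (const WriteConflictException&) {
            std::cerr << opName << ": retrying after write conflict, attempt "
                      << attempt << "\n";
        }
    }
}

int main() {
    int tries = 0;
    writeConflictRetry("logging transition to primary to oplog", [&] {
        if (++tries < 3)
            throw WriteConflictException();  // simulate transient conflicts
        std::cout << "logged {msg: \"new primary\"} after " << tries << " tries\n";
    });
}
```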
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.h b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
index 797175b8111..4f8be81a4ba 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
@@ -79,7 +79,7 @@ public:
virtual Status initializeReplSetStorage(OperationContext* opCtx, const BSONObj& config);
virtual void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx);
void onDrainComplete(OperationContext* opCtx) override;
- OpTime onTransitionToPrimary(OperationContext* opCtx, bool isV1ElectionProtocol) override;
+ OpTime onTransitionToPrimary(OperationContext* opCtx) override;
virtual void forwardSlaveProgress();
virtual bool isSelf(const HostAndPort& host, ServiceContext* service);
virtual StatusWith<BSONObj> loadLocalConfigDocument(OperationContext* opCtx);
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
index 30568571626..842fcf4fd27 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
@@ -262,8 +262,7 @@ void ReplicationCoordinatorExternalStateMock::setIsReadCommittedEnabled(bool val
void ReplicationCoordinatorExternalStateMock::onDrainComplete(OperationContext* opCtx) {}
-OpTime ReplicationCoordinatorExternalStateMock::onTransitionToPrimary(OperationContext* opCtx,
- bool isV1ElectionProtocol) {
+OpTime ReplicationCoordinatorExternalStateMock::onTransitionToPrimary(OperationContext* opCtx) {
_lastOpTime = _firstOpTimeOfMyTerm;
_firstOpTimeOfMyTerm = OpTime();
return fassert(40297, _lastOpTime);
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.h b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
index 05d08eb0f6d..84e615280dd 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
@@ -68,7 +68,7 @@ public:
virtual Status initializeReplSetStorage(OperationContext* opCtx, const BSONObj& config);
virtual void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx);
void onDrainComplete(OperationContext* opCtx) override;
- OpTime onTransitionToPrimary(OperationContext* opCtx, bool isV1ElectionProtocol) override;
+ OpTime onTransitionToPrimary(OperationContext* opCtx) override;
virtual void forwardSlaveProgress();
virtual bool isSelf(const HostAndPort& host, ServiceContext* service);
virtual HostAndPort getClientHostAndPort(const OperationContext* opCtx);
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index a80bc8885f2..184b848b700 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -996,7 +996,7 @@ void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* opCtx,
{
lk.unlock();
AllowNonLocalWritesBlock writesAllowed(opCtx);
- OpTime firstOpTime = _externalState->onTransitionToPrimary(opCtx, isV1ElectionProtocol());
+ OpTime firstOpTime = _externalState->onTransitionToPrimary(opCtx);
lk.lock();
auto status = _topCoord->completeTransitionToPrimary(firstOpTime);
@@ -1819,7 +1819,6 @@ void ReplicationCoordinatorImpl::_handleTimePassing(
// For election protocol v1, call _startElectSelfIfEligibleV1 to avoid race
// against other elections caused by events like election timeout, replSetStepUp etc.
- invariant(isV1ElectionProtocol());
_startElectSelfIfEligibleV1(
TopologyCoordinator::StartElectionReason::kSingleNodeStepDownTimeout);
}
@@ -2460,11 +2459,8 @@ ReplicationCoordinatorImpl::_updateMemberStateFromTopologyCoordinator_inlock(
if (_topCoord->getRole() == TopologyCoordinator::Role::kCandidate) {
invariant(_rsConfig.getNumMembers() == 1 && _selfIndex == 0 &&
_rsConfig.getMemberAt(0).isElectable());
- if (isV1ElectionProtocol()) {
- // Start election in protocol version 1
- return kActionStartSingleNodeElection;
- }
- return kActionWinElection;
+ // Start election in protocol version 1
+ return kActionStartSingleNodeElection;
}
return kActionNone;
}
@@ -2508,12 +2504,8 @@ ReplicationCoordinatorImpl::_updateMemberStateFromTopologyCoordinator_inlock(
// overriding requirement is to elect this singleton node primary.
invariant(_rsConfig.getNumMembers() == 1 && _selfIndex == 0 &&
_rsConfig.getMemberAt(0).isElectable());
- if (isV1ElectionProtocol()) {
- // Start election in protocol version 1
- result = kActionStartSingleNodeElection;
- } else {
- result = kActionWinElection;
- }
+ // Start election in protocol version 1
+ result = kActionStartSingleNodeElection;
}
if (newState.rollback()) {
@@ -2593,12 +2585,8 @@ void ReplicationCoordinatorImpl::_performPostMemberStateUpdateAction(
break;
case kActionWinElection: {
stdx::unique_lock<stdx::mutex> lk(_mutex);
- if (isV1ElectionProtocol()) {
- invariant(_topCoord->getTerm() != OpTime::kUninitializedTerm);
- _electionId = OID::fromTerm(_topCoord->getTerm());
- } else {
- _electionId = OID::gen();
- }
+ invariant(_topCoord->getTerm() != OpTime::kUninitializedTerm);
+ _electionId = OID::fromTerm(_topCoord->getTerm());
auto ts = LogicalClock::get(getServiceContext())->reserveTicks(1).asTimestamp();
_topCoord->processWinElection(_electionId, ts);
@@ -2613,13 +2601,9 @@ void ReplicationCoordinatorImpl::_performPostMemberStateUpdateAction(
}
// Notify all secondaries of the election win.
_restartHeartbeats_inlock();
- if (isV1ElectionProtocol()) {
- invariant(!_catchupState);
- _catchupState = stdx::make_unique<CatchupState>(this);
- _catchupState->start_inlock();
- } else {
- _enterDrainMode_inlock();
- }
+ invariant(!_catchupState);
+ _catchupState = stdx::make_unique<CatchupState>(this);
+ _catchupState->start_inlock();
break;
}
case kActionStartSingleNodeElection:
@@ -2734,11 +2718,6 @@ void ReplicationCoordinatorImpl::CatchupState::signalHeartbeatUpdate_inlock() {
}
Status ReplicationCoordinatorImpl::abortCatchupIfNeeded() {
- if (!isV1ElectionProtocol()) {
- return Status(ErrorCodes::CommandNotSupported,
- "Primary catch-up is only supported by Protocol Version 1");
- }
-
stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_catchupState) {
_catchupState->abort_inlock();
@@ -3179,9 +3158,6 @@ Status ReplicationCoordinatorImpl::processReplSetRequestVotes(
OperationContext* opCtx,
const ReplSetRequestVotesArgs& args,
ReplSetRequestVotesResponse* response) {
- if (!isV1ElectionProtocol()) {
- return {ErrorCodes::BadValue, "not using election protocol v1"};
- }
auto termStatus = updateTerm(opCtx, args.getTerm());
if (!termStatus.isOK() && termStatus.code() != ErrorCodes::StaleTerm)
@@ -3246,10 +3222,6 @@ void ReplicationCoordinatorImpl::_prepareOplogQueryMetadata_inlock(int rbid,
_topCoord->prepareOplogQueryMetadata(rbid).writeToMetadata(builder).transitional_ignore();
}
-bool ReplicationCoordinatorImpl::isV1ElectionProtocol() const {
- return _protVersion.load() == 1;
-}
-
bool ReplicationCoordinatorImpl::getWriteConcernMajorityShouldJournal() {
return getConfig().getWriteConcernMajorityShouldJournal();
}
@@ -3324,11 +3296,6 @@ Status ReplicationCoordinatorImpl::updateTerm(OperationContext* opCtx, long long
return {ErrorCodes::BadValue, "cannot supply 'term' without active replication"};
}
- if (!isV1ElectionProtocol()) {
- // Do not update if not in V1 protocol.
- return Status::OK();
- }
-
// Check we haven't acquired any lock, because potential stepdown needs global lock.
dassert(!opCtx->lockState()->isLocked() || opCtx->lockState()->isNoop());
TopologyCoordinator::UpdateTermResult updateTermResult;
@@ -3353,10 +3320,6 @@ Status ReplicationCoordinatorImpl::updateTerm(OperationContext* opCtx, long long
EventHandle ReplicationCoordinatorImpl::_updateTerm_inlock(
long long term, TopologyCoordinator::UpdateTermResult* updateTermResult) {
- if (!isV1ElectionProtocol()) {
- LOG(3) << "Cannot update term in election protocol version 0";
- return EventHandle();
- }
auto now = _replExecutor->now();
TopologyCoordinator::UpdateTermResult localUpdateTermResult = _topCoord->updateTerm(term, now);
@@ -3527,11 +3490,6 @@ CallbackFn ReplicationCoordinatorImpl::_wrapAsCallbackFn(const stdx::function<vo
}
Status ReplicationCoordinatorImpl::stepUpIfEligible() {
- if (!isV1ElectionProtocol()) {
- return Status(ErrorCodes::CommandNotSupported,
- "Step-up command is only supported by Protocol Version 1");
- }
-
_startElectSelfIfEligibleV1(TopologyCoordinator::StartElectionReason::kStepUpRequest);
EventHandle finishEvent;
{
@@ -3563,7 +3521,6 @@ executor::TaskExecutor::EventHandle ReplicationCoordinatorImpl::_cancelElectionI
if (_topCoord->getRole() != TopologyCoordinator::Role::kCandidate) {
return {};
}
- invariant(isV1ElectionProtocol());
invariant(_voteRequester);
_voteRequester->cancel();
return _electionFinishedEvent;
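In the kActionWinElection hunk, the election id is now always derived from the term instead of OID::gen(). The sketch below shows one way a 12-byte id can encode a 64-bit term big-endian so that ids from later terms compare greater; the fixed 0xFF prefix is an assumption standing in for whatever prefix OID::fromTerm actually writes (bson/oid.cpp holds the authoritative layout).

```cpp
#include <array>
#include <cstdint>
#include <cstdio>

// Hypothetical term-derived election id: bytes 0..3 are a fixed maximal
// prefix, bytes 4..11 hold the term big-endian, so byte-wise comparison
// orders ids by term.
std::array<uint8_t, 12> electionIdFromTerm(int64_t term) {
    std::array<uint8_t, 12> oid{};
    for (int i = 0; i < 4; ++i)
        oid[i] = 0xFF;  // fixed prefix so term ordering dominates comparisons
    for (int i = 0; i < 8; ++i)  // term, big-endian, in bytes 4..11
        oid[4 + i] = static_cast<uint8_t>(term >> (8 * (7 - i)));
    return oid;
}

int main() {
    auto id = electionIdFromTerm(42);
    for (uint8_t b : id)
        std::printf("%02x", b);
    std::printf("\n");  // ffffffff000000000000002a
}
```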
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index a0175b86458..33b9404f18f 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -258,8 +258,6 @@ public:
virtual Status processHeartbeatV1(const ReplSetHeartbeatArgsV1& args,
ReplSetHeartbeatResponse* response) override;
- virtual bool isV1ElectionProtocol() const override;
-
virtual bool getWriteConcernMajorityShouldJournal() override;
virtual void summarizeAsHtml(ReplSetHtmlSummary* s) override;
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
index 388c465e69d..ff6335916ce 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
@@ -495,7 +495,7 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
hbResp2.setState(MemberState::RS_SECONDARY);
net->runUntil(net->now() + Seconds(10)); // run until we've sent a heartbeat request
const NetworkInterfaceMock::NetworkOperationIterator noi2 = net->getNextReadyRequest();
- net->scheduleResponse(noi2, net->now(), makeResponseStatus(hbResp2.toBSON(true)));
+ net->scheduleResponse(noi2, net->now(), makeResponseStatus(hbResp2.toBSON()));
net->runReadyNetworkOperations();
getNet()->exitNetwork();
@@ -525,7 +525,7 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
hbResp.setState(MemberState::RS_SECONDARY);
hbResp.setConfigVersion(rsConfig.getConfigVersion());
BSONObjBuilder respObj;
- net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON(true)));
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON()));
} else {
error() << "Black holing unexpected request to " << request.target << ": "
<< request.cmdObj;
@@ -943,7 +943,7 @@ private:
hbResp.setTerm(replCoord->getTerm());
hbResp.setAppliedOpTime(otherNodesOpTime);
hbResp.setDurableOpTime(otherNodesOpTime);
- auto response = makeResponseStatus(hbResp.toBSON(replCoord->isV1ElectionProtocol()));
+ auto response = makeResponseStatus(hbResp.toBSON());
net->scheduleResponse(noi, net->now(), response);
}
}
@@ -2110,7 +2110,7 @@ protected:
hbResp.setConfigVersion(rsConfig.getConfigVersion());
hbResp.setAppliedOpTime(opTime);
hbResp.setDurableOpTime(opTime);
- return makeResponseStatus(hbResp.toBSON(true));
+ return makeResponseStatus(hbResp.toBSON());
}
void simulateSuccessfulV1Voting() {
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index d1269f78d1b..bfa7d03cb68 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -95,7 +95,6 @@ void ReplicationCoordinatorImpl::_doMemberHeartbeat(executor::TaskExecutor::Call
const Date_t now = _replExecutor->now();
BSONObj heartbeatObj;
Milliseconds timeout(0);
- invariant(isV1ElectionProtocol());
const std::pair<ReplSetHeartbeatArgsV1, Milliseconds> hbRequest =
_topCoord->prepareHeartbeatRequestV1(now, _settings.ourSetName(), target);
heartbeatObj = hbRequest.first.toBSON();
@@ -661,10 +660,8 @@ void ReplicationCoordinatorImpl::_startHeartbeats_inlock() {
_topCoord->restartHeartbeats();
- if (isV1ElectionProtocol()) {
- _topCoord->resetAllMemberTimeouts(_replExecutor->now());
- _scheduleNextLivenessUpdate_inlock();
- }
+ _topCoord->resetAllMemberTimeouts(_replExecutor->now());
+ _scheduleNextLivenessUpdate_inlock();
}
void ReplicationCoordinatorImpl::_handleLivenessTimeout(
@@ -677,9 +674,6 @@ void ReplicationCoordinatorImpl::_handleLivenessTimeout(
if (!cbData.status.isOK()) {
return;
}
- if (!isV1ElectionProtocol()) {
- return;
- }
// Scan liveness table for problems and mark nodes as down by calling into topocoord.
HeartbeatResponseAction action = _topCoord->checkMemberTimeouts(_replExecutor->now());
@@ -692,9 +686,6 @@ void ReplicationCoordinatorImpl::_handleLivenessTimeout(
}
void ReplicationCoordinatorImpl::_scheduleNextLivenessUpdate_inlock() {
- if (!isV1ElectionProtocol()) {
- return;
- }
// Scan liveness table for earliest date; schedule a run at (that date plus election
// timeout).
Date_t earliestDate;
@@ -773,10 +764,6 @@ void ReplicationCoordinatorImpl::_cancelAndRescheduleElectionTimeout_inlock() {
return;
}
- if (!isV1ElectionProtocol()) {
- return;
- }
-
if (!_memberState.secondary()) {
return;
}
@@ -803,9 +790,6 @@ void ReplicationCoordinatorImpl::_cancelAndRescheduleElectionTimeout_inlock() {
void ReplicationCoordinatorImpl::_startElectSelfIfEligibleV1(
TopologyCoordinator::StartElectionReason reason) {
- if (!isV1ElectionProtocol()) {
- return;
- }
stdx::lock_guard<stdx::mutex> lock(_mutex);
// If it is not a single node replica set, no need to start an election after stepdown timeout.
if (reason == TopologyCoordinator::StartElectionReason::kSingleNodeStepDownTimeout &&
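With pv0 removed, the liveness machinery runs on every replica-set node: each heartbeat resets member timeouts, and the next liveness check is scheduled from the stalest recorded heartbeat. A sketch of that scheduling rule, with std::chrono types standing in for the executor's Date_t and Milliseconds:

```cpp
#include <algorithm>
#include <chrono>
#include <iostream>
#include <vector>

using Clock = std::chrono::steady_clock;

// Next timeout check fires at (earliest recorded heartbeat) + electionTimeout:
// the member that has been silent longest drives the schedule.
Clock::time_point nextLivenessCheck(const std::vector<Clock::time_point>& lastHeartbeats,
                                    std::chrono::milliseconds electionTimeout) {
    auto earliest = *std::min_element(lastHeartbeats.begin(), lastHeartbeats.end());
    return earliest + electionTimeout;
}

int main() {
    auto now = Clock::now();
    std::vector<Clock::time_point> beats{now - std::chrono::seconds(3),
                                         now - std::chrono::seconds(1)};
    auto when = nextLivenessCheck(beats, std::chrono::milliseconds(10000));
    std::cout << "next check in "
              << std::chrono::duration_cast<std::chrono::milliseconds>(when - now).count()
              << " ms\n";  // 7000 ms with these inputs
}
```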
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
index 05a9c1281fc..15d83b93eb3 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
@@ -125,7 +125,7 @@ TEST_F(ReplCoordHBV1Test,
hbResp.setConfig(rsConfig);
BSONObjBuilder responseBuilder;
responseBuilder << "ok" << 1;
- hbResp.addToBSON(&responseBuilder, true);
+ hbResp.addToBSON(&responseBuilder);
net->scheduleResponse(
noi, startDate + Milliseconds(200), makeResponseStatus(responseBuilder.obj()));
assertRunUntil(startDate + Milliseconds(200));
@@ -195,7 +195,7 @@ TEST_F(ReplCoordHBV1Test,
hbResp.setConfig(rsConfig);
BSONObjBuilder responseBuilder;
responseBuilder << "ok" << 1;
- hbResp.addToBSON(&responseBuilder, true);
+ hbResp.addToBSON(&responseBuilder);
net->scheduleResponse(
noi, startDate + Milliseconds(200), makeResponseStatus(responseBuilder.obj()));
assertRunUntil(startDate + Milliseconds(200));
@@ -265,7 +265,7 @@ TEST_F(ReplCoordHBV1Test,
hbResp.setConfig(rsConfig);
BSONObjBuilder responseBuilder;
responseBuilder << "ok" << 1;
- hbResp.addToBSON(&responseBuilder, true);
+ hbResp.addToBSON(&responseBuilder);
net->scheduleResponse(
noi, startDate + Milliseconds(200), makeResponseStatus(responseBuilder.obj()));
assertRunUntil(startDate + Milliseconds(2200));
@@ -376,7 +376,7 @@ TEST_F(ReplCoordHBV1Test, IgnoreTheContentsOfMetadataWhenItsReplicaSetIdDoesNotM
BSONObjBuilder responseBuilder;
responseBuilder << "ok" << 1;
- hbResp.addToBSON(&responseBuilder, true);
+ hbResp.addToBSON(&responseBuilder);
rpc::ReplSetMetadata metadata(
opTime.getTerm(), opTime, opTime, rsConfig.getConfigVersion(), unexpectedId, 1, -1);
diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
index 5aefac8a7b2..60e93ee965f 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
@@ -333,7 +333,7 @@ TEST_F(ReplCoordTest,
hbResp.setConfigVersion(5);
BSONObjBuilder respObj;
respObj << "ok" << 1;
- hbResp.addToBSON(&respObj, false);
+ hbResp.addToBSON(&respObj);
net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
net->runReadyNetworkOperations();
getNet()->exitNetwork();
@@ -492,7 +492,7 @@ TEST_F(ReplCoordTest, PrimaryNodeAcceptsNewConfigWhenReceivingAReconfigWithAComp
hbResp.setConfigVersion(2);
BSONObjBuilder respObj;
respObj << "ok" << 1;
- hbResp.addToBSON(&respObj, false);
+ hbResp.addToBSON(&respObj);
net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
net->runReadyNetworkOperations();
getNet()->exitNetwork();
@@ -549,7 +549,7 @@ TEST_F(
hbResp2.setState(MemberState::RS_SECONDARY);
BSONObjBuilder respObj2;
respObj2 << "ok" << 1;
- hbResp2.addToBSON(&respObj2, false);
+ hbResp2.addToBSON(&respObj2);
net->runUntil(net->now() + Seconds(10)); // run until we've sent a heartbeat request
const NetworkInterfaceMock::NetworkOperationIterator noi2 = net->getNextReadyRequest();
net->scheduleResponse(noi2, net->now(), makeResponseStatus(respObj2.obj()));
@@ -621,7 +621,7 @@ TEST_F(ReplCoordTest, NodeDoesNotAcceptHeartbeatReconfigWhileInTheMidstOfReconfi
hbResp.setState(MemberState::RS_SECONDARY);
BSONObjBuilder respObj2;
respObj2 << "ok" << 1;
- hbResp.addToBSON(&respObj2, false);
+ hbResp.addToBSON(&respObj2);
net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj2.obj()));
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(1));
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index 7af5b422e8a..49809d3dafb 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -382,10 +382,9 @@ TEST_F(ReplCoordTest, InitiateSucceedsWhenQuorumCheckPasses) {
ASSERT_BSONOBJ_EQ(hbArgs.toBSON(), noi->getRequest().cmdObj);
ReplSetHeartbeatResponse hbResp;
hbResp.setConfigVersion(0);
- getNet()->scheduleResponse(
- noi,
- startDate + Milliseconds(10),
- RemoteCommandResponse(hbResp.toBSON(false), BSONObj(), Milliseconds(8)));
+ getNet()->scheduleResponse(noi,
+ startDate + Milliseconds(10),
+ RemoteCommandResponse(hbResp.toBSON(), BSONObj(), Milliseconds(8)));
getNet()->runUntil(startDate + Milliseconds(10));
getNet()->exitNetwork();
ASSERT_EQUALS(startDate + Milliseconds(10), getNet()->now());
@@ -1354,7 +1353,7 @@ protected:
hbResp.setDurableOpTime(desiredOpTime);
BSONObjBuilder respObj;
respObj << "ok" << 1;
- hbResp.addToBSON(&respObj, false);
+ hbResp.addToBSON(&respObj);
getNet()->scheduleResponse(noi, getNet()->now(), makeResponseStatus(respObj.obj()));
}
while (getNet()->hasReadyRequests()) {
@@ -1863,7 +1862,7 @@ protected:
hbResp.setAppliedOpTime(optimeResponse);
BSONObjBuilder respObj;
respObj << "ok" << 1;
- hbResp.addToBSON(&respObj, false);
+ hbResp.addToBSON(&respObj);
getNet()->scheduleResponse(noi, getNet()->now(), makeResponseStatus(respObj.obj()));
hbNum += 1;
}
@@ -2166,7 +2165,7 @@ TEST_F(StepDownTest,
hbResp.setConfigVersion(hbArgs.getConfigVersion());
BSONObjBuilder respObj;
respObj << "ok" << 1;
- hbResp.addToBSON(&respObj, false);
+ hbResp.addToBSON(&respObj);
getNet()->scheduleResponse(noi, getNet()->now(), makeResponseStatus(respObj.obj()));
}
while (getNet()->hasReadyRequests()) {
@@ -4488,7 +4487,7 @@ TEST_F(ReplCoordTest,
hbResp.setConfigVersion(config.getConfigVersion());
hbResp.setSetName(config.getReplSetName());
hbResp.setState(MemberState::RS_SECONDARY);
- net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON(true), metadataObj));
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON(), metadataObj));
net->runReadyNetworkOperations();
net->exitNetwork();
@@ -4614,7 +4613,7 @@ TEST_F(ReplCoordTest, TermAndLastCommittedOpTimeUpdatedFromHeartbeatWhenArbiter)
hbResp.setConfigVersion(config.getConfigVersion());
hbResp.setSetName(config.getReplSetName());
hbResp.setState(MemberState::RS_SECONDARY);
- net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON(true), metadataObj));
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON(), metadataObj));
net->runReadyNetworkOperations();
net->exitNetwork();
@@ -4788,7 +4787,7 @@ TEST_F(ReplCoordTest,
hbResp.setConfigVersion(3);
hbResp.setSetName("mySet");
hbResp.setState(MemberState::RS_SECONDARY);
- net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON(true)));
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON()));
net->runReadyNetworkOperations();
net->exitNetwork();
@@ -4844,7 +4843,7 @@ TEST_F(ReplCoordTest,
// Heartbeat response is scheduled with a delay so that we can be sure that
// the election was rescheduled due to the heartbeat response.
auto heartbeatWhen = net->now() + Seconds(1);
- net->scheduleResponse(noi, heartbeatWhen, makeResponseStatus(hbResp.toBSON(true)));
+ net->scheduleResponse(noi, heartbeatWhen, makeResponseStatus(hbResp.toBSON()));
net->runUntil(heartbeatWhen);
ASSERT_EQUALS(heartbeatWhen, net->now());
net->runReadyNetworkOperations();
@@ -4898,7 +4897,7 @@ TEST_F(ReplCoordTest,
// Heartbeat response is scheduled with a delay so that we can be sure that
// the election was rescheduled due to the heartbeat response.
auto heartbeatWhen = net->now() + Seconds(1);
- net->scheduleResponse(noi, heartbeatWhen, makeResponseStatus(hbResp.toBSON(true)));
+ net->scheduleResponse(noi, heartbeatWhen, makeResponseStatus(hbResp.toBSON()));
net->runUntil(heartbeatWhen);
ASSERT_EQUALS(heartbeatWhen, net->now());
net->runReadyNetworkOperations();
@@ -4949,7 +4948,7 @@ TEST_F(ReplCoordTest,
// Heartbeat response is scheduled with a delay so that we can be sure that
// the election was rescheduled due to the heartbeat response.
auto heartbeatWhen = net->now() + Seconds(1);
- net->scheduleResponse(noi, heartbeatWhen, makeResponseStatus(hbResp.toBSON(true)));
+ net->scheduleResponse(noi, heartbeatWhen, makeResponseStatus(hbResp.toBSON()));
net->runUntil(heartbeatWhen);
ASSERT_EQUALS(heartbeatWhen, net->now());
net->runReadyNetworkOperations();
diff --git a/src/mongo/db/repl/replication_coordinator_mock.cpp b/src/mongo/db/repl/replication_coordinator_mock.cpp
index a4c1c5d6700..6e4f2e339af 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_mock.cpp
@@ -408,10 +408,6 @@ Status ReplicationCoordinatorMock::processHeartbeatV1(const ReplSetHeartbeatArgs
return Status::OK();
}
-bool ReplicationCoordinatorMock::isV1ElectionProtocol() const {
- return true;
-}
-
bool ReplicationCoordinatorMock::getWriteConcernMajorityShouldJournal() {
return true;
}
diff --git a/src/mongo/db/repl/replication_coordinator_mock.h b/src/mongo/db/repl/replication_coordinator_mock.h
index 7b85930f7d5..a784b5093e1 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.h
+++ b/src/mongo/db/repl/replication_coordinator_mock.h
@@ -224,8 +224,6 @@ public:
virtual Status processHeartbeatV1(const ReplSetHeartbeatArgsV1& args,
ReplSetHeartbeatResponse* response);
- virtual bool isV1ElectionProtocol() const override;
-
virtual bool getWriteConcernMajorityShouldJournal();
virtual void summarizeAsHtml(ReplSetHtmlSummary* output);
diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
index 23347464610..7fb82bf5f57 100644
--- a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
+++ b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
@@ -240,7 +240,7 @@ void ReplCoordTest::simulateEnoughHeartbeatsForAllNodesUp() {
hbResp.setConfigVersion(rsConfig.getConfigVersion());
hbResp.setAppliedOpTime(OpTime(Timestamp(100, 2), 0));
BSONObjBuilder respObj;
- net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON(true)));
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON()));
} else {
error() << "Black holing unexpected request to " << request.target << ": "
<< request.cmdObj;
@@ -343,7 +343,7 @@ void ReplCoordTest::simulateSuccessfulV1ElectionWithoutExitingDrainMode(Date_t e
hbResp.setAppliedOpTime(opTime);
hbResp.setDurableOpTime(opTime);
hbResp.setConfigVersion(rsConfig.getConfigVersion());
- net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON(true)));
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON()));
} else if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
net->scheduleResponse(noi,
net->now(),
@@ -436,7 +436,7 @@ bool ReplCoordTest::consumeHeartbeatV1(const NetworkInterfaceMock::NetworkOperat
hbResp.setConfigVersion(rsConfig.getConfigVersion());
hbResp.setAppliedOpTime(lastApplied);
BSONObjBuilder respObj;
- net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON(true)));
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON()));
return true;
}
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index 1d0f78b76e7..c1557c9d522 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -660,10 +660,8 @@ StatusWith<RollBackLocalOperations::RollbackCommonPoint> RollbackImpl::_findComm
log() << "Rollback common point is " << commonPointOpTime;
// Rollback common point should be >= the replication commit point.
- invariant(!_replicationCoordinator->isV1ElectionProtocol() ||
- commonPointOpTime.getTimestamp() >= lastCommittedOpTime.getTimestamp());
- invariant(!_replicationCoordinator->isV1ElectionProtocol() ||
- commonPointOpTime >= lastCommittedOpTime);
+ invariant(commonPointOpTime.getTimestamp() >= lastCommittedOpTime.getTimestamp());
+ invariant(commonPointOpTime >= lastCommittedOpTime);
// Rollback common point should be >= the committed snapshot optime.
invariant(commonPointOpTime.getTimestamp() >= committedSnapshot.getTimestamp());
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 8a6c6a0ae62..0ce3a34d3ea 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -934,9 +934,8 @@ Status _syncRollback(OperationContext* opCtx,
log() << "Rollback common point is " << commonPoint;
// Rollback common point should be >= the replication commit point.
- invariant(!replCoord->isV1ElectionProtocol() ||
- commonPoint.getTimestamp() >= lastCommittedOpTime.getTimestamp());
- invariant(!replCoord->isV1ElectionProtocol() || commonPoint >= lastCommittedOpTime);
+ invariant(commonPoint.getTimestamp() >= lastCommittedOpTime.getTimestamp());
+ invariant(commonPoint >= lastCommittedOpTime);
// Rollback common point should be >= the committed snapshot optime.
invariant(commonPoint.getTimestamp() >= committedSnapshot.getTimestamp());
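Both rollback implementations now assert unconditionally that the common point is at least the majority commit point, since majority-committed entries must never be rolled back. A self-contained sketch of the invariant; note the real invariant() aborts in all build modes, unlike assert under NDEBUG:

```cpp
#include <cassert>
#include <cstdint>

struct OpTime {
    uint64_t ts;   // timestamp, compared first
    int64_t term;  // pv1 term, breaks ties
    bool operator>=(const OpTime& o) const {
        return ts != o.ts ? ts > o.ts : term >= o.term;
    }
};

void checkCommonPoint(const OpTime& commonPoint, const OpTime& lastCommitted) {
    // Rolling back past the commit point would un-apply majority-committed
    // writes, so the real code fasserts rather than proceeding.
    assert(commonPoint.ts >= lastCommitted.ts);
    assert(commonPoint >= lastCommitted);
}

int main() {
    checkCommonPoint({120, 3}, {100, 3});  // fine: common point is newer
}
```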
diff --git a/src/mongo/embedded/replication_coordinator_embedded.cpp b/src/mongo/embedded/replication_coordinator_embedded.cpp
index 6fd8bf5a086..381a34574db 100644
--- a/src/mongo/embedded/replication_coordinator_embedded.cpp
+++ b/src/mongo/embedded/replication_coordinator_embedded.cpp
@@ -364,10 +364,6 @@ void ReplicationCoordinatorEmbedded::prepareReplMetadata(const BSONObj&,
UASSERT_NOT_IMPLEMENTED;
}
-bool ReplicationCoordinatorEmbedded::isV1ElectionProtocol() const {
- UASSERT_NOT_IMPLEMENTED;
-}
-
bool ReplicationCoordinatorEmbedded::getWriteConcernMajorityShouldJournal() {
UASSERT_NOT_IMPLEMENTED;
}
diff --git a/src/mongo/embedded/replication_coordinator_embedded.h b/src/mongo/embedded/replication_coordinator_embedded.h
index 2136186f544..24eda7e109f 100644
--- a/src/mongo/embedded/replication_coordinator_embedded.h
+++ b/src/mongo/embedded/replication_coordinator_embedded.h
@@ -196,8 +196,6 @@ public:
Status processHeartbeatV1(const repl::ReplSetHeartbeatArgsV1&,
repl::ReplSetHeartbeatResponse*) override;
- bool isV1ElectionProtocol() const override;
-
bool getWriteConcernMajorityShouldJournal() override;
void summarizeAsHtml(repl::ReplSetHtmlSummary*) override;