author     Vesselina Ratcheva <31660559+vessy-mongodb@users.noreply.github.com>  2022-04-21 19:08:23 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-04-21 19:45:34 +0000
commit     14e04b0acc27f7d7092eb93bf1b2666c50226d06 (patch)
tree       d40cda1e2cb6859e28899646e5e05acd38a22087
parent     8f8616c7deca0d2c12539fbaab4fddf757b006ad (diff)
download   mongo-14e04b0acc27f7d7092eb93bf1b2666c50226d06.tar.gz
SERVER-64627 Refactor onInitialSyncComplete and onStartupRecoveryComplete into new sharding hook that also works with FCBIS
-rw-r--r--  jstests/replsets/initial_sync_runs_completion_hook.js  40
-rw-r--r--  src/mongo/db/repl/SConscript  2
-rw-r--r--  src/mongo/db/repl/initial_syncer.cpp  3
-rw-r--r--  src/mongo/db/repl/primary_only_service.h  3
-rw-r--r--  src/mongo/db/repl/replica_set_aware_service.cpp  11
-rw-r--r--  src/mongo/db/repl/replica_set_aware_service.h  20
-rw-r--r--  src/mongo/db/repl/replica_set_aware_service_test.cpp  41
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp  5
-rw-r--r--  src/mongo/db/repl/replication_recovery.cpp  7
-rw-r--r--  src/mongo/db/repl/replication_recovery_test.cpp  26
-rw-r--r--  src/mongo/db/repl/tenant_file_importer_service.h  3
-rw-r--r--  src/mongo/db/s/balancer/balancer.h  3
-rw-r--r--  src/mongo/db/s/balancer_stats_registry.h  4
-rw-r--r--  src/mongo/db/s/dist_lock_manager_replset.cpp  3
-rw-r--r--  src/mongo/db/s/recoverable_critical_section_service.cpp  2
-rw-r--r--  src/mongo/db/s/recoverable_critical_section_service.h  7
-rw-r--r--  src/mongo/db/s/user_writes_recoverable_critical_section_service.cpp  2
-rw-r--r--  src/mongo/db/s/user_writes_recoverable_critical_section_service.h  7
-rw-r--r--  src/mongo/db/vector_clock_mongod.cpp  3
19 files changed, 119 insertions, 73 deletions
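
The commit message above describes collapsing two per-service callbacks into a single onInitialDataAvailable hook that also fires after a file-copy-based initial sync (FCBIS). The standalone sketch below (not MongoDB code; OperationContext is a stand-in and Registry/CriticalSectionRecovery are illustrative names) shows the fan-out shape the diff implements: one notification, delivered to every registered service, with a flag saying whether the data is already majority committed.

    #include <iostream>
    #include <vector>

    struct OperationContext {};  // stand-in for mongo::OperationContext

    // Single hook replacing onStartupRecoveryComplete / onInitialSyncComplete.
    class ReplicaSetAwareInterface {
    public:
        virtual ~ReplicaSetAwareInterface() = default;
        virtual void onInitialDataAvailable(OperationContext* opCtx,
                                            bool isMajorityDataAvailable) = 0;
    };

    // The registry fans one notification out to every registered service.
    class Registry {
    public:
        void add(ReplicaSetAwareInterface* service) {
            _services.push_back(service);
        }
        void onInitialDataAvailable(OperationContext* opCtx, bool isMajorityDataAvailable) {
            for (auto* service : _services) {
                service->onInitialDataAvailable(opCtx, isMajorityDataAvailable);
            }
        }

    private:
        std::vector<ReplicaSetAwareInterface*> _services;
    };

    // Hypothetical service: the same recovery runs no matter how the data arrived.
    class CriticalSectionRecovery : public ReplicaSetAwareInterface {
    public:
        void onInitialDataAvailable(OperationContext*, bool isMajorityDataAvailable) override {
            std::cout << "recovering critical sections, majority=" << isMajorityDataAvailable
                      << "\n";
        }
    };

    int main() {
        Registry registry;
        CriticalSectionRecovery service;
        registry.add(&service);

        OperationContext opCtx;
        registry.onInitialDataAvailable(&opCtx, false);  // e.g. after (FCBIS) initial sync
        registry.onInitialDataAvailable(&opCtx, true);   // e.g. after startup recovery
        return 0;
    }
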
diff --git a/jstests/replsets/initial_sync_runs_completion_hook.js b/jstests/replsets/initial_sync_runs_completion_hook.js
new file mode 100644
index 00000000000..22d016db95f
--- /dev/null
+++ b/jstests/replsets/initial_sync_runs_completion_hook.js
@@ -0,0 +1,40 @@
+/**
+ * Tests that we will run the appropriate hook after initial sync completes.
+ *
+ * @tags: [requires_fcv_60]
+ */
+
+(function() {
+'use strict';
+
+load('jstests/libs/fail_point_util.js');
+
+const rst = new ReplSetTest({nodes: 1, name: jsTestName()});
+rst.startSet();
+rst.initiate();
+
+const dbName = "testDB";
+const collName = "testColl";
+
+const primary = rst.getPrimary();
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+assert.commandWorked(testColl.insert([{a: 1}, {b: 2}, {c: 3}]));
+
+jsTestLog("Adding the initial-syncing node to the replica set.");
+const secondary = rst.add({
+ rsConfig: {priority: 0, votes: 0},
+ setParameter: {logComponentVerbosity: tojson({'sharding': 2})}
+});
+
+rst.reInitiate();
+rst.awaitSecondaryNodes();
+rst.awaitReplication();
+
+jsTestLog("Checking for message indicating sharding hook ran.");
+checkLog.containsJson(secondary, 6351912);
+
+jsTestLog("Done with test.");
+rst.stopSet();
+})();
diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript
index 07b2e725e95..2992ec9d30f 100644
--- a/src/mongo/db/repl/SConscript
+++ b/src/mongo/db/repl/SConscript
@@ -306,6 +306,7 @@ env.Library(
'oplog_application',
'oplog_interface_local',
'repl_server_parameters',
+ 'replica_set_aware_service',
],
)
@@ -1213,7 +1214,6 @@ env.Library(
'$BUILD_DIR/mongo/db/index_builds_coordinator_interface',
'$BUILD_DIR/mongo/executor/scoped_task_executor',
'repl_server_parameters',
- 'replica_set_aware_service',
]
)
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 53b5b8d00c2..d1b9b699ada 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -58,7 +58,6 @@
#include "mongo/db/repl/oplog_fetcher.h"
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/repl_server_parameters_gen.h"
-#include "mongo/db/repl/replica_set_aware_service.h"
#include "mongo/db/repl/replication_consistency_markers.h"
#include "mongo/db/repl/replication_process.h"
#include "mongo/db/repl/storage_interface.h"
@@ -577,8 +576,6 @@ void InitialSyncer::_tearDown_inlock(OperationContext* opCtx,
tenant_migration_access_blocker::recoverTenantMigrationAccessBlockers(opCtx);
reconstructPreparedTransactions(opCtx, repl::OplogApplication::Mode::kInitialSync);
- ReplicaSetAwareServiceRegistry::get(opCtx->getServiceContext()).onInitialSyncComplete(opCtx);
-
_replicationProcess->getConsistencyMarkers()->setInitialSyncIdIfNotSet(opCtx);
// We set the initial data timestamp before clearing the initial sync flag. See comments in
diff --git a/src/mongo/db/repl/primary_only_service.h b/src/mongo/db/repl/primary_only_service.h
index 7e6fc1baf2e..3ac82044484 100644
--- a/src/mongo/db/repl/primary_only_service.h
+++ b/src/mongo/db/repl/primary_only_service.h
@@ -566,8 +566,7 @@ public:
std::vector<BSONObj>* ops) noexcept;
void onStartup(OperationContext*) final;
- void onStartupRecoveryComplete(OperationContext* opCtx) final {}
- void onInitialSyncComplete(OperationContext* opCtx) final {}
+ void onInitialDataAvailable(OperationContext* opCtx, bool isMajorityDataAvailable) final {}
void onShutdown() final;
void onStepUpBegin(OperationContext*, long long term) final {}
void onBecomeArbiter() final {}
diff --git a/src/mongo/db/repl/replica_set_aware_service.cpp b/src/mongo/db/repl/replica_set_aware_service.cpp
index fdf09046af2..f4851f9fe3d 100644
--- a/src/mongo/db/repl/replica_set_aware_service.cpp
+++ b/src/mongo/db/repl/replica_set_aware_service.cpp
@@ -56,15 +56,10 @@ void ReplicaSetAwareServiceRegistry::onStartup(OperationContext* opCtx) {
});
}
-void ReplicaSetAwareServiceRegistry::onStartupRecoveryComplete(OperationContext* opCtx) {
+void ReplicaSetAwareServiceRegistry::onInitialDataAvailable(OperationContext* opCtx,
+ bool isMajorityDataAvailable) {
std::for_each(_services.begin(), _services.end(), [&](ReplicaSetAwareInterface* service) {
- service->onStartupRecoveryComplete(opCtx);
- });
-}
-
-void ReplicaSetAwareServiceRegistry::onInitialSyncComplete(OperationContext* opCtx) {
- std::for_each(_services.begin(), _services.end(), [&](ReplicaSetAwareInterface* service) {
- service->onInitialSyncComplete(opCtx);
+ service->onInitialDataAvailable(opCtx, isMajorityDataAvailable);
});
}
diff --git a/src/mongo/db/repl/replica_set_aware_service.h b/src/mongo/db/repl/replica_set_aware_service.h
index f7ea8c3a71e..3f099610a5c 100644
--- a/src/mongo/db/repl/replica_set_aware_service.h
+++ b/src/mongo/db/repl/replica_set_aware_service.h
@@ -118,14 +118,17 @@ public:
virtual void onStartup(OperationContext* opCtx) = 0;
/**
- * Called after startup recovery has completed.
+ * Called when either initial sync or startup recovery has completed.
+ * Local reads are always available at this point, with no special restrictions on resource
+ * locks. If the "isMajorityDataAvailable" flag is set, the data read locally is also committed
+ * to a majority of replica set members. Otherwise, the local data may still be subject to
+ * rollback attempts, which will also crash the server.
+ * This is one of the first hooks a node runs after starting up, and it is expected to be
+ * invoked strictly before any calls to onRollback, although it may be preceded by OpObserver
+ * calls. In-memory state may be reconstructed here, subject to the difference in data
+ * availability described above.
*/
- virtual void onStartupRecoveryComplete(OperationContext* opCtx) = 0;
-
- /**
- * Called after initial sync has completed.
- */
- virtual void onInitialSyncComplete(OperationContext* opCtx) = 0;
+ virtual void onInitialDataAvailable(OperationContext* opCtx, bool isMajorityDataAvailable) = 0;
/**
* Called as part of ReplicationCoordinator shutdown.
@@ -200,8 +203,7 @@ public:
static ReplicaSetAwareServiceRegistry& get(ServiceContext* serviceContext);
void onStartup(OperationContext* opCtx) final;
- void onStartupRecoveryComplete(OperationContext* opCtx) final;
- void onInitialSyncComplete(OperationContext* opCtx) final;
+ void onInitialDataAvailable(OperationContext* opCtx, bool isMajorityDataAvailable) final;
void onShutdown() final;
void onStepUpBegin(OperationContext* opCtx, long long term) final;
void onStepUpComplete(OperationContext* opCtx, long long term) final;
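
To make the contract documented in the new header comment concrete, here is a minimal self-contained sketch of how an implementer might treat the isMajorityDataAvailable flag. rebuildInMemoryState and schedulePostMajorityWork are hypothetical names, not functions from this patch.

    #include <iostream>

    struct OperationContext {};  // stand-in for mongo::OperationContext

    void rebuildInMemoryState(OperationContext*) {
        std::cout << "rebuilding state from local reads\n";  // local reads are always safe here
    }

    void schedulePostMajorityWork(OperationContext*) {
        std::cout << "starting work that must never be rolled back\n";
    }

    void onInitialDataAvailable(OperationContext* opCtx, bool isMajorityDataAvailable) {
        rebuildInMemoryState(opCtx);
        if (isMajorityDataAvailable) {
            schedulePostMajorityWork(opCtx);
        }
        // Otherwise the data could still be hit by a rollback attempt (which crashes the
        // node), so keep anything derived from it cheap to reconstruct.
    }

    int main() {
        OperationContext opCtx;
        onInitialDataAvailable(&opCtx, false);  // e.g. after initial sync
        onInitialDataAvailable(&opCtx, true);   // e.g. after startup recovery
        return 0;
    }
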
diff --git a/src/mongo/db/repl/replica_set_aware_service_test.cpp b/src/mongo/db/repl/replica_set_aware_service_test.cpp
index df127bc5558..95bb0eb19e9 100644
--- a/src/mongo/db/repl/replica_set_aware_service_test.cpp
+++ b/src/mongo/db/repl/replica_set_aware_service_test.cpp
@@ -43,8 +43,7 @@ template <class ActualService>
class TestService : public ReplicaSetAwareService<ActualService> {
public:
int numCallsOnStartup{0};
- int numCallsOnStartupRecoveryComplete{0};
- int numCallsOnInitialSyncComplete{0};
+ int numCallsOnInitialDataAvailable{0};
int numCallsOnStepUpBegin{0};
int numCallsOnStepUpComplete{0};
int numCallsOnStepDown{0};
@@ -55,12 +54,8 @@ protected:
numCallsOnStartup++;
}
- void onStartupRecoveryComplete(OperationContext* opCtx) override {
- numCallsOnStartupRecoveryComplete++;
- }
-
- void onInitialSyncComplete(OperationContext* opCtx) override {
- numCallsOnInitialSyncComplete++;
+ void onInitialDataAvailable(OperationContext* opCtx, bool isMajorityDataAvailable) override {
+ numCallsOnInitialDataAvailable++;
}
void onStepUpBegin(OperationContext* opCtx, long long term) override {
@@ -145,10 +140,10 @@ private:
TestService::onStartup(opCtx);
}
- void onStartupRecoveryComplete(OperationContext* opCtx) final {
- ASSERT_EQ(numCallsOnStartupRecoveryComplete,
- ServiceB::get(getServiceContext())->numCallsOnStartupRecoveryComplete - 1);
- TestService::onStartupRecoveryComplete(opCtx);
+ void onInitialDataAvailable(OperationContext* opCtx, bool isMajorityDataAvailable) final {
+ ASSERT_EQ(numCallsOnInitialDataAvailable,
+ ServiceB::get(getServiceContext())->numCallsOnInitialDataAvailable - 1);
+ TestService::onInitialDataAvailable(opCtx, isMajorityDataAvailable);
}
void onStepUpBegin(OperationContext* opCtx, long long term) final {
@@ -222,32 +217,29 @@ TEST_F(ReplicaSetAwareServiceTest, ReplicaSetAwareService) {
auto c = ServiceC::get(sc);
ASSERT_EQ(0, a->numCallsOnStartup);
- ASSERT_EQ(0, a->numCallsOnStartupRecoveryComplete);
- ASSERT_EQ(0, a->numCallsOnInitialSyncComplete);
+ ASSERT_EQ(0, a->numCallsOnInitialDataAvailable);
ASSERT_EQ(0, a->numCallsOnStepUpBegin);
ASSERT_EQ(0, a->numCallsOnStepUpComplete);
ASSERT_EQ(0, a->numCallsOnStepDown);
ASSERT_EQ(0, a->numCallsOnBecomeArbiter);
ASSERT_EQ(0, b->numCallsOnStartup);
- ASSERT_EQ(0, b->numCallsOnStartupRecoveryComplete);
- ASSERT_EQ(0, b->numCallsOnInitialSyncComplete);
+ ASSERT_EQ(0, b->numCallsOnInitialDataAvailable);
ASSERT_EQ(0, b->numCallsOnStepUpBegin);
ASSERT_EQ(0, b->numCallsOnStepUpComplete);
ASSERT_EQ(0, b->numCallsOnStepDown);
ASSERT_EQ(0, b->numCallsOnBecomeArbiter);
ASSERT_EQ(0, c->numCallsOnStartup);
- ASSERT_EQ(0, c->numCallsOnStartupRecoveryComplete);
- ASSERT_EQ(0, c->numCallsOnInitialSyncComplete);
+ ASSERT_EQ(0, c->numCallsOnInitialDataAvailable);
ASSERT_EQ(0, c->numCallsOnStepUpBegin);
ASSERT_EQ(0, c->numCallsOnStepUpComplete);
ASSERT_EQ(0, c->numCallsOnStepDown);
ASSERT_EQ(0, c->numCallsOnBecomeArbiter);
ReplicaSetAwareServiceRegistry::get(sc).onStartup(opCtx);
- ReplicaSetAwareServiceRegistry::get(sc).onStartupRecoveryComplete(opCtx);
- ReplicaSetAwareServiceRegistry::get(sc).onInitialSyncComplete(opCtx);
+ ReplicaSetAwareServiceRegistry::get(sc).onInitialDataAvailable(opCtx, false
+ /* isMajorityDataAvailable */);
ReplicaSetAwareServiceRegistry::get(sc).onStepUpBegin(opCtx, _term);
ReplicaSetAwareServiceRegistry::get(sc).onStepUpBegin(opCtx, _term);
ReplicaSetAwareServiceRegistry::get(sc).onStepUpBegin(opCtx, _term);
@@ -257,24 +249,21 @@ TEST_F(ReplicaSetAwareServiceTest, ReplicaSetAwareService) {
ReplicaSetAwareServiceRegistry::get(sc).onBecomeArbiter();
ASSERT_EQ(0, a->numCallsOnStartup);
- ASSERT_EQ(0, a->numCallsOnStartupRecoveryComplete);
- ASSERT_EQ(0, a->numCallsOnInitialSyncComplete);
+ ASSERT_EQ(0, a->numCallsOnInitialDataAvailable);
ASSERT_EQ(0, a->numCallsOnStepUpBegin);
ASSERT_EQ(0, a->numCallsOnStepUpComplete);
ASSERT_EQ(0, a->numCallsOnStepDown);
ASSERT_EQ(0, a->numCallsOnBecomeArbiter);
ASSERT_EQ(1, b->numCallsOnStartup);
- ASSERT_EQ(1, b->numCallsOnStartupRecoveryComplete);
- ASSERT_EQ(1, b->numCallsOnInitialSyncComplete);
+ ASSERT_EQ(1, b->numCallsOnInitialDataAvailable);
ASSERT_EQ(3, b->numCallsOnStepUpBegin);
ASSERT_EQ(2, b->numCallsOnStepUpComplete);
ASSERT_EQ(1, b->numCallsOnStepDown);
ASSERT_EQ(1, b->numCallsOnBecomeArbiter);
ASSERT_EQ(1, c->numCallsOnStartup);
- ASSERT_EQ(1, c->numCallsOnStartupRecoveryComplete);
- ASSERT_EQ(1, c->numCallsOnInitialSyncComplete);
+ ASSERT_EQ(1, c->numCallsOnInitialDataAvailable);
ASSERT_EQ(3, c->numCallsOnStepUpBegin);
ASSERT_EQ(2, c->numCallsOnStepUpComplete);
ASSERT_EQ(1, c->numCallsOnStepDown);
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index fdb866263ad..a1296d98522 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -537,8 +537,6 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(
LOGV2(4280506, "Reconstructing prepared transactions");
reconstructPreparedTransactions(opCtx, OplogApplication::Mode::kRecovering);
- ReplicaSetAwareServiceRegistry::get(_service).onStartupRecoveryComplete(opCtx);
-
const auto lastOpTimeAndWallTimeResult = _externalState->loadLastOpTimeAndWallTime(opCtx);
// Use a callback here, because _finishLoadLocalConfig calls isself() which requires
@@ -834,6 +832,9 @@ void ReplicationCoordinatorImpl::_initialSyncerCompletionFunction(
_topCoord->resetMaintenanceCount();
}
+ ReplicaSetAwareServiceRegistry::get(_service).onInitialDataAvailable(
+ cc().makeOperationContext().get(), false /* isMajorityDataAvailable */);
+
// Transition from STARTUP2 to RECOVERING and start the producer and the applier.
// If the member state is REMOVED, this will do nothing until we receive a config with
// ourself in it.
diff --git a/src/mongo/db/repl/replication_recovery.cpp b/src/mongo/db/repl/replication_recovery.cpp
index 7d940aaa58f..5cd96227393 100644
--- a/src/mongo/db/repl/replication_recovery.cpp
+++ b/src/mongo/db/repl/replication_recovery.cpp
@@ -46,6 +46,7 @@
#include "mongo/db/repl/oplog_buffer.h"
#include "mongo/db/repl/oplog_interface_local.h"
#include "mongo/db/repl/repl_server_parameters_gen.h"
+#include "mongo/db/repl/replica_set_aware_service.h"
#include "mongo/db/repl/replication_consistency_markers_impl.h"
#include "mongo/db/repl/storage_interface.h"
#include "mongo/db/repl/transaction_oplog_application.h"
@@ -471,6 +472,12 @@ void ReplicationRecoveryImpl::recoverFromOplog(OperationContext* opCtx,
hangAfterOplogTruncationInRollback.pauseWhileSet();
+ // Truncation may need to adjust the initialDataTimestamp so we let it complete first.
+ if (!isRollbackRecovery) {
+ ReplicaSetAwareServiceRegistry::get(getGlobalServiceContext())
+ .onInitialDataAvailable(opCtx, true /* isMajorityDataAvailable */);
+ }
+
auto topOfOplogSW = _getTopOfOplog(opCtx);
if (topOfOplogSW.getStatus() == ErrorCodes::CollectionIsEmpty ||
topOfOplogSW.getStatus() == ErrorCodes::NamespaceNotFound) {
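
Taken together with the replication_coordinator_impl.cpp hunk above, the hook now has two call sites: startup recovery reports majority-committed data (true) only after oplog truncation has had a chance to adjust the initialDataTimestamp, initial sync completion reports not-yet-majority data (false), and rollback recovery never fires it. A simplified standalone sketch of that decision, with notifyServices standing in for the registry call:

    #include <iostream>

    struct OperationContext {};  // stand-in for mongo::OperationContext

    void notifyServices(OperationContext*, bool isMajorityDataAvailable) {
        std::cout << "onInitialDataAvailable(majority=" << isMajorityDataAvailable << ")\n";
    }

    // Startup recovery path: truncate first, notify second, skip for rollback recovery.
    void recoverFromOplog(OperationContext* opCtx, bool isRollbackRecovery) {
        // ... oplog truncation runs here and may adjust the initialDataTimestamp ...
        if (!isRollbackRecovery) {
            notifyServices(opCtx, true);  // recovered data is already majority committed
        }
        // ... oplog application continues from the recovery timestamp ...
    }

    // Initial sync completion path (logical or FCBIS).
    void onInitialSyncDone(OperationContext* opCtx) {
        notifyServices(opCtx, false);  // data is not yet known to be majority committed
    }

    int main() {
        OperationContext opCtx;
        recoverFromOplog(&opCtx, /*isRollbackRecovery=*/false);
        onInitialSyncDone(&opCtx);
        return 0;
    }
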
diff --git a/src/mongo/db/repl/replication_recovery_test.cpp b/src/mongo/db/repl/replication_recovery_test.cpp
index 6bb25c7abea..ee937d39648 100644
--- a/src/mongo/db/repl/replication_recovery_test.cpp
+++ b/src/mongo/db/repl/replication_recovery_test.cpp
@@ -53,6 +53,7 @@
#include "mongo/db/storage/storage_parameters_gen.h"
#include "mongo/db/transaction_participant.h"
#include "mongo/unittest/death_test.h"
+#include "mongo/unittest/log_test.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/str.h"
@@ -1614,6 +1615,31 @@ TEST_F(ReplicationRecoveryTest, RecoverySetsValidateFeaturesAsPrimaryToFalseWhil
ASSERT_FALSE(serverGlobalParams.validateFeaturesAsPrimary.load());
}
+TEST_F(ReplicationRecoveryTest, StartupRecoveryRunsCompletionHook) {
+ ReplicationRecoveryImpl recovery(getStorageInterface(), getConsistencyMarkers());
+ auto opCtx = getOperationContext();
+
+ getConsistencyMarkers()->setOplogTruncateAfterPoint(opCtx, Timestamp(2, 2));
+ getStorageInterfaceRecovery()->setRecoveryTimestamp(Timestamp(4, 4));
+ getConsistencyMarkers()->setAppliedThrough(opCtx, OpTime(Timestamp(4, 4), 1));
+ _setUpOplog(opCtx, getStorageInterface(), {1, 2, 3, 4});
+
+ auto severityGuard = unittest::MinimumLoggedSeverityGuard{logv2::LogComponent::kSharding,
+ logv2::LogSeverity::Debug(2)};
+ startCapturingLogMessages();
+ recovery.recoverFromOplog(opCtx, boost::none);
+ stopCapturingLogMessages();
+
+ ASSERT_EQUALS(1,
+ countTextFormatLogLinesContaining(
+ "Recovering all user writes recoverable critical sections"));
+
+ _assertDocsInOplog(opCtx, {1, 2, 3, 4});
+ _assertDocsInTestCollection(opCtx, {});
+
+ ASSERT_EQ(getConsistencyMarkers()->getOplogTruncateAfterPoint(opCtx), Timestamp());
+}
+
} // namespace
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/tenant_file_importer_service.h b/src/mongo/db/repl/tenant_file_importer_service.h
index 534b3d48f09..92aa2baa426 100644
--- a/src/mongo/db/repl/tenant_file_importer_service.h
+++ b/src/mongo/db/repl/tenant_file_importer_service.h
@@ -54,8 +54,7 @@ public:
private:
void onStartup(OperationContext* opCtx) final;
- void onStartupRecoveryComplete(OperationContext* opCtx) final {}
- void onInitialSyncComplete(OperationContext* opCtx) final {}
+ void onInitialDataAvailable(OperationContext* opCtx, bool isMajorityDataAvailable) final {}
void onShutdown() final {
stdx::lock_guard lk(_mutex);
diff --git a/src/mongo/db/s/balancer/balancer.h b/src/mongo/db/s/balancer/balancer.h
index 93d8b158bfa..be31a053d0a 100644
--- a/src/mongo/db/s/balancer/balancer.h
+++ b/src/mongo/db/s/balancer/balancer.h
@@ -198,8 +198,7 @@ private:
* ReplicaSetAwareService entry points.
*/
void onStartup(OperationContext* opCtx) final {}
- void onStartupRecoveryComplete(OperationContext* opCtx) final {}
- void onInitialSyncComplete(OperationContext* opCtx) final {}
+ void onInitialDataAvailable(OperationContext* opCtx, bool isMajorityDataAvailable) final {}
void onShutdown() final {}
void onStepUpBegin(OperationContext* opCtx, long long term) final;
void onStepUpComplete(OperationContext* opCtx, long long term) final;
diff --git a/src/mongo/db/s/balancer_stats_registry.h b/src/mongo/db/s/balancer_stats_registry.h
index f7a7d20dd58..84c587bb489 100644
--- a/src/mongo/db/s/balancer_stats_registry.h
+++ b/src/mongo/db/s/balancer_stats_registry.h
@@ -79,8 +79,8 @@ public:
long long getCollNumOrphanDocs(const UUID& collectionUUID) const;
private:
- void onStartupRecoveryComplete(OperationContext* opCtx) override final {}
- void onInitialSyncComplete(OperationContext* opCtx) override final {}
+ void onInitialDataAvailable(OperationContext* opCtx,
+ bool isMajorityDataAvailable) override final {}
void onStepUpBegin(OperationContext* opCtx, long long term) override final {}
void onBecomeArbiter() override final {}
void onShutdown() override final {}
diff --git a/src/mongo/db/s/dist_lock_manager_replset.cpp b/src/mongo/db/s/dist_lock_manager_replset.cpp
index 6f491dbff67..76e4795fc29 100644
--- a/src/mongo/db/s/dist_lock_manager_replset.cpp
+++ b/src/mongo/db/s/dist_lock_manager_replset.cpp
@@ -65,8 +65,7 @@ public:
static DistLockManagerService* get(OperationContext* opCtx);
void onStartup(OperationContext* opCtx) override {}
- void onStartupRecoveryComplete(OperationContext* opCtx) override {}
- void onInitialSyncComplete(OperationContext* opCtx) override {}
+ void onInitialDataAvailable(OperationContext* opCtx, bool isMajorityDataAvailable) override {}
void onShutdown() override {}
void onStepUpBegin(OperationContext* opCtx, long long term) override {
auto distLockManager = DistLockManager::get(opCtx);
diff --git a/src/mongo/db/s/recoverable_critical_section_service.cpp b/src/mongo/db/s/recoverable_critical_section_service.cpp
index 073c1be01ca..39406025140 100644
--- a/src/mongo/db/s/recoverable_critical_section_service.cpp
+++ b/src/mongo/db/s/recoverable_critical_section_service.cpp
@@ -57,7 +57,7 @@ bool inRecoveryMode(OperationContext* opCtx) {
}
const auto memberState = replCoord->getMemberState();
- return memberState.startup() || memberState.startup2() || memberState.rollback();
+ return memberState.startup2() || memberState.rollback();
}
} // namespace recoverable_critical_section_util
diff --git a/src/mongo/db/s/recoverable_critical_section_service.h b/src/mongo/db/s/recoverable_critical_section_service.h
index 3452cb0ede7..d1953b27942 100644
--- a/src/mongo/db/s/recoverable_critical_section_service.h
+++ b/src/mongo/db/s/recoverable_critical_section_service.h
@@ -104,11 +104,8 @@ public:
void recoverRecoverableCriticalSections(OperationContext* opCtx);
private:
- void onStartupRecoveryComplete(OperationContext* opCtx) override final {
- recoverRecoverableCriticalSections(opCtx);
- }
-
- void onInitialSyncComplete(OperationContext* opCtx) override final {
+ void onInitialDataAvailable(OperationContext* opCtx,
+ bool isMajorityDataAvailable) override final {
recoverRecoverableCriticalSections(opCtx);
}
diff --git a/src/mongo/db/s/user_writes_recoverable_critical_section_service.cpp b/src/mongo/db/s/user_writes_recoverable_critical_section_service.cpp
index 5aec4a6521f..6e15c008f19 100644
--- a/src/mongo/db/s/user_writes_recoverable_critical_section_service.cpp
+++ b/src/mongo/db/s/user_writes_recoverable_critical_section_service.cpp
@@ -53,7 +53,7 @@ bool inRecoveryMode(OperationContext* opCtx) {
}
const auto memberState = replCoord->getMemberState();
- return memberState.startup() || memberState.startup2() || memberState.rollback();
+ return memberState.startup2() || memberState.rollback();
}
} // namespace user_writes_recoverable_critical_section_util
diff --git a/src/mongo/db/s/user_writes_recoverable_critical_section_service.h b/src/mongo/db/s/user_writes_recoverable_critical_section_service.h
index 462c3f7fb85..a810242b87b 100644
--- a/src/mongo/db/s/user_writes_recoverable_critical_section_service.h
+++ b/src/mongo/db/s/user_writes_recoverable_critical_section_service.h
@@ -145,11 +145,8 @@ public:
void recoverRecoverableCriticalSections(OperationContext* opCtx);
private:
- void onStartupRecoveryComplete(OperationContext* opCtx) override final {
- recoverRecoverableCriticalSections(opCtx);
- }
-
- void onInitialSyncComplete(OperationContext* opCtx) override final {
+ void onInitialDataAvailable(OperationContext* opCtx,
+ bool isMajorityDataAvailable) override final {
recoverRecoverableCriticalSections(opCtx);
}
diff --git a/src/mongo/db/vector_clock_mongod.cpp b/src/mongo/db/vector_clock_mongod.cpp
index 226d762e2a4..8f791e87e3f 100644
--- a/src/mongo/db/vector_clock_mongod.cpp
+++ b/src/mongo/db/vector_clock_mongod.cpp
@@ -92,8 +92,7 @@ private:
// ReplicaSetAwareService methods implementation
void onStartup(OperationContext* opCtx) override {}
- void onStartupRecoveryComplete(OperationContext* opCtx) override {}
- void onInitialSyncComplete(OperationContext* opCtx) override {}
+ void onInitialDataAvailable(OperationContext* opCtx, bool isMajorityDataAvailable) override {}
void onShutdown() override {}
void onStepUpBegin(OperationContext* opCtx, long long term) override;
void onStepUpComplete(OperationContext* opCtx, long long term) override {}