author     Xuerui Fa <xuerui.fa@mongodb.com>   2019-10-16 18:05:39 +0000
committer  evergreen <evergreen@mongodb.com>   2019-10-16 18:05:39 +0000
commit     b3b494a72f0e19d7556bee627da7ae9b79e26a03 (patch)
tree       febde7da37919aba32e1ad092ba6bb540b55cf8b /src
parent     fbdd5712f9ff6714f5ce9804eb4e9decb25dd88b (diff)
SERVER-42825 Log and track metrics.repl.stateTransition counters after the node stops killing user operations
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/repl/replication_coordinator.h                   | 15
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp            | 86
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.h              | 38
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp  |  8
-rw-r--r--  src/mongo/db/repl/replication_coordinator_mock.cpp            |  7
-rw-r--r--  src/mongo/db/repl/replication_coordinator_mock.h              |  5
-rw-r--r--  src/mongo/db/repl/replication_coordinator_noop.cpp            |  7
-rw-r--r--  src/mongo/db/repl/replication_coordinator_noop.h              |  5
-rw-r--r--  src/mongo/db/repl/rollback_impl.cpp                           | 15
-rw-r--r--  src/mongo/db/repl/rollback_impl.h                             |  6
-rw-r--r--  src/mongo/embedded/replication_coordinator_embedded.cpp       |  7
-rw-r--r--  src/mongo/embedded/replication_coordinator_embedded.h         |  5
12 files changed, 161 insertions(+), 43 deletions(-)
diff --git a/src/mongo/db/repl/replication_coordinator.h b/src/mongo/db/repl/replication_coordinator.h
index 8bc865775b0..07b54534dfa 100644
--- a/src/mongo/db/repl/replication_coordinator.h
+++ b/src/mongo/db/repl/replication_coordinator.h
@@ -940,6 +940,21 @@ public:
*/
inline static constexpr StringData newPrimaryMsg = "new primary"_sd;
+ /*
+ * Specifies the state transitions that kill user operations. Used for tracking state transition
+ * metrics.
+ */
+ enum class OpsKillingStateTransitionEnum { kStepUp, kStepDown, kRollback };
+
+ /**
+ * Updates metrics around user ops when a state transition that kills user ops and select
+ * internal operations (i.e. step up, step down, or rollback) occurs. Also logs the metrics.
+ */
+ virtual void updateAndLogStateTransitionMetrics(
+ const ReplicationCoordinator::OpsKillingStateTransitionEnum stateTransition,
+ const size_t numOpsKilled,
+ const size_t numOpsRunning) const = 0;
+
protected:
ReplicationCoordinator();
};
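
Editor's note: the interface change above adds exactly one enum and one pure-virtual hook to ReplicationCoordinator. Below is a minimal standalone sketch of that shape; Coordinator and LoggingCoordinator are hypothetical stand-ins for ReplicationCoordinator and one of its concrete implementations, not MongoDB's real classes.

#include <cstddef>
#include <iostream>

class Coordinator {
public:
    // Mirrors ReplicationCoordinator::OpsKillingStateTransitionEnum.
    enum class OpsKillingStateTransitionEnum { kStepUp, kStepDown, kRollback };

    virtual ~Coordinator() = default;

    // Concrete coordinators update counters and log; mocks and no-ops stub it.
    virtual void updateAndLogStateTransitionMetrics(
        OpsKillingStateTransitionEnum stateTransition,
        std::size_t numOpsKilled,
        std::size_t numOpsRunning) const = 0;
};

class LoggingCoordinator : public Coordinator {
public:
    void updateAndLogStateTransitionMetrics(
        OpsKillingStateTransitionEnum stateTransition,
        std::size_t numOpsKilled,
        std::size_t numOpsRunning) const override {
        std::cout << "transition=" << static_cast<int>(stateTransition)
                  << " killed=" << numOpsKilled
                  << " running=" << numOpsRunning << '\n';
    }
};

int main() {
    LoggingCoordinator coord;
    coord.updateAndLogStateTransitionMetrics(
        Coordinator::OpsKillingStateTransitionEnum::kStepDown, 4, 2);
    return 0;
}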
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index a15e1944f92..d67ade26d70 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -105,15 +105,20 @@ Counter64 attemptsToBecomeSecondary;
ServerStatusMetricField<Counter64> displayAttemptsToBecomeSecondary(
"repl.apply.attemptsToBecomeSecondary", &attemptsToBecomeSecondary);
-// Tracks the number of operations killed on step down.
+// Tracks the last state transition performed in this replica set.
+std::string lastStateTransition;
+ServerStatusMetricField<std::string> displayLastStateTransition(
+ "repl.stateTransition.lastStateTransition", &lastStateTransition);
+
+// Tracks the number of operations killed on state transition.
Counter64 userOpsKilled;
-ServerStatusMetricField<Counter64> displayuserOpsKilled("repl.stepDown.userOperationsKilled",
+ServerStatusMetricField<Counter64> displayUserOpsKilled("repl.stateTransition.userOperationsKilled",
&userOpsKilled);
-// Tracks the number of operations left running on step down.
+// Tracks the number of operations left running on state transition.
Counter64 userOpsRunning;
-ServerStatusMetricField<Counter64> displayUserOpsRunning("repl.stepDown.userOperationsRunning",
- &userOpsRunning);
+ServerStatusMetricField<Counter64> displayUserOpsRunning(
+ "repl.stateTransition.userOperationsRunning", &userOpsRunning);
using CallbackArgs = executor::TaskExecutor::CallbackArgs;
using CallbackFn = executor::TaskExecutor::CallbackFn;
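
Editor's note: the hunk above renames the counters from repl.stepDown.* to repl.stateTransition.* and adds a repl.stateTransition.lastStateTransition string, all registered by file-scope ServerStatusMetricField objects. A toy standalone version of that static-registration pattern follows; the registry and MetricField type here are illustrative stand-ins, not MongoDB's actual metrics machinery.

#include <atomic>
#include <iostream>
#include <map>
#include <string>

// Toy registry keyed by the dotted metric path.
std::map<std::string, std::atomic<long long>*>& metricRegistry() {
    static std::map<std::string, std::atomic<long long>*> registry;
    return registry;
}

// Registrar object: constructing one at file scope publishes the counter.
struct MetricField {
    MetricField(const std::string& name, std::atomic<long long>* counter) {
        metricRegistry()[name] = counter;
    }
};

// File-scope counters under the new dotted paths used in the diff.
std::atomic<long long> userOpsKilled{0};
std::atomic<long long> userOpsRunning{0};
MetricField displayUserOpsKilled("repl.stateTransition.userOperationsKilled",
                                 &userOpsKilled);
MetricField displayUserOpsRunning("repl.stateTransition.userOperationsRunning",
                                  &userOpsRunning);

int main() {
    userOpsKilled += 3;
    for (const auto& [name, counter] : metricRegistry())
        std::cout << name << ": " << counter->load() << '\n';
    return 0;
}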
@@ -996,7 +1001,8 @@ void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* opCtx,
// internal operations. Although secondaries cannot accept writes, a step up can kill writes
// that were blocked behind the RSTL lock held by a step down attempt. These writes will be
// killed with a retryable error code during step up.
- AutoGetRstlForStepUpStepDown arsu(this, opCtx);
+ AutoGetRstlForStepUpStepDown arsu(
+ this, opCtx, ReplicationCoordinator::OpsKillingStateTransitionEnum::kStepUp);
lk.lock();
// Exit drain mode only if we're actually in draining mode, the apply buffer is empty in the
@@ -1024,10 +1030,6 @@ void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* opCtx,
invariant(status);
}
- // Reset the counters on step up.
- userOpsKilled.decrement(userOpsKilled.get());
- userOpsRunning.decrement(userOpsRunning.get());
-
// Must calculate the commit level again because firstOpTimeOfMyTerm wasn't set when we logged
// our election in onTransitionToPrimary(), above.
_updateLastCommittedOpTimeAndWallTime(lk);
@@ -1785,15 +1787,38 @@ void ReplicationCoordinatorImpl::waitForStepDownAttempt_forTest() {
}
}
-void ReplicationCoordinatorImpl::_updateAndLogStatsOnStepDown(
- const AutoGetRstlForStepUpStepDown* arsd) const {
- userOpsRunning.increment(arsd->getUserOpsRunning());
+void ReplicationCoordinatorImpl::updateAndLogStateTransitionMetrics(
+ const ReplicationCoordinator::OpsKillingStateTransitionEnum stateTransition,
+ const size_t numOpsKilled,
+ const size_t numOpsRunning) const {
+
+ // Clear the current metrics before setting.
+ userOpsKilled.decrement(userOpsKilled.get());
+ userOpsRunning.decrement(userOpsRunning.get());
+
+ switch (stateTransition) {
+ case ReplicationCoordinator::OpsKillingStateTransitionEnum::kStepUp:
+ lastStateTransition = "stepUp";
+ break;
+ case ReplicationCoordinator::OpsKillingStateTransitionEnum::kStepDown:
+ lastStateTransition = "stepDown";
+ break;
+ case ReplicationCoordinator::OpsKillingStateTransitionEnum::kRollback:
+ lastStateTransition = "rollback";
+ break;
+ default:
+ MONGO_UNREACHABLE;
+ }
+
+ userOpsKilled.increment(numOpsKilled);
+ userOpsRunning.increment(numOpsRunning);
BSONObjBuilder bob;
+ bob.append("lastStateTransition", lastStateTransition);
bob.appendNumber("userOpsKilled", userOpsKilled.get());
bob.appendNumber("userOpsRunning", userOpsRunning.get());
- log() << "Stepping down from primary, stats: " << bob.obj();
+ log() << "State transition ops metrics: " << bob.obj();
}
void ReplicationCoordinatorImpl::_killConflictingOpsOnStepUpAndStepDown(
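
Editor's note: the function above is the heart of the change. Rather than accumulating across transitions, it zeroes both counters by decrementing their current value, records the snapshot for the transition that just finished, and logs one summary line, so serverStatus always reflects the most recent ops-killing transition. A self-contained sketch of that logic, with std::atomic standing in for Counter64:

#include <atomic>
#include <cstddef>
#include <iostream>
#include <string>

enum class OpsKillingStateTransitionEnum { kStepUp, kStepDown, kRollback };

std::string lastStateTransition;
std::atomic<long long> userOpsKilled{0};
std::atomic<long long> userOpsRunning{0};

void updateAndLogStateTransitionMetrics(OpsKillingStateTransitionEnum t,
                                        std::size_t numOpsKilled,
                                        std::size_t numOpsRunning) {
    // Clear the current metrics before setting; decrementing by the current
    // value mirrors the Counter64 idiom in the diff.
    userOpsKilled -= userOpsKilled.load();
    userOpsRunning -= userOpsRunning.load();

    switch (t) {
        case OpsKillingStateTransitionEnum::kStepUp:
            lastStateTransition = "stepUp";
            break;
        case OpsKillingStateTransitionEnum::kStepDown:
            lastStateTransition = "stepDown";
            break;
        case OpsKillingStateTransitionEnum::kRollback:
            lastStateTransition = "rollback";
            break;
    }

    userOpsKilled += static_cast<long long>(numOpsKilled);
    userOpsRunning += static_cast<long long>(numOpsRunning);

    std::cout << "State transition ops metrics: { lastStateTransition: \""
              << lastStateTransition << "\", userOpsKilled: "
              << userOpsKilled.load() << ", userOpsRunning: "
              << userOpsRunning.load() << " }\n";
}

int main() {
    updateAndLogStateTransitionMetrics(
        OpsKillingStateTransitionEnum::kStepDown, 4, 2);
    return 0;
}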
@@ -1816,19 +1841,25 @@ void ReplicationCoordinatorImpl::_killConflictingOpsOnStepUpAndStepDown(
if (locker->wasGlobalLockTakenInModeConflictingWithWrites() ||
PrepareConflictTracker::get(toKill).isWaitingOnPrepareConflict()) {
serviceCtx->killOperation(lk, toKill, reason);
- userOpsKilled.increment();
+ arsc->incrementUserOpsKilled();
} else {
- arsc->incrUserOpsRunningBy();
+ arsc->incrementUserOpsRunning();
}
}
}
}
ReplicationCoordinatorImpl::AutoGetRstlForStepUpStepDown::AutoGetRstlForStepUpStepDown(
- ReplicationCoordinatorImpl* repl, OperationContext* opCtx, Date_t deadline)
- : _replCord(repl), _opCtx(opCtx) {
+ ReplicationCoordinatorImpl* repl,
+ OperationContext* opCtx,
+ const ReplicationCoordinator::OpsKillingStateTransitionEnum stateTransition,
+ Date_t deadline)
+ : _replCord(repl), _opCtx(opCtx), _stateTransition(stateTransition) {
invariant(_replCord && _opCtx);
+ // The state transition should never be rollback within this class.
+ invariant(_stateTransition != ReplicationCoordinator::OpsKillingStateTransitionEnum::kRollback);
+
// Enqueues RSTL in X mode.
_rstlLock.emplace(_opCtx, MODE_X, ReplicationStateTransitionLockGuard::EnqueueOnly());
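
Editor's note: the hunk above makes two related changes. The kill loop now charges its tallies to the AutoGetRstlForStepUpStepDown guard instead of the global counters, and the guard's constructor records which ops-killing transition it serves (never rollback, which has its own path in RollbackImpl). A standalone sketch of the kill/running classification; Op and its fields are hypothetical stand-ins for what the kill-op thread inspects.

#include <cstddef>
#include <iostream>
#include <vector>

struct Op {
    bool tookGlobalWriteLock;       // lock mode conflicting with writes
    bool waitingOnPrepareConflict;  // blocked behind a prepared transaction
    bool killed = false;
};

int main() {
    std::vector<Op> ops = {{true, false}, {false, true}, {false, false}};

    std::size_t userOpsKilled = 0;
    std::size_t userOpsRunning = 0;
    for (Op& op : ops) {
        if (op.tookGlobalWriteLock || op.waitingOnPrepareConflict) {
            op.killed = true;  // serviceCtx->killOperation(...) in the diff
            ++userOpsKilled;   // arsc->incrementUserOpsKilled()
        } else {
            ++userOpsRunning;  // arsc->incrementUserOpsRunning()
        }
    }
    std::cout << "killed=" << userOpsKilled
              << " running=" << userOpsRunning << '\n';
    return 0;
}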
@@ -1878,6 +1909,8 @@ void ReplicationCoordinatorImpl::AutoGetRstlForStepUpStepDown::_killOpThreadFn()
if (_stopKillingOps.wait_for(
lock, Milliseconds(10).toSystemDuration(), [this] { return _killSignaled; })) {
log() << "Stopped killing user operations";
+ _replCord->updateAndLogStateTransitionMetrics(
+ _stateTransition, getUserOpsKilled(), getUserOpsRunning());
_killSignaled = false;
return;
}
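
Editor's note: the hunk above moves the metrics report to the moment the kill-op thread is told to stop. The thread wakes every 10ms to kill newly conflicting ops; once _killSignaled is set it logs, pushes the guard's counts through updateAndLogStateTransitionMetrics, and exits. A simplified sketch of that stop protocol using standard-library primitives in place of MongoDB's:

#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex mtx;
std::condition_variable stopKillingOps;
bool killSignaled = false;

void killOpThreadFn() {
    std::unique_lock<std::mutex> lock(mtx);
    while (true) {
        // (kill newly conflicting operations here, as in the diff)
        if (stopKillingOps.wait_for(lock, std::chrono::milliseconds(10),
                                    [] { return killSignaled; })) {
            std::cout << "Stopped killing user operations\n";
            // This is where the diff reports metrics:
            // _replCord->updateAndLogStateTransitionMetrics(
            //     _stateTransition, getUserOpsKilled(), getUserOpsRunning());
            killSignaled = false;
            return;
        }
    }
}

int main() {
    std::thread killOpThread(killOpThreadFn);
    {
        std::lock_guard<std::mutex> lk(mtx);
        killSignaled = true;
    }
    stopKillingOps.notify_all();
    killOpThread.join();
    return 0;
}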
@@ -1898,11 +1931,19 @@ void ReplicationCoordinatorImpl::AutoGetRstlForStepUpStepDown::_stopAndWaitForKi
_killOpThread.reset();
}
+size_t ReplicationCoordinatorImpl::AutoGetRstlForStepUpStepDown::getUserOpsKilled() const {
+ return _userOpsKilled;
+}
+
+void ReplicationCoordinatorImpl::AutoGetRstlForStepUpStepDown::incrementUserOpsKilled(size_t val) {
+ _userOpsKilled += val;
+}
+
size_t ReplicationCoordinatorImpl::AutoGetRstlForStepUpStepDown::getUserOpsRunning() const {
return _userOpsRunning;
}
-void ReplicationCoordinatorImpl::AutoGetRstlForStepUpStepDown::incrUserOpsRunningBy(size_t val) {
+void ReplicationCoordinatorImpl::AutoGetRstlForStepUpStepDown::incrementUserOpsRunning(size_t val) {
_userOpsRunning += val;
}
@@ -1948,7 +1989,8 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
// fail if it does not acquire the lock immediately. In such a scenario, we use the
// stepDownUntil deadline instead.
auto deadline = force ? stepDownUntil : waitUntil;
- AutoGetRstlForStepUpStepDown arsd(this, opCtx, deadline);
+ AutoGetRstlForStepUpStepDown arsd(
+ this, opCtx, ReplicationCoordinator::OpsKillingStateTransitionEnum::kStepDown, deadline);
stdx::unique_lock<Latch> lk(_mutex);
@@ -2067,7 +2109,6 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
yieldLocksForPreparedTransactions(opCtx);
lk.lock();
- _updateAndLogStatsOnStepDown(&arsd);
// Clear the node's election candidate metrics since it is no longer primary.
ReplicationMetrics::get(opCtx).clearElectionCandidateMetrics();
@@ -2637,7 +2678,7 @@ void ReplicationCoordinatorImpl::_finishReplSetReconfig(OperationContext* opCtx,
// Primary node won't be electable or removed after the configuration change.
// So, finish the reconfig under RSTL, so that the step down occurs safely.
- arsd.emplace(this, opCtx);
+ arsd.emplace(this, opCtx, ReplicationCoordinator::OpsKillingStateTransitionEnum::kStepDown);
lk.lock();
if (_topCoord->isSteppingDownUnconditionally()) {
@@ -2651,7 +2692,6 @@ void ReplicationCoordinatorImpl::_finishReplSetReconfig(OperationContext* opCtx,
yieldLocksForPreparedTransactions(opCtx);
lk.lock();
- _updateAndLogStatsOnStepDown(&arsd.get());
// Clear the node's election candidate metrics since it is no longer primary.
ReplicationMetrics::get(opCtx).clearElectionCandidateMetrics();
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index 78f1df824b0..529ce2335fd 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -339,6 +339,11 @@ public:
virtual void finishRecoveryIfEligible(OperationContext* opCtx) override;
+ virtual void updateAndLogStateTransitionMetrics(
+ const ReplicationCoordinator::OpsKillingStateTransitionEnum stateTransition,
+ const size_t numOpsKilled,
+ const size_t numOpsRunning) const override;
+
// ================== Test support API ===================
/**
@@ -491,9 +496,11 @@ private:
// operations (user/system) and aborts stashed running transactions.
class AutoGetRstlForStepUpStepDown {
public:
- AutoGetRstlForStepUpStepDown(ReplicationCoordinatorImpl* repl,
- OperationContext* opCtx,
- Date_t deadline = Date_t::max());
+ AutoGetRstlForStepUpStepDown(
+ ReplicationCoordinatorImpl* repl,
+ OperationContext* opCtx,
+ ReplicationCoordinator::OpsKillingStateTransitionEnum stateTransition,
+ Date_t deadline = Date_t::max());
// Disallows copying.
AutoGetRstlForStepUpStepDown(const AutoGetRstlForStepUpStepDown&) = delete;
@@ -510,6 +517,16 @@ private:
void rstlReacquire();
/*
+ * Returns _userOpsKilled value.
+ */
+ size_t getUserOpsKilled() const;
+
+ /*
+ * Increments _userOpsKilled by val.
+ */
+ void incrementUserOpsKilled(size_t val = 1);
+
+ /*
* Returns _userOpsRunning value.
*/
size_t getUserOpsRunning() const;
@@ -517,7 +534,7 @@ private:
/*
* Increments _userOpsRunning by val.
*/
- void incrUserOpsRunningBy(size_t val = 1);
+ void incrementUserOpsRunning(size_t val = 1);
/*
* Returns the step up/step down opCtx.
@@ -570,7 +587,9 @@ private:
boost::optional<ReplicationStateTransitionLockGuard> _rstlLock;
// Thread that will run killOpThreadFn().
std::unique_ptr<stdx::thread> _killOpThread;
- // Tracks number of operations left running on step down.
+ // Tracks number of operations killed on step up / step down.
+ size_t _userOpsKilled = 0;
+ // Tracks number of operations left running on step up / step down.
size_t _userOpsRunning = 0;
// Protects killSignaled and stopKillingOps cond. variable.
Mutex _mutex = MONGO_MAKE_LATCH("AutoGetRstlForStepUpStepDown::_mutex");
@@ -578,6 +597,9 @@ private:
stdx::condition_variable _stopKillingOps;
// Once this is set to true, the killOpThreadFn method will terminate.
bool _killSignaled = false;
+ // The state transition that is in progress. Should never be set to rollback within this
+ // class.
+ ReplicationCoordinator::OpsKillingStateTransitionEnum _stateTransition;
};
struct Waiter {
@@ -1047,12 +1069,6 @@ private:
executor::TaskExecutor::EventHandle _stepDownStart();
/**
- * Update the "repl.stepDown.userOperationsRunning" counter and log number of operations
- * killed and left running on step down.
- */
- void _updateAndLogStatsOnStepDown(const AutoGetRstlForStepUpStepDown* arsd) const;
-
- /**
* kill all conflicting operations that are blocked either on prepare conflict or have taken
* global lock not in MODE_IS. The conflicting operations can be either user or system
* operations marked as killable.
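
Editor's note: summarizing the header changes above, AutoGetRstlForStepUpStepDown now owns both per-transition tallies and remembers which ops-killing transition it was created for, with the constructor asserting it is never used for rollback. A compilable sketch of that guard shape; StepUpStepDownGuard is a simplified stand-in, with RSTL handling and the kill-op thread omitted.

#include <cassert>
#include <cstddef>

enum class OpsKillingStateTransitionEnum { kStepUp, kStepDown, kRollback };

class StepUpStepDownGuard {
public:
    explicit StepUpStepDownGuard(OpsKillingStateTransitionEnum stateTransition)
        : _stateTransition(stateTransition) {
        // The state transition should never be rollback within this class.
        assert(_stateTransition != OpsKillingStateTransitionEnum::kRollback);
    }

    std::size_t getUserOpsKilled() const { return _userOpsKilled; }
    void incrementUserOpsKilled(std::size_t val = 1) { _userOpsKilled += val; }

    std::size_t getUserOpsRunning() const { return _userOpsRunning; }
    void incrementUserOpsRunning(std::size_t val = 1) { _userOpsRunning += val; }

private:
    // Tracks operations killed / left running on step up or step down.
    std::size_t _userOpsKilled = 0;
    std::size_t _userOpsRunning = 0;
    OpsKillingStateTransitionEnum _stateTransition;
};

int main() {
    StepUpStepDownGuard guard(OpsKillingStateTransitionEnum::kStepDown);
    guard.incrementUserOpsKilled();
    guard.incrementUserOpsRunning(2);
    assert(guard.getUserOpsKilled() == 1 && guard.getUserOpsRunning() == 2);
    return 0;
}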
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 4f97dce7ebe..a337b04507b 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -390,7 +390,8 @@ void ReplicationCoordinatorImpl::_stepDownFinish(
// kill all write operations which are no longer safe to run on step down. Also, operations that
// have taken global lock in S mode and operations blocked on prepare conflict will be killed to
// avoid 3-way deadlock between read, prepared transaction and step down thread.
- AutoGetRstlForStepUpStepDown arsd(this, opCtx.get());
+ AutoGetRstlForStepUpStepDown arsd(
+ this, opCtx.get(), ReplicationCoordinator::OpsKillingStateTransitionEnum::kStepDown);
stdx::unique_lock<Latch> lk(_mutex);
// This node has already stepped down due to reconfig. So, signal anyone who is waiting on the
@@ -407,7 +408,6 @@ void ReplicationCoordinatorImpl::_stepDownFinish(
yieldLocksForPreparedTransactions(opCtx.get());
lk.lock();
- _updateAndLogStatsOnStepDown(&arsd);
// Clear the node's election candidate metrics since it is no longer primary.
ReplicationMetrics::get(opCtx.get()).clearElectionCandidateMetrics();
@@ -619,7 +619,8 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
// Primary node will be either unelectable or removed after the configuration change.
// So, finish the reconfig under RSTL, so that the step down occurs safely.
- arsd.emplace(this, opCtx.get());
+ arsd.emplace(
+ this, opCtx.get(), ReplicationCoordinator::OpsKillingStateTransitionEnum::kStepDown);
lk.lock();
if (_topCoord->isSteppingDownUnconditionally()) {
@@ -633,7 +634,6 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
yieldLocksForPreparedTransactions(opCtx.get());
lk.lock();
- _updateAndLogStatsOnStepDown(&arsd.get());
// Clear the node's election candidate metrics since it is no longer primary.
ReplicationMetrics::get(opCtx.get()).clearElectionCandidateMetrics();
diff --git a/src/mongo/db/repl/replication_coordinator_mock.cpp b/src/mongo/db/repl/replication_coordinator_mock.cpp
index eb09d7384be..c3a6135807c 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_mock.cpp
@@ -562,5 +562,12 @@ void ReplicationCoordinatorMock::finishRecoveryIfEligible(OperationContext* opCt
return;
}
+void ReplicationCoordinatorMock::updateAndLogStateTransitionMetrics(
+ const ReplicationCoordinator::OpsKillingStateTransitionEnum stateTransition,
+ const size_t numOpsKilled,
+ const size_t numOpsRunning) const {
+ return;
+}
+
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/replication_coordinator_mock.h b/src/mongo/db/repl/replication_coordinator_mock.h
index 596776e1498..b857547662e 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.h
+++ b/src/mongo/db/repl/replication_coordinator_mock.h
@@ -319,6 +319,11 @@ public:
virtual void finishRecoveryIfEligible(OperationContext* opCtx) override;
+ virtual void updateAndLogStateTransitionMetrics(
+ const ReplicationCoordinator::OpsKillingStateTransitionEnum stateTransition,
+ const size_t numOpsKilled,
+ const size_t numOpsRunning) const override;
+
virtual void setCanAcceptNonLocalWrites(bool canAcceptNonLocalWrites);
private:
diff --git a/src/mongo/db/repl/replication_coordinator_noop.cpp b/src/mongo/db/repl/replication_coordinator_noop.cpp
index 44d45bada49..cde97b7c3d9 100644
--- a/src/mongo/db/repl/replication_coordinator_noop.cpp
+++ b/src/mongo/db/repl/replication_coordinator_noop.cpp
@@ -470,5 +470,12 @@ void ReplicationCoordinatorNoOp::finishRecoveryIfEligible(OperationContext* opCt
MONGO_UNREACHABLE;
}
+void ReplicationCoordinatorNoOp::updateAndLogStateTransitionMetrics(
+ const ReplicationCoordinator::OpsKillingStateTransitionEnum stateTransition,
+ const size_t numOpsKilled,
+ const size_t numOpsRunning) const {
+ MONGO_UNREACHABLE;
+}
+
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/replication_coordinator_noop.h b/src/mongo/db/repl/replication_coordinator_noop.h
index e8390f6fc42..961f72bff02 100644
--- a/src/mongo/db/repl/replication_coordinator_noop.h
+++ b/src/mongo/db/repl/replication_coordinator_noop.h
@@ -257,6 +257,11 @@ public:
void finishRecoveryIfEligible(OperationContext* opCtx) final;
+ void updateAndLogStateTransitionMetrics(
+ const ReplicationCoordinator::OpsKillingStateTransitionEnum stateTransition,
+ const size_t numOpsKilled,
+ const size_t numOpsRunning) const final;
+
private:
ServiceContext* const _service;
};
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index f88a6cdd751..12a5bc38b03 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -267,13 +267,13 @@ bool RollbackImpl::_isInShutdown() const {
return _inShutdown;
}
-namespace {
-void killAllUserOperations(OperationContext* opCtx) {
+void RollbackImpl::_killAllUserOperations(OperationContext* opCtx) {
invariant(opCtx);
ServiceContext* serviceCtx = opCtx->getServiceContext();
invariant(serviceCtx);
int numOpsKilled = 0;
+ int numOpsRunning = 0;
for (ServiceContext::LockedClientsCursor cursor(serviceCtx); Client* client = cursor.next();) {
stdx::lock_guard<Client> lk(*client);
@@ -291,12 +291,17 @@ void killAllUserOperations(OperationContext* opCtx) {
if (toKill && !toKill->isKillPending()) {
serviceCtx->killOperation(lk, toKill, ErrorCodes::InterruptedDueToReplStateChange);
numOpsKilled++;
+ } else {
+ numOpsRunning++;
}
}
- log() << "Killed {} operation(s) while transitioning to ROLLBACK"_format(numOpsKilled);
+ // Update the metrics for tracking user operations during state transitions.
+ _replicationCoordinator->updateAndLogStateTransitionMetrics(
+ ReplicationCoordinator::OpsKillingStateTransitionEnum::kRollback,
+ numOpsKilled,
+ numOpsRunning);
}
-} // namespace
Status RollbackImpl::_transitionToRollback(OperationContext* opCtx) {
invariant(opCtx);
@@ -312,7 +317,7 @@ Status RollbackImpl::_transitionToRollback(OperationContext* opCtx) {
// Kill all user operations to ensure we can successfully acquire the RSTL. Since the node
// must be a secondary, this is only killing readers, whose connections will be closed
// shortly regardless.
- killAllUserOperations(opCtx);
+ _killAllUserOperations(opCtx);
rstlLock.waitForLockUntil(Date_t::max());
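
Editor's note: rollback gets the same treatment above. The kill loop is promoted from a file-local helper to a RollbackImpl method so it can reach the coordinator, ops it does not kill (for example, those already kill-pending) are now tallied as still running, and the old log line is replaced by a kRollback metrics update. A standalone sketch of that path; ClientOp and the free functions are simplified stand-ins.

#include <cstddef>
#include <iostream>
#include <vector>

enum class OpsKillingStateTransitionEnum { kStepUp, kStepDown, kRollback };

struct ClientOp {
    bool killPending = false;
};

void updateAndLogStateTransitionMetrics(OpsKillingStateTransitionEnum,
                                        std::size_t numOpsKilled,
                                        std::size_t numOpsRunning) {
    std::cout << "rollback: killed=" << numOpsKilled
              << " running=" << numOpsRunning << '\n';
}

void killAllUserOperations(std::vector<ClientOp>& ops) {
    std::size_t numOpsKilled = 0;
    std::size_t numOpsRunning = 0;
    for (ClientOp& op : ops) {
        if (!op.killPending) {
            // killOperation(InterruptedDueToReplStateChange) in the diff.
            op.killPending = true;
            ++numOpsKilled;
        } else {
            ++numOpsRunning;
        }
    }
    updateAndLogStateTransitionMetrics(
        OpsKillingStateTransitionEnum::kRollback, numOpsKilled, numOpsRunning);
}

int main() {
    std::vector<ClientOp> ops(3);
    ops[1].killPending = true;
    killAllUserOperations(ops);
    return 0;
}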
diff --git a/src/mongo/db/repl/rollback_impl.h b/src/mongo/db/repl/rollback_impl.h
index 69dbb520161..d2a303c2e2c 100644
--- a/src/mongo/db/repl/rollback_impl.h
+++ b/src/mongo/db/repl/rollback_impl.h
@@ -349,6 +349,12 @@ private:
OperationContext* opCtx, RollBackLocalOperations::RollbackCommonPoint commonPoint) const;
/**
+ * Kills all user operations currently being performed. Since this node is a secondary, these
+ * operations are all reads.
+ */
+ void _killAllUserOperations(OperationContext* opCtx);
+
+ /**
* Uses the ReplicationCoordinator to transition the current member state to ROLLBACK.
* If the transition to ROLLBACK fails, this could mean that we have been elected PRIMARY. In
* this case, we return a NotSecondary error.
diff --git a/src/mongo/embedded/replication_coordinator_embedded.cpp b/src/mongo/embedded/replication_coordinator_embedded.cpp
index 408b5c78ee6..c24d840e964 100644
--- a/src/mongo/embedded/replication_coordinator_embedded.cpp
+++ b/src/mongo/embedded/replication_coordinator_embedded.cpp
@@ -496,5 +496,12 @@ void ReplicationCoordinatorEmbedded::finishRecoveryIfEligible(OperationContext*
UASSERT_NOT_IMPLEMENTED;
}
+void ReplicationCoordinatorEmbedded::updateAndLogStateTransitionMetrics(
+ const ReplicationCoordinator::OpsKillingStateTransitionEnum stateTransition,
+ const size_t numOpsKilled,
+ const size_t numOpsRunning) const {
+ UASSERT_NOT_IMPLEMENTED;
+}
+
} // namespace embedded
} // namespace mongo
diff --git a/src/mongo/embedded/replication_coordinator_embedded.h b/src/mongo/embedded/replication_coordinator_embedded.h
index 9fe16bf52dc..883d2578962 100644
--- a/src/mongo/embedded/replication_coordinator_embedded.h
+++ b/src/mongo/embedded/replication_coordinator_embedded.h
@@ -265,6 +265,11 @@ public:
void finishRecoveryIfEligible(OperationContext* opCtx) override;
+ void updateAndLogStateTransitionMetrics(
+ const ReplicationCoordinator::OpsKillingStateTransitionEnum stateTransition,
+ const size_t numOpsKilled,
+ const size_t numOpsRunning) const override;
+
private:
// Back pointer to the ServiceContext that has started the instance.
ServiceContext* const _service;