author     Ben Caimano <ben.caimano@mongodb.com>  2019-10-17 00:51:52 +0000
committer  evergreen <evergreen@mongodb.com>      2019-10-17 00:51:52 +0000
commit     27ed83ca30107c8e39417ba1dfed5ec0dd8b859d
tree       399e5b548782b6c84627389f94ddb61909e2746a
parent     b37eb88c8ae801751711dd54f2506d3561989db7
download   mongo-27ed83ca30107c8e39417ba1dfed5ec0dd8b859d.tar.gz
SERVER-43987 Require predicates with OperationContext::waitForConditionOrInterrupt()
Diffstat (limited to 'src/mongo/db/repl')
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp  |  42
1 file changed, 26 insertions, 16 deletions
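
The commit replaces bare waits on _currentCommittedSnapshotCond with the overload of
OperationContext::waitForConditionOrInterrupt() that takes a predicate, so the re-check
loop (and spurious-wakeup handling) lives inside the wait call rather than in each caller.
Below is a minimal standalone sketch of that pattern, assuming plain std::condition_variable
rather than MongoDB's OperationContext/Interruptible machinery; the names SnapshotState,
waitUntilBare, waitUntilPredicate, and advanceTo are illustrative, not from the MongoDB source.

    // Sketch: bare wait vs. predicate wait on a condition variable.
    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    struct SnapshotState {
        std::mutex mutex;
        std::condition_variable cond;
        long committedOpTime = 0;

        // Old style: wake on every notification (or spurious wakeup) and make the
        // caller re-check the condition in its own loop.
        void waitUntilBare(long target) {
            std::unique_lock<std::mutex> lock(mutex);
            while (committedOpTime < target) {
                cond.wait(lock);  // caller owns the re-check loop
            }
        }

        // New style: hand the predicate to the wait itself; the wait loops internally,
        // so spurious wakeups never leak out to the caller.
        void waitUntilPredicate(long target) {
            std::unique_lock<std::mutex> lock(mutex);
            cond.wait(lock, [&] { return committedOpTime >= target; });
        }

        void advanceTo(long opTime) {
            {
                std::lock_guard<std::mutex> lock(mutex);
                committedOpTime = opTime;
            }
            cond.notify_all();
        }
    };

    int main() {
        SnapshotState state;
        std::thread waiter([&] {
            state.waitUntilPredicate(3);
            std::cout << "snapshot reached target optime\n";
        });
        state.advanceTo(3);
        waiter.join();
    }
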
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index d67ade26d70..7103ecf2864 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -1382,27 +1382,36 @@ Status ReplicationCoordinatorImpl::_waitUntilOpTime(OperationContext* opCtx,
return {ErrorCodes::ShutdownInProgress, "Shutdown in progress"};
}
- // If we are doing a majority committed read we only need to wait for a new snapshot.
if (isMajorityCommittedRead) {
+ // If we are doing a majority committed read we only need to wait for a new snapshot to
+ // update getCurrentOpTime() past targetOpTime. This block should only run once and
+ // return without further looping.
+
LOG(3) << "waitUntilOpTime: waiting for a new snapshot until " << opCtx->getDeadline();
- auto waitStatus =
- opCtx->waitForConditionOrInterruptNoAssert(_currentCommittedSnapshotCond, lock);
- if (!waitStatus.isOK()) {
- return waitStatus.withContext(
+ try {
+ opCtx->waitForConditionOrInterrupt(_currentCommittedSnapshotCond, lock, [&] {
+ return _inShutdown || (targetOpTime <= getCurrentOpTime());
+ });
+ } catch (const DBException& e) {
+ return e.toStatus().withContext(
str::stream() << "Error waiting for snapshot not less than "
<< targetOpTime.toString() << ", current relevant optime is "
<< getCurrentOpTime().toString() << ".");
}
- if (!_currentCommittedSnapshot) {
- // It is possible for the thread to be awoken due to a spurious wakeup, meaning
- // the condition variable was never set.
- LOG(3) << "waitUntilOpTime: awoken but current committed snapshot is null."
- << " Continuing to wait for new snapshot.";
- } else {
+
+ if (_inShutdown) {
+ return {ErrorCodes::ShutdownInProgress, "Shutdown in progress"};
+ }
+
+ if (_currentCommittedSnapshot) {
+ // It seems that targetOpTime can sometimes be default OpTime{}. When there is no
+ // _currentCommittedSnapshot, _getCurrentCommittedSnapshotOpTime_inlock() and thus
+ // getCurrentOpTime() also return default OpTime{}. Hence this branch that only runs
+ // if _currentCommittedSnapshot actually exists.
LOG(3) << "Got notified of new snapshot: " << _currentCommittedSnapshot->toString();
}
- continue;
+ return Status::OK();
}
// We just need to wait for the opTime to catch up to what we need (not majority RC).
@@ -4003,10 +4012,11 @@ void ReplicationCoordinatorImpl::waitUntilSnapshotCommitted(OperationContext* op
uassert(ErrorCodes::NotYetInitialized,
"Cannot use snapshots until replica set is finished initializing.",
_rsConfigState != kConfigUninitialized && _rsConfigState != kConfigInitiating);
- while (!_currentCommittedSnapshot ||
- _currentCommittedSnapshot->opTime.getTimestamp() < untilSnapshot) {
- opCtx->waitForConditionOrInterrupt(_currentCommittedSnapshotCond, lock);
- }
+
+ opCtx->waitForConditionOrInterrupt(_currentCommittedSnapshotCond, lock, [&] {
+ return _currentCommittedSnapshot &&
+ _currentCommittedSnapshot->opTime.getTimestamp() >= untilSnapshot;
+ });
}
size_t ReplicationCoordinatorImpl::getNumUncommittedSnapshots() {
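
In the first hunk, the predicate overload asserts on interruption, so the caller wraps the
wait in try/catch and turns the DBException into a Status with extra context via
e.toStatus().withContext(), then checks _inShutdown separately after the wait returns.
The following standalone sketch mirrors that shape, modeling interruption with a deadline;
StatusLike, State, and waitForSnapshotUntil are illustrative stand-ins, not MongoDB APIs.

    // Sketch: predicate wait with a deadline, converted into a status-like error
    // that carries the same kind of context string as the diff above.
    #include <chrono>
    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <sstream>
    #include <string>

    struct StatusLike {
        bool ok;
        std::string reason;
    };

    struct State {
        std::mutex mutex;
        std::condition_variable cond;
        long committedOpTime = 0;
        bool inShutdown = false;
    };

    StatusLike waitForSnapshotUntil(State& state,
                                    long targetOpTime,
                                    std::chrono::steady_clock::time_point deadline) {
        std::unique_lock<std::mutex> lock(state.mutex);
        bool satisfied = state.cond.wait_until(lock, deadline, [&] {
            return state.inShutdown || state.committedOpTime >= targetOpTime;
        });
        if (!satisfied) {
            // Analogue of catching the interrupt exception and attaching context.
            std::ostringstream msg;
            msg << "Error waiting for snapshot not less than " << targetOpTime
                << ", current relevant optime is " << state.committedOpTime << ".";
            return {false, msg.str()};
        }
        if (state.inShutdown) {
            return {false, "Shutdown in progress"};
        }
        return {true, ""};
    }

    int main() {
        State state;
        auto status = waitForSnapshotUntil(
            state, 5, std::chrono::steady_clock::now() + std::chrono::milliseconds(10));
        std::cout << (status.ok ? "ok" : status.reason) << "\n";
    }
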