Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/read_concern_mongod.cpp                | 10
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp  | 10
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.h    |  2
3 files changed, 18 insertions(+), 4 deletions(-)
diff --git a/src/mongo/db/read_concern_mongod.cpp b/src/mongo/db/read_concern_mongod.cpp
index 0967968159b..fb41d7122c6 100644
--- a/src/mongo/db/read_concern_mongod.cpp
+++ b/src/mongo/db/read_concern_mongod.cpp
@@ -34,6 +34,7 @@
#include "mongo/base/status.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/curop_failpoint_helpers.h"
#include "mongo/db/logical_clock.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/operation_context.h"
@@ -51,6 +52,8 @@ namespace mongo {
namespace {
+MONGO_FAIL_POINT_DEFINE(hangBeforeLinearizableReadConcern);
+
/**
* Synchronize writeRequests
*/
@@ -344,6 +347,12 @@ MONGO_REGISTER_SHIM(waitForReadConcern)
MONGO_REGISTER_SHIM(waitForLinearizableReadConcern)(OperationContext* opCtx)->Status {
+ CurOpFailpointHelpers::waitWhileFailPointEnabled(
+ &hangBeforeLinearizableReadConcern, opCtx, "hangBeforeLinearizableReadConcern", [opCtx]() {
+            log() << "hangBeforeLinearizableReadConcern fail point enabled. "
+                     "Blocking until fail point is disabled.";
+ });
+
repl::ReplicationCoordinator* replCoord =
repl::ReplicationCoordinator::get(opCtx->getClient()->getServiceContext());
@@ -370,6 +379,7 @@ MONGO_REGISTER_SHIM(waitForLinearizableReadConcern)(OperationContext* opCtx)->St
repl::OpTime lastOpApplied = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
auto awaitReplResult = replCoord->awaitReplication(opCtx, lastOpApplied, wc);
+
if (awaitReplResult.status == ErrorCodes::WriteConcernFailed) {
return Status(ErrorCodes::LinearizableReadConcernError,
"Failed to confirm that read was linearizable.");
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 65ecfe92aa5..287cf9bae41 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -97,6 +97,8 @@ MONGO_FAIL_POINT_DEFINE(stepdownHangBeforePerformingPostMemberStateUpdateActions
MONGO_FAIL_POINT_DEFINE(transitionToPrimaryHangBeforeTakingGlobalExclusiveLock);
MONGO_FAIL_POINT_DEFINE(holdStableTimestampAtSpecificTimestamp);
+MONGO_EXPORT_SERVER_PARAMETER(closeConnectionsOnStepdown, bool, true);
+
using CallbackArgs = executor::TaskExecutor::CallbackArgs;
using CallbackFn = executor::TaskExecutor::CallbackFn;
using CallbackHandle = executor::TaskExecutor::CallbackHandle;
@@ -2591,7 +2593,7 @@ ReplicationCoordinatorImpl::_updateMemberStateFromTopologyCoordinator(WithLock l
invariant(!_readWriteAbility->canAcceptNonLocalWrites(lk));
serverGlobalParams.validateFeaturesAsMaster.store(false);
- result = kActionCloseAllConnections;
+ result = kActionSteppedDownOrRemoved;
} else {
result = kActionFollowerModeStateChange;
}
@@ -2696,8 +2698,10 @@ void ReplicationCoordinatorImpl::_performPostMemberStateUpdateAction(
case kActionFollowerModeStateChange:
_onFollowerModeStateChange();
break;
- case kActionCloseAllConnections:
- _externalState->closeConnections();
+ case kActionSteppedDownOrRemoved:
+ if (closeConnectionsOnStepdown.load()) {
+ _externalState->closeConnections();
+ }
_externalState->shardingOnStepDownHook();
_externalState->stopNoopWriter();
break;
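
For context on the new knob: MONGO_EXPORT_SERVER_PARAMETER(closeConnectionsOnStepdown, bool, true) declares an atomic boolean and registers it as a server parameter, so it can be set at startup (mongod --setParameter closeConnectionsOnStepdown=false) or at runtime (db.adminCommand({setParameter: 1, closeConnectionsOnStepdown: false})). A simplified stand-in for the generated storage, using std::atomic<bool> in place of MongoDB's AtomicWord and omitting the parameter registration:

    #include <atomic>

    namespace mongo {

    // Simplified stand-in for what the macro generates; registration with
    // the setParameter machinery is omitted. Defaults to true, preserving
    // the old always-close-on-stepdown behavior.
    std::atomic<bool> closeConnectionsOnStepdown{true};

    // The stepdown path then takes a single lock-free read, mirroring the
    // hunk above.
    bool shouldCloseConnections() {
        return closeConnectionsOnStepdown.load();
    }

    }  // namespace mongo
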
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index ab6e2c4dd62..4e81b40b93a 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -439,7 +439,7 @@ private:
*/
enum PostMemberStateUpdateAction {
kActionNone,
- kActionCloseAllConnections, // Also indicates that we should clear sharding state.
+        kActionSteppedDownOrRemoved,  // Close connections (if enabled) and clear sharding state.
kActionFollowerModeStateChange,
kActionStartSingleNodeElection
};
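
Taken together, the rename and the new parameter change the contract from "stepping down closes all connections" to "stepping down raises an event whose connection handling is configurable": the enum now names the state transition, and the handler decides the response. A self-contained model of that control flow (plain C++ with illustrative names only, not MongoDB code):

    #include <atomic>
    #include <iostream>

    // The action names what happened (the node stepped down or was removed
    // from the config), not what to do about it.
    enum class Action { kNone, kSteppedDownOrRemoved, kFollowerModeStateChange };

    // Runtime flag standing in for the closeConnectionsOnStepdown parameter.
    std::atomic<bool> gCloseConnectionsOnStepdown{true};

    void performAction(Action a) {
        switch (a) {
            case Action::kSteppedDownOrRemoved:
                if (gCloseConnectionsOnStepdown.load()) {
                    std::cout << "closing all client connections\n";
                }
                // These side effects still run unconditionally, as in the patch.
                std::cout << "running sharding stepdown hook; stopping noop writer\n";
                break;
            default:
                break;
        }
    }

    int main() {
        performAction(Action::kSteppedDownOrRemoved);  // default: connections closed
        gCloseConnectionsOnStepdown.store(false);      // e.g. flipped via setParameter
        performAction(Action::kSteppedDownOrRemoved);  // connections stay open
    }
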