-rw-r--r--   jstests/sharding/txn_two_phase_commit_failover.js       |  6
-rw-r--r--   jstests/sharding/txn_two_phase_commit_killop.js         |  3
-rw-r--r--   src/mongo/db/transaction_coordinator_driver.cpp         | 14
-rw-r--r--   src/mongo/db/transaction_coordinator_futures_util.cpp   | 46
4 files changed, 35 insertions(+), 34 deletions(-)
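
Both test files make the same fix: the skip count used to be passed as a top-level field alongside mode: "alwaysOn", which configureFailPoint apparently ignores, and is now passed as the mode document itself, so the failpoint skips its first N hits and then stays active. A minimal shell sketch of the before/after, reusing the coordPrimary connection and a failpoint name that appear in this diff (the skip value of 1 is illustrative):

    // Old form: always on, with a separate top-level skip field the command
    // does not interpret.
    assert.commandWorked(coordPrimary.adminCommand(
        {configureFailPoint: "hangWhileTargetingLocalHost", mode: "alwaysOn", skip: 1}));

    // New form: the skip count is part of the mode document, so the failpoint
    // ignores its first hit and then behaves like "alwaysOn".
    assert.commandWorked(coordPrimary.adminCommand(
        {configureFailPoint: "hangWhileTargetingLocalHost", mode: {skip: 1}}));

    // Turn the failpoint off once the test no longer needs it.
    assert.commandWorked(coordPrimary.adminCommand(
        {configureFailPoint: "hangWhileTargetingLocalHost", mode: "off"}));
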
diff --git a/jstests/sharding/txn_two_phase_commit_failover.js b/jstests/sharding/txn_two_phase_commit_failover.js
index ba185f33aed..85b6880fc97 100644
--- a/jstests/sharding/txn_two_phase_commit_failover.js
+++ b/jstests/sharding/txn_two_phase_commit_failover.js
@@ -158,8 +158,7 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
assert.commandWorked(coordPrimary.adminCommand({
configureFailPoint: failpointData.failpoint,
- mode: "alwaysOn",
- skip: (failpointData.skip ? failpointData.skip : 0),
+ mode: {skip: (failpointData.skip ? failpointData.skip : 0)},
}));
// Run commitTransaction through a parallel shell.
@@ -269,6 +268,9 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
failpointDataArr.forEach(function(failpointData) {
if (failpointData.failpoint == "hangWhileTargetingRemoteHost") {
failpointData.numTimesShouldBeHit++;
+ if (failpointData.skip) {
+ failpointData.skip++;
+ }
}
});
diff --git a/jstests/sharding/txn_two_phase_commit_killop.js b/jstests/sharding/txn_two_phase_commit_killop.js
index 415b99286b5..55245306b23 100644
--- a/jstests/sharding/txn_two_phase_commit_killop.js
+++ b/jstests/sharding/txn_two_phase_commit_killop.js
@@ -98,8 +98,7 @@
// Turn on failpoint to make the coordinator hang at the specified point.
assert.commandWorked(coordinator.adminCommand({
configureFailPoint: failpointData.failpoint,
- mode: "alwaysOn",
- skip: (failpointData.skip ? failpointData.skip : 0),
+ mode: {skip: (failpointData.skip ? failpointData.skip : 0)},
}));
// Run commitTransaction through a parallel shell.
diff --git a/src/mongo/db/transaction_coordinator_driver.cpp b/src/mongo/db/transaction_coordinator_driver.cpp
index 157b3001bf6..6e048c731b7 100644
--- a/src/mongo/db/transaction_coordinator_driver.cpp
+++ b/src/mongo/db/transaction_coordinator_driver.cpp
@@ -165,8 +165,8 @@ void persistParticipantListBlocking(OperationContext* opCtx,
if (MONGO_FAIL_POINT(hangBeforeWritingParticipantList)) {
LOG(0) << "Hit hangBeforeWritingParticipantList failpoint";
+ MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeWritingParticipantList);
}
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeWritingParticipantList);
OperationSessionInfo sessionInfo;
sessionInfo.setSessionId(lsid);
@@ -233,9 +233,9 @@ void persistParticipantListBlocking(OperationContext* opCtx,
if (MONGO_FAIL_POINT(hangBeforeWaitingForParticipantListWriteConcern)) {
LOG(0) << "Hit hangBeforeWaitingForParticipantListWriteConcern failpoint";
+ MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(
+ opCtx, hangBeforeWaitingForParticipantListWriteConcern);
}
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(
- opCtx, hangBeforeWaitingForParticipantListWriteConcern);
WriteConcernResult unusedWCResult;
uassertStatusOK(
@@ -332,8 +332,8 @@ void persistDecisionBlocking(OperationContext* opCtx,
if (MONGO_FAIL_POINT(hangBeforeWritingDecision)) {
LOG(0) << "Hit hangBeforeWritingDecision failpoint";
+ MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeWritingDecision);
}
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeWritingDecision);
OperationSessionInfo sessionInfo;
sessionInfo.setSessionId(lsid);
@@ -421,9 +421,9 @@ void persistDecisionBlocking(OperationContext* opCtx,
if (MONGO_FAIL_POINT(hangBeforeWaitingForDecisionWriteConcern)) {
LOG(0) << "Hit hangBeforeWaitingForDecisionWriteConcern failpoint";
+ MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx,
+ hangBeforeWaitingForDecisionWriteConcern);
}
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx,
- hangBeforeWaitingForDecisionWriteConcern);
WriteConcernResult unusedWCResult;
uassertStatusOK(
@@ -501,8 +501,8 @@ void deleteCoordinatorDocBlocking(OperationContext* opCtx,
if (MONGO_FAIL_POINT(hangBeforeDeletingCoordinatorDoc)) {
LOG(0) << "Hit hangBeforeDeletingCoordinatorDoc failpoint";
+ MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeDeletingCoordinatorDoc);
}
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeDeletingCoordinatorDoc);
OperationSessionInfo sessionInfo;
sessionInfo.setSessionId(lsid);
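
In the transaction_coordinator_driver.cpp hunks above, the interruptible pause moves inside the fail-point check, so the pause and the "Hit ... failpoint" log line announcing it are now gated by the same condition. Tests commonly wait for that log line before releasing the coordinator; a hedged sketch using the stock checkLog shell helper (the exact synchronization helper these suites use may differ):

    // Wait until the coordinator reports that it is parked on the failpoint,
    // then release it so the commit can make progress.
    checkLog.contains(coordPrimary, "Hit hangBeforeWritingParticipantList failpoint");
    assert.commandWorked(coordPrimary.adminCommand(
        {configureFailPoint: "hangBeforeWritingParticipantList", mode: "off"}));
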
diff --git a/src/mongo/db/transaction_coordinator_futures_util.cpp b/src/mongo/db/transaction_coordinator_futures_util.cpp
index 8d78428adc8..699af67c887 100644
--- a/src/mongo/db/transaction_coordinator_futures_util.cpp
+++ b/src/mongo/db/transaction_coordinator_futures_util.cpp
@@ -92,33 +92,33 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
// rather than going through the host targeting below. This ensures that the state changes
// for the participant and coordinator occur sequentially on a single branch of replica set
// history. See SERVER-38142 for details.
- return scheduleWork(
- [ this, shardId, commandObj = commandObj.getOwned() ](OperationContext * opCtx) {
- // Note: This internal authorization is tied to the lifetime of 'opCtx', which is
- // destroyed by 'scheduleWork' immediately after this lambda ends.
- AuthorizationSession::get(Client::getCurrent())->grantInternalAuthorization();
-
- if (MONGO_FAIL_POINT(hangWhileTargetingLocalHost)) {
- LOG(0) << "Hit hangWhileTargetingLocalHost failpoint";
- }
+ return scheduleWork([ this, shardId, commandObj = commandObj.getOwned() ](OperationContext *
+ opCtx) {
+ // Note: This internal authorization is tied to the lifetime of 'opCtx', which is
+ // destroyed by 'scheduleWork' immediately after this lambda ends.
+ AuthorizationSession::get(Client::getCurrent())->grantInternalAuthorization();
+
+ if (MONGO_FAIL_POINT(hangWhileTargetingLocalHost)) {
+ LOG(0) << "Hit hangWhileTargetingLocalHost failpoint";
MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangWhileTargetingLocalHost);
+ }
- const auto service = opCtx->getServiceContext();
- auto start = _executor->now();
+ const auto service = opCtx->getServiceContext();
+ auto start = _executor->now();
- auto requestOpMsg =
- OpMsgRequest::fromDBAndBody(NamespaceString::kAdminDb, commandObj).serialize();
- const auto replyOpMsg = OpMsg::parseOwned(
- service->getServiceEntryPoint()->handleRequest(opCtx, requestOpMsg).response);
+ auto requestOpMsg =
+ OpMsgRequest::fromDBAndBody(NamespaceString::kAdminDb, commandObj).serialize();
+ const auto replyOpMsg = OpMsg::parseOwned(
+ service->getServiceEntryPoint()->handleRequest(opCtx, requestOpMsg).response);
- // Document sequences are not yet being used for responses.
- invariant(replyOpMsg.sequences.empty());
+ // Document sequences are not yet being used for responses.
+ invariant(replyOpMsg.sequences.empty());
- // 'ResponseStatus' is the response format of a remote request sent over the network
- // so we simulate that format manually here, since we sent the request over the
- // loopback.
- return ResponseStatus{replyOpMsg.body.getOwned(), _executor->now() - start};
- });
+ // 'ResponseStatus' is the response format of a remote request sent over the network
+ // so we simulate that format manually here, since we sent the request over the
+ // loopback.
+ return ResponseStatus{replyOpMsg.body.getOwned(), _executor->now() - start};
+ });
}
return _targetHostAsync(shardId, readPref)
@@ -214,8 +214,8 @@ Future<HostAndPort> AsyncWorkScheduler::_targetHostAsync(const ShardId& shardId,
if (MONGO_FAIL_POINT(hangWhileTargetingRemoteHost)) {
LOG(0) << "Hit hangWhileTargetingRemoteHost failpoint";
+ MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangWhileTargetingRemoteHost);
}
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangWhileTargetingRemoteHost);
// TODO (SERVER-35678): Return a SemiFuture<HostAndPort> rather than using a blocking call
return shard->getTargeter()->findHostWithMaxWait(readPref, Seconds(20)).get(opCtx);