author    Esha Maharishi <esha.maharishi@mongodb.com>  2019-02-11 10:58:25 -0500
committer Esha Maharishi <esha.maharishi@mongodb.com>  2019-02-11 22:30:57 -0500
commit    d8cef1c71c1e9d8c59e354e08db121046af68c21 (patch)
tree      7a44b2e765c6691879468cd8f907a3f50dfd0d87 /src
parent    e85c37fe4c1663ec2be1532b9dd94c2d19fd7cfc (diff)
download  mongo-d8cef1c71c1e9d8c59e354e08db121046af68c21.tar.gz
SERVER-39474 Fix incorrect use of 'skip' parameter to configureFailPoint in txn_two_phase_commit_failover.js
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/transaction_coordinator_driver.cpp       | 14
-rw-r--r--  src/mongo/db/transaction_coordinator_futures_util.cpp | 46
2 files changed, 30 insertions(+), 30 deletions(-)
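The same refactor is applied at every failpoint site in the diff below: the MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED call moves inside the if (MONGO_FAIL_POINT(...)) block, so the operation pauses only on hits that the failpoint check itself says should trigger. The following is a minimal, self-contained sketch of why that ordering matters for a failpoint configured with a 'skip' count; FakeFailPoint, shouldFail, and pauseWhileSet are hypothetical stand-ins, not the real mongo::FailPoint API, and the assumption that the pause macro is unaware of the skip count is inferred from the commit title rather than taken from the failpoint implementation.

// Illustrative, self-contained model of the pattern change in this commit.
// 'FakeFailPoint', 'shouldFail', and 'pauseWhileSet' are hypothetical stand-ins
// for mongo::FailPoint, MONGO_FAIL_POINT(), and
// MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(); they are not the real API.
#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>

struct FakeFailPoint {
    std::atomic<bool> enabled{false};
    std::atomic<int> skip{0};

    // Plays the role of the MONGO_FAIL_POINT(fp) check: true only after the
    // configured number of hits has been skipped.
    bool shouldFail() {
        if (!enabled.load())
            return false;
        int remaining = skip.load();
        while (remaining > 0) {
            if (skip.compare_exchange_weak(remaining, remaining - 1))
                return false;  // this hit is consumed by the skip count
        }
        return true;
    }

    // Plays the role of the pause macro in this model: blocks for as long as
    // the failpoint stays enabled, with no awareness of the skip count.
    void pauseWhileSet() {
        while (enabled.load())
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
};

FakeFailPoint hangBeforeWritingParticipantList;

void persistParticipantListBlocking() {
    // Old shape: the pause ran unconditionally whenever the failpoint was
    // enabled, so a test that configured it with a skip count still hung on
    // the hits it asked to skip.
    //
    //     if (hangBeforeWritingParticipantList.shouldFail()) {
    //         std::cout << "Hit hangBeforeWritingParticipantList failpoint\n";
    //     }
    //     hangBeforeWritingParticipantList.pauseWhileSet();
    //
    // New shape, applied to every failpoint site in the diff below: pause only
    // when the skip-aware check says this hit should actually trigger.
    if (hangBeforeWritingParticipantList.shouldFail()) {
        std::cout << "Hit hangBeforeWritingParticipantList failpoint\n";
        hangBeforeWritingParticipantList.pauseWhileSet();
    }
    // ... perform the participant-list write ...
}

int main() {
    hangBeforeWritingParticipantList.enabled = true;
    hangBeforeWritingParticipantList.skip = 1;
    persistParticipantListBlocking();  // skipped hit: returns without pausing
    hangBeforeWritingParticipantList.enabled = false;
    persistParticipantListBlocking();  // disabled: no failpoint effect at all
    return 0;
}

Under these assumptions, the old ordering meant that a 'skip' value passed to configureFailPoint in txn_two_phase_commit_failover.js could not prevent the pause, which appears to be the misuse the commit title refers to.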
diff --git a/src/mongo/db/transaction_coordinator_driver.cpp b/src/mongo/db/transaction_coordinator_driver.cpp
index 157b3001bf6..6e048c731b7 100644
--- a/src/mongo/db/transaction_coordinator_driver.cpp
+++ b/src/mongo/db/transaction_coordinator_driver.cpp
@@ -165,8 +165,8 @@ void persistParticipantListBlocking(OperationContext* opCtx,
if (MONGO_FAIL_POINT(hangBeforeWritingParticipantList)) {
LOG(0) << "Hit hangBeforeWritingParticipantList failpoint";
+ MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeWritingParticipantList);
}
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeWritingParticipantList);
OperationSessionInfo sessionInfo;
sessionInfo.setSessionId(lsid);
@@ -233,9 +233,9 @@ void persistParticipantListBlocking(OperationContext* opCtx,
if (MONGO_FAIL_POINT(hangBeforeWaitingForParticipantListWriteConcern)) {
LOG(0) << "Hit hangBeforeWaitingForParticipantListWriteConcern failpoint";
+ MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(
+ opCtx, hangBeforeWaitingForParticipantListWriteConcern);
}
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(
- opCtx, hangBeforeWaitingForParticipantListWriteConcern);
WriteConcernResult unusedWCResult;
uassertStatusOK(
@@ -332,8 +332,8 @@ void persistDecisionBlocking(OperationContext* opCtx,
if (MONGO_FAIL_POINT(hangBeforeWritingDecision)) {
LOG(0) << "Hit hangBeforeWritingDecision failpoint";
+ MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeWritingDecision);
}
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeWritingDecision);
OperationSessionInfo sessionInfo;
sessionInfo.setSessionId(lsid);
@@ -421,9 +421,9 @@ void persistDecisionBlocking(OperationContext* opCtx,
if (MONGO_FAIL_POINT(hangBeforeWaitingForDecisionWriteConcern)) {
LOG(0) << "Hit hangBeforeWaitingForDecisionWriteConcern failpoint";
+ MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx,
+ hangBeforeWaitingForDecisionWriteConcern);
}
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx,
- hangBeforeWaitingForDecisionWriteConcern);
WriteConcernResult unusedWCResult;
uassertStatusOK(
@@ -501,8 +501,8 @@ void deleteCoordinatorDocBlocking(OperationContext* opCtx,
if (MONGO_FAIL_POINT(hangBeforeDeletingCoordinatorDoc)) {
LOG(0) << "Hit hangBeforeDeletingCoordinatorDoc failpoint";
+ MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeDeletingCoordinatorDoc);
}
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeDeletingCoordinatorDoc);
OperationSessionInfo sessionInfo;
sessionInfo.setSessionId(lsid);
diff --git a/src/mongo/db/transaction_coordinator_futures_util.cpp b/src/mongo/db/transaction_coordinator_futures_util.cpp
index 8d78428adc8..699af67c887 100644
--- a/src/mongo/db/transaction_coordinator_futures_util.cpp
+++ b/src/mongo/db/transaction_coordinator_futures_util.cpp
@@ -92,33 +92,33 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
// rather than going through the host targeting below. This ensures that the state changes
// for the participant and coordinator occur sequentially on a single branch of replica set
// history. See SERVER-38142 for details.
- return scheduleWork(
- [ this, shardId, commandObj = commandObj.getOwned() ](OperationContext * opCtx) {
- // Note: This internal authorization is tied to the lifetime of 'opCtx', which is
- // destroyed by 'scheduleWork' immediately after this lambda ends.
- AuthorizationSession::get(Client::getCurrent())->grantInternalAuthorization();
-
- if (MONGO_FAIL_POINT(hangWhileTargetingLocalHost)) {
- LOG(0) << "Hit hangWhileTargetingLocalHost failpoint";
- }
+ return scheduleWork([ this, shardId, commandObj = commandObj.getOwned() ](OperationContext *
+ opCtx) {
+ // Note: This internal authorization is tied to the lifetime of 'opCtx', which is
+ // destroyed by 'scheduleWork' immediately after this lambda ends.
+ AuthorizationSession::get(Client::getCurrent())->grantInternalAuthorization();
+
+ if (MONGO_FAIL_POINT(hangWhileTargetingLocalHost)) {
+ LOG(0) << "Hit hangWhileTargetingLocalHost failpoint";
MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangWhileTargetingLocalHost);
+ }
- const auto service = opCtx->getServiceContext();
- auto start = _executor->now();
+ const auto service = opCtx->getServiceContext();
+ auto start = _executor->now();
- auto requestOpMsg =
- OpMsgRequest::fromDBAndBody(NamespaceString::kAdminDb, commandObj).serialize();
- const auto replyOpMsg = OpMsg::parseOwned(
- service->getServiceEntryPoint()->handleRequest(opCtx, requestOpMsg).response);
+ auto requestOpMsg =
+ OpMsgRequest::fromDBAndBody(NamespaceString::kAdminDb, commandObj).serialize();
+ const auto replyOpMsg = OpMsg::parseOwned(
+ service->getServiceEntryPoint()->handleRequest(opCtx, requestOpMsg).response);
- // Document sequences are not yet being used for responses.
- invariant(replyOpMsg.sequences.empty());
+ // Document sequences are not yet being used for responses.
+ invariant(replyOpMsg.sequences.empty());
- // 'ResponseStatus' is the response format of a remote request sent over the network
- // so we simulate that format manually here, since we sent the request over the
- // loopback.
- return ResponseStatus{replyOpMsg.body.getOwned(), _executor->now() - start};
- });
+ // 'ResponseStatus' is the response format of a remote request sent over the network
+ // so we simulate that format manually here, since we sent the request over the
+ // loopback.
+ return ResponseStatus{replyOpMsg.body.getOwned(), _executor->now() - start};
+ });
}
return _targetHostAsync(shardId, readPref)
@@ -214,8 +214,8 @@ Future<HostAndPort> AsyncWorkScheduler::_targetHostAsync(const ShardId& shardId,
if (MONGO_FAIL_POINT(hangWhileTargetingRemoteHost)) {
LOG(0) << "Hit hangWhileTargetingRemoteHost failpoint";
+ MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangWhileTargetingRemoteHost);
}
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangWhileTargetingRemoteHost);
// TODO (SERVER-35678): Return a SemiFuture<HostAndPort> rather than using a blocking call
return shard->getTargeter()->findHostWithMaxWait(readPref, Seconds(20)).get(opCtx);
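The comment at the top of the local-shard branch of scheduleRemoteCommand above explains that the coordinator sends commands to itself through the local service entry point rather than over the network, so that participant and coordinator state changes stay on a single branch of replica set history (SERVER-38142). Below is a minimal, self-contained sketch of that loopback idea; Response, LocalServiceEntryPoint, and runViaLoopback are hypothetical stand-ins for the real ServiceEntryPoint/ResponseStatus machinery, not the mongo API.

// Illustrative model of the loopback branch: hand the serialized command to the
// local service entry point and wrap the reply in the same shape a remote
// response would have. All names here are hypothetical stand-ins.
#include <chrono>
#include <string>

struct Response {
    std::string body;                   // reply document
    std::chrono::milliseconds elapsed;  // simulated round-trip time
};

// Stand-in for the node's own command-dispatch layer.
struct LocalServiceEntryPoint {
    std::string handleRequest(const std::string& serializedCommand) {
        // A real entry point would parse and execute the command; the sketch
        // just returns a canned acknowledgement.
        (void)serializedCommand;
        return "{ ok: 1 }";
    }
};

// When the target shard is this node, skip host targeting entirely: run the
// command locally, then package the reply as if it had come back over the
// network so callers see one uniform response format.
Response runViaLoopback(LocalServiceEntryPoint& entryPoint, const std::string& commandObj) {
    auto start = std::chrono::steady_clock::now();
    auto reply = entryPoint.handleRequest(commandObj);
    auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::steady_clock::now() - start);
    return Response{reply, elapsed};
}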