diff options
author | Paolo Polato <paolo.polato@mongodb.com> | 2022-06-08 06:16:31 +0000 |
---|---|---|
committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2022-06-08 06:41:28 +0000 |
commit | daf78f2f252f09913ff0f4c716580dbfc8cdc7f3 (patch) | |
tree | 78922dbf87c9b0682fb5f4838c4c4a699dcd0d26 | |
parent | 266864d409101588abcad8ebe75dec0cd60bf11c (diff) | |
download | mongo-daf78f2f252f09913ff0f4c716580dbfc8cdc7f3.tar.gz |
SERVER-65371 Interrupt shardSvrMoveRange when the node steps down
-rw-r--r-- | src/mongo/db/s/shardsvr_move_range_command.cpp | 44 |
1 file changed, 24 insertions, 20 deletions
diff --git a/src/mongo/db/s/shardsvr_move_range_command.cpp b/src/mongo/db/s/shardsvr_move_range_command.cpp index 0eb40a7fd31..df6d900aa3c 100644 --- a/src/mongo/db/s/shardsvr_move_range_command.cpp +++ b/src/mongo/db/s/shardsvr_move_range_command.cpp @@ -32,6 +32,7 @@ #include "mongo/db/auth/authorization_session.h" #include "mongo/db/commands.h" #include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/active_migrations_registry.h" #include "mongo/db/s/migration_source_manager.h" #include "mongo/db/s/sharding_state.h" @@ -84,7 +85,6 @@ public: void typedRun(OperationContext* opCtx) { uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands()); - opCtx->setAlwaysInterruptAtStepDownOrUp(); // Make sure we're as up-to-date as possible with shard information. This catches the // case where we might have changed a shard's host by removing/adding a shard with the @@ -113,26 +113,30 @@ public: tc->setSystemOperationKillableByStepdown(lk); } auto uniqueOpCtx = Client::getCurrent()->makeOperationContext(); - auto opCtx = uniqueOpCtx.get(); - - { - // Ensure that opCtx will get interrupted in the event of a - // stepdown. This is to ensure that the MigrationSourceManager - // checks that there are no pending migrationCoordinators documents - // (under the ActiveMigrationRegistry lock) on the same term during - // which the migrationCoordinators document will be persisted. - Lock::GlobalLock lk(opCtx, MODE_IX); - } - - // Note: This internal authorization is tied to the lifetime of the - // client. - AuthorizationSession::get(opCtx->getClient()) - ->grantInternalAuthorization(opCtx->getClient()); - + auto executorOpCtx = uniqueOpCtx.get(); Status status = {ErrorCodes::InternalError, "Uninitialized value"}; - try { - _runImpl(opCtx, std::move(req), std::move(writeConcern)); + executorOpCtx->setAlwaysInterruptAtStepDownOrUp(); + { + // Ensure that opCtx will get interrupted in the event of a + // stepdown. 
This is to ensure that the MigrationSourceManager + // checks that there are no pending migrationCoordinators + // documents (under the ActiveMigrationRegistry lock) on the + // same term during which the migrationCoordinators document + // will be persisted. + Lock::GlobalLock lk(executorOpCtx, MODE_IX); + uassert(ErrorCodes::InterruptedDueToReplStateChange, + "Not primary while attempting to start chunk migration " + "donation", + repl::ReplicationCoordinator::get(executorOpCtx) + ->getMemberState() + .primary()); + } + // Note: This internal authorization is tied to the lifetime of the + // client. + AuthorizationSession::get(executorOpCtx->getClient()) + ->grantInternalAuthorization(executorOpCtx->getClient()); + _runImpl(executorOpCtx, std::move(req), std::move(writeConcern)); status = Status::OK(); } catch (const DBException& e) { status = e.toStatus(); @@ -142,7 +146,7 @@ public: "error"_attr = redact(status)); if (status.code() == ErrorCodes::LockTimeout) { - ShardingStatistics::get(opCtx) + ShardingStatistics::get(executorOpCtx) .countDonorMoveChunkLockTimeout.addAndFetch(1); } } |