Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/mongod_main.cpp                        |  4 ++++
-rw-r--r--  src/mongo/db/s/transaction_coordinator_service.cpp  | 27 ++++++++++++++++++++++++++-
-rw-r--r--  src/mongo/db/s/transaction_coordinator_service.h    |  8 ++++++++
3 files changed, 38 insertions(+), 1 deletion(-)
diff --git a/src/mongo/db/mongod_main.cpp b/src/mongo/db/mongod_main.cpp
index 2fd444f818a..30b24b89db0 100644
--- a/src/mongo/db/mongod_main.cpp
+++ b/src/mongo/db/mongod_main.cpp
@@ -1315,6 +1315,10 @@ void shutdownTask(const ShutdownTaskArgs& shutdownArgs) {
         sr->shutdown();
     }
 
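+    // Shut down the TransactionCoordinatorService so it stops scheduling new transaction
+    // coordinators and joins any coordinator tasks still running from the previous term.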
+    if (ShardingState::get(serviceContext)->enabled()) {
+        TransactionCoordinatorService::get(serviceContext)->shutdown();
+    }
+
     // Validator shutdown must be called after setKillAllOperations is called. Otherwise, this can
     // deadlock.
     if (auto validator = LogicalTimeValidator::get(serviceContext)) {
diff --git a/src/mongo/db/s/transaction_coordinator_service.cpp b/src/mongo/db/s/transaction_coordinator_service.cpp
index 39f0ef4bef7..349db0ac079 100644
--- a/src/mongo/db/s/transaction_coordinator_service.cpp
+++ b/src/mongo/db/s/transaction_coordinator_service.cpp
@@ -179,6 +179,10 @@ void TransactionCoordinatorService::onStepUp(OperationContext* opCtx,
     joinPreviousRound();
 
     stdx::lock_guard<Latch> lg(_mutex);
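+    // If shutdown() has already been called, do not set up a new catalog and scheduler
+    // for this term.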
+    if (_isShuttingDown) {
+        return;
+    }
+
     invariant(!_catalogAndScheduler);
     _catalogAndScheduler = std::make_shared<CatalogAndScheduler>(opCtx->getServiceContext());
@@ -264,12 +268,25 @@ void TransactionCoordinatorService::onStepDown() {
     _catalogAndSchedulerToCleanup->onStepDown();
 }
 
+void TransactionCoordinatorService::shutdown() {
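+    // Mark the service as shutting down while holding the mutex so that concurrent
+    // onStepUp() and onShardingInitialization() calls observe the flag and return early.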
+    {
+        stdx::lock_guard<Latch> lg(_mutex);
+        _isShuttingDown = true;
+    }
+
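+    // Interrupt any active coordinators as on a regular step-down, then block until the
+    // coordinator tasks from the previous round have been joined.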
+    onStepDown();
+    joinPreviousRound();
+}
+
 void TransactionCoordinatorService::onShardingInitialization(OperationContext* opCtx,
                                                              bool isPrimary) {
     if (!isPrimary)
         return;
 
     stdx::lock_guard<Latch> lg(_mutex);
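+    // As in onStepUp(), never restart the catalog and scheduler after shutdown().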
+    if (_isShuttingDown) {
+        return;
+    }
     invariant(!_catalogAndScheduler);
     _catalogAndScheduler = std::make_shared<CatalogAndScheduler>(opCtx->getServiceContext());
@@ -289,18 +306,26 @@ TransactionCoordinatorService::_getCatalogAndScheduler(OperationContext* opCtx)
 }
 
 void TransactionCoordinatorService::joinPreviousRound() {
+    stdx::unique_lock<Latch> ul(_mutex);
+
     // onStepDown must have been called
     invariant(!_catalogAndScheduler);
 
     if (!_catalogAndSchedulerToCleanup)
         return;
 
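+    // Hold the scheduler alive through a local reference and release the mutex before
+    // joining, so the potentially blocking join() is not executed under _mutex.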
+    auto schedulerToCleanup = _catalogAndSchedulerToCleanup;
+
+    ul.unlock();
+
     LOGV2(22454, "Waiting for coordinator tasks from previous term to complete");
 
     // Block until all coordinators scheduled the previous time the service was primary to have
     // drained. Because the scheduler was interrupted, it should be extremely rare for there to be
     // any coordinators left, so if this actually causes blocking, it would most likely be a bug.
-    _catalogAndSchedulerToCleanup->join();
+    schedulerToCleanup->join();
+
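+    // Reacquire the mutex before clearing the cleanup reference.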
+    ul.lock();
     _catalogAndSchedulerToCleanup.reset();
 }
diff --git a/src/mongo/db/s/transaction_coordinator_service.h b/src/mongo/db/s/transaction_coordinator_service.h
index f50a511a8a3..4f931b75383 100644
--- a/src/mongo/db/s/transaction_coordinator_service.h
+++ b/src/mongo/db/s/transaction_coordinator_service.h
@@ -103,6 +103,11 @@ public:
     void onStepDown();
 
     /**
+     * Shuts down this service. The service is no longer usable once shutdown() has been
+     * called.
+     */
+    void shutdown();
+
+    /**
     * Called when an already established replica set is added as a shard to a cluster. Ensures
     * that the TransactionCoordinator service is started up if the replica set is currently
     * primary.
     */
@@ -151,6 +156,9 @@ private:
     // The catalog + scheduler instantiated at the last step-up attempt. When nullptr, it means
     // onStepUp has not been called yet after the last stepDown (or construction).
     std::shared_ptr<CatalogAndScheduler> _catalogAndScheduler;
+
+    // Set to true once shutdown() has been called.
+    bool _isShuttingDown{false};
 };
 
 }  // namespace mongo