-rw-r--r--  docs/primary_only_service.md | 2
-rw-r--r--  jstests/replsets/tenant_migration_abort_forget_retry.js | 2
-rw-r--r--  jstests/replsets/tenant_migration_donor_interrupt_on_stepdown_and_shutdown.js | 2
-rw-r--r--  jstests/replsets/tenant_migration_donor_resume_on_stepup_and_restart.js | 2
-rw-r--r--  jstests/replsets/tenant_migration_donor_try_abort.js | 6
-rw-r--r--  src/mongo/base/error_codes.yml | 18
-rw-r--r--  src/mongo/client/dbclient_rs.cpp | 4
-rw-r--r--  src/mongo/client/remote_command_targeter.h | 6
-rw-r--r--  src/mongo/client/remote_command_targeter_factory_mock.cpp | 4
-rw-r--r--  src/mongo/client/remote_command_targeter_mock.cpp | 4
-rw-r--r--  src/mongo/client/remote_command_targeter_mock.h | 4
-rw-r--r--  src/mongo/client/remote_command_targeter_rs.cpp | 6
-rw-r--r--  src/mongo/client/remote_command_targeter_rs.h | 4
-rw-r--r--  src/mongo/client/remote_command_targeter_standalone.cpp | 4
-rw-r--r--  src/mongo/client/remote_command_targeter_standalone.h | 4
-rw-r--r--  src/mongo/client/replica_set_monitor_integration_test.cpp | 2
-rw-r--r--  src/mongo/client/replica_set_monitor_interface.h | 10
-rw-r--r--  src/mongo/client/scanning_replica_set_monitor.cpp | 8
-rw-r--r--  src/mongo/client/scanning_replica_set_monitor.h | 12
-rw-r--r--  src/mongo/client/server_ping_monitor.cpp | 4
-rw-r--r--  src/mongo/client/streamable_replica_set_monitor.cpp | 18
-rw-r--r--  src/mongo/client/streamable_replica_set_monitor.h | 8
-rw-r--r--  src/mongo/db/cancelable_operation_context.cpp | 4
-rw-r--r--  src/mongo/db/cancelable_operation_context.h | 18
-rw-r--r--  src/mongo/db/cancelable_operation_context_test.cpp | 34
-rw-r--r--  src/mongo/db/operation_context.cpp | 5
-rw-r--r--  src/mongo/db/operation_context.h | 10
-rw-r--r--  src/mongo/db/operation_context_test.cpp | 44
-rw-r--r--  src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp | 2
-rw-r--r--  src/mongo/db/repl/primary_only_service.cpp | 4
-rw-r--r--  src/mongo/db/repl/primary_only_service.h | 26
-rw-r--r--  src/mongo/db/repl/primary_only_service_test.cpp | 12
-rw-r--r--  src/mongo/db/repl/rollback_checker.cpp | 2
-rw-r--r--  src/mongo/db/repl/tenant_migration_access_blocker_util.cpp | 2
-rw-r--r--  src/mongo/db/repl/tenant_migration_donor_access_blocker.cpp | 4
-rw-r--r--  src/mongo/db/repl/tenant_migration_donor_service.cpp | 76
-rw-r--r--  src/mongo/db/repl/tenant_migration_donor_service.h | 36
-rw-r--r--  src/mongo/db/repl/tenant_migration_recipient_access_blocker.cpp | 2
-rw-r--r--  src/mongo/db/repl/tenant_migration_recipient_service.cpp | 32
-rw-r--r--  src/mongo/db/repl/tenant_migration_recipient_service.h | 6
-rw-r--r--  src/mongo/db/repl/tenant_migration_util.cpp | 8
-rw-r--r--  src/mongo/db/repl/tenant_migration_util.h | 2
-rw-r--r--  src/mongo/db/repl/wait_for_majority_service.cpp | 12
-rw-r--r--  src/mongo/db/repl/wait_for_majority_service.h | 4
-rw-r--r--  src/mongo/db/repl/wait_for_majority_service_test.cpp | 50
-rw-r--r--  src/mongo/db/s/drop_collection_coordinator.cpp | 2
-rw-r--r--  src/mongo/db/s/drop_collection_coordinator.h | 2
-rw-r--r--  src/mongo/db/s/drop_database_coordinator.cpp | 2
-rw-r--r--  src/mongo/db/s/drop_database_coordinator.h | 2
-rw-r--r--  src/mongo/db/s/range_deletion_util.cpp | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_collection_cloner.cpp | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_collection_cloner.h | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_service.cpp | 12
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_service.h | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_oplog_iterator.h | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_service.cpp | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_service.h | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_applier.cpp | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_applier.h | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp | 102
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_fetcher.h | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_recipient_service.cpp | 16
-rw-r--r--  src/mongo/db/s/resharding/resharding_recipient_service.h | 10
-rw-r--r--  src/mongo/db/s/resharding/resharding_txn_cloner.cpp | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_txn_cloner.h | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp | 22
-rw-r--r--  src/mongo/db/s/resharding_test_commands.cpp | 2
-rw-r--r--  src/mongo/db/s/shard_server_catalog_cache_loader.cpp | 8
-rw-r--r--  src/mongo/db/s/sharding_ddl_coordinator.cpp | 2
-rw-r--r--  src/mongo/db/s/sharding_ddl_coordinator.h | 4
-rw-r--r--  src/mongo/db/s/sharding_initialization_mongod.cpp | 2
-rw-r--r--  src/mongo/db/s/transaction_coordinator.cpp | 4
-rw-r--r--  src/mongo/db/s/transaction_coordinator_futures_util.cpp | 2
-rw-r--r--  src/mongo/db/storage/storage_engine_impl.cpp | 2
-rw-r--r--  src/mongo/executor/cancelable_executor.h | 17
-rw-r--r--  src/mongo/executor/cancelable_executor_test.cpp | 18
-rw-r--r--  src/mongo/executor/network_interface.h | 2
-rw-r--r--  src/mongo/executor/network_interface_mock.h | 2
-rw-r--r--  src/mongo/executor/network_interface_tl.cpp | 6
-rw-r--r--  src/mongo/executor/task_executor.cpp | 18
-rw-r--r--  src/mongo/executor/task_executor.h | 22
-rw-r--r--  src/mongo/executor/task_executor_test_common.cpp | 50
-rw-r--r--  src/mongo/executor/thread_pool_task_executor.cpp | 8
-rw-r--r--  src/mongo/executor/thread_pool_task_executor_integration_test.cpp | 6
-rw-r--r--  src/mongo/executor/thread_pool_task_executor_test.cpp | 2
-rw-r--r--  src/mongo/s/async_requests_sender.cpp | 2
-rw-r--r--  src/mongo/s/mongos_main.cpp | 4
-rw-r--r--  src/mongo/transport/session_asio.h | 6
-rw-r--r--  src/mongo/util/README.md | 82
-rw-r--r--  src/mongo/util/SConscript | 6
-rw-r--r--  src/mongo/util/cancellation.h (renamed from src/mongo/util/cancelation.h) | 148
-rw-r--r--  src/mongo/util/cancellation_bm.cpp (renamed from src/mongo/util/cancelation_bm.cpp) | 40
-rw-r--r--  src/mongo/util/cancellation_test.cpp (renamed from src/mongo/util/cancelation_test.cpp) | 69
-rw-r--r--  src/mongo/util/fail_point.h | 6
-rw-r--r--  src/mongo/util/fail_point_test.cpp | 4
-rw-r--r--  src/mongo/util/future_util.h | 22
-rw-r--r--  src/mongo/util/future_util_test.cpp | 91
-rw-r--r--  src/mongo/util/out_of_line_executor.h | 2
-rw-r--r--  src/mongo/util/read_through_cache.h | 4
102 files changed, 717 insertions, 713 deletions
diff --git a/docs/primary_only_service.md b/docs/primary_only_service.md
index 36660b141a1..415d9da5100 100644
--- a/docs/primary_only_service.md
+++ b/docs/primary_only_service.md
@@ -91,7 +91,7 @@ Instance is explicitly interrupted, so that it can unblock any work running on t
*not* a part of an executor owned by the PrimaryOnlyService that are dependent on that Instance
signaling them (e.g. commands that are waiting on the Instance to reach a certain state). Currently
this happens via a call to an interrupt() method that each Instance must override, but in the future
-this is likely to change to signaling a CancelationToken owned by the Instance instead.
+this is likely to change to signaling a CancellationToken owned by the Instance instead.
## Instance lifetime
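
Illustrative sketch (not part of the patch): roughly what an Instance could look like once it watches the CancellationToken described above instead of relying on interrupt(). The class and helper names are hypothetical; only the run() signature and token usage mirror this commit.

    class MyInstance : public repl::PrimaryOnlyService::Instance {
    public:
        SemiFuture<void> run(std::shared_ptr<executor::ScopedTaskExecutor> executor,
                             const CancellationToken& token) noexcept override {
            return ExecutorFuture<void>(**executor)
                .then([this, token] {
                    // The service cancels 'token' on stepdown/shutdown.
                    uassert(ErrorCodes::CallbackCanceled, "instance canceled", !token.isCanceled());
                    _doOneUnitOfWork();
                })
                .semi();
        }

        void interrupt(Status status) override {}  // would become unnecessary

    private:
        void _doOneUnitOfWork();
    };
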
diff --git a/jstests/replsets/tenant_migration_abort_forget_retry.js b/jstests/replsets/tenant_migration_abort_forget_retry.js
index ed90b73fb3e..138957cc62f 100644
--- a/jstests/replsets/tenant_migration_abort_forget_retry.js
+++ b/jstests/replsets/tenant_migration_abort_forget_retry.js
@@ -89,7 +89,7 @@ if (!tenantMigrationTest.isFeatureFlagEnabled()) {
assert.soon(() => {
const res = assert.commandWorked(donorPrimary.adminCommand(
{currentOp: true, desc: "tenant donor migration", tenantId: tenantId}));
- return res.inprog[0].receivedCancelation;
+ return res.inprog[0].receivedCancellation;
});
fp.off();
diff --git a/jstests/replsets/tenant_migration_donor_interrupt_on_stepdown_and_shutdown.js b/jstests/replsets/tenant_migration_donor_interrupt_on_stepdown_and_shutdown.js
index 7f33b401254..f6a5c3011e2 100644
--- a/jstests/replsets/tenant_migration_donor_interrupt_on_stepdown_and_shutdown.js
+++ b/jstests/replsets/tenant_migration_donor_interrupt_on_stepdown_and_shutdown.js
@@ -146,7 +146,7 @@ function testDonorAbortMigrationInterrupt(interruptFunc, verifyCmdResponseFunc,
assert.soon(() => {
const res = assert.commandWorked(
donorPrimary.adminCommand({currentOp: true, desc: "tenant donor migration"}));
- return res.inprog[0].receivedCancelation;
+ return res.inprog[0].receivedCancellation;
});
interruptFunc(donorRst, migrationId, migrationOpts.tenantId);
diff --git a/jstests/replsets/tenant_migration_donor_resume_on_stepup_and_restart.js b/jstests/replsets/tenant_migration_donor_resume_on_stepup_and_restart.js
index 8e915d9e724..ed5e9aafec5 100644
--- a/jstests/replsets/tenant_migration_donor_resume_on_stepup_and_restart.js
+++ b/jstests/replsets/tenant_migration_donor_resume_on_stepup_and_restart.js
@@ -252,7 +252,7 @@ function testDonorAbortMigrationInterrupt(interruptFunc, fpName, isShutdown = fa
assert.soon(() => {
const res = assert.commandWorked(
donorPrimary.adminCommand({currentOp: true, desc: "tenant donor migration"}));
- return res.inprog[0].receivedCancelation;
+ return res.inprog[0].receivedCancellation;
});
interruptFunc(donorRst);
diff --git a/jstests/replsets/tenant_migration_donor_try_abort.js b/jstests/replsets/tenant_migration_donor_try_abort.js
index cd243fb35e8..491e8c4937a 100644
--- a/jstests/replsets/tenant_migration_donor_try_abort.js
+++ b/jstests/replsets/tenant_migration_donor_try_abort.js
@@ -242,7 +242,7 @@ const migrationX509Options = TenantMigrationUtil.makeX509OptionsForTest();
assert.soon(() => {
const res = assert.commandWorked(tmt.getDonorPrimary().adminCommand(
{currentOp: true, desc: "tenant donor migration", tenantId: tenantId}));
- return res.inprog[0].receivedCancelation;
+ return res.inprog[0].receivedCancellation;
});
barrierBeforeFetchingKeys.off();
@@ -351,7 +351,7 @@ const migrationX509Options = TenantMigrationUtil.makeX509OptionsForTest();
assert.soon(() => {
const res = assert.commandWorked(donorPrimary.adminCommand(
{currentOp: true, desc: "tenant donor migration", tenantId: tenantId}));
- return res.inprog[0].receivedCancelation;
+ return res.inprog[0].receivedCancellation;
});
fp.off();
@@ -399,7 +399,7 @@ const migrationX509Options = TenantMigrationUtil.makeX509OptionsForTest();
assert.soon(() => {
const res = assert.commandWorked(donorPrimary.adminCommand(
{currentOp: true, desc: "tenant donor migration", tenantId: tenantId}));
- return res.inprog[0].receivedCancelation;
+ return res.inprog[0].receivedCancellation;
});
fp.off();
diff --git a/src/mongo/base/error_codes.yml b/src/mongo/base/error_codes.yml
index 4f2ff964a5a..63f44f460c1 100644
--- a/src/mongo/base/error_codes.yml
+++ b/src/mongo/base/error_codes.yml
@@ -12,9 +12,9 @@ error_categories:
- NeedRetargettingError
- WriteConcernError
- ShutdownError
- # isCancelationError() includes all codes that,when passed to a function as its parameter
+ # isCancellationError() includes all codes that,when passed to a function as its parameter
# indicates that it cannot be executed as normal and must abort its intended work.
- - CancelationError
+ - CancellationError
- ConnectionFatalMessageParseError
- ExceededTimeLimitError
- SnapshotError
@@ -122,10 +122,10 @@ error_codes:
- {code: 87,name: CannotSplit}
- {code: 88,name: OBSOLETE_SplitFailed}
- {code: 89,name: NetworkTimeout,categories: [NetworkError,RetriableError,NetworkTimeoutError]}
- - {code: 90,name: CallbackCanceled,categories: [CancelationError]}
+ - {code: 90,name: CallbackCanceled,categories: [CancellationError]}
- {code: 91,name: ShutdownInProgress,
extra: ShutdownInProgressQuiesceInfo,
- categories: [ShutdownError,CancelationError,RetriableError],
+ categories: [ShutdownError,CancellationError,RetriableError],
extraIsOptional: True}
- {code: 92,name: SecondaryAheadOfPrimary}
- {code: 93,name: InvalidReplicaSetConfig}
@@ -368,10 +368,10 @@ error_codes:
- {code: 307,name: RangeDeletionAbandonedBecauseTaskDocumentDoesNotExist}
- {code: 308,name: CurrentConfigNotCommittedYet}
- {code: 309,name: ExhaustCommandFinished}
- - {code: 310,name: PeriodicJobIsStopped,categories: [CancelationError]}
+ - {code: 310,name: PeriodicJobIsStopped,categories: [CancellationError]}
- {code: 311,name: TransactionCoordinatorCanceled,categories: [InternalOnly]}
- - {code: 312,name: OperationIsKilledAndDelisted,categories: [CancelationError,InternalOnly]}
+ - {code: 312,name: OperationIsKilledAndDelisted,categories: [CancellationError,InternalOnly]}
- {code: 313,name: ResumableRangeDeleterDisabled}
- {code: 314,name: ObjectIsBusy}
@@ -409,7 +409,7 @@ error_codes:
- {code: 332, name: CannotDowngrade}
- - {code: 333, name: ServiceExecutorInShutdown, categories: [ShutdownError,CancelationError,InternalOnly]}
+ - {code: 333, name: ServiceExecutorInShutdown, categories: [ShutdownError,CancellationError,InternalOnly]}
- {code: 334, name: MechanismUnavailable}
- {code: 335, name: TenantMigrationForgotten}
@@ -429,7 +429,7 @@ error_codes:
- {code: 10107,name: NotWritablePrimary,categories: [NotPrimaryError,RetriableError]}
- {code: 10334,name: BSONObjectTooLarge}
- {code: 11000,name: DuplicateKey,extra: DuplicateKeyErrorInfo}
- - {code: 11600,name: InterruptedAtShutdown,categories: [Interruption,ShutdownError,CancelationError,RetriableError]}
+ - {code: 11600,name: InterruptedAtShutdown,categories: [Interruption,ShutdownError,CancellationError,RetriableError]}
- {code: 11601,name: Interrupted,categories: [Interruption]}
- {code: 11602,name: InterruptedDueToReplStateChange,categories: [Interruption,NotPrimaryError,RetriableError]}
- {code: 12586,name: BackgroundOperationInProgressForDatabase}
@@ -444,5 +444,5 @@ error_codes:
- {code: 13436,name: NotPrimaryOrSecondary,categories: [NotPrimaryError,RetriableError]}
- {code: 14031,name: OutOfDiskSpace}
- {code: 17280,name: OBSOLETE_KeyTooLong}
- - {code: 46841,name: ClientMarkedKilled,categories: [Interruption,CancelationError]}
+ - {code: 46841,name: ClientMarkedKilled,categories: [Interruption,CancellationError]}
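
Illustrative sketch (not part of the patch) of how callers consume the renamed category; the generated ErrorCodes::isCancellationError() predicate appears in the client and executor hunks further below.

    void onCallbackFinished(const Status& status) {
        if (ErrorCodes::isCancellationError(status)) {
            return;  // canceled or shutting down: abandon the work quietly
        }
        uassertStatusOK(status);  // anything else is a real error
    }
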
diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp
index c0aa64f50d5..a1dab200294 100644
--- a/src/mongo/client/dbclient_rs.cpp
+++ b/src/mongo/client/dbclient_rs.cpp
@@ -423,7 +423,7 @@ Status DBClientReplicaSet::connect() {
// Returns true if there are any up hosts.
const ReadPreferenceSetting anyUpHost(ReadPreference::Nearest, TagSet());
return _getMonitor()
- ->getHostOrRefresh(anyUpHost, CancelationToken::uncancelable())
+ ->getHostOrRefresh(anyUpHost, CancellationToken::uncancelable())
.getNoThrow()
.getStatus();
}
@@ -763,7 +763,7 @@ DBClientConnection* DBClientReplicaSet::selectNodeUsingTags(
ReplicaSetMonitorPtr monitor = _getMonitor();
auto selectedNodeStatus =
- monitor->getHostOrRefresh(*readPref, CancelationToken::uncancelable()).getNoThrow();
+ monitor->getHostOrRefresh(*readPref, CancellationToken::uncancelable()).getNoThrow();
if (!selectedNodeStatus.isOK()) {
LOGV2_DEBUG(20138,
3,
diff --git a/src/mongo/client/remote_command_targeter.h b/src/mongo/client/remote_command_targeter.h
index 0b88d8fd12c..a457bb74944 100644
--- a/src/mongo/client/remote_command_targeter.h
+++ b/src/mongo/client/remote_command_targeter.h
@@ -75,10 +75,10 @@ public:
* an OperationContext is available.
*/
virtual SemiFuture<HostAndPort> findHost(const ReadPreferenceSetting& readPref,
- const CancelationToken& cancelToken) = 0;
+ const CancellationToken& cancelToken) = 0;
- virtual SemiFuture<std::vector<HostAndPort>> findHosts(const ReadPreferenceSetting& readPref,
- const CancelationToken& cancelToken) = 0;
+ virtual SemiFuture<std::vector<HostAndPort>> findHosts(
+ const ReadPreferenceSetting& readPref, const CancellationToken& cancelToken) = 0;
/**
* Reports to the targeter that a 'status' indicating a not primary error was received when
diff --git a/src/mongo/client/remote_command_targeter_factory_mock.cpp b/src/mongo/client/remote_command_targeter_factory_mock.cpp
index 257927a9e4d..478dbfd96ec 100644
--- a/src/mongo/client/remote_command_targeter_factory_mock.cpp
+++ b/src/mongo/client/remote_command_targeter_factory_mock.cpp
@@ -48,12 +48,12 @@ public:
}
SemiFuture<HostAndPort> findHost(const ReadPreferenceSetting& readPref,
- const CancelationToken& cancelToken) override {
+ const CancellationToken& cancelToken) override {
return _mock->findHost(readPref, cancelToken);
}
SemiFuture<std::vector<HostAndPort>> findHosts(const ReadPreferenceSetting& readPref,
- const CancelationToken& cancelToken) override {
+ const CancellationToken& cancelToken) override {
return _mock->findHosts(readPref, cancelToken);
}
diff --git a/src/mongo/client/remote_command_targeter_mock.cpp b/src/mongo/client/remote_command_targeter_mock.cpp
index ab546c2d597..6ae7c0dabce 100644
--- a/src/mongo/client/remote_command_targeter_mock.cpp
+++ b/src/mongo/client/remote_command_targeter_mock.cpp
@@ -60,7 +60,7 @@ StatusWith<HostAndPort> RemoteCommandTargeterMock::findHost(OperationContext* op
}
SemiFuture<HostAndPort> RemoteCommandTargeterMock::findHost(const ReadPreferenceSetting&,
- const CancelationToken&) {
+ const CancellationToken&) {
if (!_findHostReturnValue.isOK()) {
return _findHostReturnValue.getStatus();
}
@@ -69,7 +69,7 @@ SemiFuture<HostAndPort> RemoteCommandTargeterMock::findHost(const ReadPreference
}
SemiFuture<std::vector<HostAndPort>> RemoteCommandTargeterMock::findHosts(
- const ReadPreferenceSetting&, const CancelationToken&) {
+ const ReadPreferenceSetting&, const CancellationToken&) {
return _findHostReturnValue;
}
diff --git a/src/mongo/client/remote_command_targeter_mock.h b/src/mongo/client/remote_command_targeter_mock.h
index 4fc971cfe07..49ae1302c63 100644
--- a/src/mongo/client/remote_command_targeter_mock.h
+++ b/src/mongo/client/remote_command_targeter_mock.h
@@ -56,10 +56,10 @@ public:
* Returns ErrorCodes::InternalError if setFindHostReturnValue was never called.
*/
SemiFuture<HostAndPort> findHost(const ReadPreferenceSetting& readPref,
- const CancelationToken& cancelToken) override;
+ const CancellationToken& cancelToken) override;
SemiFuture<std::vector<HostAndPort>> findHosts(const ReadPreferenceSetting& readPref,
- const CancelationToken& cancelToken) override;
+ const CancellationToken& cancelToken) override;
StatusWith<HostAndPort> findHost(OperationContext* opCtx,
const ReadPreferenceSetting& readPref) override;
diff --git a/src/mongo/client/remote_command_targeter_rs.cpp b/src/mongo/client/remote_command_targeter_rs.cpp
index 8c3c66c099d..f1b99732569 100644
--- a/src/mongo/client/remote_command_targeter_rs.cpp
+++ b/src/mongo/client/remote_command_targeter_rs.cpp
@@ -65,12 +65,12 @@ ConnectionString RemoteCommandTargeterRS::connectionString() {
}
SemiFuture<HostAndPort> RemoteCommandTargeterRS::findHost(const ReadPreferenceSetting& readPref,
- const CancelationToken& cancelToken) {
+ const CancellationToken& cancelToken) {
return _rsMonitor->getHostOrRefresh(readPref, cancelToken);
}
SemiFuture<std::vector<HostAndPort>> RemoteCommandTargeterRS::findHosts(
- const ReadPreferenceSetting& readPref, const CancelationToken& cancelToken) {
+ const ReadPreferenceSetting& readPref, const CancellationToken& cancelToken) {
return _rsMonitor->getHostsOrRefresh(readPref, cancelToken);
}
@@ -86,7 +86,7 @@ StatusWith<HostAndPort> RemoteCommandTargeterRS::findHost(OperationContext* opCt
// See comment in remote_command_targeter.h for details.
bool maxTimeMsLesser = (opCtx->getRemainingMaxTimeMillis() < Milliseconds(Seconds(20)));
auto swHostAndPort =
- _rsMonitor->getHostOrRefresh(readPref, opCtx->getCancelationToken()).getNoThrow(opCtx);
+ _rsMonitor->getHostOrRefresh(readPref, opCtx->getCancellationToken()).getNoThrow(opCtx);
if (maxTimeMsLesser && swHostAndPort.getStatus() == ErrorCodes::FailedToSatisfyReadPreference) {
return Status(ErrorCodes::MaxTimeMSExpired, "operation timed out");
diff --git a/src/mongo/client/remote_command_targeter_rs.h b/src/mongo/client/remote_command_targeter_rs.h
index 5dfe156f3b5..49c2b6850f6 100644
--- a/src/mongo/client/remote_command_targeter_rs.h
+++ b/src/mongo/client/remote_command_targeter_rs.h
@@ -57,10 +57,10 @@ public:
const ReadPreferenceSetting& readPref) override;
SemiFuture<std::vector<HostAndPort>> findHosts(const ReadPreferenceSetting& readPref,
- const CancelationToken& cancelToken) override;
+ const CancellationToken& cancelToken) override;
SemiFuture<HostAndPort> findHost(const ReadPreferenceSetting& readPref,
- const CancelationToken& cancelToken) override;
+ const CancellationToken& cancelToken) override;
void markHostNotPrimary(const HostAndPort& host, const Status& status) override;
diff --git a/src/mongo/client/remote_command_targeter_standalone.cpp b/src/mongo/client/remote_command_targeter_standalone.cpp
index a62803c8fd0..d719f1fd44f 100644
--- a/src/mongo/client/remote_command_targeter_standalone.cpp
+++ b/src/mongo/client/remote_command_targeter_standalone.cpp
@@ -44,12 +44,12 @@ ConnectionString RemoteCommandTargeterStandalone::connectionString() {
}
SemiFuture<HostAndPort> RemoteCommandTargeterStandalone::findHost(
- const ReadPreferenceSetting& readPref, const CancelationToken& cancelToken) {
+ const ReadPreferenceSetting& readPref, const CancellationToken& cancelToken) {
return {_hostAndPort};
}
SemiFuture<std::vector<HostAndPort>> RemoteCommandTargeterStandalone::findHosts(
- const ReadPreferenceSetting& readPref, const CancelationToken& cancelToken) {
+ const ReadPreferenceSetting& readPref, const CancellationToken& cancelToken) {
return {{_hostAndPort}};
}
diff --git a/src/mongo/client/remote_command_targeter_standalone.h b/src/mongo/client/remote_command_targeter_standalone.h
index 25a43e83ce4..05c7bcbc545 100644
--- a/src/mongo/client/remote_command_targeter_standalone.h
+++ b/src/mongo/client/remote_command_targeter_standalone.h
@@ -48,11 +48,11 @@ public:
const ReadPreferenceSetting& readPref) override;
SemiFuture<HostAndPort> findHost(const ReadPreferenceSetting& readPref,
- const CancelationToken& cancelToken) override;
+ const CancellationToken& cancelToken) override;
SemiFuture<std::vector<HostAndPort>> findHosts(const ReadPreferenceSetting& readPref,
- const CancelationToken& cancelToken) override;
+ const CancellationToken& cancelToken) override;
void markHostNotPrimary(const HostAndPort& host, const Status& status) override;
diff --git a/src/mongo/client/replica_set_monitor_integration_test.cpp b/src/mongo/client/replica_set_monitor_integration_test.cpp
index 6a8877e0ae4..e8b51797431 100644
--- a/src/mongo/client/replica_set_monitor_integration_test.cpp
+++ b/src/mongo/client/replica_set_monitor_integration_test.cpp
@@ -160,7 +160,7 @@ TEST_F(ReplicaSetMonitorFixture, StreamableRSMWireVersion) {
// Schedule isMaster requests and wait for the responses.
auto primaryFuture =
rsm->getHostOrRefresh(ReadPreferenceSetting(mongo::ReadPreference::PrimaryOnly),
- CancelationToken::uncancelable());
+ CancellationToken::uncancelable());
primaryFuture.get();
ASSERT_EQ(rsm->getMinWireVersion(), WireVersion::LATEST_WIRE_VERSION);
diff --git a/src/mongo/client/replica_set_monitor_interface.h b/src/mongo/client/replica_set_monitor_interface.h
index 0e79c892dda..d632830cc79 100644
--- a/src/mongo/client/replica_set_monitor_interface.h
+++ b/src/mongo/client/replica_set_monitor_interface.h
@@ -36,7 +36,7 @@
#include "mongo/client/mongo_uri.h"
#include "mongo/client/replica_set_change_notifier.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
#include "mongo/util/duration.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/time_support.h"
@@ -82,20 +82,20 @@ public:
*/
virtual SemiFuture<HostAndPort> getHostOrRefresh(const ReadPreferenceSetting& readPref,
const std::vector<HostAndPort>& excludedHosts,
- const CancelationToken& cancelToken) = 0;
+ const CancellationToken& cancelToken) = 0;
SemiFuture<HostAndPort> getHostOrRefresh(const ReadPreferenceSetting& readPref,
- const CancelationToken& cancelToken) {
+ const CancellationToken& cancelToken) {
return getHostOrRefresh(readPref, {} /* excludedHosts */, cancelToken);
}
virtual SemiFuture<std::vector<HostAndPort>> getHostsOrRefresh(
const ReadPreferenceSetting& readPref,
const std::vector<HostAndPort>& excludedHosts,
- const CancelationToken& cancelToken) = 0;
+ const CancellationToken& cancelToken) = 0;
SemiFuture<std::vector<HostAndPort>> getHostsOrRefresh(const ReadPreferenceSetting& readPref,
- const CancelationToken& cancelToken) {
+ const CancellationToken& cancelToken) {
return getHostsOrRefresh(readPref, {} /* excludedHosts */, cancelToken);
}
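
Illustrative sketch (not part of the patch): a caller threading a token through getHostOrRefresh(). The surrounding helper function is hypothetical; the token sources shown are the ones this patch uses.

    SemiFuture<HostAndPort> pickHost(ReplicaSetMonitorInterface& monitor,
                                     OperationContext* opCtx,
                                     const ReadPreferenceSetting& readPref) {
        // Tie host selection to the operation's lifetime: killing the opCtx cancels
        // the wait. Pass CancellationToken::uncancelable() where no interruption is
        // possible, as several call sites in this patch do.
        return monitor.getHostOrRefresh(readPref, opCtx->getCancellationToken());
    }
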
diff --git a/src/mongo/client/scanning_replica_set_monitor.cpp b/src/mongo/client/scanning_replica_set_monitor.cpp
index 682e4151759..daef814d55c 100644
--- a/src/mongo/client/scanning_replica_set_monitor.cpp
+++ b/src/mongo/client/scanning_replica_set_monitor.cpp
@@ -213,7 +213,7 @@ template <typename Callback>
auto ScanningReplicaSetMonitor::SetState::scheduleWorkAt(Date_t when, Callback&& cb) const {
auto wrappedCallback = [cb = std::forward<Callback>(cb),
anchor = shared_from_this()](const CallbackArgs& cbArgs) mutable {
- if (ErrorCodes::isCancelationError(cbArgs.status)) {
+ if (ErrorCodes::isCancellationError(cbArgs.status)) {
// Do no more work if we're removed or canceled
return;
}
@@ -309,7 +309,7 @@ void ScanningReplicaSetMonitor::SetState::rescheduleRefresh(SchedulingStrategy s
SemiFuture<HostAndPort> ScanningReplicaSetMonitor::getHostOrRefresh(
const ReadPreferenceSetting& criteria,
const std::vector<HostAndPort>& excludedHosts,
- const CancelationToken&) {
+ const CancellationToken&) {
return _getHostsOrRefresh(
criteria, ReplicaSetMonitorInterface::kDefaultFindHostTimeout, excludedHosts)
.then([](const auto& hosts) {
@@ -322,7 +322,7 @@ SemiFuture<HostAndPort> ScanningReplicaSetMonitor::getHostOrRefresh(
SemiFuture<std::vector<HostAndPort>> ScanningReplicaSetMonitor::getHostsOrRefresh(
const ReadPreferenceSetting& criteria,
const std::vector<HostAndPort>& excludedHosts,
- const CancelationToken&) {
+ const CancellationToken&) {
return _getHostsOrRefresh(
criteria, ReplicaSetMonitorInterface::kDefaultFindHostTimeout, excludedHosts)
.semi();
@@ -367,7 +367,7 @@ Future<std::vector<HostAndPort>> ScanningReplicaSetMonitor::_getHostsOrRefresh(
HostAndPort ScanningReplicaSetMonitor::getPrimaryOrUassert() {
return ReplicaSetMonitorInterface::getHostOrRefresh(kPrimaryOnlyReadPreference,
- CancelationToken::uncancelable())
+ CancellationToken::uncancelable())
.get();
}
diff --git a/src/mongo/client/scanning_replica_set_monitor.h b/src/mongo/client/scanning_replica_set_monitor.h
index 9161b143124..094db3e3300 100644
--- a/src/mongo/client/scanning_replica_set_monitor.h
+++ b/src/mongo/client/scanning_replica_set_monitor.h
@@ -72,21 +72,21 @@ public:
void drop() override;
/**
- * NOTE: Cancelation via CancelationTokens is not implemented for the ScanningReplicaSetMonitor,
- * so any token passed in will be ignored.
+ * NOTE: Cancellation via CancellationTokens is not implemented for the
+ * ScanningReplicaSetMonitor, so any token passed in will be ignored.
*/
SemiFuture<HostAndPort> getHostOrRefresh(const ReadPreferenceSetting& readPref,
const std::vector<HostAndPort>& excludedHosts,
- const CancelationToken&) override;
+ const CancellationToken&) override;
/**
- * NOTE: Cancelation via CancelationTokens is not implemented for the ScanningReplicaSetMonitor,
- * so any token passed in will be ignored.
+ * NOTE: Cancellation via CancellationTokens is not implemented for the
+ * ScanningReplicaSetMonitor, so any token passed in will be ignored.
*/
SemiFuture<std::vector<HostAndPort>> getHostsOrRefresh(
const ReadPreferenceSetting& readPref,
const std::vector<HostAndPort>& excludedHosts,
- const CancelationToken&) override;
+ const CancellationToken&) override;
HostAndPort getPrimaryOrUassert() override;
diff --git a/src/mongo/client/server_ping_monitor.cpp b/src/mongo/client/server_ping_monitor.cpp
index ae3527eb152..bc2df5b21d8 100644
--- a/src/mongo/client/server_ping_monitor.cpp
+++ b/src/mongo/client/server_ping_monitor.cpp
@@ -83,7 +83,7 @@ template <typename Callback>
auto SingleServerPingMonitor::_scheduleWorkAt(Date_t when, Callback&& cb) const {
auto wrappedCallback = [cb = std::forward<Callback>(cb),
anchor = shared_from_this()](const CallbackArgs& cbArgs) mutable {
- if (ErrorCodes::isCancelationError(cbArgs.status)) {
+ if (ErrorCodes::isCancellationError(cbArgs.status)) {
return;
}
@@ -145,7 +145,7 @@ void SingleServerPingMonitor::_doServerPing() {
std::move(request),
[anchor = shared_from_this(),
timer = Timer()](const executor::TaskExecutor::RemoteCommandCallbackArgs& result) mutable {
- if (ErrorCodes::isCancelationError(result.response.status)) {
+ if (ErrorCodes::isCancellationError(result.response.status)) {
// Do no more work if the SingleServerPingMonitor is removed or the request is
// canceled.
return;
diff --git a/src/mongo/client/streamable_replica_set_monitor.cpp b/src/mongo/client/streamable_replica_set_monitor.cpp
index 0fd60accebe..591469aeb50 100644
--- a/src/mongo/client/streamable_replica_set_monitor.cpp
+++ b/src/mongo/client/streamable_replica_set_monitor.cpp
@@ -304,7 +304,7 @@ void StreamableReplicaSetMonitor::drop() {
SemiFuture<HostAndPort> StreamableReplicaSetMonitor::getHostOrRefresh(
const ReadPreferenceSetting& criteria,
const std::vector<HostAndPort>& excludedHosts,
- const CancelationToken& cancelToken) {
+ const CancellationToken& cancelToken) {
return getHostsOrRefresh(criteria, excludedHosts, cancelToken)
.thenRunOn(_executor)
.then([self = shared_from_this()](const std::vector<HostAndPort>& result) {
@@ -326,7 +326,7 @@ std::vector<HostAndPort> StreamableReplicaSetMonitor::_extractHosts(
SemiFuture<std::vector<HostAndPort>> StreamableReplicaSetMonitor::getHostsOrRefresh(
const ReadPreferenceSetting& criteria,
const std::vector<HostAndPort>& excludedHosts,
- const CancelationToken& cancelToken) {
+ const CancellationToken& cancelToken) {
// In the fast case (stable topology), we avoid mutex acquisition.
if (_isDropped.load()) {
return makeReplicaSetMonitorRemovedError(getName());
@@ -353,7 +353,7 @@ SemiFuture<std::vector<HostAndPort>> StreamableReplicaSetMonitor::getHostsOrRefr
"replicaSet"_attr = getName(),
"readPref"_attr = readPrefToStringFull(criteria));
- // Fail fast on timeout or cancelation.
+ // Fail fast on timeout or cancellation.
const Date_t& now = _executor->now();
if (deadline <= now || cancelToken.isCanceled()) {
return makeUnsatisfiedReadPrefError(getName(), criteria);
@@ -385,7 +385,7 @@ SemiFuture<std::vector<HostAndPort>> StreamableReplicaSetMonitor::_enqueueOutsta
WithLock,
const ReadPreferenceSetting& criteria,
const std::vector<HostAndPort>& excludedHosts,
- const CancelationToken& cancelToken,
+ const CancellationToken& cancelToken,
const Date_t& deadline) {
auto query = std::make_shared<HostQuery>();
@@ -397,7 +397,7 @@ SemiFuture<std::vector<HostAndPort>> StreamableReplicaSetMonitor::_enqueueOutsta
// Make the deadline task cancelable for when the query is satisfied or when the input
// cancelToken is canceled.
- query->deadlineCancelSource = CancelationSource(cancelToken);
+ query->deadlineCancelSource = CancellationSource(cancelToken);
query->start = _executor->now();
// Add the query to the list of outstanding queries.
@@ -407,11 +407,11 @@ SemiFuture<std::vector<HostAndPort>> StreamableReplicaSetMonitor::_enqueueOutsta
// It will be removed as a listener when all waiting queries have been satisfied.
_eventsPublisher->registerListener(_queryProcessor);
- // After a deadline or when the input cancelation token is canceled, cancel this query. If the
+ // After a deadline or when the input cancellation token is canceled, cancel this query. If the
// query completes first, the deadlineCancelSource will be used to cancel this task.
_executor->sleepUntil(deadline, query->deadlineCancelSource.token())
.getAsync([this, query, queryIter, self = shared_from_this(), cancelToken](Status status) {
- // If the deadline was reached or cancelation occurred on the input cancelation token,
+ // If the deadline was reached or cancellation occurred on the input cancellation token,
// mark the query as canceled. Otherwise, the deadlineCancelSource must have been
// canceled due to the query completing successfully.
if (status.isOK() || cancelToken.isCanceled()) {
@@ -456,7 +456,7 @@ boost::optional<std::vector<HostAndPort>> StreamableReplicaSetMonitor::_getHosts
HostAndPort StreamableReplicaSetMonitor::getPrimaryOrUassert() {
return ReplicaSetMonitorInterface::getHostOrRefresh(kPrimaryOnlyReadPreference,
- CancelationToken::uncancelable())
+ CancellationToken::uncancelable())
.get();
}
@@ -799,7 +799,7 @@ void StreamableReplicaSetMonitor::_processOutstanding(
// Iterate through the outstanding queries and try to resolve them via calls to _getHosts. If we
// succeed in resolving a query, the query is removed from the list. If a query has already been
- // canceled, or there are no results, it will be skipped. Cancelation logic elsewhere will
+ // canceled, or there are no results, it will be skipped. Cancellation logic elsewhere will
// handle removing the canceled queries from the list.
while (it != _outstandingQueries.end()) {
auto& query = *it;
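
Illustrative sketch (not part of the patch) of the child-source pattern used in _enqueueOutstandingQuery above; the standalone function and variable names are hypothetical.

    void scheduleQueryDeadline(executor::TaskExecutor* executor,
                               const CancellationToken& parentToken,
                               Date_t deadline) {
        // A source constructed from a parent token is canceled when the parent is
        // canceled and can also be canceled on its own (the real code stores it on
        // the query and cancels it once the query is satisfied).
        CancellationSource deadlineSource(parentToken);
        executor->sleepUntil(deadline, deadlineSource.token())
            .getAsync([](Status status) {
                // OK: the deadline fired. A cancellation error: either the parent
                // was canceled or the query completed first and canceled the child.
            });
    }
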
diff --git a/src/mongo/client/streamable_replica_set_monitor.h b/src/mongo/client/streamable_replica_set_monitor.h
index 8134d6bd8bf..8d716ddaf8e 100644
--- a/src/mongo/client/streamable_replica_set_monitor.h
+++ b/src/mongo/client/streamable_replica_set_monitor.h
@@ -97,12 +97,12 @@ public:
SemiFuture<HostAndPort> getHostOrRefresh(const ReadPreferenceSetting& readPref,
const std::vector<HostAndPort>& excludedHosts,
- const CancelationToken& cancelToken) override;
+ const CancellationToken& cancelToken) override;
SemiFuture<std::vector<HostAndPort>> getHostsOrRefresh(
const ReadPreferenceSetting& readPref,
const std::vector<HostAndPort>& excludedHosts,
- const CancelationToken& cancelToken) override;
+ const CancellationToken& cancelToken) override;
HostAndPort getPrimaryOrUassert() override;
@@ -178,7 +178,7 @@ private:
return !wasAlreadyDone;
}
- CancelationSource deadlineCancelSource;
+ CancellationSource deadlineCancelSource;
ReadPreferenceSetting criteria;
@@ -206,7 +206,7 @@ private:
WithLock,
const ReadPreferenceSetting& criteria,
const std::vector<HostAndPort>& excludedHosts,
- const CancelationToken& cancelToken,
+ const CancellationToken& cancelToken,
const Date_t& deadline);
// Removes the query pointed to by iter and returns an iterator to the next item in the list.
diff --git a/src/mongo/db/cancelable_operation_context.cpp b/src/mongo/db/cancelable_operation_context.cpp
index c929f33f9e6..c605d892d92 100644
--- a/src/mongo/db/cancelable_operation_context.cpp
+++ b/src/mongo/db/cancelable_operation_context.cpp
@@ -34,12 +34,12 @@
#include "mongo/db/client.h"
#include "mongo/db/operation_context.h"
#include "mongo/stdx/mutex.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
namespace mongo {
CancelableOperationContext::CancelableOperationContext(ServiceContext::UniqueOperationContext opCtx,
- const CancelationToken& cancelToken,
+ const CancellationToken& cancelToken,
ExecutorPtr executor)
: _sharedBlock{std::make_shared<SharedBlock>()},
_opCtx{std::move(opCtx)},
diff --git a/src/mongo/db/cancelable_operation_context.h b/src/mongo/db/cancelable_operation_context.h
index ca9493a58c2..ee4225e2b5b 100644
--- a/src/mongo/db/cancelable_operation_context.h
+++ b/src/mongo/db/cancelable_operation_context.h
@@ -38,28 +38,28 @@
namespace mongo {
-class CancelationToken;
+class CancellationToken;
class OperationContext;
/**
* Wrapper class around an OperationContext that calls markKilled(ErrorCodes::CallbackCanceled) when
- * the supplied CancelationToken is canceled.
+ * the supplied CancellationToken is canceled.
*
- * This class is useful for having an OperationContext be interrupted when a CancelationToken is
- * canceled. Note that OperationContext::getCancelationToken() is instead useful for having a
- * CancelationToken be canceled when an OperationContext is interrupted. The combination of the two
- * enables bridging between OperationContext interruption and CancelationToken cancellation
+ * This class is useful for having an OperationContext be interrupted when a CancellationToken is
+ * canceled. Note that OperationContext::getCancellationToken() is instead useful for having a
+ * CancellationToken be canceled when an OperationContext is interrupted. The combination of the two
+ * enables bridging between OperationContext interruption and CancellationToken cancellation
* arbitrarily.
*
* IMPORTANT: Executors are allowed to refuse work. markKilled(ErrorCodes::CallbackCanceled) won't
- * be called when the supplied CancelationToken is canceled if the task executor has already been
+ * be called when the supplied CancellationToken is canceled if the task executor has already been
* shut down, for example. Use a task executor bound to the process lifetime if you must guarantee
- * that the OperationContext is interrupted when the CancelationToken is canceled.
+ * that the OperationContext is interrupted when the CancellationToken is canceled.
*/
class CancelableOperationContext {
public:
CancelableOperationContext(ServiceContext::UniqueOperationContext opCtx,
- const CancelationToken& cancelToken,
+ const CancellationToken& cancelToken,
ExecutorPtr executor);
CancelableOperationContext(const CancelableOperationContext&) = delete;
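
Illustrative sketch (not part of the patch) of the bridge the comment above describes, assuming a Client 'client', a token 'cancelToken', and an executor are in scope, as in the tests that follow.

    auto opCtx = CancelableOperationContext{
        client->makeOperationContext(), cancelToken, executor};
    Status interruptStatus = opCtx->checkForInterruptNoAssert();  // OK until canceled
    // Once 'cancelToken' is canceled, the wrapped OperationContext is killed with
    // CallbackCanceled and this check starts returning that code.
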
diff --git a/src/mongo/db/cancelable_operation_context_test.cpp b/src/mongo/db/cancelable_operation_context_test.cpp
index bcdf7f5e22d..ca680e77e63 100644
--- a/src/mongo/db/cancelable_operation_context_test.cpp
+++ b/src/mongo/db/cancelable_operation_context_test.cpp
@@ -77,21 +77,21 @@ TEST_F(CancelableOperationContextTest, ActsAsNormalOperationContext) {
auto serviceCtx = ServiceContext::make();
auto client = serviceCtx->makeClient("CancelableOperationContextTest");
auto opCtx = CancelableOperationContext{
- client->makeOperationContext(), CancelationToken::uncancelable(), executor()};
+ client->makeOperationContext(), CancellationToken::uncancelable(), executor()};
ASSERT_EQ(opCtx->getClient(), client.get());
ASSERT_EQ(opCtx.get()->getClient(), client.get());
- // The CancelationSource underlying the OperationContext* is unassociated with the one supplied
+ // The CancellationSource underlying the OperationContext* is unassociated with the one supplied
// to the CancelableOperationContext constructor.
- ASSERT_TRUE(opCtx->getCancelationToken().isCancelable());
+ ASSERT_TRUE(opCtx->getCancellationToken().isCancelable());
}
-TEST_F(CancelableOperationContextTest, KilledWhenCancelationSourceIsCanceled) {
+TEST_F(CancelableOperationContextTest, KilledWhenCancellationSourceIsCanceled) {
auto serviceCtx = ServiceContext::make();
auto client = serviceCtx->makeClient("CancelableOperationContextTest");
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
auto opCtx = CancelableOperationContext{
client->makeOperationContext(), cancelSource.token(), executor()};
@@ -102,18 +102,18 @@ TEST_F(CancelableOperationContextTest, KilledWhenCancelationSourceIsCanceled) {
ASSERT_EQ(opCtx->checkForInterruptNoAssert(), ErrorCodes::CallbackCanceled);
}
-TEST_F(CancelableOperationContextTest, SafeWhenCancelationSourceIsCanceledUnderClientMutex) {
+TEST_F(CancelableOperationContextTest, SafeWhenCancellationSourceIsCanceledUnderClientMutex) {
auto serviceCtx = ServiceContext::make();
auto client = serviceCtx->makeClient("CancelableOperationContextTest");
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
auto opCtx = CancelableOperationContext{
client->makeOperationContext(), cancelSource.token(), executor()};
ASSERT_OK(opCtx->checkForInterruptNoAssert());
{
- // Holding the Client mutex while canceling the CancelationSource won't lead to
+ // Holding the Client mutex while canceling the CancellationSource won't lead to
// self-deadlock.
stdx::lock_guard<Client> lk(*opCtx->getClient());
cancelSource.cancel();
@@ -122,11 +122,11 @@ TEST_F(CancelableOperationContextTest, SafeWhenCancelationSourceIsCanceledUnderC
ASSERT_EQ(opCtx->checkForInterruptNoAssert(), ErrorCodes::CallbackCanceled);
}
-TEST_F(CancelableOperationContextTest, SafeWhenDestructedBeforeCancelationSourceIsCanceled) {
+TEST_F(CancelableOperationContextTest, SafeWhenDestructedBeforeCancellationSourceIsCanceled) {
auto serviceCtx = ServiceContext::make();
auto client = serviceCtx->makeClient("CancelableOperationContextTest");
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
boost::optional<CancelableOperationContext> opCtx;
opCtx.emplace(client->makeOperationContext(), cancelSource.token(), executor());
@@ -134,11 +134,11 @@ TEST_F(CancelableOperationContextTest, SafeWhenDestructedBeforeCancelationSource
cancelSource.cancel();
}
-TEST_F(CancelableOperationContextTest, NotKilledWhenCancelationSourceIsDestructed) {
+TEST_F(CancelableOperationContextTest, NotKilledWhenCancellationSourceIsDestructed) {
auto serviceCtx = ServiceContext::make();
auto client = serviceCtx->makeClient("CancelableOperationContextTest");
- boost::optional<CancelationSource> cancelSource;
+ boost::optional<CancellationSource> cancelSource;
cancelSource.emplace();
auto opCtx = CancelableOperationContext{
client->makeOperationContext(), cancelSource->token(), executor()};
@@ -150,11 +150,11 @@ TEST_F(CancelableOperationContextTest, NotKilledWhenCancelationSourceIsDestructe
}
TEST_F(CancelableOperationContextTest,
- NotKilledWhenCancelationSourceIsCanceledAndTaskExecutorAlreadyShutDown) {
+ NotKilledWhenCancellationSourceIsCanceledAndTaskExecutorAlreadyShutDown) {
auto serviceCtx = ServiceContext::make();
auto client = serviceCtx->makeClient("CancelableOperationContextTest");
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
auto opCtx = CancelableOperationContext{
client->makeOperationContext(), cancelSource.token(), executor()};
@@ -165,12 +165,12 @@ TEST_F(CancelableOperationContextTest,
ASSERT_OK(opCtx->checkForInterruptNoAssert());
}
-TEST_F(CancelableOperationContextTest, SafeWhenOperationContextOwnCancelationTokenIsUsed) {
+TEST_F(CancelableOperationContextTest, SafeWhenOperationContextOwnCancellationTokenIsUsed) {
auto serviceCtx = ServiceContext::make();
auto client = serviceCtx->makeClient("CancelableOperationContextTest");
auto opCtx = client->makeOperationContext();
- auto cancelToken = opCtx->getCancelationToken();
+ auto cancelToken = opCtx->getCancellationToken();
auto cancelableOpCtx = CancelableOperationContext{std::move(opCtx), cancelToken, executor()};
ASSERT_OK(cancelableOpCtx->checkForInterruptNoAssert());
@@ -189,7 +189,7 @@ TEST_F(CancelableOperationContextTest, SafeWhenOperationContextKilledManually) {
auto serviceCtx = ServiceContext::make();
auto client = serviceCtx->makeClient("CancelableOperationContextTest");
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
auto opCtx = CancelableOperationContext{
client->makeOperationContext(), cancelSource.token(), executor()};
diff --git a/src/mongo/db/operation_context.cpp b/src/mongo/db/operation_context.cpp
index 765536b4929..cb97d6a197a 100644
--- a/src/mongo/db/operation_context.cpp
+++ b/src/mongo/db/operation_context.cpp
@@ -292,8 +292,9 @@ Status OperationContext::checkForInterruptNoAssert() noexcept {
// - _baton is notified (someone's queuing work for the baton)
// - _baton::run returns (timeout fired / networking is ready / socket disconnected)
//
-// We release the lock held by m whenever we call markKilled, since it may trigger CancelationSource
-// cancelation which can in turn emplace a SharedPromise which then may acquire a mutex.
+// We release the lock held by m whenever we call markKilled, since it may trigger
+// CancellationSource cancellation which can in turn emplace a SharedPromise which then may acquire
+// a mutex.
StatusWith<stdx::cv_status> OperationContext::waitForConditionOrInterruptNoAssertUntil(
stdx::condition_variable& cv, BasicLockableAdapter m, Date_t deadline) noexcept {
invariant(getClient());
diff --git a/src/mongo/db/operation_context.h b/src/mongo/db/operation_context.h
index f72b9d95992..44f393d91a8 100644
--- a/src/mongo/db/operation_context.h
+++ b/src/mongo/db/operation_context.h
@@ -46,7 +46,7 @@
#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/transport/session.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/decorable.h"
#include "mongo/util/fail_point.h"
@@ -235,11 +235,11 @@ public:
}
/**
- * Returns a CancelationToken that will be canceled when the OperationContext is killed via
+ * Returns a CancellationToken that will be canceled when the OperationContext is killed via
* markKilled (including for internal reasons, like the OperationContext deadline being
* reached).
*/
- CancelationToken getCancelationToken() {
+ CancellationToken getCancellationToken() {
return _cancelSource.token();
}
@@ -640,9 +640,9 @@ private:
// once from OK to some kill code.
AtomicWord<ErrorCodes::Error> _killCode{ErrorCodes::OK};
- // Used to cancel all tokens obtained via getCancelationToken() when this OperationContext is
+ // Used to cancel all tokens obtained via getCancellationToken() when this OperationContext is
// killed.
- CancelationSource _cancelSource;
+ CancellationSource _cancelSource;
BatonHandle _baton;
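
Illustrative sketch (not part of the patch) of the opposite direction, mirroring the tests below: observing OperationContext interruption through its token.

    auto cancelToken = opCtx->getCancellationToken();
    auto onKilled = cancelToken.onCancel().unsafeToInlineFuture().then([] {
        // Runs once the opCtx is killed (markKilled, deadline expiry, shutdown, ...).
    });
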
diff --git a/src/mongo/db/operation_context_test.cpp b/src/mongo/db/operation_context_test.cpp
index d7d538455d2..e06c084101a 100644
--- a/src/mongo/db/operation_context_test.cpp
+++ b/src/mongo/db/operation_context_test.cpp
@@ -257,11 +257,11 @@ TEST(OperationContextTest, setIsExecutingShutdownWorks) {
ASSERT_OK(opCtx->getKillStatus());
}
-TEST(OperationContextTest, CancelationTokenIsCanceledWhenMarkKilledIsCalled) {
+TEST(OperationContextTest, CancellationTokenIsCanceledWhenMarkKilledIsCalled) {
auto serviceCtx = ServiceContext::make();
auto client = serviceCtx->makeClient("OperationContextTest");
auto opCtx = client->makeOperationContext();
- auto cancelToken = opCtx->getCancelationToken();
+ auto cancelToken = opCtx->getCancellationToken();
// Should not be canceled yet.
ASSERT_FALSE(cancelToken.isCanceled());
@@ -272,11 +272,11 @@ TEST(OperationContextTest, CancelationTokenIsCanceledWhenMarkKilledIsCalled) {
ASSERT_TRUE(cancelToken.isCanceled());
}
-TEST(OperationContextTest, CancelationTokenIsCancelableAtFirst) {
+TEST(OperationContextTest, CancellationTokenIsCancelableAtFirst) {
auto serviceCtx = ServiceContext::make();
auto client = serviceCtx->makeClient("OperationContextTest");
auto opCtx = client->makeOperationContext();
- auto cancelToken = opCtx->getCancelationToken();
+ auto cancelToken = opCtx->getCancellationToken();
ASSERT_TRUE(cancelToken.isCancelable());
}
@@ -329,12 +329,12 @@ TEST_F(OperationDeadlineTests, OperationDeadlineExpiration) {
ASSERT_EQ(ErrorCodes::ExceededTimeLimit, opCtx->checkForInterruptNoAssert());
}
-TEST_F(OperationDeadlineTests, CancelationTokenIsCanceledAfterDeadlineExpires) {
+TEST_F(OperationDeadlineTests, CancellationTokenIsCanceledAfterDeadlineExpires) {
auto opCtx = client->makeOperationContext();
const Seconds timeout{1};
opCtx->setDeadlineAfterNowBy(timeout, ErrorCodes::ExceededTimeLimit);
- auto cancelToken = opCtx->getCancelationToken();
+ auto cancelToken = opCtx->getCancellationToken();
// Should not be canceled yet.
ASSERT_FALSE(cancelToken.isCanceled());
@@ -343,7 +343,7 @@ TEST_F(OperationDeadlineTests, CancelationTokenIsCanceledAfterDeadlineExpires) {
mockClock->advance(timeout * 2);
// This is required for the OperationContext to realize that the timeout has passed and mark
- // itself killed, which is what triggers cancelation.
+ // itself killed, which is what triggers cancellation.
ASSERT_EQ(ErrorCodes::ExceededTimeLimit, opCtx->checkForInterruptNoAssert());
// Should be canceled now.
@@ -351,12 +351,12 @@ TEST_F(OperationDeadlineTests, CancelationTokenIsCanceledAfterDeadlineExpires) {
}
TEST_F(OperationDeadlineTests,
- WaitingOnAFutureWithAnOperationContextThatHasCancelationCallbacksDoesNotDeadlock) {
+ WaitingOnAFutureWithAnOperationContextThatHasCancellationCallbacksDoesNotDeadlock) {
auto opCtx = client->makeOperationContext();
const Seconds timeout{1};
opCtx->setDeadlineAfterNowBy(timeout, ErrorCodes::ExceededTimeLimit);
- auto cancelToken = opCtx->getCancelationToken();
+ auto cancelToken = opCtx->getCancellationToken();
// Should not be canceled yet.
ASSERT_FALSE(cancelToken.isCanceled());
@@ -364,7 +364,7 @@ TEST_F(OperationDeadlineTests,
// Advance past the timeout.
mockClock->advance(timeout * 2);
- // Chain a callback to the token. This will mean that calling cancel() on the CancelationSource
+ // Chain a callback to the token. This will mean that calling cancel() on the CancellationSource
// will eventually have to acquire a mutex when fulfilling its SharedPromie.
auto fut = cancelToken.onCancel().unsafeToInlineFuture().then([] {});
@@ -420,11 +420,11 @@ TEST_F(OperationDeadlineTests, WaitForMaxTimeExpiredCV) {
auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
stdx::unique_lock<Latch> lk(m);
- ASSERT_FALSE(opCtx->getCancelationToken().isCanceled());
+ ASSERT_FALSE(opCtx->getCancellationToken().isCanceled());
ASSERT_THROWS_CODE(opCtx->waitForConditionOrInterrupt(cv, lk, [] { return false; }),
DBException,
ErrorCodes::ExceededTimeLimit);
- ASSERT_TRUE(opCtx->getCancelationToken().isCanceled());
+ ASSERT_TRUE(opCtx->getCancellationToken().isCanceled());
}
TEST_F(OperationDeadlineTests, WaitForMaxTimeExpiredCVWithWaitUntilSet) {
@@ -433,12 +433,12 @@ TEST_F(OperationDeadlineTests, WaitForMaxTimeExpiredCVWithWaitUntilSet) {
auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
stdx::unique_lock<Latch> lk(m);
- ASSERT_FALSE(opCtx->getCancelationToken().isCanceled());
+ ASSERT_FALSE(opCtx->getCancellationToken().isCanceled());
ASSERT_THROWS_CODE(opCtx->waitForConditionOrInterruptUntil(
cv, lk, mockClock->now() + Seconds{10}, [] { return false; }),
DBException,
ErrorCodes::ExceededTimeLimit);
- ASSERT_TRUE(opCtx->getCancelationToken().isCanceled());
+ ASSERT_TRUE(opCtx->getCancellationToken().isCanceled());
}
TEST_F(OperationDeadlineTests, NestedTimeoutsTimeoutInOrder) {
@@ -729,12 +729,12 @@ TEST_F(OperationDeadlineTests, DuringWaitMaxTimeExpirationDominatesUntilExpirati
auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
stdx::unique_lock<Latch> lk(m);
- ASSERT_FALSE(opCtx->getCancelationToken().isCanceled());
+ ASSERT_FALSE(opCtx->getCancellationToken().isCanceled());
ASSERT_THROWS_CODE(
opCtx->waitForConditionOrInterruptUntil(cv, lk, mockClock->now(), [] { return false; }),
DBException,
ErrorCodes::ExceededTimeLimit);
- ASSERT_TRUE(opCtx->getCancelationToken().isCanceled());
+ ASSERT_TRUE(opCtx->getCancellationToken().isCanceled());
}
class ThreadedOperationDeadlineTests : public OperationDeadlineTests {
@@ -862,13 +862,13 @@ TEST_F(ThreadedOperationDeadlineTests, KillArrivesWhileWaiting) {
auto waiterResult = startWaiter(opCtx.get(), &state);
ASSERT(stdx::future_status::ready !=
waiterResult.wait_for(Milliseconds::zero().toSystemDuration()));
- ASSERT_FALSE(opCtx->getCancelationToken().isCanceled());
+ ASSERT_FALSE(opCtx->getCancellationToken().isCanceled());
{
stdx::lock_guard<Client> clientLock(*opCtx->getClient());
opCtx->markKilled();
}
ASSERT_THROWS_CODE(waiterResult.get(), DBException, ErrorCodes::Interrupted);
- ASSERT_TRUE(opCtx->getCancelationToken().isCanceled());
+ ASSERT_TRUE(opCtx->getCancellationToken().isCanceled());
}
TEST_F(ThreadedOperationDeadlineTests, MaxTimeExpiresWhileWaiting) {
@@ -885,10 +885,10 @@ TEST_F(ThreadedOperationDeadlineTests, MaxTimeExpiresWhileWaiting) {
mockClock->advance(Seconds{9});
ASSERT(stdx::future_status::ready !=
waiterResult.wait_for(Milliseconds::zero().toSystemDuration()));
- ASSERT_FALSE(opCtx->getCancelationToken().isCanceled());
+ ASSERT_FALSE(opCtx->getCancellationToken().isCanceled());
mockClock->advance(Seconds{2});
ASSERT_THROWS_CODE(waiterResult.get(), DBException, ErrorCodes::ExceededTimeLimit);
- ASSERT_TRUE(opCtx->getCancelationToken().isCanceled());
+ ASSERT_TRUE(opCtx->getCancellationToken().isCanceled());
}
TEST_F(ThreadedOperationDeadlineTests, UntilExpiresWhileWaiting) {
@@ -1061,7 +1061,7 @@ TEST(OperationContextTest, TestWaitForConditionOrInterruptUntilAPI) {
Date_t deadline = Date_t::now() + Milliseconds(500);
ASSERT_EQ(opCtx->waitForConditionOrInterruptUntil(cv, lk, deadline, [] { return false; }),
false);
- ASSERT_FALSE(opCtx->getCancelationToken().isCanceled());
+ ASSERT_FALSE(opCtx->getCancellationToken().isCanceled());
// Case (3). Expect an error of `MaxTimeMSExpired`.
opCtx->setDeadlineByDate(Date_t::now(), ErrorCodes::MaxTimeMSExpired);
@@ -1070,7 +1070,7 @@ TEST(OperationContextTest, TestWaitForConditionOrInterruptUntilAPI) {
opCtx->waitForConditionOrInterruptUntil(cv, lk, deadline, [] { return false; }),
DBException,
ErrorCodes::MaxTimeMSExpired);
- ASSERT_TRUE(opCtx->getCancelationToken().isCanceled());
+ ASSERT_TRUE(opCtx->getCancellationToken().isCanceled());
}
TEST(OperationContextTest, TestIsWaitingForConditionOrInterrupt) {
diff --git a/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp b/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp
index 8038e7bbcb5..52568382913 100644
--- a/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp
+++ b/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp
@@ -109,7 +109,7 @@ void PeriodicThreadToAbortExpiredTransactions::_init(ServiceContext* serviceCont
try {
killAllExpiredTransactions(opCtx.get());
- } catch (ExceptionForCat<ErrorCategory::CancelationError>& ex) {
+ } catch (ExceptionForCat<ErrorCategory::CancellationError>& ex) {
LOGV2_DEBUG(4684101, 2, "Periodic job canceled", "{reason}"_attr = ex.reason());
}
},
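
Illustrative sketch (not part of the patch) of the exception-category form of the same check; 'doPeriodicWork' is a hypothetical helper.

    try {
        doPeriodicWork(opCtx.get());
    } catch (const ExceptionForCat<ErrorCategory::CancellationError>&) {
        // Swallow cancellation: the periodic job was stopped or the opCtx was killed.
    }
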
diff --git a/src/mongo/db/repl/primary_only_service.cpp b/src/mongo/db/repl/primary_only_service.cpp
index c2d81d786eb..7ccd7830582 100644
--- a/src/mongo/db/repl/primary_only_service.cpp
+++ b/src/mongo/db/repl/primary_only_service.cpp
@@ -338,7 +338,7 @@ void PrimaryOnlyService::onStepUp(const OpTime& stepUpOpTime) {
str::stream() << "term " << newTerm << " is not greater than " << _term);
_term = newTerm;
_state = State::kRebuilding;
- _source = CancelationSource();
+ _source = CancellationSource();
// Install a new executor, while moving the old one into 'newThenOldScopedExecutor' so it
// can be accessed outside of _mutex.
@@ -678,7 +678,7 @@ void PrimaryOnlyService::_rebuildInstances(long long term) noexcept {
std::shared_ptr<PrimaryOnlyService::Instance> PrimaryOnlyService::_insertNewInstance(
WithLock wl, std::shared_ptr<Instance> instance, InstanceID instanceID) {
- CancelationSource instanceSource(_source.token());
+ CancellationSource instanceSource(_source.token());
auto instanceCompleteFuture =
ExecutorFuture<void>(**_scopedExecutor)
.then([serviceName = getServiceName(),
diff --git a/src/mongo/db/repl/primary_only_service.h b/src/mongo/db/repl/primary_only_service.h
index ddcd0cfb46d..4b9a82a4b5f 100644
--- a/src/mongo/db/repl/primary_only_service.h
+++ b/src/mongo/db/repl/primary_only_service.h
@@ -43,7 +43,7 @@
#include "mongo/executor/scoped_task_executor.h"
#include "mongo/executor/task_executor.h"
#include "mongo/platform/mutex.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/fail_point.h"
@@ -116,11 +116,11 @@ public:
* lifetime by getting a shared_ptr via 'shared_from_this' or else the Instance may be
* destroyed out from under them.
*
- * 2. On stepdown/shutdown of a PrimaryOnlyService, the input cancelation token will be
+ * 2. On stepdown/shutdown of a PrimaryOnlyService, the input cancellation token will be
* marked canceled.
*/
virtual SemiFuture<void> run(std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) noexcept = 0;
+ const CancellationToken& token) noexcept = 0;
/**
* This is the function that is called when this running Instance needs to be interrupted.
@@ -338,7 +338,7 @@ private:
class ActiveInstance {
public:
ActiveInstance(std::shared_ptr<Instance> instance,
- CancelationSource source,
+ CancellationSource source,
SemiFuture<void> instanceComplete)
: _instance(std::move(instance)),
_instanceComplete(std::move(instanceComplete)),
@@ -374,13 +374,13 @@ private:
// A future that will be resolved when the passed in Instance has finished running.
const SemiFuture<void> _instanceComplete;
- // Each instance of a PrimaryOnlyService will own a CancelationSource for memory management
- // purposes. Any memory associated with an instance's CancelationSource will be cleaned up
+ // Each instance of a PrimaryOnlyService will own a CancellationSource for memory management
+ // purposes. Any memory associated with an instance's CancellationSource will be cleaned up
// upon the destruction of an instance. It must be instantiated from a token from the
- // CancelationSource of the PrimaryOnlyService class in order to attain a hierarchical
- // ownership pattern that allows for cancelation token clean up if the PrimaryOnlyService is
- // shutdown/stepdown.
- CancelationSource _source;
+ // CancellationSource of the PrimaryOnlyService class in order to attain a hierarchical
+ // ownership pattern that allows for cancellation token cleanup if the PrimaryOnlyService
+ // is shut down or steps down.
+ CancellationSource _source;
};
/*
@@ -399,7 +399,7 @@ private:
* state machine collection.
*/
virtual ExecutorFuture<void> _rebuildService(
- std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancelationToken& token) {
+ std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancellationToken& token) {
return ExecutorFuture<void>(**executor, Status::OK());
};
@@ -479,9 +479,9 @@ private:
// A set of OpCtxs running on Client threads associated with this PrimaryOnlyService.
stdx::unordered_set<OperationContext*> _opCtxs; // (M)
- // CancelationSource used on stepdown/shutdown to cancel work in all running instances of a
+ // CancellationSource used on stepdown/shutdown to cancel work in all running instances of a
// PrimaryOnlyService.
- CancelationSource _source;
+ CancellationSource _source;
};
/**
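The hierarchical ownership described in the ActiveInstance comment above can be summarized in a few lines. A minimal sketch, using only the CancellationSource/CancellationToken calls that appear in this patch (the free function and the invariant checks are illustrative, not part of the commit):

    #include "mongo/util/assert_util.h"
    #include "mongo/util/cancellation.h"

    namespace mongo {
    void hierarchicalCancellationSketch() {
        CancellationSource serviceSource;                          // owned by the PrimaryOnlyService
        CancellationSource instanceSource(serviceSource.token());  // owned by one Instance

        // Canceling the child source leaves the parent untouched ...
        instanceSource.cancel();
        invariant(!serviceSource.token().isCanceled());

        // ... but canceling the parent cancels every source derived from its token,
        // which is how stepdown/shutdown fans out to all running instances.
        CancellationSource anotherInstanceSource(serviceSource.token());
        serviceSource.cancel();
        invariant(anotherInstanceSource.token().isCanceled());
    }
    }  // namespace mongo
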
diff --git a/src/mongo/db/repl/primary_only_service_test.cpp b/src/mongo/db/repl/primary_only_service_test.cpp
index f293e1c1203..56d14b1f468 100644
--- a/src/mongo/db/repl/primary_only_service_test.cpp
+++ b/src/mongo/db/repl/primary_only_service_test.cpp
@@ -101,7 +101,7 @@ public:
_service(service) {}
SemiFuture<void> run(std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) noexcept override {
+ const CancellationToken& token) noexcept override {
if (MONGO_unlikely(TestServiceHangDuringInitialization.shouldFail())) {
TestServiceHangDuringInitialization.pauseWhileSet();
}
@@ -163,7 +163,7 @@ public:
}
void interrupt(Status status) override {
- // Currently unused. Functionality has been put into cancelation logic.
+ // Currently unused. Functionality has been put into cancellation logic.
}
// Whether or not an op is reported depends on the "reportOp" field of the state doc the
@@ -263,7 +263,7 @@ public:
private:
ExecutorFuture<void> _rebuildService(std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) override {
+ const CancellationToken& token) override {
auto nss = getStateDocumentsNS();
AllowOpCtxWhenServiceRebuildingBlock allowOpCtxBlock(Client::getCurrent());
@@ -363,7 +363,7 @@ DEATH_TEST_F(PrimaryOnlyServiceTest,
registry.registerService(std::move(service2));
}
-TEST_F(PrimaryOnlyServiceTest, CancelationOnStepdown) {
+TEST_F(PrimaryOnlyServiceTest, CancellationOnStepdown) {
// Used to ensure that _scheduleRun is run before we run the stepdown logic so that we fulfill
// the _completionPromise.
auto timesEntered = TestServiceHangDuringInitialization.setMode(FailPoint::alwaysOn);
@@ -380,7 +380,7 @@ TEST_F(PrimaryOnlyServiceTest, CancelationOnStepdown) {
ASSERT_EQ(instance->getCompletionFuture().getNoThrow().code(), ErrorCodes::Interrupted);
}
-TEST_F(PrimaryOnlyServiceTest, ResetCancelationSourceOnStepupAndCompleteSuccessfully) {
+TEST_F(PrimaryOnlyServiceTest, ResetCancellationSourceOnStepupAndCompleteSuccessfully) {
{
// Used to ensure that _scheduleRun is run before we run the stepdown logic so that we
// fulfill the _completionPromise.
@@ -410,7 +410,7 @@ TEST_F(PrimaryOnlyServiceTest, ResetCancelationSourceOnStepupAndCompleteSuccessf
}
}
-TEST_F(PrimaryOnlyServiceTest, ResetCancelationSourceOnStepupAndStepDownAgain) {
+TEST_F(PrimaryOnlyServiceTest, ResetCancellationSourceOnStepupAndStepDownAgain) {
{
// Used to ensure that _scheduleRun is run before we run the stepdown logic so that we
// fulfill the _completionPromise.
diff --git a/src/mongo/db/repl/rollback_checker.cpp b/src/mongo/db/repl/rollback_checker.cpp
index 3a8708455fd..fd5c6d4b53b 100644
--- a/src/mongo/db/repl/rollback_checker.cpp
+++ b/src/mongo/db/repl/rollback_checker.cpp
@@ -111,7 +111,7 @@ Status RollbackChecker::reset_sync() {
if (!cbh.isOK()) {
return Status(ErrorCodes::CallbackCanceled,
- "RollbackChecker reset failed due to callback cancelation");
+ "RollbackChecker reset failed due to callback cancellation");
}
_executor->wait(cbh.getValue());
diff --git a/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp b/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp
index c8bbb3045ee..754b4127726 100644
--- a/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp
+++ b/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp
@@ -139,7 +139,7 @@ SemiFuture<void> checkIfCanReadOrBlock(OperationContext* opCtx, StringData dbNam
}
// Source to cancel the timeout if the operation completed in time.
- CancelationSource cancelTimeoutSource;
+ CancellationSource cancelTimeoutSource;
auto canReadFuture = mtab->getCanReadFuture(opCtx);
diff --git a/src/mongo/db/repl/tenant_migration_donor_access_blocker.cpp b/src/mongo/db/repl/tenant_migration_donor_access_blocker.cpp
index 242dae39b44..7e0b428a274 100644
--- a/src/mongo/db/repl/tenant_migration_donor_access_blocker.cpp
+++ b/src/mongo/db/repl/tenant_migration_donor_access_blocker.cpp
@@ -39,7 +39,7 @@
#include "mongo/db/repl/tenant_migration_conflict_info.h"
#include "mongo/db/repl/tenant_migration_donor_access_blocker.h"
#include "mongo/logv2/log.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
#include "mongo/util/fail_point.h"
#include "mongo/util/future_util.h"
@@ -82,7 +82,7 @@ Status TenantMigrationDonorAccessBlocker::checkIfCanWrite() {
Status TenantMigrationDonorAccessBlocker::waitUntilCommittedOrAborted(OperationContext* opCtx,
OperationType operationType) {
// Source to cancel the timeout if the operation completed in time.
- CancelationSource cancelTimeoutSource;
+ CancellationSource cancelTimeoutSource;
auto executor = getAsyncBlockingOperationsExecutor();
std::vector<ExecutorFuture<void>> futures;
diff --git a/src/mongo/db/repl/tenant_migration_donor_service.cpp b/src/mongo/db/repl/tenant_migration_donor_service.cpp
index e58a4383d81..2901635ebbd 100644
--- a/src/mongo/db/repl/tenant_migration_donor_service.cpp
+++ b/src/mongo/db/repl/tenant_migration_donor_service.cpp
@@ -53,7 +53,7 @@
#include "mongo/logv2/log.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/metadata/egress_metadata_hook_list.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
#include "mongo/util/future_util.h"
namespace mongo {
@@ -78,35 +78,35 @@ const ReadPreferenceSetting kPrimaryOnlyReadPreference(ReadPreference::PrimaryOn
const int kMaxRecipientKeyDocsFindAttempts = 10;
-bool shouldStopCreatingTTLIndex(Status status, const CancelationToken& token) {
+bool shouldStopCreatingTTLIndex(Status status, const CancellationToken& token) {
return status.isOK() || token.isCanceled();
}
-bool shouldStopInsertingDonorStateDoc(Status status, const CancelationToken& token) {
+bool shouldStopInsertingDonorStateDoc(Status status, const CancellationToken& token) {
return status.isOK() || status == ErrorCodes::ConflictingOperationInProgress ||
token.isCanceled();
}
-bool shouldStopUpdatingDonorStateDoc(Status status, const CancelationToken& token) {
+bool shouldStopUpdatingDonorStateDoc(Status status, const CancellationToken& token) {
return status.isOK() || token.isCanceled();
}
-bool shouldStopSendingRecipientCommand(Status status, const CancelationToken& token) {
+bool shouldStopSendingRecipientCommand(Status status, const CancellationToken& token) {
return status.isOK() ||
!(ErrorCodes::isRetriableError(status) ||
status == ErrorCodes::FailedToSatisfyReadPreference) ||
token.isCanceled();
}
-bool shouldStopFetchingRecipientClusterTimeKeyDocs(Status status, const CancelationToken& token) {
+bool shouldStopFetchingRecipientClusterTimeKeyDocs(Status status, const CancellationToken& token) {
// TODO (SERVER-54926): Convert HostUnreachable error in
// _fetchAndStoreRecipientClusterTimeKeyDocs to specific error.
return status.isOK() || !ErrorCodes::isRetriableError(status) ||
status.code() == ErrorCodes::HostUnreachable || token.isCanceled();
}
-void checkIfReceivedDonorAbortMigration(const CancelationToken& serviceToken,
- const CancelationToken& instanceToken) {
+void checkIfReceivedDonorAbortMigration(const CancellationToken& serviceToken,
+ const CancellationToken& instanceToken) {
// If only the instance token was canceled, then we must have gotten donorAbortMigration.
uassert(ErrorCodes::TenantMigrationAborted,
"Migration aborted due to receiving donorAbortMigration.",
@@ -150,7 +150,7 @@ void setPromiseOkIfNotReady(WithLock lk, Promise& promise) {
// will copy cluster time keys from the other. The donor service is set up on all mongods on stepup
// to primary, so this index will be created on both donors and recipients.
ExecutorFuture<void> TenantMigrationDonorService::createStateDocumentTTLIndex(
- std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancelationToken& token) {
+ std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancellationToken& token) {
return AsyncTry([this] {
auto nss = getStateDocumentsNS();
@@ -171,11 +171,11 @@ ExecutorFuture<void> TenantMigrationDonorService::createStateDocumentTTLIndex(
})
.until([token](Status status) { return shouldStopCreatingTTLIndex(status, token); })
.withBackoffBetweenIterations(kExponentialBackoff)
- .on(**executor, CancelationToken::uncancelable());
+ .on(**executor, CancellationToken::uncancelable());
}
ExecutorFuture<void> TenantMigrationDonorService::createExternalKeysTTLIndex(
- std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancelationToken& token) {
+ std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancellationToken& token) {
return AsyncTry([this] {
const auto nss = NamespaceString::kExternalKeysCollectionNamespace;
@@ -197,11 +197,11 @@ ExecutorFuture<void> TenantMigrationDonorService::createExternalKeysTTLIndex(
})
.until([token](Status status) { return shouldStopCreatingTTLIndex(status, token); })
.withBackoffBetweenIterations(kExponentialBackoff)
- .on(**executor, CancelationToken::uncancelable());
+ .on(**executor, CancellationToken::uncancelable());
}
ExecutorFuture<void> TenantMigrationDonorService::_rebuildService(
- std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancelationToken& token) {
+ std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancellationToken& token) {
return createStateDocumentTTLIndex(executor, token).then([this, executor, token] {
return createExternalKeysTTLIndex(executor, token);
});
@@ -257,7 +257,7 @@ TenantMigrationDonorService::Instance::~Instance() {
// Unlike the TenantMigrationDonorService's scoped task executor which is shut down on stepdown
// and joined on stepup, _recipientCmdExecutor is only shut down and joined when the Instance
// is destroyed. This is safe since ThreadPoolTaskExecutor::shutdown() only cancels the
- // outstanding work on the task executor which the cancelation token will already do, and the
+ // outstanding work on the task executor which the cancellation token will already do, and the
// Instance will be destroyed on stepup so this is equivalent to joining the task executor on
// stepup.
_recipientCmdExecutor->shutdown();
@@ -280,7 +280,7 @@ TenantMigrationDonorService::Instance::_makeRecipientCmdExecutor() {
// require passing the pointer to the TenantMigrationService into the Instance and making
// constructInstance not const so we can set the client's decoration here. Right now there
// is no need for that since the task executor is only used with scheduleRemoteCommand and
- // no opCtx will be created (the cancelation token is responsible for canceling the
+ // no opCtx will be created (the cancellation token is responsible for canceling the
// outstanding work on the task executor).
stdx::lock_guard<Client> lk(*client);
client->setSystemOperationKillableByStepdown(lk);
@@ -330,7 +330,7 @@ boost::optional<BSONObj> TenantMigrationDonorService::Instance::reportForCurrent
BSONObjBuilder bob;
bob.append("desc", "tenant donor migration");
bob.append("migrationCompleted", _completionPromise.getFuture().isReady());
- bob.append("receivedCancelation", _abortMigrationSource.token().isCanceled());
+ bob.append("receivedCancellation", _abortMigrationSource.token().isCanceled());
bob.append("instanceID", _migrationUuid.toBSON());
bob.append("tenantId", _tenantId);
bob.append("recipientConnectionString", _recipientConnectionString);
@@ -416,8 +416,8 @@ ExecutorFuture<void>
TenantMigrationDonorService::Instance::_fetchAndStoreRecipientClusterTimeKeyDocs(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
std::shared_ptr<RemoteCommandTargeter> recipientTargeterRS,
- const CancelationToken& serviceToken,
- const CancelationToken& instanceToken) {
+ const CancellationToken& serviceToken,
+ const CancellationToken& instanceToken) {
return AsyncTry([this,
self = shared_from_this(),
executor,
@@ -530,19 +530,19 @@ TenantMigrationDonorService::Instance::_fetchAndStoreRecipientClusterTimeKeyDocs
auto writeConcernFuture = repl::ReplicationCoordinator::get(_serviceContext)
->awaitReplicationAsyncNoWTimeout(
lastKeyOpTime, votingMembersWriteConcern);
- return future_util::withCancelation(std::move(writeConcernFuture),
- instanceToken);
+ return future_util::withCancellation(std::move(writeConcernFuture),
+ instanceToken);
});
})
.until([instanceToken](Status status) {
return shouldStopFetchingRecipientClusterTimeKeyDocs(status, instanceToken);
})
.withBackoffBetweenIterations(kExponentialBackoff)
- .on(**executor, CancelationToken::uncancelable());
+ .on(**executor, CancellationToken::uncancelable());
}
ExecutorFuture<repl::OpTime> TenantMigrationDonorService::Instance::_insertStateDoc(
- std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancelationToken& token) {
+ std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancellationToken& token) {
stdx::lock_guard<Latch> lg(_mutex);
invariant(_stateDoc.getState() == TenantMigrationDonorStateEnum::kUninitialized);
@@ -576,13 +576,13 @@ ExecutorFuture<repl::OpTime> TenantMigrationDonorService::Instance::_insertState
return shouldStopInsertingDonorStateDoc(swOpTime.getStatus(), token);
})
.withBackoffBetweenIterations(kExponentialBackoff)
- .on(**executor, CancelationToken::uncancelable());
+ .on(**executor, CancellationToken::uncancelable());
}
ExecutorFuture<repl::OpTime> TenantMigrationDonorService::Instance::_updateStateDoc(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
const TenantMigrationDonorStateEnum nextState,
- const CancelationToken& token) {
+ const CancellationToken& token) {
stdx::lock_guard<Latch> lg(_mutex);
const auto originalStateDocBson = _stateDoc.toBSON();
@@ -685,12 +685,12 @@ ExecutorFuture<repl::OpTime> TenantMigrationDonorService::Instance::_updateState
return shouldStopUpdatingDonorStateDoc(swOpTime.getStatus(), token);
})
.withBackoffBetweenIterations(kExponentialBackoff)
- .on(**executor, CancelationToken::uncancelable());
+ .on(**executor, CancellationToken::uncancelable());
}
ExecutorFuture<repl::OpTime>
TenantMigrationDonorService::Instance::_markStateDocAsGarbageCollectable(
- std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancelationToken& token) {
+ std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancellationToken& token) {
stdx::lock_guard<Latch> lg(_mutex);
_stateDoc.setExpireAt(_serviceContext->getFastClockSource()->now() +
@@ -726,13 +726,13 @@ TenantMigrationDonorService::Instance::_markStateDocAsGarbageCollectable(
return shouldStopUpdatingDonorStateDoc(swOpTime.getStatus(), token);
})
.withBackoffBetweenIterations(kExponentialBackoff)
- .on(**executor, CancelationToken::uncancelable());
+ .on(**executor, CancellationToken::uncancelable());
}
ExecutorFuture<void> TenantMigrationDonorService::Instance::_waitForMajorityWriteConcern(
std::shared_ptr<executor::ScopedTaskExecutor> executor, repl::OpTime opTime) {
return WaitForMajorityService::get(_serviceContext)
- .waitUntilMajority(std::move(opTime), CancelationToken::uncancelable())
+ .waitUntilMajority(std::move(opTime), CancellationToken::uncancelable())
.thenRunOn(**executor)
.then([this, self = shared_from_this()] {
stdx::lock_guard<Latch> lg(_mutex);
@@ -759,7 +759,7 @@ ExecutorFuture<void> TenantMigrationDonorService::Instance::_sendCommandToRecipi
std::shared_ptr<executor::ScopedTaskExecutor> executor,
std::shared_ptr<RemoteCommandTargeter> recipientTargeterRS,
const BSONObj& cmdObj,
- const CancelationToken& token) {
+ const CancellationToken& token) {
return AsyncTry(
[this, self = shared_from_this(), executor, recipientTargeterRS, cmdObj, token] {
return recipientTargeterRS->findHost(kPrimaryOnlyReadPreference, token)
@@ -796,7 +796,7 @@ ExecutorFuture<void> TenantMigrationDonorService::Instance::_sendCommandToRecipi
ExecutorFuture<void> TenantMigrationDonorService::Instance::_sendRecipientSyncDataCommand(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
std::shared_ptr<RemoteCommandTargeter> recipientTargeterRS,
- const CancelationToken& token) {
+ const CancellationToken& token) {
auto opCtxHolder = cc().makeOperationContext();
auto opCtx = opCtxHolder.get();
@@ -827,7 +827,7 @@ ExecutorFuture<void> TenantMigrationDonorService::Instance::_sendRecipientSyncDa
ExecutorFuture<void> TenantMigrationDonorService::Instance::_sendRecipientForgetMigrationCommand(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
std::shared_ptr<RemoteCommandTargeter> recipientTargeterRS,
- const CancelationToken& token) {
+ const CancellationToken& token) {
auto opCtxHolder = cc().makeOperationContext();
auto opCtx = opCtxHolder.get();
@@ -848,7 +848,7 @@ ExecutorFuture<void> TenantMigrationDonorService::Instance::_sendRecipientForget
SemiFuture<void> TenantMigrationDonorService::Instance::run(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& serviceToken) noexcept {
+ const CancellationToken& serviceToken) noexcept {
{
stdx::lock_guard<Latch> lg(_mutex);
if (!_stateDoc.getMigrationStart()) {
@@ -858,7 +858,7 @@ SemiFuture<void> TenantMigrationDonorService::Instance::run(
pauseTenantMigrationBeforeEnteringFutureChain.pauseWhileSet();
- _abortMigrationSource = CancelationSource(serviceToken);
+ _abortMigrationSource = CancellationSource(serviceToken);
{
stdx::lock_guard<Latch> lg(_mutex);
setPromiseOkIfNotReady(lg, _migrationCancelablePromise);
@@ -881,7 +881,7 @@ SemiFuture<void> TenantMigrationDonorService::Instance::run(
return _insertStateDoc(executor, _abortMigrationSource.token())
.then([this, self = shared_from_this(), executor](repl::OpTime opTime) {
// TODO (SERVER-53389): TenantMigration{Donor, Recipient}Service should
- // use its base PrimaryOnlyService's cancelation source to pass tokens
+ // use its base PrimaryOnlyService's cancellation source to pass tokens
// in calls to WaitForMajorityService::waitUntilMajority.
return _waitForMajorityWriteConcern(executor, std::move(opTime));
})
@@ -932,7 +932,7 @@ SemiFuture<void> TenantMigrationDonorService::Instance::run(
.then([this, self = shared_from_this(), executor](repl::OpTime opTime) {
// TODO (SERVER-53389): TenantMigration{Donor, Recipient}Service should
- // use its base PrimaryOnlyService's cancelation source to pass tokens
+ // use its base PrimaryOnlyService's cancellation source to pass tokens
// in calls to WaitForMajorityService::waitUntilMajority.
return _waitForMajorityWriteConcern(executor, std::move(opTime));
});
@@ -964,7 +964,7 @@ SemiFuture<void> TenantMigrationDonorService::Instance::run(
.then([this, self = shared_from_this(), executor, serviceToken](
repl::OpTime opTime) {
// TODO (SERVER-53389): TenantMigration{Donor, Recipient}Service should
- // use its base PrimaryOnlyService's cancelation source to pass tokens
+ // use its base PrimaryOnlyService's cancellation source to pass tokens
// in calls to WaitForMajorityService::waitUntilMajority.
checkIfReceivedDonorAbortMigration(serviceToken,
_abortMigrationSource.token());
@@ -989,7 +989,7 @@ SemiFuture<void> TenantMigrationDonorService::Instance::run(
}
// Source to cancel the timeout if the operation completed in time.
- CancelationSource cancelTimeoutSource;
+ CancellationSource cancelTimeoutSource;
auto deadlineReachedFuture = (*executor)->sleepFor(
Milliseconds(repl::tenantMigrationBlockingStateTimeoutMS.load()),
@@ -1057,7 +1057,7 @@ SemiFuture<void> TenantMigrationDonorService::Instance::run(
.then([this, self = shared_from_this(), executor, serviceToken](
repl::OpTime opTime) {
// TODO (SERVER-53389): TenantMigration{Donor, Recipient}Service should
- // use its base PrimaryOnlyService's cancelation source to pass tokens
+ // use its base PrimaryOnlyService's cancellation source to pass tokens
// in calls to WaitForMajorityService::waitUntilMajority.
return _waitForMajorityWriteConcern(executor, std::move(opTime))
.then([this, self = shared_from_this()] {
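The serviceToken/instanceToken pair threaded through run() above exists so the donor can tell an explicit abort apart from a stepdown or shutdown: _abortMigrationSource is built from the service-wide token, so stepdown cancels both tokens while donorAbortMigration cancels only the per-migration one. A small sketch of that relationship, with illustrative helper names:

    #include "mongo/util/cancellation.h"

    namespace mongo {
    // True only when the per-migration source was canceled on its own, i.e. a
    // donorAbortMigration was received; stepdown/shutdown cancels both tokens.
    bool wasAbortedByDonorAbortMigration(const CancellationToken& serviceToken,
                                         const CancellationToken& instanceToken) {
        return instanceToken.isCanceled() && !serviceToken.isCanceled();
    }

    void donorAbortSourceSketch(const CancellationToken& serviceToken) {
        // Mirrors `_abortMigrationSource = CancellationSource(serviceToken)` in run().
        CancellationSource abortMigrationSource(serviceToken);

        // The donorAbortMigration command path cancels only this source.
        abortMigrationSource.cancel();

        // Reports true here unless the service token had already been canceled.
        bool abortedByCommand =
            wasAbortedByDonorAbortMigration(serviceToken, abortMigrationSource.token());
        (void)abortedByCommand;
    }
    }  // namespace mongo
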
diff --git a/src/mongo/db/repl/tenant_migration_donor_service.h b/src/mongo/db/repl/tenant_migration_donor_service.h
index 47e45c5e945..b7bbd9dbd82 100644
--- a/src/mongo/db/repl/tenant_migration_donor_service.h
+++ b/src/mongo/db/repl/tenant_migration_donor_service.h
@@ -36,7 +36,7 @@
#include "mongo/db/repl/repl_server_parameters_gen.h"
#include "mongo/db/repl/tenant_migration_access_blocker_util.h"
#include "mongo/executor/thread_pool_task_executor.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
#include "mongo/util/string_map.h"
namespace mongo {
@@ -83,7 +83,7 @@ public:
~Instance();
SemiFuture<void> run(std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) noexcept override;
+ const CancellationToken& token) noexcept override;
void interrupt(Status status) override;
@@ -123,7 +123,7 @@ public:
/**
* Returns a Future that will be resolved when a migration has called the run() method and
- * instantiated the CancelationSource.
+ * instantiated the CancellationSource.
*/
SharedSemiFuture<void> getMigrationCancelableFuture() const {
return _migrationCancelablePromise.getFuture();
@@ -172,15 +172,15 @@ public:
ExecutorFuture<void> _fetchAndStoreRecipientClusterTimeKeyDocs(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
std::shared_ptr<RemoteCommandTargeter> recipientTargeterRS,
- const CancelationToken& serviceToken,
- const CancelationToken& instanceToken);
+ const CancellationToken& serviceToken,
+ const CancellationToken& instanceToken);
/**
* Inserts the state document to _stateDocumentsNS and returns the opTime for the insert
* oplog entry.
*/
ExecutorFuture<repl::OpTime> _insertStateDoc(
- std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancelationToken& token);
+ std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancellationToken& token);
/**
* Updates the state document to have the given state. Then, persists the updated document
@@ -191,14 +191,14 @@ public:
ExecutorFuture<repl::OpTime> _updateStateDoc(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
const TenantMigrationDonorStateEnum nextState,
- const CancelationToken& token);
+ const CancellationToken& token);
/**
* Sets the "expireAt" time for the state document to be garbage collected, and returns
* the opTime for the write.
*/
ExecutorFuture<repl::OpTime> _markStateDocAsGarbageCollectable(
- std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancelationToken& token);
+ std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancellationToken& token);
/**
* Waits for given opTime to be majority committed.
@@ -213,7 +213,7 @@ public:
std::shared_ptr<executor::ScopedTaskExecutor> executor,
std::shared_ptr<RemoteCommandTargeter> recipientTargeterRS,
const BSONObj& cmdObj,
- const CancelationToken& token);
+ const CancellationToken& token);
/**
* Sends the recipientSyncData command to the recipient replica set.
@@ -221,7 +221,7 @@ public:
ExecutorFuture<void> _sendRecipientSyncDataCommand(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
std::shared_ptr<RemoteCommandTargeter> recipientTargeterRS,
- const CancelationToken& token);
+ const CancellationToken& token);
/**
* Sends the recipientForgetMigration command to the recipient replica set.
@@ -229,7 +229,7 @@ public:
ExecutorFuture<void> _sendRecipientForgetMigrationCommand(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
std::shared_ptr<RemoteCommandTargeter> recipientTargeterRS,
- const CancelationToken& token);
+ const CancellationToken& token);
ThreadPool::Limits _getRecipientCmdThreadPoolLimits() const {
ThreadPool::Limits recipientCmdThreadPoolLimits;
@@ -271,7 +271,7 @@ public:
// The latest majority-committed migration state.
DurableState _durableState;
- // Promise that is resolved when run() has been called and the CancelationSource has been
+ // Promise that is resolved when run() has been called and the CancellationSource has been
// instantiated.
SharedPromise<void> _migrationCancelablePromise;
@@ -289,20 +289,20 @@ public:
// abort.
SharedPromise<void> _decisionPromise;
- // This CancelationSource is instantiated from CancelationToken that is passed into run().
- // It allows for manual cancelation of work from the instance.
- CancelationSource _abortMigrationSource;
+ // This CancellationSource is instantiated from CancellationToken that is passed into run().
+ // It allows for manual cancellation of work from the instance.
+ CancellationSource _abortMigrationSource;
};
private:
ExecutorFuture<void> createStateDocumentTTLIndex(
- std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancelationToken& token);
+ std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancellationToken& token);
ExecutorFuture<void> createExternalKeysTTLIndex(
- std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancelationToken& token);
+ std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancellationToken& token);
ExecutorFuture<void> _rebuildService(std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) override;
+ const CancellationToken& token) override;
ServiceContext* const _serviceContext;
};
diff --git a/src/mongo/db/repl/tenant_migration_recipient_access_blocker.cpp b/src/mongo/db/repl/tenant_migration_recipient_access_blocker.cpp
index 4ddf2bb1260..305ef72cf0b 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_access_blocker.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_access_blocker.cpp
@@ -39,7 +39,7 @@
#include "mongo/db/repl/tenant_migration_decoration.h"
#include "mongo/db/repl/tenant_migration_recipient_access_blocker.h"
#include "mongo/logv2/log.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
#include "mongo/util/fail_point.h"
#include "mongo/util/future_util.h"
diff --git a/src/mongo/db/repl/tenant_migration_recipient_service.cpp b/src/mongo/db/repl/tenant_migration_recipient_service.cpp
index 5ccd84db32d..d9b06537fc7 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_service.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_service.cpp
@@ -69,7 +69,7 @@
#include "mongo/logv2/log.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/util/assert_util.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
#include "mongo/util/future_util.h"
namespace mongo {
@@ -240,7 +240,7 @@ ThreadPool::Limits TenantMigrationRecipientService::getThreadPoolLimits() const
}
ExecutorFuture<void> TenantMigrationRecipientService::_rebuildService(
- std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancelationToken& token) {
+ std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancellationToken& token) {
return AsyncTry([this] {
auto nss = getStateDocumentsNS();
@@ -261,7 +261,7 @@ ExecutorFuture<void> TenantMigrationRecipientService::_rebuildService(
})
.until([token](Status status) { return status.isOK() || token.isCanceled(); })
.withBackoffBetweenIterations(kExponentialBackoff)
- .on(**executor, CancelationToken::uncancelable());
+ .on(**executor, CancellationToken::uncancelable());
}
std::shared_ptr<PrimaryOnlyService::Instance> TenantMigrationRecipientService::constructInstance(
@@ -462,9 +462,9 @@ TenantMigrationRecipientService::Instance::waitUntilMigrationReachesReturnAfterR
auto status = swDonorRecipientOpTimePair.getStatus();
- // A cancelation error may occur due to an interrupt. If that is the case, replace the error
+ // A cancellation error may occur due to an interrupt. If that is the case, replace the error
// code with the interrupt code, the true reason for interruption.
- if (ErrorCodes::isCancelationError(status)) {
+ if (ErrorCodes::isCancellationError(status)) {
stdx::lock_guard lk(_mutex);
if (!_taskState.getInterruptStatus().isOK()) {
status = _taskState.getInterruptStatus();
@@ -568,13 +568,13 @@ TenantMigrationRecipientService::Instance::_createAndConnectClients() {
// Only ever used to cancel when the setTenantMigrationRecipientInstanceHostTimeout failpoint is
// set.
- CancelationSource getHostCancelSource;
+ CancellationSource getHostCancelSource;
setTenantMigrationRecipientInstanceHostTimeout.execute([&](const BSONObj& data) {
auto exec = **_scopedExecutor;
const auto deadline =
exec->now() + Milliseconds(data["findHostTimeoutMillis"].safeNumberLong());
// Cancel the find host request after a timeout. Ignore callback handle.
- exec->sleepUntil(deadline, CancelationToken::uncancelable())
+ exec->sleepUntil(deadline, CancellationToken::uncancelable())
.getAsync([getHostCancelSource](auto) mutable { getHostCancelSource.cancel(); });
});
@@ -714,7 +714,7 @@ TenantMigrationRecipientService::Instance::_createAndConnectClients() {
return true;
})
- .on(**_scopedExecutor, CancelationToken::uncancelable())
+ .on(**_scopedExecutor, CancellationToken::uncancelable())
.semi();
}
@@ -796,7 +796,7 @@ SemiFuture<void> TenantMigrationRecipientService::Instance::_initializeStateDoc(
// doesn't rollback.
auto writeOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
return WaitForMajorityService::get(opCtx->getServiceContext())
- .waitUntilMajority(writeOpTime, CancelationToken::uncancelable());
+ .waitUntilMajority(writeOpTime, CancellationToken::uncancelable());
})
.semi();
}
@@ -1446,7 +1446,7 @@ SemiFuture<void> TenantMigrationRecipientService::Instance::_onCloneSuccess() {
auto writeOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
return WaitForMajorityService::get(opCtx->getServiceContext())
- .waitUntilMajority(writeOpTime, CancelationToken::uncancelable());
+ .waitUntilMajority(writeOpTime, CancellationToken::uncancelable());
})
.semi();
}
@@ -1476,7 +1476,7 @@ SemiFuture<void> TenantMigrationRecipientService::Instance::_getDataConsistentFu
tenantMigrationRecipientEntryHelpers::updateStateDoc(opCtx.get(), stateDoc));
return WaitForMajorityService::get(opCtx->getServiceContext())
.waitUntilMajority(repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(),
- CancelationToken::uncancelable());
+ CancellationToken::uncancelable());
})
.semi();
}
@@ -1568,7 +1568,7 @@ SemiFuture<void> TenantMigrationRecipientService::Instance::_markStateDocAsGarba
auto writeOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
return WaitForMajorityService::get(opCtx->getServiceContext())
- .waitUntilMajority(writeOpTime, CancelationToken::uncancelable());
+ .waitUntilMajority(writeOpTime, CancellationToken::uncancelable());
})
.semi();
}
@@ -1730,13 +1730,13 @@ SemiFuture<void> TenantMigrationRecipientService::Instance::_updateStateDocForMa
auto writeOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
return WaitForMajorityService::get(opCtx->getServiceContext())
- .waitUntilMajority(writeOpTime, CancelationToken::uncancelable());
+ .waitUntilMajority(writeOpTime, CancellationToken::uncancelable());
})
.semi();
}
void TenantMigrationRecipientService::Instance::_fetchAndStoreDonorClusterTimeKeyDocs(
- const CancelationToken& token) {
+ const CancellationToken& token) {
std::vector<ExternalKeysCollectionDocument> keyDocs;
auto cursor =
_client->query(NamespaceString::kKeysCollectionNamespace,
@@ -1785,7 +1785,7 @@ void TenantMigrationRecipientService::Instance::_compareRecipientAndDonorFCV() c
SemiFuture<void> TenantMigrationRecipientService::Instance::run(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) noexcept {
+ const CancellationToken& token) noexcept {
_scopedExecutor = executor;
auto scopedOutstandingMigrationCounter =
TenantMigrationStatistics::get(_serviceContext)->getScopedOutstandingReceivingCount();
@@ -2147,7 +2147,7 @@ SemiFuture<void> TenantMigrationRecipientService::Instance::run(
// Network and cancellation errors can be caused due to interrupt() (which shuts
// down the cloner/fetcher dbClientConnection & oplog applier), so replace those
// error status with interrupt status, if set.
- if (ErrorCodes::isCancelationError(status) || ErrorCodes::isNetworkError(status)) {
+ if (ErrorCodes::isCancellationError(status) || ErrorCodes::isNetworkError(status)) {
stdx::lock_guard lk(_mutex);
if (_taskState.isInterrupted()) {
LOGV2(4881207,
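One pattern worth calling out from the recipient changes above is the failpoint-driven find-host timeout: a sleep is scheduled on the executor with an uncancelable token, and when it fires it cancels a dedicated source whose token bounds the findHost() call. A sketch of that timeout-by-cancellation idiom, with illustrative parameter names and includes assumed from the surrounding files:

    #include <memory>

    #include "mongo/client/read_preference.h"
    #include "mongo/client/remote_command_targeter.h"
    #include "mongo/executor/task_executor.h"
    #include "mongo/util/cancellation.h"
    #include "mongo/util/duration.h"

    namespace mongo {
    void findHostWithTimeoutSketch(const std::shared_ptr<executor::TaskExecutor>& exec,
                                   const std::shared_ptr<RemoteCommandTargeter>& targeter,
                                   const ReadPreferenceSetting& readPref,
                                   Milliseconds timeout) {
        CancellationSource getHostCancelSource;

        // Cancel the find-host request after the timeout; the callback handle is ignored.
        exec->sleepUntil(exec->now() + timeout, CancellationToken::uncancelable())
            .getAsync([getHostCancelSource](auto) mutable { getHostCancelSource.cancel(); });

        // findHost() gives up as soon as the token it was handed is canceled.
        auto hostFuture = targeter->findHost(readPref, getHostCancelSource.token());
        (void)hostFuture;
    }
    }  // namespace mongo
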
diff --git a/src/mongo/db/repl/tenant_migration_recipient_service.h b/src/mongo/db/repl/tenant_migration_recipient_service.h
index 2d40b6cda81..e68d0844f55 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_service.h
+++ b/src/mongo/db/repl/tenant_migration_recipient_service.h
@@ -83,7 +83,7 @@ public:
BSONObj stateDoc);
SemiFuture<void> run(std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) noexcept final;
+ const CancellationToken& token) noexcept final;
/*
* Interrupts the running instance and cause the completion future to complete with
@@ -338,7 +338,7 @@ public:
* Fetches all key documents from the donor's admin.system.keys collection, stores them in
* config.external_validation_keys, and refreshes the keys cache.
*/
- void _fetchAndStoreDonorClusterTimeKeyDocs(const CancelationToken& token);
+ void _fetchAndStoreDonorClusterTimeKeyDocs(const CancellationToken& token);
/**
* Retrieves the start optimes from the donor and updates the in-memory state accordingly.
@@ -568,7 +568,7 @@ public:
private:
ExecutorFuture<void> _rebuildService(std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) override;
+ const CancellationToken& token) override;
ServiceContext* const _serviceContext;
diff --git a/src/mongo/db/repl/tenant_migration_util.cpp b/src/mongo/db/repl/tenant_migration_util.cpp
index aaa28a77277..7be5314a922 100644
--- a/src/mongo/db/repl/tenant_migration_util.cpp
+++ b/src/mongo/db/repl/tenant_migration_util.cpp
@@ -46,7 +46,7 @@
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/repl_server_parameters_gen.h"
#include "mongo/db/repl/wait_for_majority_service.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
#include "mongo/util/future_util.h"
namespace mongo {
@@ -334,7 +334,7 @@ createRetryableWritesOplogFetchingPipelineForTenantMigrations(
return Pipeline::create(std::move(stages), expCtx);
}
-bool shouldStopUpdatingExternalKeys(Status status, const CancelationToken& token) {
+bool shouldStopUpdatingExternalKeys(Status status, const CancellationToken& token) {
return status.isOK() || token.isCanceled();
}
@@ -343,7 +343,7 @@ ExecutorFuture<void> markExternalKeysAsGarbageCollectable(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
std::shared_ptr<executor::TaskExecutor> parentExecutor,
UUID migrationId,
- const CancelationToken& token) {
+ const CancellationToken& token) {
auto ttlExpiresAt = serviceContext->getFastClockSource()->now() +
Milliseconds{repl::tenantMigrationGarbageCollectionDelayMS.load()} +
Seconds{repl::tenantMigrationExternalKeysRemovalBufferSecs.load()};
@@ -388,7 +388,7 @@ ExecutorFuture<void> markExternalKeysAsGarbageCollectable(
// AsyncTry itself on an executor that won't shut down.
//
// TODO SERVER-54735: Stop using the parent executor here.
- .on(parentExecutor, CancelationToken::uncancelable());
+ .on(parentExecutor, CancellationToken::uncancelable());
}
} // namespace tenant_migration_util
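The markExternalKeysAsGarbageCollectable() change above also shows the retry idiom used throughout this patch: the AsyncTry loop itself runs with CancellationToken::uncancelable() so an in-flight attempt is never torn down midway, while the caller's token is checked in the until() predicate so the loop still stops once the operation is canceled. A condensed sketch under those assumptions; the attempt function and executor wiring are illustrative stand-ins:

    #include <memory>

    #include "mongo/executor/task_executor.h"
    #include "mongo/util/backoff.h"
    #include "mongo/util/cancellation.h"
    #include "mongo/util/future.h"
    #include "mongo/util/future_util.h"

    namespace mongo {
    // Hypothetical stand-in for the per-iteration work (e.g. creating a TTL index or
    // updating a state document).
    void doOneAttempt() {}

    ExecutorFuture<void> retryUntilDoneOrCanceledSketch(
        std::shared_ptr<executor::TaskExecutor> executor, const CancellationToken& token) {
        const Backoff exponentialBackoff(Seconds(1), Milliseconds::max());
        return AsyncTry([] { doOneAttempt(); })
            .until([token](Status status) { return status.isOK() || token.isCanceled(); })
            .withBackoffBetweenIterations(exponentialBackoff)
            .on(std::move(executor), CancellationToken::uncancelable());
    }
    }  // namespace mongo
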
diff --git a/src/mongo/db/repl/tenant_migration_util.h b/src/mongo/db/repl/tenant_migration_util.h
index dfa288d913c..04cf2245048 100644
--- a/src/mongo/db/repl/tenant_migration_util.h
+++ b/src/mongo/db/repl/tenant_migration_util.h
@@ -152,7 +152,7 @@ ExecutorFuture<void> markExternalKeysAsGarbageCollectable(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
std::shared_ptr<executor::TaskExecutor> parentExecutor,
UUID migrationId,
- const CancelationToken& token);
+ const CancellationToken& token);
/**
* Creates a view on the oplog that allows a tenant migration recipient to fetch retryable writes
diff --git a/src/mongo/db/repl/wait_for_majority_service.cpp b/src/mongo/db/repl/wait_for_majority_service.cpp
index deb45b3158f..eb288cb6ef5 100644
--- a/src/mongo/db/repl/wait_for_majority_service.cpp
+++ b/src/mongo/db/repl/wait_for_majority_service.cpp
@@ -86,7 +86,7 @@ void WaitForMajorityService::startup(ServiceContext* ctx) {
invariant(_state == State::kNotStarted);
_pool = makeThreadPool();
_waitForMajorityClient = ClientStrand::make(ctx->makeClient(kWaitClientName));
- _waitForMajorityCancelationClient = ClientStrand::make(ctx->makeClient(kCancelClientName));
+ _waitForMajorityCancellationClient = ClientStrand::make(ctx->makeClient(kCancelClientName));
_backgroundWorkComplete = _periodicallyWaitForMajority();
_pool->startup();
_state = State::kRunning;
@@ -102,7 +102,7 @@ void WaitForMajorityService::shutDown() {
_state = State::kShutdown;
_waitForMajorityClient->getClientPointer()->setKilled();
- _waitForMajorityCancelationClient->getClientPointer()->setKilled();
+ _waitForMajorityCancellationClient->getClientPointer()->setKilled();
for (auto&& request : _queuedOpTimes) {
if (!request.second->hasBeenProcessed.swap(true)) {
@@ -119,11 +119,11 @@ void WaitForMajorityService::shutDown() {
// in the thread pool to complete since that work might be using the client
// objects.
_waitForMajorityClient.reset();
- _waitForMajorityCancelationClient.reset();
+ _waitForMajorityCancellationClient.reset();
}
SemiFuture<void> WaitForMajorityService::waitUntilMajority(const repl::OpTime& opTime,
- const CancelationToken& cancelToken) {
+ const CancellationToken& cancelToken) {
auto [promise, future] = makePromiseFuture<void>();
auto request = std::make_shared<Request>(std::move(promise));
@@ -172,7 +172,7 @@ SemiFuture<void> WaitForMajorityService::waitUntilMajority(const repl::OpTime& o
if (!s.isOK()) {
return;
}
- auto clientGuard = _waitForMajorityCancelationClient->bind();
+ auto clientGuard = _waitForMajorityCancellationClient->bind();
if (!request->hasBeenProcessed.swap(true)) {
request->result.setError(waitUntilMajorityCanceledStatus());
stdx::lock_guard lk(_mutex);
@@ -229,7 +229,7 @@ SemiFuture<void> WaitForMajorityService::_periodicallyWaitForMajority() {
// TODO (SERVER-53766): Replace with condition-free looping utility.
return false;
})
- .on(_pool, CancelationToken::uncancelable())
+ .on(_pool, CancellationToken::uncancelable())
.semi();
}
diff --git a/src/mongo/db/repl/wait_for_majority_service.h b/src/mongo/db/repl/wait_for_majority_service.h
index 26220487eed..e3689bf1f49 100644
--- a/src/mongo/db/repl/wait_for_majority_service.h
+++ b/src/mongo/db/repl/wait_for_majority_service.h
@@ -108,7 +108,7 @@ public:
* Enqueue a request to wait for the given opTime to be majority committed.
*/
SemiFuture<void> waitUntilMajority(const repl::OpTime& opTime,
- const CancelationToken& cancelToken);
+ const CancellationToken& cancelToken);
private:
enum class State { kNotStarted, kRunning, kShutdown };
@@ -145,7 +145,7 @@ private:
// Manages the Client responsible for the thread that cancels existing requests to wait on
// opTimes.
- ClientStrandPtr _waitForMajorityCancelationClient;
+ ClientStrandPtr _waitForMajorityCancellationClient;
// This mutex synchronizes access to the members declared below.
Mutex _mutex = MONGO_MAKE_LATCH("WaitForMajorityService::_mutex");
diff --git a/src/mongo/db/repl/wait_for_majority_service_test.cpp b/src/mongo/db/repl/wait_for_majority_service_test.cpp
index 14c0266ee89..d2ee9419888 100644
--- a/src/mongo/db/repl/wait_for_majority_service_test.cpp
+++ b/src/mongo/db/repl/wait_for_majority_service_test.cpp
@@ -34,7 +34,7 @@
#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/platform/mutex.h"
#include "mongo/unittest/unittest.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
namespace mongo {
namespace {
@@ -153,7 +153,7 @@ TEST_F(WaitForMajorityServiceNoStartupTest, ShutdownBeforeStartupDoesNotCrashOrH
TEST_F(WaitForMajorityServiceTest, WaitOneOpTime) {
repl::OpTime t1(Timestamp(1, 0), 2);
- auto future = waitService()->waitUntilMajority(t1, CancelationToken::uncancelable());
+ auto future = waitService()->waitUntilMajority(t1, CancellationToken::uncancelable());
ASSERT_FALSE(future.isReady());
@@ -166,8 +166,8 @@ TEST_F(WaitForMajorityServiceTest, WaitOneOpTime) {
TEST_F(WaitForMajorityServiceTest, WaitWithSameOpTime) {
repl::OpTime t1(Timestamp(1, 0), 2);
- auto future1 = waitService()->waitUntilMajority(t1, CancelationToken::uncancelable());
- auto future1b = waitService()->waitUntilMajority(t1, CancelationToken::uncancelable());
+ auto future1 = waitService()->waitUntilMajority(t1, CancellationToken::uncancelable());
+ auto future1b = waitService()->waitUntilMajority(t1, CancellationToken::uncancelable());
ASSERT_FALSE(future1.isReady());
ASSERT_FALSE(future1b.isReady());
@@ -185,14 +185,14 @@ TEST_F(WaitForMajorityServiceTest, WaitWithOpTimeEarlierThanLowestQueued) {
repl::OpTime earlierOpTime(Timestamp(1, 0), 2);
auto laterFuture =
- waitService()->waitUntilMajority(laterOpTime, CancelationToken::uncancelable());
+ waitService()->waitUntilMajority(laterOpTime, CancellationToken::uncancelable());
// Wait until the background thread picks up the queued opTime.
waitForMajorityCallCountGreaterThan(0);
// The 2nd request has an earlier time, so it will interrupt 'laterOpTime' and skip the line.
auto earlierFuture =
- waitService()->waitUntilMajority(earlierOpTime, CancelationToken::uncancelable());
+ waitService()->waitUntilMajority(earlierOpTime, CancellationToken::uncancelable());
// Wait for background thread to finish transitioning from waiting on laterOpTime to
// earlierOpTime.
@@ -218,8 +218,8 @@ TEST_F(WaitForMajorityServiceTest, WaitWithDifferentOpTime) {
repl::OpTime t1(Timestamp(1, 0), 2);
repl::OpTime t2(Timestamp(14, 0), 2);
- auto future1 = waitService()->waitUntilMajority(t1, CancelationToken::uncancelable());
- auto future2 = waitService()->waitUntilMajority(t2, CancelationToken::uncancelable());
+ auto future1 = waitService()->waitUntilMajority(t1, CancellationToken::uncancelable());
+ auto future2 = waitService()->waitUntilMajority(t2, CancellationToken::uncancelable());
ASSERT_FALSE(future1.isReady());
ASSERT_FALSE(future2.isReady());
@@ -241,8 +241,8 @@ TEST_F(WaitForMajorityServiceTest, WaitWithOpTimeEarlierThanOpTimeAlreadyWaited)
repl::OpTime t1(Timestamp(5, 0), 2);
repl::OpTime t2(Timestamp(14, 0), 2);
- auto future1 = waitService()->waitUntilMajority(t1, CancelationToken::uncancelable());
- auto future2 = waitService()->waitUntilMajority(t2, CancelationToken::uncancelable());
+ auto future1 = waitService()->waitUntilMajority(t1, CancellationToken::uncancelable());
+ auto future2 = waitService()->waitUntilMajority(t2, CancellationToken::uncancelable());
ASSERT_FALSE(future1.isReady());
ASSERT_FALSE(future2.isReady());
@@ -255,9 +255,9 @@ TEST_F(WaitForMajorityServiceTest, WaitWithOpTimeEarlierThanOpTimeAlreadyWaited)
ASSERT_EQ(t1, getLastOpTimeWaited());
repl::OpTime oldTs(Timestamp(4, 0), 2);
- auto oldFuture = waitService()->waitUntilMajority(oldTs, CancelationToken::uncancelable());
+ auto oldFuture = waitService()->waitUntilMajority(oldTs, CancellationToken::uncancelable());
auto alreadyWaitedFuture =
- waitService()->waitUntilMajority(t1, CancelationToken::uncancelable());
+ waitService()->waitUntilMajority(t1, CancellationToken::uncancelable());
ASSERT_FALSE(future2.isReady());
@@ -276,8 +276,8 @@ TEST_F(WaitForMajorityServiceTest, ShutdownShouldCancelQueuedRequests) {
repl::OpTime t1(Timestamp(5, 0), 2);
repl::OpTime t2(Timestamp(14, 0), 2);
- auto future1 = waitService()->waitUntilMajority(t1, CancelationToken::uncancelable());
- auto future2 = waitService()->waitUntilMajority(t2, CancelationToken::uncancelable());
+ auto future1 = waitService()->waitUntilMajority(t1, CancellationToken::uncancelable());
+ auto future2 = waitService()->waitUntilMajority(t2, CancellationToken::uncancelable());
ASSERT_FALSE(future1.isReady());
ASSERT_FALSE(future2.isReady());
@@ -299,13 +299,13 @@ TEST_F(WaitForMajorityServiceTest, WriteConcernErrorGetsPropagatedCorrectly) {
{ErrorCodes::PrimarySteppedDown, "test stepdown"}, Milliseconds(0));
});
- auto future = waitService()->waitUntilMajority(t, CancelationToken::uncancelable());
+ auto future = waitService()->waitUntilMajority(t, CancellationToken::uncancelable());
ASSERT_THROWS_CODE(future.get(), AssertionException, ErrorCodes::PrimarySteppedDown);
}
TEST_F(WaitForMajorityServiceTest, CanCancelWaitOnOneOptime) {
repl::OpTime t(Timestamp(1, 2), 4);
- CancelationSource source;
+ CancellationSource source;
auto future = waitService()->waitUntilMajority(t, source.token());
ASSERT_FALSE(future.isReady());
source.cancel();
@@ -316,10 +316,10 @@ TEST_F(WaitForMajorityServiceTest, CanCancelWaitOnOneOptime) {
TEST_F(WaitForMajorityServiceTest, CancelingEarlierOpTimeRequestDoesNotAffectLaterOpTimeRequests) {
repl::OpTime earlier(Timestamp(1, 2), 4);
repl::OpTime later(Timestamp(5, 2), 5);
- CancelationSource source;
+ CancellationSource source;
auto cancelFuture = waitService()->waitUntilMajority(earlier, source.token());
auto uncancelableFuture =
- waitService()->waitUntilMajority(later, CancelationToken::uncancelable());
+ waitService()->waitUntilMajority(later, CancellationToken::uncancelable());
ASSERT_FALSE(cancelFuture.isReady());
ASSERT_FALSE(uncancelableFuture.isReady());
// Wait until the background thread picks up the initial request. Otherwise, there is a race
@@ -340,10 +340,10 @@ TEST_F(WaitForMajorityServiceTest, CancelingEarlierOpTimeRequestDoesNotAffectLat
TEST_F(WaitForMajorityServiceTest, CancelingOneRequestOnOpTimeDoesNotAffectOthersOnSameOpTime) {
repl::OpTime t1(Timestamp(1, 2), 4);
repl::OpTime t1Dupe(Timestamp(1, 2), 4);
- CancelationSource source;
+ CancellationSource source;
auto cancelFuture = waitService()->waitUntilMajority(t1, source.token());
auto uncancelableFuture =
- waitService()->waitUntilMajority(t1Dupe, CancelationToken::uncancelable());
+ waitService()->waitUntilMajority(t1Dupe, CancellationToken::uncancelable());
ASSERT_FALSE(cancelFuture.isReady());
ASSERT_FALSE(uncancelableFuture.isReady());
source.cancel();
@@ -358,12 +358,12 @@ TEST_F(WaitForMajorityServiceTest, CancelingOneRequestOnOpTimeDoesNotAffectOther
TEST_F(WaitForMajorityServiceTest, CancelingLaterOpTimeRequestDoesNotAffectEarlierOpTimeRequests) {
repl::OpTime t1(Timestamp(1, 2), 4);
repl::OpTime smallerOpTime(Timestamp(1, 2), 1);
- CancelationSource source;
+ CancellationSource source;
auto cancelFuture = waitService()->waitUntilMajority(t1, source.token());
// Wait until the background thread picks up the queued opTime.
waitForMajorityCallCountGreaterThan(0);
auto earlierFuture =
- waitService()->waitUntilMajority(smallerOpTime, CancelationToken::uncancelable());
+ waitService()->waitUntilMajority(smallerOpTime, CancellationToken::uncancelable());
// Wait for background thread to finish transitioning from waiting on t1 to smallerOpTime.
waitForMajorityCallCountGreaterThan(1);
ASSERT_FALSE(cancelFuture.isReady());
@@ -379,7 +379,7 @@ TEST_F(WaitForMajorityServiceTest, CancelingLaterOpTimeRequestDoesNotAffectEarli
TEST_F(WaitForMajorityServiceTest, SafeToCallCancelOnRequestAlreadyCompletedByShutdown) {
repl::OpTime t(Timestamp(1, 2), 4);
- CancelationSource source;
+ CancellationSource source;
auto deadFuture = waitService()->waitUntilMajority(t, source.token());
ASSERT_FALSE(deadFuture.isReady());
waitService()->shutDown();
@@ -390,7 +390,7 @@ TEST_F(WaitForMajorityServiceTest, SafeToCallCancelOnRequestAlreadyCompletedBySh
TEST_F(WaitForMajorityServiceTest, SafeToCallCancelOnRequestAlreadyCompletedByWaiting) {
repl::OpTime t(Timestamp(1, 2), 4);
- CancelationSource source;
+ CancellationSource source;
auto future = waitService()->waitUntilMajority(t, source.token());
ASSERT_FALSE(future.isReady());
waitForMajorityCallCountGreaterThan(0);
@@ -402,7 +402,7 @@ TEST_F(WaitForMajorityServiceTest, SafeToCallCancelOnRequestAlreadyCompletedByWa
TEST_F(WaitForMajorityServiceTest, PassingAlreadyCanceledTokenCompletesFutureWithNoWaiting) {
repl::OpTime t(Timestamp(1, 2), 4);
- CancelationSource source;
+ CancellationSource source;
source.cancel();
auto future = waitService()->waitUntilMajority(t, source.token());
ASSERT_EQ(future.getNoThrow(), kCanceledStatus);
diff --git a/src/mongo/db/s/drop_collection_coordinator.cpp b/src/mongo/db/s/drop_collection_coordinator.cpp
index c9fb4705341..afe8968c2ff 100644
--- a/src/mongo/db/s/drop_collection_coordinator.cpp
+++ b/src/mongo/db/s/drop_collection_coordinator.cpp
@@ -123,7 +123,7 @@ void DropCollectionCoordinator::_removeStateDocument() {
ExecutorFuture<void> DropCollectionCoordinator::_runImpl(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) noexcept {
+ const CancellationToken& token) noexcept {
return ExecutorFuture<void>(**executor)
.then(_executePhase(Phase::kFreezeCollection,
[this, anchor = shared_from_this()] {
diff --git a/src/mongo/db/s/drop_collection_coordinator.h b/src/mongo/db/s/drop_collection_coordinator.h
index 72c758a450f..d858b9618b6 100644
--- a/src/mongo/db/s/drop_collection_coordinator.h
+++ b/src/mongo/db/s/drop_collection_coordinator.h
@@ -50,7 +50,7 @@ public:
private:
ExecutorFuture<void> _runImpl(std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) noexcept override;
+ const CancellationToken& token) noexcept override;
template <typename Func>
auto _executePhase(const Phase& newPhase, Func&& func) {
diff --git a/src/mongo/db/s/drop_database_coordinator.cpp b/src/mongo/db/s/drop_database_coordinator.cpp
index 20bb54af73b..2dc75e83a7c 100644
--- a/src/mongo/db/s/drop_database_coordinator.cpp
+++ b/src/mongo/db/s/drop_database_coordinator.cpp
@@ -168,7 +168,7 @@ void DropDatabaseCoordinator::_removeStateDocument() {
ExecutorFuture<void> DropDatabaseCoordinator::_runImpl(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) noexcept {
+ const CancellationToken& token) noexcept {
return ExecutorFuture<void>(**executor)
.then(_executePhase(
Phase::kDrop,
diff --git a/src/mongo/db/s/drop_database_coordinator.h b/src/mongo/db/s/drop_database_coordinator.h
index 1388e594f9e..37dc4dcc099 100644
--- a/src/mongo/db/s/drop_database_coordinator.h
+++ b/src/mongo/db/s/drop_database_coordinator.h
@@ -50,7 +50,7 @@ public:
private:
ExecutorFuture<void> _runImpl(std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) noexcept override;
+ const CancellationToken& token) noexcept override;
template <typename Func>
auto _executePhase(const Phase& newPhase, Func&& func) {
diff --git a/src/mongo/db/s/range_deletion_util.cpp b/src/mongo/db/s/range_deletion_util.cpp
index 7ba16c13cc5..239b32b1a9b 100644
--- a/src/mongo/db/s/range_deletion_util.cpp
+++ b/src/mongo/db/s/range_deletion_util.cpp
@@ -62,7 +62,7 @@
#include "mongo/db/write_concern.h"
#include "mongo/executor/task_executor.h"
#include "mongo/logv2/log.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
#include "mongo/util/future_util.h"
namespace mongo {
@@ -356,7 +356,7 @@ ExecutorFuture<void> deleteRangeInBatches(const std::shared_ptr<executor::TaskEx
ErrorCodes::isNotPrimaryError(swNumDeleted.getStatus());
})
.withDelayBetweenIterations(delayBetweenBatches)
- .on(executor, CancelationToken::uncancelable())
+ .on(executor, CancellationToken::uncancelable())
.ignoreValue();
}
@@ -406,7 +406,7 @@ ExecutorFuture<void> waitForDeletionsToMajorityReplicate(
// Asynchronously wait for majority write concern.
return WaitForMajorityService::get(opCtx->getServiceContext())
- .waitUntilMajority(clientOpTime, CancelationToken::uncancelable())
+ .waitUntilMajority(clientOpTime, CancellationToken::uncancelable())
.thenRunOn(executor);
});
}
diff --git a/src/mongo/db/s/resharding/resharding_collection_cloner.cpp b/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
index 61c2c4d7c5f..b5ba45d7fa2 100644
--- a/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
+++ b/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
@@ -343,7 +343,7 @@ auto ReshardingCollectionCloner::_withTemporaryOperationContext(Callable&& calla
}
SemiFuture<void> ReshardingCollectionCloner::run(std::shared_ptr<executor::TaskExecutor> executor,
- CancelationToken cancelToken) {
+ CancellationToken cancelToken) {
struct ChainContext {
std::unique_ptr<Pipeline, PipelineDeleter> pipeline;
bool moreToCome = true;
@@ -372,7 +372,7 @@ SemiFuture<void> ReshardingCollectionCloner::run(std::shared_ptr<executor::TaskE
});
}
- if (status.isA<ErrorCategory::CancelationError>() ||
+ if (status.isA<ErrorCategory::CancellationError>() ||
status.isA<ErrorCategory::NotPrimaryError>()) {
// Cancellation and NotPrimary errors indicate the primary-only service Instance
// will be shut down or is shutting down now - provided the cancelToken is also
@@ -411,7 +411,7 @@ SemiFuture<void> ReshardingCollectionCloner::run(std::shared_ptr<executor::TaskE
.on(executor, cancelToken)
.onCompletion([this, chainCtx](Status status) {
if (chainCtx->pipeline) {
- // Guarantee the pipeline is always cleaned up - even upon cancelation.
+ // Guarantee the pipeline is always cleaned up - even upon cancellation.
_withTemporaryOperationContext([&](auto* opCtx) {
chainCtx->pipeline->dispose(opCtx);
chainCtx->pipeline.reset();
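The onError classification above decides between retrying and surfacing an error purely by category. A one-function sketch of that check, using the Status::isA<ErrorCategory::...>() helpers exactly as they appear in this file (the function name is illustrative):

    #include "mongo/base/error_codes.h"
    #include "mongo/base/status.h"

    namespace mongo {
    // Cancellation and NotPrimary errors mean the primary-only service Instance is
    // being shut down (or already is), so the cloner surfaces them instead of retrying.
    bool clonerShouldSurfaceError(const Status& status) {
        return status.isA<ErrorCategory::CancellationError>() ||
            status.isA<ErrorCategory::NotPrimaryError>();
    }
    }  // namespace mongo
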
diff --git a/src/mongo/db/s/resharding/resharding_collection_cloner.h b/src/mongo/db/s/resharding/resharding_collection_cloner.h
index d3b4f624422..5f31adaddb3 100644
--- a/src/mongo/db/s/resharding/resharding_collection_cloner.h
+++ b/src/mongo/db/s/resharding/resharding_collection_cloner.h
@@ -38,7 +38,7 @@
#include "mongo/db/pipeline/pipeline.h"
#include "mongo/s/shard_id.h"
#include "mongo/s/shard_key_pattern.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
#include "mongo/util/future.h"
namespace mongo {
@@ -93,7 +93,7 @@ public:
* (b) the cancellation token was canceled due to a stepdown or abort.
*/
SemiFuture<void> run(std::shared_ptr<executor::TaskExecutor> executor,
- CancelationToken cancelToken);
+ CancellationToken cancelToken);
/**
* Fetches and inserts a single batch of documents.
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
index 6c0b2c63a4a..987c2e5ce26 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
@@ -71,7 +71,7 @@ MONGO_FAIL_POINT_DEFINE(reshardingPauseCoordinatorBeforeCompletion)
const std::string kReshardingCoordinatorActiveIndexName = "ReshardingCoordinatorActiveIndex";
const Backoff kExponentialBackoff(Seconds(1), Milliseconds::max());
-bool shouldStopAttemptingToCreateIndex(Status status, const CancelationToken& token) {
+bool shouldStopAttemptingToCreateIndex(Status status, const CancellationToken& token) {
return status.isOK() || token.isCanceled();
}
@@ -794,7 +794,7 @@ std::shared_ptr<repl::PrimaryOnlyService::Instance> ReshardingCoordinatorService
}
ExecutorFuture<void> ReshardingCoordinatorService::_rebuildService(
- std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancelationToken& token) {
+ std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancellationToken& token) {
return AsyncTry([this] {
auto nss = getStateDocumentsNS();
@@ -815,7 +815,7 @@ ExecutorFuture<void> ReshardingCoordinatorService::_rebuildService(
})
.until([token](Status status) { return shouldStopAttemptingToCreateIndex(status, token); })
.withBackoffBetweenIterations(kExponentialBackoff)
- .on(**executor, CancelationToken::uncancelable());
+ .on(**executor, CancellationToken::uncancelable());
}
ReshardingCoordinatorService::ReshardingCoordinator::ReshardingCoordinator(const BSONObj& state)
@@ -859,7 +859,7 @@ void ReshardingCoordinatorService::ReshardingCoordinator::installCoordinatorDoc(
}
ExecutorFuture<void> waitForMinimumOperationDuration(
- std::shared_ptr<executor::TaskExecutor> executor, const CancelationToken& token) {
+ std::shared_ptr<executor::TaskExecutor> executor, const CancellationToken& token) {
// Ensure to have at least `minDuration` elapsed after starting the operation and before
// engaging the critical section, unless the operation is already interrupted or canceled.
const auto minDuration =
@@ -906,7 +906,7 @@ BSONObj createFinishReshardCollectionCommand(const NamespaceString& nss) {
SemiFuture<void> ReshardingCoordinatorService::ReshardingCoordinator::run(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) noexcept {
+ const CancellationToken& token) noexcept {
return ExecutorFuture<void>(**executor)
.then([this, executor] { _insertCoordDocAndChangeOrigCollEntry(); })
.then([this, executor] { _calculateParticipantsAndChunksThenWriteToDisk(); })
@@ -966,7 +966,7 @@ SemiFuture<void> ReshardingCoordinatorService::ReshardingCoordinator::run(
// Wait for all participants to acknowledge the operation reached an unrecoverable
// error.
- future_util::withCancelation(
+ future_util::withCancellation(
_reshardingCoordinatorObserver->awaitAllParticipantsDoneAborting(), token)
.get();
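The _rebuildService() hunk above is one instance of a general retry shape: attempt idempotent work, back off between attempts, and stop once the work succeeds or the caller's token is canceled. A minimal sketch of that shape using only the calls visible in the hunk (the function name is hypothetical, not code from this changeset):

    ExecutorFuture<void> retryUntilDoneOrCanceled(
        std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancellationToken& token) {
        static const Backoff kBackoff(Seconds(1), Milliseconds::max());
        return AsyncTry([] {
                   // Idempotent work goes here, e.g. creating the state-document index.
               })
            .until([token](Status status) { return status.isOK() || token.isCanceled(); })
            .withBackoffBetweenIterations(kBackoff)
            // The loop itself decides when to stop, so it runs on an uncancelable token.
            .on(**executor, CancellationToken::uncancelable());
    }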
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service.h b/src/mongo/db/s/resharding/resharding_coordinator_service.h
index 7880d2ffc9c..a814988922f 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service.h
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service.h
@@ -124,7 +124,7 @@ public:
private:
ExecutorFuture<void> _rebuildService(std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) override;
+ const CancellationToken& token) override;
};
class ReshardingCoordinatorService::ReshardingCoordinator final
@@ -134,7 +134,7 @@ public:
~ReshardingCoordinator();
SemiFuture<void> run(std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) noexcept override;
+ const CancellationToken& token) noexcept override;
void interrupt(Status status) override;
diff --git a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp
index bfbd5d4270a..a7ee508664e 100644
--- a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp
@@ -208,7 +208,7 @@ std::vector<repl::OplogEntry> ReshardingDonorOplogIterator::_fillBatch(Pipeline&
}
ExecutorFuture<std::vector<repl::OplogEntry>> ReshardingDonorOplogIterator::getNextBatch(
- std::shared_ptr<executor::TaskExecutor> executor, CancelationToken cancelToken) {
+ std::shared_ptr<executor::TaskExecutor> executor, CancellationToken cancelToken) {
if (_hasSeenFinalOplogEntry) {
invariant(!_pipeline);
return ExecutorFuture(std::move(executor), std::vector<repl::OplogEntry>{});
@@ -248,8 +248,8 @@ ExecutorFuture<std::vector<repl::OplogEntry>> ReshardingDonorOplogIterator::getN
if (batch.empty() && !_hasSeenFinalOplogEntry) {
return ExecutorFuture(executor)
.then([this, cancelToken] {
- return future_util::withCancelation(_insertNotifier->awaitInsert(_resumeToken),
- cancelToken);
+ return future_util::withCancellation(_insertNotifier->awaitInsert(_resumeToken),
+ cancelToken);
})
.then([this, cancelToken, executor] {
return getNextBatch(std::move(executor), cancelToken);
diff --git a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.h b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.h
index c799bb9d834..c152e6d04c7 100644
--- a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.h
+++ b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.h
@@ -69,7 +69,7 @@ public:
* final oplog entry hasn't been returned yet.
*/
virtual ExecutorFuture<std::vector<repl::OplogEntry>> getNextBatch(
- std::shared_ptr<executor::TaskExecutor> executor, CancelationToken cancelToken) = 0;
+ std::shared_ptr<executor::TaskExecutor> executor, CancellationToken cancelToken) = 0;
};
/**
@@ -94,7 +94,7 @@ public:
OperationContext* opCtx, std::shared_ptr<MongoProcessInterface> mongoProcessInterface);
ExecutorFuture<std::vector<repl::OplogEntry>> getNextBatch(
- std::shared_ptr<executor::TaskExecutor> executor, CancelationToken cancelToken) override;
+ std::shared_ptr<executor::TaskExecutor> executor, CancellationToken cancelToken) override;
static constexpr auto kActualOpFieldName = "actualOp"_sd;
static constexpr auto kPreImageOpFieldName = "preImageOp"_sd;
diff --git a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp
index 4338bb7399b..341bbbb92f6 100644
--- a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp
@@ -177,7 +177,7 @@ public:
// triggering an invariant due to the task executor's thread having a Client still.
return ExecutorFuture(executor)
.then([iter, executor] {
- return iter->getNextBatch(std::move(executor), CancelationToken::uncancelable());
+ return iter->getNextBatch(std::move(executor), CancellationToken::uncancelable());
})
.then([](auto x) { return x; })
.get();
diff --git a/src/mongo/db/s/resharding/resharding_donor_service.cpp b/src/mongo/db/s/resharding/resharding_donor_service.cpp
index f4d63c85c78..ed804b1dfc2 100644
--- a/src/mongo/db/s/resharding/resharding_donor_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_service.cpp
@@ -180,7 +180,7 @@ ReshardingDonorService::DonorStateMachine::~DonorStateMachine() {
SemiFuture<void> ReshardingDonorService::DonorStateMachine::run(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) noexcept {
+ const CancellationToken& token) noexcept {
return ExecutorFuture<void>(**executor)
.then(
[this] { _onPreparingToDonateCalculateTimestampThenTransitionToDonatingInitialData(); })
@@ -599,7 +599,7 @@ ExecutorFuture<void> ReshardingDonorService::DonorStateMachine::_updateCoordinat
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
auto clientOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
return WaitForMajorityService::get(opCtx->getServiceContext())
- .waitUntilMajority(clientOpTime, CancelationToken::uncancelable())
+ .waitUntilMajority(clientOpTime, CancellationToken::uncancelable())
.thenRunOn(**executor)
.then([this] {
auto opCtx = cc().makeOperationContext();
diff --git a/src/mongo/db/s/resharding/resharding_donor_service.h b/src/mongo/db/s/resharding/resharding_donor_service.h
index fee37071046..cf5db786a75 100644
--- a/src/mongo/db/s/resharding/resharding_donor_service.h
+++ b/src/mongo/db/s/resharding/resharding_donor_service.h
@@ -79,7 +79,7 @@ public:
~DonorStateMachine();
SemiFuture<void> run(std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) noexcept override;
+ const CancellationToken& token) noexcept override;
void interrupt(Status status) override;
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier.cpp
index 73503728507..114605b4ef2 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier.cpp
@@ -273,7 +273,7 @@ ReshardingOplogApplier::ReshardingOplogApplier(
_oplogIter(std::move(oplogIterator)) {}
ExecutorFuture<void> ReshardingOplogApplier::applyUntilCloneFinishedTs(
- CancelationToken cancelToken) {
+ CancellationToken cancelToken) {
invariant(_stage == ReshardingOplogApplier::Stage::kStarted);
// It is safe to capture `this` because PrimaryOnlyService and RecipientStateMachine
@@ -284,7 +284,7 @@ ExecutorFuture<void> ReshardingOplogApplier::applyUntilCloneFinishedTs(
.onError([this](Status status) { return _onError(status); });
}
-ExecutorFuture<void> ReshardingOplogApplier::applyUntilDone(CancelationToken cancelToken) {
+ExecutorFuture<void> ReshardingOplogApplier::applyUntilDone(CancellationToken cancelToken) {
invariant(_stage == ReshardingOplogApplier::Stage::kReachedCloningTS);
// It is safe to capture `this` because PrimaryOnlyService and RecipientStateMachine
@@ -295,7 +295,7 @@ ExecutorFuture<void> ReshardingOplogApplier::applyUntilDone(CancelationToken can
.onError([this](Status status) { return _onError(status); });
}
-ExecutorFuture<void> ReshardingOplogApplier::_scheduleNextBatch(CancelationToken cancelToken) {
+ExecutorFuture<void> ReshardingOplogApplier::_scheduleNextBatch(CancellationToken cancelToken) {
return ExecutorFuture(_executor)
.then([this, cancelToken] {
auto batchClient = makeKillableClient(_service(), kClientName);
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier.h b/src/mongo/db/s/resharding/resharding_oplog_applier.h
index 061a7b17d50..bdecb1d5184 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier.h
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier.h
@@ -102,7 +102,7 @@ public:
* greater than or equal to reshardingCloneFinishedTs.
* It is undefined to call applyUntilCloneFinishedTs more than once.
*/
- ExecutorFuture<void> applyUntilCloneFinishedTs(CancelationToken cancelToken);
+ ExecutorFuture<void> applyUntilCloneFinishedTs(CancellationToken cancelToken);
/**
* Applies oplog from the iterator until it is exhausted or hits an error. It is an error to
@@ -111,7 +111,7 @@ public:
* It is an error to call this when applyUntilCloneFinishedTs future returns an error.
* It is undefined to call applyUntilDone more than once.
*/
- ExecutorFuture<void> applyUntilDone(CancelationToken cancelToken);
+ ExecutorFuture<void> applyUntilDone(CancellationToken cancelToken);
static boost::optional<ReshardingOplogApplierProgress> checkStoredProgress(
OperationContext* opCtx, const ReshardingSourceId& id);
@@ -130,7 +130,7 @@ private:
* Returns a future that becomes ready when the next batch of oplog entries have been collected
* and applied.
*/
- ExecutorFuture<void> _scheduleNextBatch(CancelationToken cancelToken);
+ ExecutorFuture<void> _scheduleNextBatch(CancellationToken cancelToken);
/**
* Setup the worker threads to apply the ops in the current buffer in parallel. Waits for all
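The comments above pin down a strict calling contract: applyUntilCloneFinishedTs() first, applyUntilDone() second, each at most once. A minimal sketch of a conforming caller, modeled on the ReshardingOplogApplierTest cases further down in this diff (the wrapper function is hypothetical):

    void applyAllEntries(ReshardingOplogApplier& applier) {
        CancellationSource abortSource;
        // Phase 1: apply until the clone-finished timestamp is reached.
        applier.applyUntilCloneFinishedTs(abortSource.token()).get();
        // Phase 2: drain the remaining oplog entries.
        applier.applyUntilDone(abortSource.token()).get();
        // Calling abortSource.cancel() from elsewhere fails the pending phase with a
        // CancellationError-category status instead of letting it run to completion.
    }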
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
index 224adf089f8..649691b7e52 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
@@ -72,7 +72,7 @@ public:
}
ExecutorFuture<std::vector<repl::OplogEntry>> getNextBatch(
- std::shared_ptr<executor::TaskExecutor> executor, CancelationToken cancelToken) override {
+ std::shared_ptr<executor::TaskExecutor> executor, CancellationToken cancelToken) override {
// This operation context is unused by the function but confirms that the Client calling
// getNextBatch() doesn't already have an operation context.
auto opCtx = cc().makeOperationContext();
@@ -336,7 +336,7 @@ TEST_F(ReshardingOplogApplierTest, NothingToIterate) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -380,7 +380,7 @@ TEST_F(ReshardingOplogApplierTest, ApplyBasicCrud) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -440,7 +440,7 @@ TEST_F(ReshardingOplogApplierTest, CanceledCloningBatch) {
writerPool.get());
// Cancel the rescheduling of the next batch.
- auto abortSource = CancelationSource();
+ auto abortSource = CancellationSource();
abortSource.cancel();
auto future = applier->applyUntilCloneFinishedTs(abortSource.token());
@@ -484,7 +484,7 @@ TEST_F(ReshardingOplogApplierTest, CanceledApplyingBatch) {
executor,
writerPool.get());
- auto abortSource = CancelationSource();
+ auto abortSource = CancellationSource();
auto future = applier->applyUntilCloneFinishedTs(abortSource.token());
future.get();
@@ -521,7 +521,7 @@ TEST_F(ReshardingOplogApplierTest, InsertTypeOplogAppliedInMultipleBatches) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -582,7 +582,7 @@ TEST_F(ReshardingOplogApplierTest, ErrorDuringBatchApplyCloningPhase) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
ASSERT_THROWS_CODE(future.get(), DBException, ErrorCodes::FailedToParse);
@@ -631,7 +631,7 @@ TEST_F(ReshardingOplogApplierTest, ErrorDuringBatchApplyCatchUpPhase) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -681,7 +681,7 @@ TEST_F(ReshardingOplogApplierTest, ErrorWhileIteratingFirstOplogCloningPhase) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
ASSERT_THROWS_CODE(future.get(), DBException, ErrorCodes::InternalError);
@@ -728,7 +728,7 @@ TEST_F(ReshardingOplogApplierTest, ErrorWhileIteratingFirstOplogCatchUpPhase) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -775,7 +775,7 @@ TEST_F(ReshardingOplogApplierTest, ErrorWhileIteratingFirstBatchCloningPhase) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
ASSERT_THROWS_CODE(future.get(), DBException, ErrorCodes::InternalError);
@@ -826,7 +826,7 @@ TEST_F(ReshardingOplogApplierTest, ErrorWhileIteratingFirstBatchCatchUpPhase) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -878,7 +878,7 @@ TEST_F(ReshardingOplogApplierTest, ErrorWhileIteratingSecondBatchCloningPhase) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
ASSERT_THROWS_CODE(future.get(), DBException, ErrorCodes::InternalError);
@@ -941,7 +941,7 @@ TEST_F(ReshardingOplogApplierTest, ErrorWhileIteratingSecondBatchCatchUpPhase) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -997,7 +997,7 @@ TEST_F(ReshardingOplogApplierTest, ExecutorIsShutDownCloningPhase) {
executor->shutdown();
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
ASSERT_THROWS_CODE(future.get(), DBException, ErrorCodes::ShutdownInProgress);
@@ -1041,7 +1041,7 @@ TEST_F(ReshardingOplogApplierTest, ExecutorIsShutDownCatchUpPhase) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -1086,7 +1086,7 @@ TEST_F(ReshardingOplogApplierTest, WriterPoolIsShutDownCloningPhase) {
writerPool->shutdown();
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
ASSERT_THROWS_CODE(future.get(), DBException, ErrorCodes::ShutdownInProgress);
@@ -1130,7 +1130,7 @@ TEST_F(ReshardingOplogApplierTest, WriterPoolIsShutDownCatchUpPhase) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -1187,7 +1187,7 @@ TEST_F(ReshardingOplogApplierTest, InsertOpIntoOuputCollectionUseReshardingAppli
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -1246,7 +1246,7 @@ TEST_F(ReshardingOplogApplierTest,
DBDirectClient client(operationContext());
client.insert(appliedToNs().toString(), BSON("_id" << 1 << "sk" << 1));
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -1300,7 +1300,7 @@ TEST_F(ReshardingOplogApplierTest,
DBDirectClient client(operationContext());
client.insert(appliedToNs().toString(), BSON("_id" << 1 << "sk" << -1));
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -1367,7 +1367,7 @@ TEST_F(ReshardingOplogApplierTest,
DBDirectClient client(operationContext());
client.insert(appliedToNs().toString(), BSON("_id" << 1 << "sk" << -1));
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -1433,7 +1433,7 @@ TEST_F(ReshardingOplogApplierTest,
DBDirectClient client(operationContext());
client.insert(appliedToNs().ns(), BSON("_id" << 1 << "sk" << -1));
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -1501,7 +1501,7 @@ TEST_F(ReshardingOplogApplierTest,
writerPool.get());
// Apply the inserts first so there exists docs in the output collection
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -1573,7 +1573,7 @@ TEST_F(ReshardingOplogApplierTest,
DBDirectClient client(operationContext());
client.insert(stashCollections()[1].toString(), BSON("_id" << 1 << "sk" << -3));
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -1662,7 +1662,7 @@ TEST_F(ReshardingOplogApplierTest, UpdateShouldModifyStashCollectionUseReshardin
DBDirectClient client(operationContext());
client.insert(appliedToNs().toString(), BSON("_id" << 1 << "sk" << -1));
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -1727,7 +1727,7 @@ TEST_F(ReshardingOplogApplierTest, UpdateShouldDoNothingUseReshardingApplication
DBDirectClient client(operationContext());
client.insert(appliedToNs().ns(), BSON("_id" << 1 << "sk" << -1));
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -1794,7 +1794,7 @@ TEST_F(ReshardingOplogApplierTest, UpdateOutputCollUseReshardingApplicationRules
writerPool.get());
// Apply the inserts first so there exists docs in the output collection.
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -1854,7 +1854,7 @@ TEST_F(ReshardingOplogApplierTest, UnsupportedCommandOpsShouldErrorUseResharding
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -1900,7 +1900,7 @@ TEST_F(ReshardingOplogApplierTest,
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
ASSERT_THROWS_CODE(future.get(), DBException, ErrorCodes::OplogOperationUnsupported);
@@ -2361,7 +2361,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, GroupInserts) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -2449,7 +2449,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, CrudWithEmptyConfigTransactions) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -2536,7 +2536,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, MultipleTxnSameLsidInOneBatch) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -2597,7 +2597,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, RetryableWithLowerExistingTxn) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -2651,7 +2651,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, RetryableWithHigherExistingTxnNum) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -2716,7 +2716,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, RetryableWithEqualExistingTxnNum) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -2771,7 +2771,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, RetryableWithStmtIdAlreadyExecuted)
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -2828,7 +2828,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, RetryableWithActiveUnpreparedTxnSame
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -2886,7 +2886,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, RetryableWithActiveUnpreparedTxnWith
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -2943,7 +2943,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, RetryableWithPreparedTxnThatWillComm
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
// Sleep a little bit to make the applier block on the prepared transaction.
@@ -3008,7 +3008,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, RetryableWithPreparedTxnThatWillAbor
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
// Sleep a little bit to make the applier block on the prepared transaction.
@@ -3080,7 +3080,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, RetryableWriteWithPreImage) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -3147,7 +3147,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, RetryableWriteWithPostImage) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -3197,7 +3197,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, ApplyTxnWithLowerExistingTxn) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -3248,7 +3248,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, ApplyTxnWithHigherExistingTxnNum) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -3308,7 +3308,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, ApplyTxnWithEqualExistingTxnNum) {
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -3362,7 +3362,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, ApplyTxnWithActiveUnpreparedTxnSameT
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -3420,7 +3420,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, ApplyTxnActiveUnpreparedTxnWithLower
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
future.get();
@@ -3477,7 +3477,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, ApplyTxnWithPreparedTxnThatWillCommi
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
// Sleep a little bit to make the applier block on the prepared transaction.
@@ -3538,7 +3538,7 @@ TEST_F(ReshardingOplogApplierRetryableTest, ApplyTxnWithPreparedTxnThatWillAbort
executor,
writerPool.get());
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
auto future = applier->applyUntilCloneFinishedTs(cancelToken);
// Sleep a little bit to make the applier block on the prepared transaction.
@@ -3595,7 +3595,7 @@ TEST_F(ReshardingOplogApplierTest, MetricsAreReported) {
ASSERT_EQ(metricsAppliedCount(), 0);
- auto cancelToken = operationContext()->getCancelationToken();
+ auto cancelToken = operationContext()->getCancellationToken();
applier.applyUntilCloneFinishedTs(cancelToken).get(); // Stop at clone timestamp 7
ASSERT_EQ(metricsAppliedCount(),
diff --git a/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp b/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp
index 8e69accb0bc..00023783a9d 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp
@@ -132,7 +132,7 @@ Future<void> ReshardingOplogFetcher::awaitInsert(const ReshardingDonorOplogId& l
}
ExecutorFuture<void> ReshardingOplogFetcher::schedule(
- std::shared_ptr<executor::TaskExecutor> executor, const CancelationToken& cancelToken) {
+ std::shared_ptr<executor::TaskExecutor> executor, const CancellationToken& cancelToken) {
return ExecutorFuture(executor)
.then(
[this, executor, cancelToken] { return _reschedule(std::move(executor), cancelToken); })
@@ -143,7 +143,7 @@ ExecutorFuture<void> ReshardingOplogFetcher::schedule(
}
ExecutorFuture<void> ReshardingOplogFetcher::_reschedule(
- std::shared_ptr<executor::TaskExecutor> executor, const CancelationToken& cancelToken) {
+ std::shared_ptr<executor::TaskExecutor> executor, const CancellationToken& cancelToken) {
return ExecutorFuture(executor)
.then([this, executor, cancelToken] {
ThreadClient client(fmt::format("OplogFetcher-{}-{}",
diff --git a/src/mongo/db/s/resharding/resharding_oplog_fetcher.h b/src/mongo/db/s/resharding/resharding_oplog_fetcher.h
index 43353369486..d9ea9d56434 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_fetcher.h
+++ b/src/mongo/db/s/resharding/resharding_oplog_fetcher.h
@@ -42,7 +42,7 @@
#include "mongo/s/client/shard.h"
#include "mongo/s/shard_id.h"
#include "mongo/util/background.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
#include "mongo/util/future.h"
#include "mongo/util/time_support.h"
#include "mongo/util/uuid.h"
@@ -96,7 +96,7 @@ public:
* will be rescheduled in a way that resumes where it had left off from.
*/
ExecutorFuture<void> schedule(std::shared_ptr<executor::TaskExecutor> executor,
- const CancelationToken& cancelToken);
+ const CancellationToken& cancelToken);
/**
* Given a shard, fetches and copies oplog entries until
@@ -138,7 +138,7 @@ private:
void _ensureCollection(Client* client, const NamespaceString nss);
AggregateCommand _makeAggregateCommand(Client* client);
ExecutorFuture<void> _reschedule(std::shared_ptr<executor::TaskExecutor> executor,
- const CancelationToken& cancelToken);
+ const CancellationToken& cancelToken);
ServiceContext* _service() const {
return _env->service();
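The schedule() comment above describes a self-rescheduling fetch loop that resumes from where it left off. A minimal sketch of the call site (the wrapper is hypothetical); the returned future stays outstanding while the fetcher keeps rescheduling itself, and canceling the token is what is expected to end the loop:

    ExecutorFuture<void> startOplogFetcher(ReshardingOplogFetcher& fetcher,
                                           std::shared_ptr<executor::TaskExecutor> executor,
                                           const CancellationToken& cancelToken) {
        return fetcher.schedule(std::move(executor), cancelToken);
    }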
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service.cpp b/src/mongo/db/s/resharding/resharding_recipient_service.cpp
index 935e1c2b41c..d76740a1411 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service.cpp
@@ -239,7 +239,7 @@ ReshardingRecipientService::RecipientStateMachine::~RecipientStateMachine() {
SemiFuture<void> ReshardingRecipientService::RecipientStateMachine::run(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& stepdownToken) noexcept {
+ const CancellationToken& stepdownToken) noexcept {
auto abortToken = _initAbortSource(stepdownToken);
return ExecutorFuture<void>(**executor)
@@ -315,7 +315,7 @@ SemiFuture<void> ReshardingRecipientService::RecipientStateMachine::run(
_completionPromise.emplaceValue();
}
} else {
- _metrics()->onCompletion(ErrorCodes::isCancelationError(status)
+ _metrics()->onCompletion(ErrorCodes::isCancellationError(status)
? ReshardingOperationStatusEnum::kCanceled
: ReshardingOperationStatusEnum::kFailure);
stdx::lock_guard<Latch> lg(_mutex);
@@ -445,7 +445,7 @@ void ReshardingRecipientService::RecipientStateMachine::_initTxnCloner(
ExecutorFuture<void>
ReshardingRecipientService::RecipientStateMachine::_cloneThenTransitionToApplying(
const std::shared_ptr<executor::ScopedTaskExecutor>& executor,
- const CancelationToken& abortToken) {
+ const CancellationToken& abortToken) {
if (_recipientCtx.getState() > RecipientStateEnum::kCloning) {
return ExecutorFuture(**executor);
}
@@ -560,7 +560,7 @@ ReshardingRecipientService::RecipientStateMachine::_applyThenTransitionToSteadyS
ExecutorFuture<void> ReshardingRecipientService::RecipientStateMachine::
_awaitAllDonorsBlockingWritesThenTransitionToStrictConsistency(
const std::shared_ptr<executor::ScopedTaskExecutor>& executor,
- const CancelationToken& abortToken) {
+ const CancellationToken& abortToken) {
if (_recipientCtx.getState() > RecipientStateEnum::kSteadyState) {
return ExecutorFuture<void>(**executor, Status::OK());
}
@@ -824,7 +824,7 @@ ExecutorFuture<void> ReshardingRecipientService::RecipientStateMachine::_updateC
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
auto clientOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
return WaitForMajorityService::get(opCtx->getServiceContext())
- .waitUntilMajority(clientOpTime, CancelationToken::uncancelable())
+ .waitUntilMajority(clientOpTime, CancellationToken::uncancelable())
.thenRunOn(**executor)
.then([this] {
auto opCtx = cc().makeOperationContext();
@@ -957,10 +957,10 @@ void ReshardingRecipientService::RecipientStateMachine::_onAbortOrStepdown(WithL
}
}
-CancelationToken ReshardingRecipientService::RecipientStateMachine::_initAbortSource(
- const CancelationToken& stepdownToken) {
+CancellationToken ReshardingRecipientService::RecipientStateMachine::_initAbortSource(
+ const CancellationToken& stepdownToken) {
stdx::lock_guard<Latch> lk(_mutex);
- _abortSource = CancelationSource(stepdownToken);
+ _abortSource = CancellationSource(stepdownToken);
return _abortSource->token();
}
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service.h b/src/mongo/db/s/resharding/resharding_recipient_service.h
index 8460178d087..36651e67c20 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service.h
+++ b/src/mongo/db/s/resharding/resharding_recipient_service.h
@@ -110,7 +110,7 @@ public:
~RecipientStateMachine();
SemiFuture<void> run(std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) noexcept override;
+ const CancellationToken& token) noexcept override;
void interrupt(Status status) override;
@@ -143,14 +143,14 @@ private:
ExecutorFuture<void> _cloneThenTransitionToApplying(
const std::shared_ptr<executor::ScopedTaskExecutor>& executor,
- const CancelationToken& abortToken);
+ const CancellationToken& abortToken);
ExecutorFuture<void> _applyThenTransitionToSteadyState(
const std::shared_ptr<executor::ScopedTaskExecutor>& executor);
ExecutorFuture<void> _awaitAllDonorsBlockingWritesThenTransitionToStrictConsistency(
const std::shared_ptr<executor::ScopedTaskExecutor>& executor,
- const CancelationToken& abortToken);
+ const CancellationToken& abortToken);
ExecutorFuture<void> _awaitCoordinatorHasDecisionPersistedThenTransitionToRenaming(
const std::shared_ptr<executor::ScopedTaskExecutor>& executor);
@@ -199,7 +199,7 @@ private:
// Initializes the _abortSource and generates a token from it to return back to the caller.
//
// Should only be called once per lifetime.
- CancelationToken _initAbortSource(const CancelationToken& stepdownToken);
+ CancellationToken _initAbortSource(const CancellationToken& stepdownToken);
// The in-memory representation of the immutable portion of the document in
// config.localReshardingOperations.recipient.
@@ -228,7 +228,7 @@ private:
Mutex _mutex = MONGO_MAKE_LATCH("RecipientStateMachine::_mutex");
// Canceled when there is an unrecoverable error or stepdown.
- boost::optional<CancelationSource> _abortSource;
+ boost::optional<CancellationSource> _abortSource;
boost::optional<ReshardingCriticalSection> _critSec;
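The recipient wires its abort handling through a child cancellation source: _initAbortSource() builds a CancellationSource from the stepdown token, so the abort token fires on either an explicit abort or a stepdown. A minimal sketch of that hierarchy, mirroring the .cpp hunk above (the struct is hypothetical):

    struct AbortState {
        boost::optional<CancellationSource> abortSource;

        // Mirrors _initAbortSource(): the returned token is canceled when the parent
        // stepdown token is canceled, or when abortSource->cancel() is later called
        // on an unrecoverable error.
        CancellationToken initAbortSource(const CancellationToken& stepdownToken) {
            abortSource = CancellationSource(stepdownToken);
            return abortSource->token();
        }
    };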
diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner.cpp
index 7fe56cd63b9..db2a13d0dbe 100644
--- a/src/mongo/db/s/resharding/resharding_txn_cloner.cpp
+++ b/src/mongo/db/s/resharding/resharding_txn_cloner.cpp
@@ -245,7 +245,7 @@ auto ReshardingTxnCloner::_withTemporaryOperationContext(Callable&& callable) {
ExecutorFuture<void> ReshardingTxnCloner::run(
ServiceContext* serviceContext,
std::shared_ptr<executor::TaskExecutor> executor,
- CancelationToken cancelToken,
+ CancellationToken cancelToken,
std::shared_ptr<MongoProcessInterface> mongoProcessInterface_forTest) {
struct ChainContext {
std::unique_ptr<Pipeline, PipelineDeleter> pipeline;
@@ -333,7 +333,7 @@ ExecutorFuture<void> ReshardingTxnCloner::run(
});
}
- if (status.isA<ErrorCategory::CancelationError>() ||
+ if (status.isA<ErrorCategory::CancellationError>() ||
status.isA<ErrorCategory::NotPrimaryError>()) {
// Cancellation and NotPrimary errors indicate the primary-only service Instance
// will be shut down or is shutting down now - provided the cancelToken is also
@@ -371,7 +371,7 @@ ExecutorFuture<void> ReshardingTxnCloner::run(
.on(executor, cancelToken)
.onCompletion([this, chainCtx](Status status) {
if (chainCtx->pipeline) {
- // Guarantee the pipeline is always cleaned up - even upon cancelation.
+ // Guarantee the pipeline is always cleaned up - even upon cancellation.
_withTemporaryOperationContext([&](auto* opCtx) {
chainCtx->pipeline->dispose(opCtx);
chainCtx->pipeline.reset();
diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner.h b/src/mongo/db/s/resharding/resharding_txn_cloner.h
index aea92f29b94..b931b9d8d08 100644
--- a/src/mongo/db/s/resharding/resharding_txn_cloner.h
+++ b/src/mongo/db/s/resharding/resharding_txn_cloner.h
@@ -74,7 +74,7 @@ public:
ExecutorFuture<void> run(
ServiceContext* serviceContext,
std::shared_ptr<executor::TaskExecutor> executor,
- CancelationToken cancelToken,
+ CancellationToken cancelToken,
std::shared_ptr<MongoProcessInterface> mongoProcessInterface_forTest = nullptr);
void updateProgressDocument_forTest(OperationContext* opCtx, const LogicalSessionId& progress) {
diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
index 79f9e86a41d..1cb9713b80d 100644
--- a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
@@ -346,11 +346,11 @@ protected:
ExecutorFuture<void> runCloner(
ReshardingTxnCloner& cloner,
std::shared_ptr<executor::ThreadPoolTaskExecutor> executor,
- boost::optional<CancelationToken> customCancelToken = boost::none) {
- // Allows callers to control the cancelation of the cloner's run() function when specified.
+ boost::optional<CancellationToken> customCancelToken = boost::none) {
+ // Allows callers to control the cancellation of the cloner's run() function when specified.
auto cancelToken = customCancelToken.is_initialized()
? customCancelToken.get()
- : operationContext()->getCancelationToken();
+ : operationContext()->getCancellationToken();
// There isn't a guarantee that the reference count to `executor` has been decremented after
// .run() returns. We schedule a trivial task on the task executor to ensure the callback's
@@ -551,8 +551,8 @@ TEST_F(ReshardingTxnClonerTest, ClonerOneBatchThenCanceled) {
const auto txns = makeSortedTxns(4);
auto executor = makeTaskExecutorForCloner();
ReshardingTxnCloner cloner(kTwoSourceIdList[1], Timestamp::max());
- auto opCtxToken = operationContext()->getCancelationToken();
- auto cancelSource = CancelationSource(opCtxToken);
+ auto opCtxToken = operationContext()->getCancellationToken();
+ auto cancelSource = CancellationSource(opCtxToken);
auto future = runCloner(cloner, executor, cancelSource.token());
onCommandReturnTxnBatch(std::vector<BSONObj>(txns.begin(), txns.begin() + 2),
@@ -620,7 +620,7 @@ TEST_F(ReshardingTxnClonerTest, ClonerStoresProgressMultipleBatches) {
auto executor = makeTaskExecutorForCloner();
ReshardingTxnCloner cloner(kTwoSourceIdList[1], Timestamp::max());
- auto cancelSource = CancelationSource(operationContext()->getCancelationToken());
+ auto cancelSource = CancellationSource(operationContext()->getCancellationToken());
auto future = runCloner(cloner, executor, cancelSource.token());
// The progress document is updated asynchronously after the session record is updated. We fake
@@ -633,8 +633,8 @@ TEST_F(ReshardingTxnClonerTest, ClonerStoresProgressMultipleBatches) {
// Simulate a stepdown.
cancelSource.cancel();
- // With a non-mock network, disposing of the pipeline upon cancelation would also cancel the
- // original request.
+ // With a non-mock network, disposing of the pipeline upon cancellation would also cancel
+ // the original request.
return Status{ErrorCodes::CallbackCanceled, "Simulate cancellation"};
});
auto status = future.getNoThrow();
@@ -670,7 +670,7 @@ TEST_F(ReshardingTxnClonerTest, ClonerStoresProgressResume) {
auto executor = makeTaskExecutorForCloner();
ReshardingTxnCloner cloner(kTwoSourceIdList[1], Timestamp::max());
- auto cancelSource = CancelationSource(operationContext()->getCancelationToken());
+ auto cancelSource = CancellationSource(operationContext()->getCancellationToken());
auto future = runCloner(cloner, executor, cancelSource.token());
onCommandReturnTxnBatch({txns.front()}, CursorId{123}, true /* isFirstBatch */);
@@ -685,8 +685,8 @@ TEST_F(ReshardingTxnClonerTest, ClonerStoresProgressResume) {
// Simulate a stepdown.
cancelSource.cancel();
- // With a non-mock network, disposing of the pipeline upon cancelation would also cancel the
- // original request.
+ // With a non-mock network, disposing of the pipeline upon cancellation would also cancel
+ // the original request.
return Status{ErrorCodes::CallbackCanceled, "Simulate cancellation"};
});
diff --git a/src/mongo/db/s/resharding_test_commands.cpp b/src/mongo/db/s/resharding_test_commands.cpp
index ec4f8400ef4..550fb6e878d 100644
--- a/src/mongo/db/s/resharding_test_commands.cpp
+++ b/src/mongo/db/s/resharding_test_commands.cpp
@@ -99,7 +99,7 @@ public:
request().getAtClusterTime(),
request().getOutputNs());
- cloner.run(std::move(executor), opCtx->getCancelationToken()).get(opCtx);
+ cloner.run(std::move(executor), opCtx->getCancellationToken()).get(opCtx);
}
private:
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index 1ba225f6747..f2c9a70828d 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -982,7 +982,7 @@ void ShardServerCatalogCacheLoader::_ensureMajorityPrimaryAndScheduleCollAndChun
_executor->schedule([this, nss](auto status) {
if (!status.isOK()) {
- if (ErrorCodes::isCancelationError(status)) {
+ if (ErrorCodes::isCancellationError(status)) {
return;
}
@@ -1010,7 +1010,7 @@ void ShardServerCatalogCacheLoader::_ensureMajorityPrimaryAndScheduleDbTask(Oper
_executor->schedule([this, name = dbName.toString()](auto status) {
if (!status.isOK()) {
- if (ErrorCodes::isCancelationError(status)) {
+ if (ErrorCodes::isCancellationError(status)) {
return;
}
@@ -1075,7 +1075,7 @@ void ShardServerCatalogCacheLoader::_runCollAndChunksTasks(const NamespaceString
return;
}
- if (ErrorCodes::isCancelationError(status.code())) {
+ if (ErrorCodes::isCancellationError(status.code())) {
LOGV2(22096,
"Cache loader failed to schedule a persisted metadata update task for namespace "
"{namespace} due to {error}. Clearing task list so that scheduling will be "
@@ -1149,7 +1149,7 @@ void ShardServerCatalogCacheLoader::_runDbTasks(StringData dbName) {
return;
}
- if (ErrorCodes::isCancelationError(status.code())) {
+ if (ErrorCodes::isCancellationError(status.code())) {
LOGV2(22099,
"Cache loader failed to schedule a persisted metadata update task for database "
"{database} due to {error}. Clearing task list so that scheduling will be "
diff --git a/src/mongo/db/s/sharding_ddl_coordinator.cpp b/src/mongo/db/s/sharding_ddl_coordinator.cpp
index cf93ad8439c..75c7f1a2b2c 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator.cpp
+++ b/src/mongo/db/s/sharding_ddl_coordinator.cpp
@@ -72,7 +72,7 @@ void ShardingDDLCoordinator::interrupt(Status status) {
}
SemiFuture<void> ShardingDDLCoordinator::run(std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) noexcept {
+ const CancellationToken& token) noexcept {
return ExecutorFuture<void>(**executor)
.then([this, executor, token, anchor = shared_from_this()] {
diff --git a/src/mongo/db/s/sharding_ddl_coordinator.h b/src/mongo/db/s/sharding_ddl_coordinator.h
index efa199af4d2..f692a3a00f0 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator.h
+++ b/src/mongo/db/s/sharding_ddl_coordinator.h
@@ -94,10 +94,10 @@ protected:
private:
SemiFuture<void> run(std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) noexcept override final;
+ const CancellationToken& token) noexcept override final;
virtual ExecutorFuture<void> _runImpl(std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancelationToken& token) noexcept = 0;
+ const CancellationToken& token) noexcept = 0;
void interrupt(Status status) override final;
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index f4e157b5cc3..209695d570d 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -174,7 +174,7 @@ private:
void _updateShardIdentityConfigString(Status status,
std::string setName,
ConnectionString update) {
- if (ErrorCodes::isCancelationError(status.code())) {
+ if (ErrorCodes::isCancellationError(status.code())) {
LOGV2_DEBUG(22067,
2,
"Unable to schedule confirmed replica set update due to {error}",
diff --git a/src/mongo/db/s/transaction_coordinator.cpp b/src/mongo/db/s/transaction_coordinator.cpp
index 384e4cffeda..7112a12c8f5 100644
--- a/src/mongo/db/s/transaction_coordinator.cpp
+++ b/src/mongo/db/s/transaction_coordinator.cpp
@@ -39,7 +39,7 @@
#include "mongo/db/vector_clock_mutable.h"
#include "mongo/logv2/log.h"
#include "mongo/s/grid.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
#include "mongo/util/fail_point.h"
namespace mongo {
@@ -60,7 +60,7 @@ ExecutorFuture<void> waitForMajorityWithHangFailpoint(ServiceContext* service,
auto executor = Grid::get(service)->getExecutorPool()->getFixedExecutor();
auto waitForWC = [service, executor](repl::OpTime opTime) {
return WaitForMajorityService::get(service)
- .waitUntilMajority(opTime, CancelationToken::uncancelable())
+ .waitUntilMajority(opTime, CancellationToken::uncancelable())
.thenRunOn(executor);
};
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.cpp b/src/mongo/db/s/transaction_coordinator_futures_util.cpp
index b67d3bf67dc..c31d642cd0c 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util.cpp
+++ b/src/mongo/db/s/transaction_coordinator_futures_util.cpp
@@ -258,7 +258,7 @@ Future<AsyncWorkScheduler::HostAndShard> AsyncWorkScheduler::_targetHostAsync(
}
return shard->getTargeter()
- ->findHost(readPref, CancelationToken::uncancelable())
+ ->findHost(readPref, CancellationToken::uncancelable())
.thenRunOn(_executor)
.unsafeToInlineFuture()
.then([shard = std::move(shard)](HostAndPort host) -> HostAndShard {
diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp
index e610934470b..fbe41427376 100644
--- a/src/mongo/db/storage/storage_engine_impl.cpp
+++ b/src/mongo/db/storage/storage_engine_impl.cpp
@@ -1196,7 +1196,7 @@ void StorageEngineImpl::TimestampMonitor::startup() {
_currentTimestamps.stable = stable;
_currentTimestamps.minOfCheckpointAndOldest = minOfCheckpointAndOldest;
} catch (const ExceptionForCat<ErrorCategory::Interruption>& ex) {
- if (!ErrorCodes::isCancelationError(ex))
+ if (!ErrorCodes::isCancellationError(ex))
throw;
// If we're interrupted at shutdown or after PeriodicRunner's client has been
// killed, it's fine to give up on future notifications.
diff --git a/src/mongo/executor/cancelable_executor.h b/src/mongo/executor/cancelable_executor.h
index fd5ef52e082..594f8f1746f 100644
--- a/src/mongo/executor/cancelable_executor.h
+++ b/src/mongo/executor/cancelable_executor.h
@@ -30,17 +30,16 @@
#pragma once
#include "mongo/base/status.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
#include "mongo/util/out_of_line_executor.h"
namespace mongo {
/**
- * An executor supporting cancelation via a cancelation token. Given an existing OutOfLineExecutor
- * "exec" and a cancelation token "token", you can create a CancelableExecutor using
- * CancelableExecutor::make(exec, token). This executor will use "exec"
- * to actually execute any scheduled work, but will refuse to run any work
- * after "token" has been cancelled.
+ * An executor supporting cancellation via a cancellation token. Given an existing
+ * OutOfLineExecutor "exec" and a cancellation token "token", you can create a CancelableExecutor
+ * using CancelableExecutor::make(exec, token). This executor will use "exec" to actually execute
+ * any scheduled work, but will refuse to run any work after "token" has been cancelled.
*
* Refusal to run work is similar to any other executor's refusal: the callback is still
* invoked, but with a non-OK status (the kCallbackCanceledErrorStatus defined below), and
@@ -69,7 +68,7 @@ namespace mongo {
*/
class CancelableExecutor : public OutOfLineExecutor {
public:
- CancelableExecutor(ExecutorPtr exec, CancelationToken tok)
+ CancelableExecutor(ExecutorPtr exec, CancellationToken tok)
: _exec(std::move(exec)), _source(std::move(tok)) {}
CancelableExecutor(const CancelableExecutor&) = delete;
@@ -81,7 +80,7 @@ public:
* This is the preferred way to get a CancelableExecutor, since the ExecutorFuture interface
* expects shared_ptrs to executors it receives in its constructor or .thenRunOn.
*/
- static std::shared_ptr<CancelableExecutor> make(ExecutorPtr exec, CancelationToken token) {
+ static std::shared_ptr<CancelableExecutor> make(ExecutorPtr exec, CancellationToken token) {
return std::make_shared<CancelableExecutor>(std::move(exec), std::move(token));
}
void schedule(OutOfLineExecutor::Task func) override {
@@ -96,6 +95,6 @@ public:
private:
ExecutorPtr _exec;
- CancelationSource _source;
+ CancellationSource _source;
};
} // namespace mongo
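A minimal usage sketch of the class documented above, consistent with the tests in the next file; `backingExec` and the wrapper are hypothetical, so this illustrates the refusal semantics rather than reproducing code from this changeset:

    int runThroughCancelableExecutor(ExecutorPtr backingExec) {
        CancellationSource source;
        auto cancelExec = CancelableExecutor::make(backingExec, source.token());
        auto [promise, future] = makePromiseFuture<void>();
        auto result = std::move(future).thenRunOn(cancelExec).then([] { return 42; });
        source.cancel();         // cancel before the promise is fulfilled
        promise.emplaceValue();  // the .then() above is now refused rather than run
        // getNoThrow() reports CancelableExecutor's canceled error status, not 42.
        return result.getNoThrow().isOK() ? 42 : -1;
    }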
diff --git a/src/mongo/executor/cancelable_executor_test.cpp b/src/mongo/executor/cancelable_executor_test.cpp
index fda96586123..fa7f58209c9 100644
--- a/src/mongo/executor/cancelable_executor_test.cpp
+++ b/src/mongo/executor/cancelable_executor_test.cpp
@@ -34,7 +34,7 @@
#include "mongo/unittest/barrier.h"
#include "mongo/unittest/thread_assertion_monitor.h"
#include "mongo/unittest/unittest.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
#include "mongo/util/executor_test_util.h"
#include "mongo/util/future.h"
@@ -76,7 +76,7 @@ private:
TEST(CancelableExecutor, SchedulesWorkCorrectly) {
auto exec = InlineQueuedCountingExecutor::make();
auto [promise, future] = makePromiseFuture<void>();
- CancelationSource source;
+ CancellationSource source;
auto fut2 =
std::move(future).thenRunOn(CancelableExecutor::make(exec, source.token())).then([] {
return 42;
@@ -91,7 +91,7 @@ TEST(CancelableExecutor, SchedulesWorkCorrectly) {
TEST(CancelableExecutor, WorkCanceledBeforeScheduleDoesNotRun) {
auto exec = InlineQueuedCountingExecutor::make();
auto [promise, future] = makePromiseFuture<void>();
- CancelationSource source;
+ CancellationSource source;
auto fut2 =
std::move(future).thenRunOn(CancelableExecutor::make(exec, source.token())).then([] {
return 42;
@@ -109,7 +109,7 @@ TEST_F(CancelableExecutorTest, WorkCanceledAfterPreviousWorkOnExecutorHasRunDoes
mongo::unittest::threadAssertionMonitoredTest([this](auto& assertionMonitor) {
auto [promise, future] = makePromiseFuture<void>();
auto [innerPromise, innerFuture] = makePromiseFuture<void>();
- CancelationSource source;
+ CancellationSource source;
unittest::Barrier barrier(2);
auto fut2 = std::move(future)
.thenRunOn(CancelableExecutor::make(executor(), source.token()))
@@ -135,10 +135,10 @@ TEST_F(CancelableExecutorTest, WorkCanceledAfterPreviousWorkOnExecutorHasRunDoes
// the CancelableExecutor being canceled before the callback is ready, and
// because the backing executor refuses work for some other reason, the backing
// executor's error status takes precedence.
-TEST(CancelableExecutor, ExecutorRejectionsTakePrecedenceOverCancelation) {
+TEST(CancelableExecutor, ExecutorRejectionsTakePrecedenceOverCancellation) {
auto exec = RejectingExecutor::make();
auto [promise, future] = makePromiseFuture<void>();
- CancelationSource source;
+ CancellationSource source;
auto fut2 =
std::move(future).thenRunOn(CancelableExecutor::make(exec, source.token())).then([] {
return 42;
@@ -149,13 +149,13 @@ TEST(CancelableExecutor, ExecutorRejectionsTakePrecedenceOverCancelation) {
}
// Check that no continuations (even error-handling ones) scheduled on a CancelableExecutor
-// run after it is canceled, and that the cancelation error status is passed to the next
+// run after it is canceled, and that the cancellation error status is passed to the next
// error-handling callback on the future chain that runs in a non-canceled execution context/
// on an executor that accepts the work.
TEST(CancelableExecutor, ErrorsArePropagatedToAcceptingExecutor) {
auto exec = InlineQueuedCountingExecutor::make();
auto [promise, future] = makePromiseFuture<void>();
- CancelationSource source;
+ CancellationSource source;
auto cancelExec = CancelableExecutor::make(exec, source.token());
source.cancel();
// It's safe to FAIL in the continuations here, because this executor runs
@@ -178,7 +178,7 @@ TEST(CancelableExecutor, ErrorsArePropagatedToAcceptingExecutor) {
TEST(CancelableExecutor, UncanceledExecutorCanBeChainedCorrectly) {
auto exec = InlineQueuedCountingExecutor::make();
auto [promise, future] = makePromiseFuture<void>();
- CancelationSource source;
+ CancellationSource source;
auto cancelExec = CancelableExecutor::make(exec, source.token());
auto fut2 = std::move(future)
.thenRunOn(exec)
diff --git a/src/mongo/executor/network_interface.h b/src/mongo/executor/network_interface.h
index 3a30c0e0893..21b0129158f 100644
--- a/src/mongo/executor/network_interface.h
+++ b/src/mongo/executor/network_interface.h
@@ -185,7 +185,7 @@ public:
}
/**
- * Requests cancelation of the network activity associated with "cbHandle" if it has not yet
+ * Requests cancellation of the network activity associated with "cbHandle" if it has not yet
* completed.
*
* Note that the work involved in onFinish may run locally as a result of invoking this
diff --git a/src/mongo/executor/network_interface_mock.h b/src/mongo/executor/network_interface_mock.h
index eb605735eae..5d29786f091 100644
--- a/src/mongo/executor/network_interface_mock.h
+++ b/src/mongo/executor/network_interface_mock.h
@@ -420,7 +420,7 @@ private:
// NetworkInterfaceMock.
NetworkOperationList _operations; // (M)
- // The list of responses that have been enqueued from scheduleResponse(), cancelation, or
+ // The list of responses that have been enqueued from scheduleResponse(), cancellation, or
// timeout. This list is ordered by NetworkResponse::when and is drained front to back by
// runReadyNetworkOperations().
NetworkResponseList _responses; // (M)
diff --git a/src/mongo/executor/network_interface_tl.cpp b/src/mongo/executor/network_interface_tl.cpp
index 14460069e5e..b063ae2fa06 100644
--- a/src/mongo/executor/network_interface_tl.cpp
+++ b/src/mongo/executor/network_interface_tl.cpp
@@ -73,7 +73,7 @@ public:
// Increment the count of commands that experienced a local timeout
// Note that these commands do not count as "failed".
++_data.timedOut;
- } else if (ErrorCodes::isCancelationError(status)) {
+ } else if (ErrorCodes::isCancellationError(status)) {
// Increment the count of commands that were canceled locally
++_data.canceled;
} else if (ErrorCodes::isShutdownError(status)) {
@@ -938,7 +938,7 @@ void NetworkInterfaceTL::ExhaustCommandState::continueExhaustRequest(
}
if (requestState->interface()->inShutdown() ||
- ErrorCodes::isCancelationError(response.status)) {
+ ErrorCodes::isCancellationError(response.status)) {
finalResponsePromise.emplaceValue(response);
return;
}
@@ -1204,7 +1204,7 @@ void NetworkInterfaceTL::_answerAlarm(Status status, std::shared_ptr<AlarmState>
// Since the lock is released before canceling the timer, this thread can win the race with
// cancelAlarm(). Thus if status is CallbackCanceled, then this alarm is already removed from
// _inProgressAlarms.
- if (ErrorCodes::isCancelationError(status)) {
+ if (ErrorCodes::isCancellationError(status)) {
return;
}
diff --git a/src/mongo/executor/task_executor.cpp b/src/mongo/executor/task_executor.cpp
index 89b136a3fee..ad1bfd1496c 100644
--- a/src/mongo/executor/task_executor.cpp
+++ b/src/mongo/executor/task_executor.cpp
@@ -41,7 +41,7 @@ MONGO_FAIL_POINT_DEFINE(pauseScheduleCallWithCancelTokenUntilCanceled);
Status wrapCallbackHandleWithCancelToken(
const std::shared_ptr<TaskExecutor>& executor,
const StatusWith<TaskExecutor::CallbackHandle>& swCallbackHandle,
- const CancelationToken& token) {
+ const CancellationToken& token) {
if (!swCallbackHandle.isOK()) {
return swCallbackHandle.getStatus();
}
@@ -57,14 +57,14 @@ Status wrapCallbackHandleWithCancelToken(
/**
* Takes a schedule(Exhaust)RemoteCommand(OnAny)-style function and wraps it to return a future and
- * be cancelable with CancelationTokens.
+ * be cancelable with CancellationTokens.
*/
template <typename Request, typename Response, typename ScheduleFn, typename CallbackFn>
ExecutorFuture<Response> wrapScheduleCallWithCancelTokenAndFuture(
const std::shared_ptr<TaskExecutor>& executor,
ScheduleFn&& schedule,
const Request& request,
- const CancelationToken& token,
+ const CancellationToken& token,
const BatonHandle& baton,
const CallbackFn& cb) {
if (token.isCanceled()) {
@@ -149,7 +149,7 @@ void TaskExecutor::schedule(OutOfLineExecutor::Task func) {
}
}
-ExecutorFuture<void> TaskExecutor::sleepUntil(Date_t when, const CancelationToken& token) {
+ExecutorFuture<void> TaskExecutor::sleepUntil(Date_t when, const CancellationToken& token) {
if (token.isCanceled()) {
return ExecutorFuture<void>(shared_from_this(), TaskExecutor::kCallbackCanceledErrorStatus);
}
@@ -183,7 +183,7 @@ ExecutorFuture<void> TaskExecutor::sleepUntil(Date_t when, const CancelationToke
auto cbHandle = scheduleWorkAt(
when, [alarmState](const auto& args) mutable { alarmState->signal(args.status); });
- // Handle cancelation via the input CancelationToken.
+ // Handle cancellation via the input CancellationToken.
auto scheduleStatus =
wrapCallbackHandleWithCancelToken(shared_from_this(), std::move(cbHandle), token);
@@ -274,7 +274,7 @@ StatusWith<TaskExecutor::CallbackHandle> TaskExecutor::scheduleRemoteCommand(
}
ExecutorFuture<TaskExecutor::ResponseStatus> TaskExecutor::scheduleRemoteCommand(
- const RemoteCommandRequest& request, const CancelationToken& token, const BatonHandle& baton) {
+ const RemoteCommandRequest& request, const CancellationToken& token, const BatonHandle& baton) {
return wrapScheduleCallWithCancelTokenAndFuture<decltype(request),
TaskExecutor::ResponseStatus>(
shared_from_this(),
@@ -287,7 +287,7 @@ ExecutorFuture<TaskExecutor::ResponseStatus> TaskExecutor::scheduleRemoteCommand
ExecutorFuture<TaskExecutor::ResponseOnAnyStatus> TaskExecutor::scheduleRemoteCommandOnAny(
const RemoteCommandRequestOnAny& request,
- const CancelationToken& token,
+ const CancellationToken& token,
const BatonHandle& baton) {
return wrapScheduleCallWithCancelTokenAndFuture<decltype(request),
TaskExecutor::ResponseOnAnyStatus>(
@@ -313,7 +313,7 @@ StatusWith<TaskExecutor::CallbackHandle> TaskExecutor::scheduleExhaustRemoteComm
ExecutorFuture<TaskExecutor::ResponseStatus> TaskExecutor::scheduleExhaustRemoteCommand(
const RemoteCommandRequest& request,
const RemoteCommandCallbackFn& cb,
- const CancelationToken& token,
+ const CancellationToken& token,
const BatonHandle& baton) {
return wrapScheduleCallWithCancelTokenAndFuture<decltype(request),
TaskExecutor::ResponseStatus>(
@@ -328,7 +328,7 @@ ExecutorFuture<TaskExecutor::ResponseStatus> TaskExecutor::scheduleExhaustRemote
ExecutorFuture<TaskExecutor::ResponseOnAnyStatus> TaskExecutor::scheduleExhaustRemoteCommandOnAny(
const RemoteCommandRequestOnAny& request,
const RemoteCommandOnAnyCallbackFn& cb,
- const CancelationToken& token,
+ const CancellationToken& token,
const BatonHandle& baton) {
return wrapScheduleCallWithCancelTokenAndFuture<decltype(request),
TaskExecutor::ResponseOnAnyStatus>(
diff --git a/src/mongo/executor/task_executor.h b/src/mongo/executor/task_executor.h
index 28bfd4fa241..cfd689e0548 100644
--- a/src/mongo/executor/task_executor.h
+++ b/src/mongo/executor/task_executor.h
@@ -40,7 +40,7 @@
#include "mongo/executor/remote_command_response.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/transport/baton.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
#include "mongo/util/future.h"
#include "mongo/util/out_of_line_executor.h"
#include "mongo/util/time_support.h"
@@ -281,7 +281,7 @@ public:
* Otherwise, if the executor shuts down or the token is canceled prior to the deadline being
* reached, the resulting ExecutorFuture will be set with ErrorCodes::CallbackCanceled.
*/
- ExecutorFuture<void> sleepUntil(Date_t when, const CancelationToken& token);
+ ExecutorFuture<void> sleepUntil(Date_t when, const CancellationToken& token);
/**
* Returns an ExecutorFuture that will be resolved with success after the given duration has
@@ -293,7 +293,7 @@ public:
* Otherwise, if the executor shuts down or the token is canceled prior to the deadline being
* reached, the resulting ExecutorFuture will be set with ErrorCodes::CallbackCanceled.
*/
- ExecutorFuture<void> sleepFor(Milliseconds duration, const CancelationToken& token) {
+ ExecutorFuture<void> sleepFor(Milliseconds duration, const CancellationToken& token) {
return sleepUntil(now() + duration, token);
}
@@ -318,14 +318,14 @@ public:
* resulting future will be set with an error only if there is a failure to send the request.
* Errors from the remote node will be contained in the ResponseStatus object.
*
- * The input CancelationToken may be used to cancel sending the request. There is no guarantee
+ * The input CancellationToken may be used to cancel sending the request. There is no guarantee
* that this will succeed in canceling the request and the resulting ExecutorFuture may contain
- * either success or error. If cancelation is successful, the resulting ExecutorFuture will be
+ * either success or error. If cancellation is successful, the resulting ExecutorFuture will be
* set with an error.
*/
ExecutorFuture<TaskExecutor::ResponseStatus> scheduleRemoteCommand(
const RemoteCommandRequest& request,
- const CancelationToken& token,
+ const CancellationToken& token,
const BatonHandle& baton = nullptr);
virtual StatusWith<CallbackHandle> scheduleRemoteCommandOnAny(
@@ -335,7 +335,7 @@ public:
ExecutorFuture<TaskExecutor::ResponseOnAnyStatus> scheduleRemoteCommandOnAny(
const RemoteCommandRequestOnAny& request,
- const CancelationToken& token,
+ const CancellationToken& token,
const BatonHandle& baton = nullptr);
@@ -368,9 +368,9 @@ public:
*
* May be called by client threads or callbacks running in the executor.
*
- * The input CancelationToken may be used to cancel sending the request. There is no guarantee
+ * The input CancellationToken may be used to cancel sending the request. There is no guarantee
* that this will succeed in canceling the request and the resulting ExecutorFuture may contain
- * either success or error. If cancelation is successful, the resulting ExecutorFuture will be
+ * either success or error. If cancellation is successful, the resulting ExecutorFuture will be
* set with a CallbackCanceled error.
*
* Cancelling the future will also result in cancelling any outstanding invocations of the
@@ -379,13 +379,13 @@ public:
ExecutorFuture<TaskExecutor::ResponseStatus> scheduleExhaustRemoteCommand(
const RemoteCommandRequest& request,
const RemoteCommandCallbackFn& cb,
- const CancelationToken& token,
+ const CancellationToken& token,
const BatonHandle& baton = nullptr);
ExecutorFuture<TaskExecutor::ResponseOnAnyStatus> scheduleExhaustRemoteCommandOnAny(
const RemoteCommandRequestOnAny& request,
const RemoteCommandOnAnyCallbackFn& cb,
- const CancelationToken& token,
+ const CancellationToken& token,
const BatonHandle& baton = nullptr);
/**
diff --git a/src/mongo/executor/task_executor_test_common.cpp b/src/mongo/executor/task_executor_test_common.cpp
index 1e36baa4be1..0a4f534587e 100644
--- a/src/mongo/executor/task_executor_test_common.cpp
+++ b/src/mongo/executor/task_executor_test_common.cpp
@@ -44,7 +44,7 @@
#include "mongo/stdx/thread.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/unittest/unittest.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
#include "mongo/util/clock_source_mock.h"
#include "mongo/util/str.h"
@@ -407,7 +407,7 @@ COMMON_EXECUTOR_TEST(SleepUntilReturnsReadyFutureWithSuccessWhenDeadlineAlreadyP
const Date_t now = net->now();
- auto alarm = executor.sleepUntil(now, CancelationToken::uncancelable());
+ auto alarm = executor.sleepUntil(now, CancellationToken::uncancelable());
ASSERT(alarm.isReady());
ASSERT_OK(alarm.getNoThrow());
shutdownExecutorThread();
@@ -425,7 +425,7 @@ COMMON_EXECUTOR_TEST(
const Date_t now = net->now();
const Milliseconds sleepDuration{1000};
const auto deadline = now + sleepDuration;
- auto alarm = executor.sleepUntil(deadline, CancelationToken::uncancelable());
+ auto alarm = executor.sleepUntil(deadline, CancellationToken::uncancelable());
ASSERT(alarm.isReady());
ASSERT_EQ(alarm.getNoThrow().code(), ErrorCodes::ShutdownInProgress);
@@ -437,7 +437,7 @@ COMMON_EXECUTOR_TEST(SleepUntilReturnsReadyFutureWithCallbackCanceledWhenTokenAl
const Date_t now = net->now();
const Milliseconds sleepDuration{1000};
const auto deadline = now + sleepDuration;
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
cancelSource.cancel();
auto alarm = executor.sleepUntil(deadline, cancelSource.token());
@@ -454,7 +454,7 @@ COMMON_EXECUTOR_TEST(
const Date_t now = net->now();
const Milliseconds sleepDuration{1000};
const auto deadline = now + sleepDuration;
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
auto alarm = executor.sleepUntil(deadline, cancelSource.token());
ASSERT_FALSE(alarm.isReady());
@@ -467,7 +467,7 @@ COMMON_EXECUTOR_TEST(
// Cancel before deadline.
cancelSource.cancel();
- // Required to process the cancelation.
+ // Required to process the cancellation.
net->enterNetwork();
net->exitNetwork();
@@ -487,7 +487,7 @@ COMMON_EXECUTOR_TEST(
const Date_t now = net->now();
const Milliseconds sleepDuration{1000};
const auto deadline = now + sleepDuration;
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
auto alarm = executor.sleepUntil(deadline, cancelSource.token());
ASSERT_FALSE(alarm.isReady());
@@ -515,7 +515,7 @@ COMMON_EXECUTOR_TEST(SleepUntilResolvesOutputFutureWithSuccessWhenDeadlinePasses
const Milliseconds sleepDuration{1000};
const auto deadline = now + sleepDuration;
- auto alarm = executor.sleepUntil(deadline, CancelationToken::uncancelable());
+ auto alarm = executor.sleepUntil(deadline, CancellationToken::uncancelable());
ASSERT_FALSE(alarm.isReady());
net->enterNetwork();
@@ -602,9 +602,9 @@ COMMON_EXECUTOR_TEST(ScheduleAndCancelRemoteCommand) {
}
COMMON_EXECUTOR_TEST(
- ScheduleRemoteCommandWithCancelationTokenSuccessfullyCancelsRequestIfCanceledAfterFunctionCallButBeforeProcessing) {
+ ScheduleRemoteCommandWithCancellationTokenSuccessfullyCancelsRequestIfCanceledAfterFunctionCallButBeforeProcessing) {
TaskExecutor& executor = getExecutor();
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
auto responseFuture = executor.scheduleRemoteCommand(kDummyRequest, cancelSource.token());
cancelSource.cancel();
@@ -614,7 +614,7 @@ COMMON_EXECUTOR_TEST(
getNet()->runReadyNetworkOperations();
getNet()->exitNetwork();
- // Wait for cancelation to happen and expect error status on future.
+ // Wait for cancellation to happen and expect error status on future.
ASSERT_EQUALS(ErrorCodes::CallbackCanceled, responseFuture.getNoThrow());
shutdownExecutorThread();
@@ -622,10 +622,10 @@ COMMON_EXECUTOR_TEST(
}
COMMON_EXECUTOR_TEST(
- ScheduleRemoteCommandWithCancelationTokenSuccessfullyCancelsRequestIfCanceledBeforeFunctionCallAndBeforeProcessing) {
+ ScheduleRemoteCommandWithCancellationTokenSuccessfullyCancelsRequestIfCanceledBeforeFunctionCallAndBeforeProcessing) {
TaskExecutor& executor = getExecutor();
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
// Cancel before calling scheduleRemoteCommand.
cancelSource.cancel();
auto responseFuture = executor.scheduleRemoteCommand(kDummyRequest, cancelSource.token());
@@ -635,9 +635,9 @@ COMMON_EXECUTOR_TEST(
}
COMMON_EXECUTOR_TEST(
- ScheduleRemoteCommandWithCancelationTokenDoesNotCancelRequestIfCanceledAfterProcessing) {
+ ScheduleRemoteCommandWithCancellationTokenDoesNotCancelRequestIfCanceledAfterProcessing) {
TaskExecutor& executor = getExecutor();
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
auto responseFuture = executor.scheduleRemoteCommand(kDummyRequest, cancelSource.token());
launchExecutorThread();
@@ -659,7 +659,7 @@ COMMON_EXECUTOR_TEST(
}
COMMON_EXECUTOR_TEST(
- ScheduleRemoteCommandWithCancelationTokenReturnsShutdownInProgressIfExecutorAlreadyShutdownAndCancelNotCalled) {
+ ScheduleRemoteCommandWithCancellationTokenReturnsShutdownInProgressIfExecutorAlreadyShutdownAndCancelNotCalled) {
TaskExecutor& executor = getExecutor();
launchExecutorThread();
@@ -667,15 +667,15 @@ COMMON_EXECUTOR_TEST(
joinExecutorThread();
auto responseFuture =
- executor.scheduleRemoteCommand(kDummyRequest, CancelationToken::uncancelable());
+ executor.scheduleRemoteCommand(kDummyRequest, CancellationToken::uncancelable());
ASSERT_EQ(responseFuture.getNoThrow().getStatus().code(), ErrorCodes::ShutdownInProgress);
}
COMMON_EXECUTOR_TEST(
- ScheduleRemoteCommandWithCancelationTokenReturnsShutdownInProgressIfExecutorAlreadyShutdownAndCancelCalled) {
+ ScheduleRemoteCommandWithCancellationTokenReturnsShutdownInProgressIfExecutorAlreadyShutdownAndCancelCalled) {
TaskExecutor& executor = getExecutor();
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
auto responseFuture = executor.scheduleRemoteCommand(kDummyRequest, cancelSource.token());
launchExecutorThread();
@@ -893,7 +893,7 @@ COMMON_EXECUTOR_TEST(ScheduleExhaustRemoteCommandSwallowsErrorsWhenMoreToComeFla
COMMON_EXECUTOR_TEST(
ScheduleExhaustRemoteCommandFutureIsResolvedWhenMoreToComeFlagIsFalseOnFirstResponse) {
TaskExecutor& executor = getExecutor();
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
size_t numTimesCallbackCalled = 0;
auto cb = [&numTimesCallbackCalled](const TaskExecutor::RemoteCommandCallbackArgs&) {
@@ -932,7 +932,7 @@ COMMON_EXECUTOR_TEST(
COMMON_EXECUTOR_TEST(ScheduleExhaustRemoteCommandFutureIsResolvedWhenMoreToComeFlagIsFalse) {
TaskExecutor& executor = getExecutor();
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
size_t numTimesCallbackCalled = 0;
auto cb = [&numTimesCallbackCalled](const TaskExecutor::RemoteCommandCallbackArgs&) {
@@ -979,7 +979,7 @@ COMMON_EXECUTOR_TEST(ScheduleExhaustRemoteCommandFutureIsResolvedWhenMoreToComeF
COMMON_EXECUTOR_TEST(ScheduleExhaustRemoteCommandFutureIsResolvedWhenErrorResponseReceived) {
TaskExecutor& executor = getExecutor();
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
size_t numTimesCallbackCalled = 0;
auto cb = [&numTimesCallbackCalled](const TaskExecutor::RemoteCommandCallbackArgs&) {
@@ -1017,7 +1017,7 @@ COMMON_EXECUTOR_TEST(ScheduleExhaustRemoteCommandFutureIsResolvedWhenErrorRespon
COMMON_EXECUTOR_TEST(ScheduleExhaustRemoteCommandFutureSwallowsErrorsWhenMoreToCome) {
TaskExecutor& executor = getExecutor();
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
size_t numTimesCallbackCalled = 0;
auto cb = [&numTimesCallbackCalled](const TaskExecutor::RemoteCommandCallbackArgs&) {
@@ -1064,9 +1064,9 @@ COMMON_EXECUTOR_TEST(ScheduleExhaustRemoteCommandFutureSwallowsErrorsWhenMoreToC
joinExecutorThread();
}
-COMMON_EXECUTOR_TEST(ScheduleExhaustRemoteCommandFutureIsResolvedWithErrorOnCancelation) {
+COMMON_EXECUTOR_TEST(ScheduleExhaustRemoteCommandFutureIsResolvedWithErrorOnCancellation) {
TaskExecutor& executor = getExecutor();
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
size_t numTimesCallbackCalled = 0;
auto cb = [&numTimesCallbackCalled](const TaskExecutor::RemoteCommandCallbackArgs&) {
diff --git a/src/mongo/executor/thread_pool_task_executor.cpp b/src/mongo/executor/thread_pool_task_executor.cpp
index d1787313d76..b9a500deab7 100644
--- a/src/mongo/executor/thread_pool_task_executor.cpp
+++ b/src/mongo/executor/thread_pool_task_executor.cpp
@@ -613,14 +613,14 @@ void ThreadPoolTaskExecutor::scheduleIntoPool_inlock(WorkQueue* fromQueue,
}
_pool->schedule([this, cbState](auto status) {
- invariant(status.isOK() || ErrorCodes::isCancelationError(status.code()));
+ invariant(status.isOK() || ErrorCodes::isCancellationError(status.code()));
runCallback(std::move(cbState));
});
});
} else {
_pool->schedule([this, cbState](auto status) {
- if (ErrorCodes::isCancelationError(status.code())) {
+ if (ErrorCodes::isCancellationError(status.code())) {
stdx::lock_guard<Latch> lk(_mutex);
cbState->canceled.store(1);
@@ -786,14 +786,14 @@ void ThreadPoolTaskExecutor::scheduleExhaustIntoPool_inlock(std::shared_ptr<Call
}
_pool->schedule([this, cbState, expectedExhaustIter](auto status) {
- invariant(status.isOK() || ErrorCodes::isCancelationError(status.code()));
+ invariant(status.isOK() || ErrorCodes::isCancellationError(status.code()));
runCallbackExhaust(cbState, expectedExhaustIter);
});
});
} else {
_pool->schedule([this, cbState, expectedExhaustIter](auto status) {
- if (ErrorCodes::isCancelationError(status.code())) {
+ if (ErrorCodes::isCancellationError(status.code())) {
stdx::lock_guard<Latch> lk(_mutex);
cbState->canceled.store(1);
diff --git a/src/mongo/executor/thread_pool_task_executor_integration_test.cpp b/src/mongo/executor/thread_pool_task_executor_integration_test.cpp
index 081dac754d6..113046ff192 100644
--- a/src/mongo/executor/thread_pool_task_executor_integration_test.cpp
+++ b/src/mongo/executor/thread_pool_task_executor_integration_test.cpp
@@ -179,8 +179,10 @@ TEST_F(TaskExecutorFixture, RunExhaustFutureShouldReceiveMultipleResponses) {
opCtx.get());
RequestHandlerUtil exhaustRequestHandler;
- auto swFuture = executor()->scheduleExhaustRemoteCommand(
- std::move(rcr), exhaustRequestHandler.getRequestCallbackFn(), opCtx->getCancelationToken());
+ auto swFuture =
+ executor()->scheduleExhaustRemoteCommand(std::move(rcr),
+ exhaustRequestHandler.getRequestCallbackFn(),
+ opCtx->getCancellationToken());
for (size_t i = 0; i < 5; ++i) {
auto counters = exhaustRequestHandler.getCountersWhenReady();
diff --git a/src/mongo/executor/thread_pool_task_executor_test.cpp b/src/mongo/executor/thread_pool_task_executor_test.cpp
index 826f76a7c01..a1486a42d70 100644
--- a/src/mongo/executor/thread_pool_task_executor_test.cpp
+++ b/src/mongo/executor/thread_pool_task_executor_test.cpp
@@ -55,7 +55,7 @@ MONGO_INITIALIZER(ThreadPoolExecutorCommonTests)(InitializerContext*) {
});
}
-TEST_F(ThreadPoolExecutorTest, TimelyCancelationOfScheduleWorkAt) {
+TEST_F(ThreadPoolExecutorTest, TimelyCancellationOfScheduleWorkAt) {
auto net = getNet();
auto& executor = getExecutor();
launchExecutorThread();
diff --git a/src/mongo/s/async_requests_sender.cpp b/src/mongo/s/async_requests_sender.cpp
index 879db7a0322..045404c4fad 100644
--- a/src/mongo/s/async_requests_sender.cpp
+++ b/src/mongo/s/async_requests_sender.cpp
@@ -191,7 +191,7 @@ SemiFuture<std::vector<HostAndPort>> AsyncRequestsSender::RemoteData::resolveSha
str::stream() << "Could not find shard " << _shardId);
}
- return shard->getTargeter()->findHosts(readPref, CancelationToken::uncancelable());
+ return shard->getTargeter()->findHosts(readPref, CancellationToken::uncancelable());
}
auto AsyncRequestsSender::RemoteData::scheduleRemoteCommand(std::vector<HostAndPort>&& hostAndPorts)
diff --git a/src/mongo/s/mongos_main.cpp b/src/mongo/s/mongos_main.cpp
index be70f3fbd78..be442b182f9 100644
--- a/src/mongo/s/mongos_main.cpp
+++ b/src/mongo/s/mongos_main.cpp
@@ -567,7 +567,7 @@ private:
self->_updateConfigServer(args.status, setName, update);
})
.getStatus();
- if (ErrorCodes::isCancelationError(schedStatus.code())) {
+ if (ErrorCodes::isCancellationError(schedStatus.code())) {
LOGV2_DEBUG(22848,
2,
"Unable to schedule updating sharding state with confirmed replica set due"
@@ -580,7 +580,7 @@ private:
}
void _updateConfigServer(Status status, std::string setName, ConnectionString update) {
- if (ErrorCodes::isCancelationError(status.code())) {
+ if (ErrorCodes::isCancellationError(status.code())) {
stdx::lock_guard lock(_mutex);
_updateStates.erase(setName);
return;
diff --git a/src/mongo/transport/session_asio.h b/src/mongo/transport/session_asio.h
index ef8ce3a6f28..8007aa3d781 100644
--- a/src/mongo/transport/session_asio.h
+++ b/src/mongo/transport/session_asio.h
@@ -488,7 +488,7 @@ private:
template <typename MutableBufferSequence>
Future<void> read(const MutableBufferSequence& buffers, const BatonHandle& baton = nullptr) {
- // TODO SERVER-47229 Guard active ops for cancelation here.
+ // TODO SERVER-47229 Guard active ops for cancellation here.
#ifdef MONGO_CONFIG_SSL
if (_sslSocket) {
return opportunisticRead(*_sslSocket, buffers, baton);
@@ -514,7 +514,7 @@ private:
template <typename ConstBufferSequence>
Future<void> write(const ConstBufferSequence& buffers, const BatonHandle& baton = nullptr) {
- // TODO SERVER-47229 Guard active ops for cancelation here.
+ // TODO SERVER-47229 Guard active ops for cancellation here.
#ifdef MONGO_CONFIG_SSL
_ranHandshake = true;
if (_sslSocket) {
@@ -680,7 +680,7 @@ private:
networkingBaton && networkingBaton->canWait()) {
return networkingBaton->addSession(*this, NetworkingBaton::Type::Out)
.onError([](Status error) {
- if (ErrorCodes::isCancelationError(error)) {
+ if (ErrorCodes::isCancellationError(error)) {
// If the baton has detached, it will cancel its polling. We catch that
// error here and return Status::OK so that we invoke
// opportunisticWrite() again and switch to asio::async_write() below.
diff --git a/src/mongo/util/README.md b/src/mongo/util/README.md
index eddb8b8038e..de708515615 100644
--- a/src/mongo/util/README.md
+++ b/src/mongo/util/README.md
@@ -9,7 +9,7 @@ For details on the server-internal *FailPoint* pattern, see [this document][fail
[fail_points]: ../../../docs/fail_points.md
-## Cancelation Sources and Tokens
+## Cancellation Sources and Tokens
### Intro
When writing asynchronous code, we often schedule code or operations to run at some point in the future, in a different execution context. Sometimes, we want to cancel that scheduled work - to stop it from ever running if it hasn't yet run, and possibly to interrupt its execution if it is safe to do so. For example, in the MongoDB server, we might want to:
@@ -17,33 +17,33 @@ When writing asynchronous code, we often schedule code or operations to run at s
- Cancel asynchronous work chained as continuations on futures
- Write and use services that asynchronously perform cancelable work for consumers in the background
-In the MongoDB server, we have two types that together make it easy to manage the cancelation of this sort of asynchronous work: CancelationSources and CancelationTokens.
+In the MongoDB server, we have two types that together make it easy to manage the cancellation of this sort of asynchronous work: CancellationSources and CancellationTokens.
-### The CancelationSource and CancelationToken Types
-A `CancelationSource` manages the cancelation state for some unit of asynchronous work. This unit of asynchronous work can consist of any number of cancelable operations that should all be canceled together, e.g. functions scheduled to run on an executor, continuations on futures, or operations run by a service implementing cancelation. A `CancelationSource` is constructed in an uncanceled state, and cancelation can be requested by calling the member `CancelationSource::cancel()`.
+### The CancellationSource and CancellationToken Types
+A `CancellationSource` manages the cancellation state for some unit of asynchronous work. This unit of asynchronous work can consist of any number of cancelable operations that should all be canceled together, e.g. functions scheduled to run on an executor, continuations on futures, or operations run by a service implementing cancellation. A `CancellationSource` is constructed in an uncanceled state, and cancellation can be requested by calling the member `CancellationSource::cancel()`.
-A `CancelationSource` can be used to produce associated CancelationTokens with the member function `CancelationSource::token()`. These CancelationTokens can be passed to asynchronous operations to make them cancelable. By accepting a `CancelationToken` as a parameter, an asynchronous operation signals that it will attempt to cancel the operation when the `CancelationSource` associated with that `CancelationToken` has been canceled.
+A `CancellationSource` can be used to produce associated CancellationTokens with the member function `CancellationSource::token()`. These CancellationTokens can be passed to asynchronous operations to make them cancelable. By accepting a `CancellationToken` as a parameter, an asynchronous operation signals that it will attempt to cancel the operation when the `CancellationSource` associated with that `CancellationToken` has been canceled.
-When passed a `CancelationToken`, asynchronous operations are able to handle the cancelation of the `CancelationSource` associated with that `CancelationToken` in two ways:
+When passed a `CancellationToken`, asynchronous operations are able to handle the cancellation of the `CancellationSource` associated with that `CancellationToken` in two ways:
- - The `CancelationToken::isCanceled()` member function can be used to check at any point in time if the `CancelationSource` the `CancelationToken` was obtained from has been canceled. The code implementing the asynchronous operation can therefore check the value of this member function at appropriate points and, if the `CancelationSource` has been canceled, refuse to run the work or stop running work if it is ongoing.
+ - The `CancellationToken::isCanceled()` member function can be used to check at any point in time if the `CancellationSource` the `CancellationToken` was obtained from has been canceled. The code implementing the asynchronous operation can therefore check the value of this member function at appropriate points and, if the `CancellationSource` has been canceled, refuse to run the work or stop running work if it is ongoing.
- - The `CancelationToken:onCancel()` member function returns a `SemiFuture` that will be resolved successfully when the underlying `CancelationSource` has been canceled or resolved with an error if it is destructed before being canceled. Continuations can therefore be chained on this future that will run when the associated `CancelationSource` has been canceled. Importantly, because `CancelationToken:onCancel()` returns a `SemiFuture`, implementors of asynchronous operations must provide an execution context in which they want their chained continuation to run. Normally, this continuation should be scheduled to run on an executor, by passing one to `SemiFuture::thenRunOn()`.
+ - The `CancellationToken::onCancel()` member function returns a `SemiFuture` that is resolved successfully when the underlying `CancellationSource` has been canceled, or resolved with an error if the source is destroyed before being canceled. Continuations chained on this future will therefore run when the associated `CancellationSource` has been canceled. Importantly, because `CancellationToken::onCancel()` returns a `SemiFuture`, implementors of asynchronous operations must provide an execution context in which they want their chained continuation to run. Normally, this continuation should be scheduled to run on an executor, by passing one to `SemiFuture::thenRunOn()`.
- - Alternatively, the continuation can be forced to run inline by transforming the `SemiFuture` into an inline future, by using `SemiFuture::unsafeToInlineFuture()`. This should be used very cautiously. When a continuation is chained to the `CancelationToken:onCancel()` future via `SemiFuture::unsafeToInlineFuture()`, the thread that calls `CancelationSource::cancel()` will be forced to run the continuation inline when it makes that call. Note that this means if a service chains many continuations in this way on `CancelationToken`s obtained from the same `CancelationSource`, then whatever thread calls`CancelationSource::cancel()` on that source will be forced to run all of those continuations potentially blocking that thread from making further progress for a non-trivial amount of time. Do not use `SemiFuture::unsafeToInlineFuture()` in this way unless you are sure you can block the thread that cancels the underlying `CancelationSource` until cancelation is complete. Additionally, remember that because the `SemiFuture` returned by `CancelationToken::onCancel()` is resolved as soon as that `CancelationToken` is canceled, if you attempt to chain a continuation on that future when the `CancelationToken` has _already_ been canceled, that continuation will be ready to run right away. Ordinarily, this just means the continuation will immediately be scheduled on the provided executor, but if `SemiFuture::unsafeToInlineFuture` is used to force the continuation to run inline, it will run inline immediately, potentially leading to deadlocks if you're not careful.
+ - Alternatively, the continuation can be forced to run inline by transforming the `SemiFuture` into an inline future using `SemiFuture::unsafeToInlineFuture()`. This should be used very cautiously. When a continuation is chained to the `CancellationToken::onCancel()` future via `SemiFuture::unsafeToInlineFuture()`, the thread that calls `CancellationSource::cancel()` will be forced to run the continuation inline when it makes that call. Note that this means if a service chains many continuations in this way on `CancellationToken`s obtained from the same `CancellationSource`, then whatever thread calls `CancellationSource::cancel()` on that source will be forced to run all of those continuations, potentially blocking that thread from making further progress for a non-trivial amount of time. Do not use `SemiFuture::unsafeToInlineFuture()` in this way unless you are sure you can block the thread that cancels the underlying `CancellationSource` until cancellation is complete. Additionally, remember that because the `SemiFuture` returned by `CancellationToken::onCancel()` is resolved as soon as that `CancellationToken` is canceled, if you attempt to chain a continuation on that future when the `CancellationToken` has _already_ been canceled, that continuation will be ready to run right away. Ordinarily, this just means the continuation will immediately be scheduled on the provided executor, but if `SemiFuture::unsafeToInlineFuture()` is used to force the continuation to run inline, it will run inline immediately, potentially leading to deadlocks if you're not careful.
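For illustration, here is a minimal sketch of the two ways an asynchronous operation can observe a token; this is not taken from the server code, and the executor `exec`, the surrounding function, and the error message are assumed for the example:
```c++
// Hypothetical snippet inside an asynchronous operation that was handed `token`.

// 1. Poll the token and refuse to start (or continue) work once canceled.
if (token.isCanceled()) {
    return Status(ErrorCodes::CallbackCanceled, "operation was canceled");
}

// 2. React to cancellation by chaining a continuation on the onCancel() future;
//    it is scheduled on `exec` once the associated CancellationSource is canceled.
token.onCancel().thenRunOn(exec).getAsync([](Status s) {
    if (s.isOK()) {
        // cancel() was called on the associated CancellationSource; stop the work.
    }
    // A not-OK status means cancel() was never called before the source went away.
});
```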
### Example of a Service Performing Cancelable, Asynchronous Work
-We'll use the WaitForMajorityService as an example of how work can be scheduled and cancelled using CancelationSources and CancelationTokens. First, we'll see how a consumer might use a service implementing cancelation. Then, we'll see how a service might implement cancelation internally.
+We'll use the `WaitForMajorityService` as an example of how work can be scheduled and canceled using CancellationSources and CancellationTokens. First, we'll see how a consumer might use a service implementing cancellation. Then, we'll see how a service might implement cancellation internally.
#### Using a Cancelable Service
-The WaitForMajorityService allows consumers to asynchronously wait until an `opTime` is majority committed. Consumers can request a wait on a specific `opTime` by calling the function `WaitForMajorityService::waitUntilMajority(OpTime opTime, CancelationToken token)`. This call will return a future that will be resolved when that `opTime` has been majority committed or otherwise set with an error.
+The WaitForMajorityService allows consumers to asynchronously wait until an `opTime` is majority committed. Consumers can request a wait on a specific `opTime` by calling the function `WaitForMajorityService::waitUntilMajority(OpTime opTime, CancellationToken token)`. This call will return a future that will be resolved when that `opTime` has been majority committed or otherwise set with an error.
In some cases, though, a consumer might realize that it no longer wants to wait on an `opTime`. For example, the consumer might be going through shut-down, and it needs to clean up all of its resources cleanly right away. Or, it might just realize that the `opTime` is no longer relevant, and it would like to tell the `WaitForMajorityService` that it no longer needs to wait on it, so the `WaitForMajorityService` can conserve its own resources.
-Consumers can easily cancel existing requests to wait on `opTime`s in situations like these by making use of the `CancelationToken` argument accepted by `WaitForMajorityService::waitUntilMajority`. For any `opTime` waits that should be cancelled together, they simply pass `CancelationTokens` from the same `CancelationSource` into the requests to wait on those `opTimes`:
+Consumers can easily cancel existing requests to wait on `opTime`s in situations like these by making use of the `CancellationToken` argument accepted by `WaitForMajorityService::waitUntilMajority`. For any `opTime` waits that should be canceled together, they simply pass `CancellationToken`s from the same `CancellationSource` into the requests to wait on those `opTime`s:
```c++
-CancelationSource source;
+CancellationSource source;
auto opTimeFuture = WaitForMajorityService::waitUntilMajority(opTime, source.token());
auto opTimeFuture2 = WaitForMajorityService::waitUntilMajority(opTime2, source.token());
```
@@ -53,15 +53,15 @@ And whenever they want to cancel the waits on those `opTime`s, they can simply c
```c++
source.cancel()
```
-After this call, the `WaitForMajorityService` will stop waiting on `opTime` and `opTime2`. And the futures returned by all calls to `waitUntilMajority` that were passed `CancelationToken`s from the `CancelationSource source` (in this case, `opTimeFuture` and `opTimeFuture2`) will be resolved with `ErrorCodes::CallbackCanceled`.
+After this call, the `WaitForMajorityService` will stop waiting on `opTime` and `opTime2`. And the futures returned by all calls to `waitUntilMajority` that were passed `CancellationToken`s from the `CancellationSource source` (in this case, `opTimeFuture` and `opTimeFuture2`) will be resolved with `ErrorCodes::CallbackCanceled`.
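A consumer can then observe the canceled wait through the returned future. The following is a minimal sketch of hypothetical consumer code, reusing the futures from the snippet above and assuming a blocking wait is acceptable at this point:
```c++
// After source.cancel(), both futures resolve promptly with CallbackCanceled.
auto status = std::move(opTimeFuture).getNoThrow();
if (status.code() == ErrorCodes::CallbackCanceled) {
    // The wait was canceled; clean up without treating this as a hard failure.
} else if (status.isOK()) {
    // opTime is majority committed.
}
```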
#### Implementing a Cancelable Service
-Now we'll see how `WaitForMajorityService` might be implemented, at a high level, to support the cancelable API we saw in the last section. The `WaitForMajorityService` will need to ensure that calls to `WaitForMajorityService::waitUntilMajority(opTime, token)` schedule work to wait until `opTime` is majority committed, and that this scheduled work will stop if `token` has been canceled. To do so, it can use either of the functions `CancelationToken` has that expose the underlying cancelation state: it can either call `CancelationToken::isCanceled()` at some appropriate time on `token` and conditionally stop waiting on `opTime`, or it can chain a continuation onto the future returned by `CancelationToken:onCancel()` that will stop the wait. This continuation will run when the `token` is canceled, as cancelation resolves the future returned by `CancelationToken::onCancel()`.
+Now we'll see how `WaitForMajorityService` might be implemented, at a high level, to support the cancelable API we saw in the last section. The `WaitForMajorityService` will need to ensure that calls to `WaitForMajorityService::waitUntilMajority(opTime, token)` schedule work to wait until `opTime` is majority committed, and that this scheduled work will stop if `token` has been canceled. To do so, it can use either of the `CancellationToken` member functions that expose the underlying cancellation state: it can call `CancellationToken::isCanceled()` on `token` at some appropriate time and conditionally stop waiting on `opTime`, or it can chain a continuation onto the future returned by `CancellationToken::onCancel()` that will stop the wait. This continuation will run when `token` is canceled, as cancellation resolves the future returned by `CancellationToken::onCancel()`.
-To keep the example general, we're going to elide some details: for now, assume that calling `stopWaiting(opTime)` performs all the work needed for the `WaitForMajorityService` to stop waiting on `opTime`. Additionally, assume that `opTimePromise` is the promise that resolves the future returned by the call to `waitUntilMajority(opTime, token)`. Then, to implement cancelation for some request to wait on `opTime` with an associated token `token`, the `WaitForMajorityService` can add something like the following code to the function it uses to accept requests:
+To keep the example general, we're going to elide some details: for now, assume that calling `stopWaiting(opTime)` performs all the work needed for the `WaitForMajorityService` to stop waiting on `opTime`. Additionally, assume that `opTimePromise` is the promise that resolves the future returned by the call to `waitUntilMajority(opTime, token)`. Then, to implement cancellation for some request to wait on `opTime` with an associated token `token`, the `WaitForMajorityService` can add something like the following code to the function it uses to accept requests:
``` c++
-SemiFuture<void> WaitForMajorityService::waitUntilMajority(OpTime opTime, CancelationToken token) {
+SemiFuture<void> WaitForMajorityService::waitUntilMajority(OpTime opTime, CancellationToken token) {
// ... Create request that will be processed by a background thread
token.onCancel().thenRunOn(executor).getAsync([](Status s) {
@@ -72,25 +72,25 @@ SemiFuture<void> WaitForMajorityService::waitUntilMajority(OpTime opTime, Cancel
});
}
```
-Whenever `token` is canceled, the continuation above will run, which will stop the `WaitForMajorityService` from waiting on `opTime`, and resolve the future originally returned from the call to `waitUntilMajority(opTime, token)` with an error. There's just one more detail -- we don't want the cancelation and ordinary completion of the work to race. If `token` is canceled _after_ we've finished waiting for opTime to be majority committed, there's no work to cancel, and we can't set opTimePromise twice! To fix this, we can simply protect opTimePromise with an atomic, ensuring that it will be set exactly once. Then, before we perform either cancelation work or fulfilling the promise by ordinary means, we can use the atomic to check that the promise has not already been completed. This is the gist of what it takes to write a service performing cancelable work! To see the full details of making a cancelable `WaitForMajorityService`, see this [commit](https://github.com/mongodb/mongo/commit/4fa2fcb16107c860448b58cd66798bae140e7263).
+Whenever `token` is canceled, the continuation above will run, which will stop the `WaitForMajorityService` from waiting on `opTime`, and resolve the future originally returned from the call to `waitUntilMajority(opTime, token)` with an error. There's just one more detail -- we don't want the cancellation and ordinary completion of the work to race. If `token` is canceled _after_ we've finished waiting for `opTime` to be majority committed, there's no work to cancel, and we can't set `opTimePromise` twice! To fix this, we can simply protect `opTimePromise` with an atomic, ensuring that it will be set exactly once. Then, before we either perform the cancellation work or fulfill the promise by ordinary means, we can use the atomic to check that the promise has not already been completed. This is the gist of what it takes to write a service performing cancelable work! To see the full details of making a cancelable `WaitForMajorityService`, see this [commit](https://github.com/mongodb/mongo/commit/4fa2fcb16107c860448b58cd66798bae140e7263).
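That exactly-once guard might look roughly like the following sketch; the member names are hypothetical, and `AtomicWord<bool>` stands in for whatever synchronization the real service uses:
```c++
// Guard ensuring opTimePromise is completed exactly once, no matter which of
// the two racing paths (ordinary completion vs. cancellation) runs first.
AtomicWord<bool> completed{false};

auto fulfillOnce = [&](Status status) {
    // swap() returns the previous value, so only the first caller wins.
    if (!completed.swap(true)) {
        opTimePromise.setFrom(std::move(status));
    }
};

// Called from the ordinary completion path once opTime is majority committed:
//     fulfillOnce(Status::OK());
// Called from the token.onCancel() continuation shown above:
//     fulfillOnce({ErrorCodes::CallbackCanceled, "wait for majority canceled"});
```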
-### Cancelation Hierarchies
-In the above example, we saw how we can use a single `CancelationSource` and associated tokens to cancel work. This works well when we can associate a specific group of asynchronous tasks with a single `CancelationSource`. Sometimes, we may want sub-tasks of a larger operation to be individually cancelable, but also to be able to cancel _all_ tasks associated with the larger operation at once. CancelationSources can be managed hierarchically to make this sort of situation manageable. A hierarchy between CancelationSources is created by passing a `CancelationToken` associated with one `CancelationSource` to the constructor of a newly-created `CancelationSource` as follows:
+### Cancellation Hierarchies
+In the above example, we saw how we can use a single `CancellationSource` and associated tokens to cancel work. This works well when we can associate a specific group of asynchronous tasks with a single `CancellationSource`. Sometimes, we may want sub-tasks of a larger operation to be individually cancelable, but also to be able to cancel _all_ tasks associated with the larger operation at once. CancellationSources can be managed hierarchically to make this sort of situation manageable. A hierarchy between CancellationSources is created by passing a `CancellationToken` associated with one `CancellationSource` to the constructor of a newly-created `CancellationSource` as follows:
```c++
-CancelationSource parentSource;
-CancelationSource childSource(parentSource.token());
+CancellationSource parentSource;
+CancellationSource childSource(parentSource.token());
```
-As the naming suggests, we say that the `parentSource` and `childSource` `CancelationSources` are in a hierarchy, with `parentSource` higher up in the hierarchy. When a `CancelationSource` higher up in a cancelation hierarchy is canceled, all descendant `CancelationSources` are automatically canceled as well. Conversely, the `CancelationSources` lower down in a cancelation hierarchy can be canceled without affecting any other `CancelationSources` higher up or at the same level of the hierarchy.
+As the naming suggests, we say that the `parentSource` and `childSource` `CancellationSources` are in a hierarchy, with `parentSource` higher up in the hierarchy. When a `CancellationSource` higher up in a cancellation hierarchy is canceled, all descendant `CancellationSources` are automatically canceled as well. Conversely, the `CancellationSources` lower down in a cancellation hierarchy can be canceled without affecting any other `CancellationSources` higher up or at the same level of the hierarchy.
-As an example of this sort of hierarchy of cancelable operations, let's consider the case of [hedged reads](https://docs.mongodb.com/manual/core/read-preference-hedge-option/). Note that hedged reads do not currently use `CancelationTokens` in their implementation; this is for example purposes only. When a query is specified with the "hedged read" option on a sharded cluster, mongos will route the read to two replica set members for each shard targeted by the query return the first response it receives per-shard. Therefore, as soon as it receives a response from one replica set member on a shard, it can cancel the other request it made to a different member on the same shard. We can use the pattern discussed above to do this sort of cancelation. At a high level, the code might look something like this:
+As an example of this sort of hierarchy of cancelable operations, let's consider the case of [hedged reads](https://docs.mongodb.com/manual/core/read-preference-hedge-option/). Note that hedged reads do not currently use `CancellationTokens` in their implementation; this is for example purposes only. When a query is specified with the "hedged read" option on a sharded cluster, mongos will route the read to two replica set members for each shard targeted by the query and return the first response it receives per shard. Therefore, as soon as it receives a response from one replica set member on a shard, it can cancel the other request it made to a different member on the same shard. We can use the pattern discussed above to do this sort of cancellation. At a high level, the code might look something like this:
```c++
-// Assuming we already have a CancelationSource called hedgedReadSource for the entire
-// hedged-read operation, we create child CancelationSources used to manage the cancelation
+// Assuming we already have a CancellationSource called hedgedReadSource for the entire
+// hedged-read operation, we create child CancellationSources used to manage the cancellation
// state for each request to a replica set member of the shard
-CancelationSource hostOneSource(hedgedReadSource.token());
-CancelationSource hostTwoSource(hedgedReadSource.token());
+CancellationSource hostOneSource(hedgedReadSource.token());
+CancellationSource hostTwoSource(hedgedReadSource.token());
// Send the read query to two different replica set members of some shard
auto readOneFuture = routeRequestToHost(query, host1, hostOneSource.token());
@@ -116,7 +116,7 @@ auto firstResponse = whenAny(std::move(readOneFuture), std::move(readTwoFuture))
}
});
```
-We can see the utility of the hierarchy of `CancelationSources` by examining the case where the client indicates that it would like to kill the entire hedged read operation. Rather than having to track every `CancelationSource` used to manage different requests performed throughout the operation, we can call
+We can see the utility of the hierarchy of `CancellationSources` by examining the case where the client indicates that it would like to kill the entire hedged read operation. Rather than having to track every `CancellationSource` used to manage different requests performed throughout the operation, we can call
```c++
hedgedReadSource.cancel()
@@ -124,13 +124,13 @@ hedgedReadSource.cancel()
and all of the operations taking place as a part of the hedged read will be canceled!
-There's also a performance benefit to cancelation hierarchies: since the requests to each host is only a part of the work performed by the larger hedged-read operation, at least one request will complete well before the entire operation does. Since all of the cancelation callback state for work done by, say, the request to `host1`, is owned by `hostOneSource`, rather than the parent `hedgedReadSource`, it can independently be cleaned up and the relevant memory freed before the entire hedged read operation is complete. For more details, see the comment for the constructor `CancelationSource(const CancelationToken& token)` in [cancelation.h](https://github.com/mongodb/mongo/blob/99d28dd184ada37720d0dae1f3d8c35fec85bd4b/src/mongo/util/cancelation.h#L216-L229).
+There's also a performance benefit to cancellation hierarchies: since the request to each host is only part of the work performed by the larger hedged-read operation, at least one request will complete well before the entire operation does. Since all of the cancellation callback state for work done by, say, the request to `host1` is owned by `hostOneSource`, rather than the parent `hedgedReadSource`, it can independently be cleaned up and the relevant memory freed before the entire hedged read operation is complete. For more details, see the comment for the constructor `CancellationSource(const CancellationToken& token)` in [cancellation.h](https://github.com/mongodb/mongo/blob/99d28dd184ada37720d0dae1f3d8c35fec85bd4b/src/mongo/util/cancellation.h#L216-L229).
### Integration With Future Types
-`CancelationSources` and `CancelationTokens` integrate neatly with the variety of `Future` types to make it easy to cancel work chained onto `Future` continuations.
+`CancellationSources` and `CancellationTokens` integrate neatly with the variety of `Future` types to make it easy to cancel work chained onto `Future` continuations.
#### ExecutorFutures
-Integration with `ExecutorFutures` is provided primarily by the `CancelableExecutor` type. If you have some work that you'd like to run on an Executor `exec`, but want to cancel that work if a `CancelationToken` `token` is canceled, you can simply use `CancelableExecutor::make(exec, token)` to get an executor that will run work on `exec` only if `token` has not been canceled when that work is ready to run. As an example, take the following code snippet:
+Integration with `ExecutorFutures` is provided primarily by the `CancelableExecutor` type. If you have some work that you'd like to run on an Executor `exec`, but want to cancel that work if a `CancellationToken` `token` is canceled, you can simply use `CancelableExecutor::make(exec, token)` to get an executor that will run work on `exec` only if `token` has not been canceled when that work is ready to run. As an example, take the following code snippet:
```c++
ExecutorFuture(exec).then([] { doThing1(); })
.thenRunOn(CancelableExecutor::make(exec, token))
@@ -145,22 +145,22 @@ In this example, `doThing1()` will run on the executor `exec`; when it has compl
The primary interface for waiting on futures in a cancelable manner is the free function template:
```c++
template <typename FutureT, typename Value = typename FutureT::value_type>
-SemiFuture<Value> future_util::withCancelation(FutureT&& f, const CancelationToken& token);
+SemiFuture<Value> future_util::withCancellation(FutureT&& f, const CancellationToken& token);
```
-Note that this function also works with executor futures. This function returns a SemiFuture<T> that is resolved when either the input future `f` is resolved or the input `CancelationToken token` is canceled - whichever comes first. The returned `SemiFuture` is set with the result of the input future when it resolves first, and with an `ErrorCodes::CallbackCanceled` status if cancelation occurs first.
+Note that this function also works with executor futures. It returns a `SemiFuture<T>` that is resolved when either the input future `f` is resolved or the input `CancellationToken token` is canceled, whichever comes first. The returned `SemiFuture` is set with the result of the input future when it resolves first, and with an `ErrorCodes::CallbackCanceled` status if cancellation occurs first.
-For example, if we have a `Future<Request> requestFuture` and `CancelationToken token`, and we want to do some work when _either_ `requestFuture` is resolved _or_ `token` is canceled, we can simply do the following:
+For example, if we have a `Future<Request> requestFuture` and `CancellationToken token`, and we want to do some work when _either_ `requestFuture` is resolved _or_ `token` is canceled, we can simply do the following:
```c++
-future_util::withCancelation(requestFuture, token)
+future_util::withCancellation(requestFuture, token)
.then([](Request r) { /* requestFuture was fulfilled; handle it */ })
- .onError<ErrorCodes::CallbackCanceled>([](Status s) { /* handle cancelation */ })
+ .onError<ErrorCodes::CallbackCanceled>([](Status s) { /* handle cancellation */ })
.onError([](Status s) {/* handle other errors */})
```
### Links to Relevant Code + Example Tests
-- [CancelationSource/CancelationToken implementations](https://github.com/mongodb/mongo/blob/master/src/mongo/util/cancelation.h)
-- [CancelationSource/CancelationToken unit tests](https://github.com/mongodb/mongo/blob/master/src/mongo/util/cancelation_test.cpp)
+- [CancellationSource/CancellationToken implementations](https://github.com/mongodb/mongo/blob/master/src/mongo/util/cancellation.h)
+- [CancellationSource/CancellationToken unit tests](https://github.com/mongodb/mongo/blob/master/src/mongo/util/cancellation_test.cpp)
- [CancelableExecutor implementation](https://github.com/mongodb/mongo/blob/master/src/mongo/executor/cancelable_executor.h)
- [CancelableExecutor unit tests](https://github.com/mongodb/mongo/blob/master/src/mongo/executor/cancelable_executor_test.cpp)
-- [future_util::withCancelation implementation](https://github.com/mongodb/mongo/blob/99d28dd184ada37720d0dae1f3d8c35fec85bd4b/src/mongo/util/future_util.h#L658)
-- [future_util::withCancelation unit tests](https://github.com/mongodb/mongo/blob/99d28dd184ada37720d0dae1f3d8c35fec85bd4b/src/mongo/util/future_util_test.cpp#L1268-L1343)
+- [future_util::withCancellation implementation](https://github.com/mongodb/mongo/blob/99d28dd184ada37720d0dae1f3d8c35fec85bd4b/src/mongo/util/future_util.h#L658)
+- [future_util::withCancellation unit tests](https://github.com/mongodb/mongo/blob/99d28dd184ada37720d0dae1f3d8c35fec85bd4b/src/mongo/util/future_util_test.cpp#L1268-L1343)
diff --git a/src/mongo/util/SConscript b/src/mongo/util/SConscript
index f3aa885ab3c..681207594db 100644
--- a/src/mongo/util/SConscript
+++ b/src/mongo/util/SConscript
@@ -613,7 +613,7 @@ icuEnv.CppUnitTest(
'background_job_test.cpp',
'background_thread_clock_source_test.cpp',
'base64_test.cpp',
- 'cancelation_test.cpp',
+ 'cancellation_test.cpp',
'clock_source_mock_test.cpp',
'concepts_test.cpp',
'container_size_helper_test.cpp',
@@ -776,8 +776,8 @@ env.Benchmark(
)
env.Benchmark(
- target='cancelation_bm',
- source='cancelation_bm.cpp',
+ target='cancellation_bm',
+ source='cancellation_bm.cpp',
)
env.Benchmark(
diff --git a/src/mongo/util/cancelation.h b/src/mongo/util/cancellation.h
index a527b11ef1e..a90837b91d0 100644
--- a/src/mongo/util/cancelation.h
+++ b/src/mongo/util/cancellation.h
@@ -38,52 +38,52 @@ namespace detail {
inline Status getCancelNeverCalledOnSourceError() {
static const StaticImmortal<Status> cancelNeverCalledOnSourceError{
ErrorCodes::CallbackCanceled,
- "Cancel was never called on the CancelationSource for this token."};
+ "Cancel was never called on the CancellationSource for this token."};
return *cancelNeverCalledOnSourceError;
}
/**
- * Holds the main state shared between CancelationSource/CancelationToken.
+ * Holds the main state shared between CancellationSource/CancellationToken.
*
- * CancelationState objects are held by intrusive_ptr, and the ownership of a CancelationState
- * object is shared between all CancelationSource objects and CancelationToken objects which point
+ * CancellationState objects are held by intrusive_ptr, and the ownership of a CancellationState
+ * object is shared between all CancellationSource objects and CancellationToken objects which point
* to it.
*
- * When the last CancelationSource that points to a CancelationState object is destroyed,
- * CancelationState::dismiss() is called, which sets an error on its cancelation promise if
- * CancelationState::cancel() has not already been called. This serves to clean up the memory for
+ * When the last CancellationSource that points to a CancellationState object is destroyed,
+ * CancellationState::dismiss() is called, which sets an error on its cancellation promise if
+ * CancellationState::cancel() has not already been called. This serves to clean up the memory for
* all callbacks associated with that promise once it is no longer possible for cancel() to be
* called on the source.
*/
-class CancelationState : public RefCountable {
+class CancellationState : public RefCountable {
enum class State : int { kInit, kCanceled, kDismissed };
public:
- CancelationState() = default;
+ CancellationState() = default;
- ~CancelationState() {
+ ~CancellationState() {
auto state = _state.load();
invariant(state == State::kCanceled || state == State::kDismissed);
- invariant(_cancelationPromise.getFuture().isReady());
+ invariant(_cancellationPromise.getFuture().isReady());
}
- CancelationState(const CancelationState& other) = delete;
- CancelationState& operator=(const CancelationState& other) = delete;
+ CancellationState(const CancellationState& other) = delete;
+ CancellationState& operator=(const CancellationState& other) = delete;
- CancelationState(CancelationState&& other) = delete;
- CancelationState& operator=(CancelationState&& other) = delete;
+ CancellationState(CancellationState&& other) = delete;
+ CancellationState& operator=(CancellationState&& other) = delete;
void dismiss() {
State precondition{State::kInit};
if (_state.compareAndSwap(&precondition, State::kDismissed)) {
- _cancelationPromise.setError(getCancelNeverCalledOnSourceError());
+ _cancellationPromise.setError(getCancelNeverCalledOnSourceError());
}
}
void cancel() {
State precondition{State::kInit};
if (_state.compareAndSwap(&precondition, State::kCanceled)) {
- _cancelationPromise.emplaceValue();
+ _cancellationPromise.emplaceValue();
}
}
@@ -92,7 +92,7 @@ public:
}
SharedSemiFuture<void> onCancel() const {
- return _cancelationPromise.getFuture();
+ return _cancellationPromise.getFuture();
}
/**
@@ -112,67 +112,67 @@ private:
* A promise that will be signaled with success when cancel() is called and with an error when
* dismiss() is called.
*/
- SharedPromise<void> _cancelationPromise;
+ SharedPromise<void> _cancellationPromise;
};
/**
- * Wrapper around an intrusive_ptr<CancelationState> which, when destroyed, dismisses the
- * CancelationState. These used to track how many CancelationSource objects point to the same
- * CancelationState and call dismiss() on the CancelationState when the last CancelationSource
+ * Wrapper around an intrusive_ptr<CancellationState> which, when destroyed, dismisses the
+ * CancellationState. These are used to track how many CancellationSource objects point to the same
+ * CancellationState and call dismiss() on the CancellationState when the last CancellationSource
* pointing to it is destroyed.
*/
-class CancelationStateHolder : public RefCountable {
+class CancellationStateHolder : public RefCountable {
public:
- CancelationStateHolder() = default;
+ CancellationStateHolder() = default;
- ~CancelationStateHolder() {
+ ~CancellationStateHolder() {
_state->dismiss();
}
- CancelationStateHolder(const CancelationStateHolder&) = delete;
- CancelationStateHolder& operator=(const CancelationStateHolder&) = delete;
+ CancellationStateHolder(const CancellationStateHolder&) = delete;
+ CancellationStateHolder& operator=(const CancellationStateHolder&) = delete;
- CancelationStateHolder(CancelationStateHolder&&) = delete;
- CancelationStateHolder& operator=(CancelationStateHolder&&) = delete;
+ CancellationStateHolder(CancellationStateHolder&&) = delete;
+ CancellationStateHolder& operator=(CancellationStateHolder&&) = delete;
- boost::intrusive_ptr<CancelationState> get() const {
+ boost::intrusive_ptr<CancellationState> get() const {
return _state;
}
private:
- boost::intrusive_ptr<CancelationState> _state{make_intrusive<CancelationState>()};
+ boost::intrusive_ptr<CancellationState> _state{make_intrusive<CancellationState>()};
};
} // namespace detail
/**
- * Type used to check for cancelation of a task. Tokens are normally obtained through an associated
- * CancelationSource by calling CancelationSource::token(), but an uncancelable token can also be
- * constructed by using the CancelationToken::uncancelable() static factory function.
+ * Type used to check for cancellation of a task. Tokens are normally obtained through an associated
+ * CancellationSource by calling CancellationSource::token(), but an uncancelable token can also be
+ * constructed by using the CancellationToken::uncancelable() static factory function.
*/
-class CancelationToken {
+class CancellationToken {
public:
// Constructs an uncancelable token, i.e. a token without an associated source.
- static CancelationToken uncancelable() {
- auto state = make_intrusive<detail::CancelationState>();
+ static CancellationToken uncancelable() {
+ auto state = make_intrusive<detail::CancellationState>();
// Make the state uncancelable.
state->dismiss();
- return CancelationToken(std::move(state));
+ return CancellationToken(std::move(state));
}
- explicit CancelationToken(boost::intrusive_ptr<const detail::CancelationState> state)
+ explicit CancellationToken(boost::intrusive_ptr<const detail::CancellationState> state)
: _state(std::move(state)) {}
- ~CancelationToken() = default;
+ ~CancellationToken() = default;
- CancelationToken(const CancelationToken& other) = default;
- CancelationToken& operator=(const CancelationToken& other) = default;
+ CancellationToken(const CancellationToken& other) = default;
+ CancellationToken& operator=(const CancellationToken& other) = default;
- CancelationToken(CancelationToken&& other) = default;
- CancelationToken& operator=(CancelationToken&& other) = default;
+ CancellationToken(CancellationToken&& other) = default;
+ CancellationToken& operator=(CancellationToken&& other) = default;
/**
- * Returns whether or not cancel() has been called on the CancelationSource object from which
+ * Returns whether or not cancel() has been called on the CancellationSource object from which
* this token was constructed.
*/
bool isCanceled() const {
@@ -181,9 +181,9 @@ public:
/**
* Returns a future that will be resolved with success when cancel() has been called on the
- * CancelationSource object from which this token was constructed, or with an error containing
- * CallbackCanceled if that CancelationSource object is destroyed without having cancel() called
- * on it.
+ * CancellationSource object from which this token was constructed, or with an error containing
+ * CallbackCanceled if that CancellationSource object is destroyed without having cancel()
+ * called on it.
*/
SemiFuture<void> onCancel() const {
return _state->onCancel().semi();
@@ -199,38 +199,38 @@ public:
private:
/**
- * Points to the object containing the status of cancelation.
+ * Points to the object containing the status of cancellation.
*/
- boost::intrusive_ptr<const detail::CancelationState> _state;
+ boost::intrusive_ptr<const detail::CancellationState> _state;
};
/**
- * Type used to manage the cancelation of a task. CancelationSource is used to cancel a task, and
- * CancelationTokens obtained via CancelationSource::token() are used to check for and handle
- * cancelation.
+ * Type used to manage the cancellation of a task. CancellationSource is used to cancel a task, and
+ * CancellationTokens obtained via CancellationSource::token() are used to check for and handle
+ * cancellation.
*/
-class CancelationSource {
+class CancellationSource {
public:
- CancelationSource() = default;
+ CancellationSource() = default;
/**
- * Creates a CancelationSource that will be canceled when the input token is canceled. This
- * allows the construction of cancelation hierarchies.
+ * Creates a CancellationSource that will be canceled when the input token is canceled. This
+ * allows the construction of cancellation hierarchies.
*
* For example, if we have:
*
- * CancelationSource first;
- * CancelationSource second(first.token());
- * CancelationSource third(second.token());
+ * CancellationSource first;
+ * CancellationSource second(first.token());
+ * CancellationSource third(second.token());
*
* Calling third.cancel() will only cancel tokens obtained from third.
* Calling second.cancel() will cancel tokens obtained from second, and call third.cancel().
* Calling first.cancel() will thus cancel the whole hierarchy.
*/
- explicit CancelationSource(const CancelationToken& token) {
+ explicit CancellationSource(const CancellationToken& token) {
// Cancel the source when the input token is canceled.
//
- // Note that because this captures the CancelationState object directly, and not the
- // CancelationStateHolder, this will still allow callback state attached to this
+ // Note that because this captures the CancellationState object directly, and not the
+ // CancellationStateHolder, this will still allow callback state attached to this
// source's tokens to be cleaned up as soon as the last source is destroyed, even if the
// parent token still exists. This means that long-lived tokens can have many sub-sources
// for tasks which start and complete without worrying about too much memory build-up.
@@ -242,15 +242,15 @@ public:
/**
* Destroys shared state associated with any tokens obtained from this source, and does not run
- * cancelation callbacks.
+ * cancellation callbacks.
*/
- ~CancelationSource() = default;
+ ~CancellationSource() = default;
- CancelationSource(const CancelationSource& other) = default;
- CancelationSource& operator=(const CancelationSource& other) = default;
+ CancellationSource(const CancellationSource& other) = default;
+ CancellationSource& operator=(const CancellationSource& other) = default;
- CancelationSource(CancelationSource&& other) = default;
- CancelationSource& operator=(CancelationSource&& other) = default;
+ CancellationSource(CancellationSource&& other) = default;
+ CancellationSource& operator=(CancellationSource&& other) = default;
/**
* Cancel the token. If no call to cancel has previously been made, this will cause all
@@ -261,16 +261,16 @@ public:
}
/**
- * Returns a CancelationToken which will be canceled when this source is
+ * Returns a CancellationToken which will be canceled when this source is
* canceled.
*/
- CancelationToken token() const {
- return CancelationToken{_stateHolder->get()};
+ CancellationToken token() const {
+ return CancellationToken{_stateHolder->get()};
}
private:
- boost::intrusive_ptr<detail::CancelationStateHolder> _stateHolder{
- make_intrusive<detail::CancelationStateHolder>()};
+ boost::intrusive_ptr<detail::CancellationStateHolder> _stateHolder{
+ make_intrusive<detail::CancellationStateHolder>()};
};
} // namespace mongo
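The renamed types keep the semantics documented in the header comments above: a CancellationSource owns the shared state, tokens observe it, and onCancel() resolves with success once cancel() runs, or with CallbackCanceled if the last source is destroyed without cancel() ever being called. A minimal sketch of that flow, assuming only the renamed header above; the function name cancellationSketch is illustrative, not part of the patch:

#include "mongo/util/assert_util.h"
#include "mongo/util/cancellation.h"

namespace mongo {

void cancellationSketch() {
    CancellationSource source;
    CancellationToken token = source.token();

    invariant(!token.isCanceled());

    // onCancel() is a SemiFuture<void>; it becomes ready with success once cancel() runs.
    auto onCancel = token.onCancel();
    invariant(!onCancel.isReady());

    source.cancel();

    invariant(token.isCanceled());
    invariant(onCancel.isReady());
    invariant(onCancel.getNoThrow().isOK());
}

}  // namespace mongo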
diff --git a/src/mongo/util/cancelation_bm.cpp b/src/mongo/util/cancellation_bm.cpp
index eced66ec900..ce5ff68741e 100644
--- a/src/mongo/util/cancelation_bm.cpp
+++ b/src/mongo/util/cancellation_bm.cpp
@@ -32,13 +32,13 @@
#include <benchmark/benchmark.h>
#include <forward_list>
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
namespace mongo {
void BM_create_single_token_from_source(benchmark::State& state) {
- CancelationSource source;
+ CancellationSource source;
for (auto _ : state) {
benchmark::DoNotOptimize(source.token());
}
@@ -46,14 +46,14 @@ void BM_create_single_token_from_source(benchmark::State& state) {
void BM_uncancelable_token_ctor(benchmark::State& state) {
for (auto _ : state) {
- benchmark::DoNotOptimize(CancelationToken::uncancelable());
+ benchmark::DoNotOptimize(CancellationToken::uncancelable());
}
}
void BM_cancel_tokens_from_single_source(benchmark::State& state) {
for (auto _ : state) {
state.PauseTiming(); // Do not time the construction and set-up of the source + tokens.
- CancelationSource source;
+ CancellationSource source;
for (int i = 0; i < state.range(0); ++i) {
source.token().onCancel().unsafeToInlineFuture().getAsync([](auto) {});
}
@@ -63,44 +63,44 @@ void BM_cancel_tokens_from_single_source(benchmark::State& state) {
}
void BM_check_if_token_from_source_canceled(benchmark::State& state) {
- CancelationSource source;
+ CancellationSource source;
auto token = source.token();
for (auto _ : state) {
benchmark::DoNotOptimize(token.isCanceled());
}
}
-void BM_cancelation_source_from_token_ctor(benchmark::State& state) {
- CancelationSource source;
+void BM_cancellation_source_from_token_ctor(benchmark::State& state) {
+ CancellationSource source;
for (auto _ : state) {
- CancelationSource child(source.token());
+ CancellationSource child(source.token());
benchmark::DoNotOptimize(child);
}
}
-void BM_cancelation_source_default_ctor(benchmark::State& state) {
+void BM_cancellation_source_default_ctor(benchmark::State& state) {
for (auto _ : state) {
- CancelationSource source;
+ CancellationSource source;
benchmark::DoNotOptimize(source);
}
}
/**
- * Constructs a cancelation 'hierarchy' of depth state.range(0), with one cancelation source at
+ * Constructs a cancellation 'hierarchy' of depth state.range(0), with one cancellation source at
* each level and one token obtained from each source. When root.cancel() is called, the whole
* hierarchy (all sources in the hierarchy, and any tokens obtained from any source in the
* hierarchy) will be canceled.
*/
-void BM_ranged_depth_cancelation_hierarchy(benchmark::State& state) {
+void BM_ranged_depth_cancellation_hierarchy(benchmark::State& state) {
for (auto _ : state) {
state.PauseTiming();
- CancelationSource root;
- CancelationSource parent = root;
- // We use list to keep every cancelation source in the hierarchy in scope.
- std::forward_list<CancelationSource> list;
+ CancellationSource root;
+ CancellationSource parent = root;
+ // We use a list to keep every cancellation source in the hierarchy in scope.
+ std::forward_list<CancellationSource> list;
for (int i = 0; i < state.range(0); ++i) {
list.push_front(parent);
- CancelationSource child(parent.token());
+ CancellationSource child(parent.token());
child.token().onCancel().unsafeToInlineFuture().getAsync([](auto) {});
parent = child;
}
@@ -113,8 +113,8 @@ BENCHMARK(BM_create_single_token_from_source);
BENCHMARK(BM_uncancelable_token_ctor);
BENCHMARK(BM_cancel_tokens_from_single_source)->RangeMultiplier(10)->Range(1, 100 * 100 * 100);
BENCHMARK(BM_check_if_token_from_source_canceled);
-BENCHMARK(BM_cancelation_source_from_token_ctor);
-BENCHMARK(BM_cancelation_source_default_ctor);
-BENCHMARK(BM_ranged_depth_cancelation_hierarchy)->RangeMultiplier(10)->Range(1, 1000);
+BENCHMARK(BM_cancellation_source_from_token_ctor);
+BENCHMARK(BM_cancellation_source_default_ctor);
+BENCHMARK(BM_ranged_depth_cancellation_hierarchy)->RangeMultiplier(10)->Range(1, 1000);
} // namespace mongo
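The hierarchy benchmark above exercises the parent/child wiring described in cancellation.h: a child CancellationSource constructed from a parent token is canceled when the parent is canceled, but canceling the child leaves the parent alone. A small sketch of that behavior under the renamed API; hierarchySketch is an illustrative name only:

#include "mongo/util/assert_util.h"
#include "mongo/util/cancellation.h"

namespace mongo {

void hierarchySketch() {
    CancellationSource root;
    CancellationSource child(root.token());

    // Canceling the child does not cancel the parent.
    child.cancel();
    invariant(!root.token().isCanceled());
    invariant(child.token().isCanceled());

    // Canceling the root propagates to any sources built from its tokens.
    CancellationSource other(root.token());
    root.cancel();
    invariant(other.token().isCanceled());
}

}  // namespace mongo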
diff --git a/src/mongo/util/cancelation_test.cpp b/src/mongo/util/cancellation_test.cpp
index 7536ed5b320..605418a390c 100644
--- a/src/mongo/util/cancelation_test.cpp
+++ b/src/mongo/util/cancellation_test.cpp
@@ -31,15 +31,15 @@
#include "mongo/unittest/death_test.h"
#include "mongo/unittest/unittest.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
namespace mongo {
namespace {
-TEST(CancelTest, CancelSourceDestructorDoesNotCauseCancelation) {
+TEST(CancelTest, CancelSourceDestructorDoesNotCauseCancellation) {
bool ran{false};
{
- CancelationSource source;
+ CancellationSource source;
source.token().onCancel().unsafeToInlineFuture().getAsync([&ran](Status s) mutable {
ASSERT_EQ(s, detail::getCancelNeverCalledOnSourceError());
ran = true;
@@ -49,39 +49,39 @@ TEST(CancelTest, CancelSourceDestructorDoesNotCauseCancelation) {
ASSERT_TRUE(ran);
}
-TEST(CancelTest, CancelTokenIsCanceledIsFalseBeforeCancelation) {
- CancelationSource source;
+TEST(CancelTest, CancelTokenIsCanceledIsFalseBeforeCancellation) {
+ CancellationSource source;
ASSERT_FALSE(source.token().isCanceled());
}
-TEST(CancelTest, CancelTokenOnCancelFutureIsNotReadyBeforeCancelation) {
- CancelationSource source;
+TEST(CancelTest, CancelTokenOnCancelFutureIsNotReadyBeforeCancellation) {
+ CancellationSource source;
ASSERT_FALSE(source.token().onCancel().isReady());
}
TEST(CancelTest, CancelingSourceMakesExistingCancelTokenIsCanceledTrue) {
- CancelationSource source;
+ CancellationSource source;
auto token = source.token();
source.cancel();
ASSERT_TRUE(token.isCanceled());
}
-TEST(CancelTest, CancelingSourceMakesCancelTokenIsCanceledTrueForTokensCreatedAfterCancelation) {
- CancelationSource source;
+TEST(CancelTest, CancelingSourceMakesCancelTokenIsCanceledTrueForTokensCreatedAfterCancellation) {
+ CancellationSource source;
source.cancel();
ASSERT_TRUE(source.token().isCanceled());
}
-TEST(CancelTest, CallingCancelOnCancelationSourceTwiceIsSafe) {
- CancelationSource source;
+TEST(CancelTest, CallingCancelOnCancellationSourceTwiceIsSafe) {
+ CancellationSource source;
source.cancel();
source.cancel();
ASSERT_TRUE(source.token().isCanceled());
}
TEST(CancelTest,
- CallingCancelOnACopyOfACancelationSourceTriggersCancelationInTokensObtainedFromOriginal) {
- CancelationSource source;
+ CallingCancelOnACopyOfACancellationSourceTriggersCancellationInTokensObtainedFromOriginal) {
+ CancellationSource source;
auto token = source.token();
auto copy = source;
copy.cancel();
@@ -89,15 +89,16 @@ TEST(CancelTest,
}
TEST(CancelTest,
- DestroyingACopyOfACancelationSourceDoesNotSetErrorOnCancelationFutureFromOriginalSource) {
- CancelationSource source;
+ DestroyingACopyOfACancellationSourceDoesNotSetErrorOnCancellationFutureFromOriginalSource) {
+ CancellationSource source;
auto token = source.token();
{ auto copy = source; }
ASSERT_FALSE(token.onCancel().isReady());
}
-TEST(CancelTest, DestroyingACancelationSourceWithAnExistingCopyDoesNotSetErrorOnCancelationFuture) {
- boost::optional<CancelationSource> source;
+TEST(CancelTest,
+ DestroyingACancellationSourceWithAnExistingCopyDoesNotSetErrorOnCancellationFuture) {
+ boost::optional<CancellationSource> source;
source.emplace();
auto copy = *source;
source.reset();
@@ -105,7 +106,7 @@ TEST(CancelTest, DestroyingACancelationSourceWithAnExistingCopyDoesNotSetErrorOn
}
TEST(CancelTest, CancelingSourceTriggersOnCancelCallbacksOnSingleCancelToken) {
- CancelationSource source;
+ CancellationSource source;
auto token = source.token();
bool cancelCallbackRan{false};
auto result = token.onCancel().unsafeToInlineFuture().then([&] { cancelCallbackRan = true; });
@@ -115,7 +116,7 @@ TEST(CancelTest, CancelingSourceTriggersOnCancelCallbacksOnSingleCancelToken) {
}
TEST(CancelTest, CancelingSourceTriggersOnCancelCallbacksOnMultipleCancelTokens) {
- CancelationSource source;
+ CancellationSource source;
auto token1 = source.token();
auto token2 = source.token();
bool cancelCallback1Ran{false};
@@ -131,8 +132,8 @@ TEST(CancelTest, CancelingSourceTriggersOnCancelCallbacksOnMultipleCancelTokens)
ASSERT_TRUE(cancelCallback2Ran);
}
-TEST(CancelTest, CancelTokenOnCancelFutureIsReadyAndRunsCallbacksOnTokensCreatedAfterCancelation) {
- CancelationSource source;
+TEST(CancelTest, CancelTokenOnCancelFutureIsReadyAndRunsCallbacksOnTokensCreatedAfterCancellation) {
+ CancellationSource source;
source.cancel();
bool cancelCallbackRan{false};
auto token = source.token();
@@ -142,10 +143,10 @@ TEST(CancelTest, CancelTokenOnCancelFutureIsReadyAndRunsCallbacksOnTokensCreated
}
TEST(CancelTest, CancelingSourceConstructedFromATokenDoesNotCancelThatToken) {
- CancelationSource parent;
+ CancellationSource parent;
auto parentToken = parent.token();
- CancelationSource child(parentToken);
+ CancellationSource child(parentToken);
child.cancel();
ASSERT_FALSE(parentToken.isCanceled());
@@ -153,8 +154,8 @@ TEST(CancelTest, CancelingSourceConstructedFromATokenDoesNotCancelThatToken) {
}
TEST(CancelTest, CancelingTokenUsedToConstructASourceCallsCancelOnThatSource) {
- CancelationSource parent;
- CancelationSource child(parent.token());
+ CancellationSource parent;
+ CancellationSource child(parent.token());
parent.cancel();
ASSERT_TRUE(parent.token().isCanceled());
@@ -163,7 +164,7 @@ TEST(CancelTest, CancelingTokenUsedToConstructASourceCallsCancelOnThatSource) {
TEST(CancelTest, CancelTokenRemembersIsCanceledForCanceledSourceEvenAfterSourceIsDestroyed) {
auto token = [] {
- CancelationSource source;
+ CancellationSource source;
auto token = source.token();
source.cancel();
return token;
@@ -175,7 +176,7 @@ TEST(CancelTest, CancelTokenRemembersIsCanceledForCanceledSourceEvenAfterSourceI
TEST(CancelTest, CancelTokenRemembersNotCanceledForNotCanceledSourceEvenAfterSourceIsDestroyed) {
auto token = [] {
- CancelationSource source;
+ CancellationSource source;
auto token = source.token();
return token;
}();
@@ -185,31 +186,31 @@ TEST(CancelTest, CancelTokenRemembersNotCanceledForNotCanceledSourceEvenAfterSou
}
TEST(CancelTest, UncancelableTokenReturnsFalseForIsCanceled) {
- auto token = CancelationToken::uncancelable();
+ auto token = CancellationToken::uncancelable();
ASSERT_FALSE(token.isCanceled());
}
TEST(CancelTest, UncancelableTokenNeverRunsCallbacks) {
- auto token = CancelationToken::uncancelable();
+ auto token = CancellationToken::uncancelable();
auto cancelFuture = token.onCancel();
ASSERT_TRUE(cancelFuture.isReady());
ASSERT_EQ(cancelFuture.getNoThrow(), detail::getCancelNeverCalledOnSourceError());
}
TEST(CancelTest, UncancelableTokenReturnsFalseForIsCancelable) {
- auto token = CancelationToken::uncancelable();
+ auto token = CancellationToken::uncancelable();
ASSERT_FALSE(token.isCancelable());
}
TEST(CancelTest, TokenIsCancelableReturnsTrueIfSourceIsAlreadyCanceled) {
- CancelationSource source;
+ CancellationSource source;
auto token = source.token();
source.cancel();
ASSERT_TRUE(token.isCancelable());
}
TEST(CancelTest, TokenIsCancelableReturnsFalseIfSourceHasBeenDestroyedWithoutCancelBeingCalled) {
auto token = [] {
- CancelationSource source;
+ CancellationSource source;
auto token = source.token();
return token;
}();
@@ -217,7 +218,7 @@ TEST(CancelTest, TokenIsCancelableReturnsFalseIfSourceHasBeenDestroyedWithoutCan
}
TEST(CancelTest, TokenIsCancelableReturnsTrueIfSourceExistsAndIsNotYetCanceled) {
- CancelationSource source;
+ CancellationSource source;
auto token = source.token();
ASSERT_TRUE(token.isCancelable());
}
diff --git a/src/mongo/util/fail_point.h b/src/mongo/util/fail_point.h
index 24c6f98a179..1cdb02520b1 100644
--- a/src/mongo/util/fail_point.h
+++ b/src/mongo/util/fail_point.h
@@ -40,7 +40,7 @@
#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/assert_util.h"
-#include "mongo/util/cancelation.h"
+#include "mongo/util/cancellation.h"
#include "mongo/util/duration.h"
#include "mongo/util/interruptible.h"
#include "mongo/util/string_map.h"
@@ -221,7 +221,7 @@ private:
/** See `FailPoint::pauseWhileSetAndNotCanceled`. */
void pauseWhileSetAndNotCanceled(Interruptible* interruptible,
- const CancelationToken& token) {
+ const CancellationToken& token) {
auto alreadyCounted = AlreadyCounted{false};
while (MONGO_unlikely(_shouldFail(alreadyCounted, nullptr))) {
uassert(
@@ -535,7 +535,7 @@ public:
* This method will throw if the token is canceled, to match the behavior when the
* Interruptible* is interrupted.
*/
- void pauseWhileSetAndNotCanceled(Interruptible* interruptible, const CancelationToken& token) {
+ void pauseWhileSetAndNotCanceled(Interruptible* interruptible, const CancellationToken& token) {
_impl()->pauseWhileSetAndNotCanceled(interruptible, token);
}
diff --git a/src/mongo/util/fail_point_test.cpp b/src/mongo/util/fail_point_test.cpp
index 4dafd26c6e8..cb882853548 100644
--- a/src/mongo/util/fail_point_test.cpp
+++ b/src/mongo/util/fail_point_test.cpp
@@ -487,8 +487,8 @@ TEST(FailPoint, PauseWhileSetCancelability) {
FailPoint failPoint("testFP");
failPoint.setMode(FailPoint::alwaysOn);
- CancelationSource cs;
- CancelationToken ct = cs.token();
+ CancellationSource cs;
+ CancellationToken ct = cs.token();
cs.cancel();
ASSERT_THROWS_CODE(failPoint.pauseWhileSetAndNotCanceled(Interruptible::notInterruptible(), ct),
diff --git a/src/mongo/util/future_util.h b/src/mongo/util/future_util.h
index 54ad21ff9b9..0d5e45870ec 100644
--- a/src/mongo/util/future_util.h
+++ b/src/mongo/util/future_util.h
@@ -96,7 +96,7 @@ public:
* iteration of the loop body threw an exception or otherwise returned an error status, the
* returned ExecutorFuture will contain that error.
*/
- auto on(std::shared_ptr<executor::TaskExecutor> executor, CancelationToken cancelToken)&& {
+ auto on(std::shared_ptr<executor::TaskExecutor> executor, CancellationToken cancelToken)&& {
auto loop = std::make_shared<TryUntilLoopWithDelay>(std::move(executor),
std::move(_body),
std::move(_condition),
@@ -116,7 +116,7 @@ private:
BodyCallable executeLoopBody,
ConditionCallable shouldStopIteration,
Delay delay,
- CancelationToken cancelToken)
+ CancellationToken cancelToken)
: executor(std::move(executor)),
executeLoopBody(std::move(executeLoopBody)),
shouldStopIteration(std::move(shouldStopIteration)),
@@ -187,7 +187,7 @@ private:
BodyCallable executeLoopBody;
ConditionCallable shouldStopIteration;
Delay delay;
- CancelationToken cancelToken;
+ CancellationToken cancelToken;
};
BodyCallable _body;
@@ -237,7 +237,7 @@ public:
* iteration of the loop body threw an exception or otherwise returned an error status, the
* returned ExecutorFuture will contain that error.
*/
- auto on(ExecutorPtr executor, CancelationToken cancelToken)&& {
+ auto on(ExecutorPtr executor, CancellationToken cancelToken)&& {
auto loop = std::make_shared<TryUntilLoop>(
std::move(executor), std::move(_body), std::move(_condition), std::move(cancelToken));
// Launch the recursive chain using the helper class.
@@ -279,7 +279,7 @@ private:
TryUntilLoop(ExecutorPtr executor,
BodyCallable executeLoopBody,
ConditionCallable shouldStopIteration,
- CancelationToken cancelToken)
+ CancellationToken cancelToken)
: executor(std::move(executor)),
executeLoopBody(std::move(executeLoopBody)),
shouldStopIteration(std::move(shouldStopIteration)),
@@ -338,7 +338,7 @@ private:
ExecutorPtr executor;
BodyCallable executeLoopBody;
ConditionCallable shouldStopIteration;
- CancelationToken cancelToken;
+ CancellationToken cancelToken;
};
BodyCallable _body;
@@ -648,14 +648,14 @@ SemiFuture<Result> whenAny(FuturePack&&... futures) {
namespace future_util {
/**
- * Takes an input Future, ExecutorFuture, SemiFuture, or SharedSemiFuture and a CancelationToken,
+ * Takes an input Future, ExecutorFuture, SemiFuture, or SharedSemiFuture and a CancellationToken,
* and returns a new SemiFuture that will be resolved when either the input future is resolved or
- * when the input CancelationToken is canceled. If the token is canceled before the input future is
+ * when the input CancellationToken is canceled. If the token is canceled before the input future is
* resolved, the resulting SemiFuture will be resolved with a CallbackCanceled error. Otherwise, the
* resulting SemiFuture will be resolved with the same result as the input future.
*/
template <typename FutureT, typename Value = typename FutureT::value_type>
-SemiFuture<Value> withCancelation(FutureT&& inputFuture, const CancelationToken& token) {
+SemiFuture<Value> withCancellation(FutureT&& inputFuture, const CancellationToken& token) {
/**
* A structure used to share state between the continuation we attach to the input future and
* the continuation we attach to the token's onCancel() future.
@@ -683,12 +683,12 @@ SemiFuture<Value> withCancelation(FutureT&& inputFuture, const CancelationToken&
token.onCancel().unsafeToInlineFuture().getAsync([sharedBlock](Status s) {
if (s.isOK()) {
- // If the cancelation token is canceled first, change done to true and set the value on
+ // If the cancellation token is canceled first, change done to true and set an error on
// the promise.
if (!sharedBlock->done.swap(true)) {
sharedBlock->resultPromise.setError(
{ErrorCodes::CallbackCanceled,
- "CancelationToken canceled while waiting for input future"});
+ "CancellationToken canceled while waiting for input future"});
}
}
});
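The renamed helper keeps the documented contract: future_util::withCancellation resolves with the input future's result unless the token is canceled first, in which case it resolves with CallbackCanceled; AsyncTry's on() likewise stops the loop when its token is canceled. A sketch of both, assuming a started TaskExecutor is available as in the tests that follow; the wrapper names cancelableWork and retryUntilCanceled are illustrative:

#include "mongo/executor/task_executor.h"
#include "mongo/util/cancellation.h"
#include "mongo/util/duration.h"
#include "mongo/util/future.h"
#include "mongo/util/future_util.h"

namespace mongo {

SemiFuture<int> cancelableWork(Future<int> work, const CancellationToken& token) {
    // Resolves with 'work', or with CallbackCanceled if 'token' is canceled first.
    return future_util::withCancellation(std::move(work), token);
}

auto retryUntilCanceled(std::shared_ptr<executor::TaskExecutor> executor,
                        CancellationToken token) {
    // Re-runs the body until the condition returns true or 'token' is canceled.
    return AsyncTry([] { /* one attempt of the work */ })
        .until([](Status s) { return s.isOK(); })
        .withDelayBetweenIterations(Milliseconds(100))
        .on(std::move(executor), std::move(token));
}

}  // namespace mongo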
diff --git a/src/mongo/util/future_util_test.cpp b/src/mongo/util/future_util_test.cpp
index 93c507d0218..d19d8044e4e 100644
--- a/src/mongo/util/future_util_test.cpp
+++ b/src/mongo/util/future_util_test.cpp
@@ -90,7 +90,7 @@ TEST_F(AsyncTryUntilTest, LoopExecutesOnceWithAlwaysTrueCondition) {
auto i = 0;
auto resultFut = AsyncTry([&] { ++i; })
.until([](Status s) { return true; })
- .on(executor(), CancelationToken::uncancelable());
+ .on(executor(), CancellationToken::uncancelable());
resultFut.wait();
ASSERT_EQ(i, 1);
@@ -102,7 +102,7 @@ TEST_F(AsyncTryUntilTest, LoopDoesNotExecuteIfExecutorAlreadyShutdown) {
auto i = 0;
auto resultFut = AsyncTry([&] { ++i; })
.until([](Status s) { return true; })
- .on(executor(), CancelationToken::uncancelable());
+ .on(executor(), CancellationToken::uncancelable());
ASSERT_THROWS_CODE(resultFut.get(), DBException, ErrorCodes::ShutdownInProgress);
@@ -116,7 +116,7 @@ TEST_F(AsyncTryUntilTest, LoopWithDelayDoesNotExecuteIfExecutorAlreadyShutdown)
auto resultFut = AsyncTry([&] { ++i; })
.until([](Status s) { return true; })
.withDelayBetweenIterations(Milliseconds(0))
- .on(executor(), CancelationToken::uncancelable());
+ .on(executor(), CancellationToken::uncancelable());
ASSERT_THROWS_CODE(resultFut.get(), DBException, ErrorCodes::ShutdownInProgress);
@@ -131,7 +131,7 @@ TEST_F(AsyncTryUntilTest, LoopExecutesUntilConditionIsTrue) {
return i;
})
.until([&](StatusWith<int> swInt) { return swInt.getValue() == numLoops; })
- .on(executor(), CancelationToken::uncancelable());
+ .on(executor(), CancellationToken::uncancelable());
resultFut.wait();
ASSERT_EQ(i, numLoops);
@@ -145,7 +145,7 @@ TEST_F(AsyncTryUntilTest, LoopExecutesUntilConditionIsTrueWithFutureReturnType)
return Future<int>::makeReady(i);
})
.until([&](StatusWith<int> swInt) { return swInt.getValue() == numLoops; })
- .on(executor(), CancelationToken::uncancelable());
+ .on(executor(), CancellationToken::uncancelable());
resultFut.wait();
ASSERT_EQ(i, numLoops);
@@ -159,7 +159,7 @@ TEST_F(AsyncTryUntilTest, LoopExecutesUntilConditionIsTrueWithSemiFutureReturnTy
return SemiFuture<int>::makeReady(i);
})
.until([&](StatusWith<int> swInt) { return swInt.getValue() == numLoops; })
- .on(executor(), CancelationToken::uncancelable());
+ .on(executor(), CancellationToken::uncancelable());
resultFut.wait();
ASSERT_EQ(i, numLoops);
@@ -173,7 +173,7 @@ TEST_F(AsyncTryUntilTest, LoopExecutesUntilConditionIsTrueWithExecutorFutureRetu
return ExecutorFuture<int>(executor(), i);
})
.until([&](StatusWith<int> swInt) { return swInt.getValue() == numLoops; })
- .on(executor(), CancelationToken::uncancelable());
+ .on(executor(), CancellationToken::uncancelable());
resultFut.wait();
ASSERT_EQ(i, numLoops);
@@ -184,7 +184,7 @@ TEST_F(AsyncTryUntilTest, LoopDoesNotRespectConstDelayIfConditionIsAlreadyTrue)
auto resultFut = AsyncTry([&] { ++i; })
.until([](Status s) { return true; })
.withDelayBetweenIterations(Seconds(10000000))
- .on(executor(), CancelationToken::uncancelable());
+ .on(executor(), CancellationToken::uncancelable());
// This would hang for a very long time if the behavior were incorrect.
resultFut.wait();
@@ -196,7 +196,7 @@ TEST_F(AsyncTryUntilTest, LoopDoesNotRespectBackoffDelayIfConditionIsAlreadyTrue
auto resultFut = AsyncTry([&] { ++i; })
.until([](Status s) { return true; })
.withBackoffBetweenIterations(TestBackoff{Seconds(10000000)})
- .on(executor(), CancelationToken::uncancelable());
+ .on(executor(), CancellationToken::uncancelable());
// This would hang for a very long time if the behavior were incorrect.
resultFut.wait();
@@ -212,7 +212,7 @@ TEST_F(AsyncTryUntilTest, LoopRespectsConstDelayAfterEvaluatingCondition) {
})
.until([&](StatusWith<int> swInt) { return swInt.getValue() == numLoops; })
.withDelayBetweenIterations(Seconds(1000))
- .on(executor(), CancelationToken::uncancelable());
+ .on(executor(), CancellationToken::uncancelable());
ASSERT_FALSE(resultFut.isReady());
// Advance the time some, but not enough to be past the delay yet.
@@ -243,7 +243,7 @@ TEST_F(AsyncTryUntilTest, LoopRespectsBackoffDelayAfterEvaluatingCondition) {
})
.until([&](StatusWith<int> swInt) { return swInt.getValue() == numLoops; })
.withBackoffBetweenIterations(TestBackoff{Seconds(1000)})
- .on(executor(), CancelationToken::uncancelable());
+ .on(executor(), CancellationToken::uncancelable());
ASSERT_FALSE(resultFut.isReady());
// Due to the backoff, the delays are going to be 1000 seconds and 2000 seconds.
@@ -292,7 +292,7 @@ TEST_F(AsyncTryUntilTest, LoopBodyPropagatesValueOfLastIterationToCaller) {
return i;
})
.until([&](StatusWith<int> swInt) { return i == expectedResult; })
- .on(executor(), CancelationToken::uncancelable());
+ .on(executor(), CancellationToken::uncancelable());
ASSERT_EQ(resultFut.get(), expectedResult);
}
@@ -305,7 +305,7 @@ TEST_F(AsyncTryUntilTest, FutureReturningLoopBodyPropagatesValueOfLastIterationT
return Future<int>::makeReady(i);
})
.until([&](StatusWith<int> swInt) { return i == expectedResult; })
- .on(executor(), CancelationToken::uncancelable());
+ .on(executor(), CancellationToken::uncancelable());
ASSERT_EQ(resultFut.get(), expectedResult);
}
@@ -318,7 +318,7 @@ TEST_F(AsyncTryUntilTest, SemiFutureReturningLoopBodyPropagatesValueOfLastIterat
return SemiFuture<int>::makeReady(i);
})
.until([&](StatusWith<int> swInt) { return i == expectedResult; })
- .on(executor(), CancelationToken::uncancelable());
+ .on(executor(), CancellationToken::uncancelable());
ASSERT_EQ(resultFut.get(), expectedResult);
}
@@ -331,7 +331,7 @@ TEST_F(AsyncTryUntilTest, ExecutorFutureReturningLoopBodyPropagatesValueOfLastIt
return ExecutorFuture<int>(executor(), i);
})
.until([&](StatusWith<int> swInt) { return i == expectedResult; })
- .on(executor(), CancelationToken::uncancelable());
+ .on(executor(), CancellationToken::uncancelable());
ASSERT_EQ(resultFut.get(), expectedResult);
}
@@ -349,7 +349,7 @@ TEST_F(AsyncTryUntilTest, LoopBodyPropagatesErrorToConditionAndCaller) {
});
return true;
})
- .on(executor(), CancelationToken::uncancelable());
+ .on(executor(), CancellationToken::uncancelable());
ASSERT_EQ(resultFut.getNoThrow(), ErrorCodes::InternalError);
});
@@ -368,7 +368,7 @@ TEST_F(AsyncTryUntilTest, FutureReturningLoopBodyPropagatesErrorToConditionAndCa
});
return true;
})
- .on(executor(), CancelationToken::uncancelable());
+ .on(executor(), CancellationToken::uncancelable());
ASSERT_EQ(resultFut.getNoThrow(), ErrorCodes::InternalError);
});
@@ -387,7 +387,7 @@ TEST_F(AsyncTryUntilTest, SemiFutureReturningLoopBodyPropagatesErrorToConditionA
});
return true;
})
- .on(executor(), CancelationToken::uncancelable());
+ .on(executor(), CancellationToken::uncancelable());
ASSERT_EQ(resultFut.getNoThrow(), ErrorCodes::InternalError);
});
@@ -406,7 +406,7 @@ TEST_F(AsyncTryUntilTest, ExecutorFutureReturningLoopBodyPropagatesErrorToCondit
});
return true;
})
- .on(executor(), CancelationToken::uncancelable());
+ .on(executor(), CancellationToken::uncancelable());
ASSERT_EQ(resultFut.getNoThrow(), ErrorCodes::InternalError);
});
@@ -415,7 +415,7 @@ TEST_F(AsyncTryUntilTest, ExecutorFutureReturningLoopBodyPropagatesErrorToCondit
static const Status kCanceledStatus = {ErrorCodes::CallbackCanceled, "AsyncTry::until canceled"};
TEST_F(AsyncTryUntilTest, AsyncTryUntilCanBeCanceled) {
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
auto resultFut =
AsyncTry([] {}).until([](Status) { return false; }).on(executor(), cancelSource.token());
// This should hang forever if it is not canceled.
@@ -424,7 +424,7 @@ TEST_F(AsyncTryUntilTest, AsyncTryUntilCanBeCanceled) {
}
TEST_F(AsyncTryUntilTest, AsyncTryUntilWithDelayCanBeCanceled) {
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
auto resultFut = AsyncTry([] {})
.until([](Status) { return false; })
.withDelayBetweenIterations(Hours(1000))
@@ -437,7 +437,7 @@ TEST_F(AsyncTryUntilTest, AsyncTryUntilWithDelayCanBeCanceled) {
}
TEST_F(AsyncTryUntilTest, AsyncTryUntilWithBackoffCanBeCanceled) {
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
auto resultFut = AsyncTry([] {})
.until([](Status) { return false; })
.withBackoffBetweenIterations(TestBackoff{Seconds(10000000)})
@@ -448,7 +448,7 @@ TEST_F(AsyncTryUntilTest, AsyncTryUntilWithBackoffCanBeCanceled) {
TEST_F(AsyncTryUntilTest, CanceledTryUntilLoopDoesNotExecuteIfAlreadyCanceled) {
int counter{0};
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
auto canceledToken = cancelSource.token();
cancelSource.cancel();
auto resultFut = AsyncTry([&] { ++counter; })
@@ -459,7 +459,7 @@ TEST_F(AsyncTryUntilTest, CanceledTryUntilLoopDoesNotExecuteIfAlreadyCanceled) {
}
TEST_F(AsyncTryUntilTest, CanceledTryUntilLoopWithDelayDoesNotExecuteIfAlreadyCanceled) {
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
int counter{0};
auto canceledToken = cancelSource.token();
cancelSource.cancel();
@@ -472,7 +472,7 @@ TEST_F(AsyncTryUntilTest, CanceledTryUntilLoopWithDelayDoesNotExecuteIfAlreadyCa
}
TEST_F(AsyncTryUntilTest, CanceledTryUntilLoopWithBackoffDoesNotExecuteIfAlreadyCanceled) {
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
int counter{0};
auto canceledToken = cancelSource.token();
cancelSource.cancel();
@@ -1229,16 +1229,16 @@ TEST_F(WhenAnyTest, WorksWithVariadicTemplateAndExecutorFutures) {
ASSERT_EQ(idx, kWhichIdxWillBeFirst);
}
-class WithCancelationTest : public FutureUtilTest {};
+class WithCancellationTest : public FutureUtilTest {};
TEST_F(FutureUtilTest,
- WithCancelationReturnsSuccessIfInputFutureResolvedWithSuccessBeforeCancelation) {
+ WithCancellationReturnsSuccessIfInputFutureResolvedWithSuccessBeforeCancellation) {
const int kResult{5};
auto [promise, future] = makePromiseFuture<int>();
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
- auto cancelableFuture = future_util::withCancelation(std::move(future), cancelSource.token());
+ auto cancelableFuture = future_util::withCancellation(std::move(future), cancelSource.token());
promise.emplaceValue(kResult);
ASSERT_EQ(cancelableFuture.get(), kResult);
@@ -1247,12 +1247,13 @@ TEST_F(FutureUtilTest,
cancelSource.cancel();
}
-TEST_F(FutureUtilTest, WithCancelationReturnsErrorIfInputFutureResolvedWithErrorBeforeCancelation) {
+TEST_F(FutureUtilTest,
+ WithCancellationReturnsErrorIfInputFutureResolvedWithErrorBeforeCancellation) {
auto [promise, future] = makePromiseFuture<int>();
- CancelationSource cancelSource;
+ CancellationSource cancelSource;
- auto cancelableFuture = future_util::withCancelation(std::move(future), cancelSource.token());
+ auto cancelableFuture = future_util::withCancellation(std::move(future), cancelSource.token());
const Status kErrorStatus{ErrorCodes::InternalError, ""};
promise.setError(kErrorStatus);
@@ -1262,11 +1263,11 @@ TEST_F(FutureUtilTest, WithCancelationReturnsErrorIfInputFutureResolvedWithError
cancelSource.cancel();
}
-TEST_F(FutureUtilTest, WithCancelationReturnsErrorIfTokenCanceledFirst) {
+TEST_F(FutureUtilTest, WithCancellationReturnsErrorIfTokenCanceledFirst) {
auto [promise, future] = makePromiseFuture<int>();
- CancelationSource cancelSource;
- auto cancelableFuture = future_util::withCancelation(std::move(future), cancelSource.token());
+ CancellationSource cancelSource;
+ auto cancelableFuture = future_util::withCancellation(std::move(future), cancelSource.token());
cancelSource.cancel();
ASSERT_THROWS_CODE(cancelableFuture.get(), DBException, ErrorCodes::CallbackCanceled);
@@ -1274,44 +1275,44 @@ TEST_F(FutureUtilTest, WithCancelationReturnsErrorIfTokenCanceledFirst) {
promise.setError(kCanceledStatus);
}
-TEST_F(FutureUtilTest, WithCancelationWorksWithVoidInput) {
+TEST_F(FutureUtilTest, WithCancellationWorksWithVoidInput) {
auto [promise, future] = makePromiseFuture<void>();
auto cancelableFuture =
- future_util::withCancelation(std::move(future), CancelationToken::uncancelable());
+ future_util::withCancellation(std::move(future), CancellationToken::uncancelable());
promise.emplaceValue();
ASSERT(cancelableFuture.isReady());
}
-TEST_F(FutureUtilTest, WithCancelationWorksWithSemiFutureInput) {
+TEST_F(FutureUtilTest, WithCancellationWorksWithSemiFutureInput) {
const int kResult{5};
auto [promise, future] = makePromiseFuture<int>();
auto cancelableFuture =
- future_util::withCancelation(std::move(future).semi(), CancelationToken::uncancelable());
+ future_util::withCancellation(std::move(future).semi(), CancellationToken::uncancelable());
promise.emplaceValue(kResult);
ASSERT_EQ(cancelableFuture.get(), kResult);
}
-TEST_F(FutureUtilTest, WithCancelationWorksWithSharedSemiFutureInput) {
+TEST_F(FutureUtilTest, WithCancellationWorksWithSharedSemiFutureInput) {
const int kResult{5};
auto [promise, future] = makePromiseFuture<int>();
- auto cancelableFuture = future_util::withCancelation(std::move(future).semi().share(),
- CancelationToken::uncancelable());
+ auto cancelableFuture = future_util::withCancellation(std::move(future).semi().share(),
+ CancellationToken::uncancelable());
promise.emplaceValue(kResult);
ASSERT_EQ(cancelableFuture.get(), kResult);
}
-TEST_F(FutureUtilTest, WithCancelationWorksWithExecutorFutureInput) {
+TEST_F(FutureUtilTest, WithCancellationWorksWithExecutorFutureInput) {
const int kResult{5};
auto [promise, future] = makePromiseFuture<int>();
- auto cancelableFuture = future_util::withCancelation(std::move(future).thenRunOn(executor()),
- CancelationToken::uncancelable());
+ auto cancelableFuture = future_util::withCancellation(std::move(future).thenRunOn(executor()),
+ CancellationToken::uncancelable());
promise.emplaceValue(kResult);
ASSERT_EQ(cancelableFuture.get(), kResult);
diff --git a/src/mongo/util/out_of_line_executor.h b/src/mongo/util/out_of_line_executor.h
index a5e7bd42483..02cd81ec8bd 100644
--- a/src/mongo/util/out_of_line_executor.h
+++ b/src/mongo/util/out_of_line_executor.h
@@ -111,7 +111,7 @@ public:
*
* The Task will be passed a Status schedStatus that is either:
* * schedStatus.isOK() if the function is run in an out-of-line context
- * * isCancelationError(schedStatus.code()) if the function is run in an inline context
+ * * isCancellationError(schedStatus.code()) if the function is run in an inline context
*
* All of this is to say: CHECK YOUR STATUS.
*/
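That contract means callbacks handed to an OutOfLineExecutor must branch on the Status they receive rather than assume out-of-line execution. A hypothetical callback sketch; scheduleChecked and 'exec' are illustrative names, and the error-category check uses the isCancellationError spelling introduced by this patch:

#include "mongo/base/error_codes.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/out_of_line_executor.h"

namespace mongo {

void scheduleChecked(OutOfLineExecutor& exec) {
    exec.schedule([](Status schedStatus) {
        if (ErrorCodes::isCancellationError(schedStatus.code())) {
            // Ran inline because the executor is shutting down; do only cleanup here.
            return;
        }
        invariant(schedStatus.isOK());  // Otherwise we are genuinely out-of-line.
        // ... do the real work ...
    });
}

}  // namespace mongo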
diff --git a/src/mongo/util/read_through_cache.h b/src/mongo/util/read_through_cache.h
index 6fb9b63de14..b3ff5aeefc8 100644
--- a/src/mongo/util/read_through_cache.h
+++ b/src/mongo/util/read_through_cache.h
@@ -484,7 +484,7 @@ private:
auto& inProgressLookup = *it->second;
auto [promisesToSet, result, mustDoAnotherLoop] = [&] {
// The thread pool is shutting down, so terminate the loop
- if (ErrorCodes::isCancelationError(sw.getStatus()))
+ if (ErrorCodes::isCancellationError(sw.getStatus()))
return std::make_tuple(inProgressLookup.getAllPromisesOnError(ul),
StatusWith<ValueHandle>(sw.getStatus()),
false);
@@ -496,7 +496,7 @@ private:
StatusWith<ValueHandle>(Status(ErrorCodes::Error(461541), "")),
true);
- // Lookup resulted in an error, which is not cancelation
+ // Lookup resulted in an error, which is not cancellation
if (!sw.isOK())
return std::make_tuple(inProgressLookup.getAllPromisesOnError(ul),
StatusWith<ValueHandle>(sw.getStatus()),