author     Louis Williams <louis.williams@mongodb.com>   2018-03-01 11:44:30 -0500
committer  Louis Williams <louis.williams@mongodb.com>   2018-03-01 11:44:30 -0500
commit     9cec63d67ed2f63d26303a1c44189e1ac73d167a (patch)
tree       50909ea4c350d0693f081891fdf30e611b6a4af9 /src/mongo/db
parent     7d7969eb7439b08207e29437e94fc1db5459c205 (diff)
download   mongo-9cec63d67ed2f63d26303a1c44189e1ac73d167a.tar.gz
SERVER-32638 Rename interruptable to interruptible
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/catalog/drop_database.cpp              |  6
-rw-r--r--  src/mongo/db/commands/mr.cpp                         | 10
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_test.cpp      | 14
-rw-r--r--  src/mongo/db/concurrency/lock_state.cpp              |  6
-rw-r--r--  src/mongo/db/concurrency/lock_state.h                |  2
-rw-r--r--  src/mongo/db/concurrency/locker.h                    | 30
-rw-r--r--  src/mongo/db/index/index_access_method.h             |  2
-rw-r--r--  src/mongo/db/pipeline/document_source_cursor.cpp     |  2
-rw-r--r--  src/mongo/db/query/find.cpp                          |  2
-rw-r--r--  src/mongo/db/query/query_yield.cpp                   |  2
-rw-r--r--  src/mongo/db/repl/master_slave.cpp                   |  4
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp   |  2
-rw-r--r--  src/mongo/db/repl/storage_interface_impl.cpp         |  4
-rw-r--r--  src/mongo/db/s/collection_range_deleter.cpp          |  4
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp     |  2
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp          | 24
-rw-r--r--  src/mongo/db/transaction_reaper.cpp                  |  2
-rw-r--r--  src/mongo/db/ttl.cpp                                 |  2

18 files changed, 60 insertions, 60 deletions
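
Note: the class being renamed throughout the diff below is an RAII guard. While any instance is alive it keeps a counter on the Locker positive, and lock waits treat a positive counter as a request for uninterruptible behavior. The following standalone sketch illustrates that counting-guard pattern in isolation; it borrows the names from the diff but is a simplified illustration (a stub Locker, plain asserts instead of invariant()), not MongoDB's actual implementation.

#include <cassert>
#include <iostream>
#include <limits>

// Simplified stand-in for mongo::Locker: only the counter that the guard manipulates.
class Locker {
public:
    bool lockAcquisitionsAreInterruptible() const {
        return _uninterruptibleLocksRequested == 0;
    }

private:
    friend class UninterruptibleLockGuard;
    int _uninterruptibleLocksRequested = 0;
};

// RAII guard: while any instance is alive, the locker reports lock acquisitions as
// non-interruptible. A counter (rather than a bool) is used because several guards
// may be active at once.
class UninterruptibleLockGuard {
public:
    explicit UninterruptibleLockGuard(Locker* locker) : _locker(locker) {
        assert(_locker);
        assert(_locker->_uninterruptibleLocksRequested >= 0);
        assert(_locker->_uninterruptibleLocksRequested < std::numeric_limits<int>::max());
        _locker->_uninterruptibleLocksRequested += 1;
    }

    ~UninterruptibleLockGuard() {
        assert(_locker->_uninterruptibleLocksRequested > 0);
        _locker->_uninterruptibleLocksRequested -= 1;
    }

    UninterruptibleLockGuard(const UninterruptibleLockGuard&) = delete;
    UninterruptibleLockGuard& operator=(const UninterruptibleLockGuard&) = delete;

private:
    Locker* const _locker;
};

int main() {
    Locker locker;
    std::cout << std::boolalpha;
    std::cout << locker.lockAcquisitionsAreInterruptible() << '\n';  // true
    {
        UninterruptibleLockGuard noInterrupt(&locker);
        UninterruptibleLockGuard nested(&locker);  // nesting is fine: the counter handles it
        std::cout << locker.lockAcquisitionsAreInterruptible() << '\n';  // false
    }
    std::cout << locker.lockAcquisitionsAreInterruptible() << '\n';  // true again
    return 0;
}

Counting instead of flagging is what lets guards nest safely, which is why the destructor only decrements rather than clearing the state outright.
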
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index b4637e0da34..b2e4853eb8f 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -117,7 +117,7 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
     using Result = boost::optional<Status>;
     // Get an optional result--if it's there, early return; otherwise, wait for collections to drop.
     auto result = writeConflictRetry(opCtx, "dropDatabase_collection", dbName, [&] {
-        UninterruptableLockGuard noInterrupt(opCtx->lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
         Lock::GlobalWrite lk(opCtx);
         AutoGetDb autoDB(opCtx, dbName, MODE_X);
         Database* const db = autoDB.getDb();
@@ -198,7 +198,7 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
     // If waitForWriteConcern() returns an error or throws an exception, we should reset the
     // drop-pending state on Database.
     auto dropPendingGuardWhileAwaitingReplication = MakeGuard([dbName, opCtx] {
-        UninterruptableLockGuard noInterrupt(opCtx->lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
         Lock::GlobalWrite lk(opCtx);
         AutoGetDb autoDB(opCtx, dbName, MODE_X);
         if (auto db = autoDB.getDb()) {
@@ -249,7 +249,7 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
     dropPendingGuardWhileAwaitingReplication.Dismiss();
     return writeConflictRetry(opCtx, "dropDatabase_database", dbName, [&] {
-        UninterruptableLockGuard noInterrupt(opCtx->lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
         Lock::GlobalWrite lk(opCtx);
         AutoGetDb autoDB(opCtx, dbName, MODE_X);
         auto db = autoDB.getDb();
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 80f31bc4021..5b60dd056e7 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -364,8 +364,8 @@ Config::Config(const string& _dbname, const BSONObj& cmdObj) {
  * Clean up the temporary and incremental collections
  */
 void State::dropTempCollections() {
-    // The cleanup handler should not be interruptable.
-    UninterruptableLockGuard noInterrupt(_opCtx->lockState());
+    // The cleanup handler should not be interruptible.
+    UninterruptibleLockGuard noInterrupt(_opCtx->lockState());
     if (!_config.tempNamespace.isEmpty()) {
         writeConflictRetry(_opCtx, "M/R dropTempCollections", _config.tempNamespace.ns(), [this] {
@@ -1014,7 +1014,7 @@ void State::bailFromJS() {
 Collection* State::getCollectionOrUassert(OperationContext* opCtx,
                                           Database* db,
                                           const NamespaceString& nss) {
-    UninterruptableLockGuard noInterrupt(opCtx->lockState());
+    UninterruptibleLockGuard noInterrupt(opCtx->lockState());
     Collection* out = db ? db->getCollection(opCtx, nss) : NULL;
     uassert(18697, "Collection unexpectedly disappeared: " + nss.ns(), out);
     return out;
@@ -1402,7 +1402,7 @@ public:
                      BSONObjBuilder& result) {
         Timer t;
         // Don't let a lock acquisition in map-reduce get interrupted.
-        UninterruptableLockGuard noInterrupt(opCtx->lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
         boost::optional<DisableDocumentValidation> maybeDisableValidation;
         if (shouldBypassDocumentValidationForCommand(cmd))
@@ -1736,7 +1736,7 @@ public:
         }
         // Don't let any lock acquisitions get interrupted.
-        UninterruptableLockGuard noInterrupt(opCtx->lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
         boost::optional<DisableDocumentValidation> maybeDisableValidation;
         if (shouldBypassDocumentValidationForCommand(cmdObj))
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 1f4fb976f06..b8560cd6129 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -656,7 +656,7 @@ TEST_F(DConcurrencyTestFixture, TempReleaseRecursive) {
     ASSERT(lockState->isW());
 }
-TEST_F(DConcurrencyTestFixture, GlobalLockWaitIsInterruptable) {
+TEST_F(DConcurrencyTestFixture, GlobalLockWaitIsInterruptible) {
     auto clients = makeKClientsWithLockers<DefaultLockerImpl>(2);
     auto opCtx1 = clients[0].second.get();
     auto opCtx2 = clients[1].second.get();
@@ -673,7 +673,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockWaitIsInterruptable) {
     ASSERT_THROWS_CODE(result.get(), AssertionException, ErrorCodes::Interrupted);
 }
-TEST_F(DConcurrencyTestFixture, GlobalLockWaitIsInterruptableMMAP) {
+TEST_F(DConcurrencyTestFixture, GlobalLockWaitIsInterruptibleMMAP) {
     auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
     auto opCtx1 = clients[0].second.get();
@@ -693,7 +693,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockWaitIsInterruptableMMAP) {
     ASSERT_THROWS_CODE(result.get(), AssertionException, ErrorCodes::Interrupted);
 }
-TEST_F(DConcurrencyTestFixture, DBLockWaitIsInterruptable) {
+TEST_F(DConcurrencyTestFixture, DBLockWaitIsInterruptible) {
     auto clients = makeKClientsWithLockers<DefaultLockerImpl>(2);
     auto opCtx1 = clients[0].second.get();
     auto opCtx2 = clients[1].second.get();
@@ -710,7 +710,7 @@ TEST_F(DConcurrencyTestFixture, DBLockWaitIsInterruptable) {
     ASSERT_THROWS_CODE(result.get(), AssertionException, ErrorCodes::Interrupted);
 }
-TEST_F(DConcurrencyTestFixture, GlobalLockWaitIsNotInterruptableWithLockGuard) {
+TEST_F(DConcurrencyTestFixture, GlobalLockWaitIsNotInterruptibleWithLockGuard) {
     auto clients = makeKClientsWithLockers<DefaultLockerImpl>(2);
     auto opCtx1 = clients[0].second.get();
@@ -722,7 +722,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockWaitIsNotInterruptableWithLockGuard) {
     // Killing the lock wait should not interrupt it.
     auto result = runTaskAndKill(opCtx2, [&]() {
-        UninterruptableLockGuard noInterrupt(opCtx2->lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx2->lockState());
         Lock::GlobalLock g(opCtx2, MODE_S, Date_t::max());
     }, [&]() { globalLock.reset(); });
@@ -730,7 +730,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockWaitIsNotInterruptableWithLockGuard) {
     result.get();
 }
-TEST_F(DConcurrencyTestFixture, DBLockWaitIsNotInterruptableWithLockGuard) {
+TEST_F(DConcurrencyTestFixture, DBLockWaitIsNotInterruptibleWithLockGuard) {
     auto clients = makeKClientsWithLockers<DefaultLockerImpl>(2);
     auto opCtx1 = clients[0].second.get();
     auto opCtx2 = clients[1].second.get();
@@ -742,7 +742,7 @@ TEST_F(DConcurrencyTestFixture, DBLockWaitIsNotInterruptableWithLockGuard) {
     // Killing the lock wait should not interrupt it.
     auto result = runTaskAndKill(opCtx2, [&]() {
-        UninterruptableLockGuard noInterrupt(opCtx2->lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx2->lockState());
         Lock::DBLock d(opCtx2, "db", MODE_S);
     }, [&] { dbLock.reset(); });
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 42d9d553c80..0451ff82963 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -786,10 +786,10 @@ LockResult LockerImpl<IsForMMAPV1>::lockComplete(
     while (true) {
         // It is OK if this call wakes up spuriously, because we re-evaluate the remaining
         // wait time anyways.
-        // If we have an operation context, we want to use its interruptable wait so that
+        // If we have an operation context, we want to use its interruptible wait so that
         // pending lock acquisitions can be cancelled, so long as no callers have requested an
-        // uninterruptable lock.
-        if (opCtx && _uninterruptableLocksRequested == 0) {
+        // uninterruptible lock.
+        if (opCtx && _uninterruptibleLocksRequested == 0) {
             result = _notify.wait(opCtx, waitTime);
         } else {
             result = _notify.wait(waitTime);
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index 71448987d92..170cdc1c999 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -208,7 +208,7 @@ public:
      * Waits for the completion of a lock, previously requested through lockBegin or
      * lockGlobalBegin. Must only be called, if lockBegin returned LOCK_WAITING.
      *
-     * @param opCtx Operation context that, if not null, will be used to allow interruptable lock
+     * @param opCtx Operation context that, if not null, will be used to allow interruptible lock
      *              acquisition.
      * @param resId Resource id which was passed to an earlier lockBegin call. Must match.
      * @param mode Mode which was passed to an earlier lockBegin call. Must match.
diff --git a/src/mongo/db/concurrency/locker.h b/src/mongo/db/concurrency/locker.h
index 913d47eac9e..ea6a9f6f688 100644
--- a/src/mongo/db/concurrency/locker.h
+++ b/src/mongo/db/concurrency/locker.h
@@ -47,7 +47,7 @@ namespace mongo {
 class Locker {
     MONGO_DISALLOW_COPYING(Locker);
-    friend class UninterruptableLockGuard;
+    friend class UninterruptibleLockGuard;
 public:
     virtual ~Locker() {}
@@ -388,11 +388,11 @@ protected:
     /**
      * The number of callers that are guarding from lock interruptions.
-     * When 0, all lock acquisitions are interruptable. When positive, no lock acquisitions
-     * are interruptable. This is only true for database and global locks. Collection locks are
-     * never interruptable.
+     * When 0, all lock acquisitions are interruptible. When positive, no lock acquisitions
+     * are interruptible. This is only true for database and global locks. Collection locks are
+     * never interruptible.
      */
-    int _uninterruptableLocksRequested = 0;
+    int _uninterruptibleLocksRequested = 0;
 private:
     bool _shouldConflictWithSecondaryBatchApplication = true;
@@ -407,25 +407,25 @@ private:
  * Lock acquisitions can still return LOCK_TIMEOUT, just not if the parent operation
  * context is killed first.
  *
- * It is possible that multiple callers are requesting uninterruptable behavior, so the guard
+ * It is possible that multiple callers are requesting uninterruptible behavior, so the guard
  * increments a counter on the Locker class to indicate how may guards are active.
  */
-class UninterruptableLockGuard {
+class UninterruptibleLockGuard {
 public:
     /*
-     * Accepts a Locker, and increments the _uninterruptableLocksRequested. Decrements the
+     * Accepts a Locker, and increments the _uninterruptibleLocksRequested. Decrements the
      * counter when destoyed.
      */
-    explicit UninterruptableLockGuard(Locker* locker) : _locker(locker) {
+    explicit UninterruptibleLockGuard(Locker* locker) : _locker(locker) {
         invariant(_locker);
-        invariant(_locker->_uninterruptableLocksRequested >= 0);
-        invariant(_locker->_uninterruptableLocksRequested < std::numeric_limits<int>::max());
-        _locker->_uninterruptableLocksRequested += 1;
+        invariant(_locker->_uninterruptibleLocksRequested >= 0);
+        invariant(_locker->_uninterruptibleLocksRequested < std::numeric_limits<int>::max());
+        _locker->_uninterruptibleLocksRequested += 1;
     }
-    ~UninterruptableLockGuard() {
-        invariant(_locker->_uninterruptableLocksRequested > 0);
-        _locker->_uninterruptableLocksRequested -= 1;
+    ~UninterruptibleLockGuard() {
+        invariant(_locker->_uninterruptibleLocksRequested > 0);
+        _locker->_uninterruptibleLocksRequested -= 1;
     }
 private:
diff --git a/src/mongo/db/index/index_access_method.h b/src/mongo/db/index/index_access_method.h
index 807d5d73f7f..5035a636085 100644
--- a/src/mongo/db/index/index_access_method.h
+++ b/src/mongo/db/index/index_access_method.h
@@ -254,7 +254,7 @@ public:
      * Call this when you are ready to finish your bulk work.
      * Pass in the BulkBuilder returned from initiateBulk.
      * @param bulk - something created from initiateBulk
-     * @param mayInterrupt - is this commit interruptable (will cancel)
+     * @param mayInterrupt - is this commit interruptible (will cancel)
     * @param dupsAllowed - if false, error or fill 'dups' if any duplicate values are found
     * @param dups - if NULL, error out on dups if not allowed
     *               if not NULL, put the bad RecordIds there
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index 9e6f2f0bd50..b090f195bbe 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -265,7 +265,7 @@ void DocumentSourceCursor::cleanupExecutor() {
     // return nullptr if the collection has since turned into a view. In this case, '_exec' will
     // already have been marked as killed when the collection was dropped, and we won't need to
     // access the CursorManager to properly dispose of it.
-    UninterruptableLockGuard noInterrupt(opCtx->lockState());
+    UninterruptibleLockGuard noInterrupt(opCtx->lockState());
     AutoGetDb dbLock(opCtx, _exec->nss().db(), MODE_IS);
     Lock::CollectionLock collLock(opCtx->lockState(), _exec->nss().ns(), MODE_IS);
     auto collection = dbLock.getDb() ? dbLock.getDb()->getCollection(opCtx, _exec->nss()) : nullptr;
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 8c7856907fd..cf2a8fcfe36 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -262,7 +262,7 @@ Message getMore(OperationContext* opCtx,
     // Note that we acquire our locks before our ClientCursorPin, in order to ensure that the pin's
     // destructor is called before the lock's destructor (if there is one) so that the cursor
     // cleanup can occur under the lock.
-    UninterruptableLockGuard noInterrupt(opCtx->lockState());
+    UninterruptibleLockGuard noInterrupt(opCtx->lockState());
     boost::optional<AutoGetCollectionForRead> readLock;
     boost::optional<AutoStatsTracker> statsTracker;
     CursorManager* cursorManager;
diff --git a/src/mongo/db/query/query_yield.cpp b/src/mongo/db/query/query_yield.cpp
index cd5d513517c..d7ee0271f16 100644
--- a/src/mongo/db/query/query_yield.cpp
+++ b/src/mongo/db/query/query_yield.cpp
@@ -83,7 +83,7 @@ void QueryYield::yieldAllLocks(OperationContext* opCtx,
         whileYieldingFn();
     }
-    UninterruptableLockGuard noInterrupt(locker);
+    UninterruptibleLockGuard noInterrupt(locker);
     locker->restoreLockState(opCtx, snapshot);
 }
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 8437283e60c..e458e63c6d4 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -883,7 +883,7 @@ int ReplSource::_sync_pullOpLog(OperationContext* opCtx, int& nApplied) {
     oplogReader.tailCheck();
     // Due to the lack of exception handlers, don't allow lock interrupts.
-    UninterruptableLockGuard noInterrupt(opCtx->lockState());
+    UninterruptibleLockGuard noInterrupt(opCtx->lockState());
     bool initial = syncedTo.isNull();
@@ -1304,7 +1304,7 @@ static void replMasterThread() {
         OperationContext& opCtx = *opCtxPtr;
         AuthorizationSession::get(opCtx.getClient())->grantInternalAuthorization();
-        UninterruptableLockGuard noInterrupt(opCtx.lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx.lockState());
         Lock::GlobalWrite globalWrite(&opCtx, Date_t::now() + Milliseconds(1));
         if (globalWrite.isLocked()) {
             toSleep = 10;
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index ea7cf1d03e0..a073fd70354 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -1585,7 +1585,7 @@ Status ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
     const Date_t stepDownUntil = startTime + stepdownTime;
     const Date_t waitUntil = startTime + waitTime;
-    UninterruptableLockGuard noInterrupt(opCtx->lockState());
+    UninterruptibleLockGuard noInterrupt(opCtx->lockState());
     if (!getMemberState().primary()) {
         // Note this check is inherently racy - it's always possible for the node to
         // stepdown from some other path before we acquire the global exclusive lock. This check
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 89276f8b31d..eb1503f611e 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -398,7 +398,7 @@ Status StorageInterfaceImpl::createCollection(OperationContext* opCtx,
                                               const NamespaceString& nss,
                                               const CollectionOptions& options) {
     return writeConflictRetry(opCtx, "StorageInterfaceImpl::createCollection", nss.ns(), [&] {
-        UninterruptableLockGuard noInterrupt(opCtx->lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
         AutoGetOrCreateDb databaseWriteGuard(opCtx, nss.db(), MODE_X);
         auto db = databaseWriteGuard.getDb();
         invariant(db);
@@ -421,7 +421,7 @@ Status StorageInterfaceImpl::createCollection(OperationContext* opCtx,
 Status StorageInterfaceImpl::dropCollection(OperationContext* opCtx, const NamespaceString& nss) {
     return writeConflictRetry(opCtx, "StorageInterfaceImpl::dropCollection", nss.ns(), [&] {
-        UninterruptableLockGuard noInterrupt(opCtx->lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
         AutoGetDb autoDB(opCtx, nss.db(), MODE_X);
         if (!autoDB.getDb()) {
             // Database does not exist - nothing to do.
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index 24e4d784085..4ae50230f6d 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -102,7 +102,7 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
     auto notification = DeleteNotification();
     {
-        UninterruptableLockGuard noInterrupt(opCtx->lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
         AutoGetCollection autoColl(opCtx, nss, MODE_IX);
         auto* const collection = autoColl.getCollection();
@@ -242,7 +242,7 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
                   << redact(range->toString()) << " : " << redact(status.reason());
         // Don't allow lock interrupts while cleaning up.
-        UninterruptableLockGuard noInterrupt(opCtx->lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
         AutoGetCollection autoColl(opCtx, nss, MODE_IX);
         auto* const css = CollectionShardingState::get(opCtx, nss);
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index a1d8568c2c4..6da2d5c121a 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -1103,7 +1103,7 @@ void MigrationDestinationManager::_forgetPending(OperationContext* opCtx,
         return;  // no documents can have been moved in, so there is nothing to clean up.
     }
-    UninterruptableLockGuard noInterrupt(opCtx->lockState());
+    UninterruptibleLockGuard noInterrupt(opCtx->lockState());
     AutoGetCollection autoColl(opCtx, nss, MODE_IX, MODE_X);
     auto css = CollectionShardingState::get(opCtx, nss);
     auto metadata = css->getMetadata();
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 6a5d2f35462..c4091b19f0a 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -151,7 +151,7 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
     // Snapshot the committed metadata from the time the migration starts
     const auto collectionMetadataAndUUID = [&] {
-        UninterruptableLockGuard noInterrupt(opCtx->lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
         AutoGetCollection autoColl(opCtx, getNss(), MODE_IS);
         uassert(ErrorCodes::InvalidOptions,
                 "cannot move chunks for a collection that doesn't exist",
@@ -233,7 +233,7 @@ Status MigrationSourceManager::startClone(OperationContext* opCtx) {
     {
         // Register for notifications from the replication subsystem
-        UninterruptableLockGuard noInterrupt(opCtx->lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
         AutoGetCollection autoColl(opCtx, getNss(), MODE_IX, MODE_X);
         auto css = CollectionShardingState::get(opCtx, getNss());
@@ -290,7 +290,7 @@ Status MigrationSourceManager::enterCriticalSection(OperationContext* opCtx) {
     {
         const auto metadata = [&] {
-            UninterruptableLockGuard noInterrupt(opCtx->lockState());
+            UninterruptibleLockGuard noInterrupt(opCtx->lockState());
             AutoGetCollection autoColl(opCtx, _args.getNss(), MODE_IS);
             return CollectionShardingState::get(opCtx, _args.getNss())->getMetadata();
         }();
@@ -316,7 +316,7 @@ Status MigrationSourceManager::enterCriticalSection(OperationContext* opCtx) {
     // The critical section must be entered with collection X lock in order to ensure there are
     // no writes which could have entered and passed the version check just before we entered
     // the crticial section, but managed to complete after we left it.
-    UninterruptableLockGuard noInterrupt(opCtx->lockState());
+    UninterruptibleLockGuard noInterrupt(opCtx->lockState());
     AutoGetCollection autoColl(opCtx, getNss(), MODE_IX, MODE_X);
     // IMPORTANT: After this line, the critical section is in place and needs to be signaled
@@ -386,7 +386,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
     {
         const auto metadata = [&] {
-            UninterruptableLockGuard noInterrupt(opCtx->lockState());
+            UninterruptibleLockGuard noInterrupt(opCtx->lockState());
             AutoGetCollection autoColl(opCtx, _args.getNss(), MODE_IS);
             return CollectionShardingState::get(opCtx, _args.getNss())->getMetadata();
         }();
@@ -423,7 +423,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
     // Read operations must begin to wait on the critical section just before we send the commit
     // operation to the config server
     {
-        UninterruptableLockGuard noInterrupt(opCtx->lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
         AutoGetCollection autoColl(opCtx, getNss(), MODE_IX, MODE_X);
         _readsShouldWaitOnCritSec = true;
     }
@@ -484,7 +484,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
         // metadata for this collection, forcing subsequent callers to do a full refresh. Check if
         // this node can accept writes for this collection as a proxy for it being primary.
         if (!status.isOK()) {
-            UninterruptableLockGuard noInterrupt(opCtx->lockState());
+            UninterruptibleLockGuard noInterrupt(opCtx->lockState());
             AutoGetCollection autoColl(opCtx, getNss(), MODE_IX, MODE_X);
             if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, getNss())) {
                 CollectionShardingState::get(opCtx, getNss())->refreshMetadata(opCtx, nullptr);
@@ -521,7 +521,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
     }();
     if (!refreshStatus.isOK()) {
-        UninterruptableLockGuard noInterrupt(opCtx->lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
         AutoGetCollection autoColl(opCtx, getNss(), MODE_IX, MODE_X);
         CollectionShardingState::get(opCtx, getNss())->refreshMetadata(opCtx, nullptr);
@@ -542,7 +542,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
     }
     auto refreshedMetadata = [&] {
-        UninterruptableLockGuard noInterrupt(opCtx->lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
         AutoGetCollection autoColl(opCtx, getNss(), MODE_IS);
         return CollectionShardingState::get(opCtx, getNss())->getMetadata();
     }();
@@ -592,7 +592,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
     auto notification = [&] {
         auto const whenToClean = _args.getWaitForDelete() ? CollectionShardingState::kNow
                                                           : CollectionShardingState::kDelayed;
-        UninterruptableLockGuard noInterrupt(opCtx->lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
         AutoGetCollection autoColl(opCtx, getNss(), MODE_IS);
         return CollectionShardingState::get(opCtx, getNss())->cleanUpRange(range, whenToClean);
     }();
@@ -670,7 +670,7 @@ void MigrationSourceManager::_notifyChangeStreamsOnRecipientFirstChunk(
     auto const serviceContext = opCtx->getClient()->getServiceContext();
-    UninterruptableLockGuard noInterrupt(opCtx->lockState());
+    UninterruptibleLockGuard noInterrupt(opCtx->lockState());
     AutoGetCollection autoColl(opCtx, NamespaceString::kRsOplogNamespace, MODE_IX);
     writeConflictRetry(
         opCtx, "migrateChunkToNewShard", NamespaceString::kRsOplogNamespace.ns(), [&] {
@@ -686,7 +686,7 @@ void MigrationSourceManager::_cleanup(OperationContext* opCtx) {
     auto cloneDriver = [&]() {
         // Unregister from the collection's sharding state
-        UninterruptableLockGuard noInterrupt(opCtx->lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
         AutoGetCollection autoColl(opCtx, getNss(), MODE_IX, MODE_X);
         auto css = CollectionShardingState::get(opCtx, getNss());
diff --git a/src/mongo/db/transaction_reaper.cpp b/src/mongo/db/transaction_reaper.cpp
index 12516d034f1..9d41635d5ff 100644
--- a/src/mongo/db/transaction_reaper.cpp
+++ b/src/mongo/db/transaction_reaper.cpp
@@ -155,7 +155,7 @@ int handleBatchHelper(SessionsCollection* sessionsCollection,
     invariant(locker->saveLockStateAndUnlock(&snapshot));
     const auto guard = MakeGuard([&] {
-        UninterruptableLockGuard noInterrupt(opCtx->lockState());
+        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
         locker->restoreLockState(opCtx, snapshot);
     });
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index 2bf3e717535..da37aecd0cb 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -134,7 +134,7 @@ private:
         // Get all TTL indexes from every collection.
         for (const std::string& collectionNS : ttlCollections) {
-            UninterruptableLockGuard noInterrupt(opCtx.lockState());
+            UninterruptibleLockGuard noInterrupt(opCtx.lockState());
             NamespaceString collectionNSS(collectionNS);
             AutoGetCollection autoGetCollection(&opCtx, collectionNSS, MODE_IS);
             Collection* coll = autoGetCollection.getCollection();
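
Note: for context on the lock_state.cpp hunk above, the renamed counter is what decides whether a pending lock wait uses the operation context's interruptible wait (so a killed operation abandons the wait) or a plain timed wait. The sketch below mirrors only that decision; the notification object, the kill flag, and the polling loop are simplified stand-ins of my own, not MongoDB's lock manager API.

#include <algorithm>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

// Illustrative stand-ins; these are not MongoDB types.
enum class LockResult { kGranted, kTimeout, kInterrupted };

struct GrantNotification {
    std::mutex mtx;
    std::condition_variable cv;
    bool granted = false;
};

// Sketch of the choice made in lockComplete(): take the interruptible path only when
// an operation context is present *and* no UninterruptibleLockGuard is active
// (the counter is zero).
LockResult waitForGrant(GrantNotification& notify,
                        std::chrono::milliseconds waitTime,
                        const std::atomic<bool>* opKilled,  // stand-in for opCtx kill state
                        int uninterruptibleLocksRequested) {
    std::unique_lock<std::mutex> lk(notify.mtx);
    const auto deadline = std::chrono::steady_clock::now() + waitTime;

    if (opKilled != nullptr && uninterruptibleLocksRequested == 0) {
        // Interruptible path: give up the wait once the operation is killed. The real
        // code waits on the OperationContext, which is woken directly on kill; here we
        // simply poll a flag at a short interval.
        while (!notify.granted) {
            if (opKilled->load())
                return LockResult::kInterrupted;
            const auto now = std::chrono::steady_clock::now();
            if (now >= deadline)
                return LockResult::kTimeout;
            const auto step = std::min<std::chrono::steady_clock::duration>(
                deadline - now, std::chrono::milliseconds(100));
            notify.cv.wait_for(lk, step);
        }
        return LockResult::kGranted;
    }

    // Uninterruptible path: wait for the grant or the timeout; kill requests are ignored.
    return notify.cv.wait_for(lk, waitTime, [&] { return notify.granted; })
        ? LockResult::kGranted
        : LockResult::kTimeout;
}

int main() {
    GrantNotification notify;
    std::atomic<bool> killed{false};

    // No guard is active (counter == 0), so killing the operation cancels the lock wait.
    std::thread killer([&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
        killed = true;
    });
    const LockResult r =
        waitForGrant(notify, std::chrono::seconds(5), &killed, /*uninterruptibleLocksRequested=*/0);
    killer.join();
    std::cout << (r == LockResult::kInterrupted ? "wait interrupted\n" : "wait not interrupted\n");
    return 0;
}

The condition mirrored from the diff is the check `opCtx && _uninterruptibleLocksRequested == 0`: having an operation context is necessary but not sufficient for an interruptible wait, since the guard's counter must also be zero.
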