author     Gregory Wlodarek <gregory.wlodarek@mongodb.com>    2020-08-03 14:25:13 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>   2020-08-04 01:58:47 +0000
commit     9102bf998537e744227fe2a28cb22838388bb3f6
tree       ba5d31a3385522fc382a2704135fa7ec835222e2 /src/mongo
parent     e07c2d29aded5a30ff08b5ce6a436b6ef6f44014
SERVER-48312 Remove support for 'supportsDocLocking = false'
Diffstat (limited to 'src/mongo')
52 files changed, 96 insertions, 367 deletions
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index ec63533c342..9d808376785 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -245,8 +245,7 @@ CollectionImpl::CollectionImpl(OperationContext* opCtx,
       _catalogId(catalogId),
       _uuid(uuid),
       _recordStore(std::move(recordStore)),
-      _needCappedLock(supportsDocLocking() && _recordStore && _recordStore->isCapped() &&
-                      _ns.db() != "local"),
+      _needCappedLock(_recordStore && _recordStore->isCapped() && _ns.db() != "local"),
       _indexCatalog(std::make_unique<IndexCatalogImpl>(this)),
       _cappedNotifier(_recordStore && _recordStore->isCapped()
                           ? std::make_shared<CappedInsertNotifier>()
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index 1bce91141fa..25a5eea8819 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -649,7 +649,7 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
     // reserve oplog slots here if it is run outside of a multi-document transaction. Multi-
     // document transactions reserve the appropriate oplog slots at commit time.
     OplogSlot createOplogSlot;
-    if (canAcceptWrites && supportsDocLocking() && !coordinator->isOplogDisabledFor(opCtx, nss) &&
+    if (canAcceptWrites && !coordinator->isOplogDisabledFor(opCtx, nss) &&
         !opCtx->inMultiDocumentTransaction()) {
         createOplogSlot = repl::getNextOpTime(opCtx);
     }
diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp
index 20fec3ab116..dd8591814ff 100644
--- a/src/mongo/db/catalog_raii.cpp
+++ b/src/mongo/db/catalog_raii.cpp
@@ -201,11 +201,6 @@ AutoGetOplog::AutoGetOplog(OperationContext* opCtx, OplogAccessMode mode, Date_t
         _globalLock.emplace(opCtx, lockMode, deadline, Lock::InterruptBehavior::kThrow);
     }
 
-    // Obtain database and collection intent locks for non-document-locking storage engines.
-    if (!opCtx->getServiceContext()->getStorageEngine()->supportsDocLocking()) {
-        _dbWriteLock.emplace(opCtx, NamespaceString::kLocalDb, lockMode, deadline);
-        _collWriteLock.emplace(opCtx, NamespaceString::kRsOplogNamespace, lockMode, deadline);
-    }
     _oplogInfo = repl::LocalOplogInfo::get(opCtx);
     _oplog = _oplogInfo->getCollection();
 }
diff --git a/src/mongo/db/catalog_raii.h b/src/mongo/db/catalog_raii.h
index 7a54a02cc04..62f46f0ac1e 100644
--- a/src/mongo/db/catalog_raii.h
+++ b/src/mongo/db/catalog_raii.h
@@ -248,18 +248,12 @@ private:
 /**
  * RAII-style class to acquire proper locks using special oplog locking rules for oplog accesses.
  *
- * If storage engine supports document-level locking, only global lock is acquired:
+ * Only the global lock is acquired:
  * | OplogAccessMode | Global Lock |
  * +-----------------+-------------|
  * | kRead           | MODE_IS     |
 * | kWrite          | MODE_IX     |
 *
- * Otherwise, database and collection intent locks are also acquired:
- * | OplogAccessMode | Global Lock | 'local' DB Lock | 'oplog.rs' Collection Lock |
- * +-----------------+-------------+-----------------+----------------------------|
- * | kRead           | MODE_IS     | MODE_IS         | MODE_IS                    |
- * | kWrite/kLogOp   | MODE_IX     | MODE_IX         | MODE_IX                    |
- *
 * kLogOp is a special mode for replication operation logging and it behaves similar to kWrite. The
 * difference between kWrite and kLogOp is that kLogOp invariants that global IX lock is already
 * held. It is the caller's responsibility to ensure the global lock already held is still valid
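Note: with the non-document-locking branch removed, AutoGetOplog only ever takes the global lock. A minimal usage sketch of the pattern the table above describes (hypothetical caller; assumes the usual MongoDB headers and that the oplog acquired in the constructor is exposed via getCollection()):

    // Sketch only: oplog access after this change. kWrite maps to a global
    // MODE_IX lock; no 'local' DB or 'oplog.rs' collection locks are taken.
    void touchOplog(OperationContext* opCtx) {
        AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite);
        const Collection* oplog = oplogWrite.getCollection();
        invariant(oplog);
        // ... perform the oplog write inside a WriteUnitOfWork ...
    }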
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 918ec3237ea..b8e627fa6b3 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -143,18 +143,12 @@ public:
                 " commands are enabled",
                 getTestCommandsEnabled());
 
-        auto* replCoord = repl::ReplicationCoordinator::get(opCtx);
+        auto replCoord = repl::ReplicationCoordinator::get(opCtx);
         uassert(ErrorCodes::InvalidOptions,
                 "The '$_internalReadAtClusterTime' option is only supported when replication is"
                 " enabled",
                 replCoord->isReplEnabled());
 
-        auto* storageEngine = opCtx->getServiceContext()->getStorageEngine();
-        uassert(ErrorCodes::InvalidOptions,
-                "The '$_internalReadAtClusterTime' option is only supported by storage engines"
-                " that support document-level concurrency",
-                storageEngine->supportsDocLocking());
-
         uassert(ErrorCodes::TypeMismatch,
                 "The '$_internalReadAtClusterTime' option must be a Timestamp",
                 elem.type() == BSONType::bsonTimestamp);
@@ -185,6 +179,7 @@ public:
         // down. This isn't an actual concern because the testing infrastructure won't use the
         // $_internalReadAtClusterTime option in any test suite where clean shutdown is expected
         // to occur concurrently with tests running.
+        auto storageEngine = opCtx->getServiceContext()->getStorageEngine();
         auto allDurableTime = storageEngine->getAllDurableTimestamp();
         invariant(!allDurableTime.isNull());
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index d2f9a8972ba..a48d176689e 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -245,11 +245,6 @@ Lock::CollectionLock::CollectionLock(OperationContext* opCtx,
                                      LockMode mode,
                                      Date_t deadline)
     : _opCtx(opCtx) {
-    LockMode actualLockMode = mode;
-    if (!supportsDocLocking()) {
-        actualLockMode = isSharedLockMode(mode) ? MODE_S : MODE_X;
-    }
-
     if (nssOrUUID.nss()) {
         auto& nss = *nssOrUUID.nss();
         _id = {RESOURCE_COLLECTION, nss.ns()};
@@ -258,7 +253,7 @@ Lock::CollectionLock::CollectionLock(OperationContext* opCtx,
         dassert(_opCtx->lockState()->isDbLockedForMode(nss.db(),
                                                        isSharedLockMode(mode) ? MODE_IS : MODE_IX));
 
-        _opCtx->lockState()->lock(_opCtx, _id, actualLockMode, deadline);
+        _opCtx->lockState()->lock(_opCtx, _id, mode, deadline);
         return;
     }
@@ -282,7 +277,7 @@ Lock::CollectionLock::CollectionLock(OperationContext* opCtx,
         }
 
         _id = ResourceId(RESOURCE_COLLECTION, nss.ns());
-        _opCtx->lockState()->lock(_opCtx, _id, actualLockMode, deadline);
+        _opCtx->lockState()->lock(_opCtx, _id, mode, deadline);
         locked = true;
 
         // We looked up UUID without a collection lock so it's possible that the
diff --git a/src/mongo/db/concurrency/d_concurrency.h b/src/mongo/db/concurrency/d_concurrency.h
index a392c0c2f2d..187cb3d5a41 100644
--- a/src/mongo/db/concurrency/d_concurrency.h
+++ b/src/mongo/db/concurrency/d_concurrency.h
@@ -289,7 +289,7 @@ public:
     };
 
     /**
-     * Database lock with support for collection- and document-level locking
+     * Database lock.
      *
      * This lock supports four modes (see Lock_Mode):
      *   MODE_IS: concurrent database access, requiring further collection read locks
@@ -342,18 +342,16 @@ public:
     };
 
     /**
-     * Collection lock with support for document-level locking
+     * Collection lock.
      *
      * This lock supports four modes (see Lock_Mode):
-     *   MODE_IS: concurrent collection access, requiring document level locking read locks
-     *   MODE_IX: concurrent collection access, requiring document level read or write locks
+     *   MODE_IS: concurrent collection access, requiring read locks
+     *   MODE_IX: concurrent collection access, requiring read or write locks
     *   MODE_S: shared read access to the collection, blocking any writers
     *   MODE_X: exclusive access to the collection, blocking all other readers and writers
     *
     * An appropriate DBLock must already be held before locking a collection: it is an error,
-     * checked with a dassert(), to not have a suitable database lock before locking the
-     * collection. For storage engines that do not support document-level locking, MODE_IS
-     * will be upgraded to MODE_S and MODE_IX will be upgraded to MODE_X.
+     * checked with a dassert(), to not have a suitable database lock before locking the collection.
     */
    class CollectionLock {
        CollectionLock(const CollectionLock&) = delete;
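Note: with the MODE_IS-to-MODE_S / MODE_IX-to-MODE_X upgrade removed, intent modes are now always honored as requested. A minimal sketch of the two-level pattern the comments above describe (hypothetical namespace; assumes the surrounding MongoDB concurrency headers):

    // Sketch only: a writer takes intent locks at both levels; MODE_IX is no
    // longer silently upgraded to MODE_X for any storage engine.
    Lock::DBLock dbLock(opCtx, "test", MODE_IX);
    Lock::CollectionLock collLock(opCtx, NamespaceString("test.coll"), MODE_IX);
    // Document-level isolation is left to the storage engine from here on.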
diff --git a/src/mongo/db/concurrency/d_concurrency_bm.cpp b/src/mongo/db/concurrency/d_concurrency_bm.cpp
index 4541ad0f9f6..f9c29b849e0 100644
--- a/src/mongo/db/concurrency/d_concurrency_bm.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_bm.cpp
@@ -91,11 +91,8 @@ BENCHMARK_DEFINE_F(DConcurrencyTest, BM_ResourceMutexExclusive)(benchmark::State
 }
 
 BENCHMARK_DEFINE_F(DConcurrencyTest, BM_CollectionIntentSharedLock)(benchmark::State& state) {
-    std::unique_ptr<ForceSupportsDocLocking> supportDocLocking;
-
     if (state.thread_index == 0) {
         makeKClientsWithLockers(state.threads);
-        supportDocLocking = std::make_unique<ForceSupportsDocLocking>(true);
     }
 
     for (auto keepRunning : state) {
@@ -110,11 +107,8 @@ BENCHMARK_DEFINE_F(DConcurrencyTest, BM_CollectionIntentSharedLock)(benchmark::S
 }
 
 BENCHMARK_DEFINE_F(DConcurrencyTest, BM_CollectionIntentExclusiveLock)(benchmark::State& state) {
-    std::unique_ptr<ForceSupportsDocLocking> supportDocLocking;
-
     if (state.thread_index == 0) {
         makeKClientsWithLockers(state.threads);
-        supportDocLocking = std::make_unique<ForceSupportsDocLocking>(true);
     }
 
     for (auto keepRunning : state) {
@@ -128,12 +122,9 @@ BENCHMARK_DEFINE_F(DConcurrencyTest, BM_CollectionIntentExclusiveLock)(benchmark
     }
 }
 
-BENCHMARK_DEFINE_F(DConcurrencyTest, BM_MMAPv1CollectionSharedLock)(benchmark::State& state) {
-    std::unique_ptr<ForceSupportsDocLocking> supportDocLocking;
-
+BENCHMARK_DEFINE_F(DConcurrencyTest, BM_CollectionSharedLock)(benchmark::State& state) {
     if (state.thread_index == 0) {
         makeKClientsWithLockers(state.threads);
-        supportDocLocking = std::make_unique<ForceSupportsDocLocking>(false);
     }
 
     for (auto keepRunning : state) {
@@ -147,12 +138,9 @@ BENCHMARK_DEFINE_F(DConcurrencyTest, BM_MMAPv1CollectionSharedLock)(benchmark::S
     }
 }
 
-BENCHMARK_DEFINE_F(DConcurrencyTest, BM_MMAPv1CollectionExclusiveLock)(benchmark::State& state) {
-    std::unique_ptr<ForceSupportsDocLocking> supportDocLocking;
-
+BENCHMARK_DEFINE_F(DConcurrencyTest, BM_CollectionExclusiveLock)(benchmark::State& state) {
     if (state.thread_index == 0) {
         makeKClientsWithLockers(state.threads);
-        supportDocLocking = std::make_unique<ForceSupportsDocLocking>(false);
     }
 
     for (auto keepRunning : state) {
@@ -176,10 +164,8 @@ BENCHMARK_REGISTER_F(DConcurrencyTest, BM_CollectionIntentSharedLock)
 BENCHMARK_REGISTER_F(DConcurrencyTest, BM_CollectionIntentExclusiveLock)
     ->ThreadRange(1, kMaxPerfThreads);
 
-BENCHMARK_REGISTER_F(DConcurrencyTest, BM_MMAPv1CollectionSharedLock)
-    ->ThreadRange(1, kMaxPerfThreads);
-BENCHMARK_REGISTER_F(DConcurrencyTest, BM_MMAPv1CollectionExclusiveLock)
-    ->ThreadRange(1, kMaxPerfThreads);
+BENCHMARK_REGISTER_F(DConcurrencyTest, BM_CollectionSharedLock)->ThreadRange(1, kMaxPerfThreads);
+BENCHMARK_REGISTER_F(DConcurrencyTest, BM_CollectionExclusiveLock)->ThreadRange(1, kMaxPerfThreads);
 
 }  // namespace
 }  // namespace mongo
diff --git a/src/mongo/db/concurrency/lock_manager_test_help.h b/src/mongo/db/concurrency/lock_manager_test_help.h
index d8b1c68b4e0..bfd0cd273b2 100644
--- a/src/mongo/db/concurrency/lock_manager_test_help.h
+++ b/src/mongo/db/concurrency/lock_manager_test_help.h
@@ -71,23 +71,4 @@ public:
     }
 };
 
-/**
- * A RAII object that temporarily forces setting of the _supportsDocLocking global variable (defined
- * in db/service_context.cpp and returned by mongo::supportsDocLocking()) for testing purposes.
- */
-extern bool _supportsDocLocking;
-class ForceSupportsDocLocking {
-public:
-    explicit ForceSupportsDocLocking(bool supported) : _oldSupportsDocLocking(_supportsDocLocking) {
-        _supportsDocLocking = supported;
-    }
-
-    ~ForceSupportsDocLocking() {
-        _supportsDocLocking = _oldSupportsDocLocking;
-    }
-
-private:
-    const bool _oldSupportsDocLocking;
-};
-
 }  // namespace mongo
diff --git a/src/mongo/db/index_build_entry_helpers.cpp b/src/mongo/db/index_build_entry_helpers.cpp
index d8df85c6d00..95cfcb7c6c5 100644
--- a/src/mongo/db/index_build_entry_helpers.cpp
+++ b/src/mongo/db/index_build_entry_helpers.cpp
@@ -204,21 +204,15 @@ Status addIndexBuildEntry(OperationContext* opCtx, const IndexBuildEntry& indexB
 
         WriteUnitOfWork wuow(opCtx);
 
-        Status status = Status::OK();
-        if (supportsDocLocking()) {
-            // Reserve a slot in the oplog. This must only be done for document level locking
-            // storage engines, which are allowed to insert oplog documents out-of-order into
-            // the oplog.
-            auto oplogInfo = repl::LocalOplogInfo::get(opCtx);
-            auto oplogSlot = oplogInfo->getNextOpTimes(opCtx, 1U)[0];
-            status = collection->insertDocument(
-                opCtx,
-                InsertStatement(kUninitializedStmtId, indexBuildEntry.toBSON(), oplogSlot),
-                nullptr);
-        } else {
-            status = collection->insertDocument(
-                opCtx, InsertStatement(indexBuildEntry.toBSON()), nullptr);
-        }
+        // Reserve a slot in the oplog as the storage engine is allowed to insert oplog
+        // documents out-of-order into the oplog.
+        auto oplogInfo = repl::LocalOplogInfo::get(opCtx);
+        auto oplogSlot = oplogInfo->getNextOpTimes(opCtx, 1U)[0];
+        Status status = collection->insertDocument(
+            opCtx,
+            InsertStatement(kUninitializedStmtId, indexBuildEntry.toBSON(), oplogSlot),
+            nullptr);
+
         if (!status.isOK()) {
             return status;
         }
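Note: a concrete scenario (illustrative timestamps, not from the commit) motivates the now-unconditional slot reservation above: if thread A reserves optime T=5 and thread B reserves T=6, B may commit its oplog document first, leaving a transient hole at T=5; the engine's oplog visibility rules keep readers from scanning past the hole until A commits or aborts.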
diff --git a/src/mongo/db/initialize_operation_session_info.cpp b/src/mongo/db/initialize_operation_session_info.cpp
index 2059dae0623..afa0c739b6f 100644
--- a/src/mongo/db/initialize_operation_session_info.cpp
+++ b/src/mongo/db/initialize_operation_session_info.cpp
@@ -42,8 +42,7 @@ OperationSessionInfoFromClient initializeOperationSessionInfo(OperationContext*
                                                               const BSONObj& requestBody,
                                                               bool requiresAuth,
                                                               bool attachToOpCtx,
-                                                              bool isReplSetMemberOrMongos,
-                                                              bool supportsDocLocking) {
+                                                              bool isReplSetMemberOrMongos) {
     auto osi = OperationSessionInfoFromClient::parse("OperationSessionInfo"_sd, requestBody);
 
     if (opCtx->getClient()->isInDirectClient()) {
@@ -110,10 +109,6 @@ OperationSessionInfoFromClient initializeOperationSessionInfo(OperationContext*
         uassert(ErrorCodes::IllegalOperation,
                 "Transaction numbers are only allowed on a replica set member or mongos",
                 isReplSetMemberOrMongos);
-        uassert(ErrorCodes::IllegalOperation,
-                "Transaction numbers are only allowed on storage engines that support "
-                "document-level locking",
-                supportsDocLocking);
         uassert(ErrorCodes::InvalidOptions,
                 "Transaction number cannot be negative",
                 *osi.getTxnNumber() >= 0);
diff --git a/src/mongo/db/initialize_operation_session_info.h b/src/mongo/db/initialize_operation_session_info.h
index cf366dd1dfd..c1fed0ede7b 100644
--- a/src/mongo/db/initialize_operation_session_info.h
+++ b/src/mongo/db/initialize_operation_session_info.h
@@ -46,14 +46,13 @@ namespace mongo {
  * authorization or not. This can be determined by invoking ->requiresAuth() on the parsed command.
  * If it does not require authorization, return boost::none.
  *
- * Both isReplSetMemberOrMongos and supportsDocLocking need to be true if the command contains a
- * transaction number, otherwise this function will throw.
+ * isReplSetMemberOrMongos needs to be true if the command contains a transaction number, otherwise
+ * this function will throw.
 */
OperationSessionInfoFromClient initializeOperationSessionInfo(OperationContext* opCtx,
                                                              const BSONObj& requestBody,
                                                              bool requiresAuth,
                                                              bool attachToOpCtx,
-                                                              bool isReplSetMemberOrMongos,
-                                                              bool supportsDocLocking);
+                                                              bool isReplSetMemberOrMongos);
 
 }  // namespace mongo
diff --git a/src/mongo/db/logical_session_id_test.cpp b/src/mongo/db/logical_session_id_test.cpp
index 91a9efdbf8e..de37f424f3f 100644
--- a/src/mongo/db/logical_session_id_test.cpp
+++ b/src/mongo/db/logical_session_id_test.cpp
@@ -239,7 +239,7 @@ TEST_F(LogicalSessionIdTest, GenWithoutAuthedUser) {
 TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_NoSessionIdNoTransactionNumber) {
     addSimpleUser(UserName("simple", "test"));
 
-    initializeOperationSessionInfo(_opCtx.get(), BSON("TestCmd" << 1), true, true, true, true);
+    initializeOperationSessionInfo(_opCtx.get(), BSON("TestCmd" << 1), true, true, true);
 
     ASSERT(!_opCtx->getLogicalSessionId());
     ASSERT(!_opCtx->getTxnNumber());
@@ -255,7 +255,6 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_SessionIdNoTransacti
                                                   << "TestField"),
                                    true,
                                    true,
-                                   true,
                                    true);
 
     ASSERT(_opCtx->getLogicalSessionId());
@@ -272,7 +271,6 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_MissingSessionIdWith
                                                       << "TestField"),
                                        true,
                                        true,
-                                       true,
                                        true),
        AssertionException,
        ErrorCodes::InvalidOptions);
@@ -289,7 +287,6 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_SessionIdAndTransact
                                                   << "TestField"),
                                    true,
                                    true,
-                                   true,
                                    true);
 
     ASSERT(_opCtx->getLogicalSessionId());
@@ -311,25 +308,6 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_IsReplSetMemberOrMon
                                                       << "TestField"),
                                        true,
                                        true,
-                                       false,
-                                       true),
-        AssertionException,
-        ErrorCodes::IllegalOperation);
-}
-
-TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_SupportsDocLockingFalse) {
-    addSimpleUser(UserName("simple", "test"));
-    LogicalSessionFromClient lsid;
-    lsid.setId(UUID::gen());
-
-    ASSERT_THROWS_CODE(
-        initializeOperationSessionInfo(_opCtx.get(),
-                                       BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber"
-                                                      << 100LL << "OtherField"
-                                                      << "TestField"),
-                                       true,
-                                       true,
-                                       true,
                                        false),
        AssertionException,
        ErrorCodes::IllegalOperation);
@@ -348,7 +326,6 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_IgnoresInfoIfNoCache
                                                   << "TestField"),
                                    true,
                                    true,
-                                   true,
                                    true);
     ASSERT(sessionInfo.getSessionId() == boost::none);
     ASSERT(sessionInfo.getTxnNumber() == boost::none);
@@ -367,7 +344,6 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_IgnoresInfoIfDoNotAt
                                                   << "TestField"),
                                    true,
                                    false,
-                                   true,
                                    true);
 
     ASSERT(sessionInfo.getSessionId() == boost::none);
@@ -392,7 +368,6 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_VerifyUIDEvenIfDoNot
             BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber" << 100LL),
             true,
             false,
-            true,
             true),
        AssertionException,
        ErrorCodes::Unauthorized);
@@ -413,10 +388,10 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_SendingInfoFailsInDi
                              << "foo");
         commandBuilder.appendElements(param);
 
-        ASSERT_THROWS_CODE(initializeOperationSessionInfo(
-                               _opCtx.get(), commandBuilder.obj(), true, true, true, true),
-                           AssertionException,
-                           50891);
+        ASSERT_THROWS_CODE(
+            initializeOperationSessionInfo(_opCtx.get(), commandBuilder.obj(), true, true, true),
+            AssertionException,
+            50891);
     }
 
     _opCtx->getClient()->setInDirectClient(false);
@@ -445,7 +420,6 @@ TEST_F(LogicalSessionIdTest, MultipleUsersPerSessionIsNotAllowed) {
             BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber" << 100LL),
             true,
             true,
-            true,
             true),
        AssertionException,
        ErrorCodes::Unauthorized);
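Note: a caller-side view of the narrowed contract, mirroring the call in service_entry_point_common.cpp later in this diff (sketch only):

    // The supportsDocLocking argument is gone; a command carrying a txnNumber
    // now only requires the node to be a replica set member or mongos.
    auto sessionOptions = initializeOperationSessionInfo(
        opCtx,
        request.body,
        command->requiresAuth(),
        command->attachLogicalSessionsToOpCtx(),
        replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet);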
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 7fa9c49d362..d79fe941414 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -295,18 +295,16 @@ void insertDocuments(OperationContext* opCtx,
     // to be written. Multidocument transactions should not generate opTimes because they are
     // generated at the time of commit.
     auto batchSize = std::distance(begin, end);
-    if (supportsDocLocking()) {
-        auto replCoord = repl::ReplicationCoordinator::get(opCtx);
-        auto inTransaction = opCtx->inMultiDocumentTransaction();
-
-        if (!inTransaction && !replCoord->isOplogDisabledFor(opCtx, collection->ns())) {
-            // Populate 'slots' with new optimes for each insert.
-            // This also notifies the storage engine of each new timestamp.
-            auto oplogSlots = repl::getNextOpTimes(opCtx, batchSize);
-            auto slot = oplogSlots.begin();
-            for (auto it = begin; it != end; it++) {
-                it->oplogSlot = *slot++;
-            }
+    auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+    auto inTransaction = opCtx->inMultiDocumentTransaction();
+
+    if (!inTransaction && !replCoord->isOplogDisabledFor(opCtx, collection->ns())) {
+        // Populate 'slots' with new optimes for each insert.
+        // This also notifies the storage engine of each new timestamp.
+        auto oplogSlots = repl::getNextOpTimes(opCtx, batchSize);
+        auto slot = oplogSlots.begin();
+        for (auto it = begin; it != end; it++) {
+            it->oplogSlot = *slot++;
         }
     }
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 203b68f7e96..d3749f0be47 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -958,13 +958,11 @@ Status applyOperation_inlock(OperationContext* opCtx,
                               << uuid.get() << "): " << redact(opOrGroupedInserts.toBSON()),
                 collection);
         requestNss = collection->ns();
-        dassert(opCtx->lockState()->isCollectionLockedForMode(
-            requestNss, supportsDocLocking() ? MODE_IX : MODE_X));
+        dassert(opCtx->lockState()->isCollectionLockedForMode(requestNss, MODE_IX));
     } else {
         requestNss = op.getNss();
         invariant(requestNss.coll().size());
-        dassert(opCtx->lockState()->isCollectionLockedForMode(
-                    requestNss, supportsDocLocking() ? MODE_IX : MODE_X),
+        dassert(opCtx->lockState()->isCollectionLockedForMode(requestNss, MODE_IX),
                 requestNss.ns());
         collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, requestNss);
     }
diff --git a/src/mongo/db/repl/oplog_applier_impl.cpp b/src/mongo/db/repl/oplog_applier_impl.cpp
index bde193887be..b0b79a33940 100644
--- a/src/mongo/db/repl/oplog_applier_impl.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl.cpp
@@ -183,16 +183,14 @@ void processCrudOp(OperationContext* opCtx,
                    uint32_t* hash,
                    StringMapHashedKey* hashedNs,
                    CachedCollectionProperties* collPropertiesCache) {
-    const bool supportsDocLocking =
-        opCtx->getServiceContext()->getStorageEngine()->supportsDocLocking();
     auto collProperties = collPropertiesCache->getCollectionProperties(opCtx, *hashedNs);
 
-    // For doc locking engines, include the _id of the document in the hash so we get
-    // parallelism even if all writes are to a single collection.
+    // Include the _id of the document in the hash so we get parallelism even if all writes are to a
+    // single collection.
     //
     // For capped collections, this is illegal, since capped collections must preserve
     // insertion order.
-    if (supportsDocLocking && !collProperties.isCapped) {
+    if (!collProperties.isCapped) {
         BSONElement id = op->getIdElement();
         BSONElementComparator elementHasher(BSONElementComparator::FieldNamesMode::kIgnore,
                                             collProperties.collator);
@@ -579,13 +577,9 @@ void scheduleWritesToOplog(OperationContext* opCtx,
     const bool enoughToMultiThread =
         ops.size() >= kMinOplogEntriesPerThread * writerPool->getStats().numThreads;
 
-    // Only doc-locking engines support parallel writes to the oplog because they are required to
-    // ensure that oplog entries are ordered correctly, even if inserted out-of-order. Additionally,
-    // there would be no way to take advantage of multiple threads if a storage engine doesn't
-    // support document locking.
-    if (!enoughToMultiThread ||
-        !opCtx->getServiceContext()->getStorageEngine()->supportsDocLocking()) {
-
+    // Storage engines support parallel writes to the oplog because they are required to ensure that
+    // oplog entries are ordered correctly, even if inserted out-of-order.
+    if (!enoughToMultiThread) {
         writerPool->schedule(makeOplogWriterForRange(0, ops.size()));
         return;
     }
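Note: a self-contained sketch of the dispatch idea processCrudOp keeps above (function name and hash-mixing constant are illustrative, not the actual MongoDB implementation):

    #include <functional>
    #include <string>

    // Non-capped collections mix the document _id into the namespace hash so
    // inserts to a single collection still spread across writer threads; capped
    // collections hash only the namespace so one thread preserves insert order.
    size_t pickWriterThread(const std::string& ns,
                            const std::string& idBytes,
                            bool isCapped,
                            size_t numThreads) {
        size_t h = std::hash<std::string>{}(ns);
        if (!isCapped) {
            // Boost-style hash_combine; the constant is only a mixing value.
            h ^= std::hash<std::string>{}(idBytes) + 0x9e3779b9 + (h << 6) + (h >> 2);
        }
        return h % numThreads;
    }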
diff --git a/src/mongo/db/repl/oplog_applier_impl_test.cpp b/src/mongo/db/repl/oplog_applier_impl_test.cpp
index be182acef87..ef804311365 100644
--- a/src/mongo/db/repl/oplog_applier_impl_test.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl_test.cpp
@@ -632,9 +632,8 @@ protected:
             } else if (nss == _nss1 || nss == _nss2 ||
                        nss == NamespaceString::kSessionTransactionsTableNamespace) {
                 // Storing the inserted documents in a sorted data structure to make checking
-                // for valid results easier. On a document level locking storage engine the
-                // inserts will be performed by different threads and there's no guarantee of
-                // the order.
+                // for valid results easier. The inserts will be performed by different threads
+                // and there's no guarantee of the order.
                 _insertedDocs[nss].insert(docs.begin(), docs.end());
             } else
                 FAIL("Unexpected insert") << " into " << nss << " first doc: " << docs.front();
diff --git a/src/mongo/db/repl/oplog_test.cpp b/src/mongo/db/repl/oplog_test.cpp
index d6e23d6c3ac..b19713167bf 100644
--- a/src/mongo/db/repl/oplog_test.cpp
+++ b/src/mongo/db/repl/oplog_test.cpp
@@ -233,7 +233,7 @@ OpTime _logOpNoopWithMsg(OperationContext* opCtx,
     return opTime;
 }
 
-TEST_F(OplogTest, ConcurrentLogOpWithoutDocLockingSupport) {
+TEST_F(OplogTest, ConcurrentLogOp) {
     OpTimeNamespaceStringMap opTimeNssMap;
     std::vector<OplogEntry> oplogEntries;
 
@@ -249,42 +249,9 @@ TEST_F(OplogTest, ConcurrentLogOpWithoutDocLockingSupport) {
 
                 _logOpNoopWithMsg(opCtx.get(), mtx, opTimeNssMap, nss);
 
-                // In a storage engine that does not support doc locking, upon returning from
-                // logOp(), this thread still holds an implicit MODE_X lock on the oplog collection
-                // until it commits the WriteUnitOfWork. Therefore, we must wait on the barrier
-                // after the WUOW is committed.
-                wunit.commit();
-                barrier->countDownAndWait();
-            };
-        },
-        &opTimeNssMap,
-        &oplogEntries,
-        2U);
-
-    _checkOplogEntry(oplogEntries[0], *(opTimeNssMap.begin()));
-    _checkOplogEntry(oplogEntries[1], *(opTimeNssMap.rbegin()));
-}
-
-TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupport) {
-    OpTimeNamespaceStringMap opTimeNssMap;
-    std::vector<OplogEntry> oplogEntries;
-
-    ForceSupportsDocLocking support(true);
-    _testConcurrentLogOp(
-        [](const NamespaceString& nss,
-           Mutex* mtx,
-           OpTimeNamespaceStringMap* opTimeNssMap,
-           unittest::Barrier* barrier) {
-            return [=] {
-                auto opCtx = cc().makeOperationContext();
-                AutoGetDb autoDb(opCtx.get(), nss.db(), MODE_X);
-                WriteUnitOfWork wunit(opCtx.get());
-
-                _logOpNoopWithMsg(opCtx.get(), mtx, opTimeNssMap, nss);
-
-                // In a storage engine that supports doc locking, it is okay for multiple threads to
-                // maintain uncommitted WUOWs upon returning from logOp() because each thread will
-                // hold an implicit MODE_IX lock on the oplog collection.
+                // It is okay for multiple threads to maintain uncommitted WUOWs upon returning from
+                // logOp() because each thread will hold an implicit MODE_IX lock on the oplog
+                // collection.
                 barrier->countDownAndWait();
                 wunit.commit();
             };
@@ -297,11 +264,10 @@ TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupport) {
     _checkOplogEntry(oplogEntries[1], *(opTimeNssMap.rbegin()));
 }
 
-TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupportRevertFirstOplogEntry) {
+TEST_F(OplogTest, ConcurrentLogOpRevertFirstOplogEntry) {
     OpTimeNamespaceStringMap opTimeNssMap;
     std::vector<OplogEntry> oplogEntries;
 
-    ForceSupportsDocLocking support(true);
     _testConcurrentLogOp(
         [](const NamespaceString& nss,
            Mutex* mtx,
@@ -314,9 +280,9 @@ TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupportRevertFirstOplogEntry) {
 
                 auto opTime = _logOpNoopWithMsg(opCtx.get(), mtx, opTimeNssMap, nss);
 
-                // In a storage engine that supports doc locking, it is okay for multiple threads to
-                // maintain uncommitted WUOWs upon returning from logOp() because each thread will
-                // hold an implicit MODE_IX lock on the oplog collection.
+                // It is okay for multiple threads to maintain uncommitted WUOWs upon returning from
+                // logOp() because each thread will hold an implicit MODE_IX lock on the oplog
+                // collection.
                 barrier->countDownAndWait();
 
                 // Revert the first logOp() call and confirm that there are no holes in the
@@ -344,11 +310,10 @@ TEST_F(OplogTest, ConcurrentLogOpRevertFirstOplogEntry) {
     _checkOplogEntry(oplogEntries[0], *(opTimeNssMap.crbegin()));
 }
 
-TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupportRevertLastOplogEntry) {
+TEST_F(OplogTest, ConcurrentLogOpRevertLastOplogEntry) {
     OpTimeNamespaceStringMap opTimeNssMap;
     std::vector<OplogEntry> oplogEntries;
 
-    ForceSupportsDocLocking support(true);
     _testConcurrentLogOp(
         [](const NamespaceString& nss,
           Mutex* mtx,
@@ -361,9 +326,9 @@ TEST_F(OplogTest, ConcurrentLogOpRevertLastOplogEntry) {
 
                 auto opTime = _logOpNoopWithMsg(opCtx.get(), mtx, opTimeNssMap, nss);
 
-                // In a storage engine that supports doc locking, it is okay for multiple threads to
-                // maintain uncommitted WUOWs upon returning from logOp() because each thread will
-                // hold an implicit MODE_IX lock on the oplog collection.
+                // It is okay for multiple threads to maintain uncommitted WUOWs upon returning from
+                // logOp() because each thread will hold an implicit MODE_IX lock on the oplog
+                // collection.
                 barrier->countDownAndWait();
 
                 // Revert the last logOp() call and confirm that there are no holes in the
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index c521d3609c4..8354d81f085 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -4815,16 +4815,15 @@ OpTime ReplicationCoordinatorImpl::_recalculateStableOpTime(WithLock lk) {
     // and secondary nodes due to their distinct behaviors, as described below.
     //
-    // On a primary node that supports document level locking, oplog writes may commit out of
-    // timestamp order, which can lead to the creation of oplog "holes". On a primary the
-    // all_durable timestamp tracks the newest timestamp T such that no future transactions will
-    // commit behind T. Since all_durable is a timestamp, however, without a term, we need to
-    // construct an optime with a proper term. If we are primary, then the all_durable should always
-    // correspond to a timestamp at or newer than the first write completed by this node as primary,
-    // since we write down a new oplog entry before allowing writes as a new primary. Thus, it can
-    // be assigned the current term of this primary.
+    // On a primary node, oplog writes may commit out of timestamp order, which can lead to the
+    // creation of oplog "holes". On a primary the all_durable timestamp tracks the newest timestamp
+    // T such that no future transactions will commit behind T. Since all_durable is a timestamp,
+    // however, without a term, we need to construct an optime with a proper term. If we are
+    // primary, then the all_durable should always correspond to a timestamp at or newer than the
+    // first write completed by this node as primary, since we write down a new oplog entry before
+    // allowing writes as a new primary. Thus, it can be assigned the current term of this primary.
     OpTime allDurableOpTime = OpTime::max();
-    if (_readWriteAbility->canAcceptNonLocalWrites(lk) && _storage->supportsDocLocking(_service)) {
+    if (_readWriteAbility->canAcceptNonLocalWrites(lk)) {
         allDurableOpTime =
             OpTime(_storage->getAllDurableTimestamp(getServiceContext()), _topCoord->getTerm());
     }
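Note: a worked example of the pairing above (illustrative values, not from the commit): if the engine reports an all_durable timestamp of (100, 5) while this primary's term is 3, the branch now yields OpTime({100, 5}, 3) as the all_durable optime fed into the stable-optime calculation, regardless of which storage engine is running.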
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index 55e0b5c277d..00ebb66e380 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -948,6 +948,7 @@ TEST_F(
     auto opCtx = makeOperationContext();
 
     // Nothing satisfied
+    getStorageInterface()->allDurableTimestamp = time1.getTimestamp();
     replCoordSetMyLastAppliedOpTime(time1, Date_t() + Seconds(100));
     replCoordSetMyLastDurableOpTime(time1, Date_t() + Seconds(100));
     ReplicationCoordinator::StatusAndDuration statusAndDur =
@@ -983,6 +984,7 @@ TEST_F(
     ASSERT_OK(statusAndDur.status);
 
     // multiDC satisfied but not majority or multiRack
+    getStorageInterface()->allDurableTimestamp = time2.getTimestamp();
     replCoordSetMyLastAppliedOpTime(time2, Date_t() + Seconds(100));
     replCoordSetMyLastDurableOpTime(time2, Date_t() + Seconds(100));
     getReplCoord()->setLastAppliedOptime_forTest(2, 3, time2).transitional_ignore();
@@ -4819,6 +4821,7 @@ TEST_F(ReplCoordTest, IsMasterWithCommittedSnapshot) {
     time_t majorityWriteDate = lastWriteDate;
     OpTime majorityOpTime = opTime;
 
+    getStorageInterface()->allDurableTimestamp = opTime.getTimestamp();
     replCoordSetMyLastAppliedOpTime(opTime, Date_t() + Seconds(100));
     replCoordSetMyLastDurableOpTime(opTime, Date_t() + Seconds(100));
     ASSERT_EQUALS(majorityOpTime, getReplCoord()->getCurrentCommittedSnapshotOpTime());
@@ -5480,6 +5483,7 @@ TEST_F(ReplCoordTest,
                        HostAndPort("node1", 12345));
     ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
     OpTime time(Timestamp(100, 1), 1);
+    getStorageInterface()->allDurableTimestamp = time.getTimestamp();
     replCoordSetMyLastAppliedOpTime(time, Date_t() + Seconds(100));
     replCoordSetMyLastDurableOpTime(time, Date_t() + Seconds(100));
     simulateSuccessfulV1Election();
@@ -5596,7 +5600,6 @@ TEST_F(StableOpTimeTest, SetMyLastAppliedSetsStableOpTimeForStorage) {
 
     Timestamp stableTimestamp;
 
-    getStorageInterface()->supportsDocLockingBool = true;
     ASSERT_EQUALS(Timestamp::min(), getStorageInterface()->getStableTimestamp());
 
     ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -5609,7 +5612,7 @@ TEST_F(StableOpTimeTest, SetMyLastAppliedSetsStableOpTimeForStorage) {
     replCoordAdvanceCommitPoint(OpTimeWithTermOne(10, 1), Date_t() + Seconds(100), false);
     ASSERT_EQUALS(Timestamp(1, 1), getStorageInterface()->getStableTimestamp());
 
-    // Check that the stable timestamp is not updated if the all-committed timestamp is behind.
+    // Check that the stable timestamp is not updated if the all durable timestamp is behind.
     replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(1, 2), Date_t() + Seconds(100));
     stableTimestamp = getStorageInterface()->getStableTimestamp();
     ASSERT_EQUALS(Timestamp(1, 1), getStorageInterface()->getStableTimestamp());
@@ -5652,7 +5655,6 @@ TEST_F(StableOpTimeTest, SetMyLastAppliedSetsStableOpTimeForStorageDisableMajori
                                                      << "test3:1234"))),
                        HostAndPort("test2", 1234));
 
-    getStorageInterface()->supportsDocLockingBool = true;
     ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
 
     // Initially the stable timestamp is unset.
@@ -5690,7 +5692,6 @@ TEST_F(StableOpTimeTest, AdvanceCommitPointSetsStableOpTimeForStorage) {
     Timestamp stableTimestamp;
     long long term = 1;
 
-    getStorageInterface()->supportsDocLockingBool = true;
     getStorageInterface()->allDurableTimestamp = Timestamp(2, 1);
 
     // Add three stable optime candidates.
@@ -5705,7 +5706,7 @@ TEST_F(StableOpTimeTest, AdvanceCommitPointSetsStableOpTimeForStorage) {
     stableTimestamp = getStorageInterface()->getStableTimestamp();
     ASSERT_EQUALS(Timestamp(2, 1), stableTimestamp);
 
-    // Check that the stable timestamp is not updated if the all-committed timestamp is behind.
+    // Check that the stable timestamp is not updated if the all durable timestamp is behind.
     replCoordAdvanceCommitPoint(OpTime({2, 2}, term), Date_t() + Seconds(2), false);
     ASSERT_EQUALS(getReplCoord()->getLastCommittedOpTimeAndWallTime().wallTime,
                   Date_t() + Seconds(2));
@@ -5742,7 +5743,6 @@ TEST_F(StableOpTimeTest,
                                                      << "test3:1234"))),
                        HostAndPort("test2", 1234));
 
-    getStorageInterface()->supportsDocLockingBool = true;
     ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
 
     // Initially the stable timestamp and commit point are unset.
@@ -5751,8 +5751,8 @@ TEST_F(StableOpTimeTest,
 
     // Advance the stable timestamp a bit. In this test we simulate a case where timestamp (1,3) is
     // getting rolled back and timestamp (1,2) is the rollback common point. Note that when
-    // EMRC=false, the stable timestamp is always advanced to the newest all-committed/all-durable
-    // timestamp i.e. it is not required to be behind the majority commit point.
+    // EMRC=false, the stable timestamp is always advanced to the newest all durable timestamp i.e.
+    // it is not required to be behind the majority commit point.
     getStorageInterface()->allDurableTimestamp = Timestamp(1, 3);
     replCoordSetMyLastAppliedOpTime(OpTime({1, 1}, 1), Date_t() + Seconds(1));
     replCoordSetMyLastAppliedOpTime(OpTime({1, 2}, 1), Date_t() + Seconds(2));
@@ -5954,6 +5954,7 @@ TEST_F(ReplCoordTest, ReadAfterCommittedGreaterOpTime) {
     auto opCtx = makeOperationContext();
     runSingleNodeElection(opCtx.get());
 
+    getStorageInterface()->allDurableTimestamp = Timestamp(100, 1);
     replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 1), Date_t() + Seconds(100));
     replCoordSetMyLastDurableOpTime(OpTime(Timestamp(100, 1), 1), Date_t() + Seconds(100));
 
@@ -5974,6 +5975,7 @@ TEST_F(ReplCoordTest, ReadAfterCommittedEqualOpTime) {
     runSingleNodeElection(opCtx.get());
     OpTime time(Timestamp(100, 1), 1);
 
+    getStorageInterface()->allDurableTimestamp = time.getTimestamp();
     replCoordSetMyLastAppliedOpTime(time, Date_t() + Seconds(100));
     replCoordSetMyLastDurableOpTime(time, Date_t() + Seconds(100));
 
@@ -5992,6 +5994,7 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredGreaterOpTime) {
     auto opCtx = makeOperationContext();
     runSingleNodeElection(opCtx.get());
 
+    getStorageInterface()->allDurableTimestamp = Timestamp(100, 1);
     replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 1), Date_t() + Seconds(100));
     replCoordSetMyLastDurableOpTime(OpTime(Timestamp(100, 1), 1), Date_t() + Seconds(100));
     OpTime committedOpTime(Timestamp(200, 1), 1);
@@ -6023,6 +6026,7 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredEqualOpTime) {
 
     auto pseudoLogOp = stdx::async(stdx::launch::async, [this, &opTimeToWait]() {
         // Not guaranteed to be scheduled after waitUntil blocks...
+        getStorageInterface()->allDurableTimestamp = opTimeToWait.getTimestamp();
         replCoordSetMyLastAppliedOpTime(opTimeToWait, Date_t() + Seconds(100));
         replCoordSetMyLastDurableOpTime(opTimeToWait, Date_t() + Seconds(100));
     });
@@ -6841,6 +6845,7 @@ TEST_F(ReplCoordTest, DoNotAdvanceCommittedSnapshotWhenAppliedOpTimeChanges) {
     ASSERT_EQUALS(OpTime(), getReplCoord()->getCurrentCommittedSnapshotOpTime());
     replCoordSetMyLastAppliedOpTime(time2, Date_t() + Seconds(100));
     ASSERT_EQUALS(OpTime(), getReplCoord()->getCurrentCommittedSnapshotOpTime());
+    getStorageInterface()->allDurableTimestamp = time2.getTimestamp();
     replCoordSetMyLastDurableOpTime(time2, Date_t() + Seconds(100));
     ASSERT_EQUALS(time2, getReplCoord()->getCurrentCommittedSnapshotOpTime());
 }
diff --git a/src/mongo/db/repl/storage_interface.h b/src/mongo/db/repl/storage_interface.h
index 0bb9bf0ea58..bb722d04fcb 100644
--- a/src/mongo/db/repl/storage_interface.h
+++ b/src/mongo/db/repl/storage_interface.h
@@ -423,8 +423,7 @@ public:
     /**
      * Returns the all_durable timestamp. All transactions with timestamps earlier than the
-     * all_durable timestamp are committed. Only storage engines that support document level locking
-     * must provide an implementation. Other storage engines may provide a no-op implementation.
+     * all_durable timestamp are committed.
      *
      * The all_durable timestamp only includes non-prepared transactions that have been given a
      * commit_timestamp and prepared transactions that have been given a durable_timestamp.
@@ -441,11 +440,6 @@ public:
     virtual Timestamp getOldestOpenReadTimestamp(ServiceContext* serviceCtx) const = 0;
 
     /**
-     * Returns true if the storage engine supports document level locking.
-     */
-    virtual bool supportsDocLocking(ServiceContext* serviceCtx) const = 0;
-
-    /**
      * Registers a timestamp with the storage engine so that it can enforce oplog visibility rules.
      *
      * orderedCommit - specifies whether the timestamp provided is ordered w.r.t. commits; that is,
     * all commits with older timestamps have already occurred, and any commits with newer
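Note: to make the all_durable definition above concrete (illustrative values): if transactions have committed at timestamps 1, 2, and 4 while the transaction holding timestamp 3 is still uncommitted, the all_durable timestamp is 2, since a commit at 3 could still appear behind 4.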
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 07e1b21ea33..0f9c3aa4969 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -1379,10 +1379,6 @@ boost::optional<Timestamp> StorageInterfaceImpl::getLastStableRecoveryTimestamp(
     return ret;
 }
 
-bool StorageInterfaceImpl::supportsDocLocking(ServiceContext* serviceCtx) const {
-    return serviceCtx->getStorageEngine()->supportsDocLocking();
-}
-
 Timestamp StorageInterfaceImpl::getAllDurableTimestamp(ServiceContext* serviceCtx) const {
     return serviceCtx->getStorageEngine()->getAllDurableTimestamp();
 }
diff --git a/src/mongo/db/repl/storage_interface_impl.h b/src/mongo/db/repl/storage_interface_impl.h
index 6d6152369f3..ac5a4497f9b 100644
--- a/src/mongo/db/repl/storage_interface_impl.h
+++ b/src/mongo/db/repl/storage_interface_impl.h
@@ -177,8 +177,6 @@ public:
 
     boost::optional<Timestamp> getRecoveryTimestamp(ServiceContext* serviceCtx) const override;
 
-    bool supportsDocLocking(ServiceContext* serviceCtx) const override;
-
     Timestamp getAllDurableTimestamp(ServiceContext* serviceCtx) const override;
 
     Timestamp getOldestOpenReadTimestamp(ServiceContext* serviceCtx) const override;
diff --git a/src/mongo/db/repl/storage_interface_mock.cpp b/src/mongo/db/repl/storage_interface_mock.cpp
index 745ed9b8dc4..d9c01c7602d 100644
--- a/src/mongo/db/repl/storage_interface_mock.cpp
+++ b/src/mongo/db/repl/storage_interface_mock.cpp
@@ -98,10 +98,6 @@ Timestamp StorageInterfaceMock::getOldestOpenReadTimestamp(ServiceContext* servi
     return oldestOpenReadTimestamp;
 }
 
-bool StorageInterfaceMock::supportsDocLocking(ServiceContext* serviceCtx) const {
-    return supportsDocLockingBool;
-}
-
 Status CollectionBulkLoaderMock::init(const std::vector<BSONObj>& secondaryIndexSpecs) {
     LOGV2_DEBUG(21757, 1, "CollectionBulkLoaderMock::init called");
     stats->initCalled = true;
diff --git a/src/mongo/db/repl/storage_interface_mock.h b/src/mongo/db/repl/storage_interface_mock.h
index eed01a324af..3c4ec8e9176 100644
--- a/src/mongo/db/repl/storage_interface_mock.h
+++ b/src/mongo/db/repl/storage_interface_mock.h
@@ -327,8 +327,6 @@ public:
 
     Timestamp getOldestOpenReadTimestamp(ServiceContext* serviceCtx) const override;
 
-    bool supportsDocLocking(ServiceContext* serviceCtx) const override;
-
     Status isAdminDbValid(OperationContext* opCtx) override {
         return isAdminDbValidFn(opCtx);
     };
@@ -416,7 +414,6 @@ public:
         return Status{ErrorCodes::IllegalOperation, "GetCollectionUUIDFn not implemented."};
     };
 
-    bool supportsDocLockingBool = false;
     Timestamp allDurableTimestamp = Timestamp::min();
     Timestamp oldestOpenReadTimestamp = Timestamp::min();
diff --git a/src/mongo/db/s/chunk_move_write_concern_options.cpp b/src/mongo/db/s/chunk_move_write_concern_options.cpp
index 036cb5e7638..c41db7ad8ec 100644
--- a/src/mongo/db/s/chunk_move_write_concern_options.cpp
+++ b/src/mongo/db/s/chunk_move_write_concern_options.cpp
@@ -69,11 +69,7 @@ StatusWith<WriteConcernOptions> ChunkMoveWriteConcernOptions::getEffectiveWriteC
     OperationContext* opCtx, const MigrationSecondaryThrottleOptions& options) {
     auto secondaryThrottle = options.getSecondaryThrottle();
     if (secondaryThrottle == MigrationSecondaryThrottleOptions::kDefault) {
-        if (opCtx->getServiceContext()->getStorageEngine()->supportsDocLocking()) {
-            secondaryThrottle = MigrationSecondaryThrottleOptions::kOff;
-        } else {
-            secondaryThrottle = MigrationSecondaryThrottleOptions::kOn;
-        }
+        secondaryThrottle = MigrationSecondaryThrottleOptions::kOff;
     }
 
     if (secondaryThrottle == MigrationSecondaryThrottleOptions::kOff) {
diff --git a/src/mongo/db/s/session_catalog_migration_destination_test.cpp b/src/mongo/db/s/session_catalog_migration_destination_test.cpp
index 4350deff917..deffdfe9731 100644
--- a/src/mongo/db/s/session_catalog_migration_destination_test.cpp
+++ b/src/mongo/db/s/session_catalog_migration_destination_test.cpp
@@ -242,11 +242,7 @@ public:
         Client::initThread("test-insert-thread");
         auto innerOpCtx = Client::getCurrent()->makeOperationContext();
 
-        // The ephemeral for test storage engine doesn't support document-level locking, so
-        // requests with txnNumbers aren't allowed. To get around this, we have to manually set
-        // up the session state and perform the insert.
-        initializeOperationSessionInfo(
-            innerOpCtx.get(), insertBuilder.obj(), true, true, true, true);
+        initializeOperationSessionInfo(innerOpCtx.get(), insertBuilder.obj(), true, true, true);
         MongoDOperationContextSession sessionTxnState(innerOpCtx.get());
         auto txnParticipant = TransactionParticipant::get(innerOpCtx.get());
         txnParticipant.beginOrContinue(
diff --git a/src/mongo/db/service_context.cpp b/src/mongo/db/service_context.cpp
index 9d9f907aa1b..4e01b46c3b4 100644
--- a/src/mongo/db/service_context.cpp
+++ b/src/mongo/db/service_context.cpp
@@ -96,12 +96,6 @@ void setGlobalServiceContext(ServiceContext::UniqueServiceContext&& serviceConte
     globalServiceContext = serviceContext.release();
 }
 
-bool _supportsDocLocking = false;
-
-bool supportsDocLocking() {
-    return _supportsDocLocking;
-}
-
 ServiceContext::ServiceContext()
     : _tickSource(std::make_unique<SystemTickSource>()),
       _fastClockSource(std::make_unique<SystemClockSource>()),
diff --git a/src/mongo/db/service_context.h b/src/mongo/db/service_context.h
index 49cb7a602ba..bad89b6cc58 100644
--- a/src/mongo/db/service_context.h
+++ b/src/mongo/db/service_context.h
@@ -729,11 +729,4 @@ ServiceContext* getCurrentServiceContext();
  */
 void setGlobalServiceContext(ServiceContext::UniqueServiceContext&& serviceContext);
 
-/**
- * Shortcut for querying the storage engine about whether it supports document-level locking.
- * If this call becomes too expensive, we could cache the value somewhere so we don't have to
- * fetch the storage engine every time.
- */
-bool supportsDocLocking();
-
 }  // namespace mongo
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index b41c891223d..612139a2bb9 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -959,8 +959,7 @@ void execCommandDatabase(OperationContext* opCtx,
             request.body,
             command->requiresAuth(),
             command->attachLogicalSessionsToOpCtx(),
-            replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet,
-            opCtx->getServiceContext()->getStorageEngine()->supportsDocLocking());
+            replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet);
 
         CommandHelpers::evaluateFailCommandFailPoint(opCtx, invocation.get());
diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.h b/src/mongo/db/storage/devnull/devnull_kv_engine.h
index dd1854e0256..6e9b8c4c140 100644
--- a/src/mongo/db/storage/devnull/devnull_kv_engine.h
+++ b/src/mongo/db/storage/devnull/devnull_kv_engine.h
@@ -78,10 +78,6 @@ public:
         return Status::OK();
     }
 
-    virtual bool supportsDocLocking() const {
-        return true;
-    }
-
     virtual bool supportsDirectoryPerDB() const {
         return false;
     }
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h
index 727c445bbe2..5c184eef824 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_kv_engine.h
@@ -83,10 +83,6 @@ public:
     virtual Status dropIdent(OperationContext* opCtx, mongo::RecoveryUnit* ru, StringData ident);
 
-    virtual bool supportsDocLocking() const {
-        return true;
-    }
-
     virtual bool supportsDirectoryPerDB() const {
         return false;  // Not persistant so no Directories
     }
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp
index 5889fb4c813..ec4c5db41ed 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp
@@ -85,10 +85,6 @@ public:
     std::unique_ptr<mongo::RecoveryUnit> newRecoveryUnit() final {
         return std::make_unique<RecoveryUnit>(&_kvEngine);
     }
-
-    bool supportsDocLocking() final {
-        return true;
-    }
 };
 
 std::unique_ptr<mongo::RecordStoreHarnessHelper> makeRecordStoreHarnessHelper() {
diff --git a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper_test.cpp b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper_test.cpp
index 7f187993d7a..581918db1e6 100644
--- a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper_test.cpp
+++ b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper_test.cpp
@@ -103,9 +103,6 @@ public:
     bool isEphemeral() const override {
         return false;
     }
-    bool supportsDocLocking() const override {
-        return false;
-    }
     bool supportsDirectoryPerDB() const override {
         return false;
     }
diff --git a/src/mongo/db/storage/kv/kv_engine.h b/src/mongo/db/storage/kv/kv_engine.h
index ff7b14c23cd..01c5630a300 100644
--- a/src/mongo/db/storage/kv/kv_engine.h
+++ b/src/mongo/db/storage/kv/kv_engine.h
@@ -270,11 +270,6 @@ public:
     /**
      * This must not change over the lifetime of the engine.
     */
-    virtual bool supportsDocLocking() const = 0;
-
-    /**
-     * This must not change over the lifetime of the engine.
-     */
     virtual bool supportsCappedCollections() const {
         return true;
     }
diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
index e0cb1d86eb8..93b037e4f0c 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
+++ b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
@@ -296,8 +296,6 @@ TEST(KVEngineTestHarness, TemporaryRecordStoreSimple) {
 TEST(KVEngineTestHarness, AllDurableTimestamp) {
     std::unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
     KVEngine* engine = helper->getEngine();
-    if (!engine->supportsDocLocking())
-        return;
 
     std::unique_ptr<RecordStore> rs;
     {
@@ -454,8 +452,6 @@ TEST(KVEngineTestHarness, PinningOldestWithAnotherSession) {
 TEST(KVEngineTestHarness, AllDurable) {
     std::unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
     KVEngine* engine = helper->getEngine();
-    if (!engine->supportsDocLocking())
-        return;
 
     std::string ns = "a.b";
     std::unique_ptr<RecordStore> rs;
diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h
index 351f95e961a..5d8b8db7804 100644
--- a/src/mongo/db/storage/record_store.h
+++ b/src/mongo/db/storage/record_store.h
@@ -196,8 +196,8 @@ public:
 * all RecordStore specific transaction information, as well as the LockState. Methods that take
 * an OperationContext may throw a WriteConflictException.
 *
- * This class must be thread-safe for document-level locking storage engines. In addition, for
- * storage engines implementing the KVEngine some methods must be thread safe, see DurableCatalog.
+ * This class must be thread-safe. In addition, for storage engines implementing the KVEngine some
+ * methods must be thread safe, see DurableCatalog.
 */
class RecordStore {
    RecordStore(const RecordStore&) = delete;
@@ -471,9 +471,8 @@ public:
     }
 
     /**
-     * When we write to an oplog, we call this so that if the storage engine
-     * supports doc locking, it can manage the visibility of oplog entries to ensure
-     * they are ordered.
+     * When we write to an oplog, we call this so that the storage engine can manage the
+     * visibility of oplog entries to ensure they are ordered.
      *
      * Since this is called inside of a WriteUnitOfWork while holding a std::mutex, it is
     * illegal to acquire any LockManager locks inside of this function.
diff --git a/src/mongo/db/storage/record_store_test_capped_delete.cpp b/src/mongo/db/storage/record_store_test_capped_delete.cpp
index c83daf8a924..8e7e68ce812 100644
--- a/src/mongo/db/storage/record_store_test_capped_delete.cpp
+++ b/src/mongo/db/storage/record_store_test_capped_delete.cpp
@@ -48,8 +48,6 @@ using std::unique_ptr;
 // Insert a record in a store with capped max docs 1, and try to delete it by inserting another.
 TEST(RecordStoreTestHarness, CappedDeleteRecord) {
     const auto harness(newRecordStoreHarnessHelper());
-    if (!harness->supportsDocLocking())
-        return;
     auto rs(harness->newCappedRecordStore(RecordStoreHarnessHelper::kDefaultCapedSizeBytes,
                                           /*cappedMaxDocs*/ 1));
@@ -95,8 +93,6 @@ TEST(RecordStoreTestHarness, CappedDeleteRecord) {
 // Insert multiple records at once, requiring multiple deletes.
 TEST(RecordStoreTestHarness, DeleteMultipleRecords) {
     const auto harness(newRecordStoreHarnessHelper());
-    if (!harness->supportsDocLocking())
-        return;
     const int cappedMaxDocs = 10;
     auto rs(harness->newCappedRecordStore(RecordStoreHarnessHelper::kDefaultCapedSizeBytes,
                                           cappedMaxDocs));
diff --git a/src/mongo/db/storage/record_store_test_capped_visibility.cpp b/src/mongo/db/storage/record_store_test_capped_visibility.cpp
index a6abd887613..46efb5e7b2b 100644
--- a/src/mongo/db/storage/record_store_test_capped_visibility.cpp
+++ b/src/mongo/db/storage/record_store_test_capped_visibility.cpp
@@ -55,9 +55,6 @@ RecordId doInsert(unowned_ptr<OperationContext> opCtx, unowned_ptr<RecordStore>
 TEST(RecordStore_CappedVisibility, EmptyInitialState) {
     const auto harness = newRecordStoreHarnessHelper();
-    if (!harness->supportsDocLocking())
-        return;
-
     auto rs = harness->newCappedRecordStore();
 
     auto longLivedClient = harness->serviceContext()->makeClient("longLived");
@@ -107,9 +104,6 @@ TEST(RecordStore_CappedVisibility, EmptyInitialState) {
 TEST(RecordStore_CappedVisibility, NonEmptyInitialState) {
     const auto harness = newRecordStoreHarnessHelper();
-    if (!harness->supportsDocLocking())
-        return;
-
     auto rs = harness->newCappedRecordStore();
 
     auto longLivedClient = harness->serviceContext()->makeClient("longLived");
diff --git a/src/mongo/db/storage/record_store_test_harness.h b/src/mongo/db/storage/record_store_test_harness.h
index 5bd3ce6776e..a620cf1aa31 100644
--- a/src/mongo/db/storage/record_store_test_harness.h
+++ b/src/mongo/db/storage/record_store_test_harness.h
@@ -54,13 +54,6 @@ public:
     virtual std::unique_ptr<RecordStore> newCappedRecordStore(const std::string& ns,
                                                               int64_t cappedSizeBytes,
                                                               int64_t cappedMaxDocs) = 0;
-
-    /**
-     * Currently this requires that it is possible to have two independent open write operations
-     * at the same time one the same thread (with separate Clients, OperationContexts, and
-     * RecoveryUnits).
-     */
-    virtual bool supportsDocLocking() = 0;
 };
 
 void registerRecordStoreHarnessHelperFactory(
diff --git a/src/mongo/db/storage/record_store_test_oplog.cpp b/src/mongo/db/storage/record_store_test_oplog.cpp
index 1957ae409cb..34b68dbc7f9 100644
--- a/src/mongo/db/storage/record_store_test_oplog.cpp
+++ b/src/mongo/db/storage/record_store_test_oplog.cpp
@@ -65,8 +65,6 @@ RecordId _oplogOrderInsertOplog(OperationContext* opCtx,
 TEST(RecordStoreTestHarness, OplogHack) {
     std::unique_ptr<RecordStoreHarnessHelper> harnessHelper = newRecordStoreHarnessHelper();
-    if (!harnessHelper->supportsDocLocking())
-        return;
 
     // Use a large enough cappedMaxSize so that the limit is not reached by doing the inserts within
     // the test itself.
@@ -161,8 +159,6 @@ TEST(RecordStoreTestHarness, OplogHack) {
 TEST(RecordStoreTestHarness, OplogInsertOutOfOrder) {
     std::unique_ptr<RecordStoreHarnessHelper> harnessHelper = newRecordStoreHarnessHelper();
-    if (!harnessHelper->supportsDocLocking())
-        return;
 
     const int64_t cappedMaxSize = 10 * 1024;  // Large enough to not exceed.
     std::unique_ptr<RecordStore> rs(
@@ -188,9 +184,6 @@ TEST(RecordStoreTestHarness, OplogInsertOutOfOrder) {
 TEST(RecordStoreTestHarness, OplogHackOnNonOplog) {
     std::unique_ptr<RecordStoreHarnessHelper> harnessHelper = newRecordStoreHarnessHelper();
-    if (!harnessHelper->supportsDocLocking())
-        return;
-
     std::unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore("local.NOT_oplog.foo"));
 
     ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
@@ -208,9 +201,6 @@ TEST(RecordStoreTestHarness, OplogHackOnNonOplog) {
 TEST(RecordStoreTestHarness, OplogOrder) {
     std::unique_ptr<RecordStoreHarnessHelper> harnessHelper(newRecordStoreHarnessHelper());
-    if (!harnessHelper->supportsDocLocking())
-        return;
-
     std::unique_ptr<RecordStore> rs(
         harnessHelper->newCappedRecordStore("local.oplog.rs", 100000, -1));
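Note: the OplogInsertOutOfOrder and OplogOrder tests above now run unconditionally and exercise the visibility rule that replaced the doc-locking guard. With illustrative timestamps: if entries at T=10 and T=12 are inserted while T=11 is still uncommitted, a forward scan must not return T=12 until the T=11 write commits or aborts, so readers never observe a hole.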
diff --git a/src/mongo/db/storage/storage_engine.h b/src/mongo/db/storage/storage_engine.h
index b7be1372d36..6ce5eeb3add 100644
--- a/src/mongo/db/storage/storage_engine.h
+++ b/src/mongo/db/storage/storage_engine.h
@@ -186,15 +186,6 @@ public:
     virtual std::vector<std::string> listDatabases() const = 0;
 
     /**
-     * Returns whether the storage engine supports its own locking locking below the collection
-     * level. If the engine returns true, MongoDB will acquire intent locks down to the
-     * collection level and will assume that the engine will ensure consistency at the level of
-     * documents. If false, MongoDB will lock the entire collection in Shared/Exclusive mode
-     * for read/write operations respectively.
-     */
-    virtual bool supportsDocLocking() const = 0;
-
-    /**
      * Returns whether the storage engine supports capped collections.
     */
     virtual bool supportsCappedCollections() const = 0;
@@ -564,8 +555,7 @@ public:
     /**
      * Returns the all_durable timestamp. All transactions with timestamps earlier than the
-     * all_durable timestamp are committed. Only storage engines that support document level locking
-     * must provide an implementation. Other storage engines may provide a no-op implementation.
+     * all_durable timestamp are committed.
      *
      * The all_durable timestamp is the in-memory no holes point. That does not mean that there are
     * no holes behind it on disk. The all_durable timestamp also might not correspond with any
diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp
index 1823a83e953..a4590dd850f 100644
--- a/src/mongo/db/storage/storage_engine_impl.cpp
+++ b/src/mongo/db/storage/storage_engine_impl.cpp
@@ -73,7 +73,6 @@ StorageEngineImpl::StorageEngineImpl(std::unique_ptr<KVEngine> engine, StorageEn
       _minOfCheckpointAndOldestTimestampListener(
           TimestampMonitor::TimestampType::kMinOfCheckpointAndOldest,
           [this](Timestamp timestamp) { _onMinOfCheckpointAndOldestTimestampChanged(timestamp); }),
-      _supportsDocLocking(_engine->supportsDocLocking()),
       _supportsCappedCollections(_engine->supportsCappedCollections()) {
     uassert(28601,
             "Storage engine does not support --directoryperdb",
diff --git a/src/mongo/db/storage/storage_engine_impl.h b/src/mongo/db/storage/storage_engine_impl.h
index a00ff2b2369..926d3900649 100644
--- a/src/mongo/db/storage/storage_engine_impl.h
+++ b/src/mongo/db/storage/storage_engine_impl.h
@@ -74,10 +74,6 @@ public:
 
     virtual std::vector<std::string> listDatabases() const override;
 
-    virtual bool supportsDocLocking() const override {
-        return _supportsDocLocking;
-    }
-
     virtual bool supportsCappedCollections() const override {
         return _supportsCappedCollections;
     }
@@ -402,7 +398,6 @@ private:
     // Listener for min of checkpoint and oldest timestamp changes.
     TimestampMonitor::TimestampListener _minOfCheckpointAndOldestTimestampListener;
 
-    const bool _supportsDocLocking;
     const bool _supportsCappedCollections;
 
     std::unique_ptr<RecordStore> _catalogRecordStore;
diff --git a/src/mongo/db/storage/storage_engine_init.cpp b/src/mongo/db/storage/storage_engine_init.cpp
index 85c1c75cb89..b55fb203aa9 100644
--- a/src/mongo/db/storage/storage_engine_init.cpp
+++ b/src/mongo/db/storage/storage_engine_init.cpp
@@ -59,8 +59,6 @@ namespace {
 void createLockFile(ServiceContext* service);
 }  // namespace
 
-extern bool _supportsDocLocking;
-
 LastStorageEngineShutdownState initializeStorageEngine(ServiceContext* service,
                                                        const StorageEngineInitFlags initFlags) {
     // This should be set once.
@@ -173,8 +171,6 @@ LastStorageEngineShutdownState initializeStorageEngine(ServiceContext* service,
 
     guard.dismiss();
 
-    _supportsDocLocking = service->getStorageEngine()->supportsDocLocking();
-
     if (lockFile && lockFile->createdByUncleanShutdown()) {
         return LastStorageEngineShutdownState::kUnclean;
     } else {
diff --git a/src/mongo/db/storage/storage_engine_mock.h b/src/mongo/db/storage/storage_engine_mock.h
index b1d24776235..a363c11f800 100644
--- a/src/mongo/db/storage/storage_engine_mock.h
+++ b/src/mongo/db/storage/storage_engine_mock.h
@@ -45,9 +45,6 @@ public:
     std::vector<std::string> listDatabases() const final {
         return {};
     }
-    bool supportsDocLocking() const final {
-        return false;
-    }
     bool supportsCappedCollections() const final {
         return true;
     }
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 476f1cd6ccb..5e688b96d51 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -1891,10 +1891,6 @@ void WiredTigerKVEngine::dropSomeQueuedIdents() {
     }
 }
 
-bool WiredTigerKVEngine::supportsDocLocking() const {
-    return true;
-}
-
 bool WiredTigerKVEngine::supportsDirectoryPerDB() const {
     return true;
 }
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
index 09152ec80fe..b07918e9e92 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
@@ -108,8 +108,6 @@ public:
     void setRecordStoreExtraOptions(const std::string& options);
     void setSortedDataInterfaceExtraOptions(const std::string& options);
 
-    bool supportsDocLocking() const override;
-
     bool supportsDirectoryPerDB() const override;
 
     /**
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp
index 43448ea85d1..ed8f8b72d36 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp
@@ -185,10 +185,6 @@ public:
             checked_cast<WiredTigerRecoveryUnit*>(_engine->newRecoveryUnit()));
     }
 
-    virtual bool supportsDocLocking() final {
-        return true;
-    }
-
     virtual WT_CONNECTION* conn() const {
         return _engine->getConnection();
     }
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
index b0b61c4bba3..43e6b615cc8 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
@@ -181,10 +181,6 @@ public:
         return std::unique_ptr<RecoveryUnit>(_engine.newRecoveryUnit());
     }
 
-    virtual bool supportsDocLocking() final {
-        return true;
-    }
-
     virtual WT_CONNECTION* conn() {
         return _engine.getConnection();
     }
diff --git a/src/mongo/dbtests/catalogtests.cpp b/src/mongo/dbtests/catalogtests.cpp
index 80873116042..460e0904e82 100644
--- a/src/mongo/dbtests/catalogtests.cpp
+++ b/src/mongo/dbtests/catalogtests.cpp
@@ -53,9 +53,6 @@ class ConcurrentCreateCollectionTest {
 public:
     void run() {
         auto serviceContext = getGlobalServiceContext();
-        if (!serviceContext->getStorageEngine()->supportsDocLocking()) {
-            return;
-        }
 
         NamespaceString competingNss("test.competingCollection");
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index 2b7607cfcdc..81eee3ef4bc 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -355,7 +355,6 @@ void runCommand(OperationContext* opCtx,
             request.body,
             command->requiresAuth(),
             command->attachLogicalSessionsToOpCtx(),
-            true,
             true);
 
         // TODO SERVER-28756: Change allowTransactionsOnConfigDatabase to true once we fix the bug