Diffstat (limited to 'src')
50 files changed, 819 insertions, 469 deletions
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 581cb065ba1..91bd5d63962 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -394,25 +394,29 @@ void CmdFindAndModify::Invocation::explain(OperationContext* opCtx,
         // Explain calls of the findAndModify command are read-only, but we take write
         // locks so that the timing information is more accurate.
-        AutoGetCollection collection(opCtx, nss, MODE_IX);
+        const auto collection =
+            acquireCollection(opCtx,
+                              CollectionAcquisitionRequest::fromOpCtx(
+                                  opCtx, nss, AcquisitionPrerequisites::OperationType::kWrite),
+                              MODE_IX);
         uassert(ErrorCodes::NamespaceNotFound,
                 str::stream() << "database " << dbName.toStringForErrorMsg() << " does not exist",
-                collection.getDb());
+                DatabaseHolder::get(opCtx)->getDb(opCtx, nss.dbName()));
 
         const ExtensionsCallbackReal extensionsCallback(opCtx, &updateRequest.getNamespaceString());
         ParsedUpdate parsedUpdate(
-            opCtx, &updateRequest, extensionsCallback, collection.getCollection());
+            opCtx, &updateRequest, extensionsCallback, collection.getCollectionPtr());
         uassertStatusOK(parsedUpdate.parseRequest());
 
         CollectionShardingState::assertCollectionLockedAndAcquire(opCtx, nss)
             ->checkShardVersionOrThrow(opCtx);
 
-        const auto exec = uassertStatusOK(
-            getExecutorUpdate(opDebug, &collection.getCollection(), &parsedUpdate, verbosity));
+        const auto exec =
+            uassertStatusOK(getExecutorUpdate(opDebug, collection, &parsedUpdate, verbosity));
 
         auto bodyBuilder = result->getBodyBuilder();
         Explain::explainStages(
-            exec.get(), collection.getCollection(), verbosity, BSONObj(), cmdObj, &bodyBuilder);
+            exec.get(), collection.getCollectionPtr(), verbosity, BSONObj(), cmdObj, &bodyBuilder);
     }
 }
diff --git a/src/mongo/db/commands/write_commands.cpp b/src/mongo/db/commands/write_commands.cpp
index 005c545e96e..60e56da7180 100644
--- a/src/mongo/db/commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands.cpp
@@ -591,7 +591,7 @@ public:
             uassertStatusOK(parsedUpdate.parseRequest());
 
             auto exec = uassertStatusOK(getExecutorUpdate(
-                &CurOp::get(opCtx)->debug(), &collection, &parsedUpdate, verbosity));
+                &CurOp::get(opCtx)->debug(), collection, &parsedUpdate, verbosity));
             auto bodyBuilder = result->getBodyBuilder();
             Explain::explainStages(exec.get(),
                                    collection.getCollectionPtr(),
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 4e552e01dbe..72971a44f84 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -271,24 +271,24 @@ bool Helpers::getLast(OperationContext* opCtx, const NamespaceString& nss, BSONO
 }
 
 UpdateResult Helpers::upsert(OperationContext* opCtx,
-                             const NamespaceString& nss,
+                             ScopedCollectionAcquisition& coll,
                              const BSONObj& o,
                              bool fromMigrate) {
     BSONElement e = o["_id"];
     verify(e.type());
     BSONObj id = e.wrap();
-    return upsert(opCtx, nss, id, o, fromMigrate);
+    return upsert(opCtx, coll, id, o, fromMigrate);
 }
 
 UpdateResult Helpers::upsert(OperationContext* opCtx,
-                             const NamespaceString& nss,
+                             ScopedCollectionAcquisition& coll,
                              const BSONObj& filter,
                              const BSONObj& updateMod,
                              bool fromMigrate) {
-    OldClientContext context(opCtx, nss);
+    OldClientContext context(opCtx, coll.nss());
 
     auto request = UpdateRequest();
-    request.setNamespaceString(nss);
+    request.setNamespaceString(coll.nss());
     request.setQuery(filter);
     request.setUpdateModification(write_ops::UpdateModification::parseFromClassicUpdate(updateMod));
@@ -298,18 +298,18 @@ UpdateResult Helpers::upsert(OperationContext* opCtx,
     }
     request.setYieldPolicy(PlanYieldPolicy::YieldPolicy::NO_YIELD);
 
-    return ::mongo::update(opCtx, context.db(), request);
+    return ::mongo::update(opCtx, coll, request);
 }
 
 void Helpers::update(OperationContext* opCtx,
-                     const NamespaceString& nss,
+                     ScopedCollectionAcquisition& coll,
                      const BSONObj& filter,
                      const BSONObj& updateMod,
                      bool fromMigrate) {
-    OldClientContext context(opCtx, nss);
+    OldClientContext context(opCtx, coll.nss());
 
     auto request = UpdateRequest();
-    request.setNamespaceString(nss);
+    request.setNamespaceString(coll.nss());
     request.setQuery(filter);
     request.setUpdateModification(write_ops::UpdateModification::parseFromClassicUpdate(updateMod));
@@ -318,19 +318,21 @@ void Helpers::update(OperationContext* opCtx,
     }
     request.setYieldPolicy(PlanYieldPolicy::YieldPolicy::NO_YIELD);
 
-    ::mongo::update(opCtx, context.db(), request);
+    ::mongo::update(opCtx, coll, request);
 }
 
-void Helpers::putSingleton(OperationContext* opCtx, const NamespaceString& nss, BSONObj obj) {
-    OldClientContext context(opCtx, nss);
+void Helpers::putSingleton(OperationContext* opCtx,
+                           ScopedCollectionAcquisition& coll,
+                           BSONObj obj) {
+    OldClientContext context(opCtx, coll.nss());
 
     auto request = UpdateRequest();
-    request.setNamespaceString(nss);
+    request.setNamespaceString(coll.nss());
     request.setUpdateModification(write_ops::UpdateModification::parseFromClassicUpdate(obj));
     request.setUpsert();
 
-    ::mongo::update(opCtx, context.db(), request);
+    ::mongo::update(opCtx, coll, request);
 
     CurOp::get(opCtx)->done();
 }
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index 90dbb5d88e4..1efdc9daa7c 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -122,7 +122,9 @@ struct Helpers {
      * Performs an upsert of "obj" into the collection "ns", with an empty update predicate.
      * Callers must have "ns" locked.
      */
-    static void putSingleton(OperationContext* opCtx, const NamespaceString& nss, BSONObj obj);
+    static void putSingleton(OperationContext* opCtx,
+                             ScopedCollectionAcquisition& coll,
+                             BSONObj obj);
 
     /**
      * Callers are expected to hold the collection lock.
@@ -130,7 +132,7 @@ struct Helpers {
      *
      * o has to have an _id field or will assert
      */
     static UpdateResult upsert(OperationContext* opCtx,
-                               const NamespaceString& nss,
+                               ScopedCollectionAcquisition& coll,
                                const BSONObj& o,
                                bool fromMigrate = false);
 
@@ -141,7 +143,7 @@ struct Helpers {
      *
      * on the same storage snapshot.
      */
     static UpdateResult upsert(OperationContext* opCtx,
-                               const NamespaceString& nss,
+                               ScopedCollectionAcquisition& coll,
                                const BSONObj& filter,
                                const BSONObj& updateMod,
                                bool fromMigrate = false);
 
@@ -153,7 +155,7 @@ struct Helpers {
      *
      * on the same storage snapshot.
      */
     static void update(OperationContext* opCtx,
-                       const NamespaceString& nss,
+                       ScopedCollectionAcquisition& coll,
                        const BSONObj& filter,
                        const BSONObj& updateMod,
                        bool fromMigrate = false);
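The dbhelpers change above moves lock acquisition from the helper to the caller: instead of a NamespaceString, Helpers::upsert()/update()/putSingleton() now receive the caller's ScopedCollectionAcquisition. A minimal sketch of the resulting call site, assuming hypothetical placeholders nss and doc:

    // Hypothetical call site; nss and doc are placeholders.
    auto coll = acquireCollection(
        opCtx,
        CollectionAcquisitionRequest::fromOpCtx(opCtx, nss, AcquisitionPrerequisites::kWrite),
        MODE_IX);
    WriteUnitOfWork wuow(opCtx);
    Helpers::upsert(opCtx, coll, doc, /*fromMigrate=*/false);
    wuow.commit();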
diff --git a/src/mongo/db/exec/update_stage.cpp b/src/mongo/db/exec/update_stage.cpp
index 191a66a7b43..8b3a192463c 100644
--- a/src/mongo/db/exec/update_stage.cpp
+++ b/src/mongo/db/exec/update_stage.cpp
@@ -93,7 +93,7 @@ CollectionUpdateArgs::StoreDocOption getStoreDocMode(const UpdateRequest& update
 UpdateStage::UpdateStage(ExpressionContext* expCtx,
                          const UpdateStageParams& params,
                          WorkingSet* ws,
-                         const CollectionPtr& collection,
+                         const ScopedCollectionAcquisition& collection,
                          PlanStage* child)
     : UpdateStage(expCtx, params, ws, collection) {
     // We should never reach here if the request is an upsert.
@@ -105,16 +105,16 @@ UpdateStage::UpdateStage(ExpressionContext* expCtx,
 UpdateStage::UpdateStage(ExpressionContext* expCtx,
                          const UpdateStageParams& params,
                          WorkingSet* ws,
-                         const CollectionPtr& collection)
-    : RequiresMutableCollectionStage(kStageType.rawData(), expCtx, collection),
+                         const ScopedCollectionAcquisition& collection)
+    : RequiresMutableCollectionStage(kStageType.rawData(), expCtx, collection.getCollectionPtr()),
       _params(params),
       _ws(ws),
       _doc(params.driver->getDocument()),
-      _cachedShardingCollectionDescription(collection->ns()),
+      _cachedShardingCollectionDescription(collection.nss()),
       _idRetrying(WorkingSet::INVALID_ID),
       _idReturning(WorkingSet::INVALID_ID),
       _updatedRecordIds(params.request->isMulti() ? new RecordIdSet() : nullptr),
-      _preWriteFilter(opCtx(), collection->ns()) {
+      _preWriteFilter(opCtx(), collection.nss()) {
 
     // Should the modifiers validate their embedded docs via storage_validation::scanDocument()?
     // Only user updates should be checked. Any system or replication stuff should pass through.
diff --git a/src/mongo/db/exec/update_stage.h b/src/mongo/db/exec/update_stage.h
index 5120ffccfe4..9c9bcc483ad 100644
--- a/src/mongo/db/exec/update_stage.h
+++ b/src/mongo/db/exec/update_stage.h
@@ -95,7 +95,7 @@ public:
     UpdateStage(ExpressionContext* expCtx,
                 const UpdateStageParams& params,
                 WorkingSet* ws,
-                const CollectionPtr& collection,
+                const ScopedCollectionAcquisition& collection,
                 PlanStage* child);
 
     bool isEOF() override;
@@ -117,7 +117,7 @@ protected:
     UpdateStage(ExpressionContext* expCtx,
                 const UpdateStageParams& params,
                 WorkingSet* ws,
-                const CollectionPtr& collection);
+                const ScopedCollectionAcquisition& collection);
 
     void doSaveStateRequiresCollection() final {
         _preWriteFilter.saveState();
diff --git a/src/mongo/db/exec/upsert_stage.cpp b/src/mongo/db/exec/upsert_stage.cpp
index 502b68b07a4..a17f9266205 100644
--- a/src/mongo/db/exec/upsert_stage.cpp
+++ b/src/mongo/db/exec/upsert_stage.cpp
@@ -58,7 +58,7 @@ const FieldRef idFieldRef(idFieldName);
 UpsertStage::UpsertStage(ExpressionContext* expCtx,
                          const UpdateStageParams& params,
                          WorkingSet* ws,
-                         const CollectionPtr& collection,
+                         const ScopedCollectionAcquisition& collection,
                          PlanStage* child)
     : UpdateStage(expCtx, params, ws, collection) {
     // We should never create this stage for a non-upsert request.
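The stages now hold the acquisition rather than a raw CollectionPtr. The accessors this patch leans on, condensed into an illustrative sketch (not the full class interface; method names are taken from the call sites above):

    // Illustrative only.
    void example(OperationContext* opCtx, ScopedCollectionAcquisition& coll) {
        if (!coll.exists()) {                // replaces the old `if (!collection)` check
            return;
        }
        const NamespaceString& nss = coll.nss();             // replaces collection->ns()
        const CollectionPtr& ptr = coll.getCollectionPtr();  // for APIs still on CollectionPtr
        invariant(ptr->ns() == nss);
    }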
diff --git a/src/mongo/db/exec/upsert_stage.h b/src/mongo/db/exec/upsert_stage.h
index d3a1b1671c4..11a5497c125 100644
--- a/src/mongo/db/exec/upsert_stage.h
+++ b/src/mongo/db/exec/upsert_stage.h
@@ -54,7 +54,7 @@ public:
     UpsertStage(ExpressionContext* expCtx,
                 const UpdateStageParams& params,
                 WorkingSet* ws,
-                const CollectionPtr& collection,
+                const ScopedCollectionAcquisition& collection,
                 PlanStage* child);
 
     bool isEOF() final;
diff --git a/src/mongo/db/index_build_entry_helpers.cpp b/src/mongo/db/index_build_entry_helpers.cpp
index c462bf91a92..42f05b454c1 100644
--- a/src/mongo/db/index_build_entry_helpers.cpp
+++ b/src/mongo/db/index_build_entry_helpers.cpp
@@ -33,13 +33,13 @@
 #include "mongo/db/catalog/commit_quorum_options.h"
 #include "mongo/db/catalog/index_build_entry_gen.h"
 #include "mongo/db/catalog/local_oplog_info.h"
-#include "mongo/db/catalog_raii.h"
 #include "mongo/db/concurrency/exception_util.h"
 #include "mongo/db/db_raii.h"
 #include "mongo/db/dbhelpers.h"
 #include "mongo/db/namespace_string.h"
 #include "mongo/db/operation_context.h"
 #include "mongo/db/record_id.h"
+#include "mongo/db/shard_role.h"
 #include "mongo/db/storage/write_unit_of_work.h"
 #include "mongo/util/fail_point.h"
 #include "mongo/util/str.h"
@@ -55,27 +55,33 @@ MONGO_FAIL_POINT_DEFINE(hangBeforeGettingIndexBuildEntry);
 
 Status upsert(OperationContext* opCtx, const IndexBuildEntry& indexBuildEntry) {
-    return writeConflictRetry(opCtx,
-                              "upsertIndexBuildEntry",
-                              NamespaceString::kIndexBuildEntryNamespace.ns(),
-                              [&]() -> Status {
-                                  AutoGetCollection collection(
-                                      opCtx, NamespaceString::kIndexBuildEntryNamespace, MODE_IX);
-                                  if (!collection) {
-                                      str::stream ss;
-                                      ss << "Collection not found: "
-                                         << NamespaceString::kIndexBuildEntryNamespace.ns();
-                                      return Status(ErrorCodes::NamespaceNotFound, ss);
-                                  }
-
-                                  WriteUnitOfWork wuow(opCtx);
-                                  Helpers::upsert(opCtx,
-                                                  NamespaceString::kIndexBuildEntryNamespace,
-                                                  indexBuildEntry.toBSON(),
-                                                  /*fromMigrate=*/false);
-                                  wuow.commit();
-                                  return Status::OK();
-                              });
+    return writeConflictRetry(
+        opCtx,
+        "upsertIndexBuildEntry",
+        NamespaceString::kIndexBuildEntryNamespace.ns(),
+        [&]() -> Status {
+            auto collection =
+                acquireCollection(opCtx,
+                                  CollectionAcquisitionRequest(
+                                      NamespaceString(NamespaceString::kIndexBuildEntryNamespace),
+                                      PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+                                      repl::ReadConcernArgs::get(opCtx),
+                                      AcquisitionPrerequisites::kWrite),
+                                  MODE_IX);
+            if (!collection.exists()) {
+                str::stream ss;
+                ss << "Collection not found: " << NamespaceString::kIndexBuildEntryNamespace.ns();
+                return Status(ErrorCodes::NamespaceNotFound, ss);
+            }
+
+            WriteUnitOfWork wuow(opCtx);
+            Helpers::upsert(opCtx,
+                            collection,
+                            indexBuildEntry.toBSON(),
+                            /*fromMigrate=*/false);
+            wuow.commit();
+            return Status::OK();
+        });
 }
 
 std::pair<const BSONObj, const BSONObj> buildIndexBuildEntryFilterAndUpdate(
@@ -110,53 +116,68 @@ std::pair<const BSONObj, const BSONObj> buildIndexBuildEntryFilterAndUpdate(
 }
 
 Status upsert(OperationContext* opCtx, const BSONObj& filter, const BSONObj& updateMod) {
-    return writeConflictRetry(opCtx,
-                              "upsertIndexBuildEntry",
-                              NamespaceString::kIndexBuildEntryNamespace.ns(),
-                              [&]() -> Status {
-                                  AutoGetCollection collection(
-                                      opCtx, NamespaceString::kIndexBuildEntryNamespace, MODE_IX);
-                                  if (!collection) {
-                                      str::stream ss;
-                                      ss << "Collection not found: "
-                                         << NamespaceString::kIndexBuildEntryNamespace.ns();
-                                      return Status(ErrorCodes::NamespaceNotFound, ss);
-                                  }
-
-                                  WriteUnitOfWork wuow(opCtx);
-                                  Helpers::upsert(opCtx,
-                                                  NamespaceString::kIndexBuildEntryNamespace,
-                                                  filter,
-                                                  updateMod,
-                                                  /*fromMigrate=*/false);
-                                  wuow.commit();
-                                  return Status::OK();
-                              });
+    return writeConflictRetry(
+        opCtx,
+        "upsertIndexBuildEntry",
+        NamespaceString::kIndexBuildEntryNamespace.ns(),
+        [&]() -> Status {
+            auto collection =
+                acquireCollection(opCtx,
+                                  CollectionAcquisitionRequest(
+                                      NamespaceString(NamespaceString::kIndexBuildEntryNamespace),
+                                      PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+                                      repl::ReadConcernArgs::get(opCtx),
+                                      AcquisitionPrerequisites::kWrite),
+                                  MODE_IX);
+
+            if (!collection.exists()) {
+                str::stream ss;
+                ss << "Collection not found: " << NamespaceString::kIndexBuildEntryNamespace.ns();
+                return Status(ErrorCodes::NamespaceNotFound, ss);
+            }
+
+            WriteUnitOfWork wuow(opCtx);
+            Helpers::upsert(opCtx,
+                            collection,
+                            filter,
+                            updateMod,
+                            /*fromMigrate=*/false);
+            wuow.commit();
+            return Status::OK();
+        });
 }
 
 Status update(OperationContext* opCtx, const BSONObj& filter, const BSONObj& updateMod) {
-    return writeConflictRetry(opCtx,
-                              "updateIndexBuildEntry",
-                              NamespaceString::kIndexBuildEntryNamespace.ns(),
-                              [&]() -> Status {
-                                  AutoGetCollection collection(
-                                      opCtx, NamespaceString::kIndexBuildEntryNamespace, MODE_IX);
-                                  if (!collection) {
-                                      str::stream ss;
-                                      ss << "Collection not found: "
-                                         << NamespaceString::kIndexBuildEntryNamespace.ns();
-                                      return Status(ErrorCodes::NamespaceNotFound, ss);
-                                  }
-
-                                  WriteUnitOfWork wuow(opCtx);
-                                  Helpers::update(opCtx,
-                                                  NamespaceString::kIndexBuildEntryNamespace,
-                                                  filter,
-                                                  updateMod,
-                                                  /*fromMigrate=*/false);
-                                  wuow.commit();
-                                  return Status::OK();
-                              });
+    return writeConflictRetry(
+        opCtx,
+        "updateIndexBuildEntry",
+        NamespaceString::kIndexBuildEntryNamespace.ns(),
+        [&]() -> Status {
+            auto collection =
+                acquireCollection(opCtx,
+                                  CollectionAcquisitionRequest(
+                                      NamespaceString(NamespaceString::kIndexBuildEntryNamespace),
+                                      PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+                                      repl::ReadConcernArgs::get(opCtx),
+                                      AcquisitionPrerequisites::kWrite),
+                                  MODE_IX);
+
+            if (!collection.exists()) {
+                str::stream ss;
+                ss << "Collection not found: " << NamespaceString::kIndexBuildEntryNamespace.ns();
+                return Status(ErrorCodes::NamespaceNotFound, ss);
+            }
+
+            WriteUnitOfWork wuow(opCtx);
+            Helpers::update(opCtx,
+                            collection,
+                            filter,
+                            updateMod,
+                            /*fromMigrate=*/false);
+            wuow.commit();
+            return Status::OK();
+        });
 }
 
 }  // namespace
diff --git a/src/mongo/db/keys_collection_cache_test.cpp b/src/mongo/db/keys_collection_cache_test.cpp
index b260dbb82e9..805541e77fd 100644
--- a/src/mongo/db/keys_collection_cache_test.cpp
+++ b/src/mongo/db/keys_collection_cache_test.cpp
@@ -39,6 +39,7 @@
 #include "mongo/db/operation_context.h"
 #include "mongo/db/ops/write_ops.h"
 #include "mongo/db/s/config/config_server_test_fixture.h"
+#include "mongo/db/shard_role.h"
 #include "mongo/db/time_proof_service.h"
 #include "mongo/rpc/get_status_from_command_result.h"
 #include "mongo/s/grid.h"
@@ -72,8 +73,11 @@ protected:
     }
 
     void insertDocument(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc) {
-        AutoGetCollection coll(opCtx, nss, MODE_IX);
-        auto updateResult = Helpers::upsert(opCtx, nss, doc);
+        auto collection = acquireCollection(
+            opCtx,
+            CollectionAcquisitionRequest::fromOpCtx(opCtx, nss, AcquisitionPrerequisites::kWrite),
+            MODE_IX);
+        auto updateResult = Helpers::upsert(opCtx, collection, doc);
         ASSERT_EQ(0, updateResult.numDocsModified);
     }
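The index-build entry helpers show the migration recipe this patch applies throughout: acquireCollection() in place of AutoGetCollection, exists() in place of the object's boolean test, and the acquisition itself passed down to the dbhelpers. Condensed into one illustrative sketch, with kEntryNss standing in for the real namespace constant:

    // Condensed, illustrative recipe; kEntryNss is a placeholder.
    Status upsertEntry(OperationContext* opCtx, const BSONObj& doc) {
        return writeConflictRetry(opCtx, "upsertEntry", kEntryNss.ns(), [&]() -> Status {
            auto coll = acquireCollection(
                opCtx,
                CollectionAcquisitionRequest(kEntryNss,
                                             PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
                                             repl::ReadConcernArgs::get(opCtx),
                                             AcquisitionPrerequisites::kWrite),
                MODE_IX);
            if (!coll.exists()) {
                return Status(ErrorCodes::NamespaceNotFound, "collection not found");
            }
            WriteUnitOfWork wuow(opCtx);
            Helpers::upsert(opCtx, coll, doc, /*fromMigrate=*/false);
            wuow.commit();
            return Status::OK();
        });
    }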
diff --git a/src/mongo/db/op_observer/op_observer_impl.cpp b/src/mongo/db/op_observer/op_observer_impl.cpp
index 707541e4211..5bf2f567ef5 100644
--- a/src/mongo/db/op_observer/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer/op_observer_impl.cpp
@@ -288,12 +288,16 @@ void writeToImageCollection(OperationContext* opCtx,
     // stronger lock acquisition is taken on this namespace is during step up to create the
     // collection.
     AllowLockAcquisitionOnTimestampedUnitOfWork allowLockAcquisition(opCtx->lockState());
-    AutoGetCollection imageCollectionRaii(
-        opCtx, NamespaceString::kConfigImagesNamespace, LockMode::MODE_IX);
+    auto collection = acquireCollection(
+        opCtx,
+        CollectionAcquisitionRequest(NamespaceString(NamespaceString::kConfigImagesNamespace),
+                                     PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+                                     repl::ReadConcernArgs::get(opCtx),
+                                     AcquisitionPrerequisites::kWrite),
+        MODE_IX);
     auto curOp = CurOp::get(opCtx);
     const auto existingNs = curOp->getNSS();
-    UpdateResult res =
-        Helpers::upsert(opCtx, NamespaceString::kConfigImagesNamespace, imageEntry.toBSON());
+    UpdateResult res = Helpers::upsert(opCtx, collection, imageEntry.toBSON());
     {
         stdx::lock_guard<Client> clientLock(*opCtx->getClient());
         curOp->setNS_inlock(existingNs);
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index a48758491ca..e8e04a57fa8 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -56,51 +56,48 @@
 namespace mongo {
 
-UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest& request) {
-    invariant(db);
-
+UpdateResult update(OperationContext* opCtx,
+                    ScopedCollectionAcquisition& coll,
+                    const UpdateRequest& request) {
     // Explain should never use this helper.
     invariant(!request.explain());
 
     const NamespaceString& nsString = request.getNamespaceString();
     invariant(opCtx->lockState()->isCollectionLockedForMode(nsString, MODE_IX));
 
-    CollectionPtr collection;
-
     // The update stage does not create its own collection. As such, if the update is
     // an upsert, create the collection that the update stage inserts into beforehand.
writeConflictRetry(opCtx, "createCollection", nsString.ns(), [&] { - collection = CollectionPtr( - CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nsString)); - if (collection || !request.isUpsert()) { - return; - } - - const bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() && - !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nsString); - - if (userInitiatedWritesAndNotPrimary) { - uassertStatusOK(Status(ErrorCodes::PrimarySteppedDown, - str::stream() - << "Not primary while creating collection " - << nsString.toStringForErrorMsg() << " during upsert")); + if (!coll.exists() && request.isUpsert()) { + const bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() && + !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nsString); + + if (userInitiatedWritesAndNotPrimary) { + uassertStatusOK(Status(ErrorCodes::PrimarySteppedDown, + str::stream() + << "Not primary while creating collection " + << nsString.toStringForErrorMsg() << " during upsert")); + } + WriteUnitOfWork wuow(opCtx); + ScopedLocalCatalogWriteFence scopedLocalCatalogWriteFence(opCtx, &coll); + auto db = DatabaseHolder::get(opCtx)->openDb(opCtx, coll.nss().dbName()); + auto newCollectionPtr = db->createCollection(opCtx, nsString, CollectionOptions()); + invariant(newCollectionPtr); + wuow.commit(); } - WriteUnitOfWork wuow(opCtx); - collection = CollectionPtr(db->createCollection(opCtx, nsString, CollectionOptions())); - invariant(collection); - wuow.commit(); }); - collection.makeYieldable(opCtx, LockedCollectionYieldRestore(opCtx, collection)); + // If this is an upsert, at this point the collection must exist. + invariant(coll.exists() || !request.isUpsert()); // Parse the update, get an executor for it, run the executor, get stats out. const ExtensionsCallbackReal extensionsCallback(opCtx, &request.getNamespaceString()); - ParsedUpdate parsedUpdate(opCtx, &request, extensionsCallback, collection); + ParsedUpdate parsedUpdate(opCtx, &request, extensionsCallback, coll.getCollectionPtr()); uassertStatusOK(parsedUpdate.parseRequest()); OpDebug* const nullOpDebug = nullptr; auto exec = uassertStatusOK( - getExecutorUpdate(nullOpDebug, &collection, &parsedUpdate, boost::none /* verbosity */)); + getExecutorUpdate(nullOpDebug, coll, &parsedUpdate, boost::none /* verbosity */)); PlanExecutor::ExecState state = PlanExecutor::ADVANCED; BSONObj image; diff --git a/src/mongo/db/ops/update.h b/src/mongo/db/ops/update.h index cc0d1c42027..4e174842166 100644 --- a/src/mongo/db/ops/update.h +++ b/src/mongo/db/ops/update.h @@ -38,8 +38,8 @@ namespace mongo { class CanonicalQuery; -class Database; class OperationContext; +class ScopedCollectionAcquisition; class UpdateDriver; /** @@ -47,6 +47,8 @@ class UpdateDriver; * * Caller must hold the appropriate database locks. 
*/ -UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest& request); +UpdateResult update(OperationContext* opCtx, + ScopedCollectionAcquisition& coll, + const UpdateRequest& request); } // namespace mongo diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp index 732b5e7181b..1c3efeaf7cf 100644 --- a/src/mongo/db/ops/write_ops_exec.cpp +++ b/src/mongo/db/ops/write_ops_exec.cpp @@ -703,8 +703,14 @@ UpdateResult writeConflictRetryUpsert(OperationContext* opCtx, bool upsert, boost::optional<BSONObj>& docFound, const UpdateRequest* updateRequest) { - AutoGetCollection autoColl(opCtx, nsString, MODE_IX); - Database* db = autoColl.ensureDbExists(opCtx); + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx(opCtx, nsString, AcquisitionPrerequisites::kWrite), + MODE_IX); + Database* db = [&]() { + AutoGetDb autoDb(opCtx, nsString.dbName(), MODE_IX); + return autoDb.ensureDbExists(opCtx); + }(); { stdx::lock_guard<Client> lk(*opCtx->getClient()); @@ -714,43 +720,25 @@ UpdateResult writeConflictRetryUpsert(OperationContext* opCtx, assertCanWrite_inlock(opCtx, nsString); - CollectionPtr createdCollection; - const CollectionPtr* collectionPtr = &autoColl.getCollection(); - // TODO SERVER-50983: Create abstraction for creating collection when using // AutoGetCollection Create the collection if it does not exist when performing an upsert // because the update stage does not create its own collection - if (!*collectionPtr && upsert) { - assertCanWrite_inlock(opCtx, nsString); - - createdCollection = CollectionPtr( - CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nsString)); - - // If someone else beat us to creating the collection, do nothing - if (!createdCollection) { - uassertStatusOK(userAllowedCreateNS(opCtx, nsString)); - OperationShardingState::ScopedAllowImplicitCollectionCreate_UNSAFE - unsafeCreateCollection(opCtx); - WriteUnitOfWork wuow(opCtx); - CollectionOptions defaultCollectionOptions; - uassertStatusOK(db->userCreateNS(opCtx, nsString, defaultCollectionOptions)); - wuow.commit(); - - createdCollection = CollectionPtr( - CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nsString)); - } - - invariant(createdCollection); - createdCollection.makeYieldable(opCtx, - LockedCollectionYieldRestore(opCtx, createdCollection)); - collectionPtr = &createdCollection; - } - const auto& collection = *collectionPtr; - - if (collection && collection->isCapped()) { + if (!collection.exists() && upsert) { + CollectionWriter collectionWriter(opCtx, &collection); + uassertStatusOK(userAllowedCreateNS(opCtx, nsString)); + OperationShardingState::ScopedAllowImplicitCollectionCreate_UNSAFE unsafeCreateCollection( + opCtx); + WriteUnitOfWork wuow(opCtx); + ScopedLocalCatalogWriteFence scopedLocalCatalogWriteFence(opCtx, &collection); + CollectionOptions defaultCollectionOptions; + uassertStatusOK(db->userCreateNS(opCtx, nsString, defaultCollectionOptions)); + wuow.commit(); + } + + if (collection.exists() && collection.getCollectionPtr()->isCapped()) { uassert( ErrorCodes::OperationNotSupportedInTransaction, - str::stream() << "Collection '" << collection->ns().toStringForErrorMsg() + str::stream() << "Collection '" << collection.nss().toStringForErrorMsg() << "' is a capped collection. 
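Note the new creation path in ops/update.cpp: the acquisition is taken before the collection may exist, so an upsert against a missing collection creates it inside a WriteUnitOfWork with a ScopedLocalCatalogWriteFence, which lets the already-held acquisition observe the catalog write once the unit commits. Condensed from the hunk above (illustrative, not verbatim):

    if (!coll.exists() && request.isUpsert()) {
        WriteUnitOfWork wuow(opCtx);
        ScopedLocalCatalogWriteFence fence(opCtx, &coll);  // `coll` sees the new collection
        auto db = DatabaseHolder::get(opCtx)->openDb(opCtx, coll.nss().dbName());
        invariant(db->createCollection(opCtx, coll.nss(), CollectionOptions()));
        wuow.commit();  // afterwards coll.exists() holds, satisfying the invariant above
    }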
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 732b5e7181b..1c3efeaf7cf 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -703,8 +703,14 @@ UpdateResult writeConflictRetryUpsert(OperationContext* opCtx,
                                       bool upsert,
                                       boost::optional<BSONObj>& docFound,
                                       const UpdateRequest* updateRequest) {
-    AutoGetCollection autoColl(opCtx, nsString, MODE_IX);
-    Database* db = autoColl.ensureDbExists(opCtx);
+    auto collection = acquireCollection(
+        opCtx,
+        CollectionAcquisitionRequest::fromOpCtx(opCtx, nsString, AcquisitionPrerequisites::kWrite),
+        MODE_IX);
+    Database* db = [&]() {
+        AutoGetDb autoDb(opCtx, nsString.dbName(), MODE_IX);
+        return autoDb.ensureDbExists(opCtx);
+    }();
 
     {
         stdx::lock_guard<Client> lk(*opCtx->getClient());
@@ -714,43 +720,25 @@ UpdateResult writeConflictRetryUpsert(OperationContext* opCtx,
 
     assertCanWrite_inlock(opCtx, nsString);
 
-    CollectionPtr createdCollection;
-    const CollectionPtr* collectionPtr = &autoColl.getCollection();
-
     // TODO SERVER-50983: Create abstraction for creating collection when using
     // AutoGetCollection Create the collection if it does not exist when performing an upsert
     // because the update stage does not create its own collection
-    if (!*collectionPtr && upsert) {
-        assertCanWrite_inlock(opCtx, nsString);
-
-        createdCollection = CollectionPtr(
-            CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nsString));
-
-        // If someone else beat us to creating the collection, do nothing
-        if (!createdCollection) {
-            uassertStatusOK(userAllowedCreateNS(opCtx, nsString));
-            OperationShardingState::ScopedAllowImplicitCollectionCreate_UNSAFE
-                unsafeCreateCollection(opCtx);
-            WriteUnitOfWork wuow(opCtx);
-            CollectionOptions defaultCollectionOptions;
-            uassertStatusOK(db->userCreateNS(opCtx, nsString, defaultCollectionOptions));
-            wuow.commit();
-
-            createdCollection = CollectionPtr(
-                CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nsString));
-        }
-
-        invariant(createdCollection);
-        createdCollection.makeYieldable(opCtx,
-                                        LockedCollectionYieldRestore(opCtx, createdCollection));
-        collectionPtr = &createdCollection;
-    }
-    const auto& collection = *collectionPtr;
-
-    if (collection && collection->isCapped()) {
+    if (!collection.exists() && upsert) {
+        CollectionWriter collectionWriter(opCtx, &collection);
+        uassertStatusOK(userAllowedCreateNS(opCtx, nsString));
+        OperationShardingState::ScopedAllowImplicitCollectionCreate_UNSAFE unsafeCreateCollection(
+            opCtx);
+        WriteUnitOfWork wuow(opCtx);
+        ScopedLocalCatalogWriteFence scopedLocalCatalogWriteFence(opCtx, &collection);
+        CollectionOptions defaultCollectionOptions;
+        uassertStatusOK(db->userCreateNS(opCtx, nsString, defaultCollectionOptions));
+        wuow.commit();
+    }
+
+    if (collection.exists() && collection.getCollectionPtr()->isCapped()) {
         uassert(
             ErrorCodes::OperationNotSupportedInTransaction,
-            str::stream() << "Collection '" << collection->ns().toStringForErrorMsg()
+            str::stream() << "Collection '" << collection.nss().toStringForErrorMsg()
                           << "' is a capped collection. Writes in transactions are not allowed on "
                              "capped collections.",
             !inTransaction);
@@ -758,11 +746,12 @@ UpdateResult writeConflictRetryUpsert(OperationContext* opCtx,
 
     const ExtensionsCallbackReal extensionsCallback(opCtx, &updateRequest->getNamespaceString());
-    ParsedUpdate parsedUpdate(opCtx, updateRequest, extensionsCallback, collection);
+    ParsedUpdate parsedUpdate(
+        opCtx, updateRequest, extensionsCallback, collection.getCollectionPtr());
     uassertStatusOK(parsedUpdate.parseRequest());
 
     const auto exec = uassertStatusOK(
-        getExecutorUpdate(opDebug, &collection, &parsedUpdate, boost::none /* verbosity */));
+        getExecutorUpdate(opDebug, collection, &parsedUpdate, boost::none /* verbosity */));
 
     {
         stdx::lock_guard<Client> lk(*opCtx->getClient());
@@ -777,8 +766,9 @@ UpdateResult writeConflictRetryUpsert(OperationContext* opCtx,
     PlanSummaryStats summaryStats;
     auto&& explainer = exec->getPlanExplainer();
     explainer.getSummaryStats(&summaryStats);
-    if (collection) {
-        CollectionQueryInfo::get(collection).notifyOfQuery(opCtx, collection, summaryStats);
+    if (collection.exists()) {
+        CollectionQueryInfo::get(collection.getCollectionPtr())
+            .notifyOfQuery(opCtx, collection.getCollectionPtr(), summaryStats);
     }
     auto updateResult = exec->getUpdateResult();
     write_ops_exec::recordUpdateResultInOpDebug(updateResult, opDebug);
@@ -1220,7 +1210,7 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx,
     }();
 
     auto exec = uassertStatusOK(getExecutorUpdate(&curOp.debug(),
-                                                  &collection,
+                                                  collection,
                                                   &parsedUpdate,
                                                   boost::none /* verbosity */,
                                                   std::move(documentCounter)));
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 1d2312c6bd4..b400bea6c69 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -1966,7 +1966,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDele
 
 StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate(
     OpDebug* opDebug,
-    VariantCollectionPtrOrAcquisition coll,
+    const ScopedCollectionAcquisition& coll,
     ParsedUpdate* parsedUpdate,
     boost::optional<ExplainOptions::Verbosity> verbosity,
     UpdateStageParams::DocumentCounter&& documentCounter) {
@@ -1989,7 +1989,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpda
     // If there is no collection and this is an upsert, callers are supposed to create
     // the collection prior to calling this method. Explain, however, will never do
     // collection or database creation.
-    if (!collectionPtr && request->isUpsert()) {
+    if (!coll.exists() && request->isUpsert()) {
         invariant(request->explain());
     }
 
@@ -2014,7 +2014,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpda
     // should have already enforced upstream that in this case either the upsert flag is false, or
     // we are an explain. If the collection doesn't exist, we're not an explain, and the upsert flag
     // is true, we expect the caller to have created the collection already.
-    if (!collectionPtr) {
+    if (!coll.exists()) {
         LOGV2_DEBUG(20929,
                     2,
                     "Collection does not exist. Using EOF stage",
Using EOF stage", @@ -2106,7 +2106,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpda const bool isUpsert = updateStageParams.request->isUpsert(); if (isUpsert) { root = std::make_unique<UpsertStage>( - cq->getExpCtxRaw(), updateStageParams, ws.get(), collectionPtr, root.release()); + cq->getExpCtxRaw(), updateStageParams, ws.get(), coll, root.release()); } else if (parsedUpdate->isEligibleForArbitraryTimeseriesUpdate()) { if (request->isMulti()) { // If this is a multi-update, we need to spool the data before beginning to apply @@ -2123,7 +2123,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpda parsedUpdate->releaseResidualExpr()); } else { root = std::make_unique<UpdateStage>( - cq->getExpCtxRaw(), updateStageParams, ws.get(), collectionPtr, root.release()); + cq->getExpCtxRaw(), updateStageParams, ws.get(), coll, root.release()); } if (projection) { @@ -2136,7 +2136,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpda return plan_executor_factory::make(std::move(cq), std::move(ws), std::move(root), - coll, + &coll, policy, defaultPlannerOptions, NamespaceString(), diff --git a/src/mongo/db/query/get_executor.h b/src/mongo/db/query/get_executor.h index 01a3ba23410..db56a55121d 100644 --- a/src/mongo/db/query/get_executor.h +++ b/src/mongo/db/query/get_executor.h @@ -338,7 +338,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDele */ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate( OpDebug* opDebug, - VariantCollectionPtrOrAcquisition collection, + const ScopedCollectionAcquisition& coll, ParsedUpdate* parsedUpdate, boost::optional<ExplainOptions::Verbosity> verbosity, UpdateStageParams::DocumentCounter&& documentCounter = nullptr); diff --git a/src/mongo/db/query/internal_plans.cpp b/src/mongo/db/query/internal_plans.cpp index 9bb6acfb298..defe5ff324a 100644 --- a/src/mongo/db/query/internal_plans.cpp +++ b/src/mongo/db/query/internal_plans.cpp @@ -446,12 +446,12 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWith std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::updateWithIdHack( OperationContext* opCtx, - VariantCollectionPtrOrAcquisition coll, + const ScopedCollectionAcquisition& collection, const UpdateStageParams& params, const IndexDescriptor* descriptor, const BSONObj& key, PlanYieldPolicy::YieldPolicy yieldPolicy) { - const auto& collectionPtr = coll.getCollectionPtr(); + const auto& collectionPtr = collection.getCollectionPtr(); invariant(collectionPtr); auto ws = std::make_unique<WorkingSet>(); @@ -462,16 +462,15 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::updateWith std::make_unique<IDHackStage>(expCtx.get(), key, ws.get(), collectionPtr, descriptor); const bool isUpsert = params.request->isUpsert(); - auto root = - (isUpsert ? std::make_unique<UpsertStage>( - expCtx.get(), params, ws.get(), collectionPtr, idHackStage.release()) - : std::make_unique<UpdateStage>( - expCtx.get(), params, ws.get(), collectionPtr, idHackStage.release())); + auto root = (isUpsert ? 
std::make_unique<UpsertStage>( + expCtx.get(), params, ws.get(), collection, idHackStage.release()) + : std::make_unique<UpdateStage>( + expCtx.get(), params, ws.get(), collection, idHackStage.release())); auto executor = plan_executor_factory::make(expCtx, std::move(ws), std::move(root), - coll, + &collection, yieldPolicy, false /* whether owned BSON must be returned */); invariant(executor.getStatus()); diff --git a/src/mongo/db/query/internal_plans.h b/src/mongo/db/query/internal_plans.h index 96bfbdd3da7..5168a2a80c4 100644 --- a/src/mongo/db/query/internal_plans.h +++ b/src/mongo/db/query/internal_plans.h @@ -180,7 +180,7 @@ public: */ static std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> updateWithIdHack( OperationContext* opCtx, - VariantCollectionPtrOrAcquisition collection, + const ScopedCollectionAcquisition& collection, const UpdateStageParams& params, const IndexDescriptor* descriptor, const BSONObj& key, diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp index 33f241fab53..44b41a5525d 100644 --- a/src/mongo/db/repl/apply_ops.cpp +++ b/src/mongo/db/repl/apply_ops.cpp @@ -150,7 +150,7 @@ Status _applyOps(OperationContext* opCtx, } } - const auto collection = acquireCollection( + auto collection = acquireCollection( opCtx, CollectionAcquisitionRequest(nss, AcquisitionPrerequisites::kPretendUnsharded, @@ -177,7 +177,6 @@ Status _applyOps(OperationContext* opCtx, // application in the future. const bool isDataConsistent = true; return repl::applyOperation_inlock(opCtx, - ctx.db(), collection, ApplierOperation{&entry}, alwaysUpsert, diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp index 56a084137a3..01137d570b9 100644 --- a/src/mongo/db/repl/oplog.cpp +++ b/src/mongo/db/repl/oplog.cpp @@ -360,7 +360,13 @@ void writeToImageCollection(OperationContext* opCtx, // stronger lock acquisition is taken on this namespace is during step up to create the // collection. AllowLockAcquisitionOnTimestampedUnitOfWork allowLockAcquisition(opCtx->lockState()); - AutoGetCollection autoColl(opCtx, NamespaceString::kConfigImagesNamespace, LockMode::MODE_IX); + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(NamespaceString(NamespaceString::kConfigImagesNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); repl::ImageEntry imageEntry; imageEntry.set_id(sessionId); imageEntry.setTxnNumber(txnNum); @@ -385,7 +391,7 @@ void writeToImageCollection(OperationContext* opCtx, request.setFromOplogApplication(true); try { // This code path can also be hit by things such as `applyOps` and tenant migrations. - ::mongo::update(opCtx, autoColl.getDb(), request); + ::mongo::update(opCtx, collection, request); } catch (const ExceptionFor<ErrorCodes::DuplicateKey>&) { // We can get a duplicate key when two upserts race on inserting a document. *upsertConfigImage = false; @@ -1385,8 +1391,7 @@ void logOplogConstraintViolation(OperationContext* opCtx, // @return failure status if an update should have happened and the document DNE. // See replset initial sync code. 
 Status applyOperation_inlock(OperationContext* opCtx,
-                             Database* db,
-                             const ScopedCollectionAcquisition& collectionAcquisition,
+                             ScopedCollectionAcquisition& collectionAcquisition,
                              const OplogEntryOrGroupedInserts& opOrGroupedInserts,
                              bool alwaysUpsert,
                              OplogApplication::Mode mode,
@@ -1742,7 +1747,7 @@ Status applyOperation_inlock(OperationContext* opCtx,
                     uassertStatusOK(opCtx->recoveryUnit()->setTimestamp(timestamp));
                 }
 
-                UpdateResult res = update(opCtx, db, request);
+                UpdateResult res = update(opCtx, collectionAcquisition, request);
                 if (res.numMatched == 0 && res.upsertedId.isEmpty()) {
                     LOGV2_ERROR(21257,
                                 "No document was updated even though we got a DuplicateKey "
@@ -1869,7 +1874,7 @@ Status applyOperation_inlock(OperationContext* opCtx,
                     invariant(documentFound);
                 }
 
-                UpdateResult ur = update(opCtx, db, request);
+                UpdateResult ur = update(opCtx, collectionAcquisition, request);
                 if (ur.numMatched == 0 && ur.upsertedId.isEmpty()) {
                     if (collection && collection->isCapped() &&
                         mode == OplogApplication::Mode::kSecondary) {
diff --git a/src/mongo/db/repl/oplog.h b/src/mongo/db/repl/oplog.h
index 10967e08dbb..de9d7fb3593 100644
--- a/src/mongo/db/repl/oplog.h
+++ b/src/mongo/db/repl/oplog.h
@@ -242,8 +242,7 @@ void logOplogConstraintViolation(OperationContext* opCtx,
 * Returns failure status if the op was an update that could not be applied.
 */
 Status applyOperation_inlock(OperationContext* opCtx,
-                             Database* db,
-                             const ScopedCollectionAcquisition& collectionAcquisition,
+                             ScopedCollectionAcquisition& collectionAcquisition,
                              const OplogEntryOrGroupedInserts& opOrGroupedInserts,
                              bool alwaysUpsert,
                              OplogApplication::Mode mode,
diff --git a/src/mongo/db/repl/oplog_applier_utils.cpp b/src/mongo/db/repl/oplog_applier_utils.cpp
index 1b106173072..b1364cade01 100644
--- a/src/mongo/db/repl/oplog_applier_utils.cpp
+++ b/src/mongo/db/repl/oplog_applier_utils.cpp
@@ -466,7 +466,6 @@ Status OplogApplierUtils::applyOplogEntryOrGroupedInsertsCommon(
     bool shouldAlwaysUpsert = !oplogApplicationEnforcesSteadyStateConstraints &&
         oplogApplicationMode == OplogApplication::Mode::kSecondary;
     Status status = applyOperation_inlock(opCtx,
-                                          db,
                                           *coll,
                                           entryOrGroupedInserts,
                                           shouldAlwaysUpsert,
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index c9836560a8c..1863add879c 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -425,11 +425,18 @@ Status ReplicationCoordinatorExternalStateImpl::initializeReplSetStorage(Operati
     // Permit writing to the oplog before we step up to primary.
     AllowNonLocalWritesBlock allowNonLocalWrites(opCtx);
     Lock::GlobalWrite globalWrite(opCtx);
+    auto coll = acquireCollection(
+        opCtx,
+        CollectionAcquisitionRequest(
+            NamespaceString(NamespaceString::kSystemReplSetNamespace),
+            PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+            repl::ReadConcernArgs::get(opCtx),
+            AcquisitionPrerequisites::kWrite),
+        MODE_X);
     {
         // Writes to 'local.system.replset' must be untimestamped.
         WriteUnitOfWork wuow(opCtx);
-        Helpers::putSingleton(
-            opCtx, NamespaceString::kSystemReplSetNamespace, config);
+        Helpers::putSingleton(opCtx, coll, config);
         wuow.commit();
     }
     {
@@ -592,8 +599,15 @@ Status ReplicationCoordinatorExternalStateImpl::storeLocalConfigDocument(Operati
     {
         // Writes to 'local.system.replset' must be untimestamped.
         WriteUnitOfWork wuow(opCtx);
-        AutoGetCollection coll(opCtx, NamespaceString::kSystemReplSetNamespace, MODE_X);
-        Helpers::putSingleton(opCtx, NamespaceString::kSystemReplSetNamespace, config);
+        auto coll = acquireCollection(
+            opCtx,
+            CollectionAcquisitionRequest(
+                NamespaceString(NamespaceString::kSystemReplSetNamespace),
+                PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+                repl::ReadConcernArgs::get(opCtx),
+                AcquisitionPrerequisites::kWrite),
+            MODE_X);
+        Helpers::putSingleton(opCtx, coll, config);
         wuow.commit();
     }
 
@@ -622,7 +636,7 @@ Status ReplicationCoordinatorExternalStateImpl::replaceLocalConfigDocument(
     writeConflictRetry(
         opCtx, "replace replica set config", NamespaceString::kSystemReplSetNamespace.ns(), [&] {
             WriteUnitOfWork wuow(opCtx);
-            const auto coll =
+            auto coll =
                 acquireCollection(opCtx,
                                   CollectionAcquisitionRequest(
                                       NamespaceString(NamespaceString::kSystemReplSetNamespace),
                                       PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
                                       repl::ReadConcernArgs::get(opCtx),
                                       AcquisitionPrerequisites::kWrite),
                                   MODE_X);
             Helpers::emptyCollection(opCtx, coll);
-            Helpers::putSingleton(opCtx, NamespaceString::kSystemReplSetNamespace, config);
+            Helpers::putSingleton(opCtx, coll, config);
             wuow.commit();
         });
     return Status::OK();
@@ -652,21 +666,26 @@ Status ReplicationCoordinatorExternalStateImpl::createLocalLastVoteCollection(
     // Make sure there's always a last vote document.
     try {
-        writeConflictRetry(
-            opCtx,
-            "create initial replica set lastVote",
-            NamespaceString::kLastVoteNamespace.toString(),
-            [opCtx] {
-                AutoGetCollection coll(opCtx, NamespaceString::kLastVoteNamespace, MODE_X);
-                BSONObj result;
-                bool exists =
-                    Helpers::getSingleton(opCtx, NamespaceString::kLastVoteNamespace, result);
-                if (!exists) {
-                    LastVote lastVote{OpTime::kInitialTerm, -1};
-                    Helpers::putSingleton(
-                        opCtx, NamespaceString::kLastVoteNamespace, lastVote.toBSON());
-                }
-            });
+        writeConflictRetry(opCtx,
+                           "create initial replica set lastVote",
+                           NamespaceString::kLastVoteNamespace.toString(),
+                           [opCtx] {
+                               auto coll = acquireCollection(
+                                   opCtx,
+                                   CollectionAcquisitionRequest(
+                                       NamespaceString(NamespaceString::kLastVoteNamespace),
+                                       PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+                                       repl::ReadConcernArgs::get(opCtx),
+                                       AcquisitionPrerequisites::kWrite),
+                                   MODE_X);
+
+                               BSONObj result;
+                               bool exists = Helpers::getSingleton(opCtx, coll.nss(), result);
+                               if (!exists) {
+                                   LastVote lastVote{OpTime::kInitialTerm, -1};
+                                   Helpers::putSingleton(opCtx, coll, lastVote.toBSON());
+                               }
+                           });
     } catch (const DBException& ex) {
         return ex.toStatus();
     }
@@ -730,7 +749,14 @@ Status ReplicationCoordinatorExternalStateImpl::storeLocalLastVoteDocument(
         ShouldNotConflictWithSecondaryBatchApplicationBlock shouldNotConflictBlock(
             opCtx->lockState());
 
-        AutoGetCollection coll(opCtx, NamespaceString::kLastVoteNamespace, MODE_IX);
+        auto coll =
+            acquireCollection(opCtx,
+                              CollectionAcquisitionRequest(
+                                  NamespaceString(NamespaceString::kLastVoteNamespace),
+                                  PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+                                  repl::ReadConcernArgs::get(opCtx),
+                                  AcquisitionPrerequisites::kWrite),
+                              MODE_IX);
         WriteUnitOfWork wunit(opCtx);
 
         // We only want to replace the last vote document if the new last vote document
@@ -747,7 +773,7 @@ Status ReplicationCoordinatorExternalStateImpl::storeLocalLastVoteDocument(
             return oldLastVoteDoc.getStatus();
         }
         if (lastVote.getTerm() > oldLastVoteDoc.getValue().getTerm()) {
-            Helpers::putSingleton(opCtx, NamespaceString::kLastVoteNamespace, lastVoteObj);
+            Helpers::putSingleton(opCtx, coll, lastVoteObj);
         }
         wunit.commit();
         return Status::OK();
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index 171c500a2fd..701f1de097d 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -531,13 +531,21 @@ void RollbackImpl::_restoreTxnsTableEntryFromRetryableWrites(OperationContext* o
     const auto nss = NamespaceString::kSessionTransactionsTableNamespace;
     writeConflictRetry(opCtx, "updateSessionTransactionsTableInRollback", nss.ns(), [&] {
         opCtx->recoveryUnit()->allowOneUntimestampedWrite();
-        AutoGetCollection collection(opCtx, nss, MODE_IX);
+        auto collection =
+            acquireCollection(opCtx,
+                              CollectionAcquisitionRequest(
+                                  NamespaceString(nss),
+                                  PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+                                  repl::ReadConcernArgs::get(opCtx),
+                                  AcquisitionPrerequisites::kWrite),
+                              MODE_IX);
         auto filter = BSON(SessionTxnRecord::kSessionIdFieldName << sessionId.toBSON());
         UnreplicatedWritesBlock uwb(opCtx);
         // Perform an untimestamped write so that it will not be rolled back on recovering
         // to the 'stableTimestamp' if we were to crash. This is safe because this update is
         // meant to be consistent with the 'stableTimestamp' and not the common point.
-        Helpers::upsert(opCtx, nss, filter, sessionTxnRecord.toBSON(), /*fromMigrate=*/false);
+        Helpers::upsert(
+            opCtx, collection, filter, sessionTxnRecord.toBSON(), /*fromMigrate=*/false);
     });
 }
 // Take a stable checkpoint so that writes to the 'config.transactions' table are
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index ba3730f9851..3a398b1b41e 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -1778,7 +1778,7 @@ void syncFixUp(OperationContext* opCtx,
                     request.setGod();
                     request.setUpsert();
 
-                    update(opCtx, ctx.db(), request);
+                    update(opCtx, collection, request);
                 }
             } catch (const DBException& e) {
                 LOGV2(21713,
diff --git a/src/mongo/db/repl/shard_merge_recipient_service.cpp b/src/mongo/db/repl/shard_merge_recipient_service.cpp
index 718f9172629..c1d3a40bc50 100644
--- a/src/mongo/db/repl/shard_merge_recipient_service.cpp
+++ b/src/mongo/db/repl/shard_merge_recipient_service.cpp
@@ -72,6 +72,7 @@
 #include "mongo/db/repl/wait_for_majority_service.h"
 #include "mongo/db/session/session_catalog_mongod.h"
 #include "mongo/db/session/session_txn_record_gen.h"
+#include "mongo/db/shard_role.h"
 #include "mongo/db/storage/wiredtiger/wiredtiger_import.h"
 #include "mongo/db/transaction/transaction_participant.h"
 #include "mongo/db/vector_clock_mutable.h"
@@ -2017,11 +2018,17 @@ void ShardMergeRecipientService::Instance::_writeStateDoc(
     OpType opType,
     const RegisterChangeCbk& registerChange) {
     const auto& nss = NamespaceString::kShardMergeRecipientsNamespace;
-    AutoGetCollection collection(opCtx, nss, MODE_IX);
+    auto collection = acquireCollection(
+        opCtx,
+        CollectionAcquisitionRequest(NamespaceString(nss),
+                                     PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+                                     repl::ReadConcernArgs::get(opCtx),
+                                     AcquisitionPrerequisites::kWrite),
+        MODE_IX);
 
     uassert(ErrorCodes::NamespaceNotFound,
             str::stream() << nss.toStringForErrorMsg() << " does not exist",
-            collection);
+            collection.exists());
 
     writeConflictRetry(opCtx, "writeShardMergeRecipientStateDoc", nss.ns(), [&]() {
         WriteUnitOfWork wunit(opCtx);
@@ -2032,7 +2039,7 @@ void ShardMergeRecipientService::Instance::_writeStateDoc(
         const auto filter =
             BSON(TenantMigrationRecipientDocument::kIdFieldName << stateDoc.getId());
         auto updateResult = Helpers::upsert(opCtx,
auto updateResult = Helpers::upsert(opCtx, - nss, + collection, filter, stateDoc.toBSON(), /*fromMigrate=*/false); diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp index 306ee030f4d..8cfe6307ed2 100644 --- a/src/mongo/db/repl/storage_interface_impl.cpp +++ b/src/mongo/db/repl/storage_interface_impl.cpp @@ -984,22 +984,24 @@ Status _updateWithQuery(OperationContext* opCtx, auto& nss = request.getNamespaceString(); return writeConflictRetry(opCtx, "_updateWithQuery", nss.ns(), [&] { - AutoGetCollection autoColl(opCtx, nss, MODE_IX); - auto collectionResult = getCollection( - autoColl, - nss, - str::stream() << "Unable to update documents in " << nss.toStringForErrorMsg() - << " using query " << request.getQuery()); - if (!collectionResult.isOK()) { - return collectionResult.getStatus(); + const auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx(opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + if (!collection.exists()) { + return Status{ErrorCodes::NamespaceNotFound, + str::stream() + << "Collection [" << nss.toString() << "] not found. " + << "Unable to update documents in " << nss.toStringForErrorMsg() + << " using query " << request.getQuery()}; } - const auto& collection = *collectionResult.getValue(); // ParsedUpdate needs to be inside the write conflict retry loop because it may create a // CanonicalQuery whose ownership will be transferred to the plan executor in // getExecutorUpdate(). const ExtensionsCallbackReal extensionsCallback(opCtx, &request.getNamespaceString()); - ParsedUpdate parsedUpdate(opCtx, &request, extensionsCallback, collection); + ParsedUpdate parsedUpdate( + opCtx, &request, extensionsCallback, collection.getCollectionPtr()); auto parsedUpdateStatus = parsedUpdate.parseRequest(); if (!parsedUpdateStatus.isOK()) { return parsedUpdateStatus; @@ -1012,7 +1014,7 @@ Status _updateWithQuery(OperationContext* opCtx, } auto planExecutorResult = mongo::getExecutorUpdate( - nullptr, &collection, &parsedUpdate, boost::none /* verbosity */); + nullptr, collection, &parsedUpdate, boost::none /* verbosity */); if (!planExecutorResult.isOK()) { return planExecutorResult.getStatus(); } @@ -1047,17 +1049,21 @@ Status StorageInterfaceImpl::upsertById(OperationContext* opCtx, auto query = queryResult.getValue(); return writeConflictRetry(opCtx, "StorageInterfaceImpl::upsertById", nsOrUUID.toString(), [&] { - AutoGetCollection autoColl(opCtx, nsOrUUID, MODE_IX); - auto collectionResult = getCollection(autoColl, nsOrUUID, "Unable to update document."); - if (!collectionResult.isOK()) { - return collectionResult.getStatus(); + const auto collection = + acquireCollection(opCtx, + CollectionAcquisitionRequest::fromOpCtx( + opCtx, nsOrUUID, AcquisitionPrerequisites::kWrite), + MODE_IX); + if (!collection.exists()) { + return Status{ErrorCodes::NamespaceNotFound, + str::stream() << "Collection [" << nsOrUUID.toString() << "] not found. " + << "Unable to update document."}; } - const auto& collection = *collectionResult.getValue(); // We can create an UpdateRequest now that the collection's namespace has been resolved, in // the event it was specified as a UUID. 
         auto request = UpdateRequest();
-        request.setNamespaceString(collection->ns());
+        request.setNamespaceString(collection.nss());
         request.setQuery(query);
         request.setUpdateModification(
             write_ops::UpdateModification::parseFromClassicUpdate(update));
@@ -1069,7 +1075,8 @@ Status StorageInterfaceImpl::upsertById(OperationContext* opCtx,
         // ParsedUpdate needs to be inside the write conflict retry loop because it contains
         // the UpdateDriver whose state may be modified while we are applying the update.
         const ExtensionsCallbackReal extensionsCallback(opCtx, &request.getNamespaceString());
-        ParsedUpdate parsedUpdate(opCtx, &request, extensionsCallback, collection);
+        ParsedUpdate parsedUpdate(
+            opCtx, &request, extensionsCallback, collection.getCollectionPtr());
         auto parsedUpdateStatus = parsedUpdate.parseRequest();
         if (!parsedUpdateStatus.isOK()) {
             return parsedUpdateStatus;
@@ -1077,7 +1084,7 @@ Status StorageInterfaceImpl::upsertById(OperationContext* opCtx,
 
         // We're using the ID hack to perform the update so we have to disallow collections
         // without an _id index.
-        auto descriptor = collection->getIndexCatalog()->findIdIndex(opCtx);
+        auto descriptor = collection.getCollectionPtr()->getIndexCatalog()->findIdIndex(opCtx);
         if (!descriptor) {
             return Status(ErrorCodes::IndexNotFound,
                           "Unable to update document in a collection without an _id index.");
@@ -1086,7 +1093,7 @@ Status StorageInterfaceImpl::upsertById(OperationContext* opCtx,
         UpdateStageParams updateStageParams(
             parsedUpdate.getRequest(), parsedUpdate.getDriver(), nullptr);
         auto planExecutor = InternalPlanner::updateWithIdHack(opCtx,
-                                                              &collection,
+                                                              collection,
                                                               updateStageParams,
                                                               descriptor,
                                                               idKey.wrap(""),
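_updateWithQuery() and upsertById() above show the full executor path on acquisitions: ParsedUpdate still parses against the CollectionPtr, while the executor helpers take the acquisition itself. The recurring shape, condensed (illustrative; request and extensionsCallback as in the surrounding code):

    auto collection = acquireCollection(
        opCtx,
        CollectionAcquisitionRequest::fromOpCtx(opCtx, nss, AcquisitionPrerequisites::kWrite),
        MODE_IX);
    ParsedUpdate parsedUpdate(opCtx, &request, extensionsCallback, collection.getCollectionPtr());
    uassertStatusOK(parsedUpdate.parseRequest());
    auto exec = uassertStatusOK(getExecutorUpdate(
        nullptr /* opDebug */, collection, &parsedUpdate, boost::none /* verbosity */));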
- Helpers::upsert(_opCtx, collection->ns(), BSON("_id" << 0 << "a" << 1 << "b" << 1)); + Helpers::upsert(_opCtx, collectionAcquisition, BSON("_id" << 0 << "a" << 1 << "b" << 1)); { RecordId badRecord = Helpers::findOne(_opCtx, collection.get(), BSON("_id" << 1)); WriteUnitOfWork wuow(_opCtx); - collection_internal::deleteDocument( - _opCtx, *autoColl, kUninitializedStmtId, badRecord, nullptr); + collection_internal::deleteDocument(_opCtx, + collectionAcquisition.getCollectionPtr(), + kUninitializedStmtId, + badRecord, + nullptr); wuow.commit(); } diff --git a/src/mongo/db/repl/tenant_migration_donor_service.cpp b/src/mongo/db/repl/tenant_migration_donor_service.cpp index c432b856d4e..997e920370b 100644 --- a/src/mongo/db/repl/tenant_migration_donor_service.cpp +++ b/src/mongo/db/repl/tenant_migration_donor_service.cpp @@ -34,6 +34,7 @@ #include "mongo/client/connection_string.h" #include "mongo/client/replica_set_monitor.h" #include "mongo/config.h" +#include "mongo/db//shard_role.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/commands/tenant_migration_donor_cmds_gen.h" #include "mongo/db/commands/tenant_migration_recipient_cmds_gen.h" @@ -528,7 +529,14 @@ ExecutorFuture<repl::OpTime> TenantMigrationDonorService::Instance::_insertState pauseTenantMigrationBeforeInsertingDonorStateDoc.pauseWhileSet(opCtx); - AutoGetCollection collection(opCtx, _stateDocumentsNS, MODE_IX); + auto collection = + acquireCollection(opCtx, + CollectionAcquisitionRequest( + _stateDocumentsNS, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); writeConflictRetry( opCtx, "TenantMigrationDonorInsertStateDoc", _stateDocumentsNS.ns(), [&] { @@ -539,7 +547,7 @@ ExecutorFuture<repl::OpTime> TenantMigrationDonorService::Instance::_insertState return BSON("$setOnInsert" << _stateDoc.toBSON()); }(); auto updateResult = Helpers::upsert( - opCtx, _stateDocumentsNS, filter, updateMod, /*fromMigrate=*/false); + opCtx, collection, filter, updateMod, /*fromMigrate=*/false); // '$setOnInsert' update operator can never modify an existing on-disk state // doc. 
@@ -698,7 +706,14 @@ TenantMigrationDonorService::Instance::_markStateDocAsGarbageCollectable(
 
     pauseTenantMigrationDonorBeforeMarkingStateGarbageCollectable.pauseWhileSet(opCtx);
 
-    AutoGetCollection collection(opCtx, _stateDocumentsNS, MODE_IX);
+    auto collection =
+        acquireCollection(opCtx,
+                          CollectionAcquisitionRequest(
+                              _stateDocumentsNS,
+                              PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+                              repl::ReadConcernArgs::get(opCtx),
+                              AcquisitionPrerequisites::kWrite),
+                          MODE_IX);
 
     writeConflictRetry(
         opCtx,
@@ -712,7 +727,7 @@ TenantMigrationDonorService::Instance::_markStateDocAsGarbageCollectable(
                 return _stateDoc.toBSON();
             }();
             auto updateResult = Helpers::upsert(
-                opCtx, _stateDocumentsNS, filter, updateMod, /*fromMigrate=*/false);
+                opCtx, collection, filter, updateMod, /*fromMigrate=*/false);
             invariant(updateResult.numDocsModified == 1);
         });
diff --git a/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp b/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp
index 22f096f396c..2ab83a7b44b 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp
@@ -56,7 +56,13 @@ namespace tenantMigrationRecipientEntryHelpers {
 
 Status insertStateDoc(OperationContext* opCtx, const TenantMigrationRecipientDocument& stateDoc) {
     const auto nss = NamespaceString::kTenantMigrationRecipientsNamespace;
-    AutoGetCollection collection(opCtx, nss, MODE_IX);
+    auto collection = acquireCollection(
+        opCtx,
+        CollectionAcquisitionRequest(NamespaceString(nss),
+                                     PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+                                     repl::ReadConcernArgs::get(opCtx),
+                                     AcquisitionPrerequisites::kWrite),
+        MODE_IX);
 
     // Sanity check
     uassert(ErrorCodes::PrimarySteppedDown,
@@ -76,7 +82,7 @@ Status insertStateDoc(OperationContext* opCtx, const TenantMigrationRecipientDoc
                        << BSON("$exists" << false));
             const auto updateMod = BSON("$setOnInsert" << stateDoc.toBSON());
             auto updateResult =
-                Helpers::upsert(opCtx, nss, filter, updateMod, /*fromMigrate=*/false);
+                Helpers::upsert(opCtx, collection, filter, updateMod, /*fromMigrate=*/false);
 
             // '$setOnInsert' update operator can no way modify the existing on-disk state doc.
             invariant(!updateResult.numDocsModified);
@@ -93,9 +99,15 @@ Status insertStateDoc(OperationContext* opCtx, const TenantMigrationRecipientDoc
 
 Status updateStateDoc(OperationContext* opCtx, const TenantMigrationRecipientDocument& stateDoc) {
     const auto nss = NamespaceString::kTenantMigrationRecipientsNamespace;
-    AutoGetCollection collection(opCtx, nss, MODE_IX);
+    auto collection = acquireCollection(
+        opCtx,
+        CollectionAcquisitionRequest(NamespaceString(nss),
+                                     PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+                                     repl::ReadConcernArgs::get(opCtx),
+                                     AcquisitionPrerequisites::kWrite),
+        MODE_IX);
 
-    if (!collection) {
+    if (!collection.exists()) {
         return Status(ErrorCodes::NamespaceNotFound,
                       str::stream() << nss.toStringForErrorMsg() << " does not exist");
     }
@@ -103,7 +115,7 @@ Status updateStateDoc(OperationContext* opCtx, const TenantMigrationRecipientDoc
     return writeConflictRetry(
         opCtx, "updateTenantMigrationRecipientStateDoc", nss.ns(), [&]() -> Status {
             auto updateResult =
-                Helpers::upsert(opCtx, nss, stateDoc.toBSON(), /*fromMigrate=*/false);
+                Helpers::upsert(opCtx, collection, stateDoc.toBSON(), /*fromMigrate=*/false);
             if (updateResult.numMatched == 0) {
                 return {ErrorCodes::NoSuchKey,
                         str::stream()
diff --git a/src/mongo/db/repl/tenant_migration_util.cpp b/src/mongo/db/repl/tenant_migration_util.cpp
index 9f1109b7b72..511a16d3405 100644
--- a/src/mongo/db/repl/tenant_migration_util.cpp
+++ b/src/mongo/db/repl/tenant_migration_util.cpp
@@ -49,6 +49,7 @@
 #include "mongo/db/repl/repl_client_info.h"
 #include "mongo/db/repl/repl_server_parameters_gen.h"
 #include "mongo/db/repl/wait_for_majority_service.h"
+#include "mongo/db/shard_role.h"
 #include "mongo/util/cancellation.h"
 #include "mongo/util/future_util.h"
@@ -129,7 +130,13 @@ repl::OpTime storeExternalClusterTimeKeyDocs(std::vector<ExternalKeysCollectionD
     pauseTenantMigrationBeforeStoringExternalClusterTimeKeyDocs.pauseWhileSet(opCtx);
 
     for (auto& keyDoc : keyDocs) {
-        AutoGetCollection collection(opCtx, nss, MODE_IX);
+        auto collection = acquireCollection(
+            opCtx,
+            CollectionAcquisitionRequest(NamespaceString(nss),
+                                         PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+                                         repl::ReadConcernArgs::get(opCtx),
+                                         AcquisitionPrerequisites::kWrite),
+            MODE_IX);
 
         writeConflictRetry(opCtx, "CloneExternalKeyDocs", nss.ns(), [&] {
             // Note that each external key's _id is generated by the migration, so this upsert can
@@ -139,7 +146,7 @@ repl::OpTime storeExternalClusterTimeKeyDocs(std::vector<ExternalKeysCollectionD
             const auto updateMod = keyDoc.toBSON();
 
             Helpers::upsert(opCtx,
-                            nss,
+                            collection,
                             filter,
                             updateMod,
                             /*fromMigrate=*/false);
@@ -602,7 +609,14 @@ ExecutorFuture<void> markExternalKeysAsGarbageCollectable(
                    opCtx);
 
                const auto& nss = NamespaceString::kExternalKeysCollectionNamespace;
-               AutoGetCollection coll(opCtx, nss, MODE_IX);
+               auto collection = acquireCollection(
+                   opCtx,
+                   CollectionAcquisitionRequest(
+                       NamespaceString(nss),
+                       PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+                       repl::ReadConcernArgs::get(opCtx),
+                       AcquisitionPrerequisites::kWrite),
+                   MODE_IX);
 
                writeConflictRetry(
                    opCtx, "TenantMigrationMarkExternalKeysAsGarbageCollectable", nss.ns(), [&] {
@@ -623,7 +637,7 @@ ExecutorFuture<void> markExternalKeysAsGarbageCollectable(
                        // may fail to match any keys if they were previously marked garbage
                        // collectable and deleted by the TTL monitor. Because of this we can't
                        // assert on the update result's numMatched or numDocsModified.
- update(opCtx, coll.getDb(), request); + update(opCtx, collection, request); }); }); }) diff --git a/src/mongo/db/repl/transaction_oplog_application.cpp b/src/mongo/db/repl/transaction_oplog_application.cpp index 7506a9b2122..4fef613e8e7 100644 --- a/src/mongo/db/repl/transaction_oplog_application.cpp +++ b/src/mongo/db/repl/transaction_oplog_application.cpp @@ -124,20 +124,15 @@ Status _applyOperationsForTransaction(OperationContext* opCtx, // Presently, it is not allowed to run a prepared transaction with a command // inside. TODO(SERVER-46105) invariant(!op.isCommand()); - const auto coll = acquireCollection( + auto coll = acquireCollection( opCtx, CollectionAcquisitionRequest(op.getNss(), AcquisitionPrerequisites::kPretendUnsharded, repl::ReadConcernArgs::get(opCtx), AcquisitionPrerequisites::kWrite), MODE_IX); - const auto db = [opCtx, &coll]() { - AutoGetDb autoDb(opCtx, coll.nss().dbName(), MODE_IX); - return autoDb.getDb(); - }(); const bool isDataConsistent = true; auto status = repl::applyOperation_inlock(opCtx, - db, coll, ApplierOperation{&op}, false /*alwaysUpsert*/, diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp index 9210fc9cc2c..c81b3bed882 100644 --- a/src/mongo/db/s/migration_destination_manager.cpp +++ b/src/mongo/db/s/migration_destination_manager.cpp @@ -1731,11 +1731,17 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx, const BSONObjIterator i(xfer["reload"].Obj()); while (i.more()) { totalDocs++; - AutoGetCollection autoColl(opCtx, _nss, MODE_IX); + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(_nss, + AcquisitionPrerequisites::kPretendUnsharded, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); uassert(ErrorCodes::ConflictingOperationInProgress, str::stream() << "Collection " << _nss.toStringForErrorMsg() << " was dropped in the middle of the migration", - autoColl.getCollection()); + collection.exists()); BSONObj updatedDoc = i.next().Obj(); @@ -1765,7 +1771,7 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx, const // We are in write lock here, so sure we aren't killing writeConflictRetry(opCtx, "transferModsUpdates", _nss.ns(), [&] { - auto res = Helpers::upsert(opCtx, _nss, updatedDoc, true); + auto res = Helpers::upsert(opCtx, collection, updatedDoc, true); if (!res.upsertedId.isEmpty()) { changeInOrphans++; } diff --git a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp index 33016bf5bf6..995a1db5eb9 100644 --- a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp +++ b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp @@ -43,6 +43,7 @@ #include "mongo/db/s/sharding_write_router.h" #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/shard_id.h" +#include "mongo/db/shard_role.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/s/catalog/sharding_catalog_client_mock.h" #include "mongo/s/catalog/type_shard.h" @@ -277,8 +278,11 @@ protected: const BSONObj& filter, const BSONObj& update, const ReshardingEnv& env) { - AutoGetCollection coll(opCtx, nss, MODE_IX); - Helpers::update(opCtx, nss, filter, update); + auto coll = acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx(opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + Helpers::update(opCtx, coll, filter, update); } void 
deleteDoc(OperationContext* opCtx, diff --git a/src/mongo/db/s/resharding/resharding_donor_service.cpp b/src/mongo/db/s/resharding/resharding_donor_service.cpp index 09aa5aa0845..e3a82f24d7f 100644 --- a/src/mongo/db/s/resharding/resharding_donor_service.cpp +++ b/src/mongo/db/s/resharding/resharding_donor_service.cpp @@ -997,15 +997,21 @@ void ReshardingDonorService::DonorStateMachine::_updateDonorDocument( const auto& nss = NamespaceString::kDonorReshardingOperationsNamespace; writeConflictRetry(opCtx.get(), "DonorStateMachine::_updateDonorDocument", nss.toString(), [&] { - AutoGetCollection coll(opCtx.get(), nss, MODE_X); + auto coll = acquireCollection( + opCtx.get(), + CollectionAcquisitionRequest(NamespaceString(nss), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx.get()), + AcquisitionPrerequisites::kWrite), + MODE_X); uassert(ErrorCodes::NamespaceNotFound, str::stream() << nss.toStringForErrorMsg() << " does not exist", - coll); + coll.exists()); WriteUnitOfWork wuow(opCtx.get()); Helpers::update(opCtx.get(), - nss, + coll, BSON(ReshardingDonorDocument::kReshardingUUIDFieldName << _metadata.getReshardingUUID()), BSON("$set" << BSON(ReshardingDonorDocument::kMutableStateFieldName diff --git a/src/mongo/db/s/resharding/resharding_oplog_application.cpp b/src/mongo/db/s/resharding/resharding_oplog_application.cpp index c25c379139a..154c6b457ba 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_application.cpp +++ b/src/mongo/db/s/resharding/resharding_oplog_application.cpp @@ -152,7 +152,7 @@ Status ReshardingOplogApplicationRules::applyOperation( const auto outputDb = AutoGetDb(opCtx, _outputNss.dbName(), MODE_IX); - const auto outputColl = + auto outputColl = opCtx->runWithDeadline(getDeadline(opCtx), opCtx->getTimeoutError(), [&] { return acquireCollection( opCtx, @@ -167,7 +167,7 @@ Status ReshardingOplogApplicationRules::applyOperation( << _outputNss.toStringForErrorMsg(), outputColl.exists()); - const auto stashColl = + auto stashColl = opCtx->runWithDeadline(getDeadline(opCtx), opCtx->getTimeoutError(), [&] { return acquireCollection( opCtx, @@ -185,20 +185,12 @@ Status ReshardingOplogApplicationRules::applyOperation( auto opType = op.getOpType(); switch (opType) { case repl::OpTypeEnum::kInsert: - _applyInsert_inlock(opCtx, - outputDb.getDb(), - outputColl.getCollectionPtr(), - stashColl.getCollectionPtr(), - op); + _applyInsert_inlock(opCtx, outputColl, stashColl, op); _applierMetrics->onInsertApplied(); break; case repl::OpTypeEnum::kUpdate: - _applyUpdate_inlock(opCtx, - outputDb.getDb(), - outputColl.getCollectionPtr(), - stashColl.getCollectionPtr(), - op); + _applyUpdate_inlock(opCtx, outputColl, stashColl, op); _applierMetrics->onUpdateApplied(); break; case repl::OpTypeEnum::kDelete: { @@ -241,9 +233,8 @@ Status ReshardingOplogApplicationRules::applyOperation( } void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCtx, - Database* db, - const CollectionPtr& outputColl, - const CollectionPtr& stashColl, + ScopedCollectionAcquisition& outputColl, + ScopedCollectionAcquisition& stashColl, const repl::OplogEntry& op) const { /** * The rules to apply ordinary insert operations are as follows: @@ -276,7 +267,7 @@ void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCt // First, query the conflict stash collection using [op _id] as the query. If a doc exists, // apply rule #1 and run a replacement update on the stash collection. 
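The resharding applier hunks change the helper signatures themselves: _applyInsert_inlock and _applyUpdate_inlock drop the Database* and CollectionPtr parameters and take ScopedCollectionAcquisition& instead, and the update() entry point resolves the database from the acquisition. A sketch of an update issued through an acquisition, condensed from the call sites in these hunks; the wrapper function is illustrative:

    // Illustrative helper; 'idQuery' and 'replacement' stand in for the oplog
    // entry's _id query and replacement document.
    void applyReplacementUpdate(OperationContext* opCtx,
                                ScopedCollectionAcquisition& coll,
                                const BSONObj& idQuery,
                                const BSONObj& replacement) {
        auto request = UpdateRequest();
        request.setNamespaceString(coll.nss());
        request.setQuery(idQuery);
        request.setUpdateModification(
            write_ops::UpdateModification::parseFromClassicUpdate(replacement));
        request.setUpsert(false);
        request.setFromOplogApplication(true);

        // update() no longer needs a Database*; the acquisition supplies it.
        UpdateResult ur = update(opCtx, coll, request);
        invariant(ur.numMatched != 0);
    }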
- auto stashCollDoc = _queryStashCollById(opCtx, stashColl, idQuery); + auto stashCollDoc = _queryStashCollById(opCtx, stashColl.getCollectionPtr(), idQuery); if (!stashCollDoc.isEmpty()) { auto request = UpdateRequest(); request.setNamespaceString(_myStashNss); @@ -285,7 +276,7 @@ void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCt request.setUpsert(false); request.setFromOplogApplication(true); - UpdateResult ur = update(opCtx, db, request); + UpdateResult ur = update(opCtx, stashColl, request); invariant(ur.numMatched != 0); _applierMetrics->onWriteToStashCollections(); @@ -296,11 +287,12 @@ void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCt // Query the output collection for a doc with _id == [op _id]. If a doc does not exist, apply // rule #2 and insert this doc into the output collection. BSONObj outputCollDoc; - auto foundDoc = Helpers::findByIdAndNoopUpdate(opCtx, outputColl, idQuery, outputCollDoc); + auto foundDoc = Helpers::findByIdAndNoopUpdate( + opCtx, outputColl.getCollectionPtr(), idQuery, outputCollDoc); if (!foundDoc) { uassertStatusOK(collection_internal::insertDocument(opCtx, - outputColl, + outputColl.getCollectionPtr(), InsertStatement(oField), nullptr /* OpDebug */, false /* fromMigrate */)); @@ -323,7 +315,7 @@ void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCt request.setUpsert(false); request.setFromOplogApplication(true); - UpdateResult ur = update(opCtx, db, request); + UpdateResult ur = update(opCtx, outputColl, request); invariant(ur.numMatched != 0); return; @@ -331,16 +323,18 @@ void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCt // The doc does not belong to '_donorShardId' under the original shard key, so apply rule #4 // and insert the contents of 'op' to the stash collection. - uassertStatusOK(collection_internal::insertDocument( - opCtx, stashColl, InsertStatement(oField), nullptr /* OpDebug */, false /* fromMigrate */)); + uassertStatusOK(collection_internal::insertDocument(opCtx, + stashColl.getCollectionPtr(), + InsertStatement(oField), + nullptr /* OpDebug */, + false /* fromMigrate */)); _applierMetrics->onWriteToStashCollections(); } void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCtx, - Database* db, - const CollectionPtr& outputColl, - const CollectionPtr& stashColl, + ScopedCollectionAcquisition& outputColl, + ScopedCollectionAcquisition& stashColl, const repl::OplogEntry& op) const { /** * The rules to apply ordinary update operations are as follows: @@ -375,7 +369,7 @@ void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCt // First, query the conflict stash collection using [op _id] as the query. If a doc exists, // apply rule #1 and update the doc from the stash collection. 
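APIs that still consume a CollectionPtr, such as Helpers::findByIdAndNoopUpdate and collection_internal::insertDocument, are now fed getCollectionPtr() explicitly rather than relying on an implicit conversion. A condensed sketch of the insert path shown above; the wrapper and its arguments are illustrative:

    // Illustrative wrapper; 'idQuery' and 'oField' stand in for the oplog
    // entry's _id query and document payload.
    void insertIfAbsent(OperationContext* opCtx,
                        ScopedCollectionAcquisition& outputColl,
                        const BSONObj& idQuery,
                        const BSONObj& oField) {
        BSONObj existing;
        const bool found = Helpers::findByIdAndNoopUpdate(
            opCtx, outputColl.getCollectionPtr(), idQuery, existing);
        if (!found) {
            uassertStatusOK(collection_internal::insertDocument(opCtx,
                                                                outputColl.getCollectionPtr(),
                                                                InsertStatement(oField),
                                                                nullptr /* OpDebug */,
                                                                false /* fromMigrate */));
        }
    }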
- auto stashCollDoc = _queryStashCollById(opCtx, stashColl, idQuery); + auto stashCollDoc = _queryStashCollById(opCtx, stashColl.getCollectionPtr(), idQuery); if (!stashCollDoc.isEmpty()) { auto request = UpdateRequest(); request.setNamespaceString(_myStashNss); @@ -383,7 +377,7 @@ void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCt request.setUpdateModification(std::move(updateMod)); request.setUpsert(false); request.setFromOplogApplication(true); - UpdateResult ur = update(opCtx, db, request); + UpdateResult ur = update(opCtx, stashColl, request); invariant(ur.numMatched != 0); @@ -394,7 +388,8 @@ void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCt // Query the output collection for a doc with _id == [op _id]. BSONObj outputCollDoc; - auto foundDoc = Helpers::findByIdAndNoopUpdate(opCtx, outputColl, idQuery, outputCollDoc); + auto foundDoc = Helpers::findByIdAndNoopUpdate( + opCtx, outputColl.getCollectionPtr(), idQuery, outputCollDoc); if (!foundDoc || !_sourceChunkMgr.keyBelongsToShard( @@ -416,7 +411,7 @@ void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCt request.setUpdateModification(std::move(updateMod)); request.setUpsert(false); request.setFromOplogApplication(true); - UpdateResult ur = update(opCtx, db, request); + UpdateResult ur = update(opCtx, outputColl, request); invariant(ur.numMatched != 0); } diff --git a/src/mongo/db/s/resharding/resharding_oplog_application.h b/src/mongo/db/s/resharding/resharding_oplog_application.h index 4a1d16f66ca..5d6c052ed0a 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_application.h +++ b/src/mongo/db/s/resharding/resharding_oplog_application.h @@ -79,16 +79,14 @@ public: private: // Applies an insert operation void _applyInsert_inlock(OperationContext* opCtx, - Database* db, - const CollectionPtr& outputColl, - const CollectionPtr& stashColl, + ScopedCollectionAcquisition& outputColl, + ScopedCollectionAcquisition& stashColl, const repl::OplogEntry& op) const; // Applies an update operation void _applyUpdate_inlock(OperationContext* opCtx, - Database* db, - const CollectionPtr& outputColl, - const CollectionPtr& stashColl, + ScopedCollectionAcquisition& outputColl, + ScopedCollectionAcquisition& stashColl, const repl::OplogEntry& op) const; // Applies a delete operation diff --git a/src/mongo/db/s/sharding_index_catalog_ddl_util.cpp b/src/mongo/db/s/sharding_index_catalog_ddl_util.cpp index 3f088911d2b..b83b62272a9 100644 --- a/src/mongo/db/s/sharding_index_catalog_ddl_util.cpp +++ b/src/mongo/db/s/sharding_index_catalog_ddl_util.cpp @@ -57,8 +57,8 @@ void deleteShardingIndexCatalogEntries(OperationContext* opCtx, } -const ScopedCollectionAcquisition& getAcquisitionForNss( - const std::vector<ScopedCollectionAcquisition>& acquisitions, const NamespaceString& nss) { +ScopedCollectionAcquisition& getAcquisitionForNss( + std::vector<ScopedCollectionAcquisition>& acquisitions, const NamespaceString& nss) { auto it = std::find_if(acquisitions.begin(), acquisitions.end(), [&nss](auto& acquisition) { return acquisition.nss() == nss; }); @@ -80,7 +80,7 @@ void renameCollectionShardingIndexCatalog(OperationContext* opCtx, WriteUnitOfWork wunit(opCtx); AutoGetCollection fromToColl( opCtx, fromNss, MODE_IX, AutoGetCollection::Options{}.secondaryNssOrUUIDs({toNss})); - const auto acquisitions = acquireCollections( + auto acquisitions = acquireCollections( opCtx, {CollectionAcquisitionRequest( NamespaceString(NamespaceString::kShardCollectionCatalogNamespace), 
@@ -184,11 +184,24 @@ void addShardingIndexCatalogEntryToCollection(OperationContext* opCtx, opCtx, "AddIndexCatalogEntry", NamespaceString::kShardIndexCatalogNamespace.ns(), [&]() { WriteUnitOfWork wunit(opCtx); AutoGetCollection userColl(opCtx, userCollectionNss, MODE_IX); - AutoGetCollection collsColl(opCtx, - NamespaceString::kShardCollectionCatalogNamespace, - MODE_IX, - AutoGetCollection::Options{}.secondaryNssOrUUIDs( - {NamespaceString::kShardIndexCatalogNamespace})); + auto acquisitions = acquireCollections( + opCtx, + {CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kShardCollectionCatalogNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kShardIndexCatalogNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite)}, + MODE_IX); + + auto& collsColl = getAcquisitionForNss( + acquisitions, NamespaceString::kShardCollectionCatalogNamespace); + const auto& idxColl = + getAcquisitionForNss(acquisitions, NamespaceString::kShardIndexCatalogNamespace); { // First get the document to check the index version if the document already exists @@ -198,7 +211,7 @@ void addShardingIndexCatalogEntryToCollection(OperationContext* opCtx, << ShardAuthoritativeCollectionType::kUuidFieldName << collectionUUID); BSONObj collectionDoc; bool docExists = - Helpers::findOne(opCtx, collsColl.getCollection(), query, collectionDoc); + Helpers::findOne(opCtx, collsColl.getCollectionPtr(), query, collectionDoc); if (docExists) { auto collection = ShardAuthoritativeCollectionType::parse( IDLParserContext("AddIndexCatalogEntry"), collectionDoc); @@ -225,18 +238,16 @@ void addShardingIndexCatalogEntryToCollection(OperationContext* opCtx, << ShardAuthoritativeCollectionType::kIndexVersionFieldName << lastmod)); request.setUpsert(true); request.setFromOplogApplication(true); - mongo::update(opCtx, collsColl.getDb(), request); + mongo::update(opCtx, collsColl, request); } - AutoGetCollection idxColl(opCtx, NamespaceString::kShardIndexCatalogNamespace, MODE_IX); - { repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx); BSONObjBuilder builder(indexCatalogEntry.toBSON()); auto idStr = format(FMT_STRING("{}_{}"), collectionUUID.toString(), name); builder.append("_id", idStr); uassertStatusOK(collection_internal::insertDocument(opCtx, - idxColl.getCollection(), + idxColl.getCollectionPtr(), InsertStatement{builder.obj()}, nullptr, false)); @@ -245,7 +256,7 @@ void addShardingIndexCatalogEntryToCollection(OperationContext* opCtx, opCtx->getServiceContext()->getOpObserver()->onModifyCollectionShardingIndexCatalog( opCtx, userCollectionNss, - idxColl->uuid(), + idxColl.uuid(), ShardingIndexCatalogInsertEntry(indexCatalogEntry).toBSON()); wunit.commit(); }); @@ -263,11 +274,25 @@ void removeShardingIndexCatalogEntryFromCollection(OperationContext* opCtx, [&]() { WriteUnitOfWork wunit(opCtx); AutoGetCollection userColl(opCtx, nss, MODE_IX); - AutoGetCollection collsColl(opCtx, - NamespaceString::kShardCollectionCatalogNamespace, - MODE_IX, - AutoGetCollection::Options{}.secondaryNssOrUUIDs( - {NamespaceString::kShardIndexCatalogNamespace})); + auto acquisitions = acquireCollections( + opCtx, + {CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kShardCollectionCatalogNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + 
repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kShardIndexCatalogNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite)}, + MODE_IX); + + auto& collsColl = getAcquisitionForNss( + acquisitions, NamespaceString::kShardCollectionCatalogNamespace); + const auto& idxColl = + getAcquisitionForNss(acquisitions, NamespaceString::kShardIndexCatalogNamespace); + { // First get the document to check the index version if the document already exists const auto query = @@ -275,7 +300,7 @@ void removeShardingIndexCatalogEntryFromCollection(OperationContext* opCtx, << nss.ns() << ShardAuthoritativeCollectionType::kUuidFieldName << uuid); BSONObj collectionDoc; bool docExists = - Helpers::findOne(opCtx, collsColl.getCollection(), query, collectionDoc); + Helpers::findOne(opCtx, collsColl.getCollectionPtr(), query, collectionDoc); if (docExists) { auto collection = ShardAuthoritativeCollectionType::parse( IDLParserContext("RemoveIndexCatalogEntry"), collectionDoc); @@ -301,18 +326,9 @@ void removeShardingIndexCatalogEntryFromCollection(OperationContext* opCtx, << ShardAuthoritativeCollectionType::kIndexVersionFieldName << lastmod)); request.setUpsert(true); request.setFromOplogApplication(true); - mongo::update(opCtx, collsColl.getDb(), request); + mongo::update(opCtx, collsColl, request); } - const auto idxColl = - acquireCollection(opCtx, - CollectionAcquisitionRequest( - NamespaceString(NamespaceString::kShardIndexCatalogNamespace), - PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, - repl::ReadConcernArgs::get(opCtx), - AcquisitionPrerequisites::kWrite), - MODE_IX); - { repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx); mongo::deleteObjects(opCtx, @@ -343,18 +359,32 @@ void replaceCollectionShardingIndexCatalog(OperationContext* opCtx, [&]() { WriteUnitOfWork wunit(opCtx); AutoGetCollection userColl(opCtx, nss, MODE_IX); - AutoGetCollection collsColl(opCtx, - NamespaceString::kShardCollectionCatalogNamespace, - MODE_IX, - AutoGetCollection::Options{}.secondaryNssOrUUIDs( - {NamespaceString::kShardIndexCatalogNamespace})); + auto acquisitions = acquireCollections( + opCtx, + {CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kShardCollectionCatalogNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kShardIndexCatalogNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite)}, + MODE_IX); + + auto& collsColl = getAcquisitionForNss( + acquisitions, NamespaceString::kShardCollectionCatalogNamespace); + const auto& idxColl = + getAcquisitionForNss(acquisitions, NamespaceString::kShardIndexCatalogNamespace); + { const auto query = BSON(ShardAuthoritativeCollectionType::kNssFieldName << nss.ns() << ShardAuthoritativeCollectionType::kUuidFieldName << uuid); BSONObj collectionDoc; bool docExists = - Helpers::findOne(opCtx, collsColl.getCollection(), query, collectionDoc); + Helpers::findOne(opCtx, collsColl.getCollectionPtr(), query, collectionDoc); if (docExists) { auto collection = ShardAuthoritativeCollectionType::parse( IDLParserContext("ReplaceIndexCatalogEntry"), collectionDoc); @@ -383,17 +413,9 @@ void 
replaceCollectionShardingIndexCatalog(OperationContext* opCtx, << ShardAuthoritativeCollectionType::kIndexVersionFieldName << indexVersion)); request.setUpsert(true); request.setFromOplogApplication(true); - mongo::update(opCtx, collsColl.getDb(), request); + mongo::update(opCtx, collsColl, request); } - const auto idxColl = - acquireCollection(opCtx, - CollectionAcquisitionRequest( - NamespaceString(NamespaceString::kShardIndexCatalogNamespace), - PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, - repl::ReadConcernArgs::get(opCtx), - AcquisitionPrerequisites::kWrite), - MODE_IX); { // Clear old indexes. repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx); @@ -435,7 +457,7 @@ void dropCollectionShardingIndexCatalog(OperationContext* opCtx, const Namespace WriteUnitOfWork wunit(opCtx); Lock::DBLock dbLock(opCtx, nss.dbName(), MODE_IX); Lock::CollectionLock collLock(opCtx, nss, MODE_IX); - const auto acquisitions = acquireCollections( + auto acquisitions = acquireCollections( opCtx, {CollectionAcquisitionRequest( NamespaceString(NamespaceString::kShardCollectionCatalogNamespace), @@ -474,9 +496,6 @@ void dropCollectionShardingIndexCatalog(OperationContext* opCtx, const Namespace mongo::deleteObjects(opCtx, collsColl, query, true); } - // AutoGetCollection idxColl(opCtx, NamespaceString::kShardIndexCatalogNamespace, - // MODE_IX); - { repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx); deleteShardingIndexCatalogEntries(opCtx, idxColl, *collectionUUID); @@ -501,7 +520,7 @@ void clearCollectionShardingIndexCatalog(OperationContext* opCtx, [&]() { WriteUnitOfWork wunit(opCtx); AutoGetCollection userColl(opCtx, nss, MODE_IX); - const auto acquisitions = acquireCollections( + auto acquisitions = acquireCollections( opCtx, {CollectionAcquisitionRequest( NamespaceString(NamespaceString::kShardCollectionCatalogNamespace), diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp index 2573113c24b..dc385b0449c 100644 --- a/src/mongo/db/s/sharding_initialization_mongod.cpp +++ b/src/mongo/db/s/sharding_initialization_mongod.cpp @@ -55,6 +55,7 @@ #include "mongo/db/s/shard_server_catalog_cache_loader.h" #include "mongo/db/s/transaction_coordinator_service.h" #include "mongo/db/server_options.h" +#include "mongo/db/shard_role.h" #include "mongo/db/vector_clock_metadata_hook.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/task_executor_pool.h" @@ -506,8 +507,15 @@ void ShardingInitializationMongoD::updateShardIdentityConfigString( write_ops::UpdateModification::parseFromClassicUpdate(updateObj)); try { - AutoGetCollection autoColl(opCtx, NamespaceString::kServerConfigurationNamespace, MODE_IX); - auto result = update(opCtx, autoColl.ensureDbExists(opCtx), updateReq); + auto collection = + acquireCollection(opCtx, + CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kServerConfigurationNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + auto result = update(opCtx, collection, updateReq); if (result.numMatched == 0) { LOGV2_WARNING(22076, "Failed to update config server connection string of shard identity " diff --git a/src/mongo/db/s/sharding_state_recovery.cpp b/src/mongo/db/s/sharding_state_recovery.cpp index 93d1b3cfd3d..70d037ef865 100644 --- a/src/mongo/db/s/sharding_state_recovery.cpp +++ b/src/mongo/db/s/sharding_state_recovery.cpp @@ -47,6 +47,7 @@ #include 
"mongo/db/repl/repl_client_info.h" #include "mongo/db/s/sharding_logging.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/shard_role.h" #include "mongo/db/vector_clock_mutable.h" #include "mongo/db/write_concern.h" #include "mongo/db/write_concern_options.h" @@ -145,38 +146,43 @@ Status modifyRecoveryDocument(OperationContext* opCtx, RecoveryDocument::ChangeType change, const WriteConcernOptions& writeConcern) { try { - // Use boost::optional so we can release the locks early - boost::optional<AutoGetDb> autoGetDb; - autoGetDb.emplace(opCtx, NamespaceString::kServerConfigurationNamespace.dbName(), MODE_X); - - const auto configOpTime = [&]() { - const auto vcTime = VectorClock::get(opCtx)->getTime(); - const auto vcConfigTimeTs = vcTime.configTime().asTimestamp(); - return mongo::repl::OpTime(vcConfigTimeTs, mongo::repl::OpTime::kUninitializedTerm); - }(); - - BSONObj updateObj = RecoveryDocument::createChangeObj(configOpTime, change); - - LOGV2_DEBUG(22083, - 1, - "Changing sharding recovery document {update}", - "Changing sharding recovery document", - "update"_attr = redact(updateObj)); - - auto updateReq = UpdateRequest(); - updateReq.setNamespaceString(NamespaceString::kServerConfigurationNamespace); - updateReq.setQuery(RecoveryDocument::getQuery()); - updateReq.setUpdateModification( - write_ops::UpdateModification::parseFromClassicUpdate(updateObj)); - updateReq.setUpsert(); - - UpdateResult result = update(opCtx, autoGetDb->ensureDbExists(opCtx), updateReq); - invariant(result.numDocsModified == 1 || !result.upsertedId.isEmpty()); - invariant(result.numMatched <= 1); - - // Wait until the majority write concern has been satisfied, but do it outside of lock - autoGetDb = boost::none; + { + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kServerConfigurationNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_X); + + const auto configOpTime = [&]() { + const auto vcTime = VectorClock::get(opCtx)->getTime(); + const auto vcConfigTimeTs = vcTime.configTime().asTimestamp(); + return mongo::repl::OpTime(vcConfigTimeTs, mongo::repl::OpTime::kUninitializedTerm); + }(); + + BSONObj updateObj = RecoveryDocument::createChangeObj(configOpTime, change); + + LOGV2_DEBUG(22083, + 1, + "Changing sharding recovery document {update}", + "Changing sharding recovery document", + "update"_attr = redact(updateObj)); + + auto updateReq = UpdateRequest(); + updateReq.setNamespaceString(NamespaceString::kServerConfigurationNamespace); + updateReq.setQuery(RecoveryDocument::getQuery()); + updateReq.setUpdateModification( + write_ops::UpdateModification::parseFromClassicUpdate(updateObj)); + updateReq.setUpsert(); + + UpdateResult result = update(opCtx, collection, updateReq); + invariant(result.numDocsModified == 1 || !result.upsertedId.isEmpty()); + invariant(result.numMatched <= 1); + } + // Wait for write concern after having released the locks. 
WriteConcernResult writeConcernResult; return waitForWriteConcern(opCtx, repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(), diff --git a/src/mongo/db/serverless/shard_split_donor_service.cpp b/src/mongo/db/serverless/shard_split_donor_service.cpp index f085e750d8e..0939a556a00 100644 --- a/src/mongo/db/serverless/shard_split_donor_service.cpp +++ b/src/mongo/db/serverless/shard_split_donor_service.cpp @@ -43,6 +43,7 @@ #include "mongo/db/s/resharding/resharding_util.h" #include "mongo/db/serverless/shard_split_statistics.h" #include "mongo/db/serverless/shard_split_utils.h" +#include "mongo/db/shard_role.h" #include "mongo/executor/cancelable_executor.h" #include "mongo/executor/connection_pool.h" #include "mongo/executor/network_interface_factory.h" @@ -881,13 +882,20 @@ ExecutorFuture<repl::OpTime> ShardSplitDonorService::DonorStateMachine::_updateS auto opCtxHolder = _cancelableOpCtxFactory->makeOperationContext(&cc()); auto opCtx = opCtxHolder.get(); - AutoGetCollection collection(opCtx, _stateDocumentsNS, MODE_IX); + auto collection = + acquireCollection(opCtx, + CollectionAcquisitionRequest( + _stateDocumentsNS, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); if (!isInsert) { uassert(ErrorCodes::NamespaceNotFound, str::stream() << _stateDocumentsNS.toStringForErrorMsg() << " does not exist", - collection); + collection.exists()); } writeConflictRetry( @@ -951,7 +959,7 @@ ExecutorFuture<repl::OpTime> ShardSplitDonorService::DonorStateMachine::_updateS const auto filter = BSON(ShardSplitDonorDocument::kIdFieldName << uuid); auto updateResult = Helpers::upsert(opCtx, - _stateDocumentsNS, + collection, filter, updatedStateDocBson, /*fromMigrate=*/false); @@ -967,7 +975,7 @@ ExecutorFuture<repl::OpTime> ShardSplitDonorService::DonorStateMachine::_updateS const auto originalRecordId = Helpers::findOne(opCtx, - collection.getCollection(), + collection.getCollectionPtr(), BSON("_id" << originalStateDocBson["_id"])); const auto originalSnapshot = Snapshotted<BSONObj>( opCtx->recoveryUnit()->getSnapshotId(), originalStateDocBson); @@ -980,7 +988,7 @@ ExecutorFuture<repl::OpTime> ShardSplitDonorService::DonorStateMachine::_updateS collection_internal::updateDocument( opCtx, - *collection, + collection.getCollectionPtr(), originalRecordId, originalSnapshot, updatedStateDocBson, diff --git a/src/mongo/db/serverless/shard_split_utils.cpp b/src/mongo/db/serverless/shard_split_utils.cpp index 6f8a42a417c..7afdd4e6d8e 100644 --- a/src/mongo/db/serverless/shard_split_utils.cpp +++ b/src/mongo/db/serverless/shard_split_utils.cpp @@ -154,7 +154,13 @@ repl::ReplSetConfig makeSplitConfig(const repl::ReplSetConfig& config, Status insertStateDoc(OperationContext* opCtx, const ShardSplitDonorDocument& stateDoc) { const auto nss = NamespaceString::kShardSplitDonorsNamespace; - AutoGetCollection collection(opCtx, nss, MODE_IX); + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(NamespaceString(nss), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); uassert(ErrorCodes::PrimarySteppedDown, str::stream() << "No longer primary while attempting to insert shard split" @@ -166,7 +172,8 @@ Status insertStateDoc(OperationContext* opCtx, const ShardSplitDonorDocument& st << stateDoc.getId() << ShardSplitDonorDocument::kExpireAtFieldName << BSON("$exists" << false)); const auto updateMod = 
BSON("$setOnInsert" << stateDoc.toBSON()); - auto updateResult = Helpers::upsert(opCtx, nss, filter, updateMod, /*fromMigrate=*/false); + auto updateResult = + Helpers::upsert(opCtx, collection, filter, updateMod, /*fromMigrate=*/false); invariant(!updateResult.numDocsModified); if (updateResult.upsertedId.isEmpty()) { @@ -180,15 +187,22 @@ Status insertStateDoc(OperationContext* opCtx, const ShardSplitDonorDocument& st Status updateStateDoc(OperationContext* opCtx, const ShardSplitDonorDocument& stateDoc) { const auto nss = NamespaceString::kShardSplitDonorsNamespace; - AutoGetCollection collection(opCtx, nss, MODE_IX); + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(NamespaceString(nss), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); - if (!collection) { + if (!collection.exists()) { return Status(ErrorCodes::NamespaceNotFound, str::stream() << nss.toStringForErrorMsg() << " does not exist"); } return writeConflictRetry(opCtx, "updateShardSplitStateDoc", nss.ns(), [&]() -> Status { - auto updateResult = Helpers::upsert(opCtx, nss, stateDoc.toBSON(), /*fromMigrate=*/false); + auto updateResult = + Helpers::upsert(opCtx, collection, stateDoc.toBSON(), /*fromMigrate=*/false); if (updateResult.numMatched == 0) { return {ErrorCodes::NoSuchKey, str::stream() << "Existing shard split state document not found for id: " diff --git a/src/mongo/db/shard_role.cpp b/src/mongo/db/shard_role.cpp index 22bc13ed110..450e343befc 100644 --- a/src/mongo/db/shard_role.cpp +++ b/src/mongo/db/shard_role.cpp @@ -284,16 +284,6 @@ std::vector<ScopedCollectionOrViewAcquisition> acquireResolvedCollectionsOrViews std::holds_alternative<CollectionPtr>(snapshotedServices.collectionPtrOrView); if (isCollection) { - const auto& collectionPtr = - std::get<CollectionPtr>(snapshotedServices.collectionPtrOrView); - invariant(!prerequisites.uuid || prerequisites.uuid == collectionPtr->uuid()); - if (!prerequisites.uuid && collectionPtr) { - // If the uuid wasn't originally set on the AcquisitionRequest, set it now on the - // prerequisites so that on restore from yield we can check we are restoring the - // same instance of the ns. 
- prerequisites.uuid = collectionPtr->uuid(); - } - shard_role_details::AcquiredCollection& acquiredCollection = getOrMakeTransactionResources(opCtx).addAcquiredCollection( {prerequisites, @@ -451,11 +441,11 @@ CollectionAcquisitionRequest CollectionAcquisitionRequest::fromOpCtx( return CollectionAcquisitionRequest(nssOrUUID, placementConcern, readConcern, operationType); } -const UUID& ScopedCollectionAcquisition::uuid() const { +UUID ScopedCollectionAcquisition::uuid() const { invariant(exists(), str::stream() << "Collection " << nss().toStringForErrorMsg() << " doesn't exist, so its UUID cannot be obtained"); - return *_acquiredCollection.prerequisites.uuid; + return _acquiredCollection.collectionPtr->uuid(); } const ScopedCollectionDescription& ScopedCollectionAcquisition::getShardingDescription() const { @@ -473,6 +463,13 @@ const boost::optional<ScopedCollectionFilter>& ScopedCollectionAcquisition::getS return _acquiredCollection.ownershipFilter; } +const CollectionPtr& ScopedCollectionAcquisition::getCollectionPtr() const { + tassert(ErrorCodes::InternalError, + "Collection acquisition has been invalidated", + !_acquiredCollection.invalidated); + return _acquiredCollection.collectionPtr; +} + ScopedCollectionAcquisition::~ScopedCollectionAcquisition() { if (_opCtx) { const auto& transactionResources = getTransactionResources(_opCtx); @@ -835,11 +832,16 @@ ScopedLocalCatalogWriteFence::ScopedLocalCatalogWriteFence(OperationContext* opC // OnCommit, there is nothing to do because the caller is not allowed to use the collection in // the scope of the ScopedLocalCatalogWriteFence and the destructor will take care of updating // the acquisition to point to the latest changed value. + std::weak_ptr<shard_role_details::AcquiredCollection::SharedImpl> sharedImplWeakPtr = + _acquiredCollection->sharedImpl; opCtx->recoveryUnit()->onRollback( - [acquiredCollection = _acquiredCollection](OperationContext* opCtx) mutable { + [acquiredCollection = _acquiredCollection, + sharedImplWeakPtr = sharedImplWeakPtr](OperationContext* opCtx) mutable { // OnRollback, the acquired collection must be set to reference the previously // established catalog snapshot - _updateAcquiredLocalCollection(opCtx, acquiredCollection); + if (!sharedImplWeakPtr.expired()) { + _updateAcquiredLocalCollection(opCtx, acquiredCollection); + } }); } @@ -851,13 +853,17 @@ void ScopedLocalCatalogWriteFence::_updateAcquiredLocalCollection( OperationContext* opCtx, shard_role_details::AcquiredCollection* acquiredCollection) { try { const auto catalog = CollectionCatalog::latest(opCtx); + const auto& nss = acquiredCollection->prerequisites.nss; auto collection = catalog->lookupCollectionByNamespace(opCtx, acquiredCollection->prerequisites.nss); - invariant(collection); - + checkCollectionUUIDMismatch(opCtx, nss, collection, acquiredCollection->prerequisites.uuid); acquiredCollection->collectionPtr = CollectionPtr(collection); - } catch (...) { - fassertFailedWithStatus(737661, exceptionToStatus()); + } catch (const DBException& ex) { + LOGV2_DEBUG(7653800, + 1, + "Failed to update ScopedLocalCatalogWriteFence", + "ex"_attr = redact(ex.toString())); + acquiredCollection->invalidated = true; } } @@ -885,13 +891,20 @@ boost::optional<YieldedTransactionResources> yieldTransactionResourcesFromOperat invariant(!transactionResources->yielded); - // Yielding kLocalCatalogOnlyWithPotentialDataLoss acquisitions is not allowed. 
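The ScopedLocalCatalogWriteFence change above guards its onRollback callback with a weak_ptr to a token owned by the acquisition, so the handler becomes a no-op if the acquisition has already been destroyed when rollback fires. A self-contained sketch of that guard technique, with all names invented for illustration:

    #include <functional>
    #include <memory>

    struct LifetimeToken {};  // stands in for AcquiredCollection::SharedImpl

    class Owner {
    public:
        // Build a callback that runs 'fn' only while this Owner is still alive.
        std::function<void()> makeGuardedCallback(std::function<void()> fn) {
            std::weak_ptr<LifetimeToken> weak = _token;
            return [weak, fn = std::move(fn)] {
                if (!weak.expired()) {  // Owner already destroyed? Skip the work.
                    fn();
                }
            };
        }

    private:
        std::shared_ptr<LifetimeToken> _token = std::make_shared<LifetimeToken>();
    };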
+ for (auto& acquisition : transactionResources->acquiredCollections) { + // Yielding kLocalCatalogOnlyWithPotentialDataLoss acquisitions is not allowed. invariant( !stdx::holds_alternative<AcquisitionPrerequisites::PlacementConcernPlaceholder>( acquisition.prerequisites.placementConcern), str::stream() << "Collection " << acquisition.prerequisites.nss.toStringForErrorMsg() << " acquired with special placement concern and cannot be yielded"); + + // If the uuid wasn't originally set on the prerequisites, set it now so that on restore + // from yield we can check we are restoring the same instance of the ns. + if (!acquisition.prerequisites.uuid && acquisition.collectionPtr) { + acquisition.prerequisites.uuid = acquisition.collectionPtr->uuid(); + } } // Yielding view acquisitions is not supported. diff --git a/src/mongo/db/shard_role.h b/src/mongo/db/shard_role.h index 75ce22b12f1..71853124a5f 100644 --- a/src/mongo/db/shard_role.h +++ b/src/mongo/db/shard_role.h @@ -199,14 +199,14 @@ public: * Returns whether the acquisition found a collection or the collection didn't exist. */ bool exists() const { - return bool(_acquiredCollection.prerequisites.uuid); + return bool(_acquiredCollection.collectionPtr); } /** * Returns the UUID of the acquired collection, but this operation is only allowed if the * collection `exists()`, otherwise this method will invariant. */ - const UUID& uuid() const; + UUID uuid() const; // Access to services associated with the specified collection top to bottom on the hierarchical // stack @@ -217,10 +217,7 @@ public: const boost::optional<ScopedCollectionFilter>& getShardingFilter() const; // Local catalog services - - const CollectionPtr& getCollectionPtr() const { - return _acquiredCollection.collectionPtr; - } + const CollectionPtr& getCollectionPtr() const; private: friend class ScopedLocalCatalogWriteFence; diff --git a/src/mongo/db/shard_role_test.cpp b/src/mongo/db/shard_role_test.cpp index 167780ce139..57626db51d4 100644 --- a/src/mongo/db/shard_role_test.cpp +++ b/src/mongo/db/shard_role_test.cpp @@ -1423,6 +1423,64 @@ TEST_F(ShardRoleTest, RestoreForWriteFailsIfCollectionIsNowAView) { testRestoreFailsIfCollectionIsNowAView(AcquisitionPrerequisites::kWrite); } +TEST_F(ShardRoleTest, RestoreCollectionCreatedUnderScopedLocalCatalogWriteFence) { + const auto nss = NamespaceString::createNamespaceString_forTest(dbNameTestDb, "inexistent"); + auto acquisition = acquireCollection( + opCtx(), + {nss, PlacementConcern{{}, {}}, repl::ReadConcernArgs(), AcquisitionPrerequisites::kWrite}, + MODE_IX); + ASSERT_FALSE(acquisition.exists()); + + // Create the collection + { + WriteUnitOfWork wuow(opCtx()); + ScopedLocalCatalogWriteFence scopedLocalCatalogWriteFence(opCtx(), &acquisition); + createTestCollection(opCtx(), nss); + wuow.commit(); + } + ASSERT_TRUE(acquisition.exists()); + + // Yield + auto yieldedTransactionResources = yieldTransactionResourcesFromOperationContext(opCtx()); + ASSERT(yieldedTransactionResources); + + // Restore works + restoreTransactionResourcesToOperationContext(opCtx(), std::move(*yieldedTransactionResources)); +} + +TEST_F(ShardRoleTest, + RestoreCollectionCreatedUnderScopedLocalCatalogWriteFenceFailsIfNoLongerExists) { + const auto nss = NamespaceString::createNamespaceString_forTest(dbNameTestDb, "inexistent"); + auto acquisition = acquireCollection( + opCtx(), + {nss, PlacementConcern{{}, {}}, repl::ReadConcernArgs(), AcquisitionPrerequisites::kWrite}, + MODE_IX); + ASSERT_FALSE(acquisition.exists()); + + // Create the collection + { + 
WriteUnitOfWork wuow(opCtx()); + ScopedLocalCatalogWriteFence scopedLocalCatalogWriteFence(opCtx(), &acquisition); + createTestCollection(opCtx(), nss); + wuow.commit(); + } + ASSERT_TRUE(acquisition.exists()); + + // Yield + auto yieldedTransactionResources = yieldTransactionResourcesFromOperationContext(opCtx()); + ASSERT(yieldedTransactionResources); + + // Drop the collection + DBDirectClient client(opCtx()); + client.dropCollection(nss); + + // Restore should fail + ASSERT_THROWS_CODE(restoreTransactionResourcesToOperationContext( + opCtx(), std::move(*yieldedTransactionResources)), + DBException, + ErrorCodes::CollectionUUIDMismatch); +} + // --------------------------------------------------------------------------- // Storage snapshot @@ -1584,5 +1642,58 @@ TEST_F(ShardRoleTest, ScopedLocalCatalogWriteFenceOutsideWUOURollback) { ASSERT(!acquisition.getCollectionPtr()->isTemporary()); } +TEST_F(ShardRoleTest, ScopedLocalCatalogWriteFenceWUOWRollbackAfterAcquisitionOutOfScope) { + // Tests that nothing breaks if ScopedLocalCatalogWriteFence's onRollback handler is executed + // when the collection acquisition has already gone out of scope. + WriteUnitOfWork wuow1(opCtx()); + { + auto acquisition = acquireCollection(opCtx(), + {nssShardedCollection1, + PlacementConcern{{}, shardVersionShardedCollection1}, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kRead}, + MODE_IX); + ScopedLocalCatalogWriteFence(opCtx(), &acquisition); + } +} + +TEST_F(ShardRoleTest, ScopedLocalCatalogWriteFenceWUOWRollbackAfterANotherClientCreatedCollection) { + const NamespaceString nss = + NamespaceString::createNamespaceString_forTest(dbNameTestDb, "inexistent"); + + // Acquire a collection that does not exist. + auto acquisition = acquireCollection( + opCtx(), + {nss, PlacementConcern{{}, {}}, repl::ReadConcernArgs(), AcquisitionPrerequisites::kWrite}, + MODE_IX); + ASSERT_FALSE(acquisition.exists()); + + // Another client creates the collection + { + auto newClient = opCtx()->getServiceContext()->makeClient("MigrationCoordinator"); + auto newOpCtx = newClient->makeOperationContext(); + createTestCollection(newOpCtx.get(), nss); + } + + // Acquisition still reflects that the collection does not exist. + ASSERT_FALSE(acquisition.exists()); + + // Original client attempts to create the collection, which will result in a WriteConflict and + // rollback. + { + WriteUnitOfWork wuow(opCtx()); + ScopedLocalCatalogWriteFence localCatalogWriteFence(opCtx(), &acquisition); + auto db = DatabaseHolder::get(opCtx())->openDb(opCtx(), nss.dbName()); + ASSERT_THROWS_CODE(db->createCollection(opCtx(), nss, CollectionOptions()), + DBException, + ErrorCodes::WriteConflict); + wuow.commit(); + } + + // Check that after rollback the acquisition has been updated to reflect the latest state of the + // catalog (i.e. the collection exists). 
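The new tests pin down the yield/restore contract for acquisitions made before the collection existed: restore succeeds if the collection still matches, and throws CollectionUUIDMismatch if it was dropped while yielded. The usage pattern under test, condensed into an illustrative function:

    // Illustrative condensation of the test flow above.
    void yieldThenRestore(OperationContext* opCtx) {
        auto yielded = yieldTransactionResourcesFromOperationContext(opCtx);
        invariant(yielded);
        // ... lock-free work happens here; if another client drops the
        // collection now, the restore below throws CollectionUUIDMismatch.
        restoreTransactionResourcesToOperationContext(opCtx, std::move(*yielded));
    }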
+ ASSERT_TRUE(acquisition.exists()); +} + } // namespace } // namespace mongo diff --git a/src/mongo/db/storage/storage_repair_observer_test.cpp b/src/mongo/db/storage/storage_repair_observer_test.cpp index 268dcebf993..e5960aa75be 100644 --- a/src/mongo/db/storage/storage_repair_observer_test.cpp +++ b/src/mongo/db/storage/storage_repair_observer_test.cpp @@ -39,6 +39,7 @@ #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/shard_role.h" #include "mongo/db/storage/storage_repair_observer.h" #include "mongo/logv2/log.h" #include "mongo/unittest/death_test.h" @@ -73,10 +74,14 @@ public: void createMockReplConfig(OperationContext* opCtx) { BSONObj replConfig; Lock::DBLock dbLock(opCtx, DatabaseName::kLocal, MODE_X); - Helpers::putSingleton( + auto coll = acquireCollection( opCtx, - NamespaceString::createNamespaceString_forTest(boost::none, "local.system.replset"), - replConfig); + CollectionAcquisitionRequest(NamespaceString(NamespaceString::kSystemReplSetNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_X); + Helpers::putSingleton(opCtx, coll, replConfig); } void assertReplConfigValid(OperationContext* opCtx, bool valid) { diff --git a/src/mongo/db/transaction/transaction_participant_test.cpp b/src/mongo/db/transaction/transaction_participant_test.cpp index 0c00eaf5c4c..9fb8a1d0851 100644 --- a/src/mongo/db/transaction/transaction_participant_test.cpp +++ b/src/mongo/db/transaction/transaction_participant_test.cpp @@ -49,6 +49,7 @@ #include "mongo/db/session/session_catalog.h" #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/db/shard_role.h" #include "mongo/db/stats/fill_locker_info.h" #include "mongo/db/storage/durable_history_pin.h" #include "mongo/db/transaction/server_transactions_metrics.h" @@ -6230,9 +6231,11 @@ TEST_F(TxnParticipantTest, CommitSplitPreparedTransaction) { // Update `2` to increment its `value` to 2. This must be done in the same split session as the // insert. callUnderSplitSession(splitSessions[1].session, [nullOpDbg](OperationContext* opCtx) { - AutoGetCollection userColl(opCtx, kNss, LockMode::MODE_IX); - Helpers::update( - opCtx, userColl->ns(), BSON("_id" << 2), BSON("$inc" << BSON("value" << 1))); + auto userColl = acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx(opCtx, kNss, AcquisitionPrerequisites::kWrite), + MODE_IX); + Helpers::update(opCtx, userColl, BSON("_id" << 2), BSON("$inc" << BSON("value" << 1))); }); // Mimic the methods to call for a secondary performing a split prepare. 
Those are called inside diff --git a/src/mongo/db/transaction_resources.h b/src/mongo/db/transaction_resources.h index 57b3f87b0f6..951e7c43d9a 100644 --- a/src/mongo/db/transaction_resources.h +++ b/src/mongo/db/transaction_resources.h @@ -96,6 +96,21 @@ struct AcquisitionPrerequisites { namespace shard_role_details { struct AcquiredCollection { + AcquiredCollection(AcquisitionPrerequisites prerequisites, + std::shared_ptr<Lock::DBLock> dbLock, + boost::optional<Lock::CollectionLock> collectionLock, + boost::optional<ScopedCollectionDescription> collectionDescription, + boost::optional<ScopedCollectionFilter> ownershipFilter, + CollectionPtr collectionPtr) + : prerequisites(std::move(prerequisites)), + dbLock(std::move(dbLock)), + collectionLock(std::move(collectionLock)), + collectionDescription(std::move(collectionDescription)), + ownershipFilter(std::move(ownershipFilter)), + collectionPtr(std::move(collectionPtr)), + invalidated(false), + sharedImpl(std::make_shared<SharedImpl>()) {} + AcquisitionPrerequisites prerequisites; std::shared_ptr<Lock::DBLock> dbLock; @@ -105,6 +120,18 @@ struct AcquiredCollection { boost::optional<ScopedCollectionFilter> ownershipFilter; CollectionPtr collectionPtr; + + // Indicates whether this acquisition has been invalidated after a ScopedLocalCatalogWriteFence + // was unable to restore it on rollback. + bool invalidated; + + // Used by the ScopedLocalCatalogWriteFence to track the lifetime of AcquiredCollection. + // ScopedLocalCatalogWriteFence will hold a weak_ptr pointing to 'sharedImpl'. The 'onRollback' + // handler it installs will use that weak_ptr to determine if the AcquiredCollection is still + // alive. + // TODO: (jordist) SERVER-XXXXX Rework this. + struct SharedImpl {}; + std::shared_ptr<SharedImpl> sharedImpl; }; struct AcquiredView { diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp index e50510aa2c6..8861e9d1fda 100644 --- a/src/mongo/dbtests/query_stage_update.cpp +++ b/src/mongo/dbtests/query_stage_update.cpp @@ -52,6 +52,7 @@ #include "mongo/db/ops/update_request.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/service_context.h" +#include "mongo/db/shard_role.h" #include "mongo/db/update/update_driver.h" #include "mongo/dbtests/dbtests.h" @@ -190,12 +191,15 @@ public: void run() { // Run the update. { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + const auto collection = + acquireCollection(&_opCtx, + CollectionAcquisitionRequest::fromOpCtx( + &_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(collection.exists()); CurOp& curOp = *CurOp::get(_opCtx); OpDebug* opDebug = &curOp.debug(); UpdateDriver driver(_expCtx); - CollectionPtr collection = ctx.getCollection(); - ASSERT(collection); // Collection should be empty. ASSERT_EQUALS(0U, count(BSONObj())); @@ -254,7 +258,12 @@ public: void run() { // Run the update. { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + const auto collection = + acquireCollection(&_opCtx, + CollectionAcquisitionRequest::fromOpCtx( + &_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(collection.exists()); // Populate the collection. for (int i = 0; i < 10; ++i) { @@ -265,13 +274,10 @@ public: CurOp& curOp = *CurOp::get(_opCtx); OpDebug* opDebug = &curOp.debug(); UpdateDriver driver(_expCtx); - CollectionPtr coll( - CollectionCatalog::get(&_opCtx)->lookupCollectionByNamespace(&_opCtx, nss)); - ASSERT(coll); // Get the RecordIds that would be returned by an in-order scan. 
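The transaction_resources.h hunk above gives AcquiredCollection an explicit constructor and two new members: an invalidated flag, consulted by getCollectionPtr() before handing out the pointer, and a SharedImpl token whose only job is to let the write fence's weak_ptr observe the struct's lifetime. A reduced stand-in showing just the new shape; the real struct's lock, prerequisites and sharding members are elided:

    // Reduced stand-in for shard_role_details::AcquiredCollection, for
    // illustration only.
    struct AcquiredCollectionShape {
        // Set when a rollback could not restore the acquisition; accessors
        // tassert on it before returning the CollectionPtr.
        bool invalidated = false;

        // Lifetime token observed via weak_ptr by ScopedLocalCatalogWriteFence.
        struct SharedImpl {};
        std::shared_ptr<SharedImpl> sharedImpl = std::make_shared<SharedImpl>();
    };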
vector<RecordId> recordIds; - getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds); + getRecordIds(collection.getCollectionPtr(), CollectionScanParams::FORWARD, &recordIds); auto request = UpdateRequest(); request.setNamespaceString(nss); @@ -304,10 +310,10 @@ public: auto ws = make_unique<WorkingSet>(); auto cs = make_unique<CollectionScan>( - _expCtx.get(), coll, collScanParams, ws.get(), cq->root()); + _expCtx.get(), collection.getCollectionPtr(), collScanParams, ws.get(), cq->root()); - auto updateStage = - make_unique<UpdateStage>(_expCtx.get(), updateParams, ws.get(), coll, cs.release()); + auto updateStage = make_unique<UpdateStage>( + _expCtx.get(), updateParams, ws.get(), collection, cs.release()); const UpdateStats* stats = static_cast<const UpdateStats*>(updateStage->getSpecificStats()); @@ -322,10 +328,12 @@ public: // Remove recordIds[targetDocIndex]; static_cast<PlanStage*>(updateStage.get())->saveState(); - BSONObj targetDoc = coll->docFor(&_opCtx, recordIds[targetDocIndex]).value(); + BSONObj targetDoc = + collection.getCollectionPtr()->docFor(&_opCtx, recordIds[targetDocIndex]).value(); ASSERT(!targetDoc.isEmpty()); remove(targetDoc); - static_cast<PlanStage*>(updateStage.get())->restoreState(&coll); + static_cast<PlanStage*>(updateStage.get()) + ->restoreState(&collection.getCollectionPtr()); // Do the remaining updates. while (!updateStage->isEOF()) { @@ -374,10 +382,12 @@ public: ASSERT_EQUALS(10U, count(BSONObj())); // Various variables we'll need. - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + const auto collection = acquireCollection( + &_opCtx, + CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(collection.exists()); OpDebug* opDebug = &CurOp::get(_opCtx)->debug(); - const CollectionPtr& coll = ctx.getCollection(); - ASSERT(coll); auto request = UpdateRequest(); request.setNamespaceString(nss); UpdateDriver driver(_expCtx); @@ -388,7 +398,7 @@ public: // Get the RecordIds that would be returned by an in-order scan. vector<RecordId> recordIds; - getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds); + getRecordIds(collection.getCollectionPtr(), CollectionScanParams::FORWARD, &recordIds); // Populate the request. request.setQuery(query); @@ -419,8 +429,8 @@ public: UpdateStageParams updateParams(&request, &driver, opDebug); updateParams.canonicalQuery = cq.get(); - const auto updateStage = - make_unique<UpdateStage>(_expCtx.get(), updateParams, ws.get(), coll, qds.release()); + const auto updateStage = make_unique<UpdateStage>( + _expCtx.get(), updateParams, ws.get(), collection, qds.release()); // Should return advanced. id = WorkingSet::INVALID_ID; @@ -444,7 +454,7 @@ public: // Should have done the update. BSONObj newDoc = BSON("_id" << targetDocIndex << "foo" << targetDocIndex << "x" << 0); vector<BSONObj> objs; - getCollContents(coll, &objs); + getCollContents(collection.getCollectionPtr(), &objs); ASSERT_BSONOBJ_EQ(objs[targetDocIndex], newDoc); // That should be it. @@ -467,10 +477,12 @@ public: ASSERT_EQUALS(50U, count(BSONObj())); // Various variables we'll need. 
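In the dbtests, WriteContextForTests gives way to the same acquisition API, and the reader/writer split becomes explicit: CollectionScan still takes the CollectionPtr, while UpdateStage takes the acquisition. A fragment condensed from the hunks above, assuming ws, cq, collScanParams and updateParams from the surrounding test:

    // Condensed for illustration; not self-contained.
    auto cs = std::make_unique<CollectionScan>(
        _expCtx.get(), collection.getCollectionPtr(), collScanParams, ws.get(), cq->root());
    auto updateStage = std::make_unique<UpdateStage>(
        _expCtx.get(), updateParams, ws.get(), collection, cs.release());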
- dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + const auto collection = acquireCollection( + &_opCtx, + CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(collection.exists()); OpDebug* opDebug = &CurOp::get(_opCtx)->debug(); - const CollectionPtr& coll = ctx.getCollection(); - ASSERT(coll); auto request = UpdateRequest(); request.setNamespaceString(nss); UpdateDriver driver(_expCtx); @@ -481,7 +493,7 @@ public: // Get the RecordIds that would be returned by an in-order scan. vector<RecordId> recordIds; - getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds); + getRecordIds(collection.getCollectionPtr(), CollectionScanParams::FORWARD, &recordIds); // Populate the request. request.setQuery(query); @@ -512,8 +524,8 @@ public: UpdateStageParams updateParams(&request, &driver, opDebug); updateParams.canonicalQuery = cq.get(); - auto updateStage = - make_unique<UpdateStage>(_expCtx.get(), updateParams, ws.get(), coll, qds.release()); + auto updateStage = make_unique<UpdateStage>( + _expCtx.get(), updateParams, ws.get(), collection, qds.release()); // Should return advanced. id = WorkingSet::INVALID_ID; @@ -537,7 +549,7 @@ public: // Should have done the update. vector<BSONObj> objs; - getCollContents(coll, &objs); + getCollContents(collection.getCollectionPtr(), &objs); ASSERT_BSONOBJ_EQ(objs[targetDocIndex], newDoc); // That should be it. diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp index c05af813d64..8e181031f98 100644 --- a/src/mongo/dbtests/repltests.cpp +++ b/src/mongo/dbtests/repltests.cpp @@ -244,7 +244,7 @@ protected: uassertStatusOK(applyCommand_inlock( &_opCtx, ApplierOperation{&entry}, getOplogApplicationMode())); } else { - const auto coll = acquireCollection( + auto coll = acquireCollection( &_opCtx, {nss(), {}, {}, AcquisitionPrerequisites::kWrite}, MODE_IX); WriteUnitOfWork wunit(&_opCtx); auto lastApplied = repl::ReplicationCoordinator::get(_opCtx.getServiceContext()) @@ -254,7 +254,6 @@ protected: ASSERT_OK(_opCtx.recoveryUnit()->setTimestamp(nextTimestamp)); const bool dataIsConsistent = true; uassertStatusOK(applyOperation_inlock(&_opCtx, - ctx.db(), coll, ApplierOperation{&entry}, false, |