summaryrefslogtreecommitdiff
path: root/src/mongo/db/s
diff options
context:
space:
mode:
authorJordi Serra Torrens <jordi.serra-torrens@mongodb.com>2023-05-09 13:34:04 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2023-05-09 14:59:57 +0000
commitef9da916d39ac0ea7c6aa3426dfdfbabe3d2cd5f (patch)
tree7640854cde43fb42bc090563658fd285def9c8fc /src/mongo/db/s
parent75825b6618a887a76301b770e68c82077a84cb68 (diff)
downloadmongo-ef9da916d39ac0ea7c6aa3426dfdfbabe3d2cd5f.tar.gz
SERVER-76538 Use acquisitions on internal update paths
Diffstat (limited to 'src/mongo/db/s')
-rw-r--r--src/mongo/db/s/migration_destination_manager.cpp12
-rw-r--r--src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp8
-rw-r--r--src/mongo/db/s/resharding/resharding_donor_service.cpp12
-rw-r--r--src/mongo/db/s/resharding/resharding_oplog_application.cpp53
-rw-r--r--src/mongo/db/s/resharding/resharding_oplog_application.h10
-rw-r--r--src/mongo/db/s/sharding_index_catalog_ddl_util.cpp119
-rw-r--r--src/mongo/db/s/sharding_initialization_mongod.cpp12
-rw-r--r--src/mongo/db/s/sharding_state_recovery.cpp68
8 files changed, 168 insertions, 126 deletions
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 9210fc9cc2c..c81b3bed882 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -1731,11 +1731,17 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx, const
BSONObjIterator i(xfer["reload"].Obj());
while (i.more()) {
totalDocs++;
- AutoGetCollection autoColl(opCtx, _nss, MODE_IX);
+ auto collection = acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest(_nss,
+ AcquisitionPrerequisites::kPretendUnsharded,
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ MODE_IX);
uassert(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Collection " << _nss.toStringForErrorMsg()
<< " was dropped in the middle of the migration",
- autoColl.getCollection());
+ collection.exists());
BSONObj updatedDoc = i.next().Obj();
@@ -1765,7 +1771,7 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx, const
// We are in write lock here, so sure we aren't killing
writeConflictRetry(opCtx, "transferModsUpdates", _nss.ns(), [&] {
- auto res = Helpers::upsert(opCtx, _nss, updatedDoc, true);
+ auto res = Helpers::upsert(opCtx, collection, updatedDoc, true);
if (!res.upsertedId.isEmpty()) {
changeInOrphans++;
}
diff --git a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
index 33016bf5bf6..995a1db5eb9 100644
--- a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/s/sharding_write_router.h"
#include "mongo/db/session/session_catalog_mongod.h"
#include "mongo/db/shard_id.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/transaction/transaction_participant.h"
#include "mongo/s/catalog/sharding_catalog_client_mock.h"
#include "mongo/s/catalog/type_shard.h"
@@ -277,8 +278,11 @@ protected:
const BSONObj& filter,
const BSONObj& update,
const ReshardingEnv& env) {
- AutoGetCollection coll(opCtx, nss, MODE_IX);
- Helpers::update(opCtx, nss, filter, update);
+ auto coll = acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(opCtx, nss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ Helpers::update(opCtx, coll, filter, update);
}
void deleteDoc(OperationContext* opCtx,
diff --git a/src/mongo/db/s/resharding/resharding_donor_service.cpp b/src/mongo/db/s/resharding/resharding_donor_service.cpp
index 09aa5aa0845..e3a82f24d7f 100644
--- a/src/mongo/db/s/resharding/resharding_donor_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_service.cpp
@@ -997,15 +997,21 @@ void ReshardingDonorService::DonorStateMachine::_updateDonorDocument(
const auto& nss = NamespaceString::kDonorReshardingOperationsNamespace;
writeConflictRetry(opCtx.get(), "DonorStateMachine::_updateDonorDocument", nss.toString(), [&] {
- AutoGetCollection coll(opCtx.get(), nss, MODE_X);
+ auto coll = acquireCollection(
+ opCtx.get(),
+ CollectionAcquisitionRequest(NamespaceString(nss),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx.get()),
+ AcquisitionPrerequisites::kWrite),
+ MODE_X);
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << nss.toStringForErrorMsg() << " does not exist",
- coll);
+ coll.exists());
WriteUnitOfWork wuow(opCtx.get());
Helpers::update(opCtx.get(),
- nss,
+ coll,
BSON(ReshardingDonorDocument::kReshardingUUIDFieldName
<< _metadata.getReshardingUUID()),
BSON("$set" << BSON(ReshardingDonorDocument::kMutableStateFieldName
diff --git a/src/mongo/db/s/resharding/resharding_oplog_application.cpp b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
index c25c379139a..154c6b457ba 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_application.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
@@ -152,7 +152,7 @@ Status ReshardingOplogApplicationRules::applyOperation(
const auto outputDb = AutoGetDb(opCtx, _outputNss.dbName(), MODE_IX);
- const auto outputColl =
+ auto outputColl =
opCtx->runWithDeadline(getDeadline(opCtx), opCtx->getTimeoutError(), [&] {
return acquireCollection(
opCtx,
@@ -167,7 +167,7 @@ Status ReshardingOplogApplicationRules::applyOperation(
<< _outputNss.toStringForErrorMsg(),
outputColl.exists());
- const auto stashColl =
+ auto stashColl =
opCtx->runWithDeadline(getDeadline(opCtx), opCtx->getTimeoutError(), [&] {
return acquireCollection(
opCtx,
@@ -185,20 +185,12 @@ Status ReshardingOplogApplicationRules::applyOperation(
auto opType = op.getOpType();
switch (opType) {
case repl::OpTypeEnum::kInsert:
- _applyInsert_inlock(opCtx,
- outputDb.getDb(),
- outputColl.getCollectionPtr(),
- stashColl.getCollectionPtr(),
- op);
+ _applyInsert_inlock(opCtx, outputColl, stashColl, op);
_applierMetrics->onInsertApplied();
break;
case repl::OpTypeEnum::kUpdate:
- _applyUpdate_inlock(opCtx,
- outputDb.getDb(),
- outputColl.getCollectionPtr(),
- stashColl.getCollectionPtr(),
- op);
+ _applyUpdate_inlock(opCtx, outputColl, stashColl, op);
_applierMetrics->onUpdateApplied();
break;
case repl::OpTypeEnum::kDelete: {
@@ -241,9 +233,8 @@ Status ReshardingOplogApplicationRules::applyOperation(
}
void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCtx,
- Database* db,
- const CollectionPtr& outputColl,
- const CollectionPtr& stashColl,
+ ScopedCollectionAcquisition& outputColl,
+ ScopedCollectionAcquisition& stashColl,
const repl::OplogEntry& op) const {
/**
* The rules to apply ordinary insert operations are as follows:
@@ -276,7 +267,7 @@ void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCt
// First, query the conflict stash collection using [op _id] as the query. If a doc exists,
// apply rule #1 and run a replacement update on the stash collection.
- auto stashCollDoc = _queryStashCollById(opCtx, stashColl, idQuery);
+ auto stashCollDoc = _queryStashCollById(opCtx, stashColl.getCollectionPtr(), idQuery);
if (!stashCollDoc.isEmpty()) {
auto request = UpdateRequest();
request.setNamespaceString(_myStashNss);
@@ -285,7 +276,7 @@ void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCt
request.setUpsert(false);
request.setFromOplogApplication(true);
- UpdateResult ur = update(opCtx, db, request);
+ UpdateResult ur = update(opCtx, stashColl, request);
invariant(ur.numMatched != 0);
_applierMetrics->onWriteToStashCollections();
@@ -296,11 +287,12 @@ void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCt
// Query the output collection for a doc with _id == [op _id]. If a doc does not exist, apply
// rule #2 and insert this doc into the output collection.
BSONObj outputCollDoc;
- auto foundDoc = Helpers::findByIdAndNoopUpdate(opCtx, outputColl, idQuery, outputCollDoc);
+ auto foundDoc = Helpers::findByIdAndNoopUpdate(
+ opCtx, outputColl.getCollectionPtr(), idQuery, outputCollDoc);
if (!foundDoc) {
uassertStatusOK(collection_internal::insertDocument(opCtx,
- outputColl,
+ outputColl.getCollectionPtr(),
InsertStatement(oField),
nullptr /* OpDebug */,
false /* fromMigrate */));
@@ -323,7 +315,7 @@ void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCt
request.setUpsert(false);
request.setFromOplogApplication(true);
- UpdateResult ur = update(opCtx, db, request);
+ UpdateResult ur = update(opCtx, outputColl, request);
invariant(ur.numMatched != 0);
return;
@@ -331,16 +323,18 @@ void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCt
// The doc does not belong to '_donorShardId' under the original shard key, so apply rule #4
// and insert the contents of 'op' to the stash collection.
- uassertStatusOK(collection_internal::insertDocument(
- opCtx, stashColl, InsertStatement(oField), nullptr /* OpDebug */, false /* fromMigrate */));
+ uassertStatusOK(collection_internal::insertDocument(opCtx,
+ stashColl.getCollectionPtr(),
+ InsertStatement(oField),
+ nullptr /* OpDebug */,
+ false /* fromMigrate */));
_applierMetrics->onWriteToStashCollections();
}
void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCtx,
- Database* db,
- const CollectionPtr& outputColl,
- const CollectionPtr& stashColl,
+ ScopedCollectionAcquisition& outputColl,
+ ScopedCollectionAcquisition& stashColl,
const repl::OplogEntry& op) const {
/**
* The rules to apply ordinary update operations are as follows:
@@ -375,7 +369,7 @@ void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCt
// First, query the conflict stash collection using [op _id] as the query. If a doc exists,
// apply rule #1 and update the doc from the stash collection.
- auto stashCollDoc = _queryStashCollById(opCtx, stashColl, idQuery);
+ auto stashCollDoc = _queryStashCollById(opCtx, stashColl.getCollectionPtr(), idQuery);
if (!stashCollDoc.isEmpty()) {
auto request = UpdateRequest();
request.setNamespaceString(_myStashNss);
@@ -383,7 +377,7 @@ void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCt
request.setUpdateModification(std::move(updateMod));
request.setUpsert(false);
request.setFromOplogApplication(true);
- UpdateResult ur = update(opCtx, db, request);
+ UpdateResult ur = update(opCtx, stashColl, request);
invariant(ur.numMatched != 0);
@@ -394,7 +388,8 @@ void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCt
// Query the output collection for a doc with _id == [op _id].
BSONObj outputCollDoc;
- auto foundDoc = Helpers::findByIdAndNoopUpdate(opCtx, outputColl, idQuery, outputCollDoc);
+ auto foundDoc = Helpers::findByIdAndNoopUpdate(
+ opCtx, outputColl.getCollectionPtr(), idQuery, outputCollDoc);
if (!foundDoc ||
!_sourceChunkMgr.keyBelongsToShard(
@@ -416,7 +411,7 @@ void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCt
request.setUpdateModification(std::move(updateMod));
request.setUpsert(false);
request.setFromOplogApplication(true);
- UpdateResult ur = update(opCtx, db, request);
+ UpdateResult ur = update(opCtx, outputColl, request);
invariant(ur.numMatched != 0);
}
diff --git a/src/mongo/db/s/resharding/resharding_oplog_application.h b/src/mongo/db/s/resharding/resharding_oplog_application.h
index 4a1d16f66ca..5d6c052ed0a 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_application.h
+++ b/src/mongo/db/s/resharding/resharding_oplog_application.h
@@ -79,16 +79,14 @@ public:
private:
// Applies an insert operation
void _applyInsert_inlock(OperationContext* opCtx,
- Database* db,
- const CollectionPtr& outputColl,
- const CollectionPtr& stashColl,
+ ScopedCollectionAcquisition& outputColl,
+ ScopedCollectionAcquisition& stashColl,
const repl::OplogEntry& op) const;
// Applies an update operation
void _applyUpdate_inlock(OperationContext* opCtx,
- Database* db,
- const CollectionPtr& outputColl,
- const CollectionPtr& stashColl,
+ ScopedCollectionAcquisition& outputColl,
+ ScopedCollectionAcquisition& stashColl,
const repl::OplogEntry& op) const;
// Applies a delete operation
diff --git a/src/mongo/db/s/sharding_index_catalog_ddl_util.cpp b/src/mongo/db/s/sharding_index_catalog_ddl_util.cpp
index 3f088911d2b..b83b62272a9 100644
--- a/src/mongo/db/s/sharding_index_catalog_ddl_util.cpp
+++ b/src/mongo/db/s/sharding_index_catalog_ddl_util.cpp
@@ -57,8 +57,8 @@ void deleteShardingIndexCatalogEntries(OperationContext* opCtx,
}
-const ScopedCollectionAcquisition& getAcquisitionForNss(
- const std::vector<ScopedCollectionAcquisition>& acquisitions, const NamespaceString& nss) {
+ScopedCollectionAcquisition& getAcquisitionForNss(
+ std::vector<ScopedCollectionAcquisition>& acquisitions, const NamespaceString& nss) {
auto it = std::find_if(acquisitions.begin(), acquisitions.end(), [&nss](auto& acquisition) {
return acquisition.nss() == nss;
});
@@ -80,7 +80,7 @@ void renameCollectionShardingIndexCatalog(OperationContext* opCtx,
WriteUnitOfWork wunit(opCtx);
AutoGetCollection fromToColl(
opCtx, fromNss, MODE_IX, AutoGetCollection::Options{}.secondaryNssOrUUIDs({toNss}));
- const auto acquisitions = acquireCollections(
+ auto acquisitions = acquireCollections(
opCtx,
{CollectionAcquisitionRequest(
NamespaceString(NamespaceString::kShardCollectionCatalogNamespace),
@@ -184,11 +184,24 @@ void addShardingIndexCatalogEntryToCollection(OperationContext* opCtx,
opCtx, "AddIndexCatalogEntry", NamespaceString::kShardIndexCatalogNamespace.ns(), [&]() {
WriteUnitOfWork wunit(opCtx);
AutoGetCollection userColl(opCtx, userCollectionNss, MODE_IX);
- AutoGetCollection collsColl(opCtx,
- NamespaceString::kShardCollectionCatalogNamespace,
- MODE_IX,
- AutoGetCollection::Options{}.secondaryNssOrUUIDs(
- {NamespaceString::kShardIndexCatalogNamespace}));
+ auto acquisitions = acquireCollections(
+ opCtx,
+ {CollectionAcquisitionRequest(
+ NamespaceString(NamespaceString::kShardCollectionCatalogNamespace),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ CollectionAcquisitionRequest(
+ NamespaceString(NamespaceString::kShardIndexCatalogNamespace),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite)},
+ MODE_IX);
+
+ auto& collsColl = getAcquisitionForNss(
+ acquisitions, NamespaceString::kShardCollectionCatalogNamespace);
+ const auto& idxColl =
+ getAcquisitionForNss(acquisitions, NamespaceString::kShardIndexCatalogNamespace);
{
// First get the document to check the index version if the document already exists
@@ -198,7 +211,7 @@ void addShardingIndexCatalogEntryToCollection(OperationContext* opCtx,
<< ShardAuthoritativeCollectionType::kUuidFieldName << collectionUUID);
BSONObj collectionDoc;
bool docExists =
- Helpers::findOne(opCtx, collsColl.getCollection(), query, collectionDoc);
+ Helpers::findOne(opCtx, collsColl.getCollectionPtr(), query, collectionDoc);
if (docExists) {
auto collection = ShardAuthoritativeCollectionType::parse(
IDLParserContext("AddIndexCatalogEntry"), collectionDoc);
@@ -225,18 +238,16 @@ void addShardingIndexCatalogEntryToCollection(OperationContext* opCtx,
<< ShardAuthoritativeCollectionType::kIndexVersionFieldName << lastmod));
request.setUpsert(true);
request.setFromOplogApplication(true);
- mongo::update(opCtx, collsColl.getDb(), request);
+ mongo::update(opCtx, collsColl, request);
}
- AutoGetCollection idxColl(opCtx, NamespaceString::kShardIndexCatalogNamespace, MODE_IX);
-
{
repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
BSONObjBuilder builder(indexCatalogEntry.toBSON());
auto idStr = format(FMT_STRING("{}_{}"), collectionUUID.toString(), name);
builder.append("_id", idStr);
uassertStatusOK(collection_internal::insertDocument(opCtx,
- idxColl.getCollection(),
+ idxColl.getCollectionPtr(),
InsertStatement{builder.obj()},
nullptr,
false));
@@ -245,7 +256,7 @@ void addShardingIndexCatalogEntryToCollection(OperationContext* opCtx,
opCtx->getServiceContext()->getOpObserver()->onModifyCollectionShardingIndexCatalog(
opCtx,
userCollectionNss,
- idxColl->uuid(),
+ idxColl.uuid(),
ShardingIndexCatalogInsertEntry(indexCatalogEntry).toBSON());
wunit.commit();
});
@@ -263,11 +274,25 @@ void removeShardingIndexCatalogEntryFromCollection(OperationContext* opCtx,
[&]() {
WriteUnitOfWork wunit(opCtx);
AutoGetCollection userColl(opCtx, nss, MODE_IX);
- AutoGetCollection collsColl(opCtx,
- NamespaceString::kShardCollectionCatalogNamespace,
- MODE_IX,
- AutoGetCollection::Options{}.secondaryNssOrUUIDs(
- {NamespaceString::kShardIndexCatalogNamespace}));
+ auto acquisitions = acquireCollections(
+ opCtx,
+ {CollectionAcquisitionRequest(
+ NamespaceString(NamespaceString::kShardCollectionCatalogNamespace),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ CollectionAcquisitionRequest(
+ NamespaceString(NamespaceString::kShardIndexCatalogNamespace),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite)},
+ MODE_IX);
+
+ auto& collsColl = getAcquisitionForNss(
+ acquisitions, NamespaceString::kShardCollectionCatalogNamespace);
+ const auto& idxColl =
+ getAcquisitionForNss(acquisitions, NamespaceString::kShardIndexCatalogNamespace);
+
{
// First get the document to check the index version if the document already exists
const auto query =
@@ -275,7 +300,7 @@ void removeShardingIndexCatalogEntryFromCollection(OperationContext* opCtx,
<< nss.ns() << ShardAuthoritativeCollectionType::kUuidFieldName << uuid);
BSONObj collectionDoc;
bool docExists =
- Helpers::findOne(opCtx, collsColl.getCollection(), query, collectionDoc);
+ Helpers::findOne(opCtx, collsColl.getCollectionPtr(), query, collectionDoc);
if (docExists) {
auto collection = ShardAuthoritativeCollectionType::parse(
IDLParserContext("RemoveIndexCatalogEntry"), collectionDoc);
@@ -301,18 +326,9 @@ void removeShardingIndexCatalogEntryFromCollection(OperationContext* opCtx,
<< ShardAuthoritativeCollectionType::kIndexVersionFieldName << lastmod));
request.setUpsert(true);
request.setFromOplogApplication(true);
- mongo::update(opCtx, collsColl.getDb(), request);
+ mongo::update(opCtx, collsColl, request);
}
- const auto idxColl =
- acquireCollection(opCtx,
- CollectionAcquisitionRequest(
- NamespaceString(NamespaceString::kShardIndexCatalogNamespace),
- PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
- repl::ReadConcernArgs::get(opCtx),
- AcquisitionPrerequisites::kWrite),
- MODE_IX);
-
{
repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
mongo::deleteObjects(opCtx,
@@ -343,18 +359,32 @@ void replaceCollectionShardingIndexCatalog(OperationContext* opCtx,
[&]() {
WriteUnitOfWork wunit(opCtx);
AutoGetCollection userColl(opCtx, nss, MODE_IX);
- AutoGetCollection collsColl(opCtx,
- NamespaceString::kShardCollectionCatalogNamespace,
- MODE_IX,
- AutoGetCollection::Options{}.secondaryNssOrUUIDs(
- {NamespaceString::kShardIndexCatalogNamespace}));
+ auto acquisitions = acquireCollections(
+ opCtx,
+ {CollectionAcquisitionRequest(
+ NamespaceString(NamespaceString::kShardCollectionCatalogNamespace),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ CollectionAcquisitionRequest(
+ NamespaceString(NamespaceString::kShardIndexCatalogNamespace),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite)},
+ MODE_IX);
+
+ auto& collsColl = getAcquisitionForNss(
+ acquisitions, NamespaceString::kShardCollectionCatalogNamespace);
+ const auto& idxColl =
+ getAcquisitionForNss(acquisitions, NamespaceString::kShardIndexCatalogNamespace);
+
{
const auto query =
BSON(ShardAuthoritativeCollectionType::kNssFieldName
<< nss.ns() << ShardAuthoritativeCollectionType::kUuidFieldName << uuid);
BSONObj collectionDoc;
bool docExists =
- Helpers::findOne(opCtx, collsColl.getCollection(), query, collectionDoc);
+ Helpers::findOne(opCtx, collsColl.getCollectionPtr(), query, collectionDoc);
if (docExists) {
auto collection = ShardAuthoritativeCollectionType::parse(
IDLParserContext("ReplaceIndexCatalogEntry"), collectionDoc);
@@ -383,17 +413,9 @@ void replaceCollectionShardingIndexCatalog(OperationContext* opCtx,
<< ShardAuthoritativeCollectionType::kIndexVersionFieldName << indexVersion));
request.setUpsert(true);
request.setFromOplogApplication(true);
- mongo::update(opCtx, collsColl.getDb(), request);
+ mongo::update(opCtx, collsColl, request);
}
- const auto idxColl =
- acquireCollection(opCtx,
- CollectionAcquisitionRequest(
- NamespaceString(NamespaceString::kShardIndexCatalogNamespace),
- PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
- repl::ReadConcernArgs::get(opCtx),
- AcquisitionPrerequisites::kWrite),
- MODE_IX);
{
// Clear old indexes.
repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
@@ -435,7 +457,7 @@ void dropCollectionShardingIndexCatalog(OperationContext* opCtx, const Namespace
WriteUnitOfWork wunit(opCtx);
Lock::DBLock dbLock(opCtx, nss.dbName(), MODE_IX);
Lock::CollectionLock collLock(opCtx, nss, MODE_IX);
- const auto acquisitions = acquireCollections(
+ auto acquisitions = acquireCollections(
opCtx,
{CollectionAcquisitionRequest(
NamespaceString(NamespaceString::kShardCollectionCatalogNamespace),
@@ -474,9 +496,6 @@ void dropCollectionShardingIndexCatalog(OperationContext* opCtx, const Namespace
mongo::deleteObjects(opCtx, collsColl, query, true);
}
- // AutoGetCollection idxColl(opCtx, NamespaceString::kShardIndexCatalogNamespace,
- // MODE_IX);
-
{
repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
deleteShardingIndexCatalogEntries(opCtx, idxColl, *collectionUUID);
@@ -501,7 +520,7 @@ void clearCollectionShardingIndexCatalog(OperationContext* opCtx,
[&]() {
WriteUnitOfWork wunit(opCtx);
AutoGetCollection userColl(opCtx, nss, MODE_IX);
- const auto acquisitions = acquireCollections(
+ auto acquisitions = acquireCollections(
opCtx,
{CollectionAcquisitionRequest(
NamespaceString(NamespaceString::kShardCollectionCatalogNamespace),
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index 2573113c24b..dc385b0449c 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -55,6 +55,7 @@
#include "mongo/db/s/shard_server_catalog_cache_loader.h"
#include "mongo/db/s/transaction_coordinator_service.h"
#include "mongo/db/server_options.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/vector_clock_metadata_hook.h"
#include "mongo/executor/network_interface_factory.h"
#include "mongo/executor/task_executor_pool.h"
@@ -506,8 +507,15 @@ void ShardingInitializationMongoD::updateShardIdentityConfigString(
write_ops::UpdateModification::parseFromClassicUpdate(updateObj));
try {
- AutoGetCollection autoColl(opCtx, NamespaceString::kServerConfigurationNamespace, MODE_IX);
- auto result = update(opCtx, autoColl.ensureDbExists(opCtx), updateReq);
+ auto collection =
+ acquireCollection(opCtx,
+ CollectionAcquisitionRequest(
+ NamespaceString(NamespaceString::kServerConfigurationNamespace),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ auto result = update(opCtx, collection, updateReq);
if (result.numMatched == 0) {
LOGV2_WARNING(22076,
"Failed to update config server connection string of shard identity "
diff --git a/src/mongo/db/s/sharding_state_recovery.cpp b/src/mongo/db/s/sharding_state_recovery.cpp
index 93d1b3cfd3d..70d037ef865 100644
--- a/src/mongo/db/s/sharding_state_recovery.cpp
+++ b/src/mongo/db/s/sharding_state_recovery.cpp
@@ -47,6 +47,7 @@
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/s/sharding_logging.h"
#include "mongo/db/s/sharding_state.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/vector_clock_mutable.h"
#include "mongo/db/write_concern.h"
#include "mongo/db/write_concern_options.h"
@@ -145,38 +146,43 @@ Status modifyRecoveryDocument(OperationContext* opCtx,
RecoveryDocument::ChangeType change,
const WriteConcernOptions& writeConcern) {
try {
- // Use boost::optional so we can release the locks early
- boost::optional<AutoGetDb> autoGetDb;
- autoGetDb.emplace(opCtx, NamespaceString::kServerConfigurationNamespace.dbName(), MODE_X);
-
- const auto configOpTime = [&]() {
- const auto vcTime = VectorClock::get(opCtx)->getTime();
- const auto vcConfigTimeTs = vcTime.configTime().asTimestamp();
- return mongo::repl::OpTime(vcConfigTimeTs, mongo::repl::OpTime::kUninitializedTerm);
- }();
-
- BSONObj updateObj = RecoveryDocument::createChangeObj(configOpTime, change);
-
- LOGV2_DEBUG(22083,
- 1,
- "Changing sharding recovery document {update}",
- "Changing sharding recovery document",
- "update"_attr = redact(updateObj));
-
- auto updateReq = UpdateRequest();
- updateReq.setNamespaceString(NamespaceString::kServerConfigurationNamespace);
- updateReq.setQuery(RecoveryDocument::getQuery());
- updateReq.setUpdateModification(
- write_ops::UpdateModification::parseFromClassicUpdate(updateObj));
- updateReq.setUpsert();
-
- UpdateResult result = update(opCtx, autoGetDb->ensureDbExists(opCtx), updateReq);
- invariant(result.numDocsModified == 1 || !result.upsertedId.isEmpty());
- invariant(result.numMatched <= 1);
-
- // Wait until the majority write concern has been satisfied, but do it outside of lock
- autoGetDb = boost::none;
+ {
+ auto collection = acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest(
+ NamespaceString(NamespaceString::kServerConfigurationNamespace),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ MODE_X);
+
+ const auto configOpTime = [&]() {
+ const auto vcTime = VectorClock::get(opCtx)->getTime();
+ const auto vcConfigTimeTs = vcTime.configTime().asTimestamp();
+ return mongo::repl::OpTime(vcConfigTimeTs, mongo::repl::OpTime::kUninitializedTerm);
+ }();
+
+ BSONObj updateObj = RecoveryDocument::createChangeObj(configOpTime, change);
+
+ LOGV2_DEBUG(22083,
+ 1,
+ "Changing sharding recovery document {update}",
+ "Changing sharding recovery document",
+ "update"_attr = redact(updateObj));
+
+ auto updateReq = UpdateRequest();
+ updateReq.setNamespaceString(NamespaceString::kServerConfigurationNamespace);
+ updateReq.setQuery(RecoveryDocument::getQuery());
+ updateReq.setUpdateModification(
+ write_ops::UpdateModification::parseFromClassicUpdate(updateObj));
+ updateReq.setUpsert();
+
+ UpdateResult result = update(opCtx, collection, updateReq);
+ invariant(result.numDocsModified == 1 || !result.upsertedId.isEmpty());
+ invariant(result.numMatched <= 1);
+ }
+ // Wait for write concern after having released the locks.
WriteConcernResult writeConcernResult;
return waitForWriteConcern(opCtx,
repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(),