summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJordi Serra Torrens <jordi.serra-torrens@mongodb.com>2023-04-25 10:09:40 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2023-04-25 10:55:11 +0000
commitc6f29c10cca4b962afa76d05f656264318333cf6 (patch)
tree0b58b9c533ebc1f0254ac031d02dce67863b9b87
parentd52ffd2461d42540bd29ff3ef996ada28fe6776d (diff)
downloadmongo-c6f29c10cca4b962afa76d05f656264318333cf6.tar.gz
SERVER-75580 Thread-through `ScopedCollectionOrViewAcquisition` on the internal delete code paths
-rw-r--r--src/mongo/db/change_collection_expired_change_remover_test.cpp29
-rw-r--r--src/mongo/db/change_collection_expired_documents_remover.cpp17
-rw-r--r--src/mongo/db/change_stream_change_collection_manager.cpp9
-rw-r--r--src/mongo/db/change_stream_change_collection_manager.h12
-rw-r--r--src/mongo/db/change_stream_pre_images_collection_manager.cpp44
-rw-r--r--src/mongo/db/change_stream_pre_images_collection_manager.h10
-rw-r--r--src/mongo/db/commands/find_and_modify.cpp17
-rw-r--r--src/mongo/db/commands/write_commands.cpp21
-rw-r--r--src/mongo/db/dbhelpers.cpp10
-rw-r--r--src/mongo/db/dbhelpers.h3
-rw-r--r--src/mongo/db/exec/batched_delete_stage.cpp2
-rw-r--r--src/mongo/db/exec/batched_delete_stage.h2
-rw-r--r--src/mongo/db/exec/delete_stage.cpp9
-rw-r--r--src/mongo/db/exec/delete_stage.h5
-rw-r--r--src/mongo/db/exec/stagedebug_cmd.cpp30
-rw-r--r--src/mongo/db/global_index.cpp17
-rw-r--r--src/mongo/db/global_index.h6
-rw-r--r--src/mongo/db/ops/delete.cpp16
-rw-r--r--src/mongo/db/ops/delete.h6
-rw-r--r--src/mongo/db/ops/write_ops_exec.cpp4
-rw-r--r--src/mongo/db/query/get_executor.cpp35
-rw-r--r--src/mongo/db/query/get_executor.h4
-rw-r--r--src/mongo/db/query/internal_plans.cpp86
-rw-r--r--src/mongo/db/query/internal_plans.h11
-rw-r--r--src/mongo/db/query/plan_executor.cpp11
-rw-r--r--src/mongo/db/query/plan_executor.h21
-rw-r--r--src/mongo/db/query/plan_executor_factory.cpp8
-rw-r--r--src/mongo/db/query/plan_executor_factory.h6
-rw-r--r--src/mongo/db/query/plan_executor_impl.cpp30
-rw-r--r--src/mongo/db/query/plan_executor_impl.h24
-rw-r--r--src/mongo/db/repl/apply_ops.cpp13
-rw-r--r--src/mongo/db/repl/oplog.cpp14
-rw-r--r--src/mongo/db/repl/oplog.h2
-rw-r--r--src/mongo/db/repl/oplog_applier_utils.cpp34
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_impl.cpp12
-rw-r--r--src/mongo/db/repl/rs_rollback.cpp36
-rw-r--r--src/mongo/db/repl/storage_interface_impl.cpp62
-rw-r--r--src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp16
-rw-r--r--src/mongo/db/repl/transaction_oplog_application.cpp16
-rw-r--r--src/mongo/db/s/migration_destination_manager.cpp14
-rw-r--r--src/mongo/db/s/range_deletion_util.cpp35
-rw-r--r--src/mongo/db/s/resharding/resharding_donor_service.cpp16
-rw-r--r--src/mongo/db/s/resharding/resharding_oplog_application.cpp103
-rw-r--r--src/mongo/db/s/resharding/resharding_oplog_application.h6
-rw-r--r--src/mongo/db/s/resharding/resharding_recipient_service.cpp17
-rw-r--r--src/mongo/db/s/sharding_index_catalog_ddl_util.cpp175
-rw-r--r--src/mongo/db/serverless/shard_split_utils.cpp16
-rw-r--r--src/mongo/db/shard_role.cpp16
-rw-r--r--src/mongo/db/shard_role.h5
-rw-r--r--src/mongo/db/transaction_resources.cpp4
-rw-r--r--src/mongo/db/transaction_resources.h5
-rw-r--r--src/mongo/db/ttl.cpp86
-rw-r--r--src/mongo/db/ttl.h5
-rw-r--r--src/mongo/dbtests/dbhelper_tests.cpp33
-rw-r--r--src/mongo/dbtests/query_stage_batched_delete.cpp118
-rw-r--r--src/mongo/dbtests/query_stage_delete.cpp30
-rw-r--r--src/mongo/dbtests/repltests.cpp4
57 files changed, 865 insertions, 533 deletions
diff --git a/src/mongo/db/change_collection_expired_change_remover_test.cpp b/src/mongo/db/change_collection_expired_change_remover_test.cpp
index 88a3eb36f23..16d4e516b51 100644
--- a/src/mongo/db/change_collection_expired_change_remover_test.cpp
+++ b/src/mongo/db/change_collection_expired_change_remover_test.cpp
@@ -47,6 +47,7 @@
#include "mongo/db/repl/optime.h"
#include "mongo/db/server_parameter_with_storage.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/storage/record_data.h"
#include "mongo/idl/server_parameter_test_util.h"
#include "mongo/unittest/unittest.h"
@@ -131,16 +132,21 @@ protected:
Date_t expirationTime) {
// Acquire intent-exclusive lock on the change collection. Early exit if the collection
// doesn't exist.
- const auto changeCollection =
- AutoGetChangeCollection{opCtx, AutoGetChangeCollection::AccessMode::kWrite, tenantId};
+ const auto changeCollection = acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest(NamespaceString::makeChangeCollectionNSS(tenantId),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ MODE_IX);
// Get the 'maxRecordIdBound' and perform the removal of the expired documents.
const auto maxRecordIdBound =
ChangeStreamChangeCollectionManager::getChangeCollectionPurgingJobMetadata(
- opCtx, &*changeCollection)
+ opCtx, changeCollection)
->maxRecordIdBound;
return ChangeStreamChangeCollectionManager::removeExpiredChangeCollectionsDocuments(
- opCtx, &*changeCollection, maxRecordIdBound, expirationTime);
+ opCtx, changeCollection, maxRecordIdBound, expirationTime);
}
const TenantId _tenantId;
@@ -244,18 +250,23 @@ TEST_F(ChangeCollectionExpiredChangeRemoverTest, VerifyLastExpiredDocument) {
clockSource()->advance(Milliseconds(1));
}
- auto changeCollection =
- AutoGetChangeCollection{opCtx, AutoGetChangeCollection::AccessMode::kRead, _tenantId};
+ const auto changeCollection = acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest(NamespaceString::makeChangeCollectionNSS(_tenantId),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kRead),
+ MODE_IS);
auto maxExpiredRecordId =
- ChangeStreamChangeCollectionManager::getChangeCollectionPurgingJobMetadata(
- opCtx, &*changeCollection)
+ ChangeStreamChangeCollectionManager::getChangeCollectionPurgingJobMetadata(opCtx,
+ changeCollection)
->maxRecordIdBound;
// Get the document found at 'maxExpiredRecordId' and test it against 'lastExpiredDocument'.
auto scanExecutor =
InternalPlanner::collectionScan(opCtx,
- &(*changeCollection),
+ &changeCollection,
PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY,
InternalPlanner::Direction::FORWARD,
boost::none,
diff --git a/src/mongo/db/change_collection_expired_documents_remover.cpp b/src/mongo/db/change_collection_expired_documents_remover.cpp
index 1cfdee40186..5dc4d1fe7e6 100644
--- a/src/mongo/db/change_collection_expired_documents_remover.cpp
+++ b/src/mongo/db/change_collection_expired_documents_remover.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/shard_role.h"
#include "mongo/logv2/log.h"
#include "mongo/platform/mutex.h"
#include "mongo/util/duration.h"
@@ -92,12 +93,18 @@ void removeExpiredDocuments(Client* client) {
change_stream_serverless_helpers::getExpireAfterSeconds(tenantId);
// Acquire intent-exclusive lock on the change collection.
- AutoGetChangeCollection changeCollection{
- opCtx.get(), AutoGetChangeCollection::AccessMode::kWrite, tenantId};
+ const auto changeCollection =
+ acquireCollection(opCtx.get(),
+ CollectionAcquisitionRequest(
+ NamespaceString::makeChangeCollectionNSS(tenantId),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx.get()),
+ AcquisitionPrerequisites::kWrite),
+ MODE_IX);
// Early exit if collection does not exist or if running on a secondary (requires
// opCtx->lockState()->isRSTLLocked()).
- if (!changeCollection ||
+ if (!changeCollection.exists() ||
!repl::ReplicationCoordinator::get(opCtx.get())
->canAcceptWritesForDatabase(opCtx.get(), DatabaseName::kConfig.toString())) {
continue;
@@ -108,7 +115,7 @@ void removeExpiredDocuments(Client* client) {
// to remove.
auto purgingJobMetadata =
ChangeStreamChangeCollectionManager::getChangeCollectionPurgingJobMetadata(
- opCtx.get(), &*changeCollection);
+ opCtx.get(), changeCollection);
if (!purgingJobMetadata) {
continue;
}
@@ -116,7 +123,7 @@ void removeExpiredDocuments(Client* client) {
removedCount +=
ChangeStreamChangeCollectionManager::removeExpiredChangeCollectionsDocuments(
opCtx.get(),
- &*changeCollection,
+ changeCollection,
purgingJobMetadata->maxRecordIdBound,
currentWallTime - Seconds(expiredAfterSeconds));
changeCollectionManager.getPurgingJobStats().scannedCollections.fetchAndAddRelaxed(1);
diff --git a/src/mongo/db/change_stream_change_collection_manager.cpp b/src/mongo/db/change_stream_change_collection_manager.cpp
index 091fee83f43..e75140f9de3 100644
--- a/src/mongo/db/change_stream_change_collection_manager.cpp
+++ b/src/mongo/db/change_stream_change_collection_manager.cpp
@@ -48,6 +48,7 @@
#include "mongo/db/repl/oplog_entry_gen.h"
#include "mongo/db/server_feature_flags_gen.h"
#include "mongo/db/server_options.h"
+#include "mongo/db/shard_role.h"
#include "mongo/logv2/log.h"
namespace mongo {
@@ -449,14 +450,14 @@ void ChangeStreamChangeCollectionManager::insertDocumentsToChangeCollection(
boost::optional<ChangeCollectionPurgingJobMetadata>
ChangeStreamChangeCollectionManager::getChangeCollectionPurgingJobMetadata(
- OperationContext* opCtx, const CollectionPtr* changeCollection) {
+ OperationContext* opCtx, const ScopedCollectionAcquisition& changeCollection) {
auto findWallTimeAndRecordIdForFirstDocument = [&](InternalPlanner::Direction direction)
-> boost::optional<std::pair<long long, RecordId>> {
BSONObj currChangeDoc;
RecordId currRecordId;
auto scanExecutor = InternalPlanner::collectionScan(
- opCtx, changeCollection, PlanYieldPolicy::YieldPolicy::YIELD_AUTO, direction);
+ opCtx, &changeCollection, PlanYieldPolicy::YieldPolicy::YIELD_AUTO, direction);
switch (scanExecutor->getNext(&currChangeDoc, &currRecordId)) {
case PlanExecutor::IS_EOF:
return boost::none;
@@ -479,7 +480,7 @@ ChangeStreamChangeCollectionManager::getChangeCollectionPurgingJobMetadata(
size_t ChangeStreamChangeCollectionManager::removeExpiredChangeCollectionsDocuments(
OperationContext* opCtx,
- const CollectionPtr* changeCollection,
+ const ScopedCollectionAcquisition& changeCollection,
RecordIdBound maxRecordIdBound,
Date_t expirationTime) {
auto params = std::make_unique<DeleteStageParams>();
@@ -489,7 +490,7 @@ size_t ChangeStreamChangeCollectionManager::removeExpiredChangeCollectionsDocume
LTEMatchExpression filter{"wall"_sd, Value(expirationTime)};
auto deleteExecutor = InternalPlanner::deleteWithCollectionScan(
opCtx,
- &(*changeCollection),
+ changeCollection,
std::move(params),
PlanYieldPolicy::YieldPolicy::YIELD_AUTO,
InternalPlanner::Direction::FORWARD,
diff --git a/src/mongo/db/change_stream_change_collection_manager.h b/src/mongo/db/change_stream_change_collection_manager.h
index f398824069a..9d7f3277d44 100644
--- a/src/mongo/db/change_stream_change_collection_manager.h
+++ b/src/mongo/db/change_stream_change_collection_manager.h
@@ -33,6 +33,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/storage_interface.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/shard_role.h"
namespace mongo {
@@ -199,16 +200,17 @@ public:
*/
static boost::optional<ChangeCollectionPurgingJobMetadata>
getChangeCollectionPurgingJobMetadata(OperationContext* opCtx,
- const CollectionPtr* changeCollection);
+ const ScopedCollectionAcquisition& changeCollection);
/** Removes documents from a change collection whose wall time is less than the
* 'expirationTime'. Returns the number of documents deleted. The 'maxRecordIdBound' is the
* maximum record id bound that will not be included in the collection scan.
*/
- static size_t removeExpiredChangeCollectionsDocuments(OperationContext* opCtx,
- const CollectionPtr* changeCollection,
- RecordIdBound maxRecordIdBound,
- Date_t expirationTime);
+ static size_t removeExpiredChangeCollectionsDocuments(
+ OperationContext* opCtx,
+ const ScopedCollectionAcquisition& changeCollection,
+ RecordIdBound maxRecordIdBound,
+ Date_t expirationTime);
private:
// Change collections purging job stats.
diff --git a/src/mongo/db/change_stream_pre_images_collection_manager.cpp b/src/mongo/db/change_stream_pre_images_collection_manager.cpp
index e394f68c0cd..6dd41a8e8f2 100644
--- a/src/mongo/db/change_stream_pre_images_collection_manager.cpp
+++ b/src/mongo/db/change_stream_pre_images_collection_manager.cpp
@@ -45,6 +45,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/repl/storage_interface.h"
+#include "mongo/db/shard_role.h"
#include "mongo/logv2/log.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/concurrency/idle_thread_block.h"
@@ -63,7 +64,7 @@ const auto getPreImagesCollectionManager =
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> getDeleteExpiredPreImagesExecutor(
OperationContext* opCtx,
- const CollectionPtr& preImageColl,
+ const ScopedCollectionAcquisition& preImageColl,
const MatchExpression* filterPtr,
Timestamp maxRecordIdTimestamp,
UUID currentCollectionUUID) {
@@ -80,7 +81,7 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> getDeleteExpiredPreImagesEx
return InternalPlanner::deleteWithCollectionScan(
opCtx,
- &preImageColl,
+ preImageColl,
std::move(params),
PlanYieldPolicy::YieldPolicy::YIELD_AUTO,
InternalPlanner::Direction::FORWARD,
@@ -346,7 +347,7 @@ void ChangeStreamPreImagesCollectionManager::performExpiredChangeStreamPreImages
size_t ChangeStreamPreImagesCollectionManager::_deleteExpiredChangeStreamPreImagesCommon(
OperationContext* opCtx,
- const CollectionPtr& preImageColl,
+ const ScopedCollectionAcquisition& preImageColl,
const MatchExpression* filterPtr,
Timestamp maxRecordIdTimestamp) {
size_t numberOfRemovals = 0;
@@ -356,8 +357,9 @@ size_t ChangeStreamPreImagesCollectionManager::_deleteExpiredChangeStreamPreImag
// collection being examined.
Date_t firstDocWallTime{};
- while ((currentCollectionUUID = findNextCollectionUUID(
- opCtx, &preImageColl, currentCollectionUUID, firstDocWallTime))) {
+ while (
+ (currentCollectionUUID = findNextCollectionUUID(
+ opCtx, &preImageColl.getCollectionPtr(), currentCollectionUUID, firstDocWallTime))) {
writeConflictRetry(
opCtx,
"ChangeStreamExpiredPreImagesRemover",
@@ -383,11 +385,16 @@ size_t ChangeStreamPreImagesCollectionManager::_deleteExpiredChangeStreamPreImag
size_t ChangeStreamPreImagesCollectionManager::_deleteExpiredChangeStreamPreImages(
OperationContext* opCtx, Date_t currentTimeForTimeBasedExpiration) {
// Acquire intent-exclusive lock on the change collection.
- AutoGetCollection preImageColl(
- opCtx, NamespaceString::makePreImageCollectionNSS(boost::none), MODE_IX);
+ const auto preImageColl = acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest(NamespaceString::makePreImageCollectionNSS(boost::none),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ MODE_IX);
// Early exit if the collection doesn't exist or running on a secondary.
- if (!preImageColl ||
+ if (!preImageColl.exists() ||
!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(
opCtx, DatabaseName::kConfig.toString())) {
return 0;
@@ -411,14 +418,14 @@ size_t ChangeStreamPreImagesCollectionManager::_deleteExpiredChangeStreamPreImag
// RecordId for this collection. Whether the pre-image has to be deleted will be determined
// by the 'filter' parameter.
return _deleteExpiredChangeStreamPreImagesCommon(
- opCtx, *preImageColl, &filter, Timestamp::max() /* maxRecordIdTimestamp */);
+ opCtx, preImageColl, &filter, Timestamp::max() /* maxRecordIdTimestamp */);
}
// 'preImageExpirationTime' is not set, so the last expired pre-image timestamp is less than
// 'currentEarliestOplogEntryTs'.
return _deleteExpiredChangeStreamPreImagesCommon(
opCtx,
- *preImageColl,
+ preImageColl,
nullptr /* filterPtr */,
Timestamp(currentEarliestOplogEntryTs.asULL() - 1) /* maxRecordIdTimestamp */);
}
@@ -427,13 +434,18 @@ size_t ChangeStreamPreImagesCollectionManager::_deleteExpiredChangeStreamPreImag
OperationContext* opCtx, const TenantId& tenantId, Date_t currentTimeForTimeBasedExpiration) {
// Acquire intent-exclusive lock on the change collection.
- AutoGetCollection preImageColl(opCtx,
- NamespaceString::makePreImageCollectionNSS(
- change_stream_serverless_helpers::resolveTenantId(tenantId)),
- MODE_IX);
+ const auto preImageColl =
+ acquireCollection(opCtx,
+ CollectionAcquisitionRequest(
+ NamespaceString::makePreImageCollectionNSS(
+ change_stream_serverless_helpers::resolveTenantId(tenantId)),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ MODE_IX);
// Early exit if the collection doesn't exist or running on a secondary.
- if (!preImageColl ||
+ if (!preImageColl.exists() ||
!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(
opCtx, DatabaseName::kConfig.toString())) {
return 0;
@@ -447,7 +459,7 @@ size_t ChangeStreamPreImagesCollectionManager::_deleteExpiredChangeStreamPreImag
// Set the 'maxRecordIdTimestamp' parameter (upper scan boundary) to maximum possible. Whether
// the pre-image has to be deleted will be determined by the 'filter' parameter.
return _deleteExpiredChangeStreamPreImagesCommon(
- opCtx, *preImageColl, &filter, Timestamp::max() /* maxRecordIdTimestamp */);
+ opCtx, preImageColl, &filter, Timestamp::max() /* maxRecordIdTimestamp */);
}
} // namespace mongo
diff --git a/src/mongo/db/change_stream_pre_images_collection_manager.h b/src/mongo/db/change_stream_pre_images_collection_manager.h
index e30c49079c2..6e2876a19f3 100644
--- a/src/mongo/db/change_stream_pre_images_collection_manager.h
+++ b/src/mongo/db/change_stream_pre_images_collection_manager.h
@@ -35,6 +35,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/pipeline/change_stream_preimage_gen.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/tenant_id.h"
#include "mongo/util/background.h"
@@ -170,10 +171,11 @@ private:
* | applyIndex: 0 | | applyIndex: 0 | | applyIndex: 0 | | applyIndex: 1 |
* +-------------------+ +-------------------+ +-------------------+ +-------------------+
*/
- size_t _deleteExpiredChangeStreamPreImagesCommon(OperationContext* opCtx,
- const CollectionPtr& preImageColl,
- const MatchExpression* filterPtr,
- Timestamp maxRecordIdTimestamp);
+ size_t _deleteExpiredChangeStreamPreImagesCommon(
+ OperationContext* opCtx,
+ const ScopedCollectionAcquisition& preImageColl,
+ const MatchExpression* filterPtr,
+ Timestamp maxRecordIdTimestamp);
/**
* Removes expired pre-images in a single tenant enviornment.
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 4ee472f8349..1ed78e6a7b2 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -33,6 +33,7 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/catalog/collection_yield_restore.h"
+#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/client.h"
@@ -364,23 +365,27 @@ void CmdFindAndModify::Invocation::explain(OperationContext* opCtx,
// Explain calls of the findAndModify command are read-only, but we take write
// locks so that the timing information is more accurate.
- AutoGetCollection collection(opCtx, nss, MODE_IX);
+ const auto collection =
+ acquireCollection(opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(
+ opCtx, nss, AcquisitionPrerequisites::OperationType::kWrite),
+ MODE_IX);
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "database " << dbName.toStringForErrorMsg() << " does not exist",
- collection.getDb());
+ DatabaseHolder::get(opCtx)->getDb(opCtx, nss.dbName()));
- ParsedDelete parsedDelete(opCtx, &deleteRequest, collection.getCollection());
+ ParsedDelete parsedDelete(opCtx, &deleteRequest, collection.getCollectionPtr());
uassertStatusOK(parsedDelete.parseRequest());
CollectionShardingState::assertCollectionLockedAndAcquire(opCtx, nss)
->checkShardVersionOrThrow(opCtx);
- const auto exec = uassertStatusOK(
- getExecutorDelete(opDebug, &collection.getCollection(), &parsedDelete, verbosity));
+ const auto exec =
+ uassertStatusOK(getExecutorDelete(opDebug, collection, &parsedDelete, verbosity));
auto bodyBuilder = result->getBodyBuilder();
Explain::explainStages(
- exec.get(), collection.getCollection(), verbosity, BSONObj(), cmdObj, &bodyBuilder);
+ exec.get(), collection.getCollectionPtr(), verbosity, BSONObj(), cmdObj, &bodyBuilder);
} else {
auto updateRequest = UpdateRequest();
updateRequest.setNamespaceString(nss);
diff --git a/src/mongo/db/commands/write_commands.cpp b/src/mongo/db/commands/write_commands.cpp
index 157cbb3a5fe..53b50e5ceb6 100644
--- a/src/mongo/db/commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands.cpp
@@ -761,13 +761,16 @@ public:
// Explains of write commands are read-only, but we take write locks so that timing
// info is more accurate.
- AutoGetCollection collection(opCtx, deleteRequest.getNsString(), MODE_IX);
-
+ const auto collection = acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(
+ opCtx, deleteRequest.getNsString(), AcquisitionPrerequisites::kWrite),
+ MODE_IX);
if (isRequestToTimeseries) {
uassert(ErrorCodes::NamespaceNotFound,
"Could not find time-series buckets collection for write explain",
- *collection);
- auto timeseriesOptions = collection->getTimeseriesOptions();
+ collection.exists());
+ auto timeseriesOptions = collection.getCollectionPtr()->getTimeseriesOptions();
uassert(ErrorCodes::InvalidOptions,
"Time-series buckets collection is missing time-series options",
timeseriesOptions);
@@ -780,17 +783,15 @@ public:
}
ParsedDelete parsedDelete(
- opCtx, &deleteRequest, collection.getCollection(), isRequestToTimeseries);
+ opCtx, &deleteRequest, collection.getCollectionPtr(), isRequestToTimeseries);
uassertStatusOK(parsedDelete.parseRequest());
// Explain the plan tree.
- auto exec = uassertStatusOK(getExecutorDelete(&CurOp::get(opCtx)->debug(),
- &collection.getCollection(),
- &parsedDelete,
- verbosity));
+ auto exec = uassertStatusOK(getExecutorDelete(
+ &CurOp::get(opCtx)->debug(), collection, &parsedDelete, verbosity));
auto bodyBuilder = result->getBodyBuilder();
Explain::explainStages(exec.get(),
- collection.getCollection(),
+ collection.getCollectionPtr(),
verbosity,
BSONObj(),
_commandObj,
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index b5327b458af..4e552e01dbe 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -351,14 +351,10 @@ BSONObj Helpers::inferKeyPattern(const BSONObj& o) {
return kpBuilder.obj();
}
-void Helpers::emptyCollection(OperationContext* opCtx, const NamespaceString& nss) {
- OldClientContext context(opCtx, nss);
+void Helpers::emptyCollection(OperationContext* opCtx, const ScopedCollectionAcquisition& coll) {
+ OldClientContext context(opCtx, coll.nss());
repl::UnreplicatedWritesBlock uwb(opCtx);
- CollectionPtr collection = CollectionPtr(
- context.db() ? CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss)
- : nullptr);
-
- deleteObjects(opCtx, collection, nss, BSONObj(), false);
+ deleteObjects(opCtx, coll, BSONObj(), false);
}
bool Helpers::findByIdAndNoopUpdate(OperationContext* opCtx,
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index c620351824a..90dbb5d88e4 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -40,6 +40,7 @@ class CollectionPtr;
class Database;
class OperationContext;
class FindCommandRequest;
+class ScopedCollectionAcquisition;
/**
* db helpers are helper functions and classes that let us easily manipulate the local
@@ -176,7 +177,7 @@ struct Helpers {
* You do not need to set the database before calling.
* Does not oplog the operation.
*/
- static void emptyCollection(OperationContext* opCtx, const NamespaceString& nss);
+ static void emptyCollection(OperationContext* opCtx, const ScopedCollectionAcquisition& coll);
/*
* Finds the doc and then runs a no-op update by running an update using the doc just read. Used
diff --git a/src/mongo/db/exec/batched_delete_stage.cpp b/src/mongo/db/exec/batched_delete_stage.cpp
index 0d917f69e9d..91d25a57d95 100644
--- a/src/mongo/db/exec/batched_delete_stage.cpp
+++ b/src/mongo/db/exec/batched_delete_stage.cpp
@@ -132,7 +132,7 @@ BatchedDeleteStage::BatchedDeleteStage(
std::unique_ptr<DeleteStageParams> params,
std::unique_ptr<BatchedDeleteStageParams> batchedDeleteParams,
WorkingSet* ws,
- const CollectionPtr& collection,
+ const ScopedCollectionAcquisition& collection,
PlanStage* child)
: DeleteStage::DeleteStage(
kStageType.rawData(), expCtx, std::move(params), ws, collection, child),
diff --git a/src/mongo/db/exec/batched_delete_stage.h b/src/mongo/db/exec/batched_delete_stage.h
index 511eacee901..823346d5a71 100644
--- a/src/mongo/db/exec/batched_delete_stage.h
+++ b/src/mongo/db/exec/batched_delete_stage.h
@@ -107,7 +107,7 @@ public:
std::unique_ptr<DeleteStageParams> params,
std::unique_ptr<BatchedDeleteStageParams> batchedDeleteParams,
WorkingSet* ws,
- const CollectionPtr& collection,
+ const ScopedCollectionAcquisition& collection,
PlanStage* child);
~BatchedDeleteStage();
diff --git a/src/mongo/db/exec/delete_stage.cpp b/src/mongo/db/exec/delete_stage.cpp
index 534e5b59a0c..d8e48013d99 100644
--- a/src/mongo/db/exec/delete_stage.cpp
+++ b/src/mongo/db/exec/delete_stage.cpp
@@ -45,6 +45,7 @@
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/shard_role.h"
#include "mongo/logv2/log.h"
#include "mongo/util/scopeguard.h"
@@ -75,7 +76,7 @@ bool shouldRestartDeleteIfNoLongerMatches(const DeleteStageParams* params) {
DeleteStage::DeleteStage(ExpressionContext* expCtx,
std::unique_ptr<DeleteStageParams> params,
WorkingSet* ws,
- const CollectionPtr& collection,
+ const ScopedCollectionAcquisition& collection,
PlanStage* child)
: DeleteStage(kStageType.rawData(), expCtx, std::move(params), ws, collection, child) {}
@@ -83,12 +84,12 @@ DeleteStage::DeleteStage(const char* stageType,
ExpressionContext* expCtx,
std::unique_ptr<DeleteStageParams> params,
WorkingSet* ws,
- const CollectionPtr& collection,
+ const ScopedCollectionAcquisition& collection,
PlanStage* child)
- : RequiresMutableCollectionStage(stageType, expCtx, collection),
+ : RequiresMutableCollectionStage(stageType, expCtx, collection.getCollectionPtr()),
_params(std::move(params)),
_ws(ws),
- _preWriteFilter(opCtx(), collection->ns()),
+ _preWriteFilter(opCtx(), collection.nss()),
_idRetrying(WorkingSet::INVALID_ID),
_idReturning(WorkingSet::INVALID_ID) {
_children.emplace_back(child);
diff --git a/src/mongo/db/exec/delete_stage.h b/src/mongo/db/exec/delete_stage.h
index bf9b1e9cc52..eaa91728693 100644
--- a/src/mongo/db/exec/delete_stage.h
+++ b/src/mongo/db/exec/delete_stage.h
@@ -33,6 +33,7 @@
#include "mongo/db/exec/write_stage_common.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/session/logical_session_id.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/storage/remove_saver.h"
namespace mongo {
@@ -111,14 +112,14 @@ public:
DeleteStage(ExpressionContext* expCtx,
std::unique_ptr<DeleteStageParams> params,
WorkingSet* ws,
- const CollectionPtr& collection,
+ const ScopedCollectionAcquisition& collection,
PlanStage* child);
DeleteStage(const char* stageType,
ExpressionContext* expCtx,
std::unique_ptr<DeleteStageParams> params,
WorkingSet* ws,
- const CollectionPtr& collection,
+ const ScopedCollectionAcquisition& collection,
PlanStage* child);
bool isEOF();
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 663dd912bd7..5ca15fb14fc 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -60,6 +60,7 @@
#include "mongo/db/matcher/extensions_callback_real.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/plan_executor_factory.h"
+#include "mongo/db/shard_role.h"
namespace mongo {
@@ -153,13 +154,15 @@ public:
// TODO A write lock is currently taken here to accommodate stages that perform writes
// (e.g. DeleteStage). This should be changed to use a read lock for read-only
// execution trees.
- AutoGetCollection autoColl(opCtx, nss, MODE_IX);
+ const auto collection = acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(opCtx, nss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
// Make sure the collection is valid.
- const auto& collection = autoColl.getCollection();
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Couldn't find collection " << nss.toStringForErrorMsg(),
- collection);
+ collection.exists());
// Pull out the plan
BSONElement planElt = argObj["plan"];
@@ -178,7 +181,7 @@ public:
// Add a fetch at the top for the user so we can get obj back for sure.
unique_ptr<PlanStage> rootFetch = std::make_unique<FetchStage>(
- expCtx.get(), ws.get(), std::move(userRoot), nullptr, collection);
+ expCtx.get(), ws.get(), std::move(userRoot), nullptr, collection.getCollectionPtr());
auto statusWithPlanExecutor =
plan_executor_factory::make(expCtx,
@@ -203,11 +206,12 @@ public:
}
PlanStage* parseQuery(const boost::intrusive_ptr<ExpressionContext>& expCtx,
- const CollectionPtr& collection,
+ const ScopedCollectionAcquisition& collection,
BSONObj obj,
WorkingSet* workingSet,
const NamespaceString& nss,
std::vector<std::unique_ptr<MatchExpression>>* exprs) {
+ const auto& collectionPtr = collection.getCollectionPtr();
OperationContext* opCtx = expCtx->opCtx;
BSONElement firstElt = obj.firstElement();
@@ -234,7 +238,7 @@ public:
auto statusWithMatcher =
MatchExpressionParser::parse(argObj,
expCtx,
- ExtensionsCallbackReal(opCtx, &collection->ns()),
+ ExtensionsCallbackReal(opCtx, &collection.nss()),
MatchExpressionParser::kAllowAllSpecialFeatures);
if (!statusWithMatcher.isOK()) {
return nullptr;
@@ -262,7 +266,7 @@ public:
// This'll throw if it's not an obj but that's OK.
BSONObj keyPatternObj = keyPatternElement.Obj();
std::vector<const IndexDescriptor*> indexes;
- collection->getIndexCatalog()->findIndexesByKeyPattern(
+ collectionPtr->getIndexCatalog()->findIndexesByKeyPattern(
opCtx, keyPatternObj, IndexCatalog::InclusionPolicy::kReady, &indexes);
uassert(16890,
str::stream() << "Can't find index: " << keyPatternObj,
@@ -279,20 +283,20 @@ public:
str::stream() << "Index 'name' must be a string in: " << nodeArgs,
nodeArgs["name"].type() == BSONType::String);
StringData name = nodeArgs["name"].valueStringData();
- desc = collection->getIndexCatalog()->findIndexByName(opCtx, name);
+ desc = collectionPtr->getIndexCatalog()->findIndexByName(opCtx, name);
uassert(40223, str::stream() << "Can't find index: " << name.toString(), desc);
}
- IndexScanParams params(opCtx, collection, desc);
+ IndexScanParams params(opCtx, collectionPtr, desc);
params.bounds.isSimpleRange = true;
params.bounds.startKey = BSONObj::stripFieldNames(nodeArgs["startKey"].Obj());
params.bounds.endKey = BSONObj::stripFieldNames(nodeArgs["endKey"].Obj());
params.bounds.boundInclusion = IndexBounds::makeBoundInclusionFromBoundBools(
nodeArgs["startKeyInclusive"].Bool(), nodeArgs["endKeyInclusive"].Bool());
params.direction = nodeArgs["direction"].numberInt();
- params.shouldDedup = desc->getEntry()->isMultikey(opCtx, collection);
+ params.shouldDedup = desc->getEntry()->isMultikey(opCtx, collectionPtr);
- return new IndexScan(expCtx.get(), collection, params, workingSet, matcher);
+ return new IndexScan(expCtx.get(), collectionPtr, params, workingSet, matcher);
} else if ("andHash" == nodeName) {
uassert(
16921, "Nodes argument must be provided to AND", nodeArgs["nodes"].isABSONObj());
@@ -370,7 +374,7 @@ public:
"Can't parse sub-node of FETCH: " + nodeArgs["node"].Obj().toString(),
nullptr != subNode);
return new FetchStage(
- expCtx.get(), workingSet, std::move(subNode), matcher, collection);
+ expCtx.get(), workingSet, std::move(subNode), matcher, collectionPtr);
} else if ("limit" == nodeName) {
uassert(16937,
"Limit stage doesn't have a filter (put it on the child)",
@@ -411,7 +415,7 @@ public:
params.direction = CollectionScanParams::BACKWARD;
}
- return new CollectionScan(expCtx.get(), collection, params, workingSet, matcher);
+ return new CollectionScan(expCtx.get(), collectionPtr, params, workingSet, matcher);
} else if ("mergeSort" == nodeName) {
uassert(
16971, "Nodes argument must be provided to sort", nodeArgs["nodes"].isABSONObj());
diff --git a/src/mongo/db/global_index.cpp b/src/mongo/db/global_index.cpp
index caeb38d3745..a31e7e3889c 100644
--- a/src/mongo/db/global_index.cpp
+++ b/src/mongo/db/global_index.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/op_observer/op_observer.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/query/internal_plans.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/storage/key_string.h"
#include "mongo/db/transaction/retryable_writes_stats.h"
#include "mongo/db/transaction/transaction_participant.h"
@@ -252,7 +253,7 @@ void insertKey(OperationContext* opCtx,
}
void deleteKey(OperationContext* opCtx,
- const CollectionPtr& container,
+ const ScopedCollectionAcquisition& container,
const BSONObj& key,
const BSONObj& docKey) {
const auto indexEntry = buildIndexEntry(key, docKey);
@@ -268,7 +269,7 @@ void deleteKey(OperationContext* opCtx,
// is why we delete using a collection scan.
auto planExecutor = InternalPlanner::deleteWithCollectionScan(
opCtx,
- &container,
+ container,
std::move(deleteStageParams),
PlanYieldPolicy::YieldPolicy::NO_YIELD,
InternalPlanner::FORWARD,
@@ -283,7 +284,7 @@ void deleteKey(OperationContext* opCtx,
// Return error if no document has been found or if the associated "key" does not match the key
// provided as parameter.
uassert(ErrorCodes::KeyNotFound,
- str::stream() << "Global index container with UUID " << container->uuid()
+ str::stream() << "Global index container with UUID " << container.uuid()
<< " does not contain specified entry. key:" << key
<< ", docKey:" << docKey,
execState == PlanExecutor::ExecState::ADVANCED &&
@@ -302,16 +303,18 @@ void deleteKey(OperationContext* opCtx,
writeConflictRetry(opCtx, "deleteGlobalIndexKey", ns.toString(), [&] {
WriteUnitOfWork wuow(opCtx);
- AutoGetCollection autoColl(opCtx, ns, MODE_IX);
- auto& container = autoColl.getCollection();
+ const auto coll = acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(opCtx, ns, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
uassert(6924201,
str::stream() << "Global index container with UUID " << indexUUID
<< " does not exist.",
- container);
+ coll.exists());
{
repl::UnreplicatedWritesBlock unreplicatedWrites(opCtx);
- deleteKey(opCtx, container, key, docKey);
+ deleteKey(opCtx, coll, key, docKey);
}
opCtx->getServiceContext()->getOpObserver()->onDeleteGlobalIndexKey(
diff --git a/src/mongo/db/global_index.h b/src/mongo/db/global_index.h
index c4a2718c8ed..d04ac043ab6 100644
--- a/src/mongo/db/global_index.h
+++ b/src/mongo/db/global_index.h
@@ -33,6 +33,10 @@
#include "mongo/db/operation_context.h"
#include "mongo/util/uuid.h"
+namespace mongo {
+class ScopedCollectionAcquisition;
+}
+
namespace mongo::global_index {
// The container (collection) fields of an index key. The document key is stored as a BSON object.
@@ -94,7 +98,7 @@ void deleteKey(OperationContext* opCtx,
* the above, this variant requires the call to be wrapped inside a writeConflictRetry.
*/
void deleteKey(OperationContext* opCtx,
- const CollectionPtr& container,
+ const ScopedCollectionAcquisition& container,
const BSONObj& key,
const BSONObj& docKey);
diff --git a/src/mongo/db/ops/delete.cpp b/src/mongo/db/ops/delete.cpp
index b3cf1606b84..2d687a41a97 100644
--- a/src/mongo/db/ops/delete.cpp
+++ b/src/mongo/db/ops/delete.cpp
@@ -35,40 +35,40 @@
#include "mongo/db/ops/parsed_delete.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/repl/repl_client_info.h"
+#include "mongo/db/shard_role.h"
namespace mongo {
long long deleteObjects(OperationContext* opCtx,
- const CollectionPtr& collection,
- const NamespaceString& ns,
+ const ScopedCollectionAcquisition& collection,
BSONObj pattern,
bool justOne,
bool god,
bool fromMigrate) {
auto request = DeleteRequest{};
- request.setNsString(ns);
+ request.setNsString(collection.nss());
request.setQuery(pattern);
request.setMulti(!justOne);
request.setGod(god);
request.setFromMigrate(fromMigrate);
- ParsedDelete parsedDelete(opCtx, &request, collection);
+ ParsedDelete parsedDelete(opCtx, &request, collection.getCollectionPtr());
uassertStatusOK(parsedDelete.parseRequest());
auto exec = uassertStatusOK(getExecutorDelete(
- &CurOp::get(opCtx)->debug(), &collection, &parsedDelete, boost::none /* verbosity */));
+ &CurOp::get(opCtx)->debug(), collection, &parsedDelete, boost::none /* verbosity */));
return exec->executeDelete();
}
DeleteResult deleteObject(OperationContext* opCtx,
- const CollectionPtr& collection,
+ const ScopedCollectionAcquisition& collection,
const DeleteRequest& request) {
- ParsedDelete parsedDelete(opCtx, &request, collection);
+ ParsedDelete parsedDelete(opCtx, &request, collection.getCollectionPtr());
uassertStatusOK(parsedDelete.parseRequest());
auto exec = uassertStatusOK(getExecutorDelete(
- &CurOp::get(opCtx)->debug(), &collection, &parsedDelete, boost::none /* verbosity */));
+ &CurOp::get(opCtx)->debug(), collection, &parsedDelete, boost::none /* verbosity */));
if (!request.getReturnDeleted()) {
return {exec->executeDelete(), boost::none};
diff --git a/src/mongo/db/ops/delete.h b/src/mongo/db/ops/delete.h
index 79a35a3879b..dbe97d19408 100644
--- a/src/mongo/db/ops/delete.h
+++ b/src/mongo/db/ops/delete.h
@@ -37,6 +37,7 @@ namespace mongo {
class Database;
class OperationContext;
+class ScopedCollectionAcquisition;
/**
* Deletes objects from 'collection' that match the query predicate given by 'pattern'. If 'justOne'
@@ -44,8 +45,7 @@ class OperationContext;
* not yield. If 'god' is true, deletes are allowed on system namespaces.
*/
long long deleteObjects(OperationContext* opCtx,
- const CollectionPtr& collection,
- const NamespaceString& ns,
+ const ScopedCollectionAcquisition& collection,
BSONObj pattern,
bool justOne,
bool god = false,
@@ -57,7 +57,7 @@ struct DeleteResult {
};
DeleteResult deleteObject(OperationContext* opCtx,
- const CollectionPtr& collection,
+ const ScopedCollectionAcquisition& collection,
const DeleteRequest& request);
} // namespace mongo
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 880b36a8d36..a705b59b403 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -838,7 +838,7 @@ long long writeConflictRetryRemove(OperationContext* opCtx,
}
const auto exec = uassertStatusOK(
- getExecutorDelete(opDebug, &collection, &parsedDelete, boost::none /* verbosity */));
+ getExecutorDelete(opDebug, collection, &parsedDelete, boost::none /* verbosity */));
{
stdx::lock_guard<Client> lk(*opCtx->getClient());
@@ -1566,7 +1566,7 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx,
&hangWithLockDuringBatchRemove, opCtx, "hangWithLockDuringBatchRemove");
auto exec = uassertStatusOK(getExecutorDelete(&curOp.debug(),
- &collection,
+ collection,
&parsedDelete,
boost::none /* verbosity */,
std::move(documentCounter)));
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index e8d75e67c5d..e6dd20db8ef 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -1725,18 +1725,12 @@ StatusWith<std::unique_ptr<projection_ast::Projection>> makeProjection(const BSO
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete(
OpDebug* opDebug,
- stdx::variant<const CollectionPtr*, const ScopedCollectionAcquisition*> coll,
+ const ScopedCollectionAcquisition& coll,
ParsedDelete* parsedDelete,
boost::optional<ExplainOptions::Verbosity> verbosity,
DeleteStageParams::DocumentCounter&& documentCounter) {
- const auto& collectionPtr =
- *stdx::visit(OverloadedVisitor{
- [](const CollectionPtr* collectionPtr) { return collectionPtr; },
- [](const ScopedCollectionAcquisition* collectionAcquisition) {
- return &collectionAcquisition->getCollectionPtr();
- },
- },
- coll);
+ const auto& collectionPtr = coll.getCollectionPtr();
+
auto expCtx = parsedDelete->expCtx();
OperationContext* opCtx = expCtx->opCtx;
const DeleteRequest* request = parsedDelete->getRequest();
@@ -1799,7 +1793,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDele
return plan_executor_factory::make(expCtx,
std::move(ws),
std::make_unique<EOFStage>(expCtx.get()),
- &CollectionPtr::null,
+ &coll,
policy,
false, /* whether we must return owned data */
nss);
@@ -1840,12 +1834,12 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDele
std::make_unique<DeleteStage>(expCtx.get(),
std::move(deleteStageParams),
ws.get(),
- collectionPtr,
+ coll,
idHackStage.release());
return plan_executor_factory::make(expCtx,
std::move(ws),
std::move(root),
- coll,
+ &coll,
policy,
false /* whether owned BSON must be returned */);
}
@@ -1928,11 +1922,11 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDele
std::move(deleteStageParams),
std::make_unique<BatchedDeleteStageParams>(),
ws.get(),
- collectionPtr,
+ coll,
root.release());
} else {
root = std::make_unique<DeleteStage>(
- expCtxRaw, std::move(deleteStageParams), ws.get(), collectionPtr, root.release());
+ expCtxRaw, std::move(deleteStageParams), ws.get(), coll, root.release());
}
if (projection) {
@@ -1945,7 +1939,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDele
return plan_executor_factory::make(std::move(cq),
std::move(ws),
std::move(root),
- coll,
+ &coll,
policy,
defaultPlannerOptions,
NamespaceString(),
@@ -1958,18 +1952,11 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDele
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate(
OpDebug* opDebug,
- stdx::variant<const CollectionPtr*, const ScopedCollectionAcquisition*> coll,
+ VariantCollectionPtrOrAcquisition coll,
ParsedUpdate* parsedUpdate,
boost::optional<ExplainOptions::Verbosity> verbosity,
UpdateStageParams::DocumentCounter&& documentCounter) {
- const auto& collectionPtr =
- *stdx::visit(OverloadedVisitor{
- [](const CollectionPtr* collectionPtr) { return collectionPtr; },
- [](const ScopedCollectionAcquisition* collectionAcquisition) {
- return &collectionAcquisition->getCollectionPtr();
- },
- },
- coll);
+ const auto& collectionPtr = coll.getCollectionPtr();
auto expCtx = parsedUpdate->expCtx();
OperationContext* opCtx = expCtx->opCtx;
diff --git a/src/mongo/db/query/get_executor.h b/src/mongo/db/query/get_executor.h
index 24c082eaa2a..01a3ba23410 100644
--- a/src/mongo/db/query/get_executor.h
+++ b/src/mongo/db/query/get_executor.h
@@ -311,7 +311,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCoun
*/
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete(
OpDebug* opDebug,
- stdx::variant<const CollectionPtr*, const ScopedCollectionAcquisition*> collection,
+ const ScopedCollectionAcquisition& coll,
ParsedDelete* parsedDelete,
boost::optional<ExplainOptions::Verbosity> verbosity,
DeleteStageParams::DocumentCounter&& documentCounter = nullptr);
@@ -338,7 +338,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDele
*/
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate(
OpDebug* opDebug,
- stdx::variant<const CollectionPtr*, const ScopedCollectionAcquisition*> collection,
+ VariantCollectionPtrOrAcquisition collection,
ParsedUpdate* parsedUpdate,
boost::optional<ExplainOptions::Verbosity> verbosity,
UpdateStageParams::DocumentCounter&& documentCounter = nullptr);
diff --git a/src/mongo/db/query/internal_plans.cpp b/src/mongo/db/query/internal_plans.cpp
index 377792561a1..9bb6acfb298 100644
--- a/src/mongo/db/query/internal_plans.cpp
+++ b/src/mongo/db/query/internal_plans.cpp
@@ -147,7 +147,7 @@ CollectionScanParams createCollectionScanParams(
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::collectionScan(
OperationContext* opCtx,
- const CollectionPtr* coll,
+ VariantCollectionPtrOrAcquisition collection,
PlanYieldPolicy::YieldPolicy yieldPolicy,
const Direction direction,
const boost::optional<RecordId>& resumeAfterRecordId,
@@ -155,17 +155,17 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::collection
boost::optional<RecordIdBound> maxRecord,
CollectionScanParams::ScanBoundInclusion boundInclusion,
bool shouldReturnEofOnFilterMismatch) {
- const auto& collection = *coll;
- invariant(collection);
+ const auto& collectionPtr = collection.getCollectionPtr();
+ invariant(collectionPtr);
std::unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
auto expCtx = make_intrusive<ExpressionContext>(
- opCtx, std::unique_ptr<CollatorInterface>(nullptr), collection->ns());
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), collectionPtr->ns());
auto collScanParams = createCollectionScanParams(expCtx,
ws.get(),
- coll,
+ &collectionPtr,
direction,
resumeAfterRecordId,
minRecord,
@@ -173,14 +173,14 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::collection
boundInclusion,
shouldReturnEofOnFilterMismatch);
- auto cs = _collectionScan(expCtx, ws.get(), &collection, collScanParams);
+ auto cs = _collectionScan(expCtx, ws.get(), &collectionPtr, collScanParams);
// Takes ownership of 'ws' and 'cs'.
auto statusWithPlanExecutor =
plan_executor_factory::make(expCtx,
std::move(ws),
std::move(cs),
- &collection,
+ collection,
yieldPolicy,
false /* whether owned BSON must be returned */);
invariant(statusWithPlanExecutor.isOK());
@@ -215,7 +215,7 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::collection
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWithCollectionScan(
OperationContext* opCtx,
- const CollectionPtr* coll,
+ const ScopedCollectionAcquisition& coll,
std::unique_ptr<DeleteStageParams> params,
PlanYieldPolicy::YieldPolicy yieldPolicy,
Direction direction,
@@ -225,8 +225,8 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWith
std::unique_ptr<BatchedDeleteStageParams> batchedDeleteParams,
const MatchExpression* filter,
bool shouldReturnEofOnFilterMismatch) {
- const auto& collection = *coll;
- invariant(collection);
+ const auto& collectionPtr = coll.getCollectionPtr();
+ invariant(collectionPtr);
if (shouldReturnEofOnFilterMismatch) {
tassert(7010801,
"MatchExpression filter must be provided when 'shouldReturnEofOnFilterMismatch' is "
@@ -236,15 +236,15 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWith
auto ws = std::make_unique<WorkingSet>();
auto expCtx = make_intrusive<ExpressionContext>(
- opCtx, std::unique_ptr<CollatorInterface>(nullptr), collection->ns());
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), collectionPtr->ns());
- if (collection->isCapped()) {
+ if (collectionPtr->isCapped()) {
expCtx->setIsCappedDelete();
}
auto collScanParams = createCollectionScanParams(expCtx,
ws.get(),
- coll,
+ &collectionPtr,
direction,
boost::none /* resumeAfterId */,
minRecord,
@@ -252,24 +252,24 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWith
boundInclusion,
shouldReturnEofOnFilterMismatch);
- auto root = _collectionScan(expCtx, ws.get(), &collection, collScanParams, filter);
+ auto root = _collectionScan(expCtx, ws.get(), &collectionPtr, collScanParams, filter);
if (batchedDeleteParams) {
root = std::make_unique<BatchedDeleteStage>(expCtx.get(),
std::move(params),
std::move(batchedDeleteParams),
ws.get(),
- collection,
+ coll,
root.release());
} else {
root = std::make_unique<DeleteStage>(
- expCtx.get(), std::move(params), ws.get(), collection, root.release());
+ expCtx.get(), std::move(params), ws.get(), coll, root.release());
}
auto executor = plan_executor_factory::make(expCtx,
std::move(ws),
std::move(root),
- &collection,
+ &coll,
yieldPolicy,
false /* whether owned BSON must be returned */
);
@@ -316,7 +316,7 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::indexScan(
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWithIndexScan(
OperationContext* opCtx,
- const CollectionPtr* coll,
+ const ScopedCollectionAcquisition& coll,
std::unique_ptr<DeleteStageParams> params,
const IndexDescriptor* descriptor,
const BSONObj& startKey,
@@ -325,16 +325,16 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWith
PlanYieldPolicy::YieldPolicy yieldPolicy,
Direction direction,
std::unique_ptr<BatchedDeleteStageParams> batchedDeleteParams) {
- const auto& collection = *coll;
- invariant(collection);
+ const auto& collectionPtr = coll.getCollectionPtr();
+ invariant(collectionPtr);
auto ws = std::make_unique<WorkingSet>();
auto expCtx = make_intrusive<ExpressionContext>(
- opCtx, std::unique_ptr<CollatorInterface>(nullptr), collection->ns());
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), collectionPtr->ns());
std::unique_ptr<PlanStage> root = _indexScan(expCtx,
ws.get(),
- &collection,
+ &collectionPtr,
descriptor,
startKey,
endKey,
@@ -347,17 +347,17 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWith
std::move(params),
std::move(batchedDeleteParams),
ws.get(),
- collection,
+ coll,
root.release());
} else {
root = std::make_unique<DeleteStage>(
- expCtx.get(), std::move(params), ws.get(), collection, root.release());
+ expCtx.get(), std::move(params), ws.get(), coll, root.release());
}
auto executor = plan_executor_factory::make(expCtx,
std::move(ws),
std::move(root),
- &collection,
+ &coll,
yieldPolicy,
false /* whether owned BSON must be returned */
);
@@ -394,7 +394,7 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::shardKeyIn
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWithShardKeyIndexScan(
OperationContext* opCtx,
- const CollectionPtr* coll,
+ const ScopedCollectionAcquisition& coll,
std::unique_ptr<DeleteStageParams> params,
const ShardKeyIndex& shardKeyIdx,
const BSONObj& startKey,
@@ -413,25 +413,30 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWith
yieldPolicy,
direction);
}
- auto collectionScanParams = convertIndexScanParamsToCollScanParams(
- opCtx, coll, shardKeyIdx.keyPattern(), startKey, endKey, boundInclusion, direction);
-
- const auto& collection = *coll;
- invariant(collection);
+ auto collectionScanParams = convertIndexScanParamsToCollScanParams(opCtx,
+ &coll.getCollectionPtr(),
+ shardKeyIdx.keyPattern(),
+ startKey,
+ endKey,
+ boundInclusion,
+ direction);
+
+ const auto& collectionPtr = coll.getCollectionPtr();
+ invariant(collectionPtr);
std::unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
auto expCtx = make_intrusive<ExpressionContext>(
- opCtx, std::unique_ptr<CollatorInterface>(nullptr), collection->ns());
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), collectionPtr->ns());
- auto root = _collectionScan(expCtx, ws.get(), &collection, collectionScanParams);
+ auto root = _collectionScan(expCtx, ws.get(), &collectionPtr, collectionScanParams);
root = std::make_unique<DeleteStage>(
- expCtx.get(), std::move(params), ws.get(), collection, root.release());
+ expCtx.get(), std::move(params), ws.get(), coll, root.release());
auto executor = plan_executor_factory::make(expCtx,
std::move(ws),
std::move(root),
- &collection,
+ &coll,
yieldPolicy,
false /* whether owned BSON must be returned */
);
@@ -441,19 +446,12 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWith
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::updateWithIdHack(
OperationContext* opCtx,
- stdx::variant<const CollectionPtr*, const ScopedCollectionAcquisition*> coll,
+ VariantCollectionPtrOrAcquisition coll,
const UpdateStageParams& params,
const IndexDescriptor* descriptor,
const BSONObj& key,
PlanYieldPolicy::YieldPolicy yieldPolicy) {
- const auto& collectionPtr =
- *stdx::visit(OverloadedVisitor{
- [](const CollectionPtr* collectionPtr) { return collectionPtr; },
- [](const ScopedCollectionAcquisition* collectionAcquisition) {
- return &collectionAcquisition->getCollectionPtr();
- },
- },
- coll);
+ const auto& collectionPtr = coll.getCollectionPtr();
invariant(collectionPtr);
auto ws = std::make_unique<WorkingSet>();
diff --git a/src/mongo/db/query/internal_plans.h b/src/mongo/db/query/internal_plans.h
index e9a17a4bb33..96bfbdd3da7 100644
--- a/src/mongo/db/query/internal_plans.h
+++ b/src/mongo/db/query/internal_plans.h
@@ -37,6 +37,7 @@
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/record_id.h"
#include "mongo/db/s/shard_key_index_util.h"
+#include "mongo/db/shard_role.h"
namespace mongo {
@@ -77,7 +78,7 @@ public:
*/
static std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> collectionScan(
OperationContext* opCtx,
- const CollectionPtr* collection,
+ VariantCollectionPtrOrAcquisition collection,
PlanYieldPolicy::YieldPolicy yieldPolicy,
Direction direction = FORWARD,
const boost::optional<RecordId>& resumeAfterRecordId = boost::none,
@@ -99,7 +100,7 @@ public:
*/
static std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> deleteWithCollectionScan(
OperationContext* opCtx,
- const CollectionPtr* collection,
+ const ScopedCollectionAcquisition& collection,
std::unique_ptr<DeleteStageParams> deleteStageParams,
PlanYieldPolicy::YieldPolicy yieldPolicy,
Direction direction = FORWARD,
@@ -131,7 +132,7 @@ public:
*/
static std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> deleteWithIndexScan(
OperationContext* opCtx,
- const CollectionPtr* collection,
+ const ScopedCollectionAcquisition& collection,
std::unique_ptr<DeleteStageParams> params,
const IndexDescriptor* descriptor,
const BSONObj& startKey,
@@ -165,7 +166,7 @@ public:
*/
static std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> deleteWithShardKeyIndexScan(
OperationContext* opCtx,
- const CollectionPtr* collection,
+ const ScopedCollectionAcquisition& collection,
std::unique_ptr<DeleteStageParams> params,
const ShardKeyIndex& shardKeyIdx,
const BSONObj& startKey,
@@ -179,7 +180,7 @@ public:
*/
static std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> updateWithIdHack(
OperationContext* opCtx,
- stdx::variant<const CollectionPtr*, const ScopedCollectionAcquisition*> collection,
+ VariantCollectionPtrOrAcquisition collection,
const UpdateStageParams& params,
const IndexDescriptor* descriptor,
const BSONObj& key,
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index 99b2fd8fefa..abf05440b9d 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -30,6 +30,7 @@
#include "mongo/platform/basic.h"
#include "mongo/db/query/plan_executor.h"
+#include "mongo/db/shard_role.h"
#include "mongo/util/fail_point.h"
@@ -59,4 +60,14 @@ void PlanExecutor::checkFailPointPlanExecAlwaysFails() {
}
}
+const CollectionPtr& VariantCollectionPtrOrAcquisition::getCollectionPtr() const {
+ return *stdx::visit(OverloadedVisitor{
+ [](const CollectionPtr* collectionPtr) { return collectionPtr; },
+ [](const ScopedCollectionAcquisition* collectionAcquisition) {
+ return &collectionAcquisition->getCollectionPtr();
+ },
+ },
+ _collectionPtrOrAcquisition);
+}
+
} // namespace mongo
diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h
index 77d6a8c4935..2d760e7f5f0 100644
--- a/src/mongo/db/query/plan_executor.h
+++ b/src/mongo/db/query/plan_executor.h
@@ -41,9 +41,30 @@
namespace mongo {
+class CollectionPtr;
class BSONObj;
class PlanStage;
class RecordId;
+class ScopedCollectionAcquisition;
+
+// TODO: SERVER-76397 Remove this once we use ScopedCollectionAcquisition everywhere.
+class VariantCollectionPtrOrAcquisition {
+public:
+ VariantCollectionPtrOrAcquisition(const CollectionPtr* collectionPtr)
+ : _collectionPtrOrAcquisition(collectionPtr) {}
+ VariantCollectionPtrOrAcquisition(const ScopedCollectionAcquisition* collection)
+ : _collectionPtrOrAcquisition(collection) {}
+
+ const stdx::variant<const CollectionPtr*, const ScopedCollectionAcquisition*>& get() {
+ return _collectionPtrOrAcquisition;
+ };
+
+ const CollectionPtr& getCollectionPtr() const;
+
+private:
+ stdx::variant<const CollectionPtr*, const ScopedCollectionAcquisition*>
+ _collectionPtrOrAcquisition;
+};
/**
* If a getMore command specified a lastKnownCommittedOpTime (as secondaries do), we want to stop
diff --git a/src/mongo/db/query/plan_executor_factory.cpp b/src/mongo/db/query/plan_executor_factory.cpp
index 68dedc87795..1a20a44f9eb 100644
--- a/src/mongo/db/query/plan_executor_factory.cpp
+++ b/src/mongo/db/query/plan_executor_factory.cpp
@@ -51,7 +51,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> make(
std::unique_ptr<CanonicalQuery> cq,
std::unique_ptr<WorkingSet> ws,
std::unique_ptr<PlanStage> rt,
- stdx::variant<const CollectionPtr*, const ScopedCollectionAcquisition*> collection,
+ VariantCollectionPtrOrAcquisition collection,
PlanYieldPolicy::YieldPolicy yieldPolicy,
size_t plannerOptions,
NamespaceString nss,
@@ -74,7 +74,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> make(
const boost::intrusive_ptr<ExpressionContext>& expCtx,
std::unique_ptr<WorkingSet> ws,
std::unique_ptr<PlanStage> rt,
- stdx::variant<const CollectionPtr*, const ScopedCollectionAcquisition*> collection,
+ VariantCollectionPtrOrAcquisition collection,
PlanYieldPolicy::YieldPolicy yieldPolicy,
size_t plannerOptions,
NamespaceString nss,
@@ -99,11 +99,11 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> make(
std::unique_ptr<QuerySolution> qs,
std::unique_ptr<CanonicalQuery> cq,
const boost::intrusive_ptr<ExpressionContext>& expCtx,
- stdx::variant<const CollectionPtr*, const ScopedCollectionAcquisition*> collection,
+ VariantCollectionPtrOrAcquisition collection,
size_t plannerOptions,
NamespaceString nss,
PlanYieldPolicy::YieldPolicy yieldPolicy) {
- stdx::visit([](const auto& ptr) { dassert(ptr); }, collection);
+ stdx::visit([](const auto& ptr) { dassert(ptr); }, collection.get());
try {
auto execImpl = new PlanExecutorImpl(opCtx,
diff --git a/src/mongo/db/query/plan_executor_factory.h b/src/mongo/db/query/plan_executor_factory.h
index 9970a47015b..6ea6bbe473d 100644
--- a/src/mongo/db/query/plan_executor_factory.h
+++ b/src/mongo/db/query/plan_executor_factory.h
@@ -72,7 +72,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> make(
std::unique_ptr<CanonicalQuery> cq,
std::unique_ptr<WorkingSet> ws,
std::unique_ptr<PlanStage> rt,
- stdx::variant<const CollectionPtr*, const ScopedCollectionAcquisition*> collection,
+ VariantCollectionPtrOrAcquisition collection,
PlanYieldPolicy::YieldPolicy yieldPolicy,
size_t plannerOptions,
NamespaceString nss = NamespaceString(),
@@ -89,7 +89,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> make(
const boost::intrusive_ptr<ExpressionContext>& expCtx,
std::unique_ptr<WorkingSet> ws,
std::unique_ptr<PlanStage> rt,
- stdx::variant<const CollectionPtr*, const ScopedCollectionAcquisition*> collection,
+ VariantCollectionPtrOrAcquisition collection,
PlanYieldPolicy::YieldPolicy yieldPolicy,
size_t plannerOptions,
NamespaceString nss = NamespaceString(),
@@ -102,7 +102,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> make(
std::unique_ptr<QuerySolution> qs,
std::unique_ptr<CanonicalQuery> cq,
const boost::intrusive_ptr<ExpressionContext>& expCtx,
- stdx::variant<const CollectionPtr*, const ScopedCollectionAcquisition*> collection,
+ VariantCollectionPtrOrAcquisition collection,
size_t plannerOptions,
NamespaceString nss,
PlanYieldPolicy::YieldPolicy yieldPolicy);
diff --git a/src/mongo/db/query/plan_executor_impl.cpp b/src/mongo/db/query/plan_executor_impl.cpp
index bdab2f383fd..e9ff3ad0c7f 100644
--- a/src/mongo/db/query/plan_executor_impl.cpp
+++ b/src/mongo/db/query/plan_executor_impl.cpp
@@ -118,17 +118,16 @@ std::unique_ptr<PlanYieldPolicy> makeYieldPolicy(
}
} // namespace
-PlanExecutorImpl::PlanExecutorImpl(
- OperationContext* opCtx,
- unique_ptr<WorkingSet> ws,
- unique_ptr<PlanStage> rt,
- unique_ptr<QuerySolution> qs,
- unique_ptr<CanonicalQuery> cq,
- const boost::intrusive_ptr<ExpressionContext>& expCtx,
- stdx::variant<const CollectionPtr*, const ScopedCollectionAcquisition*> collection,
- bool returnOwnedBson,
- NamespaceString nss,
- PlanYieldPolicy::YieldPolicy yieldPolicy)
+PlanExecutorImpl::PlanExecutorImpl(OperationContext* opCtx,
+ unique_ptr<WorkingSet> ws,
+ unique_ptr<PlanStage> rt,
+ unique_ptr<QuerySolution> qs,
+ unique_ptr<CanonicalQuery> cq,
+ const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ VariantCollectionPtrOrAcquisition collection,
+ bool returnOwnedBson,
+ NamespaceString nss,
+ PlanYieldPolicy::YieldPolicy yieldPolicy)
: _opCtx(opCtx),
_cq(std::move(cq)),
_expCtx(_cq ? _cq->getExpCtx() : expCtx),
@@ -141,12 +140,7 @@ PlanExecutorImpl::PlanExecutorImpl(
invariant(!_expCtx || _expCtx->opCtx == _opCtx);
invariant(!_cq || !_expCtx || _cq->getExpCtx() == _expCtx);
- const CollectionPtr* collectionPtr =
- stdx::visit(OverloadedVisitor{[](const CollectionPtr* coll) { return coll; },
- [](const ScopedCollectionAcquisition* coll) {
- return &coll->getCollectionPtr();
- }},
- collection);
+ const CollectionPtr* collectionPtr = &collection.getCollectionPtr();
invariant(collectionPtr);
const bool collectionExists = static_cast<bool>(*collectionPtr);
@@ -175,7 +169,7 @@ PlanExecutorImpl::PlanExecutorImpl(
PlanYieldPolicy::YieldThroughAcquisitions>(
PlanYieldPolicy::YieldThroughAcquisitions{});
}},
- collection);
+ collection.get());
_yieldPolicy = makeYieldPolicy(
this, collectionExists ? yieldPolicy : PlanYieldPolicy::YieldPolicy::NO_YIELD, yieldable);
diff --git a/src/mongo/db/query/plan_executor_impl.h b/src/mongo/db/query/plan_executor_impl.h
index 3cfd4150279..51b21acb59b 100644
--- a/src/mongo/db/query/plan_executor_impl.h
+++ b/src/mongo/db/query/plan_executor_impl.h
@@ -39,11 +39,10 @@
#include "mongo/db/exec/working_set.h"
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/query/query_solution.h"
+#include "mongo/db/shard_role.h"
namespace mongo {
-class ScopedCollectionAcquisition;
-
/**
* Query execution helper. Runs the argument function 'f'. If 'f' throws an exception other than
* 'WriteConflictException' or 'TemporarilyUnavailableException', then these exceptions escape
@@ -91,17 +90,16 @@ public:
* order to avoid depending directly on this concrete implementation of the PlanExecutor
* interface.
*/
- PlanExecutorImpl(
- OperationContext* opCtx,
- std::unique_ptr<WorkingSet> ws,
- std::unique_ptr<PlanStage> rt,
- std::unique_ptr<QuerySolution> qs,
- std::unique_ptr<CanonicalQuery> cq,
- const boost::intrusive_ptr<ExpressionContext>& expCtx,
- stdx::variant<const CollectionPtr*, const ScopedCollectionAcquisition*> collection,
- bool returnOwnedBson,
- NamespaceString nss,
- PlanYieldPolicy::YieldPolicy yieldPolicy);
+ PlanExecutorImpl(OperationContext* opCtx,
+ std::unique_ptr<WorkingSet> ws,
+ std::unique_ptr<PlanStage> rt,
+ std::unique_ptr<QuerySolution> qs,
+ std::unique_ptr<CanonicalQuery> cq,
+ const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ VariantCollectionPtrOrAcquisition collection,
+ bool returnOwnedBson,
+ NamespaceString nss,
+ PlanYieldPolicy::YieldPolicy yieldPolicy);
virtual ~PlanExecutorImpl();
CanonicalQuery* getCanonicalQuery() const final;
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index a2246464c1c..33f241fab53 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -47,6 +47,7 @@
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/service_context.h"
#include "mongo/db/session/session_catalog_mongod.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/transaction/transaction_participant.h"
#include "mongo/logv2/log.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -149,9 +150,14 @@ Status _applyOps(OperationContext* opCtx,
}
}
- AutoGetCollection autoColl(
- opCtx, nss, fixLockModeForSystemDotViewsChanges(nss, MODE_IX));
- if (!autoColl.getCollection()) {
+ const auto collection = acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest(nss,
+ AcquisitionPrerequisites::kPretendUnsharded,
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ fixLockModeForSystemDotViewsChanges(nss, MODE_IX));
+ if (!collection.exists()) {
// For idempotency reasons, return success on delete operations.
if (*opType == 'd') {
return Status::OK();
@@ -172,6 +178,7 @@ Status _applyOps(OperationContext* opCtx,
const bool isDataConsistent = true;
return repl::applyOperation_inlock(opCtx,
ctx.db(),
+ collection,
ApplierOperation{&entry},
alwaysUpsert,
oplogApplicationMode,
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 41f0d74072f..75cb146b6fd 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -93,6 +93,7 @@
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/s/sharding_index_catalog_ddl_util.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/stats/counters.h"
#include "mongo/db/stats/server_write_concern_metrics.h"
#include "mongo/db/storage/storage_engine.h"
@@ -1474,6 +1475,7 @@ void logOplogConstraintViolation(OperationContext* opCtx,
// See replset initial sync code.
Status applyOperation_inlock(OperationContext* opCtx,
Database* db,
+ const ScopedCollectionAcquisition& collectionAcquisition,
const OplogEntryOrGroupedInserts& opOrGroupedInserts,
bool alwaysUpsert,
OplogApplication::Mode mode,
@@ -1506,10 +1508,9 @@ Status applyOperation_inlock(OperationContext* opCtx,
}
NamespaceString requestNss;
- CollectionPtr collection;
if (auto uuid = op.getUuid()) {
auto catalog = CollectionCatalog::get(opCtx);
- collection = CollectionPtr(catalog->lookupCollectionByUUID(opCtx, uuid.value()));
+ const auto collection = CollectionPtr(catalog->lookupCollectionByUUID(opCtx, uuid.value()));
// Invalidate the image collection if collectionUUID does not resolve and this op returns
// a preimage or postimage. We only expect this to happen when in kInitialSync mode but
// this can sometimes occur in kRecovering mode during rollback-via-refetch. In either case
@@ -1538,16 +1539,17 @@ Status applyOperation_inlock(OperationContext* opCtx,
<< uuid.value() << "): " << redact(opOrGroupedInserts.toBSON()),
collection);
requestNss = collection->ns();
+ dassert(requestNss == collectionAcquisition.nss());
dassert(opCtx->lockState()->isCollectionLockedForMode(requestNss, MODE_IX));
} else {
requestNss = op.getNss();
invariant(requestNss.coll().size());
dassert(opCtx->lockState()->isCollectionLockedForMode(requestNss, MODE_IX),
requestNss.toStringForErrorMsg());
- collection = CollectionPtr(
- CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, requestNss));
}
+ const CollectionPtr& collection = collectionAcquisition.getCollectionPtr();
+
assertInitialSyncCanContinueDuringShardMerge(opCtx, requestNss, op);
BSONObj o = op.getObject();
@@ -2105,7 +2107,7 @@ Status applyOperation_inlock(OperationContext* opCtx,
request.setReturnDeleted(true);
}
- DeleteResult result = deleteObject(opCtx, collection, request);
+ DeleteResult result = deleteObject(opCtx, collectionAcquisition, request);
if (op.getNeedsRetryImage()) {
// Even if `result.nDeleted` is 0, we want to perform a write to the
// imageCollection to advance the txnNumber/ts and invalidate the image. This
@@ -2224,7 +2226,7 @@ Status applyOperation_inlock(OperationContext* opCtx,
global_index::deleteKey(
opCtx,
- collection,
+ collectionAcquisition,
op.getObject().getObjectField(global_index::kOplogEntryIndexKeyFieldName),
op.getObject().getObjectField(global_index::kOplogEntryDocKeyFieldName));
diff --git a/src/mongo/db/repl/oplog.h b/src/mongo/db/repl/oplog.h
index c94c562f1b0..c95902e8373 100644
--- a/src/mongo/db/repl/oplog.h
+++ b/src/mongo/db/repl/oplog.h
@@ -52,6 +52,7 @@ class Database;
class NamespaceString;
class OperationContext;
class OperationSessionInfo;
+class ScopedCollectionAcquisition;
class Session;
using OplogSlot = repl::OpTime;
@@ -224,6 +225,7 @@ void logOplogConstraintViolation(OperationContext* opCtx,
*/
Status applyOperation_inlock(OperationContext* opCtx,
Database* db,
+ const ScopedCollectionAcquisition& collectionAcquisition,
const OplogEntryOrGroupedInserts& opOrGroupedInserts,
bool alwaysUpsert,
OplogApplication::Mode mode,
diff --git a/src/mongo/db/repl/oplog_applier_utils.cpp b/src/mongo/db/repl/oplog_applier_utils.cpp
index 05c4ef0ad14..18aa2da962f 100644
--- a/src/mongo/db/repl/oplog_applier_utils.cpp
+++ b/src/mongo/db/repl/oplog_applier_utils.cpp
@@ -44,6 +44,7 @@
#include "mongo/db/repl/oplog_applier_utils.h"
#include "mongo/db/repl/repl_server_parameters_gen.h"
#include "mongo/db/server_feature_flags_gen.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/stats/counters.h"
#include "mongo/util/fail_point.h"
@@ -409,7 +410,7 @@ Status OplogApplierUtils::applyOplogEntryOrGroupedInsertsCommon(
writeConflictRetry(opCtx, "applyOplogEntryOrGroupedInserts_CRUD", nss.ns(), [&] {
// Need to throw instead of returning a status for it to be properly ignored.
try {
- boost::optional<AutoGetCollection> autoColl;
+ boost::optional<ScopedCollectionAcquisition> coll;
Database* db = nullptr;
// If the collection UUID does not resolve, acquire the collection using the
@@ -420,25 +421,39 @@ Status OplogApplierUtils::applyOplogEntryOrGroupedInsertsCommon(
// needs to be done everywhere this situation is possible. We should try
// to consolidate this into applyOperation_inlock.
try {
- autoColl.emplace(opCtx,
- getNsOrUUID(nss, *op),
- fixLockModeForSystemDotViewsChanges(nss, MODE_IX));
- db = autoColl->getDb();
+ coll.emplace(
+ acquireCollection(opCtx,
+ {getNsOrUUID(nss, *op),
+ AcquisitionPrerequisites::kPretendUnsharded,
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite},
+ fixLockModeForSystemDotViewsChanges(nss, MODE_IX)));
+
+ AutoGetDb autoDb(opCtx, coll->nss().dbName(), MODE_IX);
+ db = autoDb.getDb();
} catch (ExceptionFor<ErrorCodes::NamespaceNotFound>& ex) {
if (!isDataConsistent) {
- autoColl.emplace(
- opCtx, nss, fixLockModeForSystemDotViewsChanges(nss, MODE_IX));
- db = autoColl->ensureDbExists(opCtx);
+ coll.emplace(acquireCollection(
+ opCtx,
+ {nss,
+ AcquisitionPrerequisites::kPretendUnsharded,
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite},
+ fixLockModeForSystemDotViewsChanges(nss, MODE_IX)));
+
+ AutoGetDb autoDb(opCtx, coll->nss().dbName(), MODE_IX);
+ db = autoDb.ensureDbExists(opCtx);
} else {
throw ex;
}
}
+ invariant(coll);
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "missing database ("
<< nss.dbName().toStringForErrorMsg() << ")",
db);
- OldClientContext ctx(opCtx, autoColl->getNss(), db);
+ OldClientContext ctx(opCtx, coll->nss(), db);
// We convert updates to upserts in secondary mode when the
// oplogApplicationEnforcesSteadyStateConstraints parameter is false, to avoid
@@ -452,6 +467,7 @@ Status OplogApplierUtils::applyOplogEntryOrGroupedInsertsCommon(
oplogApplicationMode == OplogApplication::Mode::kSecondary;
Status status = applyOperation_inlock(opCtx,
db,
+ *coll,
entryOrGroupedInserts,
shouldAlwaysUpsert,
oplogApplicationMode,
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index eea24c1ce0e..427c05f6dbd 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -93,6 +93,7 @@
#include "mongo/db/service_context.h"
#include "mongo/db/session/kill_sessions_local.h"
#include "mongo/db/session/session_catalog_mongod.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/storage/control/journal_flusher.h"
#include "mongo/db/storage/flow_control.h"
#include "mongo/db/storage/storage_engine.h"
@@ -621,8 +622,15 @@ Status ReplicationCoordinatorExternalStateImpl::replaceLocalConfigDocument(
writeConflictRetry(
opCtx, "replace replica set config", NamespaceString::kSystemReplSetNamespace.ns(), [&] {
WriteUnitOfWork wuow(opCtx);
- AutoGetCollection coll(opCtx, NamespaceString::kSystemReplSetNamespace, MODE_X);
- Helpers::emptyCollection(opCtx, NamespaceString::kSystemReplSetNamespace);
+ const auto coll =
+ acquireCollection(opCtx,
+ CollectionAcquisitionRequest(
+ NamespaceString(NamespaceString::kSystemReplSetNamespace),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ MODE_X);
+ Helpers::emptyCollection(opCtx, coll);
Helpers::putSingleton(opCtx, NamespaceString::kSystemReplSetNamespace, config);
wuow.commit();
});
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index a88ee9c48b9..18d50cf69ac 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -70,6 +70,7 @@
#include "mongo/db/s/shard_identity_rollback_notifier.h"
#include "mongo/db/session/logical_session_id.h"
#include "mongo/db/session/session_catalog_mongod.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/storage/control/journal_flusher.h"
#include "mongo/db/storage/remove_saver.h"
#include "mongo/db/transaction/transaction_participant.h"
@@ -1623,7 +1624,12 @@ void syncFixUp(OperationContext* opCtx,
const NamespaceString docNss(doc.ns);
Lock::DBLock docDbLock(opCtx, docNss.dbName(), MODE_X);
OldClientContext ctx(opCtx, docNss);
- CollectionWriter collection(opCtx, uuid);
+ auto collection = acquireCollection(opCtx,
+ {NamespaceStringOrUUID(docNss.dbName(), uuid),
+ AcquisitionPrerequisites::kPretendUnsharded,
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite},
+ MODE_X);
// Adds the doc to our rollback file if the collection was not dropped while
// rolling back createCollection operations. Does not log an error when
@@ -1631,9 +1637,10 @@ void syncFixUp(OperationContext* opCtx,
// the collection was dropped as part of rolling back a createCollection
// command and the document no longer exists.
- if (collection && removeSaver) {
+ if (collection.exists() && removeSaver) {
BSONObj obj;
- bool found = Helpers::findOne(opCtx, collection.get(), pattern, obj);
+ bool found =
+ Helpers::findOne(opCtx, collection.getCollectionPtr(), pattern, obj);
if (found) {
auto status = removeSaver->goingToDelete(obj);
if (!status.isOK()) {
@@ -1672,8 +1679,8 @@ void syncFixUp(OperationContext* opCtx,
// here.
deletes++;
- if (collection) {
- if (collection->isCapped()) {
+ if (collection.exists()) {
+ if (collection.getCollectionPtr()->isCapped()) {
// Can't delete from a capped collection - so we truncate instead.
// if this item must go, so must all successors.
@@ -1684,7 +1691,8 @@ void syncFixUp(OperationContext* opCtx,
const auto clock = opCtx->getServiceContext()->getFastClockSource();
const auto findOneStart = clock->now();
- RecordId loc = Helpers::findOne(opCtx, collection.get(), pattern);
+ RecordId loc =
+ Helpers::findOne(opCtx, collection.getCollectionPtr(), pattern);
if (clock->now() - findOneStart > Milliseconds(200))
LOGV2_WARNING(
21726,
@@ -1698,19 +1706,24 @@ void syncFixUp(OperationContext* opCtx,
writeConflictRetry(
opCtx,
"cappedTruncateAfter",
- collection->ns().ns(),
+ collection.nss().ns(),
[&] {
collection_internal::cappedTruncateAfter(
- opCtx, collection.get(), loc, true);
+ opCtx,
+ collection.getCollectionPtr(),
+ loc,
+ true);
});
} catch (const DBException& e) {
if (e.code() == 13415) {
// hack: need to just make cappedTruncate do this...
+ CollectionWriter collectionWriter(opCtx, &collection);
writeConflictRetry(
- opCtx, "truncate", collection->ns().ns(), [&] {
+ opCtx, "truncate", collection.nss().ns(), [&] {
WriteUnitOfWork wunit(opCtx);
uassertStatusOK(
- collection.getWritableCollection(opCtx)
+ collectionWriter
+ .getWritableCollection(opCtx)
->truncate(opCtx));
wunit.commit();
});
@@ -1737,8 +1750,7 @@ void syncFixUp(OperationContext* opCtx,
}
} else {
deleteObjects(opCtx,
- collection.get(),
- *nss,
+ collection,
pattern,
true, // justOne
true); // god
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index a84e0aef113..01e9a24cbea 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -74,6 +74,7 @@
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/repl/rollback_gen.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/storage/checkpointer.h"
#include "mongo/db/storage/control/journal_flusher.h"
#include "mongo/db/storage/control/storage_control.h"
@@ -675,13 +676,17 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
using Result = StatusWith<std::vector<BSONObj>>;
auto collectionAccessMode = isFind ? MODE_IS : MODE_IX;
- AutoGetCollection autoColl(opCtx, nsOrUUID, collectionAccessMode);
- auto collectionResult = getCollection(
- autoColl, nsOrUUID, str::stream() << "Unable to proceed with " << opStr << ".");
- if (!collectionResult.isOK()) {
- return Result(collectionResult.getStatus());
+ const auto collection =
+ acquireCollection(opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(
+ opCtx, nsOrUUID, AcquisitionPrerequisites::kWrite),
+ collectionAccessMode);
+ if (!collection.exists()) {
+ return Status{ErrorCodes::NamespaceNotFound,
+ str::stream()
+ << "Collection [" << nsOrUUID.toString() << "] not found. "
+ << "Unable to proceed with " << opStr << "."};
}
- const auto& collection = *collectionResult.getValue();
auto isForward = scanDirection == StorageInterface::ScanDirection::kForward;
auto direction = isForward ? InternalPlanner::FORWARD : InternalPlanner::BACKWARD;
@@ -700,16 +705,19 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
}
// Use collection scan.
planExecutor = isFind
- ? InternalPlanner::collectionScan(
- opCtx, &collection, PlanYieldPolicy::YieldPolicy::NO_YIELD, direction)
+ ? InternalPlanner::collectionScan(opCtx,
+ &collection.getCollectionPtr(),
+ PlanYieldPolicy::YieldPolicy::NO_YIELD,
+ direction)
: InternalPlanner::deleteWithCollectionScan(
opCtx,
- &collection,
+ collection,
makeDeleteStageParamsForDeleteDocuments(),
PlanYieldPolicy::YieldPolicy::NO_YIELD,
direction);
- } else if (*indexName == kIdIndexName && collection->isClustered() &&
- collection->getClusteredInfo()
+ } else if (*indexName == kIdIndexName && collection.getCollectionPtr()->isClustered() &&
+ collection.getCollectionPtr()
+ ->getClusteredInfo()
->getIndexSpec()
.getKey()
.firstElement()
@@ -752,7 +760,7 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
planExecutor = isFind
? InternalPlanner::collectionScan(opCtx,
- &collection,
+ &collection.getCollectionPtr(),
PlanYieldPolicy::YieldPolicy::NO_YIELD,
direction,
boost::none /* resumeAfterId */,
@@ -761,7 +769,7 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
collScanBoundInclusion)
: InternalPlanner::deleteWithCollectionScan(
opCtx,
- &collection,
+ collection,
makeDeleteStageParamsForDeleteDocuments(),
PlanYieldPolicy::YieldPolicy::NO_YIELD,
direction,
@@ -770,7 +778,7 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
collScanBoundInclusion);
} else {
// Use index scan.
- auto indexCatalog = collection->getIndexCatalog();
+ auto indexCatalog = collection.getCollectionPtr()->getIndexCatalog();
invariant(indexCatalog);
const IndexDescriptor* indexDescriptor = indexCatalog->findIndexByName(
opCtx, *indexName, IndexCatalog::InclusionPolicy::kReady);
@@ -801,7 +809,7 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
}
planExecutor = isFind
? InternalPlanner::indexScan(opCtx,
- &collection,
+ &collection.getCollectionPtr(),
indexDescriptor,
bounds.first,
bounds.second,
@@ -811,7 +819,7 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
InternalPlanner::IXSCAN_FETCH)
: InternalPlanner::deleteWithIndexScan(
opCtx,
- &collection,
+ collection,
makeDeleteStageParamsForDeleteDocuments(),
indexDescriptor,
bounds.first,
@@ -1135,28 +1143,28 @@ Status StorageInterfaceImpl::deleteByFilter(OperationContext* opCtx,
request.setGod(true);
return writeConflictRetry(opCtx, "StorageInterfaceImpl::deleteByFilter", nss.ns(), [&] {
- AutoGetCollection autoColl(opCtx, nss, MODE_IX);
- auto collectionResult =
- getCollection(autoColl,
- nss,
- str::stream() << "Unable to delete documents in "
- << nss.toStringForErrorMsg() << " using filter " << filter);
- if (!collectionResult.isOK()) {
- return collectionResult.getStatus();
+ const auto collection = acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(opCtx, nss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ if (!collection.exists()) {
+ return Status{ErrorCodes::NamespaceNotFound,
+ str::stream() << "Collection [" << nss.toString() << "] not found. "
+ << "Unable to delete documents in "
+ << nss.toStringForErrorMsg() << " using filter " << filter};
}
- const auto& collection = *collectionResult.getValue();
// ParsedDelete needs to be inside the write conflict retry loop because it may create a
// CanonicalQuery whose ownership will be transferred to the plan executor in
// getExecutorDelete().
- ParsedDelete parsedDelete(opCtx, &request, collection);
+ ParsedDelete parsedDelete(opCtx, &request, collection.getCollectionPtr());
auto parsedDeleteStatus = parsedDelete.parseRequest();
if (!parsedDeleteStatus.isOK()) {
return parsedDeleteStatus;
}
auto planExecutorResult = mongo::getExecutorDelete(
- nullptr, &collection, &parsedDelete, boost::none /* verbosity */);
+ nullptr, collection, &parsedDelete, boost::none /* verbosity */);
if (!planExecutorResult.isOK()) {
return planExecutorResult.getStatus();
}
diff --git a/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp b/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp
index 634a769a43c..22f096f396c 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp
@@ -42,6 +42,7 @@
#include "mongo/db/ops/update_request.h"
#include "mongo/db/repl/tenant_migration_recipient_entry_helpers.h"
#include "mongo/db/repl/tenant_migration_state_machine_gen.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/storage/write_unit_of_work.h"
#include "mongo/util/str.h"
@@ -116,9 +117,15 @@ Status updateStateDoc(OperationContext* opCtx, const TenantMigrationRecipientDoc
StatusWith<bool> deleteStateDocIfMarkedAsGarbageCollectable(OperationContext* opCtx,
StringData tenantId) {
const auto nss = NamespaceString::kTenantMigrationRecipientsNamespace;
- AutoGetCollection collection(opCtx, nss, MODE_IX);
-
- if (!collection) {
+ const auto collection = acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest(NamespaceString(nss),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+
+ if (!collection.exists()) {
return Status(ErrorCodes::NamespaceNotFound,
str::stream() << nss.toStringForErrorMsg() << " does not exist");
}
@@ -128,8 +135,7 @@ StatusWith<bool> deleteStateDocIfMarkedAsGarbageCollectable(OperationContext* op
<< BSON("$exists" << 1));
return writeConflictRetry(
opCtx, "deleteTenantMigrationRecipientStateDoc", nss.ns(), [&]() -> bool {
- auto nDeleted =
- deleteObjects(opCtx, collection.getCollection(), nss, query, true /* justOne */);
+ auto nDeleted = deleteObjects(opCtx, collection, query, true /* justOne */);
return nDeleted > 0;
});
}
diff --git a/src/mongo/db/repl/transaction_oplog_application.cpp b/src/mongo/db/repl/transaction_oplog_application.cpp
index 2c2b05c84d7..01ad3f0aa27 100644
--- a/src/mongo/db/repl/transaction_oplog_application.cpp
+++ b/src/mongo/db/repl/transaction_oplog_application.cpp
@@ -42,6 +42,7 @@
#include "mongo/db/repl/storage_interface_impl.h"
#include "mongo/db/repl/timestamp_block.h"
#include "mongo/db/session/session_catalog_mongod.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/transaction/transaction_history_iterator.h"
#include "mongo/db/transaction/transaction_participant.h"
#include "mongo/logv2/log.h"
@@ -123,10 +124,21 @@ Status _applyOperationsForTransaction(OperationContext* opCtx,
// Presently, it is not allowed to run a prepared transaction with a command
// inside. TODO(SERVER-46105)
invariant(!op.isCommand());
- AutoGetCollection coll(opCtx, op.getNss(), MODE_IX);
+ const auto coll = acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest(op.getNss(),
+ AcquisitionPrerequisites::kPretendUnsharded,
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ const auto db = [opCtx, &coll]() {
+ AutoGetDb autoDb(opCtx, coll.nss().dbName(), MODE_IX);
+ return autoDb.getDb();
+ }();
const bool isDataConsistent = true;
auto status = repl::applyOperation_inlock(opCtx,
- coll.getDb(),
+ db,
+ coll,
ApplierOperation{&op},
false /*alwaysUpsert*/,
oplogApplicationMode,
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 6864363bc64..47558dbb88a 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -70,6 +70,7 @@
#include "mongo/db/service_context.h"
#include "mongo/db/session/logical_session_id_helpers.h"
#include "mongo/db/session/session_catalog_mongod.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/storage/remove_saver.h"
#include "mongo/db/transaction/transaction_participant.h"
#include "mongo/db/vector_clock.h"
@@ -1683,11 +1684,17 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx, const
BSONObjIterator i(xfer["deleted"].Obj());
while (i.more()) {
totalDocs++;
- AutoGetCollection autoColl(opCtx, _nss, MODE_IX);
+ const auto collection = acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest(_nss,
+ AcquisitionPrerequisites::kPretendUnsharded,
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ MODE_IX);
uassert(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Collection " << _nss.toStringForErrorMsg()
<< " was dropped in the middle of the migration",
- autoColl.getCollection());
+ collection.exists());
BSONObj id = i.next().Obj();
@@ -1708,8 +1715,7 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx, const
writeConflictRetry(opCtx, "transferModsDeletes", _nss.ns(), [&] {
deleteObjects(opCtx,
- autoColl.getCollection(),
- _nss,
+ collection,
id,
true /* justOne */,
false /* god */,
diff --git a/src/mongo/db/s/range_deletion_util.cpp b/src/mongo/db/s/range_deletion_util.cpp
index 65513cf225f..212664c4ebb 100644
--- a/src/mongo/db/s/range_deletion_util.cpp
+++ b/src/mongo/db/s/range_deletion_util.cpp
@@ -33,7 +33,6 @@
#include <boost/optional.hpp>
#include <utility>
-#include "mongo/db/catalog_raii.h"
#include "mongo/db/client.h"
#include "mongo/db/concurrency/exception_util.h"
#include "mongo/db/dbhelpers.h"
@@ -54,6 +53,7 @@
#include "mongo/db/s/sharding_runtime_d_params_gen.h"
#include "mongo/db/s/sharding_statistics.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/storage/remove_saver.h"
#include "mongo/db/write_concern.h"
#include "mongo/executor/task_executor.h"
@@ -81,18 +81,18 @@ MONGO_FAIL_POINT_DEFINE(throwInternalErrorInDeleteRange);
* the range failed.
*/
StatusWith<int> deleteNextBatch(OperationContext* opCtx,
- const CollectionPtr& collection,
+ const ScopedCollectionAcquisition& collection,
BSONObj const& keyPattern,
ChunkRange const& range,
int numDocsToRemovePerBatch) {
- invariant(collection);
+ invariant(collection.exists());
- auto const nss = collection->ns();
+ auto const nss = collection.nss();
// The IndexChunk has a keyPattern that may apply to more than one index - we need to
// select the index and get the full index keyPattern here.
- const auto shardKeyIdx =
- findShardKeyPrefixedIndex(opCtx, collection, keyPattern, /*requireSingleKey=*/false);
+ const auto shardKeyIdx = findShardKeyPrefixedIndex(
+ opCtx, collection.getCollectionPtr(), keyPattern, /*requireSingleKey=*/false);
if (!shardKeyIdx) {
LOGV2_ERROR(
23765, "Unable to find shard key index", "keyPattern"_attr = keyPattern, logAttrs(nss));
@@ -137,7 +137,7 @@ StatusWith<int> deleteNextBatch(OperationContext* opCtx,
auto exec =
InternalPlanner::deleteWithShardKeyIndexScan(opCtx,
- &collection,
+ collection,
std::move(deleteStageParams),
*shardKeyIdx,
min,
@@ -346,25 +346,28 @@ Status deleteRangeInBatches(OperationContext* opCtx,
int numDeleted;
const auto nss = [&]() {
try {
- AutoGetCollection collection(
- opCtx, NamespaceStringOrUUID{dbName.toString(), collectionUuid}, MODE_IX);
+ const auto nssOrUuid = NamespaceStringOrUUID{dbName.toString(), collectionUuid};
+ const auto collection =
+ acquireCollection(opCtx,
+ {nssOrUuid,
+ AcquisitionPrerequisites::kPretendUnsharded,
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite},
+ MODE_IX);
LOGV2_DEBUG(6777800,
1,
"Starting batch deletion",
- logAttrs(collection.getNss()),
+ logAttrs(collection.nss()),
"collectionUUID"_attr = collectionUuid,
"range"_attr = redact(range.toString()),
"numDocsToRemovePerBatch"_attr = numDocsToRemovePerBatch,
"delayBetweenBatches"_attr = delayBetweenBatches);
- numDeleted = uassertStatusOK(deleteNextBatch(opCtx,
- collection.getCollection(),
- keyPattern,
- range,
- numDocsToRemovePerBatch));
+ numDeleted = uassertStatusOK(deleteNextBatch(
+ opCtx, collection, keyPattern, range, numDocsToRemovePerBatch));
- return collection.getNss();
+ return collection.nss();
} catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
// Throw specific error code that stops range deletions in case of errors
uasserted(
diff --git a/src/mongo/db/s/resharding/resharding_donor_service.cpp b/src/mongo/db/s/resharding/resharding_donor_service.cpp
index ed23079f5fd..09aa5aa0845 100644
--- a/src/mongo/db/s/resharding/resharding_donor_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_service.cpp
@@ -55,6 +55,7 @@
#include "mongo/db/s/sharding_index_catalog_ddl_util.h"
#include "mongo/db/s/sharding_recovery_service.h"
#include "mongo/db/s/sharding_state.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/write_block_bypass.h"
#include "mongo/db/write_concern_options.h"
#include "mongo/logv2/log.h"
@@ -1022,9 +1023,15 @@ void ReshardingDonorService::DonorStateMachine::_removeDonorDocument(
const auto& nss = NamespaceString::kDonorReshardingOperationsNamespace;
writeConflictRetry(opCtx.get(), "DonorStateMachine::_removeDonorDocument", nss.toString(), [&] {
- AutoGetCollection coll(opCtx.get(), nss, MODE_X);
-
- if (!coll) {
+ const auto coll = acquireCollection(
+ opCtx.get(),
+ CollectionAcquisitionRequest(NamespaceString(nss),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx.get()),
+ AcquisitionPrerequisites::kWrite),
+ MODE_X);
+
+ if (!coll.exists()) {
return;
}
@@ -1037,8 +1044,7 @@ void ReshardingDonorService::DonorStateMachine::_removeDonorDocument(
});
deleteObjects(opCtx.get(),
- *coll,
- nss,
+ coll,
BSON(ReshardingDonorDocument::kReshardingUUIDFieldName
<< _metadata.getReshardingUUID()),
true /* justOne */);
diff --git a/src/mongo/db/s/resharding/resharding_oplog_application.cpp b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
index 3cbdc60451a..c25c379139a 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_application.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
@@ -45,6 +45,7 @@
#include "mongo/db/s/resharding/resharding_server_parameters_gen.h"
#include "mongo/db/session/logical_session_cache.h"
#include "mongo/db/session/session_catalog_mongod.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/stats/counters.h"
#include "mongo/db/transaction/transaction_participant.h"
#include "mongo/logv2/log.h"
@@ -149,46 +150,62 @@ Status ReshardingOplogApplicationRules::applyOperation(
try {
WriteUnitOfWork wuow(opCtx);
- AutoGetCollection autoCollOutput(
- opCtx,
- _outputNss,
- MODE_IX,
- AutoGetCollection::Options{}.deadline(getDeadline(opCtx)));
+ const auto outputDb = AutoGetDb(opCtx, _outputNss.dbName(), MODE_IX);
+
+ const auto outputColl =
+ opCtx->runWithDeadline(getDeadline(opCtx), opCtx->getTimeoutError(), [&] {
+ return acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(
+ opCtx, _outputNss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ });
+
uassert(
ErrorCodes::NamespaceNotFound,
str::stream() << "Failed to apply op during resharding due to missing collection "
<< _outputNss.toStringForErrorMsg(),
- autoCollOutput);
+ outputColl.exists());
+
+ const auto stashColl =
+ opCtx->runWithDeadline(getDeadline(opCtx), opCtx->getTimeoutError(), [&] {
+ return acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(
+ opCtx, _myStashNss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ });
- AutoGetCollection autoCollStash(
- opCtx,
- _myStashNss,
- MODE_IX,
- AutoGetCollection::Options{}.deadline(getDeadline(opCtx)));
uassert(
ErrorCodes::NamespaceNotFound,
str::stream() << "Failed to apply op during resharding due to missing collection "
<< _myStashNss.toStringForErrorMsg(),
- autoCollStash);
+ stashColl.exists());
auto opType = op.getOpType();
switch (opType) {
case repl::OpTypeEnum::kInsert:
- _applyInsert_inlock(
- opCtx, autoCollOutput.getDb(), *autoCollOutput, *autoCollStash, op);
+ _applyInsert_inlock(opCtx,
+ outputDb.getDb(),
+ outputColl.getCollectionPtr(),
+ stashColl.getCollectionPtr(),
+ op);
_applierMetrics->onInsertApplied();
break;
case repl::OpTypeEnum::kUpdate:
- _applyUpdate_inlock(
- opCtx, autoCollOutput.getDb(), *autoCollOutput, *autoCollStash, op);
+ _applyUpdate_inlock(opCtx,
+ outputDb.getDb(),
+ outputColl.getCollectionPtr(),
+ stashColl.getCollectionPtr(),
+ op);
_applierMetrics->onUpdateApplied();
break;
- case repl::OpTypeEnum::kDelete:
- _applyDelete_inlock(
- opCtx, autoCollOutput.getDb(), *autoCollOutput, *autoCollStash, sii, op);
+ case repl::OpTypeEnum::kDelete: {
+ _applyDelete_inlock(opCtx, outputColl, stashColl, sii, op);
_applierMetrics->onDeleteApplied();
break;
+ }
default:
MONGO_UNREACHABLE;
}
@@ -406,9 +423,8 @@ void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCt
void ReshardingOplogApplicationRules::_applyDelete_inlock(
OperationContext* opCtx,
- Database* db,
- const CollectionPtr& outputColl,
- const CollectionPtr& stashColl,
+ const ScopedCollectionAcquisition& outputColl,
+ const ScopedCollectionAcquisition& stashColl,
const boost::optional<ShardingIndexesCatalogCache>& sii,
const repl::OplogEntry& op) const {
/**
@@ -442,9 +458,9 @@ void ReshardingOplogApplicationRules::_applyDelete_inlock(
// First, query the conflict stash collection using [op _id] as the query. If a doc exists,
// apply rule #1 and delete the doc from the stash collection.
- auto stashCollDoc = _queryStashCollById(opCtx, stashColl, idQuery);
+ auto stashCollDoc = _queryStashCollById(opCtx, stashColl.getCollectionPtr(), idQuery);
if (!stashCollDoc.isEmpty()) {
- auto nDeleted = deleteObjects(opCtx, stashColl, _myStashNss, idQuery, true /* justOne */);
+ auto nDeleted = deleteObjects(opCtx, stashColl, idQuery, true /* justOne */);
invariant(nDeleted != 0);
_applierMetrics->onWriteToStashCollections();
@@ -457,17 +473,24 @@ void ReshardingOplogApplicationRules::_applyDelete_inlock(
// single replica set transaction that is executed if we apply rule #4, so we therefore must run
// 'findByIdAndNoopUpdate' as a part of the single replica set transaction.
runWithTransaction(opCtx, _outputNss, sii, [this, idQuery](OperationContext* opCtx) {
- AutoGetCollection autoCollOutput(
- opCtx, _outputNss, MODE_IX, AutoGetCollection::Options{}.deadline(getDeadline(opCtx)));
+ const auto outputColl =
+ opCtx->runWithDeadline(getDeadline(opCtx), opCtx->getTimeoutError(), [&] {
+ return acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(
+ opCtx, _outputNss, AcquisitionPrerequisites::OperationType::kWrite),
+ MODE_IX);
+ });
+
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Failed to apply op during resharding due to missing collection "
<< _outputNss.toStringForErrorMsg(),
- autoCollOutput);
+ outputColl.exists());
// Query the output collection for a doc with _id == [op _id].
BSONObj outputCollDoc;
- auto foundDoc =
- Helpers::findByIdAndNoopUpdate(opCtx, *autoCollOutput, idQuery, outputCollDoc);
+ auto foundDoc = Helpers::findByIdAndNoopUpdate(
+ opCtx, outputColl.getCollectionPtr(), idQuery, outputCollDoc);
if (!foundDoc ||
!_sourceChunkMgr.keyBelongsToShard(
@@ -489,8 +512,7 @@ void ReshardingOplogApplicationRules::_applyDelete_inlock(
// 3. Insert the doc just deleted into the output collection
// Delete from the output collection
- auto nDeleted =
- deleteObjects(opCtx, *autoCollOutput, _outputNss, idQuery, true /* justOne */);
+ auto nDeleted = deleteObjects(opCtx, outputColl, idQuery, true /* justOne */);
invariant(nDeleted != 0);
// Attempt to delete a doc from one of the stash collections. Once we've matched a doc in
@@ -503,13 +525,20 @@ void ReshardingOplogApplicationRules::_applyDelete_inlock(
continue;
}
- AutoGetCollection autoCollStash(
- opCtx, coll, MODE_IX, AutoGetCollection::Options{}.deadline(getDeadline(opCtx)));
+ const auto stashColl =
+ opCtx->runWithDeadline(getDeadline(opCtx), opCtx->getTimeoutError(), [&] {
+ return acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(
+ opCtx, coll, AcquisitionPrerequisites::OperationType::kWrite),
+ MODE_IX);
+ });
+
uassert(
ErrorCodes::NamespaceNotFound,
str::stream() << "Failed to apply op during resharding due to missing collection "
<< coll.toStringForErrorMsg(),
- autoCollStash);
+ stashColl.exists());
auto request = DeleteRequest{};
request.setNsString(coll);
@@ -517,11 +546,11 @@ void ReshardingOplogApplicationRules::_applyDelete_inlock(
request.setMulti(false);
request.setReturnDeleted(true);
- ParsedDelete parsedDelete(opCtx, &request, autoCollStash.getCollection());
+ ParsedDelete parsedDelete(opCtx, &request, stashColl.getCollectionPtr());
uassertStatusOK(parsedDelete.parseRequest());
auto exec = uassertStatusOK(getExecutorDelete(&CurOp::get(opCtx)->debug(),
- &(*autoCollStash),
+ stashColl,
&parsedDelete,
boost::none /* verbosity */));
BSONObj res;
@@ -543,7 +572,7 @@ void ReshardingOplogApplicationRules::_applyDelete_inlock(
// collection.
if (!doc.isEmpty()) {
uassertStatusOK(collection_internal::insertDocument(opCtx,
- *autoCollOutput,
+ outputColl.getCollectionPtr(),
InsertStatement(doc),
nullptr /* OpDebug */,
false /* fromMigrate */));
diff --git a/src/mongo/db/s/resharding/resharding_oplog_application.h b/src/mongo/db/s/resharding/resharding_oplog_application.h
index 6bd35d92f6d..4a1d16f66ca 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_application.h
+++ b/src/mongo/db/s/resharding/resharding_oplog_application.h
@@ -42,6 +42,7 @@
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/resharding/resharding_oplog_applier_metrics.h"
+#include "mongo/db/shard_role.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/sharding_index_catalog_cache.h"
@@ -92,9 +93,8 @@ private:
// Applies a delete operation
void _applyDelete_inlock(OperationContext* opCtx,
- Database* db,
- const CollectionPtr& outputColl,
- const CollectionPtr& stashColl,
+ const ScopedCollectionAcquisition& outputColl,
+ const ScopedCollectionAcquisition& stashColl,
const boost::optional<ShardingIndexesCatalogCache>& gii,
const repl::OplogEntry& op) const;
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service.cpp b/src/mongo/db/s/resharding/resharding_recipient_service.cpp
index 1a75eb2831f..b67e2b28403 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service.cpp
@@ -56,6 +56,7 @@
#include "mongo/db/s/sharding_index_catalog_ddl_util.h"
#include "mongo/db/s/sharding_recovery_service.h"
#include "mongo/db/s/sharding_state.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/write_block_bypass.h"
#include "mongo/executor/network_interface_factory.h"
#include "mongo/executor/thread_pool_task_executor.h"
@@ -1121,9 +1122,16 @@ void ReshardingRecipientService::RecipientStateMachine::_removeRecipientDocument
const auto& nss = NamespaceString::kRecipientReshardingOperationsNamespace;
writeConflictRetry(
opCtx.get(), "RecipientStateMachine::_removeRecipientDocument", nss.toString(), [&] {
- AutoGetCollection coll(opCtx.get(), nss, MODE_IX);
-
- if (!coll) {
+ const auto coll =
+ acquireCollection(opCtx.get(),
+ CollectionAcquisitionRequest(
+ NamespaceString(nss),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx.get()),
+ AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+
+ if (!coll.exists()) {
return;
}
@@ -1135,8 +1143,7 @@ void ReshardingRecipientService::RecipientStateMachine::_removeRecipientDocument
});
deleteObjects(opCtx.get(),
- *coll,
- nss,
+ coll,
BSON(ReshardingRecipientDocument::kReshardingUUIDFieldName
<< _metadata.getReshardingUUID()),
true /* justOne */);
diff --git a/src/mongo/db/s/sharding_index_catalog_ddl_util.cpp b/src/mongo/db/s/sharding_index_catalog_ddl_util.cpp
index 3c137b52488..3f088911d2b 100644
--- a/src/mongo/db/s/sharding_index_catalog_ddl_util.cpp
+++ b/src/mongo/db/s/sharding_index_catalog_ddl_util.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/ops/delete.h"
#include "mongo/db/ops/update.h"
#include "mongo/db/s/shard_authoritative_catalog_gen.h"
+#include "mongo/db/shard_role.h"
#include "mongo/logv2/log.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_index_catalog.h"
@@ -49,13 +50,20 @@ namespace {
* Remove all indexes by uuid.
*/
void deleteShardingIndexCatalogEntries(OperationContext* opCtx,
- const CollectionPtr& collection,
+ const ScopedCollectionAcquisition& collection,
const UUID& uuid) {
- mongo::deleteObjects(opCtx,
- collection,
- NamespaceString::kShardIndexCatalogNamespace,
- BSON(IndexCatalogType::kCollectionUUIDFieldName << uuid),
- false);
+ mongo::deleteObjects(
+ opCtx, collection, BSON(IndexCatalogType::kCollectionUUIDFieldName << uuid), false);
+}
+
+
+const ScopedCollectionAcquisition& getAcquisitionForNss(
+ const std::vector<ScopedCollectionAcquisition>& acquisitions, const NamespaceString& nss) {
+ auto it = std::find_if(acquisitions.begin(), acquisitions.end(), [&nss](auto& acquisition) {
+ return acquisition.nss() == nss;
+ });
+ invariant(it != acquisitions.end());
+ return *it;
}
} // namespace
@@ -72,18 +80,32 @@ void renameCollectionShardingIndexCatalog(OperationContext* opCtx,
WriteUnitOfWork wunit(opCtx);
AutoGetCollection fromToColl(
opCtx, fromNss, MODE_IX, AutoGetCollection::Options{}.secondaryNssOrUUIDs({toNss}));
- AutoGetCollection collsColl(opCtx,
- NamespaceString::kShardCollectionCatalogNamespace,
- MODE_IX,
- AutoGetCollection::Options{}.secondaryNssOrUUIDs(
- {NamespaceString::kShardIndexCatalogNamespace}));
+ const auto acquisitions = acquireCollections(
+ opCtx,
+ {CollectionAcquisitionRequest(
+ NamespaceString(NamespaceString::kShardCollectionCatalogNamespace),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ CollectionAcquisitionRequest(
+ NamespaceString(NamespaceString::kShardIndexCatalogNamespace),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite)},
+ MODE_IX);
+
+ const auto& collsColl = getAcquisitionForNss(
+ acquisitions, NamespaceString::kShardCollectionCatalogNamespace);
+ const auto& idxColl =
+ getAcquisitionForNss(acquisitions, NamespaceString::kShardIndexCatalogNamespace);
+
{
// First get the document to check the index version if the document already exists
const auto queryTo =
BSON(ShardAuthoritativeCollectionType::kNssFieldName << toNss.ns());
BSONObj collectionToDoc;
bool docExists =
- Helpers::findOne(opCtx, collsColl.getCollection(), queryTo, collectionToDoc);
+ Helpers::findOne(opCtx, collsColl.getCollectionPtr(), queryTo, collectionToDoc);
if (docExists) {
auto collectionTo = ShardAuthoritativeCollectionType::parse(
IDLParserContext("RenameCollectionShardingIndexCatalogCtx"),
@@ -108,42 +130,32 @@ void renameCollectionShardingIndexCatalog(OperationContext* opCtx,
// Save uuid to remove the 'to' indexes later on.
if (docExists) {
// Remove the 'to' entry.
- mongo::deleteObjects(opCtx,
- collsColl.getCollection(),
- NamespaceString::kShardCollectionCatalogNamespace,
- queryTo,
- true);
+ mongo::deleteObjects(opCtx, collsColl, queryTo, true);
}
// Replace the _id in the 'From' entry.
BSONObj collectionFromDoc;
auto queryFrom = BSON(CollectionType::kNssFieldName << fromNss.ns());
fassert(7082801,
Helpers::findOne(
- opCtx, collsColl.getCollection(), queryFrom, collectionFromDoc));
+ opCtx, collsColl.getCollectionPtr(), queryFrom, collectionFromDoc));
auto collectionFrom = ShardAuthoritativeCollectionType::parse(
IDLParserContext("RenameCollectionShardingIndexCatalogCtx"), collectionFromDoc);
collectionFrom.setNss(toNss);
- mongo::deleteObjects(opCtx,
- collsColl.getCollection(),
- NamespaceString::kShardCollectionCatalogNamespace,
- queryFrom,
- true);
+ mongo::deleteObjects(opCtx, collsColl, queryFrom, true);
uassertStatusOK(
collection_internal::insertDocument(opCtx,
- collsColl.getCollection(),
+ collsColl.getCollectionPtr(),
InsertStatement(collectionFrom.toBSON()),
nullptr));
}
- AutoGetCollection idxColl(opCtx, NamespaceString::kShardIndexCatalogNamespace, MODE_IX);
if (toUuid) {
// Remove the 'to' indexes.
repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
mongo::deleteObjects(
opCtx,
- idxColl.getCollection(),
- NamespaceString::kShardIndexCatalogNamespace,
+ idxColl,
BSON(IndexCatalogType::kCollectionUUIDFieldName << toUuid.value()),
false);
}
@@ -151,7 +163,7 @@ void renameCollectionShardingIndexCatalog(OperationContext* opCtx,
opCtx->getServiceContext()->getOpObserver()->onModifyCollectionShardingIndexCatalog(
opCtx,
fromNss,
- idxColl->uuid(),
+ idxColl.uuid(),
ShardingIndexCatalogRenameEntry(fromNss, toNss, indexVersion).toBSON());
wunit.commit();
});
@@ -292,13 +304,19 @@ void removeShardingIndexCatalogEntryFromCollection(OperationContext* opCtx,
mongo::update(opCtx, collsColl.getDb(), request);
}
- AutoGetCollection idxColl(opCtx, NamespaceString::kShardIndexCatalogNamespace, MODE_IX);
+ const auto idxColl =
+ acquireCollection(opCtx,
+ CollectionAcquisitionRequest(
+ NamespaceString(NamespaceString::kShardIndexCatalogNamespace),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ MODE_IX);
{
repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
mongo::deleteObjects(opCtx,
- idxColl.getCollection(),
- NamespaceString::kShardIndexCatalogNamespace,
+ idxColl,
BSON(IndexCatalogType::kCollectionUUIDFieldName
<< uuid << IndexCatalogType::kNameFieldName << indexName),
true);
@@ -307,7 +325,7 @@ void removeShardingIndexCatalogEntryFromCollection(OperationContext* opCtx,
opCtx->getServiceContext()->getOpObserver()->onModifyCollectionShardingIndexCatalog(
opCtx,
nss,
- idxColl->uuid(),
+ idxColl.uuid(),
ShardingIndexCatalogRemoveEntry(indexName.toString(), uuid, lastmod).toBSON());
wunit.commit();
});
@@ -368,11 +386,18 @@ void replaceCollectionShardingIndexCatalog(OperationContext* opCtx,
mongo::update(opCtx, collsColl.getDb(), request);
}
- AutoGetCollection idxColl(opCtx, NamespaceString::kShardIndexCatalogNamespace, MODE_IX);
+ const auto idxColl =
+ acquireCollection(opCtx,
+ CollectionAcquisitionRequest(
+ NamespaceString(NamespaceString::kShardIndexCatalogNamespace),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ MODE_IX);
{
// Clear old indexes.
repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
- deleteShardingIndexCatalogEntries(opCtx, idxColl.getCollection(), uuid);
+ deleteShardingIndexCatalogEntries(opCtx, idxColl, uuid);
// Add new indexes.
for (const auto& i : indexes) {
@@ -384,7 +409,7 @@ void replaceCollectionShardingIndexCatalog(OperationContext* opCtx,
builder.append("_id", idStr);
uassertStatusOK(
collection_internal::insertDocument(opCtx,
- idxColl.getCollection(),
+ idxColl.getCollectionPtr(),
InsertStatement{builder.done()},
nullptr,
false));
@@ -394,7 +419,7 @@ void replaceCollectionShardingIndexCatalog(OperationContext* opCtx,
opCtx->getServiceContext()->getOpObserver()->onModifyCollectionShardingIndexCatalog(
opCtx,
nss,
- idxColl->uuid(),
+ idxColl.uuid(),
ShardingIndexCatalogReplaceEntry(uuid, indexVersion, indexes).toBSON());
wunit.commit();
});
@@ -410,17 +435,31 @@ void dropCollectionShardingIndexCatalog(OperationContext* opCtx, const Namespace
WriteUnitOfWork wunit(opCtx);
Lock::DBLock dbLock(opCtx, nss.dbName(), MODE_IX);
Lock::CollectionLock collLock(opCtx, nss, MODE_IX);
- AutoGetCollection collsColl(opCtx,
- NamespaceString::kShardCollectionCatalogNamespace,
- MODE_IX,
- AutoGetCollection::Options{}.secondaryNssOrUUIDs(
- {NamespaceString::kShardIndexCatalogNamespace}));
+ const auto acquisitions = acquireCollections(
+ opCtx,
+ {CollectionAcquisitionRequest(
+ NamespaceString(NamespaceString::kShardCollectionCatalogNamespace),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ CollectionAcquisitionRequest(
+ NamespaceString(NamespaceString::kShardIndexCatalogNamespace),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite)},
+ MODE_IX);
+
+ const auto& collsColl = getAcquisitionForNss(
+ acquisitions, NamespaceString::kShardCollectionCatalogNamespace);
+ const auto& idxColl =
+ getAcquisitionForNss(acquisitions, NamespaceString::kShardIndexCatalogNamespace);
+
{
const auto query =
BSON(ShardAuthoritativeCollectionType::kNssFieldName << nss.ns());
BSONObj collectionDoc;
// Get the collection UUID, if nothing is found, return early.
- if (!Helpers::findOne(opCtx, collsColl.getCollection(), query, collectionDoc)) {
+ if (!Helpers::findOne(opCtx, collsColl.getCollectionPtr(), query, collectionDoc)) {
LOGV2_DEBUG(6712305,
1,
"dropCollectionGlobalIndexesMetadata did not found collection, "
@@ -432,24 +471,21 @@ void dropCollectionShardingIndexCatalog(OperationContext* opCtx, const Namespace
IDLParserContext("dropCollectionShardingIndexCatalog"), collectionDoc);
collectionUUID.emplace(collection.getUuid());
repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
- mongo::deleteObjects(opCtx,
- collsColl.getCollection(),
- NamespaceString::kShardCollectionCatalogNamespace,
- query,
- true);
+ mongo::deleteObjects(opCtx, collsColl, query, true);
}
- AutoGetCollection idxColl(opCtx, NamespaceString::kShardIndexCatalogNamespace, MODE_IX);
+ // AutoGetCollection idxColl(opCtx, NamespaceString::kShardIndexCatalogNamespace,
+ // MODE_IX);
{
repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
- deleteShardingIndexCatalogEntries(opCtx, idxColl.getCollection(), *collectionUUID);
+ deleteShardingIndexCatalogEntries(opCtx, idxColl, *collectionUUID);
}
opCtx->getServiceContext()->getOpObserver()->onModifyCollectionShardingIndexCatalog(
opCtx,
nss,
- idxColl->uuid(),
+ idxColl.uuid(),
ShardingIndexCatalogDropEntry(*collectionUUID).toBSON());
wunit.commit();
});
@@ -465,11 +501,24 @@ void clearCollectionShardingIndexCatalog(OperationContext* opCtx,
[&]() {
WriteUnitOfWork wunit(opCtx);
AutoGetCollection userColl(opCtx, nss, MODE_IX);
- AutoGetCollection collsColl(opCtx,
- NamespaceString::kShardCollectionCatalogNamespace,
- MODE_IX,
- AutoGetCollection::Options{}.secondaryNssOrUUIDs(
- {NamespaceString::kShardIndexCatalogNamespace}));
+ const auto acquisitions = acquireCollections(
+ opCtx,
+ {CollectionAcquisitionRequest(
+ NamespaceString(NamespaceString::kShardCollectionCatalogNamespace),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ CollectionAcquisitionRequest(
+ NamespaceString(NamespaceString::kShardIndexCatalogNamespace),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite)},
+ MODE_IX);
+
+ const auto& collsColl = getAcquisitionForNss(
+ acquisitions, NamespaceString::kShardCollectionCatalogNamespace);
+ const auto& idxColl =
+ getAcquisitionForNss(acquisitions, NamespaceString::kShardIndexCatalogNamespace);
{
// First unset the index version.
const auto query =
@@ -477,7 +526,7 @@ void clearCollectionShardingIndexCatalog(OperationContext* opCtx,
<< nss.ns() << ShardAuthoritativeCollectionType::kUuidFieldName << uuid);
BSONObj collectionDoc;
bool docExists =
- Helpers::findOne(opCtx, collsColl.getCollection(), query, collectionDoc);
+ Helpers::findOne(opCtx, collsColl.getCollectionPtr(), query, collectionDoc);
// Return if there is nothing to clear.
if (!docExists) {
@@ -492,28 +541,22 @@ void clearCollectionShardingIndexCatalog(OperationContext* opCtx,
}
repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
- mongo::deleteObjects(opCtx,
- collsColl.getCollection(),
- NamespaceString::kShardCollectionCatalogNamespace,
- query,
- true);
+ mongo::deleteObjects(opCtx, collsColl, query, true);
collection.setIndexVersion(boost::none);
uassertStatusOK(
collection_internal::insertDocument(opCtx,
- collsColl.getCollection(),
+ collsColl.getCollectionPtr(),
InsertStatement(collection.toBSON()),
nullptr));
}
- AutoGetCollection idxColl(opCtx, NamespaceString::kShardIndexCatalogNamespace, MODE_IX);
-
{
repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
- deleteShardingIndexCatalogEntries(opCtx, idxColl.getCollection(), uuid);
+ deleteShardingIndexCatalogEntries(opCtx, idxColl, uuid);
}
opCtx->getServiceContext()->getOpObserver()->onModifyCollectionShardingIndexCatalog(
- opCtx, nss, idxColl->uuid(), ShardingIndexCatalogClearEntry(uuid).toBSON());
+ opCtx, nss, idxColl.uuid(), ShardingIndexCatalogClearEntry(uuid).toBSON());
wunit.commit();
});
}
diff --git a/src/mongo/db/serverless/shard_split_utils.cpp b/src/mongo/db/serverless/shard_split_utils.cpp
index 9d2835dbd6e..6f8a42a417c 100644
--- a/src/mongo/db/serverless/shard_split_utils.cpp
+++ b/src/mongo/db/serverless/shard_split_utils.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/dbhelpers.h"
#include "mongo/db/ops/delete.h"
#include "mongo/db/repl/repl_set_config.h"
+#include "mongo/db/shard_role.h"
#include "mongo/logv2/log_debug.h"
namespace mongo {
@@ -200,16 +201,21 @@ Status updateStateDoc(OperationContext* opCtx, const ShardSplitDonorDocument& st
StatusWith<bool> deleteStateDoc(OperationContext* opCtx, const UUID& shardSplitId) {
const auto nss = NamespaceString::kShardSplitDonorsNamespace;
- AutoGetCollection collection(opCtx, nss, MODE_IX);
-
- if (!collection) {
+ const auto collection = acquireCollection(
+ opCtx,
+ CollectionAcquisitionRequest(NamespaceString(nss),
+ PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+
+ if (!collection.exists()) {
return Status(ErrorCodes::NamespaceNotFound,
str::stream() << nss.toStringForErrorMsg() << " does not exist");
}
auto query = BSON(ShardSplitDonorDocument::kIdFieldName << shardSplitId);
return writeConflictRetry(opCtx, "ShardSplitDonorDeleteStateDoc", nss.ns(), [&]() -> bool {
- auto nDeleted =
- deleteObjects(opCtx, collection.getCollection(), nss, query, true /* justOne */);
+ auto nDeleted = deleteObjects(opCtx, collection, query, true /* justOne */);
return nDeleted > 0;
});
}
diff --git a/src/mongo/db/shard_role.cpp b/src/mongo/db/shard_role.cpp
index 52618b98f0f..7d54396bfb8 100644
--- a/src/mongo/db/shard_role.cpp
+++ b/src/mongo/db/shard_role.cpp
@@ -360,6 +360,22 @@ CollectionAcquisitionRequest CollectionAcquisitionRequest::fromOpCtx(
operationType);
}
+CollectionAcquisitionRequest CollectionAcquisitionRequest::fromOpCtx(
+ OperationContext* opCtx,
+ NamespaceStringOrUUID nssOrUUID,
+ AcquisitionPrerequisites::OperationType operationType) {
+ auto& oss = OperationShardingState::get(opCtx);
+ auto& readConcern = repl::ReadConcernArgs::get(opCtx);
+
+ // Acquisitions by uuid cannot possibly have a corresponding ShardVersion attached.
+ PlacementConcern placementConcern = nssOrUUID.nss()
+ ? PlacementConcern{oss.getDbVersion(nssOrUUID.dbName().db()),
+ oss.getShardVersion(*nssOrUUID.nss())}
+ : PlacementConcern{oss.getDbVersion(nssOrUUID.dbName().db()), {}};
+
+ return CollectionAcquisitionRequest(nssOrUUID, placementConcern, readConcern, operationType);
+}
+
const UUID& ScopedCollectionAcquisition::uuid() const {
invariant(exists(),
str::stream() << "Collection " << nss().toStringForErrorMsg()
diff --git a/src/mongo/db/shard_role.h b/src/mongo/db/shard_role.h
index a1fc20e9e1d..e5b95864f52 100644
--- a/src/mongo/db/shard_role.h
+++ b/src/mongo/db/shard_role.h
@@ -169,6 +169,11 @@ struct CollectionAcquisitionRequest : public CollectionOrViewAcquisitionRequest
NamespaceString nss,
AcquisitionPrerequisites::OperationType operationType,
boost::optional<UUID> expectedUUID = boost::none);
+
+ static CollectionAcquisitionRequest fromOpCtx(
+ OperationContext* opCtx,
+ NamespaceStringOrUUID nssOrUUID,
+ AcquisitionPrerequisites::OperationType operationType);
};
class ScopedCollectionAcquisition {
diff --git a/src/mongo/db/transaction_resources.cpp b/src/mongo/db/transaction_resources.cpp
index aee3202cc03..4c286f76c63 100644
--- a/src/mongo/db/transaction_resources.cpp
+++ b/src/mongo/db/transaction_resources.cpp
@@ -30,6 +30,10 @@
#include "mongo/db/transaction_resources.h"
namespace mongo {
+
+const PlacementConcern AcquisitionPrerequisites::kPretendUnsharded =
+ PlacementConcern{boost::none, boost::none};
+
namespace shard_role_details {
TransactionResources::TransactionResources(repl::ReadConcernArgs readConcern)
diff --git a/src/mongo/db/transaction_resources.h b/src/mongo/db/transaction_resources.h
index a0a03c8799c..57b3f87b0f6 100644
--- a/src/mongo/db/transaction_resources.h
+++ b/src/mongo/db/transaction_resources.h
@@ -49,6 +49,11 @@ struct PlacementConcern {
};
struct AcquisitionPrerequisites {
+ // Pretends that the collection is unsharded. Acquisitions with this PlacementConcern will have
+ // always have UNSHARDED description and filter, even if they are sharded. Only for use in
+ // internal code paths that require it. Possible data loss if used incorrectly!
+ static const PlacementConcern kPretendUnsharded;
+
enum PlacementConcernPlaceholder {
/**
* Special PlacementConcern which mimics direct connection to a shard, causing the
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index 63b05951917..b2a94b47ad1 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -54,6 +54,7 @@
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/s/shard_filtering_metadata_refresh.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/stats/resource_consumption_metrics.h"
#include "mongo/db/storage/storage_parameters_gen.h"
#include "mongo/db/ttl_collection_cache.h"
@@ -541,21 +542,27 @@ bool TTLMonitor::_doTTLIndexDelete(OperationContext* opCtx,
? uassertStatusOK(catalogCache->getCollectionRoutingInfo(opCtx, *nss)).sii
: boost::none;
// Attach IGNORED placement version to skip orphans (the range deleter will clear them up)
- auto scopedRole = ScopedSetShardRole(
- opCtx,
- *nss,
- ShardVersionFactory::make(ChunkVersion::IGNORED(),
- sii ? boost::make_optional(sii->getCollectionIndexes())
- : boost::none),
- boost::none);
- AutoGetCollection coll(opCtx, *nss, MODE_IX);
- // The collection with `uuid` might be renamed before the lock and the wrong namespace would
- // be locked and looked up so we double check here.
- if (!coll || coll->uuid() != uuid)
+ const auto shardVersion = ShardVersionFactory::make(
+ ChunkVersion::IGNORED(),
+ sii ? boost::make_optional(sii->getCollectionIndexes()) : boost::none);
+ auto scopedRole = ScopedSetShardRole(opCtx, *nss, shardVersion, boost::none);
+ const auto coll =
+ acquireCollection(opCtx,
+ CollectionAcquisitionRequest(*nss,
+ {boost::none, shardVersion},
+ repl::ReadConcernArgs::get(opCtx),
+ AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+
+ // The collection with `uuid` might be renamed before the lock and the wrong namespace
+ // would be locked and looked up so we double check here.
+ if (!coll.exists() || coll.uuid() != uuid)
return false;
// Allow TTL deletion on non-capped collections, and on capped clustered collections.
- invariant(!coll->isCapped() || (coll->isCapped() && coll->isClustered()));
+ const auto& collectionPtr = coll.getCollectionPtr();
+ invariant(!collectionPtr->isCapped() ||
+ (collectionPtr->isCapped() && collectionPtr->isClustered()));
if (MONGO_unlikely(hangTTLMonitorWithLock.shouldFail())) {
LOGV2(22534,
@@ -569,28 +576,25 @@ bool TTLMonitor::_doTTLIndexDelete(OperationContext* opCtx,
}
std::shared_ptr<TenantMigrationAccessBlocker> mtab;
- if (coll.getDb() &&
- nullptr !=
+ if (nullptr !=
(mtab = TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext())
- .getTenantMigrationAccessBlockerForDbName(coll.getDb()->name(),
+ .getTenantMigrationAccessBlockerForDbName(coll.nss().dbName(),
MtabType::kRecipient)) &&
mtab->checkIfShouldBlockTTL()) {
LOGV2_DEBUG(53768,
1,
"Postpone TTL of DB because of active tenant migration",
"tenantMigrationAccessBlocker"_attr = mtab->getDebugInfo().jsonString(),
- "database"_attr = coll.getDb()->name());
+ "database"_attr = coll.nss().dbName());
return false;
}
ResourceConsumption::ScopedMetricsCollector scopedMetrics(opCtx, nss->db().toString());
- const auto& collection = coll.getCollection();
if (info.isClustered()) {
- return _deleteExpiredWithCollscan(opCtx, ttlCollectionCache, collection);
+ return _deleteExpiredWithCollscan(opCtx, ttlCollectionCache, coll);
} else {
- return _deleteExpiredWithIndex(
- opCtx, ttlCollectionCache, collection, info.getIndexName());
+ return _deleteExpiredWithIndex(opCtx, ttlCollectionCache, coll, info.getIndexName());
}
} catch (const ExceptionForCat<ErrorCategory::StaleShardVersionError>& ex) {
// The TTL index tried to delete some information from a sharded collection
@@ -652,16 +656,17 @@ bool TTLMonitor::_doTTLIndexDelete(OperationContext* opCtx,
bool TTLMonitor::_deleteExpiredWithIndex(OperationContext* opCtx,
TTLCollectionCache* ttlCollectionCache,
- const CollectionPtr& collection,
+ const ScopedCollectionAcquisition& collection,
std::string indexName) {
- if (!collection->isIndexPresent(indexName)) {
- ttlCollectionCache->deregisterTTLIndexByName(collection->uuid(), indexName);
+ const auto& collectionPtr = collection.getCollectionPtr();
+ if (!collectionPtr->isIndexPresent(indexName)) {
+ ttlCollectionCache->deregisterTTLIndexByName(collection.uuid(), indexName);
return false;
}
- BSONObj spec = collection->getIndexSpec(indexName);
+ BSONObj spec = collectionPtr->getIndexSpec(indexName);
const IndexDescriptor* desc =
- getValidTTLIndex(opCtx, ttlCollectionCache, collection, spec, indexName);
+ getValidTTLIndex(opCtx, ttlCollectionCache, collectionPtr, spec, indexName);
if (!desc) {
return false;
@@ -670,13 +675,13 @@ bool TTLMonitor::_deleteExpiredWithIndex(OperationContext* opCtx,
LOGV2_DEBUG(22533,
1,
"running TTL job for index",
- logAttrs(collection->ns()),
+ logAttrs(collection.nss()),
"key"_attr = desc->keyPattern(),
"name"_attr = indexName);
auto expireAfterSeconds = spec[IndexDescriptor::kExpireAfterSecondsFieldName].safeNumberLong();
const Date_t kDawnOfTime = Date_t::fromMillisSinceEpoch(std::numeric_limits<long long>::min());
- const auto expirationDate = safeExpirationDate(opCtx, collection, expireAfterSeconds);
+ const auto expirationDate = safeExpirationDate(opCtx, collectionPtr, expireAfterSeconds);
const BSONObj startKey = BSON("" << kDawnOfTime);
const BSONObj endKey = BSON("" << expirationDate);
@@ -692,7 +697,7 @@ bool TTLMonitor::_deleteExpiredWithIndex(OperationContext* opCtx,
// not actually expired when our snapshot changes during deletion.
const char* keyFieldName = key.firstElement().fieldName();
BSONObj query = BSON(keyFieldName << BSON("$gte" << kDawnOfTime << "$lte" << expirationDate));
- auto findCommand = std::make_unique<FindCommandRequest>(collection->ns());
+ auto findCommand = std::make_unique<FindCommandRequest>(collection.nss());
findCommand->setFilter(query);
auto canonicalQuery = CanonicalQuery::canonicalize(opCtx, std::move(findCommand));
invariant(canonicalQuery.getStatus());
@@ -708,7 +713,7 @@ bool TTLMonitor::_deleteExpiredWithIndex(OperationContext* opCtx,
Timer timer;
auto exec = InternalPlanner::deleteWithIndexScan(opCtx,
- &collection,
+ collection,
std::move(params),
desc,
startKey,
@@ -730,7 +735,7 @@ bool TTLMonitor::_deleteExpiredWithIndex(OperationContext* opCtx,
.first) {
LOGV2(5479200,
"Deleted expired documents using index",
- logAttrs(collection->ns()),
+ logAttrs(collection.nss()),
"index"_attr = indexName,
"numDeleted"_attr = numDeleted,
"duration"_attr = duration);
@@ -750,25 +755,26 @@ bool TTLMonitor::_deleteExpiredWithIndex(OperationContext* opCtx,
bool TTLMonitor::_deleteExpiredWithCollscan(OperationContext* opCtx,
TTLCollectionCache* ttlCollectionCache,
- const CollectionPtr& collection) {
- const auto& collOptions = collection->getCollectionOptions();
+ const ScopedCollectionAcquisition& collection) {
+ const auto& collectionPtr = collection.getCollectionPtr();
+ const auto& collOptions = collectionPtr->getCollectionOptions();
uassert(5400701,
"collection is not clustered but is described as being TTL",
collOptions.clusteredIndex);
- invariant(collection->isClustered());
+ invariant(collectionPtr->isClustered());
auto expireAfterSeconds = collOptions.expireAfterSeconds;
if (!expireAfterSeconds) {
- ttlCollectionCache->deregisterTTLClusteredIndex(collection->uuid());
+ ttlCollectionCache->deregisterTTLClusteredIndex(collection.uuid());
return false;
}
- LOGV2_DEBUG(5400704, 1, "running TTL job for clustered collection", logAttrs(collection->ns()));
+ LOGV2_DEBUG(5400704, 1, "running TTL job for clustered collection", logAttrs(collection.nss()));
- const auto startId = makeCollScanStartBound(collection, Date_t::min());
+ const auto startId = makeCollScanStartBound(collectionPtr, Date_t::min());
- const auto expirationDate = safeExpirationDate(opCtx, collection, *expireAfterSeconds);
- const auto endId = makeCollScanEndBound(collection, expirationDate);
+ const auto expirationDate = safeExpirationDate(opCtx, collectionPtr, *expireAfterSeconds);
+ const auto endId = makeCollScanEndBound(collectionPtr, expirationDate);
auto params = std::make_unique<DeleteStageParams>();
params->isMulti = true;
@@ -783,7 +789,7 @@ bool TTLMonitor::_deleteExpiredWithCollscan(OperationContext* opCtx,
Timer timer;
auto exec = InternalPlanner::deleteWithCollectionScan(
opCtx,
- &collection,
+ collection,
std::move(params),
PlanYieldPolicy::YieldPolicy::YIELD_AUTO,
InternalPlanner::Direction::FORWARD,
@@ -804,7 +810,7 @@ bool TTLMonitor::_deleteExpiredWithCollscan(OperationContext* opCtx,
.first) {
LOGV2(5400702,
"Deleted expired documents using collection scan",
- logAttrs(collection->ns()),
+ logAttrs(collection.nss()),
"numDeleted"_attr = numDeleted,
"duration"_attr = duration);
}
diff --git a/src/mongo/db/ttl.h b/src/mongo/db/ttl.h
index 6aa0f137ea3..1d41e4fa6ef 100644
--- a/src/mongo/db/ttl.h
+++ b/src/mongo/db/ttl.h
@@ -29,6 +29,7 @@
#pragma once
+#include "mongo/db/shard_role.h"
#include "mongo/db/ttl_collection_cache.h"
#include "mongo/util/background.h"
@@ -137,7 +138,7 @@ private:
*/
bool _deleteExpiredWithIndex(OperationContext* opCtx,
TTLCollectionCache* ttlCollectionCache,
- const CollectionPtr& collection,
+ const ScopedCollectionAcquisition& collection,
std::string indexName);
/*
@@ -153,7 +154,7 @@ private:
*/
bool _deleteExpiredWithCollscan(OperationContext* opCtx,
TTLCollectionCache* ttlCollectionCache,
- const CollectionPtr& collection);
+ const ScopedCollectionAcquisition& collection);
// Protects the state below.
mutable Mutex _stateMutex = MONGO_MAKE_LATCH("TTLMonitorStateMutex");
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index 6db45c25324..03ab444acc4 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/repl/replication_coordinator_mock.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/write_concern_options.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/unittest/unittest.h"
@@ -193,18 +194,21 @@ private:
WriteUnitOfWork wuow1(opCtx1);
WriteUnitOfWork wuow2(opCtx2);
- auto collection2 =
- CollectionCatalog::get(opCtx2)->lookupCollectionByNamespace(opCtx2, nss);
- ASSERT(collection2);
+ const auto collection2 =
+ acquireCollection(opCtx2,
+ CollectionAcquisitionRequest::fromOpCtx(
+ opCtx2, nss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ ASSERT(collection2.exists());
auto lastApplied = repl::ReplicationCoordinator::get(opCtx2->getServiceContext())
->getMyLastAppliedOpTime()
.getTimestamp();
ASSERT_OK(opCtx2->recoveryUnit()->setTimestamp(lastApplied + 1));
BSONObj res;
- ASSERT_TRUE(
- Helpers::findByIdAndNoopUpdate(opCtx2, CollectionPtr(collection2), idQuery, res));
+ ASSERT_TRUE(Helpers::findByIdAndNoopUpdate(
+ opCtx2, collection2.getCollectionPtr(), idQuery, res));
- ASSERT_THROWS(Helpers::emptyCollection(opCtx1, nss), WriteConflictException);
+ ASSERT_THROWS(Helpers::emptyCollection(opCtx1, collection2), WriteConflictException);
wuow2.commit();
}
@@ -233,11 +237,18 @@ private:
const BSONObj& idQuery) {
{
WriteUnitOfWork wuow1(opCtx1);
- auto lastApplied = repl::ReplicationCoordinator::get(opCtx1->getServiceContext())
- ->getMyLastAppliedOpTime()
- .getTimestamp();
- ASSERT_OK(opCtx1->recoveryUnit()->setTimestamp(lastApplied + 1));
- Helpers::emptyCollection(opCtx1, nss);
+ {
+ const auto coll =
+ acquireCollection(opCtx1,
+ CollectionAcquisitionRequest::fromOpCtx(
+ opCtx1, nss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ auto lastApplied = repl::ReplicationCoordinator::get(opCtx1->getServiceContext())
+ ->getMyLastAppliedOpTime()
+ .getTimestamp();
+ ASSERT_OK(opCtx1->recoveryUnit()->setTimestamp(lastApplied + 1));
+ Helpers::emptyCollection(opCtx1, coll);
+ }
{
WriteUnitOfWork wuow2(opCtx2);
diff --git a/src/mongo/dbtests/query_stage_batched_delete.cpp b/src/mongo/dbtests/query_stage_batched_delete.cpp
index ada2e2af46c..bfa75a595da 100644
--- a/src/mongo/dbtests/query_stage_batched_delete.cpp
+++ b/src/mongo/dbtests/query_stage_batched_delete.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/op_observer/op_observer_noop.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/storage/checkpointer.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/util/tick_source_mock.h"
@@ -194,14 +195,16 @@ public:
// Uses the default _expCtx tied to the test suite.
std::unique_ptr<BatchedDeleteStage> makeBatchedDeleteStage(
- WorkingSet* ws, const CollectionPtr& coll, CanonicalQuery* deleteParamsFilter = nullptr) {
+ WorkingSet* ws,
+ const ScopedCollectionAcquisition& coll,
+ CanonicalQuery* deleteParamsFilter = nullptr) {
return makeBatchedDeleteStage(ws, coll, _expCtx.get(), deleteParamsFilter);
}
// Defaults batch params to be test defaults for targetBatchTimeMS and targetBatchDocs.
std::unique_ptr<BatchedDeleteStage> makeBatchedDeleteStage(
WorkingSet* ws,
- const CollectionPtr& coll,
+ const ScopedCollectionAcquisition& coll,
ExpressionContext* expCtx,
CanonicalQuery* deleteParamsFilter = nullptr) {
@@ -214,7 +217,7 @@ public:
std::unique_ptr<BatchedDeleteStage> makeBatchedDeleteStage(
WorkingSet* ws,
- const CollectionPtr& coll,
+ const ScopedCollectionAcquisition& coll,
ExpressionContext* expCtx,
std::unique_ptr<BatchedDeleteStageParams> batchedDeleteParams,
CanonicalQuery* deleteParamsFilter = nullptr) {
@@ -231,7 +234,7 @@ public:
std::move(batchedDeleteParams),
ws,
coll,
- new CollectionScan(expCtx, coll, collScanParams, ws, nullptr));
+ new CollectionScan(expCtx, coll.getCollectionPtr(), collScanParams, ws, nullptr));
}
protected:
@@ -252,12 +255,14 @@ TickSourceMock<Milliseconds>* QueryStageBatchedDeleteTest::_tickSource = nullptr
// Confirms batched deletes wait until a batch meets the targetBatchDocs before deleting documents.
TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetBatchDocsBasic) {
- dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
auto nDocs = 52;
prePopulateCollection(nDocs);
- const CollectionPtr& coll = ctx.getCollection();
- ASSERT(coll);
+ const auto coll = acquireCollection(
+ &_opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ ASSERT(coll.exists());
WorkingSet ws;
auto deleteStage = makeBatchedDeleteStage(&ws, coll);
@@ -288,16 +293,18 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetBatchDocsBasic) {
// state, BatchedDeleteStage's snapshot is incremented and it can see the document has been removed
// and skips over it.
TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteStagedDocIsDeleted) {
- dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
auto nDocs = 11;
prePopulateCollection(nDocs);
- const CollectionPtr& coll = ctx.getCollection();
- ASSERT(coll);
+ const auto coll = acquireCollection(
+ &_opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ ASSERT(coll.exists());
// Get the RecordIds that would be returned by an in-order scan.
std::vector<RecordId> recordIds;
- getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds);
+ getRecordIds(coll.getCollectionPtr(), CollectionScanParams::FORWARD, &recordIds);
WorkingSet ws;
auto deleteStage = makeBatchedDeleteStage(&ws, coll);
@@ -318,11 +325,12 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteStagedDocIsDeleted) {
{
// Delete a document that has already been added to the delete batch.
deleteStage->saveState();
- BSONObj targetDoc = coll->docFor(&_opCtx, recordIds[pauseBatchingIdx - 2]).value();
+ BSONObj targetDoc =
+ coll.getCollectionPtr()->docFor(&_opCtx, recordIds[pauseBatchingIdx - 2]).value();
ASSERT(!targetDoc.isEmpty());
remove(targetDoc);
// Increases the snapshotId.
- deleteStage->restoreState(&coll);
+ deleteStage->restoreState(&coll.getCollectionPtr());
}
while ((state = deleteStage->work(&id)) != PlanStage::IS_EOF) {
@@ -355,14 +363,17 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteStagedDocIsDeletedWriteConflict
auto nDocs = 11;
prePopulateCollection(nDocs);
- CollectionPtr coll(CollectionCatalog::get(batchedDeleteOpCtx.get())
- ->lookupCollectionByNamespace(batchedDeleteOpCtx.get(), nss));
- ASSERT(coll);
+ const auto coll =
+ acquireCollection(batchedDeleteOpCtx.get(),
+ CollectionAcquisitionRequest::fromOpCtx(
+ batchedDeleteOpCtx.get(), nss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ ASSERT(coll.exists());
// Get the RecordIds that would be returned by an in-order scan.
std::vector<RecordId> recordIds;
- getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds);
+ getRecordIds(coll.getCollectionPtr(), CollectionScanParams::FORWARD, &recordIds);
WorkingSet ws;
@@ -382,8 +393,9 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteStagedDocIsDeletedWriteConflict
}
// Find the document to delete with the same OperationContext that holds the locks.
- BSONObj targetDoc =
- coll->docFor(batchedDeleteOpCtx.get(), recordIds[pauseBatchingIdx - 2]).value();
+ BSONObj targetDoc = coll.getCollectionPtr()
+ ->docFor(batchedDeleteOpCtx.get(), recordIds[pauseBatchingIdx - 2])
+ .value();
ASSERT(!targetDoc.isEmpty());
{
@@ -417,12 +429,14 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteStagedDocIsDeletedWriteConflict
// One of the staged documents is updated and then the BatchedDeleteStage increments its snapshot
// before discovering the mismatch.
TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteStagedDocIsUpdatedToNotMatch) {
- dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
auto nDocs = 11;
prePopulateCollection(nDocs);
- const CollectionPtr& coll = ctx.getCollection();
- ASSERT(coll);
+ const auto coll = acquireCollection(
+ &_opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ ASSERT(coll.exists());
// Only delete documents whose 'a' field is greater than or equal to 0.
const BSONObj query = BSON("a" << BSON("$gte" << 0));
@@ -451,7 +465,7 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteStagedDocIsUpdatedToNotMatch) {
BSONObj updateObj = BSON("a" << -1);
update(queryObj, updateObj);
// Increases the snapshotId.
- deleteStage->restoreState(&coll);
+ deleteStage->restoreState(&coll.getCollectionPtr());
}
while ((state = deleteStage->work(&id)) != PlanStage::IS_EOF) {
@@ -484,10 +498,13 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteStagedDocIsUpdatedToNotMatchCli
auto nDocs = 11;
prePopulateCollection(nDocs);
- CollectionPtr coll(CollectionCatalog::get(batchedDeleteOpCtx.get())
- ->lookupCollectionByNamespace(batchedDeleteOpCtx.get(), nss));
- ASSERT(coll);
+ const auto coll =
+ acquireCollection(batchedDeleteOpCtx.get(),
+ CollectionAcquisitionRequest::fromOpCtx(
+ batchedDeleteOpCtx.get(), nss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ ASSERT(coll.exists());
// Only delete documents whose 'a' field is greater than or equal to 0.
const BSONObj query = BSON("a" << BSON("$gte" << 0));
@@ -560,8 +577,11 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetBatchTimeMSBasic) {
int batchSize1 = timedBatch1.size();
int nDocs = batchSize0 + batchSize1;
- const CollectionPtr& coll = ctx.getCollection();
- ASSERT(coll);
+ const auto coll = acquireCollection(
+ &_opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ ASSERT(coll.exists());
WorkingSet ws;
auto deleteStage = makeBatchedDeleteStage(&ws, coll);
@@ -639,8 +659,11 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetBatchTimeMSWithTargetBatc
int batchSize2 = timedBatch2.size();
int nDocs = batchSize0 + batchSize1 + batchSize2;
- const CollectionPtr& coll = ctx.getCollection();
- ASSERT(coll);
+ const auto coll = acquireCollection(
+ &_opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ ASSERT(coll.exists());
WorkingSet ws;
auto deleteStage = makeBatchedDeleteStage(&ws, coll);
@@ -707,8 +730,11 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassDocsBasic) {
auto nDocs = 52;
prePopulateCollection(nDocs);
- const CollectionPtr& coll = ctx.getCollection();
- ASSERT(coll);
+ const auto coll = acquireCollection(
+ &_opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ ASSERT(coll.exists());
WorkingSet ws;
@@ -776,8 +802,11 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassDocsWithUnlimitedBatc
auto nDocs = 52;
prePopulateCollection(nDocs);
- const CollectionPtr& coll = ctx.getCollection();
- ASSERT(coll);
+ const auto coll = acquireCollection(
+ &_opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ ASSERT(coll.exists());
WorkingSet ws;
@@ -829,8 +858,11 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassTimeMSBasic) {
auto nDocs = 52;
prePopulateCollection(nDocs);
- const CollectionPtr& coll = ctx.getCollection();
- ASSERT(coll);
+ const auto coll = acquireCollection(
+ &_opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ ASSERT(coll.exists());
WorkingSet ws;
@@ -877,8 +909,11 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassTimeMSWithUnlimitedBa
auto nDocs = 52;
prePopulateCollection(nDocs);
- const CollectionPtr& coll = ctx.getCollection();
- ASSERT(coll);
+ const auto coll = acquireCollection(
+ &_opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ ASSERT(coll.exists());
WorkingSet ws;
@@ -976,8 +1011,11 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassTimeMSReachedBeforeTa
batchedDeleteParams->targetPassTimeMS = targetPassTimeMS;
batchedDeleteParams->targetPassDocs = targetPassDocs;
- const CollectionPtr& coll = ctx.getCollection();
- ASSERT(coll);
+ const auto coll = acquireCollection(
+ &_opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+ ASSERT(coll.exists());
WorkingSet ws;
diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp
index f0b76d0c319..7015bad0ee8 100644
--- a/src/mongo/dbtests/query_stage_delete.cpp
+++ b/src/mongo/dbtests/query_stage_delete.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/exec/queued_data_stage.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/shard_role.h"
#include "mongo/dbtests/dbtests.h"
namespace mongo {
@@ -122,14 +123,16 @@ private:
class QueryStageDeleteUpcomingObjectWasDeleted : public QueryStageDeleteBase {
public:
void run() {
- dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
+ const auto coll = acquireCollection(
+ &_opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
- const CollectionPtr& coll = ctx.getCollection();
- ASSERT(coll);
+ ASSERT(coll.exists());
// Get the RecordIds that would be returned by an in-order scan.
std::vector<RecordId> recordIds;
- getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds);
+ getRecordIds(coll.getCollectionPtr(), CollectionScanParams::FORWARD, &recordIds);
// Configure the scan.
CollectionScanParams collScanParams;
@@ -146,7 +149,8 @@ public:
std::move(deleteStageParams),
&ws,
coll,
- new CollectionScan(_expCtx.get(), coll, collScanParams, &ws, nullptr));
+ new CollectionScan(
+ _expCtx.get(), coll.getCollectionPtr(), collScanParams, &ws, nullptr));
const DeleteStats* stats = static_cast<const DeleteStats*>(deleteStage.getSpecificStats());
@@ -160,10 +164,11 @@ public:
// Remove recordIds[targetDocIndex];
static_cast<PlanStage*>(&deleteStage)->saveState();
- BSONObj targetDoc = coll->docFor(&_opCtx, recordIds[targetDocIndex]).value();
+ BSONObj targetDoc =
+ coll.getCollectionPtr()->docFor(&_opCtx, recordIds[targetDocIndex]).value();
ASSERT(!targetDoc.isEmpty());
remove(targetDoc);
- static_cast<PlanStage*>(&deleteStage)->restoreState(&coll);
+ static_cast<PlanStage*>(&deleteStage)->restoreState(&coll.getCollectionPtr());
// Remove the rest.
while (!deleteStage.isEOF()) {
@@ -184,9 +189,12 @@ class QueryStageDeleteReturnOldDoc : public QueryStageDeleteBase {
public:
void run() {
// Various variables we'll need.
- dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
- const CollectionPtr& coll = ctx.getCollection();
- ASSERT(coll);
+ const auto coll = acquireCollection(
+ &_opCtx,
+ CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite),
+ MODE_IX);
+
+ ASSERT(coll.exists());
const int targetDocIndex = 0;
const BSONObj query = BSON("foo" << BSON("$gte" << targetDocIndex));
const auto ws = std::make_unique<WorkingSet>();
@@ -194,7 +202,7 @@ public:
// Get the RecordIds that would be returned by an in-order scan.
std::vector<RecordId> recordIds;
- getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds);
+ getRecordIds(coll.getCollectionPtr(), CollectionScanParams::FORWARD, &recordIds);
// Configure a QueuedDataStage to pass the first object in the collection back in a
// RID_AND_OBJ state.
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index 60d7630a358..11680dc1997 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -42,6 +42,7 @@
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator_mock.h"
+#include "mongo/db/shard_role.h"
#include "mongo/db/storage/storage_parameters_gen.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/idl/server_parameter_test_util.h"
@@ -243,6 +244,8 @@ protected:
uassertStatusOK(applyCommand_inlock(
&_opCtx, ApplierOperation{&entry}, getOplogApplicationMode()));
} else {
+ const auto coll = acquireCollection(
+ &_opCtx, {nss(), {}, {}, AcquisitionPrerequisites::kWrite}, MODE_IX);
WriteUnitOfWork wunit(&_opCtx);
auto lastApplied = repl::ReplicationCoordinator::get(_opCtx.getServiceContext())
->getMyLastAppliedOpTime()
@@ -252,6 +255,7 @@ protected:
const bool dataIsConsistent = true;
uassertStatusOK(applyOperation_inlock(&_opCtx,
ctx.db(),
+ coll,
ApplierOperation{&entry},
false,
getOplogApplicationMode(),