Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/auth/auth_op_observer.cpp | 2
-rw-r--r--  src/mongo/db/auth/auth_op_observer.h | 2
-rw-r--r--  src/mongo/db/catalog/capped_utils.cpp | 6
-rw-r--r--  src/mongo/db/catalog/capped_utils_test.cpp | 3
-rw-r--r--  src/mongo/db/catalog/coll_mod.cpp | 2
-rw-r--r--  src/mongo/db/catalog/coll_mod.h | 1
-rw-r--r--  src/mongo/db/catalog/collection.cpp | 57
-rw-r--r--  src/mongo/db/catalog/collection.h | 84
-rw-r--r--  src/mongo/db/catalog/collection_catalog.cpp | 52
-rw-r--r--  src/mongo/db/catalog/collection_catalog.h | 26
-rw-r--r--  src/mongo/db/catalog/collection_catalog_helper.cpp | 5
-rw-r--r--  src/mongo/db/catalog/collection_catalog_helper.h | 1
-rw-r--r--  src/mongo/db/catalog/collection_catalog_test.cpp | 74
-rw-r--r--  src/mongo/db/catalog/collection_compact.cpp | 11
-rw-r--r--  src/mongo/db/catalog/collection_impl.cpp | 21
-rw-r--r--  src/mongo/db/catalog/collection_impl.h | 1
-rw-r--r--  src/mongo/db/catalog/collection_mock.h | 1
-rw-r--r--  src/mongo/db/catalog/collection_test.cpp | 18
-rw-r--r--  src/mongo/db/catalog/collection_validation.cpp | 2
-rw-r--r--  src/mongo/db/catalog/collection_validation.h | 1
-rw-r--r--  src/mongo/db/catalog/collection_validation_test.cpp | 6
-rw-r--r--  src/mongo/db/catalog/create_collection.cpp | 4
-rw-r--r--  src/mongo/db/catalog/create_collection_test.cpp | 3
-rw-r--r--  src/mongo/db/catalog/database_impl.cpp | 12
-rw-r--r--  src/mongo/db/catalog/database_impl.h | 6
-rw-r--r--  src/mongo/db/catalog/drop_collection.cpp | 15
-rw-r--r--  src/mongo/db/catalog/drop_indexes.cpp | 8
-rw-r--r--  src/mongo/db/catalog/index_build_block.cpp | 2
-rw-r--r--  src/mongo/db/catalog/index_build_block.h | 3
-rw-r--r--  src/mongo/db/catalog/index_builds_manager.cpp | 16
-rw-r--r--  src/mongo/db/catalog/index_builds_manager.h | 18
-rw-r--r--  src/mongo/db/catalog/index_catalog.h | 9
-rw-r--r--  src/mongo/db/catalog/index_catalog_entry.h | 3
-rw-r--r--  src/mongo/db/catalog/index_catalog_entry_impl.cpp | 6
-rw-r--r--  src/mongo/db/catalog/index_catalog_entry_impl.h | 6
-rw-r--r--  src/mongo/db/catalog/index_catalog_impl.cpp | 18
-rw-r--r--  src/mongo/db/catalog/index_catalog_impl.h | 19
-rw-r--r--  src/mongo/db/catalog/index_catalog_noop.h | 8
-rw-r--r--  src/mongo/db/catalog/index_signature_test.cpp | 2
-rw-r--r--  src/mongo/db/catalog/list_indexes.cpp | 13
-rw-r--r--  src/mongo/db/catalog/list_indexes.h | 2
-rw-r--r--  src/mongo/db/catalog/multi_index_block.cpp | 32
-rw-r--r--  src/mongo/db/catalog/multi_index_block.h | 26
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp | 17
-rw-r--r--  src/mongo/db/catalog/rename_collection_test.cpp | 31
-rw-r--r--  src/mongo/db/catalog/throttle_cursor_test.cpp | 21
-rw-r--r--  src/mongo/db/catalog/validate_adaptor.cpp | 2
-rw-r--r--  src/mongo/db/catalog/validate_state.cpp | 4
-rw-r--r--  src/mongo/db/catalog/validate_state.h | 4
-rw-r--r--  src/mongo/db/catalog/validate_state_test.cpp | 15
-rw-r--r--  src/mongo/db/catalog_raii.cpp | 73
-rw-r--r--  src/mongo/db/catalog_raii.h | 56
-rw-r--r--  src/mongo/db/client.h | 1
-rw-r--r--  src/mongo/db/clientcursor.h | 1
-rw-r--r--  src/mongo/db/cloner.cpp | 4
-rw-r--r--  src/mongo/db/commands/count_cmd.cpp | 4
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp | 10
-rw-r--r--  src/mongo/db/commands/dbcheck.cpp | 2
-rw-r--r--  src/mongo/db/commands/dbcommands.cpp | 7
-rw-r--r--  src/mongo/db/commands/dbcommands_d.cpp | 4
-rw-r--r--  src/mongo/db/commands/dbhash.cpp | 70
-rw-r--r--  src/mongo/db/commands/distinct.cpp | 4
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp | 61
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp | 4
-rw-r--r--  src/mongo/db/commands/getmore_cmd.cpp | 4
-rw-r--r--  src/mongo/db/commands/haystack.cpp | 4
-rw-r--r--  src/mongo/db/commands/index_filter_commands.cpp | 4
-rw-r--r--  src/mongo/db/commands/list_collections.cpp | 6
-rw-r--r--  src/mongo/db/commands/list_indexes.cpp | 12
-rw-r--r--  src/mongo/db/commands/mr_test.cpp | 4
-rw-r--r--  src/mongo/db/commands/plan_cache_clear_command.cpp | 2
-rw-r--r--  src/mongo/db/commands/run_aggregate.cpp | 4
-rw-r--r--  src/mongo/db/commands/test_commands.cpp | 2
-rw-r--r--  src/mongo/db/concurrency/deferred_writer.cpp | 4
-rw-r--r--  src/mongo/db/db_raii.cpp | 2
-rw-r--r--  src/mongo/db/db_raii.h | 20
-rw-r--r--  src/mongo/db/db_raii_test.cpp | 2
-rw-r--r--  src/mongo/db/dbhelpers.cpp | 25
-rw-r--r--  src/mongo/db/dbhelpers.h | 9
-rw-r--r--  src/mongo/db/exec/cached_plan.cpp | 2
-rw-r--r--  src/mongo/db/exec/cached_plan.h | 2
-rw-r--r--  src/mongo/db/exec/collection_scan.cpp | 2
-rw-r--r--  src/mongo/db/exec/collection_scan.h | 2
-rw-r--r--  src/mongo/db/exec/collection_scan_common.h | 2
-rw-r--r--  src/mongo/db/exec/count.cpp | 2
-rw-r--r--  src/mongo/db/exec/count.h | 2
-rw-r--r--  src/mongo/db/exec/count_scan.cpp | 2
-rw-r--r--  src/mongo/db/exec/count_scan.h | 2
-rw-r--r--  src/mongo/db/exec/delete.cpp | 2
-rw-r--r--  src/mongo/db/exec/delete.h | 2
-rw-r--r--  src/mongo/db/exec/distinct_scan.cpp | 2
-rw-r--r--  src/mongo/db/exec/distinct_scan.h | 2
-rw-r--r--  src/mongo/db/exec/fetch.cpp | 2
-rw-r--r--  src/mongo/db/exec/fetch.h | 2
-rw-r--r--  src/mongo/db/exec/geo_near.cpp | 14
-rw-r--r--  src/mongo/db/exec/geo_near.h | 16
-rw-r--r--  src/mongo/db/exec/idhack.cpp | 4
-rw-r--r--  src/mongo/db/exec/idhack.h | 4
-rw-r--r--  src/mongo/db/exec/index_scan.cpp | 2
-rw-r--r--  src/mongo/db/exec/index_scan.h | 2
-rw-r--r--  src/mongo/db/exec/multi_iterator.cpp | 2
-rw-r--r--  src/mongo/db/exec/multi_iterator.h | 2
-rw-r--r--  src/mongo/db/exec/multi_plan.cpp | 2
-rw-r--r--  src/mongo/db/exec/multi_plan.h | 2
-rw-r--r--  src/mongo/db/exec/near.cpp | 2
-rw-r--r--  src/mongo/db/exec/near.h | 4
-rw-r--r--  src/mongo/db/exec/plan_cache_util.h | 2
-rw-r--r--  src/mongo/db/exec/plan_stage.h | 1
-rw-r--r--  src/mongo/db/exec/record_store_fast_count.cpp | 2
-rw-r--r--  src/mongo/db/exec/record_store_fast_count.h | 2
-rw-r--r--  src/mongo/db/exec/requires_all_indices_stage.h | 2
-rw-r--r--  src/mongo/db/exec/requires_collection_stage.cpp | 5
-rw-r--r--  src/mongo/db/exec/requires_collection_stage.h | 8
-rw-r--r--  src/mongo/db/exec/requires_index_stage.cpp | 2
-rw-r--r--  src/mongo/db/exec/requires_index_stage.h | 4
-rw-r--r--  src/mongo/db/exec/sbe/parser/parser.cpp | 15
-rw-r--r--  src/mongo/db/exec/sbe/stages/ix_scan.cpp | 2
-rw-r--r--  src/mongo/db/exec/sbe/stages/scan.cpp | 4
-rw-r--r--  src/mongo/db/exec/sbe/stages/scan.h | 2
-rw-r--r--  src/mongo/db/exec/sort_key_generator.h | 1
-rw-r--r--  src/mongo/db/exec/stagedebug_cmd.cpp | 6
-rw-r--r--  src/mongo/db/exec/subplan.cpp | 2
-rw-r--r--  src/mongo/db/exec/subplan.h | 2
-rw-r--r--  src/mongo/db/exec/text.cpp | 4
-rw-r--r--  src/mongo/db/exec/text.h | 4
-rw-r--r--  src/mongo/db/exec/text_or.cpp | 2
-rw-r--r--  src/mongo/db/exec/text_or.h | 2
-rw-r--r--  src/mongo/db/exec/trial_period_utils.cpp | 4
-rw-r--r--  src/mongo/db/exec/trial_period_utils.h | 3
-rw-r--r--  src/mongo/db/exec/update_stage.cpp | 4
-rw-r--r--  src/mongo/db/exec/update_stage.h | 4
-rw-r--r--  src/mongo/db/exec/upsert_stage.cpp | 2
-rw-r--r--  src/mongo/db/exec/upsert_stage.h | 2
-rw-r--r--  src/mongo/db/exec/write_stage_common.cpp | 2
-rw-r--r--  src/mongo/db/exec/write_stage_common.h | 3
-rw-r--r--  src/mongo/db/fcv_op_observer.h | 2
-rw-r--r--  src/mongo/db/free_mon/free_mon_op_observer.h | 2
-rw-r--r--  src/mongo/db/index/haystack_access_method.h | 1
-rw-r--r--  src/mongo/db/index/haystack_access_method_internal.h | 4
-rw-r--r--  src/mongo/db/index/index_access_method.cpp | 10
-rw-r--r--  src/mongo/db/index/index_access_method.h | 20
-rw-r--r--  src/mongo/db/index/index_build_interceptor.cpp | 6
-rw-r--r--  src/mongo/db/index/index_build_interceptor.h | 6
-rw-r--r--  src/mongo/db/index/index_descriptor.cpp | 2
-rw-r--r--  src/mongo/db/index/index_descriptor.h | 3
-rw-r--r--  src/mongo/db/index/skipped_record_tracker.cpp | 2
-rw-r--r--  src/mongo/db/index/skipped_record_tracker.h | 2
-rw-r--r--  src/mongo/db/index_build_entry_helpers.cpp | 33
-rw-r--r--  src/mongo/db/index_builds_coordinator.cpp | 40
-rw-r--r--  src/mongo/db/index_builds_coordinator.h | 14
-rw-r--r--  src/mongo/db/index_builds_coordinator_mongod.cpp | 3
-rw-r--r--  src/mongo/db/introspect.cpp | 5
-rw-r--r--  src/mongo/db/matcher/expression_text.cpp | 2
-rw-r--r--  src/mongo/db/mongod_main.cpp | 2
-rw-r--r--  src/mongo/db/op_observer.h | 2
-rw-r--r--  src/mongo/db/op_observer_impl.cpp | 5
-rw-r--r--  src/mongo/db/op_observer_impl.h | 2
-rw-r--r--  src/mongo/db/op_observer_noop.h | 2
-rw-r--r--  src/mongo/db/op_observer_registry.h | 2
-rw-r--r--  src/mongo/db/ops/delete.cpp | 2
-rw-r--r--  src/mongo/db/ops/delete.h | 2
-rw-r--r--  src/mongo/db/ops/update.cpp | 2
-rw-r--r--  src/mongo/db/ops/write_ops_exec.cpp | 10
-rw-r--r--  src/mongo/db/pipeline/document_source_cursor.cpp | 6
-rw-r--r--  src/mongo/db/pipeline/document_source_cursor.h | 4
-rw-r--r--  src/mongo/db/pipeline/document_source_geo_near_cursor.cpp | 4
-rw-r--r--  src/mongo/db/pipeline/document_source_geo_near_cursor.h | 4
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.cpp | 25
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.h | 17
-rw-r--r--  src/mongo/db/pipeline/plan_executor_pipeline.h | 2
-rw-r--r--  src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp | 19
-rw-r--r--  src/mongo/db/pipeline/process_interface/non_shardsvr_process_interface.cpp | 2
-rw-r--r--  src/mongo/db/query/classic_stage_builder.h | 2
-rw-r--r--  src/mongo/db/query/collection_query_info.cpp | 16
-rw-r--r--  src/mongo/db/query/collection_query_info.h | 27
-rw-r--r--  src/mongo/db/query/explain.cpp | 11
-rw-r--r--  src/mongo/db/query/explain.h | 7
-rw-r--r--  src/mongo/db/query/find.cpp | 28
-rw-r--r--  src/mongo/db/query/find.h | 4
-rw-r--r--  src/mongo/db/query/get_executor.cpp | 46
-rw-r--r--  src/mongo/db/query/get_executor.h | 19
-rw-r--r--  src/mongo/db/query/internal_plans.cpp | 16
-rw-r--r--  src/mongo/db/query/internal_plans.h | 15
-rw-r--r--  src/mongo/db/query/plan_executor.h | 2
-rw-r--r--  src/mongo/db/query/plan_executor_factory.cpp | 10
-rw-r--r--  src/mongo/db/query/plan_executor_factory.h | 10
-rw-r--r--  src/mongo/db/query/plan_executor_impl.cpp | 21
-rw-r--r--  src/mongo/db/query/plan_executor_impl.h | 6
-rw-r--r--  src/mongo/db/query/plan_executor_sbe.cpp | 4
-rw-r--r--  src/mongo/db/query/plan_executor_sbe.h | 4
-rw-r--r--  src/mongo/db/query/plan_yield_policy.h | 6
-rw-r--r--  src/mongo/db/query/plan_yield_policy_impl.cpp | 21
-rw-r--r--  src/mongo/db/query/plan_yield_policy_impl.h | 10
-rw-r--r--  src/mongo/db/query/planner_analysis.h | 1
-rw-r--r--  src/mongo/db/query/query_planner.cpp | 2
-rw-r--r--  src/mongo/db/query/query_planner.h | 3
-rw-r--r--  src/mongo/db/query/sbe_cached_solution_planner.h | 2
-rw-r--r--  src/mongo/db/query/sbe_multi_planner.h | 2
-rw-r--r--  src/mongo/db/query/sbe_runtime_planner.cpp | 1
-rw-r--r--  src/mongo/db/query/sbe_runtime_planner.h | 4
-rw-r--r--  src/mongo/db/query/sbe_stage_builder.h | 2
-rw-r--r--  src/mongo/db/query/sbe_stage_builder_coll_scan.cpp | 12
-rw-r--r--  src/mongo/db/query/sbe_stage_builder_coll_scan.h | 2
-rw-r--r--  src/mongo/db/query/sbe_stage_builder_index_scan.cpp | 10
-rw-r--r--  src/mongo/db/query/sbe_stage_builder_index_scan.h | 4
-rw-r--r--  src/mongo/db/query/sbe_sub_planner.h | 2
-rw-r--r--  src/mongo/db/query/stage_builder.h | 4
-rw-r--r--  src/mongo/db/query/stage_builder_util.cpp | 4
-rw-r--r--  src/mongo/db/query/stage_builder_util.h | 4
-rw-r--r--  src/mongo/db/rebuild_indexes.cpp | 2
-rw-r--r--  src/mongo/db/rebuild_indexes.h | 5
-rw-r--r--  src/mongo/db/repair.cpp | 2
-rw-r--r--  src/mongo/db/repl/SConscript | 1
-rw-r--r--  src/mongo/db/repl/apply_ops.cpp | 2
-rw-r--r--  src/mongo/db/repl/collection_bulk_loader.h | 1
-rw-r--r--  src/mongo/db/repl/dbcheck.cpp | 13
-rw-r--r--  src/mongo/db/repl/dbcheck.h | 21
-rw-r--r--  src/mongo/db/repl/idempotency_test_fixture.cpp | 12
-rw-r--r--  src/mongo/db/repl/idempotency_test_fixture.h | 3
-rw-r--r--  src/mongo/db/repl/local_oplog_info.cpp | 8
-rw-r--r--  src/mongo/db/repl/local_oplog_info.h | 6
-rw-r--r--  src/mongo/db/repl/mock_repl_coord_server_fixture.cpp | 5
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 14
-rw-r--r--  src/mongo/db/repl/oplog.h | 3
-rw-r--r--  src/mongo/db/repl/oplog_applier_impl_test.cpp | 2
-rw-r--r--  src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp | 2
-rw-r--r--  src/mongo/db/repl/oplog_applier_impl_test_fixture.h | 4
-rw-r--r--  src/mongo/db/repl/primary_only_service_op_observer.h | 2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp | 3
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp | 4
-rw-r--r--  src/mongo/db/repl/rollback_impl.cpp | 6
-rw-r--r--  src/mongo/db/repl/rollback_test_fixture.cpp | 19
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp | 6
-rw-r--r--  src/mongo/db/repl/rs_rollback_test.cpp | 16
-rw-r--r--  src/mongo/db/repl/storage_interface.h | 5
-rw-r--r--  src/mongo/db/repl/storage_interface_impl.cpp | 54
-rw-r--r--  src/mongo/db/repl/storage_interface_impl.h | 4
-rw-r--r--  src/mongo/db/repl/storage_interface_impl_test.cpp | 6
-rw-r--r--  src/mongo/db/repl/storage_interface_mock.h | 4
-rw-r--r--  src/mongo/db/repl/tenant_migration_donor_op_observer.h | 2
-rw-r--r--  src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp | 18
-rw-r--r--  src/mongo/db/repl/tenant_oplog_applier_test.cpp | 4
-rw-r--r--  src/mongo/db/s/check_sharding_index_command.cpp | 3
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp | 3
-rw-r--r--  src/mongo/db/s/config_server_op_observer.h | 2
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp | 16
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.h | 9
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp | 2
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp | 12
-rw-r--r--  src/mongo/db/s/migration_util.cpp | 2
-rw-r--r--  src/mongo/db/s/range_deletion_util.cpp | 27
-rw-r--r--  src/mongo/db/s/resharding/resharding_op_observer.h | 2
-rw-r--r--  src/mongo/db/s/resharding_util.cpp | 2
-rw-r--r--  src/mongo/db/s/shard_local.cpp | 4
-rw-r--r--  src/mongo/db/s/shard_server_op_observer.cpp | 2
-rw-r--r--  src/mongo/db/s/shard_server_op_observer.h | 2
-rw-r--r--  src/mongo/db/s/shardsvr_shard_collection.cpp | 2
-rw-r--r--  src/mongo/db/s/split_chunk.cpp | 10
-rw-r--r--  src/mongo/db/s/split_vector.cpp | 9
-rw-r--r--  src/mongo/db/session_catalog_mongod.cpp | 3
-rw-r--r--  src/mongo/db/startup_recovery.cpp | 6
-rw-r--r--  src/mongo/db/stats/storage_stats.cpp | 7
-rw-r--r--  src/mongo/db/storage/oplog_cap_maintainer_thread.cpp | 2
-rw-r--r--  src/mongo/db/storage/record_store.h | 1
-rw-r--r--  src/mongo/db/storage/storage_engine_impl.cpp | 2
-rw-r--r--  src/mongo/db/storage/storage_engine_test_fixture.h | 6
-rw-r--r--  src/mongo/db/storage/wiredtiger/oplog_stones_server_status_section.cpp | 2
-rw-r--r--  src/mongo/db/system_index.cpp | 2
-rw-r--r--  src/mongo/db/system_index.h | 3
-rw-r--r--  src/mongo/db/transaction_participant.cpp | 6
-rw-r--r--  src/mongo/db/views/durable_view_catalog.cpp | 6
-rw-r--r--  src/mongo/db/yieldable.h | 39
-rw-r--r--  src/mongo/dbtests/clienttests.cpp | 2
-rw-r--r--  src/mongo/dbtests/counttests.cpp | 2
-rw-r--r--  src/mongo/dbtests/dbtests.h | 2
-rw-r--r--  src/mongo/dbtests/indexupdatetests.cpp | 8
-rw-r--r--  src/mongo/dbtests/multikey_paths_test.cpp | 57
-rw-r--r--  src/mongo/dbtests/pdfiletests.cpp | 4
-rw-r--r--  src/mongo/dbtests/plan_executor_invalidation_test.cpp | 30
-rw-r--r--  src/mongo/dbtests/plan_ranking.cpp | 16
-rw-r--r--  src/mongo/dbtests/query_plan_executor.cpp | 18
-rw-r--r--  src/mongo/dbtests/query_stage_and.cpp | 42
-rw-r--r--  src/mongo/dbtests/query_stage_cached_plan.cpp | 63
-rw-r--r--  src/mongo/dbtests/query_stage_collscan.cpp | 64
-rw-r--r--  src/mongo/dbtests/query_stage_count_scan.cpp | 2
-rw-r--r--  src/mongo/dbtests/query_stage_delete.cpp | 6
-rw-r--r--  src/mongo/dbtests/query_stage_distinct.cpp | 6
-rw-r--r--  src/mongo/dbtests/query_stage_fetch.cpp | 6
-rw-r--r--  src/mongo/dbtests/query_stage_merge_sort.cpp | 24
-rw-r--r--  src/mongo/dbtests/query_stage_multiplan.cpp | 44
-rw-r--r--  src/mongo/dbtests/query_stage_near.cpp | 10
-rw-r--r--  src/mongo/dbtests/query_stage_sort.cpp | 28
-rw-r--r--  src/mongo/dbtests/query_stage_subplan.cpp | 14
-rw-r--r--  src/mongo/dbtests/query_stage_tests.cpp | 3
-rw-r--r--  src/mongo/dbtests/query_stage_update.cpp | 22
-rw-r--r--  src/mongo/dbtests/querytests.cpp | 6
-rw-r--r--  src/mongo/dbtests/repltests.cpp | 6
-rw-r--r--  src/mongo/dbtests/rollbacktests.cpp | 3
-rw-r--r--  src/mongo/dbtests/storage_timestamp_tests.cpp | 65
-rw-r--r--  src/mongo/dbtests/validate_tests.cpp | 52
-rw-r--r--  src/mongo/dbtests/wildcard_multikey_persistence_test.cpp | 19
301 files changed, 1650 insertions, 1332 deletions
diff --git a/src/mongo/db/auth/auth_op_observer.cpp b/src/mongo/db/auth/auth_op_observer.cpp
index 4e10e8d2d21..b70883a7df1 100644
--- a/src/mongo/db/auth/auth_op_observer.cpp
+++ b/src/mongo/db/auth/auth_op_observer.cpp
@@ -90,7 +90,7 @@ void AuthOpObserver::onDelete(OperationContext* opCtx,
}
void AuthOpObserver::onCreateCollection(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
diff --git a/src/mongo/db/auth/auth_op_observer.h b/src/mongo/db/auth/auth_op_observer.h
index b8d0e132b7a..19831510685 100644
--- a/src/mongo/db/auth/auth_op_observer.h
+++ b/src/mongo/db/auth/auth_op_observer.h
@@ -106,7 +106,7 @@ public:
const boost::optional<OplogSlot> slot) final{};
void onCreateCollection(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index a96cb39b739..4b000f92e81 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -113,7 +113,7 @@ void cloneCollectionAsCapped(OperationContext* opCtx,
const NamespaceString& toNss,
long long size,
bool temp) {
- const Collection* fromCollection =
+ const CollectionPtr& fromCollection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, fromNss);
if (!fromCollection) {
uassert(ErrorCodes::CommandNotSupportedOnView,
@@ -152,7 +152,7 @@ void cloneCollectionAsCapped(OperationContext* opCtx,
uassertStatusOK(createCollection(opCtx, toNss.db().toString(), cmd.done()));
}
- const Collection* toCollection =
+ const CollectionPtr& toCollection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, toNss);
invariant(toCollection); // we created above
@@ -231,7 +231,7 @@ void cloneCollectionAsCapped(OperationContext* opCtx,
// abandonSnapshot.
exec->saveState();
opCtx->recoveryUnit()->abandonSnapshot();
- exec->restoreState(); // Handles any WCEs internally.
+ exec->restoreState(&fromCollection); // Handles any WCEs internally.
}
}
diff --git a/src/mongo/db/catalog/capped_utils_test.cpp b/src/mongo/db/catalog/capped_utils_test.cpp
index b3cef4c8279..9bba12741bd 100644
--- a/src/mongo/db/catalog/capped_utils_test.cpp
+++ b/src/mongo/db/catalog/capped_utils_test.cpp
@@ -98,8 +98,7 @@ bool collectionExists(OperationContext* opCtx, const NamespaceString& nss) {
* Returns collection options.
*/
CollectionOptions getCollectionOptions(OperationContext* opCtx, const NamespaceString& nss) {
- AutoGetCollectionForRead autoColl(opCtx, nss);
- auto collection = autoColl.getCollection();
+ AutoGetCollectionForRead collection(opCtx, nss);
ASSERT_TRUE(collection) << "Unable to get collections options for " << nss
<< " because collection does not exist.";
return DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, collection->getCatalogId());
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index e46ec8fd298..52653d1c0d1 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -112,7 +112,7 @@ struct CollModRequest {
StatusWith<CollModRequest> parseCollModRequest(OperationContext* opCtx,
const NamespaceString& nss,
- const Collection* coll,
+ const CollectionPtr& coll,
const BSONObj& cmdObj,
BSONObjBuilder* oplogEntryBuilder) {
diff --git a/src/mongo/db/catalog/coll_mod.h b/src/mongo/db/catalog/coll_mod.h
index e92205e5888..a6b5236f355 100644
--- a/src/mongo/db/catalog/coll_mod.h
+++ b/src/mongo/db/catalog/coll_mod.h
@@ -35,6 +35,7 @@ namespace mongo {
class BSONObj;
class BSONObjBuilder;
class Collection;
+class CollectionPtr;
class NamespaceString;
class OperationContext;
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index e02689c7622..36f73778483 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -67,6 +67,63 @@ bool CappedInsertNotifier::isDead() {
return _dead;
}
+// We can't reference the catalog from this library as it would create a cyclic library dependency.
+// Set up a weak dependency using a std::function that is installed by the catalog lib.
+std::function<CollectionPtr(OperationContext*, CollectionUUID, uint64_t)>& _catalogLookup() {
+ static std::function<CollectionPtr(OperationContext*, CollectionUUID, uint64_t)> func;
+ return func;
+}
+
+void CollectionPtr::installCatalogLookupImpl(
+ std::function<CollectionPtr(OperationContext*, CollectionUUID, uint64_t)> impl) {
+ _catalogLookup() = std::move(impl);
+}
+
+CollectionPtr CollectionPtr::null;
+
+CollectionPtr::CollectionPtr() : _collection(nullptr), _opCtx(nullptr) {}
+CollectionPtr::CollectionPtr(OperationContext* opCtx,
+ const Collection* collection,
+ uint64_t catalogEpoch)
+ : _collection(collection), _opCtx(opCtx), _catalogEpoch(catalogEpoch) {}
+CollectionPtr::CollectionPtr(const Collection* collection, NoYieldTag)
+ : CollectionPtr(nullptr, collection, 0) {}
+CollectionPtr::CollectionPtr(Collection* collection) : CollectionPtr(collection, NoYieldTag{}) {}
+CollectionPtr::CollectionPtr(const std::shared_ptr<const Collection>& collection)
+ : CollectionPtr(collection.get(), NoYieldTag{}) {}
+CollectionPtr::CollectionPtr(CollectionPtr&&) = default;
+CollectionPtr::~CollectionPtr() {}
+CollectionPtr& CollectionPtr::operator=(CollectionPtr&&) = default;
+
+CollectionPtr CollectionPtr::detached() const {
+ return CollectionPtr(_opCtx, _collection, _catalogEpoch);
+}
+
+bool CollectionPtr::_canYield() const {
+ // We only set the opCtx when we use a constructor that allows yielding.
+ // When we are doing a lock-free read or holding a writable pointer in a WUOW, it is not
+ // allowed to yield.
+ return _opCtx;
+}
+
+void CollectionPtr::yield() const {
+ if (_canYield()) {
+ _uuid = _collection->uuid();
+ _ns = _collection->ns();
+ _collection = nullptr;
+ }
+}
+void CollectionPtr::restore() const {
+ if (_canYield()) {
+ // We may only restore after a yield that released the locks we held, so we need to
+ // refresh from the catalog to make sure we have a valid collection pointer.
+ auto coll = _catalogLookup()(_opCtx, *_uuid, _catalogEpoch);
+ if (coll && coll->ns() == _ns) {
+ _collection = coll.get();
+ }
+ }
+}
+
// ----
namespace {
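
The comment at the top of this hunk explains the layering constraint: the collection library cannot link against the catalog library, so the catalog installs the lookup callback at static-initialization time. A minimal, self-contained sketch of that weak-dependency pattern (hypothetical names, not MongoDB's API) looks like this:

    #include <functional>
    #include <utility>

    // "Low-level" library: declares the hook but knows nothing about the library
    // that implements it.
    std::function<int(int)>& lookupHook() {
        static std::function<int(int)> hook;  // empty until installed
        return hook;
    }

    void installLookup(std::function<int(int)> impl) {
        lookupHook() = std::move(impl);
    }

    // "High-level" library: installs the implementation from a static initializer,
    // mirroring the installCatalogLookupFn struct added to collection_catalog.cpp
    // later in this diff.
    namespace {
    struct InstallLookup {
        InstallLookup() {
            installLookup([](int key) { return key * 2; });
        }
    } installLookupInstance;
    }  // namespace
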
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 134fc67df0e..756d2f16171 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -51,6 +51,7 @@
#include "mongo/db/storage/capped_callback.h"
#include "mongo/db/storage/record_store.h"
#include "mongo/db/storage/snapshot.h"
+#include "mongo/db/yieldable.h"
#include "mongo/logv2/log_attr.h"
#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
@@ -58,6 +59,7 @@
namespace mongo {
class CappedCallback;
+class CollectionPtr;
class ExtentManager;
class IndexCatalog;
class IndexCatalogEntry;
@@ -552,6 +554,7 @@ public:
*/
virtual std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> makePlanExecutor(
OperationContext* opCtx,
+ const CollectionPtr& yieldableCollection,
PlanYieldPolicy::YieldPolicy yieldPolicy,
ScanDirection scanDirection,
boost::optional<RecordId> resumeAfterRecordId = boost::none) const = 0;
@@ -576,4 +579,85 @@ public:
}
};
+/**
+ * Smart-pointer-like type that handles yielding of Collection locks, which may invalidate the
+ * underlying pointer when resuming. CollectionPtr will reload the Collection from the catalog
+ * when restoring from a yield that dropped Collection locks. If it is constructed from a
+ * lock-free read context (shared_ptr) or from a writable pointer, it is not allowed to reload
+ * from the catalog and the yield operations are no-ops.
+ */
+class CollectionPtr : public Yieldable {
+public:
+ static CollectionPtr null;
+
+ CollectionPtr();
+
+ // Creates a yieldable CollectionPtr that reloads the Collection pointer from the catalog
+ // when restoring from a yield.
+ CollectionPtr(OperationContext* opCtx, const Collection* collection, uint64_t catalogEpoch);
+
+ // Creates a non-yieldable CollectionPtr; performing yield/restore will be a no-op.
+ struct NoYieldTag {};
+ CollectionPtr(const Collection* collection, NoYieldTag);
+ CollectionPtr(const std::shared_ptr<const Collection>& collection);
+ CollectionPtr(Collection* collection);
+
+ CollectionPtr(const CollectionPtr&) = delete;
+ CollectionPtr(CollectionPtr&&);
+ ~CollectionPtr();
+
+ CollectionPtr& operator=(const CollectionPtr&) = delete;
+ CollectionPtr& operator=(CollectionPtr&&);
+
+ explicit operator bool() const {
+ return static_cast<bool>(_collection);
+ }
+
+ bool operator==(const CollectionPtr& other) const {
+ return get() == other.get();
+ }
+ bool operator!=(const CollectionPtr& other) const {
+ return !operator==(other);
+ }
+ const Collection* operator->() const {
+ return _collection;
+ }
+ const Collection* get() const {
+ return _collection;
+ }
+
+ // Creates a new CollectionPtr that is detached from the current instance; if the current
+ // instance is yieldable, the new CollectionPtr will be too. Yielding on the new instance may
+ // cause the instance we detached from to dangle.
+ CollectionPtr detached() const;
+
+ void reset() {
+ *this = CollectionPtr();
+ }
+
+ void yield() const override;
+ void restore() const override;
+
+ static void installCatalogLookupImpl(
+ std::function<CollectionPtr(OperationContext*, CollectionUUID, uint64_t)> impl);
+
+ friend std::ostream& operator<<(std::ostream& os, const CollectionPtr& coll);
+
+private:
+ bool _canYield() const;
+
+ // These members need to be mutable so the yield/restore interface can be const. We don't want
+ // yield/restore to require a non-const instance when it otherwise could be const.
+ mutable const Collection* _collection;
+ mutable OptionalCollectionUUID _uuid;
+ mutable NamespaceString _ns;
+ OperationContext* _opCtx;
+ uint64_t _catalogEpoch;
+};
+
+inline std::ostream& operator<<(std::ostream& os, const CollectionPtr& coll) {
+ os << coll.get();
+ return os;
+}
+
} // namespace mongo
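
Taken together, the interface above implies the following call pattern around a lock yield; the real call sites are the PlanExecutor and query-stage changes later in this diff. This is an illustrative sketch only, with a hypothetical caller:

    // Assumes a yieldable CollectionPtr obtained from the catalog. yield() stashes
    // the collection's uuid/ns and clears the raw pointer; restore() re-resolves
    // the Collection through the installed catalog lookup.
    void workWithYield(OperationContext* opCtx, const CollectionPtr& coll) {
        // ... work under the collection lock ...

        coll.yield();
        // ... drop and reacquire locks, e.g. around
        // opCtx->recoveryUnit()->abandonSnapshot() ...
        coll.restore();

        if (!coll) {
            // The collection was dropped or renamed, or the catalog epoch changed
            // while yielded; bail out rather than dereference.
            return;
        }

        // ... continue using coll-> ...
    }
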
diff --git a/src/mongo/db/catalog/collection_catalog.cpp b/src/mongo/db/catalog/collection_catalog.cpp
index 34444d2a45f..ce2fea09cb7 100644
--- a/src/mongo/db/catalog/collection_catalog.cpp
+++ b/src/mongo/db/catalog/collection_catalog.cpp
@@ -47,6 +47,19 @@ namespace {
const ServiceContext::Decoration<CollectionCatalog> getCatalog =
ServiceContext::declareDecoration<CollectionCatalog>();
+struct installCatalogLookupFn {
+ installCatalogLookupFn() {
+ CollectionPtr::installCatalogLookupImpl(
+ [](OperationContext* opCtx, CollectionUUID uuid, uint64_t catalogEpoch) {
+ const auto& catalog = CollectionCatalog::get(opCtx);
+ if (catalog.getEpoch() != catalogEpoch)
+ return CollectionPtr();
+
+ return catalog.lookupCollectionByUUID(opCtx, uuid);
+ });
+ }
+} inst;
+
class FinishDropCollectionChange : public RecoveryUnit::Change {
public:
FinishDropCollectionChange(CollectionCatalog* catalog,
@@ -70,10 +83,11 @@ private:
} // namespace
-CollectionCatalog::iterator::iterator(StringData dbName,
+CollectionCatalog::iterator::iterator(OperationContext* opCtx,
+ StringData dbName,
uint64_t genNum,
const CollectionCatalog& catalog)
- : _dbName(dbName), _genNum(genNum), _catalog(&catalog) {
+ : _opCtx(opCtx), _dbName(dbName), _genNum(genNum), _catalog(&catalog) {
auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue();
stdx::lock_guard<Latch> lock(_catalog->_catalogLock);
@@ -89,18 +103,19 @@ CollectionCatalog::iterator::iterator(StringData dbName,
}
}
-CollectionCatalog::iterator::iterator(std::map<std::pair<std::string, CollectionUUID>,
+CollectionCatalog::iterator::iterator(OperationContext* opCtx,
+ std::map<std::pair<std::string, CollectionUUID>,
std::shared_ptr<Collection>>::const_iterator mapIter)
- : _mapIter(mapIter) {}
+ : _opCtx(opCtx), _mapIter(mapIter) {}
CollectionCatalog::iterator::value_type CollectionCatalog::iterator::operator*() {
stdx::lock_guard<Latch> lock(_catalog->_catalogLock);
_repositionIfNeeded();
if (_exhausted()) {
- return _nullCollection;
+ return CollectionPtr();
}
- return _mapIter->second.get();
+ return {_opCtx, _mapIter->second.get(), _catalog->getEpoch()};
}
Collection* CollectionCatalog::iterator::getWritableCollection(OperationContext* opCtx,
@@ -297,15 +312,16 @@ Collection* CollectionCatalog::lookupCollectionByUUIDForMetadataWrite(OperationC
return nullptr;
}
-const Collection* CollectionCatalog::lookupCollectionByUUID(OperationContext* opCtx,
- CollectionUUID uuid) const {
+CollectionPtr CollectionCatalog::lookupCollectionByUUID(OperationContext* opCtx,
+ CollectionUUID uuid) const {
if (auto coll = UncommittedCollections::getForTxn(opCtx, uuid)) {
- return coll.get();
+ return {opCtx, coll.get(), getEpoch()};
}
stdx::lock_guard<Latch> lock(_catalogLock);
auto coll = _lookupCollectionByUUID(lock, uuid);
- return (coll && coll->isCommitted()) ? coll.get() : nullptr;
+ return (coll && coll->isCommitted()) ? CollectionPtr(opCtx, coll.get(), getEpoch())
+ : CollectionPtr();
}
void CollectionCatalog::makeCollectionVisible(CollectionUUID uuid) {
@@ -361,16 +377,16 @@ Collection* CollectionCatalog::lookupCollectionByNamespaceForMetadataWrite(
return nullptr;
}
-const Collection* CollectionCatalog::lookupCollectionByNamespace(OperationContext* opCtx,
- const NamespaceString& nss) const {
+CollectionPtr CollectionCatalog::lookupCollectionByNamespace(OperationContext* opCtx,
+ const NamespaceString& nss) const {
if (auto coll = UncommittedCollections::getForTxn(opCtx, nss)) {
- return coll.get();
+ return {opCtx, coll.get(), getEpoch()};
}
stdx::lock_guard<Latch> lock(_catalogLock);
auto it = _collections.find(nss);
auto coll = (it == _collections.end() ? nullptr : it->second);
- return (coll && coll->isCommitted()) ? coll.get() : nullptr;
+ return (coll && coll->isCommitted()) ? CollectionPtr(opCtx, coll.get(), getEpoch()) : nullptr;
}
boost::optional<NamespaceString> CollectionCatalog::lookupNSSByUUID(OperationContext* opCtx,
@@ -626,12 +642,12 @@ void CollectionCatalog::deregisterAllCollections() {
_generationNumber++;
}
-CollectionCatalog::iterator CollectionCatalog::begin(StringData db) const {
- return iterator(db, _generationNumber, *this);
+CollectionCatalog::iterator CollectionCatalog::begin(OperationContext* opCtx, StringData db) const {
+ return iterator(opCtx, db, _generationNumber, *this);
}
-CollectionCatalog::iterator CollectionCatalog::end() const {
- return iterator(_orderedCollections.end());
+CollectionCatalog::iterator CollectionCatalog::end(OperationContext* opCtx) const {
+ return iterator(opCtx, _orderedCollections.end());
}
boost::optional<std::string> CollectionCatalog::lookupResourceName(const ResourceId& rid) {
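
Note the epoch check in the lookup installed at the top of this file: if the catalog was closed and reopened while a query was yielded, getEpoch() no longer matches the epoch captured when the CollectionPtr was created, the lookup returns an empty CollectionPtr, and restore() leaves the pointer null. A sketch of the behavior a caller should expect (hypothetical test code, not part of this patch):

    CollectionPtr coll =
        CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, uuid);
    coll.yield();
    // ... catalog closed and reopened here, bumping the epoch ...
    coll.restore();
    ASSERT(!coll);  // epoch mismatch: the pointer is not restored
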
diff --git a/src/mongo/db/catalog/collection_catalog.h b/src/mongo/db/catalog/collection_catalog.h
index 32afe461f04..26b64a3ce6d 100644
--- a/src/mongo/db/catalog/collection_catalog.h
+++ b/src/mongo/db/catalog/collection_catalog.h
@@ -55,7 +55,7 @@ class CollectionCatalog {
friend class iterator;
public:
- using CollectionInfoFn = std::function<bool(const Collection* collection)>;
+ using CollectionInfoFn = std::function<bool(const CollectionPtr& collection)>;
enum class LifetimeMode {
// Lifetime of writable Collection is managed by an active write unit of work. The writable
@@ -73,10 +73,14 @@ public:
class iterator {
public:
- using value_type = const Collection*;
-
- iterator(StringData dbName, uint64_t genNum, const CollectionCatalog& catalog);
- iterator(std::map<std::pair<std::string, CollectionUUID>,
+ using value_type = CollectionPtr;
+
+ iterator(OperationContext* opCtx,
+ StringData dbName,
+ uint64_t genNum,
+ const CollectionCatalog& catalog);
+ iterator(OperationContext* opCtx,
+ std::map<std::pair<std::string, CollectionUUID>,
std::shared_ptr<Collection>>::const_iterator mapIter);
value_type operator*();
iterator operator++();
@@ -104,13 +108,13 @@ public:
bool _repositionIfNeeded();
bool _exhausted();
+ OperationContext* _opCtx;
std::string _dbName;
boost::optional<CollectionUUID> _uuid;
uint64_t _genNum;
std::map<std::pair<std::string, CollectionUUID>,
std::shared_ptr<Collection>>::const_iterator _mapIter;
const CollectionCatalog* _catalog;
- static constexpr Collection* _nullCollection = nullptr;
};
struct ProfileSettings {
@@ -182,7 +186,7 @@ public:
Collection* lookupCollectionByUUIDForMetadataWrite(OperationContext* opCtx,
LifetimeMode mode,
CollectionUUID uuid);
- const Collection* lookupCollectionByUUID(OperationContext* opCtx, CollectionUUID uuid) const;
+ CollectionPtr lookupCollectionByUUID(OperationContext* opCtx, CollectionUUID uuid) const;
std::shared_ptr<const Collection> lookupCollectionByUUIDForRead(OperationContext* opCtx,
CollectionUUID uuid) const;
@@ -205,8 +209,8 @@ public:
Collection* lookupCollectionByNamespaceForMetadataWrite(OperationContext* opCtx,
LifetimeMode mode,
const NamespaceString& nss);
- const Collection* lookupCollectionByNamespace(OperationContext* opCtx,
- const NamespaceString& nss) const;
+ CollectionPtr lookupCollectionByNamespace(OperationContext* opCtx,
+ const NamespaceString& nss) const;
std::shared_ptr<const Collection> lookupCollectionByNamespaceForRead(
OperationContext* opCtx, const NamespaceString& nss) const;
@@ -324,8 +328,8 @@ public:
*/
uint64_t getEpoch() const;
- iterator begin(StringData db) const;
- iterator end() const;
+ iterator begin(OperationContext* opCtx, StringData db) const;
+ iterator end(OperationContext* opCtx) const;
/**
* Lookup the name of a resource by its ResourceId. If there are multiple namespaces mapped to
diff --git a/src/mongo/db/catalog/collection_catalog_helper.cpp b/src/mongo/db/catalog/collection_catalog_helper.cpp
index 069bc1558f0..8b220b79464 100644
--- a/src/mongo/db/catalog/collection_catalog_helper.cpp
+++ b/src/mongo/db/catalog/collection_catalog_helper.cpp
@@ -45,14 +45,15 @@ void forEachCollectionFromDb(OperationContext* opCtx,
CollectionCatalog::CollectionInfoFn predicate) {
CollectionCatalog& catalog = CollectionCatalog::get(opCtx);
- for (auto collectionIt = catalog.begin(dbName); collectionIt != catalog.end(); ++collectionIt) {
+ for (auto collectionIt = catalog.begin(opCtx, dbName); collectionIt != catalog.end(opCtx);
+ ++collectionIt) {
auto uuid = collectionIt.uuid().get();
if (predicate && !catalog.checkIfCollectionSatisfiable(uuid, predicate)) {
continue;
}
boost::optional<Lock::CollectionLock> clk;
- const Collection* collection = nullptr;
+ CollectionPtr collection;
while (auto nss = catalog.lookupNSSByUUID(opCtx, uuid)) {
// Get a fresh snapshot for each locked collection to see any catalog changes.
diff --git a/src/mongo/db/catalog/collection_catalog_helper.h b/src/mongo/db/catalog/collection_catalog_helper.h
index db9010db85a..aa5756e7baf 100644
--- a/src/mongo/db/catalog/collection_catalog_helper.h
+++ b/src/mongo/db/catalog/collection_catalog_helper.h
@@ -36,6 +36,7 @@
namespace mongo {
class Collection;
+class CollectionPtr;
class CollectionCatalogEntry;
namespace catalog {
diff --git a/src/mongo/db/catalog/collection_catalog_test.cpp b/src/mongo/db/catalog/collection_catalog_test.cpp
index 5ac89ed76a8..e4bb122e48c 100644
--- a/src/mongo/db/catalog/collection_catalog_test.cpp
+++ b/src/mongo/db/catalog/collection_catalog_test.cpp
@@ -45,7 +45,7 @@ namespace mongo {
namespace {
/**
- * A test fixture that creates a CollectionCatalog and const Collection* pointer to store in it.
+ * A test fixture that creates a CollectionCatalog and const CollectionPtr& pointer to store in it.
*/
class CollectionCatalogTest : public ServiceContextMongoDTest {
public:
@@ -69,7 +69,7 @@ public:
ServiceContextMongoDTest::setUp();
std::shared_ptr<Collection> collection = std::make_shared<CollectionMock>(nss);
- col = collection.get();
+ col = CollectionPtr(collection.get(), CollectionPtr::NoYieldTag{});
// Register dummy collection in catalog.
catalog.registerCollection(colUUID, std::move(collection));
}
@@ -78,7 +78,7 @@ protected:
CollectionCatalog catalog;
OperationContextNoop opCtx;
NamespaceString nss;
- const Collection* col;
+ CollectionPtr col;
CollectionUUID colUUID;
CollectionUUID nextUUID;
CollectionUUID prevUUID;
@@ -112,13 +112,13 @@ public:
}
}
- std::map<CollectionUUID, const Collection*>::iterator collsIterator(std::string dbName) {
+ std::map<CollectionUUID, CollectionPtr>::iterator collsIterator(std::string dbName) {
auto it = dbMap.find(dbName);
ASSERT(it != dbMap.end());
return it->second.begin();
}
- std::map<CollectionUUID, const Collection*>::iterator collsIteratorEnd(std::string dbName) {
+ std::map<CollectionUUID, CollectionPtr>::iterator collsIteratorEnd(std::string dbName) {
auto it = dbMap.find(dbName);
ASSERT(it != dbMap.end());
return it->second.end();
@@ -127,13 +127,14 @@ public:
void checkCollections(std::string dbName) {
unsigned long counter = 0;
- for (auto [orderedIt, catalogIt] = std::tuple{collsIterator(dbName), catalog.begin(dbName)};
- catalogIt != catalog.end() && orderedIt != collsIteratorEnd(dbName);
+ for (auto [orderedIt, catalogIt] =
+ std::tuple{collsIterator(dbName), catalog.begin(&opCtx, dbName)};
+ catalogIt != catalog.end(&opCtx) && orderedIt != collsIteratorEnd(dbName);
++catalogIt, ++orderedIt) {
auto catalogColl = *catalogIt;
ASSERT(catalogColl != nullptr);
- auto orderedColl = orderedIt->second;
+ const auto& orderedColl = orderedIt->second;
ASSERT_EQ(catalogColl->ns(), orderedColl->ns());
++counter;
}
@@ -148,7 +149,7 @@ public:
protected:
CollectionCatalog catalog;
OperationContextNoop opCtx;
- std::map<std::string, std::map<CollectionUUID, const Collection*>> dbMap;
+ std::map<std::string, std::map<CollectionUUID, CollectionPtr>> dbMap;
};
class CollectionCatalogResourceMapTest : public unittest::Test {
@@ -281,7 +282,7 @@ public:
}
int numEntries = 0;
- for (auto it = catalog.begin("resourceDb"); it != catalog.end(); it++) {
+ for (auto it = catalog.begin(&opCtx, "resourceDb"); it != catalog.end(&opCtx); it++) {
auto coll = *it;
std::string collName = coll->ns().ns();
ResourceId rid(RESOURCE_COLLECTION, collName);
@@ -293,7 +294,7 @@ public:
}
void tearDown() {
- for (auto it = catalog.begin("resourceDb"); it != catalog.end(); ++it) {
+ for (auto it = catalog.begin(&opCtx, "resourceDb"); it != catalog.end(&opCtx); ++it) {
auto coll = *it;
auto uuid = coll->uuid();
if (!coll) {
@@ -304,7 +305,7 @@ public:
}
int numEntries = 0;
- for (auto it = catalog.begin("resourceDb"); it != catalog.end(); it++) {
+ for (auto it = catalog.begin(&opCtx, "resourceDb"); it != catalog.end(&opCtx); it++) {
numEntries++;
}
ASSERT_EQ(0, numEntries);
@@ -382,7 +383,7 @@ TEST_F(CollectionCatalogIterationTest, EndAtEndOfSection) {
// Delete an entry in the catalog while iterating.
TEST_F(CollectionCatalogIterationTest, InvalidateEntry) {
- auto it = catalog.begin("bar");
+ auto it = catalog.begin(&opCtx, "bar");
// Invalidate bar.coll1.
for (auto collsIt = collsIterator("bar"); collsIt != collsIteratorEnd("bar"); ++collsIt) {
@@ -394,7 +395,7 @@ TEST_F(CollectionCatalogIterationTest, InvalidateEntry) {
}
- for (; it != catalog.end(); ++it) {
+ for (; it != catalog.end(&opCtx); ++it) {
auto coll = *it;
ASSERT(coll && coll->ns().ns() != "bar.coll1");
}
@@ -402,13 +403,13 @@ TEST_F(CollectionCatalogIterationTest, InvalidateEntry) {
// Delete the entry pointed to by the iterator and dereference the iterator.
TEST_F(CollectionCatalogIterationTest, InvalidateAndDereference) {
- auto it = catalog.begin("bar");
+ auto it = catalog.begin(&opCtx, "bar");
auto collsIt = collsIterator("bar");
auto uuid = collsIt->first;
catalog.deregisterCollection(uuid);
++collsIt;
- ASSERT(it != catalog.end());
+ ASSERT(it != catalog.end(&opCtx));
auto catalogColl = *it;
ASSERT(catalogColl != nullptr);
ASSERT_EQUALS(catalogColl->ns(), collsIt->second->ns());
@@ -418,7 +419,7 @@ TEST_F(CollectionCatalogIterationTest, InvalidateAndDereference) {
// Delete the last entry for a database while pointing to it and dereference the iterator.
TEST_F(CollectionCatalogIterationTest, InvalidateLastEntryAndDereference) {
- auto it = catalog.begin("bar");
+ auto it = catalog.begin(&opCtx, "bar");
NamespaceString lastNs;
boost::optional<CollectionUUID> uuid;
for (auto collsIt = collsIterator("bar"); collsIt != collsIteratorEnd("bar"); ++collsIt) {
@@ -427,7 +428,7 @@ TEST_F(CollectionCatalogIterationTest, InvalidateLastEntryAndDereference) {
}
// Increment until it points to the last collection.
- for (; it != catalog.end(); ++it) {
+ for (; it != catalog.end(&opCtx); ++it) {
auto coll = *it;
ASSERT(coll != nullptr);
if (coll->ns() == lastNs) {
@@ -442,7 +443,7 @@ TEST_F(CollectionCatalogIterationTest, InvalidateLastEntryAndDereference) {
// Delete the last entry in the map while pointing to it and dereference the iterator.
TEST_F(CollectionCatalogIterationTest, InvalidateLastEntryInMapAndDereference) {
- auto it = catalog.begin("foo");
+ auto it = catalog.begin(&opCtx, "foo");
NamespaceString lastNs;
boost::optional<CollectionUUID> uuid;
for (auto collsIt = collsIterator("foo"); collsIt != collsIteratorEnd("foo"); ++collsIt) {
@@ -451,7 +452,7 @@ TEST_F(CollectionCatalogIterationTest, InvalidateLastEntryInMapAndDereference) {
}
// Increment until it points to the last collection.
- for (; it != catalog.end(); ++it) {
+ for (; it != catalog.end(&opCtx); ++it) {
auto coll = *it;
ASSERT(coll != nullptr);
if (coll->ns() == lastNs) {
@@ -465,7 +466,7 @@ TEST_F(CollectionCatalogIterationTest, InvalidateLastEntryInMapAndDereference) {
}
TEST_F(CollectionCatalogIterationTest, GetUUIDWontRepositionEvenIfEntryIsDropped) {
- auto it = catalog.begin("bar");
+ auto it = catalog.begin(&opCtx, "bar");
auto collsIt = collsIterator("bar");
auto uuid = collsIt->first;
catalog.deregisterCollection(uuid);
@@ -726,7 +727,7 @@ TEST_F(ForEachCollectionFromDbTest, ForEachCollectionFromDb) {
{
auto dbLock = std::make_unique<Lock::DBLock>(opCtx, "db", MODE_IX);
int numCollectionsTraversed = 0;
- catalog::forEachCollectionFromDb(opCtx, "db", MODE_X, [&](const Collection* collection) {
+ catalog::forEachCollectionFromDb(opCtx, "db", MODE_X, [&](const CollectionPtr& collection) {
ASSERT_TRUE(opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_X));
numCollectionsTraversed++;
return true;
@@ -738,11 +739,13 @@ TEST_F(ForEachCollectionFromDbTest, ForEachCollectionFromDb) {
{
auto dbLock = std::make_unique<Lock::DBLock>(opCtx, "db2", MODE_IX);
int numCollectionsTraversed = 0;
- catalog::forEachCollectionFromDb(opCtx, "db2", MODE_IS, [&](const Collection* collection) {
- ASSERT_TRUE(opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_IS));
- numCollectionsTraversed++;
- return true;
- });
+ catalog::forEachCollectionFromDb(
+ opCtx, "db2", MODE_IS, [&](const CollectionPtr& collection) {
+ ASSERT_TRUE(
+ opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_IS));
+ numCollectionsTraversed++;
+ return true;
+ });
ASSERT_EQUALS(numCollectionsTraversed, 1);
}
@@ -750,10 +753,11 @@ TEST_F(ForEachCollectionFromDbTest, ForEachCollectionFromDb) {
{
auto dbLock = std::make_unique<Lock::DBLock>(opCtx, "db3", MODE_IX);
int numCollectionsTraversed = 0;
- catalog::forEachCollectionFromDb(opCtx, "db3", MODE_S, [&](const Collection* collection) {
- numCollectionsTraversed++;
- return true;
- });
+ catalog::forEachCollectionFromDb(
+ opCtx, "db3", MODE_S, [&](const CollectionPtr& collection) {
+ numCollectionsTraversed++;
+ return true;
+ });
ASSERT_EQUALS(numCollectionsTraversed, 0);
}
@@ -770,13 +774,13 @@ TEST_F(ForEachCollectionFromDbTest, ForEachCollectionFromDbWithPredicate) {
opCtx,
"db",
MODE_X,
- [&](const Collection* collection) {
+ [&](const CollectionPtr& collection) {
ASSERT_TRUE(
opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_X));
numCollectionsTraversed++;
return true;
},
- [&](const Collection* collection) {
+ [&](const CollectionPtr& collection) {
ASSERT_TRUE(
opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_NONE));
return DurableCatalog::get(opCtx)
@@ -794,13 +798,13 @@ TEST_F(ForEachCollectionFromDbTest, ForEachCollectionFromDbWithPredicate) {
opCtx,
"db",
MODE_IX,
- [&](const Collection* collection) {
+ [&](const CollectionPtr& collection) {
ASSERT_TRUE(
opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_IX));
numCollectionsTraversed++;
return true;
},
- [&](const Collection* collection) {
+ [&](const CollectionPtr& collection) {
ASSERT_TRUE(
opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_NONE));
return !DurableCatalog::get(opCtx)
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index f143dd89d1d..dbb1130ce2c 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -50,14 +50,13 @@ using logv2::LogComponent;
namespace {
-const Collection* getCollectionForCompact(OperationContext* opCtx,
- Database* database,
- const NamespaceString& collectionNss) {
+CollectionPtr getCollectionForCompact(OperationContext* opCtx,
+ Database* database,
+ const NamespaceString& collectionNss) {
invariant(opCtx->lockState()->isCollectionLockedForMode(collectionNss, MODE_IX));
CollectionCatalog& collectionCatalog = CollectionCatalog::get(opCtx);
- const Collection* collection =
- collectionCatalog.lookupCollectionByNamespace(opCtx, collectionNss);
+ CollectionPtr collection = collectionCatalog.lookupCollectionByNamespace(opCtx, collectionNss);
if (!collection) {
std::shared_ptr<ViewDefinition> view =
@@ -82,7 +81,7 @@ StatusWith<int64_t> compactCollection(OperationContext* opCtx,
boost::optional<Lock::CollectionLock> collLk;
collLk.emplace(opCtx, collectionNss, MODE_X);
- const Collection* collection = getCollectionForCompact(opCtx, database, collectionNss);
+ CollectionPtr collection = getCollectionForCompact(opCtx, database, collectionNss);
DisableDocumentValidation validationDisabler(opCtx);
auto recordStore = collection->getRecordStore();
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index f6eb83db709..022735e9110 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -690,7 +690,8 @@ Status CollectionImpl::_insertDocuments(OperationContext* opCtx,
}
int64_t keysInserted;
- status = _indexCatalog->indexRecords(opCtx, this, bsonRecords, &keysInserted);
+ status = _indexCatalog->indexRecords(
+ opCtx, {this, CollectionPtr::NoYieldTag{}}, bsonRecords, &keysInserted);
if (opDebug) {
opDebug->additiveMetrics.incrementKeysInserted(keysInserted);
}
@@ -841,8 +842,13 @@ RecordId CollectionImpl::updateDocument(OperationContext* opCtx,
if (indexesAffected) {
int64_t keysInserted, keysDeleted;
- uassertStatusOK(_indexCatalog->updateRecord(
- opCtx, this, *args->preImageDoc, newDoc, oldLocation, &keysInserted, &keysDeleted));
+ uassertStatusOK(_indexCatalog->updateRecord(opCtx,
+ {this, CollectionPtr::NoYieldTag{}},
+ *args->preImageDoc,
+ newDoc,
+ oldLocation,
+ &keysInserted,
+ &keysDeleted));
if (opDebug) {
opDebug->additiveMetrics.incrementKeysInserted(keysInserted);
@@ -1233,13 +1239,18 @@ StatusWith<std::vector<BSONObj>> CollectionImpl::addCollationDefaultsToIndexSpec
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> CollectionImpl::makePlanExecutor(
OperationContext* opCtx,
+ const CollectionPtr& yieldableCollection,
PlanYieldPolicy::YieldPolicy yieldPolicy,
ScanDirection scanDirection,
boost::optional<RecordId> resumeAfterRecordId) const {
auto isForward = scanDirection == ScanDirection::kForward;
auto direction = isForward ? InternalPlanner::FORWARD : InternalPlanner::BACKWARD;
- return InternalPlanner::collectionScan(
- opCtx, _ns.ns(), this, yieldPolicy, direction, resumeAfterRecordId);
+ return InternalPlanner::collectionScan(opCtx,
+ yieldableCollection->ns().ns(),
+ yieldableCollection,
+ yieldPolicy,
+ direction,
+ resumeAfterRecordId);
}
void CollectionImpl::setNs(NamespaceString nss) {
diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h
index 7f5c03cb018..e6037e04ebc 100644
--- a/src/mongo/db/catalog/collection_impl.h
+++ b/src/mongo/db/catalog/collection_impl.h
@@ -352,6 +352,7 @@ public:
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> makePlanExecutor(
OperationContext* opCtx,
+ const CollectionPtr& yieldableCollection,
PlanYieldPolicy::YieldPolicy yieldPolicy,
ScanDirection scanDirection,
boost::optional<RecordId> resumeAfterRecordId) const final;
diff --git a/src/mongo/db/catalog/collection_mock.h b/src/mongo/db/catalog/collection_mock.h
index 23220322696..ad5b6bc8580 100644
--- a/src/mongo/db/catalog/collection_mock.h
+++ b/src/mongo/db/catalog/collection_mock.h
@@ -273,6 +273,7 @@ public:
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> makePlanExecutor(
OperationContext* opCtx,
+ const CollectionPtr& yieldableCollection,
PlanYieldPolicy::YieldPolicy yieldPolicy,
ScanDirection scanDirection,
boost::optional<RecordId> resumeAfterRecordId) const {
diff --git a/src/mongo/db/catalog/collection_test.cpp b/src/mongo/db/catalog/collection_test.cpp
index b973cf5a949..d9c03af2d61 100644
--- a/src/mongo/db/catalog/collection_test.cpp
+++ b/src/mongo/db/catalog/collection_test.cpp
@@ -62,7 +62,7 @@ TEST_F(CollectionTest, CappedNotifierKillAndIsDead) {
makeCapped(nss);
AutoGetCollectionForRead acfr(operationContext(), nss);
- const Collection* col = acfr.getCollection();
+ const CollectionPtr& col = acfr.getCollection();
auto notifier = col->getCappedInsertNotifier();
ASSERT_FALSE(notifier->isDead());
notifier->kill();
@@ -74,7 +74,7 @@ TEST_F(CollectionTest, CappedNotifierTimeouts) {
makeCapped(nss);
AutoGetCollectionForRead acfr(operationContext(), nss);
- const Collection* col = acfr.getCollection();
+ const CollectionPtr& col = acfr.getCollection();
auto notifier = col->getCappedInsertNotifier();
ASSERT_EQ(notifier->getVersion(), 0u);
@@ -90,7 +90,7 @@ TEST_F(CollectionTest, CappedNotifierWaitAfterNotifyIsImmediate) {
makeCapped(nss);
AutoGetCollectionForRead acfr(operationContext(), nss);
- const Collection* col = acfr.getCollection();
+ const CollectionPtr& col = acfr.getCollection();
auto notifier = col->getCappedInsertNotifier();
auto prevVersion = notifier->getVersion();
@@ -109,7 +109,7 @@ TEST_F(CollectionTest, CappedNotifierWaitUntilAsynchronousNotifyAll) {
makeCapped(nss);
AutoGetCollectionForRead acfr(operationContext(), nss);
- const Collection* col = acfr.getCollection();
+ const CollectionPtr& col = acfr.getCollection();
auto notifier = col->getCappedInsertNotifier();
auto prevVersion = notifier->getVersion();
auto thisVersion = prevVersion + 1;
@@ -134,7 +134,7 @@ TEST_F(CollectionTest, CappedNotifierWaitUntilAsynchronousKill) {
makeCapped(nss);
AutoGetCollectionForRead acfr(operationContext(), nss);
- const Collection* col = acfr.getCollection();
+ const CollectionPtr& col = acfr.getCollection();
auto notifier = col->getCappedInsertNotifier();
auto prevVersion = notifier->getVersion();
@@ -158,7 +158,7 @@ TEST_F(CollectionTest, HaveCappedWaiters) {
makeCapped(nss);
AutoGetCollectionForRead acfr(operationContext(), nss);
- const Collection* col = acfr.getCollection();
+ const CollectionPtr& col = acfr.getCollection();
ASSERT_FALSE(col->getCappedCallback()->haveCappedWaiters());
{
auto notifier = col->getCappedInsertNotifier();
@@ -172,7 +172,7 @@ TEST_F(CollectionTest, NotifyCappedWaitersIfNeeded) {
makeCapped(nss);
AutoGetCollectionForRead acfr(operationContext(), nss);
- const Collection* col = acfr.getCollection();
+ const CollectionPtr& col = acfr.getCollection();
col->getCappedCallback()->notifyCappedWaitersIfNeeded();
{
auto notifier = col->getCappedInsertNotifier();
@@ -187,14 +187,14 @@ TEST_F(CollectionTest, AsynchronouslyNotifyCappedWaitersIfNeeded) {
makeCapped(nss);
AutoGetCollectionForRead acfr(operationContext(), nss);
- const Collection* col = acfr.getCollection();
+ const CollectionPtr& col = acfr.getCollection();
auto notifier = col->getCappedInsertNotifier();
auto prevVersion = notifier->getVersion();
auto thisVersion = prevVersion + 1;
auto before = Date_t::now();
notifier->waitUntil(prevVersion, before + Milliseconds(25));
- stdx::thread thread([before, prevVersion, col] {
+ stdx::thread thread([before, prevVersion, &col] {
auto after = Date_t::now();
ASSERT_GTE(after - before, Milliseconds(25));
col->getCappedCallback()->notifyCappedWaitersIfNeeded();
diff --git a/src/mongo/db/catalog/collection_validation.cpp b/src/mongo/db/catalog/collection_validation.cpp
index 2dad6e071cd..621435fec39 100644
--- a/src/mongo/db/catalog/collection_validation.cpp
+++ b/src/mongo/db/catalog/collection_validation.cpp
@@ -354,7 +354,7 @@ std::string multikeyPathsToString(MultikeyPaths paths) {
void _validateCatalogEntry(OperationContext* opCtx,
ValidateState* validateState,
ValidateResults* results) {
- const Collection* collection = validateState->getCollection();
+ const auto& collection = validateState->getCollection();
CollectionOptions options =
DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, collection->getCatalogId());
if (options.uuid) {
diff --git a/src/mongo/db/catalog/collection_validation.h b/src/mongo/db/catalog/collection_validation.h
index e5ac3d1329e..d8eca6226d6 100644
--- a/src/mongo/db/catalog/collection_validation.h
+++ b/src/mongo/db/catalog/collection_validation.h
@@ -36,6 +36,7 @@ namespace mongo {
class OperationContext;
class Collection;
+class CollectionPtr;
class BSONObjBuilder;
class Status;
diff --git a/src/mongo/db/catalog/collection_validation_test.cpp b/src/mongo/db/catalog/collection_validation_test.cpp
index 63dcc0d415a..c0955df1ed9 100644
--- a/src/mongo/db/catalog/collection_validation_test.cpp
+++ b/src/mongo/db/catalog/collection_validation_test.cpp
@@ -163,8 +163,7 @@ int insertDataRange(OperationContext* opCtx, int startIDNum, int endIDNum) {
<< " to " << endIDNum);
- AutoGetCollection autoColl(opCtx, kNss, MODE_IX);
- const Collection* coll = autoColl.getCollection();
+ AutoGetCollection coll(opCtx, kNss, MODE_IX);
std::vector<InsertStatement> inserts;
for (int i = startIDNum; i < endIDNum; ++i) {
auto doc = BSON("_id" << i);
@@ -183,8 +182,7 @@ int insertDataRange(OperationContext* opCtx, int startIDNum, int endIDNum) {
* Inserts a single invalid document into the kNss collection and then returns that count.
*/
int setUpInvalidData(OperationContext* opCtx) {
- AutoGetCollection autoColl(opCtx, kNss, MODE_IX);
- const Collection* coll = autoColl.getCollection();
+ AutoGetCollection coll(opCtx, kNss, MODE_IX);
RecordStore* rs = coll->getRecordStore();
{
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index 1c74a34159f..26cf5eb1f89 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -76,7 +76,7 @@ Status _createView(OperationContext* opCtx,
// Create 'system.views' in a separate WUOW if it does not exist.
WriteUnitOfWork wuow(opCtx);
- const Collection* coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
+ CollectionPtr coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
opCtx, NamespaceString(db->getSystemViewsName()));
if (!coll) {
coll = db->createCollection(opCtx, NamespaceString(db->getSystemViewsName()));
@@ -119,7 +119,7 @@ Status _createCollection(OperationContext* opCtx,
// This is a top-level handler for collection creation name conflicts. New commands coming
// in, or commands that generated a WriteConflict must return a NamespaceExists error here
// on conflict.
- if (CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss) != nullptr) {
+ if (CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss)) {
return Status(ErrorCodes::NamespaceExists,
str::stream() << "Collection already exists. NS: " << nss);
}
diff --git a/src/mongo/db/catalog/create_collection_test.cpp b/src/mongo/db/catalog/create_collection_test.cpp
index 7f16293665f..eae07bc5804 100644
--- a/src/mongo/db/catalog/create_collection_test.cpp
+++ b/src/mongo/db/catalog/create_collection_test.cpp
@@ -125,8 +125,7 @@ bool collectionExists(OperationContext* opCtx, const NamespaceString& nss) {
* Returns collection options.
*/
CollectionOptions getCollectionOptions(OperationContext* opCtx, const NamespaceString& nss) {
- AutoGetCollectionForRead autoColl(opCtx, nss);
- auto collection = autoColl.getCollection();
+ AutoGetCollectionForRead collection(opCtx, nss);
ASSERT_TRUE(collection) << "Unable to get collections options for " << nss
<< " because collection does not exist.";
return DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, collection->getCatalogId());
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index 97be806b2a3..83d902c9b2e 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -204,7 +204,7 @@ void DatabaseImpl::init(OperationContext* const opCtx) const {
void DatabaseImpl::clearTmpCollections(OperationContext* opCtx) const {
invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_IX));
- CollectionCatalog::CollectionInfoFn callback = [&](const Collection* collection) {
+ CollectionCatalog::CollectionInfoFn callback = [&](const CollectionPtr& collection) {
try {
WriteUnitOfWork wuow(opCtx);
Status status = dropCollection(opCtx, collection->ns(), {});
@@ -227,7 +227,7 @@ void DatabaseImpl::clearTmpCollections(OperationContext* opCtx) const {
return true;
};
- CollectionCatalog::CollectionInfoFn predicate = [&](const Collection* collection) {
+ CollectionCatalog::CollectionInfoFn predicate = [&](const CollectionPtr& collection) {
return DurableCatalog::get(opCtx)
->getCollectionOptions(opCtx, collection->getCatalogId())
.temp;
@@ -260,7 +260,7 @@ void DatabaseImpl::getStats(OperationContext* opCtx, BSONObjBuilder* output, dou
invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_IS));
catalog::forEachCollectionFromDb(
- opCtx, name(), MODE_IS, [&](const Collection* collection) -> bool {
+ opCtx, name(), MODE_IS, [&](const CollectionPtr& collection) -> bool {
nCollections += 1;
objects += collection->numRecords(opCtx);
size += collection->dataSize(opCtx);
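Catalog callbacks change in the same mechanical way: lambdas used as CollectionCatalog::CollectionInfoFn or passed to catalog::forEachCollectionFromDb now take the collection as const CollectionPtr&, and nothing else about them moves. Sketch of the post-change shape, with the accumulator body purely illustrative:

    // Sketch: only the lambda's parameter type changes; MODE_IS matches the
    // getStats hunk above.
    long long objects = 0;
    catalog::forEachCollectionFromDb(
        opCtx, dbName, MODE_IS, [&](const CollectionPtr& collection) -> bool {
            objects += collection->numRecords(opCtx);
            return true;  // keep iterating
        });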
@@ -490,7 +490,7 @@ void DatabaseImpl::_dropCollectionIndexes(OperationContext* opCtx,
Status DatabaseImpl::_finishDropCollection(OperationContext* opCtx,
const NamespaceString& nss,
- const Collection* collection) const {
+ const CollectionPtr& collection) const {
UUID uuid = collection->uuid();
LOGV2(20318,
"Finishing collection drop for {namespace} ({uuid}).",
@@ -565,7 +565,7 @@ Status DatabaseImpl::renameCollection(OperationContext* opCtx,
void DatabaseImpl::_checkCanCreateCollection(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionOptions& options) const {
- if (CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss) != nullptr) {
+ if (CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss)) {
if (options.isView()) {
uasserted(17399,
str::stream()
@@ -819,7 +819,7 @@ void DatabaseImpl::checkForIdIndexesAndDropPendingCollections(OperationContext*
if (nss.isSystem())
continue;
- const Collection* coll =
+ const CollectionPtr& coll =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
if (!coll)
continue;
diff --git a/src/mongo/db/catalog/database_impl.h b/src/mongo/db/catalog/database_impl.h
index a1a7f9a153e..56bc5ec78b4 100644
--- a/src/mongo/db/catalog/database_impl.h
+++ b/src/mongo/db/catalog/database_impl.h
@@ -113,11 +113,11 @@ public:
void checkForIdIndexesAndDropPendingCollections(OperationContext* opCtx) const final;
CollectionCatalog::iterator begin(OperationContext* opCtx) const final {
- return CollectionCatalog::get(opCtx).begin(_name);
+ return CollectionCatalog::get(opCtx).begin(opCtx, _name);
}
CollectionCatalog::iterator end(OperationContext* opCtx) const final {
- return CollectionCatalog::get(opCtx).end();
+ return CollectionCatalog::get(opCtx).end(opCtx);
}
private:
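Both iterator accessors now thread the OperationContext through to the catalog. Assuming the iterator API is otherwise unchanged, iteration over a database's collections becomes:

    // Sketch: begin()/end() now take the OperationContext, per the hunk above.
    auto& catalog = CollectionCatalog::get(opCtx);
    for (auto it = catalog.begin(opCtx, dbName); it != catalog.end(opCtx); ++it) {
        // ... inspect *it ...
    }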
@@ -137,7 +137,7 @@ private:
*/
Status _finishDropCollection(OperationContext* opCtx,
const NamespaceString& nss,
- const Collection* collection) const;
+ const CollectionPtr& collection) const;
/**
* Removes all indexes for a collection.
diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp
index 3ed987e475f..99826f3082a 100644
--- a/src/mongo/db/catalog/drop_collection.cpp
+++ b/src/mongo/db/catalog/drop_collection.cpp
@@ -53,7 +53,7 @@ namespace mongo {
MONGO_FAIL_POINT_DEFINE(hangDropCollectionBeforeLockAcquisition);
MONGO_FAIL_POINT_DEFINE(hangDuringDropCollection);
-Status _checkNssAndReplState(OperationContext* opCtx, const Collection* coll) {
+Status _checkNssAndReplState(OperationContext* opCtx, const CollectionPtr& coll) {
if (!coll) {
return Status(ErrorCodes::NamespaceNotFound, "ns not found");
}
@@ -134,7 +134,7 @@ Status _abortIndexBuildsAndDropCollection(OperationContext* opCtx,
// which may have changed when we released the collection lock temporarily.
opCtx->recoveryUnit()->abandonSnapshot();
- const Collection* coll =
+ const CollectionPtr& coll =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, startingNss);
Status status = _checkNssAndReplState(opCtx, coll);
if (!status.isOK()) {
@@ -185,7 +185,7 @@ Status _abortIndexBuildsAndDropCollection(OperationContext* opCtx,
// disk state, which may have changed when we released the collection lock temporarily.
opCtx->recoveryUnit()->abandonSnapshot();
- const Collection* coll =
+ const CollectionPtr& coll =
CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, collectionUUID);
status = _checkNssAndReplState(opCtx, coll);
if (!status.isOK()) {
@@ -237,7 +237,7 @@ Status _dropCollection(OperationContext* opCtx,
DropCollectionSystemCollectionMode systemCollectionMode,
BSONObjBuilder& result) {
Lock::CollectionLock collLock(opCtx, collectionName, MODE_X);
- const Collection* coll =
+ const CollectionPtr& coll =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, collectionName);
Status status = _checkNssAndReplState(opCtx, coll);
if (!status.isOK()) {
@@ -300,8 +300,9 @@ Status dropCollection(OperationContext* opCtx,
return Status(ErrorCodes::NamespaceNotFound, "ns not found");
}
- const Collection* coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
- opCtx, collectionName);
+ const CollectionPtr& coll =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx,
+ collectionName);
if (!coll) {
return _dropView(opCtx, db, collectionName, result);
@@ -337,7 +338,7 @@ Status dropCollectionForApplyOps(OperationContext* opCtx,
return Status(ErrorCodes::NamespaceNotFound, "ns not found");
}
- const Collection* coll =
+ const CollectionPtr& coll =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, collectionName);
BSONObjBuilder unusedBuilder;
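The drop paths repeatedly bind a lookup result to const CollectionPtr&. If the catalog returns the CollectionPtr by value, that binding relies on C++ lifetime extension of the temporary; if it returns a reference into the catalog, the binding is direct. Under either reading, the re-resolve-after-abandonSnapshot idiom above reduces to:

    // Sketch: re-resolve the collection after abandoning the snapshot, because
    // the in-memory state may have changed while locks were released.
    opCtx->recoveryUnit()->abandonSnapshot();
    const CollectionPtr& coll =
        CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
    if (!coll) {
        return Status(ErrorCodes::NamespaceNotFound, "ns not found");
    }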
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
index bf684381922..98c2025918d 100644
--- a/src/mongo/db/catalog/drop_indexes.cpp
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -60,7 +60,7 @@ constexpr auto kIndexFieldName = "index"_sd;
Status checkView(OperationContext* opCtx,
const NamespaceString& nss,
Database* db,
- const Collection* collection) {
+ const CollectionPtr& collection) {
if (!collection) {
if (db && ViewCatalog::get(db)->lookup(opCtx, nss.ns())) {
return Status(ErrorCodes::CommandNotSupportedOnView,
@@ -73,7 +73,7 @@ Status checkView(OperationContext* opCtx,
Status checkReplState(OperationContext* opCtx,
NamespaceStringOrUUID dbAndUUID,
- const Collection* collection) {
+ const CollectionPtr& collection) {
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
auto canAcceptWrites = replCoord->canAcceptWritesFor(opCtx, dbAndUUID);
bool writesAreReplicatedAndNotPrimary = opCtx->writesAreReplicated() && !canAcceptWrites;
@@ -142,7 +142,7 @@ StatusWith<const IndexDescriptor*> getDescriptorByKeyPattern(OperationContext* o
* to be held to look up the index name from the key pattern.
*/
StatusWith<std::vector<std::string>> getIndexNames(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const BSONElement& indexElem) {
invariant(opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_IX));
@@ -187,7 +187,7 @@ std::vector<UUID> abortIndexBuildByIndexNames(OperationContext* opCtx,
* Drops single index given a descriptor.
*/
Status dropIndexByDescriptor(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
IndexCatalog* indexCatalog,
const IndexDescriptor* desc) {
if (desc->isIdIndex()) {
diff --git a/src/mongo/db/catalog/index_build_block.cpp b/src/mongo/db/catalog/index_build_block.cpp
index d49a71bbc7e..1409d7147cb 100644
--- a/src/mongo/db/catalog/index_build_block.cpp
+++ b/src/mongo/db/catalog/index_build_block.cpp
@@ -243,7 +243,7 @@ void IndexBuildBlock::success(OperationContext* opCtx, Collection* collection) {
}
const IndexCatalogEntry* IndexBuildBlock::getEntry(OperationContext* opCtx,
- const Collection* collection) const {
+ const CollectionPtr& collection) const {
auto descriptor = collection->getIndexCatalog()->findIndexByName(
opCtx, _indexName, true /* includeUnfinishedIndexes */);
diff --git a/src/mongo/db/catalog/index_build_block.h b/src/mongo/db/catalog/index_build_block.h
index 793d0e871fa..d248df83eab 100644
--- a/src/mongo/db/catalog/index_build_block.h
+++ b/src/mongo/db/catalog/index_build_block.h
@@ -97,7 +97,8 @@ public:
*
* This entry is owned by the IndexCatalog.
*/
- const IndexCatalogEntry* getEntry(OperationContext* opCtx, const Collection* collection) const;
+ const IndexCatalogEntry* getEntry(OperationContext* opCtx,
+ const CollectionPtr& collection) const;
IndexCatalogEntry* getEntry(OperationContext* opCtx, Collection* collection);
/**
diff --git a/src/mongo/db/catalog/index_builds_manager.cpp b/src/mongo/db/catalog/index_builds_manager.cpp
index 273be3df5a6..1edf3e07a87 100644
--- a/src/mongo/db/catalog/index_builds_manager.cpp
+++ b/src/mongo/db/catalog/index_builds_manager.cpp
@@ -121,7 +121,7 @@ Status IndexBuildsManager::setUpIndexBuild(OperationContext* opCtx,
}
Status IndexBuildsManager::startBuildingIndex(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const UUID& buildUUID,
boost::optional<RecordId> resumeAfterRecordId) {
auto builder = invariant(_getBuilder(buildUUID));
@@ -130,13 +130,13 @@ Status IndexBuildsManager::startBuildingIndex(OperationContext* opCtx,
}
Status IndexBuildsManager::resumeBuildingIndexFromBulkLoadPhase(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const UUID& buildUUID) {
return invariant(_getBuilder(buildUUID))->dumpInsertsFromBulk(opCtx, collection);
}
StatusWith<std::pair<long long, long long>> IndexBuildsManager::startBuildingIndexForRecovery(
- OperationContext* opCtx, const Collection* coll, const UUID& buildUUID, RepairData repair) {
+ OperationContext* opCtx, const CollectionPtr& coll, const UUID& buildUUID, RepairData repair) {
auto builder = invariant(_getBuilder(buildUUID));
// Iterate all records in the collection. Validate the records and index them
@@ -278,13 +278,13 @@ Status IndexBuildsManager::drainBackgroundWrites(
Status IndexBuildsManager::retrySkippedRecords(OperationContext* opCtx,
const UUID& buildUUID,
- const Collection* collection) {
+ const CollectionPtr& collection) {
auto builder = invariant(_getBuilder(buildUUID));
return builder->retrySkippedRecords(opCtx, collection);
}
Status IndexBuildsManager::checkIndexConstraintViolations(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const UUID& buildUUID) {
auto builder = invariant(_getBuilder(buildUUID));
@@ -334,7 +334,7 @@ bool IndexBuildsManager::abortIndexBuild(OperationContext* opCtx,
}
bool IndexBuildsManager::abortIndexBuildWithoutCleanupForRollback(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const UUID& buildUUID,
bool isResumable) {
auto builder = _getBuilder(buildUUID);
@@ -356,7 +356,7 @@ bool IndexBuildsManager::abortIndexBuildWithoutCleanupForRollback(OperationConte
}
bool IndexBuildsManager::abortIndexBuildWithoutCleanupForShutdown(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const UUID& buildUUID,
bool isResumable) {
auto builder = _getBuilder(buildUUID);
@@ -414,7 +414,7 @@ StatusWith<int> IndexBuildsManager::_moveRecordToLostAndFound(
invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IX));
auto originalCollection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
- const Collection* localCollection =
+ CollectionPtr localCollection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, lostAndFoundNss);
// Create the collection if it doesn't exist.
diff --git a/src/mongo/db/catalog/index_builds_manager.h b/src/mongo/db/catalog/index_builds_manager.h
index 5928e088afb..bf731dfeb15 100644
--- a/src/mongo/db/catalog/index_builds_manager.h
+++ b/src/mongo/db/catalog/index_builds_manager.h
@@ -44,6 +44,7 @@
namespace mongo {
class Collection;
+class CollectionPtr;
class OperationContext;
class ServiceContext;
@@ -97,12 +98,12 @@ public:
 * Runs the scanning/insertion phase of the index build.
*/
Status startBuildingIndex(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const UUID& buildUUID,
boost::optional<RecordId> resumeAfterRecordId = boost::none);
Status resumeBuildingIndexFromBulkLoadPhase(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const UUID& buildUUID);
/**
@@ -112,7 +113,10 @@ public:
* Returns the number of records and the size of the data iterated over.
*/
StatusWith<std::pair<long long, long long>> startBuildingIndexForRecovery(
- OperationContext* opCtx, const Collection* coll, const UUID& buildUUID, RepairData repair);
+ OperationContext* opCtx,
+ const CollectionPtr& coll,
+ const UUID& buildUUID,
+ RepairData repair);
/**
* Document inserts observed during the scanning/insertion phase of an index build are not
@@ -129,13 +133,13 @@ public:
*/
Status retrySkippedRecords(OperationContext* opCtx,
const UUID& buildUUID,
- const Collection* collection);
+ const CollectionPtr& collection);
/**
 * Runs the index constraint violation checking phase of the index build.
*/
Status checkIndexConstraintViolations(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const UUID& buildUUID);
/**
@@ -168,7 +172,7 @@ public:
 * been cleared away, or not yet having started.
*/
bool abortIndexBuildWithoutCleanupForRollback(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const UUID& buildUUID,
bool isResumable);
@@ -178,7 +182,7 @@ public:
* index build and resumable index builds are supported.
*/
bool abortIndexBuildWithoutCleanupForShutdown(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const UUID& buildUUID,
bool isResumable);
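Note that the forward declaration (class CollectionPtr;) added at the top of this header is all it needs: parameters taken by reference or pointer do not require the complete type; only code that actually dereferences the handle does. An illustrative header shape, with the function name hypothetical:

    // some_component.h (hypothetical): the forward declarations below suffice
    // for by-pointer and by-reference parameters; no heavyweight includes.
    namespace mongo {
    class CollectionPtr;
    class OperationContext;

    void touchCollection(OperationContext* opCtx, const CollectionPtr& coll);
    }  // namespace mongo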
diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h
index 559d5529fb7..f59ee2a05c8 100644
--- a/src/mongo/db/catalog/index_catalog.h
+++ b/src/mongo/db/catalog/index_catalog.h
@@ -45,6 +45,7 @@ namespace mongo {
class Client;
class Collection;
+class CollectionPtr;
class IndexDescriptor;
struct InsertDeleteOptions;
@@ -410,7 +411,7 @@ public:
* See IndexCatalogEntry::setMultikey().
*/
virtual void setMultikeyPaths(OperationContext* const opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const IndexDescriptor* const desc,
const KeyStringSet& multikeyMetadataKeys,
const MultikeyPaths& multikeyPaths) const = 0;
@@ -424,7 +425,7 @@ public:
* This method may throw.
*/
virtual Status indexRecords(OperationContext* const opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const std::vector<BsonRecord>& bsonRecords,
int64_t* const keysInsertedOut) = 0;
@@ -435,7 +436,7 @@ public:
* This method may throw.
*/
virtual Status updateRecord(OperationContext* const opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const BSONObj& oldDoc,
const BSONObj& newDoc,
const RecordId& recordId,
@@ -483,7 +484,7 @@ public:
InsertDeleteOptions* options) const = 0;
virtual void indexBuildSuccess(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
IndexCatalogEntry* index) = 0;
};
} // namespace mongo
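Because these are pure virtuals, every implementation (IndexCatalogImpl, IndexCatalogNoop, test doubles) has to migrate in the same commit: a signature left at const Collection* would no longer override anything. The override specifiers used throughout the implementations turn that mistake into a compile error instead of a silent new overload; a minimal illustration with stand-in types:

    class Collection {};
    class CollectionPtr {};

    struct Base {
        virtual void f(const CollectionPtr& coll) = 0;
    };
    struct Derived : Base {
        // void f(const Collection* coll) override {}  // error: overrides nothing
        void f(const CollectionPtr& coll) override {}  // matches the new virtual
    };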
diff --git a/src/mongo/db/catalog/index_catalog_entry.h b/src/mongo/db/catalog/index_catalog_entry.h
index f0078b662af..8abc7000fdb 100644
--- a/src/mongo/db/catalog/index_catalog_entry.h
+++ b/src/mongo/db/catalog/index_catalog_entry.h
@@ -47,6 +47,7 @@
namespace mongo {
class CollatorInterface;
class Collection;
+class CollectionPtr;
class CollectionCatalogEntry;
class IndexAccessMethod;
class IndexBuildInterceptor;
@@ -132,7 +133,7 @@ public:
* as multikey here.
*/
virtual void setMultikey(OperationContext* const opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const KeyStringSet& multikeyMetadataKeys,
const MultikeyPaths& multikeyPaths) = 0;
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.cpp b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
index 7c604a98a99..7787183bb59 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
@@ -170,7 +170,7 @@ void IndexCatalogEntryImpl::setIsReady(bool newIsReady) {
}
void IndexCatalogEntryImpl::setMultikey(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const KeyStringSet& multikeyMetadataKeys,
const MultikeyPaths& multikeyPaths) {
// An index can either track path-level multikey information in the catalog or as metadata keys
@@ -256,7 +256,7 @@ void IndexCatalogEntryImpl::setMultikey(OperationContext* opCtx,
}
Status IndexCatalogEntryImpl::_setMultikeyInMultiDocumentTransaction(
- OperationContext* opCtx, const Collection* collection, const MultikeyPaths& multikeyPaths) {
+ OperationContext* opCtx, const CollectionPtr& collection, const MultikeyPaths& multikeyPaths) {
// If we are inside a multi-document transaction, we write the on-disk multikey update in a
// separate transaction so that it will not generate prepare conflicts with other operations
// that try to set the multikey flag. In general, it should always be safe to update the
@@ -340,7 +340,7 @@ bool IndexCatalogEntryImpl::_catalogIsMultikey(OperationContext* opCtx,
}
void IndexCatalogEntryImpl::_catalogSetMultikey(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const MultikeyPaths& multikeyPaths) {
// It's possible that the index type (e.g. ascending/descending index) supports tracking
// path-level multikey information, but this particular index doesn't.
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.h b/src/mongo/db/catalog/index_catalog_entry_impl.h
index 80a101c4a85..f1feb5e0f92 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.h
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.h
@@ -155,7 +155,7 @@ public:
* as multikey here.
*/
void setMultikey(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const KeyStringSet& multikeyMetadataKeys,
const MultikeyPaths& multikeyPaths) final;
@@ -188,7 +188,7 @@ private:
* Used by setMultikey() only.
*/
Status _setMultikeyInMultiDocumentTransaction(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const MultikeyPaths& multikeyPaths);
bool _catalogIsReady(OperationContext* opCtx) const;
@@ -205,7 +205,7 @@ private:
* Sets on-disk multikey flag for this index.
*/
void _catalogSetMultikey(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const MultikeyPaths& multikeyPaths);
KVPrefix _catalogGetPrefix(OperationContext* opCtx) const;
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index 8874008e553..2341345310b 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -1092,7 +1092,7 @@ void IndexCatalogImpl::deleteIndexFromDisk(OperationContext* opCtx, const string
}
void IndexCatalogImpl::setMultikeyPaths(OperationContext* const opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const IndexDescriptor* desc,
const KeyStringSet& multikeyMetadataKeys,
const MultikeyPaths& multikeyPaths) const {
@@ -1325,7 +1325,7 @@ const IndexDescriptor* IndexCatalogImpl::refreshEntry(OperationContext* opCtx,
// ---------------------------
Status IndexCatalogImpl::_indexKeys(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
IndexCatalogEntry* index,
const KeyStringSet& keys,
const KeyStringSet& multikeyMetadataKeys,
@@ -1378,7 +1378,7 @@ Status IndexCatalogImpl::_indexKeys(OperationContext* opCtx,
}
Status IndexCatalogImpl::_indexFilteredRecords(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
IndexCatalogEntry* index,
const std::vector<BsonRecord>& bsonRecords,
int64_t* keysInsertedOut) {
@@ -1429,7 +1429,7 @@ Status IndexCatalogImpl::_indexFilteredRecords(OperationContext* opCtx,
}
Status IndexCatalogImpl::_indexRecords(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
IndexCatalogEntry* index,
const std::vector<BsonRecord>& bsonRecords,
int64_t* keysInsertedOut) {
@@ -1451,7 +1451,7 @@ Status IndexCatalogImpl::_indexRecords(OperationContext* opCtx,
}
Status IndexCatalogImpl::_updateRecord(OperationContext* const opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
IndexCatalogEntry* index,
const BSONObj& oldDoc,
const BSONObj& newDoc,
@@ -1592,7 +1592,7 @@ void IndexCatalogImpl::_unindexRecord(OperationContext* opCtx,
}
Status IndexCatalogImpl::indexRecords(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const std::vector<BsonRecord>& bsonRecords,
int64_t* keysInsertedOut) {
if (keysInsertedOut) {
@@ -1615,7 +1615,7 @@ Status IndexCatalogImpl::indexRecords(OperationContext* opCtx,
}
Status IndexCatalogImpl::updateRecord(OperationContext* const opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const BSONObj& oldDoc,
const BSONObj& newDoc,
const RecordId& recordId,
@@ -1741,7 +1741,7 @@ void IndexCatalogImpl::prepareInsertDeleteOptions(OperationContext* opCtx,
}
void IndexCatalogImpl::indexBuildSuccess(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
IndexCatalogEntry* index) {
auto releasedEntry = _buildingIndexes.release(index->descriptor());
invariant(releasedEntry.get() == index);
@@ -1765,7 +1765,7 @@ void IndexCatalogImpl::indexBuildSuccess(OperationContext* opCtx,
}
StatusWith<BSONObj> IndexCatalogImpl::_fixIndexSpec(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const BSONObj& spec) const {
auto statusWithSpec = adjustIndexSpecObject(spec);
if (!statusWithSpec.isOK()) {
diff --git a/src/mongo/db/catalog/index_catalog_impl.h b/src/mongo/db/catalog/index_catalog_impl.h
index b18ed1a80e8..191a76bb529 100644
--- a/src/mongo/db/catalog/index_catalog_impl.h
+++ b/src/mongo/db/catalog/index_catalog_impl.h
@@ -46,6 +46,7 @@ namespace mongo {
class Client;
class Collection;
+class CollectionPtr;
class IndexDescriptor;
struct InsertDeleteOptions;
@@ -221,7 +222,7 @@ public:
// ---- modify single index
void setMultikeyPaths(OperationContext* const opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const IndexDescriptor* desc,
const KeyStringSet& multikeyMetadataKeys,
const MultikeyPaths& multikeyPaths) const override;
@@ -235,7 +236,7 @@ public:
* This method may throw.
*/
Status indexRecords(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const std::vector<BsonRecord>& bsonRecords,
int64_t* keysInsertedOut) override;
@@ -243,7 +244,7 @@ public:
* See IndexCatalog::updateRecord
*/
Status updateRecord(OperationContext* const opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const BSONObj& oldDoc,
const BSONObj& newDoc,
const RecordId& recordId,
@@ -281,7 +282,7 @@ public:
InsertDeleteOptions* options) const override;
void indexBuildSuccess(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
IndexCatalogEntry* index) override;
private:
@@ -296,7 +297,7 @@ private:
std::string _getAccessMethodName(const BSONObj& keyPattern) const;
Status _indexKeys(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
IndexCatalogEntry* index,
const KeyStringSet& keys,
const KeyStringSet& multikeyMetadataKeys,
@@ -307,19 +308,19 @@ private:
int64_t* keysInsertedOut);
Status _indexFilteredRecords(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
IndexCatalogEntry* index,
const std::vector<BsonRecord>& bsonRecords,
int64_t* keysInsertedOut);
Status _indexRecords(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
IndexCatalogEntry* index,
const std::vector<BsonRecord>& bsonRecords,
int64_t* keysInsertedOut);
Status _updateRecord(OperationContext* const opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
IndexCatalogEntry* index,
const BSONObj& oldDoc,
const BSONObj& newDoc,
@@ -348,7 +349,7 @@ private:
* plugin-level transformations if appropriate, etc.
*/
StatusWith<BSONObj> _fixIndexSpec(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const BSONObj& spec) const;
Status _isSpecOk(OperationContext* opCtx, const BSONObj& spec) const;
diff --git a/src/mongo/db/catalog/index_catalog_noop.h b/src/mongo/db/catalog/index_catalog_noop.h
index 9cc63d34ed5..64d7b9c46b2 100644
--- a/src/mongo/db/catalog/index_catalog_noop.h
+++ b/src/mongo/db/catalog/index_catalog_noop.h
@@ -194,19 +194,19 @@ public:
}
void setMultikeyPaths(OperationContext* const opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const IndexDescriptor* const desc,
const MultikeyPaths& multikeyPaths) const override {}
Status indexRecords(OperationContext* const opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const std::vector<BsonRecord>& bsonRecords,
int64_t* const keysInsertedOut) override {
return Status::OK();
}
Status updateRecord(OperationContext* const opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const BSONObj& oldDoc,
const BSONObj& newDoc,
const RecordId& recordId,
@@ -243,7 +243,7 @@ public:
InsertDeleteOptions* options) const override {}
void indexBuildSuccess(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
IndexCatalogEntry* index) override {}
};
diff --git a/src/mongo/db/catalog/index_signature_test.cpp b/src/mongo/db/catalog/index_signature_test.cpp
index 4aee9d6a052..e7a83ffefc6 100644
--- a/src/mongo/db/catalog/index_signature_test.cpp
+++ b/src/mongo/db/catalog/index_signature_test.cpp
@@ -68,7 +68,7 @@ public:
return _nss;
}
- const Collection* coll() const {
+ const CollectionPtr& coll() const {
return (*_coll).getCollection();
}
diff --git a/src/mongo/db/catalog/list_indexes.cpp b/src/mongo/db/catalog/list_indexes.cpp
index 937598f30fd..b97fcf4cf6b 100644
--- a/src/mongo/db/catalog/list_indexes.cpp
+++ b/src/mongo/db/catalog/list_indexes.cpp
@@ -52,20 +52,19 @@ namespace mongo {
StatusWith<std::list<BSONObj>> listIndexes(OperationContext* opCtx,
const NamespaceStringOrUUID& ns,
bool includeBuildUUIDs) {
- AutoGetCollectionForReadCommand ctx(opCtx, ns);
- const Collection* collection = ctx.getCollection();
- auto nss = ctx.getNss();
+ AutoGetCollectionForReadCommand collection(opCtx, ns);
+ auto nss = collection.getNss();
if (!collection) {
return StatusWith<std::list<BSONObj>>(ErrorCodes::NamespaceNotFound,
- str::stream()
- << "ns does not exist: " << ctx.getNss().ns());
+ str::stream() << "ns does not exist: "
+ << collection.getNss().ns());
}
return StatusWith<std::list<BSONObj>>(
- listIndexesInLock(opCtx, collection, nss, includeBuildUUIDs));
+ listIndexesInLock(opCtx, collection.getCollection(), nss, includeBuildUUIDs));
}
std::list<BSONObj> listIndexesInLock(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const NamespaceString& nss,
bool includeBuildUUIDs) {
invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IS));
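Here the RAII handle itself cannot bind to a const CollectionPtr& parameter, so the explicit getCollection() accessor bridges the two, and the handle's boolean test keeps the early return. Condensed sketch of the resulting flow (error text shortened):

    AutoGetCollectionForReadCommand collection(opCtx, ns);
    if (!collection) {
        return StatusWith<std::list<BSONObj>>(ErrorCodes::NamespaceNotFound,
                                              "ns does not exist");
    }
    return StatusWith<std::list<BSONObj>>(listIndexesInLock(
        opCtx, collection.getCollection(), collection.getNss(), includeBuildUUIDs));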
diff --git a/src/mongo/db/catalog/list_indexes.h b/src/mongo/db/catalog/list_indexes.h
index 8ceeb2d019e..1dd298c1b15 100644
--- a/src/mongo/db/catalog/list_indexes.h
+++ b/src/mongo/db/catalog/list_indexes.h
@@ -44,7 +44,7 @@ StatusWith<std::list<BSONObj>> listIndexes(OperationContext* opCtx,
const NamespaceStringOrUUID& ns,
bool includeBuildUUIDs);
std::list<BSONObj> listIndexesInLock(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const NamespaceString& nss,
bool includeBuildUUIDs);
std::list<BSONObj> listIndexesEmptyListIfMissing(OperationContext* opCtx,
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index bd6343b9c3d..f7ec01cb624 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -140,7 +140,7 @@ MultiIndexBlock::OnInitFn MultiIndexBlock::kNoopOnInitFn =
[](std::vector<BSONObj>& specs) -> Status { return Status::OK(); };
MultiIndexBlock::OnInitFn MultiIndexBlock::makeTimestampedIndexOnInitFn(OperationContext* opCtx,
- const Collection* coll) {
+ const CollectionPtr& coll) {
return [opCtx, ns = coll->ns()](std::vector<BSONObj>& specs) -> Status {
opCtx->getServiceContext()->getOpObserver()->onStartIndexBuildSinglePhase(opCtx, ns);
return Status::OK();
@@ -350,7 +350,7 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(
Status MultiIndexBlock::insertAllDocumentsInCollection(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
boost::optional<RecordId> resumeAfterRecordId) {
invariant(!_buildIsCleanedUp);
invariant(opCtx->lockState()->isNoop() || !opCtx->lockState()->inAWriteUnitOfWork());
@@ -411,7 +411,7 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(
yieldPolicy = PlanYieldPolicy::YieldPolicy::WRITE_CONFLICT_RETRY_ONLY;
}
auto exec = collection->makePlanExecutor(
- opCtx, yieldPolicy, Collection::ScanDirection::kForward, resumeAfterRecordId);
+ opCtx, collection, yieldPolicy, Collection::ScanDirection::kForward, resumeAfterRecordId);
// Hint to the storage engine that this collection scan should not keep data in the cache.
bool readOnce = useReadOnceCursorsForIndexBuilds.load();
@@ -558,13 +558,14 @@ Status MultiIndexBlock::insertSingleDocumentForInitialSyncOrRecovery(OperationCo
return Status::OK();
}
-Status MultiIndexBlock::dumpInsertsFromBulk(OperationContext* opCtx, const Collection* collection) {
+Status MultiIndexBlock::dumpInsertsFromBulk(OperationContext* opCtx,
+ const CollectionPtr& collection) {
return dumpInsertsFromBulk(opCtx, collection, nullptr);
}
Status MultiIndexBlock::dumpInsertsFromBulk(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexAccessMethod::RecordIdHandlerFn& onDuplicateRecord) {
invariant(!_buildIsCleanedUp);
invariant(opCtx->lockState()->isNoop() || !opCtx->lockState()->inAWriteUnitOfWork());
@@ -647,7 +648,7 @@ Status MultiIndexBlock::drainBackgroundWrites(
IndexBuildPhase_serializer(_phase).toString());
_phase = IndexBuildPhaseEnum::kDrainWrites;
- const Collection* coll =
+ const CollectionPtr& coll =
CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, _collectionUUID.get());
// Drain side-writes table for each index. This only drains what is visible. Assuming intent
@@ -672,7 +673,8 @@ Status MultiIndexBlock::drainBackgroundWrites(
return Status::OK();
}
-Status MultiIndexBlock::retrySkippedRecords(OperationContext* opCtx, const Collection* collection) {
+Status MultiIndexBlock::retrySkippedRecords(OperationContext* opCtx,
+ const CollectionPtr& collection) {
invariant(!_buildIsCleanedUp);
for (auto&& index : _indexes) {
auto interceptor = index.block->getEntry(opCtx, collection)->indexBuildInterceptor();
@@ -687,7 +689,7 @@ Status MultiIndexBlock::retrySkippedRecords(OperationContext* opCtx, const Colle
return Status::OK();
}
-Status MultiIndexBlock::checkConstraints(OperationContext* opCtx, const Collection* collection) {
+Status MultiIndexBlock::checkConstraints(OperationContext* opCtx, const CollectionPtr& collection) {
invariant(!_buildIsCleanedUp);
// For each index that may be unique, check that no recorded duplicates still exist. This can
@@ -707,12 +709,12 @@ Status MultiIndexBlock::checkConstraints(OperationContext* opCtx, const Collecti
}
boost::optional<ResumeIndexInfo> MultiIndexBlock::abortWithoutCleanupForRollback(
- OperationContext* opCtx, const Collection* collection, bool isResumable) {
+ OperationContext* opCtx, const CollectionPtr& collection, bool isResumable) {
return _abortWithoutCleanup(opCtx, collection, false /* shutdown */, isResumable);
}
void MultiIndexBlock::abortWithoutCleanupForShutdown(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
bool isResumable) {
_abortWithoutCleanup(opCtx, collection, true /* shutdown */, isResumable);
}
@@ -794,10 +796,8 @@ void MultiIndexBlock::setIndexBuildMethod(IndexBuildMethod indexBuildMethod) {
_method = indexBuildMethod;
}
-boost::optional<ResumeIndexInfo> MultiIndexBlock::_abortWithoutCleanup(OperationContext* opCtx,
- const Collection* collection,
- bool shutdown,
- bool isResumable) {
+boost::optional<ResumeIndexInfo> MultiIndexBlock::_abortWithoutCleanup(
+ OperationContext* opCtx, const CollectionPtr& collection, bool shutdown, bool isResumable) {
invariant(!_buildIsCleanedUp);
UninterruptibleLockGuard noInterrupt(opCtx->lockState());
// Lock if it's not already locked, to ensure storage engine cannot be destructed out from
@@ -834,7 +834,7 @@ boost::optional<ResumeIndexInfo> MultiIndexBlock::_abortWithoutCleanup(Operation
}
void MultiIndexBlock::_writeStateToDisk(OperationContext* opCtx,
- const Collection* collection) const {
+ const CollectionPtr& collection) const {
auto obj = _constructStateObject(opCtx, collection);
auto rs = opCtx->getServiceContext()
->getStorageEngine()
@@ -864,7 +864,7 @@ void MultiIndexBlock::_writeStateToDisk(OperationContext* opCtx,
}
BSONObj MultiIndexBlock::_constructStateObject(OperationContext* opCtx,
- const Collection* collection) const {
+ const CollectionPtr& collection) const {
BSONObjBuilder builder;
_buildUUID->appendToBuilder(&builder, "_id");
builder.append("phase", IndexBuildPhase_serializer(_phase));
diff --git a/src/mongo/db/catalog/multi_index_block.h b/src/mongo/db/catalog/multi_index_block.h
index f601e22a12c..7ce46547d04 100644
--- a/src/mongo/db/catalog/multi_index_block.h
+++ b/src/mongo/db/catalog/multi_index_block.h
@@ -56,6 +56,7 @@ namespace mongo {
extern FailPoint leaveIndexBuildUnfinishedForShutdown;
class Collection;
+class CollectionPtr;
class MatchExpression;
class NamespaceString;
class OperationContext;
@@ -121,7 +122,7 @@ public:
const BSONObj& spec,
OnInitFn onInit);
StatusWith<std::vector<BSONObj>> initForResume(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const std::vector<BSONObj>& specs,
const ResumeIndexInfo& resumeInfo);
@@ -136,7 +137,8 @@ public:
* When called on primaries, this generates a new optime, writes a no-op oplog entry, and
* timestamps the first catalog write. Does nothing on secondaries.
*/
- static OnInitFn makeTimestampedIndexOnInitFn(OperationContext* opCtx, const Collection* coll);
+ static OnInitFn makeTimestampedIndexOnInitFn(OperationContext* opCtx,
+ const CollectionPtr& coll);
/**
* Inserts all documents in the Collection into the indexes and logs with timing info.
@@ -152,7 +154,7 @@ public:
*/
Status insertAllDocumentsInCollection(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
boost::optional<RecordId> resumeAfterRecordId = boost::none);
/**
@@ -177,9 +179,9 @@ public:
*
 * Should not be called inside a WriteUnitOfWork.
*/
- Status dumpInsertsFromBulk(OperationContext* opCtx, const Collection* collection);
+ Status dumpInsertsFromBulk(OperationContext* opCtx, const CollectionPtr& collection);
Status dumpInsertsFromBulk(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexAccessMethod::RecordIdHandlerFn& onDuplicateRecord);
/**
* For background indexes using an IndexBuildInterceptor to capture inserts during a build,
@@ -208,7 +210,7 @@ public:
* of an index build, so it must ensure that before it finishes, it has indexed all documents in
* a collection, requiring a call to this function upon completion.
*/
- Status retrySkippedRecords(OperationContext* opCtx, const Collection* collection);
+ Status retrySkippedRecords(OperationContext* opCtx, const CollectionPtr& collection);
/**
 * Check any constraints that may have been temporarily violated during the index build for
@@ -217,7 +219,7 @@ public:
*
* Must not be in a WriteUnitOfWork.
*/
- Status checkConstraints(OperationContext* opCtx, const Collection* collection);
+ Status checkConstraints(OperationContext* opCtx, const CollectionPtr& collection);
/**
* Marks the index ready for use. Should only be called as the last method after
@@ -279,7 +281,7 @@ public:
* This should only be used during rollback.
*/
boost::optional<ResumeIndexInfo> abortWithoutCleanupForRollback(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
bool isResumable);
/**
@@ -292,7 +294,7 @@ public:
* This should only be used during shutdown.
*/
void abortWithoutCleanupForShutdown(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
bool isResumable);
/**
@@ -321,13 +323,13 @@ private:
* supported.
*/
boost::optional<ResumeIndexInfo> _abortWithoutCleanup(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
bool shutdown,
bool isResumable);
- void _writeStateToDisk(OperationContext* opCtx, const Collection* collection) const;
+ void _writeStateToDisk(OperationContext* opCtx, const CollectionPtr& collection) const;
- BSONObj _constructStateObject(OperationContext* opCtx, const Collection* collection) const;
+ BSONObj _constructStateObject(OperationContext* opCtx, const CollectionPtr& collection) const;
Status _failPointHangDuringBuild(OperationContext* opCtx,
FailPoint* fp,
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 9272fd2e3ff..1a02467c000 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -220,7 +220,7 @@ Status renameCollectionAndDropTarget(OperationContext* opCtx,
OptionalCollectionUUID uuid,
NamespaceString source,
NamespaceString target,
- const Collection* targetColl,
+ const CollectionPtr& targetColl,
RenameCollectionOptions options,
repl::OpTime renameOpTimeFromApplyOps) {
return writeConflictRetry(opCtx, "renameCollection", target.ns(), [&] {
@@ -560,7 +560,7 @@ Status renameBetweenDBs(OperationContext* opCtx,
"temporaryCollection"_attr = tmpName,
"sourceCollection"_attr = source);
- const Collection* tmpColl = nullptr;
+ CollectionPtr tmpColl = nullptr;
{
auto collectionOptions =
DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, sourceColl->getCatalogId());
@@ -648,10 +648,10 @@ Status renameBetweenDBs(OperationContext* opCtx,
// drop the exclusive database lock on the target and grab an intent lock on the temporary
// collection.
targetDBLock.reset();
+ tmpColl.reset();
AutoGetCollection autoTmpColl(opCtx, tmpCollUUID, MODE_IX);
- tmpColl = autoTmpColl.getCollection();
- if (!tmpColl) {
+ if (!autoTmpColl) {
return Status(ErrorCodes::NamespaceNotFound,
str::stream() << "Temporary collection '" << tmpName
<< "' was removed while renaming collection across DBs");
@@ -672,7 +672,7 @@ Status renameBetweenDBs(OperationContext* opCtx,
for (int i = 0; record && (i < internalInsertMaxBatchSize.load()); i++) {
const InsertStatement stmt(record->data.releaseToBson());
OpDebug* const opDebug = nullptr;
- auto status = tmpColl->insertDocument(opCtx, stmt, opDebug, true);
+ auto status = autoTmpColl->insertDocument(opCtx, stmt, opDebug, true);
if (!status.isOK()) {
return status;
}
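The ordering in the cross-database rename is deliberate: the cached CollectionPtr is cleared as soon as the exclusive target lock is gone, and every access after the lock gap goes through the freshly acquired RAII handle, since a handle obtained under the old lock must not be carried across it. Reduced to its skeleton:

    targetDBLock.reset();  // drop the exclusive DB lock on the target
    tmpColl.reset();       // do not carry a stale handle across the lock gap

    AutoGetCollection autoTmpColl(opCtx, tmpCollUUID, MODE_IX);
    if (!autoTmpColl) {
        return Status(ErrorCodes::NamespaceNotFound,
                      "temporary collection was removed during the rename");
    }
    // All further writes go through autoTmpColl, e.g. autoTmpColl->insertDocument(...).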
@@ -892,11 +892,10 @@ Status renameCollectionForApplyOps(OperationContext* opCtx,
str::stream() << "Cannot rename collection to the oplog");
}
- const Collection* const sourceColl =
- AutoGetCollectionForRead(opCtx, sourceNss, AutoGetCollectionViewMode::kViewsPermitted)
- .getCollection();
+ AutoGetCollectionForRead sourceColl(
+ opCtx, sourceNss, AutoGetCollectionViewMode::kViewsPermitted);
- if (sourceNss.isDropPendingNamespace() || sourceColl == nullptr) {
+ if (sourceNss.isDropPendingNamespace() || !sourceColl) {
boost::optional<NamespaceString> dropTargetNss;
if (options.dropTarget)
diff --git a/src/mongo/db/catalog/rename_collection_test.cpp b/src/mongo/db/catalog/rename_collection_test.cpp
index 726265f469f..67ae7957158 100644
--- a/src/mongo/db/catalog/rename_collection_test.cpp
+++ b/src/mongo/db/catalog/rename_collection_test.cpp
@@ -110,7 +110,7 @@ public:
bool fromMigrate) override;
void onCreateCollection(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
@@ -224,7 +224,7 @@ void OpObserverMock::onInserts(OperationContext* opCtx,
}
void OpObserverMock::onCreateCollection(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
@@ -419,8 +419,7 @@ bool _collectionExists(OperationContext* opCtx, const NamespaceString& nss) {
* Returns collection options.
*/
CollectionOptions _getCollectionOptions(OperationContext* opCtx, const NamespaceString& nss) {
- AutoGetCollectionForRead autoColl(opCtx, nss);
- auto collection = autoColl.getCollection();
+ AutoGetCollectionForRead collection(opCtx, nss);
ASSERT_TRUE(collection) << "Unable to get collections options for " << nss
<< " because collection does not exist.";
return DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, collection->getCatalogId());
@@ -439,7 +438,7 @@ CollectionUUID _getCollectionUuid(OperationContext* opCtx, const NamespaceString
* Get collection namespace by UUID.
*/
NamespaceString _getCollectionNssFromUUID(OperationContext* opCtx, const UUID& uuid) {
- const Collection* source = CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, uuid);
+ const CollectionPtr& source = CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, uuid);
return source ? source->ns() : NamespaceString();
}
@@ -447,8 +446,7 @@ NamespaceString _getCollectionNssFromUUID(OperationContext* opCtx, const UUID& u
* Returns true if namespace refers to a temporary collection.
*/
bool _isTempCollection(OperationContext* opCtx, const NamespaceString& nss) {
- AutoGetCollectionForRead autoColl(opCtx, nss);
- auto collection = autoColl.getCollection();
+ AutoGetCollectionForRead collection(opCtx, nss);
ASSERT_TRUE(collection) << "Unable to check if " << nss
<< " is a temporary collection because collection does not exist.";
auto options = _getCollectionOptions(opCtx, nss);
@@ -483,8 +481,7 @@ void _createIndexOnEmptyCollection(OperationContext* opCtx,
*/
void _insertDocument(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc) {
writeConflictRetry(opCtx, "_insertDocument", nss.ns(), [=] {
- AutoGetCollection autoColl(opCtx, nss, MODE_X);
- auto collection = autoColl.getCollection();
+ AutoGetCollection collection(opCtx, nss, MODE_X);
ASSERT_TRUE(collection) << "Cannot insert document " << doc << " into collection " << nss
<< " because collection " << nss << " does not exist.";
@@ -499,7 +496,7 @@ void _insertDocument(OperationContext* opCtx, const NamespaceString& nss, const
* Retrieves the pointer to a collection associated with the given namespace string from the
* catalog. The caller must hold the appropriate locks from the lock manager.
*/
-const Collection* _getCollection_inlock(OperationContext* opCtx, const NamespaceString& nss) {
+CollectionPtr _getCollection_inlock(OperationContext* opCtx, const NamespaceString& nss) {
invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IS));
auto databaseHolder = DatabaseHolder::get(opCtx);
auto* db = databaseHolder->getDb(opCtx, nss.db());
@@ -1185,14 +1182,14 @@ TEST_F(RenameCollectionTest, CollectionPointerRemainsValidThroughRename) {
// Get a pointer to the source collection, and ensure that it reports the expected namespace
// string.
- const Collection* sourceColl = _getCollection_inlock(_opCtx.get(), _sourceNss);
+ CollectionPtr sourceColl = _getCollection_inlock(_opCtx.get(), _sourceNss);
ASSERT(sourceColl);
ASSERT_OK(renameCollection(_opCtx.get(), _sourceNss, _targetNss, {}));
 // Retrieve the pointer associated with the target namespace, and ensure that it's the same
// pointer (i.e. the renamed collection has the very same Collection instance).
- const Collection* targetColl = _getCollection_inlock(_opCtx.get(), _targetNss);
+ CollectionPtr targetColl = _getCollection_inlock(_opCtx.get(), _targetNss);
ASSERT(targetColl);
ASSERT_EQ(targetColl, sourceColl);
@@ -1202,8 +1199,7 @@ TEST_F(RenameCollectionTest, CollectionPointerRemainsValidThroughRename) {
TEST_F(RenameCollectionTest, CatalogPointersRenameValidThroughRenameForApplyOps) {
_createCollection(_opCtx.get(), _sourceNss);
- const Collection* sourceColl =
- AutoGetCollectionForRead(_opCtx.get(), _sourceNss).getCollection();
+ AutoGetCollectionForRead sourceColl(_opCtx.get(), _sourceNss);
ASSERT(sourceColl);
auto uuid = UUID::gen();
@@ -1211,10 +1207,9 @@ TEST_F(RenameCollectionTest, CatalogPointersRenameValidThroughRenameForApplyOps)
ASSERT_OK(renameCollectionForApplyOps(_opCtx.get(), _sourceNss.db().toString(), uuid, cmd, {}));
ASSERT_FALSE(_collectionExists(_opCtx.get(), _sourceNss));
- const Collection* targetColl =
- AutoGetCollectionForRead(_opCtx.get(), _targetNss).getCollection();
+ AutoGetCollectionForRead targetColl(_opCtx.get(), _targetNss);
ASSERT(targetColl);
- ASSERT_EQ(targetColl, sourceColl);
+ ASSERT_EQ(targetColl.getCollection(), sourceColl.getCollection());
ASSERT_EQ(targetColl->ns(), _targetNss);
}
@@ -1223,7 +1218,7 @@ TEST_F(RenameCollectionTest, CollectionCatalogMappingRemainsIntactThroughRename)
Lock::DBLock sourceLk(_opCtx.get(), _sourceNss.db(), MODE_X);
Lock::DBLock targetLk(_opCtx.get(), _targetNss.db(), MODE_X);
auto& catalog = CollectionCatalog::get(_opCtx.get());
- const Collection* sourceColl = _getCollection_inlock(_opCtx.get(), _sourceNss);
+ CollectionPtr sourceColl = _getCollection_inlock(_opCtx.get(), _sourceNss);
ASSERT(sourceColl);
ASSERT_EQ(sourceColl, catalog.lookupCollectionByUUID(_opCtx.get(), sourceColl->uuid()));
ASSERT_OK(renameCollection(_opCtx.get(), _sourceNss, _targetNss, {}));
diff --git a/src/mongo/db/catalog/throttle_cursor_test.cpp b/src/mongo/db/catalog/throttle_cursor_test.cpp
index 91097350d36..97db1fffad4 100644
--- a/src/mongo/db/catalog/throttle_cursor_test.cpp
+++ b/src/mongo/db/catalog/throttle_cursor_test.cpp
@@ -59,7 +59,7 @@ public:
Date_t getTime();
int64_t getDifferenceInMillis(Date_t start, Date_t end);
- SortedDataInterfaceThrottleCursor getIdIndex(const Collection* coll);
+ SortedDataInterfaceThrottleCursor getIdIndex(const CollectionPtr& coll);
std::unique_ptr<DataThrottle> _dataThrottle;
};
@@ -72,8 +72,7 @@ void ThrottleCursorTest::setUp() {
// Insert random data into the collection. We don't need to create an index as the _id index is
// created by default.
- AutoGetCollection autoColl(operationContext(), kNss, MODE_X);
- const Collection* collection = autoColl.getCollection();
+ AutoGetCollection collection(operationContext(), kNss, MODE_X);
invariant(collection);
OpDebug* const nullOpDebug = nullptr;
@@ -108,7 +107,7 @@ int64_t ThrottleCursorTest::getDifferenceInMillis(Date_t start, Date_t end) {
return end.toMillisSinceEpoch() - start.toMillisSinceEpoch();
}
-SortedDataInterfaceThrottleCursor ThrottleCursorTest::getIdIndex(const Collection* coll) {
+SortedDataInterfaceThrottleCursor ThrottleCursorTest::getIdIndex(const CollectionPtr& coll) {
const IndexDescriptor* idDesc = coll->getIndexCatalog()->findIdIndex(operationContext());
const IndexCatalogEntry* idEntry = coll->getIndexCatalog()->getEntry(idDesc);
const IndexAccessMethod* iam = idEntry->accessMethod();
@@ -119,7 +118,7 @@ SortedDataInterfaceThrottleCursor ThrottleCursorTest::getIdIndex(const Collectio
TEST_F(ThrottleCursorTest, TestSeekableRecordThrottleCursorOff) {
auto opCtx = operationContext();
AutoGetCollection autoColl(opCtx, kNss, MODE_X);
- const Collection* coll = autoColl.getCollection();
+ const CollectionPtr& coll = autoColl.getCollection();
// Use a fixed record data size to simplify the timing calculations.
FailPointEnableBlock failPoint("fixedCursorDataSizeOf512KBForDataThrottle");
@@ -152,7 +151,7 @@ TEST_F(ThrottleCursorTest, TestSeekableRecordThrottleCursorOff) {
TEST_F(ThrottleCursorTest, TestSeekableRecordThrottleCursorOn) {
auto opCtx = operationContext();
AutoGetCollection autoColl(opCtx, kNss, MODE_X);
- const Collection* coll = autoColl.getCollection();
+ const CollectionPtr& coll = autoColl.getCollection();
// Use a fixed record data size to simplify the timing calculations.
FailPointEnableBlock failPoint("fixedCursorDataSizeOf512KBForDataThrottle");
@@ -204,7 +203,7 @@ TEST_F(ThrottleCursorTest, TestSeekableRecordThrottleCursorOn) {
TEST_F(ThrottleCursorTest, TestSeekableRecordThrottleCursorOnLargeDocs) {
auto opCtx = operationContext();
AutoGetCollection autoColl(opCtx, kNss, MODE_X);
- const Collection* coll = autoColl.getCollection();
+ const CollectionPtr& coll = autoColl.getCollection();
// Use a fixed record data size to simplify the timing calculations.
FailPointEnableBlock failPoint("fixedCursorDataSizeOf2MBForDataThrottle");
@@ -265,7 +264,7 @@ TEST_F(ThrottleCursorTest, TestSeekableRecordThrottleCursorOnLargeDocs) {
TEST_F(ThrottleCursorTest, TestSortedDataInterfaceThrottleCursorOff) {
auto opCtx = operationContext();
AutoGetCollection autoColl(opCtx, kNss, MODE_X);
- const Collection* coll = autoColl.getCollection();
+ const CollectionPtr& coll = autoColl.getCollection();
// Use a fixed record data size to simplify the timing calculations.
FailPointEnableBlock failPoint("fixedCursorDataSizeOf512KBForDataThrottle");
@@ -292,7 +291,7 @@ TEST_F(ThrottleCursorTest, TestSortedDataInterfaceThrottleCursorOff) {
TEST_F(ThrottleCursorTest, TestSortedDataInterfaceThrottleCursorOn) {
auto opCtx = operationContext();
AutoGetCollection autoColl(opCtx, kNss, MODE_X);
- const Collection* coll = autoColl.getCollection();
+ const CollectionPtr& coll = autoColl.getCollection();
// Use a fixed record data size to simplify the timing calculations.
FailPointEnableBlock failPoint("fixedCursorDataSizeOf512KBForDataThrottle");
@@ -343,7 +342,7 @@ TEST_F(ThrottleCursorTest, TestSortedDataInterfaceThrottleCursorOn) {
TEST_F(ThrottleCursorTest, TestMixedCursorsWithSharedThrottleOff) {
auto opCtx = operationContext();
AutoGetCollection autoColl(opCtx, kNss, MODE_X);
- const Collection* coll = autoColl.getCollection();
+ const CollectionPtr& coll = autoColl.getCollection();
// Use a fixed record data size to simplify the timing calculations.
FailPointEnableBlock failPoint("fixedCursorDataSizeOf512KBForDataThrottle");
@@ -385,7 +384,7 @@ TEST_F(ThrottleCursorTest, TestMixedCursorsWithSharedThrottleOff) {
TEST_F(ThrottleCursorTest, TestMixedCursorsWithSharedThrottleOn) {
auto opCtx = operationContext();
AutoGetCollection autoColl(opCtx, kNss, MODE_X);
- const Collection* coll = autoColl.getCollection();
+ const CollectionPtr& coll = autoColl.getCollection();
// Use a fixed record data size to simplify the timing calculations.
FailPointEnableBlock failPoint("fixedCursorDataSizeOf512KBForDataThrottle");
diff --git a/src/mongo/db/catalog/validate_adaptor.cpp b/src/mongo/db/catalog/validate_adaptor.cpp
index 15af297f6b7..d5b07f65207 100644
--- a/src/mongo/db/catalog/validate_adaptor.cpp
+++ b/src/mongo/db/catalog/validate_adaptor.cpp
@@ -82,7 +82,7 @@ Status ValidateAdaptor::validateRecord(OperationContext* opCtx,
LOGV2(4666601, "[validate]", "recordId"_attr = recordId, "recordData"_attr = recordBson);
}
- const Collection* coll = _validateState->getCollection();
+ const CollectionPtr& coll = _validateState->getCollection();
if (!coll->getIndexCatalog()->haveAnyIndexes()) {
return status;
}
diff --git a/src/mongo/db/catalog/validate_state.cpp b/src/mongo/db/catalog/validate_state.cpp
index 3d91889e954..23dea464e37 100644
--- a/src/mongo/db/catalog/validate_state.cpp
+++ b/src/mongo/db/catalog/validate_state.cpp
@@ -77,8 +77,8 @@ ValidateState::ValidateState(OperationContext* opCtx,
}
_database = _databaseLock->getDb() ? _databaseLock->getDb() : nullptr;
- _collection = _database ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, _nss)
- : nullptr;
+ if (_database)
+ _collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, _nss);
if (!_collection) {
if (_database && ViewCatalog::get(_database)->lookup(opCtx, _nss.ns())) {
diff --git a/src/mongo/db/catalog/validate_state.h b/src/mongo/db/catalog/validate_state.h
index ec2e35d689d..b9d5db77714 100644
--- a/src/mongo/db/catalog/validate_state.h
+++ b/src/mongo/db/catalog/validate_state.h
@@ -100,7 +100,7 @@ public:
return _database;
}
- const Collection* getCollection() const {
+ const CollectionPtr& getCollection() const {
invariant(_collection);
return _collection;
}
@@ -204,7 +204,7 @@ private:
boost::optional<Lock::CollectionLock> _collectionLock;
Database* _database;
- const Collection* _collection;
+ CollectionPtr _collection;
// Stores the indexes that are going to be validated. When validate yields periodically we'll
// use this list to determine if validation should abort when an existing index that was
diff --git a/src/mongo/db/catalog/validate_state_test.cpp b/src/mongo/db/catalog/validate_state_test.cpp
index bcd94aa94ad..ac023d807dd 100644
--- a/src/mongo/db/catalog/validate_state_test.cpp
+++ b/src/mongo/db/catalog/validate_state_test.cpp
@@ -63,8 +63,7 @@ public:
/**
* Create collection 'nss' and insert some documents. It will possess a default _id index.
*/
- const Collection* createCollectionAndPopulateIt(OperationContext* opCtx,
- const NamespaceString& nss);
+ void createCollectionAndPopulateIt(OperationContext* opCtx, const NamespaceString& nss);
private:
void setUp() override;
@@ -76,13 +75,12 @@ void ValidateStateTest::createCollection(OperationContext* opCtx, const Namespac
ASSERT_OK(storageInterface()->createCollection(opCtx, nss, defaultCollectionOptions));
}
-const Collection* ValidateStateTest::createCollectionAndPopulateIt(OperationContext* opCtx,
- const NamespaceString& nss) {
+void ValidateStateTest::createCollectionAndPopulateIt(OperationContext* opCtx,
+ const NamespaceString& nss) {
// Create collection.
createCollection(opCtx, nss);
- AutoGetCollection autoColl(opCtx, nss, MODE_X);
- const Collection* collection = autoColl.getCollection();
+ AutoGetCollection collection(opCtx, nss, MODE_X);
invariant(collection);
// Insert some data.
@@ -93,8 +91,6 @@ const Collection* ValidateStateTest::createCollectionAndPopulateIt(OperationCont
collection->insertDocument(opCtx, InsertStatement(BSON("_id" << i)), nullOpDebug));
wuow.commit();
}
-
- return collection;
}
void ValidateStateTest::setUp() {
@@ -124,8 +120,7 @@ void ValidateStateTest::setUp() {
 * Builds an index on the given 'nss'. 'indexKey' specifies the index key, e.g. {'a': 1}.
*/
void createIndex(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& indexKey) {
- AutoGetCollection autoColl(opCtx, nss, MODE_X);
- auto collection = autoColl.getCollection();
+ AutoGetCollection collection(opCtx, nss, MODE_X);
ASSERT(collection);
ASSERT_EQ(1, indexKey.nFields()) << nss << "/" << indexKey;
diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp
index ab90c0a3587..41e5e2576b6 100644
--- a/src/mongo/db/catalog_raii.cpp
+++ b/src/mongo/db/catalog_raii.cpp
@@ -112,7 +112,7 @@ AutoGetCollectionBase<CatalogCollectionLookupT>::AutoGetCollectionBase(
if (!db)
return;
- _coll = CatalogCollectionLookupT::lookupCollection(opCtx, _resolvedNss);
+ _coll = _lookup.lookupCollection(opCtx, _resolvedNss);
invariant(!nsOrUUID.uuid() || _coll,
str::stream() << "Collection for " << _resolvedNss.ns()
<< " disappeared after successufully resolving "
@@ -165,28 +165,38 @@ Collection* AutoGetCollection::getWritableCollection(CollectionCatalog::Lifetime
class WritableCollectionReset : public RecoveryUnit::Change {
public:
WritableCollectionReset(AutoGetCollection& autoColl,
- const Collection* rollbackCollection)
- : _autoColl(autoColl), _rollbackCollection(rollbackCollection) {}
+ const CollectionPtr& rollbackCollection,
+ uint64_t catalogEpoch)
+ : _autoColl(autoColl),
+ _rollbackCollection(rollbackCollection.get()),
+ _catalogEpoch(catalogEpoch) {}
void commit(boost::optional<Timestamp> commitTime) final {
+ // Restore '_coll' to a yieldable collection.
+ _autoColl._coll = {
+ _autoColl.getOperationContext(), _autoColl._coll.get(), _catalogEpoch};
_autoColl._writableColl = nullptr;
}
void rollback() final {
- _autoColl._coll = _rollbackCollection;
+ _autoColl._coll = {
+ _autoColl.getOperationContext(), _rollbackCollection, _catalogEpoch};
_autoColl._writableColl = nullptr;
}
private:
AutoGetCollection& _autoColl;
const Collection* _rollbackCollection;
+ uint64_t _catalogEpoch;
};
- _writableColl = CollectionCatalog::get(_opCtx).lookupCollectionByNamespaceForMetadataWrite(
- _opCtx, mode, _resolvedNss);
+ auto& catalog = CollectionCatalog::get(_opCtx);
+ _writableColl =
+ catalog.lookupCollectionByNamespaceForMetadataWrite(_opCtx, mode, _resolvedNss);
if (mode == CollectionCatalog::LifetimeMode::kManagedInWriteUnitOfWork) {
_opCtx->recoveryUnit()->registerChange(
- std::make_unique<WritableCollectionReset>(*this, _coll));
+ std::make_unique<WritableCollectionReset>(*this, _coll, catalog.getEpoch()));
}
+ // Set to writable collection. We are no longer yieldable.
_coll = _writableColl;
}
return _writableColl;
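The WritableCollectionReset registered above follows the RecoveryUnit::Change protocol visible in this hunk: commit() re-wraps _coll as a yieldable CollectionPtr using the catalog epoch captured at registration, while rollback() reinstalls the pre-write Collection. A minimal sketch of how such a change participates in a write, using only names from this diff:

    // Sketch: the Change's hooks fire when the WriteUnitOfWork resolves.
    WriteUnitOfWork wuow(opCtx);
    opCtx->recoveryUnit()->registerChange(std::make_unique<WritableCollectionReset>(
        autoColl, autoColl.getCollection(), catalog.getEpoch()));
    // ... metadata writes against the writable Collection ...
    wuow.commit();  // runs WritableCollectionReset::commit(); an abort would run rollback()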
@@ -202,9 +212,12 @@ struct CollectionWriter::SharedImpl {
CollectionWriter::CollectionWriter(OperationContext* opCtx,
const CollectionUUID& uuid,
CollectionCatalog::LifetimeMode mode)
- : _opCtx(opCtx), _mode(mode), _sharedImpl(std::make_shared<SharedImpl>(this)) {
+ : _collection(&_storedCollection),
+ _opCtx(opCtx),
+ _mode(mode),
+ _sharedImpl(std::make_shared<SharedImpl>(this)) {
- _collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, uuid);
+ _storedCollection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, uuid);
_sharedImpl->_writableCollectionInitializer = [opCtx,
uuid](CollectionCatalog::LifetimeMode mode) {
return CollectionCatalog::get(opCtx).lookupCollectionByUUIDForMetadataWrite(
@@ -215,8 +228,11 @@ CollectionWriter::CollectionWriter(OperationContext* opCtx,
CollectionWriter::CollectionWriter(OperationContext* opCtx,
const NamespaceString& nss,
CollectionCatalog::LifetimeMode mode)
- : _opCtx(opCtx), _mode(mode), _sharedImpl(std::make_shared<SharedImpl>(this)) {
- _collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
+ : _collection(&_storedCollection),
+ _opCtx(opCtx),
+ _mode(mode),
+ _sharedImpl(std::make_shared<SharedImpl>(this)) {
+ _storedCollection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
_sharedImpl->_writableCollectionInitializer = [opCtx,
nss](CollectionCatalog::LifetimeMode mode) {
return CollectionCatalog::get(opCtx).lookupCollectionByNamespaceForMetadataWrite(
@@ -226,10 +242,10 @@ CollectionWriter::CollectionWriter(OperationContext* opCtx,
CollectionWriter::CollectionWriter(AutoGetCollection& autoCollection,
CollectionCatalog::LifetimeMode mode)
- : _opCtx(autoCollection.getOperationContext()),
+ : _collection(&autoCollection.getCollection()),
+ _opCtx(autoCollection.getOperationContext()),
_mode(mode),
_sharedImpl(std::make_shared<SharedImpl>(this)) {
- _collection = autoCollection.getCollection();
_sharedImpl->_writableCollectionInitializer =
[&autoCollection](CollectionCatalog::LifetimeMode mode) {
return autoCollection.getWritableCollection(mode);
@@ -237,7 +253,8 @@ CollectionWriter::CollectionWriter(AutoGetCollection& autoCollection,
}
CollectionWriter::CollectionWriter(Collection* writableCollection)
- : _collection(writableCollection),
+ : _collection(&_storedCollection),
+ _storedCollection(writableCollection),
_writableCollection(writableCollection),
_mode(CollectionCatalog::LifetimeMode::kInplace) {}
@@ -264,30 +281,34 @@ Collection* CollectionWriter::getWritableCollection() {
class WritableCollectionReset : public RecoveryUnit::Change {
public:
WritableCollectionReset(std::shared_ptr<SharedImpl> shared,
- const Collection* rollbackCollection)
- : _shared(std::move(shared)), _rollbackCollection(rollbackCollection) {}
+ CollectionPtr rollbackCollection)
+ : _shared(std::move(shared)), _rollbackCollection(std::move(rollbackCollection)) {}
void commit(boost::optional<Timestamp> commitTime) final {
if (_shared->_parent)
_shared->_parent->_writableCollection = nullptr;
}
void rollback() final {
if (_shared->_parent) {
- _shared->_parent->_collection = _rollbackCollection;
+ _shared->_parent->_storedCollection = std::move(_rollbackCollection);
_shared->_parent->_writableCollection = nullptr;
}
}
private:
std::shared_ptr<SharedImpl> _shared;
- const Collection* _rollbackCollection;
+ CollectionPtr _rollbackCollection;
};
- if (_mode == CollectionCatalog::LifetimeMode::kManagedInWriteUnitOfWork) {
- _opCtx->recoveryUnit()->registerChange(
- std::make_unique<WritableCollectionReset>(_sharedImpl, _collection));
- }
+ // If we are using our stored Collection, then we are not managed by an AutoGetCollection
+ // and we need to manage the lifetime here.
+ if (*_collection == _storedCollection) {
+ if (_mode == CollectionCatalog::LifetimeMode::kManagedInWriteUnitOfWork) {
+ _opCtx->recoveryUnit()->registerChange(std::make_unique<WritableCollectionReset>(
+ _sharedImpl, std::move(_storedCollection)));
+ }
- _collection = _writableCollection;
+ _storedCollection = _writableCollection;
+ }
}
return _writableCollection;
}
@@ -306,7 +327,9 @@ CatalogCollectionLookup::CollectionStorage CatalogCollectionLookup::lookupCollec
CatalogCollectionLookupForRead::CollectionStorage CatalogCollectionLookupForRead::lookupCollection(
OperationContext* opCtx, const NamespaceString& nss) {
- return CollectionCatalog::get(opCtx).lookupCollectionByNamespaceForRead(opCtx, nss);
+ auto ptr = CollectionCatalog::get(opCtx).lookupCollectionByNamespaceForRead(opCtx, nss);
+ _collection = CollectionPtr(ptr);
+ return ptr;
}
LockMode fixLockModeForSystemDotViewsChanges(const NamespaceString& nss, LockMode mode) {
@@ -364,7 +387,7 @@ AutoGetOplog::AutoGetOplog(OperationContext* opCtx, OplogAccessMode mode, Date_t
}
_oplogInfo = repl::LocalOplogInfo::get(opCtx);
- _oplog = _oplogInfo->getCollection();
+ _oplog = &_oplogInfo->getCollection();
}
template class AutoGetCollectionBase<CatalogCollectionLookup>;
diff --git a/src/mongo/db/catalog_raii.h b/src/mongo/db/catalog_raii.h
index 917bff29707..76fe75ba9c8 100644
--- a/src/mongo/db/catalog_raii.h
+++ b/src/mongo/db/catalog_raii.h
@@ -37,6 +37,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/local_oplog_info.h"
#include "mongo/db/views/view.h"
+#include "mongo/db/yieldable.h"
namespace mongo {
@@ -106,7 +107,6 @@ class AutoGetCollectionBase {
AutoGetCollectionBase& operator=(const AutoGetCollectionBase&) = delete;
using CollectionStorage = typename CatalogCollectionLookupT::CollectionStorage;
- using CollectionPtr = typename CatalogCollectionLookupT::CollectionPtr;
public:
AutoGetCollectionBase(
@@ -123,15 +123,15 @@ public:
/**
* AutoGetCollection can be used as a pointer with the -> operator.
*/
- CollectionPtr operator->() const {
- return getCollection();
+ const Collection* operator->() const {
+ return getCollection().get();
}
/**
* Dereference operator, returns a lvalue reference to the collection.
*/
- std::add_lvalue_reference_t<std::remove_pointer_t<CollectionPtr>> operator*() const {
- return *getCollection();
+ const Collection& operator*() const {
+ return *getCollection().get();
}
/**
@@ -151,8 +151,8 @@ public:
/**
* Returns nullptr if the collection didn't exist.
*/
- CollectionPtr getCollection() const {
- return CatalogCollectionLookupT::toCollectionPtr(_coll);
+ const CollectionPtr& getCollection() const {
+ return _lookup.toCollectionPtr(_coll);
}
/**
@@ -181,26 +181,28 @@ protected:
boost::optional<Lock::CollectionLock> _collLock;
CollectionStorage _coll = nullptr;
+ CatalogCollectionLookupT _lookup;
std::shared_ptr<ViewDefinition> _view;
};
struct CatalogCollectionLookup {
- using CollectionStorage = const Collection*;
- using CollectionPtr = const Collection*;
+ using CollectionStorage = CollectionPtr;
- static CollectionStorage lookupCollection(OperationContext* opCtx, const NamespaceString& nss);
- static CollectionPtr toCollectionPtr(CollectionStorage collection) {
+ CollectionStorage lookupCollection(OperationContext* opCtx, const NamespaceString& nss);
+ const CollectionPtr& toCollectionPtr(const CollectionStorage& collection) const {
return collection;
}
};
struct CatalogCollectionLookupForRead {
using CollectionStorage = std::shared_ptr<const Collection>;
- using CollectionPtr = const Collection*;
- static CollectionStorage lookupCollection(OperationContext* opCtx, const NamespaceString& nss);
- static CollectionPtr toCollectionPtr(const CollectionStorage& collection) {
- return collection.get();
+ CollectionStorage lookupCollection(OperationContext* opCtx, const NamespaceString& nss);
+ const CollectionPtr& toCollectionPtr(const CollectionStorage&) const {
+ return _collection;
}
+
+private:
+ CollectionPtr _collection;
};
class AutoGetCollection : public AutoGetCollectionBase<CatalogCollectionLookup> {
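The asymmetry between the two lookup policies above is deliberate: CatalogCollectionLookup already stores a CollectionPtr, so toCollectionPtr() can return its argument by reference, whereas CatalogCollectionLookupForRead stores a shared_ptr<const Collection> and must cache a CollectionPtr member (filled in by lookupCollection(), as the catalog_raii.cpp hunk above shows) to hand out a stable const reference. A sketch of the resulting call pattern, assuming only the interfaces in this hunk:

    // Sketch: how AutoGetCollectionBase consumes a lookup policy after this change.
    CatalogCollectionLookupForRead lookup;
    auto storage = lookup.lookupCollection(opCtx, nss);           // keeps the shared_ptr alive
    const CollectionPtr& coll = lookup.toCollectionPtr(storage);  // reference into 'lookup'
    if (coll) {
        // safe while 'lookup' and 'storage' outlive this scope
    }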
@@ -266,19 +268,19 @@ public:
CollectionWriter& operator=(CollectionWriter&&) = delete;
explicit operator bool() const {
- return get();
+ return static_cast<bool>(get());
}
const Collection* operator->() const {
- return get();
+ return get().get();
}
const Collection& operator*() const {
- return *get();
+ return *get().get();
}
- const Collection* get() const {
- return _collection;
+ const CollectionPtr& get() const {
+ return *_collection;
}
// Returns writable Collection, any previous Collection that has been returned may be
@@ -289,7 +291,13 @@ public:
void commitToCatalog();
private:
- const Collection* _collection = nullptr;
+ // If this class is instantiated with the constructors that take a UUID or nss, we need
+ // somewhere to store the CollectionPtr used. But if it is instantiated with an
+ // AutoGetCollection, the lifetime of the object is managed there. To unify the two code paths
+ // we keep a pointer that points either to the CollectionPtr in an AutoGetCollection or to the
+ // CollectionPtr stored in this instance. The pointer also tells us how we were instantiated.
+ const CollectionPtr* _collection = nullptr;
+ CollectionPtr _storedCollection;
Collection* _writableCollection = nullptr;
OperationContext* _opCtx = nullptr;
CollectionCatalog::LifetimeMode _mode;
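A compact illustration of the two instantiation paths the comment above describes, using the constructors from the catalog_raii.cpp hunk:

    // Sketch: path 1, constructed from an nss/UUID -> we own the storage.
    CollectionWriter byNss(opCtx, nss, mode);    // _collection == &_storedCollection

    // Sketch: path 2, constructed from an AutoGetCollection -> it owns the storage.
    AutoGetCollection autoColl(opCtx, nss, MODE_X);
    CollectionWriter byAuto(autoColl, mode);     // _collection == &autoColl.getCollection()

    // getWritableCollection() registers its own rollback handler only on path 1,
    // which it detects via (*_collection == _storedCollection).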
@@ -418,8 +426,8 @@ public:
/**
* Returns a pointer to the oplog collection or nullptr if the oplog collection didn't exist.
*/
- const Collection* getCollection() const {
- return _oplog;
+ const CollectionPtr& getCollection() const {
+ return *_oplog;
}
private:
@@ -429,7 +437,7 @@ private:
boost::optional<Lock::DBLock> _dbWriteLock;
boost::optional<Lock::CollectionLock> _collWriteLock;
repl::LocalOplogInfo* _oplogInfo;
- const Collection* _oplog;
+ const CollectionPtr* _oplog;
};
} // namespace mongo
diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h
index cc784cdf75c..b56ea4eb37b 100644
--- a/src/mongo/db/client.h
+++ b/src/mongo/db/client.h
@@ -52,7 +52,6 @@
namespace mongo {
-class Collection;
class Locker;
class OperationContext;
class ThreadClient;
diff --git a/src/mongo/db/clientcursor.h b/src/mongo/db/clientcursor.h
index f4d7960a759..745b03157f3 100644
--- a/src/mongo/db/clientcursor.h
+++ b/src/mongo/db/clientcursor.h
@@ -44,7 +44,6 @@
namespace mongo {
-class Collection;
class CursorManager;
class RecoveryUnit;
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index b0f8954adc6..47e1e5601ad 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -156,7 +156,7 @@ struct Cloner::Fun {
collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
uassert(28594,
str::stream() << "Collection " << nss << " dropped while cloning",
- collection != nullptr);
+ collection);
}
BSONObj tmp = i.nextSafe();
@@ -367,7 +367,7 @@ Status Cloner::_createCollectionsForDb(
opCtx->checkForInterrupt();
WriteUnitOfWork wunit(opCtx);
- const Collection* collection =
+ CollectionPtr collection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
if (collection) {
if (!params.shardedColl) {
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 2014df3dcc7..b15ecdc4383 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -177,7 +177,7 @@ public:
result);
}
- const Collection* const collection = ctx->getCollection();
+ const auto& collection = ctx->getCollection();
// Prevent chunks from being cleaned up during yields - this allows us to only check the
// version on initial entry into count.
@@ -240,7 +240,7 @@ public:
return true;
}
- const Collection* const collection = ctx->getCollection();
+ const auto& collection = ctx->getCollection();
// Prevent chunks from being cleaned up during yields - this allows us to only check the
// version on initial entry into count.
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index c3bf793ca6b..fac19ca99eb 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -317,7 +317,7 @@ boost::optional<CommitQuorumOptions> parseAndGetCommitQuorum(OperationContext* o
* returned vector is empty after returning, no new indexes need to be built. Throws on error.
*/
std::vector<BSONObj> resolveDefaultsAndRemoveExistingIndexes(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::vector<BSONObj> indexSpecs) {
// Normalize the specs' collations, wildcard projections, and partial filters as applicable.
auto normalSpecs = IndexBuildsCoordinator::normalizeIndexSpecs(opCtx, collection, indexSpecs);
@@ -339,7 +339,7 @@ void fillCommandResultWithIndexesAlreadyExistInfo(int numIndexes, BSONObjBuilder
* Returns true, after filling in the command result, if the index creation can return early.
*/
bool indexesAlreadyExist(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const std::vector<BSONObj>& specs,
BSONObjBuilder* result) {
auto specsCopy = resolveDefaultsAndRemoveExistingIndexes(opCtx, collection, specs);
@@ -518,12 +518,12 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
}
bool indexExists = writeConflictRetry(opCtx, "createCollectionWithIndexes", ns.ns(), [&] {
- AutoGetCollection autoColl(opCtx, ns, MODE_IS);
- auto collection = autoColl.getCollection();
+ AutoGetCollection collection(opCtx, ns, MODE_IS);
// Before potentially taking an exclusive collection lock, check if all indexes already
// exist while holding an intent lock.
- if (collection && indexesAlreadyExist(opCtx, collection, specs, &result)) {
+ if (collection &&
+ indexesAlreadyExist(opCtx, collection.getCollection(), specs, &result)) {
repl::ReplClientInfo::forClient(opCtx->getClient())
.setLastOpToSystemLastOpTime(opCtx);
return true;
diff --git a/src/mongo/db/commands/dbcheck.cpp b/src/mongo/db/commands/dbcheck.cpp
index fd72f8fb4b9..73b4b1cf00d 100644
--- a/src/mongo/db/commands/dbcheck.cpp
+++ b/src/mongo/db/commands/dbcheck.cpp
@@ -393,7 +393,7 @@ private:
return Status(ErrorCodes::PrimarySteppedDown, "dbCheck terminated due to stepdown");
}
- auto collection = agc.getCollection();
+ const auto& collection = agc.getCollection();
if (!collection) {
return {ErrorCodes::NamespaceNotFound, "dbCheck collection no longer exists"};
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 7a7a57d1f0c..57c21d78ebc 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -479,8 +479,7 @@ public:
bool estimate = jsobj["estimate"].trueValue();
const NamespaceString nss(ns);
- AutoGetCollectionForReadCommand ctx(opCtx, nss);
- const Collection* collection = ctx.getCollection();
+ AutoGetCollectionForReadCommand collection(opCtx, nss);
const auto collDesc =
CollectionShardingState::get(opCtx, nss)->getCollectionDescription(opCtx);
@@ -527,7 +526,7 @@ public:
return 1;
}
exec = InternalPlanner::collectionScan(
- opCtx, ns, collection, PlanYieldPolicy::YieldPolicy::NO_YIELD);
+ opCtx, ns, collection.getCollection(), PlanYieldPolicy::YieldPolicy::NO_YIELD);
} else if (min.isEmpty() || max.isEmpty()) {
errmsg = "only one of min or max specified";
return false;
@@ -552,7 +551,7 @@ public:
max = Helpers::toKeyFormat(kp.extendRangeBound(max, false));
exec = InternalPlanner::indexScan(opCtx,
- collection,
+ collection.getCollection(),
idx,
min,
max,
diff --git a/src/mongo/db/commands/dbcommands_d.cpp b/src/mongo/db/commands/dbcommands_d.cpp
index 944a63b39bd..5a8aafa3f3a 100644
--- a/src/mongo/db/commands/dbcommands_d.cpp
+++ b/src/mongo/db/commands/dbcommands_d.cpp
@@ -301,7 +301,7 @@ public:
// We drop and re-acquire these locks every document because md5'ing is expensive
unique_ptr<AutoGetCollectionForReadCommand> ctx(
new AutoGetCollectionForReadCommand(opCtx, nss));
- const Collection* coll = ctx->getCollection();
+ const CollectionPtr& coll = ctx->getCollection();
auto exec = uassertStatusOK(getExecutor(opCtx,
coll,
@@ -389,7 +389,7 @@ public:
}
// Now that we have the lock again, we can restore the PlanExecutor.
- exec->restoreState();
+ exec->restoreState(&coll);
}
} catch (DBException& exception) {
exception.addContext("Executor error during filemd5 command");
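restoreState() now takes a pointer to the CollectionPtr that should back the executor once locks are re-acquired (the getmore_cmd.cpp hunk below passes nullptr when the existing reference is still valid). A sketch of the yield/restore cycle used by filemd5 above:

    // Sketch: drop and re-acquire locks between documents, then re-bind.
    exec->saveState();
    ctx.reset();                                                  // release locks
    ctx.reset(new AutoGetCollectionForReadCommand(opCtx, nss));   // re-acquire
    const CollectionPtr& coll = ctx->getCollection();
    exec->restoreState(&coll);  // nullptr would mean: keep the current reference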
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index be9243982ca..6ef685dd420 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -231,48 +231,50 @@ public:
std::set<std::string> cappedCollectionSet;
bool noError = true;
- catalog::forEachCollectionFromDb(opCtx, dbname, MODE_IS, [&](const Collection* collection) {
- auto collNss = collection->ns();
-
- if (collNss.size() - 1 <= dbname.size()) {
- errmsg = str::stream() << "weird fullCollectionName [" << collNss.toString() << "]";
- noError = false;
- return false;
- }
+ catalog::forEachCollectionFromDb(
+ opCtx, dbname, MODE_IS, [&](const CollectionPtr& collection) {
+ auto collNss = collection->ns();
+
+ if (collNss.size() - 1 <= dbname.size()) {
+ errmsg = str::stream()
+ << "weird fullCollectionName [" << collNss.toString() << "]";
+ noError = false;
+ return false;
+ }
- if (repl::ReplicationCoordinator::isOplogDisabledForNS(collNss)) {
- return true;
- }
+ if (repl::ReplicationCoordinator::isOplogDisabledForNS(collNss)) {
+ return true;
+ }
- if (collNss.coll().startsWith("tmp.mr.")) {
- // We skip any incremental map reduce collections as they also aren't
- // replicated.
- return true;
- }
+ if (collNss.coll().startsWith("tmp.mr.")) {
+ // We skip any incremental map reduce collections as they also aren't
+ // replicated.
+ return true;
+ }
- if (desiredCollections.size() > 0 &&
- desiredCollections.count(collNss.coll().toString()) == 0)
- return true;
+ if (desiredCollections.size() > 0 &&
+ desiredCollections.count(collNss.coll().toString()) == 0)
+ return true;
- // Don't include 'drop pending' collections.
- if (collNss.isDropPendingNamespace())
- return true;
+ // Don't include 'drop pending' collections.
+ if (collNss.isDropPendingNamespace())
+ return true;
- if (collection->isCapped()) {
- cappedCollectionSet.insert(collNss.coll().toString());
- }
+ if (collection->isCapped()) {
+ cappedCollectionSet.insert(collNss.coll().toString());
+ }
- if (OptionalCollectionUUID uuid = collection->uuid()) {
- collectionToUUIDMap[collNss.coll().toString()] = uuid;
- }
+ if (OptionalCollectionUUID uuid = collection->uuid()) {
+ collectionToUUIDMap[collNss.coll().toString()] = uuid;
+ }
- // Compute the hash for this collection.
- std::string hash = _hashCollection(opCtx, db, collNss);
+ // Compute the hash for this collection.
+ std::string hash = _hashCollection(opCtx, db, collNss);
- collectionToHashMap[collNss.coll().toString()] = hash;
+ collectionToHashMap[collNss.coll().toString()] = hash;
- return true;
- });
+ return true;
+ });
if (!noError)
return false;
@@ -315,7 +317,7 @@ public:
private:
std::string _hashCollection(OperationContext* opCtx, Database* db, const NamespaceString& nss) {
- const Collection* collection =
+ CollectionPtr collection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
invariant(collection);
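The loop body above is unchanged apart from indentation; the substantive change is the callback signature of catalog::forEachCollectionFromDb, which now receives a const CollectionPtr&. A minimal usage sketch:

    // Sketch: iterate a database's collections with the new callback type.
    catalog::forEachCollectionFromDb(
        opCtx, dbname, MODE_IS, [&](const CollectionPtr& collection) {
            // Return true to keep iterating, false to stop early
            // (the error path above stops by returning false).
            return true;
        });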
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 6ab423127d9..d6b76a3e7e0 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -174,7 +174,7 @@ public:
result);
}
- const Collection* const collection = ctx->getCollection();
+ const auto& collection = ctx->getCollection();
auto executor = uassertStatusOK(
getExecutorDistinct(collection, QueryPlannerParams::DEFAULT, &parsedDistinct));
@@ -235,7 +235,7 @@ public:
return true;
}
- const Collection* const collection = ctx->getCollection();
+ const auto& collection = ctx->getCollection();
auto executor =
getExecutorDistinct(collection, QueryPlannerParams::DEFAULT, &parsedDistinct);
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index ea36a9dcf1c..26851277138 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -203,7 +203,7 @@ void recordStatsForTopCommand(OperationContext* opCtx) {
curOp->getReadWriteType());
}
-void checkIfTransactionOnCappedColl(const Collection* coll, bool inTransaction) {
+void checkIfTransactionOnCappedColl(const CollectionPtr& coll, bool inTransaction) {
if (coll && coll->isCapped()) {
uassert(
ErrorCodes::OperationNotSupportedInTransaction,
@@ -282,20 +282,19 @@ public:
// Explain calls of the findAndModify command are read-only, but we take write
// locks so that the timing information is more accurate.
- AutoGetCollection autoColl(opCtx, nsString, MODE_IX);
+ AutoGetCollection collection(opCtx, nsString, MODE_IX);
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "database " << dbName << " does not exist",
- autoColl.getDb());
+ collection.getDb());
CollectionShardingState::get(opCtx, nsString)->checkShardVersionOrThrow(opCtx);
- const Collection* const collection = autoColl.getCollection();
-
- const auto exec =
- uassertStatusOK(getExecutorDelete(opDebug, collection, &parsedDelete, verbosity));
+ const auto exec = uassertStatusOK(
+ getExecutorDelete(opDebug, collection.getCollection(), &parsedDelete, verbosity));
auto bodyBuilder = result->getBodyBuilder();
- Explain::explainStages(exec.get(), collection, verbosity, BSONObj(), &bodyBuilder);
+ Explain::explainStages(
+ exec.get(), collection.getCollection(), verbosity, BSONObj(), &bodyBuilder);
} else {
auto request = UpdateRequest();
request.setNamespaceString(nsString);
@@ -307,19 +306,19 @@ public:
// Explain calls of the findAndModify command are read-only, but we take write
// locks so that the timing information is more accurate.
- AutoGetCollection autoColl(opCtx, nsString, MODE_IX);
+ AutoGetCollection collection(opCtx, nsString, MODE_IX);
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "database " << dbName << " does not exist",
- autoColl.getDb());
+ collection.getDb());
CollectionShardingState::get(opCtx, nsString)->checkShardVersionOrThrow(opCtx);
- const Collection* const collection = autoColl.getCollection();
- const auto exec =
- uassertStatusOK(getExecutorUpdate(opDebug, collection, &parsedUpdate, verbosity));
+ const auto exec = uassertStatusOK(
+ getExecutorUpdate(opDebug, collection.getCollection(), &parsedUpdate, verbosity));
auto bodyBuilder = result->getBodyBuilder();
- Explain::explainStages(exec.get(), collection, verbosity, BSONObj(), &bodyBuilder);
+ Explain::explainStages(
+ exec.get(), collection.getCollection(), verbosity, BSONObj(), &bodyBuilder);
}
return Status::OK();
@@ -461,7 +460,7 @@ public:
ParsedDelete parsedDelete(opCtx, &request);
uassertStatusOK(parsedDelete.parseRequest());
- AutoGetCollection autoColl(opCtx, nsString, MODE_IX);
+ AutoGetCollection collection(opCtx, nsString, MODE_IX);
{
stdx::lock_guard<Client> lk(*opCtx->getClient());
@@ -472,11 +471,10 @@ public:
assertCanWrite(opCtx, nsString);
- const Collection* const collection = autoColl.getCollection();
- checkIfTransactionOnCappedColl(collection, inTransaction);
+ checkIfTransactionOnCappedColl(collection.getCollection(), inTransaction);
- const auto exec = uassertStatusOK(
- getExecutorDelete(opDebug, collection, &parsedDelete, boost::none /* verbosity */));
+ const auto exec = uassertStatusOK(getExecutorDelete(
+ opDebug, collection.getCollection(), &parsedDelete, boost::none /* verbosity */));
{
stdx::lock_guard<Client> lk(*opCtx->getClient());
@@ -490,8 +488,8 @@ public:
PlanSummaryStats summaryStats;
exec->getSummaryStats(&summaryStats);
- if (collection) {
- CollectionQueryInfo::get(collection).notifyOfQuery(opCtx, collection, summaryStats);
+ if (const auto& coll = collection.getCollection()) {
+ CollectionQueryInfo::get(coll).notifyOfQuery(opCtx, coll, summaryStats);
}
opDebug->setPlanSummaryMetrics(summaryStats);
@@ -528,29 +526,34 @@ public:
assertCanWrite(opCtx, nsString);
- const Collection* collection = autoColl.getCollection();
+ CollectionPtr createdCollection;
+ const CollectionPtr* collectionPtr = &autoColl.getCollection();
- // Create the collection if it does not exist when performing an upsert because the
- // update stage does not create its own collection
- if (!collection && args.isUpsert()) {
+ // TODO SERVER-50983: Create abstraction for creating collection when using
+ // AutoGetCollection. Create the collection if it does not exist when performing an upsert,
+ // because the update stage does not create its own collection.
+ if (!*collectionPtr && args.isUpsert()) {
assertCanWrite(opCtx, nsString);
- collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nsString);
+ createdCollection =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nsString);
// If someone else beat us to creating the collection, do nothing
- if (!collection) {
+ if (!createdCollection) {
uassertStatusOK(userAllowedCreateNS(nsString));
WriteUnitOfWork wuow(opCtx);
CollectionOptions defaultCollectionOptions;
uassertStatusOK(db->userCreateNS(opCtx, nsString, defaultCollectionOptions));
wuow.commit();
- collection =
+ createdCollection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nsString);
}
- invariant(collection);
+ invariant(createdCollection);
+ collectionPtr = &createdCollection;
}
+ const auto& collection = *collectionPtr;
checkIfTransactionOnCappedColl(collection, inTransaction);
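The upsert path above threads a const CollectionPtr* so that a single name can refer either to the AutoGetCollection's pointer or to the collection created mid-command. Condensed, with the names from this hunk:

    // Sketch: re-point rather than copy the CollectionPtr.
    CollectionPtr createdCollection;
    const CollectionPtr* collectionPtr = &autoColl.getCollection();
    if (!*collectionPtr && args.isUpsert()) {
        // ... create or look up the collection, filling createdCollection ...
        collectionPtr = &createdCollection;
    }
    const auto& collection = *collectionPtr;  // one handle for both cases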
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 2cae02d0a10..1459df1fe91 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -289,7 +289,7 @@ public:
// The collection may be NULL. If so, getExecutor() should handle it by returning an
// execution tree with an EOFStage.
- const Collection* const collection = ctx->getCollection();
+ const auto& collection = ctx->getCollection();
// Get the execution plan for the query.
bool permitYield = true;
@@ -412,7 +412,7 @@ public:
return;
}
- const Collection* const collection = ctx->getCollection();
+ const auto& collection = ctx->getCollection();
if (cq->getQueryRequest().isReadOnce()) {
// The readOnce option causes any storage-layer cursors created during plan
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 0285be5ab5c..bf4a69408a7 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -513,7 +513,7 @@ public:
opCtx->recoveryUnit()->setReadOnce(true);
}
exec->reattachToOperationContext(opCtx);
- exec->restoreState();
+ exec->restoreState(nullptr);
auto planSummary = exec->getPlanSummary();
{
@@ -584,7 +584,7 @@ public:
[exec, dropAndReacquireReadLock]() {
exec->saveState();
dropAndReacquireReadLock();
- exec->restoreState();
+ exec->restoreState(nullptr);
};
waitWithPinnedCursorDuringGetMoreBatch.execute([&](const BSONObj& data) {
diff --git a/src/mongo/db/commands/haystack.cpp b/src/mongo/db/commands/haystack.cpp
index bd991dffa1e..b17c97d22f9 100644
--- a/src/mongo/db/commands/haystack.cpp
+++ b/src/mongo/db/commands/haystack.cpp
@@ -110,7 +110,7 @@ public:
void searchHaystack(const HaystackAccessMethod* ham,
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const BSONObj& nearObj,
double maxDistance,
const BSONObj& search,
@@ -233,7 +233,7 @@ public:
uassertStatusOK(replCoord->checkCanServeReadsFor(
opCtx, nss, ReadPreferenceSetting::get(opCtx).canRunOnSecondary()));
- const Collection* collection = ctx.getCollection();
+ const auto& collection = ctx.getCollection();
if (!collection) {
errmsg = "can't find ns";
return false;
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index a7d6239eaab..ec7529c608e 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -65,13 +65,13 @@ using namespace mongo;
* Retrieves a collection's query settings and plan cache from the database.
*/
static Status getQuerySettingsAndPlanCache(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const string& ns,
QuerySettings** querySettingsOut,
PlanCache** planCacheOut) {
*querySettingsOut = nullptr;
*planCacheOut = nullptr;
- if (nullptr == collection) {
+ if (!collection) {
return Status(ErrorCodes::BadValue, "no such collection");
}
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index ba05d17ff45..d8485d09937 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -163,7 +163,7 @@ BSONObj buildViewBson(const ViewDefinition& view, bool nameOnly) {
* Return an object describing the collection. Takes a collection lock if nameOnly is false.
*/
BSONObj buildCollectionBson(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
bool includePendingDrops,
bool nameOnly) {
if (!collection) {
@@ -321,7 +321,7 @@ public:
}
Lock::CollectionLock clk(opCtx, nss, MODE_IS);
- const Collection* collection =
+ CollectionPtr collection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
BSONObj collBson =
buildCollectionBson(opCtx, collection, includePendingDrops, nameOnly);
@@ -332,7 +332,7 @@ public:
}
} else {
mongo::catalog::forEachCollectionFromDb(
- opCtx, dbname, MODE_IS, [&](const Collection* collection) {
+ opCtx, dbname, MODE_IS, [&](const CollectionPtr& collection) {
if (authorizedCollections &&
(!as->isAuthorizedForAnyActionOnResource(
ResourcePattern::forExactNamespace(collection->ns())))) {
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index bd2eab3b3fc..94d4eb1e51d 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -157,18 +157,18 @@ public:
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec;
BSONArrayBuilder firstBatch;
{
- AutoGetCollectionForReadCommand ctx(opCtx,
- CommandHelpers::parseNsOrUUID(dbname, cmdObj));
- const Collection* collection = ctx.getCollection();
+ AutoGetCollectionForReadCommand collection(
+ opCtx, CommandHelpers::parseNsOrUUID(dbname, cmdObj));
uassert(ErrorCodes::NamespaceNotFound,
- str::stream() << "ns does not exist: " << ctx.getNss().ns(),
+ str::stream() << "ns does not exist: " << collection.getNss().ns(),
collection);
- nss = ctx.getNss();
+ nss = collection.getNss();
auto expCtx = make_intrusive<ExpressionContext>(
opCtx, std::unique_ptr<CollatorInterface>(nullptr), nss);
- auto indexList = listIndexesInLock(opCtx, collection, nss, includeBuildUUIDs);
+ auto indexList =
+ listIndexesInLock(opCtx, collection.getCollection(), nss, includeBuildUUIDs);
auto ws = std::make_unique<WorkingSet>();
auto root = std::make_unique<QueuedDataStage>(expCtx.get(), ws.get());
diff --git a/src/mongo/db/commands/mr_test.cpp b/src/mongo/db/commands/mr_test.cpp
index f6af27117eb..a4ed9383018 100644
--- a/src/mongo/db/commands/mr_test.cpp
+++ b/src/mongo/db/commands/mr_test.cpp
@@ -267,7 +267,7 @@ public:
* Tracks the temporary collections mapReduces creates.
*/
void onCreateCollection(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
@@ -321,7 +321,7 @@ void MapReduceOpObserver::onInserts(OperationContext* opCtx,
}
void MapReduceOpObserver::onCreateCollection(OperationContext*,
- const Collection*,
+ const CollectionPtr&,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj&,
diff --git a/src/mongo/db/commands/plan_cache_clear_command.cpp b/src/mongo/db/commands/plan_cache_clear_command.cpp
index 61660438f04..5dc3822bd4a 100644
--- a/src/mongo/db/commands/plan_cache_clear_command.cpp
+++ b/src/mongo/db/commands/plan_cache_clear_command.cpp
@@ -47,7 +47,7 @@
namespace mongo {
namespace {
-PlanCache* getPlanCache(OperationContext* opCtx, const Collection* collection) {
+PlanCache* getPlanCache(OperationContext* opCtx, const CollectionPtr& collection) {
invariant(collection);
PlanCache* planCache = CollectionQueryInfo::get(collection).getPlanCache();
invariant(planCache);
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index b3a36985b1c..927b93d9e96 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -563,7 +563,7 @@ Status runAggregate(OperationContext* opCtx,
}
}
- const Collection* collection = ctx ? ctx->getCollection() : nullptr;
+ const auto& collection = ctx ? ctx->getCollection() : CollectionPtr::null;
// If this is a view, resolve it by finding the underlying collection and stitching view
// pipelines and this request's pipeline together. We then release our locks before
@@ -781,7 +781,7 @@ Status runAggregate(OperationContext* opCtx,
// For an optimized away pipeline, signal the cache that a query operation has completed.
// For normal pipelines this is done in DocumentSourceCursor.
if (ctx && ctx->getCollection()) {
- const Collection* coll = ctx->getCollection();
+ const CollectionPtr& coll = ctx->getCollection();
CollectionQueryInfo::get(coll).notifyOfQuery(opCtx, coll, stats);
}
}
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index de86a427505..a5844835ccb 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -92,7 +92,7 @@ public:
WriteUnitOfWork wunit(opCtx);
UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
- const Collection* collection =
+ CollectionPtr collection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
if (!collection) {
collection = db->createCollection(opCtx, nss);
diff --git a/src/mongo/db/concurrency/deferred_writer.cpp b/src/mongo/db/concurrency/deferred_writer.cpp
index 3883ae2cd40..6c0f0ed3c77 100644
--- a/src/mongo/db/concurrency/deferred_writer.cpp
+++ b/src/mongo/db/concurrency/deferred_writer.cpp
@@ -112,11 +112,11 @@ void DeferredWriter::_worker(InsertStatement stmt) {
auto agc = std::move(result.getValue());
- const Collection& collection = *agc->getCollection();
+ const CollectionPtr& collection = agc->getCollection();
Status status = writeConflictRetry(opCtx, "deferred insert", _nss.ns(), [&] {
WriteUnitOfWork wuow(opCtx);
- Status status = collection.insertDocument(opCtx, stmt, nullptr, false);
+ Status status = collection->insertDocument(opCtx, stmt, nullptr, false);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index 5e294acefe4..7e2397f2521 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -104,7 +104,7 @@ AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* opCtx,
// If the collection doesn't exist or disappears after releasing locks and waiting, there is no
// need to check for pending catalog changes.
- while (auto coll = _autoColl->getCollection()) {
+ while (const auto& coll = _autoColl->getCollection()) {
// Ban snapshot reads on capped collections.
uassert(ErrorCodes::SnapshotUnavailable,
"Reading from capped collections with readConcern snapshot is not supported",
diff --git a/src/mongo/db/db_raii.h b/src/mongo/db/db_raii.h
index e8a1157b9ff..89406e31939 100644
--- a/src/mongo/db/db_raii.h
+++ b/src/mongo/db/db_raii.h
@@ -107,11 +107,19 @@ public:
AutoGetCollectionViewMode viewMode = AutoGetCollectionViewMode::kViewsForbidden,
Date_t deadline = Date_t::max());
+ explicit operator bool() const {
+ return static_cast<bool>(getCollection());
+ }
+
+ const Collection* operator->() const {
+ return getCollection().get();
+ }
+
Database* getDb() const {
return _autoColl->getDb();
}
- const Collection* getCollection() const {
+ const CollectionPtr& getCollection() const {
return _autoColl->getCollection();
}
@@ -151,11 +159,19 @@ public:
Date_t deadline = Date_t::max(),
AutoStatsTracker::LogMode logMode = AutoStatsTracker::LogMode::kUpdateTopAndCurOp);
+ explicit operator bool() const {
+ return static_cast<bool>(getCollection());
+ }
+
+ const Collection* operator->() const {
+ return getCollection().get();
+ }
+
Database* getDb() const {
return _autoCollForRead.getDb();
}
- const Collection* getCollection() const {
+ const CollectionPtr& getCollection() const {
return _autoCollForRead.getCollection();
}
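With the operator bool and operator-> added above, the RAII object itself now stands in for the collection at call sites, which is what lets the earlier hunks (list_indexes.cpp, dbcommands.cpp) delete their separate const Collection* locals. A usage sketch:

    AutoGetCollectionForReadCommand collection(opCtx, nss);
    if (!collection) {
        // namespace does not exist
    }
    auto* indexCatalog = collection->getIndexCatalog();     // forwards to Collection
    const CollectionPtr& ptr = collection.getCollection();  // stable reference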
diff --git a/src/mongo/db/db_raii_test.cpp b/src/mongo/db/db_raii_test.cpp
index a9239f0a92e..48f946ed895 100644
--- a/src/mongo/db/db_raii_test.cpp
+++ b/src/mongo/db/db_raii_test.cpp
@@ -69,7 +69,7 @@ public:
};
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> makeTailableQueryPlan(
- OperationContext* opCtx, const Collection* collection) {
+ OperationContext* opCtx, const CollectionPtr& collection) {
auto qr = std::make_unique<QueryRequest>(collection->ns());
qr->setTailableMode(TailableModeEnum::kTailableAndAwaitData);
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index f20ce257252..21e602052ea 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -61,7 +61,7 @@ using std::unique_ptr;
set your db SavedContext first
*/
bool Helpers::findOne(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const BSONObj& query,
BSONObj& result,
bool requireIndex) {
@@ -76,7 +76,7 @@ bool Helpers::findOne(OperationContext* opCtx,
set your db SavedContext first
*/
RecordId Helpers::findOne(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const BSONObj& query,
bool requireIndex) {
if (!collection)
@@ -88,7 +88,7 @@ RecordId Helpers::findOne(OperationContext* opCtx,
}
RecordId Helpers::findOne(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<QueryRequest> qr,
bool requireIndex) {
if (!collection)
@@ -130,7 +130,7 @@ bool Helpers::findById(OperationContext* opCtx,
invariant(database);
// TODO ForRead?
- const Collection* collection =
+ CollectionPtr collection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, NamespaceString(ns));
if (!collection) {
return false;
@@ -156,7 +156,7 @@ bool Helpers::findById(OperationContext* opCtx,
}
RecordId Helpers::findById(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const BSONObj& idquery) {
verify(collection);
const IndexCatalog* catalog = collection->getIndexCatalog();
@@ -167,10 +167,11 @@ RecordId Helpers::findById(OperationContext* opCtx,
// Acquires necessary locks to read the collection with the given namespace. If this is an oplog
// read, use AutoGetOplog for simplified locking.
-const Collection* getCollectionForRead(OperationContext* opCtx,
- const NamespaceString& ns,
- boost::optional<AutoGetCollectionForReadCommand>& autoColl,
- boost::optional<AutoGetOplog>& autoOplog) {
+const CollectionPtr& getCollectionForRead(
+ OperationContext* opCtx,
+ const NamespaceString& ns,
+ boost::optional<AutoGetCollectionForReadCommand>& autoColl,
+ boost::optional<AutoGetOplog>& autoOplog) {
if (ns.isOplog()) {
// Simplify locking rules for oplog collection.
autoOplog.emplace(opCtx, OplogAccessMode::kRead);
@@ -184,7 +185,7 @@ const Collection* getCollectionForRead(OperationContext* opCtx,
bool Helpers::getSingleton(OperationContext* opCtx, const char* ns, BSONObj& result) {
boost::optional<AutoGetCollectionForReadCommand> autoColl;
boost::optional<AutoGetOplog> autoOplog;
- auto collection = getCollectionForRead(opCtx, NamespaceString(ns), autoColl, autoOplog);
+ const auto& collection = getCollectionForRead(opCtx, NamespaceString(ns), autoColl, autoOplog);
auto exec = InternalPlanner::collectionScan(
opCtx, ns, collection, PlanYieldPolicy::YieldPolicy::NO_YIELD);
@@ -206,7 +207,7 @@ bool Helpers::getSingleton(OperationContext* opCtx, const char* ns, BSONObj& res
bool Helpers::getLast(OperationContext* opCtx, const char* ns, BSONObj& result) {
boost::optional<AutoGetCollectionForReadCommand> autoColl;
boost::optional<AutoGetOplog> autoOplog;
- auto collection = getCollectionForRead(opCtx, NamespaceString(ns), autoColl, autoOplog);
+ const auto& collection = getCollectionForRead(opCtx, NamespaceString(ns), autoColl, autoOplog);
auto exec = InternalPlanner::collectionScan(
opCtx, ns, collection, PlanYieldPolicy::YieldPolicy::NO_YIELD, InternalPlanner::BACKWARD);
@@ -306,7 +307,7 @@ BSONObj Helpers::inferKeyPattern(const BSONObj& o) {
void Helpers::emptyCollection(OperationContext* opCtx, const NamespaceString& nss) {
OldClientContext context(opCtx, nss.ns());
repl::UnreplicatedWritesBlock uwb(opCtx);
- const Collection* collection = context.db()
+ CollectionPtr collection = context.db()
? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss)
: nullptr;
deleteObjects(opCtx, collection, nss, BSONObj(), false);
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index 3bdf368490a..339f5165455 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -35,6 +35,7 @@
namespace mongo {
class Collection;
+class CollectionPtr;
class Database;
class OperationContext;
class QueryRequest;
@@ -59,18 +60,18 @@ struct Helpers {
@return true if object found
*/
static bool findOne(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const BSONObj& query,
BSONObj& result,
bool requireIndex = false);
static RecordId findOne(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const BSONObj& query,
bool requireIndex);
static RecordId findOne(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<QueryRequest> qr,
bool requireIndex);
@@ -90,7 +91,7 @@ struct Helpers {
* uasserts if no _id index.
* @return null loc if not found */
static RecordId findById(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const BSONObj& query);
/**
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index f39253281de..f0c6181d2de 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -59,7 +59,7 @@ namespace mongo {
const char* CachedPlanStage::kStageType = "CACHED_PLAN";
CachedPlanStage::CachedPlanStage(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
WorkingSet* ws,
CanonicalQuery* cq,
const QueryPlannerParams& params,
diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h
index 6719b965225..a1098c3c406 100644
--- a/src/mongo/db/exec/cached_plan.h
+++ b/src/mongo/db/exec/cached_plan.h
@@ -57,7 +57,7 @@ class PlanYieldPolicy;
class CachedPlanStage final : public RequiresAllIndicesStage {
public:
CachedPlanStage(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
WorkingSet* ws,
CanonicalQuery* cq,
const QueryPlannerParams& params,
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index ffabd1886b4..fc5b9c854d5 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -57,7 +57,7 @@ using std::vector;
const char* CollectionScan::kStageType = "COLLSCAN";
CollectionScan::CollectionScan(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CollectionScanParams& params,
WorkingSet* workingSet,
const MatchExpression* filter)
diff --git a/src/mongo/db/exec/collection_scan.h b/src/mongo/db/exec/collection_scan.h
index b67d8cd6603..ac9681af48e 100644
--- a/src/mongo/db/exec/collection_scan.h
+++ b/src/mongo/db/exec/collection_scan.h
@@ -55,7 +55,7 @@ public:
static const char* kStageType;
CollectionScan(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CollectionScanParams& params,
WorkingSet* workingSet,
const MatchExpression* filter);
diff --git a/src/mongo/db/exec/collection_scan_common.h b/src/mongo/db/exec/collection_scan_common.h
index aa0c790d1fd..fb847553b36 100644
--- a/src/mongo/db/exec/collection_scan_common.h
+++ b/src/mongo/db/exec/collection_scan_common.h
@@ -34,8 +34,6 @@
namespace mongo {
-class Collection;
-
struct CollectionScanParams {
enum Direction {
FORWARD = 1,
diff --git a/src/mongo/db/exec/count.cpp b/src/mongo/db/exec/count.cpp
index e5f3604efc1..832db7435cb 100644
--- a/src/mongo/db/exec/count.cpp
+++ b/src/mongo/db/exec/count.cpp
@@ -46,7 +46,7 @@ using std::vector;
const char* CountStage::kStageType = "COUNT";
CountStage::CountStage(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
long long limit,
long long skip,
WorkingSet* ws,
diff --git a/src/mongo/db/exec/count.h b/src/mongo/db/exec/count.h
index 8c21f2e1cff..b8de89a9594 100644
--- a/src/mongo/db/exec/count.h
+++ b/src/mongo/db/exec/count.h
@@ -47,7 +47,7 @@ namespace mongo {
class CountStage final : public PlanStage {
public:
CountStage(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
long long limit,
long long skip,
WorkingSet* ws,
diff --git a/src/mongo/db/exec/count_scan.cpp b/src/mongo/db/exec/count_scan.cpp
index fea2fa70c4d..8174e35ce2e 100644
--- a/src/mongo/db/exec/count_scan.cpp
+++ b/src/mongo/db/exec/count_scan.cpp
@@ -74,7 +74,7 @@ const char* CountScan::kStageType = "COUNT_SCAN";
// the CountScanParams rather than resolving them via the IndexDescriptor, since these may differ
// from the descriptor's contents.
CountScan::CountScan(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
CountScanParams params,
WorkingSet* workingSet)
: RequiresIndexStage(kStageType, expCtx, collection, params.indexDescriptor, workingSet),
diff --git a/src/mongo/db/exec/count_scan.h b/src/mongo/db/exec/count_scan.h
index 44a092c19b8..3b571bc5723 100644
--- a/src/mongo/db/exec/count_scan.h
+++ b/src/mongo/db/exec/count_scan.h
@@ -88,7 +88,7 @@ struct CountScanParams {
class CountScan final : public RequiresIndexStage {
public:
CountScan(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
CountScanParams params,
WorkingSet* workingSet);
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index 456e1be27e6..73de1abdbc5 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -71,7 +71,7 @@ bool shouldRestartDeleteIfNoLongerMatches(const DeleteStageParams* params) {
DeleteStage::DeleteStage(ExpressionContext* expCtx,
std::unique_ptr<DeleteStageParams> params,
WorkingSet* ws,
- const Collection* collection,
+ const CollectionPtr& collection,
PlanStage* child)
: RequiresMutableCollectionStage(kStageType.rawData(), expCtx, collection),
_params(std::move(params)),
diff --git a/src/mongo/db/exec/delete.h b/src/mongo/db/exec/delete.h
index 0123902e508..225705510d2 100644
--- a/src/mongo/db/exec/delete.h
+++ b/src/mongo/db/exec/delete.h
@@ -104,7 +104,7 @@ public:
DeleteStage(ExpressionContext* expCtx,
std::unique_ptr<DeleteStageParams> params,
WorkingSet* ws,
- const Collection* collection,
+ const CollectionPtr& collection,
PlanStage* child);
bool isEOF() final;
diff --git a/src/mongo/db/exec/distinct_scan.cpp b/src/mongo/db/exec/distinct_scan.cpp
index b0644a44c82..395130f09cb 100644
--- a/src/mongo/db/exec/distinct_scan.cpp
+++ b/src/mongo/db/exec/distinct_scan.cpp
@@ -47,7 +47,7 @@ using std::vector;
const char* DistinctScan::kStageType = "DISTINCT_SCAN";
DistinctScan::DistinctScan(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
DistinctParams params,
WorkingSet* workingSet)
: RequiresIndexStage(kStageType, expCtx, collection, params.indexDescriptor, workingSet),
diff --git a/src/mongo/db/exec/distinct_scan.h b/src/mongo/db/exec/distinct_scan.h
index b4d73e7b407..606e5b0b8f4 100644
--- a/src/mongo/db/exec/distinct_scan.h
+++ b/src/mongo/db/exec/distinct_scan.h
@@ -96,7 +96,7 @@ struct DistinctParams {
class DistinctScan final : public RequiresIndexStage {
public:
DistinctScan(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
DistinctParams params,
WorkingSet* workingSet);
diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp
index 9528e9dc085..166b4604f8b 100644
--- a/src/mongo/db/exec/fetch.cpp
+++ b/src/mongo/db/exec/fetch.cpp
@@ -53,7 +53,7 @@ FetchStage::FetchStage(ExpressionContext* expCtx,
WorkingSet* ws,
std::unique_ptr<PlanStage> child,
const MatchExpression* filter,
- const Collection* collection)
+ const CollectionPtr& collection)
: RequiresCollectionStage(kStageType, expCtx, collection),
_ws(ws),
_filter((filter && !filter->isTriviallyTrue()) ? filter : nullptr),
diff --git a/src/mongo/db/exec/fetch.h b/src/mongo/db/exec/fetch.h
index 10dddc50a4f..0e3db734142 100644
--- a/src/mongo/db/exec/fetch.h
+++ b/src/mongo/db/exec/fetch.h
@@ -54,7 +54,7 @@ public:
WorkingSet* ws,
std::unique_ptr<PlanStage> child,
const MatchExpression* filter,
- const Collection* collection);
+ const CollectionPtr& collection);
~FetchStage();
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index e55f085a85f..8f97ab6107a 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -266,7 +266,7 @@ static R2Annulus twoDDistanceBounds(const GeoNearParams& nearParams,
return fullBounds;
}
-GeoNear2DStage::DensityEstimator::DensityEstimator(const Collection* collection,
+GeoNear2DStage::DensityEstimator::DensityEstimator(const CollectionPtr& collection,
PlanStage::Children* children,
BSONObj infoObj,
const GeoNearParams* nearParams,
@@ -454,7 +454,7 @@ static const string kTwoDIndexNearStage("GEO_NEAR_2D");
GeoNear2DStage::GeoNear2DStage(const GeoNearParams& nearParams,
ExpressionContext* expCtx,
WorkingSet* workingSet,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexDescriptor* twoDIndex)
: NearStage(expCtx,
kTwoDIndexNearStage.c_str(),
@@ -480,7 +480,7 @@ public:
WorkingSet* ws,
std::unique_ptr<PlanStage> child,
MatchExpression* filter,
- const Collection* collection)
+ const CollectionPtr& collection)
: FetchStage(expCtx, ws, std::move(child), filter, collection), _matcher(filter) {}
private:
@@ -520,7 +520,7 @@ static R2Annulus projectBoundsToTwoDDegrees(R2Annulus sphereBounds) {
}
std::unique_ptr<NearStage::CoveredInterval> GeoNear2DStage::nextInterval(
- OperationContext* opCtx, WorkingSet* workingSet, const Collection* collection) {
+ OperationContext* opCtx, WorkingSet* workingSet, const CollectionPtr& collection) {
// The search is finished if we searched at least once and all the way to the edge
if (_currBounds.getInner() >= 0 && _currBounds.getOuter() == _fullBounds.getOuter()) {
return nullptr;
@@ -697,7 +697,7 @@ static const string kS2IndexNearStage("GEO_NEAR_2DSPHERE");
GeoNear2DSphereStage::GeoNear2DSphereStage(const GeoNearParams& nearParams,
ExpressionContext* expCtx,
WorkingSet* workingSet,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexDescriptor* s2Index)
: NearStage(expCtx,
kS2IndexNearStage.c_str(),
@@ -762,7 +762,7 @@ S2Region* buildS2Region(const R2Annulus& sphereBounds) {
}
} // namespace
-GeoNear2DSphereStage::DensityEstimator::DensityEstimator(const Collection* collection,
+GeoNear2DSphereStage::DensityEstimator::DensityEstimator(const CollectionPtr& collection,
PlanStage::Children* children,
const GeoNearParams* nearParams,
const S2IndexingParams& indexParams,
@@ -919,7 +919,7 @@ PlanStage::StageState GeoNear2DSphereStage::initialize(OperationContext* opCtx,
}
std::unique_ptr<NearStage::CoveredInterval> GeoNear2DSphereStage::nextInterval(
- OperationContext* opCtx, WorkingSet* workingSet, const Collection* collection) {
+ OperationContext* opCtx, WorkingSet* workingSet, const CollectionPtr& collection) {
// The search is finished if we searched at least once and all the way to the edge
if (_currBounds.getInner() >= 0 && _currBounds.getOuter() == _fullBounds.getOuter()) {
return nullptr;
diff --git a/src/mongo/db/exec/geo_near.h b/src/mongo/db/exec/geo_near.h
index 08001ecc77c..d29d7536bea 100644
--- a/src/mongo/db/exec/geo_near.h
+++ b/src/mongo/db/exec/geo_near.h
@@ -71,13 +71,13 @@ public:
GeoNear2DStage(const GeoNearParams& nearParams,
ExpressionContext* expCtx,
WorkingSet* workingSet,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexDescriptor* twoDIndex);
protected:
std::unique_ptr<CoveredInterval> nextInterval(OperationContext* opCtx,
WorkingSet* workingSet,
- const Collection* collection) final;
+ const CollectionPtr& collection) final;
double computeDistance(WorkingSetMember* member) final;
@@ -88,7 +88,7 @@ protected:
private:
class DensityEstimator {
public:
- DensityEstimator(const Collection* collection,
+ DensityEstimator(const CollectionPtr& collection,
PlanStage::Children* children,
BSONObj infoObj,
const GeoNearParams* nearParams,
@@ -105,7 +105,7 @@ private:
WorkingSet* workingSet,
const IndexDescriptor* twoDIndex);
- const Collection* _collection;
+ const CollectionPtr& _collection;
PlanStage::Children* _children; // Points to PlanStage::_children in the NearStage.
const GeoNearParams* _nearParams; // Not owned here.
const R2Annulus& _fullBounds;
@@ -140,13 +140,13 @@ public:
GeoNear2DSphereStage(const GeoNearParams& nearParams,
ExpressionContext* expCtx,
WorkingSet* workingSet,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexDescriptor* s2Index);
protected:
std::unique_ptr<CoveredInterval> nextInterval(OperationContext* opCtx,
WorkingSet* workingSet,
- const Collection* collection) final;
+ const CollectionPtr& collection) final;
double computeDistance(WorkingSetMember* member) final;
@@ -158,7 +158,7 @@ private:
// Estimate the density of data by searching the nearest cells level by level around the center.
class DensityEstimator {
public:
- DensityEstimator(const Collection* collection,
+ DensityEstimator(const CollectionPtr& collection,
PlanStage::Children* children,
const GeoNearParams* nearParams,
const S2IndexingParams& indexParams,
@@ -177,7 +177,7 @@ private:
WorkingSet* workingSet,
const IndexDescriptor* s2Index);
- const Collection* _collection;
+ const CollectionPtr& _collection;
PlanStage::Children* _children; // Points to PlanStage::_children in the NearStage.
const GeoNearParams* _nearParams; // Not owned here.
const S2IndexingParams _indexParams;
diff --git a/src/mongo/db/exec/idhack.cpp b/src/mongo/db/exec/idhack.cpp
index 196993562d2..22319c36e74 100644
--- a/src/mongo/db/exec/idhack.cpp
+++ b/src/mongo/db/exec/idhack.cpp
@@ -52,7 +52,7 @@ const char* IDHackStage::kStageType = "IDHACK";
IDHackStage::IDHackStage(ExpressionContext* expCtx,
CanonicalQuery* query,
WorkingSet* ws,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexDescriptor* descriptor)
: RequiresIndexStage(kStageType, expCtx, collection, descriptor, ws),
_workingSet(ws),
@@ -64,7 +64,7 @@ IDHackStage::IDHackStage(ExpressionContext* expCtx,
IDHackStage::IDHackStage(ExpressionContext* expCtx,
const BSONObj& key,
WorkingSet* ws,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexDescriptor* descriptor)
: RequiresIndexStage(kStageType, expCtx, collection, descriptor, ws),
_workingSet(ws),
diff --git a/src/mongo/db/exec/idhack.h b/src/mongo/db/exec/idhack.h
index a06366459e5..effe1766efa 100644
--- a/src/mongo/db/exec/idhack.h
+++ b/src/mongo/db/exec/idhack.h
@@ -51,13 +51,13 @@ public:
IDHackStage(ExpressionContext* expCtx,
CanonicalQuery* query,
WorkingSet* ws,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexDescriptor* descriptor);
IDHackStage(ExpressionContext* expCtx,
const BSONObj& key,
WorkingSet* ws,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexDescriptor* descriptor);
~IDHackStage();
diff --git a/src/mongo/db/exec/index_scan.cpp b/src/mongo/db/exec/index_scan.cpp
index 62cc788b790..f746f3f8bad 100644
--- a/src/mongo/db/exec/index_scan.cpp
+++ b/src/mongo/db/exec/index_scan.cpp
@@ -60,7 +60,7 @@ namespace mongo {
const char* IndexScan::kStageType = "IXSCAN";
IndexScan::IndexScan(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
IndexScanParams params,
WorkingSet* workingSet,
const MatchExpression* filter)
diff --git a/src/mongo/db/exec/index_scan.h b/src/mongo/db/exec/index_scan.h
index cad8df9b9a0..c2be1169117 100644
--- a/src/mongo/db/exec/index_scan.h
+++ b/src/mongo/db/exec/index_scan.h
@@ -109,7 +109,7 @@ public:
};
IndexScan(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
IndexScanParams params,
WorkingSet* workingSet,
const MatchExpression* filter);
diff --git a/src/mongo/db/exec/multi_iterator.cpp b/src/mongo/db/exec/multi_iterator.cpp
index d9aee1eb09f..ca6dfe67210 100644
--- a/src/mongo/db/exec/multi_iterator.cpp
+++ b/src/mongo/db/exec/multi_iterator.cpp
@@ -45,7 +45,7 @@ const char* MultiIteratorStage::kStageType = "MULTI_ITERATOR";
MultiIteratorStage::MultiIteratorStage(ExpressionContext* expCtx,
WorkingSet* ws,
- const Collection* collection)
+ const CollectionPtr& collection)
: RequiresCollectionStage(kStageType, expCtx, collection), _ws(ws) {}
void MultiIteratorStage::addIterator(unique_ptr<RecordCursor> it) {
diff --git a/src/mongo/db/exec/multi_iterator.h b/src/mongo/db/exec/multi_iterator.h
index 1519901c52f..cf6e5e322c6 100644
--- a/src/mongo/db/exec/multi_iterator.h
+++ b/src/mongo/db/exec/multi_iterator.h
@@ -47,7 +47,7 @@ namespace mongo {
*/
class MultiIteratorStage final : public RequiresCollectionStage {
public:
- MultiIteratorStage(ExpressionContext* expCtx, WorkingSet* ws, const Collection* collection);
+ MultiIteratorStage(ExpressionContext* expCtx, WorkingSet* ws, const CollectionPtr& collection);
void addIterator(std::unique_ptr<RecordCursor> it);
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index ab89c47642c..a9e985cd533 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -72,7 +72,7 @@ void markShouldCollectTimingInfoOnSubtree(PlanStage* root) {
} // namespace
MultiPlanStage::MultiPlanStage(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
CanonicalQuery* cq,
PlanCachingMode cachingMode)
: RequiresCollectionStage(kStageType, expCtx, collection),
diff --git a/src/mongo/db/exec/multi_plan.h b/src/mongo/db/exec/multi_plan.h
index 59791dc7d2d..f76caef83a5 100644
--- a/src/mongo/db/exec/multi_plan.h
+++ b/src/mongo/db/exec/multi_plan.h
@@ -60,7 +60,7 @@ public:
* when possible. If 'cachingMode' disables caching, the plan cache will never be written.
*/
MultiPlanStage(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
CanonicalQuery* cq,
PlanCachingMode cachingMode = PlanCachingMode::AlwaysCache);
diff --git a/src/mongo/db/exec/near.cpp b/src/mongo/db/exec/near.cpp
index 94af9d88f74..3b095958986 100644
--- a/src/mongo/db/exec/near.cpp
+++ b/src/mongo/db/exec/near.cpp
@@ -46,7 +46,7 @@ NearStage::NearStage(ExpressionContext* expCtx,
const char* typeName,
StageType type,
WorkingSet* workingSet,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexDescriptor* indexDescriptor)
: RequiresIndexStage(typeName, expCtx, collection, indexDescriptor, workingSet),
_workingSet(workingSet),
diff --git a/src/mongo/db/exec/near.h b/src/mongo/db/exec/near.h
index fef83f58640..315b7c8f4b8 100644
--- a/src/mongo/db/exec/near.h
+++ b/src/mongo/db/exec/near.h
@@ -108,7 +108,7 @@ protected:
const char* typeName,
StageType type,
WorkingSet* workingSet,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexDescriptor* indexDescriptor);
//
@@ -122,7 +122,7 @@ protected:
*/
virtual std::unique_ptr<CoveredInterval> nextInterval(OperationContext* opCtx,
WorkingSet* workingSet,
- const Collection* collection) = 0;
+ const CollectionPtr& collection) = 0;
/**
* Computes the distance value for the given member data, or -1 if the member should not be
diff --git a/src/mongo/db/exec/plan_cache_util.h b/src/mongo/db/exec/plan_cache_util.h
index 3bb25315724..14946877777 100644
--- a/src/mongo/db/exec/plan_cache_util.h
+++ b/src/mongo/db/exec/plan_cache_util.h
@@ -80,7 +80,7 @@ void logNotCachingNoData(std::string&& solution);
template <typename PlanStageType, typename ResultType, typename Data>
void updatePlanCache(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
PlanCachingMode cachingMode,
const CanonicalQuery& query,
std::unique_ptr<plan_ranker::PlanRankingDecision> ranking,
diff --git a/src/mongo/db/exec/plan_stage.h b/src/mongo/db/exec/plan_stage.h
index 4392837219c..69c048ec8f7 100644
--- a/src/mongo/db/exec/plan_stage.h
+++ b/src/mongo/db/exec/plan_stage.h
@@ -41,6 +41,7 @@ namespace mongo {
class ClockSource;
class Collection;
+class CollectionPtr;
class OperationContext;
class RecordId;
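The hunk above adds a forward declaration of CollectionPtr to plan_stage.h instead of pulling in the full collection header. A forward declaration is sufficient because the header only names the type in reference parameters; only a data member or a use of the type's members would force the complete definition. A minimal sketch of the rule (illustrative names, not MongoDB's real headers):

    // Illustration only: why `class CollectionPtr;` is enough in a header.
    class CollectionPtr;  // forward declaration: name known, layout not

    class StageSketch {
    public:
        explicit StageSketch(const CollectionPtr& coll);  // OK: reference only
    private:
        // CollectionPtr _coll;  // ill-formed: a member needs the complete type
    };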
diff --git a/src/mongo/db/exec/record_store_fast_count.cpp b/src/mongo/db/exec/record_store_fast_count.cpp
index c454a1af19f..f810b55dc79 100644
--- a/src/mongo/db/exec/record_store_fast_count.cpp
+++ b/src/mongo/db/exec/record_store_fast_count.cpp
@@ -36,7 +36,7 @@ namespace mongo {
const char* RecordStoreFastCountStage::kStageType = "RECORD_STORE_FAST_COUNT";
RecordStoreFastCountStage::RecordStoreFastCountStage(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
long long skip,
long long limit)
: RequiresCollectionStage(kStageType, expCtx, collection), _skip(skip), _limit(limit) {
diff --git a/src/mongo/db/exec/record_store_fast_count.h b/src/mongo/db/exec/record_store_fast_count.h
index 1a73829afa2..883e6e843d2 100644
--- a/src/mongo/db/exec/record_store_fast_count.h
+++ b/src/mongo/db/exec/record_store_fast_count.h
@@ -43,7 +43,7 @@ public:
static const char* kStageType;
RecordStoreFastCountStage(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
long long skip,
long long limit);
diff --git a/src/mongo/db/exec/requires_all_indices_stage.h b/src/mongo/db/exec/requires_all_indices_stage.h
index 15beac52ebf..06d3a72ae55 100644
--- a/src/mongo/db/exec/requires_all_indices_stage.h
+++ b/src/mongo/db/exec/requires_all_indices_stage.h
@@ -43,7 +43,7 @@ class RequiresAllIndicesStage : public RequiresCollectionStage {
public:
RequiresAllIndicesStage(const char* stageType,
ExpressionContext* expCtx,
- const Collection* coll)
+ const CollectionPtr& coll)
: RequiresCollectionStage(stageType, expCtx, coll) {
auto allEntriesShared = coll->getIndexCatalog()->getAllReadyEntriesShared();
_indexCatalogEntries.reserve(allEntriesShared.size());
diff --git a/src/mongo/db/exec/requires_collection_stage.cpp b/src/mongo/db/exec/requires_collection_stage.cpp
index 92a4a1e0279..c4b726d08cb 100644
--- a/src/mongo/db/exec/requires_collection_stage.cpp
+++ b/src/mongo/db/exec/requires_collection_stage.cpp
@@ -37,7 +37,7 @@ void RequiresCollectionStage::doSaveState() {
doSaveStateRequiresCollection();
// A stage may not access storage while in a saved state.
- _collection = nullptr;
+ _collection = CollectionPtr();
}
void RequiresCollectionStage::doRestoreState() {
@@ -64,6 +64,9 @@ void RequiresCollectionStage::doRestoreState() {
// restored locks on the correct name. It is now safe to restore the Collection pointer. The
// collection must exist, since we already successfully looked up the namespace string by UUID
// under the correct lock manager locks.
+ // TODO SERVER-51115: We can't have every instance of RequiresCollectionStage do a catalog
+ // lookup with lock-free reads. If we have multiple instances within a single executor, they
+ // might get different pointers.
_collection = catalog.lookupCollectionByUUID(opCtx(), _collectionUUID);
invariant(_collection);
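Taken together, doSaveState() and doRestoreState() above define the yield protocol for collection-holding stages: the CollectionPtr is dropped entirely while the stage is saved, then re-resolved from the catalog by UUID once locks are reacquired. A self-contained toy of that shape (stand-in types, invented for illustration):

    #include <cassert>

    struct Collection {};                        // toy stand-in
    struct CatalogSketch {
        Collection fixed;
        const Collection* lookupByUUID(int) const { return &fixed; }
    };

    struct StageSketch {
        const CatalogSketch* catalog;
        int collectionUUID;
        const Collection* collection = nullptr;

        void saveState() { collection = nullptr; }  // no storage access while yielded
        void restoreState() {
            // Re-resolve under the re-acquired locks; must succeed because the
            // namespace was already validated, mirroring the invariant above.
            collection = catalog->lookupByUUID(collectionUUID);
            assert(collection);
        }
    };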
diff --git a/src/mongo/db/exec/requires_collection_stage.h b/src/mongo/db/exec/requires_collection_stage.h
index eb9498f5e04..bbe6ea76d97 100644
--- a/src/mongo/db/exec/requires_collection_stage.h
+++ b/src/mongo/db/exec/requires_collection_stage.h
@@ -51,9 +51,9 @@ class RequiresCollectionStage : public PlanStage {
public:
RequiresCollectionStage(const char* stageType,
ExpressionContext* expCtx,
- const Collection* coll)
+ const CollectionPtr& coll)
: PlanStage(stageType, expCtx),
- _collection(coll),
+ _collection(coll.detached()),
_collectionUUID(_collection->uuid()),
_catalogEpoch(getCatalogEpoch()),
_nss(_collection->ns()) {
@@ -77,7 +77,7 @@ protected:
*/
virtual void doRestoreStateRequiresCollection() = 0;
- const Collection* collection() const {
+ const CollectionPtr& collection() const {
return _collection;
}
@@ -91,7 +91,7 @@ private:
return CollectionCatalog::get(opCtx()).getEpoch();
}
- const Collection* _collection;
+ CollectionPtr _collection;
const UUID _collectionUUID;
const uint64_t _catalogEpoch;
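The member itself changes from a raw `const Collection*` to an owned CollectionPtr, initialized from the caller's reference via detached(). The exact semantics of detached() are not shown in this diff; the pattern implied is a handle that can be copied out of a scope-bound reference so the stage may outlive the caller's binding. A toy sketch under that assumption:

    // Assumption: detached() yields a copy safe to store beyond the caller's
    // scope. This is an illustration, not CollectionPtr's real definition.
    struct HandleSketch {
        const void* raw = nullptr;
        HandleSketch detached() const { return HandleSketch{raw}; }
        explicit operator bool() const { return raw != nullptr; }
    };

    struct RequiresCollectionSketch {
        HandleSketch _collection;
        explicit RequiresCollectionSketch(const HandleSketch& coll)
            : _collection(coll.detached()) {}  // own a copy, not the reference
    };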
diff --git a/src/mongo/db/exec/requires_index_stage.cpp b/src/mongo/db/exec/requires_index_stage.cpp
index f1b43466822..9b7115b4ccc 100644
--- a/src/mongo/db/exec/requires_index_stage.cpp
+++ b/src/mongo/db/exec/requires_index_stage.cpp
@@ -35,7 +35,7 @@ namespace mongo {
RequiresIndexStage::RequiresIndexStage(const char* stageType,
ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexDescriptor* indexDescriptor,
WorkingSet* workingSet)
: RequiresCollectionStage(stageType, expCtx, collection),
diff --git a/src/mongo/db/exec/requires_index_stage.h b/src/mongo/db/exec/requires_index_stage.h
index cd3ba14fe4e..37257e98084 100644
--- a/src/mongo/db/exec/requires_index_stage.h
+++ b/src/mongo/db/exec/requires_index_stage.h
@@ -37,7 +37,7 @@ namespace mongo {
/**
* A base class for plan stages which require access to a particular index within a particular
- * collection. Provides subclasses access to the index's const Collection*, as well as to catalog
+ * collection. Provides subclasses access to the collection as a const CollectionPtr&, as well as to catalog
* types representing the index itself such as the IndexDescriptor. This base class is responsible
* for checking that the collection and index are still valid (e.g. have not been dropped) when
* recovering from yield.
@@ -49,7 +49,7 @@ class RequiresIndexStage : public RequiresCollectionStage {
public:
RequiresIndexStage(const char* stageType,
ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexDescriptor* indexDescriptor,
WorkingSet* workingSet);
diff --git a/src/mongo/db/exec/sbe/parser/parser.cpp b/src/mongo/db/exec/sbe/parser/parser.cpp
index 6be9d63afbf..f650ac6ed1c 100644
--- a/src/mongo/db/exec/sbe/parser/parser.cpp
+++ b/src/mongo/db/exec/sbe/parser/parser.cpp
@@ -532,8 +532,7 @@ void Parser::walkScan(AstQuery& ast) {
}
NamespaceString nssColl{dbName, collName};
- AutoGetCollectionForRead ctxColl(_opCtx, nssColl);
- auto collection = ctxColl.getCollection();
+ AutoGetCollectionForRead collection(_opCtx, nssColl);
NamespaceStringOrUUID name =
collection ? NamespaceStringOrUUID{dbName, collection->uuid()} : nssColl;
const auto forward = (ast.nodes[forwardPos]->token == "true") ? true : false;
@@ -576,8 +575,7 @@ void Parser::walkParallelScan(AstQuery& ast) {
}
NamespaceString nssColl{dbName, collName};
- AutoGetCollectionForRead ctxColl(_opCtx, nssColl);
- auto collection = ctxColl.getCollection();
+ AutoGetCollectionForRead collection(_opCtx, nssColl);
NamespaceStringOrUUID name =
collection ? NamespaceStringOrUUID{dbName, collection->uuid()} : nssColl;
@@ -616,8 +614,7 @@ void Parser::walkSeek(AstQuery& ast) {
}
NamespaceString nssColl{dbName, collName};
- AutoGetCollectionForRead ctxColl(_opCtx, nssColl);
- auto collection = ctxColl.getCollection();
+ AutoGetCollectionForRead collection(_opCtx, nssColl);
NamespaceStringOrUUID name =
collection ? NamespaceStringOrUUID{dbName, collection->uuid()} : nssColl;
@@ -667,8 +664,7 @@ void Parser::walkIndexScan(AstQuery& ast) {
}
NamespaceString nssColl{dbName, collName};
- AutoGetCollectionForRead ctxColl(_opCtx, nssColl);
- auto collection = ctxColl.getCollection();
+ AutoGetCollectionForRead collection(_opCtx, nssColl);
NamespaceStringOrUUID name =
collection ? NamespaceStringOrUUID{dbName, collection->uuid()} : nssColl;
const auto forward = (ast.nodes[forwardPos]->token == "true") ? true : false;
@@ -724,8 +720,7 @@ void Parser::walkIndexSeek(AstQuery& ast) {
}
NamespaceString nssColl{dbName, collName};
- AutoGetCollectionForRead ctxColl(_opCtx, nssColl);
- auto collection = ctxColl.getCollection();
+ AutoGetCollectionForRead collection(_opCtx, nssColl);
NamespaceStringOrUUID name =
collection ? NamespaceStringOrUUID{dbName, collection->uuid()} : nssColl;
const auto forward = (ast.nodes[forwardPos]->token == "true") ? true : false;
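All five parser call sites collapse the same two-step pattern, `AutoGetCollectionForRead ctxColl(...)` plus `getCollection()`, into using the RAII object directly: the surrounding lines show it being tested in boolean context and dereferenced with `->`. A toy stand-in for that interface (the real class also acquires locks in its constructor):

    #include <string>

    struct Collection {
        std::string uuid() const { return "<uuid>"; }  // toy stand-in
    };

    // Illustration of the interface the refactor relies on: the guard itself
    // converts to bool and forwards operator-> to the collection it holds.
    struct AutoGetForReadSketch {
        const Collection* coll = nullptr;   // resolved under locks in the ctor
        explicit operator bool() const { return coll != nullptr; }
        const Collection* operator->() const { return coll; }
    };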
diff --git a/src/mongo/db/exec/sbe/stages/ix_scan.cpp b/src/mongo/db/exec/sbe/stages/ix_scan.cpp
index 5c6a6051839..68c8c6c44cc 100644
--- a/src/mongo/db/exec/sbe/stages/ix_scan.cpp
+++ b/src/mongo/db/exec/sbe/stages/ix_scan.cpp
@@ -178,7 +178,7 @@ void IndexScanStage::open(bool reOpen) {
_open = true;
_firstGetNext = true;
- if (auto collection = _coll->getCollection()) {
+ if (const auto& collection = _coll->getCollection()) {
auto indexCatalog = collection->getIndexCatalog();
auto indexDesc = indexCatalog->findIndexByName(_opCtx, _indexName);
if (indexDesc) {
diff --git a/src/mongo/db/exec/sbe/stages/scan.cpp b/src/mongo/db/exec/sbe/stages/scan.cpp
index 967c94e1a37..7c3f3e5c29c 100644
--- a/src/mongo/db/exec/sbe/stages/scan.cpp
+++ b/src/mongo/db/exec/sbe/stages/scan.cpp
@@ -178,7 +178,7 @@ void ScanStage::open(bool reOpen) {
_openCallback(_opCtx, _coll->getCollection(), reOpen);
}
- if (auto collection = _coll->getCollection()) {
+ if (const auto& collection = _coll->getCollection()) {
if (_seekKeyAccessor) {
auto [tag, val] = _seekKeyAccessor->getViewOfValue();
const auto msgTag = tag;
@@ -453,7 +453,7 @@ void ParallelScanStage::open(bool reOpen) {
uassertStatusOK(repl::ReplicationCoordinator::get(_opCtx)->checkCanServeReadsFor(
_opCtx, _coll->getNss(), true));
- auto collection = _coll->getCollection();
+ const auto& collection = _coll->getCollection();
if (collection) {
{
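The repeated change from `auto collection = ...` to `const auto& collection = ...` in these SBE stages avoids copying the returned handle: if getCollection() returns a `const CollectionPtr&`, plain `auto` deduces a value type and copies, while `const auto&` binds to the existing object. A compilable demonstration with a toy handle:

    struct Handle {
        int copies = 0;
        Handle() = default;
        Handle(const Handle& other) : copies(other.copies + 1) {}
    };

    struct Holder {
        Handle h;
        const Handle& get() const { return h; }
    };

    // Holder holder;
    // auto a = holder.get();         // copies: a.copies == 1
    // const auto& b = holder.get();  // binds, no copy made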
diff --git a/src/mongo/db/exec/sbe/stages/scan.h b/src/mongo/db/exec/sbe/stages/scan.h
index 0566c0d4f61..5c071ff32a6 100644
--- a/src/mongo/db/exec/sbe/stages/scan.h
+++ b/src/mongo/db/exec/sbe/stages/scan.h
@@ -37,7 +37,7 @@
namespace mongo {
namespace sbe {
-using ScanOpenCallback = std::function<void(OperationContext*, const Collection*, bool)>;
+using ScanOpenCallback = std::function<void(OperationContext*, const CollectionPtr&, bool)>;
class ScanStage final : public PlanStage {
public:
diff --git a/src/mongo/db/exec/sort_key_generator.h b/src/mongo/db/exec/sort_key_generator.h
index 2679902dd2e..f3196861947 100644
--- a/src/mongo/db/exec/sort_key_generator.h
+++ b/src/mongo/db/exec/sort_key_generator.h
@@ -42,6 +42,7 @@ namespace mongo {
class CollatorInterface;
class Collection;
+class CollectionPtr;
class WorkingSetMember;
/**
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 06dd3532fdc..856d2d527c4 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -148,14 +148,14 @@ public:
auto expCtx = make_intrusive<ExpressionContext>(
opCtx, std::unique_ptr<CollatorInterface>(nullptr), nss);
- // Need a context to get the actual const Collection*
+ // Need a context to get the actual const CollectionPtr&
// TODO A write lock is currently taken here to accommodate stages that perform writes
// (e.g. DeleteStage). This should be changed to use a read lock for read-only
// execution trees.
AutoGetCollection autoColl(opCtx, nss, MODE_IX);
// Make sure the collection is valid.
- const Collection* collection = autoColl.getCollection();
+ const auto& collection = autoColl.getCollection();
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Couldn't find collection " << nss.ns(),
collection);
@@ -201,7 +201,7 @@ public:
}
PlanStage* parseQuery(const boost::intrusive_ptr<ExpressionContext>& expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
BSONObj obj,
WorkingSet* workingSet,
const NamespaceString& nss,
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 667a0cecfec..c97c85d361e 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -57,7 +57,7 @@ using std::vector;
const char* SubplanStage::kStageType = "SUBPLAN";
SubplanStage::SubplanStage(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
WorkingSet* ws,
const QueryPlannerParams& params,
CanonicalQuery* cq)
diff --git a/src/mongo/db/exec/subplan.h b/src/mongo/db/exec/subplan.h
index 6b5d5b7448b..0136954d6a1 100644
--- a/src/mongo/db/exec/subplan.h
+++ b/src/mongo/db/exec/subplan.h
@@ -71,7 +71,7 @@ class OperationContext;
class SubplanStage final : public RequiresAllIndicesStage {
public:
SubplanStage(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
WorkingSet* ws,
const QueryPlannerParams& params,
CanonicalQuery* cq);
diff --git a/src/mongo/db/exec/text.cpp b/src/mongo/db/exec/text.cpp
index 2657556af7e..a6f252d2cc3 100644
--- a/src/mongo/db/exec/text.cpp
+++ b/src/mongo/db/exec/text.cpp
@@ -57,7 +57,7 @@ using fts::MAX_WEIGHT;
const char* TextStage::kStageType = "TEXT";
TextStage::TextStage(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const TextStageParams& params,
WorkingSet* ws,
const MatchExpression* filter)
@@ -96,7 +96,7 @@ const SpecificStats* TextStage::getSpecificStats() const {
}
unique_ptr<PlanStage> TextStage::buildTextTree(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
WorkingSet* ws,
const MatchExpression* filter,
bool wantTextScore) const {
diff --git a/src/mongo/db/exec/text.h b/src/mongo/db/exec/text.h
index 9c0c54baadd..040ecb9f424 100644
--- a/src/mongo/db/exec/text.h
+++ b/src/mongo/db/exec/text.h
@@ -74,7 +74,7 @@ struct TextStageParams {
class TextStage final : public PlanStage {
public:
TextStage(ExpressionContext* expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const TextStageParams& params,
WorkingSet* ws,
const MatchExpression* filter);
@@ -97,7 +97,7 @@ private:
* Helper method to build the query execution plan for the text stage.
*/
std::unique_ptr<PlanStage> buildTextTree(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
WorkingSet* ws,
const MatchExpression* filter,
bool wantTextScore) const;
diff --git a/src/mongo/db/exec/text_or.cpp b/src/mongo/db/exec/text_or.cpp
index 75e90e278cc..23e6d0fcbc5 100644
--- a/src/mongo/db/exec/text_or.cpp
+++ b/src/mongo/db/exec/text_or.cpp
@@ -56,7 +56,7 @@ TextOrStage::TextOrStage(ExpressionContext* expCtx,
const FTSSpec& ftsSpec,
WorkingSet* ws,
const MatchExpression* filter,
- const Collection* collection)
+ const CollectionPtr& collection)
: RequiresCollectionStage(kStageType, expCtx, collection),
_ftsSpec(ftsSpec),
_ws(ws),
diff --git a/src/mongo/db/exec/text_or.h b/src/mongo/db/exec/text_or.h
index 8b57b2f07e7..2e358bf71ed 100644
--- a/src/mongo/db/exec/text_or.h
+++ b/src/mongo/db/exec/text_or.h
@@ -71,7 +71,7 @@ public:
const FTSSpec& ftsSpec,
WorkingSet* ws,
const MatchExpression* filter,
- const Collection* collection);
+ const CollectionPtr& collection);
void addChild(std::unique_ptr<PlanStage> child);
diff --git a/src/mongo/db/exec/trial_period_utils.cpp b/src/mongo/db/exec/trial_period_utils.cpp
index 759e41b3577..4369ff40a08 100644
--- a/src/mongo/db/exec/trial_period_utils.cpp
+++ b/src/mongo/db/exec/trial_period_utils.cpp
@@ -34,11 +34,11 @@
#include "mongo/db/catalog/collection.h"
namespace mongo::trial_period {
-size_t getTrialPeriodMaxWorks(OperationContext* opCtx, const Collection* collection) {
+size_t getTrialPeriodMaxWorks(OperationContext* opCtx, const CollectionPtr& collection) {
// Run each plan some number of times. This number is at least as great as
// 'internalQueryPlanEvaluationWorks', but may be larger for big collections.
size_t numWorks = internalQueryPlanEvaluationWorks.load();
- if (nullptr != collection) {
+ if (collection) {
// For large collections, the number of works is set to be this fraction of the collection
// size.
double fraction = internalQueryPlanEvaluationCollFraction;
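Per the comments in this hunk, the trial budget is the fixed 'internalQueryPlanEvaluationWorks' knob, raised to a fraction of the record count for large collections. A hedged arithmetic sketch of that calculation (knob values invented; the continuation of the real function is outside this hunk):

    #include <algorithm>
    #include <cstddef>

    size_t trialPeriodMaxWorksSketch(size_t knobWorks,       // e.g. 10000
                                     double collFraction,    // e.g. 0.3
                                     long long numRecords) {
        size_t numWorks = knobWorks;
        if (numRecords > 0) {
            // Large collections: allow this fraction of the record count.
            numWorks = std::max(
                numWorks, static_cast<size_t>(collFraction * numRecords));
        }
        return numWorks;
    }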
diff --git a/src/mongo/db/exec/trial_period_utils.h b/src/mongo/db/exec/trial_period_utils.h
index 4919e8be1c9..609d5f3b484 100644
--- a/src/mongo/db/exec/trial_period_utils.h
+++ b/src/mongo/db/exec/trial_period_utils.h
@@ -33,6 +33,7 @@
namespace mongo {
class Collection;
+class CollectionPtr;
namespace trial_period {
/**
@@ -40,7 +41,7 @@ namespace trial_period {
*
* Calculated based on a fixed query knob and the size of the collection.
*/
-size_t getTrialPeriodMaxWorks(OperationContext* opCtx, const Collection* collection);
+size_t getTrialPeriodMaxWorks(OperationContext* opCtx, const CollectionPtr& collection);
/**
* Returns the max number of documents which we should allow any plan to return during the
diff --git a/src/mongo/db/exec/update_stage.cpp b/src/mongo/db/exec/update_stage.cpp
index 3f0340c66b9..5cecfb72314 100644
--- a/src/mongo/db/exec/update_stage.cpp
+++ b/src/mongo/db/exec/update_stage.cpp
@@ -109,7 +109,7 @@ CollectionUpdateArgs::StoreDocOption getStoreDocMode(const UpdateRequest& update
UpdateStage::UpdateStage(ExpressionContext* expCtx,
const UpdateStageParams& params,
WorkingSet* ws,
- const Collection* collection,
+ const CollectionPtr& collection,
PlanStage* child)
: UpdateStage(expCtx, params, ws, collection) {
// We should never reach here if the request is an upsert.
@@ -121,7 +121,7 @@ UpdateStage::UpdateStage(ExpressionContext* expCtx,
UpdateStage::UpdateStage(ExpressionContext* expCtx,
const UpdateStageParams& params,
WorkingSet* ws,
- const Collection* collection)
+ const CollectionPtr& collection)
: RequiresMutableCollectionStage(kStageType.rawData(), expCtx, collection),
_params(params),
_ws(ws),
diff --git a/src/mongo/db/exec/update_stage.h b/src/mongo/db/exec/update_stage.h
index 7fc304f776c..d99fb902854 100644
--- a/src/mongo/db/exec/update_stage.h
+++ b/src/mongo/db/exec/update_stage.h
@@ -86,7 +86,7 @@ public:
UpdateStage(ExpressionContext* expCtx,
const UpdateStageParams& params,
WorkingSet* ws,
- const Collection* collection,
+ const CollectionPtr& collection,
PlanStage* child);
bool isEOF() override;
@@ -104,7 +104,7 @@ protected:
UpdateStage(ExpressionContext* expCtx,
const UpdateStageParams& params,
WorkingSet* ws,
- const Collection* collection);
+ const CollectionPtr& collection);
void doSaveStateRequiresCollection() final {}
diff --git a/src/mongo/db/exec/upsert_stage.cpp b/src/mongo/db/exec/upsert_stage.cpp
index 4bf02125604..4f0fa990c23 100644
--- a/src/mongo/db/exec/upsert_stage.cpp
+++ b/src/mongo/db/exec/upsert_stage.cpp
@@ -51,7 +51,7 @@ const FieldRef idFieldRef(idFieldName);
UpsertStage::UpsertStage(ExpressionContext* expCtx,
const UpdateStageParams& params,
WorkingSet* ws,
- const Collection* collection,
+ const CollectionPtr& collection,
PlanStage* child)
: UpdateStage(expCtx, params, ws, collection) {
// We should never create this stage for a non-upsert request.
diff --git a/src/mongo/db/exec/upsert_stage.h b/src/mongo/db/exec/upsert_stage.h
index 7735d5b996b..e6d0ab0f0f8 100644
--- a/src/mongo/db/exec/upsert_stage.h
+++ b/src/mongo/db/exec/upsert_stage.h
@@ -54,7 +54,7 @@ public:
UpsertStage(ExpressionContext* expCtx,
const UpdateStageParams& params,
WorkingSet* ws,
- const Collection* collection,
+ const CollectionPtr& collection,
PlanStage* child);
bool isEOF() final;
diff --git a/src/mongo/db/exec/write_stage_common.cpp b/src/mongo/db/exec/write_stage_common.cpp
index 301ced6f3a5..b060d57ab45 100644
--- a/src/mongo/db/exec/write_stage_common.cpp
+++ b/src/mongo/db/exec/write_stage_common.cpp
@@ -39,7 +39,7 @@
namespace mongo {
namespace write_stage_common {
-bool ensureStillMatches(const Collection* collection,
+bool ensureStillMatches(const CollectionPtr& collection,
OperationContext* opCtx,
WorkingSet* ws,
WorkingSetID id,
diff --git a/src/mongo/db/exec/write_stage_common.h b/src/mongo/db/exec/write_stage_common.h
index 1d3934443e6..f413745dfa6 100644
--- a/src/mongo/db/exec/write_stage_common.h
+++ b/src/mongo/db/exec/write_stage_common.h
@@ -37,6 +37,7 @@ namespace mongo {
class CanonicalQuery;
class Collection;
+class CollectionPtr;
class OperationContext;
namespace write_stage_common {
@@ -49,7 +50,7 @@ namespace write_stage_common {
* May throw a WriteConflictException if there was a conflict while searching to see if the document
* still exists.
*/
-bool ensureStillMatches(const Collection* collection,
+bool ensureStillMatches(const CollectionPtr& collection,
OperationContext* opCtx,
WorkingSet* ws,
WorkingSetID id,
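Per the doc comment above, ensureStillMatches re-checks, after a yield, that the document referenced by the working-set member still exists and still satisfies the query, and may throw WriteConflictException while doing so. A hedged sketch of that contract (the real implementation in write_stage_common.cpp may differ in detail):

    // Toy shape of the contract: re-fetch the record, then re-apply the filter.
    template <typename RefetchFn, typename MatchFn>
    bool ensureStillMatchesSketch(RefetchFn refetch, MatchFn matches) {
        auto doc = refetch();   // may throw on a write conflict
        if (!doc) {
            return false;       // document no longer exists
        }
        return matches(*doc);   // document must still match the query
    }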
diff --git a/src/mongo/db/fcv_op_observer.h b/src/mongo/db/fcv_op_observer.h
index 374c4387dc1..1be55f57d07 100644
--- a/src/mongo/db/fcv_op_observer.h
+++ b/src/mongo/db/fcv_op_observer.h
@@ -112,7 +112,7 @@ public:
const boost::optional<repl::OpTime> prevWriteOpTimeInTransaction,
const boost::optional<OplogSlot> slot) final {}
void onCreateCollection(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
diff --git a/src/mongo/db/free_mon/free_mon_op_observer.h b/src/mongo/db/free_mon/free_mon_op_observer.h
index 34496b448c9..5f6a9b912c0 100644
--- a/src/mongo/db/free_mon/free_mon_op_observer.h
+++ b/src/mongo/db/free_mon/free_mon_op_observer.h
@@ -106,7 +106,7 @@ public:
const boost::optional<OplogSlot> slot) final{};
void onCreateCollection(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
diff --git a/src/mongo/db/index/haystack_access_method.h b/src/mongo/db/index/haystack_access_method.h
index 0507887132d..f4aaa6a39a8 100644
--- a/src/mongo/db/index/haystack_access_method.h
+++ b/src/mongo/db/index/haystack_access_method.h
@@ -37,6 +37,7 @@
namespace mongo {
class Collection;
+class CollectionPtr;
class OperationContext;
/**
diff --git a/src/mongo/db/index/haystack_access_method_internal.h b/src/mongo/db/index/haystack_access_method_internal.h
index 18fe86ad0fb..58ce3d6a6c4 100644
--- a/src/mongo/db/index/haystack_access_method_internal.h
+++ b/src/mongo/db/index/haystack_access_method_internal.h
@@ -55,7 +55,7 @@ public:
double maxDistance,
unsigned limit,
const std::string& geoField,
- const Collection* collection)
+ const CollectionPtr& collection)
: _opCtx(opCtx),
_collection(collection),
_near(nearObj),
@@ -87,7 +87,7 @@ public:
private:
OperationContext* _opCtx;
- const Collection* _collection;
+ const CollectionPtr& _collection;
Point _near;
double _maxDistance;
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index 1a510c09b49..6080125c723 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -121,7 +121,7 @@ AbstractIndexAccessMethod::AbstractIndexAccessMethod(IndexCatalogEntry* btreeSta
// Find the keys for obj, put them in the tree pointing to loc.
Status AbstractIndexAccessMethod::insert(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const BSONObj& obj,
const RecordId& loc,
const InsertDeleteOptions& options,
@@ -158,7 +158,7 @@ Status AbstractIndexAccessMethod::insert(OperationContext* opCtx,
Status AbstractIndexAccessMethod::insertKeysAndUpdateMultikeyPaths(
OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const KeyStringSet& keys,
const KeyStringSet& multikeyMetadataKeys,
const MultikeyPaths& multikeyPaths,
@@ -185,7 +185,7 @@ Status AbstractIndexAccessMethod::insertKeysAndUpdateMultikeyPaths(
}
Status AbstractIndexAccessMethod::insertKeys(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const KeyStringSet& keys,
const RecordId& loc,
const InsertDeleteOptions& options,
@@ -422,7 +422,7 @@ void AbstractIndexAccessMethod::prepareUpdate(OperationContext* opCtx,
}
Status AbstractIndexAccessMethod::update(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const UpdateTicket& ticket,
int64_t* numInserted,
int64_t* numDeleted) {
@@ -775,7 +775,7 @@ Status AbstractIndexAccessMethod::commitBulk(OperationContext* opCtx,
}
void AbstractIndexAccessMethod::setIndexIsMultikey(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
KeyStringSet multikeyMetadataKeys,
MultikeyPaths paths) {
_indexCatalogEntry->setMultikey(opCtx, collection, multikeyMetadataKeys, paths);
diff --git a/src/mongo/db/index/index_access_method.h b/src/mongo/db/index/index_access_method.h
index 3d57dcb901a..15d32be7f1b 100644
--- a/src/mongo/db/index/index_access_method.h
+++ b/src/mongo/db/index/index_access_method.h
@@ -87,7 +87,7 @@ public:
* The behavior of the insertion can be specified through 'options'.
*/
virtual Status insert(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const BSONObj& obj,
const RecordId& loc,
const InsertDeleteOptions& options,
@@ -100,7 +100,7 @@ public:
* multikey in the catalog, and sets the path-level multikey information if applicable.
*/
virtual Status insertKeysAndUpdateMultikeyPaths(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const KeyStringSet& keys,
const KeyStringSet& multikeyMetadataKeys,
const MultikeyPaths& multikeyPaths,
@@ -114,7 +114,7 @@ public:
* insertion of these keys should cause the index to become multikey.
*/
virtual Status insertKeys(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const KeyStringSet& keys,
const RecordId& loc,
const InsertDeleteOptions& options,
@@ -155,7 +155,7 @@ public:
* 'numDeleted' will be set to the number of keys removed from the index for the document.
*/
virtual Status update(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const UpdateTicket& ticket,
int64_t* numInserted,
int64_t* numDeleted) = 0;
@@ -215,7 +215,7 @@ public:
* Sets this index as multikey with the provided paths.
*/
virtual void setIndexIsMultikey(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
KeyStringSet multikeyMetadataKeys,
MultikeyPaths paths) = 0;
@@ -454,7 +454,7 @@ public:
std::unique_ptr<SortedDataInterface> btree);
Status insert(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const BSONObj& obj,
const RecordId& loc,
const InsertDeleteOptions& options,
@@ -462,7 +462,7 @@ public:
int64_t* numInserted) final;
Status insertKeys(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const KeyStringSet& keys,
const RecordId& loc,
const InsertDeleteOptions& options,
@@ -470,7 +470,7 @@ public:
int64_t* numInserted) final;
Status insertKeysAndUpdateMultikeyPaths(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const KeyStringSet& keys,
const KeyStringSet& multikeyMetadataKeys,
const MultikeyPaths& multikeyPaths,
@@ -494,7 +494,7 @@ public:
UpdateTicket* ticket) const final;
Status update(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const UpdateTicket& ticket,
int64_t* numInserted,
int64_t* numDeleted) final;
@@ -520,7 +520,7 @@ public:
Status compact(OperationContext* opCtx) final;
void setIndexIsMultikey(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
KeyStringSet multikeyMetadataKeys,
MultikeyPaths paths) final;
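The update path in this header is two-phase: prepareUpdate() fills an UpdateTicket with the key delta, and update() applies it, reporting churn through the numInserted/numDeleted out-parameters described in the doc comment. A toy sketch of that flow (types are stand-ins, not the real UpdateTicket):

    #include <cstdint>

    struct UpdateTicketSketch {
        int keysToDelete = 0;  // filled by a prepare step (elided)
        int keysToInsert = 0;
    };

    void applyUpdateSketch(const UpdateTicketSketch& ticket,
                           int64_t* numInserted,
                           int64_t* numDeleted) {
        // Real code removes stale keys and inserts new ones in the index.
        *numDeleted = ticket.keysToDelete;
        *numInserted = ticket.keysToInsert;
    }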
diff --git a/src/mongo/db/index/index_build_interceptor.cpp b/src/mongo/db/index/index_build_interceptor.cpp
index 6a897960e60..e81ea389524 100644
--- a/src/mongo/db/index/index_build_interceptor.cpp
+++ b/src/mongo/db/index/index_build_interceptor.cpp
@@ -150,7 +150,7 @@ Status IndexBuildInterceptor::checkDuplicateKeyConstraints(OperationContext* opC
}
Status IndexBuildInterceptor::drainWritesIntoIndex(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const InsertDeleteOptions& options,
TrackDuplicates trackDuplicates,
DrainYieldPolicy drainYieldPolicy) {
@@ -307,7 +307,7 @@ Status IndexBuildInterceptor::drainWritesIntoIndex(OperationContext* opCtx,
}
Status IndexBuildInterceptor::_applyWrite(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const BSONObj& operation,
const InsertDeleteOptions& options,
TrackDuplicates trackDups,
@@ -531,7 +531,7 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx,
}
Status IndexBuildInterceptor::retrySkippedRecords(OperationContext* opCtx,
- const Collection* collection) {
+ const CollectionPtr& collection) {
return _skippedRecordTracker.retrySkippedRecords(opCtx, collection);
}
diff --git a/src/mongo/db/index/index_build_interceptor.h b/src/mongo/db/index/index_build_interceptor.h
index 2ea3e61ebfa..2eb571650d9 100644
--- a/src/mongo/db/index/index_build_interceptor.h
+++ b/src/mongo/db/index/index_build_interceptor.h
@@ -132,7 +132,7 @@ public:
* following the last inserted record from a previous call to drainWritesIntoIndex.
*/
Status drainWritesIntoIndex(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const InsertDeleteOptions& options,
TrackDuplicates trackDups,
DrainYieldPolicy drainYieldPolicy);
@@ -150,7 +150,7 @@ public:
* successful, keys are written directly to the index. Unsuccessful key generation or writes
* will return errors.
*/
- Status retrySkippedRecords(OperationContext* opCtx, const Collection* collection);
+ Status retrySkippedRecords(OperationContext* opCtx, const CollectionPtr& collection);
/**
* Returns 'true' if there are no visible records remaining to be applied from the side writes
@@ -179,7 +179,7 @@ private:
void _initializeMultiKeyPaths(IndexCatalogEntry* entry);
Status _applyWrite(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const BSONObj& doc,
const InsertDeleteOptions& options,
TrackDuplicates trackDups,
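drainWritesIntoIndex, per its comment, applies the writes buffered in the side-writes table and resumes after the last record applied by the previous drain. A hedged sketch of such a drain loop with toy types (the real interceptor also handles yielding and duplicate tracking):

    #include <deque>

    struct SideWriteSketch { long long recordId; };

    bool drainSketch(std::deque<SideWriteSketch>& sideWrites, long long& lastApplied) {
        while (!sideWrites.empty()) {
            SideWriteSketch w = sideWrites.front();
            sideWrites.pop_front();
            // _applyWrite(...) against the index in the real code; elided.
            lastApplied = w.recordId;  // resume point for the next drain
        }
        return true;  // fully drained (no visible records remain)
    }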
diff --git a/src/mongo/db/index/index_descriptor.cpp b/src/mongo/db/index/index_descriptor.cpp
index f998864355e..dec4d7bb7e3 100644
--- a/src/mongo/db/index/index_descriptor.cpp
+++ b/src/mongo/db/index/index_descriptor.cpp
@@ -99,7 +99,7 @@ constexpr StringData IndexDescriptor::kUniqueFieldName;
constexpr StringData IndexDescriptor::kHiddenFieldName;
constexpr StringData IndexDescriptor::kWeightsFieldName;
-IndexDescriptor::IndexDescriptor(const Collection* collection,
+IndexDescriptor::IndexDescriptor(const CollectionPtr& collection,
const std::string& accessMethodName,
BSONObj infoObj)
: _accessMethodName(accessMethodName),
diff --git a/src/mongo/db/index/index_descriptor.h b/src/mongo/db/index/index_descriptor.h
index b4710c8e4b8..ad4c17b6cf6 100644
--- a/src/mongo/db/index/index_descriptor.h
+++ b/src/mongo/db/index/index_descriptor.h
@@ -43,6 +43,7 @@
namespace mongo {
class Collection;
+class CollectionPtr;
class IndexCatalogEntry;
class IndexCatalogEntryContainer;
class OperationContext;
@@ -95,7 +96,7 @@ public:
/**
* infoObj is a copy of the index-describing BSONObj contained in the catalog.
*/
- IndexDescriptor(const Collection* collection,
+ IndexDescriptor(const CollectionPtr& collection,
const std::string& accessMethodName,
BSONObj infoObj);
diff --git a/src/mongo/db/index/skipped_record_tracker.cpp b/src/mongo/db/index/skipped_record_tracker.cpp
index 34c1eabf431..1a825164ced 100644
--- a/src/mongo/db/index/skipped_record_tracker.cpp
+++ b/src/mongo/db/index/skipped_record_tracker.cpp
@@ -99,7 +99,7 @@ bool SkippedRecordTracker::areAllRecordsApplied(OperationContext* opCtx) const {
}
Status SkippedRecordTracker::retrySkippedRecords(OperationContext* opCtx,
- const Collection* collection) {
+ const CollectionPtr& collection) {
dassert(opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_X));
if (!_skippedRecordsTable) {
return Status::OK();
diff --git a/src/mongo/db/index/skipped_record_tracker.h b/src/mongo/db/index/skipped_record_tracker.h
index 856ebed9197..00112b9fab5 100644
--- a/src/mongo/db/index/skipped_record_tracker.h
+++ b/src/mongo/db/index/skipped_record_tracker.h
@@ -74,7 +74,7 @@ public:
* Attempts to generate keys for each skipped record and insert them into the index. Returns OK if
* all records are either indexed or no longer exist.
*/
- Status retrySkippedRecords(OperationContext* opCtx, const Collection* collection);
+ Status retrySkippedRecords(OperationContext* opCtx, const CollectionPtr& collection);
boost::optional<std::string> getTableIdent() const {
return _skippedRecordsTable ? boost::make_optional(_skippedRecordsTable->rs()->getIdent())
diff --git a/src/mongo/db/index_build_entry_helpers.cpp b/src/mongo/db/index_build_entry_helpers.cpp
index 7a24324cac8..6b4265e8552 100644
--- a/src/mongo/db/index_build_entry_helpers.cpp
+++ b/src/mongo/db/index_build_entry_helpers.cpp
@@ -60,9 +60,8 @@ Status upsert(OperationContext* opCtx, const IndexBuildEntry& indexBuildEntry) {
"upsertIndexBuildEntry",
NamespaceString::kIndexBuildEntryNamespace.ns(),
[&]() -> Status {
- AutoGetCollection autoCollection(
+ AutoGetCollection collection(
opCtx, NamespaceString::kIndexBuildEntryNamespace, MODE_IX);
- const Collection* collection = autoCollection.getCollection();
if (!collection) {
str::stream ss;
ss << "Collection not found: "
@@ -116,9 +115,8 @@ Status upsert(OperationContext* opCtx, const BSONObj& filter, const BSONObj& upd
"upsertIndexBuildEntry",
NamespaceString::kIndexBuildEntryNamespace.ns(),
[&]() -> Status {
- AutoGetCollection autoCollection(
+ AutoGetCollection collection(
opCtx, NamespaceString::kIndexBuildEntryNamespace, MODE_IX);
- const Collection* collection = autoCollection.getCollection();
if (!collection) {
str::stream ss;
ss << "Collection not found: "
@@ -142,9 +140,8 @@ Status update(OperationContext* opCtx, const BSONObj& filter, const BSONObj& upd
"updateIndexBuildEntry",
NamespaceString::kIndexBuildEntryNamespace.ns(),
[&]() -> Status {
- AutoGetCollection autoCollection(
+ AutoGetCollection collection(
opCtx, NamespaceString::kIndexBuildEntryNamespace, MODE_IX);
- const Collection* collection = autoCollection.getCollection();
if (!collection) {
str::stream ss;
ss << "Collection not found: "
@@ -184,7 +181,7 @@ void ensureIndexBuildEntriesNamespaceExists(OperationContext* opCtx) {
opCtx, NamespaceString::kIndexBuildEntryNamespace)) {
WriteUnitOfWork wuow(opCtx);
CollectionOptions defaultCollectionOptions;
- const Collection* collection =
+ CollectionPtr collection =
db->createCollection(opCtx,
NamespaceString::kIndexBuildEntryNamespace,
defaultCollectionOptions);
@@ -223,9 +220,8 @@ Status addIndexBuildEntry(OperationContext* opCtx, const IndexBuildEntry& indexB
"addIndexBuildEntry",
NamespaceString::kIndexBuildEntryNamespace.ns(),
[&]() -> Status {
- AutoGetCollection autoCollection(
+ AutoGetCollection collection(
opCtx, NamespaceString::kIndexBuildEntryNamespace, MODE_IX);
- const Collection* collection = autoCollection.getCollection();
if (!collection) {
str::stream ss;
ss << "Collection not found: " << NamespaceString::kIndexBuildEntryNamespace.ns();
@@ -257,17 +253,18 @@ Status removeIndexBuildEntry(OperationContext* opCtx, UUID indexBuildUUID) {
"removeIndexBuildEntry",
NamespaceString::kIndexBuildEntryNamespace.ns(),
[&]() -> Status {
- AutoGetCollection autoCollection(
+ AutoGetCollection collection(
opCtx, NamespaceString::kIndexBuildEntryNamespace, MODE_IX);
- const Collection* collection = autoCollection.getCollection();
if (!collection) {
str::stream ss;
ss << "Collection not found: " << NamespaceString::kIndexBuildEntryNamespace.ns();
return Status(ErrorCodes::NamespaceNotFound, ss);
}
- RecordId rid = Helpers::findOne(
- opCtx, collection, BSON("_id" << indexBuildUUID), /*requireIndex=*/true);
+ RecordId rid = Helpers::findOne(opCtx,
+ collection.getCollection(),
+ BSON("_id" << indexBuildUUID),
+ /*requireIndex=*/true);
if (rid.isNull()) {
str::stream ss;
ss << "No matching IndexBuildEntry found with indexBuildUUID: " << indexBuildUUID;
@@ -286,8 +283,7 @@ StatusWith<IndexBuildEntry> getIndexBuildEntry(OperationContext* opCtx, UUID ind
// Read the most up to date data.
invariant(RecoveryUnit::ReadSource::kNoTimestamp ==
opCtx->recoveryUnit()->getTimestampReadSource());
- AutoGetCollectionForRead autoCollection(opCtx, NamespaceString::kIndexBuildEntryNamespace);
- const Collection* collection = autoCollection.getCollection();
+ AutoGetCollectionForRead collection(opCtx, NamespaceString::kIndexBuildEntryNamespace);
// Must not be interruptible. This fail point is used to test the scenario where the index
// build's OperationContext is interrupted by an abort, which will subsequently remove index
@@ -301,8 +297,11 @@ StatusWith<IndexBuildEntry> getIndexBuildEntry(OperationContext* opCtx, UUID ind
}
BSONObj obj;
- bool foundObj = Helpers::findOne(
- opCtx, collection, BSON("_id" << indexBuildUUID), obj, /*requireIndex=*/true);
+ bool foundObj = Helpers::findOne(opCtx,
+ collection.getCollection(),
+ BSON("_id" << indexBuildUUID),
+ obj,
+ /*requireIndex=*/true);
if (!foundObj) {
str::stream ss;
ss << "No matching IndexBuildEntry found with indexBuildUUID: " << indexBuildUUID;
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index 07e79bd8dd4..0dce52b94d6 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -118,7 +118,7 @@ void checkShardKeyRestrictions(OperationContext* opCtx,
* bypass the index build registration.
*/
bool shouldBuildIndexesOnEmptyCollectionSinglePhased(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
IndexBuildProtocol protocol) {
const auto& nss = collection->ns();
invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X), str::stream() << nss);
@@ -1436,7 +1436,7 @@ void IndexBuildsCoordinator::_completeSelfAbort(OperationContext* opCtx,
void IndexBuildsCoordinator::_completeAbortForShutdown(
OperationContext* opCtx,
std::shared_ptr<ReplIndexBuildState> replState,
- const Collection* collection) {
+ const CollectionPtr& collection) {
// Leave it as-if kill -9 happened. Startup recovery will restart the index build.
auto isResumable = !replState->lastOpTimeBeforeInterceptors.isNull();
_indexBuildsManager.abortIndexBuildWithoutCleanupForShutdown(
@@ -1984,8 +1984,7 @@ IndexBuildsCoordinator::_filterSpecsAndRegisterBuild(OperationContext* opCtx,
// AutoGetCollection throws an exception if it is unable to look up the collection by UUID.
NamespaceStringOrUUID nssOrUuid{dbName.toString(), collectionUUID};
- AutoGetCollection autoColl(opCtx, nssOrUuid, MODE_X);
- const Collection* collection = autoColl.getCollection();
+ AutoGetCollection collection(opCtx, nssOrUuid, MODE_X);
const auto& nss = collection->ns();
// Disallow index builds on drop-pending namespaces (system.drop.*) if we are primary.
@@ -2008,7 +2007,7 @@ IndexBuildsCoordinator::_filterSpecsAndRegisterBuild(OperationContext* opCtx,
std::vector<BSONObj> filteredSpecs;
try {
- filteredSpecs = prepareSpecListForCreate(opCtx, collection, nss, specs);
+ filteredSpecs = prepareSpecListForCreate(opCtx, collection.getCollection(), nss, specs);
} catch (const DBException& ex) {
return ex.toStatus();
}
@@ -2017,16 +2016,17 @@ IndexBuildsCoordinator::_filterSpecsAndRegisterBuild(OperationContext* opCtx,
// The requested index (specs) are already built or are being built. Return success
// early (this is v4.0 behavior compatible).
ReplIndexBuildState::IndexCatalogStats indexCatalogStats;
- int numIndexes = getNumIndexesTotal(opCtx, collection);
+ int numIndexes = getNumIndexesTotal(opCtx, collection.getCollection());
indexCatalogStats.numIndexesBefore = numIndexes;
indexCatalogStats.numIndexesAfter = numIndexes;
return SharedSemiFuture(indexCatalogStats);
}
// Bypass the thread pool if we are building indexes on an empty collection.
- if (shouldBuildIndexesOnEmptyCollectionSinglePhased(opCtx, collection, protocol)) {
+ if (shouldBuildIndexesOnEmptyCollectionSinglePhased(
+ opCtx, collection.getCollection(), protocol)) {
ReplIndexBuildState::IndexCatalogStats indexCatalogStats;
- indexCatalogStats.numIndexesBefore = getNumIndexesTotal(opCtx, collection);
+ indexCatalogStats.numIndexesBefore = getNumIndexesTotal(opCtx, collection.getCollection());
try {
// Replicate this index build using the old-style createIndexes oplog entry to avoid
// timestamping issues that would result from this empty collection optimization on a
@@ -2044,13 +2044,14 @@ IndexBuildsCoordinator::_filterSpecsAndRegisterBuild(OperationContext* opCtx,
ex.addContext(str::stream() << "index build on empty collection failed: " << buildUUID);
return ex.toStatus();
}
- indexCatalogStats.numIndexesAfter = getNumIndexesTotal(opCtx, collection);
+ indexCatalogStats.numIndexesAfter = getNumIndexesTotal(opCtx, collection.getCollection());
return SharedSemiFuture(indexCatalogStats);
}
auto replIndexBuildState = std::make_shared<ReplIndexBuildState>(
buildUUID, collectionUUID, dbName.toString(), filteredSpecs, protocol);
- replIndexBuildState->stats.numIndexesBefore = getNumIndexesTotal(opCtx, collection);
+ replIndexBuildState->stats.numIndexesBefore =
+ getNumIndexesTotal(opCtx, collection.getCollection());
auto status = _registerIndexBuild(lk, replIndexBuildState);
if (!status.isOK()) {
@@ -2317,7 +2318,7 @@ void runOnAlternateContext(OperationContext* opCtx, std::string name, Func func)
void IndexBuildsCoordinator::_cleanUpSinglePhaseAfterFailure(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::shared_ptr<ReplIndexBuildState> replState,
const IndexBuildOptions& indexBuildOptions,
const Status& status) {
@@ -2345,7 +2346,7 @@ void IndexBuildsCoordinator::_cleanUpSinglePhaseAfterFailure(
void IndexBuildsCoordinator::_cleanUpTwoPhaseAfterFailure(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::shared_ptr<ReplIndexBuildState> replState,
const IndexBuildOptions& indexBuildOptions,
const Status& status) {
@@ -2446,12 +2447,11 @@ void IndexBuildsCoordinator::_runIndexBuildInner(
status.isA<ErrorCategory::ShutdownError>(),
str::stream() << "Unexpected error code during index build cleanup: " << status);
if (IndexBuildProtocol::kSinglePhase == replState->protocol) {
- _cleanUpSinglePhaseAfterFailure(
- opCtx, collection.get(), replState, indexBuildOptions, status);
+ _cleanUpSinglePhaseAfterFailure(opCtx, collection, replState, indexBuildOptions, status);
} else {
invariant(IndexBuildProtocol::kTwoPhase == replState->protocol,
str::stream() << replState->buildUUID);
- _cleanUpTwoPhaseAfterFailure(opCtx, collection.get(), replState, indexBuildOptions, status);
+ _cleanUpTwoPhaseAfterFailure(opCtx, collection, replState, indexBuildOptions, status);
}
// Any error that escapes at this point is not fatal and can be handled by the caller.
@@ -2642,7 +2642,7 @@ void IndexBuildsCoordinator::_insertSortedKeysIntoIndexForResume(
}
}
-const Collection* IndexBuildsCoordinator::_setUpForScanCollectionAndInsertSortedKeysIntoIndex(
+CollectionPtr IndexBuildsCoordinator::_setUpForScanCollectionAndInsertSortedKeysIntoIndex(
OperationContext* opCtx, std::shared_ptr<ReplIndexBuildState> replState) {
// Rebuilding system indexes during startup using the IndexBuildsCoordinator is done by all
// storage engines if they're missing.
@@ -3029,7 +3029,7 @@ std::vector<std::shared_ptr<ReplIndexBuildState>> IndexBuildsCoordinator::_filte
}
int IndexBuildsCoordinator::getNumIndexesTotal(OperationContext* opCtx,
- const Collection* collection) {
+ const CollectionPtr& collection) {
invariant(collection);
const auto& nss = collection->ns();
invariant(opCtx->lockState()->isLocked(),
@@ -3044,7 +3044,7 @@ int IndexBuildsCoordinator::getNumIndexesTotal(OperationContext* opCtx,
std::vector<BSONObj> IndexBuildsCoordinator::prepareSpecListForCreate(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const NamespaceString& nss,
const std::vector<BSONObj>& indexSpecs) {
UncommittedCollections::get(opCtx).invariantHasExclusiveAccessToCollection(opCtx,
@@ -3077,7 +3077,9 @@ std::vector<BSONObj> IndexBuildsCoordinator::prepareSpecListForCreate(
}
std::vector<BSONObj> IndexBuildsCoordinator::normalizeIndexSpecs(
- OperationContext* opCtx, const Collection* collection, const std::vector<BSONObj>& indexSpecs) {
+ OperationContext* opCtx,
+ const CollectionPtr& collection,
+ const std::vector<BSONObj>& indexSpecs) {
// This helper function may be called before the collection is created, when we are attempting
// to check whether the candidate index collides with any existing indexes. If 'collection' is
// nullptr, skip normalization. Since the collection does not exist, there cannot be a conflict,
diff --git a/src/mongo/db/index_builds_coordinator.h b/src/mongo/db/index_builds_coordinator.h
index 49388122054..5aff6eaa608 100644
--- a/src/mongo/db/index_builds_coordinator.h
+++ b/src/mongo/db/index_builds_coordinator.h
@@ -434,7 +434,7 @@ public:
* This function throws on error. Expects caller to have exclusive access to `collection`.
*/
static std::vector<BSONObj> prepareSpecListForCreate(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const NamespaceString& nss,
const std::vector<BSONObj>& indexSpecs);
@@ -451,7 +451,7 @@ public:
* This function throws on error.
*/
static std::vector<BSONObj> normalizeIndexSpecs(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const std::vector<BSONObj>& indexSpecs);
/**
@@ -461,7 +461,7 @@ public:
*
* Expects a lock to be held by the caller, so that 'collection' is safe to use.
*/
- static int getNumIndexesTotal(OperationContext* opCtx, const Collection* collection);
+ static int getNumIndexesTotal(OperationContext* opCtx, const CollectionPtr& collection);
/**
@@ -597,7 +597,7 @@ protected:
* Cleans up a single-phase index build after a failure.
*/
void _cleanUpSinglePhaseAfterFailure(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::shared_ptr<ReplIndexBuildState> replState,
const IndexBuildOptions& indexBuildOptions,
const Status& status);
@@ -606,7 +606,7 @@ protected:
* Cleans up a two-phase index build after a failure.
*/
void _cleanUpTwoPhaseAfterFailure(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::shared_ptr<ReplIndexBuildState> replState,
const IndexBuildOptions& indexBuildOptions,
const Status& status);
@@ -631,7 +631,7 @@ protected:
Status reason);
void _completeAbortForShutdown(OperationContext* opCtx,
std::shared_ptr<ReplIndexBuildState> replState,
- const Collection* collection);
+ const CollectionPtr& collection);
/**
* Waits for the last optime before the interceptors were installed on the node to be majority
@@ -661,7 +661,7 @@ protected:
*/
void _insertSortedKeysIntoIndexForResume(OperationContext* opCtx,
std::shared_ptr<ReplIndexBuildState> replState);
- const Collection* _setUpForScanCollectionAndInsertSortedKeysIntoIndex(
+ CollectionPtr _setUpForScanCollectionAndInsertSortedKeysIntoIndex(
OperationContext* opCtx, std::shared_ptr<ReplIndexBuildState> replState);
/**
diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp
index 89627a1f42e..d65581979a5 100644
--- a/src/mongo/db/index_builds_coordinator_mongod.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod.cpp
@@ -786,8 +786,7 @@ Status IndexBuildsCoordinatorMongod::setCommitQuorum(OperationContext* opCtx,
<< nss << "' without providing any indexes.");
}
- AutoGetCollectionForRead autoColl(opCtx, nss);
- const Collection* collection = autoColl.getCollection();
+ AutoGetCollectionForRead collection(opCtx, nss);
if (!collection) {
return Status(ErrorCodes::NamespaceNotFound,
str::stream() << "Collection '" << nss << "' was not found.");
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index 95e8f4843fd..0a6dab11e7c 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -130,8 +130,7 @@ void profile(OperationContext* opCtx, NetworkOp op) {
EnforcePrepareConflictsBlock enforcePrepare(opCtx);
uassertStatusOK(createProfileCollection(opCtx, db));
- const Collection* const coll =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, dbProfilingNS);
+ auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, dbProfilingNS);
invariant(!opCtx->shouldParticipateInFlowControl());
WriteUnitOfWork wuow(opCtx);
@@ -160,7 +159,7 @@ Status createProfileCollection(OperationContext* opCtx, Database* db) {
// collection creation would endlessly throw errors because the collection exists: must check
// whether the collection exists in order to break free.
return writeConflictRetry(opCtx, "createProfileCollection", dbProfilingNS.ns(), [&] {
- const Collection* const collection =
+ const CollectionPtr collection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, dbProfilingNS);
if (collection) {
if (!collection->isCapped()) {
diff --git a/src/mongo/db/matcher/expression_text.cpp b/src/mongo/db/matcher/expression_text.cpp
index c2ea279d6b6..3f72bafb102 100644
--- a/src/mongo/db/matcher/expression_text.cpp
+++ b/src/mongo/db/matcher/expression_text.cpp
@@ -66,7 +66,7 @@ TextMatchExpression::TextMatchExpression(OperationContext* opCtx,
<< nss.ns() << "')",
db);
- const Collection* collection =
+ CollectionPtr collection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
uassert(ErrorCodes::IndexNotFound,
diff --git a/src/mongo/db/mongod_main.cpp b/src/mongo/db/mongod_main.cpp
index 5a09ab036b5..07629f33788 100644
--- a/src/mongo/db/mongod_main.cpp
+++ b/src/mongo/db/mongod_main.cpp
@@ -257,7 +257,7 @@ void logStartup(OperationContext* opCtx) {
Lock::GlobalWrite lk(opCtx);
AutoGetOrCreateDb autoDb(opCtx, startupLogCollectionName.db(), mongo::MODE_X);
Database* db = autoDb.getDb();
- const Collection* collection =
+ CollectionPtr collection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, startupLogCollectionName);
WriteUnitOfWork wunit(opCtx);
if (!collection) {
diff --git a/src/mongo/db/op_observer.h b/src/mongo/db/op_observer.h
index 49c4123049d..7b435bc80d1 100644
--- a/src/mongo/db/op_observer.h
+++ b/src/mongo/db/op_observer.h
@@ -182,7 +182,7 @@ public:
}
virtual void onCreateCollection(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index f0041e43a75..bb187fc2149 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -664,7 +664,7 @@ void OpObserverImpl::onInternalOpMessage(
}
void OpObserverImpl::onCreateCollection(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
@@ -736,7 +736,8 @@ void OpObserverImpl::onCollMod(OperationContext* opCtx,
if (!db) {
return;
}
- const Collection* coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
+ const CollectionPtr& coll =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
invariant(coll->uuid() == uuid);
invariant(DurableCatalog::get(opCtx)->isEqualToMetadataUUID(opCtx, coll->getCatalogId(), uuid));
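One subtlety in the hunk above: the lookup result is bound to a `const CollectionPtr&`. Whether lookupCollectionByNamespace returns by value or by reference is not visible in this diff, but the binding is safe either way, because C++ extends the lifetime of a temporary bound to a const reference to the reference's full scope. A minimal demonstration:

    #include <cassert>
    #include <string>

    std::string makeTemp() { return "temporary"; }

    void demo() {
        const std::string& s = makeTemp();  // temporary's lifetime extended
        assert(s == "temporary");           // still valid for s's whole scope
    }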
diff --git a/src/mongo/db/op_observer_impl.h b/src/mongo/db/op_observer_impl.h
index 1e73732c1c9..70f348382b6 100644
--- a/src/mongo/db/op_observer_impl.h
+++ b/src/mongo/db/op_observer_impl.h
@@ -118,7 +118,7 @@ public:
const boost::optional<repl::OpTime> prevWriteOpTimeInTransaction,
const boost::optional<OplogSlot> slot) final;
void onCreateCollection(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
diff --git a/src/mongo/db/op_observer_noop.h b/src/mongo/db/op_observer_noop.h
index 0a90da9d0d1..4c4429482d8 100644
--- a/src/mongo/db/op_observer_noop.h
+++ b/src/mongo/db/op_observer_noop.h
@@ -92,7 +92,7 @@ public:
const boost::optional<repl::OpTime> prevWriteOpTimeInTransaction,
const boost::optional<OplogSlot> slot) override {}
void onCreateCollection(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
diff --git a/src/mongo/db/op_observer_registry.h b/src/mongo/db/op_observer_registry.h
index 029393ef199..ed34acbe127 100644
--- a/src/mongo/db/op_observer_registry.h
+++ b/src/mongo/db/op_observer_registry.h
@@ -172,7 +172,7 @@ public:
}
void onCreateCollection(OperationContext* const opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
diff --git a/src/mongo/db/ops/delete.cpp b/src/mongo/db/ops/delete.cpp
index 14b81cc207b..a546a57ea76 100644
--- a/src/mongo/db/ops/delete.cpp
+++ b/src/mongo/db/ops/delete.cpp
@@ -41,7 +41,7 @@
namespace mongo {
long long deleteObjects(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const NamespaceString& ns,
BSONObj pattern,
bool justOne,
diff --git a/src/mongo/db/ops/delete.h b/src/mongo/db/ops/delete.h
index 58eebd434d7..a239b336ab3 100644
--- a/src/mongo/db/ops/delete.h
+++ b/src/mongo/db/ops/delete.h
@@ -44,7 +44,7 @@ class OperationContext;
* not yield. If 'god' is true, deletes are allowed on system namespaces.
*/
long long deleteObjects(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const NamespaceString& ns,
BSONObj pattern,
bool justOne,
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index da11e24b03d..899dbac9713 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -62,7 +62,7 @@ UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest&
const NamespaceString& nsString = request.getNamespaceString();
invariant(opCtx->lockState()->isCollectionLockedForMode(nsString, MODE_IX));
- const Collection* collection;
+ CollectionPtr collection;
// The update stage does not create its own collection. As such, if the update is
// an upsert, create the collection that the update stage inserts into beforehand.
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 29d07007956..d0cdf8a29cd 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -281,7 +281,7 @@ bool handleError(OperationContext* opCtx,
}
void insertDocuments(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::vector<InsertStatement>::iterator begin,
std::vector<InsertStatement>::iterator end,
bool fromMigrate) {
@@ -336,7 +336,7 @@ void insertDocuments(OperationContext* opCtx,
* they only allow one operation at a time because they enforce insertion order with a MODE_X
* collection lock, which we cannot hold in transactions.
*/
-Status checkIfTransactionOnCappedColl(OperationContext* opCtx, const Collection* collection) {
+Status checkIfTransactionOnCappedColl(OperationContext* opCtx, const CollectionPtr& collection) {
if (opCtx->inMultiDocumentTransaction() && collection->isCapped()) {
return {ErrorCodes::OperationNotSupportedInTransaction,
str::stream() << "Collection '" << collection->ns()
@@ -649,7 +649,7 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx,
makeCollection(opCtx, ns);
}
- if (auto coll = collection->getCollection()) {
+ if (const auto& coll = collection->getCollection()) {
// Transactions are not allowed to operate on capped collections.
uassertStatusOK(checkIfTransactionOnCappedColl(opCtx, coll));
}
@@ -677,7 +677,7 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx,
PlanSummaryStats summary;
exec->getSummaryStats(&summary);
- if (auto coll = collection->getCollection()) {
+ if (const auto& coll = collection->getCollection()) {
CollectionQueryInfo::get(coll).notifyOfQuery(opCtx, coll, summary);
}
@@ -915,7 +915,7 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx,
PlanSummaryStats summary;
exec->getSummaryStats(&summary);
- if (auto coll = collection.getCollection()) {
+ if (const auto& coll = collection.getCollection()) {
CollectionQueryInfo::get(coll).notifyOfQuery(opCtx, coll, summary);
}
curOp.debug().setPlanSummaryMetrics(summary);
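
The checkIfTransactionOnCappedColl hunk above encodes the rule from its comment: capped collections enforce insertion order with a MODE_X collection lock, which multi-document transactions cannot hold, so such writes are rejected up front. A minimal standalone sketch of that guard, with simplified stand-ins for the real Collection and OperationContext types:

    #include <iostream>
    #include <sstream>
    #include <string>

    // Stand-ins for the real types; only what the guard needs.
    struct Collection {
        bool capped;
        std::string ns;
        bool isCapped() const { return capped; }
    };

    struct OperationContext {
        bool inTxn;
        bool inMultiDocumentTransaction() const { return inTxn; }
    };

    // Mirrors checkIfTransactionOnCappedColl: refuse the write instead of
    // relying on a MODE_X-based invariant that a transaction cannot satisfy.
    bool checkTransactionOnCappedColl(const OperationContext& opCtx,
                                      const Collection& coll,
                                      std::string* reason) {
        if (opCtx.inMultiDocumentTransaction() && coll.isCapped()) {
            std::ostringstream ss;
            ss << "Collection '" << coll.ns
               << "' is capped; not supported in multi-document transactions";
            *reason = ss.str();
            return false;
        }
        return true;
    }

    int main() {
        OperationContext opCtx{true};
        Collection coll{true, "test.capped"};
        std::string reason;
        if (!checkTransactionOnCappedColl(opCtx, coll, &reason))
            std::cout << reason << '\n';
    }
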
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index ba6151d341a..a651b387ce4 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -142,7 +142,7 @@ void DocumentSourceCursor::loadBatch() {
->checkCanServeReadsFor(pExpCtx->opCtx, _exec->nss(), true));
}
- _exec->restoreState();
+ _exec->restoreState(autoColl ? &autoColl->getCollection() : nullptr);
try {
ON_BLOCK_EXIT([this] { recordPlanSummaryStats(); });
@@ -285,7 +285,7 @@ DocumentSourceCursor::~DocumentSourceCursor() {
}
DocumentSourceCursor::DocumentSourceCursor(
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
const intrusive_ptr<ExpressionContext>& pCtx,
CursorType cursorType,
@@ -316,7 +316,7 @@ DocumentSourceCursor::DocumentSourceCursor(
}
intrusive_ptr<DocumentSourceCursor> DocumentSourceCursor::create(
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
const intrusive_ptr<ExpressionContext>& pExpCtx,
CursorType cursorType,
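
The loadBatch hunk shows the new restore contract that runs through this whole diff: restoreState() no longer trusts state captured before a yield; the caller hands the current collection handle (a Yieldable) back in, or nullptr when there is none. A sketch of the contract with illustrative stand-in types:

    #include <cassert>
    #include <iostream>

    struct Yieldable {};  // tag interface, simplified

    struct CollectionPtr : Yieldable {
        explicit CollectionPtr(const char* n) : ns(n) {}
        const char* ns;
    };

    class PlanExecutorLike {
    public:
        void saveState() { _yieldable = nullptr; }       // handle may go stale
        void restoreState(const Yieldable* yieldable) {  // caller re-seats it
            _yieldable = yieldable;
        }
        bool hasYieldable() const { return _yieldable != nullptr; }

    private:
        const Yieldable* _yieldable = nullptr;
    };

    int main() {
        CollectionPtr coll("test.cursor");
        PlanExecutorLike exec;
        exec.saveState();            // locks dropped across the batch boundary
        exec.restoreState(&coll);    // locks reacquired, handle passed back
        assert(exec.hasYieldable());
        std::cout << "restored\n";
    }
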
diff --git a/src/mongo/db/pipeline/document_source_cursor.h b/src/mongo/db/pipeline/document_source_cursor.h
index a7794df2e2c..4ee177593c5 100644
--- a/src/mongo/db/pipeline/document_source_cursor.h
+++ b/src/mongo/db/pipeline/document_source_cursor.h
@@ -63,7 +63,7 @@ public:
* $cursor stage can return a sequence of empty documents for the caller to count.
*/
static boost::intrusive_ptr<DocumentSourceCursor> create(
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
const boost::intrusive_ptr<ExpressionContext>& pExpCtx,
CursorType cursorType,
@@ -112,7 +112,7 @@ public:
}
protected:
- DocumentSourceCursor(const Collection* collection,
+ DocumentSourceCursor(const CollectionPtr& collection,
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
const boost::intrusive_ptr<ExpressionContext>& pExpCtx,
CursorType cursorType,
diff --git a/src/mongo/db/pipeline/document_source_geo_near_cursor.cpp b/src/mongo/db/pipeline/document_source_geo_near_cursor.cpp
index 4277bd26424..8e6885567d0 100644
--- a/src/mongo/db/pipeline/document_source_geo_near_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_geo_near_cursor.cpp
@@ -51,7 +51,7 @@
namespace mongo {
boost::intrusive_ptr<DocumentSourceGeoNearCursor> DocumentSourceGeoNearCursor::create(
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
const boost::intrusive_ptr<ExpressionContext>& expCtx,
FieldPath distanceField,
@@ -66,7 +66,7 @@ boost::intrusive_ptr<DocumentSourceGeoNearCursor> DocumentSourceGeoNearCursor::c
}
DocumentSourceGeoNearCursor::DocumentSourceGeoNearCursor(
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
const boost::intrusive_ptr<ExpressionContext>& expCtx,
FieldPath distanceField,
diff --git a/src/mongo/db/pipeline/document_source_geo_near_cursor.h b/src/mongo/db/pipeline/document_source_geo_near_cursor.h
index f8d3b483914..b3ddf9a834b 100644
--- a/src/mongo/db/pipeline/document_source_geo_near_cursor.h
+++ b/src/mongo/db/pipeline/document_source_geo_near_cursor.h
@@ -60,7 +60,7 @@ public:
* nonnegative.
*/
static boost::intrusive_ptr<DocumentSourceGeoNearCursor> create(
- const Collection*,
+ const CollectionPtr&,
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>,
const boost::intrusive_ptr<ExpressionContext>&,
FieldPath distanceField,
@@ -70,7 +70,7 @@ public:
const char* getSourceName() const final;
private:
- DocumentSourceGeoNearCursor(const Collection*,
+ DocumentSourceGeoNearCursor(const CollectionPtr&,
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>,
const boost::intrusive_ptr<ExpressionContext>&,
FieldPath distanceField,
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index c7b8bd273ec..6d0406f7495 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -99,7 +99,7 @@ namespace {
* storage engine support for random cursors.
*/
StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> createRandomCursorExecutor(
- const Collection* coll,
+ const CollectionPtr& coll,
const boost::intrusive_ptr<ExpressionContext>& expCtx,
long long sampleSize,
long long numRecords,
@@ -187,7 +187,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> createRandomCursorEx
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> attemptToGetExecutor(
const intrusive_ptr<ExpressionContext>& expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const NamespaceString& nss,
BSONObj queryObj,
BSONObj projectionObj,
@@ -273,7 +273,8 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> attemptToGetExe
*
* The 'collection' is required to exist. Throws if no usable 2d or 2dsphere index could be found.
*/
-StringData extractGeoNearFieldFromIndexes(OperationContext* opCtx, const Collection* collection) {
+StringData extractGeoNearFieldFromIndexes(OperationContext* opCtx,
+ const CollectionPtr& collection) {
invariant(collection);
std::vector<const IndexDescriptor*> idxs;
@@ -313,7 +314,7 @@ StringData extractGeoNearFieldFromIndexes(OperationContext* opCtx, const Collect
} // namespace
std::pair<PipelineD::AttachExecutorCallback, std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>>
-PipelineD::buildInnerQueryExecutor(const Collection* collection,
+PipelineD::buildInnerQueryExecutor(const CollectionPtr& collection,
const NamespaceString& nss,
const AggregationRequest* aggRequest,
Pipeline* pipeline) {
@@ -348,7 +349,7 @@ PipelineD::buildInnerQueryExecutor(const Collection* collection,
? DocumentSourceCursor::CursorType::kEmptyDocuments
: DocumentSourceCursor::CursorType::kRegular;
auto attachExecutorCallback =
- [cursorType](const Collection* collection,
+ [cursorType](const CollectionPtr& collection,
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
Pipeline* pipeline) {
auto cursor = DocumentSourceCursor::create(
@@ -372,7 +373,7 @@ PipelineD::buildInnerQueryExecutor(const Collection* collection,
}
void PipelineD::attachInnerQueryExecutorToPipeline(
- const Collection* collection,
+ const CollectionPtr& collection,
PipelineD::AttachExecutorCallback attachExecutorCallback,
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
Pipeline* pipeline) {
@@ -384,7 +385,7 @@ void PipelineD::attachInnerQueryExecutorToPipeline(
}
}
-void PipelineD::buildAndAttachInnerQueryExecutorToPipeline(const Collection* collection,
+void PipelineD::buildAndAttachInnerQueryExecutorToPipeline(const CollectionPtr& collection,
const NamespaceString& nss,
const AggregationRequest* aggRequest,
Pipeline* pipeline) {
@@ -483,7 +484,7 @@ auto buildProjectionForPushdown(const DepsTracker& deps, Pipeline* pipeline) {
} // namespace
std::pair<PipelineD::AttachExecutorCallback, std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>>
-PipelineD::buildInnerQueryExecutorGeneric(const Collection* collection,
+PipelineD::buildInnerQueryExecutorGeneric(const CollectionPtr& collection,
const NamespaceString& nss,
const AggregationRequest* aggRequest,
Pipeline* pipeline) {
@@ -561,7 +562,7 @@ PipelineD::buildInnerQueryExecutorGeneric(const Collection* collection,
(pipeline->peekFront() && pipeline->peekFront()->constraints().isChangeStreamStage());
auto attachExecutorCallback =
- [cursorType, trackOplogTS](const Collection* collection,
+ [cursorType, trackOplogTS](const CollectionPtr& collection,
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
Pipeline* pipeline) {
auto cursor = DocumentSourceCursor::create(
@@ -572,7 +573,7 @@ PipelineD::buildInnerQueryExecutorGeneric(const Collection* collection,
}
std::pair<PipelineD::AttachExecutorCallback, std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>>
-PipelineD::buildInnerQueryExecutorGeoNear(const Collection* collection,
+PipelineD::buildInnerQueryExecutorGeoNear(const CollectionPtr& collection,
const NamespaceString& nss,
const AggregationRequest* aggRequest,
Pipeline* pipeline) {
@@ -616,7 +617,7 @@ PipelineD::buildInnerQueryExecutorGeoNear(const Collection* collection,
locationField = geoNearStage->getLocationField(),
distanceMultiplier =
geoNearStage->getDistanceMultiplier().value_or(1.0)](
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
Pipeline* pipeline) {
auto cursor = DocumentSourceGeoNearCursor::create(collection,
@@ -634,7 +635,7 @@ PipelineD::buildInnerQueryExecutorGeoNear(const Collection* collection,
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> PipelineD::prepareExecutor(
const intrusive_ptr<ExpressionContext>& expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const NamespaceString& nss,
Pipeline* pipeline,
const boost::intrusive_ptr<DocumentSourceSort>& sortStage,
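
The lambdas above change shape along with the AttachExecutorCallback alias in pipeline_d.h: the callback now receives the collection as const CollectionPtr& when it is eventually invoked. A self-contained sketch of that callback shape, with simplified stand-in types:

    #include <functional>
    #include <iostream>
    #include <memory>

    struct CollectionPtr {
        const char* ns;
    };
    struct PlanExecutorLike {};
    struct PipelineLike {};

    using AttachExecutorCallback = std::function<void(
        const CollectionPtr&, std::unique_ptr<PlanExecutorLike>, PipelineLike*)>;

    int main() {
        enum class CursorType { kRegular, kEmptyDocuments };
        CursorType cursorType = CursorType::kRegular;

        // Mirrors the lambda in buildInnerQueryExecutor: the collection
        // arrives by reference at invocation time, not at capture time.
        AttachExecutorCallback attach =
            [cursorType](const CollectionPtr& collection,
                         std::unique_ptr<PlanExecutorLike> exec,
                         PipelineLike* pipeline) {
                (void)exec;
                (void)pipeline;
                std::cout << "attaching "
                          << (cursorType == CursorType::kRegular ? "regular" : "empty")
                          << " cursor to " << collection.ns << '\n';
            };

        CollectionPtr coll{"test.agg"};
        PipelineLike pipeline;
        attach(coll, std::make_unique<PlanExecutorLike>(), &pipeline);
    }
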
diff --git a/src/mongo/db/pipeline/pipeline_d.h b/src/mongo/db/pipeline/pipeline_d.h
index ec1baacf6fd..96a2b83c57c 100644
--- a/src/mongo/db/pipeline/pipeline_d.h
+++ b/src/mongo/db/pipeline/pipeline_d.h
@@ -43,6 +43,7 @@
namespace mongo {
class Collection;
+class CollectionPtr;
class DocumentSourceCursor;
class DocumentSourceMatch;
class DocumentSourceSort;
@@ -67,7 +68,7 @@ public:
* the new stage to the pipeline.
*/
using AttachExecutorCallback = std::function<void(
- const Collection*, std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>, Pipeline*)>;
+ const CollectionPtr&, std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>, Pipeline*)>;
/**
* This method looks for early pipeline stages that can be folded into the underlying
@@ -88,7 +89,7 @@ public:
* 'nullptr'.
*/
static std::pair<AttachExecutorCallback, std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>>
- buildInnerQueryExecutor(const Collection* collection,
+ buildInnerQueryExecutor(const CollectionPtr& collection,
const NamespaceString& nss,
const AggregationRequest* aggRequest,
Pipeline* pipeline);
@@ -101,7 +102,7 @@ public:
* 'nullptr'.
*/
static void attachInnerQueryExecutorToPipeline(
- const Collection* collection,
+ const CollectionPtr& collection,
AttachExecutorCallback attachExecutorCallback,
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
Pipeline* pipeline);
@@ -112,7 +113,7 @@ public:
* used when the executor attachment phase doesn't need to be deferred and the $cursor stage
     * can be created right after building the executor.
*/
- static void buildAndAttachInnerQueryExecutorToPipeline(const Collection* collection,
+ static void buildAndAttachInnerQueryExecutorToPipeline(const CollectionPtr& collection,
const NamespaceString& nss,
const AggregationRequest* aggRequest,
Pipeline* pipeline);
@@ -130,7 +131,7 @@ public:
*/
static std::unique_ptr<CollatorInterface> resolveCollator(OperationContext* opCtx,
BSONObj userCollation,
- const Collection* collection) {
+ const CollectionPtr& collection) {
if (!userCollation.isEmpty()) {
return uassertStatusOK(CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(userCollation));
@@ -149,7 +150,7 @@ private:
* the 'pipeline'.
*/
static std::pair<AttachExecutorCallback, std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>>
- buildInnerQueryExecutorGeneric(const Collection* collection,
+ buildInnerQueryExecutorGeneric(const CollectionPtr& collection,
const NamespaceString& nss,
const AggregationRequest* aggRequest,
Pipeline* pipeline);
@@ -160,7 +161,7 @@ private:
* not exist, as the $geoNearCursor requires a 2d or 2dsphere index.
*/
static std::pair<AttachExecutorCallback, std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>>
- buildInnerQueryExecutorGeoNear(const Collection* collection,
+ buildInnerQueryExecutorGeoNear(const CollectionPtr& collection,
const NamespaceString& nss,
const AggregationRequest* aggRequest,
Pipeline* pipeline);
@@ -179,7 +180,7 @@ private:
*/
static StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> prepareExecutor(
const boost::intrusive_ptr<ExpressionContext>& expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const NamespaceString& nss,
Pipeline* pipeline,
const boost::intrusive_ptr<DocumentSourceSort>& sortStage,
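
Note that the headers in this diff get by with only `class CollectionPtr;`: a forward declaration suffices to declare parameters passed by reference or pointer, so pipeline_d.h, explain.h, get_executor.h, and internal_plans.h avoid pulling in the full collection header. A small sketch of the idiom:

    class CollectionPtr;  // forward declaration only

    struct PipelineLike {
        // Fine with an incomplete type: only a reference is named here.
        void attach(const CollectionPtr& collection);
    };

    // The full definition is needed only where members are actually used,
    // i.e. in the .cpp files.
    class CollectionPtr {
    public:
        bool valid() const { return true; }
    };

    void PipelineLike::attach(const CollectionPtr& collection) {
        (void)collection.valid();  // complete type required at the point of use
    }

    int main() {
        CollectionPtr c;
        PipelineLike p;
        p.attach(c);
    }
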
diff --git a/src/mongo/db/pipeline/plan_executor_pipeline.h b/src/mongo/db/pipeline/plan_executor_pipeline.h
index 54a66979bfc..72d61471720 100644
--- a/src/mongo/db/pipeline/plan_executor_pipeline.h
+++ b/src/mongo/db/pipeline/plan_executor_pipeline.h
@@ -62,7 +62,7 @@ public:
// underlying data access plan is saved/restored internally in between DocumentSourceCursor
// batches, or when the underlying PlanStage tree yields.
void saveState() override {}
- void restoreState() override {}
+ void restoreState(const Yieldable* yieldable) override {}
void detachFromOperationContext() override {
_pipeline->detachFromOperationContext();
diff --git a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
index 35a3c11acad..7e795c181a0 100644
--- a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
+++ b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
@@ -156,9 +156,8 @@ std::vector<Document> CommonMongodProcessInterface::getIndexStats(OperationConte
const NamespaceString& ns,
StringData host,
bool addShardName) {
- AutoGetCollectionForReadCommand autoColl(opCtx, ns);
+ AutoGetCollectionForReadCommand collection(opCtx, ns);
- const Collection* collection = autoColl.getCollection();
std::vector<Document> indexStats;
if (!collection) {
LOGV2_DEBUG(23881,
@@ -227,15 +226,13 @@ Status CommonMongodProcessInterface::appendRecordCount(OperationContext* opCtx,
Status CommonMongodProcessInterface::appendQueryExecStats(OperationContext* opCtx,
const NamespaceString& nss,
BSONObjBuilder* builder) const {
- AutoGetCollectionForReadCommand autoColl(opCtx, nss);
+ AutoGetCollectionForReadCommand collection(opCtx, nss);
- if (!autoColl.getDb()) {
+ if (!collection.getDb()) {
return {ErrorCodes::NamespaceNotFound,
str::stream() << "Database [" << nss.db().toString() << "] not found."};
}
- const Collection* collection = autoColl.getCollection();
-
if (!collection) {
return {ErrorCodes::NamespaceNotFound,
str::stream() << "Collection [" << nss.toString() << "] not found."};
@@ -261,12 +258,11 @@ Status CommonMongodProcessInterface::appendQueryExecStats(OperationContext* opCt
BSONObj CommonMongodProcessInterface::getCollectionOptions(OperationContext* opCtx,
const NamespaceString& nss) {
- AutoGetCollectionForReadCommand autoColl(opCtx, nss);
+ AutoGetCollectionForReadCommand collection(opCtx, nss);
BSONObj collectionOptions = {};
- if (!autoColl.getDb()) {
+ if (!collection.getDb()) {
return collectionOptions;
}
- const Collection* collection = autoColl.getCollection();
if (!collection) {
return collectionOptions;
}
@@ -412,12 +408,11 @@ std::vector<BSONObj> CommonMongodProcessInterface::getMatchingPlanCacheEntryStat
return !matchExp ? true : matchExp->matchesBSON(obj);
};
- AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- const auto collection = autoColl.getCollection();
+ AutoGetCollection collection(opCtx, nss, MODE_IS);
uassert(
50933, str::stream() << "collection '" << nss.toString() << "' does not exist", collection);
- const auto planCache = CollectionQueryInfo::get(collection).getPlanCache();
+ const auto planCache = CollectionQueryInfo::get(collection.getCollection()).getPlanCache();
invariant(planCache);
return planCache->getMatchingStats(serializer, predicate);
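
The renames above (autoColl → collection) drop the intermediate `getCollection()` local and use the RAII guard itself in boolean and accessor positions, which suggests the AutoGetCollection* types now expose the collection directly (an assumption here; the class changes themselves fall outside this section). A sketch of that guard shape with stand-in types:

    #include <iostream>
    #include <string>

    struct CollectionPtr {
        std::string ns;
        explicit operator bool() const { return !ns.empty(); }
    };

    class AutoGetCollectionLike {
    public:
        explicit AutoGetCollectionLike(std::string ns) : _coll{std::move(ns)} {}
        // Boolean context answers "does the collection exist?"
        explicit operator bool() const { return static_cast<bool>(_coll); }
        const CollectionPtr& getCollection() const { return _coll; }

    private:
        CollectionPtr _coll;  // in the real type, a lock guard lives here too
    };

    int main() {
        AutoGetCollectionLike collection("test.stats");
        if (!collection) {
            std::cout << "collection not found\n";
            return 1;
        }
        std::cout << "have " << collection.getCollection().ns << '\n';
    }
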
diff --git a/src/mongo/db/pipeline/process_interface/non_shardsvr_process_interface.cpp b/src/mongo/db/pipeline/process_interface/non_shardsvr_process_interface.cpp
index 537d89749d1..5d085033f96 100644
--- a/src/mongo/db/pipeline/process_interface/non_shardsvr_process_interface.cpp
+++ b/src/mongo/db/pipeline/process_interface/non_shardsvr_process_interface.cpp
@@ -116,7 +116,7 @@ void NonShardServerProcessInterface::createIndexesOnEmptyCollection(
str::stream() << "The database is in the process of being dropped " << ns.db(),
autoColl.getDb() && !autoColl.getDb()->isDropPending(opCtx));
- auto collection = autoColl.getCollection();
+ const auto& collection = autoColl.getCollection();
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Failed to create indexes for aggregation because collection "
"does not exist: "
diff --git a/src/mongo/db/query/classic_stage_builder.h b/src/mongo/db/query/classic_stage_builder.h
index bdf68d4677b..1c63e6714b5 100644
--- a/src/mongo/db/query/classic_stage_builder.h
+++ b/src/mongo/db/query/classic_stage_builder.h
@@ -39,7 +39,7 @@ namespace mongo::stage_builder {
class ClassicStageBuilder : public StageBuilder<PlanStage> {
public:
ClassicStageBuilder(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CanonicalQuery& cq,
const QuerySolution& solution,
WorkingSet* ws)
diff --git a/src/mongo/db/query/collection_query_info.cpp b/src/mongo/db/query/collection_query_info.cpp
index 46f9422e759..38df12525f7 100644
--- a/src/mongo/db/query/collection_query_info.cpp
+++ b/src/mongo/db/query/collection_query_info.cpp
@@ -87,7 +87,7 @@ const UpdateIndexData& CollectionQueryInfo::getIndexKeys(OperationContext* opCtx
return _indexedPaths;
}
-void CollectionQueryInfo::computeIndexKeys(OperationContext* opCtx, const Collection* coll) {
+void CollectionQueryInfo::computeIndexKeys(OperationContext* opCtx, const CollectionPtr& coll) {
_indexedPaths.clear();
std::unique_ptr<IndexCatalog::IndexIterator> it =
@@ -160,7 +160,7 @@ void CollectionQueryInfo::computeIndexKeys(OperationContext* opCtx, const Collec
}
void CollectionQueryInfo::notifyOfQuery(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const PlanSummaryStats& summaryStats) const {
auto& collectionIndexUsageTracker =
CollectionIndexUsageTrackerDecoration::get(coll->getSharedDecorations());
@@ -181,7 +181,7 @@ void CollectionQueryInfo::notifyOfQuery(OperationContext* opCtx,
}
}
-void CollectionQueryInfo::clearQueryCache(const Collection* coll) const {
+void CollectionQueryInfo::clearQueryCache(const CollectionPtr& coll) const {
LOGV2_DEBUG(20907,
1,
"Clearing plan cache - collection info cache reset",
@@ -196,7 +196,7 @@ PlanCache* CollectionQueryInfo::getPlanCache() const {
}
void CollectionQueryInfo::updatePlanCacheIndexEntries(OperationContext* opCtx,
- const Collection* coll) {
+ const CollectionPtr& coll) {
std::vector<CoreIndexInfo> indexCores;
// TODO We shouldn't need to include unfinished indexes, but we must here because the index
@@ -212,7 +212,7 @@ void CollectionQueryInfo::updatePlanCacheIndexEntries(OperationContext* opCtx,
_planCache->notifyOfIndexUpdates(indexCores);
}
-void CollectionQueryInfo::init(OperationContext* opCtx, const Collection* coll) {
+void CollectionQueryInfo::init(OperationContext* opCtx, const CollectionPtr& coll) {
const bool includeUnfinishedIndexes = false;
std::unique_ptr<IndexCatalog::IndexIterator> ii =
coll->getIndexCatalog()->getIndexIterator(opCtx, includeUnfinishedIndexes);
@@ -226,7 +226,7 @@ void CollectionQueryInfo::init(OperationContext* opCtx, const Collection* coll)
}
void CollectionQueryInfo::addedIndex(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const IndexDescriptor* desc) {
invariant(desc);
@@ -236,14 +236,14 @@ void CollectionQueryInfo::addedIndex(OperationContext* opCtx,
}
void CollectionQueryInfo::droppedIndex(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
StringData indexName) {
rebuildIndexData(opCtx, coll);
CollectionIndexUsageTrackerDecoration::get(coll->getSharedDecorations())
.unregisterIndex(indexName);
}
-void CollectionQueryInfo::rebuildIndexData(OperationContext* opCtx, const Collection* coll) {
+void CollectionQueryInfo::rebuildIndexData(OperationContext* opCtx, const CollectionPtr& coll) {
clearQueryCache(coll);
_keysComputed = false;
diff --git a/src/mongo/db/query/collection_query_info.h b/src/mongo/db/query/collection_query_info.h
index a9fda7742ed..1175dab7837 100644
--- a/src/mongo/db/query/collection_query_info.h
+++ b/src/mongo/db/query/collection_query_info.h
@@ -48,7 +48,14 @@ class CollectionQueryInfo {
public:
CollectionQueryInfo();
- inline static const auto get = Collection::declareDecoration<CollectionQueryInfo>();
+ inline static const auto getCollectionQueryInfo =
+ Collection::declareDecoration<CollectionQueryInfo>();
+ static const CollectionQueryInfo& get(const CollectionPtr& collection) {
+ return CollectionQueryInfo::getCollectionQueryInfo(collection.get());
+ }
+ static CollectionQueryInfo& get(Collection* collection) {
+ return CollectionQueryInfo::getCollectionQueryInfo(collection);
+ }
/**
* Get the PlanCache for this collection.
@@ -63,7 +70,7 @@ public:
/**
* Builds internal cache state based on the current state of the Collection's IndexCatalog.
*/
- void init(OperationContext* opCtx, const Collection* coll);
+ void init(OperationContext* opCtx, const CollectionPtr& coll);
/**
* Register a newly-created index with the cache. Must be called whenever an index is
@@ -71,7 +78,9 @@ public:
*
* Must be called under exclusive collection lock.
*/
- void addedIndex(OperationContext* opCtx, const Collection* coll, const IndexDescriptor* desc);
+ void addedIndex(OperationContext* opCtx,
+ const CollectionPtr& coll,
+ const IndexDescriptor* desc);
/**
* Deregister a newly-dropped index with the cache. Must be called whenever an index is
@@ -79,26 +88,26 @@ public:
*
* Must be called under exclusive collection lock.
*/
- void droppedIndex(OperationContext* opCtx, const Collection* coll, StringData indexName);
+ void droppedIndex(OperationContext* opCtx, const CollectionPtr& coll, StringData indexName);
/**
* Removes all cached query plans.
*/
- void clearQueryCache(const Collection* coll) const;
+ void clearQueryCache(const CollectionPtr& coll) const;
void notifyOfQuery(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const PlanSummaryStats& summaryStats) const;
private:
- void computeIndexKeys(OperationContext* opCtx, const Collection* coll);
- void updatePlanCacheIndexEntries(OperationContext* opCtx, const Collection* coll);
+ void computeIndexKeys(OperationContext* opCtx, const CollectionPtr& coll);
+ void updatePlanCacheIndexEntries(OperationContext* opCtx, const CollectionPtr& coll);
/**
* Rebuilds cached information that is dependent on index composition. Must be called
* when index composition changes.
*/
- void rebuildIndexData(OperationContext* opCtx, const Collection* coll);
+ void rebuildIndexData(OperationContext* opCtx, const CollectionPtr& coll);
// --- index keys cache
bool _keysComputed;
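
The CollectionQueryInfo hunk keeps a single decoration functor (renamed getCollectionQueryInfo) and layers two `get` overloads over it, so both raw Collection* callers and CollectionPtr callers keep the old call syntax. A simplified sketch of the overload-adapter pattern, with the decoration machinery replaced by a plain member:

    #include <iostream>

    struct QueryInfo {
        int planCacheEntries = 0;
    };

    struct Collection {
        QueryInfo info;  // stand-in for the real decoration storage
    };

    struct CollectionPtr {
        Collection* raw;
        Collection* get() const { return raw; }
    };

    struct CollectionQueryInfoLike {
        // Single underlying lookup, mirroring getCollectionQueryInfo.
        static QueryInfo& getCollectionQueryInfo(Collection* c) { return c->info; }

        // Overloads mirror the diff: CollectionPtr callers unwrap via get().
        static QueryInfo& get(const CollectionPtr& c) {
            return getCollectionQueryInfo(c.get());
        }
        static QueryInfo& get(Collection* c) { return getCollectionQueryInfo(c); }
    };

    int main() {
        Collection coll;
        CollectionPtr ptr{&coll};
        CollectionQueryInfoLike::get(ptr).planCacheEntries = 3;
        std::cout << CollectionQueryInfoLike::get(&coll).planCacheEntries << '\n';  // 3
    }
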
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index c4be694ad3d..003c5fe202c 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -591,7 +591,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
}
void Explain::generatePlannerInfo(PlanExecutor* exec,
- const Collection* collection,
+ const CollectionPtr& collection,
BSONObj extraInfo,
BSONObjBuilder* out) {
auto planExecImpl = dynamic_cast<PlanExecutorImpl*>(exec);
@@ -795,7 +795,7 @@ void Explain::generateExecutionInfo(PlanExecutor* exec,
}
void Explain::explainStages(PlanExecutor* exec,
- const Collection* collection,
+ const CollectionPtr& collection,
ExplainOptions::Verbosity verbosity,
Status executePlanStatus,
PlanStageStats* winningPlanTrialStats,
@@ -833,7 +833,7 @@ void Explain::explainPipelineExecutor(PlanExecutorPipeline* exec,
}
void Explain::explainStages(PlanExecutor* exec,
- const Collection* collection,
+ const CollectionPtr& collection,
ExplainOptions::Verbosity verbosity,
BSONObj extraInfo,
BSONObjBuilder* out) {
@@ -844,6 +844,7 @@ void Explain::explainStages(PlanExecutor* exec,
auto winningPlanTrialStats = Explain::getWinningPlanTrialStats(exec);
Status executePlanStatus = Status::OK();
+ const CollectionPtr* collectionPtr = &collection;
// If we need execution stats, then run the plan in order to gather the stats.
if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
@@ -857,12 +858,12 @@ void Explain::explainStages(PlanExecutor* exec,
// then the collection may no longer be valid. We conservatively set our collection pointer
// to null in case it is invalid.
if (executePlanStatus != ErrorCodes::NoQueryExecutionPlans) {
- collection = nullptr;
+ collectionPtr = &CollectionPtr::null;
}
}
explainStages(exec,
- collection,
+ *collectionPtr,
verbosity,
executePlanStatus,
winningPlanTrialStats.get(),
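
The explainStages hunk replaces `collection = nullptr` (no longer possible on a reference) with a pointer-to-handle that can be re-pointed at the shared CollectionPtr::null instance, preserving the conservative "treat the collection as gone" behavior described in the comment without copying or mutating the caller's handle. A sketch of the swap with a stand-in CollectionPtr:

    #include <iostream>

    struct CollectionPtr {
        const char* ns = nullptr;
        explicit operator bool() const { return ns != nullptr; }
        static const CollectionPtr null;  // shared empty handle
    };
    const CollectionPtr CollectionPtr::null{};

    void explainStagesLike(const CollectionPtr& collection, bool planFailed) {
        const CollectionPtr* collectionPtr = &collection;
        if (planFailed) {
            // Conservatively treat the collection as invalid, without
            // touching the caller's handle.
            collectionPtr = &CollectionPtr::null;
        }
        std::cout << (*collectionPtr ? "collection info included"
                                     : "no collection info")
                  << '\n';
    }

    int main() {
        CollectionPtr coll{"test.explain"};
        explainStagesLike(coll, false);  // collection info included
        explainStagesLike(coll, true);   // no collection info
    }
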
diff --git a/src/mongo/db/query/explain.h b/src/mongo/db/query/explain.h
index 666959883b2..a8848decfc1 100644
--- a/src/mongo/db/query/explain.h
+++ b/src/mongo/db/query/explain.h
@@ -42,6 +42,7 @@
namespace mongo {
class Collection;
+class CollectionPtr;
class OperationContext;
class PlanExecutorPipeline;
struct PlanSummaryStats;
@@ -72,7 +73,7 @@ public:
* added to the "executionStats" section of the explain.
*/
static void explainStages(PlanExecutor* exec,
- const Collection* collection,
+ const CollectionPtr& collection,
ExplainOptions::Verbosity verbosity,
BSONObj extraInfo,
BSONObjBuilder* out);
@@ -92,7 +93,7 @@ public:
* - 'out' is the builder for the explain output.
*/
static void explainStages(PlanExecutor* exec,
- const Collection* collection,
+ const CollectionPtr& collection,
ExplainOptions::Verbosity verbosity,
Status executePlanStatus,
PlanStageStats* winningPlanTrialStats,
@@ -205,7 +206,7 @@ private:
* - 'out' is a builder for the explain output.
*/
static void generatePlannerInfo(PlanExecutor* exec,
- const Collection* collection,
+ const CollectionPtr& collection,
BSONObj extraInfo,
BSONObjBuilder* out);
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index fe77d388ff2..27e10256727 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -82,7 +82,7 @@ MONGO_FAIL_POINT_DEFINE(failReceivedGetmore);
MONGO_FAIL_POINT_DEFINE(legacyGetMoreWaitWithCursor)
bool shouldSaveCursor(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
PlanExecutor::ExecState finalState,
PlanExecutor* exec) {
const QueryRequest& qr = exec->getCanonicalQuery()->getQueryRequest();
@@ -121,7 +121,7 @@ void beginQueryOp(OperationContext* opCtx,
}
void endQueryOp(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const PlanExecutor& exec,
long long numResults,
CursorId cursorId) {
@@ -410,7 +410,7 @@ Message getMore(OperationContext* opCtx,
PlanExecutor* exec = cursorPin->getExecutor();
exec->reattachToOperationContext(opCtx);
- exec->restoreState();
+ exec->restoreState(readLock ? &readLock->getCollection() : nullptr);
auto planSummary = exec->getPlanSummary();
{
@@ -476,7 +476,7 @@ Message getMore(OperationContext* opCtx,
// Reacquiring locks.
readLock.emplace(opCtx, nss);
- exec->restoreState();
+ exec->restoreState(&readLock->getCollection());
// We woke up because either the timed_wait expired, or there was more data. Either way,
// attempt to generate another batch of results.
@@ -605,8 +605,8 @@ bool runQuery(OperationContext* opCtx,
LOGV2_DEBUG(20914, 2, "Running query", "query"_attr = redact(cq->toStringShort()));
// Parse, canonicalize, plan, transcribe, and get a plan executor.
- AutoGetCollectionForReadCommand ctx(opCtx, nss, AutoGetCollectionViewMode::kViewsForbidden);
- const Collection* const collection = ctx.getCollection();
+ AutoGetCollectionForReadCommand collection(
+ opCtx, nss, AutoGetCollectionViewMode::kViewsForbidden);
const QueryRequest& qr = cq->getQueryRequest();
opCtx->setExhaust(qr.isExhaust());
@@ -625,7 +625,8 @@ bool runQuery(OperationContext* opCtx,
// Get the execution plan for the query.
constexpr auto verbosity = ExplainOptions::Verbosity::kExecAllPlans;
expCtx->explain = qr.isExplain() ? boost::make_optional(verbosity) : boost::none;
- auto exec = uassertStatusOK(getExecutorLegacyFind(opCtx, collection, std::move(cq)));
+ auto exec =
+ uassertStatusOK(getExecutorLegacyFind(opCtx, collection.getCollection(), std::move(cq)));
// If it's actually an explain, do the explain and return rather than falling through
// to the normal query execution loop.
@@ -634,7 +635,8 @@ bool runQuery(OperationContext* opCtx,
bb.skip(sizeof(QueryResult::Value));
BSONObjBuilder explainBob;
- Explain::explainStages(exec.get(), collection, verbosity, BSONObj(), &explainBob);
+ Explain::explainStages(
+ exec.get(), collection.getCollection(), verbosity, BSONObj(), &explainBob);
// Add the resulting object to the return buffer.
BSONObj explainObj = explainBob.obj();
@@ -721,7 +723,7 @@ bool runQuery(OperationContext* opCtx,
// this cursorid later.
long long ccId = 0;
- if (shouldSaveCursor(opCtx, collection, state, exec.get())) {
+ if (shouldSaveCursor(opCtx, collection.getCollection(), state, exec.get())) {
// We won't use the executor until it's getMore'd.
exec->saveState();
exec->detachFromOperationContext();
@@ -763,11 +765,15 @@ bool runQuery(OperationContext* opCtx,
pinnedCursor.getCursor()->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
}
- endQueryOp(opCtx, collection, *pinnedCursor.getCursor()->getExecutor(), numResults, ccId);
+ endQueryOp(opCtx,
+ collection.getCollection(),
+ *pinnedCursor.getCursor()->getExecutor(),
+ numResults,
+ ccId);
} else {
LOGV2_DEBUG(
20917, 5, "Not caching executor but returning results", "numResults"_attr = numResults);
- endQueryOp(opCtx, collection, *exec, numResults, ccId);
+ endQueryOp(opCtx, collection.getCollection(), *exec, numResults, ccId);
}
// Fill out the output buffer's header.
diff --git a/src/mongo/db/query/find.h b/src/mongo/db/query/find.h
index 805fd18884d..6913404cebb 100644
--- a/src/mongo/db/query/find.h
+++ b/src/mongo/db/query/find.h
@@ -50,7 +50,7 @@ class OperationContext;
* a cursor ID of 0.
*/
bool shouldSaveCursor(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
PlanExecutor::ExecState finalState,
PlanExecutor* exec);
@@ -79,7 +79,7 @@ void beginQueryOp(OperationContext* opCtx,
* Uses explain functionality to extract stats from 'exec'.
*/
void endQueryOp(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const PlanExecutor& exec,
long long numResults,
CursorId cursorId);
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index b13605fb2aa..daecec36c15 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -145,7 +145,7 @@ bool turnIxscanIntoCount(QuerySolution* soln);
/**
* Returns 'true' if 'query' on the given 'collection' can be answered using a special IDHACK plan.
*/
-bool isIdHackEligibleQuery(const Collection* collection, const CanonicalQuery& query) {
+bool isIdHackEligibleQuery(const CollectionPtr& collection, const CanonicalQuery& query) {
return !query.getQueryRequest().showRecordId() && query.getQueryRequest().getHint().isEmpty() &&
query.getQueryRequest().getMin().isEmpty() && query.getQueryRequest().getMax().isEmpty() &&
!query.getQueryRequest().getSkip() &&
@@ -247,7 +247,7 @@ IndexEntry indexEntryFromIndexCatalogEntry(OperationContext* opCtx,
* If query supports index filters, filter params.indices according to any index filters that have
* been configured. In addition, sets that there were indeed index filters applied.
*/
-void applyIndexFilters(const Collection* collection,
+void applyIndexFilters(const CollectionPtr& collection,
const CanonicalQuery& canonicalQuery,
QueryPlannerParams* plannerParams) {
if (!isIdHackEligibleQuery(collection, canonicalQuery)) {
@@ -266,7 +266,7 @@ void applyIndexFilters(const Collection* collection,
}
void fillOutPlannerParams(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
CanonicalQuery* canonicalQuery,
QueryPlannerParams* plannerParams) {
invariant(canonicalQuery);
@@ -334,7 +334,7 @@ void fillOutPlannerParams(OperationContext* opCtx,
}
bool shouldWaitForOplogVisibility(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
bool tailable) {
// Only non-tailable cursors on the oplog are affected. Only forward cursors, not reverse
@@ -514,7 +514,7 @@ template <typename PlanStageType, typename ResultType>
class PrepareExecutionHelper {
public:
PrepareExecutionHelper(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
CanonicalQuery* cq,
PlanYieldPolicy* yieldPolicy,
size_t plannerOptions)
@@ -527,7 +527,7 @@ public:
}
StatusWith<std::unique_ptr<ResultType>> prepare() {
- if (nullptr == _collection) {
+ if (!_collection) {
LOGV2_DEBUG(20921,
2,
"Collection does not exist. Using EOF plan",
@@ -720,7 +720,7 @@ protected:
const QueryPlannerParams& plannerParams) = 0;
OperationContext* _opCtx;
- const Collection* _collection;
+ const CollectionPtr& _collection;
CanonicalQuery* _cq;
PlanYieldPolicy* _yieldPolicy;
const size_t _plannerOptions;
@@ -733,7 +733,7 @@ class ClassicPrepareExecutionHelper final
: public PrepareExecutionHelper<std::unique_ptr<PlanStage>, ClassicPrepareExecutionResult> {
public:
ClassicPrepareExecutionHelper(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
WorkingSet* ws,
CanonicalQuery* cq,
PlanYieldPolicy* yieldPolicy,
@@ -954,7 +954,7 @@ private:
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getClassicExecutor(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<CanonicalQuery> canonicalQuery,
PlanYieldPolicy::YieldPolicy yieldPolicy,
size_t plannerOptions) {
@@ -986,7 +986,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getClassicExecu
*/
std::unique_ptr<sbe::RuntimePlanner> makeRuntimePlannerIfNeeded(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
CanonicalQuery* canonicalQuery,
size_t numSolutions,
boost::optional<size_t> decisionWorks,
@@ -1051,7 +1051,7 @@ std::unique_ptr<PlanYieldPolicySBE> makeSbeYieldPolicy(
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getSlotBasedExecutor(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<CanonicalQuery> cq,
PlanYieldPolicy::YieldPolicy requestedYieldPolicy,
size_t plannerOptions) {
@@ -1100,7 +1100,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getSlotBasedExe
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutor(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<CanonicalQuery> canonicalQuery,
PlanYieldPolicy::YieldPolicy yieldPolicy,
size_t plannerOptions) {
@@ -1119,7 +1119,7 @@ namespace {
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> _getExecutorFind(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<CanonicalQuery> canonicalQuery,
PlanYieldPolicy::YieldPolicy yieldPolicy,
size_t plannerOptions) {
@@ -1134,7 +1134,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> _getExecutorFin
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorFind(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<CanonicalQuery> canonicalQuery,
bool permitYield,
size_t plannerOptions) {
@@ -1147,7 +1147,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorFind
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorLegacyFind(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<CanonicalQuery> canonicalQuery) {
return _getExecutorFind(opCtx,
collection,
@@ -1206,7 +1206,7 @@ StatusWith<std::unique_ptr<projection_ast::Projection>> makeProjection(const BSO
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete(
OpDebug* opDebug,
- const Collection* collection,
+ const CollectionPtr& collection,
ParsedDelete* parsedDelete,
boost::optional<ExplainOptions::Verbosity> verbosity) {
auto expCtx = parsedDelete->expCtx();
@@ -1364,7 +1364,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDele
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate(
OpDebug* opDebug,
- const Collection* collection,
+ const CollectionPtr& collection,
ParsedUpdate* parsedUpdate,
boost::optional<ExplainOptions::Verbosity> verbosity) {
auto expCtx = parsedUpdate->expCtx();
@@ -1662,7 +1662,7 @@ bool getDistinctNodeIndex(const std::vector<IndexEntry>& indices,
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCount(
const boost::intrusive_ptr<ExpressionContext>& expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CountCommand& request,
bool explain,
const NamespaceString& nss) {
@@ -1929,7 +1929,7 @@ namespace {
// Get the list of indexes that include the "distinct" field.
QueryPlannerParams fillOutPlannerParamsForDistinct(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
size_t plannerOptions,
const ParsedDistinct& parsedDistinct) {
QueryPlannerParams plannerParams;
@@ -2013,7 +2013,7 @@ QueryPlannerParams fillOutPlannerParamsForDistinct(OperationContext* opCtx,
*/
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorForSimpleDistinct(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const QueryPlannerParams& plannerParams,
PlanYieldPolicy::YieldPolicy yieldPolicy,
ParsedDistinct* parsedDistinct) {
@@ -2086,7 +2086,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorForS
// 'strictDistinctOnly' parameter.
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>>
getExecutorDistinctFromIndexSolutions(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::vector<std::unique_ptr<QuerySolution>> solutions,
PlanYieldPolicy::YieldPolicy yieldPolicy,
ParsedDistinct* parsedDistinct,
@@ -2126,7 +2126,7 @@ getExecutorDistinctFromIndexSolutions(OperationContext* opCtx,
*/
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorWithoutProjection(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CanonicalQuery* cq,
PlanYieldPolicy::YieldPolicy yieldPolicy,
size_t plannerOptions) {
@@ -2148,7 +2148,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorWith
} // namespace
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDistinct(
- const Collection* collection, size_t plannerOptions, ParsedDistinct* parsedDistinct) {
+ const CollectionPtr& collection, size_t plannerOptions, ParsedDistinct* parsedDistinct) {
auto expCtx = parsedDistinct->getQuery()->getExpCtx();
OperationContext* opCtx = expCtx->opCtx;
const auto yieldPolicy = opCtx->inMultiDocumentTransaction()
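
PrepareExecutionHelper now stores `const CollectionPtr& _collection` as a reference member, so the helper must not outlive the caller's handle, and the null check becomes `!_collection` via the handle's boolean conversion rather than `nullptr == _collection`. A sketch of both points with stand-in types:

    #include <iostream>

    struct CollectionPtr {
        const char* ns = nullptr;
        explicit operator bool() const { return ns != nullptr; }
    };

    class PrepareHelperLike {
    public:
        explicit PrepareHelperLike(const CollectionPtr& collection)
            : _collection(collection) {}  // borrows; caller's handle must outlive us

        const char* prepare() const {
            if (!_collection)       // `!handle`, not `nullptr == handle`
                return "EOF plan";  // collection absent: trivial plan
            return "normal plan";
        }

    private:
        const CollectionPtr& _collection;  // reference member, never rebound
    };

    int main() {
        CollectionPtr coll{"test.q"};
        std::cout << PrepareHelperLike(coll).prepare() << '\n';  // normal plan
        CollectionPtr none{};
        std::cout << PrepareHelperLike(none).prepare() << '\n';  // EOF plan
    }
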
diff --git a/src/mongo/db/query/get_executor.h b/src/mongo/db/query/get_executor.h
index d14e07f9da3..1cdb332dcdc 100644
--- a/src/mongo/db/query/get_executor.h
+++ b/src/mongo/db/query/get_executor.h
@@ -46,6 +46,7 @@
namespace mongo {
class Collection;
+class CollectionPtr;
class CountRequest;
/**
@@ -73,7 +74,7 @@ void filterAllowedIndexEntries(const AllowedIndicesFilter& allowedIndicesFilter,
* 'collection'. Exposed for testing.
*/
void fillOutPlannerParams(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
CanonicalQuery* canonicalQuery,
QueryPlannerParams* plannerParams);
@@ -106,7 +107,7 @@ IndexEntry indexEntryFromIndexCatalogEntry(OperationContext* opCtx,
* collection scans on the oplog.
*/
bool shouldWaitForOplogVisibility(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
bool tailable);
/**
@@ -119,7 +120,7 @@ bool shouldWaitForOplogVisibility(OperationContext* opCtx,
*/
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutor(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<CanonicalQuery> canonicalQuery,
PlanYieldPolicy::YieldPolicy yieldPolicy,
size_t plannerOptions = 0);
@@ -136,7 +137,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutor(
*/
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorFind(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<CanonicalQuery> canonicalQuery,
bool permitYield = false,
size_t plannerOptions = QueryPlannerParams::DEFAULT);
@@ -146,7 +147,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorFind
*/
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorLegacyFind(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<CanonicalQuery> canonicalQuery);
/**
@@ -203,7 +204,7 @@ bool turnIxscanIntoDistinctIxscan(QuerySolution* soln,
* distinct.
*/
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDistinct(
- const Collection* collection, size_t plannerOptions, ParsedDistinct* parsedDistinct);
+ const CollectionPtr& collection, size_t plannerOptions, ParsedDistinct* parsedDistinct);
/*
* Get a PlanExecutor for a query executing as part of a count command.
@@ -214,7 +215,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDist
*/
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCount(
const boost::intrusive_ptr<ExpressionContext>& expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CountCommand& request,
bool explain,
const NamespaceString& nss);
@@ -240,7 +241,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCoun
*/
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete(
OpDebug* opDebug,
- const Collection* collection,
+ const CollectionPtr& collection,
ParsedDelete* parsedDelete,
boost::optional<ExplainOptions::Verbosity> verbosity);
@@ -266,7 +267,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDele
*/
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate(
OpDebug* opDebug,
- const Collection* collection,
+ const CollectionPtr& collection,
ParsedUpdate* parsedUpdate,
boost::optional<ExplainOptions::Verbosity> verbosity);
} // namespace mongo
diff --git a/src/mongo/db/query/internal_plans.cpp b/src/mongo/db/query/internal_plans.cpp
index 8d4f5d8ca47..f7c6beeefac 100644
--- a/src/mongo/db/query/internal_plans.cpp
+++ b/src/mongo/db/query/internal_plans.cpp
@@ -51,7 +51,7 @@ namespace mongo {
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::collectionScan(
OperationContext* opCtx,
StringData ns,
- const Collection* collection,
+ const CollectionPtr& collection,
PlanYieldPolicy::YieldPolicy yieldPolicy,
const Direction direction,
boost::optional<RecordId> resumeAfterRecordId) {
@@ -60,7 +60,7 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::collection
auto expCtx = make_intrusive<ExpressionContext>(
opCtx, std::unique_ptr<CollatorInterface>(nullptr), NamespaceString(ns));
- if (nullptr == collection) {
+ if (!collection) {
auto eof = std::make_unique<EOFStage>(expCtx.get());
// Takes ownership of 'ws' and 'eof'.
auto statusWithPlanExecutor = plan_executor_factory::make(
@@ -82,7 +82,7 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::collection
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWithCollectionScan(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<DeleteStageParams> params,
PlanYieldPolicy::YieldPolicy yieldPolicy,
Direction direction) {
@@ -106,7 +106,7 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWith
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::indexScan(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexDescriptor* descriptor,
const BSONObj& startKey,
const BSONObj& endKey,
@@ -137,7 +137,7 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::indexScan(
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWithIndexScan(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<DeleteStageParams> params,
const IndexDescriptor* descriptor,
const BSONObj& startKey,
@@ -172,7 +172,7 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWith
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::updateWithIdHack(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const UpdateStageParams& params,
const IndexDescriptor* descriptor,
const BSONObj& key,
@@ -201,7 +201,7 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::updateWith
std::unique_ptr<PlanStage> InternalPlanner::_collectionScan(
const boost::intrusive_ptr<ExpressionContext>& expCtx,
WorkingSet* ws,
- const Collection* collection,
+ const CollectionPtr& collection,
Direction direction,
boost::optional<RecordId> resumeAfterRecordId) {
invariant(collection);
@@ -223,7 +223,7 @@ std::unique_ptr<PlanStage> InternalPlanner::_collectionScan(
std::unique_ptr<PlanStage> InternalPlanner::_indexScan(
const boost::intrusive_ptr<ExpressionContext>& expCtx,
WorkingSet* ws,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexDescriptor* descriptor,
const BSONObj& startKey,
const BSONObj& endKey,
diff --git a/src/mongo/db/query/internal_plans.h b/src/mongo/db/query/internal_plans.h
index 3846dca76bc..c7507243369 100644
--- a/src/mongo/db/query/internal_plans.h
+++ b/src/mongo/db/query/internal_plans.h
@@ -39,6 +39,7 @@ namespace mongo {
class BSONObj;
class Collection;
+class CollectionPtr;
class IndexDescriptor;
class OperationContext;
class PlanStage;
@@ -72,7 +73,7 @@ public:
static std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> collectionScan(
OperationContext* opCtx,
StringData ns,
- const Collection* collection,
+ const CollectionPtr& collection,
PlanYieldPolicy::YieldPolicy yieldPolicy,
const Direction direction = FORWARD,
boost::optional<RecordId> resumeAfterRecordId = boost::none);
@@ -82,7 +83,7 @@ public:
*/
static std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> deleteWithCollectionScan(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<DeleteStageParams> params,
PlanYieldPolicy::YieldPolicy yieldPolicy,
Direction direction = FORWARD);
@@ -92,7 +93,7 @@ public:
*/
static std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> indexScan(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexDescriptor* descriptor,
const BSONObj& startKey,
const BSONObj& endKey,
@@ -106,7 +107,7 @@ public:
*/
static std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> deleteWithIndexScan(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
std::unique_ptr<DeleteStageParams> params,
const IndexDescriptor* descriptor,
const BSONObj& startKey,
@@ -120,7 +121,7 @@ public:
*/
static std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> updateWithIdHack(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const UpdateStageParams& params,
const IndexDescriptor* descriptor,
const BSONObj& key,
@@ -135,7 +136,7 @@ private:
static std::unique_ptr<PlanStage> _collectionScan(
const boost::intrusive_ptr<ExpressionContext>& expCtx,
WorkingSet* ws,
- const Collection* collection,
+ const CollectionPtr& collection,
Direction direction,
boost::optional<RecordId> resumeAfterRecordId = boost::none);
@@ -147,7 +148,7 @@ private:
static std::unique_ptr<PlanStage> _indexScan(
const boost::intrusive_ptr<ExpressionContext>& expCtx,
WorkingSet* ws,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexDescriptor* descriptor,
const BSONObj& startKey,
const BSONObj& endKey,
diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h
index 3fe99fcad2d..60a832e0491 100644
--- a/src/mongo/db/query/plan_executor.h
+++ b/src/mongo/db/query/plan_executor.h
@@ -187,7 +187,7 @@ public:
* WriteConflictException is encountered. If the time limit is exceeded during this retry
* process, throws ErrorCodes::MaxTimeMSExpired.
*/
- virtual void restoreState() = 0;
+ virtual void restoreState(const Yieldable* yieldable) = 0;
/**
* Detaches from the OperationContext and releases any storage-engine state.
diff --git a/src/mongo/db/query/plan_executor_factory.cpp b/src/mongo/db/query/plan_executor_factory.cpp
index 3630e975646..93ee590d1dc 100644
--- a/src/mongo/db/query/plan_executor_factory.cpp
+++ b/src/mongo/db/query/plan_executor_factory.cpp
@@ -45,7 +45,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> make(
std::unique_ptr<CanonicalQuery> cq,
std::unique_ptr<WorkingSet> ws,
std::unique_ptr<PlanStage> rt,
- const Collection* collection,
+ const CollectionPtr& collection,
PlanYieldPolicy::YieldPolicy yieldPolicy,
NamespaceString nss,
std::unique_ptr<QuerySolution> qs) {
@@ -65,7 +65,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> make(
const boost::intrusive_ptr<ExpressionContext>& expCtx,
std::unique_ptr<WorkingSet> ws,
std::unique_ptr<PlanStage> rt,
- const Collection* collection,
+ const CollectionPtr& collection,
PlanYieldPolicy::YieldPolicy yieldPolicy,
NamespaceString nss,
std::unique_ptr<QuerySolution> qs) {
@@ -87,7 +87,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> make(
std::unique_ptr<QuerySolution> qs,
std::unique_ptr<CanonicalQuery> cq,
const boost::intrusive_ptr<ExpressionContext>& expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
NamespaceString nss,
PlanYieldPolicy::YieldPolicy yieldPolicy) {
@@ -113,7 +113,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> make(
OperationContext* opCtx,
std::unique_ptr<CanonicalQuery> cq,
std::pair<std::unique_ptr<sbe::PlanStage>, stage_builder::PlanStageData> root,
- const Collection* collection,
+ const CollectionPtr& collection,
NamespaceString nss,
std::unique_ptr<PlanYieldPolicySBE> yieldPolicy) {
@@ -142,7 +142,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> make(
OperationContext* opCtx,
std::unique_ptr<CanonicalQuery> cq,
std::pair<std::unique_ptr<sbe::PlanStage>, stage_builder::PlanStageData> root,
- const Collection* collection,
+ const CollectionPtr& collection,
NamespaceString nss,
std::queue<std::pair<BSONObj, boost::optional<RecordId>>> stash,
std::unique_ptr<PlanYieldPolicySBE> yieldPolicy) {
diff --git a/src/mongo/db/query/plan_executor_factory.h b/src/mongo/db/query/plan_executor_factory.h
index 56f6fe87cf4..207e3065e20 100644
--- a/src/mongo/db/query/plan_executor_factory.h
+++ b/src/mongo/db/query/plan_executor_factory.h
@@ -65,7 +65,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> make(
std::unique_ptr<CanonicalQuery> cq,
std::unique_ptr<WorkingSet> ws,
std::unique_ptr<PlanStage> rt,
- const Collection* collection,
+ const CollectionPtr& collection,
PlanYieldPolicy::YieldPolicy yieldPolicy,
NamespaceString nss = NamespaceString(),
std::unique_ptr<QuerySolution> qs = nullptr);
@@ -81,7 +81,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> make(
const boost::intrusive_ptr<ExpressionContext>& expCtx,
std::unique_ptr<WorkingSet> ws,
std::unique_ptr<PlanStage> rt,
- const Collection* collection,
+ const CollectionPtr& collection,
PlanYieldPolicy::YieldPolicy yieldPolicy,
NamespaceString nss = NamespaceString(),
std::unique_ptr<QuerySolution> qs = nullptr);
@@ -93,7 +93,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> make(
std::unique_ptr<QuerySolution> qs,
std::unique_ptr<CanonicalQuery> cq,
const boost::intrusive_ptr<ExpressionContext>& expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
NamespaceString nss,
PlanYieldPolicy::YieldPolicy yieldPolicy);
@@ -105,7 +105,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> make(
OperationContext* opCtx,
std::unique_ptr<CanonicalQuery> cq,
std::pair<std::unique_ptr<sbe::PlanStage>, stage_builder::PlanStageData> root,
- const Collection* collection,
+ const CollectionPtr& collection,
NamespaceString nss,
std::unique_ptr<PlanYieldPolicySBE> yieldPolicy);
@@ -118,7 +118,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> make(
OperationContext* opCtx,
std::unique_ptr<CanonicalQuery> cq,
std::pair<std::unique_ptr<sbe::PlanStage>, stage_builder::PlanStageData> root,
- const Collection* collection,
+ const CollectionPtr& collection,
NamespaceString nss,
std::queue<std::pair<BSONObj, boost::optional<RecordId>>> stash,
std::unique_ptr<PlanYieldPolicySBE> yieldPolicy);
diff --git a/src/mongo/db/query/plan_executor_impl.cpp b/src/mongo/db/query/plan_executor_impl.cpp
index 7be8aa4ef87..915bc2ad34b 100644
--- a/src/mongo/db/query/plan_executor_impl.cpp
+++ b/src/mongo/db/query/plan_executor_impl.cpp
@@ -83,14 +83,15 @@ MONGO_FAIL_POINT_DEFINE(planExecutorHangBeforeShouldWaitForInserts);
 * Constructs a PlanYieldPolicy based on 'policy' and the given 'yieldable'.
*/
std::unique_ptr<PlanYieldPolicy> makeYieldPolicy(PlanExecutorImpl* exec,
- PlanYieldPolicy::YieldPolicy policy) {
+ PlanYieldPolicy::YieldPolicy policy,
+ const Yieldable* yieldable) {
switch (policy) {
case PlanYieldPolicy::YieldPolicy::YIELD_AUTO:
case PlanYieldPolicy::YieldPolicy::YIELD_MANUAL:
case PlanYieldPolicy::YieldPolicy::NO_YIELD:
case PlanYieldPolicy::YieldPolicy::WRITE_CONFLICT_RETRY_ONLY:
case PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY: {
- return std::make_unique<PlanYieldPolicyImpl>(exec, policy);
+ return std::make_unique<PlanYieldPolicyImpl>(exec, policy, yieldable);
}
case PlanYieldPolicy::YieldPolicy::ALWAYS_TIME_OUT: {
return std::make_unique<AlwaysTimeOutYieldPolicy>(exec);
@@ -130,7 +131,7 @@ PlanExecutorImpl::PlanExecutorImpl(OperationContext* opCtx,
unique_ptr<QuerySolution> qs,
unique_ptr<CanonicalQuery> cq,
const boost::intrusive_ptr<ExpressionContext>& expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
NamespaceString nss,
PlanYieldPolicy::YieldPolicy yieldPolicy)
: _opCtx(opCtx),
@@ -141,8 +142,10 @@ PlanExecutorImpl::PlanExecutorImpl(OperationContext* opCtx,
_root(std::move(rt)),
_nss(std::move(nss)),
// There's no point in yielding if the collection doesn't exist.
- _yieldPolicy(makeYieldPolicy(
- this, collection ? yieldPolicy : PlanYieldPolicy::YieldPolicy::NO_YIELD)) {
+ _yieldPolicy(
+ makeYieldPolicy(this,
+ collection ? yieldPolicy : PlanYieldPolicy::YieldPolicy::NO_YIELD,
+ collection ? &collection : nullptr)) {
invariant(!_expCtx || _expCtx->opCtx == _opCtx);
invariant(!_cq || !_expCtx || _cq->getExpCtx() == _expCtx);
@@ -243,12 +246,13 @@ void PlanExecutorImpl::saveState() {
if (!isMarkedAsKilled()) {
_root->saveState();
}
+ _yieldPolicy->setYieldable(nullptr);
_currentState = kSaved;
}
-void PlanExecutorImpl::restoreState() {
+void PlanExecutorImpl::restoreState(const Yieldable* yieldable) {
try {
- restoreStateWithoutRetrying();
+ restoreStateWithoutRetrying(yieldable);
} catch (const WriteConflictException&) {
if (!_yieldPolicy->canAutoYield())
throw;
@@ -258,9 +262,10 @@ void PlanExecutorImpl::restoreState() {
}
}
-void PlanExecutorImpl::restoreStateWithoutRetrying() {
+void PlanExecutorImpl::restoreStateWithoutRetrying(const Yieldable* yieldable) {
invariant(_currentState == kSaved);
+ _yieldPolicy->setYieldable(yieldable);
if (!isMarkedAsKilled()) {
_root->restoreState();
}
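Note: a minimal sketch of the save/restore handoff the two hunks above establish. The executor clears the policy's yieldable when state is saved and reinstates the caller-supplied one on restore; 'YieldPolicyStub' and 'ExecutorSketch' are hypothetical stand-ins, and only setYieldable()/saveState()/restoreState() correspond to names in this patch.

#include <cassert>

class Yieldable;  // opaque here; its yield()/restore() contract is sketched later

struct YieldPolicyStub {
    const Yieldable* current = nullptr;
    void setYieldable(const Yieldable* y) { current = y; }  // mirrors the new virtual
};

struct ExecutorSketch {
    YieldPolicyStub policy;
    bool saved = false;

    void saveState() {
        policy.setYieldable(nullptr);  // nothing may be yielded while saved
        saved = true;
    }
    void restoreState(const Yieldable* yieldable) {
        assert(saved);
        policy.setYieldable(yieldable);  // reinstate before the plan tree restores
        saved = false;
    }
};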
diff --git a/src/mongo/db/query/plan_executor_impl.h b/src/mongo/db/query/plan_executor_impl.h
index 865a20fc515..651937eeb3f 100644
--- a/src/mongo/db/query/plan_executor_impl.h
+++ b/src/mongo/db/query/plan_executor_impl.h
@@ -59,7 +59,7 @@ public:
std::unique_ptr<QuerySolution> qs,
std::unique_ptr<CanonicalQuery> cq,
const boost::intrusive_ptr<ExpressionContext>& expCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
NamespaceString nss,
PlanYieldPolicy::YieldPolicy yieldPolicy);
@@ -68,7 +68,7 @@ public:
const NamespaceString& nss() const final;
OperationContext* getOpCtx() const final;
void saveState() final;
- void restoreState() final;
+ void restoreState(const Yieldable* yieldable) final;
void detachFromOperationContext() final;
void reattachToOperationContext(OperationContext* opCtx) final;
ExecState getNextDocument(Document* objOut, RecordId* dlOut) final;
@@ -96,7 +96,7 @@ public:
*
* This is only public for PlanYieldPolicy. DO NOT CALL ANYWHERE ELSE.
*/
- void restoreStateWithoutRetrying();
+ void restoreStateWithoutRetrying(const Yieldable* yieldable);
/**
* Return a pointer to this executor's MultiPlanStage, or nullptr if it does not have one.
diff --git a/src/mongo/db/query/plan_executor_sbe.cpp b/src/mongo/db/query/plan_executor_sbe.cpp
index 3e65c0c46e7..4e8931a00f1 100644
--- a/src/mongo/db/query/plan_executor_sbe.cpp
+++ b/src/mongo/db/query/plan_executor_sbe.cpp
@@ -42,7 +42,7 @@ PlanExecutorSBE::PlanExecutorSBE(
OperationContext* opCtx,
std::unique_ptr<CanonicalQuery> cq,
std::pair<std::unique_ptr<sbe::PlanStage>, stage_builder::PlanStageData> root,
- const Collection* collection,
+ const CollectionPtr& collection,
NamespaceString nss,
bool isOpen,
boost::optional<std::queue<std::pair<BSONObj, boost::optional<RecordId>>>> stash,
@@ -105,7 +105,7 @@ void PlanExecutorSBE::saveState() {
_root->saveState();
}
-void PlanExecutorSBE::restoreState() {
+void PlanExecutorSBE::restoreState(const Yieldable* yieldable) {
invariant(_root);
_root->restoreState();
}
diff --git a/src/mongo/db/query/plan_executor_sbe.h b/src/mongo/db/query/plan_executor_sbe.h
index 680f76c578e..0e3ebb3505c 100644
--- a/src/mongo/db/query/plan_executor_sbe.h
+++ b/src/mongo/db/query/plan_executor_sbe.h
@@ -43,7 +43,7 @@ public:
OperationContext* opCtx,
std::unique_ptr<CanonicalQuery> cq,
std::pair<std::unique_ptr<sbe::PlanStage>, stage_builder::PlanStageData> root,
- const Collection* collection,
+ const CollectionPtr& collection,
NamespaceString nss,
bool isOpen,
boost::optional<std::queue<std::pair<BSONObj, boost::optional<RecordId>>>> stash,
@@ -62,7 +62,7 @@ public:
}
void saveState();
- void restoreState();
+ void restoreState(const Yieldable* yieldable);
void detachFromOperationContext();
void reattachToOperationContext(OperationContext* opCtx);
diff --git a/src/mongo/db/query/plan_yield_policy.h b/src/mongo/db/query/plan_yield_policy.h
index 1a10961baa5..8fe09adb8e2 100644
--- a/src/mongo/db/query/plan_yield_policy.h
+++ b/src/mongo/db/query/plan_yield_policy.h
@@ -38,6 +38,7 @@
namespace mongo {
class ClockSource;
+class Yieldable;
class PlanYieldPolicy {
public:
@@ -238,6 +239,11 @@ public:
return _policy;
}
+ /**
+     * Set a new yieldable instance if the policy supports it.
+ */
+ virtual void setYieldable(const Yieldable* yieldable) {}
+
private:
/**
* Yields locks and calls 'abandonSnapshot()'. Calls 'whileYieldingFn()', if provided, while
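Note: the new setYieldable() hook defaults to a no-op and is overridden where it matters. For orientation, a hypothetical sketch of the contract it assumes; the real declaration lives in mongo/db/yieldable.h and is not part of this diff, but the const yield()/restore() pair matches how the yieldable is invoked in plan_yield_policy_impl.cpp below.

class Yieldable {
public:
    virtual ~Yieldable() = default;
    virtual void yield() const = 0;    // invoked before locks are released
    virtual void restore() const = 0;  // invoked after locks are reacquired
};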
diff --git a/src/mongo/db/query/plan_yield_policy_impl.cpp b/src/mongo/db/query/plan_yield_policy_impl.cpp
index 4fc3af37052..a88378d58f3 100644
--- a/src/mongo/db/query/plan_yield_policy_impl.cpp
+++ b/src/mongo/db/query/plan_yield_policy_impl.cpp
@@ -45,20 +45,24 @@ MONGO_FAIL_POINT_DEFINE(setInterruptOnlyPlansCheckForInterruptHang);
} // namespace
PlanYieldPolicyImpl::PlanYieldPolicyImpl(PlanExecutorImpl* exec,
- PlanYieldPolicy::YieldPolicy policy)
+ PlanYieldPolicy::YieldPolicy policy,
+ const Yieldable* yieldable)
: PlanYieldPolicy(exec->getOpCtx()->lockState()->isGlobalLockedRecursively()
? PlanYieldPolicy::YieldPolicy::NO_YIELD
: policy,
exec->getOpCtx()->getServiceContext()->getFastClockSource(),
internalQueryExecYieldIterations.load(),
Milliseconds{internalQueryExecYieldPeriodMS.load()}),
- _planYielding(exec) {}
+ _planYielding(exec),
+ _yieldable(yieldable) {}
Status PlanYieldPolicyImpl::yield(OperationContext* opCtx, std::function<void()> whileYieldingFn) {
    // Can't use writeConflictRetry since we need to call saveState before resetting the
// transaction.
for (int attempt = 1; true; attempt++) {
try {
+            // Saving and restoring state modifies '_yieldable', so make a copy before we start.
+ const Yieldable* yieldable = _yieldable;
try {
_planYielding->saveState();
} catch (const WriteConflictException&) {
@@ -70,10 +74,10 @@ Status PlanYieldPolicyImpl::yield(OperationContext* opCtx, std::function<void()>
opCtx->recoveryUnit()->abandonSnapshot();
} else {
// Release and reacquire locks.
- _yieldAllLocks(opCtx, whileYieldingFn, _planYielding->nss());
+ _yieldAllLocks(opCtx, yieldable, whileYieldingFn, _planYielding->nss());
}
- _planYielding->restoreStateWithoutRetrying();
+ _planYielding->restoreStateWithoutRetrying(yieldable);
return Status::OK();
} catch (const WriteConflictException&) {
CurOp::get(opCtx)->debug().additiveMetrics.incrementWriteConflicts(1);
@@ -89,6 +93,7 @@ Status PlanYieldPolicyImpl::yield(OperationContext* opCtx, std::function<void()>
}
void PlanYieldPolicyImpl::_yieldAllLocks(OperationContext* opCtx,
+ const Yieldable* yieldable,
std::function<void()> whileYieldingFn,
const NamespaceString& planExecNS) {
// Things have to happen here in a specific order:
@@ -101,6 +106,10 @@ void PlanYieldPolicyImpl::_yieldAllLocks(OperationContext* opCtx,
Locker::LockSnapshot snapshot;
+ if (yieldable) {
+ yieldable->yield();
+ }
+
auto unlocked = locker->saveLockStateAndUnlock(&snapshot);
// Attempt to check for interrupt while locks are not held, in order to discourage the
@@ -129,6 +138,10 @@ void PlanYieldPolicyImpl::_yieldAllLocks(OperationContext* opCtx,
locker->restoreLockState(opCtx, snapshot);
+ if (yieldable) {
+ yieldable->restore();
+ }
+
// After yielding and reacquiring locks, the preconditions that were used to select our
// ReadSource initially need to be checked again. Queries hold an AutoGetCollectionForRead RAII
// lock for their lifetime, which may select a ReadSource based on state (e.g. replication
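Note: a condensed, compilable sketch of the ordering _yieldAllLocks() enforces above; 'LockerStub' is a hypothetical stand-in for Locker, and only the relative order of the four steps is the point.

struct Yieldable {  // as used above: yielded before unlock, restored after relock
    virtual ~Yieldable() = default;
    virtual void yield() const = 0;
    virtual void restore() const = 0;
};

struct LockerStub {
    void saveLockStateAndUnlock() {}  // stands in for Locker::saveLockStateAndUnlock
    void restoreLockState() {}        // stands in for Locker::restoreLockState
};

void yieldAllLocksSketch(LockerStub* locker, const Yieldable* yieldable) {
    if (yieldable)
        yieldable->yield();            // 1. detach yieldable state first
    locker->saveLockStateAndUnlock();  // 2. then release locks
    // ... interrupt checks and any whileYieldingFn() run here, lock-free ...
    locker->restoreLockState();        // 3. reacquire locks
    if (yieldable)
        yieldable->restore();          // 4. reattach yieldable state last
}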
diff --git a/src/mongo/db/query/plan_yield_policy_impl.h b/src/mongo/db/query/plan_yield_policy_impl.h
index be4163eedec..812c17638d0 100644
--- a/src/mongo/db/query/plan_yield_policy_impl.h
+++ b/src/mongo/db/query/plan_yield_policy_impl.h
@@ -31,14 +31,20 @@
#include "mongo/db/query/plan_executor_impl.h"
#include "mongo/db/query/plan_yield_policy.h"
+#include "mongo/db/yieldable.h"
namespace mongo {
class PlanYieldPolicyImpl final : public PlanYieldPolicy {
public:
- PlanYieldPolicyImpl(PlanExecutorImpl* exec, PlanYieldPolicy::YieldPolicy policy);
+ PlanYieldPolicyImpl(PlanExecutorImpl* exec,
+ PlanYieldPolicy::YieldPolicy policy,
+ const Yieldable* yieldable);
private:
+ void setYieldable(const Yieldable* yieldable) override {
+ _yieldable = yieldable;
+ }
Status yield(OperationContext* opCtx, std::function<void()> whileYieldingFn = nullptr) override;
void preCheckInterruptOnly(OperationContext* opCtx) override;
@@ -52,12 +58,14 @@ private:
* The whileYieldingFn will be executed after unlocking the locks and before re-acquiring them.
*/
void _yieldAllLocks(OperationContext* opCtx,
+ const Yieldable* yieldable,
std::function<void()> whileYieldingFn,
const NamespaceString& planExecNS);
// The plan executor which this yield policy is responsible for yielding. Must not outlive the
// plan executor.
PlanExecutorImpl* const _planYielding;
+ const Yieldable* _yieldable;
};
} // namespace mongo
diff --git a/src/mongo/db/query/planner_analysis.h b/src/mongo/db/query/planner_analysis.h
index ba1e6a8ac47..e5700483d30 100644
--- a/src/mongo/db/query/planner_analysis.h
+++ b/src/mongo/db/query/planner_analysis.h
@@ -36,6 +36,7 @@
namespace mongo {
class Collection;
+class CollectionPtr;
class QueryPlannerAnalysis {
public:
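Note: this and several later headers (rebuild_indexes.h, dbcheck.h, oplog.h, storage_interface.h, ...) only add a forward declaration of CollectionPtr. A small sketch of why that suffices: a header that mentions the type solely by reference or pointer never needs its definition, so the #include can stay in the .cpp.

class CollectionPtr;  // forward declaration only; no #include required here

class PlannerSketch {  // hypothetical consumer, mirroring the headers in this patch
public:
    void analyze(const CollectionPtr& collection);  // fine: reference only
};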
diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp
index 18d7f3543a7..13a9fe6129b 100644
--- a/src/mongo/db/query/query_planner.cpp
+++ b/src/mongo/db/query/query_planner.cpp
@@ -1124,7 +1124,7 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
StatusWith<QueryPlanner::SubqueriesPlanningResult> QueryPlanner::planSubqueries(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const PlanCache* planCache,
const CanonicalQuery& query,
const QueryPlannerParams& params) {
diff --git a/src/mongo/db/query/query_planner.h b/src/mongo/db/query/query_planner.h
index e9d27bea0f5..236ec4f0269 100644
--- a/src/mongo/db/query/query_planner.h
+++ b/src/mongo/db/query/query_planner.h
@@ -38,6 +38,7 @@ namespace mongo {
class CachedSolution;
class Collection;
+class CollectionPtr;
/**
* QueryPlanner's job is to provide an entry point to the query planning and optimization
@@ -106,7 +107,7 @@ public:
* lists of query solutions in 'SubqueriesPlanningResult'.
*/
static StatusWith<SubqueriesPlanningResult> planSubqueries(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const PlanCache* planCache,
const CanonicalQuery& query,
const QueryPlannerParams& params);
diff --git a/src/mongo/db/query/sbe_cached_solution_planner.h b/src/mongo/db/query/sbe_cached_solution_planner.h
index 264ddfbbbfa..9474cca0073 100644
--- a/src/mongo/db/query/sbe_cached_solution_planner.h
+++ b/src/mongo/db/query/sbe_cached_solution_planner.h
@@ -43,7 +43,7 @@ namespace mongo::sbe {
class CachedSolutionPlanner final : public BaseRuntimePlanner {
public:
CachedSolutionPlanner(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CanonicalQuery& cq,
const QueryPlannerParams& queryParams,
size_t decisionReads,
diff --git a/src/mongo/db/query/sbe_multi_planner.h b/src/mongo/db/query/sbe_multi_planner.h
index 48dca7081bf..80ac325c3b3 100644
--- a/src/mongo/db/query/sbe_multi_planner.h
+++ b/src/mongo/db/query/sbe_multi_planner.h
@@ -43,7 +43,7 @@ namespace mongo::sbe {
class MultiPlanner final : public BaseRuntimePlanner {
public:
MultiPlanner(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CanonicalQuery& cq,
PlanCachingMode cachingMode,
PlanYieldPolicySBE* yieldPolicy)
diff --git a/src/mongo/db/query/sbe_runtime_planner.cpp b/src/mongo/db/query/sbe_runtime_planner.cpp
index 254cfeb84de..1da89caf8e1 100644
--- a/src/mongo/db/query/sbe_runtime_planner.cpp
+++ b/src/mongo/db/query/sbe_runtime_planner.cpp
@@ -30,6 +30,7 @@
#include "mongo/db/query/sbe_runtime_planner.h"
+#include "mongo/db/catalog/collection.h"
#include "mongo/db/exec/sbe/expressions/expression.h"
#include "mongo/db/exec/trial_period_utils.h"
#include "mongo/db/query/plan_executor_sbe.h"
diff --git a/src/mongo/db/query/sbe_runtime_planner.h b/src/mongo/db/query/sbe_runtime_planner.h
index d399b096877..c7dbfbd73d2 100644
--- a/src/mongo/db/query/sbe_runtime_planner.h
+++ b/src/mongo/db/query/sbe_runtime_planner.h
@@ -60,7 +60,7 @@ public:
class BaseRuntimePlanner : public RuntimePlanner {
public:
BaseRuntimePlanner(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CanonicalQuery& cq,
PlanYieldPolicySBE* yieldPolicy)
: _opCtx(opCtx), _collection(collection), _cq(cq), _yieldPolicy(yieldPolicy) {
@@ -95,7 +95,7 @@ protected:
std::vector<std::pair<std::unique_ptr<PlanStage>, stage_builder::PlanStageData>> roots);
OperationContext* const _opCtx;
- const Collection* const _collection;
+ const CollectionPtr& _collection;
const CanonicalQuery& _cq;
PlanYieldPolicySBE* const _yieldPolicy;
};
diff --git a/src/mongo/db/query/sbe_stage_builder.h b/src/mongo/db/query/sbe_stage_builder.h
index 203b04ef8ff..568f45d2b3f 100644
--- a/src/mongo/db/query/sbe_stage_builder.h
+++ b/src/mongo/db/query/sbe_stage_builder.h
@@ -89,7 +89,7 @@ struct PlanStageData {
class SlotBasedStageBuilder final : public StageBuilder<sbe::PlanStage> {
public:
SlotBasedStageBuilder(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CanonicalQuery& cq,
const QuerySolution& solution,
PlanYieldPolicySBE* yieldPolicy,
diff --git a/src/mongo/db/query/sbe_stage_builder_coll_scan.cpp b/src/mongo/db/query/sbe_stage_builder_coll_scan.cpp
index 8b44fb09c89..bf099dee54d 100644
--- a/src/mongo/db/query/sbe_stage_builder_coll_scan.cpp
+++ b/src/mongo/db/query/sbe_stage_builder_coll_scan.cpp
@@ -54,13 +54,13 @@ namespace {
 * Checks whether a callback function should be created for a ScanStage and, if so, returns it. The
 * logic in the provided callback will be executed when the ScanStage is opened or reopened.
*/
-sbe::ScanOpenCallback makeOpenCallbackIfNeeded(const Collection* collection,
+sbe::ScanOpenCallback makeOpenCallbackIfNeeded(const CollectionPtr& collection,
const CollectionScanNode* csn) {
if (csn->direction == CollectionScanParams::FORWARD && csn->shouldWaitForOplogVisibility) {
invariant(!csn->tailable);
invariant(collection->ns().isOplog());
- return [](OperationContext* opCtx, const Collection* collection, bool reOpen) {
+ return [](OperationContext* opCtx, const CollectionPtr& collection, bool reOpen) {
if (!reOpen) {
// Forward, non-tailable scans from the oplog need to wait until all oplog entries
// before the read begins to be visible. This isn't needed for reverse scans because
@@ -87,7 +87,7 @@ sbe::ScanOpenCallback makeOpenCallbackIfNeeded(const Collection* collection,
* of the same SlotId (the latter is returned purely for convenience purposes).
*/
std::tuple<std::vector<std::string>, sbe::value::SlotVector, boost::optional<sbe::value::SlotId>>
-makeOplogTimestampSlotsIfNeeded(const Collection* collection,
+makeOplogTimestampSlotsIfNeeded(const CollectionPtr& collection,
sbe::value::SlotIdGenerator* slotIdGenerator,
bool shouldTrackLatestOplogTimestamp) {
if (shouldTrackLatestOplogTimestamp) {
@@ -118,7 +118,7 @@ std::tuple<sbe::value::SlotId,
boost::optional<sbe::value::SlotId>,
std::unique_ptr<sbe::PlanStage>>
generateOptimizedOplogScan(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CollectionScanNode* csn,
sbe::value::SlotIdGenerator* slotIdGenerator,
sbe::value::FrameIdGenerator* frameIdGenerator,
@@ -300,7 +300,7 @@ std::tuple<sbe::value::SlotId,
boost::optional<sbe::value::SlotId>,
std::unique_ptr<sbe::PlanStage>>
generateGenericCollScan(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CollectionScanNode* csn,
sbe::value::SlotIdGenerator* slotIdGenerator,
sbe::value::FrameIdGenerator* frameIdGenerator,
@@ -447,7 +447,7 @@ std::tuple<sbe::value::SlotId,
boost::optional<sbe::value::SlotId>,
std::unique_ptr<sbe::PlanStage>>
generateCollScan(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CollectionScanNode* csn,
sbe::value::SlotIdGenerator* slotIdGenerator,
sbe::value::FrameIdGenerator* frameIdGenerator,
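Note: a self-contained sketch of the callback shape after this change, with hypothetical stub types (OpCtxStub, CollPtrStub) standing in for OperationContext and CollectionPtr; the real callback above waits for oplog visibility on first open.

#include <functional>

struct OpCtxStub {};
struct CollPtrStub {};

using ScanOpenCallbackSketch =
    std::function<void(OpCtxStub*, const CollPtrStub&, bool /*reOpen*/)>;

ScanOpenCallbackSketch makeCallbackSketch() {
    return [](OpCtxStub*, const CollPtrStub&, bool reOpen) {
        if (!reOpen) {
            // first open: e.g. wait for earlier oplog entries to become visible
        }
    };
}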
diff --git a/src/mongo/db/query/sbe_stage_builder_coll_scan.h b/src/mongo/db/query/sbe_stage_builder_coll_scan.h
index 0fff4cf6212..338c5cff894 100644
--- a/src/mongo/db/query/sbe_stage_builder_coll_scan.h
+++ b/src/mongo/db/query/sbe_stage_builder_coll_scan.h
@@ -53,7 +53,7 @@ std::tuple<sbe::value::SlotId,
boost::optional<sbe::value::SlotId>,
std::unique_ptr<sbe::PlanStage>>
generateCollScan(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CollectionScanNode* csn,
sbe::value::SlotIdGenerator* slotIdGenerator,
sbe::value::FrameIdGenerator* frameIdGenerator,
diff --git a/src/mongo/db/query/sbe_stage_builder_index_scan.cpp b/src/mongo/db/query/sbe_stage_builder_index_scan.cpp
index b7da05ca14b..2e98673bb93 100644
--- a/src/mongo/db/query/sbe_stage_builder_index_scan.cpp
+++ b/src/mongo/db/query/sbe_stage_builder_index_scan.cpp
@@ -266,7 +266,7 @@ makeIntervalsFromIndexBounds(const IndexBounds& bounds,
*/
std::pair<sbe::value::SlotId, std::unique_ptr<sbe::PlanStage>>
generateOptimizedMultiIntervalIndexScan(
- const Collection* collection,
+ const CollectionPtr& collection,
const std::string& indexName,
bool forward,
std::vector<std::pair<std::unique_ptr<KeyString::Value>, std::unique_ptr<KeyString::Value>>>
@@ -386,7 +386,7 @@ std::pair<sbe::value::SlotId, std::unique_ptr<sbe::PlanStage>> makeAnchorBranchF
* consisting of valid recordId's and index seek keys to restart the index scan from.
*/
std::pair<sbe::value::SlotId, std::unique_ptr<sbe::PlanStage>>
-makeRecursiveBranchForGenericIndexScan(const Collection* collection,
+makeRecursiveBranchForGenericIndexScan(const CollectionPtr& collection,
const std::string& indexName,
const sbe::CheckBoundsParams& params,
sbe::SpoolId spoolId,
@@ -513,7 +513,7 @@ makeRecursiveBranchForGenericIndexScan(const Collection* collection,
* - The recursion is terminated when the sspool becomes empty.
*/
std::pair<sbe::value::SlotId, std::unique_ptr<sbe::PlanStage>>
-generateGenericMultiIntervalIndexScan(const Collection* collection,
+generateGenericMultiIntervalIndexScan(const CollectionPtr& collection,
const IndexScanNode* ixn,
KeyString::Version version,
Ordering ordering,
@@ -614,7 +614,7 @@ generateGenericMultiIntervalIndexScan(const Collection* collection,
} // namespace
std::pair<sbe::value::SlotId, std::unique_ptr<sbe::PlanStage>> generateSingleIntervalIndexScan(
- const Collection* collection,
+ const CollectionPtr& collection,
const std::string& indexName,
bool forward,
std::unique_ptr<KeyString::Value> lowKey,
@@ -673,7 +673,7 @@ std::pair<sbe::value::SlotId, std::unique_ptr<sbe::PlanStage>> generateSingleInt
std::pair<sbe::value::SlotId, std::unique_ptr<sbe::PlanStage>> generateIndexScan(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexScanNode* ixn,
boost::optional<sbe::value::SlotId> returnKeySlot,
sbe::value::SlotIdGenerator* slotIdGenerator,
diff --git a/src/mongo/db/query/sbe_stage_builder_index_scan.h b/src/mongo/db/query/sbe_stage_builder_index_scan.h
index beb1d983482..538e54dff77 100644
--- a/src/mongo/db/query/sbe_stage_builder_index_scan.h
+++ b/src/mongo/db/query/sbe_stage_builder_index_scan.h
@@ -40,7 +40,7 @@ namespace mongo::stage_builder {
*/
std::pair<sbe::value::SlotId, std::unique_ptr<sbe::PlanStage>> generateIndexScan(
OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexScanNode* ixn,
boost::optional<sbe::value::SlotId> returnKeySlot,
sbe::value::SlotIdGenerator* slotIdGenerator,
@@ -67,7 +67,7 @@ std::pair<sbe::value::SlotId, std::unique_ptr<sbe::PlanStage>> generateIndexScan
* in the index.
*/
std::pair<sbe::value::SlotId, std::unique_ptr<sbe::PlanStage>> generateSingleIntervalIndexScan(
- const Collection* collection,
+ const CollectionPtr& collection,
const std::string& indexName,
bool forward,
std::unique_ptr<KeyString::Value> lowKey,
diff --git a/src/mongo/db/query/sbe_sub_planner.h b/src/mongo/db/query/sbe_sub_planner.h
index 1bb465df0f4..5f708a2cd4d 100644
--- a/src/mongo/db/query/sbe_sub_planner.h
+++ b/src/mongo/db/query/sbe_sub_planner.h
@@ -43,7 +43,7 @@ namespace mongo::sbe {
class SubPlanner final : public BaseRuntimePlanner {
public:
SubPlanner(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CanonicalQuery& cq,
const QueryPlannerParams& queryParams,
PlanYieldPolicySBE* yieldPolicy)
diff --git a/src/mongo/db/query/stage_builder.h b/src/mongo/db/query/stage_builder.h
index dbee0b6b8b6..7e20618cd83 100644
--- a/src/mongo/db/query/stage_builder.h
+++ b/src/mongo/db/query/stage_builder.h
@@ -41,7 +41,7 @@ template <typename PlanStageType>
class StageBuilder {
public:
StageBuilder(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CanonicalQuery& cq,
const QuerySolution& solution)
: _opCtx(opCtx), _collection(collection), _cq(cq), _solution(solution) {}
@@ -56,7 +56,7 @@ public:
protected:
OperationContext* _opCtx;
- const Collection* _collection;
+ const CollectionPtr& _collection;
const CanonicalQuery& _cq;
const QuerySolution& _solution;
};
diff --git a/src/mongo/db/query/stage_builder_util.cpp b/src/mongo/db/query/stage_builder_util.cpp
index 9af708df67b..9b535c20b17 100644
--- a/src/mongo/db/query/stage_builder_util.cpp
+++ b/src/mongo/db/query/stage_builder_util.cpp
@@ -37,7 +37,7 @@
namespace mongo::stage_builder {
std::unique_ptr<PlanStage> buildClassicExecutableTree(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CanonicalQuery& cq,
const QuerySolution& solution,
WorkingSet* ws) {
@@ -54,7 +54,7 @@ std::unique_ptr<PlanStage> buildClassicExecutableTree(OperationContext* opCtx,
std::pair<std::unique_ptr<sbe::PlanStage>, stage_builder::PlanStageData>
buildSlotBasedExecutableTree(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CanonicalQuery& cq,
const QuerySolution& solution,
PlanYieldPolicy* yieldPolicy,
diff --git a/src/mongo/db/query/stage_builder_util.h b/src/mongo/db/query/stage_builder_util.h
index aa6c0abfad2..cd1f77594eb 100644
--- a/src/mongo/db/query/stage_builder_util.h
+++ b/src/mongo/db/query/stage_builder_util.h
@@ -45,14 +45,14 @@ namespace mongo::stage_builder {
* will consist of.
*/
std::unique_ptr<PlanStage> buildClassicExecutableTree(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CanonicalQuery& cq,
const QuerySolution& solution,
WorkingSet* ws);
std::pair<std::unique_ptr<sbe::PlanStage>, stage_builder::PlanStageData>
buildSlotBasedExecutableTree(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const CanonicalQuery& cq,
const QuerySolution& solution,
PlanYieldPolicy* yieldPolicy,
diff --git a/src/mongo/db/rebuild_indexes.cpp b/src/mongo/db/rebuild_indexes.cpp
index ea35ad418b8..52962679162 100644
--- a/src/mongo/db/rebuild_indexes.cpp
+++ b/src/mongo/db/rebuild_indexes.cpp
@@ -92,7 +92,7 @@ StatusWith<IndexNameObjs> getIndexNameObjs(OperationContext* opCtx,
}
Status rebuildIndexesOnCollection(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const std::vector<BSONObj>& indexSpecs,
RepairData repair) {
// Skip the rest if there are no indexes to rebuild.
diff --git a/src/mongo/db/rebuild_indexes.h b/src/mongo/db/rebuild_indexes.h
index 20ef0e7aff6..ea6c0cc2a94 100644
--- a/src/mongo/db/rebuild_indexes.h
+++ b/src/mongo/db/rebuild_indexes.h
@@ -37,6 +37,7 @@
namespace mongo {
class Collection;
+class CollectionPtr;
class OperationContext;
typedef std::pair<std::vector<std::string>, std::vector<BSONObj>> IndexNameObjs;
@@ -61,7 +62,7 @@ StatusWith<IndexNameObjs> getIndexNameObjs(OperationContext* opCtx,
*/
enum class RepairData { kYes, kNo };
Status rebuildIndexesOnCollection(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const std::vector<BSONObj>& indexSpecs,
RepairData repair);
@@ -70,7 +71,7 @@ Status rebuildIndexesOnCollection(OperationContext* opCtx,
* One example usage is when a 'dropIndex' command is rolled back. The dropped index must be remade.
*/
Status rebuildIndexesOnCollection(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const std::vector<BSONObj>& indexSpecs);
} // namespace mongo
diff --git a/src/mongo/db/repair.cpp b/src/mongo/db/repair.cpp
index 950e0b8d9aa..44bba37e1a0 100644
--- a/src/mongo/db/repair.cpp
+++ b/src/mongo/db/repair.cpp
@@ -87,7 +87,7 @@ Status rebuildIndexesForNamespace(OperationContext* opCtx,
}
namespace {
-Status dropUnfinishedIndexes(OperationContext* opCtx, const Collection* collection) {
+Status dropUnfinishedIndexes(OperationContext* opCtx, const CollectionPtr& collection) {
std::vector<std::string> indexNames;
auto durableCatalog = DurableCatalog::get(opCtx);
durableCatalog->getAllIndexes(opCtx, collection->getCatalogId(), &indexNames);
diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript
index de85cf9021e..bff884e7844 100644
--- a/src/mongo/db/repl/SConscript
+++ b/src/mongo/db/repl/SConscript
@@ -21,6 +21,7 @@ env.Library(
'local_oplog_info.cpp',
],
LIBDEPS_PRIVATE=[
+ '$BUILD_DIR/mongo/db/catalog/collection',
'$BUILD_DIR/mongo/db/logical_time',
'$BUILD_DIR/mongo/db/storage/flow_control',
'$BUILD_DIR/mongo/db/vector_clock_mutable',
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index d9c8565507f..8643f55ffff 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -315,7 +315,7 @@ Status _checkPrecondition(OperationContext* opCtx,
if (!database) {
return {ErrorCodes::NamespaceNotFound, "database in ns does not exist: " + nss.ns()};
}
- const Collection* collection =
+ CollectionPtr collection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
if (!collection) {
return {ErrorCodes::NamespaceNotFound, "collection in ns does not exist: " + nss.ns()};
diff --git a/src/mongo/db/repl/collection_bulk_loader.h b/src/mongo/db/repl/collection_bulk_loader.h
index e58e262a674..7fc018f6a6a 100644
--- a/src/mongo/db/repl/collection_bulk_loader.h
+++ b/src/mongo/db/repl/collection_bulk_loader.h
@@ -39,6 +39,7 @@
namespace mongo {
class Collection;
+class CollectionPtr;
class OperationContext;
namespace repl {
diff --git a/src/mongo/db/repl/dbcheck.cpp b/src/mongo/db/repl/dbcheck.cpp
index 7da0b9c64a9..34127bb5e95 100644
--- a/src/mongo/db/repl/dbcheck.cpp
+++ b/src/mongo/db/repl/dbcheck.cpp
@@ -172,7 +172,7 @@ std::unique_ptr<HealthLogEntry> dbCheckBatchEntry(const NamespaceString& nss,
}
DbCheckHasher::DbCheckHasher(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const BSONKey& start,
const BSONKey& end,
int64_t maxCount,
@@ -233,7 +233,7 @@ std::string hashCollectionInfo(const DbCheckCollectionInformation& info) {
}
std::pair<boost::optional<UUID>, boost::optional<UUID>> getPrevAndNextUUIDs(
- OperationContext* opCtx, const Collection* collection) {
+ OperationContext* opCtx, const CollectionPtr& collection) {
const CollectionCatalog& catalog = CollectionCatalog::get(opCtx);
const UUID uuid = collection->uuid();
@@ -350,7 +350,7 @@ bool DbCheckHasher::_canHash(const BSONObj& obj) {
return true;
}
-std::vector<BSONObj> collectionIndexInfo(OperationContext* opCtx, const Collection* collection) {
+std::vector<BSONObj> collectionIndexInfo(OperationContext* opCtx, const CollectionPtr& collection) {
std::vector<BSONObj> result;
std::vector<std::string> names;
@@ -370,7 +370,7 @@ std::vector<BSONObj> collectionIndexInfo(OperationContext* opCtx, const Collecti
return result;
}
-BSONObj collectionOptions(OperationContext* opCtx, const Collection* collection) {
+BSONObj collectionOptions(OperationContext* opCtx, const CollectionPtr& collection) {
return DurableCatalog::get(opCtx)
->getCollectionOptions(opCtx, collection->getCatalogId())
.toBSON();
@@ -407,8 +407,7 @@ namespace {
Status dbCheckBatchOnSecondary(OperationContext* opCtx,
const repl::OpTime& optime,
const DbCheckOplogBatch& entry) {
- AutoGetCollectionForDbCheck agc(opCtx, entry.getNss(), entry.getType());
- const Collection* collection = agc.getCollection();
+ AutoGetCollectionForDbCheck collection(opCtx, entry.getNss(), entry.getType());
std::string msg = "replication consistency check";
if (!collection) {
@@ -419,7 +418,7 @@ Status dbCheckBatchOnSecondary(OperationContext* opCtx,
Status status = Status::OK();
boost::optional<DbCheckHasher> hasher;
try {
- hasher.emplace(opCtx, collection, entry.getMinKey(), entry.getMaxKey());
+ hasher.emplace(opCtx, collection.getCollection(), entry.getMinKey(), entry.getMaxKey());
} catch (const DBException& exception) {
auto logEntry = dbCheckErrorHealthLogEntry(
entry.getNss(), msg, OplogEntriesEnum::Batch, exception.toStatus());
diff --git a/src/mongo/db/repl/dbcheck.h b/src/mongo/db/repl/dbcheck.h
index 99c28b4be59..2a5b2433cf8 100644
--- a/src/mongo/db/repl/dbcheck.h
+++ b/src/mongo/db/repl/dbcheck.h
@@ -41,6 +41,7 @@ namespace mongo {
// Forward declarations.
class Collection;
+class CollectionPtr;
class OperationContext;
namespace repl {
@@ -83,7 +84,7 @@ struct DbCheckCollectionInformation {
* previous or next UUID, return boost::none respectively.
*/
std::pair<boost::optional<UUID>, boost::optional<UUID>> getPrevAndNextUUIDs(
- OperationContext* opCtx, const Collection* collection);
+ OperationContext* opCtx, const CollectionPtr& collection);
/**
* Get a HealthLogEntry for a dbCheck collection.
@@ -118,7 +119,7 @@ public:
* @param maxBytes The maximum number of bytes to hash.
*/
DbCheckHasher(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const BSONKey& start,
const BSONKey& end,
int64_t maxCount = std::numeric_limits<int64_t>::max(),
@@ -191,26 +192,34 @@ public:
AutoGetCollectionForDbCheck(OperationContext* opCtx,
const NamespaceString& nss,
const OplogEntriesEnum& type);
- const Collection* getCollection(void) {
+ explicit operator bool() const {
+ return static_cast<bool>(getCollection());
+ }
+
+ const Collection* operator->() const {
+ return getCollection().get();
+ }
+
+ const CollectionPtr& getCollection() const {
return _collection;
}
private:
AutoGetDbForDbCheck _agd;
Lock::CollectionLock _collLock;
- const Collection* _collection;
+ CollectionPtr _collection;
};
/**
* Gather the index information for a collection.
*/
-std::vector<BSONObj> collectionIndexInfo(OperationContext* opCtx, const Collection* collection);
+std::vector<BSONObj> collectionIndexInfo(OperationContext* opCtx, const CollectionPtr& collection);
/**
* Gather other information for a collection.
*/
-BSONObj collectionOptions(OperationContext* opCtx, const Collection* collection);
+BSONObj collectionOptions(OperationContext* opCtx, const CollectionPtr& collection);
namespace repl {
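Note: with the operator bool() and operator->() added above, AutoGetCollectionForDbCheck reads like the other AutoGet* RAII types, as dbCheckBatchOnSecondary now demonstrates. A compilable usage sketch with hypothetical stubs (CollectionStub, AutoGetStub):

struct CollectionStub {
    const char* ns() const { return "test.coll"; }
};

struct AutoGetStub {
    const CollectionStub* coll = nullptr;
    explicit operator bool() const { return coll != nullptr; }
    const CollectionStub* operator->() const { return coll; }
};

const char* describe(const AutoGetStub& collection) {
    if (!collection)          // operator bool: did the lookup succeed?
        return "missing";
    return collection->ns();  // operator->: forwards to the underlying Collection
}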
diff --git a/src/mongo/db/repl/idempotency_test_fixture.cpp b/src/mongo/db/repl/idempotency_test_fixture.cpp
index ca996de2692..571b5abedfe 100644
--- a/src/mongo/db/repl/idempotency_test_fixture.cpp
+++ b/src/mongo/db/repl/idempotency_test_fixture.cpp
@@ -324,7 +324,7 @@ OplogEntry IdempotencyTest::partialTxn(LogicalSessionId lsid,
prevOpTime);
}
-std::string IdempotencyTest::computeDataHash(const Collection* collection) {
+std::string IdempotencyTest::computeDataHash(const CollectionPtr& collection) {
auto desc = collection->getIndexCatalog()->findIdIndex(_opCtx.get());
ASSERT_TRUE(desc);
auto exec = InternalPlanner::indexScan(_opCtx.get(),
@@ -375,7 +375,7 @@ std::vector<CollectionState> IdempotencyTest::validateAllCollections() {
CollectionState IdempotencyTest::validate(const NamespaceString& nss) {
auto collUUID = [&]() -> OptionalCollectionUUID {
AutoGetCollectionForReadCommand autoColl(_opCtx.get(), nss);
- if (auto collection = autoColl.getCollection()) {
+ if (const auto& collection = autoColl.getCollection()) {
return collection->uuid();
}
return boost::none;
@@ -388,8 +388,7 @@ CollectionState IdempotencyTest::validate(const NamespaceString& nss) {
}
{
- AutoGetCollectionForReadCommand autoColl(_opCtx.get(), nss);
- auto collection = autoColl.getCollection();
+ AutoGetCollectionForReadCommand collection(_opCtx.get(), nss);
if (!collection) {
// Return a mostly default initialized CollectionState struct with exists set to false
@@ -412,10 +411,9 @@ CollectionState IdempotencyTest::validate(const NamespaceString& nss) {
ASSERT_TRUE(validateResults.valid);
}
- AutoGetCollectionForReadCommand autoColl(_opCtx.get(), nss);
- auto collection = autoColl.getCollection();
+ AutoGetCollectionForReadCommand collection(_opCtx.get(), nss);
- std::string dataHash = computeDataHash(collection);
+ std::string dataHash = computeDataHash(collection.getCollection());
auto durableCatalog = DurableCatalog::get(_opCtx.get());
auto collectionOptions =
diff --git a/src/mongo/db/repl/idempotency_test_fixture.h b/src/mongo/db/repl/idempotency_test_fixture.h
index e871517e9f5..654e9c87ddd 100644
--- a/src/mongo/db/repl/idempotency_test_fixture.h
+++ b/src/mongo/db/repl/idempotency_test_fixture.h
@@ -49,6 +49,7 @@
namespace mongo {
class Collection;
+class CollectionPtr;
namespace repl {
@@ -151,7 +152,7 @@ protected:
return obj;
};
- std::string computeDataHash(const Collection* collection);
+ std::string computeDataHash(const CollectionPtr& collection);
virtual std::string getStatesString(const std::vector<CollectionState>& state1,
const std::vector<CollectionState>& state2,
const std::vector<OplogEntry>& ops);
diff --git a/src/mongo/db/repl/local_oplog_info.cpp b/src/mongo/db/repl/local_oplog_info.cpp
index 01f48ac47b5..e73199c00db 100644
--- a/src/mongo/db/repl/local_oplog_info.cpp
+++ b/src/mongo/db/repl/local_oplog_info.cpp
@@ -81,16 +81,16 @@ void LocalOplogInfo::setOplogCollectionName(ServiceContext* service) {
}
}
-const Collection* LocalOplogInfo::getCollection() const {
+const CollectionPtr& LocalOplogInfo::getCollection() const {
return _oplog;
}
-void LocalOplogInfo::setCollection(const Collection* oplog) {
- _oplog = oplog;
+void LocalOplogInfo::setCollection(const CollectionPtr& oplog) {
+ _oplog = oplog.detached();
}
void LocalOplogInfo::resetCollection() {
- _oplog = nullptr;
+ _oplog = CollectionPtr();
}
void LocalOplogInfo::setNewTimestamp(ServiceContext* service, const Timestamp& newTime) {
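Note: setCollection() stores oplog.detached() rather than copying the caller's CollectionPtr directly. A sketch of the assumed intent, namely that the long-lived cache keeps an owned copy decoupled from the caller's RAII scope; detached()'s precise semantics live in collection.h, outside this diff.

struct CollectionPtrSketch {
    const void* collection = nullptr;
    CollectionPtrSketch detached() const { return *this; }  // owned copy (assumed semantics)
    explicit operator bool() const { return collection != nullptr; }
};

struct OplogInfoSketch {
    CollectionPtrSketch _oplog;  // outlives any single operation
    void setCollection(const CollectionPtrSketch& oplog) {
        _oplog = oplog.detached();  // keep a copy, not a view into the caller
    }
    void resetCollection() {
        _oplog = CollectionPtrSketch();  // same move as the patched resetCollection()
    }
};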
diff --git a/src/mongo/db/repl/local_oplog_info.h b/src/mongo/db/repl/local_oplog_info.h
index 417901e4f4f..880e6bb6b0e 100644
--- a/src/mongo/db/repl/local_oplog_info.h
+++ b/src/mongo/db/repl/local_oplog_info.h
@@ -66,8 +66,8 @@ public:
*/
void setOplogCollectionName(ServiceContext* service);
- const Collection* getCollection() const;
- void setCollection(const Collection* oplog);
+ const CollectionPtr& getCollection() const;
+ void setCollection(const CollectionPtr& oplog);
void resetCollection();
/**
@@ -88,7 +88,7 @@ private:
// The "oplog" pointer is always valid (or null) because an operation must take the global
// exclusive lock to set the pointer to null when the Collection instance is destroyed. See
// "oplogCheckCloseDatabase".
- const Collection* _oplog = nullptr;
+ CollectionPtr _oplog;
// Synchronizes the section where a new Timestamp is generated and when it is registered in the
// storage engine.
diff --git a/src/mongo/db/repl/mock_repl_coord_server_fixture.cpp b/src/mongo/db/repl/mock_repl_coord_server_fixture.cpp
index 09d79862c07..e6f426933e4 100644
--- a/src/mongo/db/repl/mock_repl_coord_server_fixture.cpp
+++ b/src/mongo/db/repl/mock_repl_coord_server_fixture.cpp
@@ -94,9 +94,8 @@ void MockReplCoordServerFixture::setUp() {
}
void MockReplCoordServerFixture::insertOplogEntry(const repl::OplogEntry& entry) {
- AutoGetCollection autoColl(opCtx(), NamespaceString::kRsOplogNamespace, MODE_IX);
- auto coll = autoColl.getCollection();
- ASSERT_TRUE(coll != nullptr);
+ AutoGetCollection coll(opCtx(), NamespaceString::kRsOplogNamespace, MODE_IX);
+ ASSERT_TRUE(coll);
WriteUnitOfWork wuow(opCtx());
auto status = coll->insertDocument(opCtx(),
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 8b0626fa926..23c148b7f75 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -213,7 +213,7 @@ void _logOpsInner(OperationContext* opCtx,
const NamespaceString& nss,
std::vector<Record>* records,
const std::vector<Timestamp>& timestamps,
- const Collection* oplogCollection,
+ const CollectionPtr& oplogCollection,
OpTime finalOpTime,
Date_t wallTime) {
auto replCoord = ReplicationCoordinator::get(opCtx);
@@ -333,7 +333,7 @@ OpTime logOp(OperationContext* opCtx, MutableOplogEntry* oplogEntry) {
oplogEntry->setOpTime(slot);
}
- auto oplog = oplogInfo->getCollection();
+ const auto& oplog = oplogInfo->getCollection();
auto wallClockTime = oplogEntry->getWallClockTime();
auto bsonOplogEntry = oplogEntry->toBSON();
@@ -425,7 +425,7 @@ std::vector<OpTime> logInsertOps(OperationContext* opCtx,
invariant(!opTimes.empty());
auto lastOpTime = opTimes.back();
invariant(!lastOpTime.isNull());
- auto oplog = oplogInfo->getCollection();
+ const auto& oplog = oplogInfo->getCollection();
auto wallClockTime = oplogEntryTemplate->getWallClockTime();
_logOpsInner(opCtx, nss, &records, timestamps, oplog, lastOpTime, wallClockTime);
wuow.commit();
@@ -536,7 +536,7 @@ void createOplog(OperationContext* opCtx,
const ReplSettings& replSettings = ReplicationCoordinator::get(opCtx)->getSettings();
OldClientContext ctx(opCtx, oplogCollectionName.ns());
- const Collection* collection =
+ CollectionPtr collection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, oplogCollectionName);
if (collection) {
@@ -981,7 +981,7 @@ Status applyOperation_inlock(OperationContext* opCtx,
}
NamespaceString requestNss;
- const Collection* collection = nullptr;
+ CollectionPtr collection = nullptr;
if (auto uuid = op.getUuid()) {
CollectionCatalog& catalog = CollectionCatalog::get(opCtx);
collection = catalog.lookupCollectionByUUID(opCtx, uuid.get());
@@ -1728,14 +1728,14 @@ void acquireOplogCollectionForLogging(OperationContext* opCtx) {
}
}
-void establishOplogCollectionForLogging(OperationContext* opCtx, const Collection* oplog) {
+void establishOplogCollectionForLogging(OperationContext* opCtx, const CollectionPtr& oplog) {
invariant(opCtx->lockState()->isW());
invariant(oplog);
LocalOplogInfo::get(opCtx)->setCollection(oplog);
}
void signalOplogWaiters() {
- auto oplog = LocalOplogInfo::get(getGlobalServiceContext())->getCollection();
+ const auto& oplog = LocalOplogInfo::get(getGlobalServiceContext())->getCollection();
if (oplog) {
oplog->getCappedCallback()->notifyCappedWaitersIfNeeded();
}
diff --git a/src/mongo/db/repl/oplog.h b/src/mongo/db/repl/oplog.h
index 17781db4df3..74e607d477e 100644
--- a/src/mongo/db/repl/oplog.h
+++ b/src/mongo/db/repl/oplog.h
@@ -45,6 +45,7 @@
namespace mongo {
class Collection;
+class CollectionPtr;
class Database;
class NamespaceString;
class OperationContext;
@@ -148,7 +149,7 @@ void acquireOplogCollectionForLogging(OperationContext* opCtx);
* Called by catalog::openCatalog() to re-establish the oplog collection pointer while holding onto
* the global lock in exclusive mode.
*/
-void establishOplogCollectionForLogging(OperationContext* opCtx, const Collection* oplog);
+void establishOplogCollectionForLogging(OperationContext* opCtx, const CollectionPtr& oplog);
using IncrementOpsAppliedStatsFn = std::function<void()>;
diff --git a/src/mongo/db/repl/oplog_applier_impl_test.cpp b/src/mongo/db/repl/oplog_applier_impl_test.cpp
index 9862126d7e2..f56011c0105 100644
--- a/src/mongo/db/repl/oplog_applier_impl_test.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl_test.cpp
@@ -349,7 +349,7 @@ TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsCommand) {
<< BSON("create" << nss.coll()) << "ts" << Timestamp(1, 1) << "ui" << UUID::gen());
bool applyCmdCalled = false;
_opObserver->onCreateCollectionFn = [&](OperationContext* opCtx,
- const Collection*,
+ const CollectionPtr&,
const NamespaceString& collNss,
const CollectionOptions&,
const BSONObj&) {
diff --git a/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp b/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp
index d75b6469b4a..72f996976c2 100644
--- a/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp
@@ -89,7 +89,7 @@ void OplogApplierImplOpObserver::onUpdate(OperationContext* opCtx,
}
void OplogApplierImplOpObserver::onCreateCollection(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
diff --git a/src/mongo/db/repl/oplog_applier_impl_test_fixture.h b/src/mongo/db/repl/oplog_applier_impl_test_fixture.h
index 1a6c414d182..28fec503692 100644
--- a/src/mongo/db/repl/oplog_applier_impl_test_fixture.h
+++ b/src/mongo/db/repl/oplog_applier_impl_test_fixture.h
@@ -99,7 +99,7 @@ public:
* Called when OplogApplierImpl creates a collection.
*/
void onCreateCollection(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
@@ -121,7 +121,7 @@ public:
std::function<void(OperationContext*, const OplogUpdateEntryArgs&)> onUpdateFn;
std::function<void(OperationContext*,
- const Collection*,
+ const CollectionPtr&,
const NamespaceString&,
const CollectionOptions&,
const BSONObj&)>
diff --git a/src/mongo/db/repl/primary_only_service_op_observer.h b/src/mongo/db/repl/primary_only_service_op_observer.h
index 7b176eb663e..f0314ab9231 100644
--- a/src/mongo/db/repl/primary_only_service_op_observer.h
+++ b/src/mongo/db/repl/primary_only_service_op_observer.h
@@ -108,7 +108,7 @@ public:
const boost::optional<OplogSlot> slot) final {}
void onCreateCollection(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 8e2badd998f..8daa5f92aba 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -715,8 +715,7 @@ Timestamp ReplicationCoordinatorExternalStateImpl::getGlobalTimestamp(ServiceCon
}
bool ReplicationCoordinatorExternalStateImpl::oplogExists(OperationContext* opCtx) {
- auto oplog = LocalOplogInfo::get(opCtx)->getCollection();
- return oplog != nullptr;
+ return static_cast<bool>(LocalOplogInfo::get(opCtx)->getCollection());
}
StatusWith<OpTimeAndWallTime> ReplicationCoordinatorExternalStateImpl::loadLastOpTimeAndWallTime(
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index a5f5d199b2d..766d69ffbc2 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -2303,7 +2303,7 @@ StatusWith<OpTime> ReplicationCoordinatorImpl::getLatestWriteOpTime(OperationCon
if (!canAcceptNonLocalWrites()) {
return {ErrorCodes::NotWritablePrimary, "Not primary so can't get latest write optime"};
}
- auto oplog = LocalOplogInfo::get(opCtx)->getCollection();
+ const auto& oplog = LocalOplogInfo::get(opCtx)->getCollection();
if (!oplog) {
return {ErrorCodes::NamespaceNotFound, "oplog collection does not exist"};
}
@@ -2836,7 +2836,7 @@ bool ReplicationCoordinatorImpl::canAcceptWritesFor_UNSAFE(OperationContext* opC
if (!ns->isOplog()) {
return true;
}
- } else if (auto oplogCollection = LocalOplogInfo::get(opCtx)->getCollection()) {
+ } else if (const auto& oplogCollection = LocalOplogInfo::get(opCtx)->getCollection()) {
auto uuid = nsOrUUID.uuid();
invariant(uuid, nsOrUUID.toString());
if (oplogCollection->uuid() != *uuid) {
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index 365410970ad..a696a57966b 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -603,12 +603,12 @@ void RollbackImpl::_correctRecordStoreCounts(OperationContext* opCtx) {
"Scanning collection to fix collection count",
"namespace"_attr = nss.ns(),
"uuid"_attr = uuid.toString());
- AutoGetCollectionForRead autoCollToScan(opCtx, nss);
- auto collToScan = autoCollToScan.getCollection();
- invariant(coll == collToScan,
+ AutoGetCollectionForRead collToScan(opCtx, nss);
+ invariant(coll == collToScan.getCollection(),
str::stream() << "Catalog returned invalid collection: " << nss.ns() << " ("
<< uuid.toString() << ")");
auto exec = collToScan->makePlanExecutor(opCtx,
+ collToScan.getCollection(),
PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY,
Collection::ScanDirection::kForward);
long long countFromScan = 0;
diff --git a/src/mongo/db/repl/rollback_test_fixture.cpp b/src/mongo/db/repl/rollback_test_fixture.cpp
index 862ba41e289..91a995fc466 100644
--- a/src/mongo/db/repl/rollback_test_fixture.cpp
+++ b/src/mongo/db/repl/rollback_test_fixture.cpp
@@ -233,17 +233,20 @@ void RollbackTest::_insertDocument(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& doc) {
- AutoGetCollection autoColl(opCtx, nss, MODE_X);
- auto collection = autoColl.getCollection();
- if (!collection) {
+ auto insertDoc = [opCtx, &doc](const CollectionPtr& collection) {
+ WriteUnitOfWork wuow(opCtx);
+ OpDebug* const opDebug = nullptr;
+ ASSERT_OK(collection->insertDocument(opCtx, InsertStatement(doc), opDebug));
+ wuow.commit();
+ };
+ AutoGetCollection collection(opCtx, nss, MODE_X);
+ if (collection) {
+ insertDoc(collection.getCollection());
+ } else {
CollectionOptions options;
options.uuid = UUID::gen();
- collection = _createCollection(opCtx, nss, options);
+ insertDoc(_createCollection(opCtx, nss, options));
}
- WriteUnitOfWork wuow(opCtx);
- OpDebug* const opDebug = nullptr;
- ASSERT_OK(collection->insertDocument(opCtx, InsertStatement(doc), opDebug));
- wuow.commit();
}
Status RollbackTest::_insertOplogEntry(const BSONObj& doc) {
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index fa6b07c6696..0689bab6384 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -971,7 +971,7 @@ void rollbackDropIndexes(OperationContext* opCtx,
invariant(nss);
Lock::DBLock dbLock(opCtx, nss->db(), MODE_IX);
Lock::CollectionLock collLock(opCtx, *nss, MODE_X);
- const Collection* collection =
+ CollectionPtr collection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, *nss);
// If we cannot find the collection, we skip over dropping the index.
@@ -1016,7 +1016,7 @@ void rollbackDropIndexes(OperationContext* opCtx,
*/
void dropCollection(OperationContext* opCtx,
NamespaceString nss,
- const Collection* collection,
+ const CollectionPtr& collection,
Database* db) {
if (RollbackImpl::shouldCreateDataFiles()) {
RemoveSaver removeSaver("rollback", "", collection->uuid().toString());
@@ -1499,7 +1499,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
Database* db = dbLock.getDb();
if (db) {
- const Collection* collection =
+ CollectionPtr collection =
CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, uuid);
dropCollection(opCtx, *nss, collection, db);
LOGV2_DEBUG(21698,
diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp
index b785eadeaa5..d7a14c13a35 100644
--- a/src/mongo/db/repl/rs_rollback_test.cpp
+++ b/src/mongo/db/repl/rs_rollback_test.cpp
@@ -79,7 +79,7 @@ OplogInterfaceMock::Operation makeNoopOplogEntryAndRecordId(Seconds seconds) {
return std::make_pair(BSON("ts" << ts.getTimestamp()), RecordId(1));
}
-OplogInterfaceMock::Operation makeDropIndexOplogEntry(const Collection* collection,
+OplogInterfaceMock::Operation makeDropIndexOplogEntry(const CollectionPtr& collection,
BSONObj key,
std::string indexName,
int time) {
@@ -96,7 +96,7 @@ OplogInterfaceMock::Operation makeDropIndexOplogEntry(const Collection* collecti
RecordId(time));
}
-OplogInterfaceMock::Operation makeStartIndexBuildOplogEntry(const Collection* collection,
+OplogInterfaceMock::Operation makeStartIndexBuildOplogEntry(const CollectionPtr& collection,
UUID buildUUID,
BSONObj spec,
int time) {
@@ -112,7 +112,7 @@ OplogInterfaceMock::Operation makeStartIndexBuildOplogEntry(const Collection* co
RecordId(time));
}
-OplogInterfaceMock::Operation makeCommitIndexBuildOplogEntry(const Collection* collection,
+OplogInterfaceMock::Operation makeCommitIndexBuildOplogEntry(const CollectionPtr& collection,
UUID buildUUID,
BSONObj spec,
int time) {
@@ -128,7 +128,7 @@ OplogInterfaceMock::Operation makeCommitIndexBuildOplogEntry(const Collection* c
RecordId(time));
}
-OplogInterfaceMock::Operation makeAbortIndexBuildOplogEntry(const Collection* collection,
+OplogInterfaceMock::Operation makeAbortIndexBuildOplogEntry(const CollectionPtr& collection,
UUID buildUUID,
BSONObj spec,
int time) {
@@ -150,7 +150,7 @@ OplogInterfaceMock::Operation makeAbortIndexBuildOplogEntry(const Collection* co
RecordId(time));
}
-OplogInterfaceMock::Operation makeCreateIndexOplogEntry(const Collection* collection,
+OplogInterfaceMock::Operation makeCreateIndexOplogEntry(const CollectionPtr& collection,
BSONObj key,
std::string indexName,
int time) {
@@ -847,14 +847,14 @@ BSONObj idxSpec(NamespaceString nss, std::string id) {
}
// Returns the number of indexes that exist on the given collection.
-int numIndexesOnColl(OperationContext* opCtx, NamespaceString nss, const Collection* coll) {
+int numIndexesOnColl(OperationContext* opCtx, NamespaceString nss, const CollectionPtr& coll) {
Lock::DBLock dbLock(opCtx, nss.db(), MODE_X);
auto indexCatalog = coll->getIndexCatalog();
ASSERT(indexCatalog);
return indexCatalog->numIndexesReady(opCtx);
}
-int numIndexesInProgress(OperationContext* opCtx, NamespaceString nss, const Collection* coll) {
+int numIndexesInProgress(OperationContext* opCtx, NamespaceString nss, const CollectionPtr& coll) {
Lock::DBLock dbLock(opCtx, nss.db(), MODE_X);
auto indexCatalog = coll->getIndexCatalog();
ASSERT(indexCatalog);
@@ -1738,7 +1738,7 @@ OpTime getOpTimeFromOplogEntry(const BSONObj& entry) {
TEST_F(RSRollbackTest, RollbackApplyOpsCommand) {
createOplog(_opCtx.get());
- const Collection* coll = nullptr;
+ CollectionPtr coll;
CollectionOptions options;
options.uuid = UUID::gen();
{
diff --git a/src/mongo/db/repl/storage_interface.h b/src/mongo/db/repl/storage_interface.h
index ef8af4ac2c1..098df44021d 100644
--- a/src/mongo/db/repl/storage_interface.h
+++ b/src/mongo/db/repl/storage_interface.h
@@ -47,6 +47,7 @@
namespace mongo {
class Collection;
+class CollectionPtr;
struct CollectionOptions;
class OperationContext;
@@ -326,7 +327,7 @@ public:
* matches are found.
*/
virtual boost::optional<BSONObj> findOplogEntryLessThanOrEqualToTimestamp(
- OperationContext* opCtx, const Collection* oplog, const Timestamp& timestamp) = 0;
+ OperationContext* opCtx, const CollectionPtr& oplog, const Timestamp& timestamp) = 0;
/**
* Calls findOplogEntryLessThanOrEqualToTimestamp with endless WriteConflictException retries.
@@ -337,7 +338,7 @@ public:
* fail, say for correctness.
*/
virtual boost::optional<BSONObj> findOplogEntryLessThanOrEqualToTimestampRetryOnWCE(
- OperationContext* opCtx, const Collection* oplog, const Timestamp& timestamp) = 0;
+ OperationContext* opCtx, const CollectionPtr& oplog, const Timestamp& timestamp) = 0;
/**
* Fetches the latest oplog entry's timestamp. Bypasses the oplog visibility rules.
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index f9b6945b259..926cc60efcb 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -299,28 +299,27 @@ Status StorageInterfaceImpl::insertDocument(OperationContext* opCtx,
namespace {
/**
- * Returns const Collection* from database RAII object.
+ * Returns a pointer to the const CollectionPtr owned by the database RAII object.
* Returns NamespaceNotFound if the database or collection does not exist.
*/
template <typename AutoGetCollectionType>
-StatusWith<decltype(std::declval<AutoGetCollectionType>().getCollection())> getCollection(
- const AutoGetCollectionType& autoGetCollection,
- const NamespaceStringOrUUID& nsOrUUID,
- const std::string& message) {
+StatusWith<const CollectionPtr*> getCollection(const AutoGetCollectionType& autoGetCollection,
+ const NamespaceStringOrUUID& nsOrUUID,
+ const std::string& message) {
if (!autoGetCollection.getDb()) {
StringData dbName = nsOrUUID.nss() ? nsOrUUID.nss()->db() : nsOrUUID.dbname();
return {ErrorCodes::NamespaceNotFound,
str::stream() << "Database [" << dbName << "] not found. " << message};
}
- auto collection = autoGetCollection.getCollection();
+ const auto& collection = autoGetCollection.getCollection();
if (!collection) {
return {ErrorCodes::NamespaceNotFound,
str::stream() << "Collection [" << nsOrUUID.toString() << "] not found. "
<< message};
}
- return collection;
+ return &collection;
}
Status insertDocumentsSingleBatch(OperationContext* opCtx,
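Note: StatusWith cannot hold a reference, so getCollection() above returns a pointer to the CollectionPtr owned by the RAII object, and callers dereference it ('*collectionResult.getValue()' in the hunks below). A reduced sketch of the pattern, with std::optional standing in for StatusWith and hypothetical stubs for the mongo types:

#include <optional>

struct CollPtrStub {
    bool valid = false;
    explicit operator bool() const { return valid; }
};

struct AutoGetStub {
    CollPtrStub coll{true};
    const CollPtrStub& getCollection() const { return coll; }
};

std::optional<const CollPtrStub*> getCollectionSketch(const AutoGetStub& autoGet) {
    const auto& collection = autoGet.getCollection();
    if (!collection)
        return std::nullopt;  // maps to the NamespaceNotFound branch above
    return &collection;       // pointer into the RAII object, not a copy
}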
@@ -329,14 +328,14 @@ Status insertDocumentsSingleBatch(OperationContext* opCtx,
std::vector<InsertStatement>::const_iterator end) {
boost::optional<AutoGetCollection> autoColl;
boost::optional<AutoGetOplog> autoOplog;
- const Collection* collection;
+ const CollectionPtr* collection;
auto nss = nsOrUUID.nss();
if (nss && nss->isOplog()) {
// Simplify locking rules for oplog collection.
autoOplog.emplace(opCtx, OplogAccessMode::kWrite);
- collection = autoOplog->getCollection();
- if (!collection) {
+ collection = &autoOplog->getCollection();
+ if (!*collection) {
return {ErrorCodes::NamespaceNotFound, "Oplog collection does not exist"};
}
} else {
@@ -351,7 +350,7 @@ Status insertDocumentsSingleBatch(OperationContext* opCtx,
WriteUnitOfWork wunit(opCtx);
OpDebug* const nullOpDebug = nullptr;
- auto status = collection->insertDocuments(opCtx, begin, end, nullOpDebug, false);
+ auto status = (*collection)->insertDocuments(opCtx, begin, end, nullOpDebug, false);
if (!status.isOK()) {
return status;
}
@@ -454,7 +453,7 @@ StatusWith<size_t> StorageInterfaceImpl::getOplogMaxSize(OperationContext* opCtx
NamespaceString::kRsOplogNamespace.ns(),
[&]() -> StatusWith<size_t> {
AutoGetOplog oplogRead(opCtx, OplogAccessMode::kRead);
- auto oplog = oplogRead.getCollection();
+ const auto& oplog = oplogRead.getCollection();
if (!oplog) {
return {ErrorCodes::NamespaceNotFound, "Your oplog doesn't exist."};
}
@@ -602,7 +601,7 @@ Status StorageInterfaceImpl::setIndexIsMultikey(OperationContext* opCtx,
if (!collectionResult.isOK()) {
return collectionResult.getStatus();
}
- auto collection = collectionResult.getValue();
+ const auto& collection = *collectionResult.getValue();
WriteUnitOfWork wunit(opCtx);
auto tsResult = opCtx->recoveryUnit()->setTimestamp(ts);
@@ -667,7 +666,7 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
if (!collectionResult.isOK()) {
return Result(collectionResult.getStatus());
}
- auto collection = collectionResult.getValue();
+ const auto& collection = *collectionResult.getValue();
auto isForward = scanDirection == StorageInterface::ScanDirection::kForward;
auto direction = isForward ? InternalPlanner::FORWARD : InternalPlanner::BACKWARD;
@@ -922,7 +921,7 @@ Status _updateWithQuery(OperationContext* opCtx,
if (!collectionResult.isOK()) {
return collectionResult.getStatus();
}
- auto collection = collectionResult.getValue();
+ const auto& collection = *collectionResult.getValue();
WriteUnitOfWork wuow(opCtx);
if (!ts.isNull()) {
uassertStatusOK(opCtx->recoveryUnit()->setTimestamp(ts));
@@ -970,7 +969,7 @@ Status StorageInterfaceImpl::upsertById(OperationContext* opCtx,
if (!collectionResult.isOK()) {
return collectionResult.getStatus();
}
- auto collection = collectionResult.getValue();
+ const auto& collection = *collectionResult.getValue();
// We can create an UpdateRequest now that the collection's namespace has been resolved, in
// the event it was specified as a UUID.
@@ -1080,7 +1079,7 @@ Status StorageInterfaceImpl::deleteByFilter(OperationContext* opCtx,
if (!collectionResult.isOK()) {
return collectionResult.getStatus();
}
- auto collection = collectionResult.getValue();
+ const auto& collection = *collectionResult.getValue();
auto planExecutorResult = mongo::getExecutorDelete(
nullptr, collection, &parsedDelete, boost::none /* verbosity */);
@@ -1103,7 +1102,7 @@ Status StorageInterfaceImpl::deleteByFilter(OperationContext* opCtx,
}
boost::optional<BSONObj> StorageInterfaceImpl::findOplogEntryLessThanOrEqualToTimestamp(
- OperationContext* opCtx, const Collection* oplog, const Timestamp& timestamp) {
+ OperationContext* opCtx, const CollectionPtr& oplog, const Timestamp& timestamp) {
invariant(oplog);
invariant(opCtx->lockState()->isLocked());
@@ -1134,7 +1133,7 @@ boost::optional<BSONObj> StorageInterfaceImpl::findOplogEntryLessThanOrEqualToTi
}
boost::optional<BSONObj> StorageInterfaceImpl::findOplogEntryLessThanOrEqualToTimestampRetryOnWCE(
- OperationContext* opCtx, const Collection* oplogCollection, const Timestamp& timestamp) {
+ OperationContext* opCtx, const CollectionPtr& oplogCollection, const Timestamp& timestamp) {
// Oplog reads are specially done under only MODE_IS global locks, without database or
// collection level intent locks. Therefore, reads can run concurrently with validate cmds that
// take collection MODE_X locks. Validate with {full:true} set calls WT::verify on the
@@ -1209,7 +1208,7 @@ StatusWith<StorageInterface::CollectionSize> StorageInterfaceImpl::getCollection
if (!collectionResult.isOK()) {
return collectionResult.getStatus();
}
- auto collection = collectionResult.getValue();
+ const auto& collection = *collectionResult.getValue();
return collection->dataSize(opCtx);
}
@@ -1223,7 +1222,7 @@ StatusWith<StorageInterface::CollectionCount> StorageInterfaceImpl::getCollectio
if (!collectionResult.isOK()) {
return collectionResult.getStatus();
}
- auto collection = collectionResult.getValue();
+ const auto& collection = *collectionResult.getValue();
return collection->numRecords(opCtx);
}
@@ -1238,7 +1237,7 @@ Status StorageInterfaceImpl::setCollectionCount(OperationContext* opCtx,
if (!collectionResult.isOK()) {
return collectionResult.getStatus();
}
- auto collection = collectionResult.getValue();
+ const auto& collection = *collectionResult.getValue();
auto rs = collection->getRecordStore();
// We cannot fix the data size correctly, so we just get the current cached value and keep it
@@ -1257,7 +1256,7 @@ StatusWith<OptionalCollectionUUID> StorageInterfaceImpl::getCollectionUUID(
if (!collectionResult.isOK()) {
return collectionResult.getStatus();
}
- auto collection = collectionResult.getValue();
+ const auto& collection = *collectionResult.getValue();
return collection->uuid();
}
@@ -1340,12 +1339,11 @@ Status StorageInterfaceImpl::isAdminDbValid(OperationContext* opCtx) {
return Status::OK();
}
- const Collection* const usersCollection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
- opCtx, AuthorizationManager::usersCollectionNamespace);
+ CollectionPtr usersCollection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
+ opCtx, AuthorizationManager::usersCollectionNamespace);
const bool hasUsers =
usersCollection && !Helpers::findOne(opCtx, usersCollection, BSONObj(), false).isNull();
- const Collection* const adminVersionCollection =
+ CollectionPtr adminVersionCollection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
opCtx, AuthorizationManager::versionCollectionNamespace);
BSONObj authSchemaVersionDocument;
@@ -1398,7 +1396,7 @@ void StorageInterfaceImpl::waitForAllEarlierOplogWritesToBeVisible(OperationCont
if (primaryOnly &&
!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(opCtx, "admin"))
return;
- auto oplog = oplogRead.getCollection();
+ const auto& oplog = oplogRead.getCollection();
uassert(ErrorCodes::NotYetInitialized, "The oplog does not exist", oplog);
oplog->getRecordStore()->waitForAllEarlierOplogWritesToBeVisible(opCtx);
}
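
The storage_interface_impl.cpp hunks above all lean on one idiom: the helper cannot hand the CollectionPtr back by value (that would copy it), and StatusWith cannot hold a reference, so it returns a raw pointer to the RAII-owned CollectionPtr and every caller dereferences the StatusWith value. A minimal standalone sketch of the idiom follows; CollectionPtr, StatusWith and AutoGetCollection here are toy stand-ins of ours, not the real MongoDB classes.

    #include <cassert>
    #include <optional>

    // Toy stand-ins, just to show the shape of the idiom.
    struct CollectionPtr {
        bool exists = false;
        explicit operator bool() const { return exists; }
    };

    template <typename T>
    struct StatusWith {
        std::optional<T> value;  // the real StatusWith carries a Status on failure
        bool isOK() const { return value.has_value(); }
        const T& getValue() const { return *value; }
    };

    struct AutoGetCollection {  // the RAII object owns the one CollectionPtr
        CollectionPtr coll{true};
        const CollectionPtr& getCollection() const { return coll; }
    };

    // Returning a pointer keeps StatusWith copyable while callers still observe
    // the RAII-owned CollectionPtr without copying it.
    StatusWith<const CollectionPtr*> getCollection(const AutoGetCollection& autoColl) {
        const auto& collection = autoColl.getCollection();
        if (!collection)
            return {};  // the real code returns {ErrorCodes::NamespaceNotFound, ...}
        return {&collection};
    }

    int main() {
        AutoGetCollection autoColl;
        auto result = getCollection(autoColl);
        assert(result.isOK());
        const auto& collection = *result.getValue();  // the deref pattern used above
        assert(collection);
    }
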
diff --git a/src/mongo/db/repl/storage_interface_impl.h b/src/mongo/db/repl/storage_interface_impl.h
index 2cddfaa4719..0e13614d443 100644
--- a/src/mongo/db/repl/storage_interface_impl.h
+++ b/src/mongo/db/repl/storage_interface_impl.h
@@ -148,10 +148,10 @@ public:
const BSONObj& filter) override;
boost::optional<BSONObj> findOplogEntryLessThanOrEqualToTimestamp(
- OperationContext* opCtx, const Collection* oplog, const Timestamp& timestamp) override;
+ OperationContext* opCtx, const CollectionPtr& oplog, const Timestamp& timestamp) override;
boost::optional<BSONObj> findOplogEntryLessThanOrEqualToTimestampRetryOnWCE(
- OperationContext* opCtx, const Collection* oplog, const Timestamp& timestamp) override;
+ OperationContext* opCtx, const CollectionPtr& oplog, const Timestamp& timestamp) override;
Timestamp getLatestOplogTimestamp(OperationContext* opCtx) override;
diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp
index 3efd6bf1bcb..e78f89427ab 100644
--- a/src/mongo/db/repl/storage_interface_impl_test.cpp
+++ b/src/mongo/db/repl/storage_interface_impl_test.cpp
@@ -573,8 +573,7 @@ TEST_F(StorageInterfaceImplTest, CreateCollectionWithIDIndexCommits) {
ASSERT_OK(loader->insertDocuments(docs.begin(), docs.end()));
ASSERT_OK(loader->commit());
- AutoGetCollectionForReadCommand autoColl(opCtx, nss);
- auto coll = autoColl.getCollection();
+ AutoGetCollectionForReadCommand coll(opCtx, nss);
ASSERT(coll);
ASSERT_EQ(coll->getRecordStore()->numRecords(opCtx), 2LL);
auto collIdxCat = coll->getIndexCatalog();
@@ -601,8 +600,7 @@ void _testDestroyUncommitedCollectionBulkLoader(
// Collection and ID index should not exist after 'loader' is destroyed.
destroyLoaderFn(std::move(loader));
- AutoGetCollectionForReadCommand autoColl(opCtx, nss);
- auto coll = autoColl.getCollection();
+ AutoGetCollectionForReadCommand coll(opCtx, nss);
// Bulk loader is used to create indexes. The collection is not dropped when the bulk loader is
// destroyed.
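
The test hunks above show the other recurring simplification: the two-line pattern (construct autoColl, then fetch the collection from it) collapses because the AutoGet* RAII types now forward operator bool and operator-> to the CollectionPtr they hold, so the RAII object itself is usable as the collection. A sketch of that forwarding with toy stand-ins, not the real MongoDB classes:

    #include <cassert>

    struct Collection {
        long long numRecords() const { return 2; }
    };

    struct AutoGetCollectionForReadCommand {  // toy model of the RAII type
        Collection coll;
        bool exists = true;
        explicit operator bool() const { return exists; }       // makes ASSERT(coll) work
        const Collection* operator->() const { return &coll; }  // makes coll->... work
    };

    int main() {
        AutoGetCollectionForReadCommand coll;  // was: autoColl + autoColl.getCollection()
        assert(coll);
        assert(coll->numRecords() == 2);
    }
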
diff --git a/src/mongo/db/repl/storage_interface_mock.h b/src/mongo/db/repl/storage_interface_mock.h
index 074f520ef62..9af77afaa55 100644
--- a/src/mongo/db/repl/storage_interface_mock.h
+++ b/src/mongo/db/repl/storage_interface_mock.h
@@ -273,12 +273,12 @@ public:
}
boost::optional<BSONObj> findOplogEntryLessThanOrEqualToTimestamp(
- OperationContext* opCtx, const Collection* oplog, const Timestamp& timestamp) override {
+ OperationContext* opCtx, const CollectionPtr& oplog, const Timestamp& timestamp) override {
return boost::none;
}
boost::optional<BSONObj> findOplogEntryLessThanOrEqualToTimestampRetryOnWCE(
- OperationContext* opCtx, const Collection* oplog, const Timestamp& timestamp) override {
+ OperationContext* opCtx, const CollectionPtr& oplog, const Timestamp& timestamp) override {
return boost::none;
}
diff --git a/src/mongo/db/repl/tenant_migration_donor_op_observer.h b/src/mongo/db/repl/tenant_migration_donor_op_observer.h
index 302ee5bfaef..d317a518498 100644
--- a/src/mongo/db/repl/tenant_migration_donor_op_observer.h
+++ b/src/mongo/db/repl/tenant_migration_donor_op_observer.h
@@ -106,7 +106,7 @@ public:
const boost::optional<OplogSlot> slot) final {}
void onCreateCollection(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
diff --git a/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp b/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp
index 357d38ae466..de656c59ca6 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp
@@ -52,9 +52,9 @@ namespace {
* Creates the tenant migration recipients collection if it doesn't exist.
 * Note: Throws WriteConflictException if the collection already exists.
*/
-const Collection* ensureTenantMigrationRecipientsCollectionExists(OperationContext* opCtx,
- Database* db,
- const NamespaceString& nss) {
+CollectionPtr ensureTenantMigrationRecipientsCollectionExists(OperationContext* opCtx,
+ Database* db,
+ const NamespaceString& nss) {
// Sanity checks.
invariant(db);
invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IX));
@@ -129,9 +129,8 @@ StatusWith<TenantMigrationRecipientDocument> getStateDoc(OperationContext* opCtx
const UUID& migrationUUID) {
// Read the most up to date data.
ReadSourceScope readSourceScope(opCtx, RecoveryUnit::ReadSource::kNoTimestamp);
- AutoGetCollectionForRead autoCollection(opCtx,
- NamespaceString::kTenantMigrationRecipientsNamespace);
- const Collection* collection = autoCollection.getCollection();
+ AutoGetCollectionForRead collection(opCtx,
+ NamespaceString::kTenantMigrationRecipientsNamespace);
if (!collection) {
return Status(ErrorCodes::NamespaceNotFound,
@@ -140,8 +139,11 @@ StatusWith<TenantMigrationRecipientDocument> getStateDoc(OperationContext* opCtx
}
BSONObj result;
- auto foundDoc = Helpers::findOne(
- opCtx, collection, BSON("_id" << migrationUUID), result, /*requireIndex=*/true);
+ auto foundDoc = Helpers::findOne(opCtx,
+ collection.getCollection(),
+ BSON("_id" << migrationUUID),
+ result,
+ /*requireIndex=*/true);
if (!foundDoc) {
return Status(ErrorCodes::NoMatchingDocument,
str::stream() << "No matching state doc found with tenant migration UUID: "
diff --git a/src/mongo/db/repl/tenant_oplog_applier_test.cpp b/src/mongo/db/repl/tenant_oplog_applier_test.cpp
index 39a07d3d0b7..15efc7052b9 100644
--- a/src/mongo/db/repl/tenant_oplog_applier_test.cpp
+++ b/src/mongo/db/repl/tenant_oplog_applier_test.cpp
@@ -578,7 +578,7 @@ TEST_F(TenantOplogApplierTest, ApplyCommand_Success) {
<< BSON("create" << nss.coll()) << "ts" << Timestamp(1, 1) << "ui" << UUID::gen());
bool applyCmdCalled = false;
_opObserver->onCreateCollectionFn = [&](OperationContext* opCtx,
- const Collection*,
+ const CollectionPtr&,
const NamespaceString& collNss,
const CollectionOptions&,
const BSONObj&) {
@@ -609,7 +609,7 @@ TEST_F(TenantOplogApplierTest, ApplyCommand_WrongNSS) {
<< BSON("create" << nss.coll()) << "ts" << Timestamp(1, 1) << "ui" << UUID::gen());
bool applyCmdCalled = false;
_opObserver->onCreateCollectionFn = [&](OperationContext* opCtx,
- const Collection*,
+ const CollectionPtr&,
const NamespaceString& collNss,
const CollectionOptions&,
const BSONObj&) { applyCmdCalled = true; };
diff --git a/src/mongo/db/s/check_sharding_index_command.cpp b/src/mongo/db/s/check_sharding_index_command.cpp
index 64f07ebcccf..af6aca46aff 100644
--- a/src/mongo/db/s/check_sharding_index_command.cpp
+++ b/src/mongo/db/s/check_sharding_index_command.cpp
@@ -87,8 +87,7 @@ public:
return true;
}
- AutoGetCollectionForReadCommand autoColl(opCtx, nss);
- auto collection = autoColl.getCollection();
+ AutoGetCollectionForReadCommand collection(opCtx, nss);
if (!collection) {
errmsg = "ns not found";
return false;
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp
index 0f28c3fc1bb..19b0fc0b720 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp
@@ -244,8 +244,7 @@ TEST_F(ConfigInitializationTest, ReRunsIfDocRolledBackThenReElected) {
repl::UnreplicatedWritesBlock uwb(opCtx);
auto nss = VersionType::ConfigNS;
writeConflictRetry(opCtx, "removeConfigDocuments", nss.ns(), [&] {
- AutoGetCollection autoColl(opCtx, nss, MODE_IX);
- auto coll = autoColl.getCollection();
+ AutoGetCollection coll(opCtx, nss, MODE_IX);
ASSERT_TRUE(coll);
auto cursor = coll->getCursor(opCtx);
std::vector<RecordId> recordIds;
diff --git a/src/mongo/db/s/config_server_op_observer.h b/src/mongo/db/s/config_server_op_observer.h
index f3c7b61fdd1..e45ee8d30d0 100644
--- a/src/mongo/db/s/config_server_op_observer.h
+++ b/src/mongo/db/s/config_server_op_observer.h
@@ -108,7 +108,7 @@ public:
const boost::optional<OplogSlot> slot) final{};
void onCreateCollection(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 40d2853424e..85b91e96903 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -586,7 +586,7 @@ void MigrationChunkClonerSourceLegacy::_decrementOutstandingOperationTrackReques
}
void MigrationChunkClonerSourceLegacy::_nextCloneBatchFromIndexScan(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
BSONArrayBuilder* arrBuilder) {
ElapsedTracker tracker(opCtx->getServiceContext()->getFastClockSource(),
internalQueryExecYieldIterations.load(),
@@ -597,7 +597,7 @@ void MigrationChunkClonerSourceLegacy::_nextCloneBatchFromIndexScan(OperationCon
_jumboChunkCloneState->clonerExec = std::move(exec);
} else {
_jumboChunkCloneState->clonerExec->reattachToOperationContext(opCtx);
- _jumboChunkCloneState->clonerExec->restoreState();
+ _jumboChunkCloneState->clonerExec->restoreState(&collection);
}
PlanExecutor::ExecState execState;
@@ -655,7 +655,7 @@ void MigrationChunkClonerSourceLegacy::_nextCloneBatchFromIndexScan(OperationCon
}
void MigrationChunkClonerSourceLegacy::_nextCloneBatchFromCloneLocs(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
BSONArrayBuilder* arrBuilder) {
ElapsedTracker tracker(opCtx->getServiceContext()->getFastClockSource(),
internalQueryExecYieldIterations.load(),
@@ -705,7 +705,7 @@ uint64_t MigrationChunkClonerSourceLegacy::getCloneBatchBufferAllocationSize() {
}
Status MigrationChunkClonerSourceLegacy::nextCloneBatch(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
BSONArrayBuilder* arrBuilder) {
dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss(), MODE_IS));
@@ -802,7 +802,7 @@ StatusWith<BSONObj> MigrationChunkClonerSourceLegacy::_callRecipient(const BSONO
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>>
MigrationChunkClonerSourceLegacy::_getIndexScanExecutor(OperationContext* opCtx,
- const Collection* const collection) {
+ const CollectionPtr& collection) {
// Allow multiKey based on the invariant that shard keys must be single-valued. Therefore, any
// multi-key index prefixed by shard key cannot be multikey over the shard key fields.
const IndexDescriptor* idx =
@@ -833,15 +833,13 @@ MigrationChunkClonerSourceLegacy::_getIndexScanExecutor(OperationContext* opCtx,
}
Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opCtx) {
- AutoGetCollection autoColl(opCtx, _args.getNss(), MODE_IS);
-
- const Collection* const collection = autoColl.getCollection();
+ AutoGetCollection collection(opCtx, _args.getNss(), MODE_IS);
if (!collection) {
return {ErrorCodes::NamespaceNotFound,
str::stream() << "Collection " << _args.getNss().ns() << " does not exist."};
}
- auto swExec = _getIndexScanExecutor(opCtx, collection);
+ auto swExec = _getIndexScanExecutor(opCtx, collection.getCollection());
if (!swExec.isOK()) {
return swExec.getStatus();
}
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
index a5c1bdab2c1..181bc15b6ce 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
@@ -52,6 +52,7 @@ namespace mongo {
class BSONArrayBuilder;
class BSONObjBuilder;
class Collection;
+class CollectionPtr;
class Database;
class RecordId;
@@ -159,7 +160,7 @@ public:
* NOTE: Must be called with the collection lock held in at least IS mode.
*/
Status nextCloneBatch(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
BSONArrayBuilder* arrBuilder);
/**
@@ -221,14 +222,14 @@ private:
StatusWith<BSONObj> _callRecipient(const BSONObj& cmdObj);
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> _getIndexScanExecutor(
- OperationContext* opCtx, const Collection* const collection);
+ OperationContext* opCtx, const CollectionPtr& collection);
void _nextCloneBatchFromIndexScan(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
BSONArrayBuilder* arrBuilder);
void _nextCloneBatchFromCloneLocs(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
BSONArrayBuilder* arrBuilder);
/**
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
index 017851f43e9..2f255b68fc2 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
@@ -108,7 +108,7 @@ public:
return _autoColl->getDb();
}
- const Collection* getColl() const {
+ const CollectionPtr& getColl() const {
invariant(_autoColl);
return _autoColl->getCollection();
}
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index e6e4de4ac75..fe1952a8ffc 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -717,7 +717,7 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(
// missing (auto-heal indexes).
// Checks that the collection's UUID matches the donor's.
- auto checkUUIDsMatch = [&](const Collection* collection) {
+ auto checkUUIDsMatch = [&](const CollectionPtr& collection) {
uassert(ErrorCodes::NotWritablePrimary,
str::stream() << "Unable to create collection " << nss.ns()
<< " because the node is not primary",
@@ -737,7 +737,7 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(
// Gets the missing indexes and checks if the collection is empty (auto-healing is
// possible).
- auto checkEmptyOrGetMissingIndexesFromDonor = [&](const Collection* collection) {
+ auto checkEmptyOrGetMissingIndexesFromDonor = [&](const CollectionPtr& collection) {
auto indexCatalog = collection->getIndexCatalog();
auto indexSpecs = indexCatalog->removeExistingIndexesNoChecks(
opCtx, collectionOptionsAndIndexes.indexSpecs);
@@ -754,12 +754,12 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(
};
{
- AutoGetCollection autoGetCollection(opCtx, nss, MODE_IS);
- auto collection = autoGetCollection.getCollection();
+ AutoGetCollection collection(opCtx, nss, MODE_IS);
if (collection) {
- checkUUIDsMatch(collection);
- auto indexSpecs = checkEmptyOrGetMissingIndexesFromDonor(collection);
+ checkUUIDsMatch(collection.getCollection());
+ auto indexSpecs =
+ checkEmptyOrGetMissingIndexesFromDonor(collection.getCollection());
if (indexSpecs.empty()) {
return;
}
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index 7d289fb979e..1e5011a6472 100644
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -537,7 +537,7 @@ void submitOrphanRangesForCleanup(OperationContext* opCtx) {
if (dbName == NamespaceString::kLocalDb)
continue;
- for (auto collIt = catalog.begin(dbName); collIt != catalog.end(); ++collIt) {
+ for (auto collIt = catalog.begin(opCtx, dbName); collIt != catalog.end(opCtx); ++collIt) {
auto uuid = collIt.uuid().get();
auto nss = catalog.lookupNSSByUUID(opCtx, uuid).get();
LOGV2_DEBUG(22034,
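
The migration_util.cpp hunk above tracks a CollectionCatalog API change: begin() and end() now take the OperationContext, so the iterator can produce handles tied to the current operation. A sketch of the new call shape; OperationContext, Catalog and its iterator are simplified models of ours, not the MongoDB originals:

    #include <cassert>
    #include <map>
    #include <string>

    struct OperationContext {};
    struct CollectionInfo { std::string uuid; };

    struct Catalog {
        std::map<std::string, CollectionInfo> byNs;

        struct iterator {
            OperationContext* opCtx;  // carried so dereferencing can use the operation
            std::map<std::string, CollectionInfo>::const_iterator it;
            bool operator!=(const iterator& other) const { return it != other.it; }
            iterator& operator++() { ++it; return *this; }
            const std::string& uuid() const { return it->second.uuid; }
        };

        iterator begin(OperationContext* opCtx, const std::string& dbName) const {
            return {opCtx, byNs.lower_bound(dbName)};
        }
        iterator end(OperationContext* opCtx) const { return {opCtx, byNs.end()}; }
    };

    int main() {
        OperationContext opCtx;
        Catalog catalog{{{"db.coll", CollectionInfo{"uuid-1"}}}};
        int seen = 0;
        for (auto it = catalog.begin(&opCtx, "db"); it != catalog.end(&opCtx); ++it) {
            ++seen;
        }
        assert(seen == 1);
    }
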
diff --git a/src/mongo/db/s/range_deletion_util.cpp b/src/mongo/db/s/range_deletion_util.cpp
index 5f904e488ad..ec61ce0d47c 100644
--- a/src/mongo/db/s/range_deletion_util.cpp
+++ b/src/mongo/db/s/range_deletion_util.cpp
@@ -83,7 +83,7 @@ MONGO_FAIL_POINT_DEFINE(throwInternalErrorInDeleteRange);
* enqueued for deletion.
*/
bool collectionUuidHasChanged(const NamespaceString& nss,
- const Collection* currentCollection,
+ const CollectionPtr& currentCollection,
UUID expectedCollectionUuid) {
if (!currentCollection) {
@@ -121,11 +121,11 @@ bool collectionUuidHasChanged(const NamespaceString& nss,
* the range failed.
*/
StatusWith<int> deleteNextBatch(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
BSONObj const& keyPattern,
ChunkRange const& range,
int numDocsToRemovePerBatch) {
- invariant(collection != nullptr);
+ invariant(collection);
auto const& nss = collection->ns();
@@ -303,18 +303,21 @@ ExecutorFuture<void> deleteRangeInBatches(const std::shared_ptr<executor::TaskEx
ensureRangeDeletionTaskStillExists(opCtx, *migrationId);
}
- AutoGetCollection autoColl(opCtx, nss, MODE_IX);
- auto* const collection = autoColl.getCollection();
+ AutoGetCollection collection(opCtx, nss, MODE_IX);
// Ensure the collection exists and has not been dropped or dropped and
// recreated.
- uassert(ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist,
- "Collection has been dropped since enqueuing this range "
- "deletion task. No need to delete documents.",
- !collectionUuidHasChanged(nss, collection, collectionUuid));
-
- auto numDeleted = uassertStatusOK(deleteNextBatch(
- opCtx, collection, keyPattern, range, numDocsToRemovePerBatch));
+ uassert(
+ ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist,
+ "Collection has been dropped since enqueuing this range "
+ "deletion task. No need to delete documents.",
+ !collectionUuidHasChanged(nss, collection.getCollection(), collectionUuid));
+
+ auto numDeleted = uassertStatusOK(deleteNextBatch(opCtx,
+ collection.getCollection(),
+ keyPattern,
+ range,
+ numDocsToRemovePerBatch));
LOGV2_DEBUG(
23769,
diff --git a/src/mongo/db/s/resharding/resharding_op_observer.h b/src/mongo/db/s/resharding/resharding_op_observer.h
index 0a6cfc3ad1f..7b5eb7108a6 100644
--- a/src/mongo/db/s/resharding/resharding_op_observer.h
+++ b/src/mongo/db/s/resharding/resharding_op_observer.h
@@ -108,7 +108,7 @@ public:
const boost::optional<OplogSlot> slot) override {}
void onCreateCollection(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
diff --git a/src/mongo/db/s/resharding_util.cpp b/src/mongo/db/s/resharding_util.cpp
index 5fc47faee38..849e99535d6 100644
--- a/src/mongo/db/s/resharding_util.cpp
+++ b/src/mongo/db/s/resharding_util.cpp
@@ -363,7 +363,7 @@ void createSlimOplogView(OperationContext* opCtx, Database* db) {
{
// Create 'system.views' in a separate WUOW if it does not exist.
WriteUnitOfWork wuow(opCtx);
- const Collection* coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
+ CollectionPtr coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
opCtx, NamespaceString(db->getSystemViewsName()));
if (!coll) {
coll = db->createCollection(opCtx, NamespaceString(db->getSystemViewsName()));
diff --git a/src/mongo/db/s/shard_local.cpp b/src/mongo/db/s/shard_local.cpp
index 9b432100426..8b293435bfb 100644
--- a/src/mongo/db/s/shard_local.cpp
+++ b/src/mongo/db/s/shard_local.cpp
@@ -127,9 +127,11 @@ Status ShardLocal::createIndexOnConfig(OperationContext* opCtx,
invariant(ns.db() == "config" || ns.db() == "admin");
try {
+ // TODO SERVER-50983: Create abstraction for creating collection when using
+ // AutoGetCollection
AutoGetOrCreateDb autoDb(opCtx, ns.db(), MODE_IX);
AutoGetCollection autoColl(opCtx, ns, MODE_X);
- auto collection = autoColl.getCollection();
+ const Collection* collection = autoColl.getCollection().get();
if (!collection) {
CollectionOptions options;
options.uuid = UUID::gen();
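
shard_local.cpp keeps a raw pointer on purpose: the local is reassigned to a freshly created collection a few lines later, so the patch unwraps the CollectionPtr with .get(). A sketch of that escape hatch with toy stand-ins, not the MongoDB classes:

    #include <cassert>

    struct Collection {};

    class CollectionPtr {  // toy model: wraps a raw const Collection*
    public:
        explicit CollectionPtr(const Collection* coll = nullptr) : _coll(coll) {}
        const Collection* get() const { return _coll; }
        explicit operator bool() const { return _coll != nullptr; }
    private:
        const Collection* _coll;
    };

    int main() {
        Collection backing;
        CollectionPtr ptr(&backing);
        const Collection* collection = ptr.get();  // raw pointer, reassignable later
        assert(collection == &backing);
    }
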
diff --git a/src/mongo/db/s/shard_server_op_observer.cpp b/src/mongo/db/s/shard_server_op_observer.cpp
index 3cef6d2aebd..9075740f1a8 100644
--- a/src/mongo/db/s/shard_server_op_observer.cpp
+++ b/src/mongo/db/s/shard_server_op_observer.cpp
@@ -453,7 +453,7 @@ void ShardServerOpObserver::onDelete(OperationContext* opCtx,
}
void ShardServerOpObserver::onCreateCollection(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
diff --git a/src/mongo/db/s/shard_server_op_observer.h b/src/mongo/db/s/shard_server_op_observer.h
index d608611570f..822cc291943 100644
--- a/src/mongo/db/s/shard_server_op_observer.h
+++ b/src/mongo/db/s/shard_server_op_observer.h
@@ -106,7 +106,7 @@ public:
const boost::optional<OplogSlot> slot) final{};
void onCreateCollection(OperationContext* opCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex,
diff --git a/src/mongo/db/s/shardsvr_shard_collection.cpp b/src/mongo/db/s/shardsvr_shard_collection.cpp
index a311110cc98..5b9e4c5dd7d 100644
--- a/src/mongo/db/s/shardsvr_shard_collection.cpp
+++ b/src/mongo/db/s/shardsvr_shard_collection.cpp
@@ -189,7 +189,7 @@ void checkCollation(OperationContext* opCtx, const ShardsvrShardCollectionReques
AutoGetCollectionViewMode::kViewsForbidden);
const auto actualCollator = [&]() -> const CollatorInterface* {
- const auto* const coll = autoColl.getCollection();
+ const auto& coll = autoColl.getCollection();
if (coll) {
uassert(
ErrorCodes::InvalidOptions, "can't shard a capped collection", !coll->isCapped());
diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp
index b46885c3f1b..4efdc2e8656 100644
--- a/src/mongo/db/s/split_chunk.cpp
+++ b/src/mongo/db/s/split_chunk.cpp
@@ -58,7 +58,7 @@ namespace {
const ReadPreferenceSetting kPrimaryOnlyReadPreference{ReadPreference::PrimaryOnly};
bool checkIfSingleDoc(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
const IndexDescriptor* idx,
const ChunkType* chunk) {
KeyPattern kp(idx->keyPattern());
@@ -208,9 +208,7 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
}
}
- AutoGetCollection autoColl(opCtx, nss, MODE_IS);
-
- const Collection* const collection = autoColl.getCollection();
+ AutoGetCollection collection(opCtx, nss, MODE_IS);
if (!collection) {
LOGV2_WARNING(
23778,
@@ -237,10 +235,10 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
KeyPattern shardKeyPattern(keyPatternObj);
if (shardKeyPattern.globalMax().woCompare(backChunk.getMax()) == 0 &&
- checkIfSingleDoc(opCtx, collection, idx, &backChunk)) {
+ checkIfSingleDoc(opCtx, collection.getCollection(), idx, &backChunk)) {
return boost::optional<ChunkRange>(ChunkRange(backChunk.getMin(), backChunk.getMax()));
} else if (shardKeyPattern.globalMin().woCompare(frontChunk.getMin()) == 0 &&
- checkIfSingleDoc(opCtx, collection, idx, &frontChunk)) {
+ checkIfSingleDoc(opCtx, collection.getCollection(), idx, &frontChunk)) {
return boost::optional<ChunkRange>(ChunkRange(frontChunk.getMin(), frontChunk.getMax()));
}
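
split_chunk.cpp shows the call-site half of the AutoGetCollection change: the RAII object doubles as the existence check, and functions that take a const CollectionPtr& receive collection.getCollection() explicitly. A sketch of that shape with toy stand-ins, not the real API:

    #include <cassert>

    struct CollectionPtr { int numDocs = 1; };

    struct AutoGetCollection {  // toy model of the RAII type
        CollectionPtr coll;
        explicit operator bool() const { return true; }  // "collection exists" check
        const CollectionPtr& getCollection() const { return coll; }
    };

    // stands in for helpers such as checkIfSingleDoc(opCtx, collection, idx, chunk)
    bool checkIfSingleDocModel(const CollectionPtr& collection) {
        return collection.numDocs == 1;
    }

    int main() {
        AutoGetCollection collection;  // the RAII object is named like the collection
        if (!collection) {
            return 1;  // mirrors the "ns not found" early returns above
        }
        assert(checkIfSingleDocModel(collection.getCollection()));
    }
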
diff --git a/src/mongo/db/s/split_vector.cpp b/src/mongo/db/s/split_vector.cpp
index 67daf15550f..7879027e91c 100644
--- a/src/mongo/db/s/split_vector.cpp
+++ b/src/mongo/db/s/split_vector.cpp
@@ -76,9 +76,8 @@ std::vector<BSONObj> splitVector(OperationContext* opCtx,
}
{
- AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- const Collection* const collection = autoColl.getCollection();
+ AutoGetCollection collection(opCtx, nss, MODE_IS);
uassert(ErrorCodes::NamespaceNotFound, "ns not found", collection);
// Allow multiKey based on the invariant that shard keys must be single-valued. Therefore,
@@ -164,7 +163,7 @@ std::vector<BSONObj> splitVector(OperationContext* opCtx,
long long numChunks = 0;
auto exec = InternalPlanner::indexScan(opCtx,
- collection,
+ collection.getCollection(),
idx,
minKey,
maxKey,
@@ -182,7 +181,7 @@ std::vector<BSONObj> splitVector(OperationContext* opCtx,
BSONObj maxKeyInChunk;
{
auto exec = InternalPlanner::indexScan(opCtx,
- collection,
+ collection.getCollection(),
idx,
maxKey,
minKey,
@@ -299,7 +298,7 @@ std::vector<BSONObj> splitVector(OperationContext* opCtx,
"keyCount"_attr = keyCount);
exec = InternalPlanner::indexScan(opCtx,
- collection,
+ collection.getCollection(),
idx,
minKey,
maxKey,
diff --git a/src/mongo/db/session_catalog_mongod.cpp b/src/mongo/db/session_catalog_mongod.cpp
index 7ae2190fe77..a8a8ff9d7d6 100644
--- a/src/mongo/db/session_catalog_mongod.cpp
+++ b/src/mongo/db/session_catalog_mongod.cpp
@@ -270,9 +270,8 @@ void MongoDSessionCatalog::onStepUp(OperationContext* opCtx) {
}
boost::optional<UUID> MongoDSessionCatalog::getTransactionTableUUID(OperationContext* opCtx) {
- AutoGetCollection autoColl(opCtx, NamespaceString::kSessionTransactionsTableNamespace, MODE_IS);
- const auto coll = autoColl.getCollection();
+ AutoGetCollection coll(opCtx, NamespaceString::kSessionTransactionsTableNamespace, MODE_IS);
if (!coll) {
return boost::none;
}
diff --git a/src/mongo/db/startup_recovery.cpp b/src/mongo/db/startup_recovery.cpp
index 7c96c3919f3..fbaaef9637f 100644
--- a/src/mongo/db/startup_recovery.cpp
+++ b/src/mongo/db/startup_recovery.cpp
@@ -105,7 +105,7 @@ Status restoreMissingFeatureCompatibilityVersionDocument(OperationContext* opCtx
createCollection(opCtx, fcvNss.db().toString(), BSON("create" << fcvNss.coll())));
}
- const Collection* fcvColl = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
+ const CollectionPtr& fcvColl = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
opCtx, NamespaceString::kServerConfigurationNamespace);
invariant(fcvColl);
@@ -323,7 +323,7 @@ bool hasReplSetConfigDoc(OperationContext* opCtx) {
void assertCappedOplog(OperationContext* opCtx, Database* db) {
const NamespaceString oplogNss(NamespaceString::kRsOplogNamespace);
invariant(opCtx->lockState()->isDbLockedForMode(oplogNss.db(), MODE_IS));
- const Collection* oplogCollection =
+ const CollectionPtr& oplogCollection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, oplogNss);
if (oplogCollection && !oplogCollection->isCapped()) {
LOGV2_FATAL_NOTRACE(
@@ -436,7 +436,7 @@ void setReplSetMemberInStandaloneMode(OperationContext* opCtx) {
}
invariant(opCtx->lockState()->isW());
- const Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
+ CollectionPtr collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
opCtx, NamespaceString::kSystemReplSetNamespace);
if (collection && !collection->isEmpty(opCtx)) {
setReplSetMemberInStandaloneMode(opCtx->getServiceContext(), true);
diff --git a/src/mongo/db/stats/storage_stats.cpp b/src/mongo/db/stats/storage_stats.cpp
index b942daf3dff..4e3eee4e846 100644
--- a/src/mongo/db/stats/storage_stats.cpp
+++ b/src/mongo/db/stats/storage_stats.cpp
@@ -70,7 +70,7 @@ Status appendCollectionStorageStats(OperationContext* opCtx,
return Status::OK();
}
- const Collection* collection = autoColl->getCollection(); // Will be set if present
+ const auto& collection = autoColl->getCollection(); // Will be set if present
if (!autoColl->getDb() || !collection) {
result->appendNumber("size", 0);
result->appendNumber("count", 0);
@@ -145,13 +145,12 @@ Status appendCollectionStorageStats(OperationContext* opCtx,
Status appendCollectionRecordCount(OperationContext* opCtx,
const NamespaceString& nss,
BSONObjBuilder* result) {
- AutoGetCollectionForReadCommand ctx(opCtx, nss);
- if (!ctx.getDb()) {
+ AutoGetCollectionForReadCommand collection(opCtx, nss);
+ if (!collection.getDb()) {
return {ErrorCodes::BadValue,
str::stream() << "Database [" << nss.db().toString() << "] not found."};
}
- const Collection* collection = ctx.getCollection();
if (!collection) {
return {ErrorCodes::BadValue,
str::stream() << "Collection [" << nss.toString() << "] not found."};
diff --git a/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp b/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp
index fefa3744e6b..53b359ec0d4 100644
--- a/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp
+++ b/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp
@@ -69,7 +69,7 @@ bool OplogCapMaintainerThread::_deleteExcessDocuments() {
// interruptions such as restartCatalog. A PBWM, database, or collection lock is not
// needed. This improves concurrency if oplog truncation takes a long time.
AutoGetOplog oplogWrite(opCtx.get(), OplogAccessMode::kWrite);
- auto oplog = oplogWrite.getCollection();
+ const auto& oplog = oplogWrite.getCollection();
if (!oplog) {
LOGV2_DEBUG(4562600, 2, "oplog collection does not exist");
return false;
diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h
index a6a88bc8403..ffcf0106e30 100644
--- a/src/mongo/db/storage/record_store.h
+++ b/src/mongo/db/storage/record_store.h
@@ -43,6 +43,7 @@ namespace mongo {
class CappedCallback;
class Collection;
+class CollectionPtr;
class MAdvise;
class OperationContext;
diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp
index c8c87173421..f90d2ea6415 100644
--- a/src/mongo/db/storage/storage_engine_impl.cpp
+++ b/src/mongo/db/storage/storage_engine_impl.cpp
@@ -1192,7 +1192,7 @@ void StorageEngineImpl::TimestampMonitor::removeListener(TimestampListener* list
int64_t StorageEngineImpl::sizeOnDiskForDb(OperationContext* opCtx, StringData dbName) {
int64_t size = 0;
- catalog::forEachCollectionFromDb(opCtx, dbName, MODE_IS, [&](const Collection* collection) {
+ catalog::forEachCollectionFromDb(opCtx, dbName, MODE_IS, [&](const CollectionPtr& collection) {
size += collection->getRecordStore()->storageSize(opCtx);
std::vector<std::string> indexNames;
diff --git a/src/mongo/db/storage/storage_engine_test_fixture.h b/src/mongo/db/storage/storage_engine_test_fixture.h
index cada701556d..c6ad8880e77 100644
--- a/src/mongo/db/storage/storage_engine_test_fixture.h
+++ b/src/mongo/db/storage/storage_engine_test_fixture.h
@@ -157,7 +157,7 @@ public:
}
BSONObj spec = builder.append("name", key).append("v", 2).done();
- const Collection* collection =
+ CollectionPtr collection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, collNs);
auto descriptor =
std::make_unique<IndexDescriptor>(collection, IndexNames::findPluginName(spec), spec);
@@ -171,13 +171,13 @@ public:
}
void indexBuildSuccess(OperationContext* opCtx, NamespaceString collNs, std::string key) {
- const Collection* collection =
+ CollectionPtr collection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, collNs);
DurableCatalog::get(opCtx)->indexBuildSuccess(opCtx, collection->getCatalogId(), key);
}
Status removeEntry(OperationContext* opCtx, StringData collNs, DurableCatalog* catalog) {
- const Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
+ CollectionPtr collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
opCtx, NamespaceString(collNs));
return dynamic_cast<DurableCatalogImpl*>(catalog)->_removeEntry(opCtx,
collection->getCatalogId());
diff --git a/src/mongo/db/storage/wiredtiger/oplog_stones_server_status_section.cpp b/src/mongo/db/storage/wiredtiger/oplog_stones_server_status_section.cpp
index 71d2421f5e5..e92d6b169e2 100644
--- a/src/mongo/db/storage/wiredtiger/oplog_stones_server_status_section.cpp
+++ b/src/mongo/db/storage/wiredtiger/oplog_stones_server_status_section.cpp
@@ -55,7 +55,7 @@ public:
return builder.obj();
}
AutoGetOplog oplogRead(opCtx, OplogAccessMode::kRead);
- auto oplog = oplogRead.getCollection();
+ const auto& oplog = oplogRead.getCollection();
if (oplog) {
const auto localDb =
DatabaseHolder::get(opCtx)->getDb(opCtx, NamespaceString::kLocalDb);
diff --git a/src/mongo/db/system_index.cpp b/src/mongo/db/system_index.cpp
index 8bd0cfbd422..09521feda5a 100644
--- a/src/mongo/db/system_index.cpp
+++ b/src/mongo/db/system_index.cpp
@@ -199,7 +199,7 @@ Status verifySystemIndexes(OperationContext* opCtx) {
return Status::OK();
}
-void createSystemIndexes(OperationContext* opCtx, const Collection* collection) {
+void createSystemIndexes(OperationContext* opCtx, const CollectionPtr& collection) {
invariant(collection);
const NamespaceString& ns = collection->ns();
BSONObj indexSpec;
diff --git a/src/mongo/db/system_index.h b/src/mongo/db/system_index.h
index 02eb4d26b4a..ce341e8b3b9 100644
--- a/src/mongo/db/system_index.h
+++ b/src/mongo/db/system_index.h
@@ -32,6 +32,7 @@
namespace mongo {
class Collection;
+class CollectionPtr;
class OperationContext;
class Status;
@@ -39,7 +40,7 @@ class Status;
* Creates the appropriate indexes on _new_ system collections for authentication,
* authorization, and sessions.
*/
-void createSystemIndexes(OperationContext* opCtx, const Collection* collection);
+void createSystemIndexes(OperationContext* opCtx, const CollectionPtr& collection);
/**
* Verifies that only the appropriate indexes to support authentication, authorization, and
diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp
index de5874ae3a6..5284c8c2300 100644
--- a/src/mongo/db/transaction_participant.cpp
+++ b/src/mongo/db/transaction_participant.cpp
@@ -206,18 +206,18 @@ void updateSessionEntry(OperationContext* opCtx, const UpdateRequest& updateRequ
// Current code only supports replacement update.
dassert(UpdateDriver::isDocReplacement(updateRequest.getUpdateModification()));
- AutoGetCollection autoColl(opCtx, NamespaceString::kSessionTransactionsTableNamespace, MODE_IX);
+ AutoGetCollection collection(
+ opCtx, NamespaceString::kSessionTransactionsTableNamespace, MODE_IX);
uassert(40527,
str::stream() << "Unable to persist transaction state because the session transaction "
"collection is missing. This indicates that the "
<< NamespaceString::kSessionTransactionsTableNamespace.ns()
<< " collection has been manually deleted.",
- autoColl.getCollection());
+ collection.getCollection());
WriteUnitOfWork wuow(opCtx);
- auto collection = autoColl.getCollection();
auto idIndex = collection->getIndexCatalog()->findIdIndex(opCtx);
uassert(40672,
diff --git a/src/mongo/db/views/durable_view_catalog.cpp b/src/mongo/db/views/durable_view_catalog.cpp
index d27b4384a59..25b0c2a28a6 100644
--- a/src/mongo/db/views/durable_view_catalog.cpp
+++ b/src/mongo/db/views/durable_view_catalog.cpp
@@ -111,7 +111,7 @@ void DurableViewCatalogImpl::_iterate(OperationContext* opCtx,
ViewCatalogLookupBehavior lookupBehavior) {
invariant(opCtx->lockState()->isCollectionLockedForMode(_db->getSystemViewsName(), MODE_IS));
- const Collection* systemViews =
+ const CollectionPtr& systemViews =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, _db->getSystemViewsName());
if (!systemViews) {
return;
@@ -186,7 +186,7 @@ void DurableViewCatalogImpl::upsert(OperationContext* opCtx,
NamespaceString systemViewsNs(_db->getSystemViewsName());
dassert(opCtx->lockState()->isCollectionLockedForMode(systemViewsNs, MODE_X));
- const Collection* systemViews =
+ const CollectionPtr& systemViews =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, systemViewsNs);
invariant(systemViews);
@@ -218,7 +218,7 @@ void DurableViewCatalogImpl::remove(OperationContext* opCtx, const NamespaceStri
dassert(opCtx->lockState()->isDbLockedForMode(_db->name(), MODE_IX));
dassert(opCtx->lockState()->isCollectionLockedForMode(name, MODE_IX));
- const Collection* systemViews =
+ const CollectionPtr& systemViews =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, _db->getSystemViewsName());
dassert(opCtx->lockState()->isCollectionLockedForMode(systemViews->ns(), MODE_X));
diff --git a/src/mongo/db/yieldable.h b/src/mongo/db/yieldable.h
new file mode 100644
index 00000000000..d420789933d
--- /dev/null
+++ b/src/mongo/db/yieldable.h
@@ -0,0 +1,39 @@
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+namespace mongo {
+class Yieldable {
+public:
+ virtual ~Yieldable() {}
+ virtual void yield() const = 0;
+ virtual void restore() const = 0;
+};
+} // namespace mongo
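
The new header above defines the whole Yieldable contract. The hunks earlier in this diff pass &collection to restoreState(), which suggests CollectionPtr as an implementer, though that wiring is outside this diff. A toy implementer of ours, purely illustrative, shows how the const-qualified yield()/restore() pair is expected to behave:

    #include <cassert>

    class Yieldable {  // repeated verbatim from the header above
    public:
        virtual ~Yieldable() {}
        virtual void yield() const = 0;
        virtual void restore() const = 0;
    };

    class YieldableFlag : public Yieldable {
    public:
        void yield() const override { _held = false; }   // drop state across a yield point
        void restore() const override { _held = true; }  // reacquire it afterwards
        bool held() const { return _held; }
    private:
        mutable bool _held = true;  // the interface is const, so the state is mutable
    };

    int main() {
        YieldableFlag y;
        y.yield();
        assert(!y.held());
        y.restore();
        assert(y.held());
    }
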
diff --git a/src/mongo/dbtests/clienttests.cpp b/src/mongo/dbtests/clienttests.cpp
index a8a42782b20..f17326bfbf3 100644
--- a/src/mongo/dbtests/clienttests.cpp
+++ b/src/mongo/dbtests/clienttests.cpp
@@ -119,7 +119,7 @@ public:
db.insert(ns(), BSON("x" << 1 << "y" << 2));
db.insert(ns(), BSON("x" << 2 << "y" << 2));
- const Collection* collection = ctx.getCollection();
+ const auto& collection = ctx.getCollection();
ASSERT(collection);
const IndexCatalog* indexCatalog = collection->getIndexCatalog();
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index 5e6a74f4d11..836ca016f26 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -117,7 +117,7 @@ protected:
OldClientContext _context;
Database* _database;
- const Collection* _collection;
+ CollectionPtr _collection;
DBDirectClient _client;
};
diff --git a/src/mongo/dbtests/dbtests.h b/src/mongo/dbtests/dbtests.h
index 8a1e7065573..310b8235796 100644
--- a/src/mongo/dbtests/dbtests.h
+++ b/src/mongo/dbtests/dbtests.h
@@ -79,7 +79,7 @@ public:
return _clientContext->db();
}
- const Collection* getCollection() const {
+ CollectionPtr getCollection() const {
return CollectionCatalog::get(_opCtx).lookupCollectionByNamespace(_opCtx, _nss);
}
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 83ba1ca2541..a8751f1f14e 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -56,7 +56,7 @@ static const NamespaceString _nss = NamespaceString(_ns);
/**
* Test fixture for a write locked test using collection _ns. Includes functionality to
* partially construct a new IndexDetails in a manner that supports proper cleanup in
 * dropCollection().
*/
class IndexBuildBase {
public:
@@ -270,7 +270,7 @@ public:
boost::optional<Lock::CollectionLock> collLk;
collLk.emplace(_opCtx, _nss, LockMode::MODE_IX);
// The new index is not listed in the index catalog because the index build failed.
- ASSERT(!collection()->getIndexCatalog()->findIndexByName(_opCtx, "a_1"));
+ ASSERT(!collection().get()->getIndexCatalog()->findIndexByName(_opCtx, "a_1"));
}
};
@@ -320,7 +320,7 @@ public:
collLk.emplace(_opCtx, _nss, LockMode::MODE_IX);
// The new index is not listed in the index catalog because the index build failed.
- ASSERT(!collection()->getIndexCatalog()->findIndexByName(_opCtx, "_id_"));
+ ASSERT(!collection().get()->getIndexCatalog()->findIndexByName(_opCtx, "_id_"));
}
}; // namespace IndexUpdateTests
@@ -547,7 +547,7 @@ protected:
class IndexCatatalogFixIndexKey : public IndexBuildBase {
public:
void run() {
- auto indexCatalog = collection()->getIndexCatalog();
+ auto indexCatalog = collection().get()->getIndexCatalog();
ASSERT_BSONOBJ_EQ(BSON("x" << 1), indexCatalog->fixIndexKey(BSON("x" << 1)));
diff --git a/src/mongo/dbtests/multikey_paths_test.cpp b/src/mongo/dbtests/multikey_paths_test.cpp
index b9e47a1bff0..64096842209 100644
--- a/src/mongo/dbtests/multikey_paths_test.cpp
+++ b/src/mongo/dbtests/multikey_paths_test.cpp
@@ -77,11 +77,11 @@ public:
}
}
- Status createIndex(const Collection* collection, BSONObj indexSpec) {
+ Status createIndex(const CollectionPtr& collection, BSONObj indexSpec) {
return dbtests::createIndexFromSpec(_opCtx.get(), collection->ns().ns(), indexSpec);
}
- void assertMultikeyPaths(const Collection* collection,
+ void assertMultikeyPaths(const CollectionPtr& collection,
BSONObj keyPattern,
const MultikeyPaths& expectedMultikeyPaths) {
const IndexCatalog* indexCatalog = collection->getIndexCatalog();
@@ -124,8 +124,7 @@ private:
};
TEST_F(MultikeyPathsTest, PathsUpdatedOnIndexCreation) {
- AutoGetCollection autoColl(_opCtx.get(), _nss, MODE_X);
- const Collection* collection = autoColl.getCollection();
+ AutoGetCollection collection(_opCtx.get(), _nss, MODE_X);
invariant(collection);
{
@@ -139,18 +138,17 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnIndexCreation) {
}
BSONObj keyPattern = BSON("a" << 1 << "b" << 1);
- createIndex(collection,
+ createIndex(collection.getCollection(),
BSON("name"
<< "a_1_b_1"
<< "key" << keyPattern << "v" << static_cast<int>(kIndexVersion)))
.transitional_ignore();
- assertMultikeyPaths(collection, keyPattern, {MultikeyComponents{}, {0U}});
+ assertMultikeyPaths(collection.getCollection(), keyPattern, {MultikeyComponents{}, {0U}});
}
TEST_F(MultikeyPathsTest, PathsUpdatedOnIndexCreationWithMultipleDocuments) {
- AutoGetCollection autoColl(_opCtx.get(), _nss, MODE_X);
- const Collection* collection = autoColl.getCollection();
+ AutoGetCollection collection(_opCtx.get(), _nss, MODE_X);
invariant(collection);
{
@@ -168,22 +166,21 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnIndexCreationWithMultipleDocuments) {
}
BSONObj keyPattern = BSON("a" << 1 << "b" << 1);
- createIndex(collection,
+ createIndex(collection.getCollection(),
BSON("name"
<< "a_1_b_1"
<< "key" << keyPattern << "v" << static_cast<int>(kIndexVersion)))
.transitional_ignore();
- assertMultikeyPaths(collection, keyPattern, {{0U}, {0U}});
+ assertMultikeyPaths(collection.getCollection(), keyPattern, {{0U}, {0U}});
}
TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentInsert) {
- AutoGetCollection autoColl(_opCtx.get(), _nss, MODE_X);
- const Collection* collection = autoColl.getCollection();
+ AutoGetCollection collection(_opCtx.get(), _nss, MODE_X);
invariant(collection);
BSONObj keyPattern = BSON("a" << 1 << "b" << 1);
- createIndex(collection,
+ createIndex(collection.getCollection(),
BSON("name"
<< "a_1_b_1"
<< "key" << keyPattern << "v" << static_cast<int>(kIndexVersion)))
@@ -199,7 +196,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentInsert) {
wuow.commit();
}
- assertMultikeyPaths(collection, keyPattern, {MultikeyComponents{}, {0U}});
+ assertMultikeyPaths(collection.getCollection(), keyPattern, {MultikeyComponents{}, {0U}});
{
WriteUnitOfWork wuow(_opCtx.get());
@@ -211,16 +208,15 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentInsert) {
wuow.commit();
}
- assertMultikeyPaths(collection, keyPattern, {{0U}, {0U}});
+ assertMultikeyPaths(collection.getCollection(), keyPattern, {{0U}, {0U}});
}
TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentUpdate) {
- AutoGetCollection autoColl(_opCtx.get(), _nss, MODE_X);
- const Collection* collection = autoColl.getCollection();
+ AutoGetCollection collection(_opCtx.get(), _nss, MODE_X);
invariant(collection);
BSONObj keyPattern = BSON("a" << 1 << "b" << 1);
- createIndex(collection,
+ createIndex(collection.getCollection(),
BSON("name"
<< "a_1_b_1"
<< "key" << keyPattern << "v" << static_cast<int>(kIndexVersion)))
@@ -234,7 +230,8 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentUpdate) {
wuow.commit();
}
- assertMultikeyPaths(collection, keyPattern, {MultikeyComponents{}, MultikeyComponents{}});
+ assertMultikeyPaths(
+ collection.getCollection(), keyPattern, {MultikeyComponents{}, MultikeyComponents{}});
{
auto cursor = collection->getCursor(_opCtx.get());
@@ -259,16 +256,15 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentUpdate) {
}
}
- assertMultikeyPaths(collection, keyPattern, {MultikeyComponents{}, {0U}});
+ assertMultikeyPaths(collection.getCollection(), keyPattern, {MultikeyComponents{}, {0U}});
}
TEST_F(MultikeyPathsTest, PathsNotUpdatedOnDocumentDelete) {
- AutoGetCollection autoColl(_opCtx.get(), _nss, MODE_X);
- const Collection* collection = autoColl.getCollection();
+ AutoGetCollection collection(_opCtx.get(), _nss, MODE_X);
invariant(collection);
BSONObj keyPattern = BSON("a" << 1 << "b" << 1);
- createIndex(collection,
+ createIndex(collection.getCollection(),
BSON("name"
<< "a_1_b_1"
<< "key" << keyPattern << "v" << static_cast<int>(kIndexVersion)))
@@ -284,7 +280,7 @@ TEST_F(MultikeyPathsTest, PathsNotUpdatedOnDocumentDelete) {
wuow.commit();
}
- assertMultikeyPaths(collection, keyPattern, {MultikeyComponents{}, {0U}});
+ assertMultikeyPaths(collection.getCollection(), keyPattern, {MultikeyComponents{}, {0U}});
{
auto cursor = collection->getCursor(_opCtx.get());
@@ -299,23 +295,22 @@ TEST_F(MultikeyPathsTest, PathsNotUpdatedOnDocumentDelete) {
}
}
- assertMultikeyPaths(collection, keyPattern, {MultikeyComponents{}, {0U}});
+ assertMultikeyPaths(collection.getCollection(), keyPattern, {MultikeyComponents{}, {0U}});
}
TEST_F(MultikeyPathsTest, PathsUpdatedForMultipleIndexesOnDocumentInsert) {
- AutoGetCollection autoColl(_opCtx.get(), _nss, MODE_X);
- const Collection* collection = autoColl.getCollection();
+ AutoGetCollection collection(_opCtx.get(), _nss, MODE_X);
invariant(collection);
BSONObj keyPatternAB = BSON("a" << 1 << "b" << 1);
- createIndex(collection,
+ createIndex(collection.getCollection(),
BSON("name"
<< "a_1_b_1"
<< "key" << keyPatternAB << "v" << static_cast<int>(kIndexVersion)))
.transitional_ignore();
BSONObj keyPatternAC = BSON("a" << 1 << "c" << 1);
- createIndex(collection,
+ createIndex(collection.getCollection(),
BSON("name"
<< "a_1_c_1"
<< "key" << keyPatternAC << "v" << static_cast<int>(kIndexVersion)))
@@ -331,8 +326,8 @@ TEST_F(MultikeyPathsTest, PathsUpdatedForMultipleIndexesOnDocumentInsert) {
wuow.commit();
}
- assertMultikeyPaths(collection, keyPatternAB, {{0U}, MultikeyComponents{}});
- assertMultikeyPaths(collection, keyPatternAC, {{0U}, MultikeyComponents{}});
+ assertMultikeyPaths(collection.getCollection(), keyPatternAB, {{0U}, MultikeyComponents{}});
+ assertMultikeyPaths(collection.getCollection(), keyPatternAC, {{0U}, MultikeyComponents{}});
}
} // namespace
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index 2352931aa9e..a083e3e08ac 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -59,7 +59,7 @@ protected:
static NamespaceString nss() {
return NamespaceString("unittests.pdfiletests.Insert");
}
- const Collection* collection() {
+ CollectionPtr collection() {
return CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
}
@@ -75,7 +75,7 @@ public:
WriteUnitOfWork wunit(&_opCtx);
BSONObj x = BSON("x" << 1);
ASSERT(x["_id"].type() == 0);
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
coll = _context.db()->createCollection(&_opCtx, nss());
diff --git a/src/mongo/dbtests/plan_executor_invalidation_test.cpp b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
index 56ba389423f..8a5369ba36c 100644
--- a/src/mongo/dbtests/plan_executor_invalidation_test.cpp
+++ b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
@@ -117,7 +117,7 @@ public:
return 50;
}
- const Collection* collection() const {
+ CollectionPtr collection() const {
return CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss);
}
@@ -162,7 +162,7 @@ TEST_F(PlanExecutorInvalidationTest, ExecutorToleratesDeletedDocumentsDuringYiel
_client.remove(nss.ns(), BSON("foo" << 10));
_client.remove(nss.ns(), BSON("foo" << 11));
- exec->restoreState();
+ exec->restoreState(nullptr);
// Make sure that the PlanExecutor moved forward over the deleted data. We don't see foo==10 or
// foo==11.
@@ -189,7 +189,7 @@ TEST_F(PlanExecutorInvalidationTest, PlanExecutorThrowsOnRestoreWhenCollectionIs
// Drop a collection that's not ours.
_client.dropCollection("unittests.someboguscollection");
- exec->restoreState();
+ exec->restoreState(nullptr);
ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr));
ASSERT_EQUALS(10, obj["foo"].numberInt());
@@ -198,7 +198,7 @@ TEST_F(PlanExecutorInvalidationTest, PlanExecutorThrowsOnRestoreWhenCollectionIs
_client.dropCollection(nss.ns());
- ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled);
+ ASSERT_THROWS_CODE(exec->restoreState(nullptr), DBException, ErrorCodes::QueryPlanKilled);
}
TEST_F(PlanExecutorInvalidationTest, CollScanExecutorDoesNotDieWhenAllIndicesDropped) {
@@ -215,7 +215,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanExecutorDoesNotDieWhenAllIndicesDro
exec->saveState();
_client.dropIndexes(nss.ns());
- exec->restoreState();
+ exec->restoreState(nullptr);
// Read the rest of the collection.
for (int i = 10; i < N(); ++i) {
@@ -238,7 +238,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanExecutorDoesNotDieWhenOneIndexDropp
exec->saveState();
_client.dropIndex(nss.ns(), BSON("foo" << 1));
- exec->restoreState();
+ exec->restoreState(nullptr);
// Read the rest of the collection.
for (int i = 10; i < N(); ++i) {
@@ -268,7 +268,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanExecutorDiesWhenAllIndexesDropped) {
_client.dropIndexes(nss.ns());
// Restoring the executor should throw.
- ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled);
+ ASSERT_THROWS_CODE(exec->restoreState(nullptr), DBException, ErrorCodes::QueryPlanKilled);
}
TEST_F(PlanExecutorInvalidationTest, IxscanExecutorDiesWhenIndexBeingScannedIsDropped) {
@@ -289,7 +289,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanExecutorDiesWhenIndexBeingScannedIsDr
_client.dropIndex(nss.ns(), keyPattern);
// Restoring the executor should throw.
- ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled);
+ ASSERT_THROWS_CODE(exec->restoreState(nullptr), DBException, ErrorCodes::QueryPlanKilled);
}
TEST_F(PlanExecutorInvalidationTest, IxscanExecutorSurvivesWhenUnrelatedIndexIsDropped) {
@@ -311,7 +311,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanExecutorSurvivesWhenUnrelatedIndexIsD
// state.
exec->saveState();
_client.dropIndex(nss.ns(), keyPatternBar);
- exec->restoreState();
+ exec->restoreState(nullptr);
// Scan the rest of the index.
for (int i = 10; i < N(); ++i) {
@@ -337,7 +337,7 @@ TEST_F(PlanExecutorInvalidationTest, ExecutorThrowsOnRestoreWhenDatabaseIsDroppe
_ctx.reset();
_client.dropDatabase("somesillydb");
_ctx.reset(new dbtests::WriteContextForTests(&_opCtx, nss.ns()));
- exec->restoreState();
+ exec->restoreState(nullptr);
ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr));
ASSERT_EQUALS(10, obj["foo"].numberInt());
@@ -348,7 +348,7 @@ TEST_F(PlanExecutorInvalidationTest, ExecutorThrowsOnRestoreWhenDatabaseIsDroppe
_ctx.reset();
_client.dropDatabase("unittests");
_ctx.reset(new dbtests::WriteContextForTests(&_opCtx, nss.ns()));
- ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled);
+ ASSERT_THROWS_CODE(exec->restoreState(nullptr), DBException, ErrorCodes::QueryPlanKilled);
}
// TODO SERVER-31695: Allow PlanExecutors to remain valid after collection rename.
@@ -371,7 +371,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanDiesOnCollectionRenameWithinDatabas
<< "dropTarget" << true),
info));
- ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled);
+ ASSERT_THROWS_CODE(exec->restoreState(nullptr), DBException, ErrorCodes::QueryPlanKilled);
}
// TODO SERVER-31695: Allow PlanExecutors to remain valid after collection rename.
@@ -397,7 +397,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanDiesOnCollectionRenameWithinDatabase)
<< "dropTarget" << true),
info));
- ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled);
+ ASSERT_THROWS_CODE(exec->restoreState(nullptr), DBException, ErrorCodes::QueryPlanKilled);
}
TEST_F(PlanExecutorInvalidationTest, IxscanDiesWhenTruncateCollectionDropsAllIndices) {
@@ -417,7 +417,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanDiesWhenTruncateCollectionDropsAllInd
// expected error code.
exec->saveState();
truncateCollection();
- ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled);
+ ASSERT_THROWS_CODE(exec->restoreState(nullptr), DBException, ErrorCodes::QueryPlanKilled);
}
TEST_F(PlanExecutorInvalidationTest, CollScanExecutorSurvivesCollectionTruncate) {
@@ -434,7 +434,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanExecutorSurvivesCollectionTruncate)
// successfully.
exec->saveState();
truncateCollection();
- exec->restoreState();
+ exec->restoreState(nullptr);
// Since all documents in the collection have been deleted, the PlanExecutor should issue EOF.
ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&obj, nullptr));
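Taken together, the hunks above show the new PlanExecutor::restoreState() contract: it now takes a pointer to a caller-held collection handle, and these invalidation tests pass nullptr so the executor re-resolves the collection itself. A minimal sketch of the save/DDL/restore pattern, assuming the test fixture's exec, _client, and nss from the hunks above:

    exec->saveState();              // release storage resources before a concurrent catalog change
    _client.dropIndexes(nss.ns());  // DDL runs while the executor is saved

    // Surviving case: no caller-held CollectionPtr, so pass nullptr.
    exec->restoreState(nullptr);

    // Fatal case: dropping the scanned collection kills the plan on restore.
    exec->saveState();
    _client.dropCollection(nss.ns());
    ASSERT_THROWS_CODE(exec->restoreState(nullptr), DBException, ErrorCodes::QueryPlanKilled);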
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index ddd7a9c51cf..6fad59d6966 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -114,11 +114,10 @@ public:
* Does NOT take ownership of 'cq'. Caller DOES NOT own the returned QuerySolution*.
*/
const QuerySolution* pickBestPlan(CanonicalQuery* cq) {
- AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- const Collection* collection = ctx.getCollection();
+ AutoGetCollectionForReadCommand collection(&_opCtx, nss);
QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_opCtx, collection, cq, &plannerParams);
+ fillOutPlannerParams(&_opCtx, collection.getCollection(), cq, &plannerParams);
// Plan.
auto statusWithSolutions = QueryPlanner::plan(*cq, plannerParams);
@@ -128,12 +127,12 @@ public:
ASSERT_GREATER_THAN_OR_EQUALS(solutions.size(), 1U);
// Fill out the MPR.
- _mps.reset(new MultiPlanStage(_expCtx.get(), collection, cq));
+ _mps.reset(new MultiPlanStage(_expCtx.get(), collection.getCollection(), cq));
unique_ptr<WorkingSet> ws(new WorkingSet());
// Put each solution from the planner into the MPR.
for (size_t i = 0; i < solutions.size(); ++i) {
auto&& root = stage_builder::buildClassicExecutableTree(
- &_opCtx, collection, *cq, *solutions[i], ws.get());
+ &_opCtx, collection.getCollection(), *cq, *solutions[i], ws.get());
_mps->addPlan(std::move(solutions[i]), std::move(root), ws.get());
}
// This is what sets a backup plan, should we test for it.
@@ -245,11 +244,12 @@ public:
"{ixscan: {filter: null, pattern: {d:1}}}}}",
soln->root()));
- AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- const Collection* collection = ctx.getCollection();
+ AutoGetCollectionForReadCommand collection(&_opCtx, nss);
StatusWith<std::unique_ptr<PlanCacheEntry>> planCacheEntryWithStatus =
- CollectionQueryInfo::get(collection).getPlanCache()->getEntry(*(cq.get()));
+ CollectionQueryInfo::get(collection.getCollection())
+ .getPlanCache()
+ ->getEntry(*(cq.get()));
ASSERT_OK(planCacheEntryWithStatus.getStatus());
// We assert that there was only one plan scored, implying that there was only one
diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp
index d0bc883c9d5..491755db37e 100644
--- a/src/mongo/dbtests/query_plan_executor.cpp
+++ b/src/mongo/dbtests/query_plan_executor.cpp
@@ -97,7 +97,7 @@ public:
* capable of executing a simple collection scan.
*/
unique_ptr<PlanExecutor, PlanExecutor::Deleter> makeCollScanExec(
- const Collection* coll,
+ const CollectionPtr& coll,
BSONObj& filterObj,
PlanYieldPolicy::YieldPolicy yieldPolicy = PlanYieldPolicy::YieldPolicy::YIELD_MANUAL,
TailableModeEnum tailableMode = TailableModeEnum::kNormal) {
@@ -148,7 +148,7 @@ public:
ixparams.bounds.endKey = BSON("" << end);
ixparams.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
- const Collection* coll =
+ const CollectionPtr& coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss);
@@ -183,7 +183,7 @@ protected:
private:
const IndexDescriptor* getIndex(Database* db, const BSONObj& obj) {
- const Collection* collection =
+ CollectionPtr collection =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss);
std::vector<const IndexDescriptor*> indexes;
collection->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, obj, false, &indexes);
@@ -206,7 +206,7 @@ TEST_F(PlanExecutorTest, DropIndexScanAgg) {
BSONObj indexSpec = BSON("a" << 1);
addIndex(indexSpec);
- const Collection* collection = ctx.getCollection();
+ CollectionPtr collection = ctx.getCollection();
// Create the aggregation pipeline.
std::vector<BSONObj> rawPipeline = {fromjson("{$match: {a: {$gte: 7, $lte: 10}}}")};
@@ -242,7 +242,7 @@ TEST_F(PlanExecutorTest, ShouldReportErrorIfExceedsTimeLimitDuringYield) {
BSONObj filterObj = fromjson("{_id: {$gt: 0}}");
- const Collection* coll = ctx.getCollection();
+ const CollectionPtr& coll = ctx.getCollection();
auto exec = makeCollScanExec(coll, filterObj, PlanYieldPolicy::YieldPolicy::ALWAYS_TIME_OUT);
BSONObj resultObj;
@@ -259,7 +259,7 @@ TEST_F(PlanExecutorTest, ShouldReportErrorIfKilledDuringYieldButIsTailableAndAwa
BSONObj filterObj = fromjson("{_id: {$gt: 0}}");
- const Collection* coll = ctx.getCollection();
+ const CollectionPtr& coll = ctx.getCollection();
auto exec = makeCollScanExec(coll,
filterObj,
PlanYieldPolicy::YieldPolicy::ALWAYS_TIME_OUT,
@@ -279,7 +279,7 @@ TEST_F(PlanExecutorTest, ShouldNotSwallowExceedsTimeLimitDuringYieldButIsTailabl
BSONObj filterObj = fromjson("{_id: {$gt: 0}}");
- const Collection* coll = ctx.getCollection();
+ const CollectionPtr& coll = ctx.getCollection();
auto exec = makeCollScanExec(coll,
filterObj,
PlanYieldPolicy::YieldPolicy::ALWAYS_TIME_OUT,
@@ -299,7 +299,7 @@ TEST_F(PlanExecutorTest, ShouldReportErrorIfKilledDuringYield) {
BSONObj filterObj = fromjson("{_id: {$gt: 0}}");
- const Collection* coll = ctx.getCollection();
+ const CollectionPtr& coll = ctx.getCollection();
auto exec = makeCollScanExec(coll, filterObj, PlanYieldPolicy::YieldPolicy::ALWAYS_MARK_KILLED);
BSONObj resultObj;
@@ -364,7 +364,7 @@ TEST_F(PlanExecutorSnapshotTest, SnapshotControl) {
BSONObj filterObj = fromjson("{a: {$gte: 2}}");
- const Collection* coll = ctx.getCollection();
+ const CollectionPtr& coll = ctx.getCollection();
auto exec = makeCollScanExec(coll, filterObj);
BSONObj objOut;
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index d08909af1ce..4a1bfa748ce 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -73,7 +73,7 @@ public:
ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), obj));
}
- const IndexDescriptor* getIndex(const BSONObj& obj, const Collection* coll) {
+ const IndexDescriptor* getIndex(const BSONObj& obj, const CollectionPtr& coll) {
std::vector<const IndexDescriptor*> indexes;
coll->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, obj, false, &indexes);
if (indexes.empty()) {
@@ -92,7 +92,7 @@ public:
return params;
}
- void getRecordIds(set<RecordId>* out, const Collection* coll) {
+ void getRecordIds(set<RecordId>* out, const CollectionPtr& coll) {
auto cursor = coll->getCursor(&_opCtx);
while (auto record = cursor->next()) {
out->insert(record->id);
@@ -178,7 +178,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll = ctx.getCollection();
+ CollectionPtr coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -264,7 +264,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll = ctx.getCollection();
+ CollectionPtr coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -345,7 +345,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll = ctx.getCollection();
+ CollectionPtr coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -389,7 +389,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll = ctx.getCollection();
+ CollectionPtr coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -437,7 +437,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll = ctx.getCollection();
+ CollectionPtr coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -483,7 +483,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll = ctx.getCollection();
+ CollectionPtr coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -536,7 +536,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll = ctx.getCollection();
+ CollectionPtr coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -589,7 +589,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll = ctx.getCollection();
+ CollectionPtr coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -645,7 +645,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll = ctx.getCollection();
+ CollectionPtr coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -693,7 +693,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll = ctx.getCollection();
+ CollectionPtr coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -746,7 +746,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll = ctx.getCollection();
+ CollectionPtr coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -796,7 +796,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll = ctx.getCollection();
+ CollectionPtr coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -918,7 +918,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll = ctx.getCollection();
+ CollectionPtr coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -1026,7 +1026,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll = ctx.getCollection();
+ CollectionPtr coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -1080,7 +1080,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll = ctx.getCollection();
+ CollectionPtr coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -1119,7 +1119,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll = ctx.getCollection();
+ CollectionPtr coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -1162,7 +1162,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll = ctx.getCollection();
+ CollectionPtr coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -1223,7 +1223,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll = ctx.getCollection();
+ CollectionPtr coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -1276,7 +1276,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll = ctx.getCollection();
+ CollectionPtr coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index cc62d39971b..8bf13d8f2c6 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -81,7 +81,7 @@ public:
addIndex(BSON("b" << 1));
dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
- const Collection* collection = ctx.getCollection();
+ CollectionPtr collection = ctx.getCollection();
ASSERT(collection);
// Add data.
@@ -111,7 +111,7 @@ public:
wuow.commit();
}
- void insertDocument(const Collection* collection, BSONObj obj) {
+ void insertDocument(const CollectionPtr& collection, BSONObj obj) {
WriteUnitOfWork wuow(&_opCtx);
OpDebug* const nullOpDebug = nullptr;
@@ -142,7 +142,7 @@ public:
return numResults;
}
- void forceReplanning(const Collection* collection, CanonicalQuery* cq) {
+ void forceReplanning(const CollectionPtr& collection, CanonicalQuery* cq) {
// Get planner params.
QueryPlannerParams plannerParams;
fillOutPlannerParams(&_opCtx, collection, cq, &plannerParams);
@@ -183,8 +183,7 @@ protected:
* not create a new cache entry.
*/
TEST_F(QueryStageCachedPlan, QueryStageCachedPlanFailureMemoryLimitExceeded) {
- AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- const Collection* collection = ctx.getCollection();
+ AutoGetCollectionForReadCommand collection(&_opCtx, nss);
ASSERT(collection);
// Query can be answered by either index on "a" or index on "b".
@@ -195,13 +194,13 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanFailureMemoryLimitExceeded) {
const std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
// We shouldn't have anything in the plan cache for this shape yet.
- PlanCache* cache = CollectionQueryInfo::get(collection).getPlanCache();
+ PlanCache* cache = CollectionQueryInfo::get(collection.getCollection()).getPlanCache();
ASSERT(cache);
ASSERT_EQ(cache->get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
// Get planner params.
QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
+ fillOutPlannerParams(&_opCtx, collection.getCollection(), cq.get(), &plannerParams);
// Mock stage will return a failure during the cached plan trial period.
auto mockChild = std::make_unique<MockStage>(_expCtx.get(), &_ws);
@@ -211,7 +210,7 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanFailureMemoryLimitExceeded) {
// High enough so that we shouldn't trigger a replan based on works.
const size_t decisionWorks = 50;
CachedPlanStage cachedPlanStage(_expCtx.get(),
- collection,
+ collection.getCollection(),
&_ws,
cq.get(),
plannerParams,
@@ -234,8 +233,7 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanFailureMemoryLimitExceeded) {
* query to be replanned. Also verify that the replanning results in a new plan cache entry.
*/
TEST_F(QueryStageCachedPlan, QueryStageCachedPlanHitMaxWorks) {
- AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- const Collection* collection = ctx.getCollection();
+ AutoGetCollectionForReadCommand collection(&_opCtx, nss);
ASSERT(collection);
// Query can be answered by either index on "a" or index on "b".
@@ -246,13 +244,13 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanHitMaxWorks) {
const std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
// We shouldn't have anything in the plan cache for this shape yet.
- PlanCache* cache = CollectionQueryInfo::get(collection).getPlanCache();
+ PlanCache* cache = CollectionQueryInfo::get(collection.getCollection()).getPlanCache();
ASSERT(cache);
ASSERT_EQ(cache->get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
// Get planner params.
QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
+ fillOutPlannerParams(&_opCtx, collection.getCollection(), cq.get(), &plannerParams);
// Set up queued data stage to take a long time before returning EOF. Should be long
// enough to trigger a replan.
@@ -265,7 +263,7 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanHitMaxWorks) {
}
CachedPlanStage cachedPlanStage(_expCtx.get(),
- collection,
+ collection.getCollection(),
&_ws,
cq.get(),
plannerParams,
@@ -287,8 +285,7 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanHitMaxWorks) {
* Test the way cache entries are added (either "active" or "inactive") to the plan cache.
*/
TEST_F(QueryStageCachedPlan, QueryStageCachedPlanAddsActiveCacheEntries) {
- AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- const Collection* collection = ctx.getCollection();
+ AutoGetCollectionForReadCommand collection(&_opCtx, nss);
ASSERT(collection);
// Never run - just used as a key for the cache's get() functions, since all of the other
@@ -301,13 +298,13 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanAddsActiveCacheEntries) {
canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 11}, b: {$gte: 11}}"));
// We shouldn't have anything in the plan cache for this shape yet.
- PlanCache* cache = CollectionQueryInfo::get(collection).getPlanCache();
+ PlanCache* cache = CollectionQueryInfo::get(collection.getCollection()).getPlanCache();
ASSERT(cache);
ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kNotPresent);
// Run the CachedPlanStage with a long-running child plan. Replanning should be
// triggered and an inactive entry will be added.
- forceReplanning(collection, noResultsCq.get());
+ forceReplanning(collection.getCollection(), noResultsCq.get());
// Check for an inactive cache entry.
ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentInactive);
@@ -324,7 +321,7 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanAddsActiveCacheEntries) {
// longer).
auto someResultsCq =
canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 1}, b: {$gte: 0}}"));
- forceReplanning(collection, someResultsCq.get());
+ forceReplanning(collection.getCollection(), someResultsCq.get());
ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentInactive);
// The works on the cache entry should have doubled.
@@ -335,7 +332,7 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanAddsActiveCacheEntries) {
// Run another query which takes less time, and be sure an active entry is created.
auto fewResultsCq =
canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 6}, b: {$gte: 0}}"));
- forceReplanning(collection, fewResultsCq.get());
+ forceReplanning(collection.getCollection(), fewResultsCq.get());
// Now there should be an active cache entry.
ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentActive);
@@ -346,8 +343,7 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanAddsActiveCacheEntries) {
TEST_F(QueryStageCachedPlan, DeactivatesEntriesOnReplan) {
- AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- const Collection* collection = ctx.getCollection();
+ AutoGetCollectionForReadCommand collection(&_opCtx, nss);
ASSERT(collection);
// Never run - just used as a key for the cache's get() functions, since all of the other
@@ -360,19 +356,19 @@ TEST_F(QueryStageCachedPlan, DeactivatesEntriesOnReplan) {
canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 11}, b: {$gte: 11}}"));
// We shouldn't have anything in the plan cache for this shape yet.
- PlanCache* cache = CollectionQueryInfo::get(collection).getPlanCache();
+ PlanCache* cache = CollectionQueryInfo::get(collection.getCollection()).getPlanCache();
ASSERT(cache);
ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kNotPresent);
// Run the CachedPlanStage with a long-running child plan. Replanning should be
// triggered and an inactive entry will be added.
- forceReplanning(collection, noResultsCq.get());
+ forceReplanning(collection.getCollection(), noResultsCq.get());
// Check for an inactive cache entry.
ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentInactive);
// Run the plan again, to create an active entry.
- forceReplanning(collection, noResultsCq.get());
+ forceReplanning(collection.getCollection(), noResultsCq.get());
// The works should be 1 for the entry since the query we ran should not have any results.
ASSERT_EQ(cache->get(*noResultsCq.get()).state, PlanCache::CacheEntryState::kPresentActive);
@@ -387,7 +383,7 @@ TEST_F(QueryStageCachedPlan, DeactivatesEntriesOnReplan) {
// value doubled from 1 to 2.
auto highWorksCq =
canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 0}, b: {$gte:0}}"));
- forceReplanning(collection, highWorksCq.get());
+ forceReplanning(collection.getCollection(), highWorksCq.get());
ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentInactive);
ASSERT_EQ(assertGet(cache->getEntry(*shapeCq))->works, 2U);
@@ -395,7 +391,7 @@ TEST_F(QueryStageCachedPlan, DeactivatesEntriesOnReplan) {
// planner will choose a plan with works value lower than the existing inactive
// entry. Replanning will thus deactivate the existing entry (it's already
// inactive so this is a noop), then create a new entry with a works value of 1.
- forceReplanning(collection, noResultsCq.get());
+ forceReplanning(collection.getCollection(), noResultsCq.get());
ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentActive);
ASSERT_EQ(assertGet(cache->getEntry(*shapeCq))->works, 1U);
}
@@ -405,8 +401,7 @@ TEST_F(QueryStageCachedPlan, EntriesAreNotDeactivatedWhenInactiveEntriesDisabled
internalQueryCacheDisableInactiveEntries.store(true);
ON_BLOCK_EXIT([] { internalQueryCacheDisableInactiveEntries.store(false); });
- AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- const Collection* collection = ctx.getCollection();
+ AutoGetCollectionForReadCommand collection(&_opCtx, nss);
ASSERT(collection);
// Never run - just used as a key for the cache's get() functions, since all of the other
@@ -419,26 +414,26 @@ TEST_F(QueryStageCachedPlan, EntriesAreNotDeactivatedWhenInactiveEntriesDisabled
canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 11}, b: {$gte: 11}}"));
// We shouldn't have anything in the plan cache for this shape yet.
- PlanCache* cache = CollectionQueryInfo::get(collection).getPlanCache();
+ PlanCache* cache = CollectionQueryInfo::get(collection.getCollection()).getPlanCache();
ASSERT(cache);
ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kNotPresent);
// Run the CachedPlanStage with a long-running child plan. Replanning should be
// triggered and an _active_ entry will be added (since the disableInactiveEntries flag is on).
- forceReplanning(collection, noResultsCq.get());
+ forceReplanning(collection.getCollection(), noResultsCq.get());
// Check for an active cache entry.
ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentActive);
// Run the plan again. The entry should still be active.
- forceReplanning(collection, noResultsCq.get());
+ forceReplanning(collection.getCollection(), noResultsCq.get());
ASSERT_EQ(cache->get(*noResultsCq.get()).state, PlanCache::CacheEntryState::kPresentActive);
// Run another query which takes long enough to evict the active cache entry. After replanning
// is triggered, be sure that the cache entry is still active.
auto highWorksCq =
canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 0}, b: {$gte:0}}"));
- forceReplanning(collection, highWorksCq.get());
+ forceReplanning(collection.getCollection(), highWorksCq.get());
ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentActive);
}
@@ -449,7 +444,7 @@ TEST_F(QueryStageCachedPlan, ThrowsOnYieldRecoveryWhenIndexIsDroppedBeforePlanSe
boost::optional<AutoGetCollectionForReadCommand> readLock;
readLock.emplace(&_opCtx, nss);
- const Collection* collection = readLock->getCollection();
+ const auto& collection = readLock->getCollection();
ASSERT(collection);
// Query can be answered by either index on "a" or index on "b".
@@ -491,7 +486,7 @@ TEST_F(QueryStageCachedPlan, DoesNotThrowOnYieldRecoveryWhenIndexIsDroppedAferPl
boost::optional<AutoGetCollectionForReadCommand> readLock;
readLock.emplace(&_opCtx, nss);
- const Collection* collection = readLock->getCollection();
+ const auto& collection = readLock->getCollection();
ASSERT(collection);
// Query can be answered by either index on "a" or index on "b".
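The recurring edit in this file collapses the two-line guard/pointer pair into a single object: AutoGetCollectionForReadCommand now stands in for the collection itself, converting to bool for the ASSERT and exposing the CollectionPtr through getCollection(). A hedged sketch of the idiom, built only from calls visible in the hunks above:

    // Before: separate lock guard and raw pointer.
    //   AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
    //   const Collection* collection = ctx.getCollection();
    // After: the guard is the handle (the ASSERT(collection) lines above
    // imply it is bool-convertible).
    AutoGetCollectionForReadCommand collection(&_opCtx, nss);
    ASSERT(collection);
    fillOutPlannerParams(&_opCtx, collection.getCollection(), cq.get(), &plannerParams);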
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index e765f7473a7..53aa2e78483 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -85,8 +85,7 @@ public:
}
int countResults(CollectionScanParams::Direction direction, const BSONObj& filterObj) {
- AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- auto collection = ctx.getCollection();
+ AutoGetCollectionForReadCommand collection(&_opCtx, nss);
// Configure the scan.
CollectionScanParams params;
@@ -102,13 +101,13 @@ public:
// Make a scan and have the runner own it.
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
unique_ptr<PlanStage> ps = std::make_unique<CollectionScan>(
- _expCtx.get(), collection, params, ws.get(), filterExpr.get());
+ _expCtx.get(), collection.getCollection(), params, ws.get(), filterExpr.get());
auto statusWithPlanExecutor =
plan_executor_factory::make(_expCtx,
std::move(ws),
std::move(ps),
- collection,
+ collection.getCollection(),
PlanYieldPolicy::YieldPolicy::NO_YIELD);
ASSERT_OK(statusWithPlanExecutor.getStatus());
auto exec = std::move(statusWithPlanExecutor.getValue());
@@ -123,7 +122,7 @@ public:
return count;
}
- void getRecordIds(const Collection* collection,
+ void getRecordIds(const CollectionPtr& collection,
CollectionScanParams::Direction direction,
vector<RecordId>* out) {
WorkingSet ws;
@@ -185,8 +184,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanBasicBackwardWithMatch) {
// Get objects in the order we inserted them.
TEST_F(QueryStageCollectionScanTest, QueryStageCollscanObjectsInOrderForward) {
- AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- auto collection = ctx.getCollection();
+ AutoGetCollectionForReadCommand collection(&_opCtx, nss);
// Configure the scan.
CollectionScanParams params;
@@ -195,11 +193,15 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanObjectsInOrderForward) {
// Make a scan and have the runner own it.
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
- unique_ptr<PlanStage> ps =
- std::make_unique<CollectionScan>(_expCtx.get(), collection, params, ws.get(), nullptr);
-
- auto statusWithPlanExecutor = plan_executor_factory::make(
- _expCtx, std::move(ws), std::move(ps), collection, PlanYieldPolicy::YieldPolicy::NO_YIELD);
+ unique_ptr<PlanStage> ps = std::make_unique<CollectionScan>(
+ _expCtx.get(), collection.getCollection(), params, ws.get(), nullptr);
+
+ auto statusWithPlanExecutor =
+ plan_executor_factory::make(_expCtx,
+ std::move(ws),
+ std::move(ps),
+ collection.getCollection(),
+ PlanYieldPolicy::YieldPolicy::NO_YIELD);
ASSERT_OK(statusWithPlanExecutor.getStatus());
auto exec = std::move(statusWithPlanExecutor.getValue());
@@ -216,19 +218,22 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanObjectsInOrderForward) {
// Get objects in the reverse order we inserted them when we go backwards.
TEST_F(QueryStageCollectionScanTest, QueryStageCollscanObjectsInOrderBackward) {
- AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- auto collection = ctx.getCollection();
+ AutoGetCollectionForReadCommand collection(&_opCtx, nss);
CollectionScanParams params;
params.direction = CollectionScanParams::BACKWARD;
params.tailable = false;
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
- unique_ptr<PlanStage> ps =
- std::make_unique<CollectionScan>(_expCtx.get(), collection, params, ws.get(), nullptr);
-
- auto statusWithPlanExecutor = plan_executor_factory::make(
- _expCtx, std::move(ws), std::move(ps), collection, PlanYieldPolicy::YieldPolicy::NO_YIELD);
+ unique_ptr<PlanStage> ps = std::make_unique<CollectionScan>(
+ _expCtx.get(), collection.getCollection(), params, ws.get(), nullptr);
+
+ auto statusWithPlanExecutor =
+ plan_executor_factory::make(_expCtx,
+ std::move(ws),
+ std::move(ps),
+ collection.getCollection(),
+ PlanYieldPolicy::YieldPolicy::NO_YIELD);
ASSERT_OK(statusWithPlanExecutor.getStatus());
auto exec = std::move(statusWithPlanExecutor.getValue());
@@ -247,7 +252,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanObjectsInOrderBackward) {
TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObject) {
dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
- const Collection* coll = ctx.getCollection();
+ const CollectionPtr& coll = ctx.getCollection();
// Get the RecordIds that would be returned by an in-order scan.
vector<RecordId> recordIds;
@@ -300,7 +305,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObject) {
// object we would have gotten after that. But, do it in reverse!
TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObjectBackward) {
dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
- const Collection* coll = ctx.getCollection();
+ const CollectionPtr& coll = ctx.getCollection();
// Get the RecordIds that would be returned by an in-order scan.
vector<RecordId> recordIds;
@@ -352,12 +357,11 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObjectBackw
// Verify that successfully seeking to the resumeAfterRecordId returns PlanStage::NEED_TIME and
// that we can complete the collection scan afterwards.
TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekSuccess) {
- AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- auto collection = ctx.getCollection();
+ AutoGetCollectionForReadCommand collection(&_opCtx, nss);
// Get the RecordIds that would be returned by an in-order scan.
vector<RecordId> recordIds;
- getRecordIds(collection, CollectionScanParams::FORWARD, &recordIds);
+ getRecordIds(collection.getCollection(), CollectionScanParams::FORWARD, &recordIds);
// We will resume the collection scan this many results in.
auto offset = 10;
@@ -371,8 +375,8 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekSuc
// Create plan stage.
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
- unique_ptr<PlanStage> ps =
- std::make_unique<CollectionScan>(_expCtx.get(), collection, params, ws.get(), nullptr);
+ unique_ptr<PlanStage> ps = std::make_unique<CollectionScan>(
+ _expCtx.get(), collection.getCollection(), params, ws.get(), nullptr);
WorkingSetID id = WorkingSet::INVALID_ID;
@@ -380,8 +384,12 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekSuc
ASSERT_EQUALS(PlanStage::NEED_TIME, ps->work(&id));
// Run the rest of the scan and verify the results.
- auto statusWithPlanExecutor = plan_executor_factory::make(
- _expCtx, std::move(ws), std::move(ps), collection, PlanYieldPolicy::YieldPolicy::NO_YIELD);
+ auto statusWithPlanExecutor =
+ plan_executor_factory::make(_expCtx,
+ std::move(ws),
+ std::move(ps),
+ collection.getCollection(),
+ PlanYieldPolicy::YieldPolicy::NO_YIELD);
ASSERT_OK(statusWithPlanExecutor.getStatus());
auto exec = std::move(statusWithPlanExecutor.getValue());
diff --git a/src/mongo/dbtests/query_stage_count_scan.cpp b/src/mongo/dbtests/query_stage_count_scan.cpp
index 422d37161e0..cf6f4005405 100644
--- a/src/mongo/dbtests/query_stage_count_scan.cpp
+++ b/src/mongo/dbtests/query_stage_count_scan.cpp
@@ -89,7 +89,7 @@ public:
return countWorks;
}
- const Collection* getCollection() {
+ CollectionPtr getCollection() {
return CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, ns());
}
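Note the changed return type here: the helper now hands back a CollectionPtr by value rather than a raw const Collection*, matching what lookupCollectionByNamespace() returns. Call sites in these tests then choose between owning a copy and borrowing a reference; a sketch, hedged as an illustration of the two spellings this diff uses:

    // Owning copy: needed when the variable may later be reassigned, e.g. by
    // db->createCollection() in the WriteUnitOfWork blocks elsewhere in the diff.
    CollectionPtr coll =
        CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, ns());

    // Borrowed reference: fine when the handle is only read for the scope.
    const CollectionPtr& ref = ctx.getCollection();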
diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp
index b362e23516f..7bdd12bb81e 100644
--- a/src/mongo/dbtests/query_stage_delete.cpp
+++ b/src/mongo/dbtests/query_stage_delete.cpp
@@ -81,7 +81,7 @@ public:
_client.remove(nss.ns(), obj);
}
- void getRecordIds(const Collection* collection,
+ void getRecordIds(const CollectionPtr& collection,
CollectionScanParams::Direction direction,
vector<RecordId>* out) {
WorkingSet ws;
@@ -133,7 +133,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
- const Collection* coll = ctx.getCollection();
+ const CollectionPtr& coll = ctx.getCollection();
ASSERT(coll);
// Get the RecordIds that would be returned by an in-order scan.
@@ -194,7 +194,7 @@ public:
void run() {
// Various variables we'll need.
dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
- const Collection* coll = ctx.getCollection();
+ const CollectionPtr& coll = ctx.getCollection();
ASSERT(coll);
const int targetDocIndex = 0;
const BSONObj query = BSON("foo" << BSON("$gte" << targetDocIndex));
diff --git a/src/mongo/dbtests/query_stage_distinct.cpp b/src/mongo/dbtests/query_stage_distinct.cpp
index 5c6f026cbbb..e79b260493d 100644
--- a/src/mongo/dbtests/query_stage_distinct.cpp
+++ b/src/mongo/dbtests/query_stage_distinct.cpp
@@ -126,7 +126,7 @@ public:
addIndex(BSON("a" << 1));
AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- const Collection* coll = ctx.getCollection();
+ const CollectionPtr& coll = ctx.getCollection();
// Set up the distinct stage.
std::vector<const IndexDescriptor*> indexes;
@@ -192,7 +192,7 @@ public:
addIndex(BSON("a" << 1));
AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- const Collection* coll = ctx.getCollection();
+ const CollectionPtr& coll = ctx.getCollection();
// Set up the distinct stage.
std::vector<const IndexDescriptor*> indexes;
@@ -259,7 +259,7 @@ public:
addIndex(BSON("a" << 1 << "b" << 1));
AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- const Collection* coll = ctx.getCollection();
+ const CollectionPtr& coll = ctx.getCollection();
std::vector<const IndexDescriptor*> indices;
coll->getIndexCatalog()->findIndexesByKeyPattern(
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index 4d040923e34..87ff271b8c5 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -62,7 +62,7 @@ public:
_client.dropCollection(ns());
}
- void getRecordIds(set<RecordId>* out, const Collection* coll) {
+ void getRecordIds(set<RecordId>* out, const CollectionPtr& coll) {
auto cursor = coll->getCursor(&_opCtx);
while (auto record = cursor->next()) {
out->insert(record->id);
@@ -102,7 +102,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
@@ -169,7 +169,7 @@ public:
Lock::DBLock lk(&_opCtx, nss().db(), MODE_X);
OldClientContext ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 2e386c1b114..90a23259594 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -72,7 +72,7 @@ public:
ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), obj));
}
- const IndexDescriptor* getIndex(const BSONObj& obj, const Collection* coll) {
+ const IndexDescriptor* getIndex(const BSONObj& obj, const CollectionPtr& coll) {
std::vector<const IndexDescriptor*> indexes;
coll->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, obj, false, &indexes);
return indexes.empty() ? nullptr : indexes[0];
@@ -101,7 +101,7 @@ public:
_client.update(ns(), predicate, update);
}
- void getRecordIds(set<RecordId>* out, const Collection* coll) {
+ void getRecordIds(set<RecordId>* out, const CollectionPtr& coll) {
auto cursor = coll->getCursor(&_opCtx);
while (auto record = cursor->next()) {
out->insert(record->id);
@@ -148,7 +148,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
@@ -218,7 +218,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
@@ -287,7 +287,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
@@ -357,7 +357,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
@@ -431,7 +431,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
@@ -499,7 +499,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
@@ -557,7 +557,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
@@ -677,7 +677,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
@@ -780,7 +780,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
@@ -852,7 +852,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp
index 3d77785ac5c..9ee623edb87 100644
--- a/src/mongo/dbtests/query_stage_multiplan.cpp
+++ b/src/mongo/dbtests/query_stage_multiplan.cpp
@@ -138,7 +138,7 @@ std::unique_ptr<CanonicalQuery> makeCanonicalQuery(OperationContext* opCtx,
}
unique_ptr<PlanStage> getIxScanPlan(ExpressionContext* expCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
WorkingSet* sharedWs,
int desiredFooValue) {
std::vector<const IndexDescriptor*> indexes;
@@ -168,7 +168,7 @@ unique_ptr<MatchExpression> makeMatchExpressionFromFilter(ExpressionContext* exp
unique_ptr<PlanStage> getCollScanPlan(ExpressionContext* expCtx,
- const Collection* coll,
+ const CollectionPtr& coll,
WorkingSet* sharedWs,
MatchExpression* matchExpr) {
CollectionScanParams csparams;
@@ -181,7 +181,7 @@ unique_ptr<PlanStage> getCollScanPlan(ExpressionContext* expCtx,
std::unique_ptr<MultiPlanStage> runMultiPlanner(ExpressionContext* expCtx,
const NamespaceString& nss,
- const Collection* coll,
+ const CollectionPtr& coll,
int desiredFooValue) {
// Plan 0: IXScan over foo == desiredFooValue
// Every call to work() returns something so this should clearly win (by current scoring
@@ -227,7 +227,7 @@ TEST_F(QueryStageMultiPlanTest, MPSCollectionScanVsHighlySelectiveIXScan) {
addIndex(BSON("foo" << 1));
AutoGetCollectionForReadCommand ctx(_opCtx.get(), nss);
- const Collection* coll = ctx.getCollection();
+ const CollectionPtr& coll = ctx.getCollection();
// Plan 0: IXScan over foo == 7
// Every call to work() returns something so this should clearly win (by current scoring
@@ -288,7 +288,7 @@ TEST_F(QueryStageMultiPlanTest, MPSDoesNotCreateActiveCacheEntryImmediately) {
addIndex(BSON("foo" << 1));
AutoGetCollectionForReadCommand ctx(_opCtx.get(), nss);
- const Collection* coll = ctx.getCollection();
+ const CollectionPtr& coll = ctx.getCollection();
const auto cq = makeCanonicalQuery(_opCtx.get(), nss, BSON("foo" << 7));
@@ -343,7 +343,7 @@ TEST_F(QueryStageMultiPlanTest, MPSDoesCreatesActiveEntryWhenInactiveEntriesDisa
addIndex(BSON("foo" << 1));
AutoGetCollectionForReadCommand ctx(_opCtx.get(), nss);
- const Collection* coll = ctx.getCollection();
+ const CollectionPtr& coll = ctx.getCollection();
const auto cq = makeCanonicalQuery(_opCtx.get(), nss, BSON("foo" << 7));
@@ -370,8 +370,7 @@ TEST_F(QueryStageMultiPlanTest, MPSBackupPlan) {
addIndex(BSON("a" << 1));
addIndex(BSON("b" << 1));
- AutoGetCollectionForReadCommand ctx(_opCtx.get(), nss);
- const Collection* collection = ctx.getCollection();
+ AutoGetCollectionForReadCommand collection(_opCtx.get(), nss);
// Query for both 'a' and 'b' and sort on 'b'.
auto qr = std::make_unique<QueryRequest>(nss);
@@ -388,7 +387,7 @@ TEST_F(QueryStageMultiPlanTest, MPSBackupPlan) {
// Get planner params.
QueryPlannerParams plannerParams;
- fillOutPlannerParams(_opCtx.get(), collection, cq.get(), &plannerParams);
+ fillOutPlannerParams(_opCtx.get(), collection.getCollection(), cq.get(), &plannerParams);
// Plan.
auto statusWithSolutions = QueryPlanner::plan(*cq, plannerParams);
@@ -400,12 +399,13 @@ TEST_F(QueryStageMultiPlanTest, MPSBackupPlan) {
ASSERT_EQUALS(solutions.size(), 3U);
// Fill out the MultiPlanStage.
- unique_ptr<MultiPlanStage> mps(new MultiPlanStage(_expCtx.get(), collection, cq.get()));
+ unique_ptr<MultiPlanStage> mps(
+ new MultiPlanStage(_expCtx.get(), collection.getCollection(), cq.get()));
unique_ptr<WorkingSet> ws(new WorkingSet());
// Put each solution from the planner into the MPR.
for (size_t i = 0; i < solutions.size(); ++i) {
auto&& root = stage_builder::buildClassicExecutableTree(
- _opCtx.get(), collection, *cq, *solutions[i], ws.get());
+ _opCtx.get(), collection.getCollection(), *cq, *solutions[i], ws.get());
mps->addPlan(std::move(solutions[i]), std::move(root), ws.get());
}
@@ -546,7 +546,7 @@ TEST_F(QueryStageMultiPlanTest, MPSSummaryStats) {
addIndex(BSON("foo" << -1 << "bar" << 1));
AutoGetCollectionForReadCommand ctx(_opCtx.get(), nss);
- const Collection* coll = ctx.getCollection();
+ const CollectionPtr& coll = ctx.getCollection();
// Create the executor (Matching all documents).
auto qr = std::make_unique<QueryRequest>(nss);
@@ -586,20 +586,20 @@ TEST_F(QueryStageMultiPlanTest, ShouldReportErrorIfExceedsTimeLimitDuringPlannin
addIndex(BSON("foo" << 1));
addIndex(BSON("foo" << -1 << "bar" << 1));
- AutoGetCollectionForReadCommand ctx(_opCtx.get(), nss);
- const auto coll = ctx.getCollection();
+ AutoGetCollectionForReadCommand coll(_opCtx.get(), nss);
// Plan 0: IXScan over foo == 7
// Every call to work() returns something so this should clearly win (by current scoring
// at least).
unique_ptr<WorkingSet> sharedWs(new WorkingSet());
- unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(_expCtx.get(), coll, sharedWs.get(), 7);
+ unique_ptr<PlanStage> ixScanRoot =
+ getIxScanPlan(_expCtx.get(), coll.getCollection(), sharedWs.get(), 7);
// Make the filter.
BSONObj filterObj = BSON("foo" << 7);
unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(_expCtx.get(), filterObj);
unique_ptr<PlanStage> collScanRoot =
- getCollScanPlan(_expCtx.get(), coll, sharedWs.get(), filter.get());
+ getCollScanPlan(_expCtx.get(), coll.getCollection(), sharedWs.get(), filter.get());
auto queryRequest = std::make_unique<QueryRequest>(nss);
@@ -607,7 +607,7 @@ TEST_F(QueryStageMultiPlanTest, ShouldReportErrorIfExceedsTimeLimitDuringPlannin
auto canonicalQuery =
uassertStatusOK(CanonicalQuery::canonicalize(opCtx(), std::move(queryRequest)));
MultiPlanStage multiPlanStage(
- _expCtx.get(), ctx.getCollection(), canonicalQuery.get(), PlanCachingMode::NeverCache);
+ _expCtx.get(), coll.getCollection(), canonicalQuery.get(), PlanCachingMode::NeverCache);
multiPlanStage.addPlan(createQuerySolution(), std::move(ixScanRoot), sharedWs.get());
multiPlanStage.addPlan(createQuerySolution(), std::move(collScanRoot), sharedWs.get());
@@ -627,27 +627,27 @@ TEST_F(QueryStageMultiPlanTest, ShouldReportErrorIfKilledDuringPlanning) {
addIndex(BSON("foo" << 1));
addIndex(BSON("foo" << -1 << "bar" << 1));
- AutoGetCollectionForReadCommand ctx(_opCtx.get(), nss);
- const auto coll = ctx.getCollection();
+ AutoGetCollectionForReadCommand coll(_opCtx.get(), nss);
// Plan 0: IXScan over foo == 7
// Every call to work() returns something so this should clearly win (by current scoring
// at least).
unique_ptr<WorkingSet> sharedWs(new WorkingSet());
- unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(_expCtx.get(), coll, sharedWs.get(), 7);
+ unique_ptr<PlanStage> ixScanRoot =
+ getIxScanPlan(_expCtx.get(), coll.getCollection(), sharedWs.get(), 7);
// Plan 1: CollScan.
BSONObj filterObj = BSON("foo" << 7);
unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(_expCtx.get(), filterObj);
unique_ptr<PlanStage> collScanRoot =
- getCollScanPlan(_expCtx.get(), coll, sharedWs.get(), filter.get());
+ getCollScanPlan(_expCtx.get(), coll.getCollection(), sharedWs.get(), filter.get());
auto queryRequest = std::make_unique<QueryRequest>(nss);
queryRequest->setFilter(BSON("foo" << BSON("$gte" << 0)));
auto canonicalQuery =
uassertStatusOK(CanonicalQuery::canonicalize(opCtx(), std::move(queryRequest)));
MultiPlanStage multiPlanStage(
- _expCtx.get(), ctx.getCollection(), canonicalQuery.get(), PlanCachingMode::NeverCache);
+ _expCtx.get(), coll.getCollection(), canonicalQuery.get(), PlanCachingMode::NeverCache);
multiPlanStage.addPlan(createQuerySolution(), std::move(ixScanRoot), sharedWs.get());
multiPlanStage.addPlan(createQuerySolution(), std::move(collScanRoot), sharedWs.get());
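The multi-plan tests follow the same pattern: the MultiPlanStage and both candidate plans are now fed the lock guard's CollectionPtr instead of a raw pointer, and nothing else about the two-plan race changes. A condensed sketch, using only the calls shown in the hunks above:

    AutoGetCollectionForReadCommand coll(_opCtx.get(), nss);

    unique_ptr<WorkingSet> sharedWs(new WorkingSet());
    unique_ptr<PlanStage> ixScanRoot =
        getIxScanPlan(_expCtx.get(), coll.getCollection(), sharedWs.get(), 7);
    unique_ptr<PlanStage> collScanRoot =
        getCollScanPlan(_expCtx.get(), coll.getCollection(), sharedWs.get(), filter.get());

    MultiPlanStage multiPlanStage(
        _expCtx.get(), coll.getCollection(), canonicalQuery.get(), PlanCachingMode::NeverCache);
    multiPlanStage.addPlan(createQuerySolution(), std::move(ixScanRoot), sharedWs.get());
    multiPlanStage.addPlan(createQuerySolution(), std::move(collScanRoot), sharedWs.get());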
diff --git a/src/mongo/dbtests/query_stage_near.cpp b/src/mongo/dbtests/query_stage_near.cpp
index ca9fe26c240..3c99ad3dfcf 100644
--- a/src/mongo/dbtests/query_stage_near.cpp
+++ b/src/mongo/dbtests/query_stage_near.cpp
@@ -67,14 +67,14 @@ public:
ASSERT_OK(dbtests::createIndex(_opCtx, kTestNamespace, kTestKeyPattern));
_autoColl.emplace(_opCtx, NamespaceString{kTestNamespace});
- auto* coll = _autoColl->getCollection();
+ const auto& coll = _autoColl->getCollection();
ASSERT(coll);
_mockGeoIndex = coll->getIndexCatalog()->findIndexByKeyPatternAndOptions(
_opCtx, kTestKeyPattern, _makeMinimalIndexSpec(kTestKeyPattern));
ASSERT(_mockGeoIndex);
}
- const Collection* getCollection() const {
+ const CollectionPtr& getCollection() const {
return _autoColl->getCollection();
}
@@ -112,7 +112,7 @@ public:
MockNearStage(const boost::intrusive_ptr<ExpressionContext>& expCtx,
WorkingSet* workingSet,
- const Collection* coll,
+ const CollectionPtr& coll,
const IndexDescriptor* indexDescriptor)
: NearStage(expCtx.get(),
"MOCK_DISTANCE_SEARCH_STAGE",
@@ -128,7 +128,7 @@ public:
std::unique_ptr<CoveredInterval> nextInterval(OperationContext* opCtx,
WorkingSet* workingSet,
- const Collection* collection) final {
+ const CollectionPtr& collection) final {
if (_pos == static_cast<int>(_intervals.size()))
return nullptr;
@@ -234,7 +234,7 @@ TEST_F(QueryStageNearTest, EmptyResults) {
WorkingSet workingSet;
AutoGetCollectionForRead autoColl(_opCtx, NamespaceString{kTestNamespace});
- auto* coll = autoColl.getCollection();
+ const auto& coll = autoColl.getCollection();
ASSERT(coll);
MockNearStage nearStage(_expCtx.get(), &workingSet, coll, _mockGeoIndex);
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index 6b09933f62a..16640fb9e5e 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -76,7 +76,7 @@ public:
_client.insert(ns(), obj);
}
- void getRecordIds(set<RecordId>* out, const Collection* coll) {
+ void getRecordIds(set<RecordId>* out, const CollectionPtr& coll) {
auto cursor = coll->getCursor(&_opCtx);
while (auto record = cursor->next()) {
out->insert(record->id);
@@ -86,7 +86,7 @@ public:
/**
* We feed a mix of (key, unowned, owned) data to the sort stage.
*/
- void insertVarietyOfObjects(WorkingSet* ws, QueuedDataStage* ms, const Collection* coll) {
+ void insertVarietyOfObjects(WorkingSet* ws, QueuedDataStage* ms, const CollectionPtr& coll) {
set<RecordId> recordIds;
getRecordIds(&recordIds, coll);
@@ -111,7 +111,7 @@ public:
* which is owned by the caller.
*/
unique_ptr<PlanExecutor, PlanExecutor::Deleter> makePlanExecutorWithSortStage(
- const Collection* coll) {
+ const CollectionPtr& coll) {
// Build the mock scan stage which feeds the data.
auto ws = std::make_unique<WorkingSet>();
_workingSet = ws.get();
@@ -152,7 +152,7 @@ public:
* If extAllowed is true, sorting will use external sorting if available.
* If limit is not zero, we limit the output of the sort stage to 'limit' results.
*/
- void sortAndCheck(int direction, const Collection* coll) {
+ void sortAndCheck(int direction, const CollectionPtr& coll) {
auto ws = std::make_unique<WorkingSet>();
auto queuedDataStage = std::make_unique<QueuedDataStage>(_expCtx.get(), ws.get());
@@ -259,7 +259,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
@@ -282,7 +282,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
@@ -314,7 +314,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
@@ -340,7 +340,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
@@ -395,7 +395,7 @@ public:
coll->updateDocument(&_opCtx, *it, oldDoc, newDoc(oldDoc), false, nullptr, &args);
wuow.commit();
}
- exec->restoreState();
+ exec->restoreState(&coll);
// Read the rest of the data from the queued data stage.
while (!queuedDataStage->isEOF()) {
@@ -414,7 +414,7 @@ public:
wuow.commit();
}
}
- exec->restoreState();
+ exec->restoreState(&coll);
// Verify that it's sorted, the right number of documents are returned, and they're all
// in the expected range.
@@ -455,7 +455,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
@@ -499,7 +499,7 @@ public:
coll->deleteDocument(&_opCtx, kUninitializedStmtId, *it++, nullOpDebug);
wuow.commit();
}
- exec->restoreState();
+ exec->restoreState(&coll);
// Read the rest of the data from the queued data stage.
while (!queuedDataStage->isEOF()) {
@@ -516,7 +516,7 @@ public:
wuow.commit();
}
}
- exec->restoreState();
+ exec->restoreState(&coll);
// Regardless of storage engine, all the documents should come back with their objects
int count = 0;
@@ -559,7 +559,7 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
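Unlike the invalidation tests, which restore with nullptr, these sort tests mutate documents while the executor is saved and the test itself still holds the CollectionPtr, so they pass that handle back on restore. A minimal sketch of the pattern, assuming coll and exec from the hunks above:

    exec->saveState();
    {
        // Delete documents out from under the saved executor.
        WriteUnitOfWork wuow(&_opCtx);
        coll->deleteDocument(&_opCtx, kUninitializedStmtId, *it++, nullOpDebug);
        wuow.commit();
    }
    // The caller owns the handle, so it is passed back in rather than nullptr.
    exec->restoreState(&coll);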
diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp
index 51c00d629b8..facd2746bbe 100644
--- a/src/mongo/dbtests/query_stage_subplan.cpp
+++ b/src/mongo/dbtests/query_stage_subplan.cpp
@@ -139,7 +139,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanGeo2dOr) {
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
- const Collection* collection = ctx.getCollection();
+ CollectionPtr collection = ctx.getCollection();
// Get planner params.
QueryPlannerParams plannerParams;
@@ -168,7 +168,7 @@ void assertSubplanFromCache(QueryStageSubplanTest* test, const dbtests::WriteCon
test->insert(BSON("a" << 1 << "b" << i << "c" << i));
}
- const Collection* collection = ctx.getCollection();
+ CollectionPtr collection = ctx.getCollection();
auto qr = std::make_unique<QueryRequest>(nss);
qr->setFilter(query);
@@ -252,7 +252,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanDontCacheZeroResults) {
// one relevant index.
BSONObj query = fromjson("{$or: [{a: 1, b: 15}, {c: 1}]}");
- const Collection* collection = ctx.getCollection();
+ CollectionPtr collection = ctx.getCollection();
auto qr = std::make_unique<QueryRequest>(nss);
qr->setFilter(query);
@@ -308,7 +308,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanDontCacheTies) {
// ranking. For the second branch it's because there is only one relevant index.
BSONObj query = fromjson("{$or: [{a: 1, e: 1}, {d: 1}]}");
- const Collection* collection = ctx.getCollection();
+ CollectionPtr collection = ctx.getCollection();
auto qr = std::make_unique<QueryRequest>(nss);
qr->setFilter(query);
@@ -487,7 +487,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanPlanRootedOrNE) {
qr->setSort(BSON("d" << 1));
auto cq = unittest::assertGet(CanonicalQuery::canonicalize(opCtx(), std::move(qr)));
- const Collection* collection = ctx.getCollection();
+ CollectionPtr collection = ctx.getCollection();
QueryPlannerParams plannerParams;
fillOutPlannerParams(opCtx(), collection, cq.get(), &plannerParams);
@@ -571,7 +571,7 @@ TEST_F(QueryStageSubplanTest, ShouldReportErrorIfKilledDuringPlanning) {
}
TEST_F(QueryStageSubplanTest, ShouldThrowOnRestoreIfIndexDroppedBeforePlanSelection) {
- const Collection* collection = nullptr;
+ CollectionPtr collection = nullptr;
{
dbtests::WriteContextForTests ctx{opCtx(), nss.ns()};
addIndex(BSON("p1" << 1 << "opt1" << 1));
@@ -615,7 +615,7 @@ TEST_F(QueryStageSubplanTest, ShouldThrowOnRestoreIfIndexDroppedBeforePlanSelect
}
TEST_F(QueryStageSubplanTest, ShouldNotThrowOnRestoreIfIndexDroppedAfterPlanSelection) {
- const Collection* collection = nullptr;
+ CollectionPtr collection = nullptr;
{
dbtests::WriteContextForTests ctx{opCtx(), nss.ns()};
addIndex(BSON("p1" << 1 << "opt1" << 1));
diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp
index bb79ac22a1c..50948a6cf32 100644
--- a/src/mongo/dbtests/query_stage_tests.cpp
+++ b/src/mongo/dbtests/query_stage_tests.cpp
@@ -123,8 +123,7 @@ public:
}
const IndexDescriptor* getIndex(const BSONObj& obj) {
- AutoGetCollectionForReadCommand ctx(&_opCtx, NamespaceString(ns()));
- const Collection* collection = ctx.getCollection();
+ AutoGetCollectionForReadCommand collection(&_opCtx, NamespaceString(ns()));
std::vector<const IndexDescriptor*> indexes;
collection->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, obj, false, &indexes);
return indexes.empty() ? nullptr : indexes[0];
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index 439531d9ff9..084012ca61c 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -125,7 +125,7 @@ public:
* Uses a forward collection scan stage to get the docs, and populates 'out' with
* the results.
*/
- void getCollContents(const Collection* collection, vector<BSONObj>* out) {
+ void getCollContents(const CollectionPtr& collection, vector<BSONObj>* out) {
WorkingSet ws;
CollectionScanParams params;
@@ -145,7 +145,7 @@ public:
}
}
- void getRecordIds(const Collection* collection,
+ void getRecordIds(const CollectionPtr& collection,
CollectionScanParams::Direction direction,
vector<RecordId>* out) {
WorkingSet ws;
@@ -204,7 +204,7 @@ public:
CurOp& curOp = *CurOp::get(_opCtx);
OpDebug* opDebug = &curOp.debug();
UpdateDriver driver(_expCtx);
- const Collection* collection = ctx.getCollection();
+ CollectionPtr collection = ctx.getCollection();
ASSERT(collection);
// Collection should be empty.
@@ -244,11 +244,10 @@ public:
// Verify the contents of the resulting collection.
{
- AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- const Collection* collection = ctx.getCollection();
+ AutoGetCollectionForReadCommand collection(&_opCtx, nss);
vector<BSONObj> objs;
- getCollContents(collection, &objs);
+ getCollContents(collection.getCollection(), &objs);
// Expect a single document, {_id: 0, x: 1, y: 2}.
ASSERT_EQUALS(1U, objs.size());
@@ -276,7 +275,7 @@ public:
CurOp& curOp = *CurOp::get(_opCtx);
OpDebug* opDebug = &curOp.debug();
UpdateDriver driver(_expCtx);
- const Collection* coll =
+ const CollectionPtr& coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss);
ASSERT(coll);
@@ -352,11 +351,10 @@ public:
// Check the contents of the collection.
{
- AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- const Collection* collection = ctx.getCollection();
+ AutoGetCollectionForReadCommand collection(&_opCtx, nss);
vector<BSONObj> objs;
- getCollContents(collection, &objs);
+ getCollContents(collection.getCollection(), &objs);
// Verify that the collection now has 9 docs (one was deleted).
ASSERT_EQUALS(9U, objs.size());
@@ -388,7 +386,7 @@ public:
// Various variables we'll need.
dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
OpDebug* opDebug = &CurOp::get(_opCtx)->debug();
- const Collection* coll = ctx.getCollection();
+ const CollectionPtr& coll = ctx.getCollection();
ASSERT(coll);
auto request = UpdateRequest();
request.setNamespaceString(nss);
@@ -481,7 +479,7 @@ public:
// Various variables we'll need.
dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
OpDebug* opDebug = &CurOp::get(_opCtx)->debug();
- const Collection* coll = ctx.getCollection();
+ const CollectionPtr& coll = ctx.getCollection();
ASSERT(coll);
auto request = UpdateRequest();
request.setNamespaceString(nss);
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 4ba7d36c94c..2bd9ba0a18c 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -74,7 +74,7 @@ public:
}
collection = _database->createCollection(&_opCtx, nss());
wunit.commit();
- _collection = collection;
+ _collection = std::move(collection);
}
addIndex(IndexSpec().addKey("a").unique(false));
@@ -129,7 +129,7 @@ protected:
wunit.commit();
}
abortOnExit.dismiss();
- _collection = collection.get();
+ _collection = collection.get().detached();
}
void insert(const char* s) {
@@ -161,7 +161,7 @@ protected:
OldClientContext _context;
Database* _database;
- const Collection* _collection;
+ CollectionPtr _collection;
};
class FindOneOr : public Base {
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index 4a11b748c29..0bff979ed11 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -128,7 +128,7 @@ public:
dbtests::WriteContextForTests ctx(&_opCtx, ns());
WriteUnitOfWork wuow(&_opCtx);
- const Collection* c =
+ CollectionPtr c =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!c) {
c = ctx.db()->createCollection(&_opCtx, nss());
@@ -201,7 +201,7 @@ protected:
Lock::GlobalWrite lk(&_opCtx);
OldClientContext ctx(&_opCtx, ns());
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wunit(&_opCtx);
@@ -275,7 +275,7 @@ protected:
OldClientContext ctx(&_opCtx, ns());
WriteUnitOfWork wunit(&_opCtx);
Database* db = ctx.db();
- const Collection* coll =
+ CollectionPtr coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
coll = db->createCollection(&_opCtx, nss());
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index 73160539cac..80c40965418 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -62,7 +62,8 @@ void dropDatabase(OperationContext* opCtx, const NamespaceString& nss) {
}
}
bool collectionExists(OperationContext* opCtx, OldClientContext* ctx, const string& ns) {
- return CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, NamespaceString(ns));
+ return (bool)CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx,
+ NamespaceString(ns));
}
void createCollection(OperationContext* opCtx, const NamespaceString& nss) {
diff --git a/src/mongo/dbtests/storage_timestamp_tests.cpp b/src/mongo/dbtests/storage_timestamp_tests.cpp
index b2d9a744e77..fcacf76374d 100644
--- a/src/mongo/dbtests/storage_timestamp_tests.cpp
+++ b/src/mongo/dbtests/storage_timestamp_tests.cpp
@@ -260,7 +260,7 @@ public:
});
}
- void insertDocument(const Collection* coll, const InsertStatement& stmt) {
+ void insertDocument(const CollectionPtr& coll, const InsertStatement& stmt) {
// Insert some documents.
OpDebug* const nullOpDebug = nullptr;
const bool fromMigrate = false;
@@ -306,7 +306,7 @@ public:
abortOnExit.dismiss();
}
- std::int32_t itCount(const Collection* coll) {
+ std::int32_t itCount(const CollectionPtr& coll) {
std::uint64_t ret = 0;
auto cursor = coll->getRecordStore()->getCursor(_opCtx);
while (cursor->next() != boost::none) {
@@ -316,7 +316,7 @@ public:
return ret;
}
- BSONObj findOne(const Collection* coll) {
+ BSONObj findOne(const CollectionPtr& coll) {
auto optRecord = coll->getRecordStore()->getCursor(_opCtx)->next();
if (optRecord == boost::none) {
// Print a stack trace to help disambiguate which `findOne` failed.
@@ -385,7 +385,7 @@ public:
const Timestamp& ts,
const repl::MinValidDocument& expectedDoc) {
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
- const Collection* coll = autoColl.getCollection();
+ const CollectionPtr& coll = autoColl.getCollection();
OneOffRead oor(_opCtx, ts);
@@ -405,7 +405,7 @@ public:
<< ". Expected: " << expectedDoc.toBSON() << ". Found: " << doc.toBSON();
}
- void assertDocumentAtTimestamp(const Collection* coll,
+ void assertDocumentAtTimestamp(const CollectionPtr& coll,
const Timestamp& ts,
const BSONObj& expectedDoc) {
OneOffRead oor(_opCtx, ts);
@@ -421,7 +421,7 @@ public:
}
}
- void assertFilteredDocumentAtTimestamp(const Collection* coll,
+ void assertFilteredDocumentAtTimestamp(const CollectionPtr& coll,
const BSONObj& query,
const Timestamp& ts,
boost::optional<const BSONObj&> expectedDoc) {
@@ -669,7 +669,7 @@ public:
}
void assertMultikeyPaths(OperationContext* opCtx,
- const Collection* collection,
+ const CollectionPtr& collection,
StringData indexName,
Timestamp ts,
bool shouldBeMultikey,
@@ -1207,10 +1207,10 @@ public:
{
AutoGetCollectionForReadCommand autoColl1(_opCtx, nss1);
- auto coll1 = autoColl1.getCollection();
+ const auto& coll1 = autoColl1.getCollection();
ASSERT(coll1);
AutoGetCollectionForReadCommand autoColl2(_opCtx, nss2);
- auto coll2 = autoColl2.getCollection();
+ const auto& coll2 = autoColl2.getCollection();
ASSERT(coll2);
assertDocumentAtTimestamp(coll1, pastTs, BSONObj());
@@ -1761,7 +1761,7 @@ public:
txnParticipant.stashTransactionResources(_opCtx);
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
- auto coll = autoColl.getCollection();
+ const auto& coll = autoColl.getCollection();
// Make sure the transaction committed and its writes were timestamped correctly.
assertDocumentAtTimestamp(coll, presentTs, BSONObj());
@@ -2435,8 +2435,7 @@ public:
// Assert that the index build is removed from config.system.indexBuilds collection after
// completion.
{
- AutoGetCollectionForRead autoColl(_opCtx, NamespaceString::kIndexBuildEntryNamespace);
- auto collection = autoColl.getCollection();
+ AutoGetCollectionForRead collection(_opCtx, NamespaceString::kIndexBuildEntryNamespace);
ASSERT_TRUE(collection);
// At the commitIndexBuild entry time, the index build should still be present in the
@@ -2444,12 +2443,12 @@ public:
{
OneOffRead oor(_opCtx, indexBComplete);
// Fails if the collection is empty.
- findOne(collection);
+ findOne(collection.getCollection());
}
// After the index build has finished, we should not see the doc in the indexBuilds
// collection.
- ASSERT_EQUALS(0, itCount(collection));
+ ASSERT_EQUALS(0, itCount(collection.getCollection()));
}
}
};
@@ -2661,8 +2660,7 @@ public:
// Assert that the index build is removed from config.system.indexBuilds collection after
// completion.
{
- AutoGetCollectionForRead autoColl(_opCtx, NamespaceString::kIndexBuildEntryNamespace);
- auto collection = autoColl.getCollection();
+ AutoGetCollectionForRead collection(_opCtx, NamespaceString::kIndexBuildEntryNamespace);
ASSERT_TRUE(collection);
// At the commitIndexBuild entry time, the index build should still be present in the
@@ -2670,12 +2668,12 @@ public:
{
OneOffRead oor(_opCtx, indexAbortTs);
// Fails if the collection is empty.
- findOne(collection);
+ findOne(collection.getCollection());
}
// After the index build has finished, we should not see the doc in the indexBuilds
// collection.
- ASSERT_EQUALS(0, itCount(collection));
+ ASSERT_EQUALS(0, itCount(collection.getCollection()));
}
}
};
@@ -3188,13 +3186,12 @@ public:
{
// Sanity check everything exists.
- AutoGetCollectionForReadCommand autoColl(_opCtx, nss);
- auto coll = autoColl.getCollection();
+ AutoGetCollectionForReadCommand coll(_opCtx, nss);
ASSERT(coll);
const auto currentTime = _clock->getTime();
const auto presentTs = currentTime.clusterTime().asTimestamp();
- assertDocumentAtTimestamp(coll, presentTs, doc);
+ assertDocumentAtTimestamp(coll.getCollection(), presentTs, doc);
}
// Simulate a scenario where the node is a primary, but does not accept writes. This is
@@ -3414,8 +3411,7 @@ public:
reset(nss);
UUID ui = UUID::gen();
{
- AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
- auto coll = autoColl.getCollection();
+ AutoGetCollection coll(_opCtx, nss, LockMode::MODE_IX);
ASSERT(coll);
ui = coll->uuid();
}
@@ -3451,7 +3447,7 @@ public:
{
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IS);
- auto coll = autoColl.getCollection();
+ const auto& coll = autoColl.getCollection();
assertDocumentAtTimestamp(coll, presentTs, BSONObj());
assertDocumentAtTimestamp(coll, beforeTxnTs, BSONObj());
assertDocumentAtTimestamp(coll, commitEntryTs, BSONObj());
@@ -3517,7 +3513,7 @@ public:
assertNoStartOpTime();
{
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
- auto coll = autoColl.getCollection();
+ const auto& coll = autoColl.getCollection();
assertDocumentAtTimestamp(coll, presentTs, BSONObj());
assertDocumentAtTimestamp(coll, beforeTxnTs, BSONObj());
assertDocumentAtTimestamp(coll, commitEntryTs, doc);
@@ -3577,7 +3573,7 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
const BSONObj query1 = BSON("_id" << 1);
const BSONObj query2 = BSON("_id" << 2);
- auto coll = autoColl.getCollection();
+ const auto& coll = autoColl.getCollection();
// Collection should be empty until commit, at which point both documents
// should show up.
@@ -3675,7 +3671,7 @@ public:
const auto commitFilter = BSON("ts" << commitEntryTs);
{
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IS);
- auto coll = autoColl.getCollection();
+ const auto& coll = autoColl.getCollection();
assertDocumentAtTimestamp(coll, presentTs, BSONObj());
assertDocumentAtTimestamp(coll, beforeTxnTs, BSONObj());
assertDocumentAtTimestamp(coll, firstOplogEntryTs, BSONObj());
@@ -3715,7 +3711,7 @@ public:
txnParticipant.stashTransactionResources(_opCtx);
{
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IS);
- auto coll = autoColl.getCollection();
+ const auto& coll = autoColl.getCollection();
assertDocumentAtTimestamp(coll, presentTs, BSONObj());
assertDocumentAtTimestamp(coll, beforeTxnTs, BSONObj());
assertDocumentAtTimestamp(coll, firstOplogEntryTs, BSONObj());
@@ -3763,7 +3759,7 @@ public:
txnParticipant.stashTransactionResources(_opCtx);
{
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
- auto coll = autoColl.getCollection();
+ const auto& coll = autoColl.getCollection();
assertDocumentAtTimestamp(coll, presentTs, BSONObj());
assertDocumentAtTimestamp(coll, beforeTxnTs, BSONObj());
assertDocumentAtTimestamp(coll, firstOplogEntryTs, BSONObj());
@@ -3915,8 +3911,7 @@ public:
UUID ui = UUID::gen();
{
- AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
- auto coll = autoColl.getCollection();
+ AutoGetCollection coll(_opCtx, nss, LockMode::MODE_IX);
ASSERT(coll);
ui = coll->uuid();
}
@@ -3974,7 +3969,7 @@ public:
{
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IS);
- auto coll = autoColl.getCollection();
+ const auto& coll = autoColl.getCollection();
assertDocumentAtTimestamp(coll, prepareTs, BSONObj());
assertDocumentAtTimestamp(coll, commitEntryTs, BSONObj());
@@ -4028,7 +4023,7 @@ public:
txnParticipant.stashTransactionResources(_opCtx);
{
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
- auto coll = autoColl.getCollection();
+ const auto& coll = autoColl.getCollection();
assertDocumentAtTimestamp(coll, presentTs, BSONObj());
assertDocumentAtTimestamp(coll, beforeTxnTs, BSONObj());
assertDocumentAtTimestamp(coll, prepareTs, BSONObj());
@@ -4076,7 +4071,7 @@ public:
{
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IS);
- auto coll = autoColl.getCollection();
+ const auto& coll = autoColl.getCollection();
assertDocumentAtTimestamp(coll, prepareTs, BSONObj());
assertDocumentAtTimestamp(coll, abortEntryTs, BSONObj());
@@ -4126,7 +4121,7 @@ public:
txnParticipant.stashTransactionResources(_opCtx);
{
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
- auto coll = autoColl.getCollection();
+ const auto& coll = autoColl.getCollection();
assertDocumentAtTimestamp(coll, presentTs, BSONObj());
assertDocumentAtTimestamp(coll, beforeTxnTs, BSONObj());
assertDocumentAtTimestamp(coll, prepareTs, BSONObj());
diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp
index 110fa706cce..e75eadbebf0 100644
--- a/src/mongo/dbtests/validate_tests.cpp
+++ b/src/mongo/dbtests/validate_tests.cpp
@@ -194,7 +194,7 @@ public:
// Create a new collection, insert records {_id: 1} and {_id: 2} and check it's valid.
lockDb(MODE_X);
- const Collection* coll;
+ CollectionPtr coll;
RecordId id1;
{
OpDebug* const nullOpDebug = nullptr;
@@ -256,7 +256,7 @@ public:
// Create a new collection, insert two documents.
lockDb(MODE_X);
- const Collection* coll;
+ CollectionPtr coll;
RecordId id1;
{
OpDebug* const nullOpDebug = nullptr;
@@ -328,7 +328,7 @@ public:
// Create a new collection, insert three records.
lockDb(MODE_X);
OpDebug* const nullOpDebug = nullptr;
- const Collection* coll;
+ CollectionPtr coll;
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
@@ -391,7 +391,7 @@ public:
// Create a new collection, insert records {_id: 1} and {_id: 2} and check it's valid.
lockDb(MODE_X);
OpDebug* const nullOpDebug = nullptr;
- const Collection* coll;
+ CollectionPtr coll;
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
@@ -471,7 +471,7 @@ public:
// Create a new collection, insert three records and check it's valid.
lockDb(MODE_X);
OpDebug* const nullOpDebug = nullptr;
- const Collection* coll;
+ CollectionPtr coll;
RecordId id1;
// {a: [{b: 1}, {c: 2}]}, {a: [{b: 2}, {c: 2}]}, {a: [{b: 1}, {c: 1}]}
auto doc1 = BSON("_id" << 1 << "a" << BSON_ARRAY(BSON("b" << 1) << BSON("c" << 2)));
@@ -557,7 +557,7 @@ public:
// Create a new collection, insert three records and check it's valid.
lockDb(MODE_X);
OpDebug* const nullOpDebug = nullptr;
- const Collection* coll;
+ CollectionPtr coll;
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
@@ -621,7 +621,7 @@ public:
// Create a new collection, insert three records and check it's valid.
lockDb(MODE_X);
OpDebug* const nullOpDebug = nullptr;
- const Collection* coll;
+ CollectionPtr coll;
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
@@ -692,7 +692,7 @@ public:
// field.
lockDb(MODE_X);
OpDebug* const nullOpDebug = nullptr;
- const Collection* coll;
+ CollectionPtr coll;
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
@@ -754,7 +754,7 @@ public:
// Create a new collection, insert five records and check it's valid.
lockDb(MODE_X);
OpDebug* const nullOpDebug = nullptr;
- const Collection* coll;
+ CollectionPtr coll;
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
@@ -839,7 +839,7 @@ public:
// Create a new collection, insert three records and check it's valid.
lockDb(MODE_X);
OpDebug* const nullOpDebug = nullptr;
- const Collection* coll;
+ CollectionPtr coll;
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
@@ -927,7 +927,7 @@ public:
// Create a new collection.
lockDb(MODE_X);
- const Collection* coll;
+ CollectionPtr coll;
{
WriteUnitOfWork wunit(&_opCtx);
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
@@ -1050,7 +1050,7 @@ public:
// Create a new collection.
lockDb(MODE_X);
- const Collection* coll;
+ CollectionPtr coll;
{
WriteUnitOfWork wunit(&_opCtx);
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
@@ -1148,7 +1148,7 @@ public:
// Create a new collection.
lockDb(MODE_X);
- const Collection* coll;
+ CollectionPtr coll;
{
WriteUnitOfWork wunit(&_opCtx);
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
@@ -1244,7 +1244,7 @@ public:
// Create a new collection.
lockDb(MODE_X);
- const Collection* coll;
+ CollectionPtr coll;
{
WriteUnitOfWork wunit(&_opCtx);
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
@@ -1361,7 +1361,7 @@ public:
// Create a new collection.
lockDb(MODE_X);
- const Collection* coll;
+ CollectionPtr coll;
{
WriteUnitOfWork wunit(&_opCtx);
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
@@ -1448,7 +1448,7 @@ public:
void run() {
// Create a new collection.
lockDb(MODE_X);
- const Collection* coll;
+ CollectionPtr coll;
{
WriteUnitOfWork wunit(&_opCtx);
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
@@ -1629,7 +1629,7 @@ public:
// Create a new collection.
lockDb(MODE_X);
- const Collection* coll;
+ CollectionPtr coll;
{
WriteUnitOfWork wunit(&_opCtx);
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
@@ -1807,7 +1807,7 @@ public:
void run() {
// Create a new collection.
lockDb(MODE_X);
- const Collection* coll;
+ CollectionPtr coll;
{
WriteUnitOfWork wunit(&_opCtx);
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
@@ -1962,7 +1962,7 @@ public:
// Create a new collection and insert a document.
lockDb(MODE_X);
- const Collection* coll;
+ CollectionPtr coll;
OpDebug* const nullOpDebug = nullptr;
{
WriteUnitOfWork wunit(&_opCtx);
@@ -2180,7 +2180,7 @@ public:
// Create a new collection and insert a non-multikey document.
lockDb(MODE_X);
- const Collection* coll;
+ CollectionPtr coll;
RecordId id1;
BSONObj doc = BSON("_id" << 1 << "a" << 1);
{
@@ -2358,7 +2358,7 @@ public:
// Create a new collection.
lockDb(MODE_X);
- const Collection* coll;
+ CollectionPtr coll;
{
WriteUnitOfWork wunit(&_opCtx);
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
@@ -2505,7 +2505,7 @@ public:
// Create a new collection.
lockDb(MODE_X);
- const Collection* coll;
+ CollectionPtr coll;
{
WriteUnitOfWork wunit(&_opCtx);
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
@@ -2708,7 +2708,7 @@ public:
// Create a new collection.
lockDb(MODE_X);
- const Collection* coll;
+ CollectionPtr coll;
{
WriteUnitOfWork wunit(&_opCtx);
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
@@ -2772,7 +2772,7 @@ public:
void run() {
// Create a new collection.
lockDb(MODE_X);
- const Collection* coll;
+ CollectionPtr coll;
{
WriteUnitOfWork wunit(&_opCtx);
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
@@ -2942,7 +2942,7 @@ public:
// Create a new collection and insert a non-multikey document.
lockDb(MODE_X);
- const Collection* coll;
+ CollectionPtr coll;
RecordId id1;
BSONObj doc = BSON("_id" << 1 << "a" << 1);
{
@@ -3164,7 +3164,7 @@ public:
// Create a new collection and insert a multikey document.
lockDb(MODE_X);
- const Collection* coll;
+ CollectionPtr coll;
RecordId id1;
BSONObj doc1 = BSON("_id" << 1 << "a" << BSON_ARRAY(1 << 2) << "b" << 1);
{
diff --git a/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp b/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp
index 5fd52a6ebf8..19b45551ccb 100644
--- a/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp
+++ b/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp
@@ -94,15 +94,14 @@ protected:
const NamespaceString& nss = kDefaultNSS,
const std::string& indexName = kDefaultIndexName) {
// Subsequent operations must take place under a collection lock.
- AutoGetCollectionForRead autoColl(opCtx(), nss);
- auto collection = autoColl.getCollection();
+ AutoGetCollectionForRead collection(opCtx(), nss);
// Verify whether or not the index has been marked as multikey.
ASSERT_EQ(expectIndexIsMultikey,
- getIndexDesc(collection, indexName)->getEntry()->isMultikey());
+ getIndexDesc(collection.getCollection(), indexName)->getEntry()->isMultikey());
// Obtain a cursor over the index, and confirm that the keys are present in order.
- auto indexCursor = getIndexCursor(collection, indexName);
+ auto indexCursor = getIndexCursor(collection.getCollection(), indexName);
KeyString::Value keyStringForSeek = IndexEntryComparison::makeKeyStringFromBSONKeyForSeek(
BSONObj(), KeyString::Version::V1, Ordering::make(BSONObj()), true, true);
@@ -144,9 +143,8 @@ protected:
}
ASSERT_EQ(expectedPaths.size(), expectedFieldRefs.size());
- AutoGetCollectionForRead autoColl(opCtx(), nss);
- auto collection = autoColl.getCollection();
- auto indexAccessMethod = getIndex(collection, indexName);
+ AutoGetCollectionForRead collection(opCtx(), nss);
+ auto indexAccessMethod = getIndex(collection.getCollection(), indexName);
MultikeyMetadataAccessStats stats;
auto wam = dynamic_cast<const WildcardAccessMethod*>(indexAccessMethod);
ASSERT(wam != nullptr);
@@ -231,17 +229,18 @@ protected:
return docs;
}
- const IndexDescriptor* getIndexDesc(const Collection* collection, const StringData indexName) {
+ const IndexDescriptor* getIndexDesc(const CollectionPtr& collection,
+ const StringData indexName) {
return collection->getIndexCatalog()->findIndexByName(opCtx(), indexName);
}
- const IndexAccessMethod* getIndex(const Collection* collection, const StringData indexName) {
+ const IndexAccessMethod* getIndex(const CollectionPtr& collection, const StringData indexName) {
return collection->getIndexCatalog()
->getEntry(getIndexDesc(collection, indexName))
->accessMethod();
}
- std::unique_ptr<SortedDataInterface::Cursor> getIndexCursor(const Collection* collection,
+ std::unique_ptr<SortedDataInterface::Cursor> getIndexCursor(const CollectionPtr& collection,
const StringData indexName) {
return getIndex(collection, indexName)->newCursor(opCtx());
}